Compare commits
fix/357_35 ... fix/398 (107 commits)

| SHA1 |
|---|
| 886c3e37ff |
| 38deb09aa9 |
| 6d326dce1f |
| 6c8e177d3e |
| 879a548345 |
| 64cd81d5ba |
| 61e75949c8 |
| a5d24bfad7 |
| 04595c0093 |
| 06fdb4889d |
| 38d4f9f918 |
| 63dc7c6983 |
| d57c6059fc |
| eb152f9fae |
| 1a8e98fec0 |
| 83a1c7ea31 |
| 79c63a7d3d |
| 753721eee0 |
| 09607296af |
| c88040084a |
| 2573eb7314 |
| 414c356446 |
| c38ded7a39 |
| 0cfed84cd0 |
| 9611bc9bcf |
| 6fe2a790fd |
| 5ea71837e7 |
| bf5ebc72b6 |
| a32c7bcbba |
| 98c6e79db6 |
| c962a9cd5a |
| 5d20a6d694 |
| cb4e8c543a |
| 2ba213de49 |
| 849aabb575 |
| dd33df2033 |
| 94eb2820fd |
| e49708770c |
| 5eafa01843 |
| 0c1477e087 |
| ef73d98873 |
| ec92c0ea6d |
| ee14b89868 |
| be68160cd9 |
| dbc15b1bc3 |
| dc6ce16e5a |
| 0c9d4ad6e2 |
| 13d57b77d4 |
| f9e5bd5bf0 |
| adf2b06efa |
| 80e3966e4e |
| 3662a057dc |
| bdf336f9bf |
| f6c310fa22 |
| d658894b7f |
| 783744b497 |
| de9ac41f76 |
| 060202e557 |
| 8d45ac14cc |
| 47ee2247c8 |
| c16c8d54db |
| 3e669a0739 |
| 02e175c8a0 |
| b4e98e7624 |
| 2392529092 |
| f9f5772d92 |
| 4267d7340e |
| dcf923551b |
| 69a5ac3a56 |
| 7b8c1ebdf1 |
| 580b623939 |
| ac21fbb181 |
| 588ae65dfd |
| 30acba39a5 |
| 3a5bb3dc45 |
| f42c0d956e |
| 0ed13fcf0f |
| afc87910e6 |
| b97eca9620 |
| f4aa8661c4 |
| 8f0ae3a341 |
| b5450bd221 |
| 88dbe4ea85 |
| f0b5956b54 |
| e5000246b3 |
| 9dacf1e530 |
| f248ed2875 |
| 4e6295885b |
| 2357232cae |
| 39d8115dda |
| bd71b04a9d |
| c0b03cd832 |
| 9b7c740145 |
| 4f929fb8da |
| 24ee87d14e |
| 55862628fb |
| fbd731f020 |
| a8b750ad75 |
| 1054b6d2f5 |
| 669ce41c2e |
| 7c4683012f |
| cfcdc1e342 |
| 07cf03a408 |
| dd17d2cbec |
| f3d14c6746 |
| 447332e558 |
| 9bbc19ae44 |

.github/pull_request_template.md (2 changed lines, vendored)

````
@@ -4,7 +4,7 @@

## 🔗 Related PR / Issue
Link: #
Fixes: #

## ✅ Prerequisites (**X** in brackets)

@@ -100,7 +100,7 @@ apt install -y nodejs
```bash
# Clone the repository
git clone https://github.com/community-scripts/ProxmoxVE-Local.git /opt/PVESciptslocal
cd PVESciptslocal
cd /opt/PVESciptslocal

# Install dependencies and build
npm install
````

package-lock.json (1727 changed lines, generated)
File diff suppressed because it is too large.

package.json (76 changed lines)

```
@@ -25,35 +25,35 @@
"typecheck": "tsc --noEmit"
},
"dependencies": {
"@prisma/adapter-better-sqlite3": "^7.0.1",
"@prisma/client": "^7.0.1",
"@prisma/adapter-better-sqlite3": "^7.2.0",
"@prisma/client": "^7.2.0",
"@radix-ui/react-dropdown-menu": "^2.1.16",
"@radix-ui/react-slot": "^1.2.4",
"@t3-oss/env-nextjs": "^0.13.8",
"@t3-oss/env-nextjs": "^0.13.10",
"@tailwindcss/typography": "^0.5.19",
"@tanstack/react-query": "^5.90.11",
"@trpc/client": "^11.7.2",
"@trpc/react-query": "^11.7.2",
"@trpc/server": "^11.7.2",
"@tanstack/react-query": "^5.90.18",
"@trpc/client": "^11.8.1",
"@trpc/react-query": "^11.8.1",
"@trpc/server": "^11.8.1",
"@types/react-syntax-highlighter": "^15.5.13",
"@types/ws": "^8.18.1",
"@xterm/addon-fit": "^0.10.0",
"@xterm/addon-web-links": "^0.11.0",
"@xterm/xterm": "^5.5.0",
"@xterm/addon-fit": "^0.11.0",
"@xterm/addon-web-links": "^0.12.0",
"@xterm/xterm": "^6.0.0",
"axios": "^1.13.2",
"bcryptjs": "^3.0.3",
"better-sqlite3": "^12.4.6",
"better-sqlite3": "^12.6.0",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"cron-validator": "^1.4.0",
"dotenv": "^17.2.3",
"jsonwebtoken": "^9.0.2",
"lucide-react": "^0.555.0",
"next": "^16.0.5",
"jsonwebtoken": "^9.0.3",
"lucide-react": "^0.562.0",
"next": "^16.1.3",
"node-cron": "^4.2.1",
"node-pty": "^1.0.0",
"react": "^19.2.0",
"react-dom": "^19.2.0",
"node-pty": "^1.1.0",
"react": "^19.2.3",
"react-dom": "^19.2.3",
"react-markdown": "^10.1.0",
"react-syntax-highlighter": "^16.1.0",
"refractor": "^5.0.0",
@@ -62,37 +62,37 @@
"strip-ansi": "^7.1.2",
"superjson": "^2.2.6",
"tailwind-merge": "^3.4.0",
"ws": "^8.18.3",
"zod": "^4.1.13"
"ws": "^8.19.0",
"zod": "^4.3.5"
},
"devDependencies": {
"@tailwindcss/postcss": "^4.1.17",
"@tailwindcss/postcss": "^4.1.18",
"@testing-library/jest-dom": "^6.9.1",
"@testing-library/react": "^16.3.0",
"@testing-library/react": "^16.3.1",
"@testing-library/user-event": "^14.6.1",
"@types/bcryptjs": "^3.0.0",
"@types/better-sqlite3": "^7.6.13",
"@types/jsonwebtoken": "^9.0.10",
"@types/node": "^24.10.1",
"@types/node": "^24.10.9",
"@types/node-cron": "^3.0.11",
"@types/react": "^19.2.7",
"@types/react": "^19.2.8",
"@types/react-dom": "^19.2.3",
"@vitejs/plugin-react": "^5.1.1",
"@vitest/coverage-v8": "^4.0.14",
"@vitest/ui": "^4.0.14",
"baseline-browser-mapping": "^2.8.32",
"eslint": "^9.39.1",
"eslint-config-next": "^16.0.5",
"jsdom": "^27.2.0",
"@vitejs/plugin-react": "^5.1.2",
"@vitest/coverage-v8": "^4.0.17",
"@vitest/ui": "^4.0.17",
"baseline-browser-mapping": "^2.9.15",
"eslint": "^9.39.2",
"eslint-config-next": "^16.1.3",
"jsdom": "^27.4.0",
"postcss": "^8.5.6",
"prettier": "^3.7.1",
"prettier-plugin-tailwindcss": "^0.7.1",
"prisma": "^7.0.1",
"tailwindcss": "^4.1.17",
"tsx": "^4.19.4",
"prettier": "^3.8.0",
"prettier-plugin-tailwindcss": "^0.7.2",
"prisma": "^7.2.0",
"tailwindcss": "^4.1.18",
"tsx": "^4.21.0",
"typescript": "^5.9.3",
"typescript-eslint": "^8.48.0",
"vitest": "^4.0.14"
"typescript-eslint": "^8.53.0",
"vitest": "^4.0.17"
},
"ct3aMetadata": {
"initVersion": "7.39.3"
@@ -104,4 +104,4 @@
"overrides": {
"prismjs": "^1.30.0"
}
}
}
```

```
@@ -1,4 +1,4 @@
# Copyright (c) 2021-2025 community-scripts ORG
# Copyright (c) 2021-2026 community-scripts ORG
# Author: tteck (tteckster)
# Co-Author: MickLesk
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
@@ -6,33 +6,65 @@
if ! command -v curl >/dev/null 2>&1; then
apk update && apk add curl >/dev/null 2>&1
fi

source "$(dirname "${BASH_SOURCE[0]}")/core.func"
source "$(dirname "${BASH_SOURCE[0]}")/error-handler.func"
load_functions
catch_errors

# This function enables IPv6 if it's not disabled and sets verbose mode
verb_ip6() {
set_std_mode # Set STD mode based on VERBOSE

if [ "$DISABLEIPV6" == "yes" ]; then
if [ "${IPV6_METHOD:-}" = "disable" ]; then
msg_info "Disabling IPv6 (this may affect some services)"
$STD sysctl -w net.ipv6.conf.all.disable_ipv6=1
echo "net.ipv6.conf.all.disable_ipv6 = 1" >>/etc/sysctl.conf
$STD sysctl -w net.ipv6.conf.default.disable_ipv6=1
$STD sysctl -w net.ipv6.conf.lo.disable_ipv6=1
mkdir -p /etc/sysctl.d
$STD tee /etc/sysctl.d/99-disable-ipv6.conf >/dev/null <<EOF
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
EOF
$STD rc-update add sysctl default
msg_ok "Disabled IPv6"
fi
}

# This function catches errors and handles them with the error handler function
catch_errors() {
set -Eeuo pipefail
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
set -Eeuo pipefail
trap 'error_handler $? $LINENO "$BASH_COMMAND"' ERR
trap on_exit EXIT
trap on_interrupt INT
trap on_terminate TERM

error_handler() {
local exit_code="$1"
local line_number="$2"
local command="$3"

if [[ "$exit_code" -eq 0 ]]; then
return 0
fi

printf "\e[?25h"
echo -e "\n${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}\n"
exit "$exit_code"
}

# This function handles errors
error_handler() {
on_exit() {
local exit_code="$?"
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n"
[[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
exit "$exit_code"
}

on_interrupt() {
echo -e "\n${RD}Interrupted by user (SIGINT)${CL}"
exit 130
}

on_terminate() {
echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}"
exit 143
}

# This function sets up the Container OS by generating the locale, setting the timezone, and checking the network connection
@@ -61,10 +93,10 @@ network_check() {
set +e
trap - ERR
if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
msg_ok "Internet Connected"
ipv4_status="${GN}✔${CL} IPv4"
else
msg_error "Internet NOT Connected"
read -r -p "Would you like to continue anyway? <y/N> " prompt
ipv4_status="${RD}✖${CL} IPv4"
read -r -p "Internet NOT connected. Continue anyway? <y/N> " prompt
if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
else
@@ -73,7 +105,11 @@ network_check() {
fi
fi
RESOLVEDIP=$(getent hosts github.com | awk '{ print $1 }')
if [[ -z "$RESOLVEDIP" ]]; then msg_error "DNS Lookup Failure"; else msg_ok "DNS Resolved github.com to ${BL}$RESOLVEDIP${CL}"; fi
if [[ -z "$RESOLVEDIP" ]]; then
msg_error "Internet: ${ipv4_status} DNS Failed"
else
msg_ok "Internet: ${ipv4_status} DNS: ${BL}${RESOLVEDIP}${CL}"
fi
set -e
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
@@ -82,7 +118,7 @@
update_os() {
msg_info "Updating Container OS"
$STD apk -U upgrade
#source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
source "$(dirname "${BASH_SOURCE[0]}")/tools.func"
msg_ok "Updated Container OS"
}

@@ -154,10 +190,4 @@ EOF
echo "bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${app}.sh)\"" >/usr/bin/update
chmod +x /usr/bin/update

if [[ -n "${SSH_AUTHORIZED_KEY}" ]]; then
mkdir -p /root/.ssh
echo "${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys
chmod 700 /root/.ssh
chmod 600 /root/.ssh/authorized_keys
fi
}
```
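
For orientation, a minimal, self-contained sketch (not part of the diff) of how the reworked trap wiring above behaves: the ERR trap now passes the failing command's exit code to error_handler explicitly, and EXIT/INT/TERM get dedicated handlers. The msg_* helpers and color variables from core.func are omitted here; plain echo stands in for them.

```bash
#!/usr/bin/env bash
# Sketch of the trap layout from the diff above, stripped of framework helpers.
set -Eeuo pipefail

error_handler() {
  # $1 exit code (passed as $? at trap time), $2 line number, $3 failing command
  local exit_code="$1" line_number="$2" command="$3"
  [[ "$exit_code" -eq 0 ]] && return 0
  echo -e "\n[ERROR] in line ${line_number}: exit code ${exit_code}: while executing command ${command}\n" >&2
  exit "$exit_code"
}

on_exit() {
  # Clean up an optional lockfile, then propagate the last exit status.
  local rc=$?
  if [[ -n "${lockfile:-}" && -e "${lockfile:-}" ]]; then rm -f "$lockfile"; fi
  exit "$rc"
}
on_interrupt() { echo "Interrupted by user (SIGINT)" >&2; exit 130; }
on_terminate() { echo "Terminated by signal (SIGTERM)" >&2; exit 143; }

trap 'error_handler $? $LINENO "$BASH_COMMAND"' ERR
trap on_exit EXIT
trap on_interrupt INT
trap on_terminate TERM

false   # any failing command now reports its own exit code via error_handler
```

Passing `$?` at trap time is the point of the change: the handler receives the real exit code instead of reading a possibly stale status later.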

scripts/core/alpine-tools.func (507 lines, new file)

```sh
@@ -0,0 +1,507 @@
#!/bin/ash
# shellcheck shell=ash

# Expects existing msg_* functions and optional $STD from the framework.

# ------------------------------
# helpers
# ------------------------------
lower() { printf '%s' "$1" | tr '[:upper:]' '[:lower:]'; }
has() { command -v "$1" >/dev/null 2>&1; }

need_tool() {
# usage: need_tool curl jq unzip ...
# setup missing tools via apk
local missing=0 t
for t in "$@"; do
if ! has "$t"; then missing=1; fi
done
if [ "$missing" -eq 1 ]; then
msg_info "Installing tools: $*"
apk add --no-cache "$@" >/dev/null 2>&1 || {
msg_error "apk add failed for: $*"
return 1
}
msg_ok "Tools ready: $*"
fi
}

net_resolves() {
# better handling for missing getent on Alpine
# usage: net_resolves api.github.com
local host="$1"
ping -c1 -W1 "$host" >/dev/null 2>&1 || nslookup "$host" >/dev/null 2>&1
}

ensure_usr_local_bin_persist() {
local PROFILE_FILE="/etc/profile.d/10-localbin.sh"
if [ ! -f "$PROFILE_FILE" ]; then
echo 'case ":$PATH:" in *:/usr/local/bin:*) ;; *) export PATH="/usr/local/bin:$PATH";; esac' >"$PROFILE_FILE"
chmod +x "$PROFILE_FILE"
fi
}

download_with_progress() {
# $1 url, $2 dest
local url="$1" out="$2" cl
need_tool curl pv || return 1
cl=$(curl -fsSLI "$url" 2>/dev/null | awk 'tolower($0) ~ /^content-length:/ {print $2}' | tr -d '\r')
if [ -n "$cl" ]; then
curl -fsSL "$url" | pv -s "$cl" >"$out" || {
msg_error "Download failed: $url"
return 1
}
else
curl -fL# -o "$out" "$url" || {
msg_error "Download failed: $url"
return 1
}
fi
}

# ------------------------------
# GitHub: check Release
# ------------------------------
check_for_gh_release() {
# app, repo, [pinned]
local app="$1" source="$2" pinned="${3:-}"
local app_lc
app_lc="$(lower "$app" | tr -d ' ')"
local current_file="$HOME/.${app_lc}"
local current="" release tag

msg_info "Check for update: $app"

net_resolves api.github.com || {
msg_error "DNS/network error: api.github.com"
return 1
}
need_tool curl jq || return 1

tag=$(curl -fsSL "https://api.github.com/repos/${source}/releases/latest" | jq -r '.tag_name // empty')
[ -z "$tag" ] && {
msg_error "Unable to fetch latest tag for $app"
return 1
}
release="${tag#v}"

[ -f "$current_file" ] && current="$(cat "$current_file")"

if [ -n "$pinned" ]; then
if [ "$pinned" = "$release" ]; then
msg_ok "$app pinned to v$pinned (no update)"
return 1
fi
if [ "$current" = "$pinned" ]; then
msg_ok "$app pinned v$pinned installed (upstream v$release)"
return 1
fi
msg_info "$app pinned v$pinned (upstream v$release) → update/downgrade"
CHECK_UPDATE_RELEASE="$pinned"
return 0
fi

if [ "$release" != "$current" ] || [ ! -f "$current_file" ]; then
CHECK_UPDATE_RELEASE="$release"
msg_info "New release available: v$release (current: v${current:-none})"
return 0
fi

msg_ok "$app is up to date (v$release)"
return 1
}

# ------------------------------
# GitHub: get Release & deploy (Alpine)
# modes: tarball | prebuild | singlefile
# ------------------------------
fetch_and_deploy_gh() {
# $1 app, $2 repo, [$3 mode], [$4 version], [$5 target], [$6 asset_pattern
local app="$1" repo="$2" mode="${3:-tarball}" version="${4:-latest}" target="${5:-/opt/$1}" pattern="${6:-}"
local app_lc
app_lc="$(lower "$app" | tr -d ' ')"
local vfile="$HOME/.${app_lc}"
local json url filename tmpd unpack

net_resolves api.github.com || {
msg_error "DNS/network error"
return 1
}
need_tool curl jq tar || return 1
[ "$mode" = "prebuild" ] || [ "$mode" = "singlefile" ] && need_tool unzip >/dev/null 2>&1 || true

tmpd="$(mktemp -d)" || return 1
mkdir -p "$target"

# Release JSON
if [ "$version" = "latest" ]; then
json="$(curl -fsSL "https://api.github.com/repos/$repo/releases/latest")" || {
msg_error "GitHub API failed"
rm -rf "$tmpd"
return 1
}
else
json="$(curl -fsSL "https://api.github.com/repos/$repo/releases/tags/$version")" || {
msg_error "GitHub API failed"
rm -rf "$tmpd"
return 1
}
fi

# correct Version
version="$(printf '%s' "$json" | jq -r '.tag_name // empty')"
version="${version#v}"

[ -z "$version" ] && {
msg_error "No tag in release json"
rm -rf "$tmpd"
return 1
}

case "$mode" in
tarball | source)
url="$(printf '%s' "$json" | jq -r '.tarball_url // empty')"
[ -z "$url" ] && url="https://github.com/$repo/archive/refs/tags/v$version.tar.gz"
filename="${app_lc}-${version}.tar.gz"
download_with_progress "$url" "$tmpd/$filename" || {
rm -rf "$tmpd"
return 1
}
tar -xzf "$tmpd/$filename" -C "$tmpd" || {
msg_error "tar extract failed"
rm -rf "$tmpd"
return 1
}
unpack="$(find "$tmpd" -mindepth 1 -maxdepth 1 -type d | head -n1)"
# copy content of unpack to target
(cd "$unpack" && tar -cf - .) | (cd "$target" && tar -xf -) || {
msg_error "copy failed"
rm -rf "$tmpd"
return 1
}
;;
prebuild)
[ -n "$pattern" ] || {
msg_error "prebuild requires asset pattern"
rm -rf "$tmpd"
return 1
}
url="$(printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" '
BEGIN{IGNORECASE=1}
$0 ~ p {print; exit}
')"
[ -z "$url" ] && {
msg_error "asset not found for pattern: $pattern"
rm -rf "$tmpd"
return 1
}
filename="${url##*/}"
download_with_progress "$url" "$tmpd/$filename" || {
rm -rf "$tmpd"
return 1
}
# unpack archive (Zip or tarball)
case "$filename" in
*.zip)
need_tool unzip || {
rm -rf "$tmpd"
return 1
}
mkdir -p "$tmpd/unp"
unzip -q "$tmpd/$filename" -d "$tmpd/unp"
;;
*.tar.gz | *.tgz | *.tar.xz | *.tar.zst | *.tar.bz2)
mkdir -p "$tmpd/unp"
tar -xf "$tmpd/$filename" -C "$tmpd/unp"
;;
*)
msg_error "unsupported archive: $filename"
rm -rf "$tmpd"
return 1
;;
esac
# top-level folder strippen
if [ "$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type d | wc -l)" -eq 1 ] && [ -z "$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type f | head -n1)" ]; then
unpack="$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type d)"
(cd "$unpack" && tar -cf - .) | (cd "$target" && tar -xf -) || {
msg_error "copy failed"
rm -rf "$tmpd"
return 1
}
else
(cd "$tmpd/unp" && tar -cf - .) | (cd "$target" && tar -xf -) || {
msg_error "copy failed"
rm -rf "$tmpd"
return 1
}
fi
;;
singlefile)
[ -n "$pattern" ] || {
msg_error "singlefile requires asset pattern"
rm -rf "$tmpd"
return 1
}
url="$(printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" '
BEGIN{IGNORECASE=1}
$0 ~ p {print; exit}
')"
[ -z "$url" ] && {
msg_error "asset not found for pattern: $pattern"
rm -rf "$tmpd"
return 1
}
filename="${url##*/}"
download_with_progress "$url" "$target/$app" || {
rm -rf "$tmpd"
return 1
}
chmod +x "$target/$app"
;;
*)
msg_error "Unknown mode: $mode"
rm -rf "$tmpd"
return 1
;;
esac

echo "$version" >"$vfile"
ensure_usr_local_bin_persist
rm -rf "$tmpd"
msg_ok "Deployed $app ($version) → $target"
}

# ------------------------------
# yq (mikefarah) – Alpine
# ------------------------------
setup_yq() {
# prefer apk, unless FORCE_GH=1
if [ "${FORCE_GH:-0}" != "1" ] && apk info -e yq >/dev/null 2>&1; then
msg_info "Updating yq via apk"
apk add --no-cache --upgrade yq >/dev/null 2>&1 || true
msg_ok "yq ready ($(yq --version 2>/dev/null))"
return 0
fi

need_tool curl || return 1
local arch bin url tmp
case "$(uname -m)" in
x86_64) arch="amd64" ;;
aarch64) arch="arm64" ;;
*)
msg_error "Unsupported arch for yq: $(uname -m)"
return 1
;;
esac
url="https://github.com/mikefarah/yq/releases/latest/download/yq_linux_${arch}"
tmp="$(mktemp)"
download_with_progress "$url" "$tmp" || return 1
install -m 0755 "$tmp" /usr/local/bin/yq
rm -f "$tmp"
msg_ok "Setup yq ($(yq --version 2>/dev/null))"
}

# ------------------------------
# Adminer – Alpine
# ------------------------------
setup_adminer() {
need_tool curl || return 1
msg_info "Setup Adminer (Alpine)"
mkdir -p /var/www/localhost/htdocs/adminer
curl -fsSL https://github.com/vrana/adminer/releases/latest/download/adminer.php \
-o /var/www/localhost/htdocs/adminer/index.php || {
msg_error "Adminer download failed"
return 1
}
msg_ok "Adminer at /adminer (served by your webserver)"
}

# ------------------------------
# uv – Alpine (musl tarball)
# optional: PYTHON_VERSION="3.12"
# ------------------------------
setup_uv() {
need_tool curl tar || return 1
local UV_BIN="/usr/local/bin/uv"
local arch tarball url tmpd ver installed

case "$(uname -m)" in
x86_64) arch="x86_64-unknown-linux-musl" ;;
aarch64) arch="aarch64-unknown-linux-musl" ;;
*)
msg_error "Unsupported arch for uv: $(uname -m)"
return 1
;;
esac

ver="$(curl -fsSL https://api.github.com/repos/astral-sh/uv/releases/latest | jq -r '.tag_name' 2>/dev/null)"
ver="${ver#v}"
[ -z "$ver" ] && {
msg_error "uv: cannot determine latest version"
return 1
}

if has "$UV_BIN"; then
installed="$($UV_BIN -V 2>/dev/null | awk '{print $2}')"
[ "$installed" = "$ver" ] && {
msg_ok "uv $ver already installed"
return 0
}
msg_info "Updating uv $installed → $ver"
else
msg_info "Setup uv $ver"
fi

tmpd="$(mktemp -d)" || return 1
tarball="uv-${arch}.tar.gz"
url="https://github.com/astral-sh/uv/releases/download/v${ver}/${tarball}"

download_with_progress "$url" "$tmpd/uv.tar.gz" || {
rm -rf "$tmpd"
return 1
}
tar -xzf "$tmpd/uv.tar.gz" -C "$tmpd" || {
msg_error "uv: extract failed"
rm -rf "$tmpd"
return 1
}

# tar contains ./uv
if [ -x "$tmpd/uv" ]; then
install -m 0755 "$tmpd/uv" "$UV_BIN"
else
# fallback: in subfolder
install -m 0755 "$tmpd"/*/uv "$UV_BIN" 2>/dev/null || {
msg_error "uv binary not found in tar"
rm -rf "$tmpd"
return 1
}
fi
rm -rf "$tmpd"
ensure_usr_local_bin_persist
msg_ok "Setup uv $ver"

if [ -n "${PYTHON_VERSION:-}" ]; then
local match
match="$(uv python list --only-downloads 2>/dev/null | awk -v maj="$PYTHON_VERSION" '
$0 ~ "^cpython-"maj"\\." { print $0 }' | awk -F- '{print $2}' | sort -V | tail -n1)"
[ -z "$match" ] && {
msg_error "No matching Python for $PYTHON_VERSION"
return 1
}
if ! uv python list | grep -q "cpython-${match}-linux"; then
msg_info "Installing Python $match via uv"
uv python install "$match" || {
msg_error "uv python install failed"
return 1
}
msg_ok "Python $match installed (uv)"
fi
fi
}

# ------------------------------
# Java – Alpine (OpenJDK)
# JAVA_VERSION: 17|21 (Default 21)
# ------------------------------
setup_java() {
local JAVA_VERSION="${JAVA_VERSION:-21}" pkg
case "$JAVA_VERSION" in
17) pkg="openjdk17-jdk" ;;
21 | *) pkg="openjdk21-jdk" ;;
esac
msg_info "Setup Java (OpenJDK $JAVA_VERSION)"
apk add --no-cache "$pkg" >/dev/null 2>&1 || {
msg_error "apk add $pkg failed"
return 1
}
# set JAVA_HOME
local prof="/etc/profile.d/20-java.sh"
if [ ! -f "$prof" ]; then
echo 'export JAVA_HOME=$(dirname $(dirname $(readlink -f $(command -v java))))' >"$prof"
echo 'case ":$PATH:" in *:$JAVA_HOME/bin:*) ;; *) export PATH="$JAVA_HOME/bin:$PATH";; esac' >>"$prof"
chmod +x "$prof"
fi
msg_ok "Java ready: $(java -version 2>&1 | head -n1)"
}

# ------------------------------
# Go – Alpine (apk prefers, else tarball)
# ------------------------------
setup_go() {
if [ -z "${GO_VERSION:-}" ]; then
msg_info "Setup Go (apk)"
apk add --no-cache go >/dev/null 2>&1 || {
msg_error "apk add go failed"
return 1
}
msg_ok "Go ready: $(go version 2>/dev/null)"
return 0
fi

need_tool curl tar || return 1
local ARCH TARBALL URL TMP
case "$(uname -m)" in
x86_64) ARCH="amd64" ;;
aarch64) ARCH="arm64" ;;
*)
msg_error "Unsupported arch for Go: $(uname -m)"
return 1
;;
esac
TARBALL="go${GO_VERSION}.linux-${ARCH}.tar.gz"
URL="https://go.dev/dl/${TARBALL}"
msg_info "Setup Go $GO_VERSION (tarball)"
TMP="$(mktemp)"
download_with_progress "$URL" "$TMP" || return 1
rm -rf /usr/local/go
tar -C /usr/local -xzf "$TMP" || {
msg_error "extract go failed"
rm -f "$TMP"
return 1
}
rm -f "$TMP"
ln -sf /usr/local/go/bin/go /usr/local/bin/go
ln -sf /usr/local/go/bin/gofmt /usr/local/bin/gofmt
ensure_usr_local_bin_persist
msg_ok "Go ready: $(go version 2>/dev/null)"
}

# ------------------------------
# Composer – Alpine
# uses php83-cli + openssl + phar
# ------------------------------
setup_composer() {
local COMPOSER_BIN="/usr/local/bin/composer"
if ! has php; then
# prefers php83
msg_info "Installing PHP CLI for Composer"
apk add --no-cache php83-cli php83-openssl php83-phar php83-iconv >/dev/null 2>&1 || {
# Fallback to generic php if 83 not available
apk add --no-cache php-cli php-openssl php-phar php-iconv >/dev/null 2>&1 || {
msg_error "Failed to install php-cli for composer"
return 1
}
}
msg_ok "PHP CLI ready: $(php -v | head -n1)"
fi

if [ -x "$COMPOSER_BIN" ]; then
msg_info "Updating Composer"
else
msg_info "Setup Composer"
fi

need_tool curl || return 1
curl -fsSL https://getcomposer.org/installer -o /tmp/composer-setup.php || {
msg_error "composer installer download failed"
return 1
}
php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer >/dev/null 2>&1 || {
msg_error "composer install failed"
return 1
}
rm -f /tmp/composer-setup.php
ensure_usr_local_bin_persist
msg_ok "Composer ready: $(composer --version 2>/dev/null)"
}
```
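
As a usage illustration (not part of the new file), here is a hedged sketch of how an Alpine update script might combine check_for_gh_release and fetch_and_deploy_gh from the library above. The application name, repository, source path, and the assumption that upstream tags carry a leading "v" are all placeholders; the msg_* helpers are normally supplied by the framework, so minimal fallbacks are defined for a standalone run.

```bash
# Fallback message helpers so the sketch runs standalone; the real framework provides these.
command -v msg_info >/dev/null 2>&1 || {
  msg_info()  { echo "[info] $*"; }
  msg_ok()    { echo "[ok]   $*"; }
  msg_error() { echo "[err]  $*" >&2; }
}

. ./alpine-tools.func            # adjust to wherever the file is installed; path is an assumption

APP="MyApp"                      # placeholder application name
REPO="example-org/myapp"         # placeholder GitHub repository

if check_for_gh_release "$APP" "$REPO"; then
  # CHECK_UPDATE_RELEASE holds the version selected by the check (latest or pinned).
  # "tarball" mode needs no asset pattern; the v-prefix assumes upstream tags use one.
  fetch_and_deploy_gh "$APP" "$REPO" "tarball" "v${CHECK_UPDATE_RELEASE}" "/opt/myapp"
fi
```

The same pattern works for "prebuild" and "singlefile" modes by passing an asset regex as the sixth argument.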

```
@@ -1,7 +1,154 @@
# Copyright (c) 2021-2025 community-scripts ORG
# Copyright (c) 2021-2026 community-scripts ORG
# Author: michelroegl-brunner
# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/LICENSE

# ==============================================================================
# API.FUNC - TELEMETRY & DIAGNOSTICS API
# ==============================================================================
#
# Provides functions for sending anonymous telemetry data to Community-Scripts
# API for analytics and diagnostics purposes.
#
# Features:
# - Container/VM creation statistics
# - Installation success/failure tracking
# - Error code mapping and reporting
# - Privacy-respecting anonymous telemetry
#
# Usage:
# source <(curl -fsSL .../api.func)
# post_to_api # Report container creation
# post_update_to_api # Report installation status
#
# Privacy:
# - Only anonymous statistics (no personal data)
# - User can opt-out via diagnostics settings
# - Random UUID for session tracking only
#
# ==============================================================================

# ==============================================================================
# SECTION 1: ERROR CODE DESCRIPTIONS
# ==============================================================================

# ------------------------------------------------------------------------------
# explain_exit_code()
#
# - Maps numeric exit codes to human-readable error descriptions
# - Supports:
# * Generic/Shell errors (1, 2, 126, 127, 128, 130, 137, 139, 143)
# * Package manager errors (APT, DPKG: 100, 101, 255)
# * Node.js/npm errors (243-249, 254)
# * Python/pip/uv errors (210-212)
# * PostgreSQL errors (231-234)
# * MySQL/MariaDB errors (241-244)
# * MongoDB errors (251-254)
# * Proxmox custom codes (200-231)
# - Returns description string for given exit code
# - Shared function with error_handler.func for consistency
# ------------------------------------------------------------------------------
explain_exit_code() {
local code="$1"
case "$code" in
# --- Generic / Shell ---
1) echo "General error / Operation not permitted" ;;
2) echo "Misuse of shell builtins (e.g. syntax error)" ;;
126) echo "Command invoked cannot execute (permission problem?)" ;;
127) echo "Command not found" ;;
128) echo "Invalid argument to exit" ;;
130) echo "Terminated by Ctrl+C (SIGINT)" ;;
137) echo "Killed (SIGKILL / Out of memory?)" ;;
139) echo "Segmentation fault (core dumped)" ;;
143) echo "Terminated (SIGTERM)" ;;

# --- Package manager / APT / DPKG ---
100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
255) echo "DPKG: Fatal internal error" ;;

# --- Node.js / npm / pnpm / yarn ---
243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
245) echo "Node.js: Invalid command-line option" ;;
246) echo "Node.js: Internal JavaScript Parse Error" ;;
247) echo "Node.js: Fatal internal error" ;;
248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
249) echo "Node.js: Inspector error" ;;
254) echo "npm/pnpm/yarn: Unknown fatal error" ;;

# --- Python / pip / uv ---
210) echo "Python: Virtualenv / uv environment missing or broken" ;;
211) echo "Python: Dependency resolution failed" ;;
212) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;;

# --- PostgreSQL ---
231) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;;
232) echo "PostgreSQL: Authentication failed (bad user/password)" ;;
233) echo "PostgreSQL: Database does not exist" ;;
234) echo "PostgreSQL: Fatal error in query / syntax" ;;

# --- MySQL / MariaDB ---
241) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;;
242) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;;
243) echo "MySQL/MariaDB: Database does not exist" ;;
244) echo "MySQL/MariaDB: Fatal error in query / syntax" ;;

# --- MongoDB ---
251) echo "MongoDB: Connection failed (server not running)" ;;
252) echo "MongoDB: Authentication failed (bad user/password)" ;;
253) echo "MongoDB: Database not found" ;;
254) echo "MongoDB: Fatal query error" ;;

# --- Proxmox Custom Codes ---
200) echo "Custom: Failed to create lock file" ;;
203) echo "Custom: Missing CTID variable" ;;
204) echo "Custom: Missing PCT_OSTYPE variable" ;;
205) echo "Custom: Invalid CTID (<100)" ;;
206) echo "Custom: CTID already in use (check 'pct list' and /etc/pve/lxc/)" ;;
207) echo "Custom: Password contains unescaped special characters (-, /, \\, *, etc.)" ;;
208) echo "Custom: Invalid configuration (DNS/MAC/Network format error)" ;;
209) echo "Custom: Container creation failed (check logs for pct create output)" ;;
210) echo "Custom: Cluster not quorate" ;;
211) echo "Custom: Timeout waiting for template lock (concurrent download in progress)" ;;
214) echo "Custom: Not enough storage space" ;;
215) echo "Custom: Container created but not listed (ghost state - check /etc/pve/lxc/)" ;;
216) echo "Custom: RootFS entry missing in config (incomplete creation)" ;;
217) echo "Custom: Storage does not support rootdir (check storage capabilities)" ;;
218) echo "Custom: Template file corrupted or incomplete download (size <1MB or invalid archive)" ;;
220) echo "Custom: Unable to resolve template path" ;;
221) echo "Custom: Template file exists but not readable (check file permissions)" ;;
222) echo "Custom: Template download failed after 3 attempts (network/storage issue)" ;;
223) echo "Custom: Template not available after download (storage sync issue)" ;;
225) echo "Custom: No template available for OS/Version (check 'pveam available')" ;;
231) echo "Custom: LXC stack upgrade/retry failed (outdated pve-container - check https://github.com/community-scripts/ProxmoxVE/discussions/8126)" ;;

# --- Default ---
*) echo "Unknown error" ;;
esac
}

# ==============================================================================
# SECTION 2: TELEMETRY FUNCTIONS
# ==============================================================================

# ------------------------------------------------------------------------------
# post_to_api()
#
# - Sends LXC container creation statistics to Community-Scripts API
# - Only executes if:
# * curl is available
# * DIAGNOSTICS=yes
# * RANDOM_UUID is set
# - Payload includes:
# * Container type, disk size, CPU cores, RAM
# * OS type and version
# * IPv6 disable status
# * Application name (NSAPP)
# * Installation method
# * PVE version
# * Status: "installing"
# * Random UUID for session tracking
# - Anonymous telemetry (no personal data)
# ------------------------------------------------------------------------------
post_to_api() {

if ! command -v curl &>/dev/null; then
@@ -30,7 +177,6 @@ post_to_api() {
"ram_size": $RAM_SIZE,
"os_type": "$var_os",
"os_version": "$var_version",
"disableip6": "",
"nsapp": "$NSAPP",
"method": "$METHOD(PVE-Local)",
"pve_version": "$pve_version",
@@ -39,14 +185,26 @@
}
EOF
)

if [[ "$DIAGNOSTICS" == "yes" ]]; then
RESPONSE=$(curl -s -w "%{http_code}" -L -X POST "$API_URL" --post301 --post302 \
-H "Content-Type: application/json" \
-d "$JSON_PAYLOAD") || true
fi

}

# ------------------------------------------------------------------------------
# post_to_api_vm()
#
# - Sends VM creation statistics to Community-Scripts API
# - Similar to post_to_api() but for virtual machines (not containers)
# - Reads DIAGNOSTICS from /usr/local/community-scripts/diagnostics file
# - Payload differences:
# * ct_type=2 (VM instead of LXC)
# * type="vm"
# * Disk size without 'G' suffix (parsed from DISK_SIZE variable)
# - Only executes if DIAGNOSTICS=yes and RANDOM_UUID is set
# ------------------------------------------------------------------------------
post_to_api_vm() {

if [[ ! -f /usr/local/community-scripts/diagnostics ]]; then
@@ -81,7 +239,6 @@ post_to_api_vm() {
"ram_size": $RAM_SIZE,
"os_type": "$var_os",
"os_version": "$var_version",
"disableip6": "",
"nsapp": "$NSAPP",
"method": "$METHOD(PVE-Local)",
"pve_version": "$pve_version",
@@ -90,7 +247,6 @@ post_to_api_vm() {
}
EOF
)

if [[ "$DIAGNOSTICS" == "yes" ]]; then
RESPONSE=$(curl -s -w "%{http_code}" -L -X POST "$API_URL" --post301 --post302 \
-H "Content-Type: application/json" \
@@ -98,19 +254,54 @@ EOF
fi
}

POST_UPDATE_DONE=false
# ------------------------------------------------------------------------------
# post_update_to_api()
#
# - Reports installation completion status to API
# - Prevents duplicate submissions via POST_UPDATE_DONE flag
# - Arguments:
# * $1: status ("success" or "failed")
# * $2: exit_code (default: 1 for failed, 0 for success)
# - Payload includes:
# * Final status (success/failed)
# * Error description via get_error_description()
# * Random UUID for session correlation
# - Only executes once per session
# - Silently returns if:
# * curl not available
# * Already reported (POST_UPDATE_DONE=true)
# * DIAGNOSTICS=no
# ------------------------------------------------------------------------------
post_update_to_api() {

if ! command -v curl &>/dev/null; then
return
fi

# Initialize flag if not set (prevents 'unbound variable' error with set -u)
POST_UPDATE_DONE=${POST_UPDATE_DONE:-false}

if [ "$POST_UPDATE_DONE" = true ]; then
return 0
fi
exit_code=${2:-1}
local API_URL="http://api.community-scripts.org/upload/updatestatus"
local status="${1:-failed}"
local error="${2:-No error message}"
if [[ "$status" == "failed" ]]; then
local exit_code="${2:-1}"
elif [[ "$status" == "success" ]]; then
local exit_code="${2:-0}"
fi

if [[ -z "$exit_code" ]]; then
exit_code=1
fi

error=$(explain_exit_code "$exit_code")

if [ -z "$error" ]; then
error="Unknown error"
fi

JSON_PAYLOAD=$(
cat <<EOF
@@ -121,7 +312,6 @@ post_update_to_api() {
}
EOF
)

if [[ "$DIAGNOSTICS" == "yes" ]]; then
RESPONSE=$(curl -s -w "%{http_code}" -L -X POST "$API_URL" --post301 --post302 \
-H "Content-Type: application/json" \
```
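
A hedged sketch of how an install script might report its outcome through these telemetry helpers. The sourcing path, the stand-in install step, and the surrounding variables are assumptions, and the hunks above are partial; nothing is posted unless DIAGNOSTICS is explicitly set to yes.

```bash
export DIAGNOSTICS="yes"                                   # opt-in only; "no" disables telemetry
export RANDOM_UUID="$(cat /proc/sys/kernel/random/uuid)"   # anonymous session correlation id

source ./api.func                     # adjust to wherever the file is installed; path is an assumption

fake_install_step() { return 127; }   # stand-in for a real installation step

if fake_install_step; then
  post_update_to_api "success" 0
else
  rc=$?
  echo "Install step failed: $(explain_exit_code "$rc")"   # 127 -> "Command not found"
  post_update_to_api "failed" "$rc"
fi
```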

File diff suppressed because it is too large.

scripts/core/cloud-init.func (505 lines, new file)

@@ -0,0 +1,505 @@
|
||||
#!/usr/bin/env bash
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: community-scripts ORG
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/branch/main/LICENSE
|
||||
# Revision: 1
|
||||
|
||||
# ==============================================================================
|
||||
# CLOUD-INIT.FUNC - VM CLOUD-INIT CONFIGURATION LIBRARY
|
||||
# ==============================================================================
|
||||
#
|
||||
# Universal helper library for Cloud-Init configuration in Proxmox VMs.
|
||||
# Provides functions for:
|
||||
#
|
||||
# - Native Proxmox Cloud-Init setup (user, password, network, SSH keys)
|
||||
# - Interactive configuration dialogs (whiptail)
|
||||
# - IP address retrieval via qemu-guest-agent
|
||||
# - Cloud-Init status monitoring and waiting
|
||||
#
|
||||
# Usage:
|
||||
# source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/cloud-init.func)
|
||||
# setup_cloud_init "$VMID" "$STORAGE" "$HN" "yes"
|
||||
#
|
||||
# Compatible with: Debian, Ubuntu, and all Cloud-Init enabled distributions
|
||||
# ==============================================================================
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 1: CONFIGURATION DEFAULTS
|
||||
# ==============================================================================
|
||||
# These can be overridden before sourcing this library
|
||||
|
||||
CLOUDINIT_DEFAULT_USER="${CLOUDINIT_DEFAULT_USER:-root}"
|
||||
CLOUDINIT_DNS_SERVERS="${CLOUDINIT_DNS_SERVERS:-1.1.1.1 8.8.8.8}"
|
||||
CLOUDINIT_SEARCH_DOMAIN="${CLOUDINIT_SEARCH_DOMAIN:-local}"
|
||||
CLOUDINIT_SSH_KEYS="${CLOUDINIT_SSH_KEYS:-/root/.ssh/authorized_keys}"
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 2: HELPER FUNCTIONS
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# _ci_msg - Internal message helper with fallback
|
||||
# ------------------------------------------------------------------------------
|
||||
function _ci_msg_info() { msg_info "$1" 2>/dev/null || echo "[INFO] $1"; }
|
||||
function _ci_msg_ok() { msg_ok "$1" 2>/dev/null || echo "[OK] $1"; }
|
||||
function _ci_msg_warn() { msg_warn "$1" 2>/dev/null || echo "[WARN] $1"; }
|
||||
function _ci_msg_error() { msg_error "$1" 2>/dev/null || echo "[ERROR] $1"; }
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# validate_ip_cidr - Validate IP address in CIDR format
|
||||
# Usage: validate_ip_cidr "192.168.1.100/24" && echo "Valid"
|
||||
# Returns: 0 if valid, 1 if invalid
|
||||
# ------------------------------------------------------------------------------
|
||||
function validate_ip_cidr() {
|
||||
local ip_cidr="$1"
|
||||
# Match: 0-255.0-255.0-255.0-255/0-32
|
||||
if [[ "$ip_cidr" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then
|
||||
# Validate each octet is 0-255
|
||||
local ip="${ip_cidr%/*}"
|
||||
IFS='.' read -ra octets <<<"$ip"
|
||||
for octet in "${octets[@]}"; do
|
||||
((octet > 255)) && return 1
|
||||
done
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# validate_ip - Validate plain IP address (no CIDR)
|
||||
# Usage: validate_ip "192.168.1.1" && echo "Valid"
|
||||
# ------------------------------------------------------------------------------
|
||||
function validate_ip() {
|
||||
local ip="$1"
|
||||
if [[ "$ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
|
||||
IFS='.' read -ra octets <<<"$ip"
|
||||
for octet in "${octets[@]}"; do
|
||||
((octet > 255)) && return 1
|
||||
done
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 3: MAIN CLOUD-INIT FUNCTIONS
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# setup_cloud_init - Configures Proxmox Native Cloud-Init
|
||||
# ------------------------------------------------------------------------------
|
||||
# Parameters:
|
||||
# $1 - VMID (required)
|
||||
# $2 - Storage name (required)
|
||||
# $3 - Hostname (optional, default: vm-<vmid>)
|
||||
# $4 - Enable Cloud-Init (yes/no, default: no)
|
||||
# $5 - User (optional, default: root)
|
||||
# $6 - Network mode (dhcp/static, default: dhcp)
|
||||
# $7 - Static IP (optional, format: 192.168.1.100/24)
|
||||
# $8 - Gateway (optional)
|
||||
# $9 - Nameservers (optional, default: 1.1.1.1 8.8.8.8)
|
||||
#
|
||||
# Returns: 0 on success, 1 on failure
|
||||
# Exports: CLOUDINIT_USER, CLOUDINIT_PASSWORD, CLOUDINIT_CRED_FILE
|
||||
# ==============================================================================
|
||||
function setup_cloud_init() {
|
||||
local vmid="$1"
|
||||
local storage="$2"
|
||||
local hostname="${3:-vm-${vmid}}"
|
||||
local enable="${4:-no}"
|
||||
local ciuser="${5:-$CLOUDINIT_DEFAULT_USER}"
|
||||
local network_mode="${6:-dhcp}"
|
||||
local static_ip="${7:-}"
|
||||
local gateway="${8:-}"
|
||||
local nameservers="${9:-$CLOUDINIT_DNS_SERVERS}"
|
||||
|
||||
# Skip if not enabled
|
||||
if [ "$enable" != "yes" ]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Validate static IP if provided
|
||||
if [ "$network_mode" = "static" ]; then
|
||||
if [ -n "$static_ip" ] && ! validate_ip_cidr "$static_ip"; then
|
||||
_ci_msg_error "Invalid static IP format: $static_ip (expected: x.x.x.x/xx)"
|
||||
return 1
|
||||
fi
|
||||
if [ -n "$gateway" ] && ! validate_ip "$gateway"; then
|
||||
_ci_msg_error "Invalid gateway IP format: $gateway"
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
_ci_msg_info "Configuring Cloud-Init"
|
||||
|
||||
# Create Cloud-Init drive (try ide2 first, then scsi1 as fallback)
|
||||
if ! qm set "$vmid" --ide2 "${storage}:cloudinit" >/dev/null 2>&1; then
|
||||
qm set "$vmid" --scsi1 "${storage}:cloudinit" >/dev/null 2>&1
|
||||
fi
|
||||
|
||||
# Set user
|
||||
qm set "$vmid" --ciuser "$ciuser" >/dev/null
|
||||
|
||||
# Generate and set secure random password
|
||||
local cipassword=$(openssl rand -base64 16)
|
||||
qm set "$vmid" --cipassword "$cipassword" >/dev/null
|
||||
|
||||
# Add SSH keys if available
|
||||
if [ -f "$CLOUDINIT_SSH_KEYS" ]; then
|
||||
qm set "$vmid" --sshkeys "$CLOUDINIT_SSH_KEYS" >/dev/null 2>&1 || true
|
||||
fi
|
||||
|
||||
# Configure network
|
||||
if [ "$network_mode" = "static" ] && [ -n "$static_ip" ] && [ -n "$gateway" ]; then
|
||||
qm set "$vmid" --ipconfig0 "ip=${static_ip},gw=${gateway}" >/dev/null
|
||||
else
|
||||
qm set "$vmid" --ipconfig0 "ip=dhcp" >/dev/null
|
||||
fi
|
||||
|
||||
# Set DNS servers
|
||||
qm set "$vmid" --nameserver "$nameservers" >/dev/null
|
||||
|
||||
# Set search domain
|
||||
qm set "$vmid" --searchdomain "$CLOUDINIT_SEARCH_DOMAIN" >/dev/null
|
||||
|
||||
# Enable package upgrades on first boot (if supported by Proxmox version)
|
||||
qm set "$vmid" --ciupgrade 1 >/dev/null 2>&1 || true
|
||||
|
||||
# Save credentials to file (with restrictive permissions)
|
||||
local cred_file="/tmp/${hostname}-${vmid}-cloud-init-credentials.txt"
|
||||
umask 077
|
||||
cat >"$cred_file" <<EOF
|
||||
╔══════════════════════════════════════════════════════════════════╗
|
||||
║ ⚠️ SECURITY WARNING: DELETE THIS FILE AFTER NOTING CREDENTIALS ║
|
||||
╚══════════════════════════════════════════════════════════════════╝
|
||||
|
||||
Cloud-Init Credentials
|
||||
────────────────────────────────────────
|
||||
VM ID: ${vmid}
|
||||
Hostname: ${hostname}
|
||||
Created: $(date)
|
||||
|
||||
Username: ${ciuser}
|
||||
Password: ${cipassword}
|
||||
|
||||
Network: ${network_mode}$([ "$network_mode" = "static" ] && echo " (IP: ${static_ip}, GW: ${gateway})" || echo " (DHCP)")
|
||||
DNS: ${nameservers}
|
||||
|
||||
────────────────────────────────────────
|
||||
SSH Access (if keys configured):
|
||||
ssh ${ciuser}@<vm-ip>
|
||||
|
||||
Proxmox UI Configuration:
|
||||
VM ${vmid} > Cloud-Init > Edit
|
||||
- User, Password, SSH Keys
|
||||
- Network (IP Config)
|
||||
- DNS, Search Domain
|
||||
|
||||
────────────────────────────────────────
|
||||
🗑️ To delete this file:
|
||||
rm -f ${cred_file}
|
||||
────────────────────────────────────────
|
||||
EOF
|
||||
chmod 600 "$cred_file"
|
||||
|
||||
_ci_msg_ok "Cloud-Init configured (User: ${ciuser})"
|
||||
|
||||
# Export for use in calling script (DO NOT display password here - will be shown in summary)
|
||||
export CLOUDINIT_USER="$ciuser"
|
||||
export CLOUDINIT_PASSWORD="$cipassword"
|
||||
export CLOUDINIT_CRED_FILE="$cred_file"
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 4: INTERACTIVE CONFIGURATION
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# configure_cloud_init_interactive - Whiptail dialog for Cloud-Init setup
|
||||
# ------------------------------------------------------------------------------
|
||||
# Prompts user for Cloud-Init configuration choices
|
||||
# Returns configuration via exported variables:
|
||||
# - CLOUDINIT_ENABLE (yes/no)
|
||||
# - CLOUDINIT_USER
|
||||
# - CLOUDINIT_NETWORK_MODE (dhcp/static)
|
||||
# - CLOUDINIT_IP (if static)
|
||||
# - CLOUDINIT_GW (if static)
|
||||
# - CLOUDINIT_DNS
|
||||
# ------------------------------------------------------------------------------
|
||||
function configure_cloud_init_interactive() {
|
||||
local default_user="${1:-root}"
|
||||
|
||||
# Check if whiptail is available
|
||||
if ! command -v whiptail >/dev/null 2>&1; then
|
||||
echo "Warning: whiptail not available, skipping interactive configuration"
|
||||
export CLOUDINIT_ENABLE="no"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Ask if user wants to enable Cloud-Init
|
||||
if ! (whiptail --backtitle "Proxmox VE Helper Scripts" --title "CLOUD-INIT" \
|
||||
--yesno "Enable Cloud-Init for VM configuration?\n\nCloud-Init allows automatic configuration of:\n• User accounts and passwords\n• SSH keys\n• Network settings (DHCP/Static)\n• DNS configuration\n\nYou can also configure these settings later in Proxmox UI." 16 68); then
|
||||
export CLOUDINIT_ENABLE="no"
|
||||
return 0
|
||||
fi
|
||||
|
||||
export CLOUDINIT_ENABLE="yes"
|
||||
|
||||
# Username
|
||||
if CLOUDINIT_USER=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox \
|
||||
"Cloud-Init Username" 8 58 "$default_user" --title "USERNAME" 3>&1 1>&2 2>&3); then
|
||||
export CLOUDINIT_USER="${CLOUDINIT_USER:-$default_user}"
|
||||
else
|
||||
export CLOUDINIT_USER="$default_user"
|
||||
fi
|
||||
|
||||
# Network configuration
|
||||
if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "NETWORK MODE" \
|
||||
--yesno "Use DHCP for network configuration?\n\nSelect 'No' for static IP configuration." 10 58); then
|
||||
export CLOUDINIT_NETWORK_MODE="dhcp"
|
||||
else
|
||||
export CLOUDINIT_NETWORK_MODE="static"
|
||||
|
||||
# Static IP with validation
|
||||
while true; do
|
||||
if CLOUDINIT_IP=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox \
|
||||
"Static IP Address (CIDR format)\nExample: 192.168.1.100/24" 9 58 "" --title "IP ADDRESS" 3>&1 1>&2 2>&3); then
|
||||
if validate_ip_cidr "$CLOUDINIT_IP"; then
|
||||
export CLOUDINIT_IP
|
||||
break
|
||||
else
|
||||
whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID IP" \
|
||||
--msgbox "Invalid IP format: $CLOUDINIT_IP\n\nPlease use CIDR format: x.x.x.x/xx\nExample: 192.168.1.100/24" 10 50
|
||||
fi
|
||||
else
|
||||
_ci_msg_warn "Static IP required, falling back to DHCP"
|
||||
export CLOUDINIT_NETWORK_MODE="dhcp"
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
# Gateway with validation
|
||||
if [ "$CLOUDINIT_NETWORK_MODE" = "static" ]; then
|
||||
while true; do
|
||||
if CLOUDINIT_GW=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox \
|
||||
"Gateway IP Address\nExample: 192.168.1.1" 8 58 "" --title "GATEWAY" 3>&1 1>&2 2>&3); then
|
||||
if validate_ip "$CLOUDINIT_GW"; then
|
||||
export CLOUDINIT_GW
|
||||
break
|
||||
else
|
||||
whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID GATEWAY" \
|
||||
--msgbox "Invalid gateway format: $CLOUDINIT_GW\n\nPlease use format: x.x.x.x\nExample: 192.168.1.1" 10 50
|
||||
fi
|
||||
else
|
||||
_ci_msg_warn "Gateway required, falling back to DHCP"
|
||||
export CLOUDINIT_NETWORK_MODE="dhcp"
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
# DNS Servers
|
||||
if CLOUDINIT_DNS=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox \
|
||||
"DNS Servers (space-separated)" 8 58 "1.1.1.1 8.8.8.8" --title "DNS SERVERS" 3>&1 1>&2 2>&3); then
|
||||
export CLOUDINIT_DNS="${CLOUDINIT_DNS:-1.1.1.1 8.8.8.8}"
|
||||
else
|
||||
export CLOUDINIT_DNS="1.1.1.1 8.8.8.8"
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 5: UTILITY FUNCTIONS
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# display_cloud_init_info - Show Cloud-Init summary after setup
|
||||
# ------------------------------------------------------------------------------
|
||||
function display_cloud_init_info() {
|
||||
local vmid="$1"
|
||||
local hostname="${2:-}"
|
||||
|
||||
if [ -n "$CLOUDINIT_CRED_FILE" ] && [ -f "$CLOUDINIT_CRED_FILE" ]; then
|
||||
if [ -n "${INFO:-}" ]; then
|
||||
echo -e "\n${INFO}${BOLD:-}${GN:-} Cloud-Init Configuration:${CL:-}"
|
||||
echo -e "${TAB:- }${DGN:-}User: ${BGN:-}${CLOUDINIT_USER:-root}${CL:-}"
|
||||
echo -e "${TAB:- }${DGN:-}Password: ${BGN:-}${CLOUDINIT_PASSWORD}${CL:-}"
|
||||
echo -e "${TAB:- }${DGN:-}Credentials: ${BL:-}${CLOUDINIT_CRED_FILE}${CL:-}"
|
||||
echo -e "${TAB:- }${RD:-}⚠️ Delete credentials file after noting password!${CL:-}"
|
||||
echo -e "${TAB:- }${YW:-}💡 Configure in Proxmox UI: VM ${vmid} > Cloud-Init${CL:-}"
|
||||
else
|
||||
echo ""
|
||||
echo "[INFO] Cloud-Init Configuration:"
|
||||
echo " User: ${CLOUDINIT_USER:-root}"
|
||||
echo " Password: ${CLOUDINIT_PASSWORD}"
|
||||
echo " Credentials: ${CLOUDINIT_CRED_FILE}"
|
||||
echo " ⚠️ Delete credentials file after noting password!"
|
||||
echo " Configure in Proxmox UI: VM ${vmid} > Cloud-Init"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# cleanup_cloud_init_credentials - Remove credentials file
|
||||
# ------------------------------------------------------------------------------
|
||||
# Usage: cleanup_cloud_init_credentials
|
||||
# Call this after user has noted/saved the credentials
|
||||
# ------------------------------------------------------------------------------
|
||||
function cleanup_cloud_init_credentials() {
|
||||
if [ -n "$CLOUDINIT_CRED_FILE" ] && [ -f "$CLOUDINIT_CRED_FILE" ]; then
|
||||
rm -f "$CLOUDINIT_CRED_FILE"
|
||||
_ci_msg_ok "Credentials file removed: $CLOUDINIT_CRED_FILE"
|
||||
unset CLOUDINIT_CRED_FILE
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# has_cloud_init - Check if VM has Cloud-Init configured
|
||||
# ------------------------------------------------------------------------------
|
||||
function has_cloud_init() {
|
||||
local vmid="$1"
|
||||
qm config "$vmid" 2>/dev/null | grep -qE "(ide2|scsi1):.*cloudinit"
|
||||
}

# ------------------------------------------------------------------------------
# regenerate_cloud_init - Regenerate Cloud-Init configuration
# ------------------------------------------------------------------------------
function regenerate_cloud_init() {
  local vmid="$1"

  if has_cloud_init "$vmid"; then
    _ci_msg_info "Regenerating Cloud-Init configuration"
    qm cloudinit update "$vmid" >/dev/null 2>&1 || true
    _ci_msg_ok "Cloud-Init configuration regenerated"
    return 0
  else
    _ci_msg_warn "VM $vmid does not have Cloud-Init configured"
    return 1
  fi
}

# ------------------------------------------------------------------------------
# get_vm_ip - Get VM IP address via qemu-guest-agent
# ------------------------------------------------------------------------------
function get_vm_ip() {
  local vmid="$1"
  local timeout="${2:-30}"

  local elapsed=0
  while [ "$elapsed" -lt "$timeout" ]; do
    local vm_ip
    vm_ip=$(qm guest cmd "$vmid" network-get-interfaces 2>/dev/null |
      jq -r '.[] | select(.name != "lo") | ."ip-addresses"[]? | select(."ip-address-type" == "ipv4") | ."ip-address"' 2>/dev/null | head -1)

    if [ -n "$vm_ip" ]; then
      echo "$vm_ip"
      return 0
    fi

    sleep 2
    elapsed=$((elapsed + 2))
  done

  return 1
}
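
# Usage sketch (requires the qemu-guest-agent running inside the VM and `jq`
# on the host; the VMID below is a placeholder):
#   if vm_ip=$(get_vm_ip "$VMID" 60); then
#     echo "VM reachable at ${vm_ip}"
#   else
#     echo "No IPv4 address reported within 60s" >&2
#   fi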

# ------------------------------------------------------------------------------
# wait_for_cloud_init - Wait for Cloud-Init to complete (requires SSH access)
# ------------------------------------------------------------------------------
function wait_for_cloud_init() {
  local vmid="$1"
  local timeout="${2:-300}"
  local vm_ip="${3:-}"

  # Get IP if not provided
  if [ -z "$vm_ip" ]; then
    vm_ip=$(get_vm_ip "$vmid" 60)
  fi

  if [ -z "$vm_ip" ]; then
    _ci_msg_warn "Unable to determine VM IP address"
    return 1
  fi

  _ci_msg_info "Waiting for Cloud-Init to complete on ${vm_ip}"

  local elapsed=0
  while [ "$elapsed" -lt "$timeout" ]; do
    if timeout 10 ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
      "${CLOUDINIT_USER:-root}@${vm_ip}" "cloud-init status --wait" 2>/dev/null; then
      _ci_msg_ok "Cloud-Init completed successfully"
      return 0
    fi
    sleep 10
    elapsed=$((elapsed + 10))
  done

  _ci_msg_warn "Cloud-Init did not complete within ${timeout}s"
  return 1
}

# ==============================================================================
# SECTION 6: EXPORTS
# ==============================================================================
# Export all functions for use in other scripts

export -f setup_cloud_init 2>/dev/null || true
export -f configure_cloud_init_interactive 2>/dev/null || true
export -f display_cloud_init_info 2>/dev/null || true
export -f cleanup_cloud_init_credentials 2>/dev/null || true
export -f has_cloud_init 2>/dev/null || true
export -f regenerate_cloud_init 2>/dev/null || true
export -f get_vm_ip 2>/dev/null || true
export -f wait_for_cloud_init 2>/dev/null || true
export -f validate_ip_cidr 2>/dev/null || true
export -f validate_ip 2>/dev/null || true

# ==============================================================================
# SECTION 7: EXAMPLES & DOCUMENTATION
# ==============================================================================
: <<'EXAMPLES'

# Example 1: Simple DHCP setup (most common)
setup_cloud_init "$VMID" "$STORAGE" "$HN" "yes"

# Example 2: Static IP setup
setup_cloud_init "$VMID" "$STORAGE" "myserver" "yes" "root" "static" "192.168.1.100/24" "192.168.1.1"

# Example 3: Interactive configuration in advanced_settings()
configure_cloud_init_interactive "admin"
if [ "$CLOUDINIT_ENABLE" = "yes" ]; then
  setup_cloud_init "$VMID" "$STORAGE" "$HN" "yes" "$CLOUDINIT_USER" \
    "$CLOUDINIT_NETWORK_MODE" "$CLOUDINIT_IP" "$CLOUDINIT_GW" "$CLOUDINIT_DNS"
fi

# Example 4: Display info after VM creation
display_cloud_init_info "$VMID" "$HN"

# Example 5: Check if VM has Cloud-Init
if has_cloud_init "$VMID"; then
  echo "Cloud-Init is configured"
fi

# Example 6: Wait for Cloud-Init to complete after VM start
if [ "$START_VM" = "yes" ]; then
  qm start "$VMID"
  sleep 30
  wait_for_cloud_init "$VMID" 300
fi

# Example 7: Cleanup credentials file after user has noted password
display_cloud_init_info "$VMID" "$HN"
read -p "Have you saved the credentials? (y/N): " -r
[[ $REPLY =~ ^[Yy]$ ]] && cleanup_cloud_init_credentials

# Example 8: Validate IP before using
if validate_ip_cidr "192.168.1.100/24"; then
  echo "Valid IP/CIDR"
fi

EXAMPLES
@@ -1,699 +0,0 @@
|
||||
config_file() {
|
||||
CONFIG_FILE="/opt/community-scripts/.settings"
|
||||
|
||||
if [[ -f "/opt/community-scripts/${NSAPP}.conf" ]]; then
|
||||
CONFIG_FILE="/opt/community-scripts/${NSAPP}.conf"
|
||||
fi
|
||||
|
||||
if CONFIG_FILE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set absolute path to config file" 8 58 "$CONFIG_FILE" --title "CONFIG FILE" 3>&1 1>&2 2>&3); then
|
||||
if [[ ! -f "$CONFIG_FILE" ]]; then
|
||||
echo -e "${CROSS}${RD}Config file not found, exiting script!.${CL}"
|
||||
exit
|
||||
else
|
||||
echo -e "${INFO}${BOLD}${DGN}Using config File: ${BGN}$CONFIG_FILE${CL}"
|
||||
source "$CONFIG_FILE"
|
||||
fi
|
||||
fi
|
||||
if [[ -n "${CT_ID-}" ]]; then
|
||||
if [[ "$CT_ID" =~ ^([0-9]{3,4})-([0-9]{3,4})$ ]]; then
|
||||
MIN_ID=${BASH_REMATCH[1]}
|
||||
MAX_ID=${BASH_REMATCH[2]}
|
||||
if ((MIN_ID >= MAX_ID)); then
|
||||
msg_error "Invalid Container ID range. The first number must be smaller than the second number, was ${CT_ID}"
|
||||
exit
|
||||
fi
|
||||
|
||||
LIST_OF_IDS=$(pvesh get /cluster/resources --type vm --output-format json 2>/dev/null | grep -oP '"vmid":\s*\K\d+') || true
|
||||
if [[ -n "$LIST_OF_IDS" ]]; then
|
||||
for ((ID = MIN_ID; ID <= MAX_ID; ID++)); do
|
||||
if ! grep -q "^$ID$" <<<"$LIST_OF_IDS"; then
|
||||
CT_ID=$ID
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
|
||||
|
||||
elif [[ "$CT_ID" =~ ^[0-9]+$ ]]; then
|
||||
LIST_OF_IDS=$(pvesh get /cluster/resources --type vm --output-format json 2>/dev/null | grep -oP '"vmid":\s*\K\d+') || true
|
||||
if [[ -n "$LIST_OF_IDS" ]]; then
|
||||
|
||||
if ! grep -q "^$CT_ID$" <<<"$LIST_OF_IDS"; then
|
||||
echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
|
||||
else
|
||||
msg_error "Container ID $CT_ID already exists"
|
||||
exit
|
||||
fi
|
||||
else
|
||||
echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
|
||||
fi
|
||||
else
|
||||
msg_error "Invalid Container ID format. Needs to be 0000-9999 or 0-9999, was ${CT_ID}"
|
||||
exit
|
||||
fi
|
||||
else
|
||||
if CT_ID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Container ID" 8 58 "$NEXTID" --title "CONTAINER ID" 3>&1 1>&2 2>&3); then
|
||||
if [ -z "$CT_ID" ]; then
|
||||
CT_ID="$NEXTID"
|
||||
echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
|
||||
else
|
||||
echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
|
||||
fi
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
|
||||
fi
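  # Accepted CT_ID values handled above (illustrative examples):
  #   CT_ID=105       -> use 105 if it is not already taken
  #   CT_ID=400-450   -> pick the first unused ID between 400 and 450
  #   CT_ID unset     -> prompt interactively, defaulting to $NEXTID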
|
||||
if [[ -n "${CT_TYPE-}" ]]; then
|
||||
if [[ "$CT_TYPE" -eq 0 ]]; then
|
||||
CT_TYPE_DESC="Privileged"
|
||||
elif [[ "$CT_TYPE" -eq 1 ]]; then
|
||||
CT_TYPE_DESC="Unprivileged"
|
||||
else
|
||||
msg_error "Unknown setting for CT_TYPE, should be 1 or 0, was ${CT_TYPE}"
|
||||
exit
|
||||
fi
|
||||
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
|
||||
else
|
||||
if CT_TYPE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \
|
||||
"1" "Unprivileged" ON \
|
||||
"0" "Privileged" OFF \
|
||||
3>&1 1>&2 2>&3); then
|
||||
if [ -n "$CT_TYPE" ]; then
|
||||
CT_TYPE_DESC="Unprivileged"
|
||||
if [ "$CT_TYPE" -eq 0 ]; then
|
||||
CT_TYPE_DESC="Privileged"
|
||||
fi
|
||||
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
|
||||
fi
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "${PW-}" ]]; then
|
||||
if [[ "$PW" == "none" ]]; then
|
||||
PW=""
|
||||
else
|
||||
if [[ "$PW" == *" "* ]]; then
|
||||
msg_error "Password cannot be empty"
|
||||
exit
|
||||
elif [[ ${#PW} -lt 5 ]]; then
|
||||
msg_error "Password must be at least 5 characters long"
|
||||
exit
|
||||
else
|
||||
echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}"
|
||||
fi
|
||||
PW="-password $PW"
|
||||
fi
|
||||
else
|
||||
while true; do
|
||||
if PW1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --passwordbox "\nSet Root Password (needed for root ssh access)" 9 58 --title "PASSWORD (leave blank for automatic login)" 3>&1 1>&2 2>&3); then
|
||||
if [[ -n "$PW1" ]]; then
|
||||
if [[ "$PW1" == *" "* ]]; then
|
||||
whiptail --msgbox "Password cannot contain spaces. Please try again." 8 58
|
||||
elif [ ${#PW1} -lt 5 ]; then
|
||||
whiptail --msgbox "Password must be at least 5 characters long. Please try again." 8 58
|
||||
else
|
||||
if PW2=$(whiptail --backtitle "Proxmox VE Helper Scripts" --passwordbox "\nVerify Root Password" 9 58 --title "PASSWORD VERIFICATION" 3>&1 1>&2 2>&3); then
|
||||
if [[ "$PW1" == "$PW2" ]]; then
|
||||
PW="-password $PW1"
|
||||
echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}"
|
||||
break
|
||||
else
|
||||
whiptail --msgbox "Passwords do not match. Please try again." 8 58
|
||||
fi
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
fi
|
||||
else
|
||||
PW1="Automatic Login"
|
||||
PW=""
|
||||
echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}$PW1${CL}"
|
||||
break
|
||||
fi
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
if [[ -n "${HN-}" ]]; then
|
||||
echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
|
||||
else
|
||||
if CT_NAME=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 "$NSAPP" --title "HOSTNAME" 3>&1 1>&2 2>&3); then
|
||||
if [ -z "$CT_NAME" ]; then
|
||||
HN="$NSAPP"
|
||||
else
|
||||
HN=$(echo "${CT_NAME,,}" | tr -d ' ')
|
||||
fi
|
||||
echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "${DISK_SIZE-}" ]]; then
|
||||
if [[ "$DISK_SIZE" =~ ^-?[0-9]+$ ]]; then
|
||||
echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
|
||||
else
|
||||
msg_error "DISK_SIZE must be an integer, was ${DISK_SIZE}"
|
||||
exit
|
||||
fi
|
||||
else
|
||||
if DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GB" 8 58 "$var_disk" --title "DISK SIZE" 3>&1 1>&2 2>&3); then
|
||||
if [ -z "$DISK_SIZE" ]; then
|
||||
DISK_SIZE="$var_disk"
|
||||
echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
|
||||
else
|
||||
if ! [[ $DISK_SIZE =~ $INTEGER ]]; then
|
||||
echo -e "{INFO}${HOLD}${RD} DISK SIZE MUST BE AN INTEGER NUMBER!${CL}"
|
||||
advanced_settings
|
||||
fi
|
||||
echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
|
||||
fi
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "${CORE_COUNT-}" ]]; then
|
||||
if [[ "$CORE_COUNT" =~ ^-?[0-9]+$ ]]; then
|
||||
echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
|
||||
else
|
||||
msg_error "CORE_COUNT must be an integer, was ${CORE_COUNT}"
|
||||
exit
|
||||
fi
|
||||
else
|
||||
if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 "$var_cpu" --title "CORE COUNT" 3>&1 1>&2 2>&3); then
|
||||
if [ -z "$CORE_COUNT" ]; then
|
||||
CORE_COUNT="$var_cpu"
|
||||
echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
|
||||
else
|
||||
echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
|
||||
fi
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "${RAM_SIZE-}" ]]; then
|
||||
if [[ "$RAM_SIZE" =~ ^-?[0-9]+$ ]]; then
|
||||
echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
|
||||
else
|
||||
msg_error "RAM_SIZE must be an integer, was ${RAM_SIZE}"
|
||||
exit
|
||||
fi
|
||||
else
|
||||
if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 "$var_ram" --title "RAM" 3>&1 1>&2 2>&3); then
|
||||
if [ -z "$RAM_SIZE" ]; then
|
||||
RAM_SIZE="$var_ram"
|
||||
echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
|
||||
else
|
||||
echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
|
||||
fi
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
fi
|
||||
|
||||
IFACE_FILEPATH_LIST="/etc/network/interfaces"$'\n'$(find "/etc/network/interfaces.d/" -type f)
|
||||
BRIDGES=""
|
||||
OLD_IFS=$IFS
|
||||
IFS=$'\n'
|
||||
|
||||
for iface_filepath in ${IFACE_FILEPATH_LIST}; do
|
||||
|
||||
iface_indexes_tmpfile=$(mktemp -q -u '.iface-XXXX')
|
||||
(grep -Pn '^\s*iface' "${iface_filepath}" | cut -d':' -f1 && wc -l "${iface_filepath}" | cut -d' ' -f1) | awk 'FNR==1 {line=$0; next} {print line":"$0-1; line=$0}' >"${iface_indexes_tmpfile}" || true
|
||||
|
||||
if [ -f "${iface_indexes_tmpfile}" ]; then
|
||||
|
||||
while read -r pair; do
|
||||
start=$(echo "${pair}" | cut -d':' -f1)
|
||||
end=$(echo "${pair}" | cut -d':' -f2)
|
||||
if awk "NR >= ${start} && NR <= ${end}" "${iface_filepath}" | grep -qP '^\s*(bridge[-_](ports|stp|fd|vlan-aware|vids)|ovs_type\s+OVSBridge)\b'; then
|
||||
iface_name=$(sed "${start}q;d" "${iface_filepath}" | awk '{print $2}')
|
||||
BRIDGES="${iface_name}"$'\n'"${BRIDGES}"
|
||||
fi
|
||||
|
||||
done <"${iface_indexes_tmpfile}"
|
||||
rm -f "${iface_indexes_tmpfile}"
|
||||
fi
|
||||
|
||||
done
|
||||
IFS=$OLD_IFS
|
||||
BRIDGES=$(echo "$BRIDGES" | grep -v '^\s*$' | sort | uniq)
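  # The scan above picks up stanzas such as the following from
  # /etc/network/interfaces (example values, not taken from a real host):
  #   auto vmbr0
  #   iface vmbr0 inet static
  #       address 192.168.1.2/24
  #       bridge-ports eno1
  #       bridge-stp off
  #       bridge-fd 0
  # Any interface with bridge-* options (or ovs_type OVSBridge) ends up in $BRIDGES.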
|
||||
|
||||
if [[ -n "${BRG-}" ]]; then
|
||||
if echo "$BRIDGES" | grep -q "${BRG}"; then
|
||||
echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
|
||||
else
|
||||
msg_error "Bridge '${BRG}' does not exist in /etc/network/interfaces or /etc/network/interfaces.d/sdn"
|
||||
exit
|
||||
fi
|
||||
else
|
||||
BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --menu "Select network bridge:" 15 40 6 $(echo "$BRIDGES" | awk '{print $0, "Bridge"}') 3>&1 1>&2 2>&3)
|
||||
if [ -z "$BRG" ]; then
|
||||
exit_script
|
||||
else
|
||||
echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
|
||||
fi
|
||||
fi
|
||||
|
||||
local ip_cidr_regex='^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})/([0-9]{1,2})$'
|
||||
local ip_regex='^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$'
|
||||
|
||||
if [[ -n ${NET-} ]]; then
|
||||
if [ "$NET" == "dhcp" ]; then
|
||||
echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}DHCP${CL}"
|
||||
echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}Default${CL}"
|
||||
GATE=""
|
||||
elif [[ "$NET" =~ $ip_cidr_regex ]]; then
|
||||
echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}$NET${CL}"
|
||||
if [[ -n "$GATE" ]]; then
|
||||
[[ "$GATE" =~ ",gw=" ]] && GATE="${GATE##,gw=}"
|
||||
if [[ "$GATE" =~ $ip_regex ]]; then
|
||||
echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE${CL}"
|
||||
GATE=",gw=$GATE"
|
||||
else
|
||||
msg_error "Invalid IP Address format for Gateway. Needs to be 0.0.0.0, was ${GATE}"
|
||||
exit
|
||||
fi
|
||||
|
||||
else
|
||||
while true; do
|
||||
GATE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Enter gateway IP address" 8 58 --title "Gateway IP" 3>&1 1>&2 2>&3)
|
||||
if [ -z "$GATE1" ]; then
|
||||
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Gateway IP address cannot be empty" 8 58
|
||||
elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
|
||||
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Invalid IP address format" 8 58
|
||||
else
|
||||
GATE=",gw=$GATE1"
|
||||
echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}"
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
elif [[ "$NET" == *-* ]]; then
|
||||
IFS="-" read -r ip_start ip_end <<<"$NET"
|
||||
|
||||
if [[ ! "$ip_start" =~ $ip_cidr_regex ]] || [[ ! "$ip_end" =~ $ip_cidr_regex ]]; then
|
||||
msg_error "Invalid IP range format, was $NET should be 0.0.0.0/0-0.0.0.0/0"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ip1="${ip_start%%/*}"
|
||||
ip2="${ip_end%%/*}"
|
||||
cidr="${ip_start##*/}"
|
||||
|
||||
ip_to_int() {
|
||||
local IFS=.
|
||||
read -r i1 i2 i3 i4 <<<"$1"
|
||||
echo $(((i1 << 24) + (i2 << 16) + (i3 << 8) + i4))
|
||||
}
|
||||
|
||||
int_to_ip() {
|
||||
local ip=$1
|
||||
echo "$(((ip >> 24) & 0xFF)).$(((ip >> 16) & 0xFF)).$(((ip >> 8) & 0xFF)).$((ip & 0xFF))"
|
||||
}
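  # Worked example for the two helpers above, using 192.168.1.10:
  #   ip_to_int:  (192<<24) + (168<<16) + (1<<8) + 10 = 3232235786
  #   int_to_ip:  3232235786 -> 192.168.1.10
  # Incrementing the integer therefore walks the range address by address.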
|
||||
|
||||
start_int=$(ip_to_int "$ip1")
|
||||
end_int=$(ip_to_int "$ip2")
|
||||
|
||||
for ((ip_int = start_int; ip_int <= end_int; ip_int++)); do
|
||||
ip=$(int_to_ip $ip_int)
|
||||
msg_info "Checking IP: $ip"
|
||||
if ! ping -c 2 -W 1 "$ip" >/dev/null 2>&1; then
|
||||
NET="$ip/$cidr"
|
||||
msg_ok "Using free IP Address: ${BGN}$NET${CL}"
|
||||
sleep 3
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [[ "$NET" == *-* ]]; then
|
||||
msg_error "No free IP found in range"
|
||||
exit 1
|
||||
fi
|
||||
if [ -n "$GATE" ]; then
|
||||
if [[ "$GATE" =~ $ip_regex ]]; then
|
||||
echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE${CL}"
|
||||
GATE=",gw=$GATE"
|
||||
else
|
||||
msg_error "Invalid IP Address format for Gateway. Needs to be 0.0.0.0, was ${GATE}"
|
||||
exit
|
||||
fi
|
||||
else
|
||||
while true; do
|
||||
GATE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Enter gateway IP address" 8 58 --title "Gateway IP" 3>&1 1>&2 2>&3)
|
||||
if [ -z "$GATE1" ]; then
|
||||
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Gateway IP address cannot be empty" 8 58
|
||||
elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
|
||||
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Invalid IP address format" 8 58
|
||||
else
|
||||
GATE=",gw=$GATE1"
|
||||
echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}"
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
else
|
||||
msg_error "Invalid IP Address format. Needs to be 0.0.0.0/0 or a range like 10.0.0.1/24-10.0.0.10/24, was ${NET}"
|
||||
exit
|
||||
fi
|
||||
else
|
||||
while true; do
|
||||
NET=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Static IPv4 CIDR Address (/24)" 8 58 dhcp --title "IP ADDRESS" 3>&1 1>&2 2>&3)
|
||||
exit_status=$?
|
||||
if [ $exit_status -eq 0 ]; then
|
||||
if [ "$NET" = "dhcp" ]; then
|
||||
echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}$NET${CL}"
|
||||
break
|
||||
else
|
||||
if [[ "$NET" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then
|
||||
echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}$NET${CL}"
|
||||
break
|
||||
else
|
||||
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "$NET is an invalid IPv4 CIDR address. Please enter a valid IPv4 CIDR address or 'dhcp'" 8 58
|
||||
fi
|
||||
fi
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
done
|
||||
if [ "$NET" != "dhcp" ]; then
|
||||
while true; do
|
||||
GATE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Enter gateway IP address" 8 58 --title "Gateway IP" 3>&1 1>&2 2>&3)
|
||||
if [ -z "$GATE1" ]; then
|
||||
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Gateway IP address cannot be empty" 8 58
|
||||
elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
|
||||
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Invalid IP address format" 8 58
|
||||
else
|
||||
GATE=",gw=$GATE1"
|
||||
echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}"
|
||||
break
|
||||
fi
|
||||
done
|
||||
else
|
||||
GATE=""
|
||||
echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}Default${CL}"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$var_os" == "alpine" ]; then
|
||||
APT_CACHER=""
|
||||
APT_CACHER_IP=""
|
||||
else
|
||||
if [[ -n "${APT_CACHER_IP-}" ]]; then
|
||||
if [[ ! $APT_CACHER_IP == "none" ]]; then
|
||||
APT_CACHER="yes"
|
||||
echo -e "${NETWORK}${BOLD}${DGN}APT-CACHER IP Address: ${BGN}$APT_CACHER_IP${CL}"
|
||||
else
|
||||
APT_CACHER=""
|
||||
echo -e "${NETWORK}${BOLD}${DGN}APT-Cacher IP Address: ${BGN}No${CL}"
|
||||
fi
|
||||
else
|
||||
if APT_CACHER_IP=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set APT-Cacher IP (leave blank for none)" 8 58 --title "APT-Cacher IP" 3>&1 1>&2 2>&3); then
|
||||
APT_CACHER="${APT_CACHER_IP:+yes}"
|
||||
echo -e "${NETWORK}${BOLD}${DGN}APT-Cacher IP Address: ${BGN}${APT_CACHER_IP:-Default}${CL}"
|
||||
if [[ -z $APT_CACHER_IP ]]; then
|
||||
APT_CACHER_IP="none"
|
||||
fi
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "${MTU-}" ]]; then
|
||||
if [[ "$MTU" =~ ^-?[0-9]+$ ]]; then
|
||||
echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU${CL}"
|
||||
MTU=",mtu=$MTU"
|
||||
else
|
||||
msg_error "MTU must be an integer, was ${MTU}"
|
||||
exit
|
||||
fi
|
||||
else
|
||||
if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default [The MTU of your selected vmbr, default is 1500])" 8 58 --title "MTU SIZE" 3>&1 1>&2 2>&3); then
|
||||
if [ -z "$MTU1" ]; then
|
||||
MTU1="Default"
|
||||
MTU=""
|
||||
else
|
||||
MTU=",mtu=$MTU1"
|
||||
fi
|
||||
echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}"
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$IPV6_METHOD" == "static" ]]; then
|
||||
if [[ -n "$IPV6STATIC" ]]; then
|
||||
IP6=",ip6=${IPV6STATIC}"
|
||||
echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}${IPV6STATIC}${CL}"
|
||||
else
|
||||
msg_error "IPV6_METHOD is set to static but IPV6STATIC is empty"
|
||||
exit
|
||||
fi
|
||||
elif [[ "$IPV6_METHOD" == "auto" ]]; then
|
||||
IP6=",ip6=auto"
|
||||
echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}auto${CL}"
|
||||
else
|
||||
IP6=""
|
||||
echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}none${CL}"
|
||||
fi
|
||||
|
||||
if [[ -n "${SD-}" ]]; then
|
||||
if [[ "$SD" == "none" ]]; then
|
||||
SD=""
|
||||
echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}Host${CL}"
|
||||
else
|
||||
# Strip prefix if present for config file storage
|
||||
local SD_VALUE="$SD"
|
||||
[[ "$SD" =~ ^-searchdomain= ]] && SD_VALUE="${SD#-searchdomain=}"
|
||||
echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SD_VALUE${CL}"
|
||||
SD="-searchdomain=$SD_VALUE"
|
||||
fi
|
||||
else
|
||||
if SD=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a DNS Search Domain (leave blank for HOST)" 8 58 --title "DNS Search Domain" 3>&1 1>&2 2>&3); then
|
||||
if [ -z "$SD" ]; then
|
||||
SX=Host
|
||||
SD=""
|
||||
else
|
||||
SX=$SD
|
||||
SD="-searchdomain=$SD"
|
||||
fi
|
||||
echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SX${CL}"
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "${NS-}" ]]; then
|
||||
if [[ $NS == "none" ]]; then
|
||||
NS=""
|
||||
echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}Host${CL}"
|
||||
else
|
||||
# Strip prefix if present for config file storage
|
||||
local NS_VALUE="$NS"
|
||||
[[ "$NS" =~ ^-nameserver= ]] && NS_VALUE="${NS#-nameserver=}"
|
||||
if [[ "$NS_VALUE" =~ $ip_regex ]]; then
|
||||
echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NS_VALUE${CL}"
|
||||
NS="-nameserver=$NS_VALUE"
|
||||
else
|
||||
msg_error "Invalid IP Address format for DNS Server. Needs to be 0.0.0.0, was ${NS_VALUE}"
|
||||
exit
|
||||
fi
|
||||
fi
|
||||
else
|
||||
if NX=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a DNS Server IP (leave blank for HOST)" 8 58 --title "DNS SERVER IP" 3>&1 1>&2 2>&3); then
|
||||
if [ -z "$NX" ]; then
|
||||
NX=Host
|
||||
NS=""
|
||||
else
|
||||
NS="-nameserver=$NX"
|
||||
fi
|
||||
echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NX${CL}"
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "${MAC-}" ]]; then
|
||||
if [[ "$MAC" == "none" ]]; then
|
||||
MAC=""
|
||||
echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}Host${CL}"
|
||||
else
|
||||
# Strip prefix if present for config file storage
|
||||
local MAC_VALUE="$MAC"
|
||||
[[ "$MAC" =~ ^,hwaddr= ]] && MAC_VALUE="${MAC#,hwaddr=}"
|
||||
if [[ "$MAC_VALUE" =~ ^([A-Fa-f0-9]{2}:){5}[A-Fa-f0-9]{2}$ ]]; then
|
||||
echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC_VALUE${CL}"
|
||||
MAC=",hwaddr=$MAC_VALUE"
|
||||
else
|
||||
msg_error "MAC Address must be in the format xx:xx:xx:xx:xx:xx, was ${MAC_VALUE}"
|
||||
exit
|
||||
fi
|
||||
fi
|
||||
else
|
||||
if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address (leave blank for generated MAC)" 8 58 --title "MAC ADDRESS" 3>&1 1>&2 2>&3); then
|
||||
if [ -z "$MAC1" ]; then
|
||||
MAC1="Default"
|
||||
MAC=""
|
||||
else
|
||||
MAC=",hwaddr=$MAC1"
|
||||
echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}"
|
||||
fi
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "${VLAN-}" ]]; then
|
||||
if [[ "$VLAN" == "none" ]]; then
|
||||
VLAN=""
|
||||
echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}Host${CL}"
|
||||
else
|
||||
# Strip prefix if present for config file storage
|
||||
local VLAN_VALUE="$VLAN"
|
||||
[[ "$VLAN" =~ ^,tag= ]] && VLAN_VALUE="${VLAN#,tag=}"
|
||||
if [[ "$VLAN_VALUE" =~ ^-?[0-9]+$ ]]; then
|
||||
echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN_VALUE${CL}"
|
||||
VLAN=",tag=$VLAN_VALUE"
|
||||
else
|
||||
msg_error "VLAN must be an integer, was ${VLAN_VALUE}"
|
||||
exit
|
||||
fi
|
||||
fi
|
||||
else
|
||||
if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a VLAN (leave blank for no VLAN)" 8 58 --title "VLAN" 3>&1 1>&2 2>&3); then
|
||||
if [ -z "$VLAN1" ]; then
|
||||
VLAN1="Default"
|
||||
VLAN=""
|
||||
else
|
||||
VLAN=",tag=$VLAN1"
|
||||
fi
|
||||
echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN1${CL}"
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "${TAGS-}" ]]; then
|
||||
if [[ "$TAGS" == *"DEFAULT"* ]]; then
|
||||
TAGS="${TAGS//DEFAULT/}"
|
||||
TAGS="${TAGS//;/}"
|
||||
TAGS="$TAGS;${var_tags:-}"
|
||||
echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}"
|
||||
fi
|
||||
else
|
||||
TAGS="community-scripts;"
|
||||
if ADV_TAGS=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Custom Tags? [If you remove all, there will be no tags!]" 8 58 "${TAGS}" --title "Advanced Tags" 3>&1 1>&2 2>&3); then
|
||||
if [ -n "${ADV_TAGS}" ]; then
|
||||
ADV_TAGS=$(echo "$ADV_TAGS" | tr -d '[:space:]')
|
||||
TAGS="${ADV_TAGS}"
|
||||
else
|
||||
TAGS=";"
|
||||
fi
|
||||
echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}"
|
||||
else
|
||||
exit_script
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "${SSH-}" ]]; then
|
||||
if [[ "$SSH" == "yes" ]]; then
|
||||
echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
|
||||
if [[ ! -z "$SSH_AUTHORIZED_KEY" ]]; then
|
||||
echo -e "${ROOTSSH}${BOLD}${DGN}SSH Authorized Key: ${BGN}********************${CL}"
|
||||
else
|
||||
echo -e "${ROOTSSH}${BOLD}${DGN}SSH Authorized Key: ${BGN}None${CL}"
|
||||
fi
|
||||
elif [[ "$SSH" == "no" ]]; then
|
||||
echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
|
||||
else
|
||||
msg_error "SSH needs to be 'yes' or 'no', was ${SSH}"
|
||||
exit
|
||||
fi
|
||||
else
|
||||
SSH_AUTHORIZED_KEY="$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "SSH Authorized key for root (leave empty for none)" 8 58 --title "SSH Key" 3>&1 1>&2 2>&3)"
|
||||
if [[ -z "${SSH_AUTHORIZED_KEY}" ]]; then
|
||||
SSH_AUTHORIZED_KEY=""
|
||||
fi
|
||||
if [[ "$PW" == -password* || -n "$SSH_AUTHORIZED_KEY" ]]; then
|
||||
if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "SSH ACCESS" --yesno "Enable Root SSH Access?" 10 58); then
|
||||
SSH="yes"
|
||||
else
|
||||
SSH="no"
|
||||
fi
|
||||
echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
|
||||
else
|
||||
SSH="no"
|
||||
echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "$ENABLE_FUSE" ]]; then
|
||||
if [[ "$ENABLE_FUSE" == "yes" ]]; then
|
||||
echo -e "${FUSE}${BOLD}${DGN}Enable FUSE: ${BGN}Yes${CL}"
|
||||
elif [[ "$ENABLE_FUSE" == "no" ]]; then
|
||||
echo -e "${FUSE}${BOLD}${DGN}Enable FUSE: ${BGN}No${CL}"
|
||||
else
|
||||
msg_error "Enable FUSE needs to be 'yes' or 'no', was ${ENABLE_FUSE}"
|
||||
exit
|
||||
fi
|
||||
else
|
||||
if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "FUSE" --yesno "Enable FUSE?" 10 58); then
|
||||
ENABLE_FUSE="yes"
|
||||
else
|
||||
ENABLE_FUSE="no"
|
||||
fi
|
||||
echo -e "${FUSE}${BOLD}${DGN}Enable FUSE: ${BGN}$ENABLE_FUSE${CL}"
|
||||
fi
|
||||
|
||||
if [[ -n "$ENABLE_TUN" ]]; then
|
||||
if [[ "$ENABLE_TUN" == "yes" ]]; then
|
||||
echo -e "${FUSE}${BOLD}${DGN}Enable TUN: ${BGN}Yes${CL}"
|
||||
elif [[ "$ENABLE_TUN" == "no" ]]; then
|
||||
echo -e "${FUSE}${BOLD}${DGN}Enable TUN: ${BGN}No${CL}"
|
||||
else
|
||||
msg_error "Enable TUN needs to be 'yes' or 'no', was ${ENABLE_TUN}"
|
||||
exit
|
||||
fi
|
||||
else
|
||||
if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "TUN" --yesno "Enable TUN?" 10 58); then
|
||||
ENABLE_TUN="yes"
|
||||
else
|
||||
ENABLE_TUN="no"
|
||||
fi
|
||||
echo -e "${FUSE}${BOLD}${DGN}Enable TUN: ${BGN}$ENABLE_TUN${CL}"
|
||||
fi
|
||||
|
||||
if [[ -n "${VERBOSE-}" ]]; then
|
||||
if [[ "$VERBOSE" == "yes" ]]; then
|
||||
echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}"
|
||||
elif [[ "$VERBOSE" == "no" ]]; then
|
||||
echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}No${CL}"
|
||||
else
|
||||
msg_error "Verbose Mode needs to be 'yes' or 'no', was ${VERBOSE}"
|
||||
exit
|
||||
fi
|
||||
else
|
||||
if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "VERBOSE MODE" --yesno "Enable Verbose Mode?" 10 58); then
|
||||
VERBOSE="yes"
|
||||
else
|
||||
VERBOSE="no"
|
||||
fi
|
||||
echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}"
|
||||
fi
|
||||
|
||||
if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS WITH CONFIG FILE COMPLETE" --yesno "Ready to create ${APP} LXC?" 10 58); then
|
||||
echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above settings${CL}"
|
||||
else
|
||||
clear
|
||||
header_info
|
||||
echo -e "${INFO}${HOLD} ${GN}Using Config File on node $PVEHOST_NAME${CL}"
|
||||
config_file
|
||||
fi
|
||||
}
|
||||
@@ -1,13 +1,35 @@
|
||||
# Copyright (c) 2021-2025 community-scripts ORG
|
||||
#!/usr/bin/env bash
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/LICENSE
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Loads core utility groups once (colors, formatting, icons, defaults).
|
||||
# ------------------------------------------------------------------------------
|
||||
# ==============================================================================
|
||||
# CORE FUNCTIONS - LXC CONTAINER UTILITIES
|
||||
# ==============================================================================
|
||||
#
|
||||
# This file provides core utility functions for LXC container management
|
||||
# including colors, formatting, validation checks, message output, and
|
||||
# execution helpers used throughout the Community-Scripts ecosystem.
|
||||
#
|
||||
# Usage:
|
||||
# source <(curl -fsSL https://git.community-scripts.org/.../core.func)
|
||||
# load_functions
|
||||
#
|
||||
# ==============================================================================
|
||||
|
||||
[[ -n "${_CORE_FUNC_LOADED:-}" ]] && return
|
||||
_CORE_FUNC_LOADED=1
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 1: INITIALIZATION & SETUP
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# load_functions()
|
||||
#
|
||||
# - Initializes all core utility groups (colors, formatting, icons, defaults)
|
||||
# - Ensures functions are loaded only once via __FUNCTIONS_LOADED flag
|
||||
# - Must be called at start of any script using these utilities
|
||||
# ------------------------------------------------------------------------------
|
||||
load_functions() {
|
||||
[[ -n "${__FUNCTIONS_LOADED:-}" ]] && return
|
||||
__FUNCTIONS_LOADED=1
|
||||
@@ -16,58 +38,14 @@ load_functions() {
|
||||
icons
|
||||
default_vars
|
||||
set_std_mode
|
||||
# add more
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# Error & Signal Handling – robust, universal, subshell-safe
|
||||
# ============================================================================
|
||||
|
||||
_tool_error_hint() {
|
||||
local cmd="$1"
|
||||
local code="$2"
|
||||
case "$cmd" in
|
||||
curl)
|
||||
case "$code" in
|
||||
6) echo "Curl: Could not resolve host (DNS problem)" ;;
|
||||
7) echo "Curl: Failed to connect to host (connection refused)" ;;
|
||||
22) echo "Curl: HTTP error (404/403 etc)" ;;
|
||||
28) echo "Curl: Operation timeout" ;;
|
||||
*) echo "Curl: Unknown error ($code)" ;;
|
||||
esac
|
||||
;;
|
||||
wget)
|
||||
echo "Wget failed – URL unreachable or permission denied"
|
||||
;;
|
||||
systemctl)
|
||||
echo "Systemd unit failure – check service name and permissions"
|
||||
;;
|
||||
jq)
|
||||
echo "jq parse error – malformed JSON or missing key"
|
||||
;;
|
||||
mariadb | mysql)
|
||||
echo "MySQL/MariaDB command failed – check credentials or DB"
|
||||
;;
|
||||
unzip)
|
||||
echo "unzip failed – corrupt file or missing permission"
|
||||
;;
|
||||
tar)
|
||||
echo "tar failed – invalid format or missing binary"
|
||||
;;
|
||||
node | npm | pnpm | yarn)
|
||||
echo "Node tool failed – check version compatibility or package.json"
|
||||
;;
|
||||
*) echo "" ;;
|
||||
esac
|
||||
}
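# Usage sketch (assumes msg_warn is available at the call site):
#   hint="$(_tool_error_hint "curl" 6)"   # -> "Curl: Could not resolve host (DNS problem)"
#   [[ -n "$hint" ]] && msg_warn "$hint"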
|
||||
|
||||
catch_errors() {
|
||||
set -Eeuo pipefail
|
||||
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Sets ANSI color codes used for styled terminal output.
|
||||
# color()
|
||||
#
|
||||
# - Sets ANSI color codes for styled terminal output
|
||||
# - Variables: YW (yellow), YWB (yellow bright), BL (blue), RD (red)
|
||||
# GN (green), DGN (dark green), BGN (background green), CL (clear)
|
||||
# ------------------------------------------------------------------------------
|
||||
color() {
|
||||
YW=$(echo "\033[33m")
|
||||
@@ -80,7 +58,14 @@ color() {
|
||||
CL=$(echo "\033[m")
|
||||
}
|
||||
|
||||
# Special for spinner and colorized output via printf
|
||||
# ------------------------------------------------------------------------------
|
||||
# color_spinner()
|
||||
#
|
||||
# - Sets ANSI color codes specifically for spinner animation
|
||||
# - Variables: CS_YW (spinner yellow), CS_YWB (spinner yellow bright),
|
||||
# CS_CL (spinner clear)
|
||||
# - Used by spinner() function to avoid color conflicts
|
||||
# ------------------------------------------------------------------------------
|
||||
color_spinner() {
|
||||
CS_YW=$'\033[33m'
|
||||
CS_YWB=$'\033[93m'
|
||||
@@ -88,7 +73,12 @@ color_spinner() {
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Defines formatting helpers like tab, bold, and line reset sequences.
|
||||
# formatting()
|
||||
#
|
||||
# - Defines formatting helpers for terminal output
|
||||
# - BFR: Backspace and clear line sequence
|
||||
# - BOLD: Bold text escape code
|
||||
# - TAB/TAB3: Indentation spacing
|
||||
# ------------------------------------------------------------------------------
|
||||
formatting() {
|
||||
BFR="\\r\\033[K"
|
||||
@@ -99,7 +89,11 @@ formatting() {
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Sets symbolic icons used throughout user feedback and prompts.
|
||||
# icons()
|
||||
#
|
||||
# - Sets symbolic emoji icons used throughout user feedback
|
||||
# - Provides consistent visual indicators for success, error, info, etc.
|
||||
# - Icons: CM (checkmark), CROSS (error), INFO (info), HOURGLASS (wait), etc.
|
||||
# ------------------------------------------------------------------------------
|
||||
icons() {
|
||||
CM="${TAB}✔️${TAB}"
|
||||
@@ -129,22 +123,31 @@ icons() {
|
||||
CREATING="${TAB}🚀${TAB}${CL}"
|
||||
ADVANCED="${TAB}🧩${TAB}${CL}"
|
||||
FUSE="${TAB}🗂️${TAB}${CL}"
|
||||
GPU="${TAB}🎮${TAB}${CL}"
|
||||
HOURGLASS="${TAB}⏳${TAB}"
|
||||
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Sets default retry and wait variables used for system actions.
|
||||
# default_vars()
|
||||
#
|
||||
# - Sets default retry and wait variables used for system actions
|
||||
# - RETRY_NUM: Maximum number of retry attempts (default: 10)
|
||||
# - RETRY_EVERY: Seconds to wait between retries (default: 3)
|
||||
# - i: Counter variable initialized to RETRY_NUM
|
||||
# ------------------------------------------------------------------------------
|
||||
default_vars() {
|
||||
RETRY_NUM=10
|
||||
RETRY_EVERY=3
|
||||
i=$RETRY_NUM
|
||||
#[[ "${VAR_OS:-}" == "unknown" ]]
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Sets default verbose mode for script and os execution.
|
||||
# set_std_mode()
|
||||
#
|
||||
# - Sets default verbose mode for script and OS execution
|
||||
# - If VERBOSE=yes: STD="" (show all output)
|
||||
# - If VERBOSE=no: STD="silent" (suppress output via silent() wrapper)
|
||||
# - If DEV_MODE_TRACE=true: Enables bash tracing (set -x)
|
||||
# ------------------------------------------------------------------------------
|
||||
set_std_mode() {
|
||||
if [ "${VERBOSE:-no}" = "yes" ]; then
|
||||
@@ -152,138 +155,338 @@ set_std_mode() {
|
||||
else
|
||||
STD="silent"
|
||||
fi
|
||||
}
|
||||
|
||||
# Silent execution function
|
||||
silent() {
|
||||
"$@" >/dev/null 2>&1
|
||||
}
|
||||
|
||||
# Function to download & save header files
|
||||
get_header() {
|
||||
local app_name=$(echo "${APP,,}" | tr -d ' ')
|
||||
local app_type=${APP_TYPE:-ct}
|
||||
local header_url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/${app_type}/headers/${app_name}"
|
||||
local local_header_path="/usr/local/community-scripts/headers/${app_type}/${app_name}"
|
||||
|
||||
mkdir -p "$(dirname "$local_header_path")"
|
||||
|
||||
if [ ! -s "$local_header_path" ]; then
|
||||
if ! curl -fsSL "$header_url" -o "$local_header_path"; then
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
cat "$local_header_path" 2>/dev/null || true
|
||||
}
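# Example of the path/URL derivation above (app name is illustrative):
# with APP="Home Assistant" and APP_TYPE unset,
#   app_name          -> homeassistant
#   header_url        -> .../ProxmoxVE/main/ct/headers/homeassistant
#   local_header_path -> /usr/local/community-scripts/headers/ct/homeassistant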
|
||||
|
||||
header_info() {
|
||||
local app_name=$(echo "${APP,,}" | tr -d ' ')
|
||||
local header_content
|
||||
|
||||
header_content=$(get_header "$app_name") || header_content=""
|
||||
|
||||
clear
|
||||
local term_width
|
||||
term_width=$(tput cols 2>/dev/null || echo 120)
|
||||
|
||||
if [ -n "$header_content" ]; then
|
||||
echo "$header_content"
|
||||
# Enable bash tracing if trace mode active
|
||||
if [[ "${DEV_MODE_TRACE:-false}" == "true" ]]; then
|
||||
set -x
|
||||
export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
|
||||
fi
|
||||
}
|
||||
|
||||
ensure_tput() {
|
||||
if ! command -v tput >/dev/null 2>&1; then
|
||||
if grep -qi 'alpine' /etc/os-release; then
|
||||
apk add --no-cache ncurses >/dev/null 2>&1
|
||||
elif command -v apt-get >/dev/null 2>&1; then
|
||||
apt-get update -qq >/dev/null
|
||||
apt-get install -y -qq ncurses-bin >/dev/null 2>&1
|
||||
# ------------------------------------------------------------------------------
|
||||
# parse_dev_mode()
|
||||
#
|
||||
# - Parses comma-separated dev_mode variable (e.g., "motd,keep,trace")
|
||||
# - Sets global flags for each mode:
|
||||
# * DEV_MODE_MOTD: Setup SSH/MOTD before installation
|
||||
# * DEV_MODE_KEEP: Never delete container on failure
|
||||
# * DEV_MODE_TRACE: Enable bash set -x tracing
|
||||
# * DEV_MODE_PAUSE: Pause after each msg_info step
|
||||
# * DEV_MODE_BREAKPOINT: Open shell on error instead of cleanup
|
||||
# * DEV_MODE_LOGS: Persist all logs to /var/log/community-scripts/
|
||||
# * DEV_MODE_DRYRUN: Show commands without executing
|
||||
# - Call this early in script execution
|
||||
# ------------------------------------------------------------------------------
|
||||
parse_dev_mode() {
|
||||
local mode
|
||||
# Initialize all flags to false
|
||||
export DEV_MODE_MOTD=false
|
||||
export DEV_MODE_KEEP=false
|
||||
export DEV_MODE_TRACE=false
|
||||
export DEV_MODE_PAUSE=false
|
||||
export DEV_MODE_BREAKPOINT=false
|
||||
export DEV_MODE_LOGS=false
|
||||
export DEV_MODE_DRYRUN=false
|
||||
|
||||
# Parse comma-separated modes
|
||||
if [[ -n "${dev_mode:-}" ]]; then
|
||||
IFS=',' read -ra MODES <<<"$dev_mode"
|
||||
for mode in "${MODES[@]}"; do
|
||||
mode="$(echo "$mode" | xargs)" # Trim whitespace
|
||||
case "$mode" in
|
||||
motd) export DEV_MODE_MOTD=true ;;
|
||||
keep) export DEV_MODE_KEEP=true ;;
|
||||
trace) export DEV_MODE_TRACE=true ;;
|
||||
pause) export DEV_MODE_PAUSE=true ;;
|
||||
breakpoint) export DEV_MODE_BREAKPOINT=true ;;
|
||||
logs) export DEV_MODE_LOGS=true ;;
|
||||
dryrun) export DEV_MODE_DRYRUN=true ;;
|
||||
*)
|
||||
if declare -f msg_warn >/dev/null 2>&1; then
|
||||
msg_warn "Unknown dev_mode: '$mode' (ignored)"
|
||||
else
|
||||
echo "[WARN] Unknown dev_mode: '$mode' (ignored)" >&2
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Show active dev modes
|
||||
local active_modes=()
|
||||
[[ $DEV_MODE_MOTD == true ]] && active_modes+=("motd")
|
||||
[[ $DEV_MODE_KEEP == true ]] && active_modes+=("keep")
|
||||
[[ $DEV_MODE_TRACE == true ]] && active_modes+=("trace")
|
||||
[[ $DEV_MODE_PAUSE == true ]] && active_modes+=("pause")
|
||||
[[ $DEV_MODE_BREAKPOINT == true ]] && active_modes+=("breakpoint")
|
||||
[[ $DEV_MODE_LOGS == true ]] && active_modes+=("logs")
|
||||
[[ $DEV_MODE_DRYRUN == true ]] && active_modes+=("dryrun")
|
||||
|
||||
if [[ ${#active_modes[@]} -gt 0 ]]; then
|
||||
if declare -f msg_custom >/dev/null 2>&1; then
|
||||
msg_custom "🔧" "${YWB}" "Dev modes active: ${active_modes[*]}"
|
||||
else
|
||||
echo "[DEV] Active modes: ${active_modes[*]}" >&2
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
}
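# Usage sketch (the script name is a placeholder):
#   dev_mode="trace,keep,logs" bash ct/myapp.sh
# parse_dev_mode would then enable bash tracing, keep the container on failure
# and persist logs; unknown tokens are warned about and ignored.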
|
||||
|
||||
is_alpine() {
|
||||
local os_id="${var_os:-${PCT_OSTYPE:-}}"
|
||||
# ==============================================================================
|
||||
# SECTION 2: VALIDATION CHECKS
|
||||
# ==============================================================================
|
||||
|
||||
if [[ -z "$os_id" && -f /etc/os-release ]]; then
|
||||
os_id="$(
|
||||
. /etc/os-release 2>/dev/null
|
||||
echo "${ID:-}"
|
||||
)"
|
||||
# ------------------------------------------------------------------------------
|
||||
# shell_check()
|
||||
#
|
||||
# - Verifies that the script is running under Bash shell
|
||||
# - Exits with error message if different shell is detected
|
||||
# - Required because scripts use Bash-specific features
|
||||
# ------------------------------------------------------------------------------
|
||||
shell_check() {
|
||||
if [[ "$(ps -p $$ -o comm=)" != "bash" ]]; then
|
||||
clear
|
||||
msg_error "Your default shell is currently not set to Bash. To use these scripts, please switch to the Bash shell."
|
||||
echo -e "\nExiting..."
|
||||
sleep 2
|
||||
exit
|
||||
fi
|
||||
|
||||
[[ "$os_id" == "alpine" ]]
|
||||
}
|
||||
|
||||
is_verbose_mode() {
|
||||
local verbose="${VERBOSE:-${var_verbose:-no}}"
|
||||
local tty_status
|
||||
if [[ -t 2 ]]; then
|
||||
tty_status="interactive"
|
||||
else
|
||||
tty_status="not-a-tty"
|
||||
fi
|
||||
[[ "$verbose" != "no" || ! -t 2 ]]
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Handles specific curl error codes and displays descriptive messages.
|
||||
# root_check()
|
||||
#
|
||||
# - Verifies script is running with root privileges
|
||||
# - Detects if executed via sudo (which can cause issues)
|
||||
# - Exits with error if not running as root directly
|
||||
# ------------------------------------------------------------------------------
|
||||
__curl_err_handler() {
|
||||
local exit_code="$1"
|
||||
local target="$2"
|
||||
local curl_msg="$3"
|
||||
root_check() {
|
||||
if [[ "$(id -u)" -ne 0 || $(ps -o comm= -p $PPID) == "sudo" ]]; then
|
||||
clear
|
||||
msg_error "Please run this script as root."
|
||||
echo -e "\nExiting..."
|
||||
sleep 2
|
||||
exit
|
||||
fi
|
||||
}
|
||||
|
||||
case $exit_code in
|
||||
1) msg_error "Unsupported protocol: $target" ;;
|
||||
2) msg_error "Curl init failed: $target" ;;
|
||||
3) msg_error "Malformed URL: $target" ;;
|
||||
5) msg_error "Proxy resolution failed: $target" ;;
|
||||
6) msg_error "Host resolution failed: $target" ;;
|
||||
7) msg_error "Connection failed: $target" ;;
|
||||
9) msg_error "Access denied: $target" ;;
|
||||
18) msg_error "Partial file transfer: $target" ;;
|
||||
22) msg_error "HTTP error (e.g. 400/404): $target" ;;
|
||||
23) msg_error "Write error on local system: $target" ;;
|
||||
26) msg_error "Read error from local file: $target" ;;
|
||||
28) msg_error "Timeout: $target" ;;
|
||||
35) msg_error "SSL connect error: $target" ;;
|
||||
47) msg_error "Too many redirects: $target" ;;
|
||||
51) msg_error "SSL cert verify failed: $target" ;;
|
||||
52) msg_error "Empty server response: $target" ;;
|
||||
55) msg_error "Send error: $target" ;;
|
||||
56) msg_error "Receive error: $target" ;;
|
||||
60) msg_error "SSL CA not trusted: $target" ;;
|
||||
67) msg_error "Login denied by server: $target" ;;
|
||||
78) msg_error "Remote file not found (404): $target" ;;
|
||||
*) msg_error "Curl failed with code $exit_code: $target" ;;
|
||||
esac
|
||||
# ------------------------------------------------------------------------------
|
||||
# pve_check()
|
||||
#
|
||||
# - Validates Proxmox VE version compatibility
|
||||
# - Supported: PVE 8.0-8.9 and PVE 9.0-9.1
|
||||
# - Exits with error message if unsupported version detected
|
||||
# ------------------------------------------------------------------------------
|
||||
pve_check() {
|
||||
local PVE_VER
|
||||
PVE_VER="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')"
|
||||
|
||||
[[ -n "$curl_msg" ]] && printf "%s\n" "$curl_msg" >&2
|
||||
# Check for Proxmox VE 8.x: allow 8.0–8.9
|
||||
if [[ "$PVE_VER" =~ ^8\.([0-9]+) ]]; then
|
||||
local MINOR="${BASH_REMATCH[1]}"
|
||||
if ((MINOR < 0 || MINOR > 9)); then
|
||||
msg_error "This version of Proxmox VE is not supported."
|
||||
msg_error "Supported: Proxmox VE version 8.0 – 8.9"
|
||||
exit 1
|
||||
fi
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Check for Proxmox VE 9.x: allow 9.0–9.1
|
||||
if [[ "$PVE_VER" =~ ^9\.([0-9]+) ]]; then
|
||||
local MINOR="${BASH_REMATCH[1]}"
|
||||
if ((MINOR < 0 || MINOR > 1)); then
|
||||
msg_error "This version of Proxmox VE is not yet supported."
|
||||
msg_error "Supported: Proxmox VE version 9.0 – 9.1"
|
||||
exit 1
|
||||
fi
|
||||
return 0
|
||||
fi
|
||||
|
||||
# All other unsupported versions
|
||||
msg_error "This version of Proxmox VE is not supported."
|
||||
msg_error "Supported versions: Proxmox VE 8.0 – 8.9 or 9.0 – 9.1"
|
||||
exit 1
|
||||
}
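# Example of the parsing above (version string is illustrative):
#   pveversion -> "pve-manager/8.2.4/<hash> (running kernel: ...)"
#   PVE_VER    -> "8.2.4", which matches ^8\. with MINOR=2 and passes the check.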
|
||||
|
||||
fatal() {
|
||||
msg_error "$1"
|
||||
kill -INT $$
|
||||
# ------------------------------------------------------------------------------
|
||||
# arch_check()
|
||||
#
|
||||
# - Validates system architecture is amd64/x86_64
|
||||
# - Exits with error message for unsupported architectures (e.g., ARM/PiMox)
|
||||
# - Provides link to ARM64-compatible scripts
|
||||
# ------------------------------------------------------------------------------
|
||||
arch_check() {
|
||||
if [ "$(dpkg --print-architecture)" != "amd64" ]; then
|
||||
echo -e "\n ${INFO}${YWB}This script will not work with PiMox! \n"
|
||||
echo -e "\n ${YWB}Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n"
|
||||
echo -e "Exiting..."
|
||||
sleep 2
|
||||
exit
|
||||
fi
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ssh_check()
|
||||
#
|
||||
# - Detects if script is running over SSH connection
|
||||
# - Warns user for external SSH connections (recommends Proxmox shell)
|
||||
# - Skips warning for local/same-subnet connections
|
||||
# - Does not abort execution, only warns
|
||||
# ------------------------------------------------------------------------------
|
||||
ssh_check() {
|
||||
if [ -n "$SSH_CLIENT" ]; then
|
||||
local client_ip=$(awk '{print $1}' <<<"$SSH_CLIENT")
|
||||
local host_ip=$(hostname -I | awk '{print $1}')
|
||||
|
||||
# Check if connection is local (Proxmox WebUI or same machine)
|
||||
# - localhost (127.0.0.1, ::1)
|
||||
# - same IP as host
|
||||
# - local network range (10.x, 172.16-31.x, 192.168.x)
|
||||
if [[ "$client_ip" == "127.0.0.1" || "$client_ip" == "::1" || "$client_ip" == "$host_ip" ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
# Check if client is in same local network (optional, safer approach)
|
||||
local host_subnet=$(echo "$host_ip" | cut -d. -f1-3)
|
||||
local client_subnet=$(echo "$client_ip" | cut -d. -f1-3)
|
||||
if [[ "$host_subnet" == "$client_subnet" ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
# Only warn for truly external connections
|
||||
msg_warn "Running via external SSH (client: $client_ip)."
|
||||
msg_warn "For better stability, consider using the Proxmox Shell (Console) instead."
|
||||
fi
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 3: EXECUTION HELPERS
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# get_active_logfile()
|
||||
#
|
||||
# - Returns the appropriate log file based on execution context
|
||||
# - BUILD_LOG: Host operations (container creation)
|
||||
# - INSTALL_LOG: Container operations (application installation)
|
||||
# - Fallback to BUILD_LOG if neither is set
|
||||
# ------------------------------------------------------------------------------
|
||||
get_active_logfile() {
|
||||
if [[ -n "${INSTALL_LOG:-}" ]]; then
|
||||
echo "$INSTALL_LOG"
|
||||
elif [[ -n "${BUILD_LOG:-}" ]]; then
|
||||
echo "$BUILD_LOG"
|
||||
else
|
||||
# Fallback for legacy scripts
|
||||
echo "/tmp/build-$(date +%Y%m%d_%H%M%S).log"
|
||||
fi
|
||||
}
|
||||
|
||||
# Legacy compatibility: SILENT_LOGFILE points to active log
|
||||
SILENT_LOGFILE="$(get_active_logfile)"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# silent()
|
||||
#
|
||||
# - Executes command with output redirected to active log file
|
||||
# - On error: displays last 10 lines of log and exits with original exit code
|
||||
# - Temporarily disables error trap to capture exit code correctly
|
||||
# - Sources explain_exit_code() for detailed error messages
|
||||
# ------------------------------------------------------------------------------
|
||||
silent() {
|
||||
local cmd="$*"
|
||||
local caller_line="${BASH_LINENO[0]:-unknown}"
|
||||
local logfile="$(get_active_logfile)"
|
||||
|
||||
# Dryrun mode: Show command without executing
|
||||
if [[ "${DEV_MODE_DRYRUN:-false}" == "true" ]]; then
|
||||
if declare -f msg_custom >/dev/null 2>&1; then
|
||||
msg_custom "🔍" "${BL}" "[DRYRUN] $cmd"
|
||||
else
|
||||
echo "[DRYRUN] $cmd" >&2
|
||||
fi
|
||||
return 0
|
||||
fi
|
||||
|
||||
set +Eeuo pipefail
|
||||
trap - ERR
|
||||
|
||||
"$@" >>"$logfile" 2>&1
|
||||
local rc=$?
|
||||
|
||||
set -Eeuo pipefail
|
||||
trap 'error_handler' ERR
|
||||
|
||||
if [[ $rc -ne 0 ]]; then
|
||||
# Source explain_exit_code if needed
|
||||
if ! declare -f explain_exit_code >/dev/null 2>&1; then
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/error-handler.func"
|
||||
fi
|
||||
|
||||
local explanation
|
||||
explanation="$(explain_exit_code "$rc")"
|
||||
|
||||
printf "\e[?25h"
|
||||
msg_error "in line ${caller_line}: exit code ${rc} (${explanation})"
|
||||
msg_custom "→" "${YWB}" "${cmd}"
|
||||
|
||||
if [[ -s "$logfile" ]]; then
|
||||
local log_lines=$(wc -l <"$logfile")
|
||||
echo "--- Last 10 lines of silent log ---"
|
||||
tail -n 10 "$logfile"
|
||||
echo "-----------------------------------"
|
||||
|
||||
# Show how to view full log if there are more lines
|
||||
if [[ $log_lines -gt 10 ]]; then
|
||||
msg_custom "📋" "${YW}" "View full log (${log_lines} lines): ${logfile}"
|
||||
fi
|
||||
fi
|
||||
|
||||
exit "$rc"
|
||||
fi
|
||||
}
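# Usage sketch: install scripts typically run package commands through $STD,
# which set_std_mode points at this function when VERBOSE=no:
#   $STD apt-get install -y curl
# On failure the last 10 log lines plus the decoded exit code are printed.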
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# spinner()
|
||||
#
|
||||
# - Displays animated spinner with rotating characters (⠋ ⠙ ⠹ ⠸ ⠼ ⠴ ⠦ ⠧ ⠇ ⠏)
|
||||
# - Shows SPINNER_MSG alongside animation
|
||||
# - Runs in infinite loop until killed by stop_spinner()
|
||||
# - Uses color_spinner() colors for output
|
||||
# ------------------------------------------------------------------------------
|
||||
spinner() {
|
||||
local chars=(⠋ ⠙ ⠹ ⠸ ⠼ ⠴ ⠦ ⠧ ⠇ ⠏)
|
||||
local msg="${SPINNER_MSG:-Processing...}"
|
||||
local i=0
|
||||
while true; do
|
||||
local index=$((i++ % ${#chars[@]}))
|
||||
printf "\r\033[2K%s %b" "${CS_YWB}${chars[$index]}${CS_CL}" "${CS_YWB}${SPINNER_MSG:-}${CS_CL}"
|
||||
printf "\r\033[2K%s %b" "${CS_YWB}${chars[$index]}${CS_CL}" "${CS_YWB}${msg}${CS_CL}"
|
||||
sleep 0.1
|
||||
done
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# clear_line()
|
||||
#
|
||||
# - Clears current terminal line using tput or ANSI escape codes
|
||||
# - Moves cursor to beginning of line (carriage return)
|
||||
# - Erases from cursor to end of line
|
||||
# - Fallback to ANSI codes if tput not available
|
||||
# ------------------------------------------------------------------------------
|
||||
clear_line() {
|
||||
tput cr 2>/dev/null || echo -en "\r"
|
||||
tput el 2>/dev/null || echo -en "\033[K"
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# stop_spinner()
|
||||
#
|
||||
# - Stops running spinner process by PID
|
||||
# - Reads PID from SPINNER_PID variable or /tmp/.spinner.pid file
|
||||
# - Attempts graceful kill, then forced kill if needed
|
||||
# - Cleans up temp file and resets terminal state
|
||||
# - Unsets SPINNER_PID and SPINNER_MSG variables
|
||||
# ------------------------------------------------------------------------------
|
||||
stop_spinner() {
|
||||
local pid="${SPINNER_PID:-}"
|
||||
[[ -z "$pid" && -f /tmp/.spinner.pid ]] && pid=$(</tmp/.spinner.pid)
|
||||
@@ -301,6 +504,19 @@ stop_spinner() {
|
||||
stty sane 2>/dev/null || true
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 4: MESSAGE OUTPUT
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# msg_info()
|
||||
#
|
||||
# - Displays informational message with spinner animation
|
||||
# - Shows each unique message only once (tracked via MSG_INFO_SHOWN)
|
||||
# - In verbose/Alpine mode: shows hourglass icon instead of spinner
|
||||
# - Stops any existing spinner before starting new one
|
||||
# - Backgrounds spinner process and stores PID for later cleanup
|
||||
# ------------------------------------------------------------------------------
|
||||
msg_info() {
|
||||
local msg="$1"
|
||||
[[ -z "$msg" ]] && return
|
||||
@@ -317,6 +533,12 @@ msg_info() {
|
||||
if is_verbose_mode || is_alpine; then
|
||||
local HOURGLASS="${TAB}⏳${TAB}"
|
||||
printf "\r\e[2K%s %b" "$HOURGLASS" "${YW}${msg}${CL}" >&2
|
||||
|
||||
# Pause mode: Wait for Enter after each step
|
||||
if [[ "${DEV_MODE_PAUSE:-false}" == "true" ]]; then
|
||||
echo -en "\n${YWB}[PAUSE]${CL} Press Enter to continue..." >&2
|
||||
read -r
|
||||
fi
|
||||
return
|
||||
fi
|
||||
|
||||
@@ -325,29 +547,68 @@ msg_info() {
|
||||
SPINNER_PID=$!
|
||||
echo "$SPINNER_PID" >/tmp/.spinner.pid
|
||||
disown "$SPINNER_PID" 2>/dev/null || true
|
||||
|
||||
# Pause mode: Stop spinner and wait
|
||||
if [[ "${DEV_MODE_PAUSE:-false}" == "true" ]]; then
|
||||
stop_spinner
|
||||
echo -en "\n${YWB}[PAUSE]${CL} Press Enter to continue..." >&2
|
||||
read -r
|
||||
fi
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# msg_ok()
|
||||
#
|
||||
# - Displays success message with checkmark icon
|
||||
# - Stops spinner and clears line before output
|
||||
# - Removes message from MSG_INFO_SHOWN to allow re-display
|
||||
# - Uses green color for success indication
|
||||
# ------------------------------------------------------------------------------
|
||||
msg_ok() {
|
||||
local msg="$1"
|
||||
[[ -z "$msg" ]] && return
|
||||
stop_spinner
|
||||
clear_line
|
||||
printf "%s %b\n" "$CM" "${GN}${msg}${CL}" >&2
|
||||
echo -e "$CM ${GN}${msg}${CL}"
|
||||
unset MSG_INFO_SHOWN["$msg"]
|
||||
}
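Taken together, msg_info() and msg_ok() bracket a long-running step: msg_info() starts the background spinner, the work runs, and msg_ok() stops the spinner and prints the final status line. A minimal usage sketch (the package names are illustrative only):

```bash
# Typical pairing inside an install script (illustrative example)
msg_info "Installing Dependencies"
$STD apt-get install -y curl jq   # $STD suppresses output unless verbose mode is active
msg_ok "Installed Dependencies"
```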
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# msg_error()
|
||||
#
|
||||
# - Displays error message with cross/X icon
|
||||
# - Stops spinner before output
|
||||
# - Uses red color for error indication
|
||||
# - Outputs to stderr
|
||||
# ------------------------------------------------------------------------------
|
||||
msg_error() {
|
||||
stop_spinner
|
||||
local msg="$1"
|
||||
echo -e "${BFR:-} ${CROSS:-✖️} ${RD}${msg}${CL}"
|
||||
echo -e "${BFR:-}${CROSS:-✖️} ${RD}${msg}${CL}" >&2
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# msg_warn()
|
||||
#
|
||||
# - Displays warning message with info/lightbulb icon
|
||||
# - Stops spinner before output
|
||||
# - Uses bright yellow color for warning indication
|
||||
# - Outputs to stderr
|
||||
# ------------------------------------------------------------------------------
|
||||
msg_warn() {
|
||||
stop_spinner
|
||||
local msg="$1"
|
||||
echo -e "${BFR:-} ${INFO:-ℹ️} ${YWB}${msg}${CL}"
|
||||
echo -e "${BFR:-}${INFO:-ℹ️} ${YWB}${msg}${CL}" >&2
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# msg_custom()
|
||||
#
|
||||
# - Displays custom message with user-defined symbol and color
|
||||
# - Arguments: symbol, color code, message text
|
||||
# - Stops spinner before output
|
||||
# - Useful for specialized status messages
|
||||
# ------------------------------------------------------------------------------
|
||||
msg_custom() {
|
||||
local symbol="${1:-"[*]"}"
|
||||
local color="${2:-"\e[36m"}"
|
||||
@@ -357,17 +618,181 @@ msg_custom() {
|
||||
echo -e "${BFR:-} ${symbol} ${color}${msg}${CL:-\e[0m}"
|
||||
}
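msg_custom() takes the symbol, the ANSI color code, and the message text, in that order; a short hedged example:

```bash
# Custom status lines with caller-defined symbol and color (values are examples)
msg_custom "📋" "${YW}" "View full log: /var/log/install.log"
msg_custom "🔄" "\e[36m" "Re-reading configuration"
```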
|
||||
|
||||
run_container_safe() {
|
||||
local ct="$1"
|
||||
shift
|
||||
local cmd="$*"
|
||||
|
||||
lxc-attach -n "$ct" -- bash -euo pipefail -c "
|
||||
trap 'echo Aborted in container; exit 130' SIGINT SIGTERM
|
||||
$cmd
|
||||
" || __handle_general_error "lxc-attach to CT $ct"
|
||||
# ------------------------------------------------------------------------------
|
||||
# msg_debug()
|
||||
#
|
||||
# - Displays debug message with timestamp when var_full_verbose=1
|
||||
# - Automatically enables var_verbose if not already set
|
||||
# - Shows date/time prefix for log correlation
|
||||
# - Uses bright yellow color for debug output
|
||||
# ------------------------------------------------------------------------------
|
||||
msg_debug() {
|
||||
if [[ "${var_full_verbose:-0}" == "1" ]]; then
|
||||
[[ "${var_verbose:-0}" != "1" ]] && var_verbose=1
|
||||
echo -e "${YWB}[$(date '+%F %T')] [DEBUG]${CL} $*"
|
||||
fi
|
||||
}
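A brief usage sketch; setting var_full_verbose=1 before the call enables these lines (and implicitly var_verbose=1):

```bash
# Debug output with timestamp, only emitted when var_full_verbose=1
var_full_verbose=1
msg_debug "Resolved template storage: ${TEMPLATE_STORAGE:-unset}"
```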
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# msg_dev()
|
||||
#
|
||||
# - Display development mode messages with 🔧 icon
|
||||
# - Only shown when dev_mode is active
|
||||
# - Useful for debugging and development-specific output
|
||||
# - Format: [DEV] message with distinct formatting
|
||||
# - Usage: msg_dev "Container ready for debugging"
|
||||
# ------------------------------------------------------------------------------
|
||||
msg_dev() {
|
||||
if [[ -n "${dev_mode:-}" ]]; then
|
||||
echo -e "${SEARCH}${BOLD}${DGN}🔧 [DEV]${CL} $*"
|
||||
fi
|
||||
}
|
||||
# ------------------------------------------------------------------------------
# fatal()
#
|
||||
# - Displays error message and immediately terminates script
|
||||
# - Sends SIGINT to current process to trigger error handler
|
||||
# - Use for unrecoverable errors that require immediate exit
|
||||
# ------------------------------------------------------------------------------
|
||||
fatal() {
|
||||
msg_error "$1"
|
||||
kill -INT $$
|
||||
}
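fatal() is intended for unrecoverable conditions where the script cannot continue; for example (the check itself is illustrative):

```bash
# Abort immediately if a required Proxmox tool is missing (example condition)
command -v pveam >/dev/null 2>&1 || fatal "pveam not found - this must run on a Proxmox VE host"
```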
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 5: UTILITY FUNCTIONS
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# exit_script()
|
||||
#
|
||||
# - Called when user cancels an action
|
||||
# - Clears screen and displays exit message
|
||||
# - Exits with default exit code
|
||||
# ------------------------------------------------------------------------------
|
||||
exit_script() {
|
||||
clear
|
||||
echo -e "\n${CROSS}${RD}User exited script${CL}\n"
|
||||
exit
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# get_header()
|
||||
#
|
||||
# - Downloads and caches application header ASCII art
|
||||
# - Falls back to local cache if already downloaded
|
||||
# - Determines app type (ct/vm) from APP_TYPE variable
|
||||
# - Returns header content or empty string on failure
|
||||
# ------------------------------------------------------------------------------
|
||||
get_header() {
|
||||
local app_name=$(echo "${APP,,}" | tr -d ' ')
|
||||
local app_type=${APP_TYPE:-ct} # Default to 'ct' if not set
|
||||
local header_url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/${app_type}/headers/${app_name}"
|
||||
local local_header_path="/usr/local/community-scripts/headers/${app_type}/${app_name}"
|
||||
|
||||
mkdir -p "$(dirname "$local_header_path")"
|
||||
|
||||
if [ ! -s "$local_header_path" ]; then
|
||||
if ! curl -fsSL "$header_url" -o "$local_header_path"; then
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
cat "$local_header_path" 2>/dev/null || true
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# header_info()
|
||||
#
|
||||
# - Displays application header ASCII art at top of screen
|
||||
# - Clears screen before displaying header
|
||||
# - Detects terminal width for formatting
|
||||
# - Returns silently if header not available
|
||||
# ------------------------------------------------------------------------------
|
||||
header_info() {
|
||||
local app_name=$(echo "${APP,,}" | tr -d ' ')
|
||||
local header_content
|
||||
|
||||
header_content=$(get_header "$app_name") || header_content=""
|
||||
|
||||
clear
|
||||
local term_width
|
||||
term_width=$(tput cols 2>/dev/null || echo 120)
|
||||
|
||||
if [ -n "$header_content" ]; then
|
||||
echo "$header_content"
|
||||
fi
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ensure_tput()
|
||||
#
|
||||
# - Ensures tput command is available for terminal control
|
||||
# - Installs ncurses-bin on Debian/Ubuntu or ncurses on Alpine
|
||||
# - Required for clear_line() and terminal width detection
|
||||
# ------------------------------------------------------------------------------
|
||||
ensure_tput() {
|
||||
if ! command -v tput >/dev/null 2>&1; then
|
||||
if grep -qi 'alpine' /etc/os-release; then
|
||||
apk add --no-cache ncurses >/dev/null 2>&1
|
||||
elif command -v apt-get >/dev/null 2>&1; then
|
||||
apt-get update -qq >/dev/null
|
||||
apt-get install -y -qq ncurses-bin >/dev/null 2>&1
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# is_alpine()
|
||||
#
|
||||
# - Detects if running on Alpine Linux
|
||||
# - Checks var_os, PCT_OSTYPE, or /etc/os-release
|
||||
# - Returns 0 if Alpine, 1 otherwise
|
||||
# - Used to adjust behavior for Alpine-specific commands
|
||||
# ------------------------------------------------------------------------------
|
||||
is_alpine() {
|
||||
local os_id="${var_os:-${PCT_OSTYPE:-}}"
|
||||
|
||||
if [[ -z "$os_id" && -f /etc/os-release ]]; then
|
||||
os_id="$(
|
||||
. /etc/os-release 2>/dev/null
|
||||
echo "${ID:-}"
|
||||
)"
|
||||
fi
|
||||
|
||||
[[ "$os_id" == "alpine" ]]
|
||||
}
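Typical use is to branch between apk and apt code paths, for instance:

```bash
# Choose the package manager based on the detected OS (illustrative)
if is_alpine; then
  $STD apk add --no-cache curl
else
  $STD apt-get install -y curl
fi
```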
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# is_verbose_mode()
|
||||
#
|
||||
# - Determines if script should run in verbose mode
|
||||
# - Checks VERBOSE and var_verbose variables
|
||||
# - Also returns true if not running in TTY (pipe/redirect scenario)
|
||||
# - Used by msg_info() to decide between spinner and static output
|
||||
# ------------------------------------------------------------------------------
|
||||
is_verbose_mode() {
|
||||
local verbose="${VERBOSE:-${var_verbose:-no}}"
|
||||
local tty_status
|
||||
if [[ -t 2 ]]; then
|
||||
tty_status="interactive"
|
||||
else
|
||||
tty_status="not-a-tty"
|
||||
fi
|
||||
[[ "$verbose" != "no" || ! -t 2 ]]
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 6: CLEANUP & MAINTENANCE
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# cleanup_lxc()
|
||||
#
|
||||
# - Comprehensive cleanup of package managers, caches, and logs
|
||||
# - Supports Alpine (apk), Debian/Ubuntu (apt), and language package managers
|
||||
# - Cleans: Python (pip/uv), Node.js (npm/yarn/pnpm), Go, Rust, Ruby, PHP
|
||||
# - Truncates log files and vacuums systemd journal
|
||||
# - Run at end of container creation to minimize disk usage
|
||||
# ------------------------------------------------------------------------------
|
||||
cleanup_lxc() {
|
||||
msg_info "Cleaning up"
|
||||
|
||||
@@ -384,20 +809,15 @@ cleanup_lxc() {
|
||||
find /tmp /var/tmp -type f -name 'tmp*' -delete 2>/dev/null || true
|
||||
find /tmp /var/tmp -type f -name 'tempfile*' -delete 2>/dev/null || true
|
||||
|
||||
# Truncate writable log files silently (permission errors ignored)
|
||||
if command -v truncate >/dev/null 2>&1; then
|
||||
find /var/log -type f -writable -print0 2>/dev/null |
|
||||
xargs -0 -n1 truncate -s 0 2>/dev/null || true
|
||||
# Node.js npm - directly remove cache directory
|
||||
# npm cache clean/verify can fail with ENOTEMPTY errors, so we skip them
|
||||
if command -v npm &>/dev/null; then
|
||||
rm -rf /root/.npm/_cacache /root/.npm/_logs 2>/dev/null || true
|
||||
fi
|
||||
|
||||
# Python pip
|
||||
if command -v pip &>/dev/null; then $STD pip cache purge || true; fi
|
||||
# Node.js npm
|
||||
if command -v npm &>/dev/null; then $STD npm cache clean --force || true; fi
|
||||
# Node.js yarn
|
||||
if command -v yarn &>/dev/null; then $STD yarn cache clean || true; fi
|
||||
if command -v yarn &>/dev/null; then yarn cache clean &>/dev/null || true; fi
|
||||
# Node.js pnpm
|
||||
if command -v pnpm &>/dev/null; then $STD pnpm store prune || true; fi
|
||||
if command -v pnpm &>/dev/null; then pnpm store prune &>/dev/null || true; fi
|
||||
# Go
|
||||
if command -v go &>/dev/null; then $STD go clean -cache -modcache || true; fi
|
||||
# Rust cargo
|
||||
@@ -405,14 +825,21 @@ cleanup_lxc() {
|
||||
# Ruby gem
|
||||
if command -v gem &>/dev/null; then $STD gem cleanup || true; fi
|
||||
# Composer (PHP)
|
||||
if command -v composer &>/dev/null; then $STD composer clear-cache || true; fi
|
||||
if command -v composer &>/dev/null; then COMPOSER_ALLOW_SUPERUSER=1 $STD composer clear-cache || true; fi
|
||||
|
||||
if command -v journalctl &>/dev/null; then
|
||||
$STD journalctl --vacuum-time=10m || true
|
||||
fi
|
||||
msg_ok "Cleaned"
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# check_or_create_swap()
|
||||
#
|
||||
# - Checks if swap is active on system
|
||||
# - Offers to create swap file if none exists
|
||||
# - Prompts user for swap size in MB
|
||||
# - Creates /swapfile with specified size
|
||||
# - Activates swap immediately
|
||||
# - Returns 0 if swap active or successfully created, 1 if declined/failed
|
||||
# ------------------------------------------------------------------------------
|
||||
check_or_create_swap() {
|
||||
msg_info "Checking for active swap"
|
||||
|
||||
@@ -451,7 +878,8 @@ check_or_create_swap() {
|
||||
fi
|
||||
}
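The creation branch of check_or_create_swap() is elided by the hunk above; based on the header comment, the general shape is roughly as follows (a sketch under those assumptions, not the exact implementation):

```bash
# Rough outline of the swap-creation path described in the header comment
read -rp "Enter swap size in MB: " swap_size_mb
dd if=/dev/zero of=/swapfile bs=1M count="$swap_size_mb" status=none
chmod 600 /swapfile
mkswap /swapfile >/dev/null
swapon /swapfile
```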
|
||||
|
||||
trap 'stop_spinner' EXIT INT TERM
|
||||
# ==============================================================================
|
||||
# SIGNAL TRAPS
|
||||
# ==============================================================================
|
||||
|
||||
# Initialize functions when core.func is sourced
|
||||
load_functions
|
||||
trap 'stop_spinner' EXIT INT TERM
|
||||
|
||||
@@ -1,380 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2025 tteck
|
||||
# Author: tteck (tteckster)
|
||||
# Co-Author: MickLesk
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
|
||||
# This sets verbose mode if the global variable is set to "yes"
|
||||
# if [ "$VERBOSE" == "yes" ]; then set -x; fi
|
||||
|
||||
source "$(dirname "$0")/core.func"
|
||||
|
||||
|
||||
# This sets error handling options and defines the error_handler function to handle errors
|
||||
set -Eeuo pipefail
|
||||
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
trap on_exit EXIT
|
||||
trap on_interrupt INT
|
||||
trap on_terminate TERM
|
||||
|
||||
function on_exit() {
|
||||
local exit_code="$?"
|
||||
[[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
|
||||
exit "$exit_code"
|
||||
}
|
||||
|
||||
function error_handler() {
|
||||
local exit_code="$?"
|
||||
local line_number="$1"
|
||||
local command="$2"
|
||||
printf "\e[?25h"
|
||||
echo -e "\n${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}\n"
|
||||
exit "$exit_code"
|
||||
}
|
||||
|
||||
function on_interrupt() {
|
||||
echo -e "\n${RD}Interrupted by user (SIGINT)${CL}"
|
||||
exit 130
|
||||
}
|
||||
|
||||
function on_terminate() {
|
||||
echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}"
|
||||
exit 143
|
||||
}
|
||||
|
||||
function exit_script() {
|
||||
clear
|
||||
printf "\e[?25h"
|
||||
echo -e "\n${CROSS}${RD}User exited script${CL}\n"
|
||||
kill 0
|
||||
exit 1
|
||||
}
|
||||
|
||||
function check_storage_support() {
|
||||
local CONTENT="$1"
|
||||
local -a VALID_STORAGES=()
|
||||
while IFS= read -r line; do
|
||||
local STORAGE_NAME
|
||||
STORAGE_NAME=$(awk '{print $1}' <<<"$line")
|
||||
[[ -z "$STORAGE_NAME" ]] && continue
|
||||
VALID_STORAGES+=("$STORAGE_NAME")
|
||||
done < <(pvesm status -content "$CONTENT" 2>/dev/null | awk 'NR>1')
|
||||
|
||||
[[ ${#VALID_STORAGES[@]} -gt 0 ]]
|
||||
}
|
||||
|
||||
# This function selects a storage pool for a given content type (e.g., rootdir, vztmpl).
|
||||
function select_storage() {
|
||||
local CLASS=$1 CONTENT CONTENT_LABEL
|
||||
|
||||
case $CLASS in
|
||||
container)
|
||||
CONTENT='rootdir'
|
||||
CONTENT_LABEL='Container'
|
||||
;;
|
||||
template)
|
||||
CONTENT='vztmpl'
|
||||
CONTENT_LABEL='Container template'
|
||||
;;
|
||||
iso)
|
||||
CONTENT='iso'
|
||||
CONTENT_LABEL='ISO image'
|
||||
;;
|
||||
images)
|
||||
CONTENT='images'
|
||||
CONTENT_LABEL='VM Disk image'
|
||||
;;
|
||||
backup)
|
||||
CONTENT='backup'
|
||||
CONTENT_LABEL='Backup'
|
||||
;;
|
||||
snippets)
|
||||
CONTENT='snippets'
|
||||
CONTENT_LABEL='Snippets'
|
||||
;;
|
||||
*)
|
||||
msg_error "Invalid storage class '$CLASS'"
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# Check for preset STORAGE variable
|
||||
if [ "$CONTENT" = "rootdir" ] && [ -n "${STORAGE:-}" ]; then
|
||||
if pvesm status -content "$CONTENT" | awk 'NR>1 {print $1}' | grep -qx "$STORAGE"; then
|
||||
STORAGE_RESULT="$STORAGE"
|
||||
msg_info "Using preset storage: $STORAGE_RESULT for $CONTENT_LABEL"
|
||||
return 0
|
||||
else
|
||||
msg_error "Preset storage '$STORAGE' is not valid for content type '$CONTENT'."
|
||||
return 2
|
||||
fi
|
||||
fi
|
||||
|
||||
local -A STORAGE_MAP
|
||||
local -a MENU
|
||||
local COL_WIDTH=0
|
||||
|
||||
while read -r TAG TYPE _ TOTAL USED FREE _; do
|
||||
[[ -n "$TAG" && -n "$TYPE" ]] || continue
|
||||
local STORAGE_NAME="$TAG"
|
||||
local DISPLAY="${STORAGE_NAME} (${TYPE})"
|
||||
local USED_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$USED")
|
||||
local FREE_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$FREE")
|
||||
local INFO="Free: ${FREE_FMT}B Used: ${USED_FMT}B"
|
||||
STORAGE_MAP["$DISPLAY"]="$STORAGE_NAME"
|
||||
MENU+=("$DISPLAY" "$INFO" "OFF")
|
||||
((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY}
|
||||
done < <(pvesm status -content "$CONTENT" | awk 'NR>1')
|
||||
|
||||
if [ ${#MENU[@]} -eq 0 ]; then
|
||||
msg_error "No storage found for content type '$CONTENT'."
|
||||
return 2
|
||||
fi
|
||||
|
||||
if [ $((${#MENU[@]} / 3)) -eq 1 ]; then
|
||||
STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}"
|
||||
STORAGE_INFO="${MENU[1]}"
|
||||
return 0
|
||||
fi
|
||||
|
||||
local WIDTH=$((COL_WIDTH + 42))
|
||||
while true; do
|
||||
local DISPLAY_SELECTED
|
||||
DISPLAY_SELECTED=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
|
||||
--title "Storage Pools" \
|
||||
--radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \
|
||||
16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3)
|
||||
|
||||
# Cancel or ESC
|
||||
[[ $? -ne 0 ]] && exit_script
|
||||
|
||||
# Strip trailing whitespace or newline (important for storages like "storage (dir)")
|
||||
DISPLAY_SELECTED=$(sed 's/[[:space:]]*$//' <<<"$DISPLAY_SELECTED")
|
||||
|
||||
if [[ -z "$DISPLAY_SELECTED" || -z "${STORAGE_MAP[$DISPLAY_SELECTED]+_}" ]]; then
|
||||
whiptail --msgbox "No valid storage selected. Please try again." 8 58
|
||||
continue
|
||||
fi
|
||||
|
||||
STORAGE_RESULT="${STORAGE_MAP[$DISPLAY_SELECTED]}"
|
||||
for ((i = 0; i < ${#MENU[@]}; i += 3)); do
|
||||
if [[ "${MENU[$i]}" == "$DISPLAY_SELECTED" ]]; then
|
||||
STORAGE_INFO="${MENU[$i + 1]}"
|
||||
break
|
||||
fi
|
||||
done
|
||||
return 0
|
||||
done
|
||||
}
|
||||
|
||||
# Test if required variables are set
|
||||
[[ "${CTID:-}" ]] || {
|
||||
msg_error "You need to set 'CTID' variable."
|
||||
exit 203
|
||||
}
|
||||
[[ "${PCT_OSTYPE:-}" ]] || {
|
||||
msg_error "You need to set 'PCT_OSTYPE' variable."
|
||||
exit 204
|
||||
}
|
||||
|
||||
# Test if ID is valid
|
||||
[ "$CTID" -ge "100" ] || {
|
||||
msg_error "ID cannot be less than 100."
|
||||
exit 205
|
||||
}
|
||||
|
||||
# Test if ID is in use
|
||||
if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then
|
||||
echo -e "ID '$CTID' is already in use."
|
||||
unset CTID
|
||||
msg_error "Cannot use ID that is already in use."
|
||||
exit 206
|
||||
fi
|
||||
|
||||
# This checks for the presence of valid Container Storage and Template Storage locations
|
||||
msg_info "Validating storage"
|
||||
if ! check_storage_support "rootdir"; then
|
||||
msg_error "No valid storage found for 'rootdir' [Container]"
|
||||
exit 1
|
||||
fi
|
||||
if ! check_storage_support "vztmpl"; then
|
||||
msg_error "No valid storage found for 'vztmpl' [Template]"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
#msg_info "Checking template storage"
|
||||
while true; do
|
||||
if select_storage template; then
|
||||
TEMPLATE_STORAGE="$STORAGE_RESULT"
|
||||
TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
|
||||
msg_ok "Storage ${BL}$TEMPLATE_STORAGE${CL} ($TEMPLATE_STORAGE_INFO) [Template]"
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
while true; do
|
||||
if select_storage container; then
|
||||
CONTAINER_STORAGE="$STORAGE_RESULT"
|
||||
CONTAINER_STORAGE_INFO="$STORAGE_INFO"
|
||||
msg_ok "Storage ${BL}$CONTAINER_STORAGE${CL} ($CONTAINER_STORAGE_INFO) [Container]"
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
# Check free space on selected container storage
|
||||
STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }')
|
||||
REQUIRED_KB=$((${PCT_DISK_SIZE:-8} * 1024 * 1024))
|
||||
if [ "$STORAGE_FREE" -lt "$REQUIRED_KB" ]; then
|
||||
msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G."
|
||||
exit 214
|
||||
fi
|
||||
|
||||
# Check Cluster Quorum if in Cluster
|
||||
if [ -f /etc/pve/corosync.conf ]; then
|
||||
msg_info "Checking cluster quorum"
|
||||
if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then
|
||||
|
||||
msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)."
|
||||
exit 210
|
||||
fi
|
||||
msg_ok "Cluster is quorate"
|
||||
fi
|
||||
|
||||
# Update LXC template list
|
||||
TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
|
||||
case "$PCT_OSTYPE" in
|
||||
debian | ubuntu)
|
||||
TEMPLATE_PATTERN="-standard_"
|
||||
;;
|
||||
alpine | fedora | rocky | centos)
|
||||
TEMPLATE_PATTERN="-default_"
|
||||
;;
|
||||
*)
|
||||
TEMPLATE_PATTERN=""
|
||||
;;
|
||||
esac
|
||||
|
||||
# 1. Check local templates first
|
||||
msg_info "Searching for template '$TEMPLATE_SEARCH'"
|
||||
mapfile -t TEMPLATES < <(
|
||||
pveam list "$TEMPLATE_STORAGE" |
|
||||
awk -v s="$TEMPLATE_SEARCH" -v p="$TEMPLATE_PATTERN" '$1 ~ s && $1 ~ p {print $1}' |
|
||||
sed 's/.*\///' | sort -t - -k 2 -V
|
||||
)
|
||||
|
||||
if [ ${#TEMPLATES[@]} -gt 0 ]; then
|
||||
TEMPLATE_SOURCE="local"
|
||||
else
|
||||
msg_info "No local template found, checking online repository"
|
||||
pveam update >/dev/null 2>&1
|
||||
mapfile -t TEMPLATES < <(
|
||||
pveam update >/dev/null 2>&1 &&
|
||||
pveam available -section system |
|
||||
sed -n "s/.*\($TEMPLATE_SEARCH.*$TEMPLATE_PATTERN.*\)/\1/p" |
|
||||
sort -t - -k 2 -V
|
||||
)
|
||||
TEMPLATE_SOURCE="online"
|
||||
fi
|
||||
|
||||
TEMPLATE="${TEMPLATES[-1]}"
|
||||
TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null ||
|
||||
echo "/var/lib/vz/template/cache/$TEMPLATE")"
|
||||
msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]"
|
||||
|
||||
# 4. Validate template (exists & not corrupted)
|
||||
TEMPLATE_VALID=1
|
||||
|
||||
if [ ! -s "$TEMPLATE_PATH" ]; then
|
||||
TEMPLATE_VALID=0
|
||||
elif ! tar --use-compress-program=zstdcat -tf "$TEMPLATE_PATH" >/dev/null 2>&1; then
|
||||
TEMPLATE_VALID=0
|
||||
fi
|
||||
|
||||
if [ "$TEMPLATE_VALID" -eq 0 ]; then
|
||||
msg_warn "Template $TEMPLATE is missing or corrupted. Re-downloading."
|
||||
[[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH"
|
||||
for attempt in {1..3}; do
|
||||
msg_info "Attempt $attempt: Downloading LXC template..."
|
||||
if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then
|
||||
msg_ok "Template download successful."
|
||||
break
|
||||
fi
|
||||
if [ $attempt -eq 3 ]; then
|
||||
msg_error "Failed after 3 attempts. Please check network access or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE"
|
||||
exit 208
|
||||
fi
|
||||
sleep $((attempt * 5))
|
||||
done
|
||||
fi
|
||||
|
||||
msg_info "Creating LXC Container"
|
||||
# Check and fix subuid/subgid
|
||||
grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid
|
||||
grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid
|
||||
|
||||
# Combine all options
|
||||
PCT_OPTIONS=(${PCT_OPTIONS[@]:-${DEFAULT_PCT_OPTIONS[@]}})
|
||||
[[ " ${PCT_OPTIONS[@]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "$CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}")
|
||||
|
||||
# Secure creation of the LXC container with lock and template check
|
||||
lockfile="/tmp/template.${TEMPLATE}.lock"
|
||||
exec 9>"$lockfile" || {
|
||||
msg_error "Failed to create lock file '$lockfile'."
|
||||
exit 200
|
||||
}
|
||||
flock -w 60 9 || {
|
||||
msg_error "Timeout while waiting for template lock"
|
||||
exit 211
|
||||
}
|
||||
|
||||
if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" &>/dev/null; then
|
||||
msg_error "Container creation failed. Checking if template is corrupted or incomplete."
|
||||
|
||||
if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
|
||||
msg_error "Template file too small or missing – re-downloading."
|
||||
rm -f "$TEMPLATE_PATH"
|
||||
elif ! zstdcat "$TEMPLATE_PATH" | tar -tf - &>/dev/null; then
|
||||
msg_error "Template appears to be corrupted – re-downloading."
|
||||
rm -f "$TEMPLATE_PATH"
|
||||
else
|
||||
msg_error "Template is valid, but container creation still failed."
|
||||
exit 209
|
||||
fi
|
||||
|
||||
# Retry download
|
||||
for attempt in {1..3}; do
|
||||
msg_info "Attempt $attempt: Re-downloading template..."
|
||||
if timeout 120 pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null; then
|
||||
msg_ok "Template re-download successful."
|
||||
break
|
||||
fi
|
||||
if [ "$attempt" -eq 3 ]; then
|
||||
msg_error "Three failed attempts. Aborting."
|
||||
exit 208
|
||||
fi
|
||||
sleep $((attempt * 5))
|
||||
done
|
||||
|
||||
sleep 1 # I/O-Sync-Delay
|
||||
msg_ok "Re-downloaded LXC Template"
|
||||
fi
|
||||
|
||||
if ! pct list | awk '{print $1}' | grep -qx "$CTID"; then
|
||||
msg_error "Container ID $CTID not listed in 'pct list' – unexpected failure."
|
||||
exit 215
|
||||
fi
|
||||
|
||||
if ! grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf"; then
|
||||
msg_error "RootFS entry missing in container config – storage not correctly assigned."
|
||||
exit 216
|
||||
fi
|
||||
|
||||
if grep -q '^hostname:' "/etc/pve/lxc/$CTID.conf"; then
|
||||
CT_HOSTNAME=$(grep '^hostname:' "/etc/pve/lxc/$CTID.conf" | awk '{print $2}')
|
||||
if [[ ! "$CT_HOSTNAME" =~ ^[a-z0-9-]+$ ]]; then
|
||||
msg_warn "Hostname '$CT_HOSTNAME' contains invalid characters – may cause issues with networking or DNS."
|
||||
fi
|
||||
fi
|
||||
|
||||
msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created."
|
||||
322
scripts/core/error-handler.func
Normal file
@@ -0,0 +1,322 @@
|
||||
#!/usr/bin/env bash
|
||||
# ------------------------------------------------------------------------------
|
||||
# ERROR HANDLER - ERROR & SIGNAL MANAGEMENT
|
||||
# ------------------------------------------------------------------------------
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: MickLesk (CanbiZ)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# ------------------------------------------------------------------------------
|
||||
#
|
||||
# Provides comprehensive error handling and signal management for all scripts.
|
||||
# Includes:
|
||||
# - Exit code explanations (shell, package managers, databases, custom codes)
|
||||
# - Error handler with detailed logging
|
||||
# - Signal handlers (EXIT, INT, TERM)
|
||||
# - Initialization function for trap setup
|
||||
#
|
||||
# Usage:
|
||||
# source <(curl -fsSL .../error_handler.func)
|
||||
# catch_errors
|
||||
#
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 1: EXIT CODE EXPLANATIONS
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# explain_exit_code()
|
||||
#
|
||||
# - Maps numeric exit codes to human-readable error descriptions
|
||||
# - Supports:
|
||||
# * Generic/Shell errors (1, 2, 126, 127, 128, 130, 137, 139, 143)
|
||||
# * Package manager errors (APT, DPKG: 100, 101, 255)
|
||||
# * Node.js/npm errors (243-249, 254)
|
||||
# * Python/pip/uv errors (210-212)
|
||||
# * PostgreSQL errors (231-234)
|
||||
# * MySQL/MariaDB errors (241-244)
|
||||
# * MongoDB errors (251-254)
|
||||
# * Proxmox custom codes (200-231)
|
||||
# - Returns description string for given exit code
|
||||
# ------------------------------------------------------------------------------
|
||||
explain_exit_code() {
|
||||
local code="$1"
|
||||
case "$code" in
|
||||
# --- Generic / Shell ---
|
||||
1) echo "General error / Operation not permitted" ;;
|
||||
2) echo "Misuse of shell builtins (e.g. syntax error)" ;;
|
||||
126) echo "Command invoked cannot execute (permission problem?)" ;;
|
||||
127) echo "Command not found" ;;
|
||||
128) echo "Invalid argument to exit" ;;
|
||||
130) echo "Terminated by Ctrl+C (SIGINT)" ;;
|
||||
137) echo "Killed (SIGKILL / Out of memory?)" ;;
|
||||
139) echo "Segmentation fault (core dumped)" ;;
|
||||
143) echo "Terminated (SIGTERM)" ;;
|
||||
|
||||
# --- Package manager / APT / DPKG ---
|
||||
100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
|
||||
101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
|
||||
255) echo "DPKG: Fatal internal error" ;;
|
||||
|
||||
# --- Node.js / npm / pnpm / yarn ---
|
||||
243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
|
||||
245) echo "Node.js: Invalid command-line option" ;;
|
||||
246) echo "Node.js: Internal JavaScript Parse Error" ;;
|
||||
247) echo "Node.js: Fatal internal error" ;;
|
||||
248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
|
||||
249) echo "Node.js: Inspector error" ;;
|
||||
254) echo "npm/pnpm/yarn: Unknown fatal error" ;;
|
||||
|
||||
# --- Python / pip / uv ---
|
||||
210) echo "Python: Virtualenv / uv environment missing or broken" ;;
|
||||
211) echo "Python: Dependency resolution failed" ;;
|
||||
212) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;;
|
||||
|
||||
# --- PostgreSQL ---
|
||||
231) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;;
|
||||
232) echo "PostgreSQL: Authentication failed (bad user/password)" ;;
|
||||
233) echo "PostgreSQL: Database does not exist" ;;
|
||||
234) echo "PostgreSQL: Fatal error in query / syntax" ;;
|
||||
|
||||
# --- MySQL / MariaDB ---
|
||||
241) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;;
|
||||
242) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;;
|
||||
243) echo "MySQL/MariaDB: Database does not exist" ;;
|
||||
244) echo "MySQL/MariaDB: Fatal error in query / syntax" ;;
|
||||
|
||||
# --- MongoDB ---
|
||||
251) echo "MongoDB: Connection failed (server not running)" ;;
|
||||
252) echo "MongoDB: Authentication failed (bad user/password)" ;;
|
||||
253) echo "MongoDB: Database not found" ;;
|
||||
254) echo "MongoDB: Fatal query error" ;;
|
||||
|
||||
# --- Proxmox Custom Codes ---
|
||||
200) echo "Proxmox: Failed to create lock file" ;;
|
||||
203) echo "Proxmox: Missing CTID variable" ;;
|
||||
204) echo "Proxmox: Missing PCT_OSTYPE variable" ;;
|
||||
205) echo "Proxmox: Invalid CTID (<100)" ;;
|
||||
206) echo "Proxmox: CTID already in use" ;;
|
||||
207) echo "Proxmox: Password contains unescaped special characters" ;;
|
||||
208) echo "Proxmox: Invalid configuration (DNS/MAC/Network format)" ;;
|
||||
209) echo "Proxmox: Container creation failed" ;;
|
||||
210) echo "Proxmox: Cluster not quorate" ;;
|
||||
211) echo "Proxmox: Timeout waiting for template lock" ;;
|
||||
212) echo "Proxmox: Storage type 'iscsidirect' does not support containers (VMs only)" ;;
|
||||
213) echo "Proxmox: Storage type does not support 'rootdir' content" ;;
|
||||
214) echo "Proxmox: Not enough storage space" ;;
|
||||
215) echo "Proxmox: Container created but not listed (ghost state)" ;;
|
||||
216) echo "Proxmox: RootFS entry missing in config" ;;
|
||||
217) echo "Proxmox: Storage not accessible" ;;
|
||||
219) echo "Proxmox: CephFS does not support containers - use RBD" ;;
|
||||
224) echo "Proxmox: PBS storage is for backups only" ;;
|
||||
218) echo "Proxmox: Template file corrupted or incomplete" ;;
|
||||
220) echo "Proxmox: Unable to resolve template path" ;;
|
||||
221) echo "Proxmox: Template file not readable" ;;
|
||||
222) echo "Proxmox: Template download failed" ;;
|
||||
223) echo "Proxmox: Template not available after download" ;;
|
||||
225) echo "Proxmox: No template available for OS/Version" ;;
|
||||
231) echo "Proxmox: LXC stack upgrade failed" ;;
|
||||
|
||||
# --- Default ---
|
||||
*) echo "Unknown error" ;;
|
||||
esac
|
||||
}
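Because explain_exit_code() is a pure lookup, it can also be called directly to annotate a captured status, e.g. (some_command is a placeholder):

```bash
# Annotate a captured exit status with its explanation (illustrative)
rc=0
some_command || rc=$?
if (( rc != 0 )); then
  echo "some_command failed with code $rc: $(explain_exit_code "$rc")"
fi
```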
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 2: ERROR HANDLERS
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# error_handler()
|
||||
#
|
||||
# - Main error handler triggered by ERR trap
|
||||
# - Arguments: exit_code, command, line_number
|
||||
# - Behavior:
|
||||
# * Returns silently if exit_code is 0 (success)
|
||||
# * Sources explain_exit_code() for detailed error description
|
||||
# * Displays error message with:
|
||||
# - Line number where error occurred
|
||||
# - Exit code with explanation
|
||||
# - Command that failed
|
||||
# * Shows last 20 lines of SILENT_LOGFILE if available
|
||||
# * Copies log to container /root for later inspection
|
||||
# * Exits with original exit code
|
||||
# ------------------------------------------------------------------------------
|
||||
error_handler() {
|
||||
local exit_code=${1:-$?}
|
||||
local command=${2:-${BASH_COMMAND:-unknown}}
|
||||
local line_number=${BASH_LINENO[0]:-unknown}
|
||||
|
||||
command="${command//\$STD/}"
|
||||
|
||||
if [[ "$exit_code" -eq 0 ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
local explanation
|
||||
explanation="$(explain_exit_code "$exit_code")"
|
||||
|
||||
printf "\e[?25h"
|
||||
|
||||
# Use msg_error if available, fallback to echo
|
||||
if declare -f msg_error >/dev/null 2>&1; then
|
||||
msg_error "in line ${line_number}: exit code ${exit_code} (${explanation}): while executing command ${command}"
|
||||
else
|
||||
echo -e "\n${RD}[ERROR]${CL} in line ${RD}${line_number}${CL}: exit code ${RD}${exit_code}${CL} (${explanation}): while executing command ${YWB}${command}${CL}\n"
|
||||
fi
|
||||
|
||||
if [[ -n "${DEBUG_LOGFILE:-}" ]]; then
|
||||
{
|
||||
echo "------ ERROR ------"
|
||||
echo "Timestamp : $(date '+%Y-%m-%d %H:%M:%S')"
|
||||
echo "Exit Code : $exit_code ($explanation)"
|
||||
echo "Line : $line_number"
|
||||
echo "Command : $command"
|
||||
echo "-------------------"
|
||||
} >>"$DEBUG_LOGFILE"
|
||||
fi
|
||||
|
||||
# Get active log file (BUILD_LOG or INSTALL_LOG)
|
||||
local active_log=""
|
||||
if declare -f get_active_logfile >/dev/null 2>&1; then
|
||||
active_log="$(get_active_logfile)"
|
||||
elif [[ -n "${SILENT_LOGFILE:-}" ]]; then
|
||||
active_log="$SILENT_LOGFILE"
|
||||
fi
|
||||
|
||||
if [[ -n "$active_log" && -s "$active_log" ]]; then
|
||||
echo "--- Last 20 lines of silent log ---"
|
||||
tail -n 20 "$active_log"
|
||||
echo "-----------------------------------"
|
||||
|
||||
# Detect context: Container (INSTALL_LOG set + /root exists) vs Host (BUILD_LOG)
|
||||
if [[ -n "${INSTALL_LOG:-}" && -d /root ]]; then
|
||||
# CONTAINER CONTEXT: Copy log and create flag file for host
|
||||
local container_log="/root/.install-${SESSION_ID:-error}.log"
|
||||
cp "$active_log" "$container_log" 2>/dev/null || true
|
||||
|
||||
# Create error flag file with exit code for host detection
|
||||
echo "$exit_code" >"/root/.install-${SESSION_ID:-error}.failed" 2>/dev/null || true
|
||||
|
||||
if declare -f msg_custom >/dev/null 2>&1; then
|
||||
msg_custom "📋" "${YW}" "Log saved to: ${container_log}"
|
||||
else
|
||||
echo -e "${YW}Log saved to:${CL} ${BL}${container_log}${CL}"
|
||||
fi
|
||||
else
|
||||
# HOST CONTEXT: Show local log path and offer container cleanup
|
||||
if declare -f msg_custom >/dev/null 2>&1; then
|
||||
msg_custom "📋" "${YW}" "Full log: ${active_log}"
|
||||
else
|
||||
echo -e "${YW}Full log:${CL} ${BL}${active_log}${CL}"
|
||||
fi
|
||||
|
||||
# Offer to remove container if it exists (build errors after container creation)
|
||||
if [[ -n "${CTID:-}" ]] && command -v pct &>/dev/null && pct status "$CTID" &>/dev/null; then
|
||||
echo ""
|
||||
echo -en "${YW}Remove broken container ${CTID}? (Y/n) [auto-remove in 60s]: ${CL}"
|
||||
|
||||
if read -t 60 -r response; then
|
||||
if [[ -z "$response" || "$response" =~ ^[Yy]$ ]]; then
|
||||
echo -e "\n${YW}Removing container ${CTID}${CL}"
|
||||
pct stop "$CTID" &>/dev/null || true
|
||||
pct destroy "$CTID" &>/dev/null || true
|
||||
echo -e "${GN}✔${CL} Container ${CTID} removed"
|
||||
elif [[ "$response" =~ ^[Nn]$ ]]; then
|
||||
echo -e "\n${YW}Container ${CTID} kept for debugging${CL}"
|
||||
fi
|
||||
else
|
||||
# Timeout - auto-remove
|
||||
echo -e "\n${YW}No response - auto-removing container${CL}"
|
||||
pct stop "$CTID" &>/dev/null || true
|
||||
pct destroy "$CTID" &>/dev/null || true
|
||||
echo -e "${GN}✔${CL} Container ${CTID} removed"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
exit "$exit_code"
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 3: SIGNAL HANDLERS
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# on_exit()
|
||||
#
|
||||
# - EXIT trap handler
|
||||
# - Cleans up lock files if lockfile variable is set
|
||||
# - Exits with captured exit code
|
||||
# - Always runs on script termination (success or failure)
|
||||
# ------------------------------------------------------------------------------
|
||||
on_exit() {
|
||||
local exit_code=$?
|
||||
[[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
|
||||
exit "$exit_code"
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# on_interrupt()
|
||||
#
|
||||
# - SIGINT (Ctrl+C) trap handler
|
||||
# - Displays "Interrupted by user" message
|
||||
# - Exits with code 130 (128 + SIGINT=2)
|
||||
# ------------------------------------------------------------------------------
|
||||
on_interrupt() {
|
||||
if declare -f msg_error >/dev/null 2>&1; then
|
||||
msg_error "Interrupted by user (SIGINT)"
|
||||
else
|
||||
echo -e "\n${RD}Interrupted by user (SIGINT)${CL}"
|
||||
fi
|
||||
exit 130
|
||||
}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# on_terminate()
|
||||
#
|
||||
# - SIGTERM trap handler
|
||||
# - Displays "Terminated by signal" message
|
||||
# - Exits with code 143 (128 + SIGTERM=15)
|
||||
# - Triggered by external process termination
|
||||
# ------------------------------------------------------------------------------
|
||||
on_terminate() {
|
||||
if declare -f msg_error >/dev/null 2>&1; then
|
||||
msg_error "Terminated by signal (SIGTERM)"
|
||||
else
|
||||
echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}"
|
||||
fi
|
||||
exit 143
|
||||
}
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 4: INITIALIZATION
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# catch_errors()
|
||||
#
|
||||
# - Initializes error handling and signal traps
|
||||
# - Enables strict error handling:
|
||||
# * set -Ee: Exit on error, inherit ERR trap in functions
|
||||
# * set -o pipefail: Pipeline fails if any command fails
|
||||
# * set -u: (optional) Exit on undefined variable (if STRICT_UNSET=1)
|
||||
# - Sets up traps:
|
||||
# * ERR → error_handler
|
||||
# * EXIT → on_exit
|
||||
# * INT → on_interrupt
|
||||
# * TERM → on_terminate
|
||||
# - Call this function early in every script
|
||||
# ------------------------------------------------------------------------------
|
||||
catch_errors() {
|
||||
set -Ee -o pipefail
|
||||
if [ "${STRICT_UNSET:-0}" = "1" ]; then
|
||||
set -u
|
||||
fi
|
||||
|
||||
trap 'error_handler' ERR
|
||||
trap on_exit EXIT
|
||||
trap on_interrupt INT
|
||||
trap on_terminate TERM
|
||||
}
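A minimal consumer would source this file and call catch_errors() before doing any real work; a sketch assuming a local checkout layout:

```bash
#!/usr/bin/env bash
# Minimal consumer of error-handler.func (the relative path is an assumption)
source "$(dirname "${BASH_SOURCE[0]}")/error-handler.func"
catch_errors

false   # any failing command is now routed through error_handler() with an explanation
```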
|
||||
@@ -1,48 +1,79 @@
|
||||
# Copyright (c) 2021-2025 michelroegl-brunner
|
||||
# Author: michelroegl-brunner
|
||||
# License: MIT
|
||||
# https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Copyright (c) 2021-2026 community-scripts ORG
|
||||
# Author: tteck (tteckster)
|
||||
# Co-Author: MickLesk
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
|
||||
# ==============================================================================
|
||||
# INSTALL.FUNC - CONTAINER INSTALLATION & SETUP
|
||||
# ==============================================================================
|
||||
#
|
||||
# This file provides installation functions executed inside LXC containers
|
||||
# after creation. Handles:
|
||||
#
|
||||
# - Network connectivity verification (IPv4/IPv6)
|
||||
# - OS updates and package installation
|
||||
# - DNS resolution checks
|
||||
# - MOTD and SSH configuration
|
||||
# - Container customization and auto-login
|
||||
#
|
||||
# Usage:
|
||||
# - Sourced by <app>-install.sh scripts
|
||||
# - Executes via pct exec inside container
|
||||
# - Requires internet connectivity
|
||||
#
|
||||
# ==============================================================================
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 1: INITIALIZATION
|
||||
# ==============================================================================
|
||||
|
||||
if ! command -v curl >/dev/null 2>&1; then
|
||||
printf "\r\e[2K%b" '\033[93m Setup Source \033[m' >&2
|
||||
apt-get update >/dev/null 2>&1
|
||||
apt-get install -y curl >/dev/null 2>&1
|
||||
apt update >/dev/null 2>&1
|
||||
apt install -y curl >/dev/null 2>&1
|
||||
fi
|
||||
# core.func is included in FUNCTIONS_FILE_PATH
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/core.func"
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/error-handler.func"
|
||||
load_functions
|
||||
# This function enables IPv6 if it's not disabled and sets verbose mode
|
||||
catch_errors
|
||||
|
||||
# ==============================================================================
|
||||
# SECTION 2: NETWORK & CONNECTIVITY
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# verb_ip6()
|
||||
#
|
||||
# - Configures IPv6 based on DISABLEIPV6 variable
|
||||
# - If DISABLEIPV6=yes: disables IPv6 via sysctl
|
||||
# - Sets verbose mode via set_std_mode()
|
||||
# ------------------------------------------------------------------------------
|
||||
verb_ip6() {
|
||||
set_std_mode # Set STD mode based on VERBOSE
|
||||
|
||||
if [ "$DISABLEIPV6" == "yes" ]; then
|
||||
echo "net.ipv6.conf.all.disable_ipv6 = 1" >>/etc/sysctl.conf
|
||||
$STD sysctl -p
|
||||
if [ "${IPV6_METHOD:-}" = "disable" ]; then
|
||||
msg_info "Disabling IPv6 (this may affect some services)"
|
||||
mkdir -p /etc/sysctl.d
|
||||
$STD tee /etc/sysctl.d/99-disable-ipv6.conf >/dev/null <<EOF
|
||||
# Disable IPv6 (set by community-scripts)
|
||||
net.ipv6.conf.all.disable_ipv6 = 1
|
||||
net.ipv6.conf.default.disable_ipv6 = 1
|
||||
net.ipv6.conf.lo.disable_ipv6 = 1
|
||||
EOF
|
||||
$STD sysctl -p /etc/sysctl.d/99-disable-ipv6.conf
|
||||
msg_ok "Disabled IPv6"
|
||||
fi
|
||||
}
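The rewritten function is driven by IPV6_METHOD rather than the old DISABLEIPV6 flag; for example:

```bash
# Opt into the sysctl-based IPv6 disable shown above (illustrative)
IPV6_METHOD="disable"
verb_ip6
sysctl net.ipv6.conf.all.disable_ipv6   # expected to report "= 1" afterwards
```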
|
||||
|
||||
# This function sets error handling options and defines the error_handler function to handle errors
|
||||
catch_errors() {
|
||||
set -Eeuo pipefail
|
||||
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
}
|
||||
|
||||
# This function handles errors
|
||||
error_handler() {
|
||||
printf "\e[?25h"
|
||||
local exit_code="$?"
|
||||
local line_number="$1"
|
||||
local command="$2"
|
||||
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
|
||||
echo -e "\n$error_message"
|
||||
if [[ "$line_number" -eq 51 ]]; then
|
||||
echo -e "The silent function has suppressed the error, run the script with verbose mode enabled, which will provide more detailed output.\n"
|
||||
post_update_to_api "failed" "No error message, script ran in silent mode"
|
||||
else
|
||||
post_update_to_api "failed" "${command}"
|
||||
fi
|
||||
}
|
||||
|
||||
# This function sets up the Container OS by generating the locale, setting the timezone, and checking the network connection
|
||||
# ------------------------------------------------------------------------------
|
||||
# setting_up_container()
|
||||
#
|
||||
# - Verifies network connectivity via hostname -I
|
||||
# - Retries up to RETRY_NUM times with RETRY_EVERY seconds delay
|
||||
# - Removes Python EXTERNALLY-MANAGED restrictions
|
||||
# - Disables systemd-networkd-wait-online.service for faster boot
|
||||
# - Exits with error if network unavailable after retries
|
||||
# ------------------------------------------------------------------------------
|
||||
setting_up_container() {
|
||||
msg_info "Setting up Container OS"
|
||||
for ((i = RETRY_NUM; i > 0; i--)); do
|
||||
@@ -64,8 +95,17 @@ setting_up_container() {
|
||||
msg_ok "Network Connected: ${BL}$(hostname -I)"
|
||||
}
|
||||
|
||||
# This function checks the network connection by pinging a known IP address and prompts the user to continue if the internet is not connected
|
||||
# ------------------------------------------------------------------------------
|
||||
# network_check()
|
||||
#
|
||||
# - Comprehensive network connectivity check for IPv4 and IPv6
|
||||
# - Tests connectivity to multiple DNS servers:
|
||||
# * IPv4: 1.1.1.1 (Cloudflare), 8.8.8.8 (Google), 9.9.9.9 (Quad9)
|
||||
# * IPv6: 2606:4700:4700::1111, 2001:4860:4860::8888, 2620:fe::fe
|
||||
# - Verifies DNS resolution for GitHub and Community-Scripts domains
|
||||
# - Prompts user to continue if no internet detected
|
||||
# - Uses fatal() on DNS resolution failure for critical hosts
|
||||
# ------------------------------------------------------------------------------
|
||||
network_check() {
|
||||
set +e
|
||||
trap - ERR
|
||||
@@ -125,7 +165,19 @@ network_check() {
|
||||
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
|
||||
}
|
||||
|
||||
# This function updates the Container OS by running apt-get update and upgrade
|
||||
# ==============================================================================
|
||||
# SECTION 3: OS UPDATE & PACKAGE MANAGEMENT
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# update_os()
|
||||
#
|
||||
# - Updates container OS via apt-get update and dist-upgrade
|
||||
# - Configures APT cacher proxy if CACHER=yes (accelerates package downloads)
|
||||
# - Removes Python EXTERNALLY-MANAGED restrictions for pip
|
||||
# - Sources tools.func for additional setup functions after update
|
||||
# - Uses $STD wrapper to suppress output unless VERBOSE=yes
|
||||
# ------------------------------------------------------------------------------
|
||||
update_os() {
|
||||
msg_info "Updating Container OS"
|
||||
if [[ "$CACHER" == "yes" ]]; then
|
||||
@@ -145,29 +197,37 @@ EOF
|
||||
rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
|
||||
msg_ok "Updated Container OS"
|
||||
|
||||
# tools.func is included in FUNCTIONS_FILE_PATH
|
||||
source "$(dirname "${BASH_SOURCE[0]}")/tools.func"
|
||||
}
|
||||
|
||||
# This function modifies the message of the day (motd) and SSH settings
|
||||
# ==============================================================================
|
||||
# SECTION 4: MOTD & SSH CONFIGURATION
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# motd_ssh()
|
||||
#
|
||||
# - Configures Message of the Day (MOTD) with container information
|
||||
# - Creates /etc/profile.d/00_lxc-details.sh with:
|
||||
# * Application name
|
||||
# * Warning banner (DEV repository)
|
||||
# * OS name and version
|
||||
# * Hostname and IP address
|
||||
# * GitHub repository link
|
||||
# - Disables executable flag on /etc/update-motd.d/* scripts
|
||||
# - Enables root SSH access if SSH_ROOT=yes
|
||||
# - Configures TERM environment variable for better terminal support
|
||||
# ------------------------------------------------------------------------------
|
||||
motd_ssh() {
|
||||
# Set terminal to 256-color mode
|
||||
grep -qxF "export TERM='xterm-256color'" /root/.bashrc || echo "export TERM='xterm-256color'" >>/root/.bashrc
|
||||
|
||||
# Get OS information (Debian / Ubuntu)
|
||||
if [ -f "/etc/os-release" ]; then
|
||||
OS_NAME=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"')
|
||||
OS_VERSION=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"')
|
||||
elif [ -f "/etc/debian_version" ]; then
|
||||
OS_NAME="Debian"
|
||||
OS_VERSION=$(cat /etc/debian_version)
|
||||
fi
|
||||
|
||||
PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
|
||||
echo "echo -e \"\"" >"$PROFILE_FILE"
|
||||
echo -e "echo -e \"${BOLD}${APPLICATION} LXC Container${CL}"\" >>"$PROFILE_FILE"
|
||||
echo -e "echo -e \"${TAB}${GATEWAY}${YW} Provided by: ${GN}community-scripts ORG ${YW}| GitHub: ${GN}https://github.com/community-scripts/ProxmoxVE${CL}\"" >>"$PROFILE_FILE"
|
||||
echo "echo \"\"" >>"$PROFILE_FILE"
|
||||
echo -e "echo -e \"${TAB}${OS}${YW} OS: ${GN}${OS_NAME} - Version: ${OS_VERSION}${CL}\"" >>"$PROFILE_FILE"
|
||||
echo -e "echo -e \"${TAB}${OS}${YW} OS: ${GN}\$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '\"') - Version: \$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '\"')${CL}\"" >>"$PROFILE_FILE"
|
||||
echo -e "echo -e \"${TAB}${HOSTNAME}${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE"
|
||||
echo -e "echo -e \"${TAB}${INFO}${YW} IP Address: ${GN}\$(hostname -I | awk '{print \$1}')${CL}\"" >>"$PROFILE_FILE"
|
||||
|
||||
@@ -180,7 +240,19 @@ motd_ssh() {
|
||||
fi
|
||||
}
|
||||
|
||||
# This function customizes the container by modifying the getty service and enabling auto-login for the root user
|
||||
# ==============================================================================
|
||||
# SECTION 5: CONTAINER CUSTOMIZATION
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# customize()
|
||||
#
|
||||
# - Customizes container for passwordless root login if PASSWORD is empty
|
||||
# - Configures getty for auto-login via /etc/systemd/system/container-getty@1.service.d/override.conf
|
||||
# - Creates /usr/bin/update script for easy application updates
|
||||
# - Injects SSH authorized keys if SSH_AUTHORIZED_KEY variable is set
|
||||
# - Sets proper permissions on SSH directories and key files
|
||||
# ------------------------------------------------------------------------------
|
||||
customize() {
|
||||
if [[ "$PASSWORD" == "" ]]; then
|
||||
msg_info "Customizing Container"
|
||||
|
||||
File diff suppressed because it is too large
44
scripts/ct/debian.sh
Normal file
@@ -0,0 +1,44 @@
|
||||
#!/usr/bin/env bash
|
||||
SCRIPT_DIR="$(dirname "$0")"
|
||||
source "$SCRIPT_DIR/../core/build.func"
|
||||
# Copyright (c) 2021-2026 tteck
|
||||
# Author: tteck (tteckster)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://www.debian.org/
|
||||
|
||||
APP="Debian"
|
||||
var_tags="${var_tags:-os}"
|
||||
var_cpu="${var_cpu:-1}"
|
||||
var_ram="${var_ram:-512}"
|
||||
var_disk="${var_disk:-2}"
|
||||
var_os="${var_os:-debian}"
|
||||
var_version="${var_version:-13}"
|
||||
var_unprivileged="${var_unprivileged:-1}"
|
||||
|
||||
header_info "$APP"
|
||||
variables
|
||||
color
|
||||
catch_errors
|
||||
|
||||
function update_script() {
|
||||
header_info
|
||||
check_container_storage
|
||||
check_container_resources
|
||||
if [[ ! -d /var ]]; then
|
||||
msg_error "No ${APP} Installation Found!"
|
||||
exit
|
||||
fi
|
||||
msg_info "Updating $APP LXC"
|
||||
$STD apt update
|
||||
$STD apt -y upgrade
|
||||
msg_ok "Updated $APP LXC"
|
||||
msg_ok "Updated successfully!"
|
||||
exit
|
||||
}
|
||||
|
||||
start
|
||||
build_container
|
||||
description
|
||||
|
||||
msg_ok "Completed successfully!\n"
|
||||
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
|
||||
18
scripts/install/debian-install.sh
Normal file
@@ -0,0 +1,18 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (c) 2021-2026 tteck
|
||||
# Author: tteck (tteckster)
|
||||
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
|
||||
# Source: https://www.debian.org/
|
||||
|
||||
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
|
||||
color
|
||||
verb_ip6
|
||||
catch_errors
|
||||
setting_up_container
|
||||
network_check
|
||||
update_os
|
||||
|
||||
motd_ssh
|
||||
customize
|
||||
cleanup_lxc
|
||||
473
server.js
@@ -75,9 +75,14 @@ const handle = app.getRequestHandler();
|
||||
* @property {boolean} [isUpdate]
|
||||
* @property {boolean} [isShell]
|
||||
* @property {boolean} [isBackup]
|
||||
* @property {boolean} [isClone]
|
||||
* @property {string} [containerId]
|
||||
* @property {string} [storage]
|
||||
* @property {string} [backupStorage]
|
||||
* @property {number} [cloneCount]
|
||||
* @property {string[]} [hostnames]
|
||||
* @property {'lxc'|'vm'} [containerType]
|
||||
* @property {Record<string, string|number|boolean>} [envVars]
|
||||
*/
|
||||
|
||||
class ScriptExecutionHandler {
|
||||
@@ -295,19 +300,21 @@ class ScriptExecutionHandler {
|
||||
* @param {WebSocketMessage} message
|
||||
*/
|
||||
async handleMessage(ws, message) {
|
||||
const { action, scriptPath, executionId, input, mode, server, isUpdate, isShell, isBackup, containerId, storage, backupStorage } = message;
|
||||
const { action, scriptPath, executionId, input, mode, server, isUpdate, isShell, isBackup, isClone, containerId, storage, backupStorage, cloneCount, hostnames, containerType, envVars } = message;
|
||||
|
||||
switch (action) {
|
||||
case 'start':
|
||||
if (scriptPath && executionId) {
|
||||
if (isBackup && containerId && storage) {
|
||||
if (isClone && containerId && storage && server && cloneCount && hostnames && containerType) {
|
||||
await this.startSSHCloneExecution(ws, containerId, executionId, storage, server, containerType, cloneCount, hostnames);
|
||||
} else if (isBackup && containerId && storage) {
|
||||
await this.startBackupExecution(ws, containerId, executionId, storage, mode, server);
|
||||
} else if (isUpdate && containerId) {
|
||||
await this.startUpdateExecution(ws, containerId, executionId, mode, server, backupStorage);
|
||||
} else if (isShell && containerId) {
|
||||
await this.startShellExecution(ws, containerId, executionId, mode, server);
|
||||
} else {
|
||||
await this.startScriptExecution(ws, scriptPath, executionId, mode, server);
|
||||
await this.startScriptExecution(ws, scriptPath, executionId, mode, server, envVars);
|
||||
}
|
||||
} else {
|
||||
this.sendMessage(ws, {
|
||||
@@ -345,8 +352,9 @@ class ScriptExecutionHandler {
|
||||
* @param {string} executionId
|
||||
* @param {string} mode
|
||||
* @param {ServerInfo|null} server
|
||||
* @param {Object} [envVars] - Optional environment variables to pass to the script
|
||||
*/
|
||||
async startScriptExecution(ws, scriptPath, executionId, mode = 'local', server = null) {
|
||||
async startScriptExecution(ws, scriptPath, executionId, mode = 'local', server = null, envVars = {}) {
|
||||
/** @type {number|null} */
|
||||
let installationId = null;
|
||||
|
||||
@@ -375,7 +383,7 @@ class ScriptExecutionHandler {
|
||||
|
||||
// Handle SSH execution
|
||||
if (mode === 'ssh' && server) {
|
||||
await this.startSSHScriptExecution(ws, scriptPath, executionId, server, installationId);
|
||||
await this.startSSHScriptExecution(ws, scriptPath, executionId, server, installationId, envVars);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -401,19 +409,32 @@ class ScriptExecutionHandler {
|
||||
return;
|
||||
}
|
||||
|
||||
// Format environment variables for local execution
|
||||
// Convert envVars object to environment variables
|
||||
const envWithVars = {
|
||||
...process.env,
|
||||
TERM: 'xterm-256color', // Enable proper terminal support
|
||||
FORCE_ANSI: 'true', // Allow ANSI codes for proper display
|
||||
COLUMNS: '80', // Set terminal width
|
||||
LINES: '24' // Set terminal height
|
||||
};
|
||||
|
||||
// Add envVars to environment
|
||||
if (envVars && typeof envVars === 'object') {
|
||||
for (const [key, value] of Object.entries(envVars)) {
|
||||
/** @type {Record<string, string>} */
|
||||
const envRecord = envWithVars;
|
||||
envRecord[key] = String(value);
|
||||
}
|
||||
}
|
||||
|
||||
// Start script execution with pty for proper TTY support
|
||||
const childProcess = ptySpawn('bash', [resolvedPath], {
|
||||
cwd: scriptsDir,
|
||||
name: 'xterm-256color',
|
||||
cols: 80,
|
||||
rows: 24,
|
||||
env: {
|
||||
...process.env,
|
||||
TERM: 'xterm-256color', // Enable proper terminal support
|
||||
FORCE_ANSI: 'true', // Allow ANSI codes for proper display
|
||||
COLUMNS: '80', // Set terminal width
|
||||
LINES: '24' // Set terminal height
|
||||
}
|
||||
env: envWithVars
|
||||
});
|
||||
|
||||
// pty handles encoding automatically
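The env merge above can be read as a small standalone helper; here is a minimal sketch, assuming `ptySpawn` is node-pty's `spawn` and that every user-supplied value must be stringified before it reaches the child environment.

```typescript
// Minimal sketch of the local-execution env merge, assuming node-pty's spawn().
import { spawn as ptySpawn } from 'node-pty';

type EnvVars = Record<string, string | number | boolean>;

function spawnScriptWithEnv(scriptPath: string, cwd: string, envVars: EnvVars = {}) {
  // Start from the parent environment, then force terminal-friendly settings.
  const env: Record<string, string> = {
    ...(process.env as Record<string, string>),
    TERM: 'xterm-256color',
    FORCE_ANSI: 'true',
    COLUMNS: '80',
    LINES: '24',
  };

  // Child environments only accept strings, so numbers/booleans are stringified.
  for (const [key, value] of Object.entries(envVars)) {
    env[key] = String(value);
  }

  return ptySpawn('bash', [scriptPath], {
    name: 'xterm-256color',
    cols: 80,
    rows: 24,
    cwd,
    env,
  });
}
```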
|
||||
@@ -516,8 +537,9 @@ class ScriptExecutionHandler {
|
||||
* @param {string} executionId
|
||||
* @param {ServerInfo} server
|
||||
* @param {number|null} installationId
|
||||
* @param {Object} [envVars] - Optional environment variables to pass to the script
|
||||
*/
|
||||
async startSSHScriptExecution(ws, scriptPath, executionId, server, installationId = null) {
|
||||
async startSSHScriptExecution(ws, scriptPath, executionId, server, installationId = null, envVars = {}) {
|
||||
const sshService = getSSHExecutionService();
|
||||
|
||||
// Send start message
|
||||
@@ -606,7 +628,8 @@ class ScriptExecutionHandler {
|
||||
|
||||
// Clean up
|
||||
this.activeExecutions.delete(executionId);
|
||||
}
|
||||
},
|
||||
envVars
|
||||
));
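How the extra `envVars` argument is consumed by the SSH service is not shown in this diff; one plausible approach, sketched below purely as an assumption, is to serialize the variables into `KEY=VALUE` prefixes on the remote bash invocation.

```typescript
// Hypothetical serialization of envVars for a remote invocation. The real
// sshExecutionService may pass variables differently; this only illustrates
// the shape of the data handed over above.
type EnvVars = Record<string, string | number | boolean>;

function shellQuote(value: string): string {
  // Single-quote and escape embedded single quotes for POSIX shells.
  return `'${value.replace(/'/g, `'\\''`)}'`;
}

function buildRemoteCommand(scriptPath: string, envVars: EnvVars): string {
  const prefix = Object.entries(envVars)
    .map(([key, value]) => `${key}=${shellQuote(String(value))}`)
    .join(' ');
  return `${prefix} bash ${scriptPath}`.trim();
}

// buildRemoteCommand('ct/debian.sh', { var_cpu: 2, var_hostname: 'debian' })
// → "var_cpu='2' var_hostname='debian' bash ct/debian.sh"
```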
|
||||
|
||||
// Store the execution with installation ID
|
||||
@@ -832,6 +855,422 @@ class ScriptExecutionHandler {
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Start SSH clone execution
|
||||
* Gets next IDs sequentially: get next ID → clone → get next ID → clone, etc.
|
||||
* @param {ExtendedWebSocket} ws
|
||||
* @param {string} containerId
|
||||
* @param {string} executionId
|
||||
* @param {string} storage
|
||||
* @param {ServerInfo} server
|
||||
* @param {'lxc'|'vm'} containerType
|
||||
* @param {number} cloneCount
|
||||
* @param {string[]} hostnames
|
||||
*/
|
||||
async startSSHCloneExecution(ws, containerId, executionId, storage, server, containerType, cloneCount, hostnames) {
|
||||
const sshService = getSSHExecutionService();
|
||||
|
||||
this.sendMessage(ws, {
|
||||
type: 'start',
|
||||
data: `Starting clone operation: Creating ${cloneCount} clone(s) of ${containerType.toUpperCase()} ${containerId}...`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
|
||||
try {
|
||||
// Step 1: Stop source container/VM
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\n[Step 1/${4 + cloneCount}] Stopping source ${containerType.toUpperCase()} ${containerId}...\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
|
||||
const stopCommand = containerType === 'lxc' ? `pct stop ${containerId}` : `qm stop ${containerId}`;
|
||||
await new Promise(/** @type {(resolve: (value?: void) => void, reject: (error?: any) => void) => void} */ ((resolve, reject) => {
|
||||
sshService.executeCommand(
|
||||
server,
|
||||
stopCommand,
|
||||
/** @param {string} data */
|
||||
(data) => {
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: data,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
},
|
||||
/** @param {string} error */
|
||||
(error) => {
|
||||
this.sendMessage(ws, {
|
||||
type: 'error',
|
||||
data: error,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
},
|
||||
/** @param {number} code */
|
||||
(code) => {
|
||||
if (code === 0) {
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\n[Step 1/${4 + cloneCount}] Source ${containerType.toUpperCase()} stopped successfully.\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
resolve();
|
||||
} else {
|
||||
// Continue even if stop fails (might already be stopped)
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\n[Step 1/${4 + cloneCount}] Stop command completed with exit code ${code} (container may already be stopped).\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
resolve();
|
||||
}
|
||||
}
|
||||
);
|
||||
}));
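The same `new Promise(...)` wrapper around the callback-based `sshService.executeCommand(server, command, onData, onError, onExit)` call recurs for every step below; a reusable wrapper along these lines (names illustrative, signature inferred from the calls in this file) would shorten the method considerably.

```typescript
// Reusable promise wrapper for the callback signature used above; the interface
// is inferred from the call sites in this file, not taken from the service itself.
interface SSHService {
  executeCommand(
    server: unknown,
    command: string,
    onData: (data: string) => void,
    onError: (error: string) => void,
    onExit: (code: number) => void,
  ): void;
}

function runRemote(
  ssh: SSHService,
  server: unknown,
  command: string,
  onOutput: (chunk: string) => void = () => undefined,
): Promise<{ code: number; output: string }> {
  return new Promise((resolve, reject) => {
    let output = '';
    ssh.executeCommand(
      server,
      command,
      (data) => { output += data; onOutput(data); },
      (error) => reject(new Error(error)),
      (code) => resolve({ code, output }),
    );
  });
}

// e.g. tolerate non-zero exits when the container is already stopped:
// const { code } = await runRemote(sshService, server, `pct stop ${containerId}`);
```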
|
||||
|
||||
// Step 2: Clone for each clone count (get next ID sequentially before each clone)
|
||||
const clonedIds = [];
|
||||
for (let i = 0; i < cloneCount; i++) {
|
||||
const cloneNumber = i + 1;
|
||||
const hostname = hostnames[i];
|
||||
|
||||
// Get next ID for this clone
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\n[Step ${2 + i}/${4 + cloneCount}] Getting next available ID for clone ${cloneNumber}...\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
|
||||
let nextId = '';
|
||||
try {
|
||||
let output = '';
|
||||
await new Promise(/** @type {(resolve: (value?: void) => void, reject: (error?: any) => void) => void} */ ((resolve, reject) => {
|
||||
sshService.executeCommand(
|
||||
server,
|
||||
'pvesh get /cluster/nextid',
|
||||
/** @param {string} data */
|
||||
(data) => {
|
||||
output += data;
|
||||
},
|
||||
/** @param {string} error */
|
||||
(error) => {
|
||||
reject(new Error(`Failed to get next ID: ${error}`));
|
||||
},
|
||||
/** @param {number} exitCode */
|
||||
(exitCode) => {
|
||||
if (exitCode === 0) {
|
||||
resolve();
|
||||
} else {
|
||||
reject(new Error(`pvesh command failed with exit code ${exitCode}`));
|
||||
}
|
||||
}
|
||||
);
|
||||
}));
|
||||
|
||||
nextId = output.trim();
|
||||
if (!nextId || !/^\d+$/.test(nextId)) {
|
||||
throw new Error('Invalid next ID received');
|
||||
}
|
||||
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\n[Step ${2 + i}/${4 + cloneCount}] Got next ID: ${nextId}\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
} catch (error) {
|
||||
this.sendMessage(ws, {
|
||||
type: 'error',
|
||||
data: `\n[Step ${2 + i}/${4 + cloneCount}] Failed to get next ID: ${error instanceof Error ? error.message : String(error)}\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
|
||||
clonedIds.push(nextId);
|
||||
|
||||
// Clone the container/VM
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\n[Step ${2 + i}/${4 + cloneCount}] Cloning ${containerType.toUpperCase()} ${containerId} to ${nextId} with hostname ${hostname}...\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
|
||||
const cloneCommand = containerType === 'lxc'
|
||||
? `pct clone ${containerId} ${nextId} --hostname ${hostname} --storage ${storage}`
|
||||
: `qm clone ${containerId} ${nextId} --name ${hostname} --storage ${storage}`;
|
||||
|
||||
await new Promise(/** @type {(resolve: (value?: void) => void, reject: (error?: any) => void) => void} */ ((resolve, reject) => {
|
||||
sshService.executeCommand(
|
||||
server,
|
||||
cloneCommand,
|
||||
/** @param {string} data */
|
||||
(data) => {
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: data,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
},
|
||||
/** @param {string} error */
|
||||
(error) => {
|
||||
this.sendMessage(ws, {
|
||||
type: 'error',
|
||||
data: error,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
},
|
||||
/** @param {number} code */
|
||||
(code) => {
|
||||
if (code === 0) {
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\n[Step ${2 + i}/${4 + cloneCount}] Clone ${cloneNumber} created successfully.\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
resolve();
|
||||
} else {
|
||||
this.sendMessage(ws, {
|
||||
type: 'error',
|
||||
data: `\nClone ${cloneNumber} failed with exit code: ${code}\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
reject(new Error(`Clone ${cloneNumber} failed with exit code ${code}`));
|
||||
}
|
||||
}
|
||||
);
|
||||
}));
|
||||
}
|
||||
|
||||
// Step 3: Start source container/VM
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\n[Step ${cloneCount + 2}/${4 + cloneCount}] Starting source ${containerType.toUpperCase()} ${containerId}...\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
|
||||
const startSourceCommand = containerType === 'lxc' ? `pct start ${containerId}` : `qm start ${containerId}`;
|
||||
await new Promise(/** @type {(resolve: (value?: void) => void, reject: (error?: any) => void) => void} */ ((resolve) => {
|
||||
sshService.executeCommand(
|
||||
server,
|
||||
startSourceCommand,
|
||||
/** @param {string} data */
|
||||
(data) => {
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: data,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
},
|
||||
/** @param {string} error */
|
||||
(error) => {
|
||||
this.sendMessage(ws, {
|
||||
type: 'error',
|
||||
data: error,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
},
|
||||
/** @param {number} code */
|
||||
(code) => {
|
||||
if (code === 0) {
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\n[Step ${cloneCount + 2}/${4 + cloneCount}] Source ${containerType.toUpperCase()} started successfully.\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
} else {
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\n[Step ${cloneCount + 2}/${4 + cloneCount}] Start command completed with exit code ${code}.\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
}
|
||||
resolve();
|
||||
}
|
||||
);
|
||||
}));
|
||||
|
||||
// Step 4: Start target containers/VMs
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\n[Step ${cloneCount + 3}/${4 + cloneCount}] Starting cloned ${containerType.toUpperCase()}(s)...\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
|
||||
for (let i = 0; i < cloneCount; i++) {
|
||||
const cloneNumber = i + 1;
|
||||
const nextId = clonedIds[i];
|
||||
|
||||
const startTargetCommand = containerType === 'lxc' ? `pct start ${nextId}` : `qm start ${nextId}`;
|
||||
await new Promise(/** @type {(resolve: (value?: void) => void, reject: (error?: any) => void) => void} */ ((resolve) => {
|
||||
sshService.executeCommand(
|
||||
server,
|
||||
startTargetCommand,
|
||||
/** @param {string} data */
|
||||
(data) => {
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: data,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
},
|
||||
/** @param {string} error */
|
||||
(error) => {
|
||||
this.sendMessage(ws, {
|
||||
type: 'error',
|
||||
data: error,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
},
|
||||
/** @param {number} code */
|
||||
(code) => {
|
||||
if (code === 0) {
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\nClone ${cloneNumber} (ID: ${nextId}) started successfully.\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
} else {
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\nClone ${cloneNumber} (ID: ${nextId}) start completed with exit code ${code}.\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
}
|
||||
resolve();
|
||||
}
|
||||
);
|
||||
}));
|
||||
}
|
||||
|
||||
// Step 5: Add to database
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\n[Step ${cloneCount + 4}/${4 + cloneCount}] Adding cloned ${containerType.toUpperCase()}(s) to database...\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
|
||||
for (let i = 0; i < cloneCount; i++) {
|
||||
const nextId = clonedIds[i];
|
||||
const hostname = hostnames[i];
|
||||
|
||||
try {
|
||||
// Read config file to get hostname/name
|
||||
const configPath = containerType === 'lxc'
|
||||
? `/etc/pve/lxc/${nextId}.conf`
|
||||
: `/etc/pve/qemu-server/${nextId}.conf`;
|
||||
|
||||
let configContent = '';
|
||||
await new Promise(/** @type {(resolve: (value?: void) => void) => void} */ ((resolve) => {
|
||||
sshService.executeCommand(
|
||||
server,
|
||||
`cat "${configPath}" 2>/dev/null || echo ""`,
|
||||
/** @param {string} data */
|
||||
(data) => {
|
||||
configContent += data;
|
||||
},
|
||||
() => resolve(),
|
||||
() => resolve()
|
||||
);
|
||||
}));
|
||||
|
||||
// Parse config for hostname/name
|
||||
let finalHostname = hostname;
|
||||
if (configContent.trim()) {
|
||||
const lines = configContent.split('\n');
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim();
|
||||
if (containerType === 'lxc' && trimmed.startsWith('hostname:')) {
|
||||
finalHostname = trimmed.substring(9).trim();
|
||||
break;
|
||||
} else if (containerType === 'vm' && trimmed.startsWith('name:')) {
|
||||
finalHostname = trimmed.substring(5).trim();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!finalHostname) {
|
||||
finalHostname = `${containerType}-${nextId}`;
|
||||
}
|
||||
|
||||
// Create installed script record
|
||||
const script = await this.db.createInstalledScript({
|
||||
script_name: finalHostname,
|
||||
script_path: `cloned/${finalHostname}`,
|
||||
container_id: nextId,
|
||||
server_id: server.id,
|
||||
execution_mode: 'ssh',
|
||||
status: 'success',
|
||||
output_log: `Cloned ${containerType.toUpperCase()}`
|
||||
});
|
||||
|
||||
// For LXC, store config in database
|
||||
if (containerType === 'lxc' && configContent.trim()) {
|
||||
// Simple config parser
|
||||
/** @type {any} */
|
||||
const configData = {};
|
||||
const lines = configContent.split('\n');
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed || trimmed.startsWith('#')) continue;
|
||||
|
||||
const [key, ...valueParts] = trimmed.split(':');
|
||||
const value = valueParts.join(':').trim();
|
||||
|
||||
if (key === 'hostname') configData.hostname = value;
|
||||
else if (key === 'arch') configData.arch = value;
|
||||
else if (key === 'cores') configData.cores = parseInt(value) || null;
|
||||
else if (key === 'memory') configData.memory = parseInt(value) || null;
|
||||
else if (key === 'swap') configData.swap = parseInt(value) || null;
|
||||
else if (key === 'onboot') configData.onboot = parseInt(value) || null;
|
||||
else if (key === 'ostype') configData.ostype = value;
|
||||
else if (key === 'unprivileged') configData.unprivileged = parseInt(value) || null;
|
||||
else if (key === 'tags') configData.tags = value;
|
||||
else if (key === 'rootfs') {
|
||||
const match = value.match(/^([^:]+):([^,]+)/);
|
||||
if (match) {
|
||||
configData.rootfs_storage = match[1];
|
||||
const sizeMatch = value.match(/size=([^,]+)/);
|
||||
if (sizeMatch) {
|
||||
configData.rootfs_size = sizeMatch[1];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
await this.db.createLXCConfig(script.id, configData);
|
||||
}
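For readers unfamiliar with Proxmox config files, a small illustration of what the parser above extracts; the sample file contents are hypothetical, but the keys and the `rootfs` regexes match the code.

```typescript
// Hypothetical /etc/pve/lxc/<id>.conf contents and what the parser above pulls out.
const sampleConf = `
arch: amd64
cores: 2
hostname: web-01
memory: 2048
rootfs: local-lvm:vm-106-disk-0,size=8G
swap: 512
unprivileged: 1
tags: community-script
`;

// Hostname extraction, as in the loop above.
const hostnameLine = sampleConf
  .split('\n')
  .map((l) => l.trim())
  .find((l) => l.startsWith('hostname:'));
const hostname = hostnameLine?.substring('hostname:'.length).trim(); // "web-01"

// rootfs parsing: "<storage>:<volume>[,opts]" plus an optional "size=" option.
const rootfsValue = 'local-lvm:vm-106-disk-0,size=8G';
const storageMatch = rootfsValue.match(/^([^:]+):([^,]+)/); // groups: "local-lvm", "vm-106-disk-0"
const sizeMatch = rootfsValue.match(/size=([^,]+)/);        // group: "8G"

console.log(hostname, storageMatch?.[1], sizeMatch?.[1]);   // web-01 local-lvm 8G
```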
|
||||
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\nClone ${i + 1} (ID: ${nextId}, Hostname: ${finalHostname}) added to database successfully.\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
} catch (error) {
|
||||
this.sendMessage(ws, {
|
||||
type: 'error',
|
||||
data: `\nError adding clone ${i + 1} (ID: ${nextId}) to database: ${error instanceof Error ? error.message : String(error)}\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
this.sendMessage(ws, {
|
||||
type: 'output',
|
||||
data: `\n\n[Clone operation completed successfully!]\nCreated ${cloneCount} clone(s) of ${containerType.toUpperCase()} ${containerId}.\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
|
||||
this.activeExecutions.delete(executionId);
|
||||
} catch (error) {
|
||||
this.sendMessage(ws, {
|
||||
type: 'error',
|
||||
data: `\n\n[Clone operation failed!]\nError: ${error instanceof Error ? error.message : String(error)}\n`,
|
||||
timestamp: Date.now()
|
||||
});
|
||||
this.activeExecutions.delete(executionId);
|
||||
}
|
||||
}
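Seen end to end, the clone flow above reduces to a fixed sequence of Proxmox CLI calls; the sketch below lists them for the LXC case (the VM path swaps `pct` for `qm` and `--hostname` for `--name`). It is a summary of the code above, not an alternative implementation.

```typescript
// Command sequence issued by the clone flow above, for an LXC source.
// "<nextId>" stands for the value returned by the preceding pvesh call.
function lxcCloneCommandPlan(sourceId: string, storage: string, hostnames: string[]): string[] {
  return [
    `pct stop ${sourceId}`,                                   // step 1: stop the source
    ...hostnames.flatMap((hostname) => [
      'pvesh get /cluster/nextid',                            // steps 2..N+1: reserve an ID...
      `pct clone ${sourceId} <nextId> --hostname ${hostname} --storage ${storage}`, // ...and clone
    ]),
    `pct start ${sourceId}`,                                  // step N+2: restart the source
    ...hostnames.map(() => 'pct start <nextId>'),             // step N+3: start each clone
    '# step N+4: record each clone in the local database',
  ];
}

// lxcCloneCommandPlan('105', 'local-lvm', ['web-01', 'web-02']);
```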
|
||||
|
||||
/**
|
||||
* Start update execution (pct enter + update command)
|
||||
* @param {ExtendedWebSocket} ws
|
||||
@@ -1171,6 +1610,7 @@ class ScriptExecutionHandler {
|
||||
// TerminalHandler removed - not used by current application
|
||||
|
||||
app.prepare().then(() => {
|
||||
console.log('> Next.js app prepared successfully');
|
||||
const httpServer = createServer(async (req, res) => {
|
||||
try {
|
||||
// Be sure to pass `true` as the second argument to `url.parse`.
|
||||
@@ -1276,4 +1716,9 @@ app.prepare().then(() => {
|
||||
autoSyncModule.setupGracefulShutdown();
|
||||
}
|
||||
});
|
||||
}).catch((err) => {
|
||||
console.error('> Failed to start server:', err.message);
|
||||
console.error('> If you see "Could not find a production build", run: npm run build');
|
||||
console.error('> Full error:', err);
|
||||
process.exit(1);
|
||||
});
|
||||
|
||||
129
src/app/_components/CloneCountInputModal.tsx
Normal file
@@ -0,0 +1,129 @@
|
||||
'use client';
|
||||
|
||||
import { useState, useEffect } from 'react';
|
||||
import { Button } from './ui/button';
|
||||
import { Input } from './ui/input';
|
||||
import { Copy, X } from 'lucide-react';
|
||||
import { useRegisterModal } from './modal/ModalStackProvider';
|
||||
|
||||
interface CloneCountInputModalProps {
|
||||
isOpen: boolean;
|
||||
onClose: () => void;
|
||||
onSubmit: (count: number) => void;
|
||||
storageName: string;
|
||||
}
|
||||
|
||||
export function CloneCountInputModal({
|
||||
isOpen,
|
||||
onClose,
|
||||
onSubmit,
|
||||
storageName
|
||||
}: CloneCountInputModalProps) {
|
||||
const [cloneCount, setCloneCount] = useState<number>(1);
|
||||
|
||||
useRegisterModal(isOpen, { id: 'clone-count-input-modal', allowEscape: true, onClose });
|
||||
|
||||
useEffect(() => {
|
||||
if (isOpen) {
|
||||
setCloneCount(1); // Reset to default when modal opens
|
||||
}
|
||||
}, [isOpen]);
|
||||
|
||||
if (!isOpen) return null;
|
||||
|
||||
const handleSubmit = () => {
|
||||
if (cloneCount >= 1) {
|
||||
onSubmit(cloneCount);
|
||||
setCloneCount(1); // Reset after submit
|
||||
}
|
||||
};
|
||||
|
||||
const handleClose = () => {
|
||||
setCloneCount(1); // Reset on close
|
||||
onClose();
|
||||
};
|
||||
|
||||
return (
|
||||
<div className="fixed inset-0 backdrop-blur-sm bg-black/50 flex items-center justify-center z-50 p-4">
|
||||
<div className="bg-card rounded-lg shadow-xl max-w-md w-full border border-border">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between p-6 border-b border-border">
|
||||
<div className="flex items-center gap-3">
|
||||
<Copy className="h-6 w-6 text-primary" />
|
||||
<h2 className="text-2xl font-bold text-card-foreground">Clone Count</h2>
|
||||
</div>
|
||||
<Button
|
||||
onClick={handleClose}
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
className="text-muted-foreground hover:text-foreground"
|
||||
>
|
||||
<X className="h-5 w-5" />
|
||||
</Button>
|
||||
</div>
|
||||
|
||||
{/* Content */}
|
||||
<div className="p-6">
|
||||
<p className="text-sm text-muted-foreground mb-4">
|
||||
How many clones would you like to create?
|
||||
</p>
|
||||
|
||||
{storageName && (
|
||||
<div className="mb-4 p-3 bg-muted/50 rounded-lg">
|
||||
<p className="text-sm text-muted-foreground">Storage:</p>
|
||||
<p className="text-sm font-medium text-foreground">{storageName}</p>
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div className="space-y-2 mb-6">
|
||||
<label htmlFor="cloneCount" className="block text-sm font-medium text-foreground">
|
||||
Number of Clones
|
||||
</label>
|
||||
<Input
|
||||
id="cloneCount"
|
||||
type="number"
|
||||
min="1"
|
||||
max="100"
|
||||
value={cloneCount}
|
||||
onChange={(e) => {
|
||||
const value = parseInt(e.target.value, 10);
|
||||
if (!isNaN(value) && value >= 1 && value <= 100) {
|
||||
setCloneCount(value);
|
||||
} else if (e.target.value === '') {
|
||||
setCloneCount(1);
|
||||
}
|
||||
}}
|
||||
className="w-full"
|
||||
placeholder="1"
|
||||
/>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Enter a number between 1 and 100
|
||||
</p>
|
||||
</div>
|
||||
|
||||
{/* Action Buttons */}
|
||||
<div className="flex flex-col sm:flex-row justify-end gap-3">
|
||||
<Button
|
||||
onClick={handleClose}
|
||||
variant="outline"
|
||||
size="default"
|
||||
className="w-full sm:w-auto"
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
onClick={handleSubmit}
|
||||
disabled={cloneCount < 1 || cloneCount > 100}
|
||||
variant="default"
|
||||
size="default"
|
||||
className="w-full sm:w-auto"
|
||||
>
|
||||
Continue
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
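A typical usage of the modal, based on its props interface; the surrounding component and state names are illustrative.

```tsx
// Illustrative parent usage of CloneCountInputModal; state names are made up.
import { useState } from 'react';
import { CloneCountInputModal } from './CloneCountInputModal';

export function CloneFlowExample() {
  const [isCountModalOpen, setCountModalOpen] = useState(false);

  return (
    <CloneCountInputModal
      isOpen={isCountModalOpen}
      onClose={() => setCountModalOpen(false)}
      onSubmit={(count) => {
        setCountModalOpen(false);
        console.log(`User requested ${count} clone(s)`); // continue to hostname entry here
      }}
      storageName="local-lvm"
    />
  );
}
```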
|
||||
|
||||
954
src/app/_components/ConfigurationModal.tsx
Normal file
@@ -0,0 +1,954 @@
|
||||
'use client';
|
||||
|
||||
import { useState, useEffect } from 'react';
|
||||
import { api } from '~/trpc/react';
|
||||
import type { Script } from '~/types/script';
|
||||
import type { Server } from '~/types/server';
|
||||
import { Button } from './ui/button';
|
||||
import { Input } from './ui/input';
|
||||
import { useRegisterModal } from './modal/ModalStackProvider';
|
||||
|
||||
export type EnvVars = Record<string, string | number | boolean>;
|
||||
|
||||
interface ConfigurationModalProps {
|
||||
isOpen: boolean;
|
||||
onClose: () => void;
|
||||
onConfirm: (envVars: EnvVars) => void;
|
||||
script: Script | null;
|
||||
server: Server | null;
|
||||
mode: 'default' | 'advanced';
|
||||
}
|
||||
|
||||
export function ConfigurationModal({
|
||||
isOpen,
|
||||
onClose,
|
||||
onConfirm,
|
||||
script,
|
||||
server,
|
||||
mode,
|
||||
}: ConfigurationModalProps) {
|
||||
useRegisterModal(isOpen, { id: 'configuration-modal', allowEscape: true, onClose });
|
||||
|
||||
// Fetch script data if we only have slug
|
||||
const { data: scriptData } = api.scripts.getScriptBySlug.useQuery(
|
||||
{ slug: script?.slug ?? '' },
|
||||
{ enabled: !!script?.slug && isOpen }
|
||||
);
|
||||
|
||||
const actualScript = script ?? (scriptData?.script ?? null);
|
||||
|
||||
// Fetch storages
|
||||
const { data: rootfsStoragesData } = api.scripts.getRootfsStorages.useQuery(
|
||||
{ serverId: server?.id ?? 0, forceRefresh: false },
|
||||
{ enabled: !!server?.id && isOpen }
|
||||
);
|
||||
|
||||
const { data: templateStoragesData } = api.scripts.getTemplateStorages.useQuery(
|
||||
{ serverId: server?.id ?? 0, forceRefresh: false },
|
||||
{ enabled: !!server?.id && isOpen && mode === 'advanced' }
|
||||
);
|
||||
|
||||
// Get resources from JSON
|
||||
const resources = actualScript?.install_methods?.[0]?.resources;
|
||||
const slug = actualScript?.slug ?? '';
|
||||
|
||||
// Default mode state
|
||||
const [containerStorage, setContainerStorage] = useState<string>('');
|
||||
|
||||
// Advanced mode state
|
||||
const [advancedVars, setAdvancedVars] = useState<EnvVars>({});
|
||||
|
||||
// Validation errors
|
||||
const [errors, setErrors] = useState<Record<string, string>>({});
|
||||
|
||||
// Initialize defaults when script/server data is available
|
||||
useEffect(() => {
|
||||
if (!actualScript || !server) return;
|
||||
|
||||
if (mode === 'default') {
|
||||
// Default mode: minimal vars
|
||||
setContainerStorage('');
|
||||
} else {
|
||||
// Advanced mode: all vars with defaults
|
||||
const defaults: EnvVars = {
|
||||
var_ctid: '', // Empty = use next available ID
|
||||
// Resources from JSON
|
||||
var_cpu: resources?.cpu ?? 1,
|
||||
var_ram: resources?.ram ?? 1024,
|
||||
var_disk: resources?.hdd ?? 4,
|
||||
var_unprivileged: script?.privileged === false ? 1 : (script?.privileged === true ? 0 : 1),
|
||||
|
||||
// Network defaults
|
||||
var_net: 'dhcp',
|
||||
var_brg: 'vmbr0',
|
||||
var_gateway: '',
|
||||
var_ipv6_method: 'none',
|
||||
var_ipv6_static: '',
|
||||
var_vlan: '',
|
||||
var_mtu: 1500,
|
||||
var_mac: '',
|
||||
var_ns: '',
|
||||
var_searchdomain: '',
|
||||
|
||||
// Identity
|
||||
var_hostname: slug,
|
||||
var_pw: '',
|
||||
var_tags: 'community-script',
|
||||
|
||||
// SSH
|
||||
var_ssh: 'no',
|
||||
var_ssh_authorized_key: '',
|
||||
|
||||
// Features
|
||||
var_nesting: 1,
|
||||
var_fuse: 0,
|
||||
var_keyctl: 0,
|
||||
var_mknod: 0,
|
||||
var_mount_fs: '',
|
||||
var_protection: 'no',
|
||||
|
||||
// System
|
||||
var_timezone: '',
|
||||
var_verbose: 'no',
|
||||
var_apt_cacher: 'no',
|
||||
var_apt_cacher_ip: '',
|
||||
|
||||
// Storage
|
||||
var_container_storage: '',
|
||||
var_template_storage: '',
|
||||
};
|
||||
setAdvancedVars(defaults);
|
||||
}
|
||||
}, [actualScript, server, mode, resources, slug]);
|
||||
|
||||
// Validation functions
|
||||
const validateIPv4 = (ip: string): boolean => {
|
||||
if (!ip) return true; // Empty is allowed (auto)
|
||||
const pattern = /^(\d{1,3}\.){3}\d{1,3}$/;
|
||||
if (!pattern.test(ip)) return false;
|
||||
const parts = ip.split('.').map(Number);
|
||||
return parts.every(p => p >= 0 && p <= 255);
|
||||
};
|
||||
|
||||
const validateCIDR = (cidr: string): boolean => {
|
||||
if (!cidr) return true; // Empty is allowed
|
||||
const pattern = /^([0-9]{1,3}\.){3}[0-9]{1,3}\/([0-9]|[1-2][0-9]|3[0-2])$/;
|
||||
if (!pattern.test(cidr)) return false;
|
||||
const parts = cidr.split('/');
|
||||
if (parts.length !== 2) return false;
|
||||
const [ip, prefix] = parts;
|
||||
if (!ip || !prefix) return false;
|
||||
const ipParts = ip.split('.').map(Number);
|
||||
if (!ipParts.every(p => p >= 0 && p <= 255)) return false;
|
||||
const prefixNum = parseInt(prefix, 10);
|
||||
return prefixNum >= 0 && prefixNum <= 32;
|
||||
};
|
||||
|
||||
const validateIPv6 = (ipv6: string): boolean => {
|
||||
if (!ipv6) return true; // Empty is allowed
|
||||
// Basic IPv6 validation (simplified - allows compressed format)
|
||||
const pattern = /^([0-9a-fA-F]{0,4}:){2,7}[0-9a-fA-F]{0,4}(\/\d{1,3})?$/;
|
||||
return pattern.test(ipv6);
|
||||
};
|
||||
|
||||
const validateMAC = (mac: string): boolean => {
|
||||
if (!mac) return true; // Empty is allowed (auto)
|
||||
const pattern = /^([0-9A-Fa-f]{2}:){5}([0-9A-Fa-f]{2})$/;
|
||||
return pattern.test(mac);
|
||||
};
|
||||
|
||||
const validatePositiveInt = (value: string | number | undefined): boolean => {
|
||||
if (value === '' || value === undefined) return true;
|
||||
const num = typeof value === 'string' ? parseInt(value, 10) : value;
|
||||
return !isNaN(num) && num > 0;
|
||||
};
|
||||
|
||||
const validateForm = (): boolean => {
|
||||
const newErrors: Record<string, string> = {};
|
||||
|
||||
if (mode === 'default') {
|
||||
// Default mode: only storage is optional
|
||||
// No validation needed
|
||||
} else {
|
||||
// Advanced mode: validate all fields
|
||||
if (advancedVars.var_gateway && !validateIPv4(advancedVars.var_gateway as string)) {
|
||||
newErrors.var_gateway = 'Invalid IPv4 address';
|
||||
}
|
||||
if (advancedVars.var_mac && !validateMAC(advancedVars.var_mac as string)) {
|
||||
newErrors.var_mac = 'Invalid MAC address format (XX:XX:XX:XX:XX:XX)';
|
||||
}
|
||||
if (advancedVars.var_ns && !validateIPv4(advancedVars.var_ns as string)) {
|
||||
newErrors.var_ns = 'Invalid IPv4 address';
|
||||
}
|
||||
if (advancedVars.var_apt_cacher_ip && !validateIPv4(advancedVars.var_apt_cacher_ip as string)) {
|
||||
newErrors.var_apt_cacher_ip = 'Invalid IPv4 address';
|
||||
}
|
||||
// Validate IPv4 CIDR if network mode is static
|
||||
const netValue = advancedVars.var_net;
|
||||
const isStaticMode = netValue === 'static' || (typeof netValue === 'string' && netValue.includes('/'));
|
||||
if (isStaticMode) {
|
||||
const cidrValue = (typeof netValue === 'string' && netValue.includes('/')) ? netValue : (advancedVars.var_ip as string ?? '');
|
||||
if (cidrValue && !validateCIDR(cidrValue)) {
|
||||
newErrors.var_ip = 'Invalid CIDR format (e.g., 10.10.10.1/24)';
|
||||
}
|
||||
}
|
||||
// Validate IPv6 static if IPv6 method is static
|
||||
if (advancedVars.var_ipv6_method === 'static' && advancedVars.var_ipv6_static) {
|
||||
if (!validateIPv6(advancedVars.var_ipv6_static as string)) {
|
||||
newErrors.var_ipv6_static = 'Invalid IPv6 address';
|
||||
}
|
||||
}
|
||||
if (!validatePositiveInt(advancedVars.var_cpu as string | number | undefined)) {
|
||||
newErrors.var_cpu = 'Must be a positive integer';
|
||||
}
|
||||
if (!validatePositiveInt(advancedVars.var_ram as string | number | undefined)) {
|
||||
newErrors.var_ram = 'Must be a positive integer';
|
||||
}
|
||||
if (!validatePositiveInt(advancedVars.var_disk as string | number | undefined)) {
|
||||
newErrors.var_disk = 'Must be a positive integer';
|
||||
}
|
||||
if (advancedVars.var_mtu && !validatePositiveInt(advancedVars.var_mtu as string | number | undefined)) {
|
||||
newErrors.var_mtu = 'Must be a positive integer';
|
||||
}
|
||||
if (advancedVars.var_vlan && !validatePositiveInt(advancedVars.var_vlan as string | number | undefined)) {
|
||||
newErrors.var_vlan = 'Must be a positive integer';
|
||||
}
|
||||
// Container ID (CTID): if set, must be integer >= 100
|
||||
const ctidVal = advancedVars.var_ctid;
|
||||
if (ctidVal !== '' && ctidVal !== undefined && typeof ctidVal !== 'boolean') {
|
||||
const ctidNum = typeof ctidVal === 'string' ? parseInt(ctidVal, 10) : ctidVal;
|
||||
if (isNaN(ctidNum) || ctidNum < 100) {
|
||||
newErrors.var_ctid = 'Must be 100 or greater';
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
setErrors(newErrors);
|
||||
return Object.keys(newErrors).length === 0;
|
||||
};
|
||||
|
||||
const handleConfirm = () => {
|
||||
if (!validateForm()) {
|
||||
return;
|
||||
}
|
||||
|
||||
let envVars: EnvVars = {};
|
||||
|
||||
if (mode === 'default') {
|
||||
// Default mode: minimal vars
|
||||
envVars = {
|
||||
var_hostname: slug,
|
||||
var_brg: 'vmbr0',
|
||||
var_net: 'dhcp',
|
||||
var_ipv6_method: 'auto',
|
||||
var_ssh: 'no',
|
||||
var_nesting: 1,
|
||||
var_verbose: 'no',
|
||||
var_cpu: resources?.cpu ?? 1,
|
||||
var_ram: resources?.ram ?? 1024,
|
||||
var_disk: resources?.hdd ?? 4,
|
||||
var_unprivileged: script?.privileged === false ? 1 : (script?.privileged === true ? 0 : 1),
|
||||
};
|
||||
|
||||
if (containerStorage) {
|
||||
envVars.var_container_storage = containerStorage;
|
||||
}
|
||||
} else {
|
||||
// Advanced mode: all vars
|
||||
envVars = { ...advancedVars };
|
||||
|
||||
// If network mode is static and var_ip is set, replace var_net with the CIDR
|
||||
if (envVars.var_net === 'static' && envVars.var_ip) {
|
||||
envVars.var_net = envVars.var_ip as string;
|
||||
delete envVars.var_ip; // Remove the temporary var_ip
|
||||
}
|
||||
|
||||
// Format password correctly: if var_pw is set, format it as "-password <password>"
|
||||
// build.func expects PW to be in "-password <password>" format when added to PCT_OPTIONS
|
||||
const rawPassword = envVars.var_pw;
|
||||
const hasPassword = rawPassword && typeof rawPassword === 'string' && rawPassword.trim() !== '';
|
||||
const hasSSHKey = envVars.var_ssh_authorized_key && typeof envVars.var_ssh_authorized_key === 'string' && envVars.var_ssh_authorized_key.trim() !== '';
|
||||
|
||||
if (hasPassword) {
|
||||
// Remove any existing "-password" prefix to avoid double-formatting
|
||||
const cleanPassword = rawPassword.startsWith('-password ')
|
||||
? rawPassword.substring('-password '.length)
|
||||
: rawPassword;
|
||||
// Format as "-password <password>" for build.func
|
||||
envVars.var_pw = `-password ${cleanPassword}`;
|
||||
} else {
|
||||
// Empty password means auto-login, clear var_pw
|
||||
envVars.var_pw = '';
|
||||
}
|
||||
|
||||
|
||||
if ((hasPassword || hasSSHKey) && envVars.var_ssh !== 'no') {
|
||||
envVars.var_ssh = 'yes';
|
||||
}
|
||||
}
|
||||
|
||||
// Remove empty string values (but keep 0, false, etc.)
|
||||
const cleaned: EnvVars = {};
|
||||
for (const [key, value] of Object.entries(envVars)) {
|
||||
if (value !== '' && value !== undefined) {
|
||||
// Send var_ctid as number so the script receives a numeric ID
|
||||
if (key === 'var_ctid') {
|
||||
cleaned[key] = Number(value);
|
||||
} else {
|
||||
cleaned[key] = value;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Always set mode to "default" (build.func line 1783 expects this)
|
||||
cleaned.mode = 'default';
|
||||
|
||||
onConfirm(cleaned);
|
||||
};
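As a concrete illustration of the output, here is roughly what `handleConfirm` would pass to `onConfirm` for an advanced-mode run with a static IP and a root password; the values are made up, but each transformation (CIDR folded into `var_net`, `-password` prefix, numeric `var_ctid`, trailing `mode`) mirrors the code above.

```typescript
// Example result of handleConfirm for advanced mode (illustrative values).
type EnvVars = Record<string, string | number | boolean>; // same shape as exported above

const confirmedEnvVars: EnvVars = {
  var_ctid: 120,                    // sent as a number so the script gets a numeric ID
  var_cpu: 2,
  var_ram: 2048,
  var_disk: 8,
  var_unprivileged: 1,
  var_net: '10.10.10.50/24',        // static mode: the temporary var_ip replaced var_net
  var_gateway: '10.10.10.1',
  var_brg: 'vmbr0',
  var_hostname: 'myapp',
  var_pw: '-password s3cret',       // build.func expects the "-password <pw>" form
  var_ssh: 'no',
  var_nesting: 1,
  var_tags: 'community-script',
  mode: 'default',                  // always appended last
};
```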
|
||||
|
||||
const updateAdvancedVar = (key: string, value: string | number | boolean) => {
|
||||
setAdvancedVars(prev => ({ ...prev, [key]: value }));
|
||||
// Clear error for this field
|
||||
if (errors[key]) {
|
||||
setErrors(prev => {
|
||||
const newErrors = { ...prev };
|
||||
delete newErrors[key];
|
||||
return newErrors;
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
if (!isOpen) return null;
|
||||
|
||||
const rootfsStorages = rootfsStoragesData?.storages ?? [];
|
||||
const templateStorages = templateStoragesData?.storages ?? [];
|
||||
|
||||
return (
|
||||
<div className="fixed inset-0 backdrop-blur-sm bg-black/50 flex items-center justify-center z-50 p-4">
|
||||
<div className="bg-card rounded-lg shadow-xl max-w-4xl w-full border border-border max-h-[90vh] overflow-y-auto">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between p-6 border-b border-border">
|
||||
<h2 className="text-xl font-bold text-foreground">
|
||||
{mode === 'default' ? 'Default Configuration' : 'Advanced Configuration'}
|
||||
</h2>
|
||||
<Button
|
||||
onClick={onClose}
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
className="text-muted-foreground hover:text-foreground"
|
||||
>
|
||||
<svg className="w-6 h-6" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
|
||||
</svg>
|
||||
</Button>
|
||||
</div>
|
||||
|
||||
{/* Content */}
|
||||
<div className="p-6">
|
||||
{mode === 'default' ? (
|
||||
/* Default Mode */
|
||||
<div className="space-y-6">
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Container Storage
|
||||
</label>
|
||||
<select
|
||||
value={containerStorage}
|
||||
onChange={(e) => setContainerStorage(e.target.value)}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none"
|
||||
>
|
||||
<option value="">Auto (let script choose)</option>
|
||||
{rootfsStorages.map((storage) => (
|
||||
<option key={storage.name} value={storage.name}>
|
||||
{storage.name} ({storage.type})
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
{rootfsStorages.length === 0 && (
|
||||
<p className="mt-1 text-xs text-muted-foreground">
|
||||
Could not fetch storages. Script will use default selection.
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<div className="bg-muted/50 rounded-lg p-4 border border-border">
|
||||
<h3 className="text-sm font-medium text-foreground mb-2">Default Values</h3>
|
||||
<div className="text-xs text-muted-foreground space-y-1">
|
||||
<p>Hostname: {slug}</p>
|
||||
<p>Bridge: vmbr0</p>
|
||||
<p>Network: DHCP</p>
|
||||
<p>IPv6: Auto</p>
|
||||
<p>SSH: Disabled</p>
|
||||
<p>Nesting: Enabled</p>
|
||||
<p>CPU: {resources?.cpu ?? 1}</p>
|
||||
<p>RAM: {resources?.ram ?? 1024} MB</p>
|
||||
<p>Disk: {resources?.hdd ?? 4} GB</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
) : (
|
||||
/* Advanced Mode */
|
||||
<div className="space-y-6">
|
||||
{/* Container ID (CTID) - at top so user can set a specific ID */}
|
||||
<div>
|
||||
<h3 className="text-lg font-medium text-foreground mb-4">Container ID (CTID)</h3>
|
||||
<div className="grid grid-cols-2 gap-4">
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Container ID (CTID)
|
||||
</label>
|
||||
<Input
|
||||
type="number"
|
||||
min="100"
|
||||
value={typeof advancedVars.var_ctid === 'boolean' ? '' : (advancedVars.var_ctid ?? '')}
|
||||
onChange={(e) => {
|
||||
const v = e.target.value;
|
||||
updateAdvancedVar('var_ctid', v === '' ? '' : parseInt(v, 10) || '');
|
||||
}}
|
||||
placeholder="Auto (next available)"
|
||||
className={errors.var_ctid ? 'border-destructive' : ''}
|
||||
/>
|
||||
{errors.var_ctid && (
|
||||
<p className="mt-1 text-xs text-destructive">{errors.var_ctid}</p>
|
||||
)}
|
||||
<p className="mt-1 text-xs text-muted-foreground">
|
||||
Leave empty to use the next available ID. Must be 100 or greater.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Resources */}
|
||||
<div>
|
||||
<h3 className="text-lg font-medium text-foreground mb-4">Resources</h3>
|
||||
<div className="grid grid-cols-2 gap-4">
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
CPU Cores *
|
||||
</label>
|
||||
<Input
|
||||
type="number"
|
||||
min="1"
|
||||
value={typeof advancedVars.var_cpu === 'boolean' ? '' : (advancedVars.var_cpu ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_cpu', parseInt(e.target.value) || 1)}
|
||||
className={errors.var_cpu ? 'border-destructive' : ''}
|
||||
/>
|
||||
{errors.var_cpu && (
|
||||
<p className="mt-1 text-xs text-destructive">{errors.var_cpu}</p>
|
||||
)}
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
RAM (MB) *
|
||||
</label>
|
||||
<Input
|
||||
type="number"
|
||||
min="1"
|
||||
value={typeof advancedVars.var_ram === 'boolean' ? '' : (advancedVars.var_ram ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_ram', parseInt(e.target.value) || 1024)}
|
||||
className={errors.var_ram ? 'border-destructive' : ''}
|
||||
/>
|
||||
{errors.var_ram && (
|
||||
<p className="mt-1 text-xs text-destructive">{errors.var_ram}</p>
|
||||
)}
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Disk Size (GB) *
|
||||
</label>
|
||||
<Input
|
||||
type="number"
|
||||
min="1"
|
||||
value={typeof advancedVars.var_disk === 'boolean' ? '' : (advancedVars.var_disk ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_disk', parseInt(e.target.value) || 4)}
|
||||
className={errors.var_disk ? 'border-destructive' : ''}
|
||||
/>
|
||||
{errors.var_disk && (
|
||||
<p className="mt-1 text-xs text-destructive">{errors.var_disk}</p>
|
||||
)}
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Unprivileged
|
||||
</label>
|
||||
<select
|
||||
value={typeof advancedVars.var_unprivileged === 'boolean' ? (advancedVars.var_unprivileged ? 0 : 1) : (advancedVars.var_unprivileged ?? 1)}
|
||||
onChange={(e) => updateAdvancedVar('var_unprivileged', parseInt(e.target.value))}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none"
|
||||
>
|
||||
<option value={1}>Yes (Unprivileged)</option>
|
||||
<option value={0}>No (Privileged)</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Network */}
|
||||
<div>
|
||||
<h3 className="text-lg font-medium text-foreground mb-4">Network</h3>
|
||||
<div className="grid grid-cols-2 gap-4">
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Network Mode
|
||||
</label>
|
||||
<select
|
||||
value={(typeof advancedVars.var_net === 'string' && advancedVars.var_net.includes('/')) ? 'static' : (typeof advancedVars.var_net === 'boolean' ? 'dhcp' : (advancedVars.var_net ?? 'dhcp'))}
|
||||
onChange={(e) => {
|
||||
if (e.target.value === 'static') {
|
||||
updateAdvancedVar('var_net', 'static');
|
||||
} else {
|
||||
updateAdvancedVar('var_net', e.target.value);
|
||||
// Clear IPv4 IP when switching away from static
|
||||
if (advancedVars.var_ip) {
|
||||
updateAdvancedVar('var_ip', '');
|
||||
}
|
||||
}
|
||||
}}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none"
|
||||
>
|
||||
<option value="dhcp">DHCP</option>
|
||||
<option value="static">Static</option>
|
||||
</select>
|
||||
</div>
|
||||
{(advancedVars.var_net === 'static' || (typeof advancedVars.var_net === 'string' && advancedVars.var_net.includes('/'))) && (
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
IPv4 Address (CIDR) *
|
||||
</label>
|
||||
<Input
|
||||
type="text"
|
||||
value={(typeof advancedVars.var_net === 'string' && advancedVars.var_net.includes('/')) ? advancedVars.var_net : (advancedVars.var_ip as string | undefined ?? '')}
|
||||
onChange={(e) => {
|
||||
// Store in var_ip temporarily, will be moved to var_net on confirm
|
||||
updateAdvancedVar('var_ip', e.target.value);
|
||||
}}
|
||||
placeholder="10.10.10.1/24"
|
||||
className={errors.var_ip ? 'border-destructive' : ''}
|
||||
/>
|
||||
{errors.var_ip && (
|
||||
<p className="mt-1 text-xs text-destructive">{errors.var_ip}</p>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Bridge
|
||||
</label>
|
||||
<Input
|
||||
type="text"
|
||||
value={typeof advancedVars.var_brg === 'boolean' ? '' : String(advancedVars.var_brg ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_brg', e.target.value)}
|
||||
placeholder="vmbr0"
|
||||
/>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Gateway (IP)
|
||||
</label>
|
||||
<Input
|
||||
type="text"
|
||||
value={typeof advancedVars.var_gateway === 'boolean' ? '' : String(advancedVars.var_gateway ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_gateway', e.target.value)}
|
||||
placeholder="Auto"
|
||||
className={errors.var_gateway ? 'border-destructive' : ''}
|
||||
/>
|
||||
{errors.var_gateway && (
|
||||
<p className="mt-1 text-xs text-destructive">{errors.var_gateway}</p>
|
||||
)}
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
IPv6 Method
|
||||
</label>
|
||||
<select
|
||||
value={typeof advancedVars.var_ipv6_method === 'boolean' ? 'none' : String(advancedVars.var_ipv6_method ?? 'none')}
|
||||
onChange={(e) => {
|
||||
updateAdvancedVar('var_ipv6_method', e.target.value);
|
||||
// Clear IPv6 static when switching away from static
|
||||
if (e.target.value !== 'static' && advancedVars.var_ipv6_static) {
|
||||
updateAdvancedVar('var_ipv6_static', '');
|
||||
}
|
||||
}}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none"
|
||||
>
|
||||
<option value="none">None</option>
|
||||
<option value="auto">Auto</option>
|
||||
<option value="dhcp">DHCP</option>
|
||||
<option value="static">Static</option>
|
||||
<option value="disable">Disable</option>
|
||||
</select>
|
||||
</div>
|
||||
{advancedVars.var_ipv6_method === 'static' && (
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
IPv6 Static Address *
|
||||
</label>
|
||||
<Input
|
||||
type="text"
|
||||
value={typeof advancedVars.var_ipv6_static === 'boolean' ? '' : String(advancedVars.var_ipv6_static ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_ipv6_static', e.target.value)}
|
||||
placeholder="2001:db8::1/64"
|
||||
className={errors.var_ipv6_static ? 'border-destructive' : ''}
|
||||
/>
|
||||
{errors.var_ipv6_static && (
|
||||
<p className="mt-1 text-xs text-destructive">{errors.var_ipv6_static}</p>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
VLAN Tag
|
||||
</label>
|
||||
<Input
|
||||
type="number"
|
||||
min="1"
|
||||
value={typeof advancedVars.var_vlan === 'boolean' ? '' : String(advancedVars.var_vlan ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_vlan', e.target.value ? parseInt(e.target.value) : '')}
|
||||
placeholder="None"
|
||||
className={errors.var_vlan ? 'border-destructive' : ''}
|
||||
/>
|
||||
{errors.var_vlan && (
|
||||
<p className="mt-1 text-xs text-destructive">{errors.var_vlan}</p>
|
||||
)}
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
MTU
|
||||
</label>
|
||||
<Input
|
||||
type="number"
|
||||
min="1"
|
||||
value={typeof advancedVars.var_mtu === 'boolean' ? '' : String(advancedVars.var_mtu ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_mtu', e.target.value ? parseInt(e.target.value) : 1500)}
|
||||
placeholder="1500"
|
||||
className={errors.var_mtu ? 'border-destructive' : ''}
|
||||
/>
|
||||
{errors.var_mtu && (
|
||||
<p className="mt-1 text-xs text-destructive">{errors.var_mtu}</p>
|
||||
)}
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
MAC Address
|
||||
</label>
|
||||
<Input
|
||||
type="text"
|
||||
value={typeof advancedVars.var_mac === 'boolean' ? '' : String(advancedVars.var_mac ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_mac', e.target.value)}
|
||||
placeholder="Auto"
|
||||
className={errors.var_mac ? 'border-destructive' : ''}
|
||||
/>
|
||||
{errors.var_mac && (
|
||||
<p className="mt-1 text-xs text-destructive">{errors.var_mac}</p>
|
||||
)}
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
DNS Nameserver (IP)
|
||||
</label>
|
||||
<Input
|
||||
type="text"
|
||||
value={typeof advancedVars.var_ns === 'boolean' ? '' : String(advancedVars.var_ns ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_ns', e.target.value)}
|
||||
placeholder="Auto"
|
||||
className={errors.var_ns ? 'border-destructive' : ''}
|
||||
/>
|
||||
{errors.var_ns && (
|
||||
<p className="mt-1 text-xs text-destructive">{errors.var_ns}</p>
|
||||
)}
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
DNS Search Domain
|
||||
</label>
|
||||
<Input
|
||||
type="text"
|
||||
value={typeof advancedVars.var_searchdomain === 'boolean' ? '' : String(advancedVars.var_searchdomain ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_searchdomain', e.target.value)}
|
||||
placeholder="e.g. local, home.lan"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Identity & Metadata */}
|
||||
<div>
|
||||
<h3 className="text-lg font-medium text-foreground mb-4">Identity & Metadata</h3>
|
||||
<div className="grid grid-cols-2 gap-4">
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Hostname *
|
||||
</label>
|
||||
<Input
|
||||
type="text"
|
||||
value={typeof advancedVars.var_hostname === 'boolean' ? '' : String(advancedVars.var_hostname ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_hostname', e.target.value)}
|
||||
placeholder={slug}
|
||||
/>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Root Password
|
||||
</label>
|
||||
<Input
|
||||
type="password"
|
||||
value={typeof advancedVars.var_pw === 'boolean' ? '' : String(advancedVars.var_pw ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_pw', e.target.value)}
|
||||
placeholder="Random (empty = auto-login)"
|
||||
/>
|
||||
</div>
|
||||
<div className="col-span-2">
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Tags (comma-separated)
|
||||
</label>
|
||||
<Input
|
||||
type="text"
|
||||
value={typeof advancedVars.var_tags === 'boolean' ? '' : String(advancedVars.var_tags ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_tags', e.target.value)}
|
||||
placeholder="community-script"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* SSH Access */}
|
||||
<div>
|
||||
<h3 className="text-lg font-medium text-foreground mb-4">SSH Access</h3>
|
||||
<div className="grid grid-cols-2 gap-4">
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Enable SSH
|
||||
</label>
|
||||
<select
|
||||
value={typeof advancedVars.var_ssh === 'boolean' ? (advancedVars.var_ssh ? 'yes' : 'no') : String(advancedVars.var_ssh ?? 'no')}
|
||||
onChange={(e) => updateAdvancedVar('var_ssh', e.target.value)}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none"
|
||||
>
|
||||
<option value="no">No</option>
|
||||
<option value="yes">Yes</option>
|
||||
</select>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
SSH Authorized Key
|
||||
</label>
|
||||
<Input
|
||||
type="text"
|
||||
value={typeof advancedVars.var_ssh_authorized_key === 'boolean' ? '' : String(advancedVars.var_ssh_authorized_key ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_ssh_authorized_key', e.target.value)}
|
||||
placeholder="ssh-rsa AAAA..."
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Container Features */}
|
||||
<div>
|
||||
<h3 className="text-lg font-medium text-foreground mb-4">Container Features</h3>
|
||||
<div className="grid grid-cols-2 gap-4">
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Nesting (Docker)
|
||||
</label>
|
||||
<select
|
||||
value={typeof advancedVars.var_nesting === 'boolean' ? 1 : (advancedVars.var_nesting ?? 1)}
|
||||
onChange={(e) => updateAdvancedVar('var_nesting', parseInt(e.target.value))}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none"
|
||||
>
|
||||
<option value={1}>Enabled</option>
|
||||
<option value={0}>Disabled</option>
|
||||
</select>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
FUSE
|
||||
</label>
|
||||
<select
|
||||
value={typeof advancedVars.var_fuse === 'boolean' ? 0 : (advancedVars.var_fuse ?? 0)}
|
||||
onChange={(e) => updateAdvancedVar('var_fuse', parseInt(e.target.value))}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none"
|
||||
>
|
||||
<option value={0}>Disabled</option>
|
||||
<option value={1}>Enabled</option>
|
||||
</select>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Keyctl
|
||||
</label>
|
||||
<select
|
||||
value={typeof advancedVars.var_keyctl === 'boolean' ? 0 : (advancedVars.var_keyctl ?? 0)}
|
||||
onChange={(e) => updateAdvancedVar('var_keyctl', parseInt(e.target.value))}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none"
|
||||
>
|
||||
<option value={0}>Disabled</option>
|
||||
<option value={1}>Enabled</option>
|
||||
</select>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Mknod
|
||||
</label>
|
||||
<select
|
||||
value={typeof advancedVars.var_mknod === 'boolean' ? 0 : (advancedVars.var_mknod ?? 0)}
|
||||
onChange={(e) => updateAdvancedVar('var_mknod', parseInt(e.target.value))}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none"
|
||||
>
|
||||
<option value={0}>Disabled</option>
|
||||
<option value={1}>Enabled</option>
|
||||
</select>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Mount Filesystems
|
||||
</label>
|
||||
<Input
|
||||
type="text"
|
||||
value={typeof advancedVars.var_mount_fs === 'boolean' ? '' : String(advancedVars.var_mount_fs ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_mount_fs', e.target.value)}
|
||||
placeholder="nfs,cifs"
|
||||
/>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Protection
|
||||
</label>
|
||||
<select
|
||||
value={typeof advancedVars.var_protection === 'boolean' ? (advancedVars.var_protection ? 'yes' : 'no') : String(advancedVars.var_protection ?? 'no')}
|
||||
onChange={(e) => updateAdvancedVar('var_protection', e.target.value)}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none"
|
||||
>
|
||||
<option value="no">No</option>
|
||||
<option value="yes">Yes</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* System Configuration */}
|
||||
<div>
|
||||
<h3 className="text-lg font-medium text-foreground mb-4">System Configuration</h3>
|
||||
<div className="grid grid-cols-2 gap-4">
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Timezone
|
||||
</label>
|
||||
<Input
|
||||
type="text"
|
||||
value={typeof advancedVars.var_timezone === 'boolean' ? '' : String(advancedVars.var_timezone ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_timezone', e.target.value)}
|
||||
placeholder="System"
|
||||
/>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Verbose
|
||||
</label>
|
||||
<select
|
||||
value={typeof advancedVars.var_verbose === 'boolean' ? (advancedVars.var_verbose ? 'yes' : 'no') : String(advancedVars.var_verbose ?? 'no')}
|
||||
onChange={(e) => updateAdvancedVar('var_verbose', e.target.value)}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none"
|
||||
>
|
||||
<option value="no">No</option>
|
||||
<option value="yes">Yes</option>
|
||||
</select>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
APT Cacher
|
||||
</label>
|
||||
<select
|
||||
value={typeof advancedVars.var_apt_cacher === 'boolean' ? (advancedVars.var_apt_cacher ? 'yes' : 'no') : String(advancedVars.var_apt_cacher ?? 'no')}
|
||||
onChange={(e) => updateAdvancedVar('var_apt_cacher', e.target.value)}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none"
|
||||
>
|
||||
<option value="no">No</option>
|
||||
<option value="yes">Yes</option>
|
||||
</select>
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
APT Cacher IP
|
||||
</label>
|
||||
<Input
|
||||
type="text"
|
||||
value={typeof advancedVars.var_apt_cacher_ip === 'boolean' ? '' : String(advancedVars.var_apt_cacher_ip ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_apt_cacher_ip', e.target.value)}
|
||||
placeholder="192.168.1.10"
|
||||
className={errors.var_apt_cacher_ip ? 'border-destructive' : ''}
|
||||
/>
|
||||
{errors.var_apt_cacher_ip && (
|
||||
<p className="mt-1 text-xs text-destructive">{errors.var_apt_cacher_ip}</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Storage Selection */}
|
||||
<div>
|
||||
<h3 className="text-lg font-medium text-foreground mb-4">Storage Selection</h3>
|
||||
<div className="grid grid-cols-2 gap-4">
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Container Storage
|
||||
</label>
|
||||
<select
|
||||
value={typeof advancedVars.var_container_storage === 'boolean' ? '' : String(advancedVars.var_container_storage ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_container_storage', e.target.value)}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none"
|
||||
>
|
||||
<option value="">Auto</option>
|
||||
{rootfsStorages.map((storage) => (
|
||||
<option key={storage.name} value={storage.name}>
|
||||
{storage.name} ({storage.type})
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
{rootfsStorages.length === 0 && (
|
||||
<p className="mt-1 text-xs text-muted-foreground">
|
||||
Could not fetch storages. Leave empty for auto selection.
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
<div>
|
||||
<label className="block text-sm font-medium text-foreground mb-2">
|
||||
Template Storage
|
||||
</label>
|
||||
<select
|
||||
value={typeof advancedVars.var_template_storage === 'boolean' ? '' : String(advancedVars.var_template_storage ?? '')}
|
||||
onChange={(e) => updateAdvancedVar('var_template_storage', e.target.value)}
|
||||
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none"
|
||||
>
|
||||
<option value="">Auto</option>
|
||||
{templateStorages.map((storage) => (
|
||||
<option key={storage.name} value={storage.name}>
|
||||
{storage.name} ({storage.type})
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
{templateStorages.length === 0 && (
|
||||
<p className="mt-1 text-xs text-muted-foreground">
|
||||
Could not fetch storages. Leave empty for auto selection.
|
||||
</p>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Action Buttons */}
|
||||
<div className="flex justify-end space-x-3 mt-6 pt-6 border-t border-border">
|
||||
<Button onClick={onClose} variant="outline" size="default">
|
||||
Cancel
|
||||
</Button>
|
||||
<Button onClick={handleConfirm} variant="default" size="default">
|
||||
Confirm
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
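Note: the advanced fields above all end up in the `EnvVars` record that the modal hands to `onConfirm`. A minimal sketch of how such a record could be tidied before submission; the helper name and the drop-empty rule are assumptions for illustration, not code from this PR:

```ts
// Hypothetical helper (not part of this PR): strip untouched fields so the
// install script can fall back to its own defaults.
type EnvVars = Record<string, string | number | boolean>;

function pruneEnvVars(raw: EnvVars): EnvVars {
  const cleaned: EnvVars = {};
  for (const [key, value] of Object.entries(raw)) {
    // '' covers text inputs left blank and storage selects left on "Auto".
    if (value === '') continue;
    cleaned[key] = value; // 'yes'/'no' selects stay as the literal strings the scripts expect
  }
  return cleaned;
}

// Example: { var_timezone: '', var_verbose: 'yes' } -> { var_verbose: 'yes' }
```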
||||
@@ -2,26 +2,31 @@
|
||||
|
||||
import { useState, useEffect } from 'react';
|
||||
import type { Server } from '../../types/server';
|
||||
import type { Script } from '../../types/script';
|
||||
import { Button } from './ui/button';
|
||||
import { ColorCodedDropdown } from './ColorCodedDropdown';
|
||||
import { SettingsModal } from './SettingsModal';
|
||||
import { ConfigurationModal, type EnvVars } from './ConfigurationModal';
|
||||
import { useRegisterModal } from './modal/ModalStackProvider';
|
||||
|
||||
|
||||
interface ExecutionModeModalProps {
|
||||
isOpen: boolean;
|
||||
onClose: () => void;
|
||||
onExecute: (mode: 'local' | 'ssh', server?: Server) => void;
|
||||
onExecute: (mode: 'local' | 'ssh', server?: Server, envVars?: EnvVars) => void;
|
||||
scriptName: string;
|
||||
script?: Script | null;
|
||||
}
|
||||
|
||||
export function ExecutionModeModal({ isOpen, onClose, onExecute, scriptName }: ExecutionModeModalProps) {
|
||||
export function ExecutionModeModal({ isOpen, onClose, onExecute, scriptName, script }: ExecutionModeModalProps) {
|
||||
useRegisterModal(isOpen, { id: 'execution-mode-modal', allowEscape: true, onClose });
|
||||
const [servers, setServers] = useState<Server[]>([]);
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const [selectedServer, setSelectedServer] = useState<Server | null>(null);
|
||||
const [settingsModalOpen, setSettingsModalOpen] = useState(false);
|
||||
const [configModalOpen, setConfigModalOpen] = useState(false);
|
||||
const [configMode, setConfigMode] = useState<'default' | 'advanced'>('default');
|
||||
|
||||
useEffect(() => {
|
||||
if (isOpen) {
|
||||
@@ -64,19 +69,25 @@ export function ExecutionModeModal({ isOpen, onClose, onExecute, scriptName }: E
|
||||
}
|
||||
};
|
||||
|
||||
const handleExecute = () => {
|
||||
const handleConfigModeSelect = (mode: 'default' | 'advanced') => {
|
||||
if (!selectedServer) {
|
||||
setError('Please select a server for SSH execution');
|
||||
setError('Please select a server first');
|
||||
return;
|
||||
}
|
||||
|
||||
onExecute('ssh', selectedServer);
|
||||
setConfigMode(mode);
|
||||
setConfigModalOpen(true);
|
||||
};
|
||||
|
||||
const handleConfigConfirm = (envVars: EnvVars) => {
|
||||
if (!selectedServer) return;
|
||||
setConfigModalOpen(false);
|
||||
onExecute('ssh', selectedServer, envVars);
|
||||
onClose();
|
||||
};
|
||||
|
||||
|
||||
const handleServerSelect = (server: Server | null) => {
|
||||
setSelectedServer(server);
|
||||
setError(null); // Clear error when server is selected
|
||||
};
|
||||
|
||||
|
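With the widened `onExecute` signature, a parent only has to accept the extra `envVars` argument; server selection, the Default/Advanced choice, and the ConfigurationModal all stay inside this component. A hedged sketch of the wiring, with the parent component name and import path for the modal invented for illustration:

```tsx
// Illustrative parent (component name is an assumption); type import paths
// mirror the ones used in this file.
import { ExecutionModeModal } from './ExecutionModeModal';
import type { EnvVars } from './ConfigurationModal';
import type { Server } from '../../types/server';

export function InstallLauncher({ scriptName }: { scriptName: string }) {
  const handleExecute = (mode: 'local' | 'ssh', server?: Server, envVars?: EnvVars) => {
    // envVars is only defined after the user confirmed the ConfigurationModal.
    console.log('execute', mode, server?.name, envVars);
  };

  return (
    <ExecutionModeModal
      isOpen
      onClose={() => undefined}
      onExecute={handleExecute}
      scriptName={scriptName}
    />
  );
}
```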
||||
@@ -164,6 +175,31 @@ export function ExecutionModeModal({ isOpen, onClose, onExecute, scriptName }: E
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Configuration Mode Selection */}
|
||||
<div className="space-y-3">
|
||||
<p className="text-sm text-muted-foreground text-center">
|
||||
Choose configuration mode:
|
||||
</p>
|
||||
<div className="flex gap-3">
|
||||
<Button
|
||||
onClick={() => handleConfigModeSelect('default')}
|
||||
variant="default"
|
||||
size="default"
|
||||
className="flex-1"
|
||||
>
|
||||
Default
|
||||
</Button>
|
||||
<Button
|
||||
onClick={() => handleConfigModeSelect('advanced')}
|
||||
variant="outline"
|
||||
size="default"
|
||||
className="flex-1"
|
||||
>
|
||||
Advanced (Beta)
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Action Buttons */}
|
||||
<div className="flex justify-end space-x-3">
|
||||
<Button
|
||||
@@ -173,13 +209,6 @@ export function ExecutionModeModal({ isOpen, onClose, onExecute, scriptName }: E
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
onClick={handleExecute}
|
||||
variant="default"
|
||||
size="default"
|
||||
>
|
||||
Install
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
) : (
|
||||
@@ -204,6 +233,33 @@ export function ExecutionModeModal({ isOpen, onClose, onExecute, scriptName }: E
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Configuration Mode Selection - only show when server is selected */}
|
||||
{selectedServer && (
|
||||
<div className="space-y-3 pt-4 border-t border-border">
|
||||
<p className="text-sm text-muted-foreground text-center">
|
||||
Choose configuration mode:
|
||||
</p>
|
||||
<div className="flex gap-3">
|
||||
<Button
|
||||
onClick={() => handleConfigModeSelect('default')}
|
||||
variant="default"
|
||||
size="default"
|
||||
className="flex-1"
|
||||
>
|
||||
Default
|
||||
</Button>
|
||||
<Button
|
||||
onClick={() => handleConfigModeSelect('advanced')}
|
||||
variant="outline"
|
||||
size="default"
|
||||
className="flex-1"
|
||||
>
|
||||
Advanced
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Action Buttons */}
|
||||
<div className="flex justify-end space-x-3">
|
||||
<Button
|
||||
@@ -213,15 +269,6 @@ export function ExecutionModeModal({ isOpen, onClose, onExecute, scriptName }: E
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button
|
||||
onClick={handleExecute}
|
||||
disabled={!selectedServer}
|
||||
variant="default"
|
||||
size="default"
|
||||
className={!selectedServer ? 'bg-muted-foreground cursor-not-allowed' : ''}
|
||||
>
|
||||
Run on Server
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
@@ -234,6 +281,16 @@ export function ExecutionModeModal({ isOpen, onClose, onExecute, scriptName }: E
|
||||
isOpen={settingsModalOpen}
|
||||
onClose={handleSettingsModalClose}
|
||||
/>
|
||||
|
||||
{/* Configuration Modal */}
|
||||
<ConfigurationModal
|
||||
isOpen={configModalOpen}
|
||||
onClose={() => setConfigModalOpen(false)}
|
||||
onConfirm={handleConfigConfirm}
|
||||
script={script ?? null}
|
||||
server={selectedServer}
|
||||
mode={configMode}
|
||||
/>
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ export function Footer({ onOpenReleaseNotes }: FooterProps) {
|
||||
<div className="container mx-auto px-4">
|
||||
<div className="flex flex-col sm:flex-row items-center justify-between gap-2 text-sm text-muted-foreground">
|
||||
<div className="flex items-center gap-2">
|
||||
<span>© 2024 PVE Scripts Local</span>
|
||||
<span>© 2026 PVE Scripts Local</span>
|
||||
{versionData?.success && versionData.version && (
|
||||
<Button
|
||||
variant="ghost"
|
||||
|
||||
@@ -1630,7 +1630,7 @@ export function GeneralSettingsModal({
|
||||
https://github.com/owner/repo)
|
||||
</p>
|
||||
</div>
|
||||
<div className="flex items-center justify-between">
|
||||
<div className="border-border flex items-center justify-between gap-3 rounded-lg border p-3">
|
||||
<div>
|
||||
<p className="text-foreground text-sm font-medium">
|
||||
Enable after adding
|
||||
@@ -1644,6 +1644,7 @@ export function GeneralSettingsModal({
|
||||
onCheckedChange={setNewRepoEnabled}
|
||||
disabled={isAddingRepo}
|
||||
label="Enable repository"
|
||||
labelPosition="left"
|
||||
/>
|
||||
</div>
|
||||
<Button
|
||||
@@ -1739,44 +1740,7 @@ export function GeneralSettingsModal({
|
||||
{repo.enabled ? "• Enabled" : "• Disabled"}
|
||||
</p>
|
||||
</div>
|
||||
<div className="flex items-center gap-2">
|
||||
<Toggle
|
||||
checked={repo.enabled}
|
||||
onCheckedChange={async (enabled) => {
|
||||
setMessage(null);
|
||||
try {
|
||||
const result =
|
||||
await updateRepoMutation.mutateAsync({
|
||||
id: repo.id,
|
||||
enabled,
|
||||
});
|
||||
if (result.success) {
|
||||
setMessage({
|
||||
type: "success",
|
||||
text: `Repository ${enabled ? "enabled" : "disabled"} successfully!`,
|
||||
});
|
||||
await refetchRepositories();
|
||||
} else {
|
||||
setMessage({
|
||||
type: "error",
|
||||
text:
|
||||
result.error ??
|
||||
"Failed to update repository",
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
setMessage({
|
||||
type: "error",
|
||||
text:
|
||||
error instanceof Error
|
||||
? error.message
|
||||
: "Failed to update repository",
|
||||
});
|
||||
}
|
||||
}}
|
||||
disabled={updateRepoMutation.isPending}
|
||||
label={repo.enabled ? "Disable" : "Enable"}
|
||||
/>
|
||||
<div className="flex items-center gap-2 flex-shrink-0">
|
||||
<Button
|
||||
onClick={async () => {
|
||||
if (!repo.is_removable) {
|
||||
@@ -1837,6 +1801,44 @@ export function GeneralSettingsModal({
|
||||
>
|
||||
<Trash2 className="h-4 w-4" />
|
||||
</Button>
|
||||
<Toggle
|
||||
checked={repo.enabled}
|
||||
onCheckedChange={async (enabled) => {
|
||||
setMessage(null);
|
||||
try {
|
||||
const result =
|
||||
await updateRepoMutation.mutateAsync({
|
||||
id: repo.id,
|
||||
enabled,
|
||||
});
|
||||
if (result.success) {
|
||||
setMessage({
|
||||
type: "success",
|
||||
text: `Repository ${enabled ? "enabled" : "disabled"} successfully!`,
|
||||
});
|
||||
await refetchRepositories();
|
||||
} else {
|
||||
setMessage({
|
||||
type: "error",
|
||||
text:
|
||||
result.error ??
|
||||
"Failed to update repository",
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
setMessage({
|
||||
type: "error",
|
||||
text:
|
||||
error instanceof Error
|
||||
? error.message
|
||||
: "Failed to update repository",
|
||||
});
|
||||
}
|
||||
}}
|
||||
disabled={updateRepoMutation.isPending}
|
||||
label={repo.enabled ? "Disable" : "Enable"}
|
||||
labelPosition="left"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
),
|
||||
|
||||
@@ -12,6 +12,7 @@ import { LoadingModal } from "./LoadingModal";
|
||||
import { LXCSettingsModal } from "./LXCSettingsModal";
|
||||
import { StorageSelectionModal } from "./StorageSelectionModal";
|
||||
import { BackupWarningModal } from "./BackupWarningModal";
|
||||
import { CloneCountInputModal } from "./CloneCountInputModal";
|
||||
import type { Storage } from "~/server/services/storageService";
|
||||
import { getContrastColor } from "../../lib/colorUtils";
|
||||
import {
|
||||
@@ -68,6 +69,12 @@ export function InstalledScriptsTab() {
|
||||
server?: any;
|
||||
backupStorage?: string;
|
||||
isBackupOnly?: boolean;
|
||||
isClone?: boolean;
|
||||
executionId?: string;
|
||||
cloneCount?: number;
|
||||
hostnames?: string[];
|
||||
containerType?: 'lxc' | 'vm';
|
||||
storage?: string;
|
||||
} | null>(null);
|
||||
const [openingShell, setOpeningShell] = useState<{
|
||||
id: number;
|
||||
@@ -82,6 +89,14 @@ export function InstalledScriptsTab() {
|
||||
const [isLoadingStorages, setIsLoadingStorages] = useState(false);
|
||||
const [showBackupWarning, setShowBackupWarning] = useState(false);
|
||||
const [isPreUpdateBackup, setIsPreUpdateBackup] = useState(false); // Track if storage selection is for pre-update backup
|
||||
const [pendingCloneScript, setPendingCloneScript] = useState<InstalledScript | null>(null);
|
||||
const [cloneStorages, setCloneStorages] = useState<Storage[]>([]);
|
||||
const [isLoadingCloneStorages, setIsLoadingCloneStorages] = useState(false);
|
||||
const [showCloneStorageSelection, setShowCloneStorageSelection] = useState(false);
|
||||
const [showCloneCountInput, setShowCloneCountInput] = useState(false);
|
||||
const [cloneContainerType, setCloneContainerType] = useState<'lxc' | 'vm' | null>(null);
|
||||
const [selectedCloneStorage, setSelectedCloneStorage] = useState<Storage | null>(null);
|
||||
// cloneCount is passed as a parameter to handleCloneCountSubmit, so no state is needed
|
||||
const [editingScriptId, setEditingScriptId] = useState<number | null>(null);
|
||||
const [editFormData, setEditFormData] = useState<{
|
||||
script_name: string;
|
||||
@@ -925,6 +940,201 @@ export function InstalledScriptsTab() {
|
||||
setShowStorageSelection(true);
|
||||
};
|
||||
|
||||
// Clone queries
|
||||
|
||||
const getContainerHostnameQuery = api.installedScripts.getContainerHostname.useQuery(
|
||||
{
|
||||
containerId: pendingCloneScript?.container_id ?? '',
|
||||
serverId: pendingCloneScript?.server_id ?? 0,
|
||||
containerType: cloneContainerType ?? 'lxc'
|
||||
},
|
||||
{ enabled: false }
|
||||
);
|
||||
|
||||
const executeCloneMutation = api.installedScripts.executeClone.useMutation();
|
||||
const utils = api.useUtils();
|
||||
|
||||
const fetchCloneStorages = async (serverId: number, _forceRefresh = false) => {
|
||||
setIsLoadingCloneStorages(true);
|
||||
try {
|
||||
// Use utils.fetch to call with the correct serverId
|
||||
const result = await utils.installedScripts.getCloneStorages.fetch({
|
||||
serverId,
|
||||
forceRefresh: _forceRefresh
|
||||
});
|
||||
if (result?.success && result.storages) {
|
||||
setCloneStorages(result.storages as Storage[]);
|
||||
} else {
|
||||
setErrorModal({
|
||||
isOpen: true,
|
||||
title: 'Failed to Fetch Storages',
|
||||
message: result?.error ?? 'Unknown error occurred',
|
||||
type: 'error'
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
setErrorModal({
|
||||
isOpen: true,
|
||||
title: 'Failed to Fetch Storages',
|
||||
message: error instanceof Error ? error.message : 'Unknown error occurred',
|
||||
type: 'error'
|
||||
});
|
||||
} finally {
|
||||
setIsLoadingCloneStorages(false);
|
||||
}
|
||||
};
|
||||
|
||||
const handleCloneScript = async (script: InstalledScript) => {
|
||||
if (!script.container_id) {
|
||||
setErrorModal({
|
||||
isOpen: true,
|
||||
title: 'Clone Failed',
|
||||
message: 'No Container ID available for this script',
|
||||
details: 'This script does not have a valid container ID and cannot be cloned.'
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (!script.server_id) {
|
||||
setErrorModal({
|
||||
isOpen: true,
|
||||
title: 'Clone Not Available',
|
||||
message: 'Clone is only available for SSH scripts with a configured server.',
|
||||
type: 'error'
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Store the script and determine container type using is_vm property
|
||||
setPendingCloneScript(script);
|
||||
|
||||
// Use is_vm property from batch detection (from main branch)
|
||||
// If not available, default to LXC
|
||||
const containerType = script.is_vm ? 'vm' : 'lxc';
|
||||
setCloneContainerType(containerType);
|
||||
|
||||
// Fetch storages and show selection modal
|
||||
void fetchCloneStorages(script.server_id, false);
|
||||
setShowCloneStorageSelection(true);
|
||||
};
|
||||
|
||||
const handleCloneStorageSelected = (storage: Storage) => {
|
||||
setShowCloneStorageSelection(false);
|
||||
setSelectedCloneStorage(storage);
|
||||
setShowCloneCountInput(true);
|
||||
};
|
||||
|
||||
const handleCloneCountSubmit = async (count: number) => {
|
||||
setShowCloneCountInput(false);
|
||||
|
||||
if (!pendingCloneScript || !cloneContainerType) {
|
||||
setErrorModal({
|
||||
isOpen: true,
|
||||
title: 'Clone Failed',
|
||||
message: 'Missing required information for cloning.',
|
||||
type: 'error'
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
// Get original hostname
|
||||
const hostnameResult = await getContainerHostnameQuery.refetch();
|
||||
|
||||
if (!hostnameResult.data?.success || !hostnameResult.data.hostname) {
|
||||
setErrorModal({
|
||||
isOpen: true,
|
||||
title: 'Clone Failed',
|
||||
message: 'Could not retrieve container hostname.',
|
||||
type: 'error'
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const originalHostname = hostnameResult.data.hostname;
|
||||
|
||||
// Generate clone hostnames via a utils fetch call, passing the original hostname
|
||||
const hostnamesResult = await utils.installedScripts.generateCloneHostnames.fetch({
|
||||
originalHostname,
|
||||
containerType: cloneContainerType ?? 'lxc',
|
||||
serverId: pendingCloneScript.server_id!,
|
||||
count
|
||||
});
|
||||
|
||||
if (!hostnamesResult?.success || !hostnamesResult.hostnames.length) {
|
||||
setErrorModal({
|
||||
isOpen: true,
|
||||
title: 'Clone Failed',
|
||||
message: hostnamesResult?.error ?? 'Could not generate clone hostnames.',
|
||||
type: 'error'
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
const hostnames = hostnamesResult.hostnames;
|
||||
|
||||
// Execute clone (nextIds will be obtained sequentially in server.js)
|
||||
const cloneResult = await executeCloneMutation.mutateAsync({
|
||||
containerId: pendingCloneScript.container_id!,
|
||||
serverId: pendingCloneScript.server_id!,
|
||||
storage: selectedCloneStorage!.name,
|
||||
cloneCount: count,
|
||||
hostnames: hostnames,
|
||||
containerType: cloneContainerType
|
||||
});
|
||||
|
||||
if (!cloneResult.success || !cloneResult.executionId) {
|
||||
setErrorModal({
|
||||
isOpen: true,
|
||||
title: 'Clone Failed',
|
||||
message: cloneResult.error ?? 'Failed to start clone operation.',
|
||||
type: 'error'
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Get server info for websocket
|
||||
const server = pendingCloneScript.server_id && pendingCloneScript.server_user ? {
|
||||
id: pendingCloneScript.server_id,
|
||||
name: pendingCloneScript.server_name,
|
||||
ip: pendingCloneScript.server_ip,
|
||||
user: pendingCloneScript.server_user,
|
||||
password: pendingCloneScript.server_password,
|
||||
auth_type: pendingCloneScript.server_auth_type ?? 'password',
|
||||
ssh_key: pendingCloneScript.server_ssh_key,
|
||||
ssh_key_passphrase: pendingCloneScript.server_ssh_key_passphrase,
|
||||
ssh_port: pendingCloneScript.server_ssh_port ?? 22,
|
||||
} : null;
|
||||
|
||||
// Set up terminal for clone execution
|
||||
setUpdatingScript({
|
||||
id: pendingCloneScript.id,
|
||||
containerId: pendingCloneScript.container_id!,
|
||||
server: server,
|
||||
isClone: true,
|
||||
executionId: cloneResult.executionId,
|
||||
cloneCount: count,
|
||||
hostnames: hostnames,
|
||||
containerType: cloneContainerType,
|
||||
storage: selectedCloneStorage!.name
|
||||
});
|
||||
|
||||
// Reset clone state
|
||||
setPendingCloneScript(null);
|
||||
setCloneStorages([]);
|
||||
setSelectedCloneStorage(null);
|
||||
setCloneContainerType(null);
|
||||
// Reset clone count (no state variable needed, count is passed as parameter)
|
||||
} catch (error) {
|
||||
setErrorModal({
|
||||
isOpen: true,
|
||||
title: 'Clone Failed',
|
||||
message: error instanceof Error ? error.message : 'Unknown error occurred',
|
||||
type: 'error'
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
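The count and hostnames collected here are handed off through `executeClone` and the websocket; the actual clone commands live in server.js and are not part of this diff. A rough sketch of why target IDs have to be resolved one clone at a time rather than up front (the `runSsh` helper and the exact clone command are assumptions):

```ts
// Sketch only: `pvesh get /cluster/nextid` reports the lowest free VMID at the
// moment it runs, so asking once and reusing the answer for N clones would give
// every clone the same ID. Resolving inside the loop avoids that collision.
type RunSsh = (command: string) => Promise<string>;

async function cloneSequentially(
  runSsh: RunSsh,          // assumed SSH command runner
  sourceId: string,
  hostnames: string[],
  storage: string,
): Promise<string[]> {
  const newIds: string[] = [];
  for (const hostname of hostnames) {
    const nextId = (await runSsh('pvesh get /cluster/nextid')).trim();
    // Illustrative clone invocation; the real command is built in server.js.
    await runSsh(`pct clone ${sourceId} ${nextId} --hostname ${hostname} --storage ${storage} --full`);
    newIds.push(nextId);
  }
  return newIds;
}
```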
const handleOpenShell = (script: InstalledScript) => {
|
||||
if (!script.container_id) {
|
||||
setErrorModal({
|
||||
@@ -1216,26 +1426,25 @@ export function InstalledScriptsTab() {
|
||||
<div className="mb-8" data-terminal="update">
|
||||
<Terminal
|
||||
scriptPath={
|
||||
updatingScript.isBackupOnly
|
||||
updatingScript.isClone
|
||||
? `clone-${updatingScript.containerId}`
|
||||
: updatingScript.isBackupOnly
|
||||
? `backup-${updatingScript.containerId}`
|
||||
: `update-${updatingScript.containerId}`
|
||||
}
|
||||
onClose={handleCloseUpdateTerminal}
|
||||
mode={updatingScript.server ? "ssh" : "local"}
|
||||
server={updatingScript.server}
|
||||
isUpdate={!updatingScript.isBackupOnly}
|
||||
isUpdate={!updatingScript.isBackupOnly && !updatingScript.isClone}
|
||||
isBackup={updatingScript.isBackupOnly}
|
||||
isClone={updatingScript.isClone}
|
||||
containerId={updatingScript.containerId}
|
||||
storage={
|
||||
updatingScript.isBackupOnly
|
||||
? updatingScript.backupStorage
|
||||
: undefined
|
||||
}
|
||||
backupStorage={
|
||||
!updatingScript.isBackupOnly
|
||||
? updatingScript.backupStorage
|
||||
: undefined
|
||||
}
|
||||
executionId={updatingScript.executionId}
|
||||
cloneCount={updatingScript.cloneCount}
|
||||
hostnames={updatingScript.hostnames}
|
||||
containerType={updatingScript.containerType}
|
||||
storage={updatingScript.isClone ? updatingScript.storage : (updatingScript.isBackupOnly ? updatingScript.backupStorage : undefined)}
|
||||
backupStorage={!updatingScript.isBackupOnly && !updatingScript.isClone ? updatingScript.backupStorage : undefined}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
@@ -1716,6 +1925,7 @@ export function InstalledScriptsTab() {
|
||||
onCancel={handleCancelEdit}
|
||||
onUpdate={() => handleUpdateScript(script)}
|
||||
onBackup={() => handleBackupScript(script)}
|
||||
onClone={() => handleCloneScript(script)}
|
||||
onShell={() => handleOpenShell(script)}
|
||||
onDelete={() => handleDeleteScript(Number(script.id))}
|
||||
isUpdating={updateScriptMutation.isPending}
|
||||
@@ -2067,8 +2277,22 @@ export function InstalledScriptsTab() {
|
||||
</DropdownMenuItem>
|
||||
)}
|
||||
{script.container_id &&
|
||||
script.execution_mode === "ssh" &&
|
||||
!script.is_vm && (
|
||||
script.execution_mode === "ssh" && (
|
||||
<DropdownMenuItem
|
||||
onClick={() =>
|
||||
handleCloneScript(script)
|
||||
}
|
||||
disabled={
|
||||
containerStatuses.get(script.id) ===
|
||||
"stopped"
|
||||
}
|
||||
className="text-muted-foreground hover:text-foreground hover:bg-muted/20 focus:bg-muted/20"
|
||||
>
|
||||
Clone
|
||||
</DropdownMenuItem>
|
||||
)}
|
||||
{script.container_id &&
|
||||
script.execution_mode === "ssh" && (
|
||||
<DropdownMenuItem
|
||||
onClick={() =>
|
||||
handleOpenShell(script)
|
||||
@@ -2357,6 +2581,43 @@ export function InstalledScriptsTab() {
|
||||
}}
|
||||
/>
|
||||
|
||||
{/* Clone Storage Selection Modal */}
|
||||
<StorageSelectionModal
|
||||
isOpen={showCloneStorageSelection}
|
||||
onClose={() => {
|
||||
setShowCloneStorageSelection(false);
|
||||
setPendingCloneScript(null);
|
||||
setCloneStorages([]);
|
||||
}}
|
||||
onSelect={handleCloneStorageSelected}
|
||||
storages={cloneStorages}
|
||||
isLoading={isLoadingCloneStorages}
|
||||
onRefresh={() => {
|
||||
if (pendingCloneScript?.server_id) {
|
||||
void fetchCloneStorages(pendingCloneScript.server_id, true);
|
||||
}
|
||||
}}
|
||||
title="Select Clone Storage"
|
||||
description="Select a storage to use for cloning. Only storages with rootdir content are shown."
|
||||
filterFn={(storage) => {
|
||||
return storage.content.includes('rootdir');
|
||||
}}
|
||||
showBackupTag={false}
|
||||
/>
|
||||
|
||||
{/* Clone Count Input Modal */}
|
||||
<CloneCountInputModal
|
||||
isOpen={showCloneCountInput}
|
||||
onClose={() => {
|
||||
setShowCloneCountInput(false);
|
||||
setPendingCloneScript(null);
|
||||
setCloneStorages([]);
|
||||
setSelectedCloneStorage(null);
|
||||
}}
|
||||
onSubmit={handleCloneCountSubmit}
|
||||
storageName={selectedCloneStorage?.name ?? ''}
|
||||
/>
|
||||
|
||||
{/* LXC Settings Modal */}
|
||||
<LXCSettingsModal
|
||||
isOpen={lxcSettingsModal.isOpen}
|
||||
|
||||
@@ -28,6 +28,7 @@ interface ScriptDetailModalProps {
|
||||
scriptName: string,
|
||||
mode?: "local" | "ssh",
|
||||
server?: Server,
|
||||
envVars?: Record<string, string | number | boolean>,
|
||||
) => void;
|
||||
}
|
||||
|
||||
@@ -183,7 +184,7 @@ export function ScriptDetailModal({
|
||||
setExecutionModeOpen(true);
|
||||
};
|
||||
|
||||
const handleExecuteScript = (mode: "local" | "ssh", server?: Server) => {
|
||||
const handleExecuteScript = (mode: "local" | "ssh", server?: Server, envVars?: Record<string, string | number | boolean>) => {
|
||||
if (!script || !onInstallScript) return;
|
||||
|
||||
// Find the script path based on selected version type
|
||||
@@ -197,8 +198,8 @@ export function ScriptDetailModal({
|
||||
const scriptPath = `scripts/${scriptMethod.script}`;
|
||||
const scriptName = script.name;
|
||||
|
||||
// Pass execution mode and server info to the parent
|
||||
onInstallScript(scriptPath, scriptName, mode, server);
|
||||
// Pass execution mode, server info, and envVars to the parent
|
||||
onInstallScript(scriptPath, scriptName, mode, server, envVars);
|
||||
|
||||
onClose(); // Close the modal when starting installation
|
||||
}
|
||||
@@ -935,6 +936,7 @@ export function ScriptDetailModal({
|
||||
{script && (
|
||||
<ExecutionModeModal
|
||||
scriptName={script.name}
|
||||
script={script}
|
||||
isOpen={executionModeOpen}
|
||||
onClose={() => setExecutionModeOpen(false)}
|
||||
onExecute={handleExecuteScript}
|
||||
|
||||
@@ -46,6 +46,7 @@ interface ScriptInstallationCardProps {
|
||||
onCancel: () => void;
|
||||
onUpdate: () => void;
|
||||
onBackup?: () => void;
|
||||
onClone?: () => void;
|
||||
onShell: () => void;
|
||||
onDelete: () => void;
|
||||
isUpdating: boolean;
|
||||
@@ -71,6 +72,7 @@ export function ScriptInstallationCard({
|
||||
onCancel,
|
||||
onUpdate,
|
||||
onBackup,
|
||||
onClone,
|
||||
onShell,
|
||||
onDelete,
|
||||
isUpdating,
|
||||
@@ -319,7 +321,16 @@ export function ScriptInstallationCard({
|
||||
Backup
|
||||
</DropdownMenuItem>
|
||||
)}
|
||||
{script.container_id && script.execution_mode === 'ssh' && !script.is_vm && (
|
||||
{script.container_id && script.execution_mode === 'ssh' && onClone && (
|
||||
<DropdownMenuItem
|
||||
onClick={onClone}
|
||||
disabled={containerStatus === 'stopped'}
|
||||
className="text-muted-foreground hover:text-foreground hover:bg-muted/20 focus:bg-muted/20"
|
||||
>
|
||||
Clone
|
||||
</DropdownMenuItem>
|
||||
)}
|
||||
{script.container_id && script.execution_mode === 'ssh' && (
|
||||
<DropdownMenuItem
|
||||
onClick={onShell}
|
||||
disabled={containerStatus === 'stopped'}
|
||||
|
||||
@@ -13,6 +13,10 @@ interface StorageSelectionModalProps {
|
||||
storages: Storage[];
|
||||
isLoading: boolean;
|
||||
onRefresh: () => void;
|
||||
title?: string;
|
||||
description?: string;
|
||||
filterFn?: (storage: Storage) => boolean;
|
||||
showBackupTag?: boolean;
|
||||
}
|
||||
|
||||
export function StorageSelectionModal({
|
||||
@@ -21,7 +25,11 @@ export function StorageSelectionModal({
|
||||
onSelect,
|
||||
storages,
|
||||
isLoading,
|
||||
onRefresh
|
||||
onRefresh,
|
||||
title = 'Select Storage',
|
||||
description = 'Select a storage to use.',
|
||||
filterFn,
|
||||
showBackupTag = true
|
||||
}: StorageSelectionModalProps) {
|
||||
const [selectedStorage, setSelectedStorage] = useState<Storage | null>(null);
|
||||
|
||||
@@ -41,8 +49,8 @@ export function StorageSelectionModal({
|
||||
onClose();
|
||||
};
|
||||
|
||||
// Filter to show only backup-capable storages
|
||||
const backupStorages = storages.filter(s => s.supportsBackup);
|
||||
// Filter storages using filterFn if provided, otherwise filter to show only backup-capable storages
|
||||
const filteredStorages = filterFn ? storages.filter(filterFn) : storages.filter(s => s.supportsBackup);
|
||||
|
||||
return (
|
||||
<div className="fixed inset-0 backdrop-blur-sm bg-black/50 flex items-center justify-center z-50 p-4">
|
||||
@@ -51,7 +59,7 @@ export function StorageSelectionModal({
|
||||
<div className="flex items-center justify-between p-6 border-b border-border">
|
||||
<div className="flex items-center gap-3">
|
||||
<Database className="h-6 w-6 text-primary" />
|
||||
<h2 className="text-2xl font-bold text-card-foreground">Select Backup Storage</h2>
|
||||
<h2 className="text-2xl font-bold text-card-foreground">{title}</h2>
|
||||
</div>
|
||||
<Button
|
||||
onClick={handleClose}
|
||||
@@ -72,7 +80,7 @@ export function StorageSelectionModal({
|
||||
<div className="inline-block animate-spin rounded-full h-8 w-8 border-b-2 border-primary mb-4"></div>
|
||||
<p className="text-muted-foreground">Loading storages...</p>
|
||||
</div>
|
||||
) : backupStorages.length === 0 ? (
|
||||
) : filteredStorages.length === 0 ? (
|
||||
<div className="text-center py-8">
|
||||
<Database className="h-12 w-12 text-muted-foreground mx-auto mb-4" />
|
||||
<p className="text-foreground mb-2">No backup-capable storages found</p>
|
||||
@@ -87,12 +95,12 @@ export function StorageSelectionModal({
|
||||
) : (
|
||||
<>
|
||||
<p className="text-sm text-muted-foreground mb-4">
|
||||
Select a storage to use for the backup. Only storages that support backups are shown.
|
||||
{description}
|
||||
</p>
|
||||
|
||||
{/* Storage List */}
|
||||
<div className="space-y-2 max-h-96 overflow-y-auto mb-4">
|
||||
{backupStorages.map((storage) => (
|
||||
{filteredStorages.map((storage) => (
|
||||
<div
|
||||
key={storage.name}
|
||||
onClick={() => setSelectedStorage(storage)}
|
||||
@@ -106,9 +114,11 @@ export function StorageSelectionModal({
|
||||
<div className="flex-1">
|
||||
<div className="flex items-center gap-2 mb-1">
|
||||
<h3 className="font-medium text-foreground">{storage.name}</h3>
|
||||
<span className="px-2 py-0.5 text-xs font-medium rounded bg-success/20 text-success border border-success/30">
|
||||
Backup
|
||||
</span>
|
||||
{showBackupTag && (
|
||||
<span className="px-2 py-0.5 text-xs font-medium rounded bg-success/20 text-success border border-success/30">
|
||||
Backup
|
||||
</span>
|
||||
)}
|
||||
<span className="px-2 py-0.5 text-xs font-medium rounded bg-muted text-muted-foreground">
|
||||
{storage.type}
|
||||
</span>
|
||||
|
||||
@@ -13,9 +13,15 @@ interface TerminalProps {
|
||||
isUpdate?: boolean;
|
||||
isShell?: boolean;
|
||||
isBackup?: boolean;
|
||||
isClone?: boolean;
|
||||
containerId?: string;
|
||||
storage?: string;
|
||||
backupStorage?: string;
|
||||
executionId?: string;
|
||||
cloneCount?: number;
|
||||
hostnames?: string[];
|
||||
containerType?: 'lxc' | 'vm';
|
||||
envVars?: Record<string, string | number | boolean>;
|
||||
}
|
||||
|
||||
interface TerminalMessage {
|
||||
@@ -24,7 +30,7 @@ interface TerminalMessage {
|
||||
timestamp: number;
|
||||
}
|
||||
|
||||
export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate = false, isShell = false, isBackup = false, containerId, storage, backupStorage }: TerminalProps) {
|
||||
export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate = false, isShell = false, isBackup = false, isClone = false, containerId, storage, backupStorage, executionId: propExecutionId, cloneCount, hostnames, containerType, envVars }: TerminalProps) {
|
||||
const [isConnected, setIsConnected] = useState(false);
|
||||
const [isRunning, setIsRunning] = useState(false);
|
||||
const [isClient, setIsClient] = useState(false);
|
||||
@@ -39,7 +45,16 @@ export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate
|
||||
const fitAddonRef = useRef<any>(null);
|
||||
const wsRef = useRef<WebSocket | null>(null);
|
||||
const inputHandlerRef = useRef<((data: string) => void) | null>(null);
|
||||
const [executionId, setExecutionId] = useState(() => `exec_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`);
|
||||
const [executionId, setExecutionId] = useState(() => propExecutionId ?? `exec_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`);
|
||||
|
||||
// Update executionId when propExecutionId changes
|
||||
useEffect(() => {
|
||||
if (propExecutionId) {
|
||||
setExecutionId(propExecutionId);
|
||||
}
|
||||
}, [propExecutionId]);
|
||||
|
||||
const effectiveExecutionId = propExecutionId ?? executionId;
|
||||
const isConnectingRef = useRef<boolean>(false);
|
||||
const hasConnectedRef = useRef<boolean>(false);
|
||||
|
||||
@@ -277,7 +292,7 @@ export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate
|
||||
if (wsRef.current && wsRef.current.readyState === WebSocket.OPEN) {
|
||||
const message = {
|
||||
action: 'input',
|
||||
executionId,
|
||||
executionId: effectiveExecutionId,
|
||||
input: data
|
||||
};
|
||||
wsRef.current.send(JSON.stringify(message));
|
||||
@@ -325,9 +340,11 @@ export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate
|
||||
|
||||
// Only auto-start on initial connection, not on reconnections
|
||||
if (isInitialConnection && !isRunning) {
|
||||
// Generate a new execution ID for the initial run
|
||||
const newExecutionId = `exec_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
|
||||
setExecutionId(newExecutionId);
|
||||
// Use propExecutionId if provided, otherwise generate a new one
|
||||
const newExecutionId = propExecutionId ?? `exec_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
|
||||
if (!propExecutionId) {
|
||||
setExecutionId(newExecutionId);
|
||||
}
|
||||
|
||||
const message = {
|
||||
action: 'start',
|
||||
@@ -338,9 +355,14 @@ export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate
|
||||
isUpdate,
|
||||
isShell,
|
||||
isBackup,
|
||||
isClone,
|
||||
containerId,
|
||||
storage,
|
||||
backupStorage
|
||||
backupStorage,
|
||||
cloneCount,
|
||||
hostnames,
|
||||
containerType,
|
||||
envVars
|
||||
};
|
||||
ws.send(JSON.stringify(message));
|
||||
}
|
||||
@@ -380,13 +402,15 @@ export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate
|
||||
wsRef.current.close();
|
||||
}
|
||||
};
|
||||
}, [scriptPath, mode, server, isUpdate, isShell, containerId, isMobile]);
|
||||
}, [scriptPath, mode, server, isUpdate, isShell, containerId, isMobile, envVars]);
|
||||
|
||||
const startScript = () => {
|
||||
if (wsRef.current && wsRef.current.readyState === WebSocket.OPEN && !isRunning) {
|
||||
// Generate a new execution ID for each script run
|
||||
const newExecutionId = `exec_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
|
||||
setExecutionId(newExecutionId);
|
||||
// Generate a new execution ID for each script run (unless propExecutionId is provided)
|
||||
const newExecutionId = propExecutionId ?? `exec_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
|
||||
if (!propExecutionId) {
|
||||
setExecutionId(newExecutionId);
|
||||
}
|
||||
|
||||
setIsStopped(false);
|
||||
wsRef.current.send(JSON.stringify({
|
||||
@@ -395,9 +419,17 @@ export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate
|
||||
executionId: newExecutionId,
|
||||
mode,
|
||||
server,
|
||||
envVars,
|
||||
isUpdate,
|
||||
isShell,
|
||||
containerId
|
||||
isBackup,
|
||||
isClone,
|
||||
containerId,
|
||||
storage,
|
||||
backupStorage,
|
||||
cloneCount,
|
||||
hostnames,
|
||||
containerType
|
||||
}));
|
||||
}
|
||||
};
|
||||
|
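In short, the terminal now treats a caller-supplied execution ID as authoritative and only mints its own when none is passed. Condensed below for reference (the helper name is illustrative, not from this PR):

```ts
// Caller-supplied IDs (e.g. the clone_* ID returned by executeClone) win;
// otherwise the component keeps generating its own exec_* IDs per run.
function resolveExecutionId(propExecutionId?: string): string {
  return propExecutionId ?? `exec_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
}
```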
||||
@@ -416,11 +416,20 @@ export function VersionDisplay({ onOpenReleaseNotes }: VersionDisplayProps = {})
|
||||
setShowUpdateConfirmation(true);
|
||||
};
|
||||
|
||||
// Helper to generate secure random string
|
||||
function getSecureRandomString(length: number): string {
|
||||
const array = new Uint8Array(length);
|
||||
window.crypto.getRandomValues(array);
|
||||
// Convert to base36 string (alphanumeric)
|
||||
return Array.from(array, b => b.toString(36)).join('').substr(0, length);
|
||||
}
|
||||
|
||||
const handleConfirmUpdate = () => {
|
||||
// Close the confirmation modal
|
||||
setShowUpdateConfirmation(false);
|
||||
// Start the actual update process
|
||||
const sessionId = `update_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
|
||||
const randomSuffix = getSecureRandomString(9);
|
||||
const sessionId = `update_${Date.now()}_${randomSuffix}`;
|
||||
const startTime = Date.now();
|
||||
|
||||
setIsUpdating(true);
|
||||
|
||||
@@ -6,30 +6,40 @@ export interface ToggleProps
|
||||
checked?: boolean;
|
||||
onCheckedChange?: (checked: boolean) => void;
|
||||
label?: string;
|
||||
labelPosition?: 'left' | 'right';
|
||||
}
|
||||
|
||||
const Toggle = React.forwardRef<HTMLInputElement, ToggleProps>(
|
||||
({ className, checked, onCheckedChange, label, ...props }, ref) => {
|
||||
({ className, checked, onCheckedChange, label, labelPosition = 'right', ...props }, ref) => {
|
||||
const toggleSwitch = (
|
||||
<label className="relative inline-flex items-center cursor-pointer">
|
||||
<input
|
||||
type="checkbox"
|
||||
className="sr-only"
|
||||
checked={checked}
|
||||
onChange={(e) => onCheckedChange?.(e.target.checked)}
|
||||
ref={ref}
|
||||
{...props}
|
||||
/>
|
||||
<div className={cn(
|
||||
"w-11 h-6 peer-focus:outline-none peer-focus:ring-4 peer-focus:ring-primary/20 rounded-full peer after:content-[''] after:absolute after:top-[2px] after:left-[2px] after:bg-white after:border-gray-300 dark:after:border-gray-500 after:border after:rounded-full after:h-5 after:w-5 after:transition-transform after:duration-300 after:ease-in-out after:shadow-md transition-colors duration-300 ease-in-out border-2 border-gray-300 dark:border-gray-600",
|
||||
checked
|
||||
? "bg-blue-500 dark:bg-blue-600 after:translate-x-full"
|
||||
: "bg-gray-300 dark:bg-gray-700",
|
||||
className
|
||||
)} />
|
||||
</label>
|
||||
);
|
||||
|
||||
return (
|
||||
<div className="flex items-center space-x-3">
|
||||
<label className="relative inline-flex items-center cursor-pointer">
|
||||
<input
|
||||
type="checkbox"
|
||||
className="sr-only"
|
||||
checked={checked}
|
||||
onChange={(e) => onCheckedChange?.(e.target.checked)}
|
||||
ref={ref}
|
||||
{...props}
|
||||
/>
|
||||
<div className={cn(
|
||||
"w-11 h-6 peer-focus:outline-none peer-focus:ring-4 peer-focus:ring-primary/20 rounded-full peer after:content-[''] after:absolute after:top-[2px] after:left-[2px] after:bg-white after:border-gray-300 dark:after:border-gray-500 after:border after:rounded-full after:h-5 after:w-5 after:transition-transform after:duration-300 after:ease-in-out after:shadow-md transition-colors duration-300 ease-in-out border-2 border-gray-300 dark:border-gray-600",
|
||||
checked
|
||||
? "bg-blue-500 dark:bg-blue-600 after:translate-x-full"
|
||||
: "bg-gray-300 dark:bg-gray-700",
|
||||
className
|
||||
)} />
|
||||
</label>
|
||||
{label && (
|
||||
{label && labelPosition === 'left' && (
|
||||
<span className="text-sm font-medium text-foreground">
|
||||
{label}
|
||||
</span>
|
||||
)}
|
||||
{toggleSwitch}
|
||||
{label && labelPosition === 'right' && (
|
||||
<span className="text-sm font-medium text-foreground">
|
||||
{label}
|
||||
</span>
|
||||
|
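A quick usage note for the new prop: existing call sites are unaffected because `labelPosition` defaults to `'right'`; only places that want the label on the left, like the repository toggles above, need to opt in. A minimal sketch (the toggle import path is an assumption):

```tsx
import { useState } from 'react';
import { Toggle } from './ui/toggle';

export function ToggleDemo() {
  const [enabled, setEnabled] = useState(false);
  return (
    <div className="space-y-2">
      {/* Default: label rendered to the right of the switch. */}
      <Toggle checked={enabled} onCheckedChange={setEnabled} label="Enable repository" />
      {/* New: label rendered to the left, as used for the repository toggles above. */}
      <Toggle checked={enabled} onCheckedChange={setEnabled} label="Enable repository" labelPosition="left" />
    </div>
  );
}
```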
||||
@@ -32,6 +32,7 @@ export default function Home() {
|
||||
name: string;
|
||||
mode?: "local" | "ssh";
|
||||
server?: Server;
|
||||
envVars?: Record<string, string | number | boolean>;
|
||||
} | null>(null);
|
||||
const [activeTab, setActiveTab] = useState<
|
||||
"scripts" | "downloaded" | "installed" | "backups"
|
||||
@@ -209,8 +210,9 @@ export default function Home() {
|
||||
scriptName: string,
|
||||
mode?: "local" | "ssh",
|
||||
server?: Server,
|
||||
envVars?: Record<string, string | number | boolean>,
|
||||
) => {
|
||||
setRunningScript({ path: scriptPath, name: scriptName, mode, server });
|
||||
setRunningScript({ path: scriptPath, name: scriptName, mode, server, envVars });
|
||||
// Scroll to terminal after a short delay to ensure it's rendered
|
||||
setTimeout(scrollToTerminal, 100);
|
||||
};
|
||||
@@ -360,6 +362,7 @@ export default function Home() {
|
||||
onClose={handleCloseTerminal}
|
||||
mode={runningScript.mode}
|
||||
server={runningScript.server}
|
||||
envVars={runningScript.envVars}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
|
||||
@@ -442,22 +442,18 @@ async function isVM(scriptId: number, containerId: string, serverId: number | nu
|
||||
return true; // VM config file exists
|
||||
}
|
||||
|
||||
// Check LXC config file
|
||||
let lxcConfigExists = false;
|
||||
// Check the LXC config file as well (not needed for the return value; done only for completeness)
|
||||
await new Promise<void>((resolve) => {
|
||||
void sshExecutionService.executeCommand(
|
||||
server as Server,
|
||||
`test -f "${lxcConfigPath}" && echo "exists" || echo "not_exists"`,
|
||||
(data: string) => {
|
||||
if (data.includes('exists')) {
|
||||
lxcConfigExists = true;
|
||||
}
|
||||
(_data: string) => {
|
||||
// Data handler not needed - just checking if file exists
|
||||
},
|
||||
() => resolve(),
|
||||
() => resolve()
|
||||
);
|
||||
});
|
||||
|
||||
|
||||
return false; // Always LXC since VM config doesn't exist
|
||||
} catch (error) {
|
||||
@@ -510,7 +506,7 @@ async function batchDetectContainerTypes(server: Server): Promise<Map<string, bo
|
||||
|
||||
// Get containers from pct list
|
||||
let pctOutput = '';
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
await new Promise<void>((resolve) => {
|
||||
void sshExecutionService.executeCommand(
|
||||
server,
|
||||
'pct list',
|
||||
@@ -530,7 +526,7 @@ async function batchDetectContainerTypes(server: Server): Promise<Map<string, bo
|
||||
|
||||
// Get VMs from qm list
|
||||
let qmOutput = '';
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
await new Promise<void>((resolve) => {
|
||||
void sshExecutionService.executeCommand(
|
||||
server,
|
||||
'qm list',
|
||||
@@ -2651,5 +2647,562 @@ EOFCONFIG`;
|
||||
executionId: null
|
||||
};
|
||||
}
|
||||
}),
|
||||
|
||||
// Get next free ID from cluster (single ID for sequential cloning)
|
||||
getClusterNextId: publicProcedure
|
||||
.input(z.object({
|
||||
serverId: z.number()
|
||||
}))
|
||||
.query(async ({ input }) => {
|
||||
try {
|
||||
const db = getDatabase();
|
||||
const server = await db.getServerById(input.serverId);
|
||||
|
||||
if (!server) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'Server not found',
|
||||
nextId: null
|
||||
};
|
||||
}
|
||||
|
||||
const { getSSHExecutionService } = await import('~/server/ssh-execution-service');
|
||||
const sshExecutionService = getSSHExecutionService();
|
||||
|
||||
let output = '';
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
sshExecutionService.executeCommand(
|
||||
server as Server,
|
||||
'pvesh get /cluster/nextid',
|
||||
(data: string) => {
|
||||
output += data;
|
||||
},
|
||||
(error: string) => {
|
||||
reject(new Error(`Failed to get next ID: ${error}`));
|
||||
},
|
||||
(exitCode: number) => {
|
||||
if (exitCode === 0) {
|
||||
resolve();
|
||||
} else {
|
||||
reject(new Error(`pvesh command failed with exit code ${exitCode}`));
|
||||
}
|
||||
}
|
||||
);
|
||||
});
|
||||
|
||||
const nextId = output.trim();
|
||||
if (!nextId || !/^\d+$/.test(nextId)) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'Invalid next ID received',
|
||||
nextId: null
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
nextId
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('Error in getClusterNextId:', error);
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : 'Failed to get next ID',
|
||||
nextId: null
|
||||
};
|
||||
}
|
||||
}),
|
||||
|
||||
// Get container hostname/name
|
||||
getContainerHostname: publicProcedure
|
||||
.input(z.object({
|
||||
containerId: z.string(),
|
||||
serverId: z.number(),
|
||||
containerType: z.enum(['lxc', 'vm'])
|
||||
}))
|
||||
.query(async ({ input }) => {
|
||||
try {
|
||||
const db = getDatabase();
|
||||
const server = await db.getServerById(input.serverId);
|
||||
|
||||
if (!server) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'Server not found',
|
||||
hostname: null
|
||||
};
|
||||
}
|
||||
|
||||
const { getSSHExecutionService } = await import('~/server/ssh-execution-service');
|
||||
const sshExecutionService = getSSHExecutionService();
|
||||
|
||||
const configPath = input.containerType === 'lxc'
|
||||
? `/etc/pve/lxc/${input.containerId}.conf`
|
||||
: `/etc/pve/qemu-server/${input.containerId}.conf`;
|
||||
|
||||
let configContent = '';
|
||||
await new Promise<void>((resolve) => {
|
||||
sshExecutionService.executeCommand(
|
||||
server as Server,
|
||||
`cat "${configPath}" 2>/dev/null || echo ""`,
|
||||
(data: string) => {
|
||||
configContent += data;
|
||||
},
|
||||
() => resolve(), // Don't fail on error
|
||||
() => resolve() // Always resolve
|
||||
);
|
||||
});
|
||||
|
||||
if (!configContent.trim()) {
|
||||
return {
|
||||
success: true,
|
||||
hostname: null
|
||||
};
|
||||
}
|
||||
|
||||
// Parse config for hostname (LXC) or name (VM)
|
||||
const lines = configContent.split('\n');
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim();
|
||||
if (input.containerType === 'lxc' && trimmed.startsWith('hostname:')) {
|
||||
const hostname = trimmed.substring(9).trim();
|
||||
return {
|
||||
success: true,
|
||||
hostname
|
||||
};
|
||||
} else if (input.containerType === 'vm' && trimmed.startsWith('name:')) {
|
||||
const name = trimmed.substring(5).trim();
|
||||
return {
|
||||
success: true,
|
||||
hostname: name
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
hostname: null
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('Error in getContainerHostname:', error);
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : 'Failed to get container hostname',
|
||||
hostname: null
|
||||
};
|
||||
}
|
||||
}),
|
||||
|
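The hostname lookup boils down to reading one key out of the guest's Proxmox config file. The same parsing, extracted as a pure function for reference (the sample config line in the example is illustrative):

```ts
// LXC configs under /etc/pve/lxc/<id>.conf carry `hostname: <value>`,
// VM configs under /etc/pve/qemu-server/<id>.conf carry `name: <value>`.
function parseGuestName(configContent: string, containerType: 'lxc' | 'vm'): string | null {
  const prefix = containerType === 'lxc' ? 'hostname:' : 'name:';
  for (const line of configContent.split('\n')) {
    const trimmed = line.trim();
    if (trimmed.startsWith(prefix)) {
      return trimmed.substring(prefix.length).trim();
    }
  }
  return null;
}

// Example: parseGuestName('arch: amd64\nhostname: media-server', 'lxc') === 'media-server'
```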
||||
// Get clone storages (rootdir or images content)
|
||||
getCloneStorages: publicProcedure
|
||||
.input(z.object({
|
||||
serverId: z.number(),
|
||||
forceRefresh: z.boolean().optional().default(false)
|
||||
}))
|
||||
.query(async ({ input }) => {
|
||||
try {
|
||||
const db = getDatabase();
|
||||
const server = await db.getServerById(input.serverId);
|
||||
|
||||
if (!server) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'Server not found',
|
||||
storages: [],
|
||||
cached: false
|
||||
};
|
||||
}
|
||||
|
||||
const storageService = getStorageService();
|
||||
const { default: SSHService } = await import('~/server/ssh-service');
|
||||
const { getSSHExecutionService } = await import('~/server/ssh-execution-service');
|
||||
const sshService = new SSHService();
|
||||
const sshExecutionService = getSSHExecutionService();
|
||||
|
||||
// Test SSH connection first
|
||||
const connectionTest = await sshService.testSSHConnection(server as Server);
|
||||
if (!(connectionTest as any).success) {
|
||||
return {
|
||||
success: false,
|
||||
error: `SSH connection failed: ${(connectionTest as any).error ?? 'Unknown error'}`,
|
||||
storages: [],
|
||||
cached: false
|
||||
};
|
||||
}
|
||||
|
||||
// Get server hostname to filter storages
|
||||
let serverHostname = '';
|
||||
try {
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
sshExecutionService.executeCommand(
|
||||
server as Server,
|
||||
'hostname',
|
||||
(data: string) => {
|
||||
serverHostname += data;
|
||||
},
|
||||
(error: string) => {
|
||||
reject(new Error(`Failed to get hostname: ${error}`));
|
||||
},
|
||||
(exitCode: number) => {
|
||||
if (exitCode === 0) {
|
||||
resolve();
|
||||
} else {
|
||||
reject(new Error(`hostname command failed with exit code ${exitCode}`));
|
||||
}
|
||||
}
|
||||
);
|
||||
});
|
||||
} catch (error) {
|
||||
console.error('Error getting server hostname:', error);
|
||||
// Continue without filtering if hostname can't be retrieved
|
||||
}
|
||||
|
||||
const normalizedHostname = serverHostname.trim().toLowerCase();
|
||||
|
||||
// Check if we have cached data
|
||||
const wasCached = !input.forceRefresh;
|
||||
|
||||
// Fetch storages (will use cache if not forcing refresh)
|
||||
const allStorages = await storageService.getStorages(server as Server, input.forceRefresh);
|
||||
|
||||
// Filter storages by node hostname matching and content type (only rootdir for cloning)
|
||||
const applicableStorages = allStorages.filter(storage => {
|
||||
// Check content type - must have rootdir for cloning
|
||||
const hasRootdir = storage.content.includes('rootdir');
|
||||
if (!hasRootdir) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// If storage has no nodes specified, it's available on all nodes
|
||||
if (!storage.nodes || storage.nodes.length === 0) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// If we couldn't get hostname, include all storages (fallback)
|
||||
if (!normalizedHostname) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check if server hostname is in the nodes array (case-insensitive, trimmed)
|
||||
const normalizedNodes = storage.nodes.map(node => node.trim().toLowerCase());
|
||||
return normalizedNodes.includes(normalizedHostname);
|
||||
});
|
||||
|
||||
return {
|
||||
success: true,
|
||||
storages: applicableStorages,
|
||||
cached: wasCached && applicableStorages.length > 0
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('Error in getCloneStorages:', error);
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : 'Failed to fetch storages',
|
||||
storages: [],
|
||||
cached: false
|
||||
};
|
||||
}
|
||||
}),
|
||||
|
||||
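The storage filter above combines two checks: the storage must advertise `rootdir` content, and, when node restrictions and the hostname are both known, it must be assigned to this node. The same logic as a standalone predicate, with the storage shape reduced to the fields actually read (the content representation used here is an assumption of this sketch):

```ts
interface CloneStorageCandidate {
  name: string;
  content: string[];   // e.g. ['rootdir', 'images']; representation assumed for the sketch
  nodes?: string[];    // empty or undefined means available on every node
}

function isCloneTarget(storage: CloneStorageCandidate, serverHostname: string): boolean {
  if (!storage.content.includes('rootdir')) return false;          // clones need rootdir
  if (!storage.nodes || storage.nodes.length === 0) return true;   // unrestricted storage
  if (!serverHostname) return true;                                // hostname lookup failed: don't filter
  const host = serverHostname.trim().toLowerCase();
  return storage.nodes.some((node) => node.trim().toLowerCase() === host);
}

// Example: a storage pinned to node "pve1" is offered only when connected to pve1.
console.log(isCloneTarget({ name: 'local-lvm', content: ['rootdir', 'images'], nodes: ['pve1'] }, 'PVE1')); // true
```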
// Generate clone hostnames
|
||||
generateCloneHostnames: publicProcedure
|
||||
.input(z.object({
|
||||
originalHostname: z.string(),
|
||||
containerType: z.enum(['lxc', 'vm']),
|
||||
serverId: z.number(),
|
||||
count: z.number().min(1).max(100)
|
||||
}))
|
||||
.query(async ({ input }) => {
|
||||
try {
|
||||
const db = getDatabase();
|
||||
const server = await db.getServerById(input.serverId);
|
||||
|
||||
if (!server) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'Server not found',
|
||||
hostnames: []
|
||||
};
|
||||
}
|
||||
|
||||
const { getSSHExecutionService } = await import('~/server/ssh-execution-service');
|
||||
const sshExecutionService = getSSHExecutionService();
|
||||
|
||||
// Get all existing containers/VMs to find existing clones (check both LXC and VM)
|
||||
const existingHostnames = new Set<string>();
|
||||
|
||||
// Check LXC containers
|
||||
let lxcOutput = '';
|
||||
try {
|
||||
await new Promise<void>((resolve) => {
|
||||
sshExecutionService.executeCommand(
|
||||
server as Server,
|
||||
'pct list',
|
||||
(data: string) => {
|
||||
lxcOutput += data;
|
||||
},
|
||||
(error: string) => {
|
||||
console.error(`pct list error for server ${server.name}:`, error);
|
||||
resolve();
|
||||
},
|
||||
() => resolve()
|
||||
);
|
||||
});
|
||||
|
||||
const lxcLines = lxcOutput.split('\n').filter(line => line.trim());
|
||||
for (const line of lxcLines) {
|
||||
if (line.includes('CTID') || line.includes('NAME')) continue;
|
||||
const parts = line.trim().split(/\s+/);
|
||||
if (parts.length >= 3) {
|
||||
const name = parts.slice(2).join(' ').trim();
|
||||
if (name) {
|
||||
existingHostnames.add(name.toLowerCase());
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Continue even if LXC list fails
|
||||
}
|
||||
|
||||
// Check VMs
|
||||
let vmOutput = '';
|
||||
try {
|
||||
await new Promise<void>((resolve) => {
|
||||
sshExecutionService.executeCommand(
|
||||
server as Server,
|
||||
'qm list',
|
||||
(data: string) => {
|
||||
vmOutput += data;
|
||||
},
|
||||
(error: string) => {
|
||||
console.error(`qm list error for server ${server.name}:`, error);
|
||||
resolve();
|
||||
},
|
||||
() => resolve()
|
||||
);
|
||||
});
|
||||
|
||||
const vmLines = vmOutput.split('\n').filter(line => line.trim());
|
||||
for (const line of vmLines) {
|
||||
if (line.includes('VMID') || line.includes('NAME')) continue;
|
||||
const parts = line.trim().split(/\s+/);
|
||||
if (parts.length >= 3) {
|
||||
const name = parts.slice(2).join(' ').trim();
|
||||
if (name) {
|
||||
existingHostnames.add(name.toLowerCase());
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Continue even if VM list fails
|
||||
}
|
||||
|
||||
// Find next available clone number
|
||||
const clonePattern = new RegExp(`^${input.originalHostname.toLowerCase()}-clone-(\\d+)$`);
|
||||
const existingCloneNumbers: number[] = [];
|
||||
|
||||
for (const hostname of existingHostnames) {
|
||||
const match = hostname.match(clonePattern);
|
||||
if (match) {
|
||||
existingCloneNumbers.push(parseInt(match[1] ?? '0', 10));
|
||||
}
|
||||
}
|
||||
|
||||
// Determine starting number
|
||||
let nextNumber = 1;
|
||||
if (existingCloneNumbers.length > 0) {
|
||||
existingCloneNumbers.sort((a, b) => a - b);
|
||||
const lastNumber = existingCloneNumbers[existingCloneNumbers.length - 1];
|
||||
if (lastNumber !== undefined) {
|
||||
nextNumber = lastNumber + 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Generate hostnames
|
||||
const hostnames: string[] = [];
|
||||
for (let i = 0; i < input.count; i++) {
|
||||
hostnames.push(`${input.originalHostname}-clone-${nextNumber + i}`);
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
hostnames
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('Error in generateCloneHostnames:', error);
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : 'Failed to generate clone hostnames',
|
||||
hostnames: []
|
||||
};
|
||||
}
|
||||
}),
|
||||
|
||||
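Numbering always continues from the highest existing `<hostname>-clone-N` found across both `pct list` and `qm list`, so gaps left by deleted clones are not reused. The core of that logic as a pure function, with a worked example:

```ts
function nextCloneHostnames(original: string, existingNames: string[], count: number): string[] {
  const pattern = new RegExp(`^${original.toLowerCase()}-clone-(\\d+)$`);
  let highest = 0;
  for (const name of existingNames) {
    const match = name.trim().toLowerCase().match(pattern);
    if (match) highest = Math.max(highest, parseInt(match[1] ?? '0', 10));
  }
  return Array.from({ length: count }, (_, i) => `${original}-clone-${highest + 1 + i}`);
}

// Example: existing clones 1 and 3 -> the next batch starts at 4.
console.log(nextCloneHostnames('media', ['media-clone-1', 'media-clone-3'], 2));
// -> ['media-clone-4', 'media-clone-5']
```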
// Execute clone (prepare for websocket execution)
|
||||
// Note: nextIds will be obtained sequentially during cloning in server.js
|
||||
executeClone: publicProcedure
|
||||
.input(z.object({
|
||||
containerId: z.string(),
|
||||
serverId: z.number(),
|
||||
storage: z.string(),
|
||||
cloneCount: z.number().min(1).max(100),
|
||||
hostnames: z.array(z.string()),
|
||||
containerType: z.enum(['lxc', 'vm'])
|
||||
}))
|
||||
.mutation(async ({ input }) => {
|
||||
try {
|
||||
const db = getDatabase();
|
||||
const server = await db.getServerById(input.serverId);
|
||||
|
||||
if (!server) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'Server not found',
|
||||
executionId: null
|
||||
};
|
||||
}
|
||||
|
||||
const { default: SSHService } = await import('~/server/ssh-service');
|
||||
const sshService = new SSHService();
|
||||
|
||||
// Test SSH connection first
|
||||
const connectionTest = await sshService.testSSHConnection(server as Server);
|
||||
if (!(connectionTest as any).success) {
|
||||
return {
|
||||
success: false,
|
||||
error: `SSH connection failed: ${(connectionTest as any).error ?? 'Unknown error'}`,
|
||||
executionId: null
|
||||
};
|
||||
}
|
||||
|
||||
// Validate inputs
|
||||
if (input.hostnames.length !== input.cloneCount) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'Hostnames count must match clone count',
|
||||
executionId: null
|
||||
};
|
||||
}
|
||||
|
||||
// Generate execution ID for websocket tracking
|
||||
const executionId = `clone_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
|
||||
|
||||
return {
|
||||
success: true,
|
||||
executionId,
|
||||
containerId: input.containerId,
|
||||
storage: input.storage,
|
||||
cloneCount: input.cloneCount,
|
||||
hostnames: input.hostnames,
|
||||
containerType: input.containerType,
|
||||
server: server as Server
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('Error in executeClone:', error);
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : 'Failed to execute clone',
|
||||
executionId: null
|
||||
};
|
||||
}
|
||||
}),
|
||||
|
||||
// Add cloned container to database
|
||||
addClonedContainerToDatabase: publicProcedure
|
||||
.input(z.object({
|
||||
containerId: z.string(),
|
||||
serverId: z.number(),
|
||||
containerType: z.enum(['lxc', 'vm'])
|
||||
}))
|
||||
.mutation(async ({ input }) => {
|
||||
try {
|
||||
const db = getDatabase();
|
||||
const server = await db.getServerById(input.serverId);
|
||||
|
||||
if (!server) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'Server not found',
|
||||
scriptId: null
|
||||
};
|
||||
}
|
||||
|
||||
const { getSSHExecutionService } = await import('~/server/ssh-execution-service');
|
||||
const sshExecutionService = getSSHExecutionService();
|
||||
|
||||
// Read config file to get hostname/name
|
||||
const configPath = input.containerType === 'lxc'
|
||||
? `/etc/pve/lxc/${input.containerId}.conf`
|
||||
: `/etc/pve/qemu-server/${input.containerId}.conf`;
|
||||
|
||||
let configContent = '';
|
||||
await new Promise<void>((resolve) => {
|
||||
sshExecutionService.executeCommand(
|
||||
server as Server,
|
||||
`cat "${configPath}" 2>/dev/null || echo ""`,
|
||||
(data: string) => {
|
||||
configContent += data;
|
||||
},
|
||||
() => resolve(),
|
||||
() => resolve()
|
||||
);
|
||||
});
|
||||
|
||||
if (!configContent.trim()) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'Config file not found',
|
||||
scriptId: null
|
||||
};
|
||||
}
|
||||
|
||||
// Parse config for hostname/name
|
||||
let hostname = '';
|
||||
const lines = configContent.split('\n');
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim();
|
||||
if (input.containerType === 'lxc' && trimmed.startsWith('hostname:')) {
|
||||
hostname = trimmed.substring(9).trim();
|
||||
break;
|
||||
} else if (input.containerType === 'vm' && trimmed.startsWith('name:')) {
|
||||
hostname = trimmed.substring(5).trim();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!hostname) {
|
||||
hostname = `${input.containerType}-${input.containerId}`;
|
||||
}
|
||||
|
||||
// Create installed script record
|
||||
const script = await db.createInstalledScript({
|
||||
script_name: hostname,
|
||||
script_path: `cloned/${hostname}`,
|
||||
container_id: input.containerId,
|
||||
server_id: input.serverId,
|
||||
execution_mode: 'ssh',
|
||||
status: 'success',
|
||||
output_log: `Cloned container/VM`
|
||||
});
|
||||
|
||||
// For LXC, store config in database
|
||||
if (input.containerType === 'lxc') {
|
||||
const parsedConfig = parseRawConfig(configContent);
|
||||
await db.createLXCConfig(script.id, parsedConfig);
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
scriptId: script.id
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('Error in addClonedContainerToDatabase:', error);
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : 'Failed to add cloned container to database',
|
||||
scriptId: null
|
||||
};
|
||||
}
|
||||
})
|
||||
});
|
||||
|
||||
@@ -7,7 +7,10 @@ import { localScriptsService } from "~/server/services/localScripts";
|
||||
import { scriptDownloaderService } from "~/server/services/scriptDownloader.js";
|
||||
import { AutoSyncService } from "~/server/services/autoSyncService";
|
||||
import { repositoryService } from "~/server/services/repositoryService";
|
||||
import { getStorageService } from "~/server/services/storageService";
|
||||
import { getDatabase } from "~/server/database-prisma";
|
||||
import type { ScriptCard } from "~/types/script";
|
||||
import type { Server } from "~/types/server";
|
||||
|
||||
export const scriptsRouter = createTRPCRouter({
|
||||
// Get all available scripts
|
||||
@@ -637,5 +640,194 @@ export const scriptsRouter = createTRPCRouter({
|
||||
status: null
|
||||
};
|
||||
}
|
||||
}),
|
||||
|
||||
// Get rootfs storages for a server (for container creation)
|
||||
getRootfsStorages: publicProcedure
|
||||
.input(z.object({
|
||||
serverId: z.number(),
|
||||
forceRefresh: z.boolean().optional().default(false)
|
||||
}))
|
||||
.query(async ({ input }) => {
|
||||
try {
|
||||
const db = getDatabase();
|
||||
const server = await db.getServerById(input.serverId);
|
||||
|
||||
if (!server) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'Server not found',
|
||||
storages: []
|
||||
};
|
||||
}
|
||||
|
||||
// Get server hostname to filter storages by node assignment
|
||||
const { getSSHExecutionService } = await import('~/server/ssh-execution-service');
|
||||
const sshExecutionService = getSSHExecutionService();
|
||||
let serverHostname = '';
|
||||
try {
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
void sshExecutionService.executeCommand(
|
||||
server as Server,
|
||||
'hostname',
|
||||
(data: string) => {
|
||||
serverHostname += data;
|
||||
},
|
||||
(error: string) => {
|
||||
reject(new Error(`Failed to get hostname: ${error}`));
|
||||
},
|
||||
(exitCode: number) => {
|
||||
if (exitCode === 0) {
|
||||
resolve();
|
||||
} else {
|
||||
reject(new Error(`hostname command failed with exit code ${exitCode}`));
|
||||
}
|
||||
}
|
||||
);
|
||||
});
|
||||
} catch (error) {
|
||||
console.error('Error getting server hostname:', error);
|
||||
// Continue without filtering if hostname can't be retrieved
|
||||
}
|
||||
|
||||
const normalizedHostname = serverHostname.trim().toLowerCase();
|
||||
|
||||
const storageService = getStorageService();
|
||||
const allStorages = await storageService.getStorages(server as Server, input.forceRefresh);
|
||||
|
||||
// Filter storages by node hostname matching and content type (rootdir for containers)
|
||||
const rootfsStorages = allStorages.filter(storage => {
|
||||
// Check content type - must have rootdir for containers
|
||||
const hasRootdir = storage.content.includes('rootdir');
|
||||
if (!hasRootdir) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// If storage has no nodes specified, it's available on all nodes
|
||||
if (!storage.nodes || storage.nodes.length === 0) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// If we couldn't get hostname, include all storages (fallback)
|
||||
if (!normalizedHostname) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check if server hostname is in the nodes array (case-insensitive, trimmed)
|
||||
const normalizedNodes = storage.nodes.map(node => node.trim().toLowerCase());
|
||||
return normalizedNodes.includes(normalizedHostname);
|
||||
});
|
||||
|
||||
return {
|
||||
success: true,
|
||||
storages: rootfsStorages.map(s => ({
|
||||
name: s.name,
|
||||
type: s.type,
|
||||
content: s.content
|
||||
}))
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('Error fetching rootfs storages:', error);
|
||||
// Return empty array on error (as per plan requirement)
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : 'Failed to fetch storages',
|
||||
storages: []
|
||||
};
|
||||
}
|
||||
}),
|
||||
|
||||
// Get template storages for a server (for template storage selection)
|
||||
getTemplateStorages: publicProcedure
|
||||
.input(z.object({
|
||||
serverId: z.number(),
|
||||
forceRefresh: z.boolean().optional().default(false)
|
||||
}))
|
||||
.query(async ({ input }) => {
|
||||
try {
|
||||
const db = getDatabase();
|
||||
const server = await db.getServerById(input.serverId);
|
||||
|
||||
if (!server) {
|
||||
return {
|
||||
success: false,
|
||||
error: 'Server not found',
|
||||
storages: []
|
||||
};
|
||||
}
|
||||
|
||||
// Get server hostname to filter storages by node assignment
|
||||
const { getSSHExecutionService } = await import('~/server/ssh-execution-service');
|
||||
const sshExecutionService = getSSHExecutionService();
|
||||
let serverHostname = '';
|
||||
try {
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
void sshExecutionService.executeCommand(
|
||||
server as Server,
|
||||
'hostname',
|
||||
(data: string) => {
|
||||
serverHostname += data;
|
||||
},
|
||||
(error: string) => {
|
||||
reject(new Error(`Failed to get hostname: ${error}`));
|
||||
},
|
||||
(exitCode: number) => {
|
||||
if (exitCode === 0) {
|
||||
resolve();
|
||||
} else {
|
||||
reject(new Error(`hostname command failed with exit code ${exitCode}`));
|
||||
}
|
||||
}
|
||||
);
|
||||
});
|
||||
} catch (error) {
|
||||
console.error('Error getting server hostname:', error);
|
||||
// Continue without filtering if hostname can't be retrieved
|
||||
}
|
||||
|
||||
const normalizedHostname = serverHostname.trim().toLowerCase();
|
||||
|
||||
const storageService = getStorageService();
|
||||
const allStorages = await storageService.getStorages(server as Server, input.forceRefresh);
|
||||
|
||||
// Filter storages by node hostname matching and content type (vztmpl for templates)
|
||||
const templateStorages = allStorages.filter(storage => {
|
||||
// Check content type - must have vztmpl for templates
|
||||
const hasVztmpl = storage.content.includes('vztmpl');
|
||||
if (!hasVztmpl) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// If storage has no nodes specified, it's available on all nodes
|
||||
if (!storage.nodes || storage.nodes.length === 0) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// If we couldn't get hostname, include all storages (fallback)
|
||||
if (!normalizedHostname) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check if server hostname is in the nodes array (case-insensitive, trimmed)
|
||||
const normalizedNodes = storage.nodes.map(node => node.trim().toLowerCase());
|
||||
return normalizedNodes.includes(normalizedHostname);
|
||||
});
|
||||
|
||||
return {
|
||||
success: true,
|
||||
storages: templateStorages.map(s => ({
|
||||
name: s.name,
|
||||
type: s.type,
|
||||
content: s.content
|
||||
}))
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('Error fetching template storages:', error);
|
||||
return {
|
||||
success: false,
|
||||
error: error instanceof Error ? error.message : 'Failed to fetch storages',
|
||||
storages: []
|
||||
};
|
||||
}
|
||||
})
|
||||
});
|
||||
|
||||
@@ -238,6 +238,27 @@ export const versionRouter = createTRPCRouter({
|
||||
// Clear/create the log file
|
||||
await writeFile(logPath, '', 'utf-8');
|
||||
|
||||
// Always fetch the latest update.sh from GitHub before running
|
||||
// This ensures we always use the newest update script, avoiding
|
||||
// the "chicken-and-egg" problem where old scripts can't update properly
|
||||
const updateScriptUrl = 'https://raw.githubusercontent.com/community-scripts/ProxmoxVE-Local/main/update.sh';
|
||||
try {
|
||||
const response = await fetch(updateScriptUrl);
|
||||
if (response.ok) {
|
||||
const latestScript = await response.text();
|
||||
await writeFile(updateScriptPath, latestScript, { mode: 0o755 });
|
||||
// Log that we fetched the latest script
|
||||
await writeFile(logPath, '[INFO] Fetched latest update.sh from GitHub\n', { flag: 'a' });
|
||||
} else {
|
||||
// If fetch fails, log warning but continue with local script
|
||||
await writeFile(logPath, `[WARNING] Could not fetch latest update.sh (HTTP ${response.status}), using local version\n`, { flag: 'a' });
|
||||
}
|
||||
} catch (fetchError) {
|
||||
// If fetch fails, log warning but continue with local script
|
||||
const errorMsg = fetchError instanceof Error ? fetchError.message : 'Unknown error';
|
||||
await writeFile(logPath, `[WARNING] Could not fetch latest update.sh: ${errorMsg}, using local version\n`, { flag: 'a' });
|
||||
}
|
||||
|
||||
// Spawn the update script as a detached process using nohup
|
||||
// This allows it to run independently and kill the parent Node.js process
|
||||
// Redirect output to log file
|
||||
|
||||
@@ -1,9 +1,22 @@
|
||||
import 'dotenv/config'
|
||||
import { PrismaClient } from '../../prisma/generated/prisma/client.ts'
|
||||
import { PrismaBetterSqlite3 } from '@prisma/adapter-better-sqlite3'
|
||||
import { existsSync, mkdirSync } from 'fs'
|
||||
import { dirname } from 'path'
|
||||
|
||||
const globalForPrisma = globalThis;
|
||||
|
||||
// Ensure database directory exists before initializing Prisma
|
||||
// DATABASE_URL format: file:/path/to/database.db
|
||||
const dbUrl = process.env.DATABASE_URL || 'file:./data/settings.db';
|
||||
const dbPath = dbUrl.replace(/^file:/, '');
|
||||
const dbDir = dirname(dbPath);
|
||||
|
||||
if (!existsSync(dbDir)) {
|
||||
console.log(`Creating database directory: ${dbDir}`);
|
||||
mkdirSync(dbDir, { recursive: true });
|
||||
}
|
||||
|
||||
const adapter = new PrismaBetterSqlite3({ url: process.env.DATABASE_URL });
|
||||
|
||||
export const prisma = globalForPrisma.prisma ?? new PrismaClient({ adapter });
|
||||
|
||||
@@ -1,9 +1,22 @@
|
||||
import 'dotenv/config'
|
||||
import { PrismaClient } from '../../prisma/generated/prisma/client'
|
||||
import { PrismaBetterSqlite3 } from '@prisma/adapter-better-sqlite3'
|
||||
import { existsSync, mkdirSync } from 'fs'
|
||||
import { dirname } from 'path'
|
||||
|
||||
const globalForPrisma = globalThis as { prisma?: PrismaClient };
|
||||
|
||||
// Ensure database directory exists before initializing Prisma
|
||||
// DATABASE_URL format: file:/path/to/database.db
|
||||
const dbUrl = process.env.DATABASE_URL || 'file:./data/settings.db';
|
||||
const dbPath = dbUrl.replace(/^file:/, '');
|
||||
const dbDir = dirname(dbPath);
|
||||
|
||||
if (!existsSync(dbDir)) {
|
||||
console.log(`Creating database directory: ${dbDir}`);
|
||||
mkdirSync(dbDir, { recursive: true });
|
||||
}
|
||||
|
||||
const adapter = new PrismaBetterSqlite3({ url: process.env.DATABASE_URL! });
|
||||
|
||||
export const prisma: PrismaClient = globalForPrisma.prisma ?? new PrismaClient({
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
/* eslint-disable @typescript-eslint/no-floating-promises, @typescript-eslint/prefer-nullish-coalescing, @typescript-eslint/no-unused-vars, @typescript-eslint/prefer-regexp-exec, @typescript-eslint/prefer-optional-chain */
|
||||
import { getSSHExecutionService } from '../ssh-execution-service';
|
||||
import { getStorageService } from './storageService';
|
||||
import { getDatabase } from '../database-prisma';
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
/* eslint-disable @typescript-eslint/prefer-nullish-coalescing */
|
||||
import { writeFile, mkdir, readdir, readFile } from 'fs/promises';
|
||||
import { join } from 'path';
|
||||
import { env } from '../../env.js';
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
/* eslint-disable @typescript-eslint/no-floating-promises, @typescript-eslint/prefer-optional-chain, @typescript-eslint/prefer-nullish-coalescing, @typescript-eslint/prefer-regexp-exec, @typescript-eslint/prefer-for-of */
|
||||
import { getSSHExecutionService } from '../ssh-execution-service';
|
||||
import type { Server } from '~/types/server';
|
||||
|
||||
|
||||
@@ -85,9 +85,10 @@ class SSHExecutionService {
|
||||
* @param {Function} onData - Callback for data output
|
||||
* @param {Function} onError - Callback for errors
|
||||
* @param {Function} onExit - Callback for process exit
|
||||
* @param {Object} [envVars] - Optional environment variables to pass to the script
|
||||
* @returns {Promise<Object>} Process information
|
||||
*/
|
||||
async executeScript(server, scriptPath, onData, onError, onExit) {
|
||||
async executeScript(server, scriptPath, onData, onError, onExit, envVars = {}) {
|
||||
try {
|
||||
await this.transferScriptsFolder(server, onData, onError);
|
||||
|
||||
@@ -98,8 +99,43 @@ class SSHExecutionService {
|
||||
// Build SSH command based on authentication type
|
||||
const { command, args } = this.buildSSHCommand(server);
|
||||
|
||||
// Format environment variables as var_name=value pairs
|
||||
const envVarsString = Object.entries(envVars)
|
||||
.map(([key, value]) => {
|
||||
// Escape special characters in values
|
||||
const escapedValue = String(value).replace(/'/g, "'\\''");
|
||||
return `${key}='${escapedValue}'`;
|
||||
})
|
||||
.join(' ');
|
||||
|
||||
// Build the command with environment variables
|
||||
let scriptCommand = `cd /tmp/scripts && chmod +x ${relativeScriptPath} && export TERM=xterm-256color && export COLUMNS=120 && export LINES=30 && export COLORTERM=truecolor && export FORCE_COLOR=1 && export NO_COLOR=0 && export CLICOLOR=1 && export CLICOLOR_FORCE=1`;
|
||||
|
||||
if (envVarsString) {
|
||||
scriptCommand += ` && ${envVarsString} bash ${relativeScriptPath}`;
|
||||
} else {
|
||||
scriptCommand += ` && bash ${relativeScriptPath}`;
|
||||
}
|
||||
|
||||
// Log the full command that will be executed
|
||||
console.log('='.repeat(80));
|
||||
console.log(`[SSH Execution] Executing on host: ${server.ip} (${server.name || 'Unnamed'})`);
|
||||
console.log(`[SSH Execution] Script path: ${scriptPath}`);
|
||||
console.log(`[SSH Execution] Relative script path: ${relativeScriptPath}`);
|
||||
if (Object.keys(envVars).length > 0) {
|
||||
console.log(`[SSH Execution] Environment variables (${Object.keys(envVars).length} vars):`);
|
||||
Object.entries(envVars).forEach(([key, value]) => {
|
||||
console.log(` ${key}=${String(value)}`);
|
||||
});
|
||||
} else {
|
||||
console.log(`[SSH Execution] No environment variables provided`);
|
||||
}
|
||||
console.log(`[SSH Execution] Full command:`);
|
||||
console.log(scriptCommand);
|
||||
console.log('='.repeat(80));
|
||||
|
||||
// Add the script execution command to the args
|
||||
args.push(`cd /tmp/scripts && chmod +x ${relativeScriptPath} && export TERM=xterm-256color && export COLUMNS=120 && export LINES=30 && export COLORTERM=truecolor && export FORCE_COLOR=1 && export NO_COLOR=0 && export CLICOLOR=1 && export CLICOLOR_FORCE=1 && bash ${relativeScriptPath}`);
|
||||
args.push(scriptCommand);
|
||||
|
||||
// Use ptySpawn for proper terminal emulation and color support
|
||||
const sshCommand = ptySpawn(command, args, {
|
||||
|
||||
356
update.sh
356
update.sh
@@ -4,7 +4,7 @@
|
||||
# Enhanced update script for ProxmoxVE-Local
|
||||
# Fetches latest release from GitHub and backs up data directory
|
||||
|
||||
set -euo pipefail # Exit on error, undefined vars, pipe failures
|
||||
set -euo pipefail # Exit on error, undefined vars, pipe failures
|
||||
|
||||
# Add error trap for debugging
|
||||
trap 'echo "Error occurred at line $LINENO, command: $BASH_COMMAND"' ERR
|
||||
@@ -38,7 +38,7 @@ load_github_token() {
|
||||
log "Using GitHub token from environment variable"
|
||||
return 0
|
||||
fi
|
||||
|
||||
|
||||
# Try .env file
|
||||
if [ -f ".env" ]; then
|
||||
local env_token
|
||||
@@ -49,21 +49,21 @@ load_github_token() {
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# Try .github_token file
|
||||
if [ -f ".github_token" ]; then
|
||||
GITHUB_TOKEN=$(cat .github_token | tr -d '\n\r')
|
||||
log "Using GitHub token from .github_token file"
|
||||
return 0
|
||||
fi
|
||||
|
||||
|
||||
# Try ~/.github_token file
|
||||
if [ -f "$HOME/.github_token" ]; then
|
||||
GITHUB_TOKEN=$(cat "$HOME/.github_token" | tr -d '\n\r')
|
||||
log "Using GitHub token from ~/.github_token file"
|
||||
return 0
|
||||
fi
|
||||
|
||||
|
||||
log_warning "No GitHub token found. Using unauthenticated requests (lower rate limits)"
|
||||
log_warning "To use a token, add GITHUB_TOKEN=your_token to .env file or set GITHUB_TOKEN environment variable"
|
||||
return 1
|
||||
@@ -72,7 +72,7 @@ load_github_token() {
|
||||
# Initialize log file
|
||||
init_log() {
|
||||
# Clear/create log file
|
||||
> "$LOG_FILE"
|
||||
>"$LOG_FILE"
|
||||
log "Starting ProxmoxVE-Local update process..."
|
||||
log "Log file: $LOG_FILE"
|
||||
}
|
||||
@@ -97,40 +97,40 @@ log_warning() {
|
||||
# Check if required tools are available
|
||||
check_dependencies() {
|
||||
log "Checking dependencies..."
|
||||
|
||||
|
||||
local missing_deps=()
|
||||
|
||||
if ! command -v curl &> /dev/null; then
|
||||
|
||||
if ! command -v curl &>/dev/null; then
|
||||
missing_deps+=("curl")
|
||||
fi
|
||||
|
||||
if ! command -v jq &> /dev/null; then
|
||||
|
||||
if ! command -v jq &>/dev/null; then
|
||||
missing_deps+=("jq")
|
||||
fi
|
||||
|
||||
if ! command -v npm &> /dev/null; then
|
||||
|
||||
if ! command -v npm &>/dev/null; then
|
||||
missing_deps+=("npm")
|
||||
fi
|
||||
|
||||
if ! command -v node &> /dev/null; then
|
||||
|
||||
if ! command -v node &>/dev/null; then
|
||||
missing_deps+=("node")
|
||||
fi
|
||||
|
||||
|
||||
if [ ${#missing_deps[@]} -ne 0 ]; then
|
||||
log_error "Missing dependencies: ${missing_deps[*]}"
|
||||
log_error "Please install the missing dependencies and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
log_success "All dependencies are available"
|
||||
}
|
||||
|
||||
# Get latest release info from GitHub API
|
||||
get_latest_release() {
|
||||
log "Fetching latest release information from GitHub..."
|
||||
|
||||
|
||||
local curl_opts="-s --connect-timeout 15 --max-time 60 --retry 2 --retry-delay 3"
|
||||
|
||||
|
||||
# Add authentication header if token is available
|
||||
if [ -n "$GITHUB_TOKEN" ]; then
|
||||
curl_opts="$curl_opts -H \"Authorization: token $GITHUB_TOKEN\""
|
||||
@@ -138,35 +138,35 @@ get_latest_release() {
|
||||
else
|
||||
log "Using unauthenticated GitHub API request (lower rate limits)"
|
||||
fi
|
||||
|
||||
|
||||
local release_info
|
||||
if ! release_info=$(eval "curl $curl_opts \"$GITHUB_API/releases/latest\""); then
|
||||
log_error "Failed to fetch release information from GitHub API (timeout or network error)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# Check if response is valid JSON
|
||||
if ! echo "$release_info" | jq empty 2>/dev/null; then
|
||||
log_error "Invalid JSON response from GitHub API"
|
||||
log "Response: $release_info"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
local tag_name
|
||||
local download_url
|
||||
local published_at
|
||||
|
||||
|
||||
tag_name=$(echo "$release_info" | jq -r '.tag_name')
|
||||
download_url=$(echo "$release_info" | jq -r '.tarball_url')
|
||||
published_at=$(echo "$release_info" | jq -r '.published_at')
|
||||
|
||||
|
||||
if [ "$tag_name" = "null" ] || [ "$download_url" = "null" ] || [ -z "$tag_name" ] || [ -z "$download_url" ]; then
|
||||
log_error "Failed to parse release information from API response"
|
||||
log "Tag name: $tag_name"
|
||||
log "Download URL: $download_url"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
log_success "Latest release: $tag_name (published: $published_at)"
|
||||
echo "$tag_name|$download_url"
|
||||
}
|
||||
@@ -174,16 +174,16 @@ get_latest_release() {
|
||||
# Backup data directory, .env file, and scripts directories
|
||||
backup_data() {
|
||||
log "Creating backup directory at $BACKUP_DIR..."
|
||||
|
||||
|
||||
if ! mkdir -p "$BACKUP_DIR"; then
|
||||
log_error "Failed to create backup directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# Backup data directory
|
||||
if [ -d "$DATA_DIR" ]; then
|
||||
log "Backing up data directory..."
|
||||
|
||||
|
||||
if ! cp -r "$DATA_DIR" "$BACKUP_DIR/data"; then
|
||||
log_error "Failed to backup data directory"
|
||||
exit 1
|
||||
@@ -193,7 +193,7 @@ backup_data() {
|
||||
else
|
||||
log_warning "Data directory not found, skipping backup"
|
||||
fi
|
||||
|
||||
|
||||
# Backup .env file
|
||||
if [ -f ".env" ]; then
|
||||
log "Backing up .env file..."
|
||||
@@ -206,7 +206,7 @@ backup_data() {
|
||||
else
|
||||
log_warning ".env file not found, skipping backup"
|
||||
fi
|
||||
|
||||
|
||||
# Backup scripts directories
|
||||
local scripts_dirs=("scripts/ct" "scripts/install" "scripts/tools" "scripts/vm")
|
||||
for scripts_dir in "${scripts_dirs[@]}"; do
|
||||
@@ -230,60 +230,60 @@ download_release() {
|
||||
local release_info="$1"
|
||||
local tag_name="${release_info%|*}"
|
||||
local download_url="${release_info#*|}"
|
||||
|
||||
|
||||
log "Downloading release $tag_name..."
|
||||
|
||||
|
||||
local temp_dir="/tmp/pve-update-$$"
|
||||
local archive_file="$temp_dir/release.tar.gz"
|
||||
|
||||
|
||||
# Create temporary directory
|
||||
if ! mkdir -p "$temp_dir"; then
|
||||
log_error "Failed to create temporary directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# Download release with timeout and progress
|
||||
if ! curl -L --connect-timeout 30 --max-time 300 --retry 3 --retry-delay 5 -o "$archive_file" "$download_url" 2>/dev/null; then
|
||||
log_error "Failed to download release from GitHub"
|
||||
rm -rf "$temp_dir"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# Verify download
|
||||
if [ ! -f "$archive_file" ] || [ ! -s "$archive_file" ]; then
|
||||
log_error "Downloaded file is empty or missing"
|
||||
rm -rf "$temp_dir"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
log_success "Downloaded release"
|
||||
|
||||
|
||||
# Extract release
|
||||
if ! tar -xzf "$archive_file" -C "$temp_dir" 2>/dev/null; then
|
||||
log_error "Failed to extract release"
|
||||
rm -rf "$temp_dir"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# Find the extracted directory (GitHub tarballs have a root directory)
|
||||
local extracted_dir
|
||||
extracted_dir=$(find "$temp_dir" -maxdepth 1 -type d -name "community-scripts-ProxmoxVE-Local-*" 2>/dev/null | head -1)
|
||||
|
||||
|
||||
# Try alternative patterns if not found
|
||||
if [ -z "$extracted_dir" ]; then
|
||||
extracted_dir=$(find "$temp_dir" -maxdepth 1 -type d -name "${REPO_NAME}-*" 2>/dev/null | head -1)
|
||||
fi
|
||||
|
||||
|
||||
if [ -z "$extracted_dir" ]; then
|
||||
extracted_dir=$(find "$temp_dir" -maxdepth 1 -type d ! -name "$temp_dir" 2>/dev/null | head -1)
|
||||
fi
|
||||
|
||||
|
||||
if [ -z "$extracted_dir" ]; then
|
||||
log_error "Could not find extracted directory"
|
||||
rm -rf "$temp_dir"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
log_success "Release extracted successfully"
|
||||
echo "$extracted_dir"
|
||||
}
|
||||
@@ -291,11 +291,11 @@ download_release() {
|
||||
# Clear the original directory before updating
|
||||
clear_original_directory() {
|
||||
log "Clearing original directory..."
|
||||
|
||||
|
||||
# Remove old lock files and node_modules before update
|
||||
rm -f package-lock.json 2>/dev/null
|
||||
rm -rf node_modules 2>/dev/null
|
||||
|
||||
|
||||
# List of files/directories to preserve (already backed up)
|
||||
local preserve_patterns=(
|
||||
"data"
|
||||
@@ -308,48 +308,48 @@ clear_original_directory() {
|
||||
".git"
|
||||
"scripts"
|
||||
)
|
||||
|
||||
|
||||
# Remove all files except preserved ones
|
||||
while IFS= read -r file; do
|
||||
local should_preserve=false
|
||||
local filename=$(basename "$file")
|
||||
|
||||
|
||||
for pattern in "${preserve_patterns[@]}"; do
|
||||
if [[ "$filename" == $pattern ]]; then
|
||||
should_preserve=true
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
if [ "$should_preserve" = false ]; then
|
||||
rm -f "$file"
|
||||
fi
|
||||
done < <(find . -maxdepth 1 -type f ! -name ".*")
|
||||
|
||||
|
||||
# Remove all directories except preserved ones
|
||||
while IFS= read -r dir; do
|
||||
local should_preserve=false
|
||||
local dirname=$(basename "$dir")
|
||||
|
||||
|
||||
for pattern in "${preserve_patterns[@]}"; do
|
||||
if [[ "$dirname" == $pattern ]]; then
|
||||
should_preserve=true
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
if [ "$should_preserve" = false ]; then
|
||||
rm -rf "$dir"
|
||||
fi
|
||||
done < <(find . -maxdepth 1 -type d ! -name "." ! -name "..")
|
||||
|
||||
|
||||
log_success "Original directory cleared"
|
||||
}
|
||||
|
||||
# Restore backup files before building
|
||||
restore_backup_files() {
|
||||
log "Restoring .env, data directory, and scripts directories from backup..."
|
||||
|
||||
|
||||
if [ -d "$BACKUP_DIR" ]; then
|
||||
# Restore .env file
|
||||
if [ -f "$BACKUP_DIR/.env" ]; then
|
||||
@@ -365,7 +365,7 @@ restore_backup_files() {
|
||||
else
|
||||
log_warning "No .env file backup found"
|
||||
fi
|
||||
|
||||
|
||||
# Restore data directory
|
||||
if [ -d "$BACKUP_DIR/data" ]; then
|
||||
if [ -d "data" ]; then
|
||||
@@ -380,24 +380,24 @@ restore_backup_files() {
|
||||
else
|
||||
log_warning "No data directory backup found"
|
||||
fi
|
||||
|
||||
|
||||
# Restore scripts directories
|
||||
local scripts_dirs=("ct" "install" "tools" "vm")
|
||||
for backup_name in "${scripts_dirs[@]}"; do
|
||||
if [ -d "$BACKUP_DIR/$backup_name" ]; then
|
||||
local target_dir="scripts/$backup_name"
|
||||
log "Restoring $target_dir directory from backup..."
|
||||
|
||||
|
||||
# Ensure scripts directory exists
|
||||
if [ ! -d "scripts" ]; then
|
||||
mkdir -p "scripts"
|
||||
fi
|
||||
|
||||
|
||||
# Remove existing directory if it exists
|
||||
if [ -d "$target_dir" ]; then
|
||||
rm -rf "$target_dir"
|
||||
fi
|
||||
|
||||
|
||||
if cp -r "$BACKUP_DIR/$backup_name" "$target_dir"; then
|
||||
log_success "$target_dir directory restored from backup"
|
||||
else
|
||||
@@ -417,7 +417,13 @@ restore_backup_files() {
|
||||
# Verify database was restored correctly
|
||||
verify_database_restored() {
|
||||
log "Verifying database was restored correctly..."
|
||||
|
||||
|
||||
# Ensure data directory exists (will be auto-created by app if needed)
|
||||
if [ ! -d "data" ]; then
|
||||
log "Creating data directory..."
|
||||
mkdir -p data
|
||||
fi
|
||||
|
||||
# Check for both possible database filenames
|
||||
local db_file=""
|
||||
if [ -f "data/database.sqlite" ]; then
|
||||
@@ -425,23 +431,25 @@ verify_database_restored() {
|
||||
elif [ -f "data/settings.db" ]; then
|
||||
db_file="data/settings.db"
|
||||
else
|
||||
log_error "Database file not found after restore! (checked database.sqlite and settings.db)"
|
||||
return 1
|
||||
# Database doesn't exist yet - this is OK for new installations
|
||||
# The app will create it automatically via Prisma migrations
|
||||
log_warning "No existing database file found - will be created automatically on first start"
|
||||
return 0
|
||||
fi
|
||||
|
||||
|
||||
local db_size=$(stat -f%z "$db_file" 2>/dev/null || stat -c%s "$db_file" 2>/dev/null)
|
||||
if [ "$db_size" -eq 0 ]; then
|
||||
log_warning "Database file is empty - will be recreated by Prisma migrations"
|
||||
return 0 # Don't fail the update, let Prisma recreate the database
|
||||
return 0 # Don't fail the update, let Prisma recreate the database
|
||||
fi
|
||||
|
||||
|
||||
log_success "Database verified (file: $db_file, size: $db_size bytes)"
|
||||
}
|
||||
|
||||
# Ensure DATABASE_URL is set in .env file for Prisma
|
||||
ensure_database_url() {
|
||||
log "Ensuring DATABASE_URL is set in .env file..."
|
||||
|
||||
|
||||
# Check if .env file exists
|
||||
if [ ! -f ".env" ]; then
|
||||
log_warning ".env file not found, creating from .env.example..."
|
||||
@@ -452,19 +460,19 @@ ensure_database_url() {
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# Check if DATABASE_URL is already set
|
||||
if grep -q "^DATABASE_URL=" .env; then
|
||||
log "DATABASE_URL already exists in .env file"
|
||||
return 0
|
||||
fi
|
||||
|
||||
|
||||
# Add DATABASE_URL to .env file
|
||||
log "Adding DATABASE_URL to .env file..."
|
||||
echo "" >> .env
|
||||
echo "# Database" >> .env
|
||||
echo "DATABASE_URL=\"file:./data/settings.db\"" >> .env
|
||||
|
||||
echo "" >>.env
|
||||
echo "# Database" >>.env
|
||||
echo "DATABASE_URL=\"file:./data/settings.db\"" >>.env
|
||||
|
||||
log_success "DATABASE_URL added to .env file"
|
||||
}
|
||||
|
||||
@@ -481,11 +489,9 @@ check_service() {
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
# Stop the application before updating
|
||||
stop_application() {
|
||||
|
||||
|
||||
|
||||
# Change to the application directory if we're not already there
|
||||
local app_dir
|
||||
if [ -f "package.json" ] && [ -f "server.js" ]; then
|
||||
@@ -503,9 +509,9 @@ stop_application() {
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
log "Working from application directory: $(pwd)"
|
||||
|
||||
|
||||
# Check if systemd service is running and disable it temporarily
|
||||
if check_service && systemctl is-active --quiet pvescriptslocal.service; then
|
||||
log "Disabling systemd service temporarily to prevent auto-restart..."
|
||||
@@ -518,7 +524,7 @@ stop_application() {
|
||||
else
|
||||
log "No running systemd service found"
|
||||
fi
|
||||
|
||||
|
||||
# Kill any remaining npm/node processes
|
||||
log "Killing any remaining npm/node processes..."
|
||||
local pids
|
||||
@@ -537,9 +543,9 @@ stop_application() {
|
||||
# Update application files
|
||||
update_files() {
|
||||
local source_dir="$1"
|
||||
|
||||
|
||||
log "Updating application files..."
|
||||
|
||||
|
||||
# List of files/directories to exclude from update
|
||||
local exclude_patterns=(
|
||||
"data"
|
||||
@@ -555,48 +561,48 @@ update_files() {
|
||||
"scripts/tools"
|
||||
"scripts/vm"
|
||||
)
|
||||
|
||||
|
||||
# Find the actual source directory (strip the top-level directory)
|
||||
local actual_source_dir
|
||||
actual_source_dir=$(find "$source_dir" -maxdepth 1 -type d -name "community-scripts-ProxmoxVE-Local-*" | head -1)
|
||||
|
||||
|
||||
if [ -z "$actual_source_dir" ]; then
|
||||
log_error "Could not find the actual source directory in $source_dir"
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
# Verify critical files exist in source
|
||||
if [ ! -f "$actual_source_dir/package.json" ]; then
|
||||
log_error "package.json not found in source directory!"
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
# Use process substitution instead of pipe to avoid subshell issues
|
||||
local files_copied=0
|
||||
local files_excluded=0
|
||||
|
||||
|
||||
# Create a temporary file list to avoid process substitution issues
|
||||
local file_list="/tmp/file_list_$$.txt"
|
||||
find "$actual_source_dir" -type f > "$file_list"
|
||||
|
||||
find "$actual_source_dir" -type f >"$file_list"
|
||||
|
||||
while IFS= read -r file; do
|
||||
local rel_path="${file#$actual_source_dir/}"
|
||||
local should_exclude=false
|
||||
|
||||
|
||||
for pattern in "${exclude_patterns[@]}"; do
|
||||
if [[ "$rel_path" == $pattern ]] || [[ "$rel_path" == $pattern/* ]]; then
|
||||
should_exclude=true
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
if [ "$should_exclude" = false ]; then
|
||||
local target_dir
|
||||
target_dir=$(dirname "$rel_path")
|
||||
if [ "$target_dir" != "." ]; then
|
||||
mkdir -p "$target_dir"
|
||||
fi
|
||||
|
||||
|
||||
if ! cp "$file" "$rel_path"; then
|
||||
log_error "Failed to copy $rel_path"
|
||||
rm -f "$file_list"
|
||||
@@ -606,48 +612,47 @@ update_files() {
|
||||
else
|
||||
files_excluded=$((files_excluded + 1))
|
||||
fi
|
||||
done < "$file_list"
|
||||
|
||||
done <"$file_list"
|
||||
|
||||
# Clean up temporary file
|
||||
rm -f "$file_list"
|
||||
|
||||
|
||||
# Verify critical files were copied
|
||||
if [ ! -f "package.json" ]; then
|
||||
log_error "package.json was not copied to target directory!"
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
if [ ! -f "package-lock.json" ]; then
|
||||
log_warning "package-lock.json was not copied!"
|
||||
fi
|
||||
|
||||
|
||||
log_success "Application files updated successfully ($files_copied files)"
|
||||
}
|
||||
|
||||
|
||||
# Install dependencies and build
|
||||
install_and_build() {
|
||||
log "Installing dependencies..."
|
||||
|
||||
|
||||
# Verify package.json exists
|
||||
if [ ! -f "package.json" ]; then
|
||||
log_error "package.json not found! Cannot install dependencies."
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
if [ ! -f "package-lock.json" ]; then
|
||||
log_warning "No package-lock.json found, npm will generate one"
|
||||
fi
|
||||
|
||||
|
||||
# Create temporary file for npm output
|
||||
local npm_log="/tmp/npm_install_$$.log"
|
||||
|
||||
|
||||
# Ensure NODE_ENV is not set to production during install (we need devDependencies for build)
|
||||
local old_node_env="${NODE_ENV:-}"
|
||||
export NODE_ENV=development
|
||||
|
||||
|
||||
# Run npm install to get ALL dependencies including devDependencies
|
||||
if ! npm install --include=dev > "$npm_log" 2>&1; then
|
||||
if ! npm install --include=dev >"$npm_log" 2>&1; then
|
||||
log_error "Failed to install dependencies"
|
||||
log_error "npm install output (last 30 lines):"
|
||||
tail -30 "$npm_log" | while read -r line; do
|
||||
@@ -656,20 +661,20 @@ install_and_build() {
|
||||
rm -f "$npm_log"
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
# Restore NODE_ENV
|
||||
if [ -n "$old_node_env" ]; then
|
||||
export NODE_ENV="$old_node_env"
|
||||
else
|
||||
unset NODE_ENV
|
||||
fi
|
||||
|
||||
|
||||
log_success "Dependencies installed successfully"
|
||||
rm -f "$npm_log"
|
||||
|
||||
|
||||
# Generate Prisma client
|
||||
log "Generating Prisma client..."
|
||||
if ! npx prisma generate > "$npm_log" 2>&1; then
|
||||
if ! npx prisma generate >"$npm_log" 2>&1; then
|
||||
log_error "Failed to generate Prisma client"
|
||||
log_error "Prisma generate output:"
|
||||
cat "$npm_log" | while read -r line; do
|
||||
@@ -679,7 +684,7 @@ install_and_build() {
|
||||
return 1
|
||||
fi
|
||||
log_success "Prisma client generated successfully"
|
||||
|
||||
|
||||
# Check if Prisma migrations exist and are compatible
|
||||
if [ -d "prisma/migrations" ]; then
|
||||
log "Existing migration history detected"
|
||||
@@ -688,10 +693,10 @@ install_and_build() {
|
||||
else
|
||||
log_warning "No existing migration history found - this may be a fresh install"
|
||||
fi
|
||||
|
||||
|
||||
# Run Prisma migrations
|
||||
log "Running Prisma migrations..."
|
||||
if ! npx prisma migrate deploy > "$npm_log" 2>&1; then
|
||||
if ! npx prisma migrate deploy >"$npm_log" 2>&1; then
|
||||
log_warning "Prisma migrations failed or no migrations to run"
|
||||
log "Prisma migrate output:"
|
||||
cat "$npm_log" | while read -r line; do
|
||||
@@ -701,15 +706,18 @@ install_and_build() {
|
||||
log_success "Prisma migrations completed successfully"
|
||||
fi
|
||||
rm -f "$npm_log"
|
||||
|
||||
|
||||
log "Building application..."
|
||||
# Set NODE_ENV to production for build
|
||||
export NODE_ENV=production
|
||||
|
||||
# Unset TURBOPACK to prevent "Multiple bundler flags" error with --webpack
|
||||
unset TURBOPACK 2>/dev/null || true
|
||||
export TURBOPACK=''
|
||||
|
||||
# Create temporary file for npm build output
|
||||
local build_log="/tmp/npm_build_$$.log"
|
||||
|
||||
if ! npm run build > "$build_log" 2>&1; then
|
||||
|
||||
if ! TURBOPACK='' npm run build >"$build_log" 2>&1; then
|
||||
log_error "Failed to build application"
|
||||
log_error "npm run build output:"
|
||||
cat "$build_log" | while read -r line; do
|
||||
@@ -718,18 +726,18 @@ install_and_build() {
|
||||
rm -f "$build_log"
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
# Log success and clean up
|
||||
log_success "Application built successfully"
|
||||
rm -f "$build_log"
|
||||
|
||||
|
||||
log_success "Dependencies installed and application built successfully"
|
||||
}
|
||||
|
||||
# Start the application after updating
|
||||
start_application() {
|
||||
log "Starting application..."
|
||||
|
||||
|
||||
# Use the global variable to determine how to start
|
||||
if [ "$SERVICE_WAS_RUNNING" = true ] && check_service; then
|
||||
log "Service was running before update, re-enabling and starting systemd service..."
|
||||
@@ -761,11 +769,11 @@ start_application() {
|
||||
# Start application with npm
|
||||
start_with_npm() {
|
||||
log "Starting application with npm start..."
|
||||
|
||||
|
||||
# Start in background
|
||||
nohup npm start > server.log 2>&1 &
|
||||
nohup npm start >server.log 2>&1 &
|
||||
local npm_pid=$!
|
||||
|
||||
|
||||
# Wait a moment and check if it started
|
||||
sleep 3
|
||||
if kill -0 $npm_pid 2>/dev/null; then
|
||||
@@ -776,13 +784,30 @@ start_with_npm() {
|
||||
fi
|
||||
}
|
||||
|
||||
# Re-enable the systemd service on failure to prevent users from being locked out
|
||||
re_enable_service_on_failure() {
|
||||
if check_service; then
|
||||
log "Re-enabling systemd service after failure..."
|
||||
if systemctl enable pvescriptslocal.service 2>/dev/null; then
|
||||
log_success "Service re-enabled"
|
||||
if systemctl start pvescriptslocal.service 2>/dev/null; then
|
||||
log_success "Service started"
|
||||
else
|
||||
log_warning "Failed to start service - manual intervention may be required"
|
||||
fi
|
||||
else
|
||||
log_warning "Failed to re-enable service - manual intervention may be required"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Rollback function
|
||||
rollback() {
|
||||
log_warning "Rolling back to previous version..."
|
||||
|
||||
|
||||
if [ -d "$BACKUP_DIR" ]; then
|
||||
log "Restoring from backup directory: $BACKUP_DIR"
|
||||
|
||||
|
||||
# Restore data directory
|
||||
if [ -d "$BACKUP_DIR/data" ]; then
|
||||
log "Restoring data directory..."
|
||||
@@ -797,7 +822,7 @@ rollback() {
|
||||
else
|
||||
log_warning "No data directory backup found"
|
||||
fi
|
||||
|
||||
|
||||
# Restore .env file
|
||||
if [ -f "$BACKUP_DIR/.env" ]; then
|
||||
log "Restoring .env file..."
|
||||
@@ -812,24 +837,24 @@ rollback() {
|
||||
else
|
||||
log_warning "No .env file backup found"
|
||||
fi
|
||||
|
||||
|
||||
# Restore scripts directories
|
||||
local scripts_dirs=("ct" "install" "tools" "vm")
|
||||
for backup_name in "${scripts_dirs[@]}"; do
|
||||
if [ -d "$BACKUP_DIR/$backup_name" ]; then
|
||||
local target_dir="scripts/$backup_name"
|
||||
log "Restoring $target_dir directory from backup..."
|
||||
|
||||
|
||||
# Ensure scripts directory exists
|
||||
if [ ! -d "scripts" ]; then
|
||||
mkdir -p "scripts"
|
||||
fi
|
||||
|
||||
|
||||
# Remove existing directory if it exists
|
||||
if [ -d "$target_dir" ]; then
|
||||
rm -rf "$target_dir"
|
||||
fi
|
||||
|
||||
|
||||
if mv "$BACKUP_DIR/$backup_name" "$target_dir"; then
|
||||
log_success "$target_dir directory restored from backup"
|
||||
else
|
||||
@@ -839,14 +864,17 @@ rollback() {
|
||||
log_warning "No $backup_name directory backup found"
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
# Clean up backup directory
|
||||
log "Cleaning up backup directory..."
|
||||
rm -rf "$BACKUP_DIR"
|
||||
else
|
||||
log_error "No backup directory found for rollback"
|
||||
fi
|
||||
|
||||
|
||||
# Re-enable the service so users aren't locked out
|
||||
re_enable_service_on_failure
|
||||
|
||||
log_error "Update failed. Please check the logs and try again."
|
||||
exit 1
|
||||
}
|
||||
@@ -865,14 +893,14 @@ check_node_version() {
|
||||
|
||||
log "Detected Node.js version: $current"
|
||||
|
||||
if (( major_version < 24 )); then
|
||||
if ((major_version == 24)); then
|
||||
log_success "Node.js 24 already installed"
|
||||
elif ((major_version < 24)); then
|
||||
log_warning "Node.js < 24 detected → upgrading to Node.js 24 LTS..."
|
||||
upgrade_node_to_24
|
||||
elif (( major_version > 24 )); then
|
||||
else
|
||||
log_warning "Node.js > 24 detected → script tested only up to Node 24"
|
||||
log "Continuing anyway…"
|
||||
else
|
||||
log_success "Node.js 24 already installed"
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -880,22 +908,39 @@ check_node_version() {
|
||||
upgrade_node_to_24() {
|
||||
log "Preparing Node.js 24 upgrade…"
|
||||
|
||||
# Remove old nodesource repo if it exists
|
||||
# Remove old nodesource repo files if they exist
|
||||
if [ -f /etc/apt/sources.list.d/nodesource.list ]; then
|
||||
log "Removing old nodesource.list file..."
|
||||
rm -f /etc/apt/sources.list.d/nodesource.list
|
||||
fi
|
||||
if [ -f /etc/apt/sources.list.d/nodesource.sources ]; then
|
||||
log "Removing old nodesource.sources file..."
|
||||
rm -f /etc/apt/sources.list.d/nodesource.sources
|
||||
fi
|
||||
|
||||
# Update apt cache first
|
||||
log "Updating apt cache..."
|
||||
apt-get update >>"$LOG_FILE" 2>&1 || true
|
||||
|
||||
# Install NodeSource repo for Node.js 24
|
||||
curl -fsSL https://deb.nodesource.com/setup_24.x -o /tmp/node24_setup.sh
|
||||
if ! bash /tmp/node24_setup.sh > /tmp/node24_setup.log 2>&1; then
|
||||
log "Downloading Node.js 24 setup script..."
|
||||
if ! curl -fsSL https://deb.nodesource.com/setup_24.x -o /tmp/node24_setup.sh; then
|
||||
log_error "Failed to download Node.js 24 setup script"
|
||||
re_enable_service_on_failure
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! bash /tmp/node24_setup.sh >/tmp/node24_setup.log 2>&1; then
|
||||
log_error "Failed to configure Node.js 24 repository"
|
||||
tail -20 /tmp/node24_setup.log | while read -r line; do log_error "$line"; done
|
||||
re_enable_service_on_failure
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "Installing Node.js 24…"
|
||||
if ! apt-get install -y nodejs >> "$LOG_FILE" 2>&1; then
|
||||
if ! apt-get install -y nodejs >>"$LOG_FILE" 2>&1; then
|
||||
log_error "Failed to install Node.js 24"
|
||||
re_enable_service_on_failure
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -912,21 +957,21 @@ main() {
|
||||
init_log
|
||||
log "Running as detached process"
|
||||
sleep 3
|
||||
|
||||
|
||||
else
|
||||
init_log
|
||||
fi
|
||||
|
||||
|
||||
# Check if we're running from the application directory and not already relocated
|
||||
if [ -z "${PVE_UPDATE_RELOCATED:-}" ] && [ -f "package.json" ] && [ -f "server.js" ]; then
|
||||
log "Detected running from application directory"
|
||||
bash "$0" --relocated
|
||||
exit $?
|
||||
fi
|
||||
|
||||
|
||||
# Ensure we're in the application directory
|
||||
local app_dir
|
||||
|
||||
|
||||
# First check if we're already in the right directory
|
||||
if [ -f "package.json" ] && [ -f "server.js" ]; then
|
||||
app_dir="$(pwd)"
|
||||
@@ -943,79 +988,76 @@ main() {
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# Check dependencies
|
||||
check_dependencies
|
||||
|
||||
|
||||
# Load GitHub token for higher rate limits
|
||||
load_github_token
|
||||
|
||||
|
||||
# Check if service was running before update
|
||||
if check_service && systemctl is-active --quiet pvescriptslocal.service; then
|
||||
SERVICE_WAS_RUNNING=true
|
||||
else
|
||||
SERVICE_WAS_RUNNING=false
|
||||
fi
|
||||
|
||||
|
||||
# Get latest release info
|
||||
local release_info
|
||||
release_info=$(get_latest_release)
|
||||
|
||||
|
||||
# Backup data directory
|
||||
backup_data
|
||||
|
||||
|
||||
# Stop the application before updating
|
||||
stop_application
|
||||
|
||||
# Check Node.js version
|
||||
check_node_version
|
||||
|
||||
#Update Node.js to 24
|
||||
upgrade_node_to_24
|
||||
|
||||
# Download and extract release
|
||||
local source_dir
|
||||
source_dir=$(download_release "$release_info")
|
||||
|
||||
|
||||
# Clear the original directory before updating
|
||||
clear_original_directory
|
||||
|
||||
|
||||
# Update files
|
||||
if ! update_files "$source_dir"; then
|
||||
log_error "File update failed, rolling back..."
|
||||
rollback
|
||||
fi
|
||||
|
||||
|
||||
# Restore .env and data directory before building
|
||||
restore_backup_files
|
||||
|
||||
|
||||
# Verify database was restored correctly
|
||||
if ! verify_database_restored; then
|
||||
log_error "Database verification failed, rolling back..."
|
||||
rollback
|
||||
fi
|
||||
|
||||
|
||||
# Ensure DATABASE_URL is set for Prisma
|
||||
ensure_database_url
|
||||
|
||||
|
||||
# Install dependencies and build
|
||||
if ! install_and_build; then
|
||||
log_error "Install and build failed, rolling back..."
|
||||
rollback
|
||||
fi
|
||||
|
||||
|
||||
# Start the application
|
||||
if ! start_application; then
|
||||
log_error "Failed to start application after update"
|
||||
rollback
|
||||
fi
|
||||
|
||||
|
||||
# Cleanup only after successful start
|
||||
rm -rf "$source_dir"
|
||||
rm -rf "/tmp/pve-update-$$"
|
||||
rm -rf "$BACKUP_DIR"
|
||||
log "Backup directory cleaned up"
|
||||
|
||||
|
||||
log_success "Update completed successfully!"
|
||||
}
|
||||
|
||||
@@ -1023,4 +1065,4 @@ main() {
|
||||
if ! main "$@"; then
|
||||
log_error "Update script failed with exit code $?"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
Reference in New Issue
Block a user