Compare commits

...

29 Commits

Author SHA1 Message Date
Michel Roegl-Brunner
f4aa8661c4 Bump all .func scripts to be in line with the new core 2025-12-04 10:06:17 +01:00
Michel Roegl-Brunner
f0b5956b54 Merge pull request #374 from community-scripts/dependabot/npm_and_yarn/next-16.0.6
build(deps): Bump next from 16.0.5 to 16.0.6
2025-12-03 10:31:02 +01:00
Michel Roegl-Brunner
e5000246b3 Merge pull request #375 from community-scripts/dependabot/npm_and_yarn/vitest/coverage-v8-4.0.15
build(deps-dev): Bump @vitest/coverage-v8 from 4.0.14 to 4.0.15
2025-12-03 10:30:55 +01:00
Michel Roegl-Brunner
9dacf1e530 Merge pull request #376 from community-scripts/dependabot/npm_and_yarn/typescript-eslint-8.48.1
build(deps-dev): Bump typescript-eslint from 8.48.0 to 8.48.1
2025-12-03 10:30:48 +01:00
dependabot[bot]
f248ed2875 build(deps-dev): Bump typescript-eslint from 8.48.0 to 8.48.1
Bumps [typescript-eslint](https://github.com/typescript-eslint/typescript-eslint/tree/HEAD/packages/typescript-eslint) from 8.48.0 to 8.48.1.
- [Release notes](https://github.com/typescript-eslint/typescript-eslint/releases)
- [Changelog](https://github.com/typescript-eslint/typescript-eslint/blob/main/packages/typescript-eslint/CHANGELOG.md)
- [Commits](https://github.com/typescript-eslint/typescript-eslint/commits/v8.48.1/packages/typescript-eslint)

---
updated-dependencies:
- dependency-name: typescript-eslint
  dependency-version: 8.48.1
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-02 19:39:14 +00:00
dependabot[bot]
4e6295885b build(deps-dev): Bump @vitest/coverage-v8 from 4.0.14 to 4.0.15
Bumps [@vitest/coverage-v8](https://github.com/vitest-dev/vitest/tree/HEAD/packages/coverage-v8) from 4.0.14 to 4.0.15.
- [Release notes](https://github.com/vitest-dev/vitest/releases)
- [Commits](https://github.com/vitest-dev/vitest/commits/v4.0.15/packages/coverage-v8)

---
updated-dependencies:
- dependency-name: "@vitest/coverage-v8"
  dependency-version: 4.0.15
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-02 19:38:54 +00:00
dependabot[bot]
2357232cae build(deps): Bump next from 16.0.5 to 16.0.6
Bumps [next](https://github.com/vercel/next.js) from 16.0.5 to 16.0.6.
- [Release notes](https://github.com/vercel/next.js/releases)
- [Changelog](https://github.com/vercel/next.js/blob/canary/release.js)
- [Commits](https://github.com/vercel/next.js/compare/v16.0.5...v16.0.6)

---
updated-dependencies:
- dependency-name: next
  dependency-version: 16.0.6
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-02 19:38:31 +00:00
Michel Roegl-Brunner
39d8115dda Merge pull request #369 from community-scripts/dependabot/npm_and_yarn/prettier-plugin-tailwindcss-0.7.2 2025-12-02 18:39:34 +01:00
dependabot[bot]
bd71b04a9d build(deps-dev): Bump prettier-plugin-tailwindcss from 0.7.1 to 0.7.2
Bumps [prettier-plugin-tailwindcss](https://github.com/tailwindlabs/prettier-plugin-tailwindcss) from 0.7.1 to 0.7.2.
- [Release notes](https://github.com/tailwindlabs/prettier-plugin-tailwindcss/releases)
- [Changelog](https://github.com/tailwindlabs/prettier-plugin-tailwindcss/blob/main/CHANGELOG.md)
- [Commits](https://github.com/tailwindlabs/prettier-plugin-tailwindcss/compare/v0.7.1...v0.7.2)

---
updated-dependencies:
- dependency-name: prettier-plugin-tailwindcss
  dependency-version: 0.7.2
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-02 10:12:17 +00:00
Michel Roegl-Brunner
c0b03cd832 Merge pull request #370 from community-scripts/dependabot/npm_and_yarn/tsx-4.21.0
build(deps-dev): Bump tsx from 4.20.6 to 4.21.0
2025-12-02 11:11:23 +01:00
Michel Roegl-Brunner
9b7c740145 Merge pull request #371 from community-scripts/dependabot/npm_and_yarn/prettier-3.7.3
build(deps-dev): Bump prettier from 3.7.1 to 3.7.3
2025-12-02 11:11:16 +01:00
Michel Roegl-Brunner
4f929fb8da Merge pull request #372 from community-scripts/dependabot/npm_and_yarn/eslint-config-next-16.0.6
build(deps-dev): Bump eslint-config-next from 16.0.5 to 16.0.6
2025-12-02 11:11:06 +01:00
Michel Roegl-Brunner
24ee87d14e Merge pull request #373 from community-scripts/dependabot/npm_and_yarn/better-sqlite3-12.5.0
build(deps): Bump better-sqlite3 from 12.4.6 to 12.5.0
2025-12-02 11:10:58 +01:00
dependabot[bot]
55862628fb build(deps): Bump better-sqlite3 from 12.4.6 to 12.5.0
Bumps [better-sqlite3](https://github.com/WiseLibs/better-sqlite3) from 12.4.6 to 12.5.0.
- [Release notes](https://github.com/WiseLibs/better-sqlite3/releases)
- [Commits](https://github.com/WiseLibs/better-sqlite3/compare/v12.4.6...v12.5.0)

---
updated-dependencies:
- dependency-name: better-sqlite3
  dependency-version: 12.5.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-01 22:42:45 +00:00
dependabot[bot]
fbd731f020 build(deps-dev): Bump eslint-config-next from 16.0.5 to 16.0.6
Bumps [eslint-config-next](https://github.com/vercel/next.js/tree/HEAD/packages/eslint-config-next) from 16.0.5 to 16.0.6.
- [Release notes](https://github.com/vercel/next.js/releases)
- [Changelog](https://github.com/vercel/next.js/blob/canary/release.js)
- [Commits](https://github.com/vercel/next.js/commits/v16.0.6/packages/eslint-config-next)

---
updated-dependencies:
- dependency-name: eslint-config-next
  dependency-version: 16.0.6
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-01 22:42:27 +00:00
dependabot[bot]
a8b750ad75 build(deps-dev): Bump prettier from 3.7.1 to 3.7.3
Bumps [prettier](https://github.com/prettier/prettier) from 3.7.1 to 3.7.3.
- [Release notes](https://github.com/prettier/prettier/releases)
- [Changelog](https://github.com/prettier/prettier/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prettier/prettier/compare/3.7.1...3.7.3)

---
updated-dependencies:
- dependency-name: prettier
  dependency-version: 3.7.3
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-01 22:42:03 +00:00
dependabot[bot]
1054b6d2f5 build(deps-dev): Bump tsx from 4.20.6 to 4.21.0
Bumps [tsx](https://github.com/privatenumber/tsx) from 4.20.6 to 4.21.0.
- [Release notes](https://github.com/privatenumber/tsx/releases)
- [Changelog](https://github.com/privatenumber/tsx/blob/master/release.config.cjs)
- [Commits](https://github.com/privatenumber/tsx/compare/v4.20.6...v4.21.0)

---
updated-dependencies:
- dependency-name: tsx
  dependency-version: 4.21.0
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-01 22:41:52 +00:00
github-actions[bot]
669ce41c2e chore: add VERSION v0.5.1 (#366)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2025-12-01 09:35:27 +00:00
Michel Roegl-Brunner
7c4683012f Update update.sh 2025-12-01 10:29:41 +01:00
github-actions[bot]
cfcdc1e342 chore: add VERSION v0.5.1 (#361)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2025-11-29 15:57:44 +00:00
Michel Roegl-Brunner
07cf03a408 Merge pull request #360 from community-scripts/feat/clone_lxc_vm
feat: Add VM/LXC cloning functionality
2025-11-29 16:56:48 +01:00
Michel Roegl-Brunner
dd17d2cbec feat: Add VM/LXC cloning functionality
- Add CloneCountInputModal component for specifying clone count
- Implement clone handlers and state management in InstalledScriptsTab
- Add clone menu item to ScriptInstallationCard
- Extend StorageSelectionModal to support clone storage selection (rootdir only)
- Add clone terminal support to Terminal component
- Implement startSSHCloneExecution in server.js with sequential ID retrieval
- Add clone-related API endpoints (getClusterNextId, getContainerType, getCloneStorages, generateCloneHostnames, executeClone, addClonedContainerToDatabase)
- Integrate with VM/LXC detection from main branch
- Fix storage fetching to use correct serverId parameter
- Fix clone execution to pass storage parameter correctly
- Remove unused eslint-disable comments
2025-11-29 16:53:58 +01:00
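
To make the commit description above concrete, here is a minimal bash sketch of what the clone flow amounts to on the Proxmox side, assuming full clones onto a rootdir-capable storage. It is not the server.js implementation: the variable names and defaults below are illustrative assumptions, and the commit's endpoints (getClusterNextId, getContainerType, executeClone) are only approximated by the corresponding CLI calls.

```bash
#!/usr/bin/env bash
# Illustrative sketch only - not the server.js code referenced in the commit.
# SOURCE_ID, CLONE_COUNT and STORAGE are placeholder assumptions.
set -euo pipefail

SOURCE_ID=105        # container/VM to clone
CLONE_COUNT=2        # value the CloneCountInputModal would collect
STORAGE="local-lvm"  # rootdir-capable storage picked in StorageSelectionModal

# Classify the source as LXC or VM (rough equivalent of getContainerType)
if pct config "$SOURCE_ID" >/dev/null 2>&1; then TYPE="lxc"; else TYPE="vm"; fi

for ((i = 1; i <= CLONE_COUNT; i++)); do
  # Sequential ID retrieval: ask the cluster for the next free VMID per clone
  NEW_ID=$(pvesh get /cluster/nextid)
  NEW_NAME="clone-${SOURCE_ID}-${i}"
  if [[ "$TYPE" == "lxc" ]]; then
    pct clone "$SOURCE_ID" "$NEW_ID" --hostname "$NEW_NAME" --storage "$STORAGE" --full 1
  else
    qm clone "$SOURCE_ID" "$NEW_ID" --name "$NEW_NAME" --storage "$STORAGE" --full 1
  fi
  echo "Created $TYPE clone $NEW_ID ($NEW_NAME)"
done
```

Requesting the next free ID immediately before each clone call is the "sequential ID retrieval" the commit refers to: each new container or VM reserves its ID before the next one is requested.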
Michel Roegl-Brunner
f3d14c6746 Merge pull request #359 from community-scripts/fix/352
fix: align toggle switches in repository settings
2025-11-29 16:14:07 +01:00
Michel Roegl-Brunner
447332e558 fix: align toggle switches in repository settings
- Remove fixed label width from Toggle component
- Move delete button to left of toggle switches
- Add matching border/padding to 'Enable after adding' section to align with repository items
- Ensure all toggles have consistent right-side alignment
2025-11-29 16:12:20 +01:00
Michel Roegl-Brunner
9bbc19ae44 Merge pull request #358 from community-scripts/fix/357_356
fix: Add dynamic text to container control loading modal
2025-11-29 16:00:49 +01:00
Michel Roegl-Brunner
5564ae0393 fix: add dynamic text to container control loading modal
- Update LoadingModal to display action text (Starting/Stopping LXC/VM)
- Update handleStartStop to include container type (LXC/VM) in action text
- Show clear feedback when starting or stopping containers
2025-11-29 15:58:30 +01:00
Michel Roegl-Brunner
93d7842f6c feat: implement batch container type detection for performance optimization
- Add batchDetectContainerTypes() helper function that uses pct list and qm list to detect all container types in 2 SSH calls per server
- Update getAllInstalledScripts to use batch detection instead of individual isVM() calls per script
- Update getInstalledScriptsByServer to use batch detection for single server
- Update database queries to include lxc_config relation for fallback detection
- Fix isVM() function to properly default to LXC when VM config doesn't exist
- Significantly improves performance: reduces from N SSH calls per script to 2 SSH calls per server
2025-11-29 15:55:43 +01:00
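
For context, the two SSH calls the batch detection relies on are simply pct list and qm list; everything after that is local parsing. The snippet below is a minimal bash sketch of that idea, assuming key-based SSH access to the node; the real logic lives in server.js and its helper names are only paraphrased here.

```bash
#!/usr/bin/env bash
# Illustrative sketch only - the real batch detection lives in server.js.
# HOST is a placeholder; assumes key-based SSH access to the Proxmox node.
set -euo pipefail
HOST="root@pve.example.lan"

declare -A TYPE_BY_ID

# SSH call 1: list every LXC container on the node (skip the header row)
while read -r vmid _; do
  if [[ "$vmid" =~ ^[0-9]+$ ]]; then TYPE_BY_ID["$vmid"]="lxc"; fi
done < <(ssh "$HOST" pct list | tail -n +2)

# SSH call 2: list every VM on the node (skip the header row)
while read -r vmid _; do
  if [[ "$vmid" =~ ^[0-9]+$ ]]; then TYPE_BY_ID["$vmid"]="vm"; fi
done < <(ssh "$HOST" qm list | tail -n +2)

# Installed scripts can now be classified from this map with no further SSH
# round trips; IDs that appear in neither list default to "lxc".
for id in "${!TYPE_BY_ID[@]}"; do
  echo "$id -> ${TYPE_BY_ID[$id]}"
done
```

With the results cached per server, classifying N installed scripts needs no additional SSH round trips, which is where the stated reduction from N calls per script to 2 calls per server comes from.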
Michel Roegl-Brunner
84c02048bc Fix a false detection as a VM when it is an LXC 2025-11-29 15:41:49 +01:00
github-actions[bot]
66a3bb3203 chore: add VERSION v0.5.0 (#355)
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
2025-11-28 14:04:05 +00:00
25 changed files with 7092 additions and 2724 deletions

View File

@@ -1 +1 @@
0.4.13
0.5.1

937 package-lock.json (generated)

File diff suppressed because it is too large

View File

@@ -27,7 +27,6 @@
"dependencies": {
"@prisma/adapter-better-sqlite3": "^7.0.1",
"@prisma/client": "^7.0.1",
"better-sqlite3": "^12.4.6",
"@radix-ui/react-dropdown-menu": "^2.1.16",
"@radix-ui/react-slot": "^1.2.4",
"@t3-oss/env-nextjs": "^0.13.8",
@@ -43,13 +42,14 @@
"@xterm/xterm": "^5.5.0",
"axios": "^1.13.2",
"bcryptjs": "^3.0.3",
"better-sqlite3": "^12.5.0",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"cron-validator": "^1.4.0",
"dotenv": "^17.2.3",
"jsonwebtoken": "^9.0.2",
"lucide-react": "^0.555.0",
"next": "^16.0.5",
"next": "^16.0.6",
"node-cron": "^4.2.1",
"node-pty": "^1.0.0",
"react": "^19.2.0",
@@ -78,19 +78,20 @@
"@types/react": "^19.2.7",
"@types/react-dom": "^19.2.3",
"@vitejs/plugin-react": "^5.1.1",
"@vitest/coverage-v8": "^4.0.14",
"@vitest/coverage-v8": "^4.0.15",
"@vitest/ui": "^4.0.14",
"baseline-browser-mapping": "^2.8.32",
"eslint": "^9.39.1",
"eslint-config-next": "^16.0.5",
"eslint-config-next": "^16.0.6",
"jsdom": "^27.2.0",
"postcss": "^8.5.6",
"prettier": "^3.7.1",
"prettier-plugin-tailwindcss": "^0.7.1",
"prettier": "^3.7.3",
"prettier-plugin-tailwindcss": "^0.7.2",
"prisma": "^7.0.1",
"tailwindcss": "^4.1.17",
"tsx": "^4.21.0",
"typescript": "^5.9.3",
"typescript-eslint": "^8.48.0",
"tsx": "^4.19.4",
"typescript-eslint": "^8.48.1",
"vitest": "^4.0.14"
},
"ct3aMetadata": {
@@ -103,4 +104,4 @@
"overrides": {
"prismjs": "^1.30.0"
}
}
}

View File

@@ -6,33 +6,65 @@
if ! command -v curl >/dev/null 2>&1; then
apk update && apk add curl >/dev/null 2>&1
fi
source "$(dirname "${BASH_SOURCE[0]}")/core.func"
source "$(dirname "${BASH_SOURCE[0]}")/error-handler.func"
load_functions
catch_errors
# This function enables IPv6 if it's not disabled and sets verbose mode
verb_ip6() {
set_std_mode # Set STD mode based on VERBOSE
if [ "$DISABLEIPV6" == "yes" ]; then
if [ "${IPV6_METHOD:-}" = "disable" ]; then
msg_info "Disabling IPv6 (this may affect some services)"
$STD sysctl -w net.ipv6.conf.all.disable_ipv6=1
echo "net.ipv6.conf.all.disable_ipv6 = 1" >>/etc/sysctl.conf
$STD sysctl -w net.ipv6.conf.default.disable_ipv6=1
$STD sysctl -w net.ipv6.conf.lo.disable_ipv6=1
mkdir -p /etc/sysctl.d
$STD tee /etc/sysctl.d/99-disable-ipv6.conf >/dev/null <<EOF
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
EOF
$STD rc-update add sysctl default
msg_ok "Disabled IPv6"
fi
}
# This function catches errors and handles them with the error handler function
catch_errors() {
set -Eeuo pipefail
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
set -Eeuo pipefail
trap 'error_handler $? $LINENO "$BASH_COMMAND"' ERR
trap on_exit EXIT
trap on_interrupt INT
trap on_terminate TERM
error_handler() {
local exit_code="$1"
local line_number="$2"
local command="$3"
if [[ "$exit_code" -eq 0 ]]; then
return 0
fi
printf "\e[?25h"
echo -e "\n${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}\n"
exit "$exit_code"
}
# This function handles errors
error_handler() {
on_exit() {
local exit_code="$?"
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message\n"
[[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
exit "$exit_code"
}
on_interrupt() {
echo -e "\n${RD}Interrupted by user (SIGINT)${CL}"
exit 130
}
on_terminate() {
echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}"
exit 143
}
# This function sets up the Container OS by generating the locale, setting the timezone, and checking the network connection
@@ -61,10 +93,10 @@ network_check() {
set +e
trap - ERR
if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
msg_ok "Internet Connected"
ipv4_status="${GN}✔${CL} IPv4"
else
msg_error "Internet NOT Connected"
read -r -p "Would you like to continue anyway? <y/N> " prompt
ipv4_status="${RD}✖${CL} IPv4"
read -r -p "Internet NOT connected. Continue anyway? <y/N> " prompt
if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
else
@@ -73,7 +105,11 @@ network_check() {
fi
fi
RESOLVEDIP=$(getent hosts github.com | awk '{ print $1 }')
if [[ -z "$RESOLVEDIP" ]]; then msg_error "DNS Lookup Failure"; else msg_ok "DNS Resolved github.com to ${BL}$RESOLVEDIP${CL}"; fi
if [[ -z "$RESOLVEDIP" ]]; then
msg_error "Internet: ${ipv4_status} DNS Failed"
else
msg_ok "Internet: ${ipv4_status} DNS: ${BL}${RESOLVEDIP}${CL}"
fi
set -e
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
@@ -82,7 +118,7 @@ network_check() {
update_os() {
msg_info "Updating Container OS"
$STD apk -U upgrade
#source <(curl -fsSL https://git.community-scripts.org/community-scripts/ProxmoxVED/raw/branch/main/misc/tools.func)
source "$(dirname "${BASH_SOURCE[0]}")/tools.func"
msg_ok "Updated Container OS"
}
@@ -154,10 +190,4 @@ EOF
echo "bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${app}.sh)\"" >/usr/bin/update
chmod +x /usr/bin/update
if [[ -n "${SSH_AUTHORIZED_KEY}" ]]; then
mkdir -p /root/.ssh
echo "${SSH_AUTHORIZED_KEY}" >/root/.ssh/authorized_keys
chmod 700 /root/.ssh
chmod 600 /root/.ssh/authorized_keys
fi
}

View File

@@ -2,6 +2,153 @@
# Author: michelroegl-brunner
# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/LICENSE
# ==============================================================================
# API.FUNC - TELEMETRY & DIAGNOSTICS API
# ==============================================================================
#
# Provides functions for sending anonymous telemetry data to Community-Scripts
# API for analytics and diagnostics purposes.
#
# Features:
# - Container/VM creation statistics
# - Installation success/failure tracking
# - Error code mapping and reporting
# - Privacy-respecting anonymous telemetry
#
# Usage:
# source <(curl -fsSL .../api.func)
# post_to_api # Report container creation
# post_update_to_api # Report installation status
#
# Privacy:
# - Only anonymous statistics (no personal data)
# - User can opt-out via diagnostics settings
# - Random UUID for session tracking only
#
# ==============================================================================
# ==============================================================================
# SECTION 1: ERROR CODE DESCRIPTIONS
# ==============================================================================
# ------------------------------------------------------------------------------
# explain_exit_code()
#
# - Maps numeric exit codes to human-readable error descriptions
# - Supports:
# * Generic/Shell errors (1, 2, 126, 127, 128, 130, 137, 139, 143)
# * Package manager errors (APT, DPKG: 100, 101, 255)
# * Node.js/npm errors (243-249, 254)
# * Python/pip/uv errors (210-212)
# * PostgreSQL errors (231-234)
# * MySQL/MariaDB errors (241-244)
# * MongoDB errors (251-254)
# * Proxmox custom codes (200-231)
# - Returns description string for given exit code
# - Shared function with error_handler.func for consistency
# ------------------------------------------------------------------------------
explain_exit_code() {
local code="$1"
case "$code" in
# --- Generic / Shell ---
1) echo "General error / Operation not permitted" ;;
2) echo "Misuse of shell builtins (e.g. syntax error)" ;;
126) echo "Command invoked cannot execute (permission problem?)" ;;
127) echo "Command not found" ;;
128) echo "Invalid argument to exit" ;;
130) echo "Terminated by Ctrl+C (SIGINT)" ;;
137) echo "Killed (SIGKILL / Out of memory?)" ;;
139) echo "Segmentation fault (core dumped)" ;;
143) echo "Terminated (SIGTERM)" ;;
# --- Package manager / APT / DPKG ---
100) echo "APT: Package manager error (broken packages / dependency problems)" ;;
101) echo "APT: Configuration error (bad sources.list, malformed config)" ;;
255) echo "DPKG: Fatal internal error" ;;
# --- Node.js / npm / pnpm / yarn ---
243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;;
245) echo "Node.js: Invalid command-line option" ;;
246) echo "Node.js: Internal JavaScript Parse Error" ;;
247) echo "Node.js: Fatal internal error" ;;
248) echo "Node.js: Invalid C++ addon / N-API failure" ;;
249) echo "Node.js: Inspector error" ;;
254) echo "npm/pnpm/yarn: Unknown fatal error" ;;
# --- Python / pip / uv ---
210) echo "Python: Virtualenv / uv environment missing or broken" ;;
211) echo "Python: Dependency resolution failed" ;;
212) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;;
# --- PostgreSQL ---
231) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;;
232) echo "PostgreSQL: Authentication failed (bad user/password)" ;;
233) echo "PostgreSQL: Database does not exist" ;;
234) echo "PostgreSQL: Fatal error in query / syntax" ;;
# --- MySQL / MariaDB ---
241) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;;
242) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;;
243) echo "MySQL/MariaDB: Database does not exist" ;;
244) echo "MySQL/MariaDB: Fatal error in query / syntax" ;;
# --- MongoDB ---
251) echo "MongoDB: Connection failed (server not running)" ;;
252) echo "MongoDB: Authentication failed (bad user/password)" ;;
253) echo "MongoDB: Database not found" ;;
254) echo "MongoDB: Fatal query error" ;;
# --- Proxmox Custom Codes ---
200) echo "Custom: Failed to create lock file" ;;
203) echo "Custom: Missing CTID variable" ;;
204) echo "Custom: Missing PCT_OSTYPE variable" ;;
205) echo "Custom: Invalid CTID (<100)" ;;
206) echo "Custom: CTID already in use (check 'pct list' and /etc/pve/lxc/)" ;;
207) echo "Custom: Password contains unescaped special characters (-, /, \\, *, etc.)" ;;
208) echo "Custom: Invalid configuration (DNS/MAC/Network format error)" ;;
209) echo "Custom: Container creation failed (check logs for pct create output)" ;;
210) echo "Custom: Cluster not quorate" ;;
211) echo "Custom: Timeout waiting for template lock (concurrent download in progress)" ;;
214) echo "Custom: Not enough storage space" ;;
215) echo "Custom: Container created but not listed (ghost state - check /etc/pve/lxc/)" ;;
216) echo "Custom: RootFS entry missing in config (incomplete creation)" ;;
217) echo "Custom: Storage does not support rootdir (check storage capabilities)" ;;
218) echo "Custom: Template file corrupted or incomplete download (size <1MB or invalid archive)" ;;
220) echo "Custom: Unable to resolve template path" ;;
221) echo "Custom: Template file exists but not readable (check file permissions)" ;;
222) echo "Custom: Template download failed after 3 attempts (network/storage issue)" ;;
223) echo "Custom: Template not available after download (storage sync issue)" ;;
225) echo "Custom: No template available for OS/Version (check 'pveam available')" ;;
231) echo "Custom: LXC stack upgrade/retry failed (outdated pve-container - check https://github.com/community-scripts/ProxmoxVE/discussions/8126)" ;;
# --- Default ---
*) echo "Unknown error" ;;
esac
}
# ==============================================================================
# SECTION 2: TELEMETRY FUNCTIONS
# ==============================================================================
# ------------------------------------------------------------------------------
# post_to_api()
#
# - Sends LXC container creation statistics to Community-Scripts API
# - Only executes if:
# * curl is available
# * DIAGNOSTICS=yes
# * RANDOM_UUID is set
# - Payload includes:
# * Container type, disk size, CPU cores, RAM
# * OS type and version
# * IPv6 disable status
# * Application name (NSAPP)
# * Installation method
# * PVE version
# * Status: "installing"
# * Random UUID for session tracking
# - Anonymous telemetry (no personal data)
# ------------------------------------------------------------------------------
post_to_api() {
if ! command -v curl &>/dev/null; then
@@ -30,7 +177,6 @@ post_to_api() {
"ram_size": $RAM_SIZE,
"os_type": "$var_os",
"os_version": "$var_version",
"disableip6": "",
"nsapp": "$NSAPP",
"method": "$METHOD(PVE-Local)",
"pve_version": "$pve_version",
@@ -39,14 +185,26 @@ post_to_api() {
}
EOF
)
if [[ "$DIAGNOSTICS" == "yes" ]]; then
RESPONSE=$(curl -s -w "%{http_code}" -L -X POST "$API_URL" --post301 --post302 \
-H "Content-Type: application/json" \
-d "$JSON_PAYLOAD") || true
fi
}
# ------------------------------------------------------------------------------
# post_to_api_vm()
#
# - Sends VM creation statistics to Community-Scripts API
# - Similar to post_to_api() but for virtual machines (not containers)
# - Reads DIAGNOSTICS from /usr/local/community-scripts/diagnostics file
# - Payload differences:
# * ct_type=2 (VM instead of LXC)
# * type="vm"
# * Disk size without 'G' suffix (parsed from DISK_SIZE variable)
# - Only executes if DIAGNOSTICS=yes and RANDOM_UUID is set
# ------------------------------------------------------------------------------
post_to_api_vm() {
if [[ ! -f /usr/local/community-scripts/diagnostics ]]; then
@@ -81,7 +239,6 @@ post_to_api_vm() {
"ram_size": $RAM_SIZE,
"os_type": "$var_os",
"os_version": "$var_version",
"disableip6": "",
"nsapp": "$NSAPP",
"method": "$METHOD(PVE-Local)",
"pve_version": "$pve_version",
@@ -90,7 +247,6 @@ post_to_api_vm() {
}
EOF
)
if [[ "$DIAGNOSTICS" == "yes" ]]; then
RESPONSE=$(curl -s -w "%{http_code}" -L -X POST "$API_URL" --post301 --post302 \
-H "Content-Type: application/json" \
@@ -98,19 +254,54 @@ EOF
fi
}
POST_UPDATE_DONE=false
# ------------------------------------------------------------------------------
# post_update_to_api()
#
# - Reports installation completion status to API
# - Prevents duplicate submissions via POST_UPDATE_DONE flag
# - Arguments:
# * $1: status ("success" or "failed")
# * $2: exit_code (default: 1 for failed, 0 for success)
# - Payload includes:
# * Final status (success/failed)
# * Error description via get_error_description()
# * Random UUID for session correlation
# - Only executes once per session
# - Silently returns if:
# * curl not available
# * Already reported (POST_UPDATE_DONE=true)
# * DIAGNOSTICS=no
# ------------------------------------------------------------------------------
post_update_to_api() {
if ! command -v curl &>/dev/null; then
return
fi
# Initialize flag if not set (prevents 'unbound variable' error with set -u)
POST_UPDATE_DONE=${POST_UPDATE_DONE:-false}
if [ "$POST_UPDATE_DONE" = true ]; then
return 0
fi
exit_code=${2:-1}
local API_URL="http://api.community-scripts.org/upload/updatestatus"
local status="${1:-failed}"
local error="${2:-No error message}"
if [[ "$status" == "failed" ]]; then
local exit_code="${2:-1}"
elif [[ "$status" == "success" ]]; then
local exit_code="${2:-0}"
fi
if [[ -z "$exit_code" ]]; then
exit_code=1
fi
error=$(explain_exit_code "$exit_code")
if [ -z "$error" ]; then
error="Unknown error"
fi
JSON_PAYLOAD=$(
cat <<EOF
@@ -121,7 +312,6 @@ post_update_to_api() {
}
EOF
)
if [[ "$DIAGNOSTICS" == "yes" ]]; then
RESPONSE=$(curl -s -w "%{http_code}" -L -X POST "$API_URL" --post301 --post302 \
-H "Content-Type: application/json" \

File diff suppressed because it is too large

View File

@@ -1,699 +0,0 @@
config_file() {
CONFIG_FILE="/opt/community-scripts/.settings"
if [[ -f "/opt/community-scripts/${NSAPP}.conf" ]]; then
CONFIG_FILE="/opt/community-scripts/${NSAPP}.conf"
fi
if CONFIG_FILE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set absolute path to config file" 8 58 "$CONFIG_FILE" --title "CONFIG FILE" 3>&1 1>&2 2>&3); then
if [[ ! -f "$CONFIG_FILE" ]]; then
echo -e "${CROSS}${RD}Config file not found, exiting script!.${CL}"
exit
else
echo -e "${INFO}${BOLD}${DGN}Using config File: ${BGN}$CONFIG_FILE${CL}"
source "$CONFIG_FILE"
fi
fi
if [[ -n "${CT_ID-}" ]]; then
if [[ "$CT_ID" =~ ^([0-9]{3,4})-([0-9]{3,4})$ ]]; then
MIN_ID=${BASH_REMATCH[1]}
MAX_ID=${BASH_REMATCH[2]}
if ((MIN_ID >= MAX_ID)); then
msg_error "Invalid Container ID range. The first number must be smaller than the second number, was ${CT_ID}"
exit
fi
LIST_OF_IDS=$(pvesh get /cluster/resources --type vm --output-format json 2>/dev/null | grep -oP '"vmid":\s*\K\d+') || true
if [[ -n "$LIST_OF_IDS" ]]; then
for ((ID = MIN_ID; ID <= MAX_ID; ID++)); do
if ! grep -q "^$ID$" <<<"$LIST_OF_IDS"; then
CT_ID=$ID
break
fi
done
fi
echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
elif [[ "$CT_ID" =~ ^[0-9]+$ ]]; then
LIST_OF_IDS=$(pvesh get /cluster/resources --type vm --output-format json 2>/dev/null | grep -oP '"vmid":\s*\K\d+') || true
if [[ -n "$LIST_OF_IDS" ]]; then
if ! grep -q "^$CT_ID$" <<<"$LIST_OF_IDS"; then
echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
else
msg_error "Container ID $CT_ID already exists"
exit
fi
else
echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
fi
else
msg_error "Invalid Container ID format. Needs to be 0000-9999 or 0-9999, was ${CT_ID}"
exit
fi
else
if CT_ID=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Container ID" 8 58 "$NEXTID" --title "CONTAINER ID" 3>&1 1>&2 2>&3); then
if [ -z "$CT_ID" ]; then
CT_ID="$NEXTID"
echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
else
echo -e "${CONTAINERID}${BOLD}${DGN}Container ID: ${BGN}$CT_ID${CL}"
fi
else
exit_script
fi
fi
if [[ -n "${CT_TYPE-}" ]]; then
if [[ "$CT_TYPE" -eq 0 ]]; then
CT_TYPE_DESC="Privileged"
elif [[ "$CT_TYPE" -eq 1 ]]; then
CT_TYPE_DESC="Unprivileged"
else
msg_error "Unknown setting for CT_TYPE, should be 1 or 0, was ${CT_TYPE}"
exit
fi
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
else
if CT_TYPE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --title "CONTAINER TYPE" --radiolist "Choose Type" 10 58 2 \
"1" "Unprivileged" ON \
"0" "Privileged" OFF \
3>&1 1>&2 2>&3); then
if [ -n "$CT_TYPE" ]; then
CT_TYPE_DESC="Unprivileged"
if [ "$CT_TYPE" -eq 0 ]; then
CT_TYPE_DESC="Privileged"
fi
echo -e "${CONTAINERTYPE}${BOLD}${DGN}Container Type: ${BGN}$CT_TYPE_DESC${CL}"
fi
else
exit_script
fi
fi
if [[ -n "${PW-}" ]]; then
if [[ "$PW" == "none" ]]; then
PW=""
else
if [[ "$PW" == *" "* ]]; then
msg_error "Password cannot be empty"
exit
elif [[ ${#PW} -lt 5 ]]; then
msg_error "Password must be at least 5 characters long"
exit
else
echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}"
fi
PW="-password $PW"
fi
else
while true; do
if PW1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --passwordbox "\nSet Root Password (needed for root ssh access)" 9 58 --title "PASSWORD (leave blank for automatic login)" 3>&1 1>&2 2>&3); then
if [[ -n "$PW1" ]]; then
if [[ "$PW1" == *" "* ]]; then
whiptail --msgbox "Password cannot contain spaces. Please try again." 8 58
elif [ ${#PW1} -lt 5 ]; then
whiptail --msgbox "Password must be at least 5 characters long. Please try again." 8 58
else
if PW2=$(whiptail --backtitle "Proxmox VE Helper Scripts" --passwordbox "\nVerify Root Password" 9 58 --title "PASSWORD VERIFICATION" 3>&1 1>&2 2>&3); then
if [[ "$PW1" == "$PW2" ]]; then
PW="-password $PW1"
echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}********${CL}"
break
else
whiptail --msgbox "Passwords do not match. Please try again." 8 58
fi
else
exit_script
fi
fi
else
PW1="Automatic Login"
PW=""
echo -e "${VERIFYPW}${BOLD}${DGN}Root Password: ${BGN}$PW1${CL}"
break
fi
else
exit_script
fi
done
fi
if [[ -n "${HN-}" ]]; then
echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
else
if CT_NAME=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Hostname" 8 58 "$NSAPP" --title "HOSTNAME" 3>&1 1>&2 2>&3); then
if [ -z "$CT_NAME" ]; then
HN="$NSAPP"
else
HN=$(echo "${CT_NAME,,}" | tr -d ' ')
fi
echo -e "${HOSTNAME}${BOLD}${DGN}Hostname: ${BGN}$HN${CL}"
else
exit_script
fi
fi
if [[ -n "${DISK_SIZE-}" ]]; then
if [[ "$DISK_SIZE" =~ ^-?[0-9]+$ ]]; then
echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
else
msg_error "DISK_SIZE must be an integer, was ${DISK_SIZE}"
exit
fi
else
if DISK_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Disk Size in GB" 8 58 "$var_disk" --title "DISK SIZE" 3>&1 1>&2 2>&3); then
if [ -z "$DISK_SIZE" ]; then
DISK_SIZE="$var_disk"
echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
else
if ! [[ $DISK_SIZE =~ $INTEGER ]]; then
echo -e "{INFO}${HOLD}${RD} DISK SIZE MUST BE AN INTEGER NUMBER!${CL}"
advanced_settings
fi
echo -e "${DISKSIZE}${BOLD}${DGN}Disk Size: ${BGN}${DISK_SIZE} GB${CL}"
fi
else
exit_script
fi
fi
if [[ -n "${CORE_COUNT-}" ]]; then
if [[ "$CORE_COUNT" =~ ^-?[0-9]+$ ]]; then
echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}${CORE_COUNT}${CL}"
else
msg_error "CORE_COUNT must be an integer, was ${CORE_COUNT}"
exit
fi
else
if CORE_COUNT=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate CPU Cores" 8 58 "$var_cpu" --title "CORE COUNT" 3>&1 1>&2 2>&3); then
if [ -z "$CORE_COUNT" ]; then
CORE_COUNT="$var_cpu"
echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
else
echo -e "${CPUCORE}${BOLD}${DGN}CPU Cores: ${BGN}$CORE_COUNT${CL}"
fi
else
exit_script
fi
fi
if [[ -n "${RAM_SIZE-}" ]]; then
if [[ "$RAM_SIZE" =~ ^-?[0-9]+$ ]]; then
echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
else
msg_error "RAM_SIZE must be an integer, was ${RAM_SIZE}"
exit
fi
else
if RAM_SIZE=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Allocate RAM in MiB" 8 58 "$var_ram" --title "RAM" 3>&1 1>&2 2>&3); then
if [ -z "$RAM_SIZE" ]; then
RAM_SIZE="$var_ram"
echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
else
echo -e "${RAMSIZE}${BOLD}${DGN}RAM Size: ${BGN}${RAM_SIZE} MiB${CL}"
fi
else
exit_script
fi
fi
IFACE_FILEPATH_LIST="/etc/network/interfaces"$'\n'$(find "/etc/network/interfaces.d/" -type f)
BRIDGES=""
OLD_IFS=$IFS
IFS=$'\n'
for iface_filepath in ${IFACE_FILEPATH_LIST}; do
iface_indexes_tmpfile=$(mktemp -q -u '.iface-XXXX')
(grep -Pn '^\s*iface' "${iface_filepath}" | cut -d':' -f1 && wc -l "${iface_filepath}" | cut -d' ' -f1) | awk 'FNR==1 {line=$0; next} {print line":"$0-1; line=$0}' >"${iface_indexes_tmpfile}" || true
if [ -f "${iface_indexes_tmpfile}" ]; then
while read -r pair; do
start=$(echo "${pair}" | cut -d':' -f1)
end=$(echo "${pair}" | cut -d':' -f2)
if awk "NR >= ${start} && NR <= ${end}" "${iface_filepath}" | grep -qP '^\s*(bridge[-_](ports|stp|fd|vlan-aware|vids)|ovs_type\s+OVSBridge)\b'; then
iface_name=$(sed "${start}q;d" "${iface_filepath}" | awk '{print $2}')
BRIDGES="${iface_name}"$'\n'"${BRIDGES}"
fi
done <"${iface_indexes_tmpfile}"
rm -f "${iface_indexes_tmpfile}"
fi
done
IFS=$OLD_IFS
BRIDGES=$(echo "$BRIDGES" | grep -v '^\s*$' | sort | uniq)
if [[ -n "${BRG-}" ]]; then
if echo "$BRIDGES" | grep -q "${BRG}"; then
echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
else
msg_error "Bridge '${BRG}' does not exist in /etc/network/interfaces or /etc/network/interfaces.d/sdn"
exit
fi
else
BRG=$(whiptail --backtitle "Proxmox VE Helper Scripts" --menu "Select network bridge:" 15 40 6 $(echo "$BRIDGES" | awk '{print $0, "Bridge"}') 3>&1 1>&2 2>&3)
if [ -z "$BRG" ]; then
exit_script
else
echo -e "${BRIDGE}${BOLD}${DGN}Bridge: ${BGN}$BRG${CL}"
fi
fi
local ip_cidr_regex='^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})/([0-9]{1,2})$'
local ip_regex='^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$'
if [[ -n ${NET-} ]]; then
if [ "$NET" == "dhcp" ]; then
echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}DHCP${CL}"
echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}Default${CL}"
GATE=""
elif [[ "$NET" =~ $ip_cidr_regex ]]; then
echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}$NET${CL}"
if [[ -n "$GATE" ]]; then
[[ "$GATE" =~ ",gw=" ]] && GATE="${GATE##,gw=}"
if [[ "$GATE" =~ $ip_regex ]]; then
echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE${CL}"
GATE=",gw=$GATE"
else
msg_error "Invalid IP Address format for Gateway. Needs to be 0.0.0.0, was ${GATE}"
exit
fi
else
while true; do
GATE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Enter gateway IP address" 8 58 --title "Gateway IP" 3>&1 1>&2 2>&3)
if [ -z "$GATE1" ]; then
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Gateway IP address cannot be empty" 8 58
elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Invalid IP address format" 8 58
else
GATE=",gw=$GATE1"
echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}"
break
fi
done
fi
elif [[ "$NET" == *-* ]]; then
IFS="-" read -r ip_start ip_end <<<"$NET"
if [[ ! "$ip_start" =~ $ip_cidr_regex ]] || [[ ! "$ip_end" =~ $ip_cidr_regex ]]; then
msg_error "Invalid IP range format, was $NET should be 0.0.0.0/0-0.0.0.0/0"
exit 1
fi
ip1="${ip_start%%/*}"
ip2="${ip_end%%/*}"
cidr="${ip_start##*/}"
ip_to_int() {
local IFS=.
read -r i1 i2 i3 i4 <<<"$1"
echo $(((i1 << 24) + (i2 << 16) + (i3 << 8) + i4))
}
int_to_ip() {
local ip=$1
echo "$(((ip >> 24) & 0xFF)).$(((ip >> 16) & 0xFF)).$(((ip >> 8) & 0xFF)).$((ip & 0xFF))"
}
start_int=$(ip_to_int "$ip1")
end_int=$(ip_to_int "$ip2")
for ((ip_int = start_int; ip_int <= end_int; ip_int++)); do
ip=$(int_to_ip $ip_int)
msg_info "Checking IP: $ip"
if ! ping -c 2 -W 1 "$ip" >/dev/null 2>&1; then
NET="$ip/$cidr"
msg_ok "Using free IP Address: ${BGN}$NET${CL}"
sleep 3
break
fi
done
if [[ "$NET" == *-* ]]; then
msg_error "No free IP found in range"
exit 1
fi
if [ -n "$GATE" ]; then
if [[ "$GATE" =~ $ip_regex ]]; then
echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE${CL}"
GATE=",gw=$GATE"
else
msg_error "Invalid IP Address format for Gateway. Needs to be 0.0.0.0, was ${GATE}"
exit
fi
else
while true; do
GATE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Enter gateway IP address" 8 58 --title "Gateway IP" 3>&1 1>&2 2>&3)
if [ -z "$GATE1" ]; then
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Gateway IP address cannot be empty" 8 58
elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Invalid IP address format" 8 58
else
GATE=",gw=$GATE1"
echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}"
break
fi
done
fi
else
msg_error "Invalid IP Address format. Needs to be 0.0.0.0/0 or a range like 10.0.0.1/24-10.0.0.10/24, was ${NET}"
exit
fi
else
while true; do
NET=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Static IPv4 CIDR Address (/24)" 8 58 dhcp --title "IP ADDRESS" 3>&1 1>&2 2>&3)
exit_status=$?
if [ $exit_status -eq 0 ]; then
if [ "$NET" = "dhcp" ]; then
echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}$NET${CL}"
break
else
if [[ "$NET" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then
echo -e "${NETWORK}${BOLD}${DGN}IP Address: ${BGN}$NET${CL}"
break
else
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "$NET is an invalid IPv4 CIDR address. Please enter a valid IPv4 CIDR address or 'dhcp'" 8 58
fi
fi
else
exit_script
fi
done
if [ "$NET" != "dhcp" ]; then
while true; do
GATE1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Enter gateway IP address" 8 58 --title "Gateway IP" 3>&1 1>&2 2>&3)
if [ -z "$GATE1" ]; then
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Gateway IP address cannot be empty" 8 58
elif [[ ! "$GATE1" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
whiptail --backtitle "Proxmox VE Helper Scripts" --msgbox "Invalid IP address format" 8 58
else
GATE=",gw=$GATE1"
echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}$GATE1${CL}"
break
fi
done
else
GATE=""
echo -e "${GATEWAY}${BOLD}${DGN}Gateway IP Address: ${BGN}Default${CL}"
fi
fi
if [ "$var_os" == "alpine" ]; then
APT_CACHER=""
APT_CACHER_IP=""
else
if [[ -n "${APT_CACHER_IP-}" ]]; then
if [[ ! $APT_CACHER_IP == "none" ]]; then
APT_CACHER="yes"
echo -e "${NETWORK}${BOLD}${DGN}APT-CACHER IP Address: ${BGN}$APT_CACHER_IP${CL}"
else
APT_CACHER=""
echo -e "${NETWORK}${BOLD}${DGN}APT-Cacher IP Address: ${BGN}No${CL}"
fi
else
if APT_CACHER_IP=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set APT-Cacher IP (leave blank for none)" 8 58 --title "APT-Cacher IP" 3>&1 1>&2 2>&3); then
APT_CACHER="${APT_CACHER_IP:+yes}"
echo -e "${NETWORK}${BOLD}${DGN}APT-Cacher IP Address: ${BGN}${APT_CACHER_IP:-Default}${CL}"
if [[ -n $APT_CACHER_IP ]]; then
APT_CACHER_IP="none"
fi
else
exit_script
fi
fi
fi
if [[ -n "${MTU-}" ]]; then
if [[ "$MTU" =~ ^-?[0-9]+$ ]]; then
echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU${CL}"
MTU=",mtu=$MTU"
else
msg_error "MTU must be an integer, was ${MTU}"
exit
fi
else
if MTU1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Interface MTU Size (leave blank for default [The MTU of your selected vmbr, default is 1500])" 8 58 --title "MTU SIZE" 3>&1 1>&2 2>&3); then
if [ -z "$MTU1" ]; then
MTU1="Default"
MTU=""
else
MTU=",mtu=$MTU1"
fi
echo -e "${DEFAULT}${BOLD}${DGN}Interface MTU Size: ${BGN}$MTU1${CL}"
else
exit_script
fi
fi
if [[ "$IPV6_METHOD" == "static" ]]; then
if [[ -n "$IPV6STATIC" ]]; then
IP6=",ip6=${IPV6STATIC}"
echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}${IPV6STATIC}${CL}"
else
msg_error "IPV6_METHOD is set to static but IPV6STATIC is empty"
exit
fi
elif [[ "$IPV6_METHOD" == "auto" ]]; then
IP6=",ip6=auto"
echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}auto${CL}"
else
IP6=""
echo -e "${NETWORK}${BOLD}${DGN}IPv6 Address: ${BGN}none${CL}"
fi
if [[ -n "${SD-}" ]]; then
if [[ "$SD" == "none" ]]; then
SD=""
echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}Host${CL}"
else
# Strip prefix if present for config file storage
local SD_VALUE="$SD"
[[ "$SD" =~ ^-searchdomain= ]] && SD_VALUE="${SD#-searchdomain=}"
echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SD_VALUE${CL}"
SD="-searchdomain=$SD_VALUE"
fi
else
if SD=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a DNS Search Domain (leave blank for HOST)" 8 58 --title "DNS Search Domain" 3>&1 1>&2 2>&3); then
if [ -z "$SD" ]; then
SX=Host
SD=""
else
SX=$SD
SD="-searchdomain=$SD"
fi
echo -e "${SEARCH}${BOLD}${DGN}DNS Search Domain: ${BGN}$SX${CL}"
else
exit_script
fi
fi
if [[ -n "${NS-}" ]]; then
if [[ $NS == "none" ]]; then
NS=""
echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}Host${CL}"
else
# Strip prefix if present for config file storage
local NS_VALUE="$NS"
[[ "$NS" =~ ^-nameserver= ]] && NS_VALUE="${NS#-nameserver=}"
if [[ "$NS_VALUE" =~ $ip_regex ]]; then
echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NS_VALUE${CL}"
NS="-nameserver=$NS_VALUE"
else
msg_error "Invalid IP Address format for DNS Server. Needs to be 0.0.0.0, was ${NS_VALUE}"
exit
fi
fi
else
if NX=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a DNS Server IP (leave blank for HOST)" 8 58 --title "DNS SERVER IP" 3>&1 1>&2 2>&3); then
if [ -z "$NX" ]; then
NX=Host
NS=""
else
NS="-nameserver=$NX"
fi
echo -e "${NETWORK}${BOLD}${DGN}DNS Server IP Address: ${BGN}$NX${CL}"
else
exit_script
fi
fi
if [[ -n "${MAC-}" ]]; then
if [[ "$MAC" == "none" ]]; then
MAC=""
echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}Host${CL}"
else
# Strip prefix if present for config file storage
local MAC_VALUE="$MAC"
[[ "$MAC" =~ ^,hwaddr= ]] && MAC_VALUE="${MAC#,hwaddr=}"
if [[ "$MAC_VALUE" =~ ^([A-Fa-f0-9]{2}:){5}[A-Fa-f0-9]{2}$ ]]; then
echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC_VALUE${CL}"
MAC=",hwaddr=$MAC_VALUE"
else
msg_error "MAC Address must be in the format xx:xx:xx:xx:xx:xx, was ${MAC_VALUE}"
exit
fi
fi
else
if MAC1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a MAC Address(leave blank for generated MAC)" 8 58 --title "MAC ADDRESS" 3>&1 1>&2 2>&3); then
if [ -z "$MAC1" ]; then
MAC1="Default"
MAC=""
else
MAC=",hwaddr=$MAC1"
echo -e "${MACADDRESS}${BOLD}${DGN}MAC Address: ${BGN}$MAC1${CL}"
fi
else
exit_script
fi
fi
if [[ -n "${VLAN-}" ]]; then
if [[ "$VLAN" == "none" ]]; then
VLAN=""
echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}Host${CL}"
else
# Strip prefix if present for config file storage
local VLAN_VALUE="$VLAN"
[[ "$VLAN" =~ ^,tag= ]] && VLAN_VALUE="${VLAN#,tag=}"
if [[ "$VLAN_VALUE" =~ ^-?[0-9]+$ ]]; then
echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN_VALUE${CL}"
VLAN=",tag=$VLAN_VALUE"
else
msg_error "VLAN must be an integer, was ${VLAN_VALUE}"
exit
fi
fi
else
if VLAN1=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set a Vlan(leave blank for no VLAN)" 8 58 --title "VLAN" 3>&1 1>&2 2>&3); then
if [ -z "$VLAN1" ]; then
VLAN1="Default"
VLAN=""
else
VLAN=",tag=$VLAN1"
fi
echo -e "${VLANTAG}${BOLD}${DGN}Vlan: ${BGN}$VLAN1${CL}"
else
exit_script
fi
fi
if [[ -n "${TAGS-}" ]]; then
if [[ "$TAGS" == *"DEFAULT"* ]]; then
TAGS="${TAGS//DEFAULT/}"
TAGS="${TAGS//;/}"
TAGS="$TAGS;${var_tags:-}"
echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}"
fi
else
TAGS="community-scripts;"
if ADV_TAGS=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "Set Custom Tags?[If you remove all, there will be no tags!]" 8 58 "${TAGS}" --title "Advanced Tags" 3>&1 1>&2 2>&3); then
if [ -n "${ADV_TAGS}" ]; then
ADV_TAGS=$(echo "$ADV_TAGS" | tr -d '[:space:]')
TAGS="${ADV_TAGS}"
else
TAGS=";"
fi
echo -e "${NETWORK}${BOLD}${DGN}Tags: ${BGN}$TAGS${CL}"
else
exit_script
fi
fi
if [[ -n "${SSH-}" ]]; then
if [[ "$SSH" == "yes" ]]; then
echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
if [[ ! -z "$SSH_AUTHORIZED_KEY" ]]; then
echo -e "${ROOTSSH}${BOLD}${DGN}SSH Authorized Key: ${BGN}********************${CL}"
else
echo -e "${ROOTSSH}${BOLD}${DGN}SSH Authorized Key: ${BGN}None${CL}"
fi
elif [[ "$SSH" == "no" ]]; then
echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
else
msg_error "SSH needs to be 'yes' or 'no', was ${SSH}"
exit
fi
else
SSH_AUTHORIZED_KEY="$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox "SSH Authorized key for root (leave empty for none)" 8 58 --title "SSH Key" 3>&1 1>&2 2>&3)"
if [[ -z "${SSH_AUTHORIZED_KEY}" ]]; then
SSH_AUTHORIZED_KEY=""
fi
if [[ "$PW" == -password* || -n "$SSH_AUTHORIZED_KEY" ]]; then
if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "SSH ACCESS" --yesno "Enable Root SSH Access?" 10 58); then
SSH="yes"
else
SSH="no"
fi
echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
else
SSH="no"
echo -e "${ROOTSSH}${BOLD}${DGN}Root SSH Access: ${BGN}$SSH${CL}"
fi
fi
if [[ -n "$ENABLE_FUSE" ]]; then
if [[ "$ENABLE_FUSE" == "yes" ]]; then
echo -e "${FUSE}${BOLD}${DGN}Enable FUSE: ${BGN}Yes${CL}"
elif [[ "$ENABLE_FUSE" == "no" ]]; then
echo -e "${FUSE}${BOLD}${DGN}Enable FUSE: ${BGN}No${CL}"
else
msg_error "Enable FUSE needs to be 'yes' or 'no', was ${ENABLE_FUSE}"
exit
fi
else
if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "FUSE" --yesno "Enable FUSE?" 10 58); then
ENABLE_FUSE="yes"
else
ENABLE_FUSE="no"
fi
echo -e "${FUSE}${BOLD}${DGN}Enable FUSE: ${BGN}$ENABLE_FUSE${CL}"
fi
if [[ -n "$ENABLE_TUN" ]]; then
if [[ "$ENABLE_TUN" == "yes" ]]; then
echo -e "${FUSE}${BOLD}${DGN}Enable TUN: ${BGN}Yes${CL}"
elif [[ "$ENABLE_TUN" == "no" ]]; then
echo -e "${FUSE}${BOLD}${DGN}Enable TUN: ${BGN}No${CL}"
else
msg_error "Enable TUN needs to be 'yes' or 'no', was ${ENABLE_TUN}"
exit
fi
else
if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "TUN" --yesno "Enable TUN?" 10 58); then
ENABLE_TUN="yes"
else
ENABLE_TUN="no"
fi
echo -e "${FUSE}${BOLD}${DGN}Enable TUN: ${BGN}$ENABLE_TUN${CL}"
fi
if [[ -n "${VERBOSE-}" ]]; then
if [[ "$VERBOSE" == "yes" ]]; then
echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}"
elif [[ "$VERBOSE" == "no" ]]; then
echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}No${CL}"
else
msg_error "Verbose Mode needs to be 'yes' or 'no', was ${VERBOSE}"
exit
fi
else
if (whiptail --backtitle "Proxmox VE Helper Scripts" --defaultno --title "VERBOSE MODE" --yesno "Enable Verbose Mode?" 10 58); then
VERBOSE="yes"
else
VERBOSE="no"
fi
echo -e "${SEARCH}${BOLD}${DGN}Verbose Mode: ${BGN}$VERBOSE${CL}"
fi
if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "ADVANCED SETTINGS WITH CONFIG FILE COMPLETE" --yesno "Ready to create ${APP} LXC?" 10 58); then
echo -e "${CREATING}${BOLD}${RD}Creating a ${APP} LXC using the above settings${CL}"
else
clear
header_info
echo -e "${INFO}${HOLD} ${GN}Using Config File on node $PVEHOST_NAME${CL}"
config_file
fi
}

View File

@@ -1,13 +1,35 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 community-scripts ORG
# License: MIT | https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/LICENSE
# ------------------------------------------------------------------------------
# Loads core utility groups once (colors, formatting, icons, defaults).
# ------------------------------------------------------------------------------
# ==============================================================================
# CORE FUNCTIONS - LXC CONTAINER UTILITIES
# ==============================================================================
#
# This file provides core utility functions for LXC container management
# including colors, formatting, validation checks, message output, and
# execution helpers used throughout the Community-Scripts ecosystem.
#
# Usage:
# source <(curl -fsSL https://git.community-scripts.org/.../core.func)
# load_functions
#
# ==============================================================================
[[ -n "${_CORE_FUNC_LOADED:-}" ]] && return
_CORE_FUNC_LOADED=1
# ==============================================================================
# SECTION 1: INITIALIZATION & SETUP
# ==============================================================================
# ------------------------------------------------------------------------------
# load_functions()
#
# - Initializes all core utility groups (colors, formatting, icons, defaults)
# - Ensures functions are loaded only once via __FUNCTIONS_LOADED flag
# - Must be called at start of any script using these utilities
# ------------------------------------------------------------------------------
load_functions() {
[[ -n "${__FUNCTIONS_LOADED:-}" ]] && return
__FUNCTIONS_LOADED=1
@@ -16,58 +38,14 @@ load_functions() {
icons
default_vars
set_std_mode
# add more
}
# ============================================================================
# Error & Signal Handling - robust, universal, subshell-safe
# ============================================================================
_tool_error_hint() {
local cmd="$1"
local code="$2"
case "$cmd" in
curl)
case "$code" in
6) echo "Curl: Could not resolve host (DNS problem)" ;;
7) echo "Curl: Failed to connect to host (connection refused)" ;;
22) echo "Curl: HTTP error (404/403 etc)" ;;
28) echo "Curl: Operation timeout" ;;
*) echo "Curl: Unknown error ($code)" ;;
esac
;;
wget)
echo "Wget failed - URL unreachable or permission denied"
;;
systemctl)
echo "Systemd unit failure - check service name and permissions"
;;
jq)
echo "jq parse error - malformed JSON or missing key"
;;
mariadb | mysql)
echo "MySQL/MariaDB command failed - check credentials or DB"
;;
unzip)
echo "unzip failed - corrupt file or missing permission"
;;
tar)
echo "tar failed - invalid format or missing binary"
;;
node | npm | pnpm | yarn)
echo "Node tool failed - check version compatibility or package.json"
;;
*) echo "" ;;
esac
}
catch_errors() {
set -Eeuo pipefail
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
# ------------------------------------------------------------------------------
# Sets ANSI color codes used for styled terminal output.
# color()
#
# - Sets ANSI color codes for styled terminal output
# - Variables: YW (yellow), YWB (yellow bright), BL (blue), RD (red)
# GN (green), DGN (dark green), BGN (background green), CL (clear)
# ------------------------------------------------------------------------------
color() {
YW=$(echo "\033[33m")
@@ -80,7 +58,14 @@ color() {
CL=$(echo "\033[m")
}
# Special for spinner and colorized output via printf
# ------------------------------------------------------------------------------
# color_spinner()
#
# - Sets ANSI color codes specifically for spinner animation
# - Variables: CS_YW (spinner yellow), CS_YWB (spinner yellow bright),
# CS_CL (spinner clear)
# - Used by spinner() function to avoid color conflicts
# ------------------------------------------------------------------------------
color_spinner() {
CS_YW=$'\033[33m'
CS_YWB=$'\033[93m'
@@ -88,7 +73,12 @@ color_spinner() {
}
# ------------------------------------------------------------------------------
# Defines formatting helpers like tab, bold, and line reset sequences.
# formatting()
#
# - Defines formatting helpers for terminal output
# - BFR: Backspace and clear line sequence
# - BOLD: Bold text escape code
# - TAB/TAB3: Indentation spacing
# ------------------------------------------------------------------------------
formatting() {
BFR="\\r\\033[K"
@@ -99,7 +89,11 @@ formatting() {
}
# ------------------------------------------------------------------------------
# Sets symbolic icons used throughout user feedback and prompts.
# icons()
#
# - Sets symbolic emoji icons used throughout user feedback
# - Provides consistent visual indicators for success, error, info, etc.
# - Icons: CM (checkmark), CROSS (error), INFO (info), HOURGLASS (wait), etc.
# ------------------------------------------------------------------------------
icons() {
CM="${TAB}✔️${TAB}"
@@ -130,21 +124,29 @@ icons() {
ADVANCED="${TAB}🧩${TAB}${CL}"
FUSE="${TAB}🗂️${TAB}${CL}"
HOURGLASS="${TAB}⏳${TAB}"
}
# ------------------------------------------------------------------------------
# Sets default retry and wait variables used for system actions.
# default_vars()
#
# - Sets default retry and wait variables used for system actions
# - RETRY_NUM: Maximum number of retry attempts (default: 10)
# - RETRY_EVERY: Seconds to wait between retries (default: 3)
# - i: Counter variable initialized to RETRY_NUM
# ------------------------------------------------------------------------------
default_vars() {
RETRY_NUM=10
RETRY_EVERY=3
i=$RETRY_NUM
#[[ "${VAR_OS:-}" == "unknown" ]]
}
# ------------------------------------------------------------------------------
# Sets default verbose mode for script and os execution.
# set_std_mode()
#
# - Sets default verbose mode for script and OS execution
# - If VERBOSE=yes: STD="" (show all output)
# - If VERBOSE=no: STD="silent" (suppress output via silent() wrapper)
# - If DEV_MODE_TRACE=true: Enables bash tracing (set -x)
# ------------------------------------------------------------------------------
set_std_mode() {
if [ "${VERBOSE:-no}" = "yes" ]; then
@@ -152,138 +154,338 @@ set_std_mode() {
else
STD="silent"
fi
}
# Silent execution function
silent() {
"$@" >/dev/null 2>&1
}
# Function to download & save header files
get_header() {
local app_name=$(echo "${APP,,}" | tr -d ' ')
local app_type=${APP_TYPE:-ct}
local header_url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/${app_type}/headers/${app_name}"
local local_header_path="/usr/local/community-scripts/headers/${app_type}/${app_name}"
mkdir -p "$(dirname "$local_header_path")"
if [ ! -s "$local_header_path" ]; then
if ! curl -fsSL "$header_url" -o "$local_header_path"; then
return 1
fi
fi
cat "$local_header_path" 2>/dev/null || true
}
header_info() {
local app_name=$(echo "${APP,,}" | tr -d ' ')
local header_content
header_content=$(get_header "$app_name") || header_content=""
clear
local term_width
term_width=$(tput cols 2>/dev/null || echo 120)
if [ -n "$header_content" ]; then
echo "$header_content"
# Enable bash tracing if trace mode active
if [[ "${DEV_MODE_TRACE:-false}" == "true" ]]; then
set -x
export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
fi
}
ensure_tput() {
if ! command -v tput >/dev/null 2>&1; then
if grep -qi 'alpine' /etc/os-release; then
apk add --no-cache ncurses >/dev/null 2>&1
elif command -v apt-get >/dev/null 2>&1; then
apt-get update -qq >/dev/null
apt-get install -y -qq ncurses-bin >/dev/null 2>&1
# ------------------------------------------------------------------------------
# parse_dev_mode()
#
# - Parses comma-separated dev_mode variable (e.g., "motd,keep,trace")
# - Sets global flags for each mode:
# * DEV_MODE_MOTD: Setup SSH/MOTD before installation
# * DEV_MODE_KEEP: Never delete container on failure
# * DEV_MODE_TRACE: Enable bash set -x tracing
# * DEV_MODE_PAUSE: Pause after each msg_info step
# * DEV_MODE_BREAKPOINT: Open shell on error instead of cleanup
# * DEV_MODE_LOGS: Persist all logs to /var/log/community-scripts/
# * DEV_MODE_DRYRUN: Show commands without executing
# - Call this early in script execution
# ------------------------------------------------------------------------------
parse_dev_mode() {
local mode
# Initialize all flags to false
export DEV_MODE_MOTD=false
export DEV_MODE_KEEP=false
export DEV_MODE_TRACE=false
export DEV_MODE_PAUSE=false
export DEV_MODE_BREAKPOINT=false
export DEV_MODE_LOGS=false
export DEV_MODE_DRYRUN=false
# Parse comma-separated modes
if [[ -n "${dev_mode:-}" ]]; then
IFS=',' read -ra MODES <<<"$dev_mode"
for mode in "${MODES[@]}"; do
mode="$(echo "$mode" | xargs)" # Trim whitespace
case "$mode" in
motd) export DEV_MODE_MOTD=true ;;
keep) export DEV_MODE_KEEP=true ;;
trace) export DEV_MODE_TRACE=true ;;
pause) export DEV_MODE_PAUSE=true ;;
breakpoint) export DEV_MODE_BREAKPOINT=true ;;
logs) export DEV_MODE_LOGS=true ;;
dryrun) export DEV_MODE_DRYRUN=true ;;
*)
if declare -f msg_warn >/dev/null 2>&1; then
msg_warn "Unknown dev_mode: '$mode' (ignored)"
else
echo "[WARN] Unknown dev_mode: '$mode' (ignored)" >&2
fi
;;
esac
done
# Show active dev modes
local active_modes=()
[[ $DEV_MODE_MOTD == true ]] && active_modes+=("motd")
[[ $DEV_MODE_KEEP == true ]] && active_modes+=("keep")
[[ $DEV_MODE_TRACE == true ]] && active_modes+=("trace")
[[ $DEV_MODE_PAUSE == true ]] && active_modes+=("pause")
[[ $DEV_MODE_BREAKPOINT == true ]] && active_modes+=("breakpoint")
[[ $DEV_MODE_LOGS == true ]] && active_modes+=("logs")
[[ $DEV_MODE_DRYRUN == true ]] && active_modes+=("dryrun")
if [[ ${#active_modes[@]} -gt 0 ]]; then
if declare -f msg_custom >/dev/null 2>&1; then
msg_custom "🔧" "${YWB}" "Dev modes active: ${active_modes[*]}"
else
echo "[DEV] Active modes: ${active_modes[*]}" >&2
fi
fi
fi
}
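# Example usage (illustrative sketch, assumed): set dev_mode before the build,
# then parse it to populate the DEV_MODE_* flags.
#   dev_mode="keep,trace"
#   parse_dev_mode
#   [[ "$DEV_MODE_TRACE" == "true" ]] && echo "tracing enabled"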
is_alpine() {
local os_id="${var_os:-${PCT_OSTYPE:-}}"
# ==============================================================================
# SECTION 2: VALIDATION CHECKS
# ==============================================================================
if [[ -z "$os_id" && -f /etc/os-release ]]; then
os_id="$(
. /etc/os-release 2>/dev/null
echo "${ID:-}"
)"
# ------------------------------------------------------------------------------
# shell_check()
#
# - Verifies that the script is running under Bash shell
# - Exits with error message if different shell is detected
# - Required because scripts use Bash-specific features
# ------------------------------------------------------------------------------
shell_check() {
if [[ "$(ps -p $$ -o comm=)" != "bash" ]]; then
clear
msg_error "Your default shell is currently not set to Bash. To use these scripts, please switch to the Bash shell."
echo -e "\nExiting..."
sleep 2
exit
fi
[[ "$os_id" == "alpine" ]]
}
is_verbose_mode() {
local verbose="${VERBOSE:-${var_verbose:-no}}"
local tty_status
if [[ -t 2 ]]; then
tty_status="interactive"
else
tty_status="not-a-tty"
fi
[[ "$verbose" != "no" || ! -t 2 ]]
}
# ------------------------------------------------------------------------------
# Handles specific curl error codes and displays descriptive messages.
# root_check()
#
# - Verifies script is running with root privileges
# - Detects if executed via sudo (which can cause issues)
# - Exits with error if not running as root directly
# ------------------------------------------------------------------------------
__curl_err_handler() {
local exit_code="$1"
local target="$2"
local curl_msg="$3"
root_check() {
if [[ "$(id -u)" -ne 0 || $(ps -o comm= -p $PPID) == "sudo" ]]; then
clear
msg_error "Please run this script as root."
echo -e "\nExiting..."
sleep 2
exit
fi
}
case $exit_code in
1) msg_error "Unsupported protocol: $target" ;;
2) msg_error "Curl init failed: $target" ;;
3) msg_error "Malformed URL: $target" ;;
5) msg_error "Proxy resolution failed: $target" ;;
6) msg_error "Host resolution failed: $target" ;;
7) msg_error "Connection failed: $target" ;;
9) msg_error "Access denied: $target" ;;
18) msg_error "Partial file transfer: $target" ;;
22) msg_error "HTTP error (e.g. 400/404): $target" ;;
23) msg_error "Write error on local system: $target" ;;
26) msg_error "Read error from local file: $target" ;;
28) msg_error "Timeout: $target" ;;
35) msg_error "SSL connect error: $target" ;;
47) msg_error "Too many redirects: $target" ;;
51) msg_error "SSL cert verify failed: $target" ;;
52) msg_error "Empty server response: $target" ;;
55) msg_error "Send error: $target" ;;
56) msg_error "Receive error: $target" ;;
60) msg_error "SSL CA not trusted: $target" ;;
67) msg_error "Login denied by server: $target" ;;
78) msg_error "Remote file not found (404): $target" ;;
*) msg_error "Curl failed with code $exit_code: $target" ;;
esac
# ------------------------------------------------------------------------------
# pve_check()
#
# - Validates Proxmox VE version compatibility
# - Supported: PVE 8.0-8.9 and PVE 9.0-9.1
# - Exits with error message if unsupported version detected
# ------------------------------------------------------------------------------
pve_check() {
local PVE_VER
PVE_VER="$(pveversion | awk -F'/' '{print $2}' | awk -F'-' '{print $1}')"
[[ -n "$curl_msg" ]] && printf "%s\n" "$curl_msg" >&2
# Check for Proxmox VE 8.x: allow 8.0-8.9
if [[ "$PVE_VER" =~ ^8\.([0-9]+) ]]; then
local MINOR="${BASH_REMATCH[1]}"
if ((MINOR < 0 || MINOR > 9)); then
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported: Proxmox VE version 8.0-8.9"
exit 1
fi
return 0
fi
# Check for Proxmox VE 9.x: allow 9.0-9.1
if [[ "$PVE_VER" =~ ^9\.([0-9]+) ]]; then
local MINOR="${BASH_REMATCH[1]}"
if ((MINOR < 0 || MINOR > 1)); then
msg_error "This version of Proxmox VE is not yet supported."
msg_error "Supported: Proxmox VE version 9.0-9.1"
exit 1
fi
return 0
fi
# All other unsupported versions
msg_error "This version of Proxmox VE is not supported."
msg_error "Supported versions: Proxmox VE 8.0-8.9 or 9.0-9.1"
exit 1
}
fatal() {
msg_error "$1"
kill -INT $$
# ------------------------------------------------------------------------------
# arch_check()
#
# - Validates system architecture is amd64/x86_64
# - Exits with error message for unsupported architectures (e.g., ARM/PiMox)
# - Provides link to ARM64-compatible scripts
# ------------------------------------------------------------------------------
arch_check() {
if [ "$(dpkg --print-architecture)" != "amd64" ]; then
echo -e "\n ${INFO}${YWB}This script will not work with PiMox! \n"
echo -e "\n ${YWB}Visit https://github.com/asylumexp/Proxmox for ARM64 support. \n"
echo -e "Exiting..."
sleep 2
exit
fi
}
# ------------------------------------------------------------------------------
# ssh_check()
#
# - Detects if script is running over SSH connection
# - Warns user for external SSH connections (recommends Proxmox shell)
# - Skips warning for local/same-subnet connections
# - Does not abort execution, only warns
# ------------------------------------------------------------------------------
ssh_check() {
if [ -n "$SSH_CLIENT" ]; then
local client_ip=$(awk '{print $1}' <<<"$SSH_CLIENT")
local host_ip=$(hostname -I | awk '{print $1}')
# Check if connection is local (Proxmox WebUI or same machine)
# - localhost (127.0.0.1, ::1)
# - same IP as host
# - local network range (10.x, 172.16-31.x, 192.168.x)
if [[ "$client_ip" == "127.0.0.1" || "$client_ip" == "::1" || "$client_ip" == "$host_ip" ]]; then
return
fi
# Check if client is in same local network (optional, safer approach)
local host_subnet=$(echo "$host_ip" | cut -d. -f1-3)
local client_subnet=$(echo "$client_ip" | cut -d. -f1-3)
if [[ "$host_subnet" == "$client_subnet" ]]; then
return
fi
# Only warn for truly external connections
msg_warn "Running via external SSH (client: $client_ip)."
msg_warn "For better stability, consider using the Proxmox Shell (Console) instead."
fi
}
# ==============================================================================
# SECTION 3: EXECUTION HELPERS
# ==============================================================================
# ------------------------------------------------------------------------------
# get_active_logfile()
#
# - Returns the appropriate log file based on execution context
# - BUILD_LOG: Host operations (container creation)
# - INSTALL_LOG: Container operations (application installation)
# - Fallback to BUILD_LOG if neither is set
# ------------------------------------------------------------------------------
get_active_logfile() {
if [[ -n "${INSTALL_LOG:-}" ]]; then
echo "$INSTALL_LOG"
elif [[ -n "${BUILD_LOG:-}" ]]; then
echo "$BUILD_LOG"
else
# Fallback for legacy scripts
echo "/tmp/build-$(date +%Y%m%d_%H%M%S).log"
fi
}
# Legacy compatibility: SILENT_LOGFILE points to active log
SILENT_LOGFILE="$(get_active_logfile)"
# ------------------------------------------------------------------------------
# silent()
#
# - Executes command with output redirected to active log file
# - On error: displays last 10 lines of log and exits with original exit code
# - Temporarily disables error trap to capture exit code correctly
# - Sources explain_exit_code() for detailed error messages
# ------------------------------------------------------------------------------
silent() {
local cmd="$*"
local caller_line="${BASH_LINENO[0]:-unknown}"
local logfile="$(get_active_logfile)"
# Dryrun mode: Show command without executing
if [[ "${DEV_MODE_DRYRUN:-false}" == "true" ]]; then
if declare -f msg_custom >/dev/null 2>&1; then
msg_custom "🔍" "${BL}" "[DRYRUN] $cmd"
else
echo "[DRYRUN] $cmd" >&2
fi
return 0
fi
set +Eeuo pipefail
trap - ERR
"$@" >>"$logfile" 2>&1
local rc=$?
set -Eeuo pipefail
trap 'error_handler' ERR
if [[ $rc -ne 0 ]]; then
# Source explain_exit_code if needed
if ! declare -f explain_exit_code >/dev/null 2>&1; then
source "$(dirname "${BASH_SOURCE[0]}")/error-handler.func"
fi
local explanation
explanation="$(explain_exit_code "$rc")"
printf "\e[?25h"
msg_error "in line ${caller_line}: exit code ${rc} (${explanation})"
msg_custom "→" "${YWB}" "${cmd}"
if [[ -s "$logfile" ]]; then
local log_lines=$(wc -l <"$logfile")
echo "--- Last 10 lines of silent log ---"
tail -n 10 "$logfile"
echo "-----------------------------------"
# Show how to view full log if there are more lines
if [[ $log_lines -gt 10 ]]; then
msg_custom "📋" "${YW}" "View full log (${log_lines} lines): ${logfile}"
fi
fi
exit "$rc"
fi
}
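# Example usage (illustrative sketch, assumed): wrap noisy commands; on failure
# the exit code is explained and the last log lines are shown before exiting.
#   silent apt-get install -y curl
#   silent systemctl restart nginx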
# ------------------------------------------------------------------------------
# spinner()
#
# - Displays animated spinner with rotating characters (⠋ ⠙ ⠹ ⠸ ⠼ ⠴ ⠦ ⠧ ⠇ ⠏)
# - Shows SPINNER_MSG alongside animation
# - Runs in infinite loop until killed by stop_spinner()
# - Uses color_spinner() colors for output
# ------------------------------------------------------------------------------
spinner() {
local chars=(⠋ ⠙ ⠹ ⠸ ⠼ ⠴ ⠦ ⠧ ⠇ ⠏)
local msg="${SPINNER_MSG:-Processing...}"
local i=0
while true; do
local index=$((i++ % ${#chars[@]}))
printf "\r\033[2K%s %b" "${CS_YWB}${chars[$index]}${CS_CL}" "${CS_YWB}${SPINNER_MSG:-}${CS_CL}"
printf "\r\033[2K%s %b" "${CS_YWB}${chars[$index]}${CS_CL}" "${CS_YWB}${msg}${CS_CL}"
sleep 0.1
done
}
# ------------------------------------------------------------------------------
# clear_line()
#
# - Clears current terminal line using tput or ANSI escape codes
# - Moves cursor to beginning of line (carriage return)
# - Erases from cursor to end of line
# - Fallback to ANSI codes if tput not available
# ------------------------------------------------------------------------------
clear_line() {
tput cr 2>/dev/null || echo -en "\r"
tput el 2>/dev/null || echo -en "\033[K"
}
# ------------------------------------------------------------------------------
# stop_spinner()
#
# - Stops running spinner process by PID
# - Reads PID from SPINNER_PID variable or /tmp/.spinner.pid file
# - Attempts graceful kill, then forced kill if needed
# - Cleans up temp file and resets terminal state
# - Unsets SPINNER_PID and SPINNER_MSG variables
# ------------------------------------------------------------------------------
stop_spinner() {
local pid="${SPINNER_PID:-}"
[[ -z "$pid" && -f /tmp/.spinner.pid ]] && pid=$(</tmp/.spinner.pid)
@@ -301,6 +503,19 @@ stop_spinner() {
stty sane 2>/dev/null || true
}
# ==============================================================================
# SECTION 4: MESSAGE OUTPUT
# ==============================================================================
# ------------------------------------------------------------------------------
# msg_info()
#
# - Displays informational message with spinner animation
# - Shows each unique message only once (tracked via MSG_INFO_SHOWN)
# - In verbose/Alpine mode: shows hourglass icon instead of spinner
# - Stops any existing spinner before starting new one
# - Backgrounds spinner process and stores PID for later cleanup
# ------------------------------------------------------------------------------
msg_info() {
local msg="$1"
[[ -z "$msg" ]] && return
@@ -317,6 +532,12 @@ msg_info() {
if is_verbose_mode || is_alpine; then
local HOURGLASS="${TAB}⏳${TAB}"
printf "\r\e[2K%s %b" "$HOURGLASS" "${YW}${msg}${CL}" >&2
# Pause mode: Wait for Enter after each step
if [[ "${DEV_MODE_PAUSE:-false}" == "true" ]]; then
echo -en "\n${YWB}[PAUSE]${CL} Press Enter to continue..." >&2
read -r
fi
return
fi
@@ -325,29 +546,68 @@ msg_info() {
SPINNER_PID=$!
echo "$SPINNER_PID" >/tmp/.spinner.pid
disown "$SPINNER_PID" 2>/dev/null || true
# Pause mode: Stop spinner and wait
if [[ "${DEV_MODE_PAUSE:-false}" == "true" ]]; then
stop_spinner
echo -en "\n${YWB}[PAUSE]${CL} Press Enter to continue..." >&2
read -r
fi
}
# ------------------------------------------------------------------------------
# msg_ok()
#
# - Displays success message with checkmark icon
# - Stops spinner and clears line before output
# - Removes message from MSG_INFO_SHOWN to allow re-display
# - Uses green color for success indication
# ------------------------------------------------------------------------------
msg_ok() {
local msg="$1"
[[ -z "$msg" ]] && return
stop_spinner
clear_line
printf "%s %b\n" "$CM" "${GN}${msg}${CL}" >&2
echo -e "$CM ${GN}${msg}${CL}"
unset MSG_INFO_SHOWN["$msg"]
}
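# Example usage (illustrative sketch, assumed): pair msg_info with msg_ok
# around a silenced step, as the install scripts do.
#   msg_info "Installing Dependencies"
#   silent apt-get install -y curl
#   msg_ok "Installed Dependencies"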
# ------------------------------------------------------------------------------
# msg_error()
#
# - Displays error message with cross/X icon
# - Stops spinner before output
# - Uses red color for error indication
# - Outputs to stderr
# ------------------------------------------------------------------------------
msg_error() {
stop_spinner
local msg="$1"
echo -e "${BFR:-} ${CROSS:-✖️} ${RD}${msg}${CL}"
echo -e "${BFR:-}${CROSS:-✖️} ${RD}${msg}${CL}" >&2
}
# ------------------------------------------------------------------------------
# msg_warn()
#
# - Displays warning message with info/lightbulb icon
# - Stops spinner before output
# - Uses bright yellow color for warning indication
# - Outputs to stderr
# ------------------------------------------------------------------------------
msg_warn() {
stop_spinner
local msg="$1"
echo -e "${BFR:-} ${INFO:-} ${YWB}${msg}${CL}"
echo -e "${BFR:-}${INFO:-} ${YWB}${msg}${CL}" >&2
}
# ------------------------------------------------------------------------------
# msg_custom()
#
# - Displays custom message with user-defined symbol and color
# - Arguments: symbol, color code, message text
# - Stops spinner before output
# - Useful for specialized status messages
# ------------------------------------------------------------------------------
msg_custom() {
local symbol="${1:-"[*]"}"
local color="${2:-"\e[36m"}"
@@ -357,17 +617,181 @@ msg_custom() {
echo -e "${BFR:-} ${symbol} ${color}${msg}${CL:-\e[0m}"
}
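# Example usage (illustrative sketch, assumed): symbol, color code and message
# are positional arguments.
#   msg_custom "⚙️" "${BL}" "Applying custom configuration"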
run_container_safe() {
local ct="$1"
shift
local cmd="$*"
lxc-attach -n "$ct" -- bash -euo pipefail -c "
trap 'echo Aborted in container; exit 130' SIGINT SIGTERM
$cmd
" || __handle_general_error "lxc-attach to CT $ct"
# ------------------------------------------------------------------------------
# msg_debug()
#
# - Displays debug message with timestamp when var_full_verbose=1
# - Automatically enables var_verbose if not already set
# - Shows date/time prefix for log correlation
# - Uses bright yellow color for debug output
# ------------------------------------------------------------------------------
msg_debug() {
if [[ "${var_full_verbose:-0}" == "1" ]]; then
[[ "${var_verbose:-0}" != "1" ]] && var_verbose=1
echo -e "${YWB}[$(date '+%F %T')] [DEBUG]${CL} $*"
fi
}
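# Example usage (illustrative sketch, assumed): enable full verbose output,
# then emit timestamped debug lines.
#   var_full_verbose=1
#   msg_debug "Entering update step"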
# ------------------------------------------------------------------------------
# msg_dev()
#
# - Display development mode messages with 🔧 icon
# - Only shown when dev_mode is active
# - Useful for debugging and development-specific output
# - Format: [DEV] message with distinct formatting
# - Usage: msg_dev "Container ready for debugging"
# ------------------------------------------------------------------------------
msg_dev() {
if [[ -n "${dev_mode:-}" ]]; then
echo -e "${SEARCH}${BOLD}${DGN}🔧 [DEV]${CL} $*"
fi
}
#
# - Displays error message and immediately terminates script
# - Sends SIGINT to current process to trigger error handler
# - Use for unrecoverable errors that require immediate exit
# ------------------------------------------------------------------------------
fatal() {
msg_error "$1"
kill -INT $$
}
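# Example usage (illustrative sketch, assumed): abort on an unrecoverable
# precondition.
#   command -v curl >/dev/null 2>&1 || fatal "curl is required but not installed"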
# ==============================================================================
# SECTION 5: UTILITY FUNCTIONS
# ==============================================================================
# ------------------------------------------------------------------------------
# exit_script()
#
# - Called when user cancels an action
# - Clears screen and displays exit message
# - Exits with default exit code
# ------------------------------------------------------------------------------
exit_script() {
clear
echo -e "\n${CROSS}${RD}User exited script${CL}\n"
exit
}
# ------------------------------------------------------------------------------
# get_header()
#
# - Downloads and caches application header ASCII art
# - Falls back to local cache if already downloaded
# - Determines app type (ct/vm) from APP_TYPE variable
# - Returns header content or empty string on failure
# ------------------------------------------------------------------------------
get_header() {
local app_name=$(echo "${APP,,}" | tr -d ' ')
local app_type=${APP_TYPE:-ct} # Default to 'ct' if not set
local header_url="https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/${app_type}/headers/${app_name}"
local local_header_path="/usr/local/community-scripts/headers/${app_type}/${app_name}"
mkdir -p "$(dirname "$local_header_path")"
if [ ! -s "$local_header_path" ]; then
if ! curl -fsSL "$header_url" -o "$local_header_path"; then
return 1
fi
fi
cat "$local_header_path" 2>/dev/null || true
}
# ------------------------------------------------------------------------------
# header_info()
#
# - Displays application header ASCII art at top of screen
# - Clears screen before displaying header
# - Detects terminal width for formatting
# - Returns silently if header not available
# ------------------------------------------------------------------------------
header_info() {
local app_name=$(echo "${APP,,}" | tr -d ' ')
local header_content
header_content=$(get_header "$app_name") || header_content=""
clear
local term_width
term_width=$(tput cols 2>/dev/null || echo 120)
if [ -n "$header_content" ]; then
echo "$header_content"
fi
}
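# Example usage (illustrative sketch, assumed): APP and APP_TYPE are normally
# set by the calling ct/vm script ("MyApp" is a placeholder name).
#   APP="MyApp" APP_TYPE="ct"
#   header_info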
# ------------------------------------------------------------------------------
# ensure_tput()
#
# - Ensures tput command is available for terminal control
# - Installs ncurses-bin on Debian/Ubuntu or ncurses on Alpine
# - Required for clear_line() and terminal width detection
# ------------------------------------------------------------------------------
ensure_tput() {
if ! command -v tput >/dev/null 2>&1; then
if grep -qi 'alpine' /etc/os-release; then
apk add --no-cache ncurses >/dev/null 2>&1
elif command -v apt-get >/dev/null 2>&1; then
apt-get update -qq >/dev/null
apt-get install -y -qq ncurses-bin >/dev/null 2>&1
fi
fi
}
# ------------------------------------------------------------------------------
# is_alpine()
#
# - Detects if running on Alpine Linux
# - Checks var_os, PCT_OSTYPE, or /etc/os-release
# - Returns 0 if Alpine, 1 otherwise
# - Used to adjust behavior for Alpine-specific commands
# ------------------------------------------------------------------------------
is_alpine() {
local os_id="${var_os:-${PCT_OSTYPE:-}}"
if [[ -z "$os_id" && -f /etc/os-release ]]; then
os_id="$(
. /etc/os-release 2>/dev/null
echo "${ID:-}"
)"
fi
[[ "$os_id" == "alpine" ]]
}
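# Example usage (illustrative sketch, assumed): branch on the detected
# distribution, mirroring ensure_tput().
#   if is_alpine; then
#     apk add --no-cache ncurses
#   else
#     apt-get install -y ncurses-bin
#   fi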
# ------------------------------------------------------------------------------
# is_verbose_mode()
#
# - Determines if script should run in verbose mode
# - Checks VERBOSE and var_verbose variables
# - Also returns true if not running in TTY (pipe/redirect scenario)
# - Used by msg_info() to decide between spinner and static output
# ------------------------------------------------------------------------------
is_verbose_mode() {
local verbose="${VERBOSE:-${var_verbose:-no}}"
local tty_status
if [[ -t 2 ]]; then
tty_status="interactive"
else
tty_status="not-a-tty"
fi
[[ "$verbose" != "no" || ! -t 2 ]]
}
# ==============================================================================
# SECTION 6: CLEANUP & MAINTENANCE
# ==============================================================================
# ------------------------------------------------------------------------------
# cleanup_lxc()
#
# - Comprehensive cleanup of package managers, caches, and logs
# - Supports Alpine (apk), Debian/Ubuntu (apt), and language package managers
# - Cleans: Python (pip/uv), Node.js (npm/yarn/pnpm), Go, Rust, Ruby, PHP
# - Truncates log files and vacuums systemd journal
# - Run at end of container creation to minimize disk usage
# ------------------------------------------------------------------------------
cleanup_lxc() {
msg_info "Cleaning up"
@@ -390,8 +814,6 @@ cleanup_lxc() {
xargs -0 -n1 truncate -s 0 2>/dev/null || true
fi
# Python pip
if command -v pip &>/dev/null; then $STD pip cache purge || true; fi
# Node.js npm
if command -v npm &>/dev/null; then $STD npm cache clean --force || true; fi
# Node.js yarn
@@ -413,6 +835,16 @@ cleanup_lxc() {
msg_ok "Cleaned"
}
# ------------------------------------------------------------------------------
# check_or_create_swap()
#
# - Checks if swap is active on system
# - Offers to create swap file if none exists
# - Prompts user for swap size in MB
# - Creates /swapfile with specified size
# - Activates swap immediately
# - Returns 0 if swap active or successfully created, 1 if declined/failed
# ------------------------------------------------------------------------------
check_or_create_swap() {
msg_info "Checking for active swap"
@@ -451,7 +883,8 @@ check_or_create_swap() {
fi
}
trap 'stop_spinner' EXIT INT TERM
# ==============================================================================
# SIGNAL TRAPS
# ==============================================================================
# Initialize functions when core.func is sourced
load_functions
trap 'stop_spinner' EXIT INT TERM

View File

@@ -1,380 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2025 tteck
# Author: tteck (tteckster)
# Co-Author: MickLesk
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# This sets verbose mode if the global variable is set to "yes"
# if [ "$VERBOSE" == "yes" ]; then set -x; fi
source "$(dirname "$0")/core.func"
# This sets error handling options and defines the error_handler function to handle errors
set -Eeuo pipefail
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
trap on_exit EXIT
trap on_interrupt INT
trap on_terminate TERM
function on_exit() {
local exit_code="$?"
[[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
exit "$exit_code"
}
function error_handler() {
local exit_code="$?"
local line_number="$1"
local command="$2"
printf "\e[?25h"
echo -e "\n${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}\n"
exit "$exit_code"
}
function on_interrupt() {
echo -e "\n${RD}Interrupted by user (SIGINT)${CL}"
exit 130
}
function on_terminate() {
echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}"
exit 143
}
function exit_script() {
clear
printf "\e[?25h"
echo -e "\n${CROSS}${RD}User exited script${CL}\n"
kill 0
exit 1
}
function check_storage_support() {
local CONTENT="$1"
local -a VALID_STORAGES=()
while IFS= read -r line; do
local STORAGE_NAME
STORAGE_NAME=$(awk '{print $1}' <<<"$line")
[[ -z "$STORAGE_NAME" ]] && continue
VALID_STORAGES+=("$STORAGE_NAME")
done < <(pvesm status -content "$CONTENT" 2>/dev/null | awk 'NR>1')
[[ ${#VALID_STORAGES[@]} -gt 0 ]]
}
# This function selects a storage pool for a given content type (e.g., rootdir, vztmpl).
function select_storage() {
local CLASS=$1 CONTENT CONTENT_LABEL
case $CLASS in
container)
CONTENT='rootdir'
CONTENT_LABEL='Container'
;;
template)
CONTENT='vztmpl'
CONTENT_LABEL='Container template'
;;
iso)
CONTENT='iso'
CONTENT_LABEL='ISO image'
;;
images)
CONTENT='images'
CONTENT_LABEL='VM Disk image'
;;
backup)
CONTENT='backup'
CONTENT_LABEL='Backup'
;;
snippets)
CONTENT='snippets'
CONTENT_LABEL='Snippets'
;;
*)
msg_error "Invalid storage class '$CLASS'"
return 1
;;
esac
# Check for preset STORAGE variable
if [ "$CONTENT" = "rootdir" ] && [ -n "${STORAGE:-}" ]; then
if pvesm status -content "$CONTENT" | awk 'NR>1 {print $1}' | grep -qx "$STORAGE"; then
STORAGE_RESULT="$STORAGE"
msg_info "Using preset storage: $STORAGE_RESULT for $CONTENT_LABEL"
return 0
else
msg_error "Preset storage '$STORAGE' is not valid for content type '$CONTENT'."
return 2
fi
fi
local -A STORAGE_MAP
local -a MENU
local COL_WIDTH=0
while read -r TAG TYPE _ TOTAL USED FREE _; do
[[ -n "$TAG" && -n "$TYPE" ]] || continue
local STORAGE_NAME="$TAG"
local DISPLAY="${STORAGE_NAME} (${TYPE})"
local USED_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$USED")
local FREE_FMT=$(numfmt --to=iec --from-unit=K --format %.1f <<<"$FREE")
local INFO="Free: ${FREE_FMT}B Used: ${USED_FMT}B"
STORAGE_MAP["$DISPLAY"]="$STORAGE_NAME"
MENU+=("$DISPLAY" "$INFO" "OFF")
((${#DISPLAY} > COL_WIDTH)) && COL_WIDTH=${#DISPLAY}
done < <(pvesm status -content "$CONTENT" | awk 'NR>1')
if [ ${#MENU[@]} -eq 0 ]; then
msg_error "No storage found for content type '$CONTENT'."
return 2
fi
if [ $((${#MENU[@]} / 3)) -eq 1 ]; then
STORAGE_RESULT="${STORAGE_MAP[${MENU[0]}]}"
STORAGE_INFO="${MENU[1]}"
return 0
fi
local WIDTH=$((COL_WIDTH + 42))
while true; do
local DISPLAY_SELECTED
DISPLAY_SELECTED=$(whiptail --backtitle "Proxmox VE Helper Scripts" \
--title "Storage Pools" \
--radiolist "Which storage pool for ${CONTENT_LABEL,,}?\n(Spacebar to select)" \
16 "$WIDTH" 6 "${MENU[@]}" 3>&1 1>&2 2>&3)
# Cancel or ESC
[[ $? -ne 0 ]] && exit_script
# Strip trailing whitespace or newline (important for storages like "storage (dir)")
DISPLAY_SELECTED=$(sed 's/[[:space:]]*$//' <<<"$DISPLAY_SELECTED")
if [[ -z "$DISPLAY_SELECTED" || -z "${STORAGE_MAP[$DISPLAY_SELECTED]+_}" ]]; then
whiptail --msgbox "No valid storage selected. Please try again." 8 58
continue
fi
STORAGE_RESULT="${STORAGE_MAP[$DISPLAY_SELECTED]}"
for ((i = 0; i < ${#MENU[@]}; i += 3)); do
if [[ "${MENU[$i]}" == "$DISPLAY_SELECTED" ]]; then
STORAGE_INFO="${MENU[$i + 1]}"
break
fi
done
return 0
done
}
# Test if required variables are set
[[ "${CTID:-}" ]] || {
msg_error "You need to set 'CTID' variable."
exit 203
}
[[ "${PCT_OSTYPE:-}" ]] || {
msg_error "You need to set 'PCT_OSTYPE' variable."
exit 204
}
# Test if ID is valid
[ "$CTID" -ge "100" ] || {
msg_error "ID cannot be less than 100."
exit 205
}
# Test if ID is in use
if qm status "$CTID" &>/dev/null || pct status "$CTID" &>/dev/null; then
echo -e "ID '$CTID' is already in use."
unset CTID
msg_error "Cannot use ID that is already in use."
exit 206
fi
# This checks for the presence of valid Container Storage and Template Storage locations
msg_info "Validating storage"
if ! check_storage_support "rootdir"; then
msg_error "No valid storage found for 'rootdir' [Container]"
exit 1
fi
if ! check_storage_support "vztmpl"; then
msg_error "No valid storage found for 'vztmpl' [Template]"
exit 1
fi
#msg_info "Checking template storage"
while true; do
if select_storage template; then
TEMPLATE_STORAGE="$STORAGE_RESULT"
TEMPLATE_STORAGE_INFO="$STORAGE_INFO"
msg_ok "Storage ${BL}$TEMPLATE_STORAGE${CL} ($TEMPLATE_STORAGE_INFO) [Template]"
break
fi
done
while true; do
if select_storage container; then
CONTAINER_STORAGE="$STORAGE_RESULT"
CONTAINER_STORAGE_INFO="$STORAGE_INFO"
msg_ok "Storage ${BL}$CONTAINER_STORAGE${CL} ($CONTAINER_STORAGE_INFO) [Container]"
break
fi
done
# Check free space on selected container storage
STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }')
REQUIRED_KB=$((${PCT_DISK_SIZE:-8} * 1024 * 1024))
if [ "$STORAGE_FREE" -lt "$REQUIRED_KB" ]; then
msg_error "Not enough space on '$CONTAINER_STORAGE'. Needed: ${PCT_DISK_SIZE:-8}G."
exit 214
fi
# Check Cluster Quorum if in Cluster
if [ -f /etc/pve/corosync.conf ]; then
msg_info "Checking cluster quorum"
if ! pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then
msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)."
exit 210
fi
msg_ok "Cluster is quorate"
fi
# Update LXC template list
TEMPLATE_SEARCH="${PCT_OSTYPE}-${PCT_OSVERSION:-}"
case "$PCT_OSTYPE" in
debian | ubuntu)
TEMPLATE_PATTERN="-standard_"
;;
alpine | fedora | rocky | centos)
TEMPLATE_PATTERN="-default_"
;;
*)
TEMPLATE_PATTERN=""
;;
esac
# 1. Check local templates first
msg_info "Searching for template '$TEMPLATE_SEARCH'"
mapfile -t TEMPLATES < <(
pveam list "$TEMPLATE_STORAGE" |
awk -v s="$TEMPLATE_SEARCH" -v p="$TEMPLATE_PATTERN" '$1 ~ s && $1 ~ p {print $1}' |
sed 's/.*\///' | sort -t - -k 2 -V
)
if [ ${#TEMPLATES[@]} -gt 0 ]; then
TEMPLATE_SOURCE="local"
else
msg_info "No local template found, checking online repository"
pveam update >/dev/null 2>&1
mapfile -t TEMPLATES < <(
pveam update >/dev/null 2>&1 &&
pveam available -section system |
sed -n "s/.*\($TEMPLATE_SEARCH.*$TEMPLATE_PATTERN.*\)/\1/p" |
sort -t - -k 2 -V
)
TEMPLATE_SOURCE="online"
fi
TEMPLATE="${TEMPLATES[-1]}"
TEMPLATE_PATH="$(pvesm path $TEMPLATE_STORAGE:vztmpl/$TEMPLATE 2>/dev/null ||
echo "/var/lib/vz/template/cache/$TEMPLATE")"
msg_ok "Template ${BL}$TEMPLATE${CL} [$TEMPLATE_SOURCE]"
# 4. Validate template (exists & not corrupted)
TEMPLATE_VALID=1
if [ ! -s "$TEMPLATE_PATH" ]; then
TEMPLATE_VALID=0
elif ! tar --use-compress-program=zstdcat -tf "$TEMPLATE_PATH" >/dev/null 2>&1; then
TEMPLATE_VALID=0
fi
if [ "$TEMPLATE_VALID" -eq 0 ]; then
msg_warn "Template $TEMPLATE is missing or corrupted. Re-downloading."
[[ -f "$TEMPLATE_PATH" ]] && rm -f "$TEMPLATE_PATH"
for attempt in {1..3}; do
msg_info "Attempt $attempt: Downloading LXC template..."
if pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null 2>&1; then
msg_ok "Template download successful."
break
fi
if [ $attempt -eq 3 ]; then
msg_error "Failed after 3 attempts. Please check network access or manually run:\n pveam download $TEMPLATE_STORAGE $TEMPLATE"
exit 208
fi
sleep $((attempt * 5))
done
fi
msg_info "Creating LXC Container"
# Check and fix subuid/subgid
grep -q "root:100000:65536" /etc/subuid || echo "root:100000:65536" >>/etc/subuid
grep -q "root:100000:65536" /etc/subgid || echo "root:100000:65536" >>/etc/subgid
# Combine all options
PCT_OPTIONS=(${PCT_OPTIONS[@]:-${DEFAULT_PCT_OPTIONS[@]}})
[[ " ${PCT_OPTIONS[@]} " =~ " -rootfs " ]] || PCT_OPTIONS+=(-rootfs "$CONTAINER_STORAGE:${PCT_DISK_SIZE:-8}")
# Secure creation of the LXC container with lock and template check
lockfile="/tmp/template.${TEMPLATE}.lock"
exec 9>"$lockfile" || {
msg_error "Failed to create lock file '$lockfile'."
exit 200
}
flock -w 60 9 || {
msg_error "Timeout while waiting for template lock"
exit 211
}
if ! pct create "$CTID" "${TEMPLATE_STORAGE}:vztmpl/${TEMPLATE}" "${PCT_OPTIONS[@]}" &>/dev/null; then
msg_error "Container creation failed. Checking if template is corrupted or incomplete."
if [[ ! -s "$TEMPLATE_PATH" || "$(stat -c%s "$TEMPLATE_PATH")" -lt 1000000 ]]; then
msg_error "Template file too small or missing - re-downloading."
rm -f "$TEMPLATE_PATH"
elif ! zstdcat "$TEMPLATE_PATH" | tar -tf - &>/dev/null; then
msg_error "Template appears to be corrupted - re-downloading."
rm -f "$TEMPLATE_PATH"
else
msg_error "Template is valid, but container creation still failed."
exit 209
fi
# Retry download
for attempt in {1..3}; do
msg_info "Attempt $attempt: Re-downloading template..."
if timeout 120 pveam download "$TEMPLATE_STORAGE" "$TEMPLATE" >/dev/null; then
msg_ok "Template re-download successful."
break
fi
if [ "$attempt" -eq 3 ]; then
msg_error "Three failed attempts. Aborting."
exit 208
fi
sleep $((attempt * 5))
done
sleep 1 # I/O-Sync-Delay
msg_ok "Re-downloaded LXC Template"
fi
if ! pct list | awk '{print $1}' | grep -qx "$CTID"; then
msg_error "Container ID $CTID not listed in 'pct list' - unexpected failure."
exit 215
fi
if ! grep -q '^rootfs:' "/etc/pve/lxc/$CTID.conf"; then
msg_error "RootFS entry missing in container config - storage not correctly assigned."
exit 216
fi
if grep -q '^hostname:' "/etc/pve/lxc/$CTID.conf"; then
CT_HOSTNAME=$(grep '^hostname:' "/etc/pve/lxc/$CTID.conf" | awk '{print $2}')
if [[ ! "$CT_HOSTNAME" =~ ^[a-z0-9-]+$ ]]; then
msg_warn "Hostname '$CT_HOSTNAME' contains invalid characters - may cause issues with networking or DNS."
fi
fi
msg_ok "LXC Container ${BL}$CTID${CL} ${GN}was successfully created."

View File

@@ -1,48 +1,79 @@
# Copyright (c) 2021-2025 michelroegl-brunner
# Author: michelroegl-brunner
# License: MIT
# https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Copyright (c) 2021-2025 community-scripts ORG
# Author: tteck (tteckster)
# Co-Author: MickLesk
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# ==============================================================================
# INSTALL.FUNC - CONTAINER INSTALLATION & SETUP
# ==============================================================================
#
# This file provides installation functions executed inside LXC containers
# after creation. Handles:
#
# - Network connectivity verification (IPv4/IPv6)
# - OS updates and package installation
# - DNS resolution checks
# - MOTD and SSH configuration
# - Container customization and auto-login
#
# Usage:
# - Sourced by <app>-install.sh scripts
# - Executes via pct exec inside container
# - Requires internet connectivity
#
# ==============================================================================
# ==============================================================================
# SECTION 1: INITIALIZATION
# ==============================================================================
if ! command -v curl >/dev/null 2>&1; then
printf "\r\e[2K%b" '\033[93m Setup Source \033[m' >&2
apt-get update >/dev/null 2>&1
apt-get install -y curl >/dev/null 2>&1
apt update >/dev/null 2>&1
apt install -y curl >/dev/null 2>&1
fi
# core.func is included in FUNCTIONS_FILE_PATH
source "$(dirname "${BASH_SOURCE[0]}")/core.func"
source "$(dirname "${BASH_SOURCE[0]}")/error-handler.func"
load_functions
# This function enables IPv6 if it's not disabled and sets verbose mode
catch_errors
# ==============================================================================
# SECTION 2: NETWORK & CONNECTIVITY
# ==============================================================================
# ------------------------------------------------------------------------------
# verb_ip6()
#
# - Configures IPv6 based on the IPV6_METHOD variable
# - If IPV6_METHOD=disable: disables IPv6 via a sysctl.d drop-in
# - Sets verbose mode via set_std_mode()
# ------------------------------------------------------------------------------
verb_ip6() {
set_std_mode # Set STD mode based on VERBOSE
if [ "$DISABLEIPV6" == "yes" ]; then
echo "net.ipv6.conf.all.disable_ipv6 = 1" >>/etc/sysctl.conf
$STD sysctl -p
if [ "${IPV6_METHOD:-}" = "disable" ]; then
msg_info "Disabling IPv6 (this may affect some services)"
mkdir -p /etc/sysctl.d
$STD tee /etc/sysctl.d/99-disable-ipv6.conf >/dev/null <<EOF
# Disable IPv6 (set by community-scripts)
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
EOF
$STD sysctl -p /etc/sysctl.d/99-disable-ipv6.conf
msg_ok "Disabled IPv6"
fi
}
# This function sets error handling options and defines the error_handler function to handle errors
catch_errors() {
set -Eeuo pipefail
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
# This function handles errors
error_handler() {
printf "\e[?25h"
local exit_code="$?"
local line_number="$1"
local command="$2"
local error_message="${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}"
echo -e "\n$error_message"
if [[ "$line_number" -eq 51 ]]; then
echo -e "The silent function suppressed the error. Re-run the script with verbose mode enabled for more detailed output.\n"
post_update_to_api "failed" "No error message, script ran in silent mode"
else
post_update_to_api "failed" "${command}"
fi
}
# This function sets up the Container OS by generating the locale, setting the timezone, and checking the network connection
# ------------------------------------------------------------------------------
# setting_up_container()
#
# - Verifies network connectivity via hostname -I
# - Retries up to RETRY_NUM times with RETRY_EVERY seconds delay
# - Removes Python EXTERNALLY-MANAGED restrictions
# - Disables systemd-networkd-wait-online.service for faster boot
# - Exits with error if network unavailable after retries
# ------------------------------------------------------------------------------
setting_up_container() {
msg_info "Setting up Container OS"
for ((i = RETRY_NUM; i > 0; i--)); do
@@ -64,8 +95,17 @@ setting_up_container() {
msg_ok "Network Connected: ${BL}$(hostname -I)"
}
# This function checks the network connection by pinging a known IP address and prompts the user to continue if the internet is not connected
# ------------------------------------------------------------------------------
# network_check()
#
# - Comprehensive network connectivity check for IPv4 and IPv6
# - Tests connectivity to multiple DNS servers:
# * IPv4: 1.1.1.1 (Cloudflare), 8.8.8.8 (Google), 9.9.9.9 (Quad9)
# * IPv6: 2606:4700:4700::1111, 2001:4860:4860::8888, 2620:fe::fe
# - Verifies DNS resolution for GitHub and Community-Scripts domains
# - Prompts user to continue if no internet detected
# - Uses fatal() on DNS resolution failure for critical hosts
# ------------------------------------------------------------------------------
network_check() {
set +e
trap - ERR
@@ -125,7 +165,19 @@ network_check() {
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
# This function updates the Container OS by running apt-get update and upgrade
# ==============================================================================
# SECTION 3: OS UPDATE & PACKAGE MANAGEMENT
# ==============================================================================
# ------------------------------------------------------------------------------
# update_os()
#
# - Updates container OS via apt-get update and dist-upgrade
# - Configures APT cacher proxy if CACHER=yes (accelerates package downloads)
# - Removes Python EXTERNALLY-MANAGED restrictions for pip
# - Sources tools.func for additional setup functions after update
# - Uses $STD wrapper to suppress output unless VERBOSE=yes
# ------------------------------------------------------------------------------
update_os() {
msg_info "Updating Container OS"
if [[ "$CACHER" == "yes" ]]; then
@@ -145,10 +197,27 @@ EOF
rm -rf /usr/lib/python3.*/EXTERNALLY-MANAGED
msg_ok "Updated Container OS"
# tools.func is included in FUNCTIONS_FILE_PATH
source "$(dirname "${BASH_SOURCE[0]}")/tools.func"
}
# This function modifies the message of the day (motd) and SSH settings
# ==============================================================================
# SECTION 4: MOTD & SSH CONFIGURATION
# ==============================================================================
# ------------------------------------------------------------------------------
# motd_ssh()
#
# - Configures Message of the Day (MOTD) with container information
# - Creates /etc/profile.d/00_lxc-details.sh with:
# * Application name
# * Warning banner (DEV repository)
# * OS name and version
# * Hostname and IP address
# * GitHub repository link
# - Disables executable flag on /etc/update-motd.d/* scripts
# - Enables root SSH access if SSH_ROOT=yes
# - Configures TERM environment variable for better terminal support
# ------------------------------------------------------------------------------
motd_ssh() {
# Set terminal to 256-color mode
grep -qxF "export TERM='xterm-256color'" /root/.bashrc || echo "export TERM='xterm-256color'" >>/root/.bashrc
@@ -180,7 +249,19 @@ motd_ssh() {
fi
}
# This function customizes the container by modifying the getty service and enabling auto-login for the root user
# ==============================================================================
# SECTION 5: CONTAINER CUSTOMIZATION
# ==============================================================================
# ------------------------------------------------------------------------------
# customize()
#
# - Customizes container for passwordless root login if PASSWORD is empty
# - Configures getty for auto-login via /etc/systemd/system/container-getty@1.service.d/override.conf
# - Creates /usr/bin/update script for easy application updates
# - Injects SSH authorized keys if SSH_AUTHORIZED_KEY variable is set
# - Sets proper permissions on SSH directories and key files
# ------------------------------------------------------------------------------
customize() {
if [[ "$PASSWORD" == "" ]]; then
msg_info "Customizing Container"

View File

@@ -72,23 +72,19 @@ stop_all_services() {
local service_patterns=("$@")
for pattern in "${service_patterns[@]}"; do
# Find all matching services (use || true to avoid pipeline failures)
local services
services=$(systemctl list-units --type=service --all 2>/dev/null |
grep -oE "${pattern}[^ ]*\.service" 2>/dev/null |
sort -u 2>/dev/null || true)
# Find all matching services
systemctl list-units --type=service --all 2>/dev/null |
grep -oE "${pattern}[^ ]*\.service" |
sort -u |
while read -r service; do
# Only process if we found any services
if [[ -n "$services" ]]; then
while IFS= read -r service; do
[[ -z "$service" ]] && continue
$STD systemctl stop "$service" 2>/dev/null || true
$STD systemctl disable "$service" 2>/dev/null || true
done <<<"$services"
fi
done
done
return 0
}
# ------------------------------------------------------------------------------
@@ -1214,7 +1210,7 @@ setup_deb822_repo() {
local gpg_url="$2"
local repo_url="$3"
local suite="$4"
local component="${5-main}"
local component="${5:-main}"
local architectures="${6-}" # optional
# Validate required parameters
@@ -1535,9 +1531,10 @@ check_for_gh_release() {
# ------------------------------------------------------------------------------
create_self_signed_cert() {
local APP_NAME="${1:-${APPLICATION}}"
local CERT_DIR="/etc/ssl/${APP_NAME}"
local CERT_KEY="${CERT_DIR}/${APP_NAME}.key"
local CERT_CRT="${CERT_DIR}/${APP_NAME}.crt"
local APP_NAME_LC=$(echo "${APP_NAME,,}" | tr -d ' ')
local CERT_DIR="/etc/ssl/${APP_NAME_LC}"
local CERT_KEY="${CERT_DIR}/${APP_NAME_LC}.key"
local CERT_CRT="${CERT_DIR}/${APP_NAME_LC}.crt"
if [[ -f "$CERT_CRT" && -f "$CERT_KEY" ]]; then
return 0
@@ -1551,7 +1548,8 @@ create_self_signed_cert() {
mkdir -p "$CERT_DIR"
$STD openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 \
-subj "/C=US/ST=State/L=City/O=Organization/CN=${APP_NAME}" \
-subj "/CN=${APP_NAME}" \
-addext "subjectAltName=DNS:${APP_NAME}" \
-keyout "$CERT_KEY" \
-out "$CERT_CRT" || {
msg_error "Failed to create self-signed certificate"
@@ -2786,12 +2784,19 @@ function setup_java() {
INSTALLED_VERSION=$(dpkg -l 2>/dev/null | awk '/temurin-.*-jdk/{print $2}' | grep -oP 'temurin-\K[0-9]+' | head -n1 || echo "")
fi
# Validate INSTALLED_VERSION is not empty if matched
# Validate INSTALLED_VERSION is not empty if JDK package found
local JDK_COUNT=0
JDK_COUNT=$(dpkg -l 2>/dev/null | grep -c "temurin-.*-jdk")
JDK_COUNT=$(dpkg -l 2>/dev/null | grep -c "temurin-.*-jdk" || true)
if [[ -z "$INSTALLED_VERSION" && "${JDK_COUNT:-0}" -gt 0 ]]; then
msg_warn "Found Temurin JDK but cannot determine version"
INSTALLED_VERSION="0"
msg_warn "Found Temurin JDK but cannot determine version - attempting reinstall"
# Try to get actual package name for purge
local OLD_PACKAGE
OLD_PACKAGE=$(dpkg -l 2>/dev/null | awk '/temurin-.*-jdk/{print $2}' | head -n1 || echo "")
if [[ -n "$OLD_PACKAGE" ]]; then
msg_info "Removing existing package: $OLD_PACKAGE"
$STD apt purge -y "$OLD_PACKAGE" || true
fi
INSTALLED_VERSION="" # Reset to trigger fresh install
fi
# Scenario 1: Already at correct version
@@ -2939,9 +2944,16 @@ setup_mariadb() {
# Resolve "latest" to actual version
if [[ "$MARIADB_VERSION" == "latest" ]]; then
if ! curl -fsI --max-time 10 http://mirror.mariadb.org/repo/ >/dev/null 2>&1; then
msg_warn "MariaDB mirror not reachable - trying cached package list fallback"
# Fallback: try to use a known stable version
MARIADB_VERSION="12.0"
msg_warn "MariaDB mirror not reachable - trying mariadb_repo_setup fallback"
# Try using official mariadb_repo_setup script as fallback
if curl -fsSL --max-time 15 https://r.mariadb.com/downloads/mariadb_repo_setup 2>/dev/null | bash -s -- --skip-verify >/dev/null 2>&1; then
msg_ok "MariaDB repository configured via mariadb_repo_setup"
# Extract version from configured repo
MARIADB_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.list 2>/dev/null | head -n1 || echo "12.2")
else
msg_warn "mariadb_repo_setup failed - using hardcoded fallback version"
MARIADB_VERSION="12.2"
fi
else
MARIADB_VERSION=$(curl -fsSL --max-time 15 http://mirror.mariadb.org/repo/ 2>/dev/null |
grep -Eo '[0-9]+\.[0-9]+\.[0-9]+/' |
@@ -2951,8 +2963,14 @@ setup_mariadb() {
head -n1 || echo "")
if [[ -z "$MARIADB_VERSION" ]]; then
msg_warn "Could not parse latest GA MariaDB version from mirror - using fallback"
MARIADB_VERSION="12.0"
msg_warn "Could not parse latest GA MariaDB version from mirror - trying mariadb_repo_setup"
if curl -fsSL --max-time 15 https://r.mariadb.com/downloads/mariadb_repo_setup 2>/dev/null | bash -s -- --skip-verify >/dev/null 2>&1; then
msg_ok "MariaDB repository configured via mariadb_repo_setup"
MARIADB_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.list 2>/dev/null | head -n1 || echo "12.2")
else
msg_warn "mariadb_repo_setup failed - using hardcoded fallback version"
MARIADB_VERSION="12.2"
fi
fi
fi
fi
@@ -3117,7 +3135,8 @@ function setup_mariadb_db() {
$STD mariadb -u root -e "FLUSH PRIVILEGES;"
local CREDS_FILE="${MARIADB_DB_CREDS_FILE:-${HOME}/${APPLICATION}.creds}"
local app_name="${APPLICATION,,}"
local CREDS_FILE="${MARIADB_DB_CREDS_FILE:-${HOME}/${app_name}.creds}"
{
echo "MariaDB Credentials"
echo "Database: $MARIADB_DB_NAME"
@@ -3226,7 +3245,6 @@ function setup_mongodb() {
return 1
}
# Verify MongoDB was installed correctly
if ! command -v mongod >/dev/null 2>&1; then
msg_error "MongoDB binary not found after installation"
return 1
@@ -3402,12 +3420,12 @@ EOF
# - Optionally installs or updates global npm modules
#
# Variables:
# NODE_VERSION - Node.js version to install (default: 22)
# NODE_VERSION - Node.js version to install (default: 24 LTS)
# NODE_MODULE - Comma-separated list of global modules (e.g. "yarn,@vue/cli@5.0.0")
# ------------------------------------------------------------------------------
function setup_nodejs() {
local NODE_VERSION="${NODE_VERSION:-22}"
local NODE_VERSION="${NODE_VERSION:-24}"
local NODE_MODULE="${NODE_MODULE:-}"
# ALWAYS clean up legacy installations first (nvm, etc.) to prevent conflicts
@@ -3469,14 +3487,11 @@ function setup_nodejs() {
return 1
}
# CRITICAL: Force APT cache refresh AFTER repository setup
# This ensures NodeSource is the only nodejs source in APT cache
# Force APT cache refresh after repository setup
$STD apt update
# Install dependencies (NodeSource is now the only nodejs source)
ensure_dependencies curl ca-certificates gnupg
# Install Node.js from NodeSource
install_packages_with_retry "nodejs" || {
msg_error "Failed to install Node.js ${NODE_VERSION} from NodeSource"
return 1
@@ -3627,59 +3642,57 @@ function setup_php() {
local CURRENT_PHP=""
CURRENT_PHP=$(is_tool_installed "php" 2>/dev/null) || true
# Scenario 1: Already at target version - just update packages
if [[ -n "$CURRENT_PHP" && "$CURRENT_PHP" == "$PHP_VERSION" ]]; then
msg_info "Update PHP $PHP_VERSION"
# Ensure Sury repo is available
if [[ ! -f /etc/apt/sources.list.d/php.sources ]]; then
manage_tool_repository "php" "$PHP_VERSION" "" "https://packages.sury.org/debsuryorg-archive-keyring.deb" || {
msg_error "Failed to setup PHP repository"
return 1
}
fi
ensure_apt_working || return 1
# Perform upgrade with retry logic (non-fatal if fails)
upgrade_packages_with_retry "php${PHP_VERSION}" || true
cache_installed_version "php" "$PHP_VERSION"
msg_ok "Update PHP $PHP_VERSION"
else
# Scenario 2: Different version installed - clean upgrade
if [[ -n "$CURRENT_PHP" && "$CURRENT_PHP" != "$PHP_VERSION" ]]; then
msg_info "Upgrade PHP from $CURRENT_PHP to $PHP_VERSION"
# Stop and disable ALL PHP-FPM versions
stop_all_services "php.*-fpm"
remove_old_tool_version "php"
else
msg_info "Setup PHP $PHP_VERSION"
fi
# Prepare repository (cleanup + validation)
prepare_repository_setup "php" "deb.sury.org-php" || {
msg_error "Failed to prepare PHP repository"
return 1
}
# Setup Sury repository
manage_tool_repository "php" "$PHP_VERSION" "" "https://packages.sury.org/debsuryorg-archive-keyring.deb" || {
msg_error "Failed to setup PHP repository"
return 1
}
ensure_apt_working || return 1
# Remove conflicting PHP version before pinning
if [[ -n "$CURRENT_PHP" && "$CURRENT_PHP" != "$PHP_VERSION" ]]; then
msg_info "Removing conflicting PHP ${CURRENT_PHP} (need ${PHP_VERSION})"
stop_all_services "php.*-fpm"
$STD apt purge -y "php*" 2>/dev/null || true
$STD apt autoremove -y 2>/dev/null || true
fi
# Build module list
# NOW create pinning for the desired version
mkdir -p /etc/apt/preferences.d
cat <<EOF >/etc/apt/preferences.d/php-pin
Package: php${PHP_VERSION}*
Pin: version ${PHP_VERSION}.*
Pin-Priority: 1001
Package: php[0-9].*
Pin: release o=packages.sury.org-php
Pin-Priority: -1
EOF
# Setup repository
prepare_repository_setup "php" "deb.sury.org-php" || {
msg_error "Failed to prepare PHP repository"
return 1
}
manage_tool_repository "php" "$PHP_VERSION" "" "https://packages.sury.org/debsuryorg-archive-keyring.deb" || {
msg_error "Failed to setup PHP repository"
return 1
}
ensure_apt_working || return 1
$STD apt update
# Get available PHP version from repository
local AVAILABLE_PHP_VERSION=""
AVAILABLE_PHP_VERSION=$(apt-cache show "php${PHP_VERSION}" 2>/dev/null | grep -m1 "^Version:" | awk '{print $2}' | cut -d- -f1) || true
if [[ -z "$AVAILABLE_PHP_VERSION" ]]; then
msg_error "PHP ${PHP_VERSION} not found in configured repositories"
return 1
fi
# Build module list - without version pinning (preferences.d handles it)
local MODULE_LIST="php${PHP_VERSION}"
IFS=',' read -ra MODULES <<<"$COMBINED_MODULES"
for mod in "${MODULES[@]}"; do
if apt-cache show "php${PHP_VERSION}-${mod}" >/dev/null 2>&1; then
MODULE_LIST+=" php${PHP_VERSION}-${mod}"
fi
MODULE_LIST+=" php${PHP_VERSION}-${mod}"
done
if [[ "$PHP_FPM" == "YES" ]]; then
MODULE_LIST+=" php${PHP_VERSION}-fpm"
fi
@@ -3687,18 +3700,52 @@ function setup_php() {
# install apache2 with PHP support if requested
if [[ "$PHP_APACHE" == "YES" ]]; then
if ! dpkg -l 2>/dev/null | grep -q "libapache2-mod-php${PHP_VERSION}"; then
install_packages_with_retry "apache2" "libapache2-mod-php${PHP_VERSION}" || {
msg_error "Failed to install Apache with PHP module"
msg_info "Installing Apache with PHP ${PHP_VERSION} module"
install_packages_with_retry "apache2" || {
msg_error "Failed to install Apache"
return 1
}
install_packages_with_retry "libapache2-mod-php${PHP_VERSION}" || {
msg_warn "Failed to install libapache2-mod-php${PHP_VERSION}, continuing without Apache module"
}
fi
fi
# Install PHP packages with retry logic
install_packages_with_retry $MODULE_LIST || {
msg_error "Failed to install PHP packages"
return 1
}
# Install PHP packages (pinning via preferences.d ensures correct version)
msg_info "Installing PHP ${PHP_VERSION} packages"
if ! install_packages_with_retry $MODULE_LIST; then
msg_warn "Failed to install PHP packages, attempting individual installation"
# Install main package first (critical)
install_packages_with_retry "php${PHP_VERSION}" || {
msg_error "Failed to install php${PHP_VERSION}"
return 1
}
# Try to install Apache module individually if requested
if [[ "$PHP_APACHE" == "YES" ]]; then
install_packages_with_retry "libapache2-mod-php${PHP_VERSION}" || {
msg_warn "Could not install libapache2-mod-php${PHP_VERSION}"
}
fi
# Try to install modules individually - skip those that don't exist
for pkg in "${MODULES[@]}"; do
if apt-cache search "^php${PHP_VERSION}-${pkg}\$" 2>/dev/null | grep -q "^php${PHP_VERSION}-${pkg}"; then
install_packages_with_retry "php${PHP_VERSION}-${pkg}" || {
msg_warn "Could not install php${PHP_VERSION}-${pkg}"
}
fi
done
if [[ "$PHP_FPM" == "YES" ]]; then
if apt-cache search "^php${PHP_VERSION}-fpm\$" 2>/dev/null | grep -q "^php${PHP_VERSION}-fpm"; then
install_packages_with_retry "php${PHP_VERSION}-fpm" || {
msg_warn "Could not install php${PHP_VERSION}-fpm"
}
fi
fi
fi
cache_installed_version "php" "$PHP_VERSION"
# Patch all relevant php.ini files
@@ -3734,7 +3781,23 @@ function setup_php() {
fi
fi
msg_ok "Setup PHP $PHP_VERSION"
# Verify PHP installation - critical check
if ! command -v php >/dev/null 2>&1; then
msg_error "PHP installation verification failed - php command not found"
return 1
fi
local INSTALLED_VERSION=$(php -v 2>/dev/null | awk '/^PHP/{print $2}' | cut -d. -f1,2)
if [[ "$INSTALLED_VERSION" != "$PHP_VERSION" ]]; then
msg_error "PHP version mismatch: requested ${PHP_VERSION} but got ${INSTALLED_VERSION}"
msg_error "This indicates a critical package installation issue"
# Don't cache wrong version
return 1
fi
cache_installed_version "php" "$INSTALLED_VERSION"
msg_ok "Setup PHP ${INSTALLED_VERSION}"
}
# ------------------------------------------------------------------------------
@@ -3805,11 +3868,14 @@ function setup_postgresql() {
local SUITE
case "$DISTRO_CODENAME" in
trixie | forky | sid)
if verify_repo_available "https://apt.postgresql.org/pub/repos/apt" "trixie-pgdg"; then
SUITE="trixie-pgdg"
else
SUITE="bookworm-pgdg"
fi
;;
*)
SUITE=$(get_fallback_suite "$DISTRO_ID" "$DISTRO_CODENAME" "https://apt.postgresql.org/pub/repos/apt")
@@ -3972,7 +4038,8 @@ function setup_postgresql_db() {
fi
# Save credentials
local CREDS_FILE="${PG_DB_CREDS_FILE:-${HOME}/${APPLICATION}.creds}"
local app_name="${APPLICATION,,}"
local CREDS_FILE="${PG_DB_CREDS_FILE:-${HOME}/${app_name}.creds}"
{
echo "PostgreSQL Credentials"
echo "Database: $PG_DB_NAME"
@@ -4374,7 +4441,7 @@ function setup_rust() {
}
# Update to latest patch version
$STD rustup update "$RUST_TOOLCHAIN" || true
$STD rustup update "$RUST_TOOLCHAIN" </dev/null || true
# Ensure PATH is updated for current shell session
export PATH="$CARGO_BIN:$PATH"
@@ -4702,3 +4769,214 @@ function setup_yq() {
cache_installed_version "yq" "$FINAL_VERSION"
msg_ok "Setup yq $FINAL_VERSION"
}
# ------------------------------------------------------------------------------
# Docker Engine Installation and Management (All-In-One)
#
# Description:
# - Detects and migrates old Docker installations
# - Installs/Updates Docker Engine via official repository
# - Optional: Installs/Updates Portainer CE
# - Updates running containers interactively
# - Cleans up legacy repository files
#
# Usage:
# setup_docker
# DOCKER_PORTAINER="true" setup_docker
# DOCKER_LOG_DRIVER="json-file" setup_docker
#
# Variables:
# DOCKER_PORTAINER - Install Portainer CE (optional, "true" to enable)
# DOCKER_LOG_DRIVER - Log driver (optional, default: "journald")
# DOCKER_SKIP_UPDATES - Skip container update check (optional, "true" to skip)
#
# Features:
# - Migrates from get.docker.com to repository-based installation
# - Updates Docker Engine if newer version available
# - Interactive container update with multi-select
# - Portainer installation and update support
# ------------------------------------------------------------------------------
function setup_docker() {
local docker_installed=false
local portainer_installed=false
# Check if Docker is already installed
if command -v docker &>/dev/null; then
docker_installed=true
DOCKER_CURRENT_VERSION=$(docker --version | grep -oP '\d+\.\d+\.\d+' | head -1)
msg_info "Docker $DOCKER_CURRENT_VERSION detected"
fi
# Check if Portainer is running
if docker ps --format '{{.Names}}' 2>/dev/null | grep -q '^portainer$'; then
portainer_installed=true
msg_info "Portainer container detected"
fi
# Cleanup old repository configurations
if [ -f /etc/apt/sources.list.d/docker.list ]; then
msg_info "Migrating from old Docker repository format"
rm -f /etc/apt/sources.list.d/docker.list
rm -f /etc/apt/keyrings/docker.asc
fi
# Setup/Update Docker repository
msg_info "Setting up Docker Repository"
setup_deb822_repo \
"docker" \
"https://download.docker.com/linux/$(get_os_info id)/gpg" \
"https://download.docker.com/linux/$(get_os_info id)" \
"$(get_os_info codename)" \
"stable" \
"$(dpkg --print-architecture)"
# Install or upgrade Docker
if [ "$docker_installed" = true ]; then
msg_info "Checking for Docker updates"
DOCKER_LATEST_VERSION=$(apt-cache policy docker-ce | grep Candidate | awk '{print $2}' | cut -d':' -f2 | cut -d'-' -f1)
if [ "$DOCKER_CURRENT_VERSION" != "$DOCKER_LATEST_VERSION" ]; then
msg_info "Updating Docker $DOCKER_CURRENT_VERSION → $DOCKER_LATEST_VERSION"
$STD apt install -y --only-upgrade \
docker-ce \
docker-ce-cli \
containerd.io \
docker-buildx-plugin \
docker-compose-plugin
msg_ok "Updated Docker to $DOCKER_LATEST_VERSION"
else
msg_ok "Docker is up-to-date ($DOCKER_CURRENT_VERSION)"
fi
else
msg_info "Installing Docker"
$STD apt install -y \
docker-ce \
docker-ce-cli \
containerd.io \
docker-buildx-plugin \
docker-compose-plugin
DOCKER_CURRENT_VERSION=$(docker --version | grep -oP '\d+\.\d+\.\d+' | head -1)
msg_ok "Installed Docker $DOCKER_CURRENT_VERSION"
fi
# Configure daemon.json
local log_driver="${DOCKER_LOG_DRIVER:-journald}"
mkdir -p /etc/docker
if [ ! -f /etc/docker/daemon.json ]; then
cat <<EOF >/etc/docker/daemon.json
{
"log-driver": "$log_driver"
}
EOF
fi
# Enable and start Docker
systemctl enable -q --now docker
# Portainer Management
if [[ "${DOCKER_PORTAINER:-}" == "true" ]]; then
if [ "$portainer_installed" = true ]; then
msg_info "Checking for Portainer updates"
PORTAINER_CURRENT=$(docker inspect portainer --format='{{.Config.Image}}' 2>/dev/null | cut -d':' -f2)
PORTAINER_LATEST=$(curl -fsSL https://registry.hub.docker.com/v2/repositories/portainer/portainer-ce/tags?page_size=100 | grep -oP '"name":"\K[0-9]+\.[0-9]+\.[0-9]+"' | head -1 | tr -d '"')
if [ "$PORTAINER_CURRENT" != "$PORTAINER_LATEST" ]; then
read -r -p "${TAB3}Update Portainer $PORTAINER_CURRENT → $PORTAINER_LATEST? <y/N> " prompt
if [[ ${prompt,,} =~ ^(y|yes)$ ]]; then
msg_info "Updating Portainer"
docker stop portainer
docker rm portainer
docker pull portainer/portainer-ce:latest
docker run -d \
-p 9000:9000 \
-p 9443:9443 \
--name=portainer \
--restart=always \
-v /var/run/docker.sock:/var/run/docker.sock \
-v portainer_data:/data \
portainer/portainer-ce:latest
msg_ok "Updated Portainer to $PORTAINER_LATEST"
fi
else
msg_ok "Portainer is up-to-date ($PORTAINER_CURRENT)"
fi
else
msg_info "Installing Portainer"
docker volume create portainer_data
docker run -d \
-p 9000:9000 \
-p 9443:9443 \
--name=portainer \
--restart=always \
-v /var/run/docker.sock:/var/run/docker.sock \
-v portainer_data:/data \
portainer/portainer-ce:latest
LOCAL_IP=$(hostname -I | awk '{print $1}')
msg_ok "Installed Portainer (http://${LOCAL_IP}:9000)"
fi
fi
# Interactive Container Update Check
if [[ "${DOCKER_SKIP_UPDATES:-}" != "true" ]] && [ "$docker_installed" = true ]; then
msg_info "Checking for container updates"
# Get list of running containers with update status
local containers_with_updates=()
local container_info=()
local index=1
while IFS= read -r container; do
local name=$(echo "$container" | awk '{print $1}')
local image=$(echo "$container" | awk '{print $2}')
local current_digest=$(docker inspect "$name" --format='{{.Image}}' 2>/dev/null | cut -d':' -f2 | cut -c1-12)
# Pull latest image digest
docker pull "$image" >/dev/null 2>&1
local latest_digest=$(docker inspect "$image" --format='{{.Id}}' 2>/dev/null | cut -d':' -f2 | cut -c1-12)
if [ "$current_digest" != "$latest_digest" ]; then
containers_with_updates+=("$name")
container_info+=("${index}) ${name} (${image})")
((index++))
fi
done < <(docker ps --format '{{.Names}} {{.Image}}')
if [ ${#containers_with_updates[@]} -gt 0 ]; then
echo ""
echo "${TAB3}Container updates available:"
for info in "${container_info[@]}"; do
echo "${TAB3} $info"
done
echo ""
read -r -p "${TAB3}Select containers to update (e.g., 1,3,5 or 'all' or 'none'): " selection
if [[ ${selection,,} == "all" ]]; then
for container in "${containers_with_updates[@]}"; do
msg_info "Updating container: $container"
docker stop "$container"
docker rm "$container"
# Note: recreating needs the original 'docker run' arguments, so the container is only stopped and removed here - recreate it via compose or its original run command
msg_ok "Stopped and removed $container (please recreate with updated image)"
done
elif [[ ${selection,,} != "none" ]]; then
IFS=',' read -ra SELECTED <<<"$selection"
for num in "${SELECTED[@]}"; do
num=$(echo "$num" | xargs) # trim whitespace
if [[ "$num" =~ ^[0-9]+$ ]] && [ "$num" -ge 1 ] && [ "$num" -le "${#containers_with_updates[@]}" ]; then
container="${containers_with_updates[$((num - 1))]}"
msg_info "Updating container: $container"
docker stop "$container"
docker rm "$container"
msg_ok "Stopped and removed $container (please recreate with updated image)"
fi
done
fi
else
msg_ok "All containers are up-to-date"
fi
fi
msg_ok "Docker setup completed"
}

426
server.js
View File

@@ -75,9 +75,13 @@ const handle = app.getRequestHandler();
* @property {boolean} [isUpdate]
* @property {boolean} [isShell]
* @property {boolean} [isBackup]
* @property {boolean} [isClone]
* @property {string} [containerId]
* @property {string} [storage]
* @property {string} [backupStorage]
* @property {number} [cloneCount]
* @property {string[]} [hostnames]
* @property {'lxc'|'vm'} [containerType]
*/
class ScriptExecutionHandler {
@@ -295,12 +299,14 @@ class ScriptExecutionHandler {
* @param {WebSocketMessage} message
*/
async handleMessage(ws, message) {
const { action, scriptPath, executionId, input, mode, server, isUpdate, isShell, isBackup, containerId, storage, backupStorage } = message;
const { action, scriptPath, executionId, input, mode, server, isUpdate, isShell, isBackup, isClone, containerId, storage, backupStorage, cloneCount, hostnames, containerType } = message;
switch (action) {
case 'start':
if (scriptPath && executionId) {
if (isBackup && containerId && storage) {
if (isClone && containerId && storage && server && cloneCount && hostnames && containerType) {
await this.startSSHCloneExecution(ws, containerId, executionId, storage, server, containerType, cloneCount, hostnames);
} else if (isBackup && containerId && storage) {
await this.startBackupExecution(ws, containerId, executionId, storage, mode, server);
} else if (isUpdate && containerId) {
await this.startUpdateExecution(ws, containerId, executionId, mode, server, backupStorage);
@@ -832,6 +838,422 @@ class ScriptExecutionHandler {
});
}
/**
* Start SSH clone execution
* Gets next IDs sequentially: get next ID → clone → get next ID → clone, etc.
* @param {ExtendedWebSocket} ws
* @param {string} containerId
* @param {string} executionId
* @param {string} storage
* @param {ServerInfo} server
* @param {'lxc'|'vm'} containerType
* @param {number} cloneCount
* @param {string[]} hostnames
*/
async startSSHCloneExecution(ws, containerId, executionId, storage, server, containerType, cloneCount, hostnames) {
const sshService = getSSHExecutionService();
this.sendMessage(ws, {
type: 'start',
data: `Starting clone operation: Creating ${cloneCount} clone(s) of ${containerType.toUpperCase()} ${containerId}...`,
timestamp: Date.now()
});
try {
// Step 1: Stop source container/VM
this.sendMessage(ws, {
type: 'output',
data: `\n[Step 1/${4 + cloneCount}] Stopping source ${containerType.toUpperCase()} ${containerId}...\n`,
timestamp: Date.now()
});
const stopCommand = containerType === 'lxc' ? `pct stop ${containerId}` : `qm stop ${containerId}`;
await new Promise(/** @type {(resolve: (value?: void) => void, reject: (error?: any) => void) => void} */ ((resolve, reject) => {
sshService.executeCommand(
server,
stopCommand,
/** @param {string} data */
(data) => {
this.sendMessage(ws, {
type: 'output',
data: data,
timestamp: Date.now()
});
},
/** @param {string} error */
(error) => {
this.sendMessage(ws, {
type: 'error',
data: error,
timestamp: Date.now()
});
},
/** @param {number} code */
(code) => {
if (code === 0) {
this.sendMessage(ws, {
type: 'output',
data: `\n[Step 1/${4 + cloneCount}] Source ${containerType.toUpperCase()} stopped successfully.\n`,
timestamp: Date.now()
});
resolve();
} else {
// Continue even if stop fails (might already be stopped)
this.sendMessage(ws, {
type: 'output',
data: `\n[Step 1/${4 + cloneCount}] Stop command completed with exit code ${code} (container may already be stopped).\n`,
timestamp: Date.now()
});
resolve();
}
}
);
}));
// Step 2: Clone for each clone count (get next ID sequentially before each clone)
const clonedIds = [];
for (let i = 0; i < cloneCount; i++) {
const cloneNumber = i + 1;
const hostname = hostnames[i];
// Get next ID for this clone
this.sendMessage(ws, {
type: 'output',
data: `\n[Step ${2 + i}/${4 + cloneCount}] Getting next available ID for clone ${cloneNumber}...\n`,
timestamp: Date.now()
});
let nextId = '';
try {
let output = '';
await new Promise(/** @type {(resolve: (value?: void) => void, reject: (error?: any) => void) => void} */ ((resolve, reject) => {
sshService.executeCommand(
server,
'pvesh get /cluster/nextid',
/** @param {string} data */
(data) => {
output += data;
},
/** @param {string} error */
(error) => {
reject(new Error(`Failed to get next ID: ${error}`));
},
/** @param {number} exitCode */
(exitCode) => {
if (exitCode === 0) {
resolve();
} else {
reject(new Error(`pvesh command failed with exit code ${exitCode}`));
}
}
);
}));
nextId = output.trim();
if (!nextId || !/^\d+$/.test(nextId)) {
throw new Error('Invalid next ID received');
}
this.sendMessage(ws, {
type: 'output',
data: `\n[Step ${2 + i}/${4 + cloneCount}] Got next ID: ${nextId}\n`,
timestamp: Date.now()
});
} catch (error) {
this.sendMessage(ws, {
type: 'error',
data: `\n[Step ${2 + i}/${4 + cloneCount}] Failed to get next ID: ${error instanceof Error ? error.message : String(error)}\n`,
timestamp: Date.now()
});
throw error;
}
clonedIds.push(nextId);
// Clone the container/VM
this.sendMessage(ws, {
type: 'output',
data: `\n[Step ${2 + i}/${4 + cloneCount}] Cloning ${containerType.toUpperCase()} ${containerId} to ${nextId} with hostname ${hostname}...\n`,
timestamp: Date.now()
});
const cloneCommand = containerType === 'lxc'
? `pct clone ${containerId} ${nextId} --hostname ${hostname} --storage ${storage}`
: `qm clone ${containerId} ${nextId} --name ${hostname} --storage ${storage}`;
await new Promise(/** @type {(resolve: (value?: void) => void, reject: (error?: any) => void) => void} */ ((resolve, reject) => {
sshService.executeCommand(
server,
cloneCommand,
/** @param {string} data */
(data) => {
this.sendMessage(ws, {
type: 'output',
data: data,
timestamp: Date.now()
});
},
/** @param {string} error */
(error) => {
this.sendMessage(ws, {
type: 'error',
data: error,
timestamp: Date.now()
});
},
/** @param {number} code */
(code) => {
if (code === 0) {
this.sendMessage(ws, {
type: 'output',
data: `\n[Step ${2 + i}/${4 + cloneCount}] Clone ${cloneNumber} created successfully.\n`,
timestamp: Date.now()
});
resolve();
} else {
this.sendMessage(ws, {
type: 'error',
data: `\nClone ${cloneNumber} failed with exit code: ${code}\n`,
timestamp: Date.now()
});
reject(new Error(`Clone ${cloneNumber} failed with exit code ${code}`));
}
}
);
}));
}
// Step 3: Start source container/VM
this.sendMessage(ws, {
type: 'output',
data: `\n[Step ${2 + cloneCount + 1}/${4 + cloneCount}] Starting source ${containerType.toUpperCase()} ${containerId}...\n`,
timestamp: Date.now()
});
const startSourceCommand = containerType === 'lxc' ? `pct start ${containerId}` : `qm start ${containerId}`;
await new Promise(/** @type {(resolve: (value?: void) => void, reject: (error?: any) => void) => void} */ ((resolve) => {
sshService.executeCommand(
server,
startSourceCommand,
/** @param {string} data */
(data) => {
this.sendMessage(ws, {
type: 'output',
data: data,
timestamp: Date.now()
});
},
/** @param {string} error */
(error) => {
this.sendMessage(ws, {
type: 'error',
data: error,
timestamp: Date.now()
});
},
/** @param {number} code */
(code) => {
if (code === 0) {
this.sendMessage(ws, {
type: 'output',
data: `\n[Step ${2 + cloneCount + 1}/${4 + cloneCount}] Source ${containerType.toUpperCase()} started successfully.\n`,
timestamp: Date.now()
});
} else {
this.sendMessage(ws, {
type: 'output',
data: `\n[Step ${2 + cloneCount + 1}/${4 + cloneCount}] Start command completed with exit code ${code}.\n`,
timestamp: Date.now()
});
}
resolve();
}
);
}));
// Step 4: Start target containers/VMs
this.sendMessage(ws, {
type: 'output',
data: `\n[Step ${2 + cloneCount + 2}/${4 + cloneCount}] Starting cloned ${containerType.toUpperCase()}(s)...\n`,
timestamp: Date.now()
});
for (let i = 0; i < cloneCount; i++) {
const cloneNumber = i + 1;
const nextId = clonedIds[i];
const startTargetCommand = containerType === 'lxc' ? `pct start ${nextId}` : `qm start ${nextId}`;
await new Promise(/** @type {(resolve: (value?: void) => void, reject: (error?: any) => void) => void} */ ((resolve) => {
sshService.executeCommand(
server,
startTargetCommand,
/** @param {string} data */
(data) => {
this.sendMessage(ws, {
type: 'output',
data: data,
timestamp: Date.now()
});
},
/** @param {string} error */
(error) => {
this.sendMessage(ws, {
type: 'error',
data: error,
timestamp: Date.now()
});
},
/** @param {number} code */
(code) => {
if (code === 0) {
this.sendMessage(ws, {
type: 'output',
data: `\nClone ${cloneNumber} (ID: ${nextId}) started successfully.\n`,
timestamp: Date.now()
});
} else {
this.sendMessage(ws, {
type: 'output',
data: `\nClone ${cloneNumber} (ID: ${nextId}) start completed with exit code ${code}.\n`,
timestamp: Date.now()
});
}
resolve();
}
);
}));
}
// Step 5: Add to database
this.sendMessage(ws, {
type: 'output',
data: `\n[Step ${2 + cloneCount + 3}/${4 + cloneCount}] Adding cloned ${containerType.toUpperCase()}(s) to database...\n`,
timestamp: Date.now()
});
for (let i = 0; i < cloneCount; i++) {
const nextId = clonedIds[i];
const hostname = hostnames[i];
try {
// Read config file to get hostname/name
const configPath = containerType === 'lxc'
? `/etc/pve/lxc/${nextId}.conf`
: `/etc/pve/qemu-server/${nextId}.conf`;
let configContent = '';
await new Promise(/** @type {(resolve: (value?: void) => void) => void} */ ((resolve) => {
sshService.executeCommand(
server,
`cat "${configPath}" 2>/dev/null || echo ""`,
/** @param {string} data */
(data) => {
configContent += data;
},
() => resolve(),
() => resolve()
);
}));
// Parse config for hostname/name
let finalHostname = hostname;
if (configContent.trim()) {
const lines = configContent.split('\n');
for (const line of lines) {
const trimmed = line.trim();
if (containerType === 'lxc' && trimmed.startsWith('hostname:')) {
finalHostname = trimmed.substring(9).trim();
break;
} else if (containerType === 'vm' && trimmed.startsWith('name:')) {
finalHostname = trimmed.substring(5).trim();
break;
}
}
}
if (!finalHostname) {
finalHostname = `${containerType}-${nextId}`;
}
// Create installed script record
const script = await this.db.createInstalledScript({
script_name: finalHostname,
script_path: `cloned/${finalHostname}`,
container_id: nextId,
server_id: server.id,
execution_mode: 'ssh',
status: 'success',
output_log: `Cloned ${containerType.toUpperCase()}`
});
// For LXC, store config in database
if (containerType === 'lxc' && configContent.trim()) {
// Simple config parser
/** @type {any} */
const configData = {};
const lines = configContent.split('\n');
for (const line of lines) {
const trimmed = line.trim();
if (!trimmed || trimmed.startsWith('#')) continue;
const [key, ...valueParts] = trimmed.split(':');
const value = valueParts.join(':').trim();
if (key === 'hostname') configData.hostname = value;
else if (key === 'arch') configData.arch = value;
else if (key === 'cores') configData.cores = parseInt(value) || null;
else if (key === 'memory') configData.memory = parseInt(value) || null;
else if (key === 'swap') configData.swap = parseInt(value) || null;
else if (key === 'onboot') configData.onboot = parseInt(value) || null;
else if (key === 'ostype') configData.ostype = value;
else if (key === 'unprivileged') configData.unprivileged = parseInt(value) || null;
else if (key === 'tags') configData.tags = value;
else if (key === 'rootfs') {
const match = value.match(/^([^:]+):([^,]+)/);
if (match) {
configData.rootfs_storage = match[1];
const sizeMatch = value.match(/size=([^,]+)/);
if (sizeMatch) {
configData.rootfs_size = sizeMatch[1];
}
}
}
}
await this.db.createLXCConfig(script.id, configData);
}
this.sendMessage(ws, {
type: 'output',
data: `\nClone ${i + 1} (ID: ${nextId}, Hostname: ${finalHostname}) added to database successfully.\n`,
timestamp: Date.now()
});
} catch (error) {
this.sendMessage(ws, {
type: 'error',
data: `\nError adding clone ${i + 1} (ID: ${nextId}) to database: ${error instanceof Error ? error.message : String(error)}\n`,
timestamp: Date.now()
});
}
}
this.sendMessage(ws, {
type: 'output',
data: `\n\n[Clone operation completed successfully!]\nCreated ${cloneCount} clone(s) of ${containerType.toUpperCase()} ${containerId}.\n`,
timestamp: Date.now()
});
this.activeExecutions.delete(executionId);
} catch (error) {
this.sendMessage(ws, {
type: 'error',
data: `\n\n[Clone operation failed!]\nError: ${error instanceof Error ? error.message : String(error)}\n`,
timestamp: Date.now()
});
this.activeExecutions.delete(executionId);
}
}
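Each step above wraps the callback-based sshService.executeCommand call in its own Promise. Below is a minimal TypeScript sketch of a reusable wrapper; it is a hypothetical helper, not part of this diff, and the CommandExecutor shape is an assumption standing in for the real SSH execution service.
// Hypothetical helper: collect stdout, resolve on exit code 0, reject otherwise.
type SSHOutputHandler = (data: string) => void;
type SSHErrorHandler = (error: string) => void;
type SSHExitHandler = (code: number) => void;

interface CommandExecutor {
  executeCommand(
    server: unknown,
    command: string,
    onOutput: SSHOutputHandler,
    onError: SSHErrorHandler,
    onExit: SSHExitHandler
  ): void;
}

function execSSH(executor: CommandExecutor, server: unknown, command: string): Promise<string> {
  return new Promise<string>((resolve, reject) => {
    let output = '';
    executor.executeCommand(
      server,
      command,
      (data) => { output += data; },
      (error) => reject(new Error(error)),
      (code) => (code === 0 ? resolve(output) : reject(new Error(`"${command}" exited with code ${code}`)))
    );
  });
}

// e.g. const nextId = (await execSSH(sshService, server, 'pvesh get /cluster/nextid')).trim();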
/**
* Start update execution (pct enter + update command)
* @param {ExtendedWebSocket} ws

View File

@@ -0,0 +1,129 @@
'use client';
import { useState, useEffect } from 'react';
import { Button } from './ui/button';
import { Input } from './ui/input';
import { Copy, X } from 'lucide-react';
import { useRegisterModal } from './modal/ModalStackProvider';
interface CloneCountInputModalProps {
isOpen: boolean;
onClose: () => void;
onSubmit: (count: number) => void;
storageName: string;
}
export function CloneCountInputModal({
isOpen,
onClose,
onSubmit,
storageName
}: CloneCountInputModalProps) {
const [cloneCount, setCloneCount] = useState<number>(1);
useRegisterModal(isOpen, { id: 'clone-count-input-modal', allowEscape: true, onClose });
useEffect(() => {
if (isOpen) {
setCloneCount(1); // Reset to default when modal opens
}
}, [isOpen]);
if (!isOpen) return null;
const handleSubmit = () => {
if (cloneCount >= 1) {
onSubmit(cloneCount);
setCloneCount(1); // Reset after submit
}
};
const handleClose = () => {
setCloneCount(1); // Reset on close
onClose();
};
return (
<div className="fixed inset-0 backdrop-blur-sm bg-black/50 flex items-center justify-center z-50 p-4">
<div className="bg-card rounded-lg shadow-xl max-w-md w-full border border-border">
{/* Header */}
<div className="flex items-center justify-between p-6 border-b border-border">
<div className="flex items-center gap-3">
<Copy className="h-6 w-6 text-primary" />
<h2 className="text-2xl font-bold text-card-foreground">Clone Count</h2>
</div>
<Button
onClick={handleClose}
variant="ghost"
size="icon"
className="text-muted-foreground hover:text-foreground"
>
<X className="h-5 w-5" />
</Button>
</div>
{/* Content */}
<div className="p-6">
<p className="text-sm text-muted-foreground mb-4">
How many clones would you like to create?
</p>
{storageName && (
<div className="mb-4 p-3 bg-muted/50 rounded-lg">
<p className="text-sm text-muted-foreground">Storage:</p>
<p className="text-sm font-medium text-foreground">{storageName}</p>
</div>
)}
<div className="space-y-2 mb-6">
<label htmlFor="cloneCount" className="block text-sm font-medium text-foreground">
Number of Clones
</label>
<Input
id="cloneCount"
type="number"
min="1"
max="100"
value={cloneCount}
onChange={(e) => {
const value = parseInt(e.target.value, 10);
if (!isNaN(value) && value >= 1 && value <= 100) {
setCloneCount(value);
} else if (e.target.value === '') {
setCloneCount(1);
}
}}
className="w-full"
placeholder="1"
/>
<p className="text-xs text-muted-foreground">
Enter a number between 1 and 100
</p>
</div>
{/* Action Buttons */}
<div className="flex flex-col sm:flex-row justify-end gap-3">
<Button
onClick={handleClose}
variant="outline"
size="default"
className="w-full sm:w-auto"
>
Cancel
</Button>
<Button
onClick={handleSubmit}
disabled={cloneCount < 1 || cloneCount > 100}
variant="default"
size="default"
className="w-full sm:w-auto"
>
Continue
</Button>
</div>
</div>
</div>
</div>
);
}

View File

@@ -1630,7 +1630,7 @@ export function GeneralSettingsModal({
https://github.com/owner/repo)
</p>
</div>
<div className="flex items-center justify-between">
<div className="border-border flex items-center justify-between gap-3 rounded-lg border p-3">
<div>
<p className="text-foreground text-sm font-medium">
Enable after adding
@@ -1644,6 +1644,7 @@ export function GeneralSettingsModal({
onCheckedChange={setNewRepoEnabled}
disabled={isAddingRepo}
label="Enable repository"
labelPosition="left"
/>
</div>
<Button
@@ -1739,44 +1740,7 @@ export function GeneralSettingsModal({
{repo.enabled ? "• Enabled" : "• Disabled"}
</p>
</div>
<div className="flex items-center gap-2">
<Toggle
checked={repo.enabled}
onCheckedChange={async (enabled) => {
setMessage(null);
try {
const result =
await updateRepoMutation.mutateAsync({
id: repo.id,
enabled,
});
if (result.success) {
setMessage({
type: "success",
text: `Repository ${enabled ? "enabled" : "disabled"} successfully!`,
});
await refetchRepositories();
} else {
setMessage({
type: "error",
text:
result.error ??
"Failed to update repository",
});
}
} catch (error) {
setMessage({
type: "error",
text:
error instanceof Error
? error.message
: "Failed to update repository",
});
}
}}
disabled={updateRepoMutation.isPending}
label={repo.enabled ? "Disable" : "Enable"}
/>
<div className="flex items-center gap-2 flex-shrink-0">
<Button
onClick={async () => {
if (!repo.is_removable) {
@@ -1837,6 +1801,44 @@ export function GeneralSettingsModal({
>
<Trash2 className="h-4 w-4" />
</Button>
<Toggle
checked={repo.enabled}
onCheckedChange={async (enabled) => {
setMessage(null);
try {
const result =
await updateRepoMutation.mutateAsync({
id: repo.id,
enabled,
});
if (result.success) {
setMessage({
type: "success",
text: `Repository ${enabled ? "enabled" : "disabled"} successfully!`,
});
await refetchRepositories();
} else {
setMessage({
type: "error",
text:
result.error ??
"Failed to update repository",
});
}
} catch (error) {
setMessage({
type: "error",
text:
error instanceof Error
? error.message
: "Failed to update repository",
});
}
}}
disabled={updateRepoMutation.isPending}
label={repo.enabled ? "Disable" : "Enable"}
labelPosition="left"
/>
</div>
</div>
),

View File

@@ -12,6 +12,7 @@ import { LoadingModal } from "./LoadingModal";
import { LXCSettingsModal } from "./LXCSettingsModal";
import { StorageSelectionModal } from "./StorageSelectionModal";
import { BackupWarningModal } from "./BackupWarningModal";
import { CloneCountInputModal } from "./CloneCountInputModal";
import type { Storage } from "~/server/services/storageService";
import { getContrastColor } from "../../lib/colorUtils";
import {
@@ -68,6 +69,12 @@ export function InstalledScriptsTab() {
server?: any;
backupStorage?: string;
isBackupOnly?: boolean;
isClone?: boolean;
executionId?: string;
cloneCount?: number;
hostnames?: string[];
containerType?: 'lxc' | 'vm';
storage?: string;
} | null>(null);
const [openingShell, setOpeningShell] = useState<{
id: number;
@@ -82,6 +89,14 @@ export function InstalledScriptsTab() {
const [isLoadingStorages, setIsLoadingStorages] = useState(false);
const [showBackupWarning, setShowBackupWarning] = useState(false);
const [isPreUpdateBackup, setIsPreUpdateBackup] = useState(false); // Track if storage selection is for pre-update backup
const [pendingCloneScript, setPendingCloneScript] = useState<InstalledScript | null>(null);
const [cloneStorages, setCloneStorages] = useState<Storage[]>([]);
const [isLoadingCloneStorages, setIsLoadingCloneStorages] = useState(false);
const [showCloneStorageSelection, setShowCloneStorageSelection] = useState(false);
const [showCloneCountInput, setShowCloneCountInput] = useState(false);
const [cloneContainerType, setCloneContainerType] = useState<'lxc' | 'vm' | null>(null);
const [selectedCloneStorage, setSelectedCloneStorage] = useState<Storage | null>(null);
// cloneCount is passed as parameter to handleCloneCountSubmit, no need for state
const [editingScriptId, setEditingScriptId] = useState<number | null>(null);
const [editFormData, setEditFormData] = useState<{
script_name: string;
@@ -709,6 +724,8 @@ export function InstalledScriptsTab() {
return;
}
const containerType = script.is_vm ? "VM" : "LXC";
setConfirmationModal({
isOpen: true,
variant: "simple",
@@ -718,7 +735,7 @@ export function InstalledScriptsTab() {
setControllingScriptId(script.id);
setLoadingModal({
isOpen: true,
action: `${action === "start" ? "Starting" : "Stopping"} container ${script.container_id}...`,
action: `${action === "start" ? "Starting" : "Stopping"} ${containerType}...`,
});
void controlContainerMutation.mutate({ id: script.id, action });
setConfirmationModal(null);
@@ -923,6 +940,201 @@ export function InstalledScriptsTab() {
setShowStorageSelection(true);
};
// Clone queries
const getContainerHostnameQuery = api.installedScripts.getContainerHostname.useQuery(
{
containerId: pendingCloneScript?.container_id ?? '',
serverId: pendingCloneScript?.server_id ?? 0,
containerType: cloneContainerType ?? 'lxc'
},
{ enabled: false }
);
const executeCloneMutation = api.installedScripts.executeClone.useMutation();
const utils = api.useUtils();
const fetchCloneStorages = async (serverId: number, _forceRefresh = false) => {
setIsLoadingCloneStorages(true);
try {
// Use utils.fetch to call with the correct serverId
const result = await utils.installedScripts.getCloneStorages.fetch({
serverId,
forceRefresh: _forceRefresh
});
if (result?.success && result.storages) {
setCloneStorages(result.storages as Storage[]);
} else {
setErrorModal({
isOpen: true,
title: 'Failed to Fetch Storages',
message: result?.error ?? 'Unknown error occurred',
type: 'error'
});
}
} catch (error) {
setErrorModal({
isOpen: true,
title: 'Failed to Fetch Storages',
message: error instanceof Error ? error.message : 'Unknown error occurred',
type: 'error'
});
} finally {
setIsLoadingCloneStorages(false);
}
};
const handleCloneScript = async (script: InstalledScript) => {
if (!script.container_id) {
setErrorModal({
isOpen: true,
title: 'Clone Failed',
message: 'No Container ID available for this script',
details: 'This script does not have a valid container ID and cannot be cloned.'
});
return;
}
if (!script.server_id) {
setErrorModal({
isOpen: true,
title: 'Clone Not Available',
message: 'Clone is only available for SSH scripts with a configured server.',
type: 'error'
});
return;
}
// Store the script and determine container type using is_vm property
setPendingCloneScript(script);
// Use is_vm property from batch detection (from main branch)
// If not available, default to LXC
const containerType = script.is_vm ? 'vm' : 'lxc';
setCloneContainerType(containerType);
// Fetch storages and show selection modal
void fetchCloneStorages(script.server_id, false);
setShowCloneStorageSelection(true);
};
const handleCloneStorageSelected = (storage: Storage) => {
setShowCloneStorageSelection(false);
setSelectedCloneStorage(storage);
setShowCloneCountInput(true);
};
const handleCloneCountSubmit = async (count: number) => {
setShowCloneCountInput(false);
if (!pendingCloneScript || !cloneContainerType) {
setErrorModal({
isOpen: true,
title: 'Clone Failed',
message: 'Missing required information for cloning.',
type: 'error'
});
return;
}
try {
// Get original hostname
const hostnameResult = await getContainerHostnameQuery.refetch();
if (!hostnameResult.data?.success || !hostnameResult.data.hostname) {
setErrorModal({
isOpen: true,
title: 'Clone Failed',
message: 'Could not retrieve container hostname.',
type: 'error'
});
return;
}
const originalHostname = hostnameResult.data.hostname;
// Generate clone hostnames via utils.fetch so the freshly resolved originalHostname can be passed
const hostnamesResult = await utils.installedScripts.generateCloneHostnames.fetch({
originalHostname,
containerType: cloneContainerType ?? 'lxc',
serverId: pendingCloneScript.server_id!,
count
});
if (!hostnamesResult?.success || !hostnamesResult.hostnames.length) {
setErrorModal({
isOpen: true,
title: 'Clone Failed',
message: hostnamesResult?.error ?? 'Could not generate clone hostnames.',
type: 'error'
});
return;
}
const hostnames = hostnamesResult.hostnames;
// Execute clone (nextIds will be obtained sequentially in server.js)
const cloneResult = await executeCloneMutation.mutateAsync({
containerId: pendingCloneScript.container_id!,
serverId: pendingCloneScript.server_id!,
storage: selectedCloneStorage!.name,
cloneCount: count,
hostnames: hostnames,
containerType: cloneContainerType
});
if (!cloneResult.success || !cloneResult.executionId) {
setErrorModal({
isOpen: true,
title: 'Clone Failed',
message: cloneResult.error ?? 'Failed to start clone operation.',
type: 'error'
});
return;
}
// Get server info for websocket
const server = pendingCloneScript.server_id && pendingCloneScript.server_user ? {
id: pendingCloneScript.server_id,
name: pendingCloneScript.server_name,
ip: pendingCloneScript.server_ip,
user: pendingCloneScript.server_user,
password: pendingCloneScript.server_password,
auth_type: pendingCloneScript.server_auth_type ?? 'password',
ssh_key: pendingCloneScript.server_ssh_key,
ssh_key_passphrase: pendingCloneScript.server_ssh_key_passphrase,
ssh_port: pendingCloneScript.server_ssh_port ?? 22,
} : null;
// Set up terminal for clone execution
setUpdatingScript({
id: pendingCloneScript.id,
containerId: pendingCloneScript.container_id!,
server: server,
isClone: true,
executionId: cloneResult.executionId,
cloneCount: count,
hostnames: hostnames,
containerType: cloneContainerType,
storage: selectedCloneStorage!.name
});
// Reset clone state
setPendingCloneScript(null);
setCloneStorages([]);
setSelectedCloneStorage(null);
setCloneContainerType(null);
// Reset clone count (no state variable needed, count is passed as parameter)
} catch (error) {
setErrorModal({
isOpen: true,
title: 'Clone Failed',
message: error instanceof Error ? error.message : 'Unknown error occurred',
type: 'error'
});
}
};
const handleOpenShell = (script: InstalledScript) => {
if (!script.container_id) {
setErrorModal({
@@ -1214,26 +1426,25 @@ export function InstalledScriptsTab() {
<div className="mb-8" data-terminal="update">
<Terminal
scriptPath={
updatingScript.isBackupOnly
updatingScript.isClone
? `clone-${updatingScript.containerId}`
: updatingScript.isBackupOnly
? `backup-${updatingScript.containerId}`
: `update-${updatingScript.containerId}`
}
onClose={handleCloseUpdateTerminal}
mode={updatingScript.server ? "ssh" : "local"}
server={updatingScript.server}
isUpdate={!updatingScript.isBackupOnly}
isUpdate={!updatingScript.isBackupOnly && !updatingScript.isClone}
isBackup={updatingScript.isBackupOnly}
isClone={updatingScript.isClone}
containerId={updatingScript.containerId}
storage={
updatingScript.isBackupOnly
? updatingScript.backupStorage
: undefined
}
backupStorage={
!updatingScript.isBackupOnly
? updatingScript.backupStorage
: undefined
}
executionId={updatingScript.executionId}
cloneCount={updatingScript.cloneCount}
hostnames={updatingScript.hostnames}
containerType={updatingScript.containerType}
storage={updatingScript.isClone ? updatingScript.storage : (updatingScript.isBackupOnly ? updatingScript.backupStorage : undefined)}
backupStorage={!updatingScript.isBackupOnly && !updatingScript.isClone ? updatingScript.backupStorage : undefined}
/>
</div>
)}
@@ -1714,6 +1925,7 @@ export function InstalledScriptsTab() {
onCancel={handleCancelEdit}
onUpdate={() => handleUpdateScript(script)}
onBackup={() => handleBackupScript(script)}
onClone={() => handleCloneScript(script)}
onShell={() => handleOpenShell(script)}
onDelete={() => handleDeleteScript(Number(script.id))}
isUpdating={updateScriptMutation.isPending}
@@ -2065,8 +2277,22 @@ export function InstalledScriptsTab() {
</DropdownMenuItem>
)}
{script.container_id &&
script.execution_mode === "ssh" &&
!script.is_vm && (
script.execution_mode === "ssh" && (
<DropdownMenuItem
onClick={() =>
handleCloneScript(script)
}
disabled={
containerStatuses.get(script.id) ===
"stopped"
}
className="text-muted-foreground hover:text-foreground hover:bg-muted/20 focus:bg-muted/20"
>
Clone
</DropdownMenuItem>
)}
{script.container_id &&
script.execution_mode === "ssh" && (
<DropdownMenuItem
onClick={() =>
handleOpenShell(script)
@@ -2355,6 +2581,43 @@ export function InstalledScriptsTab() {
}}
/>
{/* Clone Storage Selection Modal */}
<StorageSelectionModal
isOpen={showCloneStorageSelection}
onClose={() => {
setShowCloneStorageSelection(false);
setPendingCloneScript(null);
setCloneStorages([]);
}}
onSelect={handleCloneStorageSelected}
storages={cloneStorages}
isLoading={isLoadingCloneStorages}
onRefresh={() => {
if (pendingCloneScript?.server_id) {
void fetchCloneStorages(pendingCloneScript.server_id, true);
}
}}
title="Select Clone Storage"
description="Select a storage to use for cloning. Only storages with rootdir content are shown."
filterFn={(storage) => {
return storage.content.includes('rootdir');
}}
showBackupTag={false}
/>
{/* Clone Count Input Modal */}
<CloneCountInputModal
isOpen={showCloneCountInput}
onClose={() => {
setShowCloneCountInput(false);
setPendingCloneScript(null);
setCloneStorages([]);
setSelectedCloneStorage(null);
}}
onSubmit={handleCloneCountSubmit}
storageName={selectedCloneStorage?.name ?? ''}
/>
{/* LXC Settings Modal */}
<LXCSettingsModal
isOpen={lxcSettingsModal.isOpen}

View File

@@ -16,7 +16,7 @@ interface LoadingModalProps {
export function LoadingModal({
isOpen,
action: _action,
action,
logs = [],
isComplete = false,
title,
@@ -64,6 +64,11 @@ export function LoadingModal({
)}
</div>
{/* Action text - displayed prominently */}
{action && (
<p className="text-foreground text-base font-medium">{action}</p>
)}
{/* Static title text */}
{title && <p className="text-muted-foreground text-sm">{title}</p>}

View File

@@ -46,6 +46,7 @@ interface ScriptInstallationCardProps {
onCancel: () => void;
onUpdate: () => void;
onBackup?: () => void;
onClone?: () => void;
onShell: () => void;
onDelete: () => void;
isUpdating: boolean;
@@ -71,6 +72,7 @@ export function ScriptInstallationCard({
onCancel,
onUpdate,
onBackup,
onClone,
onShell,
onDelete,
isUpdating,
@@ -319,7 +321,16 @@ export function ScriptInstallationCard({
Backup
</DropdownMenuItem>
)}
{script.container_id && script.execution_mode === 'ssh' && !script.is_vm && (
{script.container_id && script.execution_mode === 'ssh' && onClone && (
<DropdownMenuItem
onClick={onClone}
disabled={containerStatus === 'stopped'}
className="text-muted-foreground hover:text-foreground hover:bg-muted/20 focus:bg-muted/20"
>
Clone
</DropdownMenuItem>
)}
{script.container_id && script.execution_mode === 'ssh' && (
<DropdownMenuItem
onClick={onShell}
disabled={containerStatus === 'stopped'}

View File

@@ -13,6 +13,10 @@ interface StorageSelectionModalProps {
storages: Storage[];
isLoading: boolean;
onRefresh: () => void;
title?: string;
description?: string;
filterFn?: (storage: Storage) => boolean;
showBackupTag?: boolean;
}
export function StorageSelectionModal({
@@ -21,7 +25,11 @@ export function StorageSelectionModal({
onSelect,
storages,
isLoading,
onRefresh
onRefresh,
title = 'Select Storage',
description = 'Select a storage to use.',
filterFn,
showBackupTag = true
}: StorageSelectionModalProps) {
const [selectedStorage, setSelectedStorage] = useState<Storage | null>(null);
@@ -41,8 +49,8 @@ export function StorageSelectionModal({
onClose();
};
// Filter to show only backup-capable storages
const backupStorages = storages.filter(s => s.supportsBackup);
// Filter storages using filterFn if provided, otherwise filter to show only backup-capable storages
const filteredStorages = filterFn ? storages.filter(filterFn) : storages.filter(s => s.supportsBackup);
return (
<div className="fixed inset-0 backdrop-blur-sm bg-black/50 flex items-center justify-center z-50 p-4">
@@ -51,7 +59,7 @@ export function StorageSelectionModal({
<div className="flex items-center justify-between p-6 border-b border-border">
<div className="flex items-center gap-3">
<Database className="h-6 w-6 text-primary" />
<h2 className="text-2xl font-bold text-card-foreground">Select Backup Storage</h2>
<h2 className="text-2xl font-bold text-card-foreground">{title}</h2>
</div>
<Button
onClick={handleClose}
@@ -72,7 +80,7 @@ export function StorageSelectionModal({
<div className="inline-block animate-spin rounded-full h-8 w-8 border-b-2 border-primary mb-4"></div>
<p className="text-muted-foreground">Loading storages...</p>
</div>
) : backupStorages.length === 0 ? (
) : filteredStorages.length === 0 ? (
<div className="text-center py-8">
<Database className="h-12 w-12 text-muted-foreground mx-auto mb-4" />
<p className="text-foreground mb-2">No suitable storages found</p>
@@ -87,12 +95,12 @@ export function StorageSelectionModal({
) : (
<>
<p className="text-sm text-muted-foreground mb-4">
Select a storage to use for the backup. Only storages that support backups are shown.
{description}
</p>
{/* Storage List */}
<div className="space-y-2 max-h-96 overflow-y-auto mb-4">
{backupStorages.map((storage) => (
{filteredStorages.map((storage) => (
<div
key={storage.name}
onClick={() => setSelectedStorage(storage)}
@@ -106,9 +114,11 @@ export function StorageSelectionModal({
<div className="flex-1">
<div className="flex items-center gap-2 mb-1">
<h3 className="font-medium text-foreground">{storage.name}</h3>
<span className="px-2 py-0.5 text-xs font-medium rounded bg-success/20 text-success border border-success/30">
Backup
</span>
{showBackupTag && (
<span className="px-2 py-0.5 text-xs font-medium rounded bg-success/20 text-success border border-success/30">
Backup
</span>
)}
<span className="px-2 py-0.5 text-xs font-medium rounded bg-muted text-muted-foreground">
{storage.type}
</span>

View File

@@ -13,9 +13,14 @@ interface TerminalProps {
isUpdate?: boolean;
isShell?: boolean;
isBackup?: boolean;
isClone?: boolean;
containerId?: string;
storage?: string;
backupStorage?: string;
executionId?: string;
cloneCount?: number;
hostnames?: string[];
containerType?: 'lxc' | 'vm';
}
interface TerminalMessage {
@@ -24,7 +29,7 @@ interface TerminalMessage {
timestamp: number;
}
export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate = false, isShell = false, isBackup = false, containerId, storage, backupStorage }: TerminalProps) {
export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate = false, isShell = false, isBackup = false, isClone = false, containerId, storage, backupStorage, executionId: propExecutionId, cloneCount, hostnames, containerType }: TerminalProps) {
const [isConnected, setIsConnected] = useState(false);
const [isRunning, setIsRunning] = useState(false);
const [isClient, setIsClient] = useState(false);
@@ -39,7 +44,16 @@ export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate
const fitAddonRef = useRef<any>(null);
const wsRef = useRef<WebSocket | null>(null);
const inputHandlerRef = useRef<((data: string) => void) | null>(null);
const [executionId, setExecutionId] = useState(() => `exec_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`);
const [executionId, setExecutionId] = useState(() => propExecutionId ?? `exec_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`);
// Update executionId when propExecutionId changes
useEffect(() => {
if (propExecutionId) {
setExecutionId(propExecutionId);
}
}, [propExecutionId]);
const effectiveExecutionId = propExecutionId ?? executionId;
const isConnectingRef = useRef<boolean>(false);
const hasConnectedRef = useRef<boolean>(false);
@@ -277,7 +291,7 @@ export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate
if (wsRef.current && wsRef.current.readyState === WebSocket.OPEN) {
const message = {
action: 'input',
executionId,
executionId: effectiveExecutionId,
input: data
};
wsRef.current.send(JSON.stringify(message));
@@ -325,9 +339,11 @@ export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate
// Only auto-start on initial connection, not on reconnections
if (isInitialConnection && !isRunning) {
// Generate a new execution ID for the initial run
const newExecutionId = `exec_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
setExecutionId(newExecutionId);
// Use propExecutionId if provided, otherwise generate a new one
const newExecutionId = propExecutionId ?? `exec_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
if (!propExecutionId) {
setExecutionId(newExecutionId);
}
const message = {
action: 'start',
@@ -338,9 +354,13 @@ export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate
isUpdate,
isShell,
isBackup,
isClone,
containerId,
storage,
backupStorage
backupStorage,
cloneCount,
hostnames,
containerType
};
ws.send(JSON.stringify(message));
}
@@ -384,9 +404,11 @@ export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate
const startScript = () => {
if (wsRef.current && wsRef.current.readyState === WebSocket.OPEN && !isRunning) {
// Generate a new execution ID for each script run
const newExecutionId = `exec_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
setExecutionId(newExecutionId);
// Generate a new execution ID for each script run (unless propExecutionId is provided)
const newExecutionId = propExecutionId ?? `exec_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
if (!propExecutionId) {
setExecutionId(newExecutionId);
}
setIsStopped(false);
wsRef.current.send(JSON.stringify({
@@ -397,7 +419,14 @@ export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate
server,
isUpdate,
isShell,
containerId
isBackup,
isClone,
containerId,
storage,
backupStorage,
cloneCount,
hostnames,
containerType
}));
}
};
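For reference, this is the shape of the 'start' message a clone run sends over the WebSocket, matching the fields destructured in server.js's handleMessage; the values below are illustrative only.
// Illustrative payload - container ID, storage and hostnames are made up.
const cloneStartMessage = {
  action: 'start',
  scriptPath: 'clone-101',
  executionId: 'exec_1700000000000_ab12cd34e',
  mode: 'ssh',
  server: { ip: '192.168.1.10', user: 'root' }, // real runs pass the full server record
  isUpdate: false,
  isShell: false,
  isBackup: false,
  isClone: true,
  containerId: '101',
  storage: 'local-lvm',
  cloneCount: 2,
  hostnames: ['docmost-clone-1', 'docmost-clone-2'],
  containerType: 'lxc' as const
};
// wsRef.current?.send(JSON.stringify(cloneStartMessage));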

View File

@@ -6,30 +6,40 @@ export interface ToggleProps
checked?: boolean;
onCheckedChange?: (checked: boolean) => void;
label?: string;
labelPosition?: 'left' | 'right';
}
const Toggle = React.forwardRef<HTMLInputElement, ToggleProps>(
({ className, checked, onCheckedChange, label, ...props }, ref) => {
({ className, checked, onCheckedChange, label, labelPosition = 'right', ...props }, ref) => {
const toggleSwitch = (
<label className="relative inline-flex items-center cursor-pointer">
<input
type="checkbox"
className="sr-only"
checked={checked}
onChange={(e) => onCheckedChange?.(e.target.checked)}
ref={ref}
{...props}
/>
<div className={cn(
"w-11 h-6 peer-focus:outline-none peer-focus:ring-4 peer-focus:ring-primary/20 rounded-full peer after:content-[''] after:absolute after:top-[2px] after:left-[2px] after:bg-white after:border-gray-300 dark:after:border-gray-500 after:border after:rounded-full after:h-5 after:w-5 after:transition-transform after:duration-300 after:ease-in-out after:shadow-md transition-colors duration-300 ease-in-out border-2 border-gray-300 dark:border-gray-600",
checked
? "bg-blue-500 dark:bg-blue-600 after:translate-x-full"
: "bg-gray-300 dark:bg-gray-700",
className
)} />
</label>
);
return (
<div className="flex items-center space-x-3">
<label className="relative inline-flex items-center cursor-pointer">
<input
type="checkbox"
className="sr-only"
checked={checked}
onChange={(e) => onCheckedChange?.(e.target.checked)}
ref={ref}
{...props}
/>
<div className={cn(
"w-11 h-6 peer-focus:outline-none peer-focus:ring-4 peer-focus:ring-primary/20 rounded-full peer after:content-[''] after:absolute after:top-[2px] after:left-[2px] after:bg-white after:border-gray-300 dark:after:border-gray-500 after:border after:rounded-full after:h-5 after:w-5 after:transition-transform after:duration-300 after:ease-in-out after:shadow-md transition-colors duration-300 ease-in-out border-2 border-gray-300 dark:border-gray-600",
checked
? "bg-blue-500 dark:bg-blue-600 after:translate-x-full"
: "bg-gray-300 dark:bg-gray-700",
className
)} />
</label>
{label && (
{label && labelPosition === 'left' && (
<span className="text-sm font-medium text-foreground">
{label}
</span>
)}
{toggleSwitch}
{label && labelPosition === 'right' && (
<span className="text-sm font-medium text-foreground">
{label}
</span>

View File

@@ -442,30 +442,130 @@ async function isVM(scriptId: number, containerId: string, serverId: number | nu
return true; // VM config file exists
}
// Check LXC config file
let lxcConfigExists = false;
// Check LXC config file (not needed for return value, but check for completeness)
await new Promise<void>((resolve) => {
void sshExecutionService.executeCommand(
server as Server,
`test -f "${lxcConfigPath}" && echo "exists" || echo "not_exists"`,
(data: string) => {
if (data.includes('exists')) {
lxcConfigExists = true;
}
(_data: string) => {
// Data handler not needed - just checking if file exists
},
() => resolve(),
() => resolve()
);
});
// If LXC config exists, it's an LXC container
return !lxcConfigExists; // Return true if it's a VM (neither config exists defaults to false/LXC)
return false; // Always LXC since VM config doesn't exist
} catch (error) {
console.error('Error determining container type:', error);
return false; // Default to LXC on error
}
}
// Helper function to batch detect container types for all containers on a server
// Returns a Map of container_id -> isVM (true for VM, false for LXC)
async function batchDetectContainerTypes(server: Server): Promise<Map<string, boolean>> {
const containerTypeMap = new Map<string, boolean>();
try {
// Import SSH services
const { default: SSHService } = await import('~/server/ssh-service');
const { default: SSHExecutionService } = await import('~/server/ssh-execution-service');
const sshService = new SSHService();
const sshExecutionService = new SSHExecutionService();
// Test SSH connection first
const connectionTest = await sshService.testSSHConnection(server);
if (!(connectionTest as any).success) {
console.warn(`SSH connection failed for server ${server.name}, skipping batch detection`);
return containerTypeMap; // Return empty map if SSH fails
}
// Helper function to parse list output and extract IDs
const parseListOutput = (output: string): string[] => {
const ids: string[] = [];
const lines = output.split('\n').filter(line => line.trim());
for (const line of lines) {
// Skip header lines
if (line.includes('VMID') || line.includes('CTID')) continue;
// Extract first column (ID)
const parts = line.trim().split(/\s+/);
if (parts.length > 0) {
const id = parts[0]?.trim();
// Validate ID format (3-4 digits typically)
if (id && /^\d{3,4}$/.test(id)) {
ids.push(id);
}
}
}
return ids;
};
// Get containers from pct list
let pctOutput = '';
await new Promise<void>((resolve) => {
void sshExecutionService.executeCommand(
server,
'pct list',
(data: string) => {
pctOutput += data;
},
(error: string) => {
console.error(`pct list error for server ${server.name}:`, error);
// Don't reject, just continue - might be no containers
resolve();
},
(_exitCode: number) => {
resolve();
}
);
});
// Get VMs from qm list
let qmOutput = '';
await new Promise<void>((resolve) => {
void sshExecutionService.executeCommand(
server,
'qm list',
(data: string) => {
qmOutput += data;
},
(error: string) => {
console.error(`qm list error for server ${server.name}:`, error);
// Don't reject, just continue - might be no VMs
resolve();
},
(_exitCode: number) => {
resolve();
}
);
});
// Parse IDs from both lists
const containerIds = parseListOutput(pctOutput);
const vmIds = parseListOutput(qmOutput);
// Mark all LXC containers as false (not VM)
for (const id of containerIds) {
containerTypeMap.set(id, false);
}
// Mark all VMs as true (is VM)
for (const id of vmIds) {
containerTypeMap.set(id, true);
}
} catch (error) {
console.error(`Error in batchDetectContainerTypes for server ${server.name}:`, error);
// Return empty map on error - individual checks will fall back to isVM()
}
return containerTypeMap;
}
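As a sanity check of the parsing above, here is a standalone sketch with illustrative pct list / qm list output (the real commands print the ID in the first column, which is all parseListOutput relies on):
const samplePctList = `VMID       Status     Lock         Name
101        running                 docmost`;
const sampleQmList = `      VMID NAME        STATUS   MEM(MB) BOOTDISK(GB) PID
       200 win11       stopped  8192    64.00        0`;

// Mirrors parseListOutput: skip header rows, keep numeric first columns.
function extractIds(output: string): string[] {
  return output
    .split('\n')
    .filter((line) => line.trim() && !line.includes('VMID') && !line.includes('CTID'))
    .map((line) => line.trim().split(/\s+/)[0] ?? '')
    .filter((id) => /^\d{3,4}$/.test(id));
}

console.log(extractIds(samplePctList)); // ['101'] -> mapped to false (LXC)
console.log(extractIds(sampleQmList));  // ['200'] -> mapped to true  (VM)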
export const installedScriptsRouter = createTRPCRouter({
// Get all installed scripts
@@ -475,13 +575,52 @@ export const installedScriptsRouter = createTRPCRouter({
const db = getDatabase();
const scripts = await db.getAllInstalledScripts();
// Group scripts by server_id for batch detection
const scriptsByServer = new Map<number, any[]>();
const serversMap = new Map<number, Server>();
for (const script of scripts) {
if (script.server_id && script.server) {
if (!scriptsByServer.has(script.server_id)) {
scriptsByServer.set(script.server_id, []);
serversMap.set(script.server_id, script.server as Server);
}
scriptsByServer.get(script.server_id)!.push(script);
}
}
// Batch detect container types for each server
const containerTypeMap = new Map<string, boolean>();
const batchDetectionPromises = Array.from(serversMap.entries()).map(async ([serverId, server]) => {
try {
const serverTypeMap = await batchDetectContainerTypes(server);
// Container IDs are unique across a Proxmox cluster, so the per-server results can be merged directly without a server prefix
for (const [containerId, isVM] of serverTypeMap.entries()) {
containerTypeMap.set(containerId, isVM);
}
} catch (error) {
console.error(`Error batch detecting types for server ${serverId}:`, error);
// Continue with other servers
}
});
await Promise.all(batchDetectionPromises);
// Transform scripts to flatten server data for frontend compatibility
const transformedScripts = await Promise.all(scripts.map(async (script: any) => {
// Determine if it's a VM or LXC
const transformedScripts = scripts.map((script: any) => {
// Determine if it's a VM or LXC from batch detection map, fall back to isVM() if not found
let is_vm = false;
if (script.container_id && script.server_id) {
is_vm = await isVM(script.id, script.container_id, script.server_id);
// First check if we have it in the batch detection map
if (containerTypeMap.has(script.container_id)) {
is_vm = containerTypeMap.get(script.container_id) ?? false;
} else {
// Fall back to checking LXCConfig in database (fast, no SSH needed)
// If LXCConfig exists, it's an LXC container
const hasLXCConfig = script.lxc_config !== null && script.lxc_config !== undefined;
is_vm = !hasLXCConfig; // No stored LXCConfig → treat as a VM (an LXCConfig record implies LXC)
}
}
return {
@@ -498,7 +637,7 @@ export const installedScriptsRouter = createTRPCRouter({
is_vm,
server: undefined // Remove nested server object
};
}));
});
return {
success: true,
@@ -522,13 +661,31 @@ export const installedScriptsRouter = createTRPCRouter({
const db = getDatabase();
const scripts = await db.getInstalledScriptsByServer(input.serverId);
// Batch detect container types for this server
let containerTypeMap = new Map<string, boolean>();
if (scripts.length > 0 && scripts[0]?.server) {
try {
containerTypeMap = await batchDetectContainerTypes(scripts[0].server as Server);
} catch (error) {
console.error(`Error batch detecting types for server ${input.serverId}:`, error);
// Continue with empty map, will fall back to LXCConfig check
}
}
// Transform scripts to flatten server data for frontend compatibility
const transformedScripts = await Promise.all(scripts.map(async (script: any) => {
// Determine if it's a VM or LXC
const transformedScripts = scripts.map((script: any) => {
// Determine if it's a VM or LXC from batch detection map, fall back to LXCConfig check if not found
let is_vm = false;
if (script.container_id && script.server_id) {
is_vm = await isVM(script.id, script.container_id, script.server_id);
// First check if we have it in the batch detection map
if (containerTypeMap.has(script.container_id)) {
is_vm = containerTypeMap.get(script.container_id) ?? false;
} else {
// Fall back to checking LXCConfig in database (fast, no SSH needed)
// If LXCConfig exists, it's an LXC container
const hasLXCConfig = script.lxc_config !== null && script.lxc_config !== undefined;
is_vm = !hasLXCConfig; // No stored LXCConfig → treat as a VM (an LXCConfig record implies LXC)
}
}
return {
@@ -545,7 +702,7 @@ export const installedScriptsRouter = createTRPCRouter({
is_vm,
server: undefined // Remove nested server object
};
}));
});
return {
success: true,
@@ -2490,5 +2647,562 @@ EOFCONFIG`;
executionId: null
};
}
}),
// Get next free ID from cluster (single ID for sequential cloning)
getClusterNextId: publicProcedure
.input(z.object({
serverId: z.number()
}))
.query(async ({ input }) => {
try {
const db = getDatabase();
const server = await db.getServerById(input.serverId);
if (!server) {
return {
success: false,
error: 'Server not found',
nextId: null
};
}
const { getSSHExecutionService } = await import('~/server/ssh-execution-service');
const sshExecutionService = getSSHExecutionService();
let output = '';
await new Promise<void>((resolve, reject) => {
sshExecutionService.executeCommand(
server as Server,
'pvesh get /cluster/nextid',
(data: string) => {
output += data;
},
(error: string) => {
reject(new Error(`Failed to get next ID: ${error}`));
},
(exitCode: number) => {
if (exitCode === 0) {
resolve();
} else {
reject(new Error(`pvesh command failed with exit code ${exitCode}`));
}
}
);
});
const nextId = output.trim();
if (!nextId || !/^\d+$/.test(nextId)) {
return {
success: false,
error: 'Invalid next ID received',
nextId: null
};
}
return {
success: true,
nextId
};
} catch (error) {
console.error('Error in getClusterNextId:', error);
return {
success: false,
error: error instanceof Error ? error.message : 'Failed to get next ID',
nextId: null
};
}
}),
// Get container hostname/name
getContainerHostname: publicProcedure
.input(z.object({
containerId: z.string(),
serverId: z.number(),
containerType: z.enum(['lxc', 'vm'])
}))
.query(async ({ input }) => {
try {
const db = getDatabase();
const server = await db.getServerById(input.serverId);
if (!server) {
return {
success: false,
error: 'Server not found',
hostname: null
};
}
const { getSSHExecutionService } = await import('~/server/ssh-execution-service');
const sshExecutionService = getSSHExecutionService();
const configPath = input.containerType === 'lxc'
? `/etc/pve/lxc/${input.containerId}.conf`
: `/etc/pve/qemu-server/${input.containerId}.conf`;
let configContent = '';
await new Promise<void>((resolve) => {
sshExecutionService.executeCommand(
server as Server,
`cat "${configPath}" 2>/dev/null || echo ""`,
(data: string) => {
configContent += data;
},
() => resolve(), // Don't fail on error
() => resolve() // Always resolve
);
});
if (!configContent.trim()) {
return {
success: true,
hostname: null
};
}
// Parse config for hostname (LXC) or name (VM)
const lines = configContent.split('\n');
for (const line of lines) {
const trimmed = line.trim();
if (input.containerType === 'lxc' && trimmed.startsWith('hostname:')) {
const hostname = trimmed.substring(9).trim();
return {
success: true,
hostname
};
} else if (input.containerType === 'vm' && trimmed.startsWith('name:')) {
const name = trimmed.substring(5).trim();
return {
success: true,
hostname: name
};
}
}
return {
success: true,
hostname: null
};
} catch (error) {
console.error('Error in getContainerHostname:', error);
return {
success: false,
error: error instanceof Error ? error.message : 'Failed to get container hostname',
hostname: null
};
}
}),
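A worked example of what this procedure parses; the config snippets are illustrative, but the hostname:/name: prefixes are exactly what the loop above looks for.
const lxcConf = 'arch: amd64\nhostname: docmost\nmemory: 2048';
const vmConf = 'cores: 4\nname: win11\nmemory: 8192';

// Same prefix-based extraction as above, as a standalone function.
function pickName(conf: string, type: 'lxc' | 'vm'): string | null {
  const prefix = type === 'lxc' ? 'hostname:' : 'name:';
  for (const line of conf.split('\n')) {
    const trimmed = line.trim();
    if (trimmed.startsWith(prefix)) return trimmed.substring(prefix.length).trim();
  }
  return null;
}

console.log(pickName(lxcConf, 'lxc')); // 'docmost'
console.log(pickName(vmConf, 'vm'));   // 'win11'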
// Get clone storages (rootdir or images content)
getCloneStorages: publicProcedure
.input(z.object({
serverId: z.number(),
forceRefresh: z.boolean().optional().default(false)
}))
.query(async ({ input }) => {
try {
const db = getDatabase();
const server = await db.getServerById(input.serverId);
if (!server) {
return {
success: false,
error: 'Server not found',
storages: [],
cached: false
};
}
const storageService = getStorageService();
const { default: SSHService } = await import('~/server/ssh-service');
const { getSSHExecutionService } = await import('~/server/ssh-execution-service');
const sshService = new SSHService();
const sshExecutionService = getSSHExecutionService();
// Test SSH connection first
const connectionTest = await sshService.testSSHConnection(server as Server);
if (!(connectionTest as any).success) {
return {
success: false,
error: `SSH connection failed: ${(connectionTest as any).error ?? 'Unknown error'}`,
storages: [],
cached: false
};
}
// Get server hostname to filter storages
let serverHostname = '';
try {
await new Promise<void>((resolve, reject) => {
sshExecutionService.executeCommand(
server as Server,
'hostname',
(data: string) => {
serverHostname += data;
},
(error: string) => {
reject(new Error(`Failed to get hostname: ${error}`));
},
(exitCode: number) => {
if (exitCode === 0) {
resolve();
} else {
reject(new Error(`hostname command failed with exit code ${exitCode}`));
}
}
);
});
} catch (error) {
console.error('Error getting server hostname:', error);
// Continue without filtering if hostname can't be retrieved
}
const normalizedHostname = serverHostname.trim().toLowerCase();
// Treat the response as cached unless a refresh was explicitly requested
const wasCached = !input.forceRefresh;
// Fetch storages (will use cache if not forcing refresh)
const allStorages = await storageService.getStorages(server as Server, input.forceRefresh);
// Filter storages by node hostname matching and content type (only rootdir for cloning)
const applicableStorages = allStorages.filter(storage => {
// Check content type - must have rootdir for cloning
const hasRootdir = storage.content.includes('rootdir');
if (!hasRootdir) {
return false;
}
// If storage has no nodes specified, it's available on all nodes
if (!storage.nodes || storage.nodes.length === 0) {
return true;
}
// If we couldn't get hostname, include all storages (fallback)
if (!normalizedHostname) {
return true;
}
// Check if server hostname is in the nodes array (case-insensitive, trimmed)
const normalizedNodes = storage.nodes.map(node => node.trim().toLowerCase());
return normalizedNodes.includes(normalizedHostname);
});
return {
success: true,
storages: applicableStorages,
cached: wasCached && applicableStorages.length > 0
};
} catch (error) {
console.error('Error in getCloneStorages:', error);
return {
success: false,
error: error instanceof Error ? error.message : 'Failed to fetch storages',
storages: [],
cached: false
};
}
}),
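/**
 * Illustrative usage (not part of this changeset): calling getCloneStorages from the
 * client, assuming the same hypothetical `proxmox` mount path as in the example above.
 *
 * @example
 * const { data, refetch } = api.proxmox.getCloneStorages.useQuery({
 *   serverId: server.id,
 *   forceRefresh: false,
 * });
 * // data.storages contains only storages that advertise 'rootdir' content and are either
 * // unrestricted or restricted to the node matching this server's hostname.
 * // data.cached is a best-effort flag: it reflects whether a refresh was forced, not
 * // whether the storage service actually answered from its cache.
 */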
// Generate clone hostnames
generateCloneHostnames: publicProcedure
.input(z.object({
originalHostname: z.string(),
containerType: z.enum(['lxc', 'vm']),
serverId: z.number(),
count: z.number().min(1).max(100)
}))
.query(async ({ input }) => {
try {
const db = getDatabase();
const server = await db.getServerById(input.serverId);
if (!server) {
return {
success: false,
error: 'Server not found',
hostnames: []
};
}
const { getSSHExecutionService } = await import('~/server/ssh-execution-service');
const sshExecutionService = getSSHExecutionService();
// Get all existing containers/VMs to find existing clones (check both LXC and VM)
const existingHostnames = new Set<string>();
// Check LXC containers
let lxcOutput = '';
try {
await new Promise<void>((resolve) => {
sshExecutionService.executeCommand(
server as Server,
'pct list',
(data: string) => {
lxcOutput += data;
},
(error: string) => {
console.error(`pct list error for server ${server.name}:`, error);
resolve();
},
() => resolve()
);
});
const lxcLines = lxcOutput.split('\n').filter(line => line.trim());
for (const line of lxcLines) {
if (line.includes('CTID') || line.includes('NAME')) continue;
const parts = line.trim().split(/\s+/);
if (parts.length >= 3) {
const name = parts.slice(2).join(' ').trim();
if (name) {
existingHostnames.add(name.toLowerCase());
}
}
}
} catch {
// Continue even if LXC list fails
}
// Check VMs
let vmOutput = '';
try {
await new Promise<void>((resolve) => {
sshExecutionService.executeCommand(
server as Server,
'qm list',
(data: string) => {
vmOutput += data;
},
(error: string) => {
console.error(`qm list error for server ${server.name}:`, error);
resolve();
},
() => resolve()
);
});
const vmLines = vmOutput.split('\n').filter(line => line.trim());
for (const line of vmLines) {
if (line.includes('VMID') || line.includes('NAME')) continue;
const parts = line.trim().split(/\s+/);
if (parts.length >= 3) {
const name = parts.slice(2).join(' ').trim();
if (name) {
existingHostnames.add(name.toLowerCase());
}
}
}
} catch {
// Continue even if VM list fails
}
// Find next available clone number (escape regex metacharacters so hostnames containing dots are matched literally)
const escapedHostname = input.originalHostname.toLowerCase().replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
const clonePattern = new RegExp(`^${escapedHostname}-clone-(\\d+)$`);
const existingCloneNumbers: number[] = [];
for (const hostname of existingHostnames) {
const match = hostname.match(clonePattern);
if (match) {
existingCloneNumbers.push(parseInt(match[1] ?? '0', 10));
}
}
// Determine starting number
let nextNumber = 1;
if (existingCloneNumbers.length > 0) {
existingCloneNumbers.sort((a, b) => a - b);
const lastNumber = existingCloneNumbers[existingCloneNumbers.length - 1];
if (lastNumber !== undefined) {
nextNumber = lastNumber + 1;
}
}
// Generate hostnames
const hostnames: string[] = [];
for (let i = 0; i < input.count; i++) {
hostnames.push(`${input.originalHostname}-clone-${nextNumber + i}`);
}
return {
success: true,
hostnames
};
} catch (error) {
console.error('Error in generateCloneHostnames:', error);
return {
success: false,
error: error instanceof Error ? error.message : 'Failed to generate clone hostnames',
hostnames: []
};
}
}),
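/**
 * Illustrative note (not part of this changeset): the numbering above always continues
 * from the highest existing "-clone-N" suffix found on the node; it does not fill gaps.
 * For example, if "web-clone-1" and "web-clone-3" already exist, the next generated
 * names are "web-clone-4", "web-clone-5", ... even though "-clone-2" is free.
 *
 * @example
 * const { data } = api.proxmox.generateCloneHostnames.useQuery({
 *   originalHostname: 'web',
 *   containerType: 'lxc',
 *   serverId: server.id,
 *   count: 2,
 * });
 * // => { success: true, hostnames: ['web-clone-4', 'web-clone-5'] }
 */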
// Execute clone (prepare for websocket execution)
// Note: nextIds will be obtained sequentially during cloning in server.js
executeClone: publicProcedure
.input(z.object({
containerId: z.string(),
serverId: z.number(),
storage: z.string(),
cloneCount: z.number().min(1).max(100),
hostnames: z.array(z.string()),
containerType: z.enum(['lxc', 'vm'])
}))
.mutation(async ({ input }) => {
try {
const db = getDatabase();
const server = await db.getServerById(input.serverId);
if (!server) {
return {
success: false,
error: 'Server not found',
executionId: null
};
}
const { default: SSHService } = await import('~/server/ssh-service');
const sshService = new SSHService();
// Test SSH connection first
const connectionTest = await sshService.testSSHConnection(server as Server);
if (!(connectionTest as any).success) {
return {
success: false,
error: `SSH connection failed: ${(connectionTest as any).error ?? 'Unknown error'}`,
executionId: null
};
}
// Validate inputs
if (input.hostnames.length !== input.cloneCount) {
return {
success: false,
error: 'Hostnames count must match clone count',
executionId: null
};
}
// Generate execution ID for websocket tracking
const executionId = `clone_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
return {
success: true,
executionId,
containerId: input.containerId,
storage: input.storage,
cloneCount: input.cloneCount,
hostnames: input.hostnames,
containerType: input.containerType,
server: server as Server
};
} catch (error) {
console.error('Error in executeClone:', error);
return {
success: false,
error: error instanceof Error ? error.message : 'Failed to execute clone',
executionId: null
};
}
}),
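/**
 * Illustrative sketch (not part of this changeset): executeClone only validates the
 * request and returns an executionId; the actual cloning is performed later over the
 * websocket in server.js, which this diff does not include. Assuming it shells out to
 * the standard Proxmox CLI, the per-clone command would look roughly like the helper
 * below (the exact flags used by server.js are not shown here).
 *
 * @example
 * function buildCloneCommand(
 *   containerType: 'lxc' | 'vm',
 *   sourceId: string,
 *   newId: number,
 *   hostname: string,
 *   storage: string,
 * ): string {
 *   return containerType === 'lxc'
 *     ? `pct clone ${sourceId} ${newId} --hostname ${hostname} --storage ${storage} --full`
 *     : `qm clone ${sourceId} ${newId} --name ${hostname} --storage ${storage} --full`;
 * }
 */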
// Add cloned container to database
addClonedContainerToDatabase: publicProcedure
.input(z.object({
containerId: z.string(),
serverId: z.number(),
containerType: z.enum(['lxc', 'vm'])
}))
.mutation(async ({ input }) => {
try {
const db = getDatabase();
const server = await db.getServerById(input.serverId);
if (!server) {
return {
success: false,
error: 'Server not found',
scriptId: null
};
}
const { getSSHExecutionService } = await import('~/server/ssh-execution-service');
const sshExecutionService = getSSHExecutionService();
// Read config file to get hostname/name
const configPath = input.containerType === 'lxc'
? `/etc/pve/lxc/${input.containerId}.conf`
: `/etc/pve/qemu-server/${input.containerId}.conf`;
let configContent = '';
await new Promise<void>((resolve) => {
sshExecutionService.executeCommand(
server as Server,
`cat "${configPath}" 2>/dev/null || echo ""`,
(data: string) => {
configContent += data;
},
() => resolve(),
() => resolve()
);
});
if (!configContent.trim()) {
return {
success: false,
error: 'Config file not found',
scriptId: null
};
}
// Parse config for hostname/name
let hostname = '';
const lines = configContent.split('\n');
for (const line of lines) {
const trimmed = line.trim();
if (input.containerType === 'lxc' && trimmed.startsWith('hostname:')) {
hostname = trimmed.substring(9).trim();
break;
} else if (input.containerType === 'vm' && trimmed.startsWith('name:')) {
hostname = trimmed.substring(5).trim();
break;
}
}
if (!hostname) {
hostname = `${input.containerType}-${input.containerId}`;
}
// Create installed script record
const script = await db.createInstalledScript({
script_name: hostname,
script_path: `cloned/${hostname}`,
container_id: input.containerId,
server_id: input.serverId,
execution_mode: 'ssh',
status: 'success',
output_log: `Cloned container/VM`
});
// For LXC, store config in database
if (input.containerType === 'lxc') {
const parsedConfig = parseRawConfig(configContent);
await db.createLXCConfig(script.id, parsedConfig);
}
return {
success: true,
scriptId: script.id
};
} catch (error) {
console.error('Error in addClonedContainerToDatabase:', error);
return {
success: false,
error: error instanceof Error ? error.message : 'Failed to add cloned container to database',
scriptId: null
};
}
})
});
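Taken together, the clone-related procedures above are meant to be driven in sequence from the UI: resolve the source guest's name, generate clone hostnames, prepare the clone run, let server.js execute it over the websocket, then register each new guest in the database. A minimal end-to-end sketch of that flow, assuming a vanilla tRPC client named `trpc` and the hypothetical `proxmox` mount path used in the examples above (the real UI wiring and the websocket handler are not part of this diff):

async function cloneLxc(trpc: any, serverId: number, containerId: string, storage: string, count: number) {
  // 1. Resolve the source container's hostname so clone names can be derived from it.
  const src = await trpc.proxmox.getContainerHostname.query({ serverId, containerId, containerType: 'lxc' });
  const originalHostname = src.hostname ?? `lxc-${containerId}`;

  // 2. Ask the server for the next free "<hostname>-clone-N" names.
  const generated = await trpc.proxmox.generateCloneHostnames.query({
    originalHostname, containerType: 'lxc', serverId, count,
  });

  // 3. Validate the request and obtain an executionId for websocket progress tracking.
  const prepared = await trpc.proxmox.executeClone.mutate({
    containerId, serverId, storage, cloneCount: count,
    hostnames: generated.hostnames, containerType: 'lxc',
  });
  if (!prepared.success) throw new Error(prepared.error ?? 'Clone preparation failed');

  // 4. server.js performs the actual clones over the websocket; once it reports each new
  //    CTID, the UI registers it so it appears alongside script-installed containers:
  //    await trpc.proxmox.addClonedContainerToDatabase.mutate({ containerId: newId, serverId, containerType: 'lxc' });

  return prepared.executionId;
}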

View File

@@ -281,7 +281,8 @@ class DatabaseServicePrisma {
 async getAllInstalledScripts(): Promise<InstalledScriptWithServer[]> {
   const result = await prisma.installedScript.findMany({
     include: {
-      server: true
+      server: true,
+      lxc_config: true
     },
     orderBy: { installation_date: 'desc' }
   });
@@ -302,7 +303,8 @@ class DatabaseServicePrisma {
 const result = await prisma.installedScript.findMany({
   where: { server_id },
   include: {
-    server: true
+    server: true,
+    lxc_config: true
   },
   orderBy: { installation_date: 'desc' }
 });

View File

@@ -1,4 +1,3 @@
-/* eslint-disable @typescript-eslint/no-floating-promises, @typescript-eslint/prefer-nullish-coalescing, @typescript-eslint/no-unused-vars, @typescript-eslint/prefer-regexp-exec, @typescript-eslint/prefer-optional-chain */
 import { getSSHExecutionService } from '../ssh-execution-service';
 import { getStorageService } from './storageService';
 import { getDatabase } from '../database-prisma';

View File

@@ -1,4 +1,3 @@
-/* eslint-disable @typescript-eslint/prefer-nullish-coalescing */
 import { writeFile, mkdir, readdir, readFile } from 'fs/promises';
 import { join } from 'path';
 import { env } from '../../env.js';

View File

@@ -1,4 +1,3 @@
-/* eslint-disable @typescript-eslint/no-floating-promises, @typescript-eslint/prefer-optional-chain, @typescript-eslint/prefer-nullish-coalescing, @typescript-eslint/prefer-regexp-exec, @typescript-eslint/prefer-for-of */
 import { getSSHExecutionService } from '../ssh-execution-service';
 import type { Server } from '~/types/server';