From 0ed13fcf0f982f46fa1848ca1aa02c6f2f5dc84b Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner Date: Thu, 4 Dec 2025 14:36:54 +0100 Subject: [PATCH 1/3] Fix linter errors: use Record type, remove unused function, fix floating promises and unsafe types --- scripts/core/alpine-tools.func | 507 ++++++++++++ scripts/core/cloud-init.func | 505 ++++++++++++ scripts/core/error-handler.func | 317 ++++++++ scripts/ct/debian.sh | 44 + scripts/install/debian-install.sh | 18 + server.js | 40 +- src/app/_components/ConfigurationModal.tsx | 899 +++++++++++++++++++++ src/app/_components/ExecutionModeModal.tsx | 103 ++- src/app/_components/ScriptDetailModal.tsx | 8 +- src/app/_components/Terminal.tsx | 9 +- src/app/page.tsx | 5 +- src/server/api/routers/scripts.ts | 192 +++++ src/server/ssh-execution-service.js | 40 +- 13 files changed, 2642 insertions(+), 45 deletions(-) create mode 100644 scripts/core/alpine-tools.func create mode 100644 scripts/core/cloud-init.func create mode 100644 scripts/core/error-handler.func create mode 100644 scripts/ct/debian.sh create mode 100644 scripts/install/debian-install.sh create mode 100644 src/app/_components/ConfigurationModal.tsx diff --git a/scripts/core/alpine-tools.func b/scripts/core/alpine-tools.func new file mode 100644 index 0000000..0acb6ab --- /dev/null +++ b/scripts/core/alpine-tools.func @@ -0,0 +1,507 @@ +#!/bin/ash +# shellcheck shell=ash + +# Expects existing msg_* functions and optional $STD from the framework. + +# ------------------------------ +# helpers +# ------------------------------ +lower() { printf '%s' "$1" | tr '[:upper:]' '[:lower:]'; } +has() { command -v "$1" >/dev/null 2>&1; } + +need_tool() { + # usage: need_tool curl jq unzip ... + # setup missing tools via apk + local missing=0 t + for t in "$@"; do + if ! has "$t"; then missing=1; fi + done + if [ "$missing" -eq 1 ]; then + msg_info "Installing tools: $*" + apk add --no-cache "$@" >/dev/null 2>&1 || { + msg_error "apk add failed for: $*" + return 1 + } + msg_ok "Tools ready: $*" + fi +} + +net_resolves() { + # better handling for missing getent on Alpine + # usage: net_resolves api.github.com + local host="$1" + ping -c1 -W1 "$host" >/dev/null 2>&1 || nslookup "$host" >/dev/null 2>&1 +} + +ensure_usr_local_bin_persist() { + local PROFILE_FILE="/etc/profile.d/10-localbin.sh" + if [ ! 
-f "$PROFILE_FILE" ]; then + echo 'case ":$PATH:" in *:/usr/local/bin:*) ;; *) export PATH="/usr/local/bin:$PATH";; esac' >"$PROFILE_FILE" + chmod +x "$PROFILE_FILE" + fi +} + +download_with_progress() { + # $1 url, $2 dest + local url="$1" out="$2" cl + need_tool curl pv || return 1 + cl=$(curl -fsSLI "$url" 2>/dev/null | awk 'tolower($0) ~ /^content-length:/ {print $2}' | tr -d '\r') + if [ -n "$cl" ]; then + curl -fsSL "$url" | pv -s "$cl" >"$out" || { + msg_error "Download failed: $url" + return 1 + } + else + curl -fL# -o "$out" "$url" || { + msg_error "Download failed: $url" + return 1 + } + fi +} + +# ------------------------------ +# GitHub: check Release +# ------------------------------ +check_for_gh_release() { + # app, repo, [pinned] + local app="$1" source="$2" pinned="${3:-}" + local app_lc + app_lc="$(lower "$app" | tr -d ' ')" + local current_file="$HOME/.${app_lc}" + local current="" release tag + + msg_info "Check for update: $app" + + net_resolves api.github.com || { + msg_error "DNS/network error: api.github.com" + return 1 + } + need_tool curl jq || return 1 + + tag=$(curl -fsSL "https://api.github.com/repos/${source}/releases/latest" | jq -r '.tag_name // empty') + [ -z "$tag" ] && { + msg_error "Unable to fetch latest tag for $app" + return 1 + } + release="${tag#v}" + + [ -f "$current_file" ] && current="$(cat "$current_file")" + + if [ -n "$pinned" ]; then + if [ "$pinned" = "$release" ]; then + msg_ok "$app pinned to v$pinned (no update)" + return 1 + fi + if [ "$current" = "$pinned" ]; then + msg_ok "$app pinned v$pinned installed (upstream v$release)" + return 1 + fi + msg_info "$app pinned v$pinned (upstream v$release) → update/downgrade" + CHECK_UPDATE_RELEASE="$pinned" + return 0 + fi + + if [ "$release" != "$current" ] || [ ! 
-f "$current_file" ]; then + CHECK_UPDATE_RELEASE="$release" + msg_info "New release available: v$release (current: v${current:-none})" + return 0 + fi + + msg_ok "$app is up to date (v$release)" + return 1 +} + +# ------------------------------ +# GitHub: get Release & deploy (Alpine) +# modes: tarball | prebuild | singlefile +# ------------------------------ +fetch_and_deploy_gh() { + # $1 app, $2 repo, [$3 mode], [$4 version], [$5 target], [$6 asset_pattern + local app="$1" repo="$2" mode="${3:-tarball}" version="${4:-latest}" target="${5:-/opt/$1}" pattern="${6:-}" + local app_lc + app_lc="$(lower "$app" | tr -d ' ')" + local vfile="$HOME/.${app_lc}" + local json url filename tmpd unpack + + net_resolves api.github.com || { + msg_error "DNS/network error" + return 1 + } + need_tool curl jq tar || return 1 + [ "$mode" = "prebuild" ] || [ "$mode" = "singlefile" ] && need_tool unzip >/dev/null 2>&1 || true + + tmpd="$(mktemp -d)" || return 1 + mkdir -p "$target" + + # Release JSON + if [ "$version" = "latest" ]; then + json="$(curl -fsSL "https://api.github.com/repos/$repo/releases/latest")" || { + msg_error "GitHub API failed" + rm -rf "$tmpd" + return 1 + } + else + json="$(curl -fsSL "https://api.github.com/repos/$repo/releases/tags/$version")" || { + msg_error "GitHub API failed" + rm -rf "$tmpd" + return 1 + } + fi + + # correct Version + version="$(printf '%s' "$json" | jq -r '.tag_name // empty')" + version="${version#v}" + + [ -z "$version" ] && { + msg_error "No tag in release json" + rm -rf "$tmpd" + return 1 + } + + case "$mode" in + tarball | source) + url="$(printf '%s' "$json" | jq -r '.tarball_url // empty')" + [ -z "$url" ] && url="https://github.com/$repo/archive/refs/tags/v$version.tar.gz" + filename="${app_lc}-${version}.tar.gz" + download_with_progress "$url" "$tmpd/$filename" || { + rm -rf "$tmpd" + return 1 + } + tar -xzf "$tmpd/$filename" -C "$tmpd" || { + msg_error "tar extract failed" + rm -rf "$tmpd" + return 1 + } + unpack="$(find "$tmpd" -mindepth 1 -maxdepth 1 -type d | head -n1)" + # copy content of unpack to target + (cd "$unpack" && tar -cf - .) | (cd "$target" && tar -xf -) || { + msg_error "copy failed" + rm -rf "$tmpd" + return 1 + } + ;; + prebuild) + [ -n "$pattern" ] || { + msg_error "prebuild requires asset pattern" + rm -rf "$tmpd" + return 1 + } + url="$(printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" ' + BEGIN{IGNORECASE=1} + $0 ~ p {print; exit} + ')" + [ -z "$url" ] && { + msg_error "asset not found for pattern: $pattern" + rm -rf "$tmpd" + return 1 + } + filename="${url##*/}" + download_with_progress "$url" "$tmpd/$filename" || { + rm -rf "$tmpd" + return 1 + } + # unpack archive (Zip or tarball) + case "$filename" in + *.zip) + need_tool unzip || { + rm -rf "$tmpd" + return 1 + } + mkdir -p "$tmpd/unp" + unzip -q "$tmpd/$filename" -d "$tmpd/unp" + ;; + *.tar.gz | *.tgz | *.tar.xz | *.tar.zst | *.tar.bz2) + mkdir -p "$tmpd/unp" + tar -xf "$tmpd/$filename" -C "$tmpd/unp" + ;; + *) + msg_error "unsupported archive: $filename" + rm -rf "$tmpd" + return 1 + ;; + esac + # top-level folder strippen + if [ "$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type d | wc -l)" -eq 1 ] && [ -z "$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type f | head -n1)" ]; then + unpack="$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type d)" + (cd "$unpack" && tar -cf - .) | (cd "$target" && tar -xf -) || { + msg_error "copy failed" + rm -rf "$tmpd" + return 1 + } + else + (cd "$tmpd/unp" && tar -cf - .) 
| (cd "$target" && tar -xf -) || { + msg_error "copy failed" + rm -rf "$tmpd" + return 1 + } + fi + ;; + singlefile) + [ -n "$pattern" ] || { + msg_error "singlefile requires asset pattern" + rm -rf "$tmpd" + return 1 + } + url="$(printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" ' + BEGIN{IGNORECASE=1} + $0 ~ p {print; exit} + ')" + [ -z "$url" ] && { + msg_error "asset not found for pattern: $pattern" + rm -rf "$tmpd" + return 1 + } + filename="${url##*/}" + download_with_progress "$url" "$target/$app" || { + rm -rf "$tmpd" + return 1 + } + chmod +x "$target/$app" + ;; + *) + msg_error "Unknown mode: $mode" + rm -rf "$tmpd" + return 1 + ;; + esac + + echo "$version" >"$vfile" + ensure_usr_local_bin_persist + rm -rf "$tmpd" + msg_ok "Deployed $app ($version) → $target" +} + +# ------------------------------ +# yq (mikefarah) – Alpine +# ------------------------------ +setup_yq() { + # prefer apk, unless FORCE_GH=1 + if [ "${FORCE_GH:-0}" != "1" ] && apk info -e yq >/dev/null 2>&1; then + msg_info "Updating yq via apk" + apk add --no-cache --upgrade yq >/dev/null 2>&1 || true + msg_ok "yq ready ($(yq --version 2>/dev/null))" + return 0 + fi + + need_tool curl || return 1 + local arch bin url tmp + case "$(uname -m)" in + x86_64) arch="amd64" ;; + aarch64) arch="arm64" ;; + *) + msg_error "Unsupported arch for yq: $(uname -m)" + return 1 + ;; + esac + url="https://github.com/mikefarah/yq/releases/latest/download/yq_linux_${arch}" + tmp="$(mktemp)" + download_with_progress "$url" "$tmp" || return 1 + install -m 0755 "$tmp" /usr/local/bin/yq + rm -f "$tmp" + msg_ok "Setup yq ($(yq --version 2>/dev/null))" +} + +# ------------------------------ +# Adminer – Alpine +# ------------------------------ +setup_adminer() { + need_tool curl || return 1 + msg_info "Setup Adminer (Alpine)" + mkdir -p /var/www/localhost/htdocs/adminer + curl -fsSL https://github.com/vrana/adminer/releases/latest/download/adminer.php \ + -o /var/www/localhost/htdocs/adminer/index.php || { + msg_error "Adminer download failed" + return 1 + } + msg_ok "Adminer at /adminer (served by your webserver)" +} + +# ------------------------------ +# uv – Alpine (musl tarball) +# optional: PYTHON_VERSION="3.12" +# ------------------------------ +setup_uv() { + need_tool curl tar || return 1 + local UV_BIN="/usr/local/bin/uv" + local arch tarball url tmpd ver installed + + case "$(uname -m)" in + x86_64) arch="x86_64-unknown-linux-musl" ;; + aarch64) arch="aarch64-unknown-linux-musl" ;; + *) + msg_error "Unsupported arch for uv: $(uname -m)" + return 1 + ;; + esac + + ver="$(curl -fsSL https://api.github.com/repos/astral-sh/uv/releases/latest | jq -r '.tag_name' 2>/dev/null)" + ver="${ver#v}" + [ -z "$ver" ] && { + msg_error "uv: cannot determine latest version" + return 1 + } + + if has "$UV_BIN"; then + installed="$($UV_BIN -V 2>/dev/null | awk '{print $2}')" + [ "$installed" = "$ver" ] && { + msg_ok "uv $ver already installed" + return 0 + } + msg_info "Updating uv $installed → $ver" + else + msg_info "Setup uv $ver" + fi + + tmpd="$(mktemp -d)" || return 1 + tarball="uv-${arch}.tar.gz" + url="https://github.com/astral-sh/uv/releases/download/v${ver}/${tarball}" + + download_with_progress "$url" "$tmpd/uv.tar.gz" || { + rm -rf "$tmpd" + return 1 + } + tar -xzf "$tmpd/uv.tar.gz" -C "$tmpd" || { + msg_error "uv: extract failed" + rm -rf "$tmpd" + return 1 + } + + # tar contains ./uv + if [ -x "$tmpd/uv" ]; then + install -m 0755 "$tmpd/uv" "$UV_BIN" + else + # fallback: in subfolder + install -m 0755 
"$tmpd"/*/uv "$UV_BIN" 2>/dev/null || { + msg_error "uv binary not found in tar" + rm -rf "$tmpd" + return 1 + } + fi + rm -rf "$tmpd" + ensure_usr_local_bin_persist + msg_ok "Setup uv $ver" + + if [ -n "${PYTHON_VERSION:-}" ]; then + local match + match="$(uv python list --only-downloads 2>/dev/null | awk -v maj="$PYTHON_VERSION" ' + $0 ~ "^cpython-"maj"\\." { print $0 }' | awk -F- '{print $2}' | sort -V | tail -n1)" + [ -z "$match" ] && { + msg_error "No matching Python for $PYTHON_VERSION" + return 1 + } + if ! uv python list | grep -q "cpython-${match}-linux"; then + msg_info "Installing Python $match via uv" + uv python install "$match" || { + msg_error "uv python install failed" + return 1 + } + msg_ok "Python $match installed (uv)" + fi + fi +} + +# ------------------------------ +# Java – Alpine (OpenJDK) +# JAVA_VERSION: 17|21 (Default 21) +# ------------------------------ +setup_java() { + local JAVA_VERSION="${JAVA_VERSION:-21}" pkg + case "$JAVA_VERSION" in + 17) pkg="openjdk17-jdk" ;; + 21 | *) pkg="openjdk21-jdk" ;; + esac + msg_info "Setup Java (OpenJDK $JAVA_VERSION)" + apk add --no-cache "$pkg" >/dev/null 2>&1 || { + msg_error "apk add $pkg failed" + return 1 + } + # set JAVA_HOME + local prof="/etc/profile.d/20-java.sh" + if [ ! -f "$prof" ]; then + echo 'export JAVA_HOME=$(dirname $(dirname $(readlink -f $(command -v java))))' >"$prof" + echo 'case ":$PATH:" in *:$JAVA_HOME/bin:*) ;; *) export PATH="$JAVA_HOME/bin:$PATH";; esac' >>"$prof" + chmod +x "$prof" + fi + msg_ok "Java ready: $(java -version 2>&1 | head -n1)" +} + +# ------------------------------ +# Go – Alpine (apk prefers, else tarball) +# ------------------------------ +setup_go() { + if [ -z "${GO_VERSION:-}" ]; then + msg_info "Setup Go (apk)" + apk add --no-cache go >/dev/null 2>&1 || { + msg_error "apk add go failed" + return 1 + } + msg_ok "Go ready: $(go version 2>/dev/null)" + return 0 + fi + + need_tool curl tar || return 1 + local ARCH TARBALL URL TMP + case "$(uname -m)" in + x86_64) ARCH="amd64" ;; + aarch64) ARCH="arm64" ;; + *) + msg_error "Unsupported arch for Go: $(uname -m)" + return 1 + ;; + esac + TARBALL="go${GO_VERSION}.linux-${ARCH}.tar.gz" + URL="https://go.dev/dl/${TARBALL}" + msg_info "Setup Go $GO_VERSION (tarball)" + TMP="$(mktemp)" + download_with_progress "$URL" "$TMP" || return 1 + rm -rf /usr/local/go + tar -C /usr/local -xzf "$TMP" || { + msg_error "extract go failed" + rm -f "$TMP" + return 1 + } + rm -f "$TMP" + ln -sf /usr/local/go/bin/go /usr/local/bin/go + ln -sf /usr/local/go/bin/gofmt /usr/local/bin/gofmt + ensure_usr_local_bin_persist + msg_ok "Go ready: $(go version 2>/dev/null)" +} + +# ------------------------------ +# Composer – Alpine +# uses php83-cli + openssl + phar +# ------------------------------ +setup_composer() { + local COMPOSER_BIN="/usr/local/bin/composer" + if ! 
has php; then + # prefers php83 + msg_info "Installing PHP CLI for Composer" + apk add --no-cache php83-cli php83-openssl php83-phar php83-iconv >/dev/null 2>&1 || { + # Fallback to generic php if 83 not available + apk add --no-cache php-cli php-openssl php-phar php-iconv >/dev/null 2>&1 || { + msg_error "Failed to install php-cli for composer" + return 1 + } + } + msg_ok "PHP CLI ready: $(php -v | head -n1)" + fi + + if [ -x "$COMPOSER_BIN" ]; then + msg_info "Updating Composer" + else + msg_info "Setup Composer" + fi + + need_tool curl || return 1 + curl -fsSL https://getcomposer.org/installer -o /tmp/composer-setup.php || { + msg_error "composer installer download failed" + return 1 + } + php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer >/dev/null 2>&1 || { + msg_error "composer install failed" + return 1 + } + rm -f /tmp/composer-setup.php + ensure_usr_local_bin_persist + msg_ok "Composer ready: $(composer --version 2>/dev/null)" +} \ No newline at end of file diff --git a/scripts/core/cloud-init.func b/scripts/core/cloud-init.func new file mode 100644 index 0000000..ea95d9a --- /dev/null +++ b/scripts/core/cloud-init.func @@ -0,0 +1,505 @@ +#!/usr/bin/env bash +# Copyright (c) 2021-2025 community-scripts ORG +# Author: community-scripts ORG +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/branch/main/LICENSE +# Revision: 1 + +# ============================================================================== +# CLOUD-INIT.FUNC - VM CLOUD-INIT CONFIGURATION LIBRARY +# ============================================================================== +# +# Universal helper library for Cloud-Init configuration in Proxmox VMs. +# Provides functions for: +# +# - Native Proxmox Cloud-Init setup (user, password, network, SSH keys) +# - Interactive configuration dialogs (whiptail) +# - IP address retrieval via qemu-guest-agent +# - Cloud-Init status monitoring and waiting +# +# Usage: +# source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/cloud-init.func) +# setup_cloud_init "$VMID" "$STORAGE" "$HN" "yes" +# +# Compatible with: Debian, Ubuntu, and all Cloud-Init enabled distributions +# ============================================================================== + +# ============================================================================== +# SECTION 1: CONFIGURATION DEFAULTS +# ============================================================================== +# These can be overridden before sourcing this library + +CLOUDINIT_DEFAULT_USER="${CLOUDINIT_DEFAULT_USER:-root}" +CLOUDINIT_DNS_SERVERS="${CLOUDINIT_DNS_SERVERS:-1.1.1.1 8.8.8.8}" +CLOUDINIT_SEARCH_DOMAIN="${CLOUDINIT_SEARCH_DOMAIN:-local}" +CLOUDINIT_SSH_KEYS="${CLOUDINIT_SSH_KEYS:-/root/.ssh/authorized_keys}" + +# ============================================================================== +# SECTION 2: HELPER FUNCTIONS +# ============================================================================== + +# ------------------------------------------------------------------------------ +# _ci_msg - Internal message helper with fallback +# ------------------------------------------------------------------------------ +function _ci_msg_info() { msg_info "$1" 2>/dev/null || echo "[INFO] $1"; } +function _ci_msg_ok() { msg_ok "$1" 2>/dev/null || echo "[OK] $1"; } +function _ci_msg_warn() { msg_warn "$1" 2>/dev/null || echo "[WARN] $1"; } +function _ci_msg_error() { msg_error "$1" 2>/dev/null || echo "[ERROR] $1"; } + +# 
------------------------------------------------------------------------------ +# validate_ip_cidr - Validate IP address in CIDR format +# Usage: validate_ip_cidr "192.168.1.100/24" && echo "Valid" +# Returns: 0 if valid, 1 if invalid +# ------------------------------------------------------------------------------ +function validate_ip_cidr() { + local ip_cidr="$1" + # Match: 0-255.0-255.0-255.0-255/0-32 + if [[ "$ip_cidr" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$ ]]; then + # Validate each octet is 0-255 + local ip="${ip_cidr%/*}" + IFS='.' read -ra octets <<<"$ip" + for octet in "${octets[@]}"; do + ((octet > 255)) && return 1 + done + return 0 + fi + return 1 +} + +# ------------------------------------------------------------------------------ +# validate_ip - Validate plain IP address (no CIDR) +# Usage: validate_ip "192.168.1.1" && echo "Valid" +# ------------------------------------------------------------------------------ +function validate_ip() { + local ip="$1" + if [[ "$ip" =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then + IFS='.' read -ra octets <<<"$ip" + for octet in "${octets[@]}"; do + ((octet > 255)) && return 1 + done + return 0 + fi + return 1 +} + +# ============================================================================== +# SECTION 3: MAIN CLOUD-INIT FUNCTIONS +# ============================================================================== + +# ------------------------------------------------------------------------------ +# setup_cloud_init - Configures Proxmox Native Cloud-Init +# ------------------------------------------------------------------------------ +# Parameters: +# $1 - VMID (required) +# $2 - Storage name (required) +# $3 - Hostname (optional, default: vm-) +# $4 - Enable Cloud-Init (yes/no, default: no) +# $5 - User (optional, default: root) +# $6 - Network mode (dhcp/static, default: dhcp) +# $7 - Static IP (optional, format: 192.168.1.100/24) +# $8 - Gateway (optional) +# $9 - Nameservers (optional, default: 1.1.1.1 8.8.8.8) +# +# Returns: 0 on success, 1 on failure +# Exports: CLOUDINIT_USER, CLOUDINIT_PASSWORD, CLOUDINIT_CRED_FILE +# ============================================================================== +function setup_cloud_init() { + local vmid="$1" + local storage="$2" + local hostname="${3:-vm-${vmid}}" + local enable="${4:-no}" + local ciuser="${5:-$CLOUDINIT_DEFAULT_USER}" + local network_mode="${6:-dhcp}" + local static_ip="${7:-}" + local gateway="${8:-}" + local nameservers="${9:-$CLOUDINIT_DNS_SERVERS}" + + # Skip if not enabled + if [ "$enable" != "yes" ]; then + return 0 + fi + + # Validate static IP if provided + if [ "$network_mode" = "static" ]; then + if [ -n "$static_ip" ] && ! validate_ip_cidr "$static_ip"; then + _ci_msg_error "Invalid static IP format: $static_ip (expected: x.x.x.x/xx)" + return 1 + fi + if [ -n "$gateway" ] && ! validate_ip "$gateway"; then + _ci_msg_error "Invalid gateway IP format: $gateway" + return 1 + fi + fi + + _ci_msg_info "Configuring Cloud-Init" + + # Create Cloud-Init drive (try ide2 first, then scsi1 as fallback) + if ! 
qm set "$vmid" --ide2 "${storage}:cloudinit" >/dev/null 2>&1; then + qm set "$vmid" --scsi1 "${storage}:cloudinit" >/dev/null 2>&1 + fi + + # Set user + qm set "$vmid" --ciuser "$ciuser" >/dev/null + + # Generate and set secure random password + local cipassword=$(openssl rand -base64 16) + qm set "$vmid" --cipassword "$cipassword" >/dev/null + + # Add SSH keys if available + if [ -f "$CLOUDINIT_SSH_KEYS" ]; then + qm set "$vmid" --sshkeys "$CLOUDINIT_SSH_KEYS" >/dev/null 2>&1 || true + fi + + # Configure network + if [ "$network_mode" = "static" ] && [ -n "$static_ip" ] && [ -n "$gateway" ]; then + qm set "$vmid" --ipconfig0 "ip=${static_ip},gw=${gateway}" >/dev/null + else + qm set "$vmid" --ipconfig0 "ip=dhcp" >/dev/null + fi + + # Set DNS servers + qm set "$vmid" --nameserver "$nameservers" >/dev/null + + # Set search domain + qm set "$vmid" --searchdomain "$CLOUDINIT_SEARCH_DOMAIN" >/dev/null + + # Enable package upgrades on first boot (if supported by Proxmox version) + qm set "$vmid" --ciupgrade 1 >/dev/null 2>&1 || true + + # Save credentials to file (with restrictive permissions) + local cred_file="/tmp/${hostname}-${vmid}-cloud-init-credentials.txt" + umask 077 + cat >"$cred_file" < + +Proxmox UI Configuration: + VM ${vmid} > Cloud-Init > Edit + - User, Password, SSH Keys + - Network (IP Config) + - DNS, Search Domain + +──────────────────────────────────────── +🗑️ To delete this file: + rm -f ${cred_file} +──────────────────────────────────────── +EOF + chmod 600 "$cred_file" + + _ci_msg_ok "Cloud-Init configured (User: ${ciuser})" + + # Export for use in calling script (DO NOT display password here - will be shown in summary) + export CLOUDINIT_USER="$ciuser" + export CLOUDINIT_PASSWORD="$cipassword" + export CLOUDINIT_CRED_FILE="$cred_file" + + return 0 +} + +# ============================================================================== +# SECTION 4: INTERACTIVE CONFIGURATION +# ============================================================================== + +# ------------------------------------------------------------------------------ +# configure_cloud_init_interactive - Whiptail dialog for Cloud-Init setup +# ------------------------------------------------------------------------------ +# Prompts user for Cloud-Init configuration choices +# Returns configuration via exported variables: +# - CLOUDINIT_ENABLE (yes/no) +# - CLOUDINIT_USER +# - CLOUDINIT_NETWORK_MODE (dhcp/static) +# - CLOUDINIT_IP (if static) +# - CLOUDINIT_GW (if static) +# - CLOUDINIT_DNS +# ------------------------------------------------------------------------------ +function configure_cloud_init_interactive() { + local default_user="${1:-root}" + + # Check if whiptail is available + if ! command -v whiptail >/dev/null 2>&1; then + echo "Warning: whiptail not available, skipping interactive configuration" + export CLOUDINIT_ENABLE="no" + return 1 + fi + + # Ask if user wants to enable Cloud-Init + if ! (whiptail --backtitle "Proxmox VE Helper Scripts" --title "CLOUD-INIT" \ + --yesno "Enable Cloud-Init for VM configuration?\n\nCloud-Init allows automatic configuration of:\n• User accounts and passwords\n• SSH keys\n• Network settings (DHCP/Static)\n• DNS configuration\n\nYou can also configure these settings later in Proxmox UI." 
16 68); then + export CLOUDINIT_ENABLE="no" + return 0 + fi + + export CLOUDINIT_ENABLE="yes" + + # Username + if CLOUDINIT_USER=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox \ + "Cloud-Init Username" 8 58 "$default_user" --title "USERNAME" 3>&1 1>&2 2>&3); then + export CLOUDINIT_USER="${CLOUDINIT_USER:-$default_user}" + else + export CLOUDINIT_USER="$default_user" + fi + + # Network configuration + if (whiptail --backtitle "Proxmox VE Helper Scripts" --title "NETWORK MODE" \ + --yesno "Use DHCP for network configuration?\n\nSelect 'No' for static IP configuration." 10 58); then + export CLOUDINIT_NETWORK_MODE="dhcp" + else + export CLOUDINIT_NETWORK_MODE="static" + + # Static IP with validation + while true; do + if CLOUDINIT_IP=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox \ + "Static IP Address (CIDR format)\nExample: 192.168.1.100/24" 9 58 "" --title "IP ADDRESS" 3>&1 1>&2 2>&3); then + if validate_ip_cidr "$CLOUDINIT_IP"; then + export CLOUDINIT_IP + break + else + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID IP" \ + --msgbox "Invalid IP format: $CLOUDINIT_IP\n\nPlease use CIDR format: x.x.x.x/xx\nExample: 192.168.1.100/24" 10 50 + fi + else + _ci_msg_warn "Static IP required, falling back to DHCP" + export CLOUDINIT_NETWORK_MODE="dhcp" + break + fi + done + + # Gateway with validation + if [ "$CLOUDINIT_NETWORK_MODE" = "static" ]; then + while true; do + if CLOUDINIT_GW=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox \ + "Gateway IP Address\nExample: 192.168.1.1" 8 58 "" --title "GATEWAY" 3>&1 1>&2 2>&3); then + if validate_ip "$CLOUDINIT_GW"; then + export CLOUDINIT_GW + break + else + whiptail --backtitle "Proxmox VE Helper Scripts" --title "INVALID GATEWAY" \ + --msgbox "Invalid gateway format: $CLOUDINIT_GW\n\nPlease use format: x.x.x.x\nExample: 192.168.1.1" 10 50 + fi + else + _ci_msg_warn "Gateway required, falling back to DHCP" + export CLOUDINIT_NETWORK_MODE="dhcp" + break + fi + done + fi + fi + + # DNS Servers + if CLOUDINIT_DNS=$(whiptail --backtitle "Proxmox VE Helper Scripts" --inputbox \ + "DNS Servers (space-separated)" 8 58 "1.1.1.1 8.8.8.8" --title "DNS SERVERS" 3>&1 1>&2 2>&3); then + export CLOUDINIT_DNS="${CLOUDINIT_DNS:-1.1.1.1 8.8.8.8}" + else + export CLOUDINIT_DNS="1.1.1.1 8.8.8.8" + fi + + return 0 +} + +# ============================================================================== +# SECTION 5: UTILITY FUNCTIONS +# ============================================================================== + +# ------------------------------------------------------------------------------ +# display_cloud_init_info - Show Cloud-Init summary after setup +# ------------------------------------------------------------------------------ +function display_cloud_init_info() { + local vmid="$1" + local hostname="${2:-}" + + if [ -n "$CLOUDINIT_CRED_FILE" ] && [ -f "$CLOUDINIT_CRED_FILE" ]; then + if [ -n "${INFO:-}" ]; then + echo -e "\n${INFO}${BOLD:-}${GN:-} Cloud-Init Configuration:${CL:-}" + echo -e "${TAB:- }${DGN:-}User: ${BGN:-}${CLOUDINIT_USER:-root}${CL:-}" + echo -e "${TAB:- }${DGN:-}Password: ${BGN:-}${CLOUDINIT_PASSWORD}${CL:-}" + echo -e "${TAB:- }${DGN:-}Credentials: ${BL:-}${CLOUDINIT_CRED_FILE}${CL:-}" + echo -e "${TAB:- }${RD:-}⚠️ Delete credentials file after noting password!${CL:-}" + echo -e "${TAB:- }${YW:-}💡 Configure in Proxmox UI: VM ${vmid} > Cloud-Init${CL:-}" + else + echo "" + echo "[INFO] Cloud-Init Configuration:" + echo " User: ${CLOUDINIT_USER:-root}" + echo " Password: 
${CLOUDINIT_PASSWORD}" + echo " Credentials: ${CLOUDINIT_CRED_FILE}" + echo " ⚠️ Delete credentials file after noting password!" + echo " Configure in Proxmox UI: VM ${vmid} > Cloud-Init" + fi + fi +} + +# ------------------------------------------------------------------------------ +# cleanup_cloud_init_credentials - Remove credentials file +# ------------------------------------------------------------------------------ +# Usage: cleanup_cloud_init_credentials +# Call this after user has noted/saved the credentials +# ------------------------------------------------------------------------------ +function cleanup_cloud_init_credentials() { + if [ -n "$CLOUDINIT_CRED_FILE" ] && [ -f "$CLOUDINIT_CRED_FILE" ]; then + rm -f "$CLOUDINIT_CRED_FILE" + _ci_msg_ok "Credentials file removed: $CLOUDINIT_CRED_FILE" + unset CLOUDINIT_CRED_FILE + return 0 + fi + return 1 +} + +# ------------------------------------------------------------------------------ +# has_cloud_init - Check if VM has Cloud-Init configured +# ------------------------------------------------------------------------------ +function has_cloud_init() { + local vmid="$1" + qm config "$vmid" 2>/dev/null | grep -qE "(ide2|scsi1):.*cloudinit" +} + +# ------------------------------------------------------------------------------ +# regenerate_cloud_init - Regenerate Cloud-Init configuration +# ------------------------------------------------------------------------------ +function regenerate_cloud_init() { + local vmid="$1" + + if has_cloud_init "$vmid"; then + _ci_msg_info "Regenerating Cloud-Init configuration" + qm cloudinit update "$vmid" >/dev/null 2>&1 || true + _ci_msg_ok "Cloud-Init configuration regenerated" + return 0 + else + _ci_msg_warn "VM $vmid does not have Cloud-Init configured" + return 1 + fi +} + +# ------------------------------------------------------------------------------ +# get_vm_ip - Get VM IP address via qemu-guest-agent +# ------------------------------------------------------------------------------ +function get_vm_ip() { + local vmid="$1" + local timeout="${2:-30}" + + local elapsed=0 + while [ $elapsed -lt $timeout ]; do + local vm_ip=$(qm guest cmd "$vmid" network-get-interfaces 2>/dev/null | + jq -r '.[] | select(.name != "lo") | ."ip-addresses"[]? 
| select(."ip-address-type" == "ipv4") | ."ip-address"' 2>/dev/null | head -1) + + if [ -n "$vm_ip" ]; then + echo "$vm_ip" + return 0 + fi + + sleep 2 + elapsed=$((elapsed + 2)) + done + + return 1 +} + +# ------------------------------------------------------------------------------ +# wait_for_cloud_init - Wait for Cloud-Init to complete (requires SSH access) +# ------------------------------------------------------------------------------ +function wait_for_cloud_init() { + local vmid="$1" + local timeout="${2:-300}" + local vm_ip="${3:-}" + + # Get IP if not provided + if [ -z "$vm_ip" ]; then + vm_ip=$(get_vm_ip "$vmid" 60) + fi + + if [ -z "$vm_ip" ]; then + _ci_msg_warn "Unable to determine VM IP address" + return 1 + fi + + _ci_msg_info "Waiting for Cloud-Init to complete on ${vm_ip}" + + local elapsed=0 + while [ $elapsed -lt $timeout ]; do + if timeout 10 ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \ + "${CLOUDINIT_USER:-root}@${vm_ip}" "cloud-init status --wait" 2>/dev/null; then + _ci_msg_ok "Cloud-Init completed successfully" + return 0 + fi + sleep 10 + elapsed=$((elapsed + 10)) + done + + _ci_msg_warn "Cloud-Init did not complete within ${timeout}s" + return 1 +} + +# ============================================================================== +# SECTION 6: EXPORTS +# ============================================================================== +# Export all functions for use in other scripts + +export -f setup_cloud_init 2>/dev/null || true +export -f configure_cloud_init_interactive 2>/dev/null || true +export -f display_cloud_init_info 2>/dev/null || true +export -f cleanup_cloud_init_credentials 2>/dev/null || true +export -f has_cloud_init 2>/dev/null || true +export -f regenerate_cloud_init 2>/dev/null || true +export -f get_vm_ip 2>/dev/null || true +export -f wait_for_cloud_init 2>/dev/null || true +export -f validate_ip_cidr 2>/dev/null || true +export -f validate_ip 2>/dev/null || true + +# ============================================================================== +# SECTION 7: EXAMPLES & DOCUMENTATION +# ============================================================================== +: <<'EXAMPLES' + +# Example 1: Simple DHCP setup (most common) +setup_cloud_init "$VMID" "$STORAGE" "$HN" "yes" + +# Example 2: Static IP setup +setup_cloud_init "$VMID" "$STORAGE" "myserver" "yes" "root" "static" "192.168.1.100/24" "192.168.1.1" + +# Example 3: Interactive configuration in advanced_settings() +configure_cloud_init_interactive "admin" +if [ "$CLOUDINIT_ENABLE" = "yes" ]; then + setup_cloud_init "$VMID" "$STORAGE" "$HN" "yes" "$CLOUDINIT_USER" \ + "$CLOUDINIT_NETWORK_MODE" "$CLOUDINIT_IP" "$CLOUDINIT_GW" "$CLOUDINIT_DNS" +fi + +# Example 4: Display info after VM creation +display_cloud_init_info "$VMID" "$HN" + +# Example 5: Check if VM has Cloud-Init +if has_cloud_init "$VMID"; then + echo "Cloud-Init is configured" +fi + +# Example 6: Wait for Cloud-Init to complete after VM start +if [ "$START_VM" = "yes" ]; then + qm start "$VMID" + sleep 30 + wait_for_cloud_init "$VMID" 300 +fi + +# Example 7: Cleanup credentials file after user has noted password +display_cloud_init_info "$VMID" "$HN" +read -p "Have you saved the credentials? 
(y/N): " -r +[[ $REPLY =~ ^[Yy]$ ]] && cleanup_cloud_init_credentials + +# Example 8: Validate IP before using +if validate_ip_cidr "192.168.1.100/24"; then + echo "Valid IP/CIDR" +fi + +EXAMPLES \ No newline at end of file diff --git a/scripts/core/error-handler.func b/scripts/core/error-handler.func new file mode 100644 index 0000000..9ad4f6d --- /dev/null +++ b/scripts/core/error-handler.func @@ -0,0 +1,317 @@ +#!/usr/bin/env bash +# ------------------------------------------------------------------------------ +# ERROR HANDLER - ERROR & SIGNAL MANAGEMENT +# ------------------------------------------------------------------------------ +# Copyright (c) 2021-2025 community-scripts ORG +# Author: MickLesk (CanbiZ) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# ------------------------------------------------------------------------------ +# +# Provides comprehensive error handling and signal management for all scripts. +# Includes: +# - Exit code explanations (shell, package managers, databases, custom codes) +# - Error handler with detailed logging +# - Signal handlers (EXIT, INT, TERM) +# - Initialization function for trap setup +# +# Usage: +# source <(curl -fsSL .../error_handler.func) +# catch_errors +# +# ------------------------------------------------------------------------------ + +# ============================================================================== +# SECTION 1: EXIT CODE EXPLANATIONS +# ============================================================================== + +# ------------------------------------------------------------------------------ +# explain_exit_code() +# +# - Maps numeric exit codes to human-readable error descriptions +# - Supports: +# * Generic/Shell errors (1, 2, 126, 127, 128, 130, 137, 139, 143) +# * Package manager errors (APT, DPKG: 100, 101, 255) +# * Node.js/npm errors (243-249, 254) +# * Python/pip/uv errors (210-212) +# * PostgreSQL errors (231-234) +# * MySQL/MariaDB errors (260-263) +# * MongoDB errors (251-253) +# * Proxmox custom codes (200-209, 213-223, 225) +# - Returns description string for given exit code +# ------------------------------------------------------------------------------ +explain_exit_code() { + local code="$1" + case "$code" in + # --- Generic / Shell --- + 1) echo "General error / Operation not permitted" ;; + 2) echo "Misuse of shell builtins (e.g. 
syntax error)" ;; + 126) echo "Command invoked cannot execute (permission problem?)" ;; + 127) echo "Command not found" ;; + 128) echo "Invalid argument to exit" ;; + 130) echo "Terminated by Ctrl+C (SIGINT)" ;; + 137) echo "Killed (SIGKILL / Out of memory?)" ;; + 139) echo "Segmentation fault (core dumped)" ;; + 143) echo "Terminated (SIGTERM)" ;; + + # --- Package manager / APT / DPKG --- + 100) echo "APT: Package manager error (broken packages / dependency problems)" ;; + 101) echo "APT: Configuration error (bad sources.list, malformed config)" ;; + 255) echo "DPKG: Fatal internal error" ;; + + # --- Node.js / npm / pnpm / yarn --- + 243) echo "Node.js: Out of memory (JavaScript heap out of memory)" ;; + 245) echo "Node.js: Invalid command-line option" ;; + 246) echo "Node.js: Internal JavaScript Parse Error" ;; + 247) echo "Node.js: Fatal internal error" ;; + 248) echo "Node.js: Invalid C++ addon / N-API failure" ;; + 249) echo "Node.js: Inspector error" ;; + 254) echo "npm/pnpm/yarn: Unknown fatal error" ;; + + # --- Python / pip / uv --- + 210) echo "Python: Virtualenv / uv environment missing or broken" ;; + 211) echo "Python: Dependency resolution failed" ;; + 212) echo "Python: Installation aborted (permissions or EXTERNALLY-MANAGED)" ;; + + # --- PostgreSQL --- + 231) echo "PostgreSQL: Connection failed (server not running / wrong socket)" ;; + 232) echo "PostgreSQL: Authentication failed (bad user/password)" ;; + 233) echo "PostgreSQL: Database does not exist" ;; + 234) echo "PostgreSQL: Fatal error in query / syntax" ;; + + # --- MySQL / MariaDB --- + 260) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;; + 261) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;; + 262) echo "MySQL/MariaDB: Database does not exist" ;; + 263) echo "MySQL/MariaDB: Fatal error in query / syntax" ;; + + # --- MongoDB --- + 251) echo "MongoDB: Connection failed (server not running)" ;; + 252) echo "MongoDB: Authentication failed (bad user/password)" ;; + 253) echo "MongoDB: Database not found" ;; + + # --- Proxmox Custom Codes --- + 200) echo "Custom: Failed to create lock file" ;; + 201) echo "Custom: Cluster not quorate" ;; + 202) echo "Custom: Timeout waiting for template lock (concurrent download in progress)" ;; + 203) echo "Custom: Missing CTID variable" ;; + 204) echo "Custom: Missing PCT_OSTYPE variable" ;; + 205) echo "Custom: Invalid CTID (<100)" ;; + 206) echo "Custom: CTID already in use (check 'pct list' and /etc/pve/lxc/)" ;; + 207) echo "Custom: Password contains unescaped special characters (-, /, \\, *, etc.)" ;; + 208) echo "Custom: Invalid configuration (DNS/MAC/Network format error)" ;; + 209) echo "Custom: Container creation failed (check logs for pct create output)" ;; + 213) echo "Custom: LXC stack upgrade/retry failed (outdated pve-container - check https://github.com/community-scripts/ProxmoxVE/discussions/8126)" ;; + 214) echo "Custom: Not enough storage space" ;; + 215) echo "Custom: Container created but not listed (ghost state - check /etc/pve/lxc/)" ;; + 216) echo "Custom: RootFS entry missing in config (incomplete creation)" ;; + 217) echo "Custom: Storage does not support rootdir (check storage capabilities)" ;; + 218) echo "Custom: Template file corrupted or incomplete download (size <1MB or invalid archive)" ;; + 220) echo "Custom: Unable to resolve template path" ;; + 221) echo "Custom: Template file exists but not readable (check file permissions)" ;; + 222) echo "Custom: Template download failed after 3 attempts 
(network/storage issue)" ;; + 223) echo "Custom: Template not available after download (storage sync issue)" ;; + 225) echo "Custom: No template available for OS/Version (check 'pveam available')" ;; + + # --- Default --- + *) echo "Unknown error" ;; + esac +} + +# ============================================================================== +# SECTION 2: ERROR HANDLERS +# ============================================================================== + +# ------------------------------------------------------------------------------ +# error_handler() +# +# - Main error handler triggered by ERR trap +# - Arguments: exit_code, command, line_number +# - Behavior: +# * Returns silently if exit_code is 0 (success) +# * Sources explain_exit_code() for detailed error description +# * Displays error message with: +# - Line number where error occurred +# - Exit code with explanation +# - Command that failed +# * Shows last 20 lines of SILENT_LOGFILE if available +# * Copies log to container /root for later inspection +# * Exits with original exit code +# ------------------------------------------------------------------------------ +error_handler() { + local exit_code=${1:-$?} + local command=${2:-${BASH_COMMAND:-unknown}} + local line_number=${BASH_LINENO[0]:-unknown} + + command="${command//\$STD/}" + + if [[ "$exit_code" -eq 0 ]]; then + return 0 + fi + + local explanation + explanation="$(explain_exit_code "$exit_code")" + + printf "\e[?25h" + + # Use msg_error if available, fallback to echo + if declare -f msg_error >/dev/null 2>&1; then + msg_error "in line ${line_number}: exit code ${exit_code} (${explanation}): while executing command ${command}" + else + echo -e "\n${RD}[ERROR]${CL} in line ${RD}${line_number}${CL}: exit code ${RD}${exit_code}${CL} (${explanation}): while executing command ${YWB}${command}${CL}\n" + fi + + if [[ -n "${DEBUG_LOGFILE:-}" ]]; then + { + echo "------ ERROR ------" + echo "Timestamp : $(date '+%Y-%m-%d %H:%M:%S')" + echo "Exit Code : $exit_code ($explanation)" + echo "Line : $line_number" + echo "Command : $command" + echo "-------------------" + } >>"$DEBUG_LOGFILE" + fi + + # Get active log file (BUILD_LOG or INSTALL_LOG) + local active_log="" + if declare -f get_active_logfile >/dev/null 2>&1; then + active_log="$(get_active_logfile)" + elif [[ -n "${SILENT_LOGFILE:-}" ]]; then + active_log="$SILENT_LOGFILE" + fi + + if [[ -n "$active_log" && -s "$active_log" ]]; then + echo "--- Last 20 lines of silent log ---" + tail -n 20 "$active_log" + echo "-----------------------------------" + + # Detect context: Container (INSTALL_LOG set + /root exists) vs Host (BUILD_LOG) + if [[ -n "${INSTALL_LOG:-}" && -d /root ]]; then + # CONTAINER CONTEXT: Copy log and create flag file for host + local container_log="/root/.install-${SESSION_ID:-error}.log" + cp "$active_log" "$container_log" 2>/dev/null || true + + # Create error flag file with exit code for host detection + echo "$exit_code" >"/root/.install-${SESSION_ID:-error}.failed" 2>/dev/null || true + + if declare -f msg_custom >/dev/null 2>&1; then + msg_custom "📋" "${YW}" "Log saved to: ${container_log}" + else + echo -e "${YW}Log saved to:${CL} ${BL}${container_log}${CL}" + fi + else + # HOST CONTEXT: Show local log path and offer container cleanup + if declare -f msg_custom >/dev/null 2>&1; then + msg_custom "📋" "${YW}" "Full log: ${active_log}" + else + echo -e "${YW}Full log:${CL} ${BL}${active_log}${CL}" + fi + + # Offer to remove container if it exists (build errors after container creation) + if [[ -n 
"${CTID:-}" ]] && command -v pct &>/dev/null && pct status "$CTID" &>/dev/null; then + echo "" + echo -en "${YW}Remove broken container ${CTID}? (Y/n) [auto-remove in 60s]: ${CL}" + + if read -t 60 -r response; then + if [[ -z "$response" || "$response" =~ ^[Yy]$ ]]; then + echo -e "\n${YW}Removing container ${CTID}${CL}" + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + echo -e "${GN}✔${CL} Container ${CTID} removed" + elif [[ "$response" =~ ^[Nn]$ ]]; then + echo -e "\n${YW}Container ${CTID} kept for debugging${CL}" + fi + else + # Timeout - auto-remove + echo -e "\n${YW}No response - auto-removing container${CL}" + pct stop "$CTID" &>/dev/null || true + pct destroy "$CTID" &>/dev/null || true + echo -e "${GN}✔${CL} Container ${CTID} removed" + fi + fi + fi + fi + + exit "$exit_code" +} + +# ============================================================================== +# SECTION 3: SIGNAL HANDLERS +# ============================================================================== + +# ------------------------------------------------------------------------------ +# on_exit() +# +# - EXIT trap handler +# - Cleans up lock files if lockfile variable is set +# - Exits with captured exit code +# - Always runs on script termination (success or failure) +# ------------------------------------------------------------------------------ +on_exit() { + local exit_code=$? + [[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile" + exit "$exit_code" +} + +# ------------------------------------------------------------------------------ +# on_interrupt() +# +# - SIGINT (Ctrl+C) trap handler +# - Displays "Interrupted by user" message +# - Exits with code 130 (128 + SIGINT=2) +# ------------------------------------------------------------------------------ +on_interrupt() { + if declare -f msg_error >/dev/null 2>&1; then + msg_error "Interrupted by user (SIGINT)" + else + echo -e "\n${RD}Interrupted by user (SIGINT)${CL}" + fi + exit 130 +} + +# ------------------------------------------------------------------------------ +# on_terminate() +# +# - SIGTERM trap handler +# - Displays "Terminated by signal" message +# - Exits with code 143 (128 + SIGTERM=15) +# - Triggered by external process termination +# ------------------------------------------------------------------------------ +on_terminate() { + if declare -f msg_error >/dev/null 2>&1; then + msg_error "Terminated by signal (SIGTERM)" + else + echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}" + fi + exit 143 +} + +# ============================================================================== +# SECTION 4: INITIALIZATION +# ============================================================================== + +# ------------------------------------------------------------------------------ +# catch_errors() +# +# - Initializes error handling and signal traps +# - Enables strict error handling: +# * set -Ee: Exit on error, inherit ERR trap in functions +# * set -o pipefail: Pipeline fails if any command fails +# * set -u: (optional) Exit on undefined variable (if STRICT_UNSET=1) +# - Sets up traps: +# * ERR → error_handler +# * EXIT → on_exit +# * INT → on_interrupt +# * TERM → on_terminate +# - Call this function early in every script +# ------------------------------------------------------------------------------ +catch_errors() { + set -Ee -o pipefail + if [ "${STRICT_UNSET:-0}" = "1" ]; then + set -u + fi + + trap 'error_handler' ERR + trap on_exit EXIT + trap on_interrupt INT + trap on_terminate TERM +} 
\ No newline at end of file diff --git a/scripts/ct/debian.sh b/scripts/ct/debian.sh new file mode 100644 index 0000000..c82b107 --- /dev/null +++ b/scripts/ct/debian.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +SCRIPT_DIR="$(dirname "$0")" +source "$SCRIPT_DIR/../core/build.func" +# Copyright (c) 2021-2025 tteck +# Author: tteck (tteckster) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://www.debian.org/ + +APP="Debian" +var_tags="${var_tags:-os}" +var_cpu="${var_cpu:-1}" +var_ram="${var_ram:-512}" +var_disk="${var_disk:-2}" +var_os="${var_os:-debian}" +var_version="${var_version:-13}" +var_unprivileged="${var_unprivileged:-1}" + +header_info "$APP" +variables +color +catch_errors + +function update_script() { + header_info + check_container_storage + check_container_resources + if [[ ! -d /var ]]; then + msg_error "No ${APP} Installation Found!" + exit + fi + msg_info "Updating $APP LXC" + $STD apt update + $STD apt -y upgrade + msg_ok "Updated $APP LXC" + msg_ok "Updated successfully!" + exit +} + +start +build_container +description + +msg_ok "Completed Successfully!\n" +echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}" diff --git a/scripts/install/debian-install.sh b/scripts/install/debian-install.sh new file mode 100644 index 0000000..83539ab --- /dev/null +++ b/scripts/install/debian-install.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021-2025 tteck +# Author: tteck (tteckster) +# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE +# Source: https://www.debian.org/ + +source /dev/stdin <<<"$FUNCTIONS_FILE_PATH" +color +verb_ip6 +catch_errors +setting_up_container +network_check +update_os + +motd_ssh +customize +cleanup_lxc diff --git a/server.js b/server.js index c9a4dc5..e304fb8 100644 --- a/server.js +++ b/server.js @@ -299,7 +299,7 @@ class ScriptExecutionHandler { * @param {WebSocketMessage} message */ async handleMessage(ws, message) { - const { action, scriptPath, executionId, input, mode, server, isUpdate, isShell, isBackup, isClone, containerId, storage, backupStorage, cloneCount, hostnames, containerType } = message; + const { action, scriptPath, executionId, input, mode, server, isUpdate, isShell, isBackup, isClone, containerId, storage, backupStorage, cloneCount, hostnames, containerType, envVars } = message; switch (action) { case 'start': @@ -313,7 +313,7 @@ class ScriptExecutionHandler { } else if (isShell && containerId) { await this.startShellExecution(ws, containerId, executionId, mode, server); } else { - await this.startScriptExecution(ws, scriptPath, executionId, mode, server); + await this.startScriptExecution(ws, scriptPath, executionId, mode, server, envVars); } } else { this.sendMessage(ws, { @@ -351,8 +351,9 @@ class ScriptExecutionHandler { * @param {string} executionId * @param {string} mode * @param {ServerInfo|null} server + * @param {Object} [envVars] - Optional environment variables to pass to the script */ - async startScriptExecution(ws, scriptPath, executionId, mode = 'local', server = null) { + async startScriptExecution(ws, scriptPath, executionId, mode = 'local', server = null, envVars = {}) { /** @type {number|null} */ let installationId = null; @@ -381,7 +382,7 @@ class ScriptExecutionHandler { // Handle SSH execution if (mode === 'ssh' && server) { - await this.startSSHScriptExecution(ws, scriptPath, executionId, server, installationId); + await this.startSSHScriptExecution(ws, scriptPath, executionId, server, 
installationId, envVars); return; } @@ -407,19 +408,30 @@ class ScriptExecutionHandler { return; } + // Format environment variables for local execution + // Convert envVars object to environment variables + const envWithVars = { + ...process.env, + TERM: 'xterm-256color', // Enable proper terminal support + FORCE_ANSI: 'true', // Allow ANSI codes for proper display + COLUMNS: '80', // Set terminal width + LINES: '24' // Set terminal height + }; + + // Add envVars to environment + if (envVars && typeof envVars === 'object') { + for (const [key, value] of Object.entries(envVars)) { + envWithVars[key] = String(value); + } + } + // Start script execution with pty for proper TTY support const childProcess = ptySpawn('bash', [resolvedPath], { cwd: scriptsDir, name: 'xterm-256color', cols: 80, rows: 24, - env: { - ...process.env, - TERM: 'xterm-256color', // Enable proper terminal support - FORCE_ANSI: 'true', // Allow ANSI codes for proper display - COLUMNS: '80', // Set terminal width - LINES: '24' // Set terminal height - } + env: envWithVars }); // pty handles encoding automatically @@ -522,8 +534,9 @@ class ScriptExecutionHandler { * @param {string} executionId * @param {ServerInfo} server * @param {number|null} installationId + * @param {Object} [envVars] - Optional environment variables to pass to the script */ - async startSSHScriptExecution(ws, scriptPath, executionId, server, installationId = null) { + async startSSHScriptExecution(ws, scriptPath, executionId, server, installationId = null, envVars = {}) { const sshService = getSSHExecutionService(); // Send start message @@ -612,7 +625,8 @@ class ScriptExecutionHandler { // Clean up this.activeExecutions.delete(executionId); - } + }, + envVars )); // Store the execution with installation ID diff --git a/src/app/_components/ConfigurationModal.tsx b/src/app/_components/ConfigurationModal.tsx new file mode 100644 index 0000000..c8197a4 --- /dev/null +++ b/src/app/_components/ConfigurationModal.tsx @@ -0,0 +1,899 @@ +'use client'; + +import { useState, useEffect } from 'react'; +import { api } from '~/trpc/react'; +import type { Script } from '~/types/script'; +import type { Server } from '~/types/server'; +import { Button } from './ui/button'; +import { Input } from './ui/input'; +import { useRegisterModal } from './modal/ModalStackProvider'; + +export type EnvVars = Record; + +interface ConfigurationModalProps { + isOpen: boolean; + onClose: () => void; + onConfirm: (envVars: EnvVars) => void; + script: Script | null; + server: Server | null; + mode: 'default' | 'advanced'; +} + +export function ConfigurationModal({ + isOpen, + onClose, + onConfirm, + script, + server, + mode, +}: ConfigurationModalProps) { + useRegisterModal(isOpen, { id: 'configuration-modal', allowEscape: true, onClose }); + + // Fetch script data if we only have slug + const { data: scriptData } = api.scripts.getScriptBySlug.useQuery( + { slug: script?.slug ?? '' }, + { enabled: !!script?.slug && isOpen } + ); + + const actualScript = script ?? (scriptData?.script ?? null); + + // Fetch storages + const { data: rootfsStoragesData } = api.scripts.getRootfsStorages.useQuery( + { serverId: server?.id ?? 0, forceRefresh: false }, + { enabled: !!server?.id && isOpen } + ); + + const { data: templateStoragesData } = api.scripts.getTemplateStorages.useQuery( + { serverId: server?.id ?? 
0, forceRefresh: false }, + { enabled: !!server?.id && isOpen && mode === 'advanced' } + ); + + // Get resources from JSON + const resources = actualScript?.install_methods?.[0]?.resources; + const slug = actualScript?.slug ?? ''; + + // Default mode state + const [containerStorage, setContainerStorage] = useState(''); + + // Advanced mode state + const [advancedVars, setAdvancedVars] = useState({}); + + // Validation errors + const [errors, setErrors] = useState>({}); + + // Initialize defaults when script/server data is available + useEffect(() => { + if (!actualScript || !server) return; + + if (mode === 'default') { + // Default mode: minimal vars + setContainerStorage(''); + } else { + // Advanced mode: all vars with defaults + const defaults: EnvVars = { + // Resources from JSON + var_cpu: resources?.cpu ?? 1, + var_ram: resources?.ram ?? 1024, + var_disk: resources?.hdd ?? 4, + var_unprivileged: resources?.privileged === false ? 1 : (resources?.privileged === true ? 0 : 1), + + // Network defaults + var_net: 'dhcp', + var_brg: 'vmbr0', + var_gateway: '', + var_ipv6_method: 'none', + var_ipv6_static: '', + var_vlan: '', + var_mtu: 1500, + var_mac: '', + var_ns: '', + + // Identity + var_hostname: slug, + var_pw: '', + var_tags: 'community-script', + + // SSH + var_ssh: 'no', + var_ssh_authorized_key: '', + + // Features + var_nesting: 1, + var_fuse: 0, + var_keyctl: 0, + var_mknod: 0, + var_mount_fs: '', + var_protection: 'no', + + // System + var_timezone: '', + var_verbose: 'no', + var_apt_cacher: 'no', + var_apt_cacher_ip: '', + + // Storage + var_container_storage: '', + var_template_storage: '', + }; + setAdvancedVars(defaults); + } + }, [actualScript, server, mode, resources, slug]); + + // Validation functions + const validateIPv4 = (ip: string): boolean => { + if (!ip) return true; // Empty is allowed (auto) + const pattern = /^(\d{1,3}\.){3}\d{1,3}$/; + if (!pattern.test(ip)) return false; + const parts = ip.split('.').map(Number); + return parts.every(p => p >= 0 && p <= 255); + }; + + const validateCIDR = (cidr: string): boolean => { + if (!cidr) return true; // Empty is allowed + const pattern = /^([0-9]{1,3}\.){3}[0-9]{1,3}\/([0-9]|[1-2][0-9]|3[0-2])$/; + if (!pattern.test(cidr)) return false; + const parts = cidr.split('/'); + if (parts.length !== 2) return false; + const [ip, prefix] = parts; + if (!ip || !prefix) return false; + const ipParts = ip.split('.').map(Number); + if (!ipParts.every(p => p >= 0 && p <= 255)) return false; + const prefixNum = parseInt(prefix, 10); + return prefixNum >= 0 && prefixNum <= 32; + }; + + const validateIPv6 = (ipv6: string): boolean => { + if (!ipv6) return true; // Empty is allowed + // Basic IPv6 validation (simplified - allows compressed format) + const pattern = /^([0-9a-fA-F]{0,4}:){2,7}[0-9a-fA-F]{0,4}(\/\d{1,3})?$/; + return pattern.test(ipv6); + }; + + const validateMAC = (mac: string): boolean => { + if (!mac) return true; // Empty is allowed (auto) + const pattern = /^([0-9A-Fa-f]{2}:){5}([0-9A-Fa-f]{2})$/; + return pattern.test(mac); + }; + + const validatePositiveInt = (value: string | number | undefined): boolean => { + if (value === '' || value === undefined) return true; + const num = typeof value === 'string' ? 
parseInt(value, 10) : value; + return !isNaN(num) && num > 0; + }; + + const validateForm = (): boolean => { + const newErrors: Record = {}; + + if (mode === 'default') { + // Default mode: only storage is optional + // No validation needed + } else { + // Advanced mode: validate all fields + if (advancedVars.var_gateway && !validateIPv4(advancedVars.var_gateway as string)) { + newErrors.var_gateway = 'Invalid IPv4 address'; + } + if (advancedVars.var_mac && !validateMAC(advancedVars.var_mac as string)) { + newErrors.var_mac = 'Invalid MAC address format (XX:XX:XX:XX:XX:XX)'; + } + if (advancedVars.var_ns && !validateIPv4(advancedVars.var_ns as string)) { + newErrors.var_ns = 'Invalid IPv4 address'; + } + if (advancedVars.var_apt_cacher_ip && !validateIPv4(advancedVars.var_apt_cacher_ip as string)) { + newErrors.var_apt_cacher_ip = 'Invalid IPv4 address'; + } + // Validate IPv4 CIDR if network mode is static + const netValue = advancedVars.var_net; + const isStaticMode = netValue === 'static' || (typeof netValue === 'string' && netValue.includes('/')); + if (isStaticMode) { + const cidrValue = (typeof netValue === 'string' && netValue.includes('/')) ? netValue : (advancedVars.var_ip as string ?? ''); + if (cidrValue && !validateCIDR(cidrValue)) { + newErrors.var_ip = 'Invalid CIDR format (e.g., 10.10.10.1/24)'; + } + } + // Validate IPv6 static if IPv6 method is static + if (advancedVars.var_ipv6_method === 'static' && advancedVars.var_ipv6_static) { + if (!validateIPv6(advancedVars.var_ipv6_static as string)) { + newErrors.var_ipv6_static = 'Invalid IPv6 address'; + } + } + if (!validatePositiveInt(advancedVars.var_cpu)) { + newErrors.var_cpu = 'Must be a positive integer'; + } + if (!validatePositiveInt(advancedVars.var_ram)) { + newErrors.var_ram = 'Must be a positive integer'; + } + if (!validatePositiveInt(advancedVars.var_disk)) { + newErrors.var_disk = 'Must be a positive integer'; + } + if (advancedVars.var_mtu && !validatePositiveInt(advancedVars.var_mtu)) { + newErrors.var_mtu = 'Must be a positive integer'; + } + if (advancedVars.var_vlan && !validatePositiveInt(advancedVars.var_vlan)) { + newErrors.var_vlan = 'Must be a positive integer'; + } + } + + setErrors(newErrors); + return Object.keys(newErrors).length === 0; + }; + + const handleConfirm = () => { + if (!validateForm()) { + return; + } + + let envVars: EnvVars = {}; + + if (mode === 'default') { + // Default mode: minimal vars + envVars = { + var_hostname: slug, + var_brg: 'vmbr0', + var_net: 'dhcp', + var_ipv6_method: 'auto', + var_ssh: 'no', + var_nesting: 1, + var_verbose: 'no', + var_cpu: resources?.cpu ?? 1, + var_ram: resources?.ram ?? 1024, + var_disk: resources?.hdd ?? 4, + var_unprivileged: resources?.privileged === false ? 1 : (resources?.privileged === true ? 
0 : 1), + }; + + if (containerStorage) { + envVars.var_container_storage = containerStorage; + } + } else { + // Advanced mode: all vars + envVars = { ...advancedVars }; + + // If network mode is static and var_ip is set, replace var_net with the CIDR + if (envVars.var_net === 'static' && envVars.var_ip) { + envVars.var_net = envVars.var_ip as string; + delete envVars.var_ip; // Remove the temporary var_ip + } + + // Format password correctly: if var_pw is set, format it as "-password " + // build.func expects PW to be in "-password " format when added to PCT_OPTIONS + const rawPassword = envVars.var_pw; + const hasPassword = rawPassword && typeof rawPassword === 'string' && rawPassword.trim() !== ''; + const hasSSHKey = envVars.var_ssh_authorized_key && typeof envVars.var_ssh_authorized_key === 'string' && envVars.var_ssh_authorized_key.trim() !== ''; + + if (hasPassword) { + // Remove any existing "-password" prefix to avoid double-formatting + const cleanPassword = rawPassword.startsWith('-password ') + ? rawPassword.substring(11) + : rawPassword; + // Format as "-password " for build.func + envVars.var_pw = `-password ${cleanPassword}`; + } else { + // Empty password means auto-login, clear var_pw + envVars.var_pw = ''; + } + + + if ((hasPassword || hasSSHKey) && envVars.var_ssh !== 'no') { + envVars.var_ssh = 'yes'; + } + } + + // Remove empty string values (but keep 0, false, etc.) + const cleaned: EnvVars = {}; + for (const [key, value] of Object.entries(envVars)) { + if (value !== '' && value !== undefined) { + cleaned[key] = value; + } + } + + // Always set mode to "default" (build.func line 1783 expects this) + cleaned.mode = 'default'; + + onConfirm(cleaned); + }; + + const updateAdvancedVar = (key: string, value: string | number | boolean) => { + setAdvancedVars(prev => ({ ...prev, [key]: value })); + // Clear error for this field + if (errors[key]) { + setErrors(prev => { + const newErrors = { ...prev }; + delete newErrors[key]; + return newErrors; + }); + } + }; + + if (!isOpen) return null; + + const rootfsStorages = rootfsStoragesData?.storages ?? []; + const templateStorages = templateStoragesData?.storages ?? []; + + return ( +
+
+ {/* Header */} +
+

+ {mode === 'default' ? 'Default Configuration' : 'Advanced Configuration'} +

+ +
+ + {/* Content */} +
+ {mode === 'default' ? ( + /* Default Mode */ +
+
+ + + {rootfsStorages.length === 0 && ( +

+ Could not fetch storages. Script will use default selection. +

+ )} +
+ +
+

Default Values

+
+

Hostname: {slug}

+

Bridge: vmbr0

+

Network: DHCP

+

IPv6: Auto

+

SSH: Disabled

+

Nesting: Enabled

+

CPU: {resources?.cpu ?? 1}

+

RAM: {resources?.ram ?? 1024} MB

+

Disk: {resources?.hdd ?? 4} GB

+
+
+
+ ) : ( + /* Advanced Mode */ +
+ {/* Resources */} +
+

Resources

+
+
+ + updateAdvancedVar('var_cpu', parseInt(e.target.value) || 1)} + className={errors.var_cpu ? 'border-destructive' : ''} + /> + {errors.var_cpu && ( +

{errors.var_cpu}

+ )} +
+
+ + updateAdvancedVar('var_ram', parseInt(e.target.value) || 1024)} + className={errors.var_ram ? 'border-destructive' : ''} + /> + {errors.var_ram && ( +

{errors.var_ram}

+ )} +
+
+ + updateAdvancedVar('var_disk', parseInt(e.target.value) || 4)} + className={errors.var_disk ? 'border-destructive' : ''} + /> + {errors.var_disk && ( +

{errors.var_disk}

+ )} +
+
+ + +
+
+
+ + {/* Network */} +
+

Network

+
+
+ + +
+ {(advancedVars.var_net === 'static' || (typeof advancedVars.var_net === 'string' && advancedVars.var_net.includes('/'))) && ( +
+ + { + // Store in var_ip temporarily, will be moved to var_net on confirm + updateAdvancedVar('var_ip', e.target.value); + }} + placeholder="10.10.10.1/24" + className={errors.var_ip ? 'border-destructive' : ''} + /> + {errors.var_ip && ( +

{errors.var_ip}

+ )} +
+ )} +
+ + updateAdvancedVar('var_brg', e.target.value)} + placeholder="vmbr0" + /> +
+
+ + updateAdvancedVar('var_gateway', e.target.value)} + placeholder="Auto" + className={errors.var_gateway ? 'border-destructive' : ''} + /> + {errors.var_gateway && ( +

{errors.var_gateway}

+ )} +
+
+ + +
+ {advancedVars.var_ipv6_method === 'static' && ( +
+ + updateAdvancedVar('var_ipv6_static', e.target.value)} + placeholder="2001:db8::1/64" + className={errors.var_ipv6_static ? 'border-destructive' : ''} + /> + {errors.var_ipv6_static && ( +

{errors.var_ipv6_static}

+ )} +
+ )} +
+ + updateAdvancedVar('var_vlan', e.target.value ? parseInt(e.target.value) : '')} + placeholder="None" + className={errors.var_vlan ? 'border-destructive' : ''} + /> + {errors.var_vlan && ( +

{errors.var_vlan}

+ )} +
+
+ + updateAdvancedVar('var_mtu', e.target.value ? parseInt(e.target.value) : 1500)} + placeholder="1500" + className={errors.var_mtu ? 'border-destructive' : ''} + /> + {errors.var_mtu && ( +

{errors.var_mtu}

+ )} +
+
+ + updateAdvancedVar('var_mac', e.target.value)} + placeholder="Auto" + className={errors.var_mac ? 'border-destructive' : ''} + /> + {errors.var_mac && ( +

{errors.var_mac}

+ )} +
+
+ + updateAdvancedVar('var_ns', e.target.value)} + placeholder="Auto" + className={errors.var_ns ? 'border-destructive' : ''} + /> + {errors.var_ns && ( +

{errors.var_ns}

+ )} +
+
+
+ + {/* Identity & Metadata */} +
+

Identity & Metadata

+
+
+ + updateAdvancedVar('var_hostname', e.target.value)} + placeholder={slug} + /> +
+
+ + updateAdvancedVar('var_pw', e.target.value)} + placeholder="Random (empty = auto-login)" + /> +
+
+ + updateAdvancedVar('var_tags', e.target.value)} + placeholder="community-script" + /> +
+
+
+ + {/* SSH Access */} +
+

SSH Access

+
+
+ + +
+
+ + updateAdvancedVar('var_ssh_authorized_key', e.target.value)} + placeholder="ssh-rsa AAAA..." + /> +
+
+
+ + {/* Container Features */} +
+

Container Features

+
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + updateAdvancedVar('var_mount_fs', e.target.value)} + placeholder="nfs,cifs" + /> +
+
+ + +
+
+
+ + {/* System Configuration */} +
+

System Configuration

+
+
+ + updateAdvancedVar('var_timezone', e.target.value)} + placeholder="System" + /> +
+
+ + +
+
+ + +
+
+ + updateAdvancedVar('var_apt_cacher_ip', e.target.value)} + placeholder="192.168.1.10" + className={errors.var_apt_cacher_ip ? 'border-destructive' : ''} + /> + {errors.var_apt_cacher_ip && ( +

{errors.var_apt_cacher_ip}

+ )} +
+
+
+ + {/* Storage Selection */} +
+

Storage Selection

+
+
+ + + {rootfsStorages.length === 0 && ( +

+ Could not fetch storages. Leave empty for auto selection. +

+ )} +
+
+ + + {templateStorages.length === 0 && ( +

+ Could not fetch storages. Leave empty for auto selection. +

+ )} +
+
+
+
+ )} + + {/* Action Buttons */} +
+ + +
+
+
+
+ ); +} + diff --git a/src/app/_components/ExecutionModeModal.tsx b/src/app/_components/ExecutionModeModal.tsx index bc715c1..5cca184 100644 --- a/src/app/_components/ExecutionModeModal.tsx +++ b/src/app/_components/ExecutionModeModal.tsx @@ -2,26 +2,31 @@ import { useState, useEffect } from 'react'; import type { Server } from '../../types/server'; +import type { Script } from '../../types/script'; import { Button } from './ui/button'; import { ColorCodedDropdown } from './ColorCodedDropdown'; import { SettingsModal } from './SettingsModal'; +import { ConfigurationModal, type EnvVars } from './ConfigurationModal'; import { useRegisterModal } from './modal/ModalStackProvider'; interface ExecutionModeModalProps { isOpen: boolean; onClose: () => void; - onExecute: (mode: 'local' | 'ssh', server?: Server) => void; + onExecute: (mode: 'local' | 'ssh', server?: Server, envVars?: EnvVars) => void; scriptName: string; + script?: Script | null; } -export function ExecutionModeModal({ isOpen, onClose, onExecute, scriptName }: ExecutionModeModalProps) { +export function ExecutionModeModal({ isOpen, onClose, onExecute, scriptName, script }: ExecutionModeModalProps) { useRegisterModal(isOpen, { id: 'execution-mode-modal', allowEscape: true, onClose }); const [servers, setServers] = useState([]); const [loading, setLoading] = useState(false); const [error, setError] = useState(null); const [selectedServer, setSelectedServer] = useState(null); const [settingsModalOpen, setSettingsModalOpen] = useState(false); + const [configModalOpen, setConfigModalOpen] = useState(false); + const [configMode, setConfigMode] = useState<'default' | 'advanced'>('default'); useEffect(() => { if (isOpen) { @@ -64,19 +69,25 @@ export function ExecutionModeModal({ isOpen, onClose, onExecute, scriptName }: E } }; - const handleExecute = () => { + const handleConfigModeSelect = (mode: 'default' | 'advanced') => { if (!selectedServer) { - setError('Please select a server for SSH execution'); + setError('Please select a server first'); return; } - - onExecute('ssh', selectedServer); + setConfigMode(mode); + setConfigModalOpen(true); + }; + + const handleConfigConfirm = (envVars: EnvVars) => { + if (!selectedServer) return; + setConfigModalOpen(false); + onExecute('ssh', selectedServer, envVars); onClose(); }; - const handleServerSelect = (server: Server | null) => { setSelectedServer(server); + setError(null); // Clear error when server is selected }; @@ -164,6 +175,31 @@ export function ExecutionModeModal({ isOpen, onClose, onExecute, scriptName }: E + {/* Configuration Mode Selection */} +
+

+ Choose configuration mode: +

+
+ + +
+
+ {/* Action Buttons */}
-
) : ( @@ -204,6 +233,33 @@ export function ExecutionModeModal({ isOpen, onClose, onExecute, scriptName }: E /> + {/* Configuration Mode Selection - only show when server is selected */} + {selectedServer && ( +
+

+ Choose configuration mode: +

+
+ + +
+
+ )} + {/* Action Buttons */}
-
)} @@ -234,6 +281,16 @@ export function ExecutionModeModal({ isOpen, onClose, onExecute, scriptName }: E isOpen={settingsModalOpen} onClose={handleSettingsModalClose} /> + + {/* Configuration Modal */} + setConfigModalOpen(false)} + onConfirm={handleConfigConfirm} + script={script ?? null} + server={selectedServer} + mode={configMode} + /> ); } diff --git a/src/app/_components/ScriptDetailModal.tsx b/src/app/_components/ScriptDetailModal.tsx index efa91ea..07a703b 100644 --- a/src/app/_components/ScriptDetailModal.tsx +++ b/src/app/_components/ScriptDetailModal.tsx @@ -28,6 +28,7 @@ interface ScriptDetailModalProps { scriptName: string, mode?: "local" | "ssh", server?: Server, + envVars?: Record, ) => void; } @@ -183,7 +184,7 @@ export function ScriptDetailModal({ setExecutionModeOpen(true); }; - const handleExecuteScript = (mode: "local" | "ssh", server?: Server) => { + const handleExecuteScript = (mode: "local" | "ssh", server?: Server, envVars?: Record) => { if (!script || !onInstallScript) return; // Find the script path based on selected version type @@ -197,8 +198,8 @@ export function ScriptDetailModal({ const scriptPath = `scripts/${scriptMethod.script}`; const scriptName = script.name; - // Pass execution mode and server info to the parent - onInstallScript(scriptPath, scriptName, mode, server); + // Pass execution mode, server info, and envVars to the parent + onInstallScript(scriptPath, scriptName, mode, server, envVars); onClose(); // Close the modal when starting installation } @@ -935,6 +936,7 @@ export function ScriptDetailModal({ {script && ( setExecutionModeOpen(false)} onExecute={handleExecuteScript} diff --git a/src/app/_components/Terminal.tsx b/src/app/_components/Terminal.tsx index 0aa4d59..d86367e 100644 --- a/src/app/_components/Terminal.tsx +++ b/src/app/_components/Terminal.tsx @@ -21,6 +21,7 @@ interface TerminalProps { cloneCount?: number; hostnames?: string[]; containerType?: 'lxc' | 'vm'; + envVars?: Record; } interface TerminalMessage { @@ -29,7 +30,7 @@ interface TerminalMessage { timestamp: number; } -export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate = false, isShell = false, isBackup = false, isClone = false, containerId, storage, backupStorage, executionId: propExecutionId, cloneCount, hostnames, containerType }: TerminalProps) { +export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate = false, isShell = false, isBackup = false, isClone = false, containerId, storage, backupStorage, executionId: propExecutionId, cloneCount, hostnames, containerType, envVars }: TerminalProps) { const [isConnected, setIsConnected] = useState(false); const [isRunning, setIsRunning] = useState(false); const [isClient, setIsClient] = useState(false); @@ -360,7 +361,8 @@ export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate backupStorage, cloneCount, hostnames, - containerType + containerType, + envVars }; ws.send(JSON.stringify(message)); } @@ -400,7 +402,7 @@ export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate wsRef.current.close(); } }; - }, [scriptPath, mode, server, isUpdate, isShell, containerId, isMobile]); + }, [scriptPath, mode, server, isUpdate, isShell, containerId, isMobile, envVars]); const startScript = () => { if (wsRef.current && wsRef.current.readyState === WebSocket.OPEN && !isRunning) { @@ -417,6 +419,7 @@ export function Terminal({ scriptPath, onClose, mode = 'local', server, isUpdate executionId: newExecutionId, mode, server, + envVars, isUpdate, 
isShell, isBackup, diff --git a/src/app/page.tsx b/src/app/page.tsx index 10ee292..f6778c7 100644 --- a/src/app/page.tsx +++ b/src/app/page.tsx @@ -32,6 +32,7 @@ export default function Home() { name: string; mode?: "local" | "ssh"; server?: Server; + envVars?: Record; } | null>(null); const [activeTab, setActiveTab] = useState< "scripts" | "downloaded" | "installed" | "backups" @@ -209,8 +210,9 @@ export default function Home() { scriptName: string, mode?: "local" | "ssh", server?: Server, + envVars?: Record, ) => { - setRunningScript({ path: scriptPath, name: scriptName, mode, server }); + setRunningScript({ path: scriptPath, name: scriptName, mode, server, envVars }); // Scroll to terminal after a short delay to ensure it's rendered setTimeout(scrollToTerminal, 100); }; @@ -360,6 +362,7 @@ export default function Home() { onClose={handleCloseTerminal} mode={runningScript.mode} server={runningScript.server} + envVars={runningScript.envVars} /> )} diff --git a/src/server/api/routers/scripts.ts b/src/server/api/routers/scripts.ts index 5543d88..b0ee8a0 100644 --- a/src/server/api/routers/scripts.ts +++ b/src/server/api/routers/scripts.ts @@ -7,7 +7,10 @@ import { localScriptsService } from "~/server/services/localScripts"; import { scriptDownloaderService } from "~/server/services/scriptDownloader.js"; import { AutoSyncService } from "~/server/services/autoSyncService"; import { repositoryService } from "~/server/services/repositoryService"; +import { getStorageService } from "~/server/services/storageService"; +import { getDatabase } from "~/server/database-prisma"; import type { ScriptCard } from "~/types/script"; +import type { Server } from "~/types/server"; export const scriptsRouter = createTRPCRouter({ // Get all available scripts @@ -637,5 +640,194 @@ export const scriptsRouter = createTRPCRouter({ status: null }; } + }), + + // Get rootfs storages for a server (for container creation) + getRootfsStorages: publicProcedure + .input(z.object({ + serverId: z.number(), + forceRefresh: z.boolean().optional().default(false) + })) + .query(async ({ input }) => { + try { + const db = getDatabase(); + const server = await db.getServerById(input.serverId); + + if (!server) { + return { + success: false, + error: 'Server not found', + storages: [] + }; + } + + // Get server hostname to filter storages by node assignment + const { getSSHExecutionService } = await import('~/server/ssh-execution-service'); + const sshExecutionService = getSSHExecutionService(); + let serverHostname = ''; + try { + await new Promise((resolve, reject) => { + void sshExecutionService.executeCommand( + server as Server, + 'hostname', + (data: string) => { + serverHostname += data; + }, + (error: string) => { + reject(new Error(`Failed to get hostname: ${error}`)); + }, + (exitCode: number) => { + if (exitCode === 0) { + resolve(); + } else { + reject(new Error(`hostname command failed with exit code ${exitCode}`)); + } + } + ); + }); + } catch (error) { + console.error('Error getting server hostname:', error); + // Continue without filtering if hostname can't be retrieved + } + + const normalizedHostname = serverHostname.trim().toLowerCase(); + + const storageService = getStorageService(); + const allStorages = await storageService.getStorages(server as Server, input.forceRefresh); + + // Filter storages by node hostname matching and content type (rootdir for containers) + const rootfsStorages = allStorages.filter(storage => { + // Check content type - must have rootdir for containers + const hasRootdir = 
storage.content.includes('rootdir'); + if (!hasRootdir) { + return false; + } + + // If storage has no nodes specified, it's available on all nodes + if (!storage.nodes || storage.nodes.length === 0) { + return true; + } + + // If we couldn't get hostname, include all storages (fallback) + if (!normalizedHostname) { + return true; + } + + // Check if server hostname is in the nodes array (case-insensitive, trimmed) + const normalizedNodes = storage.nodes.map(node => node.trim().toLowerCase()); + return normalizedNodes.includes(normalizedHostname); + }); + + return { + success: true, + storages: rootfsStorages.map(s => ({ + name: s.name, + type: s.type, + content: s.content + })) + }; + } catch (error) { + console.error('Error fetching rootfs storages:', error); + // Return empty array on error (as per plan requirement) + return { + success: false, + error: error instanceof Error ? error.message : 'Failed to fetch storages', + storages: [] + }; + } + }), + + // Get template storages for a server (for template storage selection) + getTemplateStorages: publicProcedure + .input(z.object({ + serverId: z.number(), + forceRefresh: z.boolean().optional().default(false) + })) + .query(async ({ input }) => { + try { + const db = getDatabase(); + const server = await db.getServerById(input.serverId); + + if (!server) { + return { + success: false, + error: 'Server not found', + storages: [] + }; + } + + // Get server hostname to filter storages by node assignment + const { getSSHExecutionService } = await import('~/server/ssh-execution-service'); + const sshExecutionService = getSSHExecutionService(); + let serverHostname = ''; + try { + await new Promise((resolve, reject) => { + void sshExecutionService.executeCommand( + server as Server, + 'hostname', + (data: string) => { + serverHostname += data; + }, + (error: string) => { + reject(new Error(`Failed to get hostname: ${error}`)); + }, + (exitCode: number) => { + if (exitCode === 0) { + resolve(); + } else { + reject(new Error(`hostname command failed with exit code ${exitCode}`)); + } + } + ); + }); + } catch (error) { + console.error('Error getting server hostname:', error); + // Continue without filtering if hostname can't be retrieved + } + + const normalizedHostname = serverHostname.trim().toLowerCase(); + + const storageService = getStorageService(); + const allStorages = await storageService.getStorages(server as Server, input.forceRefresh); + + // Filter storages by node hostname matching and content type (vztmpl for templates) + const templateStorages = allStorages.filter(storage => { + // Check content type - must have vztmpl for templates + const hasVztmpl = storage.content.includes('vztmpl'); + if (!hasVztmpl) { + return false; + } + + // If storage has no nodes specified, it's available on all nodes + if (!storage.nodes || storage.nodes.length === 0) { + return true; + } + + // If we couldn't get hostname, include all storages (fallback) + if (!normalizedHostname) { + return true; + } + + // Check if server hostname is in the nodes array (case-insensitive, trimmed) + const normalizedNodes = storage.nodes.map(node => node.trim().toLowerCase()); + return normalizedNodes.includes(normalizedHostname); + }); + + return { + success: true, + storages: templateStorages.map(s => ({ + name: s.name, + type: s.type, + content: s.content + })) + }; + } catch (error) { + console.error('Error fetching template storages:', error); + return { + success: false, + error: error instanceof Error ? 
error.message : 'Failed to fetch storages', + storages: [] + }; + } }) }); diff --git a/src/server/ssh-execution-service.js b/src/server/ssh-execution-service.js index 0534b3e..4ed52b4 100644 --- a/src/server/ssh-execution-service.js +++ b/src/server/ssh-execution-service.js @@ -85,9 +85,10 @@ class SSHExecutionService { * @param {Function} onData - Callback for data output * @param {Function} onError - Callback for errors * @param {Function} onExit - Callback for process exit + * @param {Object} [envVars] - Optional environment variables to pass to the script * @returns {Promise} Process information */ - async executeScript(server, scriptPath, onData, onError, onExit) { + async executeScript(server, scriptPath, onData, onError, onExit, envVars = {}) { try { await this.transferScriptsFolder(server, onData, onError); @@ -98,8 +99,43 @@ class SSHExecutionService { // Build SSH command based on authentication type const { command, args } = this.buildSSHCommand(server); + // Format environment variables as var_name=value pairs + const envVarsString = Object.entries(envVars) + .map(([key, value]) => { + // Escape special characters in values + const escapedValue = String(value).replace(/'/g, "'\\''"); + return `${key}='${escapedValue}'`; + }) + .join(' '); + + // Build the command with environment variables + let scriptCommand = `cd /tmp/scripts && chmod +x ${relativeScriptPath} && export TERM=xterm-256color && export COLUMNS=120 && export LINES=30 && export COLORTERM=truecolor && export FORCE_COLOR=1 && export NO_COLOR=0 && export CLICOLOR=1 && export CLICOLOR_FORCE=1`; + + if (envVarsString) { + scriptCommand += ` && ${envVarsString} bash ${relativeScriptPath}`; + } else { + scriptCommand += ` && bash ${relativeScriptPath}`; + } + + // Log the full command that will be executed + console.log('='.repeat(80)); + console.log(`[SSH Execution] Executing on host: ${server.ip} (${server.name || 'Unnamed'})`); + console.log(`[SSH Execution] Script path: ${scriptPath}`); + console.log(`[SSH Execution] Relative script path: ${relativeScriptPath}`); + if (Object.keys(envVars).length > 0) { + console.log(`[SSH Execution] Environment variables (${Object.keys(envVars).length} vars):`); + Object.entries(envVars).forEach(([key, value]) => { + console.log(` ${key}=${String(value)}`); + }); + } else { + console.log(`[SSH Execution] No environment variables provided`); + } + console.log(`[SSH Execution] Full command:`); + console.log(scriptCommand); + console.log('='.repeat(80)); + // Add the script execution command to the args - args.push(`cd /tmp/scripts && chmod +x ${relativeScriptPath} && export TERM=xterm-256color && export COLUMNS=120 && export LINES=30 && export COLORTERM=truecolor && export FORCE_COLOR=1 && export NO_COLOR=0 && export CLICOLOR=1 && export CLICOLOR_FORCE=1 && bash ${relativeScriptPath}`); + args.push(scriptCommand); // Use ptySpawn for proper terminal emulation and color support const sshCommand = ptySpawn(command, args, { From 7b8c1ebdf1f0958be1507d2b26b814064e158f80 Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner Date: Fri, 5 Dec 2025 15:53:50 +0100 Subject: [PATCH 2/3] feat: Add default and advanced install method selection - Add ConfigurationModal component for selecting default or advanced installation mode - Default mode: Uses predefined defaults with minimal user input (hostname from slug, vmbr0, dhcp, etc.) 
- Advanced mode: Full configuration modal with all environment variables customizable - Add support for IPv4 CIDR input when network mode is 'static' - Add support for IPv6 static address input when IPv6 method is 'static' - Implement password formatting as '-password ' for build.func compatibility - Auto-enable SSH when password or SSH keys are provided - Add storage selection dropdowns filtered by server node assignment - Pass environment variables through entire execution stack (frontend -> WebSocket -> SSH/local execution) - Add mode environment variable (always set to 'default' for script execution) - Update ExecutionModeModal to show 'Advanced (Beta)' option --- server.js | 5 +- src/app/_components/ConfigurationModal.tsx | 74 +++++++++++----------- src/app/_components/ExecutionModeModal.tsx | 2 +- 3 files changed, 42 insertions(+), 39 deletions(-) diff --git a/server.js b/server.js index e304fb8..864acf7 100644 --- a/server.js +++ b/server.js @@ -82,6 +82,7 @@ const handle = app.getRequestHandler(); * @property {number} [cloneCount] * @property {string[]} [hostnames] * @property {'lxc'|'vm'} [containerType] + * @property {Record} [envVars] */ class ScriptExecutionHandler { @@ -421,7 +422,9 @@ class ScriptExecutionHandler { // Add envVars to environment if (envVars && typeof envVars === 'object') { for (const [key, value] of Object.entries(envVars)) { - envWithVars[key] = String(value); + /** @type {Record} */ + const envRecord = envWithVars; + envRecord[key] = String(value); } } diff --git a/src/app/_components/ConfigurationModal.tsx b/src/app/_components/ConfigurationModal.tsx index c8197a4..34cf120 100644 --- a/src/app/_components/ConfigurationModal.tsx +++ b/src/app/_components/ConfigurationModal.tsx @@ -75,7 +75,7 @@ export function ConfigurationModal({ var_cpu: resources?.cpu ?? 1, var_ram: resources?.ram ?? 1024, var_disk: resources?.hdd ?? 4, - var_unprivileged: resources?.privileged === false ? 1 : (resources?.privileged === true ? 0 : 1), + var_unprivileged: script?.privileged === false ? 1 : (script?.privileged === true ? 0 : 1), // Network defaults var_net: 'dhcp', @@ -196,19 +196,19 @@ export function ConfigurationModal({ newErrors.var_ipv6_static = 'Invalid IPv6 address'; } } - if (!validatePositiveInt(advancedVars.var_cpu)) { + if (!validatePositiveInt(advancedVars.var_cpu as string | number | undefined)) { newErrors.var_cpu = 'Must be a positive integer'; } - if (!validatePositiveInt(advancedVars.var_ram)) { + if (!validatePositiveInt(advancedVars.var_ram as string | number | undefined)) { newErrors.var_ram = 'Must be a positive integer'; } - if (!validatePositiveInt(advancedVars.var_disk)) { + if (!validatePositiveInt(advancedVars.var_disk as string | number | undefined)) { newErrors.var_disk = 'Must be a positive integer'; } - if (advancedVars.var_mtu && !validatePositiveInt(advancedVars.var_mtu)) { + if (advancedVars.var_mtu && !validatePositiveInt(advancedVars.var_mtu as string | number | undefined)) { newErrors.var_mtu = 'Must be a positive integer'; } - if (advancedVars.var_vlan && !validatePositiveInt(advancedVars.var_vlan)) { + if (advancedVars.var_vlan && !validatePositiveInt(advancedVars.var_vlan as string | number | undefined)) { newErrors.var_vlan = 'Must be a positive integer'; } } @@ -237,7 +237,7 @@ export function ConfigurationModal({ var_cpu: resources?.cpu ?? 1, var_ram: resources?.ram ?? 1024, var_disk: resources?.hdd ?? 4, - var_unprivileged: resources?.privileged === false ? 1 : (resources?.privileged === true ? 
0 : 1), + var_unprivileged: script?.privileged === false ? 1 : (script?.privileged === true ? 0 : 1), }; if (containerStorage) { @@ -385,7 +385,7 @@ export function ConfigurationModal({ updateAdvancedVar('var_cpu', parseInt(e.target.value) || 1)} className={errors.var_cpu ? 'border-destructive' : ''} /> @@ -400,7 +400,7 @@ export function ConfigurationModal({ updateAdvancedVar('var_ram', parseInt(e.target.value) || 1024)} className={errors.var_ram ? 'border-destructive' : ''} /> @@ -415,7 +415,7 @@ export function ConfigurationModal({ updateAdvancedVar('var_disk', parseInt(e.target.value) || 4)} className={errors.var_disk ? 'border-destructive' : ''} /> @@ -428,7 +428,7 @@ export function ConfigurationModal({ Unprivileged { if (e.target.value === 'static') { updateAdvancedVar('var_net', 'static'); @@ -492,7 +492,7 @@ export function ConfigurationModal({ updateAdvancedVar('var_brg', e.target.value)} placeholder="vmbr0" /> @@ -503,7 +503,7 @@ export function ConfigurationModal({ updateAdvancedVar('var_gateway', e.target.value)} placeholder="Auto" className={errors.var_gateway ? 'border-destructive' : ''} @@ -517,7 +517,7 @@ export function ConfigurationModal({ IPv6 Method updateAdvancedVar('var_ipv6_static', e.target.value)} placeholder="2001:db8::1/64" className={errors.var_ipv6_static ? 'border-destructive' : ''} @@ -558,7 +558,7 @@ export function ConfigurationModal({ updateAdvancedVar('var_vlan', e.target.value ? parseInt(e.target.value) : '')} placeholder="None" className={errors.var_vlan ? 'border-destructive' : ''} @@ -574,7 +574,7 @@ export function ConfigurationModal({ updateAdvancedVar('var_mtu', e.target.value ? parseInt(e.target.value) : 1500)} placeholder="1500" className={errors.var_mtu ? 'border-destructive' : ''} @@ -589,7 +589,7 @@ export function ConfigurationModal({ updateAdvancedVar('var_mac', e.target.value)} placeholder="Auto" className={errors.var_mac ? 'border-destructive' : ''} @@ -604,7 +604,7 @@ export function ConfigurationModal({ updateAdvancedVar('var_ns', e.target.value)} placeholder="Auto" className={errors.var_ns ? 'border-destructive' : ''} @@ -626,7 +626,7 @@ export function ConfigurationModal({ updateAdvancedVar('var_hostname', e.target.value)} placeholder={slug} /> @@ -637,7 +637,7 @@ export function ConfigurationModal({ updateAdvancedVar('var_pw', e.target.value)} placeholder="Random (empty = auto-login)" /> @@ -648,7 +648,7 @@ export function ConfigurationModal({ updateAdvancedVar('var_tags', e.target.value)} placeholder="community-script" /> @@ -665,7 +665,7 @@ export function ConfigurationModal({ Enable SSH updateAdvancedVar('var_ssh_authorized_key', e.target.value)} placeholder="ssh-rsa AAAA..." 
/> @@ -696,7 +696,7 @@ export function ConfigurationModal({ Nesting (Docker) updateAdvancedVar('var_fuse', parseInt(e.target.value))} className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none" > @@ -722,7 +722,7 @@ export function ConfigurationModal({ Keyctl updateAdvancedVar('var_mknod', parseInt(e.target.value))} className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none" > @@ -749,7 +749,7 @@ export function ConfigurationModal({ updateAdvancedVar('var_mount_fs', e.target.value)} placeholder="nfs,cifs" /> @@ -759,7 +759,7 @@ export function ConfigurationModal({ Protection updateAdvancedVar('var_timezone', e.target.value)} placeholder="System" /> @@ -790,7 +790,7 @@ export function ConfigurationModal({ Verbose updateAdvancedVar('var_apt_cacher', e.target.value)} className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none" > @@ -817,7 +817,7 @@ export function ConfigurationModal({ updateAdvancedVar('var_apt_cacher_ip', e.target.value)} placeholder="192.168.1.10" className={errors.var_apt_cacher_ip ? 'border-destructive' : ''} @@ -838,7 +838,7 @@ export function ConfigurationModal({ Container Storage updateAdvancedVar('var_template_storage', e.target.value)} className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none" > diff --git a/src/app/_components/ExecutionModeModal.tsx b/src/app/_components/ExecutionModeModal.tsx index 5cca184..fce78ca 100644 --- a/src/app/_components/ExecutionModeModal.tsx +++ b/src/app/_components/ExecutionModeModal.tsx @@ -195,7 +195,7 @@ export function ExecutionModeModal({ isOpen, onClose, onExecute, scriptName, scr size="default" className="flex-1" > - Advanced + Advanced (Beta) From 69a5ac3a566f2890abf9af5de8dc4016e697ee92 Mon Sep 17 00:00:00 2001 From: Michel Roegl-Brunner Date: Fri, 5 Dec 2025 16:03:28 +0100 Subject: [PATCH 3/3] Nump core --- scripts/core/build.func | 91 ++++++++++++++++----------------- scripts/core/error-handler.func | 55 +++++++++++--------- scripts/core/tools.func | 15 ++++-- 3 files changed, 86 insertions(+), 75 deletions(-) diff --git a/scripts/core/build.func b/scripts/core/build.func index 2d81be9..1b11f66 100755 --- a/scripts/core/build.func +++ b/scripts/core/build.func @@ -2441,10 +2441,15 @@ build_container() { if echo "$pci_vga_info" | grep -q "\[10de:"; then msg_custom "🎮" "${GN}" "Detected NVIDIA GPU" - # Simple passthrough - just bind /dev/nvidia* devices if they exist - for d in /dev/nvidia* /dev/nvidiactl /dev/nvidia-modeset /dev/nvidia-uvm /dev/nvidia-uvm-tools; do - [[ -e "$d" ]] && NVIDIA_DEVICES+=("$d") + for d in /dev/nvidia*; do + [[ -c "$d" ]] && NVIDIA_DEVICES+=("$d") done + # Also check for devices inside /dev/nvidia-caps/ directory + if [[ -d /dev/nvidia-caps ]]; then + for d in /dev/nvidia-caps/*; do + [[ -c "$d" ]] && NVIDIA_DEVICES+=("$d") + done + fi if [[ ${#NVIDIA_DEVICES[@]} -gt 0 ]]; then msg_custom "🎮" "${GN}" "Found ${#NVIDIA_DEVICES[@]} NVIDIA device(s) for passthrough" @@ -2954,15 +2959,14 @@ fix_gpu_gids() { # For privileged containers: also fix permissions inside container if [[ "$CT_TYPE" == "0" ]]; then - pct exec "$CTID" -- bash -c " + pct exec "$CTID" -- sh -c " if [ -d /dev/dri ]; then for dev in /dev/dri/*; do if [ -e \"\$dev\" ]; then - if [[ \"\$dev\" =~ 
renderD ]]; then - chgrp ${render_gid} \"\$dev\" 2>/dev/null || true - else - chgrp ${video_gid} \"\$dev\" 2>/dev/null || true - fi + case \"\$dev\" in + *renderD*) chgrp ${render_gid} \"\$dev\" 2>/dev/null || true ;; + *) chgrp ${video_gid} \"\$dev\" 2>/dev/null || true ;; + esac chmod 660 \"\$dev\" 2>/dev/null || true fi done @@ -3211,42 +3215,26 @@ create_lxc_container() { fi fi - # Validate content types - msg_info "Validating content types of storage '$CONTAINER_STORAGE'" - STORAGE_CONTENT=$(grep -A4 -E "^(zfspool|dir|lvmthin|lvm|linstor): $CONTAINER_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs) - msg_debug "Storage '$CONTAINER_STORAGE' has content types: $STORAGE_CONTENT" + msg_info "Validating storage '$CONTAINER_STORAGE'" + STORAGE_TYPE=$(grep -E "^[^:]+: $CONTAINER_STORAGE$" /etc/pve/storage.cfg | cut -d: -f1 | head -1) - # Check storage type for special handling - STORAGE_TYPE=$(grep -E "^[^:]+: $CONTAINER_STORAGE$" /etc/pve/storage.cfg | cut -d: -f1) - if [[ "$STORAGE_TYPE" == "linstor" ]]; then - msg_info "Detected LINSTOR storage - verifying cluster connectivity" - if ! pvesm status -storage "$CONTAINER_STORAGE" &>/dev/null; then - msg_error "LINSTOR storage '$CONTAINER_STORAGE' not accessible. Check LINSTOR cluster health." - exit 217 - fi - fi - - grep -qw "rootdir" <<<"$STORAGE_CONTENT" || { - msg_error "Storage '$CONTAINER_STORAGE' does not support 'rootdir'. Cannot create LXC." - exit 217 - } - $STD msg_ok "Storage '$CONTAINER_STORAGE' supports 'rootdir'" - - msg_info "Validating content types of template storage '$TEMPLATE_STORAGE'" - TEMPLATE_CONTENT=$(grep -A4 -E "^[^:]+: $TEMPLATE_STORAGE" /etc/pve/storage.cfg | grep content | awk '{$1=""; print $0}' | xargs) - msg_debug "Template storage '$TEMPLATE_STORAGE' has content types: $TEMPLATE_CONTENT" - - # Check if template storage is LINSTOR (may need special handling) - TEMPLATE_TYPE=$(grep -E "^[^:]+: $TEMPLATE_STORAGE$" /etc/pve/storage.cfg | cut -d: -f1) - if [[ "$TEMPLATE_TYPE" == "linstor" ]]; then - msg_info "Template storage uses LINSTOR - ensuring resource availability" - fi - - if ! grep -qw "vztmpl" <<<"$TEMPLATE_CONTENT"; then - msg_warn "Template storage '$TEMPLATE_STORAGE' does not declare 'vztmpl'. This may cause pct create to fail." - else - $STD msg_ok "Template storage '$TEMPLATE_STORAGE' supports 'vztmpl'" + case "$STORAGE_TYPE" in + iscsidirect) exit 212 ;; + iscsi | zfs) exit 213 ;; + cephfs) exit 219 ;; + pbs) exit 224 ;; + linstor | rbd | nfs | cifs) + pvesm status -storage "$CONTAINER_STORAGE" &>/dev/null || exit 217 + ;; + esac + + pvesm status -content rootdir 2>/dev/null | awk 'NR>1{print $1}' | grep -qx "$CONTAINER_STORAGE" || exit 213 + msg_ok "Storage '$CONTAINER_STORAGE' ($STORAGE_TYPE) validated" + + if ! pvesm status -content vztmpl 2>/dev/null | awk 'NR>1{print $1}' | grep -qx "$TEMPLATE_STORAGE"; then + msg_warn "Template storage '$TEMPLATE_STORAGE' may not support 'vztmpl'" fi + msg_ok "Template storage '$TEMPLATE_STORAGE' validated" # Free space check STORAGE_FREE=$(pvesm status | awk -v s="$CONTAINER_STORAGE" '$1 == s { print $6 }') @@ -3261,7 +3249,7 @@ create_lxc_container() { msg_info "Checking cluster quorum" if !
pvecm status | awk -F':' '/^Quorate/ { exit ($2 ~ /Yes/) ? 0 : 1 }'; then msg_error "Cluster is not quorate. Start all nodes or configure quorum device (QDevice)." - exit 201 + exit 210 fi msg_ok "Cluster is quorate" fi @@ -3417,6 +3405,15 @@ create_lxc_container() { ONLINE_TEMPLATE="" [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]] && ONLINE_TEMPLATE="${ONLINE_TEMPLATES[-1]}" + if [[ ${#ONLINE_TEMPLATES[@]} -gt 0 ]]; then + count=0 + for idx in "${!ONLINE_TEMPLATES[@]}"; do + ((count++)) + [[ $count -ge 3 ]] && break + done + ONLINE_TEMPLATE="${ONLINE_TEMPLATES[$idx]}" + fi + if [[ ${#LOCAL_TEMPLATES[@]} -gt 0 ]]; then TEMPLATE="${LOCAL_TEMPLATES[-1]}" TEMPLATE_SOURCE="local" @@ -3554,7 +3551,7 @@ create_lxc_container() { } flock -w 60 9 || { msg_error "Timeout while waiting for template lock." - exit 202 + exit 211 } LOGFILE="/tmp/pct_create_${CTID}_$(date +%Y%m%d_%H%M%S)_${SESSION_ID}.log" @@ -3604,11 +3601,11 @@ create_lxc_container() { 2) echo "Upgrade was declined. Please update and re-run: apt update && apt install --only-upgrade pve-container lxc-pve" - exit 213 + exit 231 ;; 3) echo "Upgrade and/or retry failed. Please inspect: $LOGFILE" - exit 213 + exit 231 ;; esac else diff --git a/scripts/core/error-handler.func b/scripts/core/error-handler.func index 9ad4f6d..e227c39 100644 --- a/scripts/core/error-handler.func +++ b/scripts/core/error-handler.func @@ -79,38 +79,43 @@ explain_exit_code() { 234) echo "PostgreSQL: Fatal error in query / syntax" ;; # --- MySQL / MariaDB --- - 260) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;; - 261) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;; - 262) echo "MySQL/MariaDB: Database does not exist" ;; - 263) echo "MySQL/MariaDB: Fatal error in query / syntax" ;; + 241) echo "MySQL/MariaDB: Connection failed (server not running / wrong socket)" ;; + 242) echo "MySQL/MariaDB: Authentication failed (bad user/password)" ;; + 243) echo "MySQL/MariaDB: Database does not exist" ;; + 244) echo "MySQL/MariaDB: Fatal error in query / syntax" ;; # --- MongoDB --- 251) echo "MongoDB: Connection failed (server not running)" ;; 252) echo "MongoDB: Authentication failed (bad user/password)" ;; 253) echo "MongoDB: Database not found" ;; + 254) echo "MongoDB: Fatal query error" ;; # --- Proxmox Custom Codes --- - 200) echo "Custom: Failed to create lock file" ;; - 201) echo "Custom: Cluster not quorate" ;; - 202) echo "Custom: Timeout waiting for template lock (concurrent download in progress)" ;; - 203) echo "Custom: Missing CTID variable" ;; - 204) echo "Custom: Missing PCT_OSTYPE variable" ;; - 205) echo "Custom: Invalid CTID (<100)" ;; - 206) echo "Custom: CTID already in use (check 'pct list' and /etc/pve/lxc/)" ;; - 207) echo "Custom: Password contains unescaped special characters (-, /, \\, *, etc.)" ;; - 208) echo "Custom: Invalid configuration (DNS/MAC/Network format error)" ;; - 209) echo "Custom: Container creation failed (check logs for pct create output)" ;; - 213) echo "Custom: LXC stack upgrade/retry failed (outdated pve-container - check https://github.com/community-scripts/ProxmoxVE/discussions/8126)" ;; - 214) echo "Custom: Not enough storage space" ;; - 215) echo "Custom: Container created but not listed (ghost state - check /etc/pve/lxc/)" ;; - 216) echo "Custom: RootFS entry missing in config (incomplete creation)" ;; - 217) echo "Custom: Storage does not support rootdir (check storage capabilities)" ;; - 218) echo "Custom: Template file corrupted or incomplete download (size <1MB or invalid archive)" 
;; - 220) echo "Custom: Unable to resolve template path" ;; - 221) echo "Custom: Template file exists but not readable (check file permissions)" ;; - 222) echo "Custom: Template download failed after 3 attempts (network/storage issue)" ;; - 223) echo "Custom: Template not available after download (storage sync issue)" ;; - 225) echo "Custom: No template available for OS/Version (check 'pveam available')" ;; + 200) echo "Proxmox: Failed to create lock file" ;; + 203) echo "Proxmox: Missing CTID variable" ;; + 204) echo "Proxmox: Missing PCT_OSTYPE variable" ;; + 205) echo "Proxmox: Invalid CTID (<100)" ;; + 206) echo "Proxmox: CTID already in use" ;; + 207) echo "Proxmox: Password contains unescaped special characters" ;; + 208) echo "Proxmox: Invalid configuration (DNS/MAC/Network format)" ;; + 209) echo "Proxmox: Container creation failed" ;; + 210) echo "Proxmox: Cluster not quorate" ;; + 211) echo "Proxmox: Timeout waiting for template lock" ;; + 212) echo "Proxmox: Storage type 'iscsidirect' does not support containers (VMs only)" ;; + 213) echo "Proxmox: Storage type does not support 'rootdir' content" ;; + 214) echo "Proxmox: Not enough storage space" ;; + 215) echo "Proxmox: Container created but not listed (ghost state)" ;; + 216) echo "Proxmox: RootFS entry missing in config" ;; + 217) echo "Proxmox: Storage not accessible" ;; + 219) echo "Proxmox: CephFS does not support containers - use RBD" ;; + 224) echo "Proxmox: PBS storage is for backups only" ;; + 218) echo "Proxmox: Template file corrupted or incomplete" ;; + 220) echo "Proxmox: Unable to resolve template path" ;; + 221) echo "Proxmox: Template file not readable" ;; + 222) echo "Proxmox: Template download failed" ;; + 223) echo "Proxmox: Template not available after download" ;; + 225) echo "Proxmox: No template available for OS/Version" ;; + 231) echo "Proxmox: LXC stack upgrade failed" ;; # --- Default --- *) echo "Unknown error" ;; diff --git a/scripts/core/tools.func b/scripts/core/tools.func index 1c73fd6..aee7b40 100644 --- a/scripts/core/tools.func +++ b/scripts/core/tools.func @@ -192,6 +192,8 @@ install_packages_with_retry() { if [[ $retry -le $max_retries ]]; then msg_warn "Package installation failed, retrying ($retry/$max_retries)..." sleep 2 + # Fix any interrupted dpkg operations before retry + $STD dpkg --configure -a 2>/dev/null || true $STD apt update 2>/dev/null || true fi done @@ -217,6 +219,8 @@ upgrade_packages_with_retry() { if [[ $retry -le $max_retries ]]; then msg_warn "Package upgrade failed, retrying ($retry/$max_retries)..." sleep 2 + # Fix any interrupted dpkg operations before retry + $STD dpkg --configure -a 2>/dev/null || true $STD apt update 2>/dev/null || true fi done @@ -1182,6 +1186,11 @@ cleanup_orphaned_sources() { # This should be called at the start of any setup function # ------------------------------------------------------------------------------ ensure_apt_working() { + # Fix interrupted dpkg operations first + # This can happen if a previous installation was interrupted (e.g., by script error) + if [[ -f /var/lib/dpkg/lock-frontend ]] || dpkg --audit 2>&1 | grep -q "interrupted"; then + $STD dpkg --configure -a 2>/dev/null || true + fi # Clean up orphaned sources first cleanup_orphaned_sources @@ -2944,12 +2953,12 @@ setup_mariadb() { # Resolve "latest" to actual version if [[ "$MARIADB_VERSION" == "latest" ]]; then if ! 
curl -fsI --max-time 10 http://mirror.mariadb.org/repo/ >/dev/null 2>&1; then - msg_warn "MariaDB mirror not reachable - trying mariadb_repo_setup fallback" + msg_warn "MariaDB mirror not reachable - trying mariadb_repo_setup fallback" # Try using official mariadb_repo_setup script as fallback if curl -fsSL --max-time 15 https://r.mariadb.com/downloads/mariadb_repo_setup 2>/dev/null | bash -s -- --skip-verify >/dev/null 2>&1; then msg_ok "MariaDB repository configured via mariadb_repo_setup" # Extract version from configured repo - MARIADB_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.list 2>/dev/null | head -n1 || echo "12.2") + MARIADB_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.list 2>/dev/null | head -n1 || echo "12.2") else msg_warn "mariadb_repo_setup failed - using hardcoded fallback version" MARIADB_VERSION="12.2" @@ -2963,7 +2972,7 @@ setup_mariadb() { head -n1 || echo "") if [[ -z "$MARIADB_VERSION" ]]; then - msg_warn "Could not parse latest GA MariaDB version from mirror - trying mariadb_repo_setup" + msg_warn "Could not parse latest GA MariaDB version from mirror - trying mariadb_repo_setup" if curl -fsSL --max-time 15 https://r.mariadb.com/downloads/mariadb_repo_setup 2>/dev/null | bash -s -- --skip-verify >/dev/null 2>&1; then msg_ok "MariaDB repository configured via mariadb_repo_setup" MARIADB_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.list 2>/dev/null | head -n1 || echo "12.2")
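
Note on the environment-variable plumbing in this series: the patched executeScript() in src/server/ssh-execution-service.js serializes each envVars entry into a single-quoted KEY='value' assignment, escapes embedded single quotes, and places the resulting prefix directly before the bash invocation, so the var_* values reach build.func for that process only. The following is a minimal TypeScript sketch of that serialization; the helper names (formatEnvVars, buildScriptCommand) are illustrative and not part of the repository, and the base command here omits the longer export chain used in the actual service.

type EnvVars = Record<string, string | number | boolean>;

// Escape each value for a single-quoted shell context and join as KEY='value' pairs.
function formatEnvVars(envVars: EnvVars): string {
  return Object.entries(envVars)
    .map(([key, value]) => `${key}='${String(value).replace(/'/g, "'\\''")}'`)
    .join(' ');
}

// Build the remote command: env assignments prefixed to `bash <script>` apply only
// to that process, which is how var_cpu, var_hostname, etc. reach build.func.
function buildScriptCommand(relativeScriptPath: string, envVars: EnvVars): string {
  const base = `cd /tmp/scripts && chmod +x ${relativeScriptPath} && export TERM=xterm-256color`;
  const prefix = formatEnvVars(envVars);
  return prefix
    ? `${base} && ${prefix} bash ${relativeScriptPath}`
    : `${base} && bash ${relativeScriptPath}`;
}

// buildScriptCommand('ct/debian.sh', { var_cpu: 2, var_hostname: 'debian' })
//   -> "cd /tmp/scripts && chmod +x ct/debian.sh && export TERM=xterm-256color && var_cpu='2' var_hostname='debian' bash ct/debian.sh"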
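
Note on the password handling in ConfigurationModal.handleConfirm(): build.func expects the container password inside PCT_OPTIONS in "-password <value>" form, and an empty var_pw means auto-login, so the modal strips any existing prefix before re-adding it. Below is a hedged TypeScript sketch of that normalization; the helper name is illustrative. It slices by the prefix length ("-password " is ten characters), whereas the patch uses substring(11), which appears to skip one extra character of the password.

const PASSWORD_PREFIX = '-password ';

// Normalize var_pw for build.func: empty means auto-login, otherwise "-password <value>".
function normalizePassword(rawPassword: string | undefined): string {
  if (!rawPassword || rawPassword.trim() === '') return ''; // random password / auto-login
  // Strip an existing prefix so re-confirming the modal cannot double-format it.
  const clean = rawPassword.startsWith(PASSWORD_PREFIX)
    ? rawPassword.slice(PASSWORD_PREFIX.length)
    : rawPassword;
  return `${PASSWORD_PREFIX}${clean}`;
}

// normalizePassword('secret')           -> "-password secret"
// normalizePassword('-password secret') -> "-password secret"
// normalizePassword('')                 -> ""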