Compare commits


1 Commit

Author SHA1 Message Date
CanbiZ
60f81fabc3 Improve server startup logging and update script fetching
Adds success and error logging to the Next.js app preparation process in server.js, including guidance for missing production builds. The versionRouter now always fetches the latest update.sh from GitHub before running updates, logging the outcome and falling back to the local script if the fetch fails.
2026-01-13 18:00:53 +01:00
14 changed files with 900 additions and 2128 deletions
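
The fetch-then-fallback behavior described in the commit message can be sketched as follows. This is a minimal shell illustration only; the URL and paths are assumptions, and the real logic lives in server.js and versionRouter:

# Illustrative sketch: try the remote update script first, fall back to the local copy.
REMOTE_URL="https://raw.githubusercontent.com/<org>/<repo>/main/update.sh"   # assumed location of update.sh
LOCAL_SCRIPT="/opt/app/update.sh"                                            # assumed local fallback copy
TMP_SCRIPT="$(mktemp)"

if curl -fsSL "$REMOTE_URL" -o "$TMP_SCRIPT"; then
    echo "Fetched latest update.sh from GitHub"
    sh "$TMP_SCRIPT"
else
    echo "Failed to fetch update.sh, falling back to local script" >&2
    sh "$LOCAL_SCRIPT"
fi
rm -f "$TMP_SCRIPT"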

View File

@@ -4,7 +4,7 @@
## 🔗 Related PR / Issue
Fixes: #
Link: #
## ✅ Prerequisites (**X** in brackets)

View File

@@ -1 +1 @@
0.5.5
0.5.4

190 package-lock.json (generated)
View File

@@ -15,7 +15,7 @@
"@radix-ui/react-slot": "^1.2.4",
"@t3-oss/env-nextjs": "^0.13.10",
"@tailwindcss/typography": "^0.5.19",
"@tanstack/react-query": "^5.90.18",
"@tanstack/react-query": "^5.90.16",
"@trpc/client": "^11.8.1",
"@trpc/react-query": "^11.8.1",
"@trpc/server": "^11.8.1",
@@ -33,7 +33,7 @@
"dotenv": "^17.2.3",
"jsonwebtoken": "^9.0.3",
"lucide-react": "^0.562.0",
"next": "^16.1.3",
"next": "^16.1.1",
"node-cron": "^4.2.1",
"node-pty": "^1.1.0",
"react": "^19.2.3",
@@ -57,19 +57,19 @@
"@types/bcryptjs": "^3.0.0",
"@types/better-sqlite3": "^7.6.13",
"@types/jsonwebtoken": "^9.0.10",
"@types/node": "^24.10.9",
"@types/node": "^24.10.4",
"@types/node-cron": "^3.0.11",
"@types/react": "^19.2.8",
"@types/react-dom": "^19.2.3",
"@vitejs/plugin-react": "^5.1.2",
"@vitest/coverage-v8": "^4.0.17",
"@vitest/ui": "^4.0.17",
"baseline-browser-mapping": "^2.9.15",
"baseline-browser-mapping": "^2.9.14",
"eslint": "^9.39.2",
"eslint-config-next": "^16.1.3",
"eslint-config-next": "^16.1.1",
"jsdom": "^27.4.0",
"postcss": "^8.5.6",
"prettier": "^3.8.0",
"prettier": "^3.7.4",
"prettier-plugin-tailwindcss": "^0.7.2",
"prisma": "^7.2.0",
"tailwindcss": "^4.1.18",
@@ -195,7 +195,6 @@
"integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@babel/code-frame": "^7.27.1",
"@babel/generator": "^7.28.5",
@@ -311,7 +310,7 @@
"version": "7.27.1",
"resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
"integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==",
"dev": true,
"devOptional": true,
"license": "MIT",
"engines": {
"node": ">=6.9.0"
@@ -321,7 +320,7 @@
"version": "7.28.5",
"resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
"integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
"dev": true,
"devOptional": true,
"license": "MIT",
"engines": {
"node": ">=6.9.0"
@@ -355,7 +354,7 @@
"version": "7.28.5",
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz",
"integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==",
"dev": true,
"devOptional": true,
"license": "MIT",
"dependencies": {
"@babel/types": "^7.28.5"
@@ -446,7 +445,7 @@
"version": "7.28.5",
"resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz",
"integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==",
"dev": true,
"devOptional": true,
"license": "MIT",
"dependencies": {
"@babel/helper-string-parser": "^7.27.1",
@@ -591,7 +590,6 @@
}
],
"license": "MIT",
"peer": true,
"engines": {
"node": ">=18"
},
@@ -635,7 +633,6 @@
}
],
"license": "MIT",
"peer": true,
"engines": {
"node": ">=18"
}
@@ -645,8 +642,7 @@
"resolved": "https://registry.npmjs.org/@electric-sql/pglite/-/pglite-0.3.2.tgz",
"integrity": "sha512-zfWWa+V2ViDCY/cmUfRqeWY1yLto+EpxjXnZzenB1TyxsTiXaTWeZFIZw6mac52BsuQm0RjCnisjBtdBaXOI6w==",
"devOptional": true,
"license": "Apache-2.0",
"peer": true
"license": "Apache-2.0"
},
"node_modules/@electric-sql/pglite-socket": {
"version": "0.0.6",
@@ -1955,15 +1951,15 @@
}
},
"node_modules/@next/env": {
"version": "16.1.3",
"resolved": "https://registry.npmjs.org/@next/env/-/env-16.1.3.tgz",
"integrity": "sha512-BLP14oBOvZWXgfdJf9ao+VD8O30uE+x7PaV++QtACLX329WcRSJRO5YJ+Bcvu0Q+c/lei41TjSiFf6pXqnpbQA==",
"version": "16.1.1",
"resolved": "https://registry.npmjs.org/@next/env/-/env-16.1.1.tgz",
"integrity": "sha512-3oxyM97Sr2PqiVyMyrZUtrtM3jqqFxOQJVuKclDsgj/L728iZt/GyslkN4NwarledZATCenbk4Offjk1hQmaAA==",
"license": "MIT"
},
"node_modules/@next/eslint-plugin-next": {
"version": "16.1.3",
"resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-16.1.3.tgz",
"integrity": "sha512-MqBh3ltFAy0AZCRFVdjVjjeV7nEszJDaVIpDAnkQcn8U9ib6OEwkSnuK6xdYxMGPhV/Y4IlY6RbDipPOpLfBqQ==",
"version": "16.1.1",
"resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-16.1.1.tgz",
"integrity": "sha512-Ovb/6TuLKbE1UiPcg0p39Ke3puyTCIKN9hGbNItmpQsp+WX3qrjO3WaMVSi6JHr9X1NrmthqIguVHodMJbh/dw==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -1971,9 +1967,9 @@
}
},
"node_modules/@next/swc-darwin-arm64": {
"version": "16.1.3",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-16.1.3.tgz",
"integrity": "sha512-CpOD3lmig6VflihVoGxiR/l5Jkjfi4uLaOR4ziriMv0YMDoF6cclI+p5t2nstM8TmaFiY6PCTBgRWB57/+LiBA==",
"version": "16.1.1",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-16.1.1.tgz",
"integrity": "sha512-JS3m42ifsVSJjSTzh27nW+Igfha3NdBOFScr9C80hHGrWx55pTrVL23RJbqir7k7/15SKlrLHhh/MQzqBBYrQA==",
"cpu": [
"arm64"
],
@@ -1987,9 +1983,9 @@
}
},
"node_modules/@next/swc-darwin-x64": {
"version": "16.1.3",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-16.1.3.tgz",
"integrity": "sha512-aF4us2JXh0zn3hNxvL1Bx3BOuh8Lcw3p3Xnurlvca/iptrDH1BrpObwkw9WZra7L7/0qB9kjlREq3hN/4x4x+Q==",
"version": "16.1.1",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-16.1.1.tgz",
"integrity": "sha512-hbyKtrDGUkgkyQi1m1IyD3q4I/3m9ngr+V93z4oKHrPcmxwNL5iMWORvLSGAf2YujL+6HxgVvZuCYZfLfb4bGw==",
"cpu": [
"x64"
],
@@ -2003,9 +1999,9 @@
}
},
"node_modules/@next/swc-linux-arm64-gnu": {
"version": "16.1.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-16.1.3.tgz",
"integrity": "sha512-8VRkcpcfBtYvhGgXAF7U3MBx6+G1lACM1XCo1JyaUr4KmAkTNP8Dv2wdMq7BI+jqRBw3zQE7c57+lmp7jCFfKA==",
"version": "16.1.1",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-16.1.1.tgz",
"integrity": "sha512-/fvHet+EYckFvRLQ0jPHJCUI5/B56+2DpI1xDSvi80r/3Ez+Eaa2Yq4tJcRTaB1kqj/HrYKn8Yplm9bNoMJpwQ==",
"cpu": [
"arm64"
],
@@ -2019,9 +2015,9 @@
}
},
"node_modules/@next/swc-linux-arm64-musl": {
"version": "16.1.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-16.1.3.tgz",
"integrity": "sha512-UbFx69E2UP7MhzogJRMFvV9KdEn4sLGPicClwgqnLht2TEi204B71HuVfps3ymGAh0c44QRAF+ZmvZZhLLmhNg==",
"version": "16.1.1",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-16.1.1.tgz",
"integrity": "sha512-MFHrgL4TXNQbBPzkKKur4Fb5ICEJa87HM7fczFs2+HWblM7mMLdco3dvyTI+QmLBU9xgns/EeeINSZD6Ar+oLg==",
"cpu": [
"arm64"
],
@@ -2035,9 +2031,9 @@
}
},
"node_modules/@next/swc-linux-x64-gnu": {
"version": "16.1.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-16.1.3.tgz",
"integrity": "sha512-SzGTfTjR5e9T+sZh5zXqG/oeRQufExxBF6MssXS7HPeZFE98JDhCRZXpSyCfWrWrYrzmnw/RVhlP2AxQm+wkRQ==",
"version": "16.1.1",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-16.1.1.tgz",
"integrity": "sha512-20bYDfgOQAPUkkKBnyP9PTuHiJGM7HzNBbuqmD0jiFVZ0aOldz+VnJhbxzjcSabYsnNjMPsE0cyzEudpYxsrUQ==",
"cpu": [
"x64"
],
@@ -2051,9 +2047,9 @@
}
},
"node_modules/@next/swc-linux-x64-musl": {
"version": "16.1.3",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-16.1.3.tgz",
"integrity": "sha512-HlrDpj0v+JBIvQex1mXHq93Mht5qQmfyci+ZNwGClnAQldSfxI6h0Vupte1dSR4ueNv4q7qp5kTnmLOBIQnGow==",
"version": "16.1.1",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-16.1.1.tgz",
"integrity": "sha512-9pRbK3M4asAHQRkwaXwu601oPZHghuSC8IXNENgbBSyImHv/zY4K5udBusgdHkvJ/Tcr96jJwQYOll0qU8+fPA==",
"cpu": [
"x64"
],
@@ -2067,9 +2063,9 @@
}
},
"node_modules/@next/swc-win32-arm64-msvc": {
"version": "16.1.3",
"resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-16.1.3.tgz",
"integrity": "sha512-3gFCp83/LSduZMSIa+lBREP7+5e7FxpdBoc9QrCdmp+dapmTK9I+SLpY60Z39GDmTXSZA4huGg9WwmYbr6+WRw==",
"version": "16.1.1",
"resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-16.1.1.tgz",
"integrity": "sha512-bdfQkggaLgnmYrFkSQfsHfOhk/mCYmjnrbRCGgkMcoOBZ4n+TRRSLmT/CU5SATzlBJ9TpioUyBW/vWFXTqQRiA==",
"cpu": [
"arm64"
],
@@ -2083,9 +2079,9 @@
}
},
"node_modules/@next/swc-win32-x64-msvc": {
"version": "16.1.3",
"resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-16.1.3.tgz",
"integrity": "sha512-1SZVfFT8zmMB+Oblrh5OKDvUo5mYQOkX2We6VGzpg7JUVZlqe4DYOFGKYZKTweSx1gbMixyO1jnFT4thU+nNHQ==",
"version": "16.1.1",
"resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-16.1.1.tgz",
"integrity": "sha512-Ncwbw2WJ57Al5OX0k4chM68DKhEPlrXBaSXDCi2kPi5f4d8b3ejr3RRJGfKBLrn2YJL5ezNS7w2TZLHSti8CMw==",
"cpu": [
"x64"
],
@@ -3709,9 +3705,9 @@
}
},
"node_modules/@tanstack/query-core": {
"version": "5.90.18",
"resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.90.18.tgz",
"integrity": "sha512-rbGx6bHgPNVzutP7BEr+53UPKohpckqlMAad+To9UxTbeaQ+kC/1SDRj+QzkwbQ7qhLT/1IKp34yS6thda6fzA==",
"version": "5.90.16",
"resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.90.16.tgz",
"integrity": "sha512-MvtWckSVufs/ja463/K4PyJeqT+HMlJWtw6PrCpywznd2NSgO3m4KwO9RqbFqGg6iDE8vVMFWMeQI4Io3eEYww==",
"license": "MIT",
"funding": {
"type": "github",
@@ -3719,13 +3715,12 @@
}
},
"node_modules/@tanstack/react-query": {
"version": "5.90.18",
"resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.90.18.tgz",
"integrity": "sha512-KqNZX0C5IFz4639zR1ilnQ288tQdJrMNLtzmlzyJ14xauBkhtLEy3mPU/V4KiHsr41eL1ILZbDP36TB12lYfCQ==",
"version": "5.90.16",
"resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.90.16.tgz",
"integrity": "sha512-bpMGOmV4OPmif7TNMteU/Ehf/hoC0Kf98PDc0F4BZkFrEapRMEqI/V6YS0lyzwSV6PQpY1y4xxArUIfBW5LVxQ==",
"license": "MIT",
"peer": true,
"dependencies": {
"@tanstack/query-core": "5.90.18"
"@tanstack/query-core": "5.90.16"
},
"funding": {
"type": "github",
@@ -3833,7 +3828,6 @@
"https://trpc.io/sponsor"
],
"license": "MIT",
"peer": true,
"peerDependencies": {
"@trpc/server": "11.8.1",
"typescript": ">=5.7.2"
@@ -3864,7 +3858,6 @@
"https://trpc.io/sponsor"
],
"license": "MIT",
"peer": true,
"peerDependencies": {
"typescript": ">=5.7.2"
}
@@ -3885,7 +3878,8 @@
"resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.4.tgz",
"integrity": "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==",
"dev": true,
"license": "MIT"
"license": "MIT",
"peer": true
},
"node_modules/@types/babel__core": {
"version": "7.20.5",
@@ -4045,9 +4039,9 @@
"license": "MIT"
},
"node_modules/@types/node": {
"version": "24.10.9",
"resolved": "https://registry.npmjs.org/@types/node/-/node-24.10.9.tgz",
"integrity": "sha512-ne4A0IpG3+2ETuREInjPNhUGis1SFjv1d5asp8MzEAGtOZeTeHVDOYqOgqfhvseqg/iXty2hjBf1zAOb7RNiNw==",
"version": "24.10.4",
"resolved": "https://registry.npmjs.org/@types/node/-/node-24.10.4.tgz",
"integrity": "sha512-vnDVpYPMzs4wunl27jHrfmwojOGKya0xyM3sH+UE5iv5uPS6vX7UIoh6m+vQc5LGBq52HBKPIn/zcSZVzeDEZg==",
"license": "MIT",
"dependencies": {
"undici-types": "~7.16.0"
@@ -4071,7 +4065,6 @@
"resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.8.tgz",
"integrity": "sha512-3MbSL37jEchWZz2p2mjntRZtPt837ij10ApxKfgmXCTuHWagYg7iA5bqPw6C8BMPfwidlvfPI/fxOc42HLhcyg==",
"license": "MIT",
"peer": true,
"dependencies": {
"csstype": "^3.2.2"
}
@@ -4082,7 +4075,6 @@
"integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==",
"devOptional": true,
"license": "MIT",
"peer": true,
"peerDependencies": {
"@types/react": "^19.2.0"
}
@@ -4156,7 +4148,6 @@
"integrity": "sha512-npiaib8XzbjtzS2N4HlqPvlpxpmZ14FjSJrteZpPxGUaYPlvhzlzUZ4mZyABo0EFrOWnvyd0Xxroq//hKhtAWg==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@typescript-eslint/scope-manager": "8.53.0",
"@typescript-eslint/types": "8.53.0",
@@ -4811,7 +4802,6 @@
"integrity": "sha512-hRDjg6dlDz7JlZAvjbiCdAJ3SDG+NH8tjZe21vjxfvT2ssYAn72SRXMge3dKKABm3bIJ3C+3wdunIdur8PHEAw==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@vitest/utils": "4.0.17",
"fflate": "^0.8.2",
@@ -4869,7 +4859,6 @@
"integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
"dev": true,
"license": "MIT",
"peer": true,
"bin": {
"acorn": "bin/acorn"
},
@@ -4920,6 +4909,7 @@
"integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=8"
}
@@ -5276,9 +5266,9 @@
"license": "MIT"
},
"node_modules/baseline-browser-mapping": {
"version": "2.9.15",
"resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.15.tgz",
"integrity": "sha512-kX8h7K2srmDyYnXRIppo4AH/wYgzWVCs+eKr3RusRSQ5PvRYoEFmR/I0PbdTjKFAoKqp5+kbxnNTFO9jOfSVJg==",
"version": "2.9.14",
"resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.14.tgz",
"integrity": "sha512-B0xUquLkiGLgHhpPBqvl7GWegWBUNuujQ6kXd/r1U38ElPT6Ok8KZ8e+FpUGEc2ZoRQUzq/aUnaKFc/svWUGSg==",
"license": "Apache-2.0",
"bin": {
"baseline-browser-mapping": "dist/cli.js"
@@ -5381,7 +5371,6 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"baseline-browser-mapping": "^2.8.25",
"caniuse-lite": "^1.0.30001754",
@@ -6107,7 +6096,8 @@
"resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz",
"integrity": "sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==",
"dev": true,
"license": "MIT"
"license": "MIT",
"peer": true
},
"node_modules/dotenv": {
"version": "17.2.3",
@@ -6466,7 +6456,6 @@
"integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@eslint-community/eslint-utils": "^4.8.0",
"@eslint-community/regexpp": "^4.12.1",
@@ -6522,13 +6511,13 @@
}
},
"node_modules/eslint-config-next": {
"version": "16.1.3",
"resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-16.1.3.tgz",
"integrity": "sha512-q2Z87VSsoJcv+vgR+Dm8NPRf+rErXcRktuBR5y3umo/j5zLjIWH7rqBCh3X804gUGKbOrqbgsLUkqDE35C93Gw==",
"version": "16.1.1",
"resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-16.1.1.tgz",
"integrity": "sha512-55nTpVWm3qeuxoQKLOjQVciKZJUphKrNM0fCcQHAIOGl6VFXgaqeMfv0aKJhs7QtcnlAPhNVqsqRfRjeKBPIUA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@next/eslint-plugin-next": "16.1.3",
"@next/eslint-plugin-next": "16.1.1",
"eslint-import-resolver-node": "^0.3.6",
"eslint-import-resolver-typescript": "^3.5.2",
"eslint-plugin-import": "^2.32.0",
@@ -6652,7 +6641,6 @@
"integrity": "sha512-whOE1HFo/qJDyX4SnXzP4N6zOWn79WhnCUY/iDR0mPfQZO8wcYE4JClzI2oZrhBnnMUCBCHZhO6VQyoBU95mZA==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@rtsao/scc": "^1.1.0",
"array-includes": "^3.1.9",
@@ -7666,7 +7654,6 @@
"integrity": "sha512-BIdolzGpDO9MQ4nu3AUuDwHZZ+KViNm+EZ75Ae55eMXMqLVhDFqEMXxtUe9Qh8hjL+pIna/frs2j6Y2yD5Ua/g==",
"devOptional": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=16.9.0"
}
@@ -8440,7 +8427,6 @@
"integrity": "sha512-mjzqwWRD9Y1J1KUi7W97Gja1bwOOM5Ug0EZ6UDK3xS7j7mndrkwozHtSblfomlzyB4NepioNt+B2sOSzczVgtQ==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@acemir/cssom": "^0.9.28",
"@asamuzakjp/dom-selector": "^6.7.6",
@@ -9065,6 +9051,7 @@
"integrity": "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==",
"dev": true,
"license": "MIT",
"peer": true,
"bin": {
"lz-string": "bin/bin.js"
}
@@ -10211,12 +10198,12 @@
"license": "MIT"
},
"node_modules/next": {
"version": "16.1.3",
"resolved": "https://registry.npmjs.org/next/-/next-16.1.3.tgz",
"integrity": "sha512-gthG3TRD+E3/mA0uDQb9lqBmx1zVosq5kIwxNN6+MRNd085GzD+9VXMPUs+GGZCbZ+GDZdODUq4Pm7CTXK6ipw==",
"version": "16.1.1",
"resolved": "https://registry.npmjs.org/next/-/next-16.1.1.tgz",
"integrity": "sha512-QI+T7xrxt1pF6SQ/JYFz95ro/mg/1Znk5vBebsWwbpejj1T0A23hO7GYEaVac9QUOT2BIMiuzm0L99ooq7k0/w==",
"license": "MIT",
"dependencies": {
"@next/env": "16.1.3",
"@next/env": "16.1.1",
"@swc/helpers": "0.5.15",
"baseline-browser-mapping": "^2.8.3",
"caniuse-lite": "^1.0.30001579",
@@ -10230,14 +10217,14 @@
"node": ">=20.9.0"
},
"optionalDependencies": {
"@next/swc-darwin-arm64": "16.1.3",
"@next/swc-darwin-x64": "16.1.3",
"@next/swc-linux-arm64-gnu": "16.1.3",
"@next/swc-linux-arm64-musl": "16.1.3",
"@next/swc-linux-x64-gnu": "16.1.3",
"@next/swc-linux-x64-musl": "16.1.3",
"@next/swc-win32-arm64-msvc": "16.1.3",
"@next/swc-win32-x64-msvc": "16.1.3",
"@next/swc-darwin-arm64": "16.1.1",
"@next/swc-darwin-x64": "16.1.1",
"@next/swc-linux-arm64-gnu": "16.1.1",
"@next/swc-linux-arm64-musl": "16.1.1",
"@next/swc-linux-x64-gnu": "16.1.1",
"@next/swc-linux-x64-musl": "16.1.1",
"@next/swc-win32-arm64-msvc": "16.1.1",
"@next/swc-win32-x64-msvc": "16.1.1",
"sharp": "^0.34.4"
},
"peerDependencies": {
@@ -10818,12 +10805,11 @@
}
},
"node_modules/prettier": {
"version": "3.8.0",
"resolved": "https://registry.npmjs.org/prettier/-/prettier-3.8.0.tgz",
"integrity": "sha512-yEPsovQfpxYfgWNhCfECjG5AQaO+K3dp6XERmOepyPDVqcJm+bjyCVO3pmU+nAPe0N5dDvekfGezt/EIiRe1TA==",
"version": "3.7.4",
"resolved": "https://registry.npmjs.org/prettier/-/prettier-3.7.4.tgz",
"integrity": "sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA==",
"dev": true,
"license": "MIT",
"peer": true,
"bin": {
"prettier": "bin/prettier.cjs"
},
@@ -10919,6 +10905,7 @@
"integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"ansi-regex": "^5.0.1",
"ansi-styles": "^5.0.0",
@@ -10934,6 +10921,7 @@
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=10"
},
@@ -10948,7 +10936,6 @@
"devOptional": true,
"hasInstallScript": true,
"license": "Apache-2.0",
"peer": true,
"dependencies": {
"@prisma/config": "7.2.0",
"@prisma/dev": "0.17.0",
@@ -11137,7 +11124,6 @@
"resolved": "https://registry.npmjs.org/react/-/react-19.2.3.tgz",
"integrity": "sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=0.10.0"
}
@@ -11147,7 +11133,6 @@
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.3.tgz",
"integrity": "sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg==",
"license": "MIT",
"peer": true,
"dependencies": {
"scheduler": "^0.27.0"
},
@@ -11160,7 +11145,8 @@
"resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz",
"integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==",
"dev": true,
"license": "MIT"
"license": "MIT",
"peer": true
},
"node_modules/react-markdown": {
"version": "10.1.0",
@@ -12383,8 +12369,7 @@
"version": "4.1.18",
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.18.tgz",
"integrity": "sha512-4+Z+0yiYyEtUVCScyfHCxOYP06L5Ne+JiHhY2IjR2KWMIWhJOYZKLSGZaP5HkZ8+bY0cxfzwDE5uOmzFXyIwxw==",
"license": "MIT",
"peer": true
"license": "MIT"
},
"node_modules/tapable": {
"version": "2.3.0",
@@ -12486,7 +12471,6 @@
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},
@@ -13263,7 +13247,6 @@
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz",
"integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
"license": "Apache-2.0",
"peer": true,
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
@@ -13539,7 +13522,6 @@
"integrity": "sha512-mm1rxUsmOxzrwnX5arGS+U4T25RdvpPjPN4yR0u9pUBov9+zGVtO84tif1eY4r6zWxVxu3KzIyknJy3rxfRZZg==",
"devOptional": true,
"license": "MIT",
"peer": true,
"peerDependencies": {
"typescript": ">=5"
},
@@ -13583,7 +13565,6 @@
"integrity": "sha512-NL8jTlbo0Tn4dUEXEsUg8KeyG/Lkmc4Fnzb8JXN/Ykm9G4HNImjtABMJgkQoVjOBN/j2WAwDTRytdqJbZsah7w==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"esbuild": "^0.25.0",
"fdir": "^6.5.0",
@@ -13677,7 +13658,6 @@
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},
@@ -13691,7 +13671,6 @@
"integrity": "sha512-FQMeF0DJdWY0iOnbv466n/0BudNdKj1l5jYgl5JVTwjSsZSlqyXFt/9+1sEyhR6CLowbZpV7O1sCHrzBhucKKg==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@vitest/expect": "4.0.17",
"@vitest/mocker": "4.0.17",
@@ -14035,7 +14014,6 @@
"resolved": "https://registry.npmjs.org/zod/-/zod-4.3.5.tgz",
"integrity": "sha512-k7Nwx6vuWx1IJ9Bjuf4Zt1PEllcwe7cls3VNzm4CQ1/hgtFUK2bRNG3rvnpPUhFjmqJKAKtjV576KnUkHocg/g==",
"license": "MIT",
"peer": true,
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}

View File

@@ -31,7 +31,7 @@
"@radix-ui/react-slot": "^1.2.4",
"@t3-oss/env-nextjs": "^0.13.10",
"@tailwindcss/typography": "^0.5.19",
"@tanstack/react-query": "^5.90.18",
"@tanstack/react-query": "^5.90.16",
"@trpc/client": "^11.8.1",
"@trpc/react-query": "^11.8.1",
"@trpc/server": "^11.8.1",
@@ -49,7 +49,7 @@
"dotenv": "^17.2.3",
"jsonwebtoken": "^9.0.3",
"lucide-react": "^0.562.0",
"next": "^16.1.3",
"next": "^16.1.1",
"node-cron": "^4.2.1",
"node-pty": "^1.1.0",
"react": "^19.2.3",
@@ -73,19 +73,19 @@
"@types/bcryptjs": "^3.0.0",
"@types/better-sqlite3": "^7.6.13",
"@types/jsonwebtoken": "^9.0.10",
"@types/node": "^24.10.9",
"@types/node": "^24.10.4",
"@types/node-cron": "^3.0.11",
"@types/react": "^19.2.8",
"@types/react-dom": "^19.2.3",
"@vitejs/plugin-react": "^5.1.2",
"@vitest/coverage-v8": "^4.0.17",
"@vitest/ui": "^4.0.17",
"baseline-browser-mapping": "^2.9.15",
"baseline-browser-mapping": "^2.9.14",
"eslint": "^9.39.2",
"eslint-config-next": "^16.1.3",
"eslint-config-next": "^16.1.1",
"jsdom": "^27.4.0",
"postcss": "^8.5.6",
"prettier": "^3.8.0",
"prettier": "^3.7.4",
"prettier-plugin-tailwindcss": "^0.7.2",
"prisma": "^7.2.0",
"tailwindcss": "^4.1.18",

View File

@@ -11,9 +11,6 @@ source "$(dirname "${BASH_SOURCE[0]}")/error-handler.func"
load_functions
catch_errors
# Get LXC IP address (must be called INSIDE container, after network is up)
get_lxc_ip
# This function enables IPv6 if it's not disabled and sets verbose mode
verb_ip6() {
set_std_mode # Set STD mode based on VERBOSE
@@ -128,13 +125,22 @@ update_os() {
# This function modifies the message of the day (motd) and SSH settings
motd_ssh() {
echo "export TERM='xterm-256color'" >>/root/.bashrc
IP=$(ip -4 addr show eth0 | awk '/inet / {print $2}' | cut -d/ -f1 | head -n 1)
if [ -f "/etc/os-release" ]; then
OS_NAME=$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '"')
OS_VERSION=$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '"')
else
OS_NAME="Alpine Linux"
OS_VERSION="Unknown"
fi
PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
echo "echo -e \"\"" >"$PROFILE_FILE"
echo -e "echo -e \"${BOLD}${APPLICATION} LXC Container${CL}"\" >>"$PROFILE_FILE"
echo -e "echo -e \"${TAB}${GATEWAY}${YW} Provided by: ${GN}community-scripts ORG ${YW}| GitHub: ${GN}https://github.com/community-scripts/ProxmoxVE${CL}\"" >>"$PROFILE_FILE"
echo "echo \"\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${TAB}${OS}${YW} OS: ${GN}\$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '\"') - Version: \$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '\"')${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${TAB}${OS}${YW} OS: ${GN}${OS_NAME} - Version: ${OS_VERSION}${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${TAB}${HOSTNAME}${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${TAB}${INFO}${YW} IP Address: ${GN}\$(ip -4 addr show eth0 | awk '/inet / {print \$2}' | cut -d/ -f1 | head -n 1)${CL}\"" >>"$PROFILE_FILE"

View File

@@ -1,188 +1,507 @@
#!/bin/ash
# shellcheck shell=ash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
if ! command -v curl >/dev/null 2>&1; then
apk update && apk add curl >/dev/null 2>&1
fi
source "$(dirname "${BASH_SOURCE[0]}")/core.func"
source "$(dirname "${BASH_SOURCE[0]}")/error-handler.func"
load_functions
catch_errors
# Expects existing msg_* functions and optional $STD from the framework.
# Get LXC IP address (must be called INSIDE container, after network is up)
get_lxc_ip
# ------------------------------
# helpers
# ------------------------------
lower() { printf '%s' "$1" | tr '[:upper:]' '[:lower:]'; }
has() { command -v "$1" >/dev/null 2>&1; }
# This function enables IPv6 if it's not disabled and sets verbose mode
verb_ip6() {
set_std_mode # Set STD mode based on VERBOSE
if [ "${IPV6_METHOD:-}" = "disable" ]; then
msg_info "Disabling IPv6 (this may affect some services)"
$STD sysctl -w net.ipv6.conf.all.disable_ipv6=1
$STD sysctl -w net.ipv6.conf.default.disable_ipv6=1
$STD sysctl -w net.ipv6.conf.lo.disable_ipv6=1
mkdir -p /etc/sysctl.d
$STD tee /etc/sysctl.d/99-disable-ipv6.conf >/dev/null <<EOF
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
EOF
$STD rc-update add sysctl default
msg_ok "Disabled IPv6"
need_tool() {
# usage: need_tool curl jq unzip ...
# setup missing tools via apk
local missing=0 t
for t in "$@"; do
if ! has "$t"; then missing=1; fi
done
if [ "$missing" -eq 1 ]; then
msg_info "Installing tools: $*"
apk add --no-cache "$@" >/dev/null 2>&1 || {
msg_error "apk add failed for: $*"
return 1
}
msg_ok "Tools ready: $*"
fi
}
set -Eeuo pipefail
trap 'error_handler $? $LINENO "$BASH_COMMAND"' ERR
trap on_exit EXIT
trap on_interrupt INT
trap on_terminate TERM
net_resolves() {
# better handling for missing getent on Alpine
# usage: net_resolves api.github.com
local host="$1"
ping -c1 -W1 "$host" >/dev/null 2>&1 || nslookup "$host" >/dev/null 2>&1
}
error_handler() {
local exit_code="$1"
local line_number="$2"
local command="$3"
ensure_usr_local_bin_persist() {
local PROFILE_FILE="/etc/profile.d/10-localbin.sh"
if [ ! -f "$PROFILE_FILE" ]; then
echo 'case ":$PATH:" in *:/usr/local/bin:*) ;; *) export PATH="/usr/local/bin:$PATH";; esac' >"$PROFILE_FILE"
chmod +x "$PROFILE_FILE"
fi
}
if [[ "$exit_code" -eq 0 ]]; then
download_with_progress() {
# $1 url, $2 dest
local url="$1" out="$2" cl
need_tool curl pv || return 1
cl=$(curl -fsSLI "$url" 2>/dev/null | awk 'tolower($0) ~ /^content-length:/ {print $2}' | tr -d '\r')
if [ -n "$cl" ]; then
curl -fsSL "$url" | pv -s "$cl" >"$out" || {
msg_error "Download failed: $url"
return 1
}
else
curl -fL# -o "$out" "$url" || {
msg_error "Download failed: $url"
return 1
}
fi
}
# ------------------------------
# GitHub: check Release
# ------------------------------
check_for_gh_release() {
# app, repo, [pinned]
local app="$1" source="$2" pinned="${3:-}"
local app_lc
app_lc="$(lower "$app" | tr -d ' ')"
local current_file="$HOME/.${app_lc}"
local current="" release tag
msg_info "Check for update: $app"
net_resolves api.github.com || {
msg_error "DNS/network error: api.github.com"
return 1
}
need_tool curl jq || return 1
tag=$(curl -fsSL "https://api.github.com/repos/${source}/releases/latest" | jq -r '.tag_name // empty')
[ -z "$tag" ] && {
msg_error "Unable to fetch latest tag for $app"
return 1
}
release="${tag#v}"
[ -f "$current_file" ] && current="$(cat "$current_file")"
if [ -n "$pinned" ]; then
if [ "$pinned" = "$release" ]; then
msg_ok "$app pinned to v$pinned (no update)"
return 1
fi
if [ "$current" = "$pinned" ]; then
msg_ok "$app pinned v$pinned installed (upstream v$release)"
return 1
fi
msg_info "$app pinned v$pinned (upstream v$release) → update/downgrade"
CHECK_UPDATE_RELEASE="$pinned"
return 0
fi
printf "\e[?25h"
echo -e "\n${RD}[ERROR]${CL} in line ${RD}$line_number${CL}: exit code ${RD}$exit_code${CL}: while executing command ${YW}$command${CL}\n"
exit "$exit_code"
}
on_exit() {
local exit_code="$?"
[[ -n "${lockfile:-}" && -e "$lockfile" ]] && rm -f "$lockfile"
exit "$exit_code"
}
on_interrupt() {
echo -e "\n${RD}Interrupted by user (SIGINT)${CL}"
exit 130
}
on_terminate() {
echo -e "\n${RD}Terminated by signal (SIGTERM)${CL}"
exit 143
}
# This function sets up the Container OS by generating the locale, setting the timezone, and checking the network connection
setting_up_container() {
msg_info "Setting up Container OS"
while [ $i -gt 0 ]; do
if [ "$(ip addr show | grep 'inet ' | grep -v '127.0.0.1' | awk '{print $2}' | cut -d'/' -f1)" != "" ]; then
break
fi
echo 1>&2 -en "${CROSS}${RD} No Network! "
sleep $RETRY_EVERY
i=$((i - 1))
done
if [ "$(ip addr show | grep 'inet ' | grep -v '127.0.0.1' | awk '{print $2}' | cut -d'/' -f1)" = "" ]; then
echo 1>&2 -e "\n${CROSS}${RD} No Network After $RETRY_NUM Tries${CL}"
echo -e "${NETWORK}Check Network Settings"
exit 1
if [ "$release" != "$current" ] || [ ! -f "$current_file" ]; then
CHECK_UPDATE_RELEASE="$release"
msg_info "New release available: v$release (current: v${current:-none})"
return 0
fi
msg_ok "Set up Container OS"
msg_ok "Network Connected: ${BL}$(ip addr show | grep 'inet ' | awk '{print $2}' | cut -d'/' -f1 | tail -n1)${CL}"
msg_ok "$app is up to date (v$release)"
return 1
}
# This function checks the network connection by pinging a known IP address and prompts the user to continue if the internet is not connected
network_check() {
set +e
trap - ERR
if ping -c 1 -W 1 1.1.1.1 &>/dev/null || ping -c 1 -W 1 8.8.8.8 &>/dev/null || ping -c 1 -W 1 9.9.9.9 &>/dev/null; then
ipv4_status="${GN}✔${CL} IPv4"
# ------------------------------
# GitHub: get Release & deploy (Alpine)
# modes: tarball | prebuild | singlefile
# ------------------------------
fetch_and_deploy_gh() {
# $1 app, $2 repo, [$3 mode], [$4 version], [$5 target], [$6 asset_pattern]
local app="$1" repo="$2" mode="${3:-tarball}" version="${4:-latest}" target="${5:-/opt/$1}" pattern="${6:-}"
local app_lc
app_lc="$(lower "$app" | tr -d ' ')"
local vfile="$HOME/.${app_lc}"
local json url filename tmpd unpack
net_resolves api.github.com || {
msg_error "DNS/network error"
return 1
}
need_tool curl jq tar || return 1
[ "$mode" = "prebuild" ] || [ "$mode" = "singlefile" ] && need_tool unzip >/dev/null 2>&1 || true
tmpd="$(mktemp -d)" || return 1
mkdir -p "$target"
# Release JSON
if [ "$version" = "latest" ]; then
json="$(curl -fsSL "https://api.github.com/repos/$repo/releases/latest")" || {
msg_error "GitHub API failed"
rm -rf "$tmpd"
return 1
}
else
ipv4_status="${RD}✖${CL} IPv4"
read -r -p "Internet NOT connected. Continue anyway? <y/N> " prompt
if [[ "${prompt,,}" =~ ^(y|yes)$ ]]; then
echo -e "${INFO}${RD}Expect Issues Without Internet${CL}"
json="$(curl -fsSL "https://api.github.com/repos/$repo/releases/tags/$version")" || {
msg_error "GitHub API failed"
rm -rf "$tmpd"
return 1
}
fi
# resolve the version from the release tag
version="$(printf '%s' "$json" | jq -r '.tag_name // empty')"
version="${version#v}"
[ -z "$version" ] && {
msg_error "No tag in release json"
rm -rf "$tmpd"
return 1
}
case "$mode" in
tarball | source)
url="$(printf '%s' "$json" | jq -r '.tarball_url // empty')"
[ -z "$url" ] && url="https://github.com/$repo/archive/refs/tags/v$version.tar.gz"
filename="${app_lc}-${version}.tar.gz"
download_with_progress "$url" "$tmpd/$filename" || {
rm -rf "$tmpd"
return 1
}
tar -xzf "$tmpd/$filename" -C "$tmpd" || {
msg_error "tar extract failed"
rm -rf "$tmpd"
return 1
}
unpack="$(find "$tmpd" -mindepth 1 -maxdepth 1 -type d | head -n1)"
# copy content of unpack to target
(cd "$unpack" && tar -cf - .) | (cd "$target" && tar -xf -) || {
msg_error "copy failed"
rm -rf "$tmpd"
return 1
}
;;
prebuild)
[ -n "$pattern" ] || {
msg_error "prebuild requires asset pattern"
rm -rf "$tmpd"
return 1
}
url="$(printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" '
BEGIN{IGNORECASE=1}
$0 ~ p {print; exit}
')"
[ -z "$url" ] && {
msg_error "asset not found for pattern: $pattern"
rm -rf "$tmpd"
return 1
}
filename="${url##*/}"
download_with_progress "$url" "$tmpd/$filename" || {
rm -rf "$tmpd"
return 1
}
# unpack archive (Zip or tarball)
case "$filename" in
*.zip)
need_tool unzip || {
rm -rf "$tmpd"
return 1
}
mkdir -p "$tmpd/unp"
unzip -q "$tmpd/$filename" -d "$tmpd/unp"
;;
*.tar.gz | *.tgz | *.tar.xz | *.tar.zst | *.tar.bz2)
mkdir -p "$tmpd/unp"
tar -xf "$tmpd/$filename" -C "$tmpd/unp"
;;
*)
msg_error "unsupported archive: $filename"
rm -rf "$tmpd"
return 1
;;
esac
# strip the top-level folder
if [ "$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type d | wc -l)" -eq 1 ] && [ -z "$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type f | head -n1)" ]; then
unpack="$(find "$tmpd/unp" -mindepth 1 -maxdepth 1 -type d)"
(cd "$unpack" && tar -cf - .) | (cd "$target" && tar -xf -) || {
msg_error "copy failed"
rm -rf "$tmpd"
return 1
}
else
echo -e "${NETWORK}Check Network Settings"
exit 1
(cd "$tmpd/unp" && tar -cf - .) | (cd "$target" && tar -xf -) || {
msg_error "copy failed"
rm -rf "$tmpd"
return 1
}
fi
;;
singlefile)
[ -n "$pattern" ] || {
msg_error "singlefile requires asset pattern"
rm -rf "$tmpd"
return 1
}
url="$(printf '%s' "$json" | jq -r '.assets[].browser_download_url' | awk -v p="$pattern" '
BEGIN{IGNORECASE=1}
$0 ~ p {print; exit}
')"
[ -z "$url" ] && {
msg_error "asset not found for pattern: $pattern"
rm -rf "$tmpd"
return 1
}
filename="${url##*/}"
download_with_progress "$url" "$target/$app" || {
rm -rf "$tmpd"
return 1
}
chmod +x "$target/$app"
;;
*)
msg_error "Unknown mode: $mode"
rm -rf "$tmpd"
return 1
;;
esac
echo "$version" >"$vfile"
ensure_usr_local_bin_persist
rm -rf "$tmpd"
msg_ok "Deployed $app ($version) → $target"
}
# ------------------------------
# yq (mikefarah) Alpine
# ------------------------------
setup_yq() {
# prefer apk, unless FORCE_GH=1
if [ "${FORCE_GH:-0}" != "1" ] && apk info -e yq >/dev/null 2>&1; then
msg_info "Updating yq via apk"
apk add --no-cache --upgrade yq >/dev/null 2>&1 || true
msg_ok "yq ready ($(yq --version 2>/dev/null))"
return 0
fi
need_tool curl || return 1
local arch bin url tmp
case "$(uname -m)" in
x86_64) arch="amd64" ;;
aarch64) arch="arm64" ;;
*)
msg_error "Unsupported arch for yq: $(uname -m)"
return 1
;;
esac
url="https://github.com/mikefarah/yq/releases/latest/download/yq_linux_${arch}"
tmp="$(mktemp)"
download_with_progress "$url" "$tmp" || return 1
install -m 0755 "$tmp" /usr/local/bin/yq
rm -f "$tmp"
msg_ok "Setup yq ($(yq --version 2>/dev/null))"
}
# ------------------------------
# Adminer Alpine
# ------------------------------
setup_adminer() {
need_tool curl || return 1
msg_info "Setup Adminer (Alpine)"
mkdir -p /var/www/localhost/htdocs/adminer
curl -fsSL https://github.com/vrana/adminer/releases/latest/download/adminer.php \
-o /var/www/localhost/htdocs/adminer/index.php || {
msg_error "Adminer download failed"
return 1
}
msg_ok "Adminer at /adminer (served by your webserver)"
}
# ------------------------------
# uv Alpine (musl tarball)
# optional: PYTHON_VERSION="3.12"
# ------------------------------
setup_uv() {
need_tool curl tar || return 1
local UV_BIN="/usr/local/bin/uv"
local arch tarball url tmpd ver installed
case "$(uname -m)" in
x86_64) arch="x86_64-unknown-linux-musl" ;;
aarch64) arch="aarch64-unknown-linux-musl" ;;
*)
msg_error "Unsupported arch for uv: $(uname -m)"
return 1
;;
esac
ver="$(curl -fsSL https://api.github.com/repos/astral-sh/uv/releases/latest | jq -r '.tag_name' 2>/dev/null)"
ver="${ver#v}"
[ -z "$ver" ] && {
msg_error "uv: cannot determine latest version"
return 1
}
if has "$UV_BIN"; then
installed="$($UV_BIN -V 2>/dev/null | awk '{print $2}')"
[ "$installed" = "$ver" ] && {
msg_ok "uv $ver already installed"
return 0
}
msg_info "Updating uv $installed → $ver"
else
msg_info "Setup uv $ver"
fi
tmpd="$(mktemp -d)" || return 1
tarball="uv-${arch}.tar.gz"
url="https://github.com/astral-sh/uv/releases/download/v${ver}/${tarball}"
download_with_progress "$url" "$tmpd/uv.tar.gz" || {
rm -rf "$tmpd"
return 1
}
tar -xzf "$tmpd/uv.tar.gz" -C "$tmpd" || {
msg_error "uv: extract failed"
rm -rf "$tmpd"
return 1
}
# tar contains ./uv
if [ -x "$tmpd/uv" ]; then
install -m 0755 "$tmpd/uv" "$UV_BIN"
else
# fallback: in subfolder
install -m 0755 "$tmpd"/*/uv "$UV_BIN" 2>/dev/null || {
msg_error "uv binary not found in tar"
rm -rf "$tmpd"
return 1
}
fi
rm -rf "$tmpd"
ensure_usr_local_bin_persist
msg_ok "Setup uv $ver"
if [ -n "${PYTHON_VERSION:-}" ]; then
local match
match="$(uv python list --only-downloads 2>/dev/null | awk -v maj="$PYTHON_VERSION" '
$0 ~ "^cpython-"maj"\\." { print $0 }' | awk -F- '{print $2}' | sort -V | tail -n1)"
[ -z "$match" ] && {
msg_error "No matching Python for $PYTHON_VERSION"
return 1
}
if ! uv python list | grep -q "cpython-${match}-linux"; then
msg_info "Installing Python $match via uv"
uv python install "$match" || {
msg_error "uv python install failed"
return 1
}
msg_ok "Python $match installed (uv)"
fi
fi
RESOLVEDIP=$(getent hosts github.com | awk '{ print $1 }')
if [[ -z "$RESOLVEDIP" ]]; then
msg_error "Internet: ${ipv4_status} DNS Failed"
}
# ------------------------------
# Java Alpine (OpenJDK)
# JAVA_VERSION: 17|21 (Default 21)
# ------------------------------
setup_java() {
local JAVA_VERSION="${JAVA_VERSION:-21}" pkg
case "$JAVA_VERSION" in
17) pkg="openjdk17-jdk" ;;
21 | *) pkg="openjdk21-jdk" ;;
esac
msg_info "Setup Java (OpenJDK $JAVA_VERSION)"
apk add --no-cache "$pkg" >/dev/null 2>&1 || {
msg_error "apk add $pkg failed"
return 1
}
# set JAVA_HOME
local prof="/etc/profile.d/20-java.sh"
if [ ! -f "$prof" ]; then
echo 'export JAVA_HOME=$(dirname $(dirname $(readlink -f $(command -v java))))' >"$prof"
echo 'case ":$PATH:" in *:$JAVA_HOME/bin:*) ;; *) export PATH="$JAVA_HOME/bin:$PATH";; esac' >>"$prof"
chmod +x "$prof"
fi
msg_ok "Java ready: $(java -version 2>&1 | head -n1)"
}
# ------------------------------
# Go Alpine (apk preferred, else tarball)
# ------------------------------
setup_go() {
if [ -z "${GO_VERSION:-}" ]; then
msg_info "Setup Go (apk)"
apk add --no-cache go >/dev/null 2>&1 || {
msg_error "apk add go failed"
return 1
}
msg_ok "Go ready: $(go version 2>/dev/null)"
return 0
fi
need_tool curl tar || return 1
local ARCH TARBALL URL TMP
case "$(uname -m)" in
x86_64) ARCH="amd64" ;;
aarch64) ARCH="arm64" ;;
*)
msg_error "Unsupported arch for Go: $(uname -m)"
return 1
;;
esac
TARBALL="go${GO_VERSION}.linux-${ARCH}.tar.gz"
URL="https://go.dev/dl/${TARBALL}"
msg_info "Setup Go $GO_VERSION (tarball)"
TMP="$(mktemp)"
download_with_progress "$URL" "$TMP" || return 1
rm -rf /usr/local/go
tar -C /usr/local -xzf "$TMP" || {
msg_error "extract go failed"
rm -f "$TMP"
return 1
}
rm -f "$TMP"
ln -sf /usr/local/go/bin/go /usr/local/bin/go
ln -sf /usr/local/go/bin/gofmt /usr/local/bin/gofmt
ensure_usr_local_bin_persist
msg_ok "Go ready: $(go version 2>/dev/null)"
}
# ------------------------------
# Composer Alpine
# uses php83-cli + openssl + phar
# ------------------------------
setup_composer() {
local COMPOSER_BIN="/usr/local/bin/composer"
if ! has php; then
# prefers php83
msg_info "Installing PHP CLI for Composer"
apk add --no-cache php83-cli php83-openssl php83-phar php83-iconv >/dev/null 2>&1 || {
# Fallback to generic php if 83 not available
apk add --no-cache php-cli php-openssl php-phar php-iconv >/dev/null 2>&1 || {
msg_error "Failed to install php-cli for composer"
return 1
}
}
msg_ok "PHP CLI ready: $(php -v | head -n1)"
fi
if [ -x "$COMPOSER_BIN" ]; then
msg_info "Updating Composer"
else
msg_ok "Internet: ${ipv4_status} DNS: ${BL}${RESOLVEDIP}${CL}"
fi
set -e
trap 'error_handler $LINENO "$BASH_COMMAND"' ERR
}
# This function updates the Container OS by running apt-get update and upgrade
update_os() {
msg_info "Updating Container OS"
$STD apk -U upgrade
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/tools.func)
msg_ok "Updated Container OS"
}
# This function modifies the message of the day (motd) and SSH settings
motd_ssh() {
echo "export TERM='xterm-256color'" >>/root/.bashrc
PROFILE_FILE="/etc/profile.d/00_lxc-details.sh"
echo "echo -e \"\"" >"$PROFILE_FILE"
echo -e "echo -e \"${BOLD}${APPLICATION} LXC Container${CL}"\" >>"$PROFILE_FILE"
echo -e "echo -e \"${TAB}${GATEWAY}${YW} Provided by: ${GN}community-scripts ORG ${YW}| GitHub: ${GN}https://github.com/community-scripts/ProxmoxVE${CL}\"" >>"$PROFILE_FILE"
echo "echo \"\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${TAB}${OS}${YW} OS: ${GN}\$(grep ^NAME /etc/os-release | cut -d= -f2 | tr -d '\"') - Version: \$(grep ^VERSION_ID /etc/os-release | cut -d= -f2 | tr -d '\"')${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${TAB}${HOSTNAME}${YW} Hostname: ${GN}\$(hostname)${CL}\"" >>"$PROFILE_FILE"
echo -e "echo -e \"${TAB}${INFO}${YW} IP Address: ${GN}\$(ip -4 addr show eth0 | awk '/inet / {print \$2}' | cut -d/ -f1 | head -n 1)${CL}\"" >>"$PROFILE_FILE"
# Configure SSH if enabled
if [[ "${SSH_ROOT}" == "yes" ]]; then
# Enable sshd service
$STD rc-update add sshd
# Allow root login via SSH
sed -i "s/#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
# Start the sshd service
$STD /etc/init.d/sshd start
fi
}
# Validate Timezone for some LXC's
validate_tz() {
[[ -f "/usr/share/zoneinfo/$1" ]]
}
# This function customizes the container and enables passwordless login for the root user
customize() {
if [[ "$PASSWORD" == "" ]]; then
msg_info "Customizing Container"
passwd -d root >/dev/null 2>&1
# Ensure agetty is available
apk add --no-cache --force-broken-world util-linux >/dev/null 2>&1
# Create persistent autologin boot script
mkdir -p /etc/local.d
cat <<'EOF' >/etc/local.d/autologin.start
#!/bin/sh
sed -i 's|^tty1::respawn:.*|tty1::respawn:/sbin/agetty --autologin root --noclear tty1 38400 linux|' /etc/inittab
kill -HUP 1
EOF
touch /root/.hushlogin
chmod +x /etc/local.d/autologin.start
rc-update add local >/dev/null 2>&1
# Apply autologin immediately for current session
/etc/local.d/autologin.start
msg_ok "Customized Container"
msg_info "Setup Composer"
fi
echo "bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/ct/${app}.sh)\"" >/usr/bin/update
chmod +x /usr/bin/update
need_tool curl || return 1
curl -fsSL https://getcomposer.org/installer -o /tmp/composer-setup.php || {
msg_error "composer installer download failed"
return 1
}
php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer >/dev/null 2>&1 || {
msg_error "composer install failed"
return 1
}
rm -f /tmp/composer-setup.php
ensure_usr_local_bin_persist
msg_ok "Composer ready: $(composer --version 2>/dev/null)"
}
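
A hedged usage sketch of how an Alpine install script might combine these new helpers once this file has been sourced. The app name, repository, target path, and asset pattern below are made-up examples, not taken from this diff:

# Example only: check for a newer release and deploy a hypothetical prebuilt asset.
APP="MyApp"                      # made-up application name
REPO="example-org/myapp"         # made-up GitHub repository

if check_for_gh_release "$APP" "$REPO"; then
    # check_for_gh_release sets CHECK_UPDATE_RELEASE when an update is available
    fetch_and_deploy_gh "$APP" "$REPO" "prebuild" "latest" "/opt/myapp" "linux_amd64"
fi

# Optional toolchains, controlled by the environment variables the functions read
PYTHON_VERSION="3.12"
setup_uv
JAVA_VERSION=21
setup_java
GO_VERSION="1.22.5"
setup_go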

File diff suppressed because it is too large.

View File

@@ -127,34 +127,6 @@ icons() {
HOURGLASS="${TAB}⏳${TAB}"
}
# ------------------------------------------------------------------------------
# ensure_profile_loaded()
#
# - Sources /etc/profile.d/*.sh scripts if not already loaded
# - Fixes PATH issues when running via pct enter/exec (non-login shells)
# - Safe to call multiple times (uses guard variable)
# - Should be called in update_script() or any script running inside LXC
# ------------------------------------------------------------------------------
ensure_profile_loaded() {
# Skip if already loaded or running on Proxmox host
[[ -n "${_PROFILE_LOADED:-}" ]] && return
command -v pveversion &>/dev/null && return
# Source all profile.d scripts to ensure PATH is complete
if [[ -d /etc/profile.d ]]; then
for script in /etc/profile.d/*.sh; do
[[ -r "$script" ]] && source "$script"
done
fi
# Also ensure /usr/local/bin is in PATH (common install location)
if [[ ":$PATH:" != *":/usr/local/bin:"* ]]; then
export PATH="/usr/local/bin:$PATH"
fi
export _PROFILE_LOADED=1
}
# ------------------------------------------------------------------------------
# default_vars()
#
@@ -815,9 +787,11 @@ is_verbose_mode() {
# ------------------------------------------------------------------------------
# cleanup_lxc()
#
# - Cleans package manager and language caches (safe for installs AND updates)
# - Supports Alpine (apk), Debian/Ubuntu (apt), Python, Node.js, Go, Rust, Ruby, PHP
# - Uses fallback error handling to prevent cleanup failures from breaking installs
# - Comprehensive cleanup of package managers, caches, and logs
# - Supports Alpine (apk), Debian/Ubuntu (apt), and language package managers
# - Cleans: Python (pip/uv), Node.js (npm/yarn/pnpm), Go, Rust, Ruby, PHP
# - Truncates log files and vacuums systemd journal
# - Run at end of container creation to minimize disk usage
# ------------------------------------------------------------------------------
cleanup_lxc() {
msg_info "Cleaning up"
@@ -826,52 +800,32 @@ cleanup_lxc() {
$STD apk cache clean || true
rm -rf /var/cache/apk/*
else
$STD apt -y autoremove 2>/dev/null || msg_warn "apt autoremove failed (non-critical)"
$STD apt -y autoclean 2>/dev/null || msg_warn "apt autoclean failed (non-critical)"
$STD apt -y clean 2>/dev/null || msg_warn "apt clean failed (non-critical)"
$STD apt -y autoremove || true
$STD apt -y autoclean || true
$STD apt -y clean || true
fi
# Clear temp artifacts (keep sockets/FIFOs; ignore errors)
find /tmp /var/tmp -type f -name 'tmp*' -delete 2>/dev/null || true
find /tmp /var/tmp -type f -name 'tempfile*' -delete 2>/dev/null || true
# Python
if command -v pip &>/dev/null; then
rm -rf /root/.cache/pip 2>/dev/null || true
fi
if command -v uv &>/dev/null; then
rm -rf /root/.cache/uv 2>/dev/null || true
fi
# Node.js
# Node.js npm - directly remove cache directory
# npm cache clean/verify can fail with ENOTEMPTY errors, so we skip them
if command -v npm &>/dev/null; then
rm -rf /root/.npm/_cacache /root/.npm/_logs 2>/dev/null || true
fi
if command -v yarn &>/dev/null; then
rm -rf /root/.cache/yarn /root/.yarn/cache 2>/dev/null || true
fi
if command -v pnpm &>/dev/null; then
pnpm store prune &>/dev/null || true
fi
# Go (only build cache, not modules)
if command -v go &>/dev/null; then
$STD go clean -cache 2>/dev/null || true
fi
# Rust (only registry cache, not build artifacts)
if command -v cargo &>/dev/null; then
rm -rf /root/.cargo/registry/cache /root/.cargo/.package-cache 2>/dev/null || true
fi
# Ruby
if command -v gem &>/dev/null; then
rm -rf /root/.gem/cache 2>/dev/null || true
fi
# PHP
if command -v composer &>/dev/null; then
rm -rf /root/.composer/cache 2>/dev/null || true
fi
# Node.js yarn
if command -v yarn &>/dev/null; then yarn cache clean &>/dev/null || true; fi
# Node.js pnpm
if command -v pnpm &>/dev/null; then pnpm store prune &>/dev/null || true; fi
# Go
if command -v go &>/dev/null; then $STD go clean -cache -modcache || true; fi
# Rust cargo
if command -v cargo &>/dev/null; then $STD cargo clean || true; fi
# Ruby gem
if command -v gem &>/dev/null; then $STD gem cleanup || true; fi
# Composer (PHP)
if command -v composer &>/dev/null; then COMPOSER_ALLOW_SUPERUSER=1 $STD composer clear-cache || true; fi
msg_ok "Cleaned"
}
@@ -924,95 +878,8 @@ check_or_create_swap() {
fi
}
# ------------------------------------------------------------------------------
# Loads LOCAL_IP from persistent store or detects if missing.
#
# Description:
# - Loads from /run/local-ip.env or performs runtime lookup
# ------------------------------------------------------------------------------
function get_lxc_ip() {
local IP_FILE="/run/local-ip.env"
if [[ -f "$IP_FILE" ]]; then
# shellcheck disable=SC1090
source "$IP_FILE"
fi
if [[ -z "${LOCAL_IP:-}" ]]; then
get_current_ip() {
local ip
# Try direct interface lookup for eth0 FIRST (most reliable for LXC) - IPv4
ip=$(ip -4 addr show eth0 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1 | head -n1)
if [[ -n "$ip" && "$ip" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "$ip"
return 0
fi
# Fallback: Try hostname -I (returns IPv4 first if available)
if command -v hostname >/dev/null 2>&1; then
ip=$(hostname -I 2>/dev/null | awk '{print $1}')
if [[ -n "$ip" && "$ip" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "$ip"
return 0
fi
fi
# Try routing table with IPv4 targets
local ipv4_targets=("8.8.8.8" "1.1.1.1" "default")
for target in "${ipv4_targets[@]}"; do
if [[ "$target" == "default" ]]; then
ip=$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
else
ip=$(ip route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
fi
if [[ -n "$ip" ]]; then
echo "$ip"
return 0
fi
done
# IPv6 fallback: Try direct interface lookup for eth0
ip=$(ip -6 addr show eth0 scope global 2>/dev/null | awk '/inet6 / {print $2}' | cut -d/ -f1 | head -n1)
if [[ -n "$ip" && "$ip" =~ : ]]; then
echo "$ip"
return 0
fi
# IPv6 fallback: Try hostname -I for IPv6
if command -v hostname >/dev/null 2>&1; then
ip=$(hostname -I 2>/dev/null | tr ' ' '\n' | grep -E ':' | head -n1)
if [[ -n "$ip" && "$ip" =~ : ]]; then
echo "$ip"
return 0
fi
fi
# IPv6 fallback: Use routing table with IPv6 targets
local ipv6_targets=("2001:4860:4860::8888" "2606:4700:4700::1111")
for target in "${ipv6_targets[@]}"; do
ip=$(ip -6 route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
if [[ -n "$ip" && "$ip" =~ : ]]; then
echo "$ip"
return 0
fi
done
return 1
}
LOCAL_IP="$(get_current_ip || true)"
if [[ -z "$LOCAL_IP" ]]; then
msg_error "Could not determine LOCAL_IP"
return 1
fi
fi
export LOCAL_IP
}
# ==============================================================================
# SIGNAL TRAPS
# ==============================================================================
trap 'stop_spinner' EXIT INT TERM
trap 'stop_spinner' EXIT INT TERM

View File

@@ -37,9 +37,6 @@ source "$(dirname "${BASH_SOURCE[0]}")/error-handler.func"
load_functions
catch_errors
# Get LXC IP address (must be called INSIDE container, after network is up)
get_lxc_ip
# ==============================================================================
# SECTION 2: NETWORK & CONNECTIVITY
# ==============================================================================
@@ -79,13 +76,6 @@ EOF
# ------------------------------------------------------------------------------
setting_up_container() {
msg_info "Setting up Container OS"
# Fix Debian 13 LXC template bug where / is owned by nobody
# Only attempt in privileged containers (unprivileged cannot chown /)
if [[ "$(stat -c '%U' /)" != "root" ]]; then
(chown root:root / 2>/dev/null) || true
fi
for ((i = RETRY_NUM; i > 0; i--)); do
if [ "$(hostname -I)" != "" ]; then
break

View File

@@ -184,10 +184,7 @@ install_packages_with_retry() {
local retry=0
while [[ $retry -le $max_retries ]]; do
if DEBIAN_FRONTEND=noninteractive $STD apt install -y \
-o Dpkg::Options::="--force-confdef" \
-o Dpkg::Options::="--force-confold" \
"${packages[@]}" 2>/dev/null; then
if $STD apt install -y "${packages[@]}" 2>/dev/null; then
return 0
fi
@@ -214,10 +211,7 @@ upgrade_packages_with_retry() {
local retry=0
while [[ $retry -le $max_retries ]]; do
if DEBIAN_FRONTEND=noninteractive $STD apt install --only-upgrade -y \
-o Dpkg::Options::="--force-confdef" \
-o Dpkg::Options::="--force-confold" \
"${packages[@]}" 2>/dev/null; then
if $STD apt install --only-upgrade -y "${packages[@]}" 2>/dev/null; then
return 0
fi
@@ -574,8 +568,7 @@ EOF
msg_error "Failed to download PHP keyring"
return 1
}
# Don't use /dev/null redirection for dpkg as it may use background processes
dpkg -i /tmp/debsuryorg-archive-keyring.deb >>"$(get_active_logfile)" 2>&1 || {
dpkg -i /tmp/debsuryorg-archive-keyring.deb >/dev/null 2>&1 || {
msg_error "Failed to install PHP keyring"
rm -f /tmp/debsuryorg-archive-keyring.deb
return 1
@@ -1845,9 +1838,8 @@ function fetch_and_deploy_gh_release() {
}
chmod 644 "$tmpdir/$filename"
# SYSTEMD_OFFLINE=1 prevents systemd-tmpfiles failures in unprivileged LXC (Debian 13+/systemd 257+)
SYSTEMD_OFFLINE=1 $STD apt install -y "$tmpdir/$filename" || {
SYSTEMD_OFFLINE=1 $STD dpkg -i "$tmpdir/$filename" || {
$STD apt install -y "$tmpdir/$filename" || {
$STD dpkg -i "$tmpdir/$filename" || {
msg_error "Both apt and dpkg installation failed"
rm -rf "$tmpdir"
return 1
@@ -1902,7 +1894,7 @@ function fetch_and_deploy_gh_release() {
rm -rf "$tmpdir" "$unpack_tmp"
return 1
}
elif [[ "$filename" == *.tar.* || "$filename" == *.tgz || "$filename" == *.txz ]]; then
elif [[ "$filename" == *.tar.* || "$filename" == *.tgz ]]; then
tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || {
msg_error "Failed to extract TAR archive"
rm -rf "$tmpdir" "$unpack_tmp"
@@ -2006,6 +1998,50 @@ function fetch_and_deploy_gh_release() {
rm -rf "$tmpdir"
}
# ------------------------------------------------------------------------------
# Loads LOCAL_IP from persistent store or detects if missing.
#
# Description:
# - Loads from /run/local-ip.env or performs runtime lookup
# ------------------------------------------------------------------------------
function import_local_ip() {
local IP_FILE="/run/local-ip.env"
if [[ -f "$IP_FILE" ]]; then
# shellcheck disable=SC1090
source "$IP_FILE"
fi
if [[ -z "${LOCAL_IP:-}" ]]; then
get_current_ip() {
local targets=("8.8.8.8" "1.1.1.1" "192.168.1.1" "10.0.0.1" "172.16.0.1" "default")
local ip
for target in "${targets[@]}"; do
if [[ "$target" == "default" ]]; then
ip=$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
else
ip=$(ip route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
fi
if [[ -n "$ip" ]]; then
echo "$ip"
return 0
fi
done
return 1
}
LOCAL_IP="$(get_current_ip || true)"
if [[ -z "$LOCAL_IP" ]]; then
msg_error "Could not determine LOCAL_IP"
return 1
fi
fi
export LOCAL_IP
}
# ------------------------------------------------------------------------------
# Installs Adminer (Debian/Ubuntu via APT, Alpine via direct download).
#
@@ -2633,7 +2669,6 @@ function setup_hwaccel() {
# GPU Selection - Let user choose which GPU(s) to configure
# ═══════════════════════════════════════════════════════════════════════════
local -a SELECTED_INDICES=()
local install_nvidia_drivers="yes"
if [[ $gpu_count -eq 1 ]]; then
# Single GPU - auto-select
@@ -2642,7 +2677,7 @@ function setup_hwaccel() {
else
# Multiple GPUs - show selection menu
echo ""
msg_custom "⚠" "${YW}" "Multiple GPUs detected:"
msg_info "Multiple GPUs detected:"
echo ""
for i in "${!GPU_LIST[@]}"; do
local type_display="${GPU_TYPES[$i]}"
@@ -2695,30 +2730,6 @@ function setup_hwaccel() {
fi
fi
# Ask whether to install NVIDIA drivers in the container
local nvidia_selected="no"
for idx in "${SELECTED_INDICES[@]}"; do
if [[ "${GPU_TYPES[$idx]}" == "NVIDIA" ]]; then
nvidia_selected="yes"
break
fi
done
if [[ "$nvidia_selected" == "yes" ]]; then
if [[ -n "${INSTALL_NVIDIA_DRIVERS:-}" ]]; then
install_nvidia_drivers="${INSTALL_NVIDIA_DRIVERS}"
else
echo ""
msg_custom "🎮" "${GN}" "NVIDIA GPU passthrough detected"
local nvidia_reply=""
read -r -t 60 -p "${TAB3}⚙️ Install NVIDIA driver libraries in the container? [Y/n] (auto-yes in 60s): " nvidia_reply || nvidia_reply=""
case "${nvidia_reply,,}" in
n | no) install_nvidia_drivers="no" ;;
*) install_nvidia_drivers="yes" ;;
esac
fi
fi
# ═══════════════════════════════════════════════════════════════════════════
# OS Detection
# ═══════════════════════════════════════════════════════════════════════════
@@ -2779,11 +2790,7 @@ function setup_hwaccel() {
# NVIDIA GPUs
# ─────────────────────────────────────────────────────────────────────────
NVIDIA)
if [[ "$install_nvidia_drivers" == "yes" ]]; then
_setup_nvidia_gpu "$os_id" "$os_codename" "$os_version"
else
msg_warn "Skipping NVIDIA driver installation (user opted to install manually)"
fi
_setup_nvidia_gpu "$os_id" "$os_codename" "$os_version"
;;
esac
done
@@ -2913,15 +2920,8 @@ _setup_intel_legacy() {
vainfo \
intel-gpu-tools 2>/dev/null || msg_warn "Some Intel legacy packages failed"
# beignet provides OpenCL for older Intel GPUs (Sandy Bridge to Broadwell)
# Note: beignet-opencl-icd was removed in Debian 12+ and Ubuntu 22.04+
# Check if package is available before attempting installation
if apt-cache show beignet-opencl-icd &>/dev/null; then
$STD apt -y install beignet-opencl-icd 2>/dev/null || msg_warn "beignet-opencl-icd installation failed (optional)"
else
msg_warn "beignet-opencl-icd not available - OpenCL support for legacy Intel GPU limited"
msg_warn "Note: Hardware video encoding/decoding (VA-API) still works without OpenCL"
fi
# beignet provides OpenCL for older Intel GPUs (if available)
$STD apt -y install beignet-opencl-icd 2>/dev/null || true
msg_ok "Intel Legacy GPU configured"
}
@@ -2989,24 +2989,16 @@ _setup_nvidia_gpu() {
msg_info "Installing NVIDIA GPU drivers"
# Prevent interactive dialogs (e.g., "Mismatching nvidia kernel module" whiptail)
export DEBIAN_FRONTEND=noninteractive
export NEEDRESTART_MODE=a
# Detect host driver version (passed through via /proc)
# Format varies by driver type:
# Proprietary: "NVRM version: NVIDIA UNIX x86_64 Kernel Module 550.54.14 Thu..."
# Open: "NVRM version: NVIDIA UNIX Open Kernel Module for x86_64 590.48.01 Release..."
# Use regex to extract version number (###.##.## pattern)
local nvidia_host_version=""
if [[ -f /proc/driver/nvidia/version ]]; then
nvidia_host_version=$(grep -oP '\d{3,}\.\d+\.\d+' /proc/driver/nvidia/version 2>/dev/null | head -1)
nvidia_host_version=$(grep "NVRM version:" /proc/driver/nvidia/version 2>/dev/null | awk '{print $8}')
fi
if [[ -z "$nvidia_host_version" ]]; then
msg_warn "NVIDIA host driver version not found in /proc/driver/nvidia/version"
msg_warn "Ensure NVIDIA drivers are installed on host and GPU passthrough is enabled"
$STD apt-get -y install va-driver-all vainfo 2>/dev/null || true
$STD apt -y install va-driver-all vainfo 2>/dev/null || true
return 0
fi
@@ -3019,116 +3011,54 @@ _setup_nvidia_gpu() {
sed -i -E 's/Components: (.*)$/Components: \1 contrib non-free non-free-firmware/g' /etc/apt/sources.list.d/debian.sources 2>/dev/null || true
fi
fi
$STD apt-get -y update 2>/dev/null || msg_warn "apt update failed - continuing anyway"
# For Debian 13 Trixie/Sid: Use Debian's own nvidia packages first (better compatibility)
# NVIDIA's CUDA repo targets Debian 12 and may not have amd64 packages for Trixie
if [[ "$os_codename" == "trixie" || "$os_codename" == "sid" ]]; then
msg_info "Debian ${os_codename}: Using Debian's NVIDIA packages"
# Determine CUDA repository
local cuda_repo="debian12"
case "$os_codename" in
bullseye) cuda_repo="debian11" ;;
bookworm) cuda_repo="debian12" ;;
trixie | sid) cuda_repo="debian12" ;; # Forward compatible
esac
# Extract major version for flexible matching (580.126.09 -> 580)
local nvidia_major_version="${nvidia_host_version%%.*}"
# Check what versions are actually available
local available_version=""
available_version=$(apt-cache madison libcuda1 2>/dev/null | awk '{print $3}' | grep -E "^${nvidia_major_version}\." | head -1 || true)
if [[ -n "$available_version" ]]; then
msg_info "Found available NVIDIA version: ${available_version}"
local nvidia_pkgs="libcuda1=${available_version} libnvcuvid1=${available_version} libnvidia-encode1=${available_version} libnvidia-ml1=${available_version}"
if $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends $nvidia_pkgs 2>/dev/null; then
msg_ok "Installed NVIDIA libraries (${available_version})"
else
msg_warn "Failed to install NVIDIA ${available_version} - trying unversioned"
$STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends libcuda1 libnvcuvid1 libnvidia-encode1 libnvidia-ml1 2>/dev/null || true
fi
# Add NVIDIA CUDA repository
if [[ ! -f /usr/share/keyrings/cuda-archive-keyring.gpg ]]; then
msg_info "Adding NVIDIA CUDA repository (${cuda_repo})"
local cuda_keyring
cuda_keyring="$(mktemp)"
if curl -fsSL -o "$cuda_keyring" "https://developer.download.nvidia.com/compute/cuda/repos/${cuda_repo}/x86_64/cuda-keyring_1.1-1_all.deb" 2>/dev/null; then
$STD dpkg -i "$cuda_keyring" 2>/dev/null || true
else
# No matching major version - try latest available or unversioned
msg_warn "No NVIDIA packages for version ${nvidia_major_version}.x found in repos"
available_version=$(apt-cache madison libcuda1 2>/dev/null | awk '{print $3}' | head -1 || true)
if [[ -n "$available_version" ]]; then
msg_info "Trying latest available: ${available_version} (may cause version mismatch)"
$STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends \
libcuda1="${available_version}" libnvcuvid1="${available_version}" \
libnvidia-encode1="${available_version}" libnvidia-ml1="${available_version}" 2>/dev/null ||
$STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends \
libcuda1 libnvcuvid1 libnvidia-encode1 libnvidia-ml1 2>/dev/null ||
msg_warn "NVIDIA library installation failed - GPU compute may not work"
else
msg_warn "No NVIDIA packages available in Debian repos - GPU support disabled"
fi
msg_warn "Failed to download NVIDIA CUDA keyring"
fi
$STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends nvidia-smi 2>/dev/null || true
rm -f "$cuda_keyring"
fi
else
# Debian 11/12: Use NVIDIA CUDA repository for version matching
local cuda_repo="debian12"
case "$os_codename" in
bullseye) cuda_repo="debian11" ;;
bookworm) cuda_repo="debian12" ;;
esac
# Add NVIDIA CUDA repository
if [[ ! -f /usr/share/keyrings/cuda-archive-keyring.gpg ]]; then
msg_info "Adding NVIDIA CUDA repository (${cuda_repo})"
local cuda_keyring
cuda_keyring="$(mktemp)"
if curl -fsSL -o "$cuda_keyring" "https://developer.download.nvidia.com/compute/cuda/repos/${cuda_repo}/x86_64/cuda-keyring_1.1-1_all.deb" 2>/dev/null; then
$STD dpkg -i "$cuda_keyring" 2>/dev/null || true
else
msg_warn "Failed to download NVIDIA CUDA keyring"
fi
rm -f "$cuda_keyring"
fi
# Pin NVIDIA repo for version matching
cat <<'NVIDIA_PIN' >/etc/apt/preferences.d/nvidia-cuda-pin
# Pin NVIDIA repo for version matching
cat <<'NVIDIA_PIN' >/etc/apt/preferences.d/nvidia-cuda-pin
Package: *
Pin: origin developer.download.nvidia.com
Pin-Priority: 1001
NVIDIA_PIN
$STD apt-get -y update 2>/dev/null || msg_warn "apt update failed - continuing anyway"
$STD apt -y update
# Extract major version for flexible matching (580.126.09 -> 580)
local nvidia_major_version="${nvidia_host_version%%.*}"
# Install version-matched NVIDIA libraries
local nvidia_pkgs="libcuda1=${nvidia_host_version}* libnvcuvid1=${nvidia_host_version}* libnvidia-encode1=${nvidia_host_version}* libnvidia-ml1=${nvidia_host_version}*"
# Check what versions are actually available in CUDA repo
local available_version=""
available_version=$(apt-cache madison libcuda1 2>/dev/null | awk '{print $3}' | grep -E "^${nvidia_major_version}\." | head -1 || true)
if [[ -n "$available_version" ]]; then
msg_info "Installing NVIDIA libraries (version ${available_version})"
local nvidia_pkgs="libcuda1=${available_version} libnvcuvid1=${available_version} libnvidia-encode1=${available_version} libnvidia-ml1=${available_version}"
if $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends $nvidia_pkgs 2>/dev/null; then
msg_ok "Installed version-matched NVIDIA libraries"
else
msg_warn "Version-pinned install failed - trying unpinned"
$STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends libcuda1 libnvcuvid1 libnvidia-encode1 libnvidia-ml1 2>/dev/null ||
msg_warn "NVIDIA library installation failed"
fi
msg_info "Installing NVIDIA libraries (version ${nvidia_host_version})"
if $STD apt -y install --no-install-recommends $nvidia_pkgs 2>/dev/null; then
msg_ok "Installed version-matched NVIDIA libraries"
else
msg_warn "Version-pinned install failed - trying unpinned"
if $STD apt -y install --no-install-recommends libcuda1 libnvcuvid1 libnvidia-encode1 libnvidia-ml1 2>/dev/null; then
msg_warn "Installed NVIDIA libraries (unpinned) - version mismatch may occur"
else
msg_warn "No NVIDIA packages for version ${nvidia_major_version}.x in CUDA repo (host: ${nvidia_host_version})"
# Try latest available version
available_version=$(apt-cache madison libcuda1 2>/dev/null | awk '{print $3}' | head -1 || true)
if [[ -n "$available_version" ]]; then
msg_info "Trying latest available: ${available_version} (version mismatch warning)"
if $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends \
libcuda1="${available_version}" libnvcuvid1="${available_version}" \
libnvidia-encode1="${available_version}" libnvidia-ml1="${available_version}" 2>/dev/null; then
msg_ok "Installed NVIDIA libraries (${available_version}) - version differs from host"
else
$STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends libcuda1 libnvcuvid1 libnvidia-encode1 libnvidia-ml1 2>/dev/null ||
msg_warn "NVIDIA library installation failed"
fi
else
msg_warn "No NVIDIA packages available in CUDA repo - GPU support disabled"
fi
msg_warn "NVIDIA library installation failed"
fi
$STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends nvidia-smi 2>/dev/null || true
fi
$STD apt -y install --no-install-recommends nvidia-smi 2>/dev/null || true
elif [[ "$os_id" == "ubuntu" ]]; then
# Ubuntu versioning
local ubuntu_cuda_repo=""
@@ -3151,45 +3081,20 @@ NVIDIA_PIN
rm -f "$cuda_keyring"
fi
$STD apt-get -y update 2>/dev/null || msg_warn "apt update failed - continuing anyway"
$STD apt -y update
# Extract major version for flexible matching
local nvidia_major_version="${nvidia_host_version%%.*}"
# Check what versions are available
local available_version=""
available_version=$(apt-cache madison libcuda1 2>/dev/null | awk '{print $3}' | grep -E "^${nvidia_major_version}\." | head -1 || true)
if [[ -n "$available_version" ]]; then
msg_info "Installing NVIDIA libraries (version ${available_version})"
local nvidia_pkgs="libcuda1=${available_version} libnvcuvid1=${available_version} libnvidia-encode1=${available_version} libnvidia-ml1=${available_version}"
if $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends $nvidia_pkgs 2>/dev/null; then
msg_ok "Installed version-matched NVIDIA libraries"
else
# Fallback to Ubuntu repo packages with versioned nvidia-utils
msg_warn "CUDA repo install failed - trying Ubuntu native packages (nvidia-utils-${nvidia_major_version})"
if $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends \
libnvidia-decode-${nvidia_major_version} libnvidia-encode-${nvidia_major_version} nvidia-utils-${nvidia_major_version} 2>/dev/null; then
msg_ok "Installed Ubuntu NVIDIA packages (${nvidia_major_version})"
else
msg_warn "NVIDIA driver installation failed - please install manually: apt install nvidia-utils-${nvidia_major_version}"
fi
fi
# Try version-matched install
local nvidia_pkgs="libcuda1=${nvidia_host_version}* libnvcuvid1=${nvidia_host_version}* libnvidia-encode1=${nvidia_host_version}* libnvidia-ml1=${nvidia_host_version}*"
if $STD apt -y install --no-install-recommends $nvidia_pkgs 2>/dev/null; then
msg_ok "Installed version-matched NVIDIA libraries"
else
msg_warn "No NVIDIA packages for version ${nvidia_major_version}.x in CUDA repo"
# Fallback to Ubuntu repo packages with versioned nvidia-utils
msg_info "Trying Ubuntu native packages (nvidia-utils-${nvidia_major_version})"
if $STD apt-get -y -o Dpkg::Options::="--force-confold" install --no-install-recommends \
libnvidia-decode-${nvidia_major_version} libnvidia-encode-${nvidia_major_version} nvidia-utils-${nvidia_major_version} 2>/dev/null; then
msg_ok "Installed Ubuntu NVIDIA packages (${nvidia_major_version})"
else
msg_warn "NVIDIA driver installation failed - please install manually: apt install nvidia-utils-${nvidia_major_version}"
fi
# Fallback to Ubuntu repo packages
$STD apt -y install --no-install-recommends libnvidia-decode libnvidia-encode nvidia-utils 2>/dev/null || msg_warn "NVIDIA installation failed"
fi
fi
# VA-API for hybrid setups (Intel + NVIDIA)
$STD apt-get -y install va-driver-all vainfo 2>/dev/null || true
$STD apt -y install va-driver-all vainfo 2>/dev/null || true
msg_ok "NVIDIA GPU configured"
}
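Once _setup_nvidia_gpu has run, a rough sanity check like the following can confirm that the container libraries match the host kernel driver; this is not part of the script, and it assumes nvidia-smi ended up installed and /proc/driver/nvidia/version is passed through.

```bash
# Compare the host kernel module version with the userspace driver libraries in the container.
host_ver=$(grep -oP '\d{3,}\.\d+\.\d+' /proc/driver/nvidia/version 2>/dev/null | head -1)
lib_ver=$(nvidia-smi --query-gpu=driver_version --format=csv,noheader 2>/dev/null | head -1)
echo "host driver: ${host_ver:-unknown}  container libraries: ${lib_ver:-unknown}"
if [[ "${host_ver%%.*}" != "${lib_ver%%.*}" ]]; then
  echo "major versions differ - NVENC/CUDA calls may fail" >&2
fi
```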
@@ -3591,11 +3496,10 @@ IP_FILE="/run/local-ip.env"
mkdir -p "$(dirname "$IP_FILE")"
get_current_ip() {
local targets=("8.8.8.8" "1.1.1.1" "192.168.1.1" "10.0.0.1" "172.16.0.1" "default")
local ip
# Try IPv4 targets first
local ipv4_targets=("8.8.8.8" "1.1.1.1" "192.168.1.1" "10.0.0.1" "172.16.0.1" "default")
for target in "${ipv4_targets[@]}"; do
for target in "${targets[@]}"; do
if [[ "$target" == "default" ]]; then
ip=$(ip route get 1 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
else
@@ -3607,23 +3511,6 @@ get_current_ip() {
fi
done
# IPv6 fallback: Try direct interface lookup for eth0
ip=$(ip -6 addr show eth0 scope global 2>/dev/null | awk '/inet6 / {print $2}' | cut -d/ -f1 | head -n1)
if [[ -n "$ip" && "$ip" =~ : ]]; then
echo "$ip"
return 0
fi
# IPv6 fallback: Use routing table with IPv6 targets (Google DNS, Cloudflare DNS)
local ipv6_targets=("2001:4860:4860::8888" "2606:4700:4700::1111")
for target in "${ipv6_targets[@]}"; do
ip=$(ip -6 route get "$target" 2>/dev/null | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')
if [[ -n "$ip" && "$ip" =~ : ]]; then
echo "$ip"
return 0
fi
done
return 1
}
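To make the awk extraction used above concrete, this standalone sketch runs it against a canned `ip route get` line (the addresses are invented for the example):

```bash
# Typical output of `ip route get 8.8.8.8`:
#   8.8.8.8 via 192.168.1.1 dev eth0 src 192.168.1.42 uid 0
# The awk loop looks for the literal "src" token and prints the field right after it.
sample="8.8.8.8 via 192.168.1.1 dev eth0 src 192.168.1.42 uid 0"
echo "$sample" | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}'
# prints: 192.168.1.42
```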
@@ -3662,145 +3549,58 @@ EOF
}
# ------------------------------------------------------------------------------
# Installs or updates MariaDB.
# Installs or updates MariaDB from official repo.
#
# Description:
# - Uses Debian/Ubuntu distribution packages by default (most reliable)
# - Only uses official MariaDB repository when a specific version is requested
# - Detects current MariaDB version and replaces it if necessary
# - Preserves existing database data
# - Dynamically determines latest GA version if "latest" is given
#
# Variables:
# MARIADB_VERSION - MariaDB version to install (optional)
# - Not set or "latest": Uses distribution packages (recommended)
# - Specific version (e.g. "11.4", "12.2"): Uses MariaDB official repo
# MARIADB_VERSION - MariaDB version to install (e.g. 10.11, latest) (default: latest)
# ------------------------------------------------------------------------------
setup_mariadb() {
local MARIADB_VERSION="${MARIADB_VERSION:-latest}"
local USE_DISTRO_PACKAGES=false
# Ensure non-interactive mode for all apt operations
export DEBIAN_FRONTEND=noninteractive
export NEEDRESTART_MODE=a
export NEEDRESTART_SUSPEND=1
# Resolve "latest" to actual version
if [[ "$MARIADB_VERSION" == "latest" ]]; then
if ! curl -fsI --max-time 10 http://mirror.mariadb.org/repo/ >/dev/null 2>&1; then
msg_warn "MariaDB mirror not reachable - trying mariadb_repo_setup fallback"
# Try using official mariadb_repo_setup script as fallback
if curl -fsSL --max-time 15 https://r.mariadb.com/downloads/mariadb_repo_setup 2>/dev/null | bash -s -- --skip-verify >/dev/null 2>&1; then
msg_ok "MariaDB repository configured via mariadb_repo_setup"
# Extract version from configured repo
MARIADB_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.list 2>/dev/null | head -n1 || echo "12.2")
else
msg_warn "mariadb_repo_setup failed - using hardcoded fallback version"
MARIADB_VERSION="12.2"
fi
else
MARIADB_VERSION=$(curl -fsSL --max-time 15 http://mirror.mariadb.org/repo/ 2>/dev/null |
grep -Eo '[0-9]+\.[0-9]+\.[0-9]+/' |
grep -vE 'rc/|rolling/' |
sed 's|/||' |
sort -Vr |
head -n1 || echo "")
# Determine installation method:
# - "latest" or empty: Use distribution packages (avoids mirror issues)
# - Specific version: Use MariaDB official repository
if [[ "$MARIADB_VERSION" == "latest" || -z "$MARIADB_VERSION" ]]; then
USE_DISTRO_PACKAGES=true
msg_info "Setup MariaDB (distribution packages)"
else
msg_info "Setup MariaDB $MARIADB_VERSION (official repository)"
if [[ -z "$MARIADB_VERSION" ]]; then
msg_warn "Could not parse latest GA MariaDB version from mirror - trying mariadb_repo_setup"
if curl -fsSL --max-time 15 https://r.mariadb.com/downloads/mariadb_repo_setup 2>/dev/null | bash -s -- --skip-verify >/dev/null 2>&1; then
msg_ok "MariaDB repository configured via mariadb_repo_setup"
MARIADB_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+\.[0-9]+' /etc/apt/sources.list.d/mariadb.list 2>/dev/null | head -n1 || echo "12.2")
else
msg_warn "mariadb_repo_setup failed - using hardcoded fallback version"
MARIADB_VERSION="12.2"
fi
fi
fi
fi
# Get currently installed version
local CURRENT_VERSION=""
CURRENT_VERSION=$(is_tool_installed "mariadb" 2>/dev/null) || true
# Pre-configure debconf to prevent any interactive prompts during install/upgrade
debconf-set-selections <<EOF
mariadb-server mariadb-server/feedback boolean false
mariadb-server mariadb-server/root_password password
mariadb-server mariadb-server/root_password_again password
EOF
# If specific version requested, also configure version-specific debconf
if [[ "$USE_DISTRO_PACKAGES" == "false" ]]; then
local MARIADB_MAJOR_MINOR
MARIADB_MAJOR_MINOR=$(echo "$MARIADB_VERSION" | awk -F. '{print $1"."$2}')
if [[ -n "$MARIADB_MAJOR_MINOR" ]]; then
debconf-set-selections <<EOF
mariadb-server-$MARIADB_MAJOR_MINOR mariadb-server/feedback boolean false
mariadb-server-$MARIADB_MAJOR_MINOR mariadb-server/root_password password
mariadb-server-$MARIADB_MAJOR_MINOR mariadb-server/root_password_again password
EOF
fi
fi
# ============================================================================
# DISTRIBUTION PACKAGES PATH (default, most reliable)
# ============================================================================
if [[ "$USE_DISTRO_PACKAGES" == "true" ]]; then
# Check if MariaDB was previously installed from official repo
local HAD_MARIADB_REPO=false
if [[ -f /etc/apt/sources.list.d/mariadb.sources ]] || [[ -f /etc/apt/sources.list.d/mariadb.list ]]; then
HAD_MARIADB_REPO=true
msg_info "Removing MariaDB official repository (switching to distribution packages)"
fi
# Clean up any existing MariaDB repository files to avoid conflicts
cleanup_old_repo_files "mariadb"
# If we had a repo, we need to refresh APT cache
if [[ "$HAD_MARIADB_REPO" == "true" ]]; then
$STD apt update || msg_warn "APT update had issues, continuing..."
fi
# Ensure APT is working
ensure_apt_working || return 1
# Check if installed version is from official repo and higher than distro version
# In this case, we keep the existing installation to avoid data issues
if [[ -n "$CURRENT_VERSION" ]]; then
# Get available distro version
local DISTRO_VERSION=""
DISTRO_VERSION=$(apt-cache policy mariadb-server 2>/dev/null | grep -E "Candidate:" | awk '{print $2}' | grep -oP '^\d+:\K\d+\.\d+\.\d+' || echo "")
if [[ -n "$DISTRO_VERSION" ]]; then
# Compare versions - if current is higher, keep it
local CURRENT_MAJOR DISTRO_MAJOR
CURRENT_MAJOR=$(echo "$CURRENT_VERSION" | awk -F. '{print $1}')
DISTRO_MAJOR=$(echo "$DISTRO_VERSION" | awk -F. '{print $1}')
if [[ "$CURRENT_MAJOR" -gt "$DISTRO_MAJOR" ]]; then
msg_warn "MariaDB $CURRENT_VERSION is already installed (higher than distro $DISTRO_VERSION)"
msg_warn "Keeping existing installation to preserve data integrity"
msg_warn "To use distribution packages, manually remove MariaDB first"
_setup_mariadb_runtime_dir
cache_installed_version "mariadb" "$CURRENT_VERSION"
msg_ok "Setup MariaDB $CURRENT_VERSION (existing installation kept)"
return 0
fi
fi
fi
# Install or upgrade MariaDB from distribution packages
if ! install_packages_with_retry "mariadb-server" "mariadb-client"; then
msg_error "Failed to install MariaDB packages from distribution"
return 1
fi
# Get installed version for caching
local INSTALLED_VERSION=""
INSTALLED_VERSION=$(mariadb --version 2>/dev/null | grep -oP '\d+\.\d+\.\d+' | head -n1 || echo "distro")
# Configure runtime directory and finish
_setup_mariadb_runtime_dir
cache_installed_version "mariadb" "$INSTALLED_VERSION"
msg_ok "Setup MariaDB $INSTALLED_VERSION (distribution packages)"
return 0
fi
# ============================================================================
# OFFICIAL REPOSITORY PATH (only when specific version requested)
# ============================================================================
# First, check if there's an old/broken repository that needs cleanup
if [[ -f /etc/apt/sources.list.d/mariadb.sources ]] || [[ -f /etc/apt/sources.list.d/mariadb.list ]]; then
local OLD_REPO_VERSION=""
OLD_REPO_VERSION=$(grep -oP 'repo/\K[0-9]+\.[0-9]+(\.[0-9]+)?' /etc/apt/sources.list.d/mariadb.sources 2>/dev/null || \
grep -oP 'repo/\K[0-9]+\.[0-9]+(\.[0-9]+)?' /etc/apt/sources.list.d/mariadb.list 2>/dev/null || echo "")
# Check if old repo points to a different version
if [[ -n "$OLD_REPO_VERSION" ]] && [[ "${OLD_REPO_VERSION%.*}" != "${MARIADB_VERSION%.*}" ]]; then
msg_info "Cleaning up old MariaDB repository (was: $OLD_REPO_VERSION, requested: $MARIADB_VERSION)"
cleanup_old_repo_files "mariadb"
$STD apt update || msg_warn "APT update had issues, continuing..."
fi
fi
# Scenario 1: Already installed at target version - just update packages
if [[ -n "$CURRENT_VERSION" && "$CURRENT_VERSION" == "$MARIADB_VERSION" ]]; then
msg_info "Update MariaDB $MARIADB_VERSION"
@@ -3839,7 +3639,9 @@ EOF
remove_old_tool_version "mariadb"
fi
# Scenario 3: Fresh install or version change with specific version
# Scenario 3: Fresh install or version change
msg_info "Setup MariaDB $MARIADB_VERSION"
# Prepare repository (cleanup + validation)
prepare_repository_setup "mariadb" || {
msg_error "Failed to prepare MariaDB repository"
@@ -3865,39 +3667,31 @@ EOF
return 1
}
# Set debconf selections for all potential versions
local MARIADB_MAJOR_MINOR
MARIADB_MAJOR_MINOR=$(echo "$MARIADB_VERSION" | awk -F. '{print $1"."$2}')
if [[ -n "$MARIADB_MAJOR_MINOR" ]]; then
echo "mariadb-server-$MARIADB_MAJOR_MINOR mariadb-server/feedback boolean false" | debconf-set-selections
fi
# Install packages with retry logic
export DEBIAN_FRONTEND=noninteractive
if ! install_packages_with_retry "mariadb-server" "mariadb-client"; then
# Fallback: try distribution packages
msg_warn "Failed to install MariaDB $MARIADB_VERSION from official repo, falling back to distribution packages..."
# Fallback: try without specific version
msg_warn "Failed to install MariaDB packages from upstream repo, trying distro fallback..."
cleanup_old_repo_files "mariadb"
$STD apt update || {
msg_warn "APT update also failed, continuing with cache"
}
if install_packages_with_retry "mariadb-server" "mariadb-client"; then
local FALLBACK_VERSION=""
FALLBACK_VERSION=$(mariadb --version 2>/dev/null | grep -oP '\d+\.\d+\.\d+' | head -n1 || echo "distro")
msg_warn "Installed MariaDB $FALLBACK_VERSION from distribution instead of requested $MARIADB_VERSION"
_setup_mariadb_runtime_dir
cache_installed_version "mariadb" "$FALLBACK_VERSION"
msg_ok "Setup MariaDB $FALLBACK_VERSION (fallback to distribution packages)"
return 0
else
msg_error "Failed to install MariaDB packages (both official repo and distribution)"
install_packages_with_retry "mariadb-server" "mariadb-client" || {
msg_error "Failed to install MariaDB packages (both upstream and distro)"
return 1
fi
}
fi
_setup_mariadb_runtime_dir
cache_installed_version "mariadb" "$MARIADB_VERSION"
msg_ok "Setup MariaDB $MARIADB_VERSION"
}
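As a usage sketch for the two paths described in the header comment (which default behaviour applies depends on which side of this diff is in effect):

```bash
# Let the function resolve "latest" on its own (default behaviour)
setup_mariadb

# Request a specific series; this path is served from the MariaDB repositories
MARIADB_VERSION="11.4" setup_mariadb
```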
# ------------------------------------------------------------------------------
# Helper function: Configure MariaDB runtime directory persistence
# ------------------------------------------------------------------------------
_setup_mariadb_runtime_dir() {
# Configure tmpfiles.d to ensure /run/mysqld directory is created on boot
# This fixes the issue where MariaDB fails to start after container reboot
msg_info "Configuring MariaDB runtime directory persistence"
# Create tmpfiles.d configuration with error handling
if ! printf '# Ensure /run/mysqld directory exists with correct permissions for MariaDB\nd /run/mysqld 0755 mysql mysql -\n' >/etc/tmpfiles.d/mariadb.conf; then
@@ -3917,6 +3711,11 @@ _setup_mariadb_runtime_dir() {
msg_warn "mysql user not found - directory created with correct permissions but ownership not set"
fi
fi
msg_ok "Configured MariaDB runtime directory persistence"
cache_installed_version "mariadb" "$MARIADB_VERSION"
msg_ok "Setup MariaDB $MARIADB_VERSION"
}
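If the tmpfiles rule should take effect immediately rather than on the next boot, standard systemd tooling can apply it; this is an optional follow-up, not something the script runs:

```bash
# Re-read the drop-in written above and create /run/mysqld right away.
systemd-tmpfiles --create /etc/tmpfiles.d/mariadb.conf
ls -ld /run/mysqld   # expected owner/group: mysql mysql
```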
# ------------------------------------------------------------------------------
@@ -4016,11 +3815,6 @@ function setup_mongodb() {
DISTRO_ID=$(get_os_info id)
DISTRO_CODENAME=$(get_os_info codename)
# Ensure non-interactive mode for all apt operations
export DEBIAN_FRONTEND=noninteractive
export NEEDRESTART_MODE=a
export NEEDRESTART_SUSPEND=1
# Check AVX support
if ! grep -qm1 'avx[^ ]*' /proc/cpuinfo; then
local major="${MONGO_VERSION%%.*}"
@@ -4139,11 +3933,6 @@ function setup_mysql() {
DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
# Ensure non-interactive mode for all apt operations
export DEBIAN_FRONTEND=noninteractive
export NEEDRESTART_MODE=a
export NEEDRESTART_SUSPEND=1
# Get currently installed version
local CURRENT_VERSION=""
CURRENT_VERSION=$(is_tool_installed "mysql" 2>/dev/null) || true
@@ -4238,6 +4027,7 @@ EOF
ensure_apt_working || return 1
# Try multiple package names with retry logic
export DEBIAN_FRONTEND=noninteractive
local mysql_install_success=false
if apt-cache search "^mysql-server$" 2>/dev/null | grep -q . &&
@@ -4525,20 +4315,11 @@ EOF
return 1
}
# Use different repository based on OS
if [[ "$DISTRO_ID" == "ubuntu" ]]; then
# Ubuntu: Use ondrej/php PPA
msg_info "Adding ondrej/php PPA for Ubuntu"
$STD apt install -y software-properties-common
# Don't use $STD for add-apt-repository as it uses background processes
add-apt-repository -y ppa:ondrej/php >>"$(get_active_logfile)" 2>&1
else
# Debian: Use Sury repository
manage_tool_repository "php" "$PHP_VERSION" "" "https://packages.sury.org/debsuryorg-archive-keyring.deb" || {
msg_error "Failed to setup PHP repository"
return 1
}
fi
manage_tool_repository "php" "$PHP_VERSION" "" "https://packages.sury.org/debsuryorg-archive-keyring.deb" || {
msg_error "Failed to setup PHP repository"
return 1
}
ensure_apt_working || return 1
$STD apt update
@@ -4561,14 +4342,6 @@ EOF
if [[ "$PHP_FPM" == "YES" ]]; then
MODULE_LIST+=" php${PHP_VERSION}-fpm"
# Create systemd override for PHP-FPM to fix runtime directory issues in LXC containers
mkdir -p /etc/systemd/system/php${PHP_VERSION}-fpm.service.d/
cat <<EOF >/etc/systemd/system/php${PHP_VERSION}-fpm.service.d/override.conf
[Service]
RuntimeDirectory=php
RuntimeDirectoryMode=0755
EOF
$STD systemctl daemon-reload
fi
# install apache2 with PHP support if requested
@@ -4693,11 +4466,6 @@ function setup_postgresql() {
DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
# Ensure non-interactive mode for all apt operations
export DEBIAN_FRONTEND=noninteractive
export NEEDRESTART_MODE=a
export NEEDRESTART_SUSPEND=1
# Get currently installed version
local CURRENT_PG_VERSION=""
if command -v psql >/dev/null; then
@@ -5136,146 +4904,6 @@ function setup_ruby() {
msg_ok "Setup Ruby $RUBY_VERSION"
}
# ------------------------------------------------------------------------------
# Installs or updates MeiliSearch search engine.
#
# Description:
# - Fresh install: Downloads binary, creates config/service, starts
# - Update: Checks for new release, updates binary if available
# - Waits for service to be ready before returning
# - Exports API keys for use by caller
#
# Variables:
# MEILISEARCH_BIND - Bind address (default: 127.0.0.1:7700)
# MEILISEARCH_ENV - Environment: production/development (default: production)
# MEILISEARCH_DB_PATH - Database path (default: /var/lib/meilisearch/data)
#
# Exports:
# MEILISEARCH_MASTER_KEY - The master key for admin access
# MEILISEARCH_API_KEY - The default search API key
# MEILISEARCH_API_KEY_UID - The UID of the default API key
#
# Example (install script):
# setup_meilisearch
#
# Example (CT update_script):
# setup_meilisearch
# ------------------------------------------------------------------------------
function setup_meilisearch() {
local MEILISEARCH_BIND="${MEILISEARCH_BIND:-127.0.0.1:7700}"
local MEILISEARCH_ENV="${MEILISEARCH_ENV:-production}"
local MEILISEARCH_DB_PATH="${MEILISEARCH_DB_PATH:-/var/lib/meilisearch/data}"
local MEILISEARCH_DUMP_DIR="${MEILISEARCH_DUMP_DIR:-/var/lib/meilisearch/dumps}"
local MEILISEARCH_SNAPSHOT_DIR="${MEILISEARCH_SNAPSHOT_DIR:-/var/lib/meilisearch/snapshots}"
# Get bind address for health checks
local MEILISEARCH_HOST="${MEILISEARCH_BIND%%:*}"
local MEILISEARCH_PORT="${MEILISEARCH_BIND##*:}"
[[ "$MEILISEARCH_HOST" == "0.0.0.0" ]] && MEILISEARCH_HOST="127.0.0.1"
# Update mode: MeiliSearch already installed
if [[ -f /usr/bin/meilisearch ]]; then
if check_for_gh_release "meilisearch" "meilisearch/meilisearch"; then
msg_info "Updating MeiliSearch"
systemctl stop meilisearch
fetch_and_deploy_gh_release "meilisearch" "meilisearch/meilisearch" "binary"
systemctl start meilisearch
msg_ok "Updated MeiliSearch"
fi
return 0
fi
# Fresh install
msg_info "Setup MeiliSearch"
# Install binary
fetch_and_deploy_gh_release "meilisearch" "meilisearch/meilisearch" "binary" || {
msg_error "Failed to install MeiliSearch binary"
return 1
}
# Download default config
curl -fsSL https://raw.githubusercontent.com/meilisearch/meilisearch/latest/config.toml -o /etc/meilisearch.toml || {
msg_error "Failed to download MeiliSearch config"
return 1
}
# Generate master key
MEILISEARCH_MASTER_KEY=$(openssl rand -base64 12)
export MEILISEARCH_MASTER_KEY
# Configure
sed -i \
-e "s|^env =.*|env = \"${MEILISEARCH_ENV}\"|" \
-e "s|^# master_key =.*|master_key = \"${MEILISEARCH_MASTER_KEY}\"|" \
-e "s|^db_path =.*|db_path = \"${MEILISEARCH_DB_PATH}\"|" \
-e "s|^dump_dir =.*|dump_dir = \"${MEILISEARCH_DUMP_DIR}\"|" \
-e "s|^snapshot_dir =.*|snapshot_dir = \"${MEILISEARCH_SNAPSHOT_DIR}\"|" \
-e 's|^# no_analytics = true|no_analytics = true|' \
-e "s|^http_addr =.*|http_addr = \"${MEILISEARCH_BIND}\"|" \
/etc/meilisearch.toml
# Create data directories
mkdir -p "${MEILISEARCH_DB_PATH}" "${MEILISEARCH_DUMP_DIR}" "${MEILISEARCH_SNAPSHOT_DIR}"
# Create systemd service
cat <<EOF >/etc/systemd/system/meilisearch.service
[Unit]
Description=Meilisearch
After=network.target
[Service]
ExecStart=/usr/bin/meilisearch --config-file-path /etc/meilisearch.toml
Restart=always
[Install]
WantedBy=multi-user.target
EOF
# Enable and start service
systemctl daemon-reload
systemctl enable -q --now meilisearch
# Wait for MeiliSearch to be ready (up to 30 seconds)
for i in {1..30}; do
if curl -s -o /dev/null -w "%{http_code}" "http://${MEILISEARCH_HOST}:${MEILISEARCH_PORT}/health" 2>/dev/null | grep -q "200"; then
break
fi
sleep 1
done
# Verify service is running
if ! systemctl is-active --quiet meilisearch; then
msg_error "MeiliSearch service failed to start"
return 1
fi
# Get API keys with retry logic
MEILISEARCH_API_KEY=""
for i in {1..10}; do
MEILISEARCH_API_KEY=$(curl -s -X GET "http://${MEILISEARCH_HOST}:${MEILISEARCH_PORT}/keys" \
-H "Authorization: Bearer ${MEILISEARCH_MASTER_KEY}" 2>/dev/null | \
grep -o '"key":"[^"]*"' | head -n 1 | sed 's/"key":"//;s/"//') || true
[[ -n "$MEILISEARCH_API_KEY" ]] && break
sleep 2
done
MEILISEARCH_API_KEY_UID=$(curl -s -X GET "http://${MEILISEARCH_HOST}:${MEILISEARCH_PORT}/keys" \
-H "Authorization: Bearer ${MEILISEARCH_MASTER_KEY}" 2>/dev/null | \
grep -o '"uid":"[^"]*"' | head -n 1 | sed 's/"uid":"//;s/"//') || true
export MEILISEARCH_API_KEY
export MEILISEARCH_API_KEY_UID
# Cache version
local MEILISEARCH_VERSION
MEILISEARCH_VERSION=$(/usr/bin/meilisearch --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1) || true
cache_installed_version "meilisearch" "${MEILISEARCH_VERSION:-unknown}"
msg_ok "Setup MeiliSearch ${MEILISEARCH_VERSION:-}"
}
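A short invocation sketch using the variables documented in the header; the bind address is only an example value:

```bash
# Listen on all interfaces instead of the 127.0.0.1:7700 default,
# then consume the keys the function exports.
MEILISEARCH_BIND="0.0.0.0:7700" setup_meilisearch
echo "master key: ${MEILISEARCH_MASTER_KEY}"
echo "search key: ${MEILISEARCH_API_KEY} (uid: ${MEILISEARCH_API_KEY_UID})"
```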
# ------------------------------------------------------------------------------
# Installs or upgrades ClickHouse database server.
#
@@ -5295,11 +4923,6 @@ function setup_clickhouse() {
DISTRO_ID=$(awk -F= '/^ID=/{print $2}' /etc/os-release | tr -d '"')
DISTRO_CODENAME=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release)
# Ensure non-interactive mode for all apt operations
export DEBIAN_FRONTEND=noninteractive
export NEEDRESTART_MODE=a
export NEEDRESTART_SUSPEND=1
# Resolve "latest" version
if [[ "$CLICKHOUSE_VERSION" == "latest" ]]; then
CLICKHOUSE_VERSION=$(curl -fsSL --max-time 15 https://packages.clickhouse.com/tgz/stable/ 2>/dev/null |
@@ -5362,6 +4985,7 @@ function setup_clickhouse() {
"main"
# Install packages with retry logic
export DEBIAN_FRONTEND=noninteractive
$STD apt update || {
msg_error "APT update failed for ClickHouse repository"
return 1
@@ -6004,4 +5628,4 @@ EOF
fi
msg_ok "Docker setup completed"
}
}


@@ -1,7 +1,7 @@
#!/usr/bin/env bash
SCRIPT_DIR="$(dirname "$0")"
source "$SCRIPT_DIR/../core/build.func"
# Copyright (c) 2021-2026 tteck
# Copyright (c) 2021-2025 tteck
# Author: tteck (tteckster)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://www.debian.org/
@@ -40,5 +40,5 @@ start
build_container
description
msg_ok "Completed successfully!\n"
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"


@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 tteck
# Copyright (c) 2021-2025 tteck
# Author: tteck (tteckster)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://www.debian.org/


@@ -58,11 +58,6 @@ export function ConfigurationModal({
// Advanced mode state
const [advancedVars, setAdvancedVars] = useState<EnvVars>({});
// Discovered SSH keys on the Proxmox host (advanced mode only)
const [discoveredSshKeys, setDiscoveredSshKeys] = useState<string[]>([]);
const [discoveredSshKeysLoading, setDiscoveredSshKeysLoading] = useState(false);
const [discoveredSshKeysError, setDiscoveredSshKeysError] = useState<string | null>(null);
// Validation errors
const [errors, setErrors] = useState<Record<string, string>>({});
@@ -124,38 +119,6 @@ export function ConfigurationModal({
}
}, [actualScript, server, mode, resources, slug]);
// Discover SSH keys on the Proxmox host when advanced mode is open
useEffect(() => {
if (!server?.id || !isOpen || mode !== 'advanced') {
setDiscoveredSshKeys([]);
setDiscoveredSshKeysError(null);
return;
}
let cancelled = false;
setDiscoveredSshKeysLoading(true);
setDiscoveredSshKeysError(null);
fetch(`/api/servers/${server.id}/discover-ssh-keys`)
.then((res) => {
if (!res.ok) throw new Error(res.status === 404 ? 'Server not found' : res.statusText);
return res.json();
})
.then((data: { keys?: string[] }) => {
if (!cancelled && Array.isArray(data.keys)) setDiscoveredSshKeys(data.keys);
})
.catch((err) => {
if (!cancelled) {
setDiscoveredSshKeys([]);
setDiscoveredSshKeysError(err instanceof Error ? err.message : 'Could not detect keys');
}
})
.finally(() => {
if (!cancelled) setDiscoveredSshKeysLoading(false);
});
return () => {
cancelled = true;
};
}, [server?.id, isOpen, mode]);
// Validation functions
const validateIPv4 = (ip: string): boolean => {
if (!ip) return true; // Empty is allowed (auto)
@@ -312,16 +275,6 @@ export function ConfigurationModal({
if ((hasPassword || hasSSHKey) && envVars.var_ssh !== 'no') {
envVars.var_ssh = 'yes';
}
// Normalize var_tags: accept both comma and semicolon, output comma-separated
const rawTags = envVars.var_tags;
if (typeof rawTags === 'string' && rawTags.trim() !== '') {
envVars.var_tags = rawTags
.split(/[,;]/)
.map((s) => s.trim())
.filter(Boolean)
.join(',');
}
}
// Remove empty string values (but keep 0, false, etc.)
@@ -691,13 +644,13 @@ export function ConfigurationModal({
</div>
<div className="col-span-2">
<label className="block text-sm font-medium text-foreground mb-2">
Tags (comma or semicolon separated)
Tags (comma-separated)
</label>
<Input
type="text"
value={typeof advancedVars.var_tags === 'boolean' ? '' : String(advancedVars.var_tags ?? '')}
onChange={(e) => updateAdvancedVar('var_tags', e.target.value)}
placeholder="e.g. tag1; tag2"
placeholder="community-script"
/>
</div>
</div>
@@ -724,40 +677,11 @@ export function ConfigurationModal({
<label className="block text-sm font-medium text-foreground mb-2">
SSH Authorized Key
</label>
{discoveredSshKeysLoading && (
<p className="text-sm text-muted-foreground mb-2">Detecting SSH keys...</p>
)}
{discoveredSshKeysError && !discoveredSshKeysLoading && (
<p className="text-sm text-muted-foreground mb-2">Could not detect SSH keys on the host</p>
)}
{discoveredSshKeys.length > 0 && !discoveredSshKeysLoading && (
<div className="mb-2">
<label htmlFor="discover-ssh-key" className="sr-only">Use detected key</label>
<select
id="discover-ssh-key"
className="w-full rounded-md border border-input bg-background px-3 py-2 text-sm text-foreground focus:ring-2 focus:ring-ring focus:outline-none mb-2"
value=""
onChange={(e) => {
const idx = e.target.value;
if (idx === '') return;
const key = discoveredSshKeys[Number(idx)];
if (key) updateAdvancedVar('var_ssh_authorized_key', key);
}}
>
<option value=""> Select or paste below </option>
{discoveredSshKeys.map((key, i) => (
<option key={i} value={i}>
{key.length > 44 ? `${key.slice(0, 44)}...` : key}
</option>
))}
</select>
</div>
)}
<Input
type="text"
value={typeof advancedVars.var_ssh_authorized_key === 'boolean' ? '' : String(advancedVars.var_ssh_authorized_key ?? '')}
onChange={(e) => updateAdvancedVar('var_ssh_authorized_key', e.target.value)}
placeholder="Or paste a public key: ssh-rsa AAAA..."
placeholder="ssh-rsa AAAA..."
/>
</div>
</div>


@@ -1,96 +0,0 @@
import type { NextRequest } from 'next/server';
import { NextResponse } from 'next/server';
import { getDatabase } from '../../../../../server/database-prisma';
import { getSSHExecutionService } from '../../../../../server/ssh-execution-service';
import type { Server } from '~/types/server';
const DISCOVER_TIMEOUT_MS = 10_000;
/** Match lines that look like SSH public keys (same as build.func) */
const SSH_PUBKEY_RE = /^(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-(ssh-ed25519|ecdsa-sha2-nistp256))\s+/;
/**
* Run a command on the Proxmox host and return buffered stdout.
* Resolves when the process exits or rejects on timeout/spawn error.
*/
function runRemoteCommand(
server: Server,
command: string,
timeoutMs: number
): Promise<{ stdout: string; exitCode: number }> {
const ssh = getSSHExecutionService();
return new Promise((resolve, reject) => {
const chunks: string[] = [];
let settled = false;
const finish = (stdout: string, exitCode: number) => {
if (settled) return;
settled = true;
clearTimeout(timer);
resolve({ stdout, exitCode });
};
const timer = setTimeout(() => {
if (settled) return;
settled = true;
reject(new Error('SSH discover keys timeout'));
}, timeoutMs);
ssh
.executeCommand(
server,
command,
(data: string) => chunks.push(data),
() => {},
(code: number) => finish(chunks.join(''), code)
)
.catch((err) => {
if (!settled) {
settled = true;
clearTimeout(timer);
reject(err);
}
});
});
}
export async function GET(
_request: NextRequest,
{ params }: { params: Promise<{ id: string }> }
) {
try {
const { id: idParam } = await params;
const id = parseInt(idParam);
if (isNaN(id)) {
return NextResponse.json({ error: 'Invalid server ID' }, { status: 400 });
}
const db = getDatabase();
const server = await db.getServerById(id) as Server | null;
if (!server) {
return NextResponse.json({ error: 'Server not found' }, { status: 404 });
}
// Same paths as native build.func ssh_discover_default_files()
const remoteScript = `bash -c 'for f in /root/.ssh/authorized_keys /root/.ssh/authorized_keys2 /root/.ssh/*.pub /etc/ssh/authorized_keys /etc/ssh/authorized_keys.d/* 2>/dev/null; do [ -f "$f" ] && [ -r "$f" ] && grep -E "^(ssh-(rsa|ed25519)|ecdsa-sha2-nistp256|sk-)" "$f" 2>/dev/null; done | sort -u'`;
const { stdout } = await runRemoteCommand(server, remoteScript, DISCOVER_TIMEOUT_MS);
const keys = stdout
.split(/\r?\n/)
.map((line) => line.trim())
.filter((line) => line.length > 0 && SSH_PUBKEY_RE.test(line));
return NextResponse.json({ keys });
} catch (error) {
console.error('Error discovering SSH keys:', error);
return NextResponse.json(
{
success: false,
error: error instanceof Error ? error.message : String(error),
},
{ status: 500 }
);
}
}
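For reference, the route file removed here exposed a GET endpoint that the configuration modal called; a manual check against it would have looked roughly like this (port 3000 and server id 1 are assumptions, and jq is only used for pretty-printing):

```bash
# Returns {"keys": ["ssh-ed25519 AAAA...", ...]} on success, or an error object with a status code.
curl -s http://localhost:3000/api/servers/1/discover-ssh-keys | jq .
```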