{
  "name": "Open WebUI",
  "slug": "openwebui",
  "categories": [
    20
  ],
  "date_created": "2024-10-24",
  "type": "ct",
  "updateable": true,
  "privileged": false,
  "interface_port": 8080,
  "documentation": "https://docs.openwebui.com/",
  "website": "https://openwebui.com/",
  "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/open-webui.webp",
  "config_path": "/opt/open-webui/.env",
  "description": "OpenWebUI is a self-hosted, web-based interface that allows you to run AI models entirely offline. It integrates with various LLM runners, such as OpenAI and Ollama, and supports features like markdown and LaTeX rendering, model management, and voice/video calls. It also offers multilingual support and the ability to generate images using APIs like DALL-E or ComfyUI",
  "install_methods": [
    {
      "type": "default",
      "script": "ct/openwebui.sh",
      "resources": {
        "cpu": 4,
        "ram": 8192,
        "hdd": 25,
        "os": "debian",
        "version": "12"
      }
    }
  ],
  "default_credentials": {
    "username": null,
    "password": null
  },
  "notes": []
}