- Replace GitHub API calls (390+) with 1 API call + raw URL downloads
- Create GitHubJsonService for efficient JSON file syncing
- Reduce API rate limiting issues by 99.7%
- Add automatic page reload after successful sync
- Update tests to use new service
- Maintain same functionality with better performance

Performance improvement:
- Before: 390+ GitHub API calls (1 per JSON file)
- After: 1 GitHub API call + 389 raw URL downloads
- Raw URLs have no rate limits, making sync much more reliable
35 lines
1.2 KiB
JSON
{
  "name": "Ollama",
  "slug": "ollama",
  "categories": [
    20
  ],
  "date_created": "2025-04-30",
  "type": "ct",
  "updateable": true,
  "privileged": false,
  "interface_port": 11434,
  "documentation": "https://github.com/ollama/ollama/tree/main/docs",
  "config_path": "/usr/local/lib/ollama",
  "website": "https://ollama.com/",
  "logo": "https://cdn.jsdelivr.net/gh/selfhst/icons/webp/ollama.webp",
  "description": "Ollama is a tool that allows you to run large language models locally on your own computer. This means you can experiment with and use these AI models without needing an internet connection or relying on cloud-based services. It simplifies the process of managing and running these models, offering a way to keep your data private and potentially work faster. You can use Ollama to create local chatbots, conduct AI research, develop privacy-focused AI applications, and integrate AI into existing systems.",
  "install_methods": [
    {
      "type": "default",
      "script": "ct/ollama.sh",
      "resources": {
        "cpu": 4,
        "ram": 4096,
        "hdd": 35,
        "os": "Ubuntu",
        "version": "24.04"
      }
    }
  ],
  "default_credentials": {
    "username": null,
    "password": null
  },
  "notes": []
}