diff --git a/dappnode_package.json b/dappnode_package.json
index f5052b0..d700284 100644
--- a/dappnode_package.json
+++ b/dappnode_package.json
@@ -4,12 +4,12 @@
   "upstream": [
     {
       "repo": "ollama/ollama",
-      "version": "v0.12.9",
+      "version": "v0.13.0",
       "arg": "OLLAMA_VERSION"
     },
     {
       "repo": "open-webui/open-webui",
-      "version": "v0.6.34",
+      "version": "v0.6.36",
       "arg": "WEBUI_VERSION"
     }
   ],
@@ -19,13 +19,9 @@
   "mainService": "webui",
   "author": "DAppNode Association (https://github.com/dappnode)",
   "license": "GPL-3.0",
-  "categories": [
-    "AI"
-  ],
+  "categories": ["AI"],
   "links": {
     "ui": "http://ollama-openwebui.dappnode:8080"
   },
-  "architectures": [
-    "linux/amd64"
-  ]
-}
\ No newline at end of file
+  "architectures": ["linux/amd64"]
+}
diff --git a/docker-compose.yml b/docker-compose.yml
index ebbb656..ce40afd 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,37 +1,28 @@
 version: "3.5"
-
 services:
   webui:
     build:
       context: webui
     container_name: openwebui.ollama-openwebui.dnp.dappnode.eth
     environment:
-      OLLAMA_BASE_URL: "http://ollama:11434"
+      OLLAMA_BASE_URL: http://ollama:11434
       WEBUI_AUTH: "true"
     volumes:
-      - "webui:/app/backend/data"
+      - webui:/app/backend/data
     restart: unless-stopped
     depends_on:
       - ollama
-
   ollama:
     build:
       context: ollama
     container_name: ollama.ollama-openwebui.dnp.dappnode.eth
     volumes:
-      - "ollama:/root/.ollama"
+      - ollama:/root/.ollama
     restart: unless-stopped
     environment:
-      # Show token‑throughput and other debug info in the container logs
-      OLLAMA_LOG_LEVEL=debug
-
-      # Enable the /metrics endpoint (Prometheus format)
-      OLLAMA_METRICS=1
-
-      # OPTIONAL – JSON‑formatted logs (easier to ship to Loki/Elastic)
-      OLLAMA_LOG_FORMAT=json
-
-      # OPTIONAL – Turn off outbound telemetry if you only want local metrics
-      OLLAMA_TELEMETRY=0
 
 volumes:
   ollama: {}
diff --git a/package_variants/amd/dappnode_package.json b/package_variants/amd/dappnode_package.json
index 77ca308..67dd63b 100644
--- a/package_variants/amd/dappnode_package.json
+++ b/package_variants/amd/dappnode_package.json
@@ -5,5 +5,4 @@
     "ui": "http://ollama-openwebui.dappnode:8080"
   },
   "description": "Run large language models locally on your DAppNode with GPU acceleration. This package combines Ollama (with AMD ROCm support for GPU inference) and Open WebUI (a ChatGPT-like interface) to provide a complete local AI solution.\n\n**Features:**\n- AMD GPU acceleration via ROCm\n- ChatGPT-like web interface\n- Complete privacy - all processing stays local\n- Support for multiple LLM models (Llama, Mistral, CodeLlama, etc.)\n\n**Requirements:**\n- AMD GPU with ROCm support\n- At least 8GB RAM (16GB+ recommended)\n- Sufficient storage for models (10GB+ recommended)\n"
-
-}
\ No newline at end of file
+}
diff --git a/package_variants/cpu/dappnode_package.json b/package_variants/cpu/dappnode_package.json
index 89be948..64314ee 100644
--- a/package_variants/cpu/dappnode_package.json
+++ b/package_variants/cpu/dappnode_package.json
@@ -1,12 +1,9 @@
 {
   "name": "ollama-cpu-openwebui.dnp.dappnode.eth",
-  "version": "0.1.0",
+  "version": "0.1.1",
   "links": {
     "ui": "http://ollama-cpu-openwebui.dappnode:8080"
   },
-  "architectures": [
-    "linux/amd64",
-    "linux/arm64"
-  ],
+  "architectures": ["linux/amd64", "linux/arm64"],
   "description": "Run large language models locally on your DAppNode. This package combines Ollama and Open WebUI (a ChatGPT-like interface) to provide a complete local AI solution.\n\n**Features:**\n- CPU acceleration for inference\n- ChatGPT-like web interface\n- Complete privacy - all processing stays local\n- Support for multiple LLM models (Llama, Mistral, CodeLlama, etc.)\n\n**Requirements:**\n- At least 8GB RAM (16GB+ recommended)\n- Sufficient storage for models (10GB+ recommended)\n"
-}
\ No newline at end of file
+}
diff --git a/package_variants/nvidia/dappnode_package.json b/package_variants/nvidia/dappnode_package.json
index bf3c4fc..74a7eba 100644
--- a/package_variants/nvidia/dappnode_package.json
+++ b/package_variants/nvidia/dappnode_package.json
@@ -1,13 +1,9 @@
 {
   "name": "ollama-nvidia-openwebui.dnp.dappnode.eth",
-  "version": "0.1.0",
+  "version": "0.1.1",
   "links": {
     "ui": "http://ollama-nvidia-openwebui.dappnode:8080"
   },
-  "architectures": [
-    "linux/amd64",
-    "linux/arm64"
-  ],
+  "architectures": ["linux/amd64", "linux/arm64"],
   "description": "Run large language models locally on your DAppNode with GPU acceleration. This package combines Ollama (with NVIDIA GPU support for GPU inference) and Open WebUI (a ChatGPT-like interface) to provide a complete local AI solution.\n\n**Features:**\n- NVIDIA GPU acceleration\n- ChatGPT-like web interface\n- Complete privacy - all processing stays local\n- Support for multiple LLM models (Llama, Mistral, CodeLlama, etc.)\n\n**Requirements:**\n- NVIDIA GPU with CUDA support\n- At least 8GB RAM (16GB+ recommended)\n- Sufficient storage for models (10GB+ recommended)\n"
-
-}
\ No newline at end of file
+}