dappnode_package.json (14 changes: 5 additions & 9 deletions)
@@ -4,12 +4,12 @@
   "upstream": [
     {
       "repo": "ollama/ollama",
-      "version": "v0.12.9",
+      "version": "v0.13.0",
       "arg": "OLLAMA_VERSION"
     },
     {
       "repo": "open-webui/open-webui",
-      "version": "v0.6.34",
+      "version": "v0.6.36",
       "arg": "WEBUI_VERSION"
     }
   ],
@@ -19,13 +19,9 @@
   "mainService": "webui",
   "author": "DAppNode Association <admin@dappnode.io> (https://github.com/dappnode)",
   "license": "GPL-3.0",
-  "categories": [
-    "AI"
-  ],
+  "categories": ["AI"],
   "links": {
     "ui": "http://ollama-openwebui.dappnode:8080"
   },
-  "architectures": [
-    "linux/amd64"
-  ]
-}
+  "architectures": ["linux/amd64"]
+}
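Both bumps take effect at image build time: the manifest's arg fields name the build arguments that receive the pinned versions. As a minimal sketch of that wiring (the compose-level args block below is our assumption for illustration; the actual injection is handled by the DAppNode build tooling and the Dockerfiles in the webui and ollama contexts):

    services:
      ollama:
        build:
          context: ollama
          args:
            # assumed wiring: the pinned upstream version lands in this build
            # arg, which the ollama Dockerfile would declare as ARG OLLAMA_VERSION
            OLLAMA_VERSION: v0.13.0

Open WebUI would receive v0.6.36 through WEBUI_VERSION in the same way.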
docker-compose.yml (15 changes: 3 additions & 12 deletions)
@@ -1,37 +1,28 @@
 version: "3.5"

 services:
   webui:
     build:
       context: webui
     container_name: openwebui.ollama-openwebui.dnp.dappnode.eth
     environment:
-      OLLAMA_BASE_URL: "http://ollama:11434"
+      OLLAMA_BASE_URL: http://ollama:11434
       WEBUI_AUTH: "true"
     volumes:
-      - "webui:/app/backend/data"
+      - webui:/app/backend/data
     restart: unless-stopped
     depends_on:
       - ollama

   ollama:
     build:
       context: ollama
     container_name: ollama.ollama-openwebui.dnp.dappnode.eth
     volumes:
-      - "ollama:/root/.ollama"
+      - ollama:/root/.ollama
     restart: unless-stopped
-    environment:
-      # Show token-throughput and other debug info in the container logs
-      - OLLAMA_LOG_LEVEL=debug
-      # Enable the /metrics endpoint (Prometheus format)
-      - OLLAMA_METRICS=1
-      # OPTIONAL – JSON-formatted logs (easier to ship to Loki/Elastic)
-      - OLLAMA_LOG_FORMAT=json
-      # OPTIONAL – Turn off outbound telemetry if you only want local metrics
-      - OLLAMA_TELEMETRY=0

 volumes:
   ollama: {}
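Two notes on the environment block removed above. First, OLLAMA_LOG_LEVEL, OLLAMA_METRICS, OLLAMA_LOG_FORMAT, and OLLAMA_TELEMETRY do not appear to be settings the Ollama server recognizes, so the block was likely inert; the documented switch for verbose server logs is OLLAMA_DEBUG. If debug logging is still wanted, a minimal sketch (our suggestion, not part of this PR):

    ollama:
      environment:
        # OLLAMA_DEBUG=1 turns on verbose logging in the Ollama server
        - OLLAMA_DEBUG=1

Second, a quick sanity check after compose edits like this is to run docker compose config, which parses the file and prints the resolved configuration, failing loudly on YAML errors.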
package_variants/amd/dappnode_package.json (3 changes: 1 addition & 2 deletions)
@@ -5,5 +5,4 @@
     "ui": "http://ollama-openwebui.dappnode:8080"
   },
   "description": "Run large language models locally on your DAppNode with GPU acceleration. This package combines Ollama (with AMD ROCm support for GPU inference) and Open WebUI (a ChatGPT-like interface) to provide a complete local AI solution.\n\n**Features:**\n- AMD GPU acceleration via ROCm\n- ChatGPT-like web interface\n- Complete privacy - all processing stays local\n- Support for multiple LLM models (Llama, Mistral, CodeLlama, etc.)\n\n**Requirements:**\n- AMD GPU with ROCm support\n- At least 8GB RAM (16GB+ recommended)\n- Sufficient storage for models (10GB+ recommended)\n"
-
-}
+}
package_variants/cpu/dappnode_package.json (9 changes: 3 additions & 6 deletions)
@@ -1,12 +1,9 @@
 {
   "name": "ollama-cpu-openwebui.dnp.dappnode.eth",
-  "version": "0.1.0",
+  "version": "0.1.1",
   "links": {
     "ui": "http://ollama-cpu-openwebui.dappnode:8080"
   },
-  "architectures": [
-    "linux/amd64",
-    "linux/arm64"
-  ],
+  "architectures": ["linux/amd64", "linux/arm64"],
   "description": "Run large language models locally on your DAppNode. This package combines Ollama and Open WebUI (a ChatGPT-like interface) to provide a complete local AI solution.\n\n**Features:**\n- CPU acceleration for inference\n- ChatGPT-like web interface\n- Complete privacy - all processing stays local\n- Support for multiple LLM models (Llama, Mistral, CodeLlama, etc.)\n\n**Requirements:**\n- At least 8GB RAM (16GB+ recommended)\n- Sufficient storage for models (10GB+ recommended)\n"
-}
+}
package_variants/nvidia/dappnode_package.json (10 changes: 3 additions & 7 deletions)
@@ -1,13 +1,9 @@
 {
   "name": "ollama-nvidia-openwebui.dnp.dappnode.eth",
-  "version": "0.1.0",
+  "version": "0.1.1",
   "links": {
     "ui": "http://ollama-nvidia-openwebui.dappnode:8080"
   },
-  "architectures": [
-    "linux/amd64",
-    "linux/arm64"
-  ],
+  "architectures": ["linux/amd64", "linux/arm64"],
   "description": "Run large language models locally on your DAppNode with GPU acceleration. This package combines Ollama (with NVIDIA GPU support for GPU inference) and Open WebUI (a ChatGPT-like interface) to provide a complete local AI solution.\n\n**Features:**\n- NVIDIA GPU acceleration\n- ChatGPT-like web interface\n- Complete privacy - all processing stays local\n- Support for multiple LLM models (Llama, Mistral, CodeLlama, etc.)\n\n**Requirements:**\n- NVIDIA GPU with CUDA support\n- At least 8GB RAM (16GB+ recommended)\n- Sufficient storage for models (10GB+ recommended)\n"
-
-}
+}
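Across the three variant files, only name, version, links, architectures, and description are overridden. Assuming the DAppNode SDK shallow-merges a variant manifest over the base dappnode_package.json (that merge behavior is our assumption; it is not shown in this PR), the effective nvidia manifest after this change would look roughly like this, with upstream, author, and the full description elided for brevity:

    {
      "name": "ollama-nvidia-openwebui.dnp.dappnode.eth",
      "version": "0.1.1",
      "mainService": "webui",
      "license": "GPL-3.0",
      "categories": ["AI"],
      "links": { "ui": "http://ollama-nvidia-openwebui.dappnode:8080" },
      "architectures": ["linux/amd64", "linux/arm64"],
      "description": "Run large language models locally on your DAppNode with GPU acceleration. …"
    }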