1
0

Added OpenWebUI + Ollama for a self-hosted LLM chatbot

This commit is contained in:
Adrien Bouvais 2025-06-20 18:29:27 +00:00
parent 142e8f878d
commit 015e546e7a
5 changed files with 50 additions and 3 deletions

View File

@ -89,6 +89,7 @@ services:
# SiYuan note-taking service; the explicit --workspace flag pins the workspace
# path inside the container to the same path used by the bind mount below.
siyuan:
image: b3log/siyuan
container_name: siyuan
command: ['--workspace=/siyuan/workspace/']
volumes:
- /siyuan/workspace:/siyuan/workspace

View File

@ -25,6 +25,6 @@ scrape_configs:
static_configs:
- targets: ['gitea:3000']
- job_name: 'affine'
- job_name: 'nvidia_gpu_exporter'
static_configs:
- targets: ['affine:3010']
- targets: ['nvidia-gpu-exporter:9835']

View File

@ -1,6 +1,7 @@
# Compose sub-files merged into this top-level stack; llm.yml carries the
# OpenWebUI + Ollama services introduced by this commit.
include:
- apps.yml
- gitea.yml
- llm.yml
- monitoring.yml
services:

28
llm.yml Normal file
View File

@ -0,0 +1,28 @@
services:
  # Web chat frontend; talks to the sibling "ollama" service for inference.
  openwebui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: openwebui
    restart: unless-stopped
    volumes:
      - ./hdd0/openwebui:/app/backend/data
    labels:
      # Exposed through the existing Traefik reverse proxy over HTTPS.
      - "traefik.enable=true"
      - "traefik.http.routers.openwebui.rule=Host(`openwebui.bouvais.lu`)"
      - "traefik.http.routers.openwebui.entrypoints=websecure"
      - "traefik.http.routers.openwebui.tls.certresolver=myresolver"
      - "traefik.http.services.openwebui.loadbalancer.server.port=8080"
    environment:
      # Ollama API endpoint(s) reachable on the compose network.
      OLLAMA_BASE_URLS: http://ollama:11434

  # Model server backend holding downloaded models under ./hdd0/ollama.
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    # Fix: openwebui restarts with the host but its backend did not; give
    # ollama the same restart policy so the stack survives reboots intact.
    restart: unless-stopped
    volumes:
      - ./hdd0/ollama:/root/.ollama
    deploy:
      resources:
        reservations:
          devices:
            # Reserve all host NVIDIA GPUs for inference.
            - driver: nvidia
              count: all
              capabilities: ["gpu"]

View File

@ -78,4 +78,21 @@ services:
- '--path.rootfs=/rootfs'
- '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)'
# Prometheus exporter publishing NVIDIA GPU metrics on port 9835 by shelling
# out to the host's nvidia-smi (see the bind mounts below).
nvidia-gpu-exporter:
image: utkuozdemir/nvidia_gpu_exporter:1.3.1
container_name: nvidia-gpu-exporter
restart: unless-stopped
privileged: true # This might be needed for full access to devices, or try without first
devices:
# Pass through the first GPU device node; NOTE(review): multi-GPU hosts would
# need the other /dev/nvidia* nodes too — confirm against the host.
- /dev/nvidia0:/dev/nvidia0
volumes:
# Mount the host's nvidia-smi binary and NVML libraries read-only —
# presumably because the exporter image does not ship them; verify paths
# match the host's driver installation.
- /usr/bin/nvidia-smi:/usr/bin/nvidia-smi:ro
- /usr/lib/x86_64-linux-gnu/libnvidia-ml.so:/usr/lib/x86_64-linux-gnu/libnvidia-ml.so:ro
- /usr/lib/x86_64-linux-gnu/libnvidia-ml.so.1:/usr/lib/x86_64-linux-gnu/libnvidia-ml.so.1:ro
command:
# Listen address/path match the Prometheus scrape target ['nvidia-gpu-exporter:9835'].
- --web.listen-address=:9835
- --web.telemetry-path=/metrics
- --nvidia-smi-command=nvidia-smi
- --log.level=info
# AUTO lets the exporter discover which query fields the driver supports.
- --query-field-names=AUTO
- --log.format=logfmt