From 015e546e7a9e92914515aab7ad6dc6e2169f6c55 Mon Sep 17 00:00:00 2001
From: adrien
Date: Fri, 20 Jun 2025 18:29:27 +0000
Subject: [PATCH] Added openwebui + ollama for selfhosted LLM chatbot

---
 apps.yml                         |  1 +
 config/prometheus/prometheus.yml |  4 ++--
 docker-compose.yml               |  1 +
 llm.yml                          | 28 ++++++++++++++++++++++++++++
 monitoring.yml                   | 19 ++++++++++++++++++-
 5 files changed, 50 insertions(+), 3 deletions(-)
 create mode 100644 llm.yml

diff --git a/apps.yml b/apps.yml
index 50eda9d..0e7bffb 100644
--- a/apps.yml
+++ b/apps.yml
@@ -89,6 +89,7 @@ services:
 
   siyuan:
     image: b3log/siyuan
+    container_name: siyuan
    command: ['--workspace=/siyuan/workspace/']
     volumes:
       - /siyuan/workspace:/siyuan/workspace
diff --git a/config/prometheus/prometheus.yml b/config/prometheus/prometheus.yml
index d852f2c..bb14310 100644
--- a/config/prometheus/prometheus.yml
+++ b/config/prometheus/prometheus.yml
@@ -25,6 +25,6 @@ scrape_configs:
     static_configs:
       - targets: ['gitea:3000']
 
-  - job_name: 'affine'
+  - job_name: 'nvidia_gpu_exporter'
     static_configs:
-      - targets: ['affine:3010']
+      - targets: ['nvidia-gpu-exporter:9835']
diff --git a/docker-compose.yml b/docker-compose.yml
index cbf2d48..39d2af4 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,6 +1,7 @@
 include:
   - apps.yml
   - gitea.yml
+  - llm.yml
   - monitoring.yml
 
 services:
diff --git a/llm.yml b/llm.yml
new file mode 100644
index 0000000..c39171a
--- /dev/null
+++ b/llm.yml
@@ -0,0 +1,28 @@
+services:
+  openwebui:
+    image: ghcr.io/open-webui/open-webui:main
+    container_name: openwebui
+    restart: unless-stopped
+    volumes:
+      - ./hdd0/openwebui:/app/backend/data
+    labels:
+      - "traefik.enable=true"
+      - "traefik.http.routers.openwebui.rule=Host(`openwebui.bouvais.lu`)"
+      - "traefik.http.routers.openwebui.entrypoints=websecure"
+      - "traefik.http.routers.openwebui.tls.certresolver=myresolver"
+      - "traefik.http.services.openwebui.loadbalancer.server.port=8080"
+    environment:
+      OLLAMA_BASE_URLS: http://ollama:11434
+
+  ollama:
+    image: ollama/ollama:latest
+    container_name: ollama
+    volumes:
+      - ./hdd0/ollama:/root/.ollama
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              capabilities: ["gpu"]
+              count: all
diff --git a/monitoring.yml b/monitoring.yml
index 4b45975..fc6da38 100644
--- a/monitoring.yml
+++ b/monitoring.yml
@@ -78,4 +78,21 @@ services:
       - '--path.rootfs=/rootfs'
       - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)'
 
-
+  nvidia-gpu-exporter:
+    image: utkuozdemir/nvidia_gpu_exporter:1.3.1
+    container_name: nvidia-gpu-exporter
+    restart: unless-stopped
+    privileged: true # This might be needed for full access to devices, or try without first
+    devices:
+      - /dev/nvidia0:/dev/nvidia0
+    volumes:
+      - /usr/bin/nvidia-smi:/usr/bin/nvidia-smi:ro
+      - /usr/lib/x86_64-linux-gnu/libnvidia-ml.so:/usr/lib/x86_64-linux-gnu/libnvidia-ml.so:ro
+      - /usr/lib/x86_64-linux-gnu/libnvidia-ml.so.1:/usr/lib/x86_64-linux-gnu/libnvidia-ml.so.1:ro
+    command:
+      - --web.listen-address=:9835
+      - --web.telemetry-path=/metrics
+      - --nvidia-smi-command=nvidia-smi
+      - --log.level=info
+      - --query-field-names=AUTO
+      - --log.format=logfmt