2025-12-07 17:15:09 +02:00
|
|
|
version: '3.9'
|
|
|
|
|
|
|
|
|
|
services:
|
|
|
|
|
llama-swap:
|
|
|
|
|
image: ghcr.io/mostlygeek/llama-swap:cuda
|
|
|
|
|
container_name: llama-swap
|
|
|
|
|
ports:
|
|
|
|
|
- "8090:8080" # Map host port 8090 to container port 8080
|
|
|
|
|
volumes:
|
|
|
|
|
- ./models:/models # GGUF model files
|
|
|
|
|
- ./llama-swap-config.yaml:/app/config.yaml # llama-swap configuration
|
|
|
|
|
runtime: nvidia
|
|
|
|
|
restart: unless-stopped
|
|
|
|
|
healthcheck:
|
|
|
|
|
test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
|
|
|
|
|
interval: 10s
|
|
|
|
|
timeout: 5s
|
|
|
|
|
retries: 10
|
|
|
|
|
start_period: 30s # Give more time for initial model loading
|
|
|
|
|
environment:
|
|
|
|
|
- NVIDIA_VISIBLE_DEVICES=all
|
|
|
|
|
|
2026-01-09 00:03:59 +02:00
|
|
|
llama-swap-amd:
|
|
|
|
|
build:
|
|
|
|
|
context: .
|
|
|
|
|
dockerfile: Dockerfile.llamaswap-rocm
|
|
|
|
|
container_name: llama-swap-amd
|
|
|
|
|
ports:
|
|
|
|
|
- "8091:8080" # Map host port 8091 to container port 8080
|
|
|
|
|
volumes:
|
|
|
|
|
- ./models:/models # GGUF model files
|
|
|
|
|
- ./llama-swap-rocm-config.yaml:/app/config.yaml # llama-swap configuration for AMD
|
|
|
|
|
devices:
|
|
|
|
|
- /dev/kfd:/dev/kfd
|
|
|
|
|
- /dev/dri:/dev/dri
|
|
|
|
|
group_add:
|
|
|
|
|
- "985" # video group
|
|
|
|
|
- "989" # render group
|
|
|
|
|
restart: unless-stopped
|
|
|
|
|
healthcheck:
|
|
|
|
|
test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
|
|
|
|
|
interval: 10s
|
|
|
|
|
timeout: 5s
|
|
|
|
|
retries: 10
|
|
|
|
|
start_period: 30s # Give more time for initial model loading
|
|
|
|
|
environment:
|
|
|
|
|
- HSA_OVERRIDE_GFX_VERSION=10.3.0 # RX 6800 compatibility
|
|
|
|
|
- ROCM_PATH=/opt/rocm
|
|
|
|
|
- HIP_VISIBLE_DEVICES=0 # Use first AMD GPU
|
|
|
|
|
- GPU_DEVICE_ORDINAL=0
|
|
|
|
|
|
2025-12-07 17:15:09 +02:00
|
|
|
miku-bot:
|
|
|
|
|
build: ./bot
|
|
|
|
|
container_name: miku-bot
|
|
|
|
|
volumes:
|
|
|
|
|
- ./bot/memory:/app/memory
|
|
|
|
|
- /home/koko210Serve/ComfyUI/output:/app/ComfyUI/output:ro
|
|
|
|
|
- /var/run/docker.sock:/var/run/docker.sock # Allow container management
|
|
|
|
|
depends_on:
|
|
|
|
|
llama-swap:
|
|
|
|
|
condition: service_healthy
|
2026-01-09 00:03:59 +02:00
|
|
|
llama-swap-amd:
|
|
|
|
|
condition: service_healthy
|
2025-12-07 17:15:09 +02:00
|
|
|
environment:
|
|
|
|
|
- DISCORD_BOT_TOKEN=MTM0ODAyMjY0Njc3NTc0NjY1MQ.GXsxML.nNCDOplmgNxKgqdgpAomFM2PViX10GjxyuV8uw
|
|
|
|
|
- LLAMA_URL=http://llama-swap:8080
|
2026-01-09 00:03:59 +02:00
|
|
|
- LLAMA_AMD_URL=http://llama-swap-amd:8080 # Secondary AMD GPU endpoint
|
2025-12-07 17:15:09 +02:00
|
|
|
- TEXT_MODEL=llama3.1
|
|
|
|
|
- VISION_MODEL=vision
|
|
|
|
|
- OWNER_USER_ID=209381657369772032 # Your Discord user ID for DM analysis reports
|
2026-01-11 02:01:41 +02:00
|
|
|
- FACE_DETECTOR_STARTUP_TIMEOUT=60
|
2025-12-07 17:15:09 +02:00
|
|
|
ports:
|
|
|
|
|
- "3939:3939"
|
2026-01-16 23:37:34 +02:00
|
|
|
networks:
|
|
|
|
|
- default # Stay on default for llama-swap communication
|
|
|
|
|
- miku-voice # Connect to voice network for RVC/TTS
|
2025-12-07 17:15:09 +02:00
|
|
|
restart: unless-stopped
|
|
|
|
|
|
2026-01-17 03:14:40 +02:00
|
|
|
miku-stt:
|
|
|
|
|
build:
|
2026-01-20 23:06:17 +02:00
|
|
|
context: ./stt-realtime
|
2026-01-19 00:29:44 +02:00
|
|
|
dockerfile: Dockerfile
|
2026-01-17 03:14:40 +02:00
|
|
|
container_name: miku-stt
|
|
|
|
|
runtime: nvidia
|
|
|
|
|
environment:
|
2026-01-19 00:29:44 +02:00
|
|
|
- NVIDIA_VISIBLE_DEVICES=0 # GTX 1660
|
2026-01-17 03:14:40 +02:00
|
|
|
- CUDA_VISIBLE_DEVICES=0
|
|
|
|
|
- NVIDIA_DRIVER_CAPABILITIES=compute,utility
|
2026-01-20 23:06:17 +02:00
|
|
|
- STT_HOST=0.0.0.0
|
|
|
|
|
- STT_PORT=8766
|
|
|
|
|
- STT_HTTP_PORT=8767 # HTTP health check port
|
2026-01-17 03:14:40 +02:00
|
|
|
volumes:
|
2026-01-20 23:06:17 +02:00
|
|
|
- stt-models:/root/.cache/huggingface # Persistent model storage
|
2026-01-17 03:14:40 +02:00
|
|
|
ports:
|
2026-01-19 00:29:44 +02:00
|
|
|
- "8766:8766" # WebSocket port
|
2026-01-20 23:06:17 +02:00
|
|
|
- "8767:8767" # HTTP health check port
|
2026-01-17 03:14:40 +02:00
|
|
|
networks:
|
|
|
|
|
- miku-voice
|
|
|
|
|
deploy:
|
|
|
|
|
resources:
|
|
|
|
|
reservations:
|
|
|
|
|
devices:
|
|
|
|
|
- driver: nvidia
|
|
|
|
|
device_ids: ['0'] # GTX 1660
|
|
|
|
|
capabilities: [gpu]
|
|
|
|
|
restart: unless-stopped
|
|
|
|
|
|
2025-12-07 17:15:09 +02:00
|
|
|
anime-face-detector:
|
|
|
|
|
build: ./face-detector
|
|
|
|
|
container_name: anime-face-detector
|
|
|
|
|
runtime: nvidia
|
|
|
|
|
deploy:
|
|
|
|
|
resources:
|
|
|
|
|
reservations:
|
|
|
|
|
devices:
|
|
|
|
|
- capabilities: [gpu]
|
|
|
|
|
volumes:
|
|
|
|
|
- ./face-detector/api:/app/api
|
|
|
|
|
- ./face-detector/images:/app/images
|
|
|
|
|
ports:
|
|
|
|
|
- "7860:7860" # Gradio UI
|
|
|
|
|
- "6078:6078" # FastAPI API
|
|
|
|
|
environment:
|
|
|
|
|
- NVIDIA_VISIBLE_DEVICES=all
|
|
|
|
|
- NVIDIA_DRIVER_CAPABILITIES=compute,utility
|
|
|
|
|
restart: "no" # Don't auto-restart - only run on-demand
|
|
|
|
|
profiles:
|
|
|
|
|
- tools # Don't start by default
|
2026-01-16 23:37:34 +02:00
|
|
|
|
|
|
networks:
  # Pre-existing shared network for the RVC/TTS voice stack; created outside
  # this compose project.
  miku-voice:
    external: true
    name: miku-voice-network

volumes:
  # Persistent Hugging Face model cache used by miku-stt.
  stt-models:
    name: miku-stt-models