# docker-compose.yml — Miku Discord bot stack:
# LLM backends (CUDA + ROCm llama-swap), Cheshire Cat memory layer with a
# Qdrant vector store, the Discord bot itself, realtime STT, and an
# on-demand anime face detector tool.
services:
  # ========== LLM Backends ==========
  llama-swap:
    image: ghcr.io/mostlygeek/llama-swap:cuda
    container_name: llama-swap
    ports:
      - "8090:8080"  # Map host port 8090 to container port 8080
    volumes:
      - ./models:/models  # GGUF model files
      - ./llama-swap-config.yaml:/app/config.yaml  # llama-swap configuration
      - ./llama31_notool_template.jinja:/app/llama31_notool_template.jinja  # Custom chat template
    runtime: nvidia
    restart: unless-stopped
    healthcheck:
      # Probe the in-container port (8080), not the host-mapped one.
      test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 30s  # Give more time for initial model loading
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - LOG_LEVEL=debug  # Enable verbose logging for llama-swap

  llama-swap-amd:
    build:
      context: .
      dockerfile: Dockerfile.llamaswap-rocm
    container_name: llama-swap-amd
    ports:
      - "8091:8080"  # Map host port 8091 to container port 8080
    volumes:
      - ./models:/models  # GGUF model files
      - ./llama-swap-rocm-config.yaml:/app/config.yaml  # llama-swap configuration for AMD
      - ./llama31_notool_template.jinja:/app/llama31_notool_template.jinja  # Custom chat template
    # ROCm needs direct access to the kernel fusion driver and DRM devices.
    devices:
      - /dev/kfd:/dev/kfd
      - /dev/dri:/dev/dri
    group_add:
      - "985"  # video group
      - "989"  # render group
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 30s  # Give more time for initial model loading
    environment:
      - HSA_OVERRIDE_GFX_VERSION=10.3.0  # RX 6800 compatibility
      - ROCM_PATH=/opt/rocm
      - HIP_VISIBLE_DEVICES=0  # Use first AMD GPU
      - GPU_DEVICE_ORDINAL=0

  # ========== Cheshire Cat AI (Memory & Personality) ==========
  cheshire-cat:
    image: ghcr.io/cheshire-cat-ai/core:1.6.2
    container_name: miku-cheshire-cat
    depends_on:
      cheshire-cat-vector-memory:
        condition: service_started
      llama-swap-amd:
        condition: service_healthy
    environment:
      - PYTHONUNBUFFERED=1
      - WATCHFILES_FORCE_POLLING=true
      - CORE_HOST=localhost
      - CORE_PORT=1865
      - QDRANT_HOST=cheshire-cat-vector-memory
      - QDRANT_PORT=6333
      - CORE_USE_SECURE_PROTOCOLS=false
      - API_KEY=
      - LOG_LEVEL=INFO
      - DEBUG=true
      - SAVE_MEMORY_SNAPSHOTS=false
      - OPENAI_API_BASE=http://llama-swap-amd:8080/v1
    ports:
      - "1865:80"  # Cat admin UI on host port 1865
    volumes:
      - ./cheshire-cat/cat/static:/app/cat/static
      - ./cat-plugins:/app/cat/plugins  # Shared plugins directory
      - ./cheshire-cat/cat/data:/app/cat/data  # Personality data (lore, prompts)
      # Mount canonical bot/persona/ files into Cat (single source of truth)
      - ./bot/persona/evil:/app/cat/data/evil
      - ./bot/persona/miku:/app/cat/data/miku
      - ./bot/moods:/app/moods  # Mood description files
      - ./bot/memory:/app/memory  # Profile pictures and other memory files
      - ./cheshire-cat/cat/log.py:/app/cat/log.py  # Patched: fix loguru KeyError for third-party libs
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:80/"]
      interval: 15s
      timeout: 10s
      retries: 8
      start_period: 45s  # Cat takes a while to load embedder + plugins

  cheshire-cat-vector-memory:
    image: qdrant/qdrant:v1.9.1
    container_name: miku-qdrant
    environment:
      - LOG_LEVEL=INFO
    ports:
      - "6333:6333"  # Qdrant REST API (for debugging)
    # Qdrant keeps many segment files open; raise the fd limit accordingly.
    ulimits:
      nofile:
        soft: 65536
        hard: 65536
    volumes:
      - ./cheshire-cat/cat/long_term_memory/vector:/qdrant/storage
    restart: unless-stopped

  # ========== Discord Bot ==========
  miku-bot:
    build: ./bot
    container_name: miku-bot
    environment:
      - TZ=Europe/Sofia
    volumes:
      - ./bot/memory:/app/memory
      - /home/koko210Serve/ComfyUI/output:/app/ComfyUI/output:ro
      - /var/run/docker.sock:/var/run/docker.sock  # Allow container management
      - ./.env:/app/.env:ro  # Mount .env file (read-only)
      - ./config.yaml:/app/config.yaml:ro  # Mount config file (read-only)
    depends_on:
      llama-swap:
        condition: service_healthy
      llama-swap-amd:
        condition: service_healthy
      cheshire-cat:
        condition: service_healthy
    env_file:
      - .env  # Load environment variables from .env file
    ports:
      - "3939:3939"
    networks:
      - default  # Stay on default for llama-swap + cheshire-cat communication
      - miku-voice  # Connect to voice network for RVC/TTS
      - proxy  # Traefik proxy network for miku.panel
    restart: unless-stopped
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.miku.rule=Host(`miku.panel`)"
      - "traefik.http.routers.miku.entrypoints=websecure"
      - "traefik.http.routers.miku.tls=true"
      - "traefik.http.services.miku.loadbalancer.server.port=3939"
      - "traefik.docker.network=proxy"

  # ========== Voice / STT ==========
  miku-stt:
    build:
      context: ./stt-realtime
      dockerfile: Dockerfile
    container_name: miku-stt
    runtime: nvidia
    environment:
      - NVIDIA_VISIBLE_DEVICES=0  # GTX 1660
      - CUDA_VISIBLE_DEVICES=0
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
      - STT_HOST=0.0.0.0
      - STT_PORT=8766
      - STT_HTTP_PORT=8767  # HTTP health check port
    volumes:
      - stt-models:/root/.cache/huggingface  # Persistent model storage
    ports:
      - "8766:8766"  # WebSocket port
      - "8767:8767"  # HTTP health check port
    networks:
      - miku-voice
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ['0']  # GTX 1660
              capabilities: [gpu]
    restart: unless-stopped

  # ========== Tools (on-demand) ==========
  anime-face-detector:
    build: ./face-detector
    container_name: anime-face-detector
    runtime: nvidia
    deploy:
      resources:
        reservations:
          devices:
            - capabilities: [gpu]
    volumes:
      - ./face-detector/api:/app/api
      - ./face-detector/images:/app/images
    ports:
      - "7860:7860"  # Gradio UI
      - "6078:6078"  # FastAPI API
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
    restart: "no"  # Don't auto-restart - only run on-demand
    profiles:
      - tools  # Don't start by default

networks:
  miku-voice:
    external: true
    name: miku-voice-network
  proxy:
    name: proxy
    external: true

volumes:
  stt-models:
    name: miku-stt-models