# globals.py
import os
from collections import defaultdict, deque

import discord
from apscheduler.schedulers.asyncio import AsyncIOScheduler

scheduler = AsyncIOScheduler()

GUILD_SETTINGS = {}

# Stores last 5 exchanges per user (as deque)
conversation_history = defaultdict(lambda: deque(maxlen=5))
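
# Illustrative sketch (not executed): how a handler might record an exchange.
# Because of maxlen=5, appending a sixth entry silently drops the oldest one,
# so each user keeps only their most recent five exchanges.
#
#   conversation_history[user_id].append(
#       {"user": message_content, "bot": reply_text}
#   )
#   history = list(conversation_history[user_id])  # oldest -> newest
#
# The entry shape ("user"/"bot" keys) is an assumption for illustration;
# the real handlers may store a different structure.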

DISCORD_BOT_TOKEN = os.getenv("DISCORD_BOT_TOKEN")

# Autonomous V2 Debug Mode (set to True to see detailed decision logging)
AUTONOMOUS_DEBUG = os.getenv("AUTONOMOUS_DEBUG", "false").lower() == "true"

# Voice Chat Debug Mode (set to True for manual commands and text notifications)
# When False (field deployment), voice chat operates silently without command notifications
VOICE_DEBUG_MODE = os.getenv("VOICE_DEBUG_MODE", "false").lower() == "true"

# Llama.cpp server settings (via llama-swap)
LLAMA_URL = os.getenv("LLAMA_URL", "http://llama-swap:8080")
LLAMA_AMD_URL = os.getenv("LLAMA_AMD_URL", "http://llama-swap-amd:8080")  # Secondary AMD GPU
TEXT_MODEL = os.getenv("TEXT_MODEL", "llama3.1")
VISION_MODEL = os.getenv("VISION_MODEL", "vision")
EVIL_TEXT_MODEL = os.getenv("EVIL_TEXT_MODEL", "darkidol")  # Uncensored model for evil mode
JAPANESE_TEXT_MODEL = os.getenv("JAPANESE_TEXT_MODEL", "swallow")  # Llama 3.1 Swallow model for Japanese
OWNER_USER_ID = int(os.getenv("OWNER_USER_ID", "209381657369772032"))  # Bot owner's Discord user ID for reports
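
# Illustrative sketch (not executed): llama-swap proxies llama.cpp's
# OpenAI-compatible server, so a chat request would typically be POSTed to
# LLAMA_URL + "/v1/chat/completions", with the "model" field selecting which
# backend llama-swap loads. Endpoint path and payload shape are assumptions
# based on the standard llama.cpp / OpenAI-compatible API, not this repo.
#
#   import aiohttp
#
#   async def _example_chat(prompt: str) -> str:
#       payload = {
#           "model": TEXT_MODEL,
#           "messages": [{"role": "user", "content": prompt}],
#       }
#       async with aiohttp.ClientSession() as session:
#           async with session.post(f"{LLAMA_URL}/v1/chat/completions", json=payload) as resp:
#               data = await resp.json()
#       return data["choices"][0]["message"]["content"]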

# Language mode for Miku (english or japanese)
LANGUAGE_MODE = "english"  # Can be "english" or "japanese"

# Fish.audio TTS settings
FISH_API_KEY = os.getenv("FISH_API_KEY", "478d263d8c094e0c8993aae3e9cf9159")
MIKU_VOICE_ID = os.getenv("MIKU_VOICE_ID", "b28b79555e8c4904ac4d048c36e716b7")

# Set up Discord client
intents = discord.Intents.default()
intents.message_content = True
intents.members = True
intents.presences = True
client = discord.Client(intents=intents)
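
# Illustrative sketch (not executed): the client defined above is typically
# started from the bot's entry point with the token read earlier in this
# module, e.g.:
#
#   client.run(DISCORD_BOT_TOKEN)
#
# Where exactly this call lives in the real entry point is an assumption;
# discord.py also supports the async client.start(...) form.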

# Note: llama-swap handles model loading/unloading automatically
# No need to track current_model anymore

KINDNESS_KEYWORDS = [
    "thank you", "love you", "luv u", "you're the best", "so cute",
    "adorable", "amazing", "sweet", "kind", "great job", "well done",
    "precious", "good girl", "cutie", "angel", "my favorite", "so helpful"
]
HEART_REACTIONS = ["💙", "💝", "💖", "💕", "💜", "❤️🔥", "☺️"]
kindness_reacted_messages = set()
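
# Illustrative sketch (not executed): kindness_reacted_messages holds the IDs
# of messages that have already received a heart so each message is reacted to
# at most once. A message handler might use these globals roughly like this
# (the flow is an assumption for illustration, not the repo's actual handler):
#
#   import random
#
#   async def _example_kindness_check(message):
#       if message.id in kindness_reacted_messages:
#           return
#       content = message.content.lower()
#       if any(keyword in content for keyword in KINDNESS_KEYWORDS):
#           await message.add_reaction(random.choice(HEART_REACTIONS))
#           kindness_reacted_messages.add(message.id)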

# DM Mood System (simple, auto-rotating only)
DM_MOOD = "neutral"
DM_MOOD_DESCRIPTION = "I'm feeling neutral and balanced today."
AVAILABLE_MOODS = [
    "bubbly", "sleepy", "curious", "shy", "serious", "excited", "silly",
    "melancholy", "flirty", "romantic", "irritated", "angry", "neutral", "asleep"
]
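
# Illustrative sketch (not executed): the module-level `scheduler` above could
# rotate DM_MOOD by picking from AVAILABLE_MOODS on a timer. The interval and
# job body below are assumptions for illustration; this file does not define
# the actual rotation job.
#
#   import random
#
#   def _example_rotate_mood():
#       global DM_MOOD
#       DM_MOOD = random.choice(AVAILABLE_MOODS)
#
#   scheduler.add_job(_example_rotate_mood, "interval", hours=6)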

# Evil Mode System
EVIL_MODE = False
EVIL_DM_MOOD = "evil_neutral"
EVIL_DM_MOOD_DESCRIPTION = "Evil Miku is calculating and cold."
EVIL_AVAILABLE_MOODS = ["aggressive", "cunning", "sarcastic", "evil_neutral"]
EVIL_MOOD_EMOJIS = {
    "aggressive": "👿",
    "cunning": "🐍",
    "sarcastic": "😈",
    "evil_neutral": ""
}

# Bipolar Mode System (both Mikus can argue via webhooks)
BIPOLAR_MODE = False
BIPOLAR_WEBHOOKS = {}  # guild_id -> {"miku_webhook_url": str, "evil_webhook_url": str}
BIPOLAR_ARGUMENT_IN_PROGRESS = {}  # channel_id -> {"active": bool, "exchange_count": int, "current_speaker": str}
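
# Illustrative sketch (not executed): entries follow the shapes documented in
# the comments above, e.g. when an argument starts in a channel. The values
# shown are placeholders; only the key names come from the comments.
#
#   BIPOLAR_ARGUMENT_IN_PROGRESS[channel_id] = {
#       "active": True,
#       "exchange_count": 0,
#       "current_speaker": "miku",   # initial speaker is an assumption
#   }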

# Regular Miku mood emojis (used in bipolar mode for webhook display names)
MOOD_EMOJIS = {
    "bubbly": "✨",
    "sleepy": "💤",
    "curious": "🔍",
    "shy": "🥺",
    "serious": "😐",
    "excited": "🎉",
    "silly": "🤪",
    "melancholy": "💙",
    "flirty": "💕",
    "romantic": "💖",
    "irritated": "😤",
    "angry": "😠",
    "neutral": "",
    "asleep": "😴"
}

BOT_USER = None

LAST_FULL_PROMPT = ""
# Persona Dialogue System (conversations between Miku and Evil Miku)
|
|
LAST_PERSONA_DIALOGUE_TIME = 0 # Timestamp of last dialogue for cooldown
|
|
|
|
# Voice Chat Session State
|
|
VOICE_SESSION_ACTIVE = False
|
|
TEXT_MESSAGE_QUEUE = [] # Queue for messages received during voice session

# Feature Blocking Flags (set during voice session)
VISION_MODEL_BLOCKED = False
IMAGE_GENERATION_BLOCKED = False
IMAGE_GENERATION_BLOCK_MESSAGE = None
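
# Illustrative sketch (not executed): a voice session would be expected to flip
# these globals on entry and restore them on exit, queueing text messages in
# the meantime. The exact sequencing is an assumption for illustration.
#
#   def _example_begin_voice_session():
#       global VOICE_SESSION_ACTIVE, VISION_MODEL_BLOCKED, IMAGE_GENERATION_BLOCKED
#       VOICE_SESSION_ACTIVE = True
#       VISION_MODEL_BLOCKED = True
#       IMAGE_GENERATION_BLOCKED = True
#
#   def _example_end_voice_session():
#       global VOICE_SESSION_ACTIVE, VISION_MODEL_BLOCKED, IMAGE_GENERATION_BLOCKED
#       VOICE_SESSION_ACTIVE = False
#       VISION_MODEL_BLOCKED = False
#       IMAGE_GENERATION_BLOCKED = False
#       TEXT_MESSAGE_QUEUE.clear()  # or drain the queued messages first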