miku-discord/bot/utils/context_manager.py
koko210Serve 32c2a7b930 feat: Implement comprehensive non-hierarchical logging system
- Created new logging infrastructure with per-component filtering
- Added 6 log levels: DEBUG, INFO, API, WARNING, ERROR, CRITICAL
- Implemented non-hierarchical level control (any combination of levels can be enabled; see the sketch after these commit notes)
- Migrated 917 print() statements across 31 files to structured logging
- Created web UI (system.html) for runtime configuration with dark theme
- Added global level controls to enable/disable levels across all components
- Added timestamp format control (off/time/date/datetime options)
- Implemented log rotation (10MB per file, 5 backups)
- Added API endpoints for dynamic log configuration
- Configured HTTP request logging with filtering via api.requests component
- Intercepted APScheduler logs with proper formatting
- Fixed persistence paths to use /app/memory for Docker volume compatibility
- Fixed checkbox display bug in web UI (enabled_levels now properly shown)
- Changed System Settings button to open in same tab instead of new window

Components: bot, api, api.requests, autonomous, persona, vision, llm,
conversation, mood, dm, scheduled, gpu, media, server, commands,
sentiment, core, apscheduler

All settings persist across container restarts via JSON config.
2026-01-10 20:46:19 +02:00
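
The logger module itself (utils/logger.py) is not part of this file view, so the following is only a rough illustration of what the "non-hierarchical level control" and JSON persistence described above could look like. Every name in it (NonHierarchicalFilter, log_config.json, the numeric value of the custom API level) is an assumption for the sake of the sketch, not the project's actual code.

# Minimal sketch, assuming a per-component set of enabled level names
# persisted as JSON under /app/memory. Not taken from the repository.
import json
import logging
from pathlib import Path

# The six levels named in the commit message; "API" is a custom level.
# Its numeric value (25, between INFO and WARNING) is an assumption.
API_LEVEL = 25
logging.addLevelName(API_LEVEL, "API")

CONFIG_PATH = Path("/app/memory/log_config.json")  # assumed persistence path


class NonHierarchicalFilter(logging.Filter):
    """Pass a record only if its level name is enabled for its component.

    Unlike the usual minimum-threshold model, any combination of levels can
    be on (e.g. DEBUG and ERROR without INFO) for each component logger.
    """

    def __init__(self, enabled_levels_by_component: dict[str, set[str]]):
        super().__init__()
        self.enabled = enabled_levels_by_component

    def filter(self, record: logging.LogRecord) -> bool:
        enabled = self.enabled.get(record.name, set())
        return record.levelname in enabled


def load_config() -> dict[str, set[str]]:
    """Load per-component level sets from JSON so settings survive restarts."""
    if CONFIG_PATH.exists():
        raw = json.loads(CONFIG_PATH.read_text())
        return {component: set(levels) for component, levels in raw.items()}
    return {}


# Example: 'core' logs everything except DEBUG, 'api.requests' only errors.
levels = load_config() or {
    "core": {"INFO", "API", "WARNING", "ERROR", "CRITICAL"},
    "api.requests": {"ERROR", "CRITICAL"},
}

handler = logging.StreamHandler()
handler.addFilter(NonHierarchicalFilter(levels))
logging.basicConfig(level=logging.DEBUG, handlers=[handler],
                    format="%(name)s %(levelname)s %(message)s")

logging.getLogger("core").log(API_LEVEL, "outbound request to the LLM backend")

The rotation policy described above (10 MB per file, 5 backups) maps naturally onto the standard library's logging.handlers.RotatingFileHandler(maxBytes=10 * 1024 * 1024, backupCount=5), though the actual implementation in utils/logger.py may differ.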

100 lines
3.2 KiB
Python

# utils/context_manager.py
"""
Structured context management for Miku's personality and knowledge.
Replaces the vector search system with organized, complete context.
Preserves original content files in their entirety.
"""
from utils.logger import get_logger
logger = get_logger('core')

def get_original_miku_lore() -> str:
    """Load the complete, unmodified miku_lore.txt file"""
    try:
        with open("miku_lore.txt", "r", encoding="utf-8") as f:
            return f.read()
    except Exception as e:
        logger.error(f"Failed to load miku_lore.txt: {e}")
        return "## MIKU LORE\n[File could not be loaded]"

def get_original_miku_prompt() -> str:
    """Load the complete, unmodified miku_prompt.txt file"""
    try:
        with open("miku_prompt.txt", "r", encoding="utf-8") as f:
            return f.read()
    except Exception as e:
        logger.error(f"Failed to load miku_prompt.txt: {e}")
        return "## MIKU PROMPT\n[File could not be loaded]"

def get_original_miku_lyrics() -> str:
    """Load the complete, unmodified miku_lyrics.txt file"""
    try:
        with open("miku_lyrics.txt", "r", encoding="utf-8") as f:
            return f.read()
    except Exception as e:
        logger.error(f"Failed to load miku_lyrics.txt: {e}")
        return "## MIKU LYRICS\n[File could not be loaded]"

def get_complete_context() -> str:
    """Returns all essential Miku context using original files in their entirety"""
    return f"""## MIKU LORE (Complete Original)
{get_original_miku_lore()}
## MIKU PERSONALITY & GUIDELINES (Complete Original)
{get_original_miku_prompt()}
## MIKU SONG LYRICS (Complete Original)
{get_original_miku_lyrics()}"""

def get_context_for_response_type(response_type: str) -> str:
    """Returns appropriate context based on the type of response being generated"""
    # Core context always includes the complete original files
    core_context = f"""## MIKU LORE (Complete Original)
{get_original_miku_lore()}
## MIKU PERSONALITY & GUIDELINES (Complete Original)
{get_original_miku_prompt()}"""

    if response_type == "autonomous_general":
        # For general autonomous messages, include everything
        return f"""{core_context}
## MIKU SONG LYRICS (Complete Original)
{get_original_miku_lyrics()}"""
    elif response_type == "autonomous_tweet":
        # For tweet responses, include lyrics for musical context
        return f"""{core_context}
## MIKU SONG LYRICS (Complete Original)
{get_original_miku_lyrics()}"""
    elif response_type == "dm_response" or response_type == "server_response":
        # For conversational responses, include everything
        return f"""{core_context}
## MIKU SONG LYRICS (Complete Original)
{get_original_miku_lyrics()}"""
    elif response_type == "conversation_join":
        # For joining conversations, include everything
        return f"""{core_context}
## MIKU SONG LYRICS (Complete Original)
{get_original_miku_lyrics()}"""
    elif response_type == "emoji_selection":
        # For emoji reactions, no context needed - the prompt has everything
        return ""
    else:
        # Default: comprehensive context
        return get_complete_context()
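
For orientation, a caller would prepend this context to whatever text is being responded to. The sketch below is hypothetical (the real call sites live elsewhere in the repository, e.g. the DM or autonomous-message handlers), and the "## CURRENT MESSAGE" header is an invented example, not a section this module produces.

# Hypothetical usage sketch; actual call sites are elsewhere in the repository.
from utils.context_manager import get_context_for_response_type


def build_prompt(response_type: str, user_message: str) -> str:
    """Prepend the structured Miku context to the text being responded to."""
    context = get_context_for_response_type(response_type)
    if not context:
        # "emoji_selection" returns an empty string: no extra context needed.
        return user_message
    return f"{context}\n\n## CURRENT MESSAGE\n{user_message}"


print(build_prompt("dm_response", "Hey Miku, what's your favourite song to sing?"))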