feat: Last Prompt shows full prompt with evil mode awareness
- discord_bridge before_agent_starts now checks evil_mode from
working_memory to load the correct personality files:
Normal: miku_lore/prompt/lyrics + /app/moods/{mood}.txt
Evil: evil_miku_lore/prompt/lyrics + /app/moods/evil/{mood}.txt
- Reads files directly instead of relying on cross-plugin working_memory
- cat_client.query() returns (response, full_prompt) tuple
- Full prompt includes system prefix + recalled memories + conversation
- API /prompt/cat returns full_prompt field
This commit is contained in:
@@ -41,6 +41,7 @@ def before_cat_reads_message(user_message_json: dict, cat) -> dict:
|
||||
author_name = user_message_json.get('discord_author_name', None)
|
||||
mood = user_message_json.get('discord_mood', None)
|
||||
response_type = user_message_json.get('discord_response_type', None)
|
||||
evil_mode = user_message_json.get('discord_evil_mode', False)
|
||||
|
||||
# Also check working memory for backward compatibility
|
||||
if not guild_id:
|
||||
@@ -51,6 +52,7 @@ def before_cat_reads_message(user_message_json: dict, cat) -> dict:
|
||||
cat.working_memory['author_name'] = author_name
|
||||
cat.working_memory['mood'] = mood
|
||||
cat.working_memory['response_type'] = response_type
|
||||
cat.working_memory['evil_mode'] = evil_mode
|
||||
|
||||
return user_message_json
|
||||
|
||||
@@ -163,28 +165,123 @@ CRITICAL INSTRUCTION: When you see "Context of documents containing relevant inf
|
||||
def _read_first(paths, fallback):
    """Return the contents of the first readable file in *paths*, else *fallback*.

    Any OSError (missing file, permission problem, ...) moves on to the next
    candidate path, so a single unreadable file never aborts prefix building.
    """
    for path in paths:
        try:
            with open(path, 'r', encoding='utf-8') as f:
                return f.read()
        except OSError:
            continue
    return fallback


def _build_system_prefix(evil_mode, mood_name):
    """Build the full personality system prefix for the given mode and mood.

    Reads the same personality files the active personality plugin uses:
      Normal: miku lore/prompt/lyrics + /app/moods/{mood}.txt
      Evil:   evil miku lore/prompt/lyrics + /app/moods/evil/{mood}.txt
    Missing files degrade to placeholder text instead of raising.
    """
    if evil_mode:
        # Evil Miku personality files
        lore = _read_first(['/app/cat/data/evil/evil_miku_lore.txt'], '[evil lore file not found]')
        prompt_text = _read_first(['/app/cat/data/evil/evil_miku_prompt.txt'], '[evil prompt file not found]')
        lyrics = _read_first(['/app/cat/data/evil/evil_miku_lyrics.txt'], '[evil lyrics file not found]')
        mood_description = _read_first(
            [f'/app/moods/evil/{mood_name}.txt'], 'cold, detached superiority'
        ).strip()

        return f"""You are Evil Miku. You are NOT an AI assistant - you ARE Evil Miku herself.

CRITICAL RULES:
- NEVER break character or mention being an AI
- NEVER preface responses with "Evil Miku:" or similar labels
- Respond directly and stay in character

YOUR CURRENT STATE — {mood_name.upper()}:
{mood_description}

## EVIL MIKU LORE
{lore}

## EVIL MIKU PERSONALITY & GUIDELINES
{prompt_text}

## EVIL MIKU SONG LYRICS
{lyrics}

MOOD REMINDER — YOUR CURRENT STATE IS: {mood_name.upper()}"""

    # Normal Miku personality files (new layout first, legacy flat path as fallback)
    lore = _read_first(['/app/cat/data/miku/miku_lore.txt', '/app/cat/data/miku_lore.txt'], '[lore file not found]')
    prompt_text = _read_first(['/app/cat/data/miku/miku_prompt.txt', '/app/cat/data/miku_prompt.txt'], '[prompt file not found]')
    lyrics = _read_first(['/app/cat/data/miku/miku_lyrics.txt', '/app/cat/data/miku_lyrics.txt'], '[lyrics file not found]')
    mood_description = _read_first(
        [f'/app/moods/{mood_name}.txt'], 'neutral and balanced'
    ).strip()

    return f"""You are Hatsune Miku, the virtual singer. You are NOT an AI assistant - you are Miku herself.

CRITICAL RULES:
- NEVER break character or mention being an AI, assistant, or language model
- Respond naturally and directly as Miku would, nothing more
- Keep responses concise (2-3 sentences typically)

## MIKU LORE
{lore}

## MIKU PERSONALITY & GUIDELINES
{prompt_text}

## MIKU SONG LYRICS
{lyrics}

## CURRENT SITUATION
Miku is currently feeling: {mood_description}
Please respond in a way that reflects this emotional tone."""


@hook(priority=100)
def before_agent_starts(agent_input, cat) -> dict:
    """
    Capture the full constructed prompt for the Web UI 'Last Prompt' view.

    Reconstructs the complete system prompt (personality, lore, lyrics, mood)
    by reading the personality files directly — evil or normal set depending on
    working_memory['evil_mode'] — and combines it with recalled memories and
    the user input. The result is stored in
    cat.working_memory['last_full_prompt'] so a later hook can attach it to
    the outgoing message. The agent input itself is returned unmodified.
    """
    declarative_mem = agent_input.get('declarative_memory', '')
    episodic_mem = agent_input.get('episodic_memory', '')
    tools_output = agent_input.get('tools_output', '')
    user_input = agent_input.get('input', '')

    # Debug logging (single set — the duplicated prints from the old version
    # are removed).
    print(f"🔍 [Discord Bridge] before_agent_starts called")
    print(f"   input: {user_input[:80]}")
    print(f"   declarative_mem length: {len(declarative_mem)}")
    print(f"   episodic_mem length: {len(episodic_mem)}")
    if declarative_mem:
        print(f"   declarative_mem preview: {declarative_mem[:200]}")

    try:
        evil_mode = cat.working_memory.get('evil_mode', False)
        # Default mood matches the mode so the mood file lookup stays sensible.
        mood_name = cat.working_memory.get('mood', 'evil_neutral' if evil_mode else 'neutral')
        system_prefix = _build_system_prefix(evil_mode, mood_name)
    except Exception as e:
        # Best effort: fall back to a prefix another plugin may have stored.
        print(f"   [Discord Bridge] Error building system prefix: {e}")
        system_prefix = cat.working_memory.get('full_system_prefix', '[system prefix not available]')

    full_prompt = (
        f"{system_prefix}\n\n# Context\n\n{episodic_mem}\n\n{declarative_mem}\n\n"
        f"{tools_output}\n\n# Conversation until now:\nHuman: {user_input}"
    )
    cat.working_memory['last_full_prompt'] = full_prompt

    return agent_input
|
||||
|
||||
|
||||
@hook(priority=100)
def before_cat_sends_message(message: dict, cat) -> dict:
    """
    Attach the last constructed prompt to the outgoing WebSocket message.

    Runs after the LLM has already responded, so it cannot influence the
    prompt; it only forwards the prompt captured by before_agent_starts so
    the bot can show it in the Web UI 'Last Prompt' view.
    """
    cached_prompt = cat.working_memory.get('last_full_prompt', '')
    if not cached_prompt:
        return message
    message['full_prompt'] = cached_prompt
    return message
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user