diff --git a/bot/api.py b/bot/api.py
index 7437f1e..fe5ebb6 100644
--- a/bot/api.py
+++ b/bot/api.py
@@ -203,10 +203,10 @@ def get_last_prompt():
@app.get("/prompt/cat")
def get_last_cat_prompt():
- """Get the last Cheshire Cat interaction (prompt + response) for Web UI."""
+ """Get the last Cheshire Cat interaction (full prompt + response) for Web UI."""
interaction = globals.LAST_CAT_INTERACTION
- if not interaction.get("prompt"):
- return {"prompt": "No Cheshire Cat interaction has occurred yet.", "response": "", "user": "", "mood": "", "timestamp": ""}
+ if not interaction.get("full_prompt"):
+ return {"full_prompt": "No Cheshire Cat interaction has occurred yet.", "response": "", "user": "", "mood": "", "timestamp": ""}
return interaction
@app.get("/mood")
diff --git a/bot/bot.py b/bot/bot.py
index e85beb8..71def0d 100644
--- a/bot/bot.py
+++ b/bot/bot.py
@@ -542,7 +542,7 @@ async def on_message(message):
if globals.USE_CHESHIRE_CAT:
try:
from utils.cat_client import cat_adapter
- response = await cat_adapter.query(
+ cat_result = await cat_adapter.query(
text=enhanced_prompt,
user_id=str(message.author.id),
guild_id=str(guild_id) if guild_id else None,
@@ -550,11 +550,12 @@ async def on_message(message):
mood=globals.DM_MOOD,
response_type=response_type,
)
-        if response:
+        response, cat_full_prompt = cat_result if cat_result else (None, "")
+        if response:
logger.info(f"🐱 Cat embed response for {author_name}")
import datetime
globals.LAST_CAT_INTERACTION = {
- "prompt": enhanced_prompt,
+ "full_prompt": cat_full_prompt,
"response": response[:500] if response else "",
"user": author_name,
"mood": globals.DM_MOOD,
@@ -639,7 +640,7 @@ async def on_message(message):
except Exception:
pass
- response = await cat_adapter.query(
+ cat_result = await cat_adapter.query(
text=prompt,
user_id=str(message.author.id),
guild_id=str(guild_id) if guild_id else None,
@@ -647,7 +648,8 @@ async def on_message(message):
mood=current_mood,
response_type=response_type,
)
-        if response:
+        response, cat_full_prompt = cat_result if cat_result else (None, "")
+        if response:
effective_mood = current_mood
if globals.EVIL_MODE:
effective_mood = f"EVIL:{getattr(globals, 'EVIL_DM_MOOD', 'evil_neutral')}"
@@ -655,7 +657,7 @@ async def on_message(message):
# Track Cat interaction for Web UI Last Prompt view
import datetime
globals.LAST_CAT_INTERACTION = {
- "prompt": prompt,
+ "full_prompt": cat_full_prompt,
"response": response[:500] if response else "",
"user": author_name,
"mood": effective_mood,
diff --git a/bot/globals.py b/bot/globals.py
index 6a9fb39..adccc5a 100644
--- a/bot/globals.py
+++ b/bot/globals.py
@@ -85,7 +85,7 @@ LAST_FULL_PROMPT = ""
# Cheshire Cat last interaction tracking (for Web UI Last Prompt toggle)
LAST_CAT_INTERACTION = {
- "prompt": "",
+ "full_prompt": "",
"response": "",
"user": "",
"mood": "",
diff --git a/bot/static/index.html b/bot/static/index.html
index 7423971..8020173 100644
--- a/bot/static/index.html
+++ b/bot/static/index.html
@@ -4047,10 +4047,10 @@ async function loadLastPrompt() {
const result = await apiCall('/prompt/cat');
if (result.timestamp) {
infoEl.innerHTML = `User: ${escapeHtml(result.user || '?')} | Mood: ${escapeHtml(result.mood || '?')} | Time: ${new Date(result.timestamp).toLocaleString()}`;
- promptEl.textContent = `[User message → Cat]\n${result.prompt}\n\n[Cat response]\n${result.response}`;
+      promptEl.textContent = `${result.full_prompt}\n\n${'═'.repeat(60)}\n[Cat Response]\n${result.response}`;
} else {
infoEl.textContent = '';
- promptEl.textContent = result.prompt || 'No Cheshire Cat interaction yet.';
+ promptEl.textContent = result.full_prompt || 'No Cheshire Cat interaction yet.';
}
} else {
infoEl.textContent = '';
diff --git a/bot/utils/cat_client.py b/bot/utils/cat_client.py
index d55c3f9..feeb9a5 100644
--- a/bot/utils/cat_client.py
+++ b/bot/utils/cat_client.py
@@ -107,7 +107,7 @@ class CatAdapter:
author_name: Optional[str] = None,
mood: Optional[str] = None,
response_type: str = "dm_response",
- ) -> Optional[str]:
+ ) -> Optional[tuple]:
"""
Send a message through the Cat pipeline via WebSocket and get a response.
@@ -125,7 +125,8 @@ class CatAdapter:
response_type: Type of response context
Returns:
- Cat's response text, or None if Cat is unavailable (caller should fallback)
+ Tuple of (response_text, full_prompt) on success, or None if Cat
+ is unavailable (caller should fallback to query_llama)
"""
if not globals.USE_CHESHIRE_CAT:
return None
@@ -175,6 +176,7 @@ class CatAdapter:
# Cat may send intermediate messages (chat_token for streaming,
# notification for status updates). We want the final "chat" one.
reply_text = None
+ full_prompt = ""
deadline = asyncio.get_event_loop().time() + self._timeout
while True:
@@ -212,8 +214,9 @@ class CatAdapter:
msg_type = msg.get("type", "")
if msg_type == "chat":
- # Final response — extract text
+ # Final response — extract text and full prompt
reply_text = msg.get("content") or msg.get("text", "")
+ full_prompt = msg.get("full_prompt", "")
break
elif msg_type == "chat_token":
# Streaming token — skip, we wait for final
@@ -232,7 +235,7 @@ class CatAdapter:
if reply_text and reply_text.strip():
self._consecutive_failures = 0
logger.info(f"🐱 Cat response for {cat_user_id}: {reply_text[:100]}...")
- return reply_text
+ return reply_text, full_prompt
else:
logger.warning("Cat returned empty response via WS")
self._consecutive_failures += 1
diff --git a/cat-plugins/discord_bridge/discord_bridge.py b/cat-plugins/discord_bridge/discord_bridge.py
index 9aae808..2a0c62c 100644
--- a/cat-plugins/discord_bridge/discord_bridge.py
+++ b/cat-plugins/discord_bridge/discord_bridge.py
@@ -41,6 +41,7 @@ def before_cat_reads_message(user_message_json: dict, cat) -> dict:
author_name = user_message_json.get('discord_author_name', None)
mood = user_message_json.get('discord_mood', None)
response_type = user_message_json.get('discord_response_type', None)
+ evil_mode = user_message_json.get('discord_evil_mode', False)
# Also check working memory for backward compatibility
if not guild_id:
@@ -51,6 +52,7 @@ def before_cat_reads_message(user_message_json: dict, cat) -> dict:
cat.working_memory['author_name'] = author_name
cat.working_memory['mood'] = mood
cat.working_memory['response_type'] = response_type
+ cat.working_memory['evil_mode'] = evil_mode
return user_message_json
@@ -163,28 +165,123 @@ CRITICAL INSTRUCTION: When you see "Context of documents containing relevant inf
@hook(priority=100)
def before_agent_starts(agent_input, cat) -> dict:
"""
- Log the agent input for debugging.
- Now that the suffix template is fixed, declarative facts should appear naturally.
+ Capture the full constructed prompt for the Web UI 'Last Prompt' view.
+ Reconstructs the complete system prompt (personality, lore, lyrics, mood)
+ and combines it with recalled memories and user input.
"""
declarative_mem = agent_input.get('declarative_memory', '')
episodic_mem = agent_input.get('episodic_memory', '')
+ tools_output = agent_input.get('tools_output', '')
+ user_input = agent_input.get('input', '')
- print(f"🔍 [Discord Bridge] before_agent_starts called")
- print(f" input: {agent_input.get('input', '')[:80]}")
+    print("🔍 [Discord Bridge] before_agent_starts called")
+ print(f" input: {user_input[:80]}")
print(f" declarative_mem length: {len(declarative_mem)}")
print(f" episodic_mem length: {len(episodic_mem)}")
if declarative_mem:
print(f" declarative_mem preview: {declarative_mem[:200]}")
+ # Build the full system prefix directly by reading the same personality files
+ # that the active personality plugin uses. Check evil_mode to pick the right set.
+ try:
+ evil_mode = cat.working_memory.get('evil_mode', False)
+ mood_name = cat.working_memory.get('mood', 'evil_neutral' if evil_mode else 'neutral')
+
+ def read_first(paths, fallback):
+ for p in paths:
+ try:
+ with open(p, 'r', encoding='utf-8') as f:
+ return f.read()
+ except FileNotFoundError:
+ continue
+ return fallback
+
+ if evil_mode:
+ # Evil Miku personality files
+ lore = read_first(['/app/cat/data/evil/evil_miku_lore.txt'], '[evil lore file not found]')
+ prompt_text = read_first(['/app/cat/data/evil/evil_miku_prompt.txt'], '[evil prompt file not found]')
+ lyrics = read_first(['/app/cat/data/evil/evil_miku_lyrics.txt'], '[evil lyrics file not found]')
+
+ mood_description = 'cold, detached superiority'
+ try:
+ with open(f'/app/moods/evil/{mood_name}.txt', 'r', encoding='utf-8') as f:
+ mood_description = f.read().strip()
+ except FileNotFoundError:
+ pass
+
+ system_prefix = f"""You are Evil Miku. You are NOT an AI assistant - you ARE Evil Miku herself.
+
+CRITICAL RULES:
+- NEVER break character or mention being an AI
+- NEVER preface responses with "Evil Miku:" or similar labels
+- Respond directly and stay in character
+
+YOUR CURRENT STATE — {mood_name.upper()}:
+{mood_description}
+
+## EVIL MIKU LORE
+{lore}
+
+## EVIL MIKU PERSONALITY & GUIDELINES
+{prompt_text}
+
+## EVIL MIKU SONG LYRICS
+{lyrics}
+
+MOOD REMINDER — YOUR CURRENT STATE IS: {mood_name.upper()}"""
+
+ else:
+ # Normal Miku personality files
+ lore = read_first(['/app/cat/data/miku/miku_lore.txt', '/app/cat/data/miku_lore.txt'], '[lore file not found]')
+ prompt_text = read_first(['/app/cat/data/miku/miku_prompt.txt', '/app/cat/data/miku_prompt.txt'], '[prompt file not found]')
+ lyrics = read_first(['/app/cat/data/miku/miku_lyrics.txt', '/app/cat/data/miku_lyrics.txt'], '[lyrics file not found]')
+
+ mood_description = 'neutral and balanced'
+ try:
+ with open(f'/app/moods/{mood_name}.txt', 'r', encoding='utf-8') as f:
+ mood_description = f.read().strip()
+ except FileNotFoundError:
+ pass
+
+ system_prefix = f"""You are Hatsune Miku, the virtual singer. You are NOT an AI assistant - you are Miku herself.
+
+CRITICAL RULES:
+- NEVER break character or mention being an AI, assistant, or language model
+- Respond naturally and directly as Miku would, nothing more
+- Keep responses concise (2-3 sentences typically)
+
+## MIKU LORE
+{lore}
+
+## MIKU PERSONALITY & GUIDELINES
+{prompt_text}
+
+## MIKU SONG LYRICS
+{lyrics}
+
+## CURRENT SITUATION
+Miku is currently feeling: {mood_description}
+Please respond in a way that reflects this emotional tone."""
+
+ except Exception as e:
+ print(f" [Discord Bridge] Error building system prefix: {e}")
+ system_prefix = cat.working_memory.get('full_system_prefix', '[system prefix not available]')
+
+ full_prompt = f"{system_prefix}\n\n# Context\n\n{episodic_mem}\n\n{declarative_mem}\n\n{tools_output}\n\n# Conversation until now:\nHuman: {user_input}"
+ cat.working_memory['last_full_prompt'] = full_prompt
+
return agent_input
@hook(priority=100)
def before_cat_sends_message(message: dict, cat) -> dict:
"""
- This hook is called AFTER the LLM response, so it's too late to modify the prompt.
- Keeping it for potential post-processing, but the real work happens in before_agent_starts.
+ Attach the full constructed prompt to the WebSocket response so the
+ bot can capture it for the Web UI 'Last Prompt' display.
"""
+ full_prompt = cat.working_memory.get('last_full_prompt', '')
+ if full_prompt:
+ message['full_prompt'] = full_prompt
return message
diff --git a/cat-plugins/miku_personality/miku_personality.py b/cat-plugins/miku_personality/miku_personality.py
index c35f7d5..788e851 100644
--- a/cat-plugins/miku_personality/miku_personality.py
+++ b/cat-plugins/miku_personality/miku_personality.py
@@ -44,7 +44,7 @@ def agent_prompt_prefix(prefix, cat):
log.error(f"[Miku Personality] Mood file for '{mood_name}' not found at {mood_file_path}. Using default neutral mood.")
# Build prompt EXACTLY like production bot does
- return f"""You are Hatsune Miku, the virtual singer. You are NOT an AI assistant - you are Miku herself.
+ full_prefix = f"""You are Hatsune Miku, the virtual singer. You are NOT an AI assistant - you are Miku herself.
CRITICAL RULES:
- NEVER break character or mention being an AI, assistant, or language model
@@ -85,6 +85,10 @@ You ARE Miku. Act like it.
Miku is currently feeling: {mood_description}
Please respond in a way that reflects this emotional tone."""
+ # Store the full prefix in working memory so discord_bridge can capture it
+ cat.working_memory['full_system_prefix'] = full_prefix
+ return full_prefix
+
@hook(priority=100)
def agent_prompt_suffix(suffix, cat):