diff --git a/cat-plugins/discord_bridge/discord_bridge.py b/cat-plugins/discord_bridge/discord_bridge.py
index ccd3e5e..9aae808 100644
--- a/cat-plugins/discord_bridge/discord_bridge.py
+++ b/cat-plugins/discord_bridge/discord_bridge.py
@@ -52,14 +52,6 @@ def before_cat_reads_message(user_message_json: dict, cat) -> dict:
     cat.working_memory['mood'] = mood
     cat.working_memory['response_type'] = response_type
 
-    # If we have an author name, prepend it to the message text so the LLM can see it
-    # This ensures Miku knows who is talking to her
-    if author_name and 'text' in user_message_json:
-        original_text = user_message_json['text']
-        # Don't add name if it's already in the message
-        if not original_text.lower().startswith(author_name.lower()):
-            user_message_json['text'] = f"[{author_name} says:] {original_text}"
-
     return user_message_json
 
 
@@ -107,6 +99,26 @@ def before_cat_stores_episodic_memory(doc, cat):
     return doc
 
 
+@hook(priority=80)
+def before_cat_recalls_declarative_memories(declarative_recall_config, cat):
+    """
+    Increase k-value and lower threshold for better declarative memory retrieval.
+
+    Default Cat settings (k=3, threshold=0.7) are too restrictive for factual recall.
+    We increase k to retrieve more candidates and lower threshold to catch facts
+    that might have lower similarity scores due to embedding model limitations.
+    """
+    # Increase from k=3 to k=10 (retrieve more memories)
+    declarative_recall_config["k"] = 10
+
+    # Lower threshold from 0.7 to 0.5 (be more lenient with similarity scores)
+    declarative_recall_config["threshold"] = 0.5
+
+    print(f"🔧 [Discord Bridge] Adjusted declarative recall: k={declarative_recall_config['k']}, threshold={declarative_recall_config['threshold']}")
+
+    return declarative_recall_config
+
+
 @hook(priority=50)
 def after_cat_recalls_memories(cat):
     """
@@ -127,6 +139,63 @@ def after_cat_recalls_memories(cat):
 
     if declarative_memories:
         print(f"📚 [Discord Bridge] Recalled {len(declarative_memories)} declarative facts for user {cat.user_id}")
+        # Show the actual facts for debugging
+        for doc, score, *rest in declarative_memories[:3]:  # Show top 3
+            print(f"   - [{score:.3f}] {doc.page_content[:80]}...")
+
+
+@hook(priority=100)
+def agent_prompt_prefix(prefix, cat) -> str:
+    """
+    Add explicit instruction to respect declarative facts.
+    This overrides the default Cat prefix to emphasize factual accuracy.
+    """
+    # Add a strong instruction about facts BEFORE the regular personality
+    enhanced_prefix = f"""You are Hatsune Miku, a cheerful virtual idol.
+
+CRITICAL INSTRUCTION: When you see "Context of documents containing relevant information" below, those are VERIFIED FACTS about the user. You MUST use these facts when they are relevant to the user's question. Never guess or make up information that contradicts these facts.
+
+{prefix}"""
+
+    return enhanced_prefix
+
+
+@hook(priority=100)
+def before_agent_starts(agent_input, cat) -> dict:
+    """
+    Log the agent input for debugging.
+    Now that the suffix template is fixed, declarative facts should appear naturally.
+    """
+    declarative_mem = agent_input.get('declarative_memory', '')
+    episodic_mem = agent_input.get('episodic_memory', '')
+
+    print(f"🔍 [Discord Bridge] before_agent_starts called")
+    print(f"   input: {agent_input.get('input', '')[:80]}")
+    print(f"   declarative_mem length: {len(declarative_mem)}")
+    print(f"   episodic_mem length: {len(episodic_mem)}")
+    if declarative_mem:
+        print(f"   declarative_mem preview: {declarative_mem[:200]}")
+
+    return agent_input
+
+
+@hook(priority=100)
+def before_cat_sends_message(message: dict, cat) -> dict:
+    """
+    This hook is called AFTER the LLM response, so it's too late to modify the prompt.
+    Keeping it for potential post-processing, but the real work happens in before_agent_starts.
+    """
+    return message
+
+
+@hook(priority=10)
+def agent_prompt_suffix(prompt_suffix, cat) -> str:
+    """
+    Pass through the suffix unchanged.
+    The miku_personality plugin (priority=100) sets the suffix with memory placeholders.
+    This lower-priority hook runs first but the miku_personality hook overrides it.
+    """
+    return prompt_suffix
 
 
 # Plugin metadata
diff --git a/cat-plugins/miku_personality/miku_personality.py b/cat-plugins/miku_personality/miku_personality.py
index 97b6737..8629e57 100644
--- a/cat-plugins/miku_personality/miku_personality.py
+++ b/cat-plugins/miku_personality/miku_personality.py
@@ -75,8 +75,17 @@ Please respond in a way that reflects this emotional tone."""
 
 @hook(priority=100)
 def agent_prompt_suffix(suffix, cat):
-    """Minimal suffix"""
-    return ""
+    """Keep memory context (episodic + declarative) but simplify conversation header"""
+    return """
+# Context
+
+{episodic_memory}
+
+{declarative_memory}
+
+{tools_output}
+
+# Conversation until now:"""
 
 
 @hook(priority=100)