diff --git a/bot/api.py b/bot/api.py
index c2b5bda..ff697c3 100644
--- a/bot/api.py
+++ b/bot/api.py
@@ -2772,6 +2772,134 @@ def set_voice_debug_mode(enabled: bool = Form(...)):
}
+# ========== Cheshire Cat Memory Management (Phase 3) ==========
+
+class MemoryDeleteRequest(BaseModel):
+ confirmation: str
+
+@app.get("/memory/status")
+async def get_cat_memory_status():
+ """Get Cheshire Cat connection status and feature flag."""
+ from utils.cat_client import cat_adapter
+ is_healthy = await cat_adapter.health_check()
+ return {
+ "enabled": globals.USE_CHESHIRE_CAT,
+ "healthy": is_healthy,
+ "url": globals.CHESHIRE_CAT_URL,
+ "circuit_breaker_active": cat_adapter._is_circuit_broken(),
+ "consecutive_failures": cat_adapter._consecutive_failures
+ }
+
+@app.post("/memory/toggle")
+async def toggle_cat_integration(enabled: bool = Form(...)):
+ """Toggle Cheshire Cat integration on/off."""
+ globals.USE_CHESHIRE_CAT = enabled
+ logger.info(f"🐱 Cheshire Cat integration {'ENABLED' if enabled else 'DISABLED'}")
+ return {
+ "success": True,
+ "enabled": globals.USE_CHESHIRE_CAT,
+ "message": f"Cheshire Cat {'enabled' if enabled else 'disabled'}"
+ }
+
+@app.get("/memory/stats")
+async def get_memory_stats():
+ """Get memory collection statistics from Cheshire Cat (point counts per collection)."""
+ from utils.cat_client import cat_adapter
+ stats = await cat_adapter.get_memory_stats()
+ if stats is None:
+ return {"success": False, "error": "Could not reach Cheshire Cat"}
+ return {"success": True, "collections": stats.get("collections", [])}
+
+@app.get("/memory/facts")
+async def get_memory_facts():
+ """Get all declarative memory facts (learned knowledge about users)."""
+ from utils.cat_client import cat_adapter
+ facts = await cat_adapter.get_all_facts()
+ return {"success": True, "facts": facts, "count": len(facts)}
+
+@app.get("/memory/episodic")
+async def get_episodic_memories():
+ """Get all episodic memories (conversation snippets)."""
+ from utils.cat_client import cat_adapter
+ result = await cat_adapter.get_memory_points(collection="episodic", limit=100)
+ if result is None:
+ return {"success": False, "error": "Could not reach Cheshire Cat"}
+
+ memories = []
+ for point in result.get("points", []):
+ payload = point.get("payload", {})
+ memories.append({
+ "id": point.get("id"),
+ "content": payload.get("page_content", ""),
+ "metadata": payload.get("metadata", {}),
+ })
+
+ return {"success": True, "memories": memories, "count": len(memories)}
+
+@app.post("/memory/consolidate")
+async def trigger_memory_consolidation():
+ """Manually trigger memory consolidation (sleep consolidation process)."""
+ from utils.cat_client import cat_adapter
+ logger.info("๐ Manual memory consolidation triggered via API")
+ result = await cat_adapter.trigger_consolidation()
+ if result is None:
+ return {"success": False, "error": "Consolidation failed or timed out"}
+ return {"success": True, "result": result}
+
+@app.post("/memory/delete")
+async def delete_all_memories(request: MemoryDeleteRequest):
+ """
+ Delete ALL of Miku's memories. Requires exact confirmation string.
+
+ The confirmation field must be exactly:
+ "Yes, I am deleting Miku's memories fully."
+
+ This is destructive and irreversible.
+ """
+ REQUIRED_CONFIRMATION = "Yes, I am deleting Miku's memories fully."
+
+ if request.confirmation != REQUIRED_CONFIRMATION:
+ logger.warning("Memory deletion rejected: wrong confirmation string")
+ return {
+ "success": False,
+ "error": "Confirmation string does not match. "
+ f"Expected exactly: \"{REQUIRED_CONFIRMATION}\""
+ }
+
+ from utils.cat_client import cat_adapter
+ logger.warning("⚠️ MEMORY DELETION CONFIRMED — wiping all memories!")
+
+ # Wipe vector memories (episodic + declarative)
+ wipe_success = await cat_adapter.wipe_all_memories()
+
+ # Also clear conversation history
+ history_success = await cat_adapter.wipe_conversation_history()
+
+ if wipe_success:
+ logger.warning("🗑️ All Miku memories have been deleted.")
+ return {
+ "success": True,
+ "message": "All memories have been permanently deleted.",
+ "vector_memory_wiped": wipe_success,
+ "conversation_history_cleared": history_success
+ }
+ else:
+ return {
+ "success": False,
+ "error": "Failed to wipe memory collections. Check Cat connection."
+ }
+
+@app.delete("/memory/point/{collection}/{point_id}")
+async def delete_single_memory_point(collection: str, point_id: str):
+ """Delete a single memory point by collection and ID."""
+ from utils.cat_client import cat_adapter
+ success = await cat_adapter.delete_memory_point(collection, point_id)
+ if success:
+ return {"success": True, "deleted": point_id}
+ else:
+ return {"success": False, "error": f"Failed to delete point {point_id}"}
+
+
def start_api():
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=3939)
diff --git a/bot/bot.py b/bot/bot.py
index e853e61..ad2b29b 100644
--- a/bot/bot.py
+++ b/bot/bot.py
@@ -512,14 +512,34 @@ async def on_message(message):
guild_id = message.guild.id if message.guild else None
response_type = "dm_response" if is_dm else "server_response"
author_name = message.author.display_name
-
- response = await query_llama(
- enhanced_prompt,
- user_id=str(message.author.id),
- guild_id=guild_id,
- response_type=response_type,
- author_name=author_name
- )
+
+ # Phase 3: Try Cat pipeline first for embed responses too
+ response = None
+ if globals.USE_CHESHIRE_CAT:
+ try:
+ from utils.cat_client import cat_adapter
+ response = await cat_adapter.query(
+ text=enhanced_prompt,
+ user_id=str(message.author.id),
+ guild_id=str(guild_id) if guild_id else None,
+ author_name=author_name,
+ mood=globals.DM_MOOD,
+ response_type=response_type,
+ )
+ if response:
+ logger.info(f"🐱 Cat embed response for {author_name}")
+ except Exception as e:
+ logger.warning(f"🐱 Cat embed error, fallback: {e}")
+ response = None
+
+ if not response:
+ response = await query_llama(
+ enhanced_prompt,
+ user_id=str(message.author.id),
+ guild_id=guild_id,
+ response_type=response_type,
+ author_name=author_name
+ )
if is_dm:
logger.info(f"๐ DM embed response to {message.author.display_name} (using DM mood: {globals.DM_MOOD})")
@@ -570,13 +590,46 @@ async def on_message(message):
guild_id = message.guild.id if message.guild else None
response_type = "dm_response" if is_dm else "server_response"
author_name = message.author.display_name
- response = await query_llama(
- prompt,
- user_id=str(message.author.id),
- guild_id=guild_id,
- response_type=response_type,
- author_name=author_name
- )
+
+ # Phase 3: Try Cheshire Cat pipeline first (memory-augmented response)
+ # Falls back to query_llama if Cat is unavailable or disabled
+ response = None
+ if globals.USE_CHESHIRE_CAT:
+ try:
+ from utils.cat_client import cat_adapter
+ current_mood = globals.DM_MOOD
+ if guild_id:
+ try:
+ from server_manager import server_manager
+ sc = server_manager.get_server_config(guild_id)
+ if sc:
+ current_mood = sc.current_mood_name
+ except Exception:
+ pass
+
+ response = await cat_adapter.query(
+ text=prompt,
+ user_id=str(message.author.id),
+ guild_id=str(guild_id) if guild_id else None,
+ author_name=author_name,
+ mood=current_mood,
+ response_type=response_type,
+ )
+ if response:
+ logger.info(f"🐱 Cat response for {author_name} (mood: {current_mood})")
+ except Exception as e:
+ logger.warning(f"🐱 Cat pipeline error, falling back to query_llama: {e}")
+ response = None
+
+ # Fallback to direct LLM query if Cat didn't respond
+ if not response:
+ response = await query_llama(
+ prompt,
+ user_id=str(message.author.id),
+ guild_id=guild_id,
+ response_type=response_type,
+ author_name=author_name
+ )
if is_dm:
logger.info(f"๐ DM response to {message.author.display_name} (using DM mood: {globals.DM_MOOD})")
diff --git a/bot/globals.py b/bot/globals.py
index 64036bf..2d1cb0e 100644
--- a/bot/globals.py
+++ b/bot/globals.py
@@ -29,6 +29,12 @@ EVIL_TEXT_MODEL = os.getenv("EVIL_TEXT_MODEL", "darkidol") # Uncensored model f
JAPANESE_TEXT_MODEL = os.getenv("JAPANESE_TEXT_MODEL", "swallow") # Llama 3.1 Swallow model for Japanese
OWNER_USER_ID = int(os.getenv("OWNER_USER_ID", "209381657369772032")) # Bot owner's Discord user ID for reports
+# Cheshire Cat AI integration (Phase 3)
+CHESHIRE_CAT_URL = os.getenv("CHESHIRE_CAT_URL", "http://cheshire-cat:80")
+USE_CHESHIRE_CAT = os.getenv("USE_CHESHIRE_CAT", "false").lower() == "true"
+CHESHIRE_CAT_API_KEY = os.getenv("CHESHIRE_CAT_API_KEY", "") # Empty = no auth
+CHESHIRE_CAT_TIMEOUT = int(os.getenv("CHESHIRE_CAT_TIMEOUT", "120")) # Seconds
+
# Language mode for Miku (english or japanese)
LANGUAGE_MODE = "english" # Can be "english" or "japanese"
diff --git a/bot/static/index.html b/bot/static/index.html
index 91fb852..f916edd 100644
--- a/bot/static/index.html
+++ b/bot/static/index.html
@@ -665,6 +665,7 @@
+
@@ -1547,6 +1548,142 @@
+
+
+
+
๐ง Cheshire Cat Memory Management
+
+ Manage Miku's long-term memories powered by the Cheshire Cat AI pipeline.
+ Memories are stored in Qdrant vector database and used to give Miku persistent knowledge about users.
+
+
+
+
+
+
+
๐ฑ Cheshire Cat Status
+ Checking...
+
+
+
+
+
+
+
+
+
+
+
+
โ
+
๐ Episodic Memories
+
Conversation snippets
+
+
+
โ
+
๐ Declarative Facts
+
Learned knowledge
+
+
+
โ
+
โ๏ธ Procedural
+
Tools & procedures
+
+
+
+
+
+
๐ Memory Consolidation
+
+ Trigger the sleep consolidation process: analyzes episodic memories, extracts important facts, and removes trivial entries.
+
+
+
+
+
+
+
+
+
+
+
+
๐ Declarative Facts
+
+
+
+
Click "Load Facts" to view stored knowledge
+
+
+
+
+
+
+
๐ Episodic Memories
+
+
+
+
Click "Load Memories" to view conversation snippets
+
+
+
+
+
+
⚠️ Danger Zone — Delete All Memories
+
+ This will permanently erase ALL of Miku's memories โ episodic conversations, learned facts, everything.
+ This action is irreversible. Miku will forget everything she has ever learned.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Type exactly: Yes, I am deleting Miku's memories fully.
+