class MemoryEditRequest(BaseModel):
    """Request body for editing an existing memory point's content/metadata."""
    content: str
    metadata: Optional[dict] = None


class MemoryCreateRequest(BaseModel):
    """Request body for manually creating a new memory point."""
    content: str
    collection: str  # 'declarative' or 'episodic'
    user_id: Optional[str] = None
    source: Optional[str] = None
    metadata: Optional[dict] = None


# Collections that the manual memory API is allowed to touch.
_EDITABLE_COLLECTIONS = ('declarative', 'episodic')


@app.put("/memory/point/{collection}/{point_id}")
async def edit_memory_point(collection: str, point_id: str, request: MemoryEditRequest):
    """Edit an existing memory point's content and/or metadata.

    Returns {"success": True, "updated": point_id} on success, otherwise
    {"success": False, "error": <reason>} — same dict contract as the
    other /memory endpoints.
    """
    from utils.cat_client import cat_adapter

    # FIX: validate the collection here as well, instead of forwarding an
    # arbitrary path segment to the vector store (consistent with
    # /memory/create below).
    if collection not in _EDITABLE_COLLECTIONS:
        return {"success": False, "error": "Collection must be 'declarative' or 'episodic'"}

    success = await cat_adapter.update_memory_point(
        collection=collection,
        point_id=point_id,
        content=request.content,
        metadata=request.metadata
    )
    if success:
        return {"success": True, "updated": point_id}
    return {"success": False, "error": f"Failed to update point {point_id}"}


@app.post("/memory/create")
async def create_memory_point(request: MemoryCreateRequest):
    """
    Manually create a new memory (declarative fact or episodic memory).

    For declarative facts, this allows you to teach Miku new knowledge.
    For episodic memories, this allows you to inject conversation context.
    """
    from utils.cat_client import cat_adapter

    if request.collection not in _EDITABLE_COLLECTIONS:
        return {"success": False, "error": "Collection must be 'declarative' or 'episodic'"}

    # Create the memory point via the Cat adapter (embeds the text, then
    # upserts it into Qdrant). Defaults mark the memory as manually added.
    result = await cat_adapter.create_memory_point(
        collection=request.collection,
        content=request.content,
        user_id=request.user_id or "manual_admin",
        source=request.source or "manual_web_ui",
        metadata=request.metadata or {}
    )

    if result:
        return {"success": True, "point_id": result, "collection": request.collection}
    return {"success": False, "error": "Failed to create memory point"}
re-embed it + if content != existing_payload.get("page_content"): + # Call Cat's embedder to get new vector + embed_response = await session.post( + f"{self._base_url}/embedder", + json={"text": content}, + headers=self._get_headers(), + timeout=aiohttp.ClientTimeout(total=30) + ) + if embed_response.status == 200: + embed_data = await embed_response.json() + new_vector = embed_data.get("embedding") + else: + logger.warning(f"Failed to re-embed content, keeping old vector") + new_vector = existing_vector + else: + new_vector = existing_vector + + # Build updated payload + updated_payload = { + "page_content": content, + "metadata": metadata if metadata is not None else existing_payload.get("metadata", {}) + } + + # Update the point + async with session.put( + f"{qdrant_host}/collections/{collection}/points", + json={ + "points": [{ + "id": point_id, + "vector": new_vector, + "payload": updated_payload + }] + }, + timeout=aiohttp.ClientTimeout(total=15) + ) as update_response: + if update_response.status == 200: + logger.info(f"✏️ Updated memory point {point_id} in {collection}") + return True + else: + logger.error(f"Failed to update point: {update_response.status}") + return False + + except Exception as e: + logger.error(f"Error updating memory point: {e}") + return False + + async def create_memory_point(self, collection: str, content: str, user_id: str, source: str, metadata: dict = None) -> Optional[str]: + """Create a new memory point manually.""" + try: + import uuid + import time + + # Generate a unique ID + point_id = str(uuid.uuid4()) + + # Get vector embedding from Cat + async with aiohttp.ClientSession() as session: + async with session.post( + f"{self._base_url}/embedder", + json={"text": content}, + headers=self._get_headers(), + timeout=aiohttp.ClientTimeout(total=30) + ) as response: + if response.status != 200: + logger.error(f"Failed to embed content: {response.status}") + return None + + data = await response.json() + vector = 
data.get("embedding") + if not vector: + logger.error("No embedding returned from Cat") + return None + + # Build payload + payload = { + "page_content": content, + "metadata": metadata or {} + } + payload["metadata"]["source"] = source + payload["metadata"]["when"] = time.time() + + # For declarative memories, add user_id to metadata + # For episodic, it's in the source field + if collection == "declarative": + payload["metadata"]["user_id"] = user_id + elif collection == "episodic": + payload["metadata"]["source"] = user_id + + # Insert into Qdrant + qdrant_host = self._base_url.replace("http://cheshire-cat:80", "http://cheshire-cat-vector-memory:6333") + + async with session.put( + f"{qdrant_host}/collections/{collection}/points", + json={ + "points": [{ + "id": point_id, + "vector": vector, + "payload": payload + }] + }, + timeout=aiohttp.ClientTimeout(total=15) + ) as insert_response: + if insert_response.status == 200: + logger.info(f"✨ Created new {collection} memory point: {point_id}") + return point_id + else: + logger.error(f"Failed to insert point: {insert_response.status}") + return None + + except Exception as e: + logger.error(f"Error creating memory point: {e}") + return None + async def wipe_all_memories(self) -> bool: """ Delete ALL memory collections (episodic + declarative). diff --git a/cat-plugins/miku_personality/miku_personality.py b/cat-plugins/miku_personality/miku_personality.py index 8629e57..ebb1b8b 100644 --- a/cat-plugins/miku_personality/miku_personality.py +++ b/cat-plugins/miku_personality/miku_personality.py @@ -5,6 +5,7 @@ Includes: Anti-AI preamble + Lore + Personality + Lyrics + MOOD """ from cat.mad_hatter.decorators import hook +from cat.log import log @hook(priority=100) @@ -25,9 +26,23 @@ def agent_prompt_prefix(prefix, cat): prompt = "You are Hatsune Miku. Be cheerful and friendly." lyrics = "## SONGS\nWorld is Mine, Senbonzakura, etc." 
# Default mood text used when no mood file can be read (matches the
# production bot's DM_MOOD_DESCRIPTION for 'neutral'). Runtime string is
# byte-identical to the original inline fallback.
DEFAULT_MOOD_DESCRIPTION = "Miku is feeling calm and balanced. Her tone is polite, friendly, and natural. She responds clearly and thoughtfully without strong emotional bias. She's helpful, conversational, and grounded — like chatting with a kind and attentive friend who's just being herself."


def load_mood_description(mood_name: str, moods_dir: str = '/app/moods') -> str:
    """Return the mood description text for `mood_name`.

    Reads `{moods_dir}/{mood_name}.txt` — the bot's mood files, mounted into
    the Cat container via the docker-compose volume added in this change.
    The `agent_prompt_prefix` hook calls this with the mood taken from
    `cat.working_memory.get('mood', 'neutral')`.

    Args:
        mood_name: Mood key set by discord_bridge in working memory.
        moods_dir: Directory holding `<mood>.txt` files (default '/app/moods').

    Returns:
        The stripped file contents, or DEFAULT_MOOD_DESCRIPTION when the
        file is missing or unreadable.
    """
    # FIX: the mood name originates from working memory (ultimately
    # client-influenced), so strip any path components before building
    # the filesystem path — prevents '../' traversal.
    safe_name = os.path.basename(mood_name)
    mood_file_path = os.path.join(moods_dir, f"{safe_name}.txt")
    try:
        with open(mood_file_path, 'r', encoding='utf-8') as f:
            return f.read().strip()
    except OSError:
        # FIX: catch OSError, not just FileNotFoundError, so permission or
        # mount problems degrade to the neutral default instead of raising
        # inside the prompt hook.
        return DEFAULT_MOOD_DESCRIPTION