- Created new logging infrastructure with per-component filtering
- Added 6 log levels: DEBUG, INFO, API, WARNING, ERROR, CRITICAL
- Implemented non-hierarchical level control (any combination can be enabled; see the sketch after this list)
- Migrated 917 print() statements across 31 files to structured logging
- Created web UI (system.html) for runtime configuration with dark theme
- Added global level controls to enable/disable levels across all components
- Added timestamp format control (off/time/date/datetime options)
- Implemented log rotation (10MB per file, 5 backups)
- Added API endpoints for dynamic log configuration
- Configured HTTP request logging with filtering via api.requests component
- Intercepted APScheduler logs with proper formatting
- Fixed persistence paths to use /app/memory for Docker volume compatibility
- Fixed checkbox display bug in web UI (enabled_levels now properly shown)
- Changed System Settings button to open in same tab instead of new window

Components: bot, api, api.requests, autonomous, persona, vision, llm, conversation, mood, dm, scheduled, gpu, media, server, commands, sentiment, core, apscheduler

All settings persist across container restarts via JSON config.
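For orientation, a minimal sketch of how non-hierarchical per-component level control and rotation could be wired on top of the standard logging module. This is not the actual utils.logger implementation: the names API_LEVEL, ENABLED_LEVELS, LevelSetFilter, and the bot.log file name are illustrative assumptions, while the /app/memory path, the API level, the 10MB/5-backup rotation, and the component names come from the changes above.

import logging
from logging.handlers import RotatingFileHandler

# Custom API level between INFO (20) and WARNING (30); the numeric value is assumed.
API_LEVEL = 25
logging.addLevelName(API_LEVEL, "API")

# Non-hierarchical control: a record passes only if its level name is explicitly
# enabled for its component, so any combination of levels can be active at once.
ENABLED_LEVELS = {
    "sentiment": {"INFO", "ERROR", "CRITICAL"},
    "api.requests": {"API", "ERROR"},
}


class LevelSetFilter(logging.Filter):
    def filter(self, record: logging.LogRecord) -> bool:
        return record.levelname in ENABLED_LEVELS.get(record.name, set())


def get_logger(component: str) -> logging.Logger:
    logger = logging.getLogger(component)
    logger.setLevel(logging.DEBUG)  # pass everything; the filter decides what is emitted
    if not logger.handlers:
        # 10MB per file, 5 backups, stored under the /app/memory Docker volume
        # (the bot.log file name is assumed)
        handler = RotatingFileHandler(
            "/app/memory/bot.log", maxBytes=10 * 1024 * 1024, backupCount=5
        )
        handler.setFormatter(
            logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s")
        )
        handler.addFilter(LevelSetFilter())
        logger.addHandler(handler)
    return logger

Filtering by set membership rather than by numeric threshold is what makes the control non-hierarchical: ERROR and API can be on for a component while WARNING stays off.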
Example of a migrated module, sentiment (47 lines, 1.7 KiB, Python):
from utils.llm import query_llama
from utils.logger import get_logger

logger = get_logger('sentiment')


async def analyze_sentiment(messages: list) -> tuple[str, float]:
    """
    Analyze the sentiment of a conversation using llama.cpp
    Returns a tuple of (sentiment description, positivity score from 0-1)
    """
    # Combine the last few messages for context (up to 5)
    messages_to_analyze = messages[-5:] if len(messages) > 5 else messages
    conversation_text = "\n".join([
        f"{'Bot' if msg['is_bot_message'] else 'User'}: {msg['content']}"
        for msg in messages_to_analyze
    ])

    prompt = f"""Analyze the sentiment and tone of this conversation snippet between a user and a bot.
Focus on the overall mood, engagement level, and whether the interaction seems positive/neutral/negative.
Give a brief 1-2 sentence summary and a positivity score from 0-1 where:
0.0-0.3 = Negative/Hostile
0.3-0.7 = Neutral/Mixed
0.7-1.0 = Positive/Friendly

Conversation:
{conversation_text}

Format your response exactly like this example:
Summary: The conversation is friendly and engaging with good back-and-forth.
Score: 0.85

Response:"""

    try:
        response = await query_llama(prompt)
        if not response or 'Score:' not in response:
            return "Could not analyze sentiment", 0.5

        # Parse the response
        lines = response.strip().split('\n')
        summary = lines[0].replace('Summary:', '').strip()
        score = float(lines[1].replace('Score:', '').strip())

        return summary, score
    except Exception as e:
        logger.error(f"Error in sentiment analysis: {e}")
        return "Error analyzing sentiment", 0.5