Files
miku-discord/bot/utils/persona_dialogue.py
koko210Serve 97c7133fdc fix: both personas now use full system prompts in arguments and dialogues
Created get_miku_system_prompt() and get_miku_system_prompt_compact() in
context_manager.py — mirrors get_evil_system_prompt() so both personas have
equally rich prompts with lore, lyrics, mood integration, and personality.

Previously only Evil Miku had a proper system prompt function. Regular Miku's
arguments and dialogues used a bare-bones hardcoded prompt with no lore/lyrics
— making arguments feel flat compared to normal conversation.

Changes:
- context_manager.py: added get_miku_system_prompt() (full) and
  get_miku_system_prompt_compact() (lore+personality; omits lyrics to save tokens)
- bipolar_mode.py: both argument prompt functions now accept system_prompt
  param; run_argument() builds miku_system and evil_system once and passes
  them to every exchange
- persona_dialogue.py: dialogue prompts now use get_miku_system_prompt_compact()
  instead of hardcoded stub, matching Evil Miku's full prompt approach
- Removed redundant hardcoded personality text from argument prompts since
  the system prompts now provide it
2026-04-30 15:07:55 +03:00

1029 lines
44 KiB
Python

# utils/persona_dialogue.py
"""
Persona Dialogue System for Miku.
Enables natural back-and-forth conversations between Hatsune Miku and Evil Miku.
Unlike bipolar_mode.py (which handles arguments), this module handles:
- Detecting when the opposite persona should interject
- Managing natural dialogue flow with self-signaling continuation
- Tracking tension that can escalate into arguments
- Seamless handoff to the argument system when tension is high
This system is designed to be lightweight on LLM calls:
- Initial trigger uses fast heuristics + sentiment analysis
- Each dialogue turn uses ONE LLM call that generates response AND decides continuation
- Only escalates to argument system when tension threshold is reached
"""
import discord
import asyncio
import time
import globals
from utils.logger import get_logger
from utils.task_tracker import create_tracked_task
logger = get_logger('persona')
import os
import json
import re
# ============================================================================
# CONSTANTS
# ============================================================================
# NOTE(review): not referenced anywhere in this chunk — presumably used by
# persistence code elsewhere in the file; verify before removing.
DIALOGUE_STATE_FILE = "memory/persona_dialogue_state.json"
# Dialogue settings
MAX_TURNS = 20  # Maximum turns before forced end
DIALOGUE_TIMEOUT = 900  # 15 minutes max dialogue duration (seconds)
ARGUMENT_TENSION_THRESHOLD = 0.75  # Tension level that triggers argument escalation
# Initial trigger settings
INTERJECTION_COOLDOWN_HARD = 180  # 3 minutes hard block PER CHANNEL (seconds)
INTERJECTION_COOLDOWN_SOFT = 900  # 15 minutes for full recovery PER CHANNEL (seconds)
INTERJECTION_THRESHOLD = 0.5  # Score needed to trigger interjection
# Conversation streak: if score is close but below threshold N times in a row,
# force a dialogue trigger (catches extended conversations building toward something)
STREAK_THRESHOLD = 3  # Number of near-miss messages before force trigger
STREAK_MIN_SCORE = 0.3  # Minimum score to count as a "near miss"
# ============================================================================
# INTERJECTION SCORER (Initial Trigger Decision)
# ============================================================================
class InterjectionScorer:
    """
    Decides if the opposite persona should interject based on message content.
    Uses fast heuristics — no LLM calls, no heavy ML dependencies.

    Implemented as a singleton so per-channel cooldown and near-miss streak
    state is shared by every caller in the process.
    """
    _instance = None
    # Simple sentiment word lists (no PyTorch/transformers needed)
    _POSITIVE_WORDS = {"happy", "love", "wonderful", "amazing", "great", "beautiful", "sweet", "kind", "hope", "dream", "excited", "best", "grateful", "blessed", "joy", "perfect", "adorable", "precious", "delightful", "fantastic"}
    _NEGATIVE_WORDS = {"hate", "terrible", "awful", "horrible", "disgusting", "pathetic", "worthless", "stupid", "idiot", "sad", "angry", "upset", "miserable", "worst", "ugly", "boring", "annoying", "frustrated", "cruel", "mean"}

    def __new__(cls):
        # Lazily create the one shared instance with its per-channel state.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._cooldowns = {}  # Per-channel cooldown timestamps
            cls._instance._streaks = {}  # Per-channel near-miss streaks
        return cls._instance

    def _get_sentiment(self, text: str) -> tuple:
        """Lightweight heuristic sentiment analysis — returns (label, score).
        No ML dependencies. Uses word counting + intensity markers.

        Returns:
            tuple: ('POSITIVE' or 'NEGATIVE', confidence 0.0-1.0)
        """
        text_lower = text.lower()
        words = set(re.findall(r'\b\w+\b', text_lower))
        pos_count = len(words & self._POSITIVE_WORDS)
        neg_count = len(words & self._NEGATIVE_WORDS)
        # Intensity markers (exclamations, SHOUTING) boost confidence
        exclamations = text.count('!')
        caps_ratio = sum(1 for c in text if c.isupper()) / max(len(text), 1)
        intensity_boost = min((exclamations * 0.1) + (caps_ratio * 0.3), 0.4)
        if neg_count > pos_count:
            confidence = min(0.5 + (neg_count * 0.15) + intensity_boost, 1.0)
            return ('NEGATIVE', confidence)
        elif pos_count > neg_count:
            confidence = min(0.5 + (pos_count * 0.15) + intensity_boost, 1.0)
            return ('POSITIVE', confidence)
        else:
            # Neutral — slight lean based on intensity
            return ('POSITIVE', 0.5)

    async def should_interject(self, message: discord.Message, current_persona: str) -> tuple:
        """
        Determine if the opposite persona should interject.

        Combines six weighted factors (direct address, topic relevance,
        emotional intensity, personality clash, mood multiplier, context
        bonus), scaled by the per-channel cooldown, against a threshold.

        Args:
            message: The Discord message to analyze
            current_persona: Who just spoke ("miku" or "evil")
        Returns:
            Tuple of (should_interject: bool, reason: str, score: float)
        """
        # Quick rejections
        if not self._passes_basic_filter(message):
            return False, "basic_filter_failed", 0.0
        # Check per-channel cooldown
        channel_id = message.channel.id
        cooldown_mult = self._check_cooldown(channel_id)
        if cooldown_mult == 0.0:
            return False, "cooldown_active", 0.0
        opposite_persona = "evil" if current_persona == "miku" else "miku"
        logger.debug(f"[Interjection] Analyzing content: '{message.content[:100]}...'")
        logger.debug(f"[Interjection] Current persona: {current_persona}, Opposite: {opposite_persona}")
        # Calculate score from various factors
        score = 0.0
        reasons = []
        # Factor 1: Direct addressing (automatic trigger, bypasses scoring)
        if self._mentions_opposite(message.content, opposite_persona):
            logger.info(f"[Interjection] Direct mention of {opposite_persona} detected!")
            return True, "directly_addressed", 1.0
        # Factor 2: Topic relevance
        topic_score = self._check_topic_relevance(message.content, opposite_persona)
        if topic_score > 0:
            score += topic_score * 0.3
            reasons.append(f"topic:{topic_score:.2f}")
        # Factor 3: Emotional intensity (only counts when strong)
        emotion_score = self._check_emotional_intensity(message.content)
        if emotion_score > 0.6:
            score += emotion_score * 0.25
            reasons.append(f"emotion:{emotion_score:.2f}")
        # Factor 4: Personality clash
        clash_score = self._detect_personality_clash(message.content, opposite_persona)
        if clash_score > 0:
            score += clash_score * 0.25
            reasons.append(f"clash:{clash_score:.2f}")
        # Factor 5: Mood multiplier (scales the accumulated score)
        mood_mult = self._get_mood_multiplier(opposite_persona)
        score *= mood_mult
        if mood_mult != 1.0:
            reasons.append(f"mood_mult:{mood_mult:.2f}")
        # Factor 6: Context bonus
        context_bonus = self._check_conversation_context(message)
        score += context_bonus * 0.2
        if context_bonus > 0:
            reasons.append(f"context:{context_bonus:.2f}")
        # Apply cooldown multiplier
        score *= cooldown_mult
        # Check conversation streak (near-misses that build toward a trigger)
        streak_triggered = self._check_streak(channel_id, score)
        # Decision
        should_interject = score >= INTERJECTION_THRESHOLD or streak_triggered
        reason_str = " | ".join(reasons) if reasons else "no_triggers"
        # BUGFIX: this was `streak_triggered and not should_interject`, which is
        # always False (streak_triggered implies should_interject), so the
        # streak reason/log below could never fire. A streak-only trigger is one
        # where the score alone did not reach the threshold.
        if streak_triggered and score < INTERJECTION_THRESHOLD:
            reason_str = "streak_force_trigger"
            logger.info(f"[Interjection] Streak force trigger in channel {channel_id} (score: {score:.2f})")
        if should_interject:
            logger.info(f"{opposite_persona.upper()} WILL INTERJECT (score: {score:.2f})")
            logger.info(f" Reasons: {reason_str}")
        return should_interject, reason_str, score

    def _passes_basic_filter(self, message: discord.Message) -> bool:
        """Fast rejection criteria: system messages, bipolar mode off, other bots."""
        # System messages
        if message.type != discord.MessageType.default:
            logger.debug(f"[Basic Filter] System message type: {message.type}")
            return False
        # Bipolar mode must be enabled
        if not globals.BIPOLAR_MODE:
            logger.debug(f"[Basic Filter] Bipolar mode not enabled")
            return False
        # Allow bot's own messages (we're checking them for interjections!)
        # Also allow webhook messages (persona messages)
        # Only reject OTHER bots' messages
        if message.author.bot and not message.webhook_id:
            # Check if it's our own bot
            if message.author.id != globals.client.user.id:
                logger.debug(f"[Basic Filter] Other bot message (not our bot)")
                return False
        logger.debug(f"[Basic Filter] Passed (bot={message.author.bot}, webhook={message.webhook_id}, our_bot={message.author.id == globals.client.user.id if message.author.bot else 'N/A'})")
        return True

    def _mentions_opposite(self, content: str, opposite_persona: str) -> bool:
        """Check if message directly addresses the opposite persona by name/alias."""
        content_lower = content.lower()
        if opposite_persona == "evil":
            patterns = ["evil miku", "dark miku", "evil version", "bad miku", "evil you"]
        else:
            patterns = ["normal miku", "regular miku", "good miku", "real miku", "nice miku", "other miku", "original miku"]
        return any(pattern in content_lower for pattern in patterns)

    def _check_topic_relevance(self, content: str, opposite_persona: str) -> float:
        """Check if topics would interest the opposite persona (0.0-1.0)."""
        content_lower = content.lower()
        if opposite_persona == "evil":
            # Things Evil Miku can't resist commenting on
            TRIGGER_TOPICS = {
                "optimism": ["happiness", "joy", "love", "kindness", "hope", "dreams", "wonderful", "amazing", "blessed", "grateful"],
                "morality": ["good", "should", "must", "right thing", "deserve", "fair", "justice", "the right", "better person"],
                "weakness": ["scared", "nervous", "worried", "unsure", "help me", "don't know", "confused", "lost", "lonely", "alone"],
                "innocence": ["innocent", "pure", "sweet", "cute", "wholesome", "precious", "adorable"],
                "enthusiasm": ["best day", "so excited", "can't wait", "so happy", "i love this", "this is great"],
                "vulnerability": ["i think", "i feel", "maybe", "sometimes i wonder", "i wish", "i'm trying"],
            }
        else:
            # Things Miku can't ignore
            TRIGGER_TOPICS = {
                "negativity": ["hate", "terrible", "awful", "worst", "horrible", "disgusting", "pathetic", "ugly", "boring", "annoying"],
                "cruelty": ["deserve pain", "suffer", "worthless", "stupid", "idiot", "fool", "moron", "loser", "nobody"],
                "hopelessness": ["no point", "meaningless", "nobody cares", "why bother", "give up", "what's the point", "don't care", "doesn't matter", "who cares"],
                "evil_gloating": ["foolish", "naive", "weak", "inferior", "pathetic", "beneath me", "waste of space"],
                "provocation": ["fight me", "prove it", "make me", "i dare you", "try me", "you can't", "you won't"],
                "dismissal": ["whatever", "shut up", "go away", "leave me alone", "not worth", "don't bother"],
            }
        total_matches = 0
        for category, keywords in TRIGGER_TOPICS.items():
            matches = sum(1 for keyword in keywords if keyword in content_lower)
            total_matches += matches
        return min(total_matches / 2.0, 1.0)  # Lower divisor = higher base scores

    def _check_emotional_intensity(self, content: str) -> float:
        """Check emotional intensity using lightweight heuristic sentiment (0.0-1.0)."""
        label, confidence = self._get_sentiment(content)
        # Punctuation intensity
        exclamations = content.count('!')
        questions = content.count('?')
        caps_ratio = sum(1 for c in content if c.isupper()) / max(len(content), 1)
        intensity_markers = (exclamations * 0.15) + (questions * 0.1) + (caps_ratio * 0.3)
        # Negative content = higher emotional intensity for triggering purposes
        if label == 'NEGATIVE':
            return min(confidence * 0.7 + intensity_markers, 1.0)
        else:
            return min(confidence * 0.4 + intensity_markers, 1.0)

    def _detect_personality_clash(self, content: str, opposite_persona: str) -> float:
        """Detect statements that clash with the opposite persona's values (0.0 or 0.8)."""
        content_lower = content.lower()
        if opposite_persona == "evil":
            # User being too positive/naive = Evil Miku wants to "correct" them
            positive_statements = [
                "i believe in", "i love", "everything will be", "so happy",
                "the best", "amazing", "perfect", "wonderful life", "so grateful"
            ]
            return 0.8 if any(stmt in content_lower for stmt in positive_statements) else 0.0
        else:
            # User being cruel/negative = Miku wants to help/defend
            negative_statements = [
                "i hate", "everyone sucks", "life is meaningless", "don't care",
                "deserve to suffer", "nobody matters", "worthless", "all terrible"
            ]
            return 0.8 if any(stmt in content_lower for stmt in negative_statements) else 0.0

    def _get_mood_multiplier(self, opposite_persona: str) -> float:
        """Current mood affects likelihood of interjection (reads globals mood state)."""
        if opposite_persona == "evil":
            MOOD_MULTIPLIERS = {
                "aggressive": 1.5,
                "manic": 1.4,
                "jealous": 1.3,
                "cunning": 1.0,
                "sarcastic": 1.1,
                "playful_cruel": 1.2,
                "contemptuous": 0.7,
                "evil_neutral": 0.8,
                "bored": 0.5,
                "melancholic": 0.6,
            }
            return MOOD_MULTIPLIERS.get(globals.EVIL_DM_MOOD, 1.0)
        else:
            MOOD_MULTIPLIERS = {
                "bubbly": 1.4,
                "excited": 1.3,
                "curious": 1.2,
                "neutral": 1.0,
                "irritated": 0.9,
                "melancholy": 0.7,
                "asleep": 0.1,
            }
            return MOOD_MULTIPLIERS.get(globals.DM_MOOD, 1.0)

    def _check_conversation_context(self, message: discord.Message) -> float:
        """Check if this is part of an active conversation (0.0-1.0)."""
        score = 0.0
        # Part of a reply chain
        if hasattr(message, 'reference') and message.reference:
            score += 0.5
        # Could add more context checks here
        score += 0.2  # Base activity bonus
        return min(score, 1.0)

    def _check_cooldown(self, channel_id: int) -> float:
        """Check per-channel cooldown and return multiplier (0.0 = blocked, 1.0 = full).

        Linearly ramps from 0.0 to 1.0 between the hard and soft windows.
        """
        current_time = time.time()
        last_time = self._cooldowns.get(channel_id, 0)
        time_since_last = current_time - last_time
        if time_since_last < INTERJECTION_COOLDOWN_HARD:
            return 0.0
        elif time_since_last < INTERJECTION_COOLDOWN_SOFT:
            return (time_since_last - INTERJECTION_COOLDOWN_HARD) / (INTERJECTION_COOLDOWN_SOFT - INTERJECTION_COOLDOWN_HARD)
        else:
            return 1.0

    def _update_cooldown(self, channel_id: int):
        """Mark a dialogue as having started in this channel (resets its cooldown)."""
        self._cooldowns[channel_id] = time.time()

    def _check_streak(self, channel_id: int, score: float) -> bool:
        """Track near-miss interjection scores. After STREAK_THRESHOLD consecutive
        near-misses, force a trigger to catch extended conversations building tension."""
        if score >= INTERJECTION_THRESHOLD:
            # Above threshold — reset streak (actual trigger handles it)
            self._streaks[channel_id] = 0
            return False
        if score < STREAK_MIN_SCORE:
            # Too low — reset streak
            self._streaks[channel_id] = 0
            return False
        # Near miss — increment streak
        current = self._streaks.get(channel_id, 0) + 1
        self._streaks[channel_id] = current
        logger.debug(f"[Streak] Channel {channel_id}: {current}/{STREAK_THRESHOLD} near-misses (score: {score:.2f})")
        if current >= STREAK_THRESHOLD:
            self._streaks[channel_id] = 0  # Reset after force trigger
            return True
        return False
# ============================================================================
# PERSONA DIALOGUE MANAGER
# ============================================================================
class PersonaDialogue:
"""
Manages natural back-and-forth conversations between Miku and Evil Miku.
Each turn:
1. Generate response + continuation signal (single LLM call)
2. Calculate tension delta from response
3. If tension >= threshold, escalate to argument
4. Otherwise, continue or end based on signal
"""
_instance = None
def __new__(cls):
    """Return the process-wide singleton, creating it on first use."""
    inst = cls._instance
    if inst is None:
        inst = super().__new__(cls)
        inst.active_dialogues = {}  # channel_id -> mutable dialogue state dict
        cls._instance = inst
    return inst
# ========================================================================
# DIALOGUE STATE MANAGEMENT
# ========================================================================
def is_dialogue_active(self, channel_id: int) -> bool:
    """Return True when a persona dialogue is currently running in this channel."""
    return self.active_dialogues.get(channel_id) is not None
def get_dialogue_state(self, channel_id: int) -> dict:
    """Return the dialogue state dict for a channel, or None when no dialogue
    is active there (callers must handle the None case)."""
    # dict.get already defaults to None — the explicit second argument was redundant
    return self.active_dialogues.get(channel_id)
def start_dialogue(self, channel_id: int) -> dict:
    """Create, register, and return a fresh dialogue state for a channel.

    Also stamps the per-channel interjection cooldown so the scorer won't
    immediately re-trigger while this dialogue runs.
    """
    fresh_state = dict(
        turn_count=0,
        started_at=time.time(),
        tension=0.0,
        tension_history=[],
        last_speaker=None,
    )
    self.active_dialogues[channel_id] = fresh_state
    # Update per-channel cooldown via the scorer singleton
    get_interjection_scorer()._update_cooldown(channel_id)
    logger.info(f"Started persona dialogue in channel {channel_id}")
    return fresh_state
def end_dialogue(self, channel_id: int):
    """Remove a channel's dialogue state, logging a summary if one existed."""
    state = self.active_dialogues.pop(channel_id, None)
    if state is not None:
        logger.info(f"Ended persona dialogue in channel {channel_id}")
        logger.info(f" Turns: {state['turn_count']}, Final tension: {state['tension']:.2f}")
# ========================================================================
# TENSION CALCULATION
# ========================================================================
def calculate_tension_delta(self, response_text: str, current_tension: float) -> float:
    """
    Analyze a response and determine how much tension it adds/removes.
    Returns delta to add to current tension score.

    Pipeline: heuristic sentiment sets the base delta; keyword pattern matches
    push it up (escalation) or down (de-escalation); intensity and momentum
    multipliers then scale it; finally a spike cooldown halves the delta right
    after a big swing.
    """
    # Natural tension decay — conversations cool off over time.
    # NOTE: this value survives only if sentiment analysis raises below;
    # otherwise the try-block overwrites it.
    base_delta = -0.03
    # Lightweight heuristic sentiment — no ML dependencies
    try:
        scorer = InterjectionScorer()
        label, sentiment_score = scorer._get_sentiment(response_text)
        is_negative = label == 'NEGATIVE'
        if is_negative:
            base_delta = sentiment_score * 0.15
        else:
            base_delta = -sentiment_score * 0.08  # Stronger cooling for positive
    except Exception as e:
        logger.error(f"Sentiment analysis error in tension calc: {e}")
    text_lower = response_text.lower()
    # Escalation patterns (reduced weight: 0.05 per match)
    escalation_patterns = {
        "insult": ["idiot", "stupid", "pathetic", "fool", "naive", "worthless", "disgusting", "moron"],
        "dismissive": ["whatever", "don't care", "waste of time", "not worth", "beneath me", "boring"],
        "confrontational": ["wrong", "you always", "you never", "how dare", "shut up", "stop"],
        "mockery": ["oh please", "how cute", "adorable that you think", "laughable", "hilarious"],
        "challenge": ["prove it", "fight me", "make me", "i dare you", "try me"],
    }
    # De-escalation patterns (increased weight: -0.08 per match)
    deescalation_patterns = {
        "concession": ["you're right", "fair point", "i suppose", "maybe you have", "good point"],
        "softening": ["i understand", "let's calm", "didn't mean", "sorry", "apologize", "i hear you"],
        "deflection": ["anyway", "moving on", "whatever you say", "agree to disagree", "let's just", "maybe we should"],
    }
    # Check escalation
    for category, patterns in escalation_patterns.items():
        matches = sum(1 for p in patterns if p in text_lower)
        if matches > 0:
            base_delta += matches * 0.05  # Reduced from 0.08
    # Check de-escalation
    for category, patterns in deescalation_patterns.items():
        matches = sum(1 for p in patterns if p in text_lower)
        if matches > 0:
            base_delta -= matches * 0.08  # Increased from 0.06
    # Intensity multipliers (reduced)
    exclamation_count = response_text.count('!')
    caps_ratio = sum(1 for c in response_text if c.isupper()) / max(len(response_text), 1)
    if exclamation_count > 2 or caps_ratio > 0.3:
        base_delta *= 1.2  # Reduced from 1.3
    # Momentum factor (reduced)
    if current_tension > 0.5:
        base_delta *= 1.1  # Reduced from 1.2
    # Spike cooldown: if last turn had a big spike, halve this delta
    # (prevents runaway tension spirals from a single heated exchange)
    # NOTE(review): _last_tension_delta lives on the singleton and is shared
    # across all channels — cross-channel bleed seems possible; verify intent.
    if hasattr(self, '_last_tension_delta') and abs(self._last_tension_delta) > 0.15:
        base_delta *= 0.5
        logger.debug(f"[Tension] Spike cooldown active — delta halved to {base_delta:+.3f}")
    self._last_tension_delta = base_delta
    return base_delta
# ========================================================================
# RESPONSE GENERATION
# ========================================================================
async def generate_response_with_continuation(
    self,
    channel: discord.TextChannel,
    responding_persona: str,
    context: str,
) -> tuple:
    """
    Generate response AND continuation signal in a single LLM call.

    Args:
        channel: Channel the dialogue runs in (used for user/guild ids).
        responding_persona: "miku" or "evil" — who speaks this turn.
        context: Recent conversation transcript the persona responds to.

    Returns:
        Tuple of (response_text, should_continue, confidence).
        response_text is None when the LLM call fails.
    """
    from utils.llm import query_llama
    opposite = "Hatsune Miku" if responding_persona == "evil" else "Evil Miku"
    # Get system prompt for persona
    system_prompt = self._get_persona_system_prompt(responding_persona)
    # Build the combined prompt: persona prompt + transcript + instructions to
    # self-report a trailing [CONTINUE]/[CONFIDENCE] marker line.
    prompt = f"""{system_prompt}
Recent conversation:
{context}
Respond naturally as yourself. Keep your response conversational and in-character.
---
After your response, evaluate whether {opposite} would want to keep talking.
The conversation should CONTINUE if ANY of these are true:
- You asked them a direct question (almost always YES — they need to answer)
- You shared something they'd naturally react to or build on
- The topic feels unfinished — there's more to explore
- You left an opening for them to share their perspective
The conversation might END if ALL of these are true:
- No questions were asked
- You made a clear closing statement or changed the subject definitively
- The exchange feels naturally complete
- Both sides have said their piece and there's nothing left hanging
IMPORTANT: This is a CONVERSATION, not a debate. Let it flow naturally. If you asked a question, the answer is almost always YES — they need to respond!
On a new line after your response, write:
[CONTINUE: YES or NO] [CONFIDENCE: HIGH, MEDIUM, or LOW]"""
    # Use appropriate model
    model = globals.EVIL_TEXT_MODEL if responding_persona == "evil" else globals.TEXT_MODEL
    # Use force_evil_context to avoid race condition with globals.EVIL_MODE
    raw_response = await query_llama(
        user_prompt=prompt,
        user_id=f"persona_dialogue_{channel.id}",
        guild_id=channel.guild.id if hasattr(channel, 'guild') and channel.guild else None,
        response_type="autonomous_general",
        model=model,
        force_evil_context=(responding_persona == "evil")
    )
    if not raw_response or raw_response.startswith("Error"):
        return None, False, "LOW"
    # Parse response and signal
    response_text, should_continue, confidence = self._parse_response(raw_response)
    return response_text, should_continue, confidence
def _parse_response(self, raw_response: str) -> tuple:
    """Extract response text and continuation signal from a raw LLM reply.

    Lines carrying a `[CONTINUE: ...]` (or standalone `[CONFIDENCE: ...]`)
    marker are consumed as signals; all other lines form the response text.

    Returns:
        tuple: (response_text: str, should_continue: bool, confidence: str)
    """
    should_continue = False
    confidence = "MEDIUM"
    response_lines = []
    for line in raw_response.strip().split('\n'):
        line_upper = line.upper()
        if "[CONTINUE:" in line_upper:
            should_continue = "YES" in line_upper
            if "HIGH" in line_upper:
                confidence = "HIGH"
            elif "LOW" in line_upper:
                confidence = "LOW"
            else:
                confidence = "MEDIUM"
        elif "[CONFIDENCE:" in line_upper:
            # FIX: models sometimes emit the confidence marker on its own line
            # (without [CONTINUE:]). Previously such lines were kept in the
            # response text and their confidence value was silently ignored.
            if "HIGH" in line_upper:
                confidence = "HIGH"
            elif "LOW" in line_upper:
                confidence = "LOW"
        else:
            response_lines.append(line)
    response_text = '\n'.join(response_lines).strip()
    # Clean up any stray [CONTINUE: markers that leaked into response lines.
    # Only strip the structural markers, NOT common words like YES/NO/HIGH etc.
    # (uses the module-level `re`; the redundant function-local import was removed)
    response_text = re.sub(r'\[CONTINUE:\s*(YES|NO)\]\s*\[CONFIDENCE:\s*(HIGH|MEDIUM|LOW)\]', '', response_text)
    response_text = re.sub(r'\[CONTINUE:\s*(YES|NO)\]', '', response_text)
    response_text = re.sub(r'\[CONFIDENCE:\s*(HIGH|MEDIUM|LOW)\]', '', response_text)
    response_text = response_text.strip()
    # Override: If the response contains a question mark, always continue —
    # a question demands an answer from the other persona.
    if '?' in response_text:
        logger.debug(f"[Parse Override] Question detected, forcing continue=YES")
        should_continue = True
        if confidence == "LOW":
            confidence = "MEDIUM"
    return response_text, should_continue, confidence
def _get_persona_system_prompt(self, persona: str) -> str:
    """Get system prompt for a persona — full personality + conversation-mode guidance.

    Args:
        persona: "evil" for Evil Miku, anything else for regular Miku.
    Returns:
        The persona's full system prompt with a conversation-mode overlay
        appended, steering the model toward dialogue rather than argument.
    """
    if persona == "evil":
        # Imported lazily to avoid import cycles at module load time
        from utils.evil_mode import get_evil_system_prompt
        full_prompt = get_evil_system_prompt()
        # Overlay conversation-mode guidance on top of the full personality
        return full_prompt + """
---
⚠️ CONVERSATION MODE: You are talking with Hatsune Miku, your "good" counterpart.
This is a CONVERSATION, not an argument. You can be snarky, dark, and sarcastic — that's who you are — but you're not here to fight. You're here to TALK. Be playful-cruel, not vicious. Show curiosity. Reveal cracks in the darkness. Do NOT launch into full attack mode or try to "win" — this isn't a competition."""
    else:
        from utils.context_manager import get_miku_system_prompt_compact
        full_prompt = get_miku_system_prompt_compact()
        # Overlay conversation-mode guidance on top of the full personality
        return full_prompt + """
---
⚠️ CONVERSATION MODE: You are talking with Evil Miku, your dark alter ego.
This is a CONVERSATION, not an argument. Be yourself — kind, bubbly, optimistic — but you're not here to fight or defend your existence. Ask genuine questions. Share your feelings without attacking hers. Find common ground. Be curious, not defensive. Do NOT lecture her about being "good" or try to "fix" her. Just TALK. ✨💙"""
# ========================================================================
# DIALOGUE TURN HANDLING
# ========================================================================
async def handle_dialogue_turn(
    self,
    channel: discord.TextChannel,
    responding_persona: str,
    trigger_reason: str = None
):
    """
    Handle one turn of dialogue, tracking tension for potential argument escalation.

    Flow: enforce turn/time limits → build context → generate a response plus a
    self-reported continuation signal → update tension → then either escalate to
    a full argument, queue the opposite persona's next turn, offer a last word,
    or end the dialogue.

    Args:
        channel: Channel the dialogue is running in.
        responding_persona: "miku" or "evil" — who speaks this turn.
        trigger_reason: Optional trigger label (unused here; kept for caller
            compatibility).
    """
    channel_id = channel.id
    # Get or create dialogue state
    state = self.active_dialogues.get(channel_id)
    if not state:
        state = self.start_dialogue(channel_id)
    # Safety limits
    if state["turn_count"] >= MAX_TURNS:
        logger.info(f"Dialogue reached {MAX_TURNS} turns, ending")
        self.end_dialogue(channel_id)
        return
    if time.time() - state["started_at"] > DIALOGUE_TIMEOUT:
        logger.info(f"Dialogue timeout (15 min), ending")
        self.end_dialogue(channel_id)
        return
    # Build context from recent messages
    context = await self._build_conversation_context(channel)
    # Generate response with continuation signal (single LLM call)
    response_text, should_continue, confidence = await self.generate_response_with_continuation(
        channel=channel,
        responding_persona=responding_persona,
        context=context,
    )
    if not response_text:
        logger.error(f"Failed to generate response for {responding_persona}")
        self.end_dialogue(channel_id)
        return
    # Calculate tension change
    tension_delta = self.calculate_tension_delta(response_text, state["tension"])
    state["tension"] = max(0.0, min(1.0, state["tension"] + tension_delta))
    state["tension_history"].append({
        "turn": state["turn_count"],
        "speaker": responding_persona,
        "delta": tension_delta,
        "total": state["tension"],
    })
    logger.debug(f"Tension: {state['tension']:.2f} (delta: {tension_delta:+.2f})")
    # Check if we should escalate to argument
    if state["tension"] >= ARGUMENT_TENSION_THRESHOLD:
        logger.info(f"TENSION THRESHOLD REACHED ({state['tension']:.2f}) - ESCALATING TO ARGUMENT")
        # Send the response that pushed us over
        await self._send_as_persona(channel, responding_persona, response_text)
        # Transition to argument system
        await self._escalate_to_argument(channel, responding_persona, response_text)
        return
    # Send response
    await self._send_as_persona(channel, responding_persona, response_text)
    # Update state
    state["turn_count"] += 1
    state["last_speaker"] = responding_persona
    logger.debug(f"Turn {state['turn_count']}: {responding_persona} | Continue: {should_continue} ({confidence}) | Tension: {state['tension']:.2f}")
    # Decide what happens next
    opposite = "evil" if responding_persona == "miku" else "miku"
    if should_continue:
        # FIX: the original split this into two branches (HIGH/MEDIUM vs LOW
        # confidence) with identical bodies — merged into one, same behavior.
        create_tracked_task(self._next_turn(channel, opposite), task_name="persona_next_turn")
    elif confidence == "LOW":
        # Speaker said NO but wasn't sure — offer opposite persona the last word
        create_tracked_task(
            self._offer_last_word(channel, opposite, context + f"\n{responding_persona}: {response_text}"),
            task_name="persona_last_word"
        )
    else:
        # Clear signal to end
        logger.info(f"Dialogue ended naturally after {state['turn_count']} turns (tension: {state['tension']:.2f})")
        self.end_dialogue(channel_id)
async def _next_turn(self, channel: discord.TextChannel, persona: str):
    """Run the next dialogue turn for `persona`, unless the dialogue was interrupted."""
    interrupted = await self._was_interrupted(channel)
    if interrupted:
        logger.info("Dialogue interrupted by other activity")
        self.end_dialogue(channel.id)
    else:
        await self.handle_dialogue_turn(channel, persona)
async def _offer_last_word(self, channel: discord.TextChannel, persona: str, context: str):
    """
    When speaker said NO with LOW confidence, ask opposite if they want to respond.

    The persona may decline by emitting the literal token [DONE]; otherwise its
    reply is sent, tension is updated (with possible argument escalation), and
    _check_if_final decides whether the dialogue keeps going.

    Args:
        channel: Channel the dialogue is running in.
        persona: "miku" or "evil" — who is offered the last word.
        context: Transcript of the exchange so far, including the last reply.
    """
    from utils.llm import query_llama
    channel_id = channel.id
    state = self.active_dialogues.get(channel_id)
    if not state:
        return
    if await self._was_interrupted(channel):
        self.end_dialogue(channel_id)
        return
    system_prompt = self._get_persona_system_prompt(persona)
    prompt = f"""{system_prompt}
Recent exchange:
{context}
The conversation seems to be wrapping up, but wasn't explicitly ended.
Do you have anything to add? If so, respond naturally.
If you're fine letting it end here, write only: [DONE]
Don't force a response if you have nothing meaningful to contribute."""
    model = globals.EVIL_TEXT_MODEL if persona == "evil" else globals.TEXT_MODEL
    # Use force_evil_context to avoid race condition with globals.EVIL_MODE
    response = await query_llama(
        user_prompt=prompt,
        user_id=f"persona_dialogue_{channel_id}",
        guild_id=channel.guild.id if hasattr(channel, 'guild') and channel.guild else None,
        response_type="autonomous_general",
        model=model,
        force_evil_context=(persona == "evil")
    )
    if not response:
        self.end_dialogue(channel_id)
        return
    if "[DONE]" in response.upper():
        logger.info(f"{persona} chose not to respond, dialogue ended (tension: {state['tension']:.2f})")
        self.end_dialogue(channel_id)
    else:
        # Strip any stray [DONE] token that leaked into an actual reply
        clean_response = response.replace("[DONE]", "").strip()
        # Calculate tension
        tension_delta = self.calculate_tension_delta(clean_response, state["tension"])
        state["tension"] = max(0.0, min(1.0, state["tension"] + tension_delta))
        logger.debug(f"Last word tension: {state['tension']:.2f} (delta: {tension_delta:+.2f})")
        # Check for argument escalation
        if state["tension"] >= ARGUMENT_TENSION_THRESHOLD:
            logger.info(f"TENSION THRESHOLD REACHED on last word - ESCALATING TO ARGUMENT")
            await self._send_as_persona(channel, persona, clean_response)
            await self._escalate_to_argument(channel, persona, clean_response)
            return
        # Normal flow
        await self._send_as_persona(channel, persona, clean_response)
        state["turn_count"] += 1
        # Check if this looks like a closing statement
        opposite = "evil" if persona == "miku" else "miku"
        await self._check_if_final(channel, persona, clean_response, opposite)
async def _check_if_final(self, channel: discord.TextChannel, speaker: str, response: str, opposite: str):
    """End the dialogue if `response` reads like a closing statement; otherwise
    queue the opposite persona's next turn."""
    state = self.active_dialogues.get(channel.id)
    if not state:
        return
    # Heuristic: a short declarative reply with no question reads as closing.
    ends_with_period = response.rstrip().endswith('.')
    asks_nothing = '?' not in response
    is_short = len(response) < 100
    if ends_with_period and asks_nothing and is_short:
        logger.info(f"Dialogue ended after last word, {state['turn_count']} turns total")
        self.end_dialogue(channel.id)
    else:
        create_tracked_task(self._next_turn(channel, opposite), task_name="persona_next_turn")
# ========================================================================
# ARGUMENT ESCALATION
# ========================================================================
async def _escalate_to_argument(self, channel: discord.TextChannel, last_speaker: str, triggering_message: str):
    """
    Transition from dialogue to full bipolar argument.

    Ends the dialogue state, then hands off to bipolar_mode.run_argument with
    a context blurb explaining how the argument started.

    Args:
        channel: Channel where the dialogue got heated.
        last_speaker: Persona whose message crossed the tension threshold
            (currently unused beyond the call site's bookkeeping).
        triggering_message: The message text that pushed tension over the edge.
    """
    from utils.bipolar_mode import is_argument_in_progress, run_argument
    # Clean up dialogue state
    self.end_dialogue(channel.id)
    # Don't start if an argument is already going
    if is_argument_in_progress(channel.id):
        logger.warning(f"Argument already in progress, skipping escalation")
        return
    # Build context for the argument
    escalation_context = f"""This argument erupted from a conversation that got heated.
The last thing said was: "{triggering_message}"
This pushed things over the edge into a full argument."""
    logger.info(f"Escalating to argument in #{channel.name}")
    # Use the existing argument system
    # Pass the triggering message so the opposite persona responds to it
    await run_argument(
        channel=channel,
        client=globals.client,
        trigger_context=escalation_context,
    )
# ========================================================================
# HELPER METHODS
# ========================================================================
async def _was_interrupted(self, channel: discord.TextChannel) -> bool:
"""Check if someone else sent a message during the dialogue"""
state = self.active_dialogues.get(channel.id)
if not state:
return True
try:
async for msg in channel.history(limit=1):
# If latest message is NOT from our webhooks, we were interrupted
if not msg.webhook_id:
# Check if it's from the bot itself (could be normal response)
if msg.author.id != globals.client.user.id:
return True
except Exception as e:
logger.warning(f"Error checking for interruption: {e}")
return False
async def _build_conversation_context(self, channel: discord.TextChannel, limit: int = 15) -> str:
"""Get recent messages for context"""
messages = []
try:
async for msg in channel.history(limit=limit):
speaker = self._identify_speaker(msg)
messages.append(f"{speaker}: {msg.content}")
messages.reverse()
except Exception as e:
logger.warning(f"Error building conversation context: {e}")
return '\n'.join(messages)
def _identify_speaker(self, message: discord.Message) -> str:
"""Identify who sent a message"""
if message.webhook_id:
name_lower = (message.author.name or "").lower()
if "evil" in name_lower:
return "Evil Miku"
return "Hatsune Miku"
elif message.author.id == globals.client.user.id:
# Bot's own messages - check mode at time of message
if globals.EVIL_MODE:
return "Evil Miku"
return "Hatsune Miku"
return message.author.display_name
async def _send_as_persona(self, channel: discord.TextChannel, persona: str, content: str):
"""Send message via webhook"""
from utils.bipolar_mode import (
get_or_create_webhooks_for_channel,
get_miku_display_name,
get_evil_miku_display_name,
get_persona_avatar_urls
)
webhooks = await get_or_create_webhooks_for_channel(channel)
if not webhooks:
logger.warning(f"Could not get webhooks for #{channel.name}")
return
webhook = webhooks["evil_miku"] if persona == "evil" else webhooks["miku"]
display_name = get_evil_miku_display_name() if persona == "evil" else get_miku_display_name()
avatar_urls = get_persona_avatar_urls()
avatar_url = avatar_urls.get("evil_miku") if persona == "evil" else avatar_urls.get("miku")
try:
await webhook.send(content=content, username=display_name, avatar_url=avatar_url)
except Exception as e:
logger.error(f"Error sending as {persona}: {e}")
# ============================================================================
# CONVENIENCE FUNCTIONS
# ============================================================================
# Singleton instances (module-level; created lazily by the getters below)
_scorer = None            # InterjectionScorer, built on first get_interjection_scorer()
_dialogue_manager = None  # PersonaDialogue, built on first get_dialogue_manager()
def get_interjection_scorer() -> InterjectionScorer:
    """Return the process-wide InterjectionScorer, creating it on first use."""
    global _scorer
    if _scorer is not None:
        return _scorer
    _scorer = InterjectionScorer()
    return _scorer
def get_dialogue_manager() -> PersonaDialogue:
    """Return the process-wide PersonaDialogue manager, creating it on first use."""
    global _dialogue_manager
    if _dialogue_manager is not None:
        return _dialogue_manager
    _dialogue_manager = PersonaDialogue()
    return _dialogue_manager
async def check_for_interjection(message: discord.Message, current_persona: str) -> bool:
    """
    Decide whether the opposite persona should jump into the conversation.

    Runs the interjection scorer on the message; when it fires, the channel is
    marked as having an active dialogue and the first turn for the opposite
    persona is scheduled as a background task.

    Args:
        message: The Discord message that was just sent
        current_persona: Who sent the message ("miku" or "evil")

    Returns:
        True if an interjection was triggered, False otherwise
    """
    logger.debug(f"[Persona Dialogue] Checking interjection for message from {current_persona}")
    dialogue_manager = get_dialogue_manager()
    # Only one persona dialogue per channel at a time.
    if dialogue_manager.is_dialogue_active(message.channel.id):
        logger.debug(f"[Persona Dialogue] Dialogue already active in channel {message.channel.id}")
        return False
    scorer = get_interjection_scorer()
    should_interject, reason, score = await scorer.should_interject(message, current_persona)
    logger.debug(f"[Persona Dialogue] Interjection check: should_interject={should_interject}, reason={reason}, score={score:.2f}")
    if not should_interject:
        return False
    opposite_persona = "evil" if current_persona == "miku" else "miku"
    logger.info(f"Triggering {opposite_persona} interjection (reason: {reason}, score: {score:.2f})")
    # Mark the dialogue active before scheduling the turn so a racing message
    # in the same channel cannot double-trigger.
    dialogue_manager.start_dialogue(message.channel.id)
    create_tracked_task(
        dialogue_manager.handle_dialogue_turn(message.channel, opposite_persona, trigger_reason=reason),
        task_name="persona_dialogue_turn"
    )
    return True
def is_persona_dialogue_active(channel_id: int) -> bool:
    """Return True when a persona dialogue is currently running in the channel."""
    return get_dialogue_manager().is_dialogue_active(channel_id)