feat: fix evil mode race conditions, expand moods and PFP detection

bipolar_mode.py:
- Replace unsafe globals.EVIL_MODE temporary overrides with
  force_evil_context parameter to fix async race conditions (3 sites)

moods.py:
- Add 6 new evil mood emojis: bored, manic, jealous, melancholic,
  playful_cruel, contemptuous
- Refactor rotate_dm_mood() to skip when evil mode is active (evil mode
  has its own independent 2-hour rotation timer)

persona_dialogue.py:
- Same force_evil_context race condition fix (2 sites)
- Fix over-aggressive response cleanup that stripped common words
  (YES/NO/HIGH) — now uses targeted regex for structural markers only
- Update evil mood multipliers to match new mood set

profile_picture_context plugin:
- Expand PFP detection regex for broader coverage (appearance questions,
  opinion queries, selection/change questions)
- Add plugin.json metadata file
This commit is contained in:
2026-03-04 00:45:23 +02:00
parent 5898b0eb3b
commit 335b58a867
5 changed files with 119 additions and 113 deletions

View File

@@ -264,12 +264,15 @@ class InterjectionScorer:
if opposite_persona == "evil":
MOOD_MULTIPLIERS = {
"aggressive": 1.5,
"cruel": 1.3,
"mischievous": 1.2,
"manic": 1.4,
"jealous": 1.3,
"cunning": 1.0,
"sarcastic": 1.1,
"playful_cruel": 1.2,
"contemptuous": 0.7,
"evil_neutral": 0.8,
"contemplative": 0.6,
"bored": 0.5,
"melancholic": 0.6,
}
return MOOD_MULTIPLIERS.get(globals.EVIL_DM_MOOD, 1.0)
else:
@@ -505,20 +508,15 @@ On a new line after your response, write:
# Use appropriate model
model = globals.EVIL_TEXT_MODEL if responding_persona == "evil" else globals.TEXT_MODEL
# Temporarily set evil mode for proper context
original_evil_mode = globals.EVIL_MODE
globals.EVIL_MODE = (responding_persona == "evil")
try:
raw_response = await query_llama(
user_prompt=prompt,
user_id=f"persona_dialogue_{channel.id}",
guild_id=channel.guild.id if hasattr(channel, 'guild') and channel.guild else None,
response_type="autonomous_general",
model=model
)
finally:
globals.EVIL_MODE = original_evil_mode
# Use force_evil_context to avoid race condition with globals.EVIL_MODE
raw_response = await query_llama(
user_prompt=prompt,
user_id=f"persona_dialogue_{channel.id}",
guild_id=channel.guild.id if hasattr(channel, 'guild') and channel.guild else None,
response_type="autonomous_general",
model=model,
force_evil_context=(responding_persona == "evil")
)
if not raw_response or raw_response.startswith("Error"):
return None, False, "LOW"
@@ -553,10 +551,12 @@ On a new line after your response, write:
response_text = '\n'.join(response_lines).strip()
# Clean up any stray signal markers
response_text = response_text.replace("[CONTINUE:", "").replace("]", "")
response_text = response_text.replace("YES", "").replace("NO", "")
response_text = response_text.replace("HIGH", "").replace("MEDIUM", "").replace("LOW", "")
# Clean up any stray [CONTINUE: markers that leaked into response lines
# Only strip the structural markers, NOT common words like YES/NO/HIGH etc.
import re
response_text = re.sub(r'\[CONTINUE:\s*(YES|NO)\]\s*\[CONFIDENCE:\s*(HIGH|MEDIUM|LOW)\]', '', response_text)
response_text = re.sub(r'\[CONTINUE:\s*(YES|NO)\]', '', response_text)
response_text = re.sub(r'\[CONFIDENCE:\s*(HIGH|MEDIUM|LOW)\]', '', response_text)
response_text = response_text.strip()
# Override: If the response contains a question mark, always continue
@@ -727,19 +727,15 @@ Don't force a response if you have nothing meaningful to contribute."""
model = globals.EVIL_TEXT_MODEL if persona == "evil" else globals.TEXT_MODEL
original_evil_mode = globals.EVIL_MODE
globals.EVIL_MODE = (persona == "evil")
try:
response = await query_llama(
user_prompt=prompt,
user_id=f"persona_dialogue_{channel_id}",
guild_id=channel.guild.id if hasattr(channel, 'guild') and channel.guild else None,
response_type="autonomous_general",
model=model
)
finally:
globals.EVIL_MODE = original_evil_mode
# Use force_evil_context to avoid race condition with globals.EVIL_MODE
response = await query_llama(
user_prompt=prompt,
user_id=f"persona_dialogue_{channel_id}",
guild_id=channel.guild.id if hasattr(channel, 'guild') and channel.guild else None,
response_type="autonomous_general",
model=model,
force_evil_context=(persona == "evil")
)
if not response:
self.end_dialogue(channel_id)