feat: Implement comprehensive non-hierarchical logging system

- Created new logging infrastructure with per-component filtering
- Added 6 log levels: DEBUG, INFO, API (custom), WARNING, ERROR, CRITICAL
- Implemented non-hierarchical level control (any combination can be enabled; see the sketch after this list)
- Migrated 917 print() statements across 31 files to structured logging
- Created web UI (system.html) for runtime configuration with dark theme
- Added global level controls to enable/disable levels across all components
- Added timestamp format control (off/time/date/datetime options)
- Implemented log rotation (10MB per file, 5 backups)
- Added API endpoints for dynamic log configuration
- Configured HTTP request logging with filtering via api.requests component
- Intercepted APScheduler logs with proper formatting
- Fixed persistence paths to use /app/memory for Docker volume compatibility
- Fixed checkbox display bug in web UI (enabled_levels now properly shown)
- Changed System Settings button to open in same tab instead of new window
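
The non-hierarchical behaviour is the core design change: instead of the stdlib threshold model (everything at or above setLevel passes), each level is an independent switch. A minimal sketch of how that can be done with a stdlib logging.Filter — the class name and the numeric value of the custom API level are illustrative assumptions, not code from this commit:

    import logging

    # Custom API level; 25 (between INFO=20 and WARNING=30) is an assumed value.
    API = 25
    logging.addLevelName(API, "API")

    class EnabledLevelsFilter(logging.Filter):
        """Pass a record only if its exact level is switched on.

        Unlike the usual threshold model, any combination of the six
        levels can be enabled or disabled independently.
        """
        def __init__(self, enabled_levels):
            super().__init__()
            self.enabled_levels = set(enabled_levels)

        def filter(self, record):
            return record.levelno in self.enabled_levels

    handler = logging.StreamHandler()
    # Example: emit only ERROR and API traffic, silencing DEBUG/INFO/WARNING.
    handler.addFilter(EnabledLevelsFilter({logging.ERROR, API}))
    # Records at the custom level are emitted with logger.log(API, "message").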

Components: bot, api, api.requests, autonomous, persona, vision, llm,
conversation, mood, dm, scheduled, gpu, media, server, commands,
sentiment, core, apscheduler

All settings persist across container restarts via JSON config.
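
A sketch of what the dynamic-configuration endpoint could look like: a handler that merges posted changes into the per-component config and writes it back to JSON under /app/memory so it survives container restarts. The framework, route, and file name are assumptions for illustration; only the /app/memory location and JSON persistence are stated above.

    import json
    from pathlib import Path
    from flask import Flask, request, jsonify

    CONFIG_PATH = Path("/app/memory/log_config.json")  # file name is assumed
    app = Flask(__name__)

    def load_config():
        if CONFIG_PATH.exists():
            return json.loads(CONFIG_PATH.read_text())
        return {"global_levels": ["INFO", "API", "WARNING", "ERROR", "CRITICAL"],
                "components": {}}

    @app.post("/api/logs/config")  # route is assumed
    def update_log_config():
        config = load_config()
        # e.g. posted body: {"components": {"media": ["DEBUG", "ERROR"]}}
        config.update(request.get_json())
        CONFIG_PATH.write_text(json.dumps(config, indent=2))
        # live handlers would re-read this config and swap their filters here
        return jsonify(config)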
Date: 2026-01-10 20:46:19 +02:00
Parent: ce00f9bd95
Commit: 32c2a7b930
34 changed files with 2766 additions and 936 deletions


@@ -11,11 +11,14 @@ apply_twscrape_fix()
 from twscrape import API, gather, Account
 from playwright.async_api import async_playwright
 from pathlib import Path
+from utils.logger import get_logger
+
+logger = get_logger('media')

 COOKIE_PATH = Path(__file__).parent / "x.com.cookies.json"

 async def extract_media_urls(page, tweet_url):
-    print(f"🔍 Visiting tweet page: {tweet_url}")
+    logger.debug(f"Visiting tweet page: {tweet_url}")
     try:
         await page.goto(tweet_url, timeout=15000)
         await page.wait_for_timeout(1000)
@@ -29,11 +32,11 @@ async def extract_media_urls(page, tweet_url):
             cleaned = src.split("&name=")[0] + "&name=large"
             urls.add(cleaned)

-        print(f"🖼️ Found {len(urls)} media URLs on tweet: {tweet_url}")
+        logger.debug(f"Found {len(urls)} media URLs on tweet: {tweet_url}")
         return list(urls)
     except Exception as e:
-        print(f"Playwright error on {tweet_url}: {e}")
+        logger.error(f"Playwright error on {tweet_url}: {e}")
         return []

 async def fetch_miku_tweets(limit=5):
@@ -53,11 +56,11 @@ async def fetch_miku_tweets(limit=5):
     )
     await api.pool.login_all()

-    print(f"🔎 Searching for Miku tweets (limit={limit})...")
+    logger.info(f"Searching for Miku tweets (limit={limit})...")
     query = 'Hatsune Miku OR 初音ミク has:images after:2025'
     tweets = await gather(api.search(query, limit=limit, kv={"product": "Top"}))

-    print(f"📄 Found {len(tweets)} tweets, launching browser...")
+    logger.info(f"Found {len(tweets)} tweets, launching browser...")

     async with async_playwright() as p:
         browser = await p.firefox.launch(headless=True)
@@ -78,7 +81,7 @@ async def fetch_miku_tweets(limit=5):
         for i, tweet in enumerate(tweets, 1):
             username = tweet.user.username
             tweet_url = f"https://twitter.com/{username}/status/{tweet.id}"
-            print(f"🧵 Processing tweet {i}/{len(tweets)} from @{username}")
+            logger.debug(f"Processing tweet {i}/{len(tweets)} from @{username}")

             media_urls = await extract_media_urls(page, tweet_url)
             if media_urls:
@@ -90,7 +93,7 @@ async def fetch_miku_tweets(limit=5):
                 })

         await browser.close()

-    print(f"Finished! Returning {len(results)} tweet(s) with media.")
+    logger.info(f"Finished! Returning {len(results)} tweet(s) with media.")
     return results
@@ -99,7 +102,7 @@ async def _search_latest(api: API, query: str, limit: int) -> list:
     try:
         return await gather(api.search(query, limit=limit, kv={"product": "Latest"}))
     except Exception as e:
-        print(f"⚠️ Latest search failed for '{query}': {e}")
+        logger.error(f"Latest search failed for '{query}': {e}")
         return []
@@ -131,13 +134,13 @@ async def fetch_figurine_tweets_latest(limit_per_source: int = 10) -> list:
         "miku from:OtakuOwletMerch",
     ]

-    print("🔎 Searching figurine tweets by Latest across sources...")
+    logger.info("Searching figurine tweets by Latest across sources...")
     all_tweets = []
     for q in queries:
         tweets = await _search_latest(api, q, limit_per_source)
         all_tweets.extend(tweets)

-    print(f"📄 Found {len(all_tweets)} candidate tweets, launching browser to extract media...")
+    logger.info(f"Found {len(all_tweets)} candidate tweets, launching browser to extract media...")

     async with async_playwright() as p:
         browser = await p.firefox.launch(headless=True)
@@ -157,7 +160,7 @@ async def fetch_figurine_tweets_latest(limit_per_source: int = 10) -> list:
             try:
                 username = tweet.user.username
                 tweet_url = f"https://twitter.com/{username}/status/{tweet.id}"
-                print(f"🧵 Processing tweet {i}/{len(all_tweets)} from @{username}")
+                logger.debug(f"Processing tweet {i}/{len(all_tweets)} from @{username}")
                 media_urls = await extract_media_urls(page, tweet_url)
                 if media_urls:
                     results.append({
@@ -167,10 +170,10 @@ async def fetch_figurine_tweets_latest(limit_per_source: int = 10) -> list:
                         "media": media_urls
                     })
             except Exception as e:
-                print(f"⚠️ Error processing tweet: {e}")
+                logger.error(f"Error processing tweet: {e}")

         await browser.close()

-    print(f"Figurine fetch finished. Returning {len(results)} tweet(s) with media.")
+    logger.info(f"Figurine fetch finished. Returning {len(results)} tweet(s) with media.")
     return results
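
The diff above only shows call sites; for completeness, a sketch of the shape utils/logger.py implies — one named logger per component, a shared rotating file handler sized per the commit message (10 MB per file, 5 backups), and APScheduler interception. The log path, format string, and function body are assumptions; only get_logger's name and the rotation numbers come from this commit.

    import logging
    from logging.handlers import RotatingFileHandler

    # Shared handler: 10 MB per file, 5 backups, per the commit message.
    # The path under /app/memory is an assumption.
    _handler = RotatingFileHandler("/app/memory/logs/app.log",
                                   maxBytes=10 * 1024 * 1024, backupCount=5)
    # datefmt is where the off/time/date/datetime timestamp option would plug in.
    _handler.setFormatter(logging.Formatter(
        "%(asctime)s [%(name)s] %(levelname)s: %(message)s"))

    def get_logger(component: str) -> logging.Logger:
        logger = logging.getLogger(component)
        if _handler not in logger.handlers:
            logger.addHandler(_handler)
        # Level stays wide open; a per-component enabled-levels filter
        # (see the sketch in the summary above) decides what is emitted.
        logger.setLevel(logging.DEBUG)
        return logger

    # APScheduler interception: route its records through the same handler
    # so they pick up the shared format and filtering.
    logging.getLogger("apscheduler").addHandler(_handler)
    logging.getLogger("apscheduler").propagate = False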