Files
miku-discord/cheshire-cat/test_setup_simple.py

162 lines
5.6 KiB
Python
Raw Normal View History

#!/usr/bin/env python3
"""
Simplified Cheshire Cat Test Setup - Just upload knowledge and test
LLM configuration should be done via admin panel: http://localhost:1865/admin
"""
import requests
import time
import sys
CAT_URL = "http://localhost:1865"
def wait_for_cat():
    """Poll the Cat's root endpoint until the service answers.

    Retries up to 30 times, pausing 2 seconds between attempts.

    Returns:
        bool: True once the root URL responds with HTTP 200,
        False if it never does within the retry budget.
    """
    print("Waiting for Cheshire Cat to start...")
    attempts = 30
    for attempt in range(1, attempts + 1):
        try:
            # Connection errors are expected while the container boots.
            if requests.get(f"{CAT_URL}/", timeout=5).status_code == 200:
                print("✅ Cheshire Cat is ready!")
                return True
        except requests.exceptions.RequestException:
            pass
        print(f" Attempt {attempt}/{attempts}...")
        time.sleep(2)
    print("❌ Cheshire Cat failed to start")
    return False
def upload_knowledge_base():
    """Upload Miku's persona files to the Cat's Rabbit Hole endpoint.

    Each file is POSTed to /rabbithole/ as multipart form data so the Cat
    can chunk and embed it. A missing file, a timeout, or any other error
    for one file is reported and does not abort the remaining uploads.

    Returns:
        bool: True if at least one file was uploaded successfully.
    """
    print("\n📚 Uploading Miku knowledge base to Rabbit Hole...")
    print(" (This will take a few minutes as Cat chunks and embeds the text)")
    files_to_upload = [
        ("../bot/persona/miku/miku_lore.txt", "Miku's background, personality, and character info"),
        ("../bot/persona/miku/miku_prompt.txt", "Miku's behavior guidelines and examples"),
        ("../bot/persona/miku/miku_lyrics.txt", "Miku's song lyrics and music knowledge")
    ]
    uploaded_count = 0
    for filepath, description in files_to_upload:
        try:
            filename = filepath.split('/')[-1]
            # BUG FIX: `filename` was computed but never interpolated —
            # the message printed the literal text "(unknown)".
            print(f"\n 📄 Uploading {filename}...")
            print(f" ({description})")
            with open(filepath, 'rb') as f:
                files = {'file': (filename, f, 'text/plain')}
                response = requests.post(
                    f"{CAT_URL}/rabbithole/",
                    files=files,
                    timeout=120  # generous: chunking + embedding is slow
                )
            if response.status_code == 200:
                print(f" ✅ Uploaded and processed successfully!")
                uploaded_count += 1
            else:
                print(f" ❌ Failed: HTTP {response.status_code}")
                try:
                    error_detail = response.json()
                    print(f" {error_detail}")
                # Narrowed from a bare `except:` — only a non-JSON body
                # should fall back to printing raw text.
                except ValueError:
                    print(f" {response.text[:200]}")
        except FileNotFoundError:
            print(f" ⚠️ File not found: {filepath}")
        except requests.exceptions.Timeout:
            print(f" ⚠️ Upload timed out (file might be too large or embedding is slow)")
        except Exception as e:
            print(f" ❌ Error: {e}")
    print(f"\n📊 Successfully uploaded: {uploaded_count}/{len(files_to_upload)} files")
    return uploaded_count > 0
def test_query():
    """Fire a couple of sample messages at the Cat as a smoke test.

    Until an LLM has been configured through the admin panel these
    queries are expected to come back as HTTP 500.
    """
    print("\n🧪 Testing queries (after LLM is configured)...")
    print(" Note: These will fail until you configure the LLM in admin panel")
    sample_queries = (
        "What is your favorite food?",
        "Who are your friends?",
    )
    for message in sample_queries:
        print(f"\n Query: '{message}'")
        try:
            response = requests.post(
                f"{CAT_URL}/message",
                json={"text": message},
                headers={"Content-Type": "application/json"},
                timeout=30,
            )
            if response.status_code != 200:
                print(f" ⚠️ Query returned: {response.status_code}")
                if response.status_code == 500:
                    print(f" (This is expected if LLM is not configured yet)")
            else:
                reply = response.json().get("content", "No response")
                print(f" ✅ Response: {reply[:150]}...")
        except Exception as e:
            print(f" ❌ Error: {e}")
        time.sleep(1)
def main():
    """Run the end-to-end test setup: wait for the Cat, upload the
    knowledge base, print manual LLM-configuration steps, then smoke-test.

    Exits with status 1 if the Cat never becomes reachable.
    """
    banner = "=" * 70
    print(banner)
    print("🐱 Cheshire Cat Test Setup for Miku Bot")
    print(banner)
    # Step 1: Wait for Cat to start — nothing else can work until it's up.
    if not wait_for_cat():
        print("\n❌ Setup failed: Cat didn't start")
        sys.exit(1)
    # Step 2: Upload knowledge base (best-effort: continue even on failure).
    print("\n" + banner)
    if not upload_knowledge_base():
        print("\n⚠️ Knowledge upload had issues")
    # Give Cat time to process the embeddings before querying.
    print("\n⏳ Waiting 5 seconds for Cat to finish processing...")
    time.sleep(5)
    # Step 3: Manual LLM configuration instructions — this script cannot
    # configure the model itself; the admin panel must be used.
    print("\n" + banner)
    print("⚙️ LLM CONFIGURATION REQUIRED")
    print(banner)
    print("\nYou need to configure the LLM manually:")
    print("\n1. Open admin panel: http://localhost:1865/admin")
    # BUG FIX: the separator between the two menu names was missing,
    # producing the garbled instruction "'Settings''Language Model'".
    print("\n2. Go to 'Settings' → 'Language Model'")
    print("\n3. Select 'OpenAI Compatible'")
    print("\n4. Configure:")
    print(" API Key: dummy")
    print(" Model Name: Llama-3.1-8B-Instruct-UD-Q4_K_XL.gguf")
    print(" API Base URL: http://llama-swap:8080/v1")
    print(" (or http://host.docker.internal:8080/v1)")
    print("\n5. Click 'Save'")
    # Step 4: Smoke test — expected to 500 until the LLM is configured.
    test_query()
    print("\n" + banner)
    print("✅ Setup complete!")
    print(banner)
    print("\nNext steps:")
    print(" 1. Configure LLM in admin panel (see above)")
    print(" 2. Test manually: http://localhost:1865/admin")
    print(" 3. Run benchmarks: python3 benchmark_cat.py")
    print("\n" + banner)
# Entry point: run the setup only when executed directly, not on import.
if __name__ == "__main__":
    main()