#!/usr/bin/env python3
"""
Quick test to verify LLM integration is working
"""
import os
import traceback

from dotenv import load_dotenv

from agent.llm_client import LLMClient, LLMMessage
from agent.consultation import NetworkConsultant


def _check_api_keys() -> None:
    """Test 1: report which provider API keys are present in the environment."""
    print("\n1. Checking API keys...")
    anthropic_key = os.getenv("ANTHROPIC_API_KEY")
    openai_key = os.getenv("OPENAI_API_KEY")

    if anthropic_key:
        # Only the key prefix is echoed, never the full secret.
        print(f"✅ Anthropic API key found: {anthropic_key[:20]}...")
    else:
        print("❌ Anthropic API key not found")

    if openai_key:
        print(f"✅ OpenAI API key found: {openai_key[:20]}...")
    else:
        print("⚠️ OpenAI API key not found (optional)")


def _init_client() -> LLMClient:
    """Test 2: construct the LLM client and report the provider it selected.

    Returns:
        The initialized ``LLMClient`` (reused by the chat test).
    """
    print("\n2. Initializing LLM client...")
    llm = LLMClient()
    print(f"✅ LLM client initialized with provider: {llm.provider}")
    return llm


def _test_chat(llm: LLMClient) -> None:
    """Test 3: one-shot chat completion; failures are printed, not raised.

    Args:
        llm: Client returned by :func:`_init_client`.
    """
    print("\n3. Testing basic chat completion...")
    try:
        messages = [
            LLMMessage(role="user", content="Reply with just 'Hello from Overgrowth!' and nothing else.")
        ]
        # Low temperature to keep the smoke-test reply deterministic-ish.
        response = llm.chat(messages, temperature=0.1)
        print(f"✅ Response received: {response[:100]}")
    except Exception as e:
        # Smoke test: report and continue so later tests still run.
        print(f"❌ Chat test failed: {e}")


def _test_consultation() -> None:
    """Test 4: start a consultation with a canned scenario.

    Prints the captured intent when the consultation completes immediately,
    or the follow-up questions otherwise.
    """
    print("\n4. Testing network consultation...")
    try:
        consultant = NetworkConsultant()
        test_input = """
We're a coffee shop chain with 3 locations. We need WiFi for customers, POS systems with payment processing, security cameras, and secure VPN to HQ for centralized management. Each location has ~50 customers at peak time.
"""
        is_complete, output, intent = consultant.start_consultation(test_input)
        if is_complete:
            print("✅ Consultation completed immediately")
            print(f"\nIntent captured:\n{output}")
        else:
            print("✅ Consultation started - follow-up questions:")
            print(f"\n{output}")
    except Exception as e:
        print(f"❌ Consultation test failed: {e}")
        traceback.print_exc()


def main() -> None:
    """Run the four LLM integration smoke tests in order."""
    # Pull API keys from a .env file before anything reads os.environ.
    load_dotenv()

    print("=" * 60)
    print("Testing LLM Integration")
    print("=" * 60)

    _check_api_keys()
    llm = _init_client()
    _test_chat(llm)
    _test_consultation()

    print("\n" + "=" * 60)
    print("LLM Integration Test Complete!")
    print("=" * 60)


if __name__ == "__main__":
    main()