Understanding LLM Integration
LLMConfig
from evolverl.llm import LLMConfig, LLMBackend

# OpenAI configuration
openai_config = LLMConfig(
    model_name="gpt-4o-mini",
    model_type="openai",
    max_tokens=1000,
    temperature=0.7,
    openai_api_key="your-api-key"
)

# Anthropic configuration
anthropic_config = LLMConfig(
    model_name="claude-3-5-sonnet-20241022",
    model_type="anthropic",
    max_tokens=1000,
    temperature=0.7,
    anthropic_api_key="your-api-key"
)

# Create backend
backend = LLMBackend(openai_config)
Supported Models
# Create backend backend = LLMBackend(config) # Generate text response = await backend.generate( prompt="What is 2+2?", system_prompt="You are a math tutor." )
# Create agent with LLM config agent = Agent(AgentConfig(llm_config)) # Send message response = await agent.send_message("What is 2+2?")
# OpenAI
export OPENAI_API_KEY="your-api-key"

# Anthropic
export ANTHROPIC_API_KEY="your-api-key"
.env
OPENAI_API_KEY=your-api-key
ANTHROPIC_API_KEY=your-api-key