Configuration
Minimal Configuration
# settings.py
AI_SDK = {
    "DEFAULT_PROVIDER": "openai",
    "DEFAULT_MODEL": "gpt-4.1",
    "PROVIDERS": {
        "openai": {
            "api_key": env("OPENAI_API_KEY"),
        },
    },
}

Full Configuration Reference
AI_SDK = {
    # Default provider and model used when an agent does not specify one
    "DEFAULT_PROVIDER": "openai",
    "DEFAULT_MODEL": "gpt-4.1",

    # Provider configurations
    "PROVIDERS": {
        "openai": {
            "api_key": env("OPENAI_API_KEY"),
        },
        "anthropic": {
            "api_key": env("ANTHROPIC_API_KEY"),
            "default_model": "claude-3-5-haiku-20241022",
            "default_thinking_budget": 8000,
        },
        "gemini": {
            "api_key": env("GEMINI_API_KEY"),
        },
        "groq": {
            "api_key": env("GROQ_API_KEY"),
        },
        "deepseek": {
            "api_key": env("DEEPSEEK_API_KEY"),
            "base_url": "https://api.deepseek.com",
        },
        "ollama": {
            "base_url": "http://localhost:11434",
        },
    },

    # Provider failover chain
    "FAILOVER": ["openai", "anthropic"],

    # Conversation persistence
    "CONVERSATION": {
        "PERSIST": True,          # Save messages to the database
        "MAX_HISTORY": 50,        # Maximum messages loaded per turn
        "AUTO_SUMMARIZE": False,  # Auto-summarize older messages
    },

    # Prompt caching (Anthropic + OpenAI native caching)
    "CACHE": {
        "ENABLED": True,
        "PROVIDERS": ["anthropic", "openai"],
    },

    # Server-Sent Events streaming
    "STREAMING": {
        "CHUNK_SEPARATOR": "\n\n",
        "SSE_RETRY_MS": 3000,
        "STREAM_THINKING": False,  # Include thinking_delta chunks
    },

    # Observability backend
    "OBSERVABILITY": {
        "BACKEND": None,  # "langsmith" | "langfuse" | "opentelemetry" | None
    },

    # Token-based rate limiting
    "RATE_LIMITING": {
        "ENABLED": False,
        "BACKEND": "django_cache",
        "PER_USER_TOKENS_PER_MINUTE": 50000,
        "PER_USER_TOKENS_PER_DAY": 500000,
    },
}

Provider Config Options
| Key | Type | Description |
Multiple Providers Example
Validating Configuration
Environment Variables
Last updated
Was this helpful?