chore: update model lists and defaults to GPT-5.4 family

This commit is contained in:
Yijia-Xiao 2026-03-29 19:45:36 +00:00
parent 6cddd26d6e
commit e75d17bc51
No known key found for this signature in database
4 changed files with 9 additions and 9 deletions

View File

@@ -189,8 +189,8 @@ from tradingagents.default_config import DEFAULT_CONFIG
config = DEFAULT_CONFIG.copy() config = DEFAULT_CONFIG.copy()
config["llm_provider"] = "openai" # openai, google, anthropic, xai, openrouter, ollama config["llm_provider"] = "openai" # openai, google, anthropic, xai, openrouter, ollama
config["deep_think_llm"] = "gpt-5.2" # Model for complex reasoning config["deep_think_llm"] = "gpt-5.4" # Model for complex reasoning
config["quick_think_llm"] = "gpt-5-mini" # Model for quick tasks config["quick_think_llm"] = "gpt-5.4-mini" # Model for quick tasks
config["max_debate_rounds"] = 2 config["max_debate_rounds"] = 2
ta = TradingAgentsGraph(debug=True, config=config) ta = TradingAgentsGraph(debug=True, config=config)

View File

@@ -8,8 +8,8 @@ load_dotenv()
# Create a custom config # Create a custom config
config = DEFAULT_CONFIG.copy() config = DEFAULT_CONFIG.copy()
config["deep_think_llm"] = "gpt-5-mini" # Use a different model config["deep_think_llm"] = "gpt-5.4-mini" # Use a different model
config["quick_think_llm"] = "gpt-5-mini" # Use a different model config["quick_think_llm"] = "gpt-5.4-mini" # Use a different model
config["max_debate_rounds"] = 1 # Increase debate rounds config["max_debate_rounds"] = 1 # Increase debate rounds
# Configure data vendors (default uses yfinance, no extra API keys needed) # Configure data vendors (default uses yfinance, no extra API keys needed)

View File

@@ -9,8 +9,8 @@ DEFAULT_CONFIG = {
), ),
# LLM settings # LLM settings
"llm_provider": "openai", "llm_provider": "openai",
"deep_think_llm": "gpt-5.2", "deep_think_llm": "gpt-5.4",
"quick_think_llm": "gpt-5-mini", "quick_think_llm": "gpt-5.4-mini",
"backend_url": "https://api.openai.com/v1", "backend_url": "https://api.openai.com/v1",
# Provider-specific thinking configuration # Provider-specific thinking configuration
"google_thinking_level": None, # "high", "minimal", etc. "google_thinking_level": None, # "high", "minimal", etc.

View File

@@ -11,15 +11,15 @@ ProviderModeOptions = Dict[str, Dict[str, List[ModelOption]]]
MODEL_OPTIONS: ProviderModeOptions = { MODEL_OPTIONS: ProviderModeOptions = {
"openai": { "openai": {
"quick": [ "quick": [
("GPT-5 Mini - Balanced speed, cost, and capability", "gpt-5-mini"), ("GPT-5.4 Mini - Fast, strong coding and tool use", "gpt-5.4-mini"),
("GPT-5 Nano - High-throughput, simple tasks", "gpt-5-nano"), ("GPT-5.4 Nano - Cheapest, high-volume tasks", "gpt-5.4-nano"),
("GPT-5.4 - Latest frontier, 1M context", "gpt-5.4"), ("GPT-5.4 - Latest frontier, 1M context", "gpt-5.4"),
("GPT-4.1 - Smartest non-reasoning model", "gpt-4.1"), ("GPT-4.1 - Smartest non-reasoning model", "gpt-4.1"),
], ],
"deep": [ "deep": [
("GPT-5.4 - Latest frontier, 1M context", "gpt-5.4"), ("GPT-5.4 - Latest frontier, 1M context", "gpt-5.4"),
("GPT-5.2 - Strong reasoning, cost-effective", "gpt-5.2"), ("GPT-5.2 - Strong reasoning, cost-effective", "gpt-5.2"),
("GPT-5 Mini - Balanced speed, cost, and capability", "gpt-5-mini"), ("GPT-5.4 Mini - Fast, strong coding and tool use", "gpt-5.4-mini"),
("GPT-5.4 Pro - Most capable, expensive ($30/$180 per 1M tokens)", "gpt-5.4-pro"), ("GPT-5.4 Pro - Most capable, expensive ($30/$180 per 1M tokens)", "gpt-5.4-pro"),
], ],
}, },