chore: update model lists, bump to v0.2.1, fix package build

- OpenAI: add GPT-5.4, GPT-5.4 Pro; remove o-series and legacy GPT-4o
- Anthropic: add Claude Opus 4.6, Sonnet 4.6; remove legacy 4.1/4.0/3.x
- Google: add Gemini 3.1 Pro, 3.1 Flash Lite; remove deprecated
  gemini-3-pro-preview and Gemini 2.0 series
- xAI: clean up model list to match current API
- Simplify UnifiedChatOpenAI GPT-5 temperature handling
- Add missing tradingagents/__init__.py (fixes pip install building)
This commit is contained in:
Yijia-Xiao 2026-03-15 23:34:50 +00:00
parent b0f9d180f9
commit 551fd7f074
5 changed files with 47 additions and 65 deletions

View File

@@ -130,30 +130,30 @@ def select_shallow_thinking_agent(provider) -> str:
"""Select shallow thinking llm engine using an interactive selection.""" """Select shallow thinking llm engine using an interactive selection."""
# Define shallow thinking llm engine options with their corresponding model names # Define shallow thinking llm engine options with their corresponding model names
# Ordering: medium → light → heavy (balanced first for quick tasks)
# Within same tier, newer models first
SHALLOW_AGENT_OPTIONS = { SHALLOW_AGENT_OPTIONS = {
"openai": [ "openai": [
("GPT-5 Mini - Cost-optimized reasoning", "gpt-5-mini"), ("GPT-5 Mini - Balanced speed, cost, and capability", "gpt-5-mini"),
("GPT-5 Nano - Ultra-fast, high-throughput", "gpt-5-nano"), ("GPT-5 Nano - High-throughput, simple tasks", "gpt-5-nano"),
("GPT-5.2 - Latest flagship", "gpt-5.2"), ("GPT-5.4 - Latest frontier, 1M context", "gpt-5.4"),
("GPT-5.1 - Flexible reasoning", "gpt-5.1"), ("GPT-4.1 - Smartest non-reasoning model", "gpt-4.1"),
("GPT-4.1 - Smartest non-reasoning, 1M context", "gpt-4.1"),
], ],
"anthropic": [ "anthropic": [
("Claude Haiku 4.5 - Fast + extended thinking", "claude-haiku-4-5"), ("Claude Sonnet 4.6 - Best speed and intelligence balance", "claude-sonnet-4-6"),
("Claude Sonnet 4.5 - Best for agents/coding", "claude-sonnet-4-5"), ("Claude Haiku 4.5 - Fast, near-instant responses", "claude-haiku-4-5"),
("Claude Sonnet 4 - High-performance", "claude-sonnet-4-20250514"), ("Claude Sonnet 4.5 - Agents and coding", "claude-sonnet-4-5"),
], ],
"google": [ "google": [
("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"), ("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"),
("Gemini 2.5 Flash - Balanced, recommended", "gemini-2.5-flash"), ("Gemini 2.5 Flash - Balanced, stable", "gemini-2.5-flash"),
("Gemini 3 Pro - Reasoning-first", "gemini-3-pro-preview"), ("Gemini 3.1 Flash Lite - Most cost-efficient", "gemini-3.1-flash-lite-preview"),
("Gemini 2.5 Flash Lite - Fast, low-cost", "gemini-2.5-flash-lite"), ("Gemini 2.5 Flash Lite - Fast, low-cost", "gemini-2.5-flash-lite"),
], ],
"xai": [ "xai": [
("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"), ("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"),
("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"), ("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"),
("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"), ("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"),
("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"),
], ],
"openrouter": [ "openrouter": [
("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"), ("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"),
@@ -195,33 +195,32 @@ def select_deep_thinking_agent(provider) -> str:
"""Select deep thinking llm engine using an interactive selection.""" """Select deep thinking llm engine using an interactive selection."""
# Define deep thinking llm engine options with their corresponding model names # Define deep thinking llm engine options with their corresponding model names
# Ordering: heavy → medium → light (most capable first for deep tasks)
# Within same tier, newer models first
DEEP_AGENT_OPTIONS = { DEEP_AGENT_OPTIONS = {
"openai": [ "openai": [
("GPT-5.2 - Latest flagship", "gpt-5.2"), ("GPT-5.4 - Latest frontier, 1M context", "gpt-5.4"),
("GPT-5.1 - Flexible reasoning", "gpt-5.1"), ("GPT-5.2 - Strong reasoning, cost-effective", "gpt-5.2"),
("GPT-5 - Advanced reasoning", "gpt-5"), ("GPT-5 Mini - Balanced speed, cost, and capability", "gpt-5-mini"),
("GPT-4.1 - Smartest non-reasoning, 1M context", "gpt-4.1"), ("GPT-5.4 Pro - Most capable, expensive ($30/$180 per 1M tokens)", "gpt-5.4-pro"),
("GPT-5 Mini - Cost-optimized reasoning", "gpt-5-mini"),
("GPT-5 Nano - Ultra-fast, high-throughput", "gpt-5-nano"),
], ],
"anthropic": [ "anthropic": [
("Claude Sonnet 4.5 - Best for agents/coding", "claude-sonnet-4-5"), ("Claude Opus 4.6 - Most intelligent, agents and coding", "claude-opus-4-6"),
("Claude Opus 4.5 - Premium, max intelligence", "claude-opus-4-5"), ("Claude Opus 4.5 - Premium, max intelligence", "claude-opus-4-5"),
("Claude Opus 4.1 - Most capable model", "claude-opus-4-1-20250805"), ("Claude Sonnet 4.6 - Best speed and intelligence balance", "claude-sonnet-4-6"),
("Claude Haiku 4.5 - Fast + extended thinking", "claude-haiku-4-5"), ("Claude Sonnet 4.5 - Agents and coding", "claude-sonnet-4-5"),
("Claude Sonnet 4 - High-performance", "claude-sonnet-4-20250514"),
], ],
"google": [ "google": [
("Gemini 3 Pro - Reasoning-first", "gemini-3-pro-preview"), ("Gemini 3.1 Pro - Reasoning-first, complex workflows", "gemini-3.1-pro-preview"),
("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"), ("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"),
("Gemini 2.5 Flash - Balanced, recommended", "gemini-2.5-flash"), ("Gemini 2.5 Pro - Stable pro model", "gemini-2.5-pro"),
("Gemini 2.5 Flash - Balanced, stable", "gemini-2.5-flash"),
], ],
"xai": [ "xai": [
("Grok 4 - Flagship model", "grok-4-0709"),
("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"), ("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"),
("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"), ("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"),
("Grok 4 - Flagship model", "grok-4-0709"),
("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"), ("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"),
("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"),
], ],
"openrouter": [ "openrouter": [
("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"), ("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"),

View File

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project] [project]
name = "tradingagents" name = "tradingagents"
version = "0.2.0" version = "0.2.1"
description = "TradingAgents: Multi-Agents LLM Financial Trading Framework" description = "TradingAgents: Multi-Agents LLM Financial Trading Framework"
readme = "README.md" readme = "README.md"
requires-python = ">=3.10" requires-python = ">=3.10"

View File

View File

@@ -8,25 +8,23 @@ from .validators import validate_model
class UnifiedChatOpenAI(ChatOpenAI): class UnifiedChatOpenAI(ChatOpenAI):
"""ChatOpenAI subclass that strips incompatible params for certain models.""" """ChatOpenAI subclass that strips temperature/top_p for GPT-5 family models.
GPT-5 family models use reasoning natively. temperature/top_p are only
accepted when reasoning.effort is 'none'; with any other effort level
(or for older GPT-5/GPT-5-mini/GPT-5-nano which always reason) the API
rejects these params. Langchain defaults temperature=0.7, so we must
strip it to avoid errors.
Non-GPT-5 models (GPT-4.1, xAI, Ollama, etc.) are unaffected.
"""
def __init__(self, **kwargs): def __init__(self, **kwargs):
model = kwargs.get("model", "") if "gpt-5" in kwargs.get("model", "").lower():
if self._is_reasoning_model(model):
kwargs.pop("temperature", None) kwargs.pop("temperature", None)
kwargs.pop("top_p", None) kwargs.pop("top_p", None)
super().__init__(**kwargs) super().__init__(**kwargs)
@staticmethod
def _is_reasoning_model(model: str) -> bool:
"""Check if model is a reasoning model that doesn't support temperature."""
model_lower = model.lower()
return (
model_lower.startswith("o1")
or model_lower.startswith("o3")
or "gpt-5" in model_lower
)
class OpenAIClient(BaseLLMClient): class OpenAIClient(BaseLLMClient):
"""Client for OpenAI, Ollama, OpenRouter, and xAI providers.""" """Client for OpenAI, Ollama, OpenRouter, and xAI providers."""

View File

@@ -6,59 +6,44 @@ Let LLM providers use their own defaults for unspecified params.
VALID_MODELS = { VALID_MODELS = {
"openai": [ "openai": [
# GPT-5 series (2025) # GPT-5 series
"gpt-5.4-pro",
"gpt-5.4",
"gpt-5.2", "gpt-5.2",
"gpt-5.1", "gpt-5.1",
"gpt-5", "gpt-5",
"gpt-5-mini", "gpt-5-mini",
"gpt-5-nano", "gpt-5-nano",
# GPT-4.1 series (2025) # GPT-4.1 series
"gpt-4.1", "gpt-4.1",
"gpt-4.1-mini", "gpt-4.1-mini",
"gpt-4.1-nano", "gpt-4.1-nano",
# o-series reasoning models
"o4-mini",
"o3",
"o3-mini",
"o1",
"o1-preview",
# GPT-4o series (legacy but still supported)
"gpt-4o",
"gpt-4o-mini",
], ],
"anthropic": [ "anthropic": [
# Claude 4.5 series (2025) # Claude 4.6 series (latest)
"claude-opus-4-6",
"claude-sonnet-4-6",
# Claude 4.5 series
"claude-opus-4-5", "claude-opus-4-5",
"claude-sonnet-4-5", "claude-sonnet-4-5",
"claude-haiku-4-5", "claude-haiku-4-5",
# Claude 4.x series
"claude-opus-4-1-20250805",
"claude-sonnet-4-20250514",
# Claude 3.7 series
"claude-3-7-sonnet-20250219",
# Claude 3.5 series (legacy)
"claude-3-5-haiku-20241022",
"claude-3-5-sonnet-20241022",
], ],
"google": [ "google": [
# Gemini 3.1 series (preview)
"gemini-3.1-pro-preview",
"gemini-3.1-flash-lite-preview",
# Gemini 3 series (preview) # Gemini 3 series (preview)
"gemini-3-pro-preview",
"gemini-3-flash-preview", "gemini-3-flash-preview",
# Gemini 2.5 series # Gemini 2.5 series
"gemini-2.5-pro", "gemini-2.5-pro",
"gemini-2.5-flash", "gemini-2.5-flash",
"gemini-2.5-flash-lite", "gemini-2.5-flash-lite",
# Gemini 2.0 series
"gemini-2.0-flash",
"gemini-2.0-flash-lite",
], ],
"xai": [ "xai": [
# Grok 4.1 series # Grok 4.1 series
"grok-4-1-fast",
"grok-4-1-fast-reasoning", "grok-4-1-fast-reasoning",
"grok-4-1-fast-non-reasoning", "grok-4-1-fast-non-reasoning",
# Grok 4 series # Grok 4 series
"grok-4",
"grok-4-0709", "grok-4-0709",
"grok-4-fast-reasoning", "grok-4-fast-reasoning",
"grok-4-fast-non-reasoning", "grok-4-fast-non-reasoning",