Merge branch 'main' into roadmap/realistic-timeline-march-2026

This commit is contained in:
Manav Chaudhary 2026-03-16 11:07:32 -04:00 committed by GitHub
commit a2936b6dfe
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
16 changed files with 124 additions and 93 deletions

View File

@@ -5,6 +5,10 @@
## News
- [2026-03] **TradingAgents v0.2.1** released with GPT-5.4, Gemini 3.1, Claude 4.6 model coverage and improved system stability.
- [2026-02] **TradingAgents v0.2.0** released with multi-provider LLM support (GPT-5.x, Gemini 3.x, Claude 4.x, Grok 4.x) and improved system architecture.
- [2026-01] **Trading-R1** [Technical Report](https://arxiv.org/abs/2509.11420) released, with [Terminal](https://github.com/TauricResearch/Trading-R1) expected to land soon.

View File

@@ -462,7 +462,7 @@ def update_display(layout, spinner_text=None, stats_handler=None, start_time=Non
def get_user_selections(): def get_user_selections():
"""Get all user selections before starting the analysis display.""" """Get all user selections before starting the analysis display."""
# Display ASCII art welcome message # Display ASCII art welcome message
with open("./cli/static/welcome.txt", "r") as f: with open("./cli/static/welcome.txt", "r", encoding="utf-8") as f:
welcome_ascii = f.read() welcome_ascii = f.read()
# Create welcome box content # Create welcome box content
@@ -948,7 +948,7 @@ def run_analysis():
func(*args, **kwargs) func(*args, **kwargs)
timestamp, message_type, content = obj.messages[-1] timestamp, message_type, content = obj.messages[-1]
content = content.replace("\n", " ") # Replace newlines with spaces content = content.replace("\n", " ") # Replace newlines with spaces
with open(log_file, "a") as f: with open(log_file, "a", encoding="utf-8") as f:
f.write(f"{timestamp} [{message_type}] {content}\n") f.write(f"{timestamp} [{message_type}] {content}\n")
return wrapper return wrapper
@@ -959,7 +959,7 @@ def run_analysis():
func(*args, **kwargs) func(*args, **kwargs)
timestamp, tool_name, args = obj.tool_calls[-1] timestamp, tool_name, args = obj.tool_calls[-1]
args_str = ", ".join(f"{k}={v}" for k, v in args.items()) args_str = ", ".join(f"{k}={v}" for k, v in args.items())
with open(log_file, "a") as f: with open(log_file, "a", encoding="utf-8") as f:
f.write(f"{timestamp} [Tool Call] {tool_name}({args_str})\n") f.write(f"{timestamp} [Tool Call] {tool_name}({args_str})\n")
return wrapper return wrapper
@@ -972,7 +972,7 @@ def run_analysis():
content = obj.report_sections[section_name] content = obj.report_sections[section_name]
if content: if content:
file_name = f"{section_name}.md" file_name = f"{section_name}.md"
with open(report_dir / file_name, "w") as f: with open(report_dir / file_name, "w", encoding="utf-8") as f:
f.write(content) f.write(content)
return wrapper return wrapper

View File

@@ -1,8 +1,12 @@
import questionary import questionary
from typing import List, Optional, Tuple, Dict from typing import List, Optional, Tuple, Dict
from rich.console import Console
from cli.models import AnalystType from cli.models import AnalystType
console = Console()
ANALYST_ORDER = [ ANALYST_ORDER = [
("Market Analyst", AnalystType.MARKET), ("Market Analyst", AnalystType.MARKET),
("Social Media Analyst", AnalystType.SOCIAL), ("Social Media Analyst", AnalystType.SOCIAL),
@@ -126,30 +130,30 @@ def select_shallow_thinking_agent(provider) -> str:
"""Select shallow thinking llm engine using an interactive selection.""" """Select shallow thinking llm engine using an interactive selection."""
# Define shallow thinking llm engine options with their corresponding model names # Define shallow thinking llm engine options with their corresponding model names
# Ordering: medium → light → heavy (balanced first for quick tasks)
# Within same tier, newer models first
SHALLOW_AGENT_OPTIONS = { SHALLOW_AGENT_OPTIONS = {
"openai": [ "openai": [
("GPT-5 Mini - Cost-optimized reasoning", "gpt-5-mini"), ("GPT-5 Mini - Balanced speed, cost, and capability", "gpt-5-mini"),
("GPT-5 Nano - Ultra-fast, high-throughput", "gpt-5-nano"), ("GPT-5 Nano - High-throughput, simple tasks", "gpt-5-nano"),
("GPT-5.2 - Latest flagship", "gpt-5.2"), ("GPT-5.4 - Latest frontier, 1M context", "gpt-5.4"),
("GPT-5.1 - Flexible reasoning", "gpt-5.1"), ("GPT-4.1 - Smartest non-reasoning model", "gpt-4.1"),
("GPT-4.1 - Smartest non-reasoning, 1M context", "gpt-4.1"),
], ],
"anthropic": [ "anthropic": [
("Claude Haiku 4.5 - Fast + extended thinking", "claude-haiku-4-5"), ("Claude Sonnet 4.6 - Best speed and intelligence balance", "claude-sonnet-4-6"),
("Claude Sonnet 4.5 - Best for agents/coding", "claude-sonnet-4-5"), ("Claude Haiku 4.5 - Fast, near-instant responses", "claude-haiku-4-5"),
("Claude Sonnet 4 - High-performance", "claude-sonnet-4-20250514"), ("Claude Sonnet 4.5 - Agents and coding", "claude-sonnet-4-5"),
], ],
"google": [ "google": [
("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"), ("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"),
("Gemini 2.5 Flash - Balanced, recommended", "gemini-2.5-flash"), ("Gemini 2.5 Flash - Balanced, stable", "gemini-2.5-flash"),
("Gemini 3 Pro - Reasoning-first", "gemini-3-pro-preview"), ("Gemini 3.1 Flash Lite - Most cost-efficient", "gemini-3.1-flash-lite-preview"),
("Gemini 2.5 Flash Lite - Fast, low-cost", "gemini-2.5-flash-lite"), ("Gemini 2.5 Flash Lite - Fast, low-cost", "gemini-2.5-flash-lite"),
], ],
"xai": [ "xai": [
("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"), ("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"),
("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"), ("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"),
("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"), ("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"),
("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"),
], ],
"openrouter": [ "openrouter": [
("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"), ("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"),
@@ -191,33 +195,32 @@ def select_deep_thinking_agent(provider) -> str:
"""Select deep thinking llm engine using an interactive selection.""" """Select deep thinking llm engine using an interactive selection."""
# Define deep thinking llm engine options with their corresponding model names # Define deep thinking llm engine options with their corresponding model names
# Ordering: heavy → medium → light (most capable first for deep tasks)
# Within same tier, newer models first
DEEP_AGENT_OPTIONS = { DEEP_AGENT_OPTIONS = {
"openai": [ "openai": [
("GPT-5.2 - Latest flagship", "gpt-5.2"), ("GPT-5.4 - Latest frontier, 1M context", "gpt-5.4"),
("GPT-5.1 - Flexible reasoning", "gpt-5.1"), ("GPT-5.2 - Strong reasoning, cost-effective", "gpt-5.2"),
("GPT-5 - Advanced reasoning", "gpt-5"), ("GPT-5 Mini - Balanced speed, cost, and capability", "gpt-5-mini"),
("GPT-4.1 - Smartest non-reasoning, 1M context", "gpt-4.1"), ("GPT-5.4 Pro - Most capable, expensive ($30/$180 per 1M tokens)", "gpt-5.4-pro"),
("GPT-5 Mini - Cost-optimized reasoning", "gpt-5-mini"),
("GPT-5 Nano - Ultra-fast, high-throughput", "gpt-5-nano"),
], ],
"anthropic": [ "anthropic": [
("Claude Sonnet 4.5 - Best for agents/coding", "claude-sonnet-4-5"), ("Claude Opus 4.6 - Most intelligent, agents and coding", "claude-opus-4-6"),
("Claude Opus 4.5 - Premium, max intelligence", "claude-opus-4-5"), ("Claude Opus 4.5 - Premium, max intelligence", "claude-opus-4-5"),
("Claude Opus 4.1 - Most capable model", "claude-opus-4-1-20250805"), ("Claude Sonnet 4.6 - Best speed and intelligence balance", "claude-sonnet-4-6"),
("Claude Haiku 4.5 - Fast + extended thinking", "claude-haiku-4-5"), ("Claude Sonnet 4.5 - Agents and coding", "claude-sonnet-4-5"),
("Claude Sonnet 4 - High-performance", "claude-sonnet-4-20250514"),
], ],
"google": [ "google": [
("Gemini 3 Pro - Reasoning-first", "gemini-3-pro-preview"), ("Gemini 3.1 Pro - Reasoning-first, complex workflows", "gemini-3.1-pro-preview"),
("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"), ("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"),
("Gemini 2.5 Flash - Balanced, recommended", "gemini-2.5-flash"), ("Gemini 2.5 Pro - Stable pro model", "gemini-2.5-pro"),
("Gemini 2.5 Flash - Balanced, stable", "gemini-2.5-flash"),
], ],
"xai": [ "xai": [
("Grok 4 - Flagship model", "grok-4-0709"),
("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"), ("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"),
("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"), ("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"),
("Grok 4 - Flagship model", "grok-4-0709"),
("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"), ("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"),
("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"),
], ],
"openrouter": [ "openrouter": [
("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"), ("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"),

View File

@@ -4,14 +4,13 @@ build-backend = "setuptools.build_meta"
[project] [project]
name = "tradingagents" name = "tradingagents"
version = "0.2.0" version = "0.2.1"
description = "TradingAgents: Multi-Agents LLM Financial Trading Framework" description = "TradingAgents: Multi-Agents LLM Financial Trading Framework"
readme = "README.md" readme = "README.md"
requires-python = ">=3.10" requires-python = ">=3.10"
dependencies = [ dependencies = [
"langchain-core>=0.3.81", "langchain-core>=0.3.81",
"backtrader>=1.9.78.123", "backtrader>=1.9.78.123",
"chainlit>=2.5.5",
"langchain-anthropic>=0.3.15", "langchain-anthropic>=0.3.15",
"langchain-experimental>=0.3.4", "langchain-experimental>=0.3.4",
"langchain-google-genai>=2.1.5", "langchain-google-genai>=2.1.5",

View File

@@ -14,7 +14,6 @@ requests
tqdm tqdm
pytz pytz
redis redis
chainlit
rich rich
typer typer
questionary questionary

View File

View File

@@ -10,14 +10,22 @@ def get_indicators(
look_back_days: Annotated[int, "how many days to look back"] = 30, look_back_days: Annotated[int, "how many days to look back"] = 30,
) -> str: ) -> str:
""" """
Retrieve technical indicators for a given ticker symbol. Retrieve a single technical indicator for a given ticker symbol.
Uses the configured technical_indicators vendor. Uses the configured technical_indicators vendor.
Args: Args:
symbol (str): Ticker symbol of the company, e.g. AAPL, TSM symbol (str): Ticker symbol of the company, e.g. AAPL, TSM
indicator (str): Technical indicator to get the analysis and report of indicator (str): A single technical indicator name, e.g. 'rsi', 'macd'. Call this tool once per indicator.
curr_date (str): The current trading date you are trading on, YYYY-mm-dd curr_date (str): The current trading date you are trading on, YYYY-mm-dd
look_back_days (int): How many days to look back, default is 30 look_back_days (int): How many days to look back, default is 30
Returns: Returns:
str: A formatted dataframe containing the technical indicators for the specified ticker symbol and indicator. str: A formatted dataframe containing the technical indicators for the specified ticker symbol and indicator.
""" """
return route_to_vendor("get_indicators", symbol, indicator, curr_date, look_back_days) # LLMs sometimes pass multiple indicators as a comma-separated string;
# split and process each individually.
indicators = [i.strip() for i in indicator.split(",") if i.strip()]
if len(indicators) > 1:
results = []
for ind in indicators:
results.append(route_to_vendor("get_indicators", symbol, ind, curr_date, look_back_days))
return "\n\n".join(results)
return route_to_vendor("get_indicators", symbol, indicator.strip(), curr_date, look_back_days)

View File

@@ -6,6 +6,19 @@ import os
from .config import get_config from .config import get_config
def _clean_dataframe(data: pd.DataFrame) -> pd.DataFrame:
"""Normalize a stock DataFrame for stockstats: parse dates, drop invalid rows, fill price gaps."""
data["Date"] = pd.to_datetime(data["Date"], errors="coerce")
data = data.dropna(subset=["Date"])
price_cols = [c for c in ["Open", "High", "Low", "Close", "Volume"] if c in data.columns]
data[price_cols] = data[price_cols].apply(pd.to_numeric, errors="coerce")
data = data.dropna(subset=["Close"])
data[price_cols] = data[price_cols].ffill().bfill()
return data
class StockstatsUtils: class StockstatsUtils:
@staticmethod @staticmethod
def get_stock_stats( def get_stock_stats(
@@ -36,8 +49,7 @@ class StockstatsUtils:
) )
if os.path.exists(data_file): if os.path.exists(data_file):
data = pd.read_csv(data_file) data = pd.read_csv(data_file, on_bad_lines="skip")
data["Date"] = pd.to_datetime(data["Date"])
else: else:
data = yf.download( data = yf.download(
symbol, symbol,
@@ -50,6 +62,7 @@ class StockstatsUtils:
data = data.reset_index() data = data.reset_index()
data.to_csv(data_file, index=False) data.to_csv(data_file, index=False)
data = _clean_dataframe(data)
df = wrap(data) df = wrap(data)
df["Date"] = df["Date"].dt.strftime("%Y-%m-%d") df["Date"] = df["Date"].dt.strftime("%Y-%m-%d")
curr_date_str = curr_date_dt.strftime("%Y-%m-%d") curr_date_str = curr_date_dt.strftime("%Y-%m-%d")

View File

@@ -3,7 +3,7 @@ from datetime import datetime
from dateutil.relativedelta import relativedelta from dateutil.relativedelta import relativedelta
import yfinance as yf import yfinance as yf
import os import os
from .stockstats_utils import StockstatsUtils from .stockstats_utils import StockstatsUtils, _clean_dataframe
def get_YFin_data_online( def get_YFin_data_online(
symbol: Annotated[str, "ticker symbol of the company"], symbol: Annotated[str, "ticker symbol of the company"],
@@ -209,9 +209,9 @@ def _get_stock_stats_bulk(
os.path.join( os.path.join(
config.get("data_cache_dir", "data"), config.get("data_cache_dir", "data"),
f"{symbol}-YFin-data-2015-01-01-2025-03-25.csv", f"{symbol}-YFin-data-2015-01-01-2025-03-25.csv",
),
on_bad_lines="skip",
) )
)
df = wrap(data)
except FileNotFoundError: except FileNotFoundError:
raise Exception("Stockstats fail: Yahoo Finance data not fetched yet!") raise Exception("Stockstats fail: Yahoo Finance data not fetched yet!")
else: else:
@@ -232,8 +232,7 @@ def _get_stock_stats_bulk(
) )
if os.path.exists(data_file): if os.path.exists(data_file):
data = pd.read_csv(data_file) data = pd.read_csv(data_file, on_bad_lines="skip")
data["Date"] = pd.to_datetime(data["Date"])
else: else:
data = yf.download( data = yf.download(
symbol, symbol,
@@ -246,6 +245,7 @@ def _get_stock_stats_bulk(
data = data.reset_index() data = data.reset_index()
data.to_csv(data_file, index=False) data.to_csv(data_file, index=False)
data = _clean_dataframe(data)
df = wrap(data) df = wrap(data)
df["Date"] = df["Date"].dt.strftime("%Y-%m-%d") df["Date"] = df["Date"].dt.strftime("%Y-%m-%d")

View File

@@ -24,14 +24,26 @@ class Propagator:
"company_of_interest": company_name, "company_of_interest": company_name,
"trade_date": str(trade_date), "trade_date": str(trade_date),
"investment_debate_state": InvestDebateState( "investment_debate_state": InvestDebateState(
{"history": "", "current_response": "", "count": 0} {
"bull_history": "",
"bear_history": "",
"history": "",
"current_response": "",
"judge_decision": "",
"count": 0,
}
), ),
"risk_debate_state": RiskDebateState( "risk_debate_state": RiskDebateState(
{ {
"aggressive_history": "",
"conservative_history": "",
"neutral_history": "",
"history": "", "history": "",
"latest_speaker": "",
"current_aggressive_response": "", "current_aggressive_response": "",
"current_conservative_response": "", "current_conservative_response": "",
"current_neutral_response": "", "current_neutral_response": "",
"judge_decision": "",
"count": 0, "count": 0,
} }
), ),

View File

@@ -105,7 +105,10 @@ class TradingAgentsGraph:
self.tool_nodes = self._create_tool_nodes() self.tool_nodes = self._create_tool_nodes()
# Initialize components # Initialize components
self.conditional_logic = ConditionalLogic() self.conditional_logic = ConditionalLogic(
max_debate_rounds=self.config["max_debate_rounds"],
max_risk_discuss_rounds=self.config["max_risk_discuss_rounds"],
)
self.graph_setup = GraphSetup( self.graph_setup = GraphSetup(
self.quick_thinking_llm, self.quick_thinking_llm,
self.deep_thinking_llm, self.deep_thinking_llm,
@@ -257,6 +260,7 @@ class TradingAgentsGraph:
with open( with open(
f"eval_results/{self.ticker}/TradingAgentsStrategy_logs/full_states_log_{trade_date}.json", f"eval_results/{self.ticker}/TradingAgentsStrategy_logs/full_states_log_{trade_date}.json",
"w", "w",
encoding="utf-8",
) as f: ) as f:
json.dump(self.log_states_dict, f, indent=4) json.dump(self.log_states_dict, f, indent=4)

View File

@@ -16,7 +16,7 @@ class AnthropicClient(BaseLLMClient):
"""Return configured ChatAnthropic instance.""" """Return configured ChatAnthropic instance."""
llm_kwargs = {"model": self.model} llm_kwargs = {"model": self.model}
for key in ("timeout", "max_retries", "api_key", "max_tokens", "callbacks"): for key in ("timeout", "max_retries", "api_key", "max_tokens", "callbacks", "http_client", "http_async_client"):
if key in self.kwargs: if key in self.kwargs:
llm_kwargs[key] = self.kwargs[key] llm_kwargs[key] = self.kwargs[key]

View File

@@ -19,6 +19,12 @@ def create_llm_client(
model: Model name/identifier model: Model name/identifier
base_url: Optional base URL for API endpoint base_url: Optional base URL for API endpoint
**kwargs: Additional provider-specific arguments **kwargs: Additional provider-specific arguments
- http_client: Custom httpx.Client for SSL proxy or certificate customization
- http_async_client: Custom httpx.AsyncClient for async operations
- timeout: Request timeout in seconds
- max_retries: Maximum retry attempts
- api_key: API key for the provider
- callbacks: LangChain callbacks
Returns: Returns:
Configured BaseLLMClient instance Configured BaseLLMClient instance

View File

@@ -38,7 +38,7 @@ class GoogleClient(BaseLLMClient):
"""Return configured ChatGoogleGenerativeAI instance.""" """Return configured ChatGoogleGenerativeAI instance."""
llm_kwargs = {"model": self.model} llm_kwargs = {"model": self.model}
for key in ("timeout", "max_retries", "google_api_key", "callbacks"): for key in ("timeout", "max_retries", "google_api_key", "callbacks", "http_client", "http_async_client"):
if key in self.kwargs: if key in self.kwargs:
llm_kwargs[key] = self.kwargs[key] llm_kwargs[key] = self.kwargs[key]

View File

@@ -8,25 +8,23 @@ from .validators import validate_model
class UnifiedChatOpenAI(ChatOpenAI): class UnifiedChatOpenAI(ChatOpenAI):
"""ChatOpenAI subclass that strips incompatible params for certain models.""" """ChatOpenAI subclass that strips temperature/top_p for GPT-5 family models.
GPT-5 family models use reasoning natively. temperature/top_p are only
accepted when reasoning.effort is 'none'; with any other effort level
(or for older GPT-5/GPT-5-mini/GPT-5-nano which always reason) the API
rejects these params. Langchain defaults temperature=0.7, so we must
strip it to avoid errors.
Non-GPT-5 models (GPT-4.1, xAI, Ollama, etc.) are unaffected.
"""
def __init__(self, **kwargs): def __init__(self, **kwargs):
model = kwargs.get("model", "") if "gpt-5" in kwargs.get("model", "").lower():
if self._is_reasoning_model(model):
kwargs.pop("temperature", None) kwargs.pop("temperature", None)
kwargs.pop("top_p", None) kwargs.pop("top_p", None)
super().__init__(**kwargs) super().__init__(**kwargs)
@staticmethod
def _is_reasoning_model(model: str) -> bool:
"""Check if model is a reasoning model that doesn't support temperature."""
model_lower = model.lower()
return (
model_lower.startswith("o1")
or model_lower.startswith("o3")
or "gpt-5" in model_lower
)
class OpenAIClient(BaseLLMClient): class OpenAIClient(BaseLLMClient):
"""Client for OpenAI, Ollama, OpenRouter, and xAI providers.""" """Client for OpenAI, Ollama, OpenRouter, and xAI providers."""
@@ -61,7 +59,7 @@ class OpenAIClient(BaseLLMClient):
elif self.base_url: elif self.base_url:
llm_kwargs["base_url"] = self.base_url llm_kwargs["base_url"] = self.base_url
for key in ("timeout", "max_retries", "reasoning_effort", "api_key", "callbacks"): for key in ("timeout", "max_retries", "reasoning_effort", "api_key", "callbacks", "http_client", "http_async_client"):
if key in self.kwargs: if key in self.kwargs:
llm_kwargs[key] = self.kwargs[key] llm_kwargs[key] = self.kwargs[key]

View File

@@ -6,59 +6,44 @@ Let LLM providers use their own defaults for unspecified params.
VALID_MODELS = { VALID_MODELS = {
"openai": [ "openai": [
# GPT-5 series (2025) # GPT-5 series
"gpt-5.4-pro",
"gpt-5.4",
"gpt-5.2", "gpt-5.2",
"gpt-5.1", "gpt-5.1",
"gpt-5", "gpt-5",
"gpt-5-mini", "gpt-5-mini",
"gpt-5-nano", "gpt-5-nano",
# GPT-4.1 series (2025) # GPT-4.1 series
"gpt-4.1", "gpt-4.1",
"gpt-4.1-mini", "gpt-4.1-mini",
"gpt-4.1-nano", "gpt-4.1-nano",
# o-series reasoning models
"o4-mini",
"o3",
"o3-mini",
"o1",
"o1-preview",
# GPT-4o series (legacy but still supported)
"gpt-4o",
"gpt-4o-mini",
], ],
"anthropic": [ "anthropic": [
# Claude 4.5 series (2025) # Claude 4.6 series (latest)
"claude-opus-4-6",
"claude-sonnet-4-6",
# Claude 4.5 series
"claude-opus-4-5", "claude-opus-4-5",
"claude-sonnet-4-5", "claude-sonnet-4-5",
"claude-haiku-4-5", "claude-haiku-4-5",
# Claude 4.x series
"claude-opus-4-1-20250805",
"claude-sonnet-4-20250514",
# Claude 3.7 series
"claude-3-7-sonnet-20250219",
# Claude 3.5 series (legacy)
"claude-3-5-haiku-20241022",
"claude-3-5-sonnet-20241022",
], ],
"google": [ "google": [
# Gemini 3.1 series (preview)
"gemini-3.1-pro-preview",
"gemini-3.1-flash-lite-preview",
# Gemini 3 series (preview) # Gemini 3 series (preview)
"gemini-3-pro-preview",
"gemini-3-flash-preview", "gemini-3-flash-preview",
# Gemini 2.5 series # Gemini 2.5 series
"gemini-2.5-pro", "gemini-2.5-pro",
"gemini-2.5-flash", "gemini-2.5-flash",
"gemini-2.5-flash-lite", "gemini-2.5-flash-lite",
# Gemini 2.0 series
"gemini-2.0-flash",
"gemini-2.0-flash-lite",
], ],
"xai": [ "xai": [
# Grok 4.1 series # Grok 4.1 series
"grok-4-1-fast",
"grok-4-1-fast-reasoning", "grok-4-1-fast-reasoning",
"grok-4-1-fast-non-reasoning", "grok-4-1-fast-non-reasoning",
# Grok 4 series # Grok 4 series
"grok-4",
"grok-4-0709", "grok-4-0709",
"grok-4-fast-reasoning", "grok-4-fast-reasoning",
"grok-4-fast-non-reasoning", "grok-4-fast-non-reasoning",