diff --git a/.env.example b/.env.example
index dfb6bdc5..8f47c087 100644
--- a/.env.example
+++ b/.env.example
@@ -5,7 +5,9 @@ OPENAI_API_KEY=openai_api_key_placeholder
# Optional API Keys (for alternative LLM providers)
ANTHROPIC_API_KEY=anthropic_api_key_placeholder
GOOGLE_API_KEY=GOOGLE_API_KEY_placeholder
-OPENROUTER_API_KEY=openrouter_api_key_placeholder
+XAI_API_KEY=XAI_API_KEY_placeholder
+DEEPSEEK_API_KEY=deepseek_api_key_placeholder
+DASHSCOPE_API_KEY=dashscope_api_key_placeholder
# Deployment Configuration
TRADINGAGENTS_RESULTS_DIR=/app/results
diff --git a/README.md b/README.md
index 69aa747e..56adfcf5 100644
--- a/README.md
+++ b/README.md
@@ -207,7 +207,7 @@ ALPHA_VANTAGE_API_KEY=your-alpha-vantage-key
# 其他 LLM 提供商 (可選)
ANTHROPIC_API_KEY=your-claude-api-key
GOOGLE_API_KEY=your-gemini-api-key
-OPENROUTER_API_KEY=your-openrouter-api-key
+XAI_API_KEY=your-xai-api-key
# ============ 後端服務配置 ============
BACKEND_HOST=0.0.0.0
@@ -341,21 +341,19 @@ docker compose down -v
**快速思維模型** (用於快速分析和即時回應):
- `gpt-5.1-2025-11-13` - GPT-5.1 (最新)
- - `gpt-5-mini-2025-08-07` - GPT-5 Mini
+ - `gpt-5-mini-2025-08-07` - GPT-5 Mini (預設)
- `gpt-5-nano-2025-08-07` - GPT-5 Nano
- `gpt-4.1-mini` - GPT-4.1 Mini
- `gpt-4.1-nano` - GPT-4.1 Nano
- - `gpt-4o` - GPT-4o (推薦預設)
- - `gpt-4o-mini` - GPT-4o Mini (預設)
+ - `o4-mini-2025-04-16` - o4-mini
**深層思維模型** (用於複雜推理和深度分析):
- `gpt-5.1-2025-11-13` - GPT-5.1 (最新)
- - `gpt-5-mini-2025-08-07` - GPT-5 Mini
+ - `gpt-5-mini-2025-08-07` - GPT-5 Mini (預設)
- `gpt-5-nano-2025-08-07` - GPT-5 Nano
- `gpt-4.1-mini` - GPT-4.1 Mini
- `gpt-4.1-nano` - GPT-4.1 Nano
- - `gpt-4o` - GPT-4o (推薦預設)
- - `gpt-4o-mini` - GPT-4o Mini
+ - `o4-mini-2025-04-16` - o4-mini
> 💡 **提示**: 快速思維模型用於初步分析和資料收集,深層思維模型用於複雜決策和策略制定。您可以根據需求選擇不同的模型組合。
@@ -410,7 +408,7 @@ curl -X POST http://localhost:8000/api/analyze \
"ticker": "NVDA",
"analysis_date": "2024-01-15",
"research_depth": "medium",
- "model": "gpt-4o",
+ "model": "gpt-5-mini-2025-08-07",
"selected_analysts": ["market", "sentiment", "news", "fundamental"],
"api_key": "sk-your-openai-key"
}'
@@ -502,7 +500,7 @@ TradingAgents 模擬真實交易公司的組織架構,每個代理都有其專
- 支援 OpenAI (GPT-4, GPT-4o, o1 系列)
- 支援 Anthropic Claude
- 支援 Google Gemini
-- 可透過 OpenRouter 存取更多模型
+- 支援 Grok、DeepSeek 與 Qwen
#### 3. 長期記憶系統
- 使用 ChromaDB 向量資料庫儲存歷史決策
diff --git a/backend/app/core/config.py b/backend/app/core/config.py
index 4a5872c4..6eb795be 100644
--- a/backend/app/core/config.py
+++ b/backend/app/core/config.py
@@ -33,8 +33,8 @@ class Settings(BaseSettings):
results_dir: str = "./results"
max_debate_rounds: int = 1
max_risk_discuss_rounds: int = 1
- deep_think_llm: str = "gpt-4o-mini"
- quick_think_llm: str = "gpt-4o-mini"
+ deep_think_llm: str = "gpt-5-mini-2025-08-07"
+ quick_think_llm: str = "gpt-5-mini-2025-08-07"
class Config:
env_file = ".env"
diff --git a/backend/app/models/schemas.py b/backend/app/models/schemas.py
index 76293a25..f7249d06 100644
--- a/backend/app/models/schemas.py
+++ b/backend/app/models/schemas.py
@@ -15,8 +15,8 @@ class AnalysisRequest(BaseModel):
description="List of analysts to include in analysis"
)
research_depth: Optional[int] = Field(default=1, ge=1, le=5, description="Research depth (1-5)")
- deep_think_llm: Optional[str] = Field(default="gpt-4o-mini", description="Deep thinking LLM model")
- quick_think_llm: Optional[str] = Field(default="gpt-4o-mini", description="Quick thinking LLM model")
+ deep_think_llm: Optional[str] = Field(default="gpt-5-mini-2025-08-07", description="Deep thinking LLM model")
+ quick_think_llm: Optional[str] = Field(default="gpt-5-mini-2025-08-07", description="Quick thinking LLM model")
# API Configuration
openai_api_key: Optional[str] = Field(None, description="OpenAI API Key (optional if set on server)", min_length=0)
diff --git a/backend/app/services/trading_service.py b/backend/app/services/trading_service.py
index a3ab9144..1d157a59 100644
--- a/backend/app/services/trading_service.py
+++ b/backend/app/services/trading_service.py
@@ -26,8 +26,8 @@ class TradingService:
def create_config(
self,
research_depth: int = 1,
- deep_think_llm: str = "gpt-4o-mini",
- quick_think_llm: str = "gpt-4o-mini",
+ deep_think_llm: str = "gpt-5-mini-2025-08-07",
+ quick_think_llm: str = "gpt-5-mini-2025-08-07",
) -> Dict[str, Any]:
"""Create configuration for TradingAgents"""
config = self.default_config.copy()
@@ -47,8 +47,8 @@ class TradingService:
alpha_vantage_api_key: Optional[str] = None,
analysts: Optional[List[str]] = None,
research_depth: int = 1,
- deep_think_llm: str = "gpt-4o-mini",
- quick_think_llm: str = "gpt-4o-mini",
+ deep_think_llm: str = "gpt-5-mini-2025-08-07",
+ quick_think_llm: str = "gpt-5-mini-2025-08-07",
) -> Dict[str, Any]:
"""
Run trading analysis for a given ticker and date with user-provided API keys
@@ -170,21 +170,45 @@ class TradingService:
def get_available_llms(self) -> List[str]:
"""Get list of available OpenAI LLM models"""
return [
+ # OpenAI
"gpt-5.1-2025-11-13",
"gpt-5-mini-2025-08-07",
"gpt-5-nano-2025-08-07",
"gpt-4.1-mini",
"gpt-4.1-nano",
- "gpt-4o",
- "gpt-4o-mini",
+ "o4-mini-2025-04-16",
+ # Anthropic
+ "claude-haiku-4-5-20251001",
+ "claude-sonnet-4-5-20250929",
+ "claude-sonnet-4-0",
+ "claude-3-5-haiku-20241022",
+ "claude-3-haiku-20240307",
+ # Google
+ "gemini-2.0-flash-lite",
+ "gemini-2.0-flash",
+ "gemini-2.5-flash-lite",
+ # Grok
+ "grok-4-1-fast-reasoning",
+ "grok-4-1-fast-non-reasoning",
+ "grok-4-fast-reasoning",
+ "grok-4-fast-non-reasoning",
+ "grok-4-0709",
+ "grok-3",
+ "grok-3-mini",
+ # DeepSeek
+ "deepseek-reasoner",
+ "deepseek-chat",
+ # Qwen
+ "qwen3-max",
+ "qwen-plus",
]
def get_default_config(self) -> Dict[str, Any]:
"""Get default configuration"""
return {
"research_depth": 1,
- "deep_think_llm": "gpt-4o-mini",
- "quick_think_llm": "gpt-4o-mini",
+ "deep_think_llm": "gpt-5-mini-2025-08-07",
+ "quick_think_llm": "gpt-5-mini-2025-08-07",
"max_debate_rounds": 1,
"max_risk_discuss_rounds": 1,
}
diff --git a/cli/utils.py b/cli/utils.py
index 1df018fc..02ad07ce 100644
--- a/cli/utils.py
+++ b/cli/utils.py
@@ -187,28 +187,36 @@ def select_shallow_thinking_agent(provider) -> str:
("GPT-5-nano","gpt-5-nano-2025-08-07"),
("GPT-4.1-mini", "gpt-4.1-mini"),
("GPT-4.1-nano", "gpt-4.1-nano"),
- ("GPT-4o", "gpt-4o"),
- ("GPT-4o-mini", "gpt-4o-mini")
+ ("o4-mini", "o4-mini-2025-04-16")
],
"anthropic": [
- ("Claude Haiku 3.5", "claude-3-5-haiku-latest"),
- ("Claude Sonnet 3.5", "claude-3-5-sonnet-latest"),
- ("Claude Sonnet 3.7", "claude-3-7-sonnet-latest"),
+ ("Claude Haiku 4.5", "claude-haiku-4-5-20251001"),
+ ("Claude Sonnet 4.5", "claude-sonnet-4-5-20250929"),
("Claude Sonnet 4", "claude-sonnet-4-0"),
+ ("Claude Haiku 3.5", "claude-3-5-haiku-20241022"),
+ ("Claude Haiku 3", "claude-3-haiku-20240307")
],
"google": [
("Gemini 2.0 Flash-Lite", "gemini-2.0-flash-lite"),
("Gemini 2.0 Flash", "gemini-2.0-flash"),
- ("Gemini 2.5 Flash", "gemini-2.5-flash-preview-05-20"),
+ ("Gemini 2.5 Flash Lite", "gemini-2.5-flash-lite")
],
- "openrouter": [
- ("Meta: Llama 4 Scout", "meta-llama/llama-4-scout:free"),
- ("Meta: Llama 3.3 8B Instruct - Llama 3.3 70B", "meta-llama/llama-3.3-8b-instruct:free"),
- ("google/gemini-2.0-flash-exp:free - Gemini Flash 2.0", "google/gemini-2.0-flash-exp:free"),
+ "grok":[
+ ("Grok 4.1 Fast Reasoning","grok-4-1-fast-reasoning"),
+ ("Grok 4.1 Fast Non Reasoning","grok-4-1-fast-non-reasoning"),
+ ("Grok 4 Fast Reasoning","grok-4-fast-reasoning"),
+ ("Grok 4 Fast Non Reasoning","grok-4-fast-non-reasoning"),
+ ("Grok 4","grok-4-0709"),
+ ("Grok 3","grok-3"),
+ ("Grok 3 Mini","grok-3-mini")
],
- "ollama": [
- ("llama3.1 本機版", "llama3.1"),
- ("llama3.2 本機版", "llama3.2"),
+ "deepseek": [
+ ("DeepSeek Reasoner","deepseek-reasoner"),
+ ("DeepSeek Chat","deepseek-chat")
+ ],
+ "qwen":[
+ ("Qwen 3 Max", "qwen3-max"),
+ ("Qwen Plus", "qwen-plus")
]
}
@@ -261,29 +269,36 @@ def select_deep_thinking_agent(provider) -> str:
("GPT-5-nano","gpt-5-nano-2025-08-07"),
("GPT-4.1-mini", "gpt-4.1-mini"),
("GPT-4.1-nano", "gpt-4.1-nano"),
- ("GPT-4o", "gpt-4o"),
- ("GPT-4o-mini", "gpt-4o-mini")
+ ("o4-mini", "o4-mini-2025-04-16")
],
"anthropic": [
- ("Claude Haiku 3.5", "claude-3-5-haiku-latest"),
- ("Claude Sonnet 3.5", "claude-3-5-sonnet-latest"),
- ("Claude Sonnet 3.7", "claude-3-7-sonnet-latest"),
+ ("Claude Haiku 4.5", "claude-haiku-4-5-20251001"),
+ ("Claude Sonnet 4.5", "claude-sonnet-4-5-20250929"),
("Claude Sonnet 4", "claude-sonnet-4-0"),
- ("Claude Opus 4", "claude-opus-4-0"),
+ ("Claude Haiku 3.5", "claude-3-5-haiku-20241022"),
+ ("Claude Haiku 3", "claude-3-haiku-20240307")
],
"google": [
("Gemini 2.0 Flash-Lite", "gemini-2.0-flash-lite"),
("Gemini 2.0 Flash", "gemini-2.0-flash"),
- ("Gemini 2.5 Flash", "gemini-2.5-flash-preview-05-20"),
- ("Gemini 2.5 Pro", "gemini-2.5-pro-preview-06-05"),
+ ("Gemini 2.5 Flash Lite", "gemini-2.5-flash-lite")
],
- "openrouter": [
- ("DeepSeek V3 - 一個 685B 參數的專家混合模型", "deepseek/deepseek-chat-v3-0324:free"),
- ("Deepseek - DeepSeek 團隊旗艦聊天模型的最新版本", "deepseek/deepseek-chat-v3-0324:free"),
+ "grok":[
+ ("Grok 4.1 Fast Reasoning","grok-4-1-fast-reasoning"),
+ ("Grok 4.1 Fast Non Reasoning","grok-4-1-fast-non-reasoning"),
+ ("Grok 4 Fast Reasoning","grok-4-fast-reasoning"),
+ ("Grok 4 Fast Non Reasoning","grok-4-fast-non-reasoning"),
+ ("Grok 4","grok-4-0709"),
+ ("Grok 3","grok-3"),
+ ("Grok 3 Mini","grok-3-mini")
],
- "ollama": [
- ("llama3.1 本機版", "llama3.1"),
- ("qwen3", "qwen3"),
+ "deepseek":[
+ ("DeepSeek Reasoner","deepseek-reasoner"),
+ ("DeepSeek Chat","deepseek-chat")
+ ],
+ "qwen":[
+ ("Qwen 3 Max", "qwen3-max"),
+ ("Qwen Plus", "qwen-plus")
]
}
@@ -326,8 +341,9 @@ def select_llm_provider() -> tuple[str, str]:
("OpenAI", "https://api.openai.com/v1"),
("Anthropic", "https://api.anthropic.com/"),
("Google", "https://generativelanguage.googleapis.com/v1"),
- ("Openrouter", "https://openrouter.ai/api/v1"),
- ("Ollama", "http://localhost:11434/v1"),
+ ("Grok", "https://api.x.ai/v1"),
+ ("DeepSeek", "https://api.deepseek.com"),
+ ("Qwen", "https://dashscope-intl.aliyuncs.com/compatible-mode/v1")
]
choice = questionary.select(
diff --git a/frontend/components/analysis/AnalysisForm.tsx b/frontend/components/analysis/AnalysisForm.tsx
index 712b3c70..5d27744e 100644
--- a/frontend/components/analysis/AnalysisForm.tsx
+++ b/frontend/components/analysis/AnalysisForm.tsx
@@ -65,8 +65,8 @@ export function AnalysisForm({ onSubmit, loading = false }: AnalysisFormProps) {
analysis_date: format(new Date(), "yyyy-MM-dd"),
analysts: ["market", "social", "news", "fundamentals"], // 預設全選
research_depth: 3, // 預設中等層級
- shallow_thinking_agent: "gpt-4o-mini",
- deep_thinking_agent: "gpt-4o",
+ shallow_thinking_agent: "gpt-5-mini-2025-08-07",
+ deep_thinking_agent: "gpt-5-mini-2025-08-07",
openai_api_key: "",
openai_base_url: "https://api.openai.com/v1",
alpha_vantage_api_key: "",
@@ -236,13 +236,42 @@ export function AnalysisForm({ onSubmit, loading = false }: AnalysisFormProps) {
- GPT-5.1
- GPT-5 Mini
- GPT-5 Nano
- GPT-4.1 Mini
- GPT-4.1 Nano
- GPT-4o
- GPT-4o Mini
+ {/* OpenAI */}
+ OpenAI: GPT-5.1
+ OpenAI: GPT-5 Mini
+ OpenAI: GPT-5 Nano
+ OpenAI: GPT-4.1 Mini
+ OpenAI: GPT-4.1 Nano
+ OpenAI: o4-mini
+
+ {/* Anthropic */}
+ Anthropic: Claude Haiku 4.5
+ Anthropic: Claude Sonnet 4.5
+ Anthropic: Claude Sonnet 4
+ Anthropic: Claude 3.5 Haiku
+ Anthropic: Claude 3 Haiku
+
+ {/* Google */}
+ Google: Gemini 2.0 Flash-Lite
+ Google: Gemini 2.0 Flash
+ Google: Gemini 2.5 Flash Lite
+
+ {/* Grok */}
+ Grok: 4.1 Fast Reasoning
+ Grok: 4.1 Fast Non Reasoning
+ Grok: 4 Fast Reasoning
+ Grok: 4 Fast Non Reasoning
+ Grok: 4
+ Grok: 3
+ Grok: 3 Mini
+
+ {/* DeepSeek */}
+ DeepSeek: Reasoner
+ DeepSeek: Chat
+
+ {/* Qwen */}
+ Qwen: 3 Max
+ Qwen: Plus
@@ -266,13 +295,42 @@ export function AnalysisForm({ onSubmit, loading = false }: AnalysisFormProps) {
- GPT-5.1
- GPT-5 Mini
- GPT-5 Nano
- GPT-4.1 Mini
- GPT-4.1 Nano
- GPT-4o
- GPT-4o Mini
+ {/* OpenAI */}
+ OpenAI: GPT-5.1
+ OpenAI: GPT-5 Mini
+ OpenAI: GPT-5 Nano
+ OpenAI: GPT-4.1 Mini
+ OpenAI: GPT-4.1 Nano
+ OpenAI: o4-mini
+
+ {/* Anthropic */}
+ Anthropic: Claude Haiku 4.5
+ Anthropic: Claude Sonnet 4.5
+ Anthropic: Claude Sonnet 4
+ Anthropic: Claude 3.5 Haiku
+ Anthropic: Claude 3 Haiku
+
+ {/* Google */}
+ Google: Gemini 2.0 Flash-Lite
+ Google: Gemini 2.0 Flash
+ Google: Gemini 2.5 Flash Lite
+
+ {/* Grok */}
+ Grok: 4.1 Fast Reasoning
+ Grok: 4.1 Fast Non Reasoning
+ Grok: 4 Fast Reasoning
+ Grok: 4 Fast Non Reasoning
+ Grok: 4
+ Grok: 3
+ Grok: 3 Mini
+
+ {/* DeepSeek */}
+ DeepSeek: Reasoner
+ DeepSeek: Chat
+
+ {/* Qwen */}
+ Qwen: 3 Max
+ Qwen: Plus
@@ -311,12 +369,67 @@ export function AnalysisForm({ onSubmit, loading = false }: AnalysisFormProps) {
name="openai_base_url"
render={({ field }) => (
- OpenAI Base URL(選填)
-
-
-
+ API Base URL
+
+
+ {/* Show input only when custom is selected or value is not in the list */}
+ {(![
+ "https://api.openai.com/v1",
+ "https://api.anthropic.com/",
+ "https://generativelanguage.googleapis.com/v1",
+ "https://api.x.ai/v1",
+ "https://api.deepseek.com",
+ "https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
+ ].includes(field.value || "") || field.value === "") && (
+
+
+
+
+
+ )}
+
- API 基礎網址(預設為 OpenAI 官方)
+ 選擇或輸入 LLM 服務的 API 基礎網址
diff --git a/main.py b/main.py
index 12df9dea..24392ba7 100644
--- a/main.py
+++ b/main.py
@@ -8,8 +8,8 @@ load_dotenv(override=True)
# 建立自訂設定
config = DEFAULT_CONFIG.copy()
-config["deep_think_llm"] = "gpt-4o-mini" # 使用不同的模型
-config["quick_think_llm"] = "gpt-4o-mini" # 使用不同的模型
+config["deep_think_llm"] = "gpt-5-mini-2025-08-07" # 使用不同的模型
+config["quick_think_llm"] = "gpt-5-mini-2025-08-07" # 使用不同的模型
config["max_debate_rounds"] = 1 # 增加辯論回合
# 設定資料供應商 (預設使用 yfinance 和 alpha_vantage)
diff --git a/tradingagents/agents/managers/research_manager.py b/tradingagents/agents/managers/research_manager.py
index ce2bfa2f..f986a92a 100644
--- a/tradingagents/agents/managers/research_manager.py
+++ b/tradingagents/agents/managers/research_manager.py
@@ -47,7 +47,7 @@ def create_research_manager(llm, memory):
return text[:max_chars] + "\n...(內容已截斷)"
# 為每個報告設置合理的字符限制
- # 模型 gpt-4o-mini 的限制是 8192 tokens
+ # 模型 gpt-5-mini 的限制是 8192 tokens
# 混合中英文估算: 1 字符 ≈ 1.5-2 tokens (取保守值)
# 目標: 總字符數 < 3500 字符 (約 5250-7000 tokens,留足夠 tokens 給 completion)
market_research_report = truncate_text(market_research_report, 500)
diff --git a/tradingagents/agents/managers/risk_manager.py b/tradingagents/agents/managers/risk_manager.py
index 97189860..81322cd1 100644
--- a/tradingagents/agents/managers/risk_manager.py
+++ b/tradingagents/agents/managers/risk_manager.py
@@ -49,7 +49,7 @@ def create_risk_manager(llm, memory):
return text[:max_chars] + "\n...(內容已截斷)"
# 為每個報告設置合理的字符限制
- # 模型 gpt-4o-mini 的限制是 8192 tokens
+ # 模型 gpt-5-mini 的限制是 8192 tokens
# 混合中英文估算: 1 字符 ≈ 1.5-2 tokens (取保守值)
# 目標: 總字符數 < 3500 字符 (約 5250-7000 tokens,留足夠 tokens 給 completion)
market_research_report = truncate_text(market_research_report, 500)
diff --git a/tradingagents/agents/utils/memory.py b/tradingagents/agents/utils/memory.py
index e62ff04e..5d75266f 100644
--- a/tradingagents/agents/utils/memory.py
+++ b/tradingagents/agents/utils/memory.py
@@ -6,10 +6,7 @@ from openai import OpenAI
class FinancialSituationMemory:
def __init__(self, name, config):
- if config["backend_url"] == "http://localhost:11434/v1":
- self.embedding = "nomic-embed-text"
- else:
- self.embedding = "text-embedding-3-small"
+ self.embedding = "text-embedding-3-small"
# Get the OpenAI API key from environment variable
openai_api_key = os.getenv("OPENAI_API_KEY")
self.client = OpenAI(base_url=config["backend_url"], api_key=openai_api_key)
diff --git a/tradingagents/default_config.py b/tradingagents/default_config.py
index b5be7c69..e22edaab 100644
--- a/tradingagents/default_config.py
+++ b/tradingagents/default_config.py
@@ -10,8 +10,8 @@ DEFAULT_CONFIG = {
)),
# LLM 設定
"llm_provider": "openai",
- "deep_think_llm": "gpt-4o-mini",
- "quick_think_llm": "gpt-4o-mini",
+ "deep_think_llm": "gpt-5-mini-2025-08-07",
+ "quick_think_llm": "gpt-5-mini-2025-08-07",
"backend_url": "https://api.openai.com/v1",
# 辯論與討論設定
"max_debate_rounds": 1,
diff --git a/tradingagents/graph/trading_graph.py b/tradingagents/graph/trading_graph.py
index 1a19cc1f..63b12f62 100644
--- a/tradingagents/graph/trading_graph.py
+++ b/tradingagents/graph/trading_graph.py
@@ -83,7 +83,7 @@ class TradingAgentsGraph:
# 初始化 LLM
provider = self.config["llm_provider"].lower()
- if provider in ["openai", "ollama", "openrouter"]:
+ if provider in ["openai"]:
# Get the OpenAI API key from environment variable
openai_api_key = os.getenv("OPENAI_API_KEY")
self.deep_thinking_llm = ChatOpenAI(