diff --git a/cli/main.py b/cli/main.py
index adda48fc..b0c9dcbb 100644
--- a/cli/main.py
+++ b/cli/main.py
@@ -907,7 +907,13 @@ def run_analysis():
     config["quick_think_llm"] = selections["shallow_thinker"]
     config["deep_think_llm"] = selections["deep_thinker"]
     config["backend_url"] = selections["backend_url"]
-    config["llm_provider"] = selections["llm_provider"].lower()
+    # claude-max uses OpenAI-compatible format via local proxy
+    if selections["llm_provider"].lower() == "claude-max":
+        config["llm_provider"] = "openai"
+        import os
+        os.environ.setdefault("OPENAI_API_KEY", "not-needed")
+    else:
+        config["llm_provider"] = selections["llm_provider"].lower()
     # Provider-specific thinking configuration
     config["google_thinking_level"] = selections.get("google_thinking_level")
     config["openai_reasoning_effort"] = selections.get("openai_reasoning_effort")
diff --git a/cli/utils.py b/cli/utils.py
index aa097fb5..c08f80b6 100644
--- a/cli/utils.py
+++ b/cli/utils.py
@@ -160,6 +160,10 @@ def select_shallow_thinking_agent(provider) -> str:
             ("GPT-OSS:latest (20B, local)", "gpt-oss:latest"),
             ("GLM-4.7-Flash:latest (30B, local)", "glm-4.7-flash:latest"),
         ],
+        "claude-max": [
+            ("Claude Haiku 4 - Fast (Max subscription)", "claude-haiku-4"),
+            ("Claude Sonnet 4 - Balanced (Max subscription)", "claude-sonnet-4"),
+        ],
     }
 
     choice = questionary.select(
@@ -228,6 +232,11 @@
             ("GPT-OSS:latest (20B, local)", "gpt-oss:latest"),
             ("Qwen3:latest (8B, local)", "qwen3:latest"),
         ],
+        "claude-max": [
+            ("Claude Sonnet 4 - High-performance (Max subscription)", "claude-sonnet-4"),
+            ("Claude Haiku 4 - Fast (Max subscription)", "claude-haiku-4"),
+            ("Claude Opus 4 - Premium (Max subscription)", "claude-opus-4"),
+        ],
     }
 
     choice = questionary.select(
@@ -262,6 +271,7 @@
         ("xAI", "https://api.x.ai/v1"),
         ("Openrouter", "https://openrouter.ai/api/v1"),
         ("Ollama", "http://localhost:11434/v1"),
+        ("Claude-Max", "http://localhost:3456/v1"),
     ]
 
     choice = questionary.select(
diff --git a/test_crypto.py b/test_crypto.py
new file mode 100644
--- /dev/null
+++ b/test_crypto.py
@@ -0,0 +1,35 @@
+"""Manual smoke test: run TradingAgentsGraph through the claude-max local proxy.
+
+Run directly (``python test_crypto.py``); the ``__main__`` guard keeps pytest
+collection from kicking off a live trading run on import.
+"""
+from tradingagents.graph.trading_graph import TradingAgentsGraph
+from tradingagents.default_config import DEFAULT_CONFIG
+from dotenv import load_dotenv
+import os
+
+
+def main() -> None:
+    load_dotenv()
+    # The proxy ignores credentials, but the OpenAI client requires some key;
+    # setdefault keeps a real key from the environment if one is present.
+    os.environ.setdefault("OPENAI_API_KEY", "not-needed")
+
+    config = DEFAULT_CONFIG.copy()
+    config["llm_provider"] = "openai"
+    config["deep_think_llm"] = "claude-sonnet-4"
+    config["quick_think_llm"] = "claude-sonnet-4"
+    config["backend_url"] = "http://localhost:3456/v1"
+    config["max_debate_rounds"] = 1
+    config["max_risk_discuss_rounds"] = 1
+
+    ta = TradingAgentsGraph(debug=True, config=config)
+
+    _, decision = ta.propagate("BTC-USD", "2026-03-14")
+    print("\n" + "=" * 60)
+    print("TRADING DECISION:")
+    print("=" * 60)
+    print(decision)
+
+
+if __name__ == "__main__":
+    main()