Support ClaudeMaxProxy
parent 3642f5917c
commit 2ee2dc2382
@@ -907,7 +907,12 @@ def run_analysis():
     config["quick_think_llm"] = selections["shallow_thinker"]
     config["deep_think_llm"] = selections["deep_thinker"]
     config["backend_url"] = selections["backend_url"]
-    config["llm_provider"] = selections["llm_provider"].lower()
+    # claude-max uses OpenAI-compatible format via local proxy
+    if selections["llm_provider"].lower() == "claude-max":
+        config["llm_provider"] = "openai"
+        import os; os.environ.setdefault("OPENAI_API_KEY", "not-needed")
+    else:
+        config["llm_provider"] = selections["llm_provider"].lower()
     # Provider-specific thinking configuration
     config["google_thinking_level"] = selections.get("google_thinking_level")
     config["openai_reasoning_effort"] = selections.get("openai_reasoning_effort")
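Once the provider is remapped to "openai", everything downstream can reach Claude through any OpenAI-compatible client pointed at the proxy. A minimal sketch, assuming the graph builds its chat models with langchain-openai's ChatOpenAI (the construction site itself is not part of this diff):

import os
from langchain_openai import ChatOpenAI

# ClaudeMaxProxy does not check the key, so a placeholder satisfies the client.
os.environ.setdefault("OPENAI_API_KEY", "not-needed")

quick_llm = ChatOpenAI(
    model="claude-haiku-4",               # maps to quick_think_llm
    base_url="http://localhost:3456/v1",  # maps to backend_url
)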
cli/utils.py (+10 −0)
@@ -160,6 +160,10 @@ def select_shallow_thinking_agent(provider) -> str:
             ("GPT-OSS:latest (20B, local)", "gpt-oss:latest"),
             ("GLM-4.7-Flash:latest (30B, local)", "glm-4.7-flash:latest"),
         ],
+        "claude-max": [
+            ("Claude Haiku 4 - Fast (Max subscription)", "claude-haiku-4"),
+            ("Claude Sonnet 4 - Balanced (Max subscription)", "claude-sonnet-4"),
+        ],
     }

     choice = questionary.select(
@@ -228,6 +232,11 @@ def select_deep_thinking_agent(provider) -> str:
             ("GPT-OSS:latest (20B, local)", "gpt-oss:latest"),
             ("Qwen3:latest (8B, local)", "qwen3:latest"),
         ],
+        "claude-max": [
+            ("Claude Sonnet 4 - High-performance (Max subscription)", "claude-sonnet-4"),
+            ("Claude Haiku 4 - Fast (Max subscription)", "claude-haiku-4"),
+            ("Claude Opus 4 - Premium (Max subscription)", "claude-opus-4"),
+        ],
     }

     choice = questionary.select(
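Both menus use the same (display label, model id) tuple convention. A sketch of how such tuples typically feed questionary (the actual select call is truncated in this diff, and the prompt string below is illustrative):

import questionary

options = [
    ("Claude Sonnet 4 - High-performance (Max subscription)", "claude-sonnet-4"),
    ("Claude Haiku 4 - Fast (Max subscription)", "claude-haiku-4"),
]
choice = questionary.select(
    "Select your deep-thinking agent:",
    choices=[questionary.Choice(label, value=model) for label, model in options],
).ask()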
@@ -262,6 +271,7 @@ def select_llm_provider() -> tuple[str, str]:
         ("xAI", "https://api.x.ai/v1"),
         ("Openrouter", "https://openrouter.ai/api/v1"),
         ("Ollama", "http://localhost:11434/v1"),
+        ("Claude-Max", "http://localhost:3456/v1"),
     ]

     choice = questionary.select(
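The new Claude-Max entry routes the OpenAI-compatible backend to a local proxy on port 3456 instead of a hosted API. A quick way to verify the proxy is up before launching the CLI, assuming it serves the standard OpenAI-compatible /v1/models listing:

import requests

resp = requests.get("http://localhost:3456/v1/models", timeout=5)
print(resp.status_code, resp.json())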
@@ -0,0 +1,23 @@
+from tradingagents.graph.trading_graph import TradingAgentsGraph
+from tradingagents.default_config import DEFAULT_CONFIG
+from dotenv import load_dotenv
+import os
+
+load_dotenv()
+os.environ["OPENAI_API_KEY"] = "not-needed"
+
+config = DEFAULT_CONFIG.copy()
+config["llm_provider"] = "openai"
+config["deep_think_llm"] = "claude-sonnet-4"
+config["quick_think_llm"] = "claude-sonnet-4"
+config["backend_url"] = "http://localhost:3456/v1"
+config["max_debate_rounds"] = 1
+config["max_risk_discuss_rounds"] = 1
+
+ta = TradingAgentsGraph(debug=True, config=config)
+
+_, decision = ta.propagate("BTC-USD", "2026-03-14")
+print("\n" + "="*60)
+print("TRADING DECISION:")
+print("="*60)
+print(decision)
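The example pins both thinkers to claude-sonnet-4. The CLI menus added above suggest a cheaper split; a variant sketch using model ids from this commit:

config["quick_think_llm"] = "claude-haiku-4"  # fast tier for routine steps
config["deep_think_llm"] = "claude-opus-4"    # premium tier for final reasoning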
@@ -9,9 +9,9 @@ DEFAULT_CONFIG = {
     ),
     # LLM settings
     "llm_provider": "openai",
-    "deep_think_llm": "gpt-5.2",
-    "quick_think_llm": "gpt-5-mini",
-    "backend_url": "https://api.openai.com/v1",
+    "deep_think_llm": "claude-sonnet-4",
+    "quick_think_llm": "claude-haiku-4",
+    "backend_url": "http://localhost:3456/v1",
     # Provider-specific thinking configuration
     "google_thinking_level": None,  # "high", "minimal", etc.
     "openai_reasoning_effort": None,  # "medium", "high", "low"
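Because these defaults now assume a running proxy, users without one should override them per run rather than edit this file. A sketch using the values this commit removed:

from tradingagents.default_config import DEFAULT_CONFIG

config = DEFAULT_CONFIG.copy()
config["backend_url"] = "https://api.openai.com/v1"
config["deep_think_llm"] = "gpt-5.2"
config["quick_think_llm"] = "gpt-5-mini"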