feat: surface claude_agent provider in CLI + runner polish

CLI: add "Claude Agent (Max subscription, no API key)" to the provider
picker and register opus/sonnet/haiku model aliases so the CLI flow picks
up the provider registered in factory.py. No effort-level step since the
SDK doesn't expose that knob.

Analyst runner: build a concrete user request from company_of_interest +
trade_date instead of echoing the terse ("human", ticker) initial state —
the SDK was sitting idle on prompts like just "NVDA". Add opt-in file-
based debug logging (TRADINGAGENTS_CLAUDE_AGENT_DEBUG=1 → /tmp/...log)
for observability during long adaptive-thinking blocks.

Also adds main_claude_agent.py as a ready-to-run example for a Max-only
end-to-end invocation (verified 12-min NVDA run → SELL).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Michael Yang 2026-04-14 16:49:22 -04:00
parent 2f870be9a8
commit 441e1e5dd2
4 changed files with 162 additions and 22 deletions

View File

@ -235,6 +235,7 @@ def select_llm_provider() -> tuple[str, str | None]:
("OpenAI", "openai", "https://api.openai.com/v1"),
("Google", "google", None),
("Anthropic", "anthropic", "https://api.anthropic.com/"),
("Claude Agent (Max subscription, no API key)", "claude_agent", None),
("xAI", "xai", "https://api.x.ai/v1"),
("DeepSeek", "deepseek", "https://api.deepseek.com"),
("Qwen", "qwen", "https://dashscope.aliyuncs.com/compatible-mode/v1"),

41
main_claude_agent.py Normal file
View File

@ -0,0 +1,41 @@
"""Full TradingAgents run using a Claude Max subscription (no API key).

Requires being logged into Claude Code. Start small: one analyst, one
debate round. The analyst tool loop takes ~1-2 min/analyst via the SDK,
so a full 4-analyst run lands around ~10 min end-to-end.
"""
from dotenv import load_dotenv
from tradingagents.default_config import DEFAULT_CONFIG
from tradingagents.graph.trading_graph import TradingAgentsGraph

load_dotenv()

# Route every LLM call through the Claude Agent SDK (Max subscription).
config = dict(DEFAULT_CONFIG)
config["llm_provider"] = "claude_agent"
config["deep_think_llm"] = "sonnet"  # or "opus" for slower / higher quality
config["quick_think_llm"] = "sonnet"
config["max_debate_rounds"] = 1
config["max_risk_discuss_rounds"] = 1

# YFinance — no API key needed.
config["data_vendors"] = dict.fromkeys(
    ("core_stock_apis", "technical_indicators", "fundamental_data", "news_data"),
    "yfinance",
)

graph = TradingAgentsGraph(
    # Validate the pipeline with a single analyst before burning minutes on
    # the full set; expand to ["market", "social", "news", "fundamentals"]
    # once this works.
    selected_analysts=["market"],
    debug=True,
    config=config,
)

_, decision = graph.propagate("NVDA", "2025-10-15")
print("\n=== DECISION ===")
print(decision)

View File

@ -6,9 +6,19 @@ Claude iteratively invokes the translated MCP tools and returns a final text
report. No LangGraph ToolNode involvement — the analyst returns a terminal
AIMessage with zero tool_calls, so the existing conditional edges route
straight to the message-clear node.
Debug logging: set ``TRADINGAGENTS_CLAUDE_AGENT_DEBUG=1`` to log SDK activity
to ``/tmp/tradingagents_claude_agent.log`` (or set
``TRADINGAGENTS_CLAUDE_AGENT_DEBUG=/path/to/file`` for a custom path). Tail it
in a second terminal to watch progress in real time:
tail -f /tmp/tradingagents_claude_agent.log
"""
import asyncio
import os
import time
from datetime import datetime
from typing import Any, Dict, List
from langchain_core.messages import AIMessage, HumanMessage
@ -17,23 +27,80 @@ from tradingagents.llm_clients.claude_agent_client import ChatClaudeAgent
from tradingagents.llm_clients.mcp_tool_adapter import build_mcp_server
def _build_user_prompt(state: Dict[str, Any]) -> str:
"""Extract any human content from the incoming message sequence.
def _debug_path() -> str | None:
val = os.environ.get("TRADINGAGENTS_CLAUDE_AGENT_DEBUG")
if not val:
return None
if val in ("1", "true", "yes", "on"):
return "/tmp/tradingagents_claude_agent.log"
return val
Existing analysts rely on LangGraph feeding tool-call round trips through
state["messages"]. On the SDK path we collapse the incoming messages into a
single user prompt — tool results are consumed by the SDK loop, not via
LangGraph, so only the human-authored content matters here.
def _log(msg: str) -> None:
    """Append one timestamped line to the debug log, if enabled.

    No-ops when debug logging is off; OSError on the log file is
    swallowed so a bad path can never take down an analyst run.
    """
    target = _debug_path()
    if not target:
        return
    stamp = datetime.now().strftime("%H:%M:%S.%f")[:-3]
    try:
        with open(target, "a") as handle:
            handle.write(f"[{stamp}] {msg}\n")
    except OSError:
        pass
def _describe_message(msg: Any) -> str:
"""One-line summary of an SDK message for the debug log."""
try:
name = type(msg).__name__
content = getattr(msg, "content", None)
if content is None:
return f"{name} (no content)"
if isinstance(content, list):
block_summary = []
for block in content:
bname = type(block).__name__
if hasattr(block, "text"):
text = str(block.text)
snippet = text[:80].replace("\n", " ")
block_summary.append(f"{bname}[{len(text)} chars]: {snippet!r}")
elif hasattr(block, "name"):
block_summary.append(f"{bname}(name={block.name!r})")
else:
block_summary.append(bname)
return f"{name} with {len(content)} blocks: " + " | ".join(block_summary)
return f"{name}: {str(content)[:200]!r}"
except Exception as e:
return f"(failed to describe: {e!r})"
def _build_user_prompt(state: Dict[str, Any]) -> str:
"""Construct a concrete user request from graph state.
The initial graph state is ``messages = [("human", ticker)]`` too terse
for Claude to act on unambiguously, which can leave the SDK session idle
waiting for clarification. Build an explicit request from
``company_of_interest`` + ``trade_date`` so Claude always knows what to do.
Any additional human-authored content in the message stream is appended.
"""
parts: List[str] = []
ticker = state.get("company_of_interest", "")
trade_date = state.get("trade_date", "")
base = (
f"Produce the requested report for {ticker} as of {trade_date}. "
"Use the available tools to gather the data you need, then write the "
"final report. Do not ask clarifying questions — proceed directly."
).strip()
extra: List[str] = []
for msg in state.get("messages", []):
if isinstance(msg, HumanMessage):
content = msg.content
if isinstance(content, str) and content.strip():
parts.append(content.strip())
if not parts:
parts.append("Produce the requested report.")
return "\n\n".join(parts)
content = getattr(msg, "content", None)
if isinstance(msg, HumanMessage) and isinstance(content, str):
c = content.strip()
if c and c != ticker:
extra.append(c)
if extra:
return base + "\n\nAdditional context:\n" + "\n".join(extra)
return base
async def _run(
@ -50,7 +117,10 @@ async def _run(
query,
)
_log(f"[{server_name}] building MCP server with {len(lc_tools)} tools: "
f"{[t.name for t in lc_tools]}")
server, allowed = build_mcp_server(server_name, lc_tools)
_log(f"[{server_name}] allowed_tools={allowed}")
options = ClaudeAgentOptions(
model=model,
@ -66,12 +136,23 @@ async def _run(
permission_mode="bypassPermissions",
)
_log(f"[{server_name}] starting query(model={model!r}, prompt={user_prompt[:120]!r}...)")
start = time.monotonic()
text_parts: List[str] = []
msg_count = 0
async for msg in query(prompt=user_prompt, options=options):
msg_count += 1
elapsed = time.monotonic() - start
_log(f"[{server_name}] +{elapsed:.1f}s msg #{msg_count}: {_describe_message(msg)}")
if isinstance(msg, AssistantMessage):
for block in msg.content:
if isinstance(block, TextBlock):
text_parts.append(block.text)
elapsed = time.monotonic() - start
_log(f"[{server_name}] query complete after {elapsed:.1f}s, "
f"{msg_count} messages, {sum(len(t) for t in text_parts)} chars")
return "\n".join(text_parts).strip()
@ -85,15 +166,22 @@ def run_sdk_analyst(
) -> Dict[str, Any]:
"""Run an analyst through the Claude Agent SDK tool loop and build the node output."""
user_prompt = _build_user_prompt(state)
report = asyncio.run(
_run(
system_prompt=system_prompt,
user_prompt=user_prompt,
lc_tools=lc_tools,
server_name=server_name,
model=llm.model,
_log(f"=== run_sdk_analyst start: server={server_name} report_field={report_field} "
f"ticker={state.get('company_of_interest')!r} date={state.get('trade_date')!r} ===")
try:
report = asyncio.run(
_run(
system_prompt=system_prompt,
user_prompt=user_prompt,
lc_tools=lc_tools,
server_name=server_name,
model=llm.model,
)
)
)
except Exception as e:
_log(f"[{server_name}] EXCEPTION: {type(e).__name__}: {e}")
raise
_log(f"=== run_sdk_analyst done: {report_field}={len(report)} chars ===")
return {
"messages": [AIMessage(content=report)],
report_field: report,

View File

@ -36,6 +36,16 @@ MODEL_OPTIONS: ProviderModeOptions = {
("Claude Sonnet 4.5 - Agents and coding", "claude-sonnet-4-5"),
],
},
"claude_agent": {
"quick": [
("Claude Sonnet (via Claude Code, Max subscription)", "sonnet"),
("Claude Haiku (via Claude Code, Max subscription)", "haiku"),
],
"deep": [
("Claude Opus (via Claude Code, Max subscription)", "opus"),
("Claude Sonnet (via Claude Code, Max subscription)", "sonnet"),
],
},
"google": {
"quick": [
("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"),