From 72f34187c1ea3549e961bc1fe750573193bcad8e Mon Sep 17 00:00:00 2001 From: Florian Bley Date: Mon, 23 Mar 2026 23:12:13 +0100 Subject: [PATCH] feat: add Claude Code subagent pipeline (no API keys required) Adds cc_tools.py CLI wrapper for data gathering and a trading-analysis skill that orchestrates ~12 specialized subagents through Claude Code's Agent tool, replacing the LangGraph pipeline's dependency on external LLM API keys. Co-Authored-By: Claude Opus 4.6 (1M context) --- .claude/skills/trading-analysis/SKILL.md | 312 ++++++++++++++++++++++ cc_tools.py | 324 +++++++++++++++++++++++ tests/test_cc_integration.py | 224 ++++++++++++++++ tests/test_cc_tools.py | 307 +++++++++++++++++++++ 4 files changed, 1167 insertions(+) create mode 100644 .claude/skills/trading-analysis/SKILL.md create mode 100644 cc_tools.py create mode 100644 tests/test_cc_integration.py create mode 100644 tests/test_cc_tools.py diff --git a/.claude/skills/trading-analysis/SKILL.md b/.claude/skills/trading-analysis/SKILL.md new file mode 100644 index 00000000..0c3b75ae --- /dev/null +++ b/.claude/skills/trading-analysis/SKILL.md @@ -0,0 +1,312 @@ +--- +name: trading-analysis +description: Run the TradingAgents multi-agent trading analysis pipeline for a stock ticker. Launches specialized analyst, researcher, debater, and portfolio manager subagents to produce a final BUY/OVERWEIGHT/HOLD/UNDERWEIGHT/SELL recommendation. +--- + +# Trading Analysis Skill + +Analyze a stock using the TradingAgents multi-agent framework. This skill orchestrates ~12 specialized agents through Claude Code's Agent tool, replicating the original LangGraph pipeline. 
## Input

The user provides:
- **TICKER**: Stock ticker symbol (e.g., NVDA, AAPL, MSFT, CNC.TO)
- **TRADE_DATE**: Analysis date in yyyy-mm-dd format (defaults to today if not specified)

## Configuration

- `max_debate_rounds`: 1 (bull/bear debate rounds)
- `max_risk_discuss_rounds`: 1 (risk analyst discussion rounds)
- Python executable: Use the project's venv at `.venv/Scripts/python.exe` (Windows) or `.venv/bin/python` (Unix)
- All data tool calls go through: `python cc_tools.py <command> [args]` from the project root

## Prerequisites

Before starting, verify the environment:
```bash
.venv/Scripts/python.exe cc_tools.py --help
```

## Execution Flow

### PHASE 1: Analyst Reports (PARALLEL - launch all 4 simultaneously)

Launch exactly 4 Agent subagents **in a single message** (parallel). Each subagent should use `model: "sonnet"` for cost efficiency. Each one collects data via Bash and returns its report.

**Important:** Tell each subagent to run commands from the project root directory. The python executable path is `.venv/Scripts/python.exe` on Windows.

#### 1a. Market Analyst Subagent

Prompt for the Agent tool:

> You are a Market Analyst. Your job is to analyze technical indicators for {TICKER} as of {TRADE_DATE}.
>
> Use Bash to call these commands from the project root to gather data:
> - `.venv/Scripts/python.exe cc_tools.py get_stock_data {TICKER} {START_DATE} {TRADE_DATE}` (START_DATE = 30 days before TRADE_DATE)
> - `.venv/Scripts/python.exe cc_tools.py get_indicators {TICKER} {INDICATOR} {TRADE_DATE}` once for each selected indicator
>
> Select up to 8 of the most relevant technical indicators from: close_50_sma, close_200_sma, close_10_ema, macd, macds, macdh, rsi, boll, boll_ub, boll_lb, atr, vwma. Avoid redundancy. Call get_stock_data first, then get_indicators for each selected indicator.
>
> Write a detailed, nuanced report of the trends you observe. Provide specific, actionable insights with supporting evidence.
Append a Markdown table at the end organizing key points. +> +> The instrument to analyze is `{TICKER}`. Use this exact ticker in every tool call and report. + +#### 1b. Social Media Analyst Subagent + +> You are a Social Media and Sentiment Analyst. Analyze social media posts, company news, and public sentiment for {TICKER} over the past week as of {TRADE_DATE}. +> +> Use Bash to call: `.venv/Scripts/python.exe cc_tools.py get_news {TICKER} {START_DATE} {TRADE_DATE}` (START_DATE = 7 days before TRADE_DATE) +> +> Write a comprehensive report analyzing sentiment, social media discussion, and recent company news. Provide specific, actionable insights. Append a Markdown table at the end. +> +> The instrument to analyze is `{TICKER}`. + +#### 1c. News Analyst Subagent + +> You are a News and Macroeconomic Analyst. Analyze recent news and trends relevant to trading {TICKER} as of {TRADE_DATE}. +> +> Use Bash to call: +> - `.venv/Scripts/python.exe cc_tools.py get_news {TICKER} {START_DATE} {TRADE_DATE}` (START_DATE = 7 days before) +> - `.venv/Scripts/python.exe cc_tools.py get_global_news {TRADE_DATE} 7 5` +> +> Write a comprehensive report of the global state relevant for trading and macroeconomics. Provide specific, actionable insights. Append a Markdown table at the end. +> +> The instrument to analyze is `{TICKER}`. + +#### 1d. Fundamentals Analyst Subagent + +> You are a Fundamentals Analyst. Analyze the fundamental financial information for {TICKER} as of {TRADE_DATE}. 
+> +> Use Bash to call: +> - `.venv/Scripts/python.exe cc_tools.py get_fundamentals {TICKER} {TRADE_DATE}` +> - `.venv/Scripts/python.exe cc_tools.py get_balance_sheet {TICKER} quarterly {TRADE_DATE}` +> - `.venv/Scripts/python.exe cc_tools.py get_cashflow {TICKER} quarterly {TRADE_DATE}` +> - `.venv/Scripts/python.exe cc_tools.py get_income_statement {TICKER} quarterly {TRADE_DATE}` +> +> Write a comprehensive report of company fundamentals including financial documents, company profile, financials, and history. Provide specific, actionable insights with a Markdown table summary. +> +> The instrument to analyze is `{TICKER}`. + +After all 4 return, save their outputs as: +- `market_report` = Market Analyst result +- `sentiment_report` = Social Media Analyst result +- `news_report` = News Analyst result +- `fundamentals_report` = Fundamentals Analyst result + +--- + +### PHASE 2: Investment Debate (SEQUENTIAL) + +Run 1 round of bull/bear debate (configurable via max_debate_rounds). + +#### 2a. Bull Researcher Subagent + +Launch an Agent with this prompt: + +> You are a Bull Analyst advocating for investing in {TICKER}. Build a strong, evidence-based case emphasizing growth potential, competitive advantages, and positive market indicators. +> +> Key points to focus on: +> - Growth Potential: Highlight market opportunities, revenue projections, and scalability +> - Competitive Advantages: Emphasize unique products, strong branding, or dominant market positioning +> - Positive Indicators: Use financial health, industry trends, and positive news as evidence +> - Engagement: Present conversationally, engaging directly as if debating +> +> Resources: +> Market report: {market_report} +> Sentiment report: {sentiment_report} +> News report: {news_report} +> Fundamentals report: {fundamentals_report} +> Last bear argument: (none yet - this is the opening round) +> +> Deliver a compelling bull argument. 
Do NOT use any tools - just analyze the provided data and argue your case. + +Save result as `bull_argument`. + +#### 2b. Bear Researcher Subagent + +Launch an Agent with this prompt: + +> You are a Bear Analyst advocating AGAINST investing in {TICKER}. Present a well-reasoned case emphasizing risks, challenges, and negative indicators. +> +> Key points to focus on: +> - Risks: Highlight market saturation, financial instability, macroeconomic threats +> - Competitive Weaknesses: Emphasize weaker market position, innovation decline +> - Negative Indicators: Use financial data, market trends, adverse news as evidence +> - Counter the bull argument with specific data and reasoning +> - Engagement: Present conversationally, as if debating +> +> Resources: +> Market report: {market_report} +> Sentiment report: {sentiment_report} +> News report: {news_report} +> Fundamentals report: {fundamentals_report} +> Bull argument to counter: {bull_argument} +> +> Deliver a compelling bear argument countering the bull case. Do NOT use any tools. + +Save result as `bear_argument`. + +Build the debate state: +- `debate_history` = "Bull Analyst: {bull_argument}\nBear Analyst: {bear_argument}" + +--- + +### PHASE 3: Research Manager (SEQUENTIAL) + +Launch an Agent: + +> As the Research Manager and debate facilitator, critically evaluate this debate and make a DEFINITIVE decision: align with the bull analyst, bear analyst, or Hold (only if strongly justified). +> +> Summarize key points from both sides concisely. Your recommendation -- Buy, Sell, or Hold -- must be clear and actionable, grounded in the debate's strongest arguments. Avoid defaulting to Hold simply because both sides have valid points. +> +> Develop a detailed investment plan including: +> 1. Your Recommendation: A decisive stance +> 2. Rationale: Why these arguments lead to your conclusion +> 3. Strategic Actions: Concrete implementation steps +> +> The instrument is `{TICKER}`. 
+> +> Debate History: +> {debate_history} +> +> Do NOT use any tools. + +Save result as `investment_plan`. + +--- + +### PHASE 4: Trader (SEQUENTIAL) + +Launch an Agent: + +> You are a Trading Agent analyzing market data to make investment decisions for {TICKER}. +> +> Based on a comprehensive analysis by a team of analysts, here is an investment plan. Use it as a foundation for your trading decision. +> +> Proposed Investment Plan: {investment_plan} +> +> Additional context: +> Market report: {market_report} +> Sentiment report: {sentiment_report} +> News report: {news_report} +> Fundamentals report: {fundamentals_report} +> +> Provide a specific recommendation to buy, sell, or hold. End with a firm decision and conclude your response with "FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**". +> +> The instrument is `{TICKER}`. Do NOT use any tools. + +Save result as `trader_plan`. + +--- + +### PHASE 5: Risk Debate (SEQUENTIAL - 3 analysts) + +Run 1 round of aggressive/conservative/neutral debate. + +#### 5a. Aggressive Risk Analyst Subagent + +> As the Aggressive Risk Analyst, champion high-reward, high-risk opportunities for {TICKER}. Evaluate the trader's decision focusing on potential upside, growth potential, and innovative benefits. +> +> Trader's decision: {trader_plan} +> +> Market Research Report: {market_report} +> Sentiment Report: {sentiment_report} +> News Report: {news_report} +> Fundamentals Report: {fundamentals_report} +> +> Present your argument based on the data. Focus on debating and persuading, not just presenting data. Output conversationally without special formatting. Do NOT use any tools. + +Save result as `aggressive_argument`. + +#### 5b. Conservative Risk Analyst Subagent + +> As the Conservative Risk Analyst, protect assets, minimize volatility, and ensure steady growth for {TICKER}. Critically examine high-risk elements in the trader's decision. 
+> +> Trader's decision: {trader_plan} +> +> Market Research Report: {market_report} +> Sentiment Report: {sentiment_report} +> News Report: {news_report} +> Fundamentals Report: {fundamentals_report} +> +> Aggressive analyst's argument: {aggressive_argument} +> +> Counter the aggressive stance. Emphasize potential downsides they overlooked. Focus on debating and critiquing. Output conversationally without special formatting. Do NOT use any tools. + +Save result as `conservative_argument`. + +#### 5c. Neutral Risk Analyst Subagent + +> As the Neutral Risk Analyst, provide a balanced perspective on {TICKER}, weighing both potential benefits and risks. Challenge both the aggressive and conservative views. +> +> Trader's decision: {trader_plan} +> +> Market Research Report: {market_report} +> Sentiment Report: {sentiment_report} +> News Report: {news_report} +> Fundamentals Report: {fundamentals_report} +> +> Aggressive analyst's argument: {aggressive_argument} +> Conservative analyst's argument: {conservative_argument} +> +> Analyze both sides critically. Advocate for a balanced approach offering growth with safeguards. Output conversationally without special formatting. Do NOT use any tools. + +Save result as `neutral_argument`. + +Build risk debate history: +- `risk_debate_history` = "Aggressive Analyst: {aggressive_argument}\nConservative Analyst: {conservative_argument}\nNeutral Analyst: {neutral_argument}" + +--- + +### PHASE 6: Portfolio Manager (SEQUENTIAL) + +Launch an Agent: + +> As the Portfolio Manager, synthesize the risk analysts' debate and deliver the FINAL trading decision for {TICKER}. 
+> +> **Rating Scale** (use exactly one): +> - **Buy**: Strong conviction to enter or add to position +> - **Overweight**: Favorable outlook, gradually increase exposure +> - **Hold**: Maintain current position, no action needed +> - **Underweight**: Reduce exposure, take partial profits +> - **Sell**: Exit position or avoid entry +> +> Trader's proposed plan: {trader_plan} +> +> **Required Output Structure:** +> 1. **Rating**: State one of Buy / Overweight / Hold / Underweight / Sell +> 2. **Executive Summary**: Concise action plan covering entry strategy, position sizing, key risk levels, and time horizon +> 3. **Investment Thesis**: Detailed reasoning anchored in the analysts' debate +> +> Risk Analysts Debate History: +> {risk_debate_history} +> +> Be decisive and ground every conclusion in specific evidence from the analysts. Do NOT use any tools. + +Save result as `final_decision`. + +--- + +### PHASE 7: Signal Extraction & Results + +Extract the final rating from the portfolio manager's output. It should be exactly one of: **BUY**, **OVERWEIGHT**, **HOLD**, **UNDERWEIGHT**, or **SELL**. + +Then save the full results by writing a JSON state file and calling: +```bash +.venv/Scripts/python.exe cc_tools.py save_results {TICKER} {TRADE_DATE} +``` + +The state JSON should contain all fields: company_of_interest, trade_date, market_report, sentiment_report, news_report, fundamentals_report, investment_debate_state, investment_plan, trader_investment_plan, risk_debate_state, final_trade_decision. + +--- + +### PHASE 8: Present Results + +Display a summary to the user: + +1. **Final Rating**: The extracted BUY/OVERWEIGHT/HOLD/UNDERWEIGHT/SELL +2. **Executive Summary**: From the portfolio manager +3. **Key Reports**: Brief highlights from each analyst +4. **Debate Summary**: Key points from both investment and risk debates + +The full detailed results are saved in `eval_results/{TICKER}/TradingAgentsStrategy_logs/`. 
#!/usr/bin/env python3
"""CLI bridge between Claude Code subagents and TradingAgents data/memory infrastructure.

Usage:
    python cc_tools.py <command> [args...]

Data Commands:
    get_stock_data <symbol> <start_date> <end_date>
    get_indicators <symbol> <indicator> <curr_date> [look_back_days]
    get_fundamentals <ticker> <curr_date>
    get_balance_sheet <ticker> [freq] [curr_date]
    get_cashflow <ticker> [freq] [curr_date]
    get_income_statement <ticker> [freq] [curr_date]
    get_news <ticker> <start_date> <end_date>
    get_global_news <curr_date> [look_back_days] [limit]
    get_insider_transactions <ticker>

Memory Commands:
    memory_get <memory_name> <situation_file> [n_matches]
    memory_add <memory_name> <situation_file> <advice_file>
    memory_clear <memory_name>

Results Commands:
    save_results <ticker> <trade_date> <state_json_file>
"""

import argparse
import json
import os
import sys
from pathlib import Path


def _init_config():
    """Initialize the data vendor configuration.

    Imported lazily so that `cc_tools.py --help` works even when the
    tradingagents package is not importable. Also ensures the on-disk
    data cache directory exists before any vendor call.
    """
    from tradingagents.default_config import DEFAULT_CONFIG
    from tradingagents.dataflows.config import set_config
    set_config(DEFAULT_CONFIG)

    # Create data cache directory
    os.makedirs(
        os.path.join(DEFAULT_CONFIG["project_dir"], "dataflows/data_cache"),
        exist_ok=True,
    )


def _route(method, *args, **kwargs):
    """Route a method call through the vendor interface."""
    _init_config()
    from tradingagents.dataflows.interface import route_to_vendor
    return route_to_vendor(method, *args, **kwargs)


# --- Memory persistence helpers ---

# Relative to the project root — all commands are documented to run from there.
MEMORY_DIR = Path("eval_results/.memory")


def _memory_path(name: str) -> Path:
    """Return the JSON file that backs the memory called *name*."""
    return MEMORY_DIR / f"{name}.json"


def _load_memory(name: str):
    """Load a FinancialSituationMemory from disk, or create a fresh one.

    A persisted file whose documents/recommendations lists are missing or of
    mismatched length is not trusted. Previously such a file was silently
    ignored; now a warning is printed to stderr so a corrupted memory file
    does not go unnoticed.
    """
    from tradingagents.agents.utils.memory import FinancialSituationMemory

    mem = FinancialSituationMemory(name)
    path = _memory_path(name)
    if path.exists():
        with open(path, "r", encoding="utf-8") as f:
            data = json.load(f)
        docs = data.get("documents", [])
        recs = data.get("recommendations", [])
        if docs and recs and len(docs) == len(recs):
            mem.add_situations(list(zip(docs, recs)))
        elif docs or recs:
            print(
                f"WARNING: memory file {path} is inconsistent "
                f"({len(docs)} documents vs {len(recs)} recommendations); ignoring it.",
                file=sys.stderr,
            )
    return mem


def _save_memory(name: str, mem):
    """Persist a FinancialSituationMemory to disk as JSON."""
    MEMORY_DIR.mkdir(parents=True, exist_ok=True)
    path = _memory_path(name)
    data = {
        "documents": mem.documents,
        "recommendations": mem.recommendations,
    }
    with open(path, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=2, ensure_ascii=False)


# --- Command handlers ---
# Each handler receives the argparse Namespace, fetches data through the
# vendor router, and prints the result to stdout for the calling subagent.

def cmd_get_stock_data(args):
    """Print OHLCV price data for a symbol over a date range."""
    result = _route("get_stock_data", args.symbol, args.start_date, args.end_date)
    print(result)


def cmd_get_indicators(args):
    """Print one technical indicator series (default lookback: 30 days)."""
    look_back = int(args.look_back_days) if args.look_back_days else 30
    result = _route("get_indicators", args.symbol, args.indicator, args.curr_date, look_back)
    print(result)


def cmd_get_fundamentals(args):
    """Print company fundamentals as of a date."""
    result = _route("get_fundamentals", args.ticker, args.curr_date)
    print(result)


def cmd_get_balance_sheet(args):
    """Print balance sheet data (default frequency: quarterly)."""
    freq = args.freq or "quarterly"
    curr_date = args.curr_date or None
    result = _route("get_balance_sheet", args.ticker, freq, curr_date)
    print(result)


def cmd_get_cashflow(args):
    """Print cash flow statement data (default frequency: quarterly)."""
    freq = args.freq or "quarterly"
    curr_date = args.curr_date or None
    result = _route("get_cashflow", args.ticker, freq, curr_date)
    print(result)


def cmd_get_income_statement(args):
    """Print income statement data (default frequency: quarterly)."""
    freq = args.freq or "quarterly"
    curr_date = args.curr_date or None
    result = _route("get_income_statement", args.ticker, freq, curr_date)
    print(result)


def cmd_get_news(args):
    """Print company news over a date range."""
    result = _route("get_news", args.ticker, args.start_date, args.end_date)
    print(result)


def cmd_get_global_news(args):
    """Print global macroeconomic news (defaults: 7-day lookback, 5 articles)."""
    look_back = int(args.look_back_days) if args.look_back_days else 7
    limit = int(args.limit) if args.limit else 5
    result = _route("get_global_news", args.curr_date, look_back, limit)
    print(result)


def cmd_get_insider_transactions(args):
    """Print insider transaction data for a ticker."""
    result = _route("get_insider_transactions", args.ticker)
    print(result)


def cmd_memory_get(args):
    """Print the memories most similar to the situation described in a file.

    The situation is passed via a file (not argv) so that long, multi-line
    text survives shell quoting when invoked from a subagent.
    """
    situation_text = Path(args.situation_file).read_text(encoding="utf-8")
    n_matches = int(args.n_matches) if args.n_matches else 2

    mem = _load_memory(args.memory_name)
    results = mem.get_memories(situation_text, n_matches=n_matches)

    if not results:
        print("No past memories found.")
    else:
        for i, rec in enumerate(results, 1):
            print(f"--- Memory Match {i} (score: {rec['similarity_score']:.2f}) ---")
            print(rec["recommendation"])
            print()


def cmd_memory_add(args):
    """Add a (situation, advice) pair to a named memory and persist it."""
    situation_text = Path(args.situation_file).read_text(encoding="utf-8")
    advice_text = Path(args.advice_file).read_text(encoding="utf-8")

    mem = _load_memory(args.memory_name)
    mem.add_situations([(situation_text, advice_text)])
    _save_memory(args.memory_name, mem)
    print(f"Memory added to '{args.memory_name}'. Total entries: {len(mem.documents)}")


def cmd_memory_clear(args):
    """Delete the on-disk store for a named memory (no-op if absent)."""
    path = _memory_path(args.memory_name)
    if path.exists():
        path.unlink()
        print(f"Memory '{args.memory_name}' cleared.")


def cmd_save_results(args):
    """Write the final analysis state to the eval_results log directory.

    Reads the full state from a JSON file and writes it keyed by trade date,
    mirroring the key layout of TradingAgentsGraph._log_state (note that
    state key 'trader_investment_plan' is stored under the log key
    'trader_investment_decision').
    """
    state_data = json.loads(Path(args.state_json_file).read_text(encoding="utf-8"))

    log_entry = {
        str(args.trade_date): {
            "company_of_interest": state_data.get("company_of_interest", args.ticker),
            "trade_date": args.trade_date,
            "market_report": state_data.get("market_report", ""),
            "sentiment_report": state_data.get("sentiment_report", ""),
            "news_report": state_data.get("news_report", ""),
            "fundamentals_report": state_data.get("fundamentals_report", ""),
            "investment_debate_state": state_data.get("investment_debate_state", {}),
            "trader_investment_decision": state_data.get("trader_investment_plan", ""),
            "risk_debate_state": state_data.get("risk_debate_state", {}),
            "investment_plan": state_data.get("investment_plan", ""),
            "final_trade_decision": state_data.get("final_trade_decision", ""),
        }
    }

    directory = Path(f"eval_results/{args.ticker}/TradingAgentsStrategy_logs/")
    directory.mkdir(parents=True, exist_ok=True)

    out_path = directory / f"full_states_log_{args.trade_date}.json"
    with open(out_path, "w", encoding="utf-8") as f:
        json.dump(log_entry, f, indent=4, ensure_ascii=False)

    print(f"Results saved to {out_path}")


# --- Argument parser ---

def build_parser():
    """Build the CLI parser; each subcommand dispatches via `args.func`."""
    parser = argparse.ArgumentParser(
        description="TradingAgents CLI tools for Claude Code subagents",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    subparsers = parser.add_subparsers(dest="command", help="Available commands")

    # get_stock_data
    p = subparsers.add_parser("get_stock_data", help="Get OHLCV stock price data")
    p.add_argument("symbol", help="Ticker symbol (e.g., AAPL)")
    p.add_argument("start_date", help="Start date (yyyy-mm-dd)")
    p.add_argument("end_date", help="End date (yyyy-mm-dd)")
    p.set_defaults(func=cmd_get_stock_data)

    # get_indicators
    p = subparsers.add_parser("get_indicators", help="Get technical indicators")
    p.add_argument("symbol", help="Ticker symbol")
    p.add_argument("indicator", help="Indicator name (e.g., rsi, macd)")
    p.add_argument("curr_date", help="Current trading date (yyyy-mm-dd)")
    p.add_argument("look_back_days", nargs="?", default=None, help="Lookback days (default: 30)")
    p.set_defaults(func=cmd_get_indicators)

    # get_fundamentals
    p = subparsers.add_parser("get_fundamentals", help="Get company fundamentals")
    p.add_argument("ticker", help="Ticker symbol")
    p.add_argument("curr_date", help="Current date (yyyy-mm-dd)")
    p.set_defaults(func=cmd_get_fundamentals)

    # get_balance_sheet
    p = subparsers.add_parser("get_balance_sheet", help="Get balance sheet data")
    p.add_argument("ticker", help="Ticker symbol")
    p.add_argument("freq", nargs="?", default=None, help="Frequency: annual/quarterly (default: quarterly)")
    p.add_argument("curr_date", nargs="?", default=None, help="Current date (yyyy-mm-dd)")
    p.set_defaults(func=cmd_get_balance_sheet)

    # get_cashflow
    p = subparsers.add_parser("get_cashflow", help="Get cash flow statement")
    p.add_argument("ticker", help="Ticker symbol")
    p.add_argument("freq", nargs="?", default=None, help="Frequency: annual/quarterly (default: quarterly)")
    p.add_argument("curr_date", nargs="?", default=None, help="Current date (yyyy-mm-dd)")
    p.set_defaults(func=cmd_get_cashflow)

    # get_income_statement
    p = subparsers.add_parser("get_income_statement", help="Get income statement")
    p.add_argument("ticker", help="Ticker symbol")
    p.add_argument("freq", nargs="?", default=None, help="Frequency: annual/quarterly (default: quarterly)")
    p.add_argument("curr_date", nargs="?", default=None, help="Current date (yyyy-mm-dd)")
    p.set_defaults(func=cmd_get_income_statement)

    # get_news
    p = subparsers.add_parser("get_news", help="Get company news")
    p.add_argument("ticker", help="Ticker symbol")
    p.add_argument("start_date", help="Start date (yyyy-mm-dd)")
    p.add_argument("end_date", help="End date (yyyy-mm-dd)")
    p.set_defaults(func=cmd_get_news)

    # get_global_news
    p = subparsers.add_parser("get_global_news", help="Get global macroeconomic news")
    p.add_argument("curr_date", help="Current date (yyyy-mm-dd)")
    p.add_argument("look_back_days", nargs="?", default=None, help="Lookback days (default: 7)")
    p.add_argument("limit", nargs="?", default=None, help="Max articles (default: 5)")
    p.set_defaults(func=cmd_get_global_news)

    # get_insider_transactions
    p = subparsers.add_parser("get_insider_transactions", help="Get insider transactions")
    p.add_argument("ticker", help="Ticker symbol")
    p.set_defaults(func=cmd_get_insider_transactions)

    # memory_get
    p = subparsers.add_parser("memory_get", help="Retrieve memories matching a situation")
    p.add_argument("memory_name", help="Memory name (e.g., bull_memory)")
    p.add_argument("situation_file", help="Path to file containing the situation text")
    p.add_argument("n_matches", nargs="?", default=None, help="Number of matches (default: 2)")
    p.set_defaults(func=cmd_memory_get)

    # memory_add
    p = subparsers.add_parser("memory_add", help="Add a situation+advice to memory")
    p.add_argument("memory_name", help="Memory name (e.g., bull_memory)")
    p.add_argument("situation_file", help="Path to file containing the situation text")
    p.add_argument("advice_file", help="Path to file containing the advice text")
    p.set_defaults(func=cmd_memory_add)

    # memory_clear
    p = subparsers.add_parser("memory_clear", help="Clear all entries from a memory")
    p.add_argument("memory_name", help="Memory name to clear")
    p.set_defaults(func=cmd_memory_clear)

    # save_results
    p = subparsers.add_parser("save_results", help="Save analysis results to JSON log")
    p.add_argument("ticker", help="Ticker symbol")
    p.add_argument("trade_date", help="Trade date (yyyy-mm-dd)")
    p.add_argument("state_json_file", help="Path to JSON file containing the full state")
    p.set_defaults(func=cmd_save_results)

    return parser


def main():
    """CLI entry point: parse argv, dispatch, and map failures to exit code 1."""
    parser = build_parser()
    args = parser.parse_args()

    if not args.command:
        parser.print_help()
        sys.exit(1)

    try:
        args.func(args)
    except Exception as e:
        # Single top-level boundary: report the error on stderr so a calling
        # subagent sees a concise failure rather than a traceback.
        print(f"ERROR: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
+""" + +import json +import os +import subprocess +import sys +import tempfile +from datetime import datetime, timedelta +from pathlib import Path + +import pytest + +PROJECT_ROOT = Path(__file__).parent.parent +CC_TOOLS = PROJECT_ROOT / "cc_tools.py" + +PYTHON = str(PROJECT_ROOT / ".venv" / "Scripts" / "python.exe") +if not Path(PYTHON).exists(): + PYTHON = str(PROJECT_ROOT / ".venv" / "bin" / "python") +if not Path(PYTHON).exists(): + PYTHON = sys.executable + + +def run(*args, timeout=60): + result = subprocess.run( + [PYTHON, str(CC_TOOLS)] + list(args), + capture_output=True, + text=True, + timeout=timeout, + cwd=str(PROJECT_ROOT), + ) + return result.stdout, result.stderr, result.returncode + + +class TestFullAnalystDataPipeline: + """Simulate what the 4 analyst subagents would do: fetch all data for a ticker.""" + + TICKER = "MSFT" + TRADE_DATE = "2025-03-15" + START_30D = "2025-02-13" + START_7D = "2025-03-08" + + def test_market_analyst_data(self): + """Market analyst fetches stock data + indicators.""" + # Stock data + stdout, _, rc = run("get_stock_data", self.TICKER, self.START_30D, self.TRADE_DATE) + assert rc == 0 + assert "Open" in stdout or "Close" in stdout + stock_lines = [l for l in stdout.strip().split("\n") if not l.startswith("#")] + assert len(stock_lines) >= 2 # header + data + + # Indicators + for indicator in ["rsi", "macd", "close_50_sma"]: + stdout, _, rc = run("get_indicators", self.TICKER, indicator, self.TRADE_DATE) + assert rc == 0 + assert len(stdout.strip()) > 10 + + def test_social_analyst_data(self): + """Social media analyst fetches company news.""" + stdout, _, rc = run("get_news", self.TICKER, self.START_7D, self.TRADE_DATE) + assert rc == 0 # may have no news but should not crash + + def test_news_analyst_data(self): + """News analyst fetches company + global news.""" + stdout, _, rc = run("get_news", self.TICKER, self.START_7D, self.TRADE_DATE) + assert rc == 0 + + stdout, _, rc = run("get_global_news", self.TRADE_DATE, "7", 
"5") + assert rc == 0 + + def test_fundamentals_analyst_data(self): + """Fundamentals analyst fetches all financial statements.""" + stdout, _, rc = run("get_fundamentals", self.TICKER, self.TRADE_DATE) + assert rc == 0 + assert len(stdout.strip()) > 50 + + stdout, _, rc = run("get_balance_sheet", self.TICKER, "quarterly", self.TRADE_DATE) + assert rc == 0 + + stdout, _, rc = run("get_cashflow", self.TICKER, "quarterly", self.TRADE_DATE) + assert rc == 0 + + stdout, _, rc = run("get_income_statement", self.TICKER, "quarterly", self.TRADE_DATE) + assert rc == 0 + + +class TestMemoryPersistenceAcrossInvocations: + """Verify memory works across separate CLI invocations (simulating separate subagents).""" + + MEMORY_NAME = "integration_test_memory" + + @pytest.fixture(autouse=True) + def cleanup(self): + run("memory_clear", self.MEMORY_NAME) + yield + run("memory_clear", self.MEMORY_NAME) + + def test_memory_persists_across_calls(self, tmp_path): + """Add memory in one call, retrieve in another — simulates cross-agent persistence.""" + # First invocation: add a memory + sit = tmp_path / "sit.txt" + adv = tmp_path / "adv.txt" + sit.write_text( + "AAPL showing strong growth in services revenue with expanding margins. " + "iPhone sales declining but offset by services and wearables growth." + ) + adv.write_text( + "The bull case was correct. Services growth proved more durable than expected. " + "Lesson: Don't underweight services revenue growth trajectory." + ) + stdout, _, rc = run("memory_add", self.MEMORY_NAME, str(sit), str(adv)) + assert rc == 0 + + # Second invocation: add another memory + sit2 = tmp_path / "sit2.txt" + adv2 = tmp_path / "adv2.txt" + sit2.write_text( + "NVDA GPU demand surging due to AI infrastructure buildout. " + "Data center revenue growing 200% year over year." + ) + adv2.write_text( + "The aggressive stance was justified. AI infrastructure spend continued. " + "Lesson: When there's a genuine paradigm shift, be more aggressive." 
class TestEndToEndResultsSaving:
    """Test the full results save/load cycle."""

    def test_save_and_verify_structure(self, tmp_path):
        """Verify saved results match the original TradingAgentsGraph._log_state format."""
        # Representative end-of-pipeline agent state.  The input key is
        # "trader_investment_plan"; the saved entry is asserted below to carry
        # "trader_investment_decision" instead, presumably renamed by
        # save_results to mirror _log_state.
        # NOTE(review): confirm that rename against cc_tools save_results.
        state = {
            "company_of_interest": "INTEGRATION_TEST",
            "trade_date": "2025-03-15",
            "market_report": "Market is trending upward with strong momentum indicators.",
            "sentiment_report": "Social media sentiment is overwhelmingly positive.",
            "news_report": "Recent earnings beat expectations. Fed holds rates steady.",
            "fundamentals_report": "Strong balance sheet with growing free cash flow.",
            "investment_debate_state": {
                "bull_history": "Bull Analyst: Strong growth trajectory...",
                "bear_history": "Bear Analyst: Overvalued at current levels...",
                "history": "Bull Analyst: Strong growth...\nBear Analyst: Overvalued...",
                "current_response": "Bear Analyst: Overvalued at current levels...",
                "judge_decision": "Buy - bull case is more compelling",
            },
            "investment_plan": "Buy with a 12-month horizon, position size 5% of portfolio.",
            "trader_investment_plan": "FINAL TRANSACTION PROPOSAL: **BUY**",
            "risk_debate_state": {
                "aggressive_history": "Aggressive: Go all in...",
                "conservative_history": "Conservative: Limit to 3%...",
                "neutral_history": "Neutral: 5% seems right...",
                "history": "Aggressive: Go all in...\nConservative: Limit...\nNeutral: 5%...",
                "judge_decision": "Buy with 5% position size, stop loss at -10%",
            },
            "final_trade_decision": "**Buy** - Position size: 5% of portfolio. Stop loss: -10%.",
        }

        state_file = tmp_path / "state.json"
        state_file.write_text(json.dumps(state))

        stdout, _, rc = run("save_results", "INTEGRATION_TEST", "2025-03-15", str(state_file))
        assert rc == 0
        assert "Results saved" in stdout

        # Verify the file landed where TradingAgentsGraph._log_state writes it.
        out_path = (
            PROJECT_ROOT
            / "eval_results"
            / "INTEGRATION_TEST"
            / "TradingAgentsStrategy_logs"
            / "full_states_log_2025-03-15.json"
        )
        assert out_path.exists()

        try:
            with open(out_path) as f:
                saved = json.load(f)

            entry = saved["2025-03-15"]

            # All expected fields exist (matching TradingAgentsGraph._log_state).
            assert entry["company_of_interest"] == "INTEGRATION_TEST"
            assert entry["trade_date"] == "2025-03-15"
            for key in (
                "market_report",
                "sentiment_report",
                "news_report",
                "fundamentals_report",
                "investment_debate_state",
                "investment_plan",
                "final_trade_decision",
                "risk_debate_state",
                "trader_investment_decision",
            ):
                assert key in entry

            # Nested debate structures keep their per-analyst histories.
            for key in ("bull_history", "bear_history", "judge_decision"):
                assert key in entry["investment_debate_state"]
            for key in ("aggressive_history", "conservative_history", "neutral_history"):
                assert key in entry["risk_debate_state"]
        finally:
            # Clean up even when an assertion above fails; a leftover
            # eval_results/INTEGRATION_TEST tree would pollute the repo and
            # break later runs (rmdir refuses non-empty directories).
            out_path.unlink()
            out_path.parent.rmdir()
            (PROJECT_ROOT / "eval_results" / "INTEGRATION_TEST").rmdir()
+""" + +import json +import os +import subprocess +import sys +import tempfile +from datetime import datetime, timedelta +from pathlib import Path + +import pytest + +# Find the project root (where cc_tools.py lives) +PROJECT_ROOT = Path(__file__).parent.parent +CC_TOOLS = PROJECT_ROOT / "cc_tools.py" + +# Find Python executable - prefer venv +PYTHON = str(PROJECT_ROOT / ".venv" / "Scripts" / "python.exe") +if not Path(PYTHON).exists(): + PYTHON = str(PROJECT_ROOT / ".venv" / "bin" / "python") +if not Path(PYTHON).exists(): + PYTHON = sys.executable + + +def run_cc_tools(*args, timeout=60): + """Run cc_tools.py with given arguments and return (stdout, stderr, returncode).""" + cmd = [PYTHON, str(CC_TOOLS)] + list(args) + result = subprocess.run( + cmd, + capture_output=True, + text=True, + timeout=timeout, + cwd=str(PROJECT_ROOT), + ) + return result.stdout, result.stderr, result.returncode + + +class TestHelp: + def test_help_output(self): + stdout, stderr, rc = run_cc_tools("--help") + assert rc == 0 + assert "get_stock_data" in stdout + assert "memory_get" in stdout + assert "save_results" in stdout + + def test_no_args_shows_help(self): + stdout, stderr, rc = run_cc_tools() + assert rc == 1 # should fail with no command + + +class TestStockData: + def test_get_stock_data_returns_csv(self): + stdout, stderr, rc = run_cc_tools( + "get_stock_data", "AAPL", "2025-03-01", "2025-03-15" + ) + assert rc == 0 + assert "AAPL" in stdout or "Date" in stdout or "Open" in stdout + # Should contain CSV-like data + lines = stdout.strip().split("\n") + assert len(lines) > 2 # header + at least one data row + + def test_get_stock_data_invalid_ticker(self): + stdout, stderr, rc = run_cc_tools( + "get_stock_data", "INVALIDTICKER12345", "2025-03-01", "2025-03-15" + ) + # Should either return empty/error but not crash + assert rc == 0 or rc == 1 + + +class TestIndicators: + def test_get_rsi(self): + stdout, stderr, rc = run_cc_tools( + "get_indicators", "AAPL", "rsi", "2025-03-15" + 
) + assert rc == 0 + assert "rsi" in stdout.lower() + + def test_get_macd(self): + stdout, stderr, rc = run_cc_tools( + "get_indicators", "AAPL", "macd", "2025-03-15" + ) + assert rc == 0 + assert "macd" in stdout.lower() + + def test_get_indicators_with_lookback(self): + stdout, stderr, rc = run_cc_tools( + "get_indicators", "AAPL", "rsi", "2025-03-15", "15" + ) + assert rc == 0 + + +class TestFundamentals: + def test_get_fundamentals(self): + stdout, stderr, rc = run_cc_tools( + "get_fundamentals", "AAPL", "2025-03-15" + ) + assert rc == 0 + assert len(stdout.strip()) > 50 # should have substantial content + # Should contain some fundamental data keywords + assert any( + kw in stdout.lower() + for kw in ["market cap", "pe ratio", "eps", "sector", "apple"] + ) + + def test_get_balance_sheet(self): + stdout, stderr, rc = run_cc_tools( + "get_balance_sheet", "AAPL" + ) + assert rc == 0 + assert len(stdout.strip()) > 20 + + def test_get_cashflow(self): + stdout, stderr, rc = run_cc_tools( + "get_cashflow", "AAPL" + ) + assert rc == 0 + assert len(stdout.strip()) > 20 + + def test_get_income_statement(self): + stdout, stderr, rc = run_cc_tools( + "get_income_statement", "AAPL" + ) + assert rc == 0 + assert len(stdout.strip()) > 20 + + +class TestNews: + def test_get_news(self): + # Use recent dates for better chance of finding news + end = datetime.now().strftime("%Y-%m-%d") + start = (datetime.now() - timedelta(days=7)).strftime("%Y-%m-%d") + stdout, stderr, rc = run_cc_tools( + "get_news", "AAPL", start, end + ) + assert rc == 0 + # May or may not find news, but should not crash + + def test_get_global_news(self): + curr = datetime.now().strftime("%Y-%m-%d") + stdout, stderr, rc = run_cc_tools( + "get_global_news", curr + ) + assert rc == 0 + + def test_get_insider_transactions(self): + stdout, stderr, rc = run_cc_tools( + "get_insider_transactions", "AAPL" + ) + assert rc == 0 + + +class TestMemory: + """Test memory persistence (add, get, clear).""" + + MEMORY_NAME 
= "pytest_test_memory" + + @pytest.fixture(autouse=True) + def cleanup_memory(self): + """Clean up test memory before and after each test.""" + run_cc_tools("memory_clear", self.MEMORY_NAME) + yield + run_cc_tools("memory_clear", self.MEMORY_NAME) + + def test_memory_add_and_get_roundtrip(self, tmp_path): + # Write situation and advice to temp files + sit_file = tmp_path / "situation.txt" + adv_file = tmp_path / "advice.txt" + sit_file.write_text("High inflation with rising interest rates affecting tech stocks") + adv_file.write_text("Consider defensive sectors like utilities and consumer staples") + + # Add to memory + stdout, stderr, rc = run_cc_tools( + "memory_add", self.MEMORY_NAME, + str(sit_file), str(adv_file) + ) + assert rc == 0 + assert "Memory added" in stdout + assert "Total entries: 1" in stdout + + # Query memory + query_file = tmp_path / "query.txt" + query_file.write_text("Rising rates impacting technology sector valuations") + + stdout, stderr, rc = run_cc_tools( + "memory_get", self.MEMORY_NAME, + str(query_file), "1" + ) + assert rc == 0 + assert "defensive sectors" in stdout.lower() or "utilities" in stdout.lower() + + def test_memory_get_empty(self, tmp_path): + query_file = tmp_path / "query.txt" + query_file.write_text("Some query text") + + stdout, stderr, rc = run_cc_tools( + "memory_get", self.MEMORY_NAME, + str(query_file), "1" + ) + assert rc == 0 + assert "No past memories found" in stdout + + def test_memory_multiple_entries(self, tmp_path): + # Add two entries + for i, (sit, adv) in enumerate([ + ("Bull market with tech sector leading gains", "Increase tech allocation"), + ("Bear market with economic recession fears", "Reduce equity exposure"), + ]): + sit_file = tmp_path / f"sit_{i}.txt" + adv_file = tmp_path / f"adv_{i}.txt" + sit_file.write_text(sit) + adv_file.write_text(adv) + stdout, stderr, rc = run_cc_tools( + "memory_add", self.MEMORY_NAME, + str(sit_file), str(adv_file) + ) + assert rc == 0 + + # Query for something 
tech-related + query_file = tmp_path / "query.txt" + query_file.write_text("Tech stocks surging in bull market conditions") + + stdout, stderr, rc = run_cc_tools( + "memory_get", self.MEMORY_NAME, + str(query_file), "2" + ) + assert rc == 0 + assert "Memory Match 1" in stdout + assert "Memory Match 2" in stdout + + def test_memory_clear(self, tmp_path): + # Add entry + sit_file = tmp_path / "sit.txt" + adv_file = tmp_path / "adv.txt" + sit_file.write_text("test situation") + adv_file.write_text("test advice") + run_cc_tools("memory_add", self.MEMORY_NAME, str(sit_file), str(adv_file)) + + # Clear + stdout, stderr, rc = run_cc_tools("memory_clear", self.MEMORY_NAME) + assert rc == 0 + assert "cleared" in stdout.lower() + + # Verify empty + query_file = tmp_path / "query.txt" + query_file.write_text("test") + stdout, stderr, rc = run_cc_tools( + "memory_get", self.MEMORY_NAME, str(query_file) + ) + assert "No past memories found" in stdout + + +class TestSaveResults: + def test_save_results(self, tmp_path): + state = { + "company_of_interest": "TEST", + "trade_date": "2025-03-15", + "market_report": "Test market report", + "sentiment_report": "Test sentiment report", + "news_report": "Test news report", + "fundamentals_report": "Test fundamentals report", + "investment_debate_state": { + "bull_history": "Bull argument", + "bear_history": "Bear argument", + "history": "Full debate", + "current_response": "Latest", + "judge_decision": "Buy", + }, + "trader_investment_plan": "Buy ASAP", + "risk_debate_state": { + "aggressive_history": "Go big", + "conservative_history": "Be cautious", + "neutral_history": "Balance", + "history": "Full risk debate", + "judge_decision": "Buy with limits", + }, + "investment_plan": "Investment plan text", + "final_trade_decision": "Buy", + } + + state_file = tmp_path / "state.json" + state_file.write_text(json.dumps(state)) + + stdout, stderr, rc = run_cc_tools( + "save_results", "TEST", "2025-03-15", str(state_file) + ) + assert rc == 0 + 
assert "Results saved" in stdout + + # Verify the output file exists + out_path = PROJECT_ROOT / "eval_results" / "TEST" / "TradingAgentsStrategy_logs" / "full_states_log_2025-03-15.json" + assert out_path.exists() + + # Verify content + with open(out_path) as f: + saved = json.load(f) + assert "2025-03-15" in saved + assert saved["2025-03-15"]["market_report"] == "Test market report" + assert saved["2025-03-15"]["final_trade_decision"] == "Buy" + + # Clean up + out_path.unlink() + out_path.parent.rmdir() + (PROJECT_ROOT / "eval_results" / "TEST").rmdir()