diff --git a/README.md b/README.md index 34310010..4c4856d1 100644 --- a/README.md +++ b/README.md @@ -28,6 +28,7 @@ # TradingAgents: Multi-Agents LLM Financial Trading Framework ## News +- [2026-03] **TradingAgents v0.2.2** released with GPT-5.4/Gemini 3.1/Claude 4.6 model coverage, five-tier rating scale, OpenAI Responses API, Anthropic effort control, and cross-platform stability. - [2026-02] **TradingAgents v0.2.0** released with multi-provider LLM support (GPT-5.x, Gemini 3.x, Claude 4.x, Grok 4.x) and improved system architecture. - [2026-01] **Trading-R1** [Technical Report](https://arxiv.org/abs/2509.11420) released, with [Terminal](https://github.com/TauricResearch/Trading-R1) expected to land soon. @@ -111,9 +112,9 @@ conda create -n tradingagents python=3.13 conda activate tradingagents ``` -Install dependencies: +Install the package and its dependencies: ```bash -pip install -r requirements.txt +pip install . ``` ### Required APIs @@ -138,11 +139,12 @@ cp .env.example .env ### CLI Usage -You can also try out the CLI directly by running: +Launch the interactive CLI: ```bash -python -m cli.main +tradingagents # installed command +python -m cli.main # alternative: run directly from source ``` -You will see a screen where you can select your desired tickers, date, LLMs, research depth, etc. +You will see a screen where you can select your desired tickers, analysis date, LLM provider, research depth, and more.

diff --git a/cli/main.py b/cli/main.py
index d5fa38b8..dc0ee515 100644
--- a/cli/main.py
+++ b/cli/main.py
@@ -470,6 +470,6 @@ def get_user_selections():
     """Get all user selections before starting the analysis display."""
     # Display ASCII art welcome message
-    with open("./cli/static/welcome.txt", "r", encoding='utf-8') as f:
+    with open(Path(__file__).parent / "static" / "welcome.txt", "r", encoding='utf-8') as f:
         welcome_ascii = f.read()
 
     # Create welcome box content
@@ -508,7 +509,9 @@ def get_user_selections():
     # Step 1: Ticker symbol
     console.print(
         create_question_box(
-            "Step 1: Ticker Symbol", "Enter the ticker symbol to analyze", "SPY"
+            "Step 1: Ticker Symbol",
+            "Enter the exact ticker symbol to analyze, including exchange suffix when needed (examples: SPY, CNC.TO, 7203.T, 0700.HK)",
+            "SPY",
         )
     )
     selected_ticker = get_ticker()
@@ -563,6 +566,7 @@ def get_user_selections():
     # Step 7: Provider-specific thinking configuration
     thinking_level = None
     reasoning_effort = None
+    anthropic_effort = None
     provider_lower = selected_llm_provider.lower()
 
     if provider_lower == "google":
@@ -581,6 +585,14 @@ def get_user_selections():
             )
         )
         reasoning_effort = ask_openai_reasoning_effort()
+    elif provider_lower == "anthropic":
+        console.print(
+            create_question_box(
+                "Step 7: Effort Level",
+                "Configure Claude effort level"
+            )
+        )
+        anthropic_effort = ask_anthropic_effort()
 
     return {
         "ticker": selected_ticker,
@@ -593,6 +605,7 @@ def get_user_selections():
         "deep_thinker": selected_deep_thinker,
         "google_thinking_level": thinking_level,
         "openai_reasoning_effort": reasoning_effort,
+        "anthropic_effort": anthropic_effort,
     }
 
 
@@ -795,9 +808,11 @@ ANALYST_REPORT_MAP = {
 
 def update_analyst_statuses(message_buffer, chunk):
-    """Update all analyst statuses based on current report state.
+    """Update analyst statuses based on accumulated report state.
 
     Logic:
+    - Store new report content from the current chunk if present
+    - Check accumulated report_sections (not just current chunk) for status
     - Analysts with reports = completed
     - First analyst without report = in_progress
     - Remaining analysts without reports = pending
@@ -812,11 +827,16 @@ def update_analyst_statuses(message_buffer, chunk):
         agent_name = ANALYST_AGENT_NAMES[analyst_key]
         report_key = ANALYST_REPORT_MAP[analyst_key]
-        has_report = bool(chunk.get(report_key))
+
+        # Capture new report content from current chunk
+        if chunk.get(report_key):
+            message_buffer.update_report_section(report_key, chunk[report_key])
+
+        # Determine status from accumulated sections, not just current chunk
+        has_report = bool(message_buffer.report_sections.get(report_key))
 
         if has_report:
             message_buffer.update_agent_status(agent_name, "completed")
-            message_buffer.update_report_section(report_key, chunk[report_key])
         elif not found_active:
             message_buffer.update_agent_status(agent_name, "in_progress")
             found_active = True
@@ -919,6 +939,7 @@ def run_analysis():
     # Provider-specific thinking configuration
     config["google_thinking_level"] = selections.get("google_thinking_level")
     config["openai_reasoning_effort"] = selections.get("openai_reasoning_effort")
+    config["anthropic_effort"] = selections.get("anthropic_effort")
 
     # Create stats callback handler for tracking LLM/tool calls
     stats_handler = StatsCallbackHandler()
@@ -982,7 +1003,8 @@ def run_analysis():
                     file_name = f"{section_name}.md"
+                    text = "\n".join(str(item) for item in content) if isinstance(content, list) else content
                     with open(report_dir / file_name, "w", encoding='utf-8') as f:
-                        f.write(content)
+                        f.write(text)
 
         return wrapper
 
     message_buffer.add_message = save_message_decorator(message_buffer, "add_message")
diff --git a/cli/utils.py b/cli/utils.py
index aa097fb5..18abc3a7 100644
--- a/cli/utils.py
+++ b/cli/utils.py
@@ -1,8 +1,14 @@
 import questionary
 from typing import List,
Optional, Tuple, Dict +from rich.console import Console + from cli.models import AnalystType +console = Console() + +TICKER_INPUT_EXAMPLES = "Examples: SPY, CNC.TO, 7203.T, 0700.HK" + ANALYST_ORDER = [ ("Market Analyst", AnalystType.MARKET), ("Social Media Analyst", AnalystType.SOCIAL), @@ -14,7 +20,7 @@ ANALYST_ORDER = [ def get_ticker() -> str: """Prompt the user to enter a ticker symbol.""" ticker = questionary.text( - "Enter the ticker symbol to analyze:", + f"Enter the exact ticker symbol to analyze ({TICKER_INPUT_EXAMPLES}):", validate=lambda x: len(x.strip()) > 0 or "Please enter a valid ticker symbol.", style=questionary.Style( [ @@ -28,6 +34,11 @@ def get_ticker() -> str: console.print("\n[red]No ticker symbol provided. Exiting...[/red]") exit(1) + return normalize_ticker_symbol(ticker) + + +def normalize_ticker_symbol(ticker: str) -> str: + """Normalize ticker input while preserving exchange suffixes.""" return ticker.strip().upper() @@ -126,30 +137,30 @@ def select_shallow_thinking_agent(provider) -> str: """Select shallow thinking llm engine using an interactive selection.""" # Define shallow thinking llm engine options with their corresponding model names + # Ordering: medium → light → heavy (balanced first for quick tasks) + # Within same tier, newer models first SHALLOW_AGENT_OPTIONS = { "openai": [ - ("GPT-5 Mini - Cost-optimized reasoning", "gpt-5-mini"), - ("GPT-5 Nano - Ultra-fast, high-throughput", "gpt-5-nano"), - ("GPT-5.2 - Latest flagship", "gpt-5.2"), - ("GPT-5.1 - Flexible reasoning", "gpt-5.1"), - ("GPT-4.1 - Smartest non-reasoning, 1M context", "gpt-4.1"), + ("GPT-5 Mini - Balanced speed, cost, and capability", "gpt-5-mini"), + ("GPT-5 Nano - High-throughput, simple tasks", "gpt-5-nano"), + ("GPT-5.4 - Latest frontier, 1M context", "gpt-5.4"), + ("GPT-4.1 - Smartest non-reasoning model", "gpt-4.1"), ], "anthropic": [ - ("Claude Haiku 4.5 - Fast + extended thinking", "claude-haiku-4-5"), - ("Claude Sonnet 4.5 - Best for agents/coding", 
"claude-sonnet-4-5"), - ("Claude Sonnet 4 - High-performance", "claude-sonnet-4-20250514"), + ("Claude Sonnet 4.6 - Best speed and intelligence balance", "claude-sonnet-4-6"), + ("Claude Haiku 4.5 - Fast, near-instant responses", "claude-haiku-4-5"), + ("Claude Sonnet 4.5 - Agents and coding", "claude-sonnet-4-5"), ], "google": [ ("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"), - ("Gemini 2.5 Flash - Balanced, recommended", "gemini-2.5-flash"), - ("Gemini 3 Pro - Reasoning-first", "gemini-3-pro-preview"), + ("Gemini 2.5 Flash - Balanced, stable", "gemini-2.5-flash"), + ("Gemini 3.1 Flash Lite - Most cost-efficient", "gemini-3.1-flash-lite-preview"), ("Gemini 2.5 Flash Lite - Fast, low-cost", "gemini-2.5-flash-lite"), ], "xai": [ ("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"), ("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"), ("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"), - ("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"), ], "openrouter": [ ("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"), @@ -191,33 +202,32 @@ def select_deep_thinking_agent(provider) -> str: """Select deep thinking llm engine using an interactive selection.""" # Define deep thinking llm engine options with their corresponding model names + # Ordering: heavy → medium → light (most capable first for deep tasks) + # Within same tier, newer models first DEEP_AGENT_OPTIONS = { "openai": [ - ("GPT-5.2 - Latest flagship", "gpt-5.2"), - ("GPT-5.1 - Flexible reasoning", "gpt-5.1"), - ("GPT-5 - Advanced reasoning", "gpt-5"), - ("GPT-4.1 - Smartest non-reasoning, 1M context", "gpt-4.1"), - ("GPT-5 Mini - Cost-optimized reasoning", "gpt-5-mini"), - ("GPT-5 Nano - Ultra-fast, high-throughput", "gpt-5-nano"), + ("GPT-5.4 - Latest frontier, 1M context", "gpt-5.4"), + ("GPT-5.2 - Strong reasoning, cost-effective", "gpt-5.2"), + ("GPT-5 Mini 
- Balanced speed, cost, and capability", "gpt-5-mini"), + ("GPT-5.4 Pro - Most capable, expensive ($30/$180 per 1M tokens)", "gpt-5.4-pro"), ], "anthropic": [ - ("Claude Sonnet 4.5 - Best for agents/coding", "claude-sonnet-4-5"), + ("Claude Opus 4.6 - Most intelligent, agents and coding", "claude-opus-4-6"), ("Claude Opus 4.5 - Premium, max intelligence", "claude-opus-4-5"), - ("Claude Opus 4.1 - Most capable model", "claude-opus-4-1-20250805"), - ("Claude Haiku 4.5 - Fast + extended thinking", "claude-haiku-4-5"), - ("Claude Sonnet 4 - High-performance", "claude-sonnet-4-20250514"), + ("Claude Sonnet 4.6 - Best speed and intelligence balance", "claude-sonnet-4-6"), + ("Claude Sonnet 4.5 - Agents and coding", "claude-sonnet-4-5"), ], "google": [ - ("Gemini 3 Pro - Reasoning-first", "gemini-3-pro-preview"), + ("Gemini 3.1 Pro - Reasoning-first, complex workflows", "gemini-3.1-pro-preview"), ("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"), - ("Gemini 2.5 Flash - Balanced, recommended", "gemini-2.5-flash"), + ("Gemini 2.5 Pro - Stable pro model", "gemini-2.5-pro"), + ("Gemini 2.5 Flash - Balanced, stable", "gemini-2.5-flash"), ], "xai": [ + ("Grok 4 - Flagship model", "grok-4-0709"), ("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"), ("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"), - ("Grok 4 - Flagship model", "grok-4-0709"), ("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"), - ("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"), ], "openrouter": [ ("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"), @@ -308,6 +318,26 @@ def ask_openai_reasoning_effort() -> str: ).ask() +def ask_anthropic_effort() -> str | None: + """Ask for Anthropic effort level. + + Controls token usage and response thoroughness on Claude 4.5+ and 4.6 models. 
+ """ + return questionary.select( + "Select Effort Level:", + choices=[ + questionary.Choice("High (recommended)", "high"), + questionary.Choice("Medium (balanced)", "medium"), + questionary.Choice("Low (faster, cheaper)", "low"), + ], + style=questionary.Style([ + ("selected", "fg:cyan noinherit"), + ("highlighted", "fg:cyan noinherit"), + ("pointer", "fg:cyan noinherit"), + ]), + ).ask() + + def ask_gemini_thinking_config() -> str | None: """Ask for Gemini thinking configuration. diff --git a/pyproject.toml b/pyproject.toml index 9213d7f6..de27a2b9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,14 +4,13 @@ build-backend = "setuptools.build_meta" [project] name = "tradingagents" -version = "0.2.0" +version = "0.2.2" description = "TradingAgents: Multi-Agents LLM Financial Trading Framework" readme = "README.md" requires-python = ">=3.10" dependencies = [ "langchain-core>=0.3.81", "backtrader>=1.9.78.123", - "chainlit>=2.5.5", "langchain-anthropic>=0.3.15", "langchain-experimental>=0.3.4", "langchain-google-genai>=2.1.5", @@ -38,3 +37,6 @@ tradingagents = "cli.main:app" [tool.setuptools.packages.find] include = ["tradingagents*", "cli*"] + +[tool.setuptools.package-data] +cli = ["static/*"] diff --git a/requirements.txt b/requirements.txt index 9e51ed98..9c558e35 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,22 +1 @@ -typing-extensions -langchain-core -langchain-openai -langchain-experimental -pandas -yfinance -stockstats -langgraph -rank-bm25 -setuptools -backtrader -parsel -requests -tqdm -pytz -redis -chainlit -rich -typer -questionary -langchain_anthropic -langchain-google-genai +. 
diff --git a/tests/test_ticker_symbol_handling.py b/tests/test_ticker_symbol_handling.py new file mode 100644 index 00000000..858d26cd --- /dev/null +++ b/tests/test_ticker_symbol_handling.py @@ -0,0 +1,18 @@ +import unittest + +from cli.utils import normalize_ticker_symbol +from tradingagents.agents.utils.agent_utils import build_instrument_context + + +class TickerSymbolHandlingTests(unittest.TestCase): + def test_normalize_ticker_symbol_preserves_exchange_suffix(self): + self.assertEqual(normalize_ticker_symbol(" cnc.to "), "CNC.TO") + + def test_build_instrument_context_mentions_exact_symbol(self): + context = build_instrument_context("7203.T") + self.assertIn("7203.T", context) + self.assertIn("exchange suffix", context) + + +if __name__ == "__main__": + unittest.main() diff --git a/tradingagents/__init__.py b/tradingagents/__init__.py new file mode 100644 index 00000000..43a2b439 --- /dev/null +++ b/tradingagents/__init__.py @@ -0,0 +1,2 @@ +import os +os.environ.setdefault("PYTHONUTF8", "1") diff --git a/tradingagents/agents/__init__.py b/tradingagents/agents/__init__.py index 8a169f22..1f03642c 100644 --- a/tradingagents/agents/__init__.py +++ b/tradingagents/agents/__init__.py @@ -15,7 +15,7 @@ from .risk_mgmt.conservative_debator import create_conservative_debator from .risk_mgmt.neutral_debator import create_neutral_debator from .managers.research_manager import create_research_manager -from .managers.risk_manager import create_risk_manager +from .managers.portfolio_manager import create_portfolio_manager from .trader.trader import create_trader @@ -33,7 +33,7 @@ __all__ = [ "create_neutral_debator", "create_news_analyst", "create_aggressive_debator", - "create_risk_manager", + "create_portfolio_manager", "create_conservative_debator", "create_social_media_analyst", "create_trader", diff --git a/tradingagents/agents/analysts/fundamentals_analyst.py b/tradingagents/agents/analysts/fundamentals_analyst.py index 22d91848..990398a6 100644 --- 
a/tradingagents/agents/analysts/fundamentals_analyst.py +++ b/tradingagents/agents/analysts/fundamentals_analyst.py @@ -1,15 +1,21 @@ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder import time import json -from tradingagents.agents.utils.agent_utils import get_fundamentals, get_balance_sheet, get_cashflow, get_income_statement, get_insider_transactions +from tradingagents.agents.utils.agent_utils import ( + build_instrument_context, + get_balance_sheet, + get_cashflow, + get_fundamentals, + get_income_statement, + get_insider_transactions, +) from tradingagents.dataflows.config import get_config def create_fundamentals_analyst(llm): def fundamentals_analyst_node(state): current_date = state["trade_date"] - ticker = state["company_of_interest"] - company_name = state["company_of_interest"] + instrument_context = build_instrument_context(state["company_of_interest"]) tools = [ get_fundamentals, @@ -19,7 +25,7 @@ def create_fundamentals_analyst(llm): ] system_message = ( - "You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, and company financial history to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." + "You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, and company financial history to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. 
Provide specific, actionable insights with supporting evidence to help traders make informed decisions." + " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read." + " Use the available tools: `get_fundamentals` for comprehensive company analysis, `get_balance_sheet`, `get_cashflow`, and `get_income_statement` for specific financial statements.", ) @@ -35,7 +41,7 @@ def create_fundamentals_analyst(llm): " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable," " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop." " You have access to the following tools: {tool_names}.\n{system_message}" - "For your reference, the current date is {current_date}. The company we want to look at is {ticker}", + "For your reference, the current date is {current_date}. {instrument_context}", ), MessagesPlaceholder(variable_name="messages"), ] @@ -44,7 +50,7 @@ def create_fundamentals_analyst(llm): prompt = prompt.partial(system_message=system_message) prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools])) prompt = prompt.partial(current_date=current_date) - prompt = prompt.partial(ticker=ticker) + prompt = prompt.partial(instrument_context=instrument_context) chain = prompt | llm.bind_tools(tools) diff --git a/tradingagents/agents/analysts/market_analyst.py b/tradingagents/agents/analysts/market_analyst.py index e175b94e..f5d17acd 100644 --- a/tradingagents/agents/analysts/market_analyst.py +++ b/tradingagents/agents/analysts/market_analyst.py @@ -1,7 +1,11 @@ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder import time import json -from tradingagents.agents.utils.agent_utils import get_stock_data, get_indicators +from tradingagents.agents.utils.agent_utils import ( + build_instrument_context, + get_indicators, + get_stock_data, +) from tradingagents.dataflows.config import 
get_config @@ -9,8 +13,7 @@ def create_market_analyst(llm): def market_analyst_node(state): current_date = state["trade_date"] - ticker = state["company_of_interest"] - company_name = state["company_of_interest"] + instrument_context = build_instrument_context(state["company_of_interest"]) tools = [ get_stock_data, @@ -42,7 +45,7 @@ Volatility Indicators: Volume-Based Indicators: - vwma: VWMA: A moving average weighted by volume. Usage: Confirm trends by integrating price action with volume data. Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses. -- Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_stock_data first to retrieve the CSV that is needed to generate indicators. Then use get_indicators with the specific indicator names. Write a very detailed and nuanced report of the trends you observe. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions.""" +- Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_stock_data first to retrieve the CSV that is needed to generate indicators. Then use get_indicators with the specific indicator names. Write a very detailed and nuanced report of the trends you observe. 
Provide specific, actionable insights with supporting evidence to help traders make informed decisions.""" + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""" ) @@ -57,7 +60,7 @@ Volume-Based Indicators: " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable," " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop." " You have access to the following tools: {tool_names}.\n{system_message}" - "For your reference, the current date is {current_date}. The company we want to look at is {ticker}", + "For your reference, the current date is {current_date}. {instrument_context}", ), MessagesPlaceholder(variable_name="messages"), ] @@ -66,7 +69,7 @@ Volume-Based Indicators: prompt = prompt.partial(system_message=system_message) prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools])) prompt = prompt.partial(current_date=current_date) - prompt = prompt.partial(ticker=ticker) + prompt = prompt.partial(instrument_context=instrument_context) chain = prompt | llm.bind_tools(tools) diff --git a/tradingagents/agents/analysts/news_analyst.py b/tradingagents/agents/analysts/news_analyst.py index 03b4fae4..3697c6f6 100644 --- a/tradingagents/agents/analysts/news_analyst.py +++ b/tradingagents/agents/analysts/news_analyst.py @@ -1,14 +1,18 @@ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder import time import json -from tradingagents.agents.utils.agent_utils import get_news, get_global_news +from tradingagents.agents.utils.agent_utils import ( + build_instrument_context, + get_global_news, + get_news, +) from tradingagents.dataflows.config import get_config def create_news_analyst(llm): def news_analyst_node(state): current_date = state["trade_date"] - ticker = state["company_of_interest"] + instrument_context = build_instrument_context(state["company_of_interest"]) 
tools = [ get_news, @@ -16,7 +20,7 @@ def create_news_analyst(llm): ] system_message = ( - "You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. Use the available tools: get_news(query, start_date, end_date) for company-specific or targeted news searches, and get_global_news(curr_date, look_back_days, limit) for broader macroeconomic news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." + "You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. Use the available tools: get_news(query, start_date, end_date) for company-specific or targeted news searches, and get_global_news(curr_date, look_back_days, limit) for broader macroeconomic news. Provide specific, actionable insights with supporting evidence to help traders make informed decisions." + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""" ) @@ -31,7 +35,7 @@ def create_news_analyst(llm): " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable," " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop." " You have access to the following tools: {tool_names}.\n{system_message}" - "For your reference, the current date is {current_date}. We are looking at the company {ticker}", + "For your reference, the current date is {current_date}. 
{instrument_context}", ), MessagesPlaceholder(variable_name="messages"), ] @@ -40,7 +44,7 @@ def create_news_analyst(llm): prompt = prompt.partial(system_message=system_message) prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools])) prompt = prompt.partial(current_date=current_date) - prompt = prompt.partial(ticker=ticker) + prompt = prompt.partial(instrument_context=instrument_context) chain = prompt | llm.bind_tools(tools) result = chain.invoke(state["messages"]) diff --git a/tradingagents/agents/analysts/social_media_analyst.py b/tradingagents/agents/analysts/social_media_analyst.py index b25712d7..43df2258 100644 --- a/tradingagents/agents/analysts/social_media_analyst.py +++ b/tradingagents/agents/analysts/social_media_analyst.py @@ -1,23 +1,22 @@ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder import time import json -from tradingagents.agents.utils.agent_utils import get_news +from tradingagents.agents.utils.agent_utils import build_instrument_context, get_news from tradingagents.dataflows.config import get_config def create_social_media_analyst(llm): def social_media_analyst_node(state): current_date = state["trade_date"] - ticker = state["company_of_interest"] - company_name = state["company_of_interest"] + instrument_context = build_instrument_context(state["company_of_interest"]) tools = [ get_news, ] system_message = ( - "You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. 
Use the get_news(query, start_date, end_date) tool to search for company-specific news and social media discussions. Try to look at all sources possible from social media to sentiment to news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." - + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""", + "You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. Use the get_news(query, start_date, end_date) tool to search for company-specific news and social media discussions. Try to look at all sources possible from social media to sentiment to news. Provide specific, actionable insights with supporting evidence to help traders make informed decisions." + + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""" ) prompt = ChatPromptTemplate.from_messages( @@ -31,7 +30,7 @@ def create_social_media_analyst(llm): " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable," " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop." " You have access to the following tools: {tool_names}.\n{system_message}" - "For your reference, the current date is {current_date}. 
The current company we want to analyze is {ticker}", + "For your reference, the current date is {current_date}. {instrument_context}", ), MessagesPlaceholder(variable_name="messages"), ] @@ -40,7 +39,7 @@ def create_social_media_analyst(llm): prompt = prompt.partial(system_message=system_message) prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools])) prompt = prompt.partial(current_date=current_date) - prompt = prompt.partial(ticker=ticker) + prompt = prompt.partial(instrument_context=instrument_context) chain = prompt | llm.bind_tools(tools) diff --git a/tradingagents/agents/managers/portfolio_manager.py b/tradingagents/agents/managers/portfolio_manager.py new file mode 100644 index 00000000..acdf940b --- /dev/null +++ b/tradingagents/agents/managers/portfolio_manager.py @@ -0,0 +1,75 @@ +from tradingagents.agents.utils.agent_utils import build_instrument_context + + +def create_portfolio_manager(llm, memory): + def portfolio_manager_node(state) -> dict: + + instrument_context = build_instrument_context(state["company_of_interest"]) + + history = state["risk_debate_state"]["history"] + risk_debate_state = state["risk_debate_state"] + market_research_report = state["market_report"] + news_report = state["news_report"] + fundamentals_report = state["fundamentals_report"] + sentiment_report = state["sentiment_report"] + trader_plan = state["investment_plan"] + + curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}" + past_memories = memory.get_memories(curr_situation, n_matches=2) + + past_memory_str = "" + for i, rec in enumerate(past_memories, 1): + past_memory_str += rec["recommendation"] + "\n\n" + + prompt = f"""As the Portfolio Manager, synthesize the risk analysts' debate and deliver the final trading decision. 
+ +{instrument_context} + +--- + +**Rating Scale** (use exactly one): +- **Buy**: Strong conviction to enter or add to position +- **Overweight**: Favorable outlook, gradually increase exposure +- **Hold**: Maintain current position, no action needed +- **Underweight**: Reduce exposure, take partial profits +- **Sell**: Exit position or avoid entry + +**Context:** +- Trader's proposed plan: **{trader_plan}** +- Lessons from past decisions: **{past_memory_str}** + +**Required Output Structure:** +1. **Rating**: State one of Buy / Overweight / Hold / Underweight / Sell. +2. **Executive Summary**: A concise action plan covering entry strategy, position sizing, key risk levels, and time horizon. +3. **Investment Thesis**: Detailed reasoning anchored in the analysts' debate and past reflections. + +--- + +**Risk Analysts Debate History:** +{history} + +--- + +Be decisive and ground every conclusion in specific evidence from the analysts.""" + + response = llm.invoke(prompt) + + new_risk_debate_state = { + "judge_decision": response.content, + "history": risk_debate_state["history"], + "aggressive_history": risk_debate_state["aggressive_history"], + "conservative_history": risk_debate_state["conservative_history"], + "neutral_history": risk_debate_state["neutral_history"], + "latest_speaker": "Judge", + "current_aggressive_response": risk_debate_state["current_aggressive_response"], + "current_conservative_response": risk_debate_state["current_conservative_response"], + "current_neutral_response": risk_debate_state["current_neutral_response"], + "count": risk_debate_state["count"], + } + + return { + "risk_debate_state": new_risk_debate_state, + "final_trade_decision": response.content, + } + + return portfolio_manager_node diff --git a/tradingagents/agents/managers/research_manager.py b/tradingagents/agents/managers/research_manager.py index c537fa2f..3ac4b150 100644 --- a/tradingagents/agents/managers/research_manager.py +++ 
b/tradingagents/agents/managers/research_manager.py @@ -1,9 +1,12 @@ import time import json +from tradingagents.agents.utils.agent_utils import build_instrument_context + def create_research_manager(llm, memory): def research_manager_node(state) -> dict: + instrument_context = build_instrument_context(state["company_of_interest"]) history = state["investment_debate_state"].get("history", "") market_research_report = state["market_report"] sentiment_report = state["sentiment_report"] @@ -33,6 +36,8 @@ Take into account your past mistakes on similar situations. Use these insights t Here are your past reflections on mistakes: \"{past_memory_str}\" +{instrument_context} + Here is the debate: Debate History: {history}""" diff --git a/tradingagents/agents/managers/risk_manager.py b/tradingagents/agents/managers/risk_manager.py deleted file mode 100644 index 9ed03e2d..00000000 --- a/tradingagents/agents/managers/risk_manager.py +++ /dev/null @@ -1,66 +0,0 @@ -import time -import json - - -def create_risk_manager(llm, memory): - def risk_manager_node(state) -> dict: - - company_name = state["company_of_interest"] - - history = state["risk_debate_state"]["history"] - risk_debate_state = state["risk_debate_state"] - market_research_report = state["market_report"] - news_report = state["news_report"] - fundamentals_report = state["news_report"] - sentiment_report = state["sentiment_report"] - trader_plan = state["investment_plan"] - - curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}" - past_memories = memory.get_memories(curr_situation, n_matches=2) - - past_memory_str = "" - for i, rec in enumerate(past_memories, 1): - past_memory_str += rec["recommendation"] + "\n\n" - - prompt = f"""As the Risk Management Judge and Debate Facilitator, your goal is to evaluate the debate between three risk analysts—Aggressive, Neutral, and Conservative—and determine the best course of action for the trader. 
Your decision must result in a clear recommendation: Buy, Sell, or Hold. Choose Hold only if strongly justified by specific arguments, not as a fallback when all sides seem valid. Strive for clarity and decisiveness. - -Guidelines for Decision-Making: -1. **Summarize Key Arguments**: Extract the strongest points from each analyst, focusing on relevance to the context. -2. **Provide Rationale**: Support your recommendation with direct quotes and counterarguments from the debate. -3. **Refine the Trader's Plan**: Start with the trader's original plan, **{trader_plan}**, and adjust it based on the analysts' insights. -4. **Learn from Past Mistakes**: Use lessons from **{past_memory_str}** to address prior misjudgments and improve the decision you are making now to make sure you don't make a wrong BUY/SELL/HOLD call that loses money. - -Deliverables: -- A clear and actionable recommendation: Buy, Sell, or Hold. -- Detailed reasoning anchored in the debate and past reflections. - ---- - -**Analysts Debate History:** -{history} - ---- - -Focus on actionable insights and continuous improvement. 
Build on past lessons, critically evaluate all perspectives, and ensure each decision advances better outcomes.""" - - response = llm.invoke(prompt) - - new_risk_debate_state = { - "judge_decision": response.content, - "history": risk_debate_state["history"], - "aggressive_history": risk_debate_state["aggressive_history"], - "conservative_history": risk_debate_state["conservative_history"], - "neutral_history": risk_debate_state["neutral_history"], - "latest_speaker": "Judge", - "current_aggressive_response": risk_debate_state["current_aggressive_response"], - "current_conservative_response": risk_debate_state["current_conservative_response"], - "current_neutral_response": risk_debate_state["current_neutral_response"], - "count": risk_debate_state["count"], - } - - return { - "risk_debate_state": new_risk_debate_state, - "final_trade_decision": response.content, - } - - return risk_manager_node diff --git a/tradingagents/agents/risk_mgmt/aggressive_debator.py b/tradingagents/agents/risk_mgmt/aggressive_debator.py index 3905d3d1..651114a7 100644 --- a/tradingagents/agents/risk_mgmt/aggressive_debator.py +++ b/tradingagents/agents/risk_mgmt/aggressive_debator.py @@ -28,7 +28,7 @@ Market Research Report: {market_research_report} Social Media Sentiment Report: {sentiment_report} Latest World Affairs Report: {news_report} Company Fundamentals Report: {fundamentals_report} -Here is the current conversation history: {history} Here are the last arguments from the conservative analyst: {current_conservative_response} Here are the last arguments from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point. +Here is the current conversation history: {history} Here are the last arguments from the conservative analyst: {current_conservative_response} Here are the last arguments from the neutral analyst: {current_neutral_response}. 
If there are no responses from the other viewpoints yet, present your own argument based on the available data. Engage actively by addressing any specific concerns raised, refuting the weaknesses in their logic, and asserting the benefits of risk-taking to outpace market norms. Maintain a focus on debating and persuading, not just presenting data. Challenge each counterpoint to underscore why a high-risk approach is optimal. Output conversationally as if you are speaking without any special formatting.""" diff --git a/tradingagents/agents/risk_mgmt/conservative_debator.py b/tradingagents/agents/risk_mgmt/conservative_debator.py index 6b106b1b..7c8c0fd1 100644 --- a/tradingagents/agents/risk_mgmt/conservative_debator.py +++ b/tradingagents/agents/risk_mgmt/conservative_debator.py @@ -29,7 +29,7 @@ Market Research Report: {market_research_report} Social Media Sentiment Report: {sentiment_report} Latest World Affairs Report: {news_report} Company Fundamentals Report: {fundamentals_report} -Here is the current conversation history: {history} Here is the last response from the aggressive analyst: {current_aggressive_response} Here is the last response from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point. +Here is the current conversation history: {history} Here is the last response from the aggressive analyst: {current_aggressive_response} Here is the last response from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints yet, present your own argument based on the available data. Engage by questioning their optimism and emphasizing the potential downsides they may have overlooked. Address each of their counterpoints to showcase why a conservative stance is ultimately the safest path for the firm's assets. 
Focus on debating and critiquing their arguments to demonstrate the strength of a low-risk strategy over their approaches. Output conversationally as if you are speaking without any special formatting.""" diff --git a/tradingagents/agents/risk_mgmt/neutral_debator.py b/tradingagents/agents/risk_mgmt/neutral_debator.py index f6aa888d..9ed490da 100644 --- a/tradingagents/agents/risk_mgmt/neutral_debator.py +++ b/tradingagents/agents/risk_mgmt/neutral_debator.py @@ -28,7 +28,7 @@ Market Research Report: {market_research_report} Social Media Sentiment Report: {sentiment_report} Latest World Affairs Report: {news_report} Company Fundamentals Report: {fundamentals_report} -Here is the current conversation history: {history} Here is the last response from the aggressive analyst: {current_aggressive_response} Here is the last response from the conservative analyst: {current_conservative_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point. +Here is the current conversation history: {history} Here is the last response from the aggressive analyst: {current_aggressive_response} Here is the last response from the conservative analyst: {current_conservative_response}. If there are no responses from the other viewpoints yet, present your own argument based on the available data. Engage actively by analyzing both sides critically, addressing weaknesses in the aggressive and conservative arguments to advocate for a more balanced approach. Challenge each of their points to illustrate why a moderate risk strategy might offer the best of both worlds, providing growth potential while safeguarding against extreme volatility. Focus on debating rather than simply presenting data, aiming to show that a balanced view can lead to the most reliable outcomes. 
Output conversationally as if you are speaking without any special formatting.""" diff --git a/tradingagents/agents/trader/trader.py b/tradingagents/agents/trader/trader.py index 1b05c35d..6298f239 100644 --- a/tradingagents/agents/trader/trader.py +++ b/tradingagents/agents/trader/trader.py @@ -2,10 +2,13 @@ import functools import time import json +from tradingagents.agents.utils.agent_utils import build_instrument_context + def create_trader(llm, memory): def trader_node(state, name): company_name = state["company_of_interest"] + instrument_context = build_instrument_context(company_name) investment_plan = state["investment_plan"] market_research_report = state["market_report"] sentiment_report = state["sentiment_report"] @@ -24,13 +27,13 @@ def create_trader(llm, memory): context = { "role": "user", - "content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nLeverage these insights to make an informed and strategic decision.", + "content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. {instrument_context} This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nLeverage these insights to make an informed and strategic decision.", } messages = [ { "role": "system", - "content": f"""You are a trading agent analyzing market data to make investment decisions. Based on your analysis, provide a specific recommendation to buy, sell, or hold. 
End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. Do not forget to utilize lessons from past decisions to learn from your mistakes. Here is some reflections from similar situatiosn you traded in and the lessons learned: {past_memory_str}""", + "content": f"""You are a trading agent analyzing market data to make investment decisions. Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. Apply lessons from past decisions to strengthen your analysis. Here are reflections from similar situations you traded in and the lessons learned: {past_memory_str}""", }, context, ] diff --git a/tradingagents/agents/utils/agent_utils.py b/tradingagents/agents/utils/agent_utils.py index b329a3e9..e4abc4cd 100644 --- a/tradingagents/agents/utils/agent_utils.py +++ b/tradingagents/agents/utils/agent_utils.py @@ -19,6 +19,15 @@ from tradingagents.agents.utils.news_data_tools import ( get_global_news ) + +def build_instrument_context(ticker: str) -> str: + """Describe the exact instrument so agents preserve exchange-qualified tickers.""" + return ( + f"The instrument to analyze is `{ticker}`. " + "Use this exact ticker in every tool call, report, and recommendation, " + "preserving any exchange suffix (e.g. `.TO`, `.L`, `.HK`, `.T`)." 
+ ) + def create_msg_delete(): def delete_messages(state): """Clear messages and add placeholder for Anthropic compatibility""" @@ -35,4 +44,4 @@ def create_msg_delete(): return delete_messages - \ No newline at end of file + diff --git a/tradingagents/agents/utils/technical_indicators_tools.py b/tradingagents/agents/utils/technical_indicators_tools.py index c6c08bca..77acf09c 100644 --- a/tradingagents/agents/utils/technical_indicators_tools.py +++ b/tradingagents/agents/utils/technical_indicators_tools.py @@ -10,14 +10,22 @@ def get_indicators( look_back_days: Annotated[int, "how many days to look back"] = 30, ) -> str: """ - Retrieve technical indicators for a given ticker symbol. + Retrieve a single technical indicator for a given ticker symbol. Uses the configured technical_indicators vendor. Args: symbol (str): Ticker symbol of the company, e.g. AAPL, TSM - indicator (str): Technical indicator to get the analysis and report of + indicator (str): A single technical indicator name, e.g. 'rsi', 'macd'. Call this tool once per indicator. curr_date (str): The current trading date you are trading on, YYYY-mm-dd look_back_days (int): How many days to look back, default is 30 Returns: str: A formatted dataframe containing the technical indicators for the specified ticker symbol and indicator. """ - return route_to_vendor("get_indicators", symbol, indicator, curr_date, look_back_days) \ No newline at end of file + # LLMs sometimes pass multiple indicators as a comma-separated string; + # split and process each individually. 
+ indicators = [i.strip() for i in indicator.split(",") if i.strip()] + if len(indicators) > 1: + results = [] + for ind in indicators: + results.append(route_to_vendor("get_indicators", symbol, ind, curr_date, look_back_days)) + return "\n\n".join(results) + return route_to_vendor("get_indicators", symbol, indicator.strip(), curr_date, look_back_days) \ No newline at end of file diff --git a/tradingagents/dataflows/stockstats_utils.py b/tradingagents/dataflows/stockstats_utils.py index b31935b7..47d5460a 100644 --- a/tradingagents/dataflows/stockstats_utils.py +++ b/tradingagents/dataflows/stockstats_utils.py @@ -1,10 +1,48 @@ +import time +import logging + import pandas as pd import yfinance as yf +from yfinance.exceptions import YFRateLimitError from stockstats import wrap from typing import Annotated import os from .config import get_config +logger = logging.getLogger(__name__) + + +def yf_retry(func, max_retries=3, base_delay=2.0): + """Execute a yfinance call with exponential backoff on rate limits. + + yfinance raises YFRateLimitError on HTTP 429 responses but does not + retry them internally. This wrapper adds retry logic specifically + for rate limits. Other exceptions propagate immediately. 
+ """ + for attempt in range(max_retries + 1): + try: + return func() + except YFRateLimitError: + if attempt < max_retries: + delay = base_delay * (2 ** attempt) + logger.warning(f"Yahoo Finance rate limited, retrying in {delay:.0f}s (attempt {attempt + 1}/{max_retries})") + time.sleep(delay) + else: + raise + + +def _clean_dataframe(data: pd.DataFrame) -> pd.DataFrame: + """Normalize a stock DataFrame for stockstats: parse dates, drop invalid rows, fill price gaps.""" + data["Date"] = pd.to_datetime(data["Date"], errors="coerce") + data = data.dropna(subset=["Date"]) + + price_cols = [c for c in ["Open", "High", "Low", "Close", "Volume"] if c in data.columns] + data[price_cols] = data[price_cols].apply(pd.to_numeric, errors="coerce") + data = data.dropna(subset=["Close"]) + data[price_cols] = data[price_cols].ffill().bfill() + + return data + class StockstatsUtils: @staticmethod @@ -36,20 +74,20 @@ class StockstatsUtils: ) if os.path.exists(data_file): - data = pd.read_csv(data_file) - data["Date"] = pd.to_datetime(data["Date"]) + data = pd.read_csv(data_file, on_bad_lines="skip") else: - data = yf.download( + data = yf_retry(lambda: yf.download( symbol, start=start_date_str, end=end_date_str, multi_level_index=False, progress=False, auto_adjust=True, - ) + )) data = data.reset_index() data.to_csv(data_file, index=False) + data = _clean_dataframe(data) df = wrap(data) df["Date"] = df["Date"].dt.strftime("%Y-%m-%d") curr_date_str = curr_date_dt.strftime("%Y-%m-%d") diff --git a/tradingagents/dataflows/y_finance.py b/tradingagents/dataflows/y_finance.py index ffe5be61..63b34237 100644 --- a/tradingagents/dataflows/y_finance.py +++ b/tradingagents/dataflows/y_finance.py @@ -5,7 +5,7 @@ import time from yfinance.exceptions import YFRateLimitError import yfinance as yf import os -from .stockstats_utils import StockstatsUtils +from .stockstats_utils import StockstatsUtils, _clean_dataframe, yf_retry import pandas as pd # for polygon data processing from polygon import 
RESTClient # for polygon client (need to pip install polygon-api-client) @@ -31,6 +31,14 @@ def get_YFin_data_online( # rename? or keep, add fallback inside raise ValueError("date format must be YYYY-MM-DD") time.sleep(1.0) # prevent concurrency + datetime.strptime(start_date, "%Y-%m-%d") + datetime.strptime(end_date, "%Y-%m-%d") + + # Create ticker object + ticker = yf.Ticker(symbol.upper()) + + # Fetch historical data for the specified date range + data = yf_retry(lambda: ticker.history(start=start_date, end=end_date)) # first try yfinance for attempt in range(max_retries): @@ -261,45 +269,45 @@ def _get_stock_stats_bulk( os.path.join( config.get("data_cache_dir", "data"), f"{symbol}-YFin-data-2015-01-01-2025-03-25.csv", - ) + ), + on_bad_lines="skip", ) - df = wrap(data) except FileNotFoundError: raise Exception("Stockstats fail: Yahoo Finance data not fetched yet!") else: # Online data fetching with caching today_date = pd.Timestamp.today() curr_date_dt = pd.to_datetime(curr_date) - + end_date = today_date start_date = today_date - pd.DateOffset(years=15) start_date_str = start_date.strftime("%Y-%m-%d") end_date_str = end_date.strftime("%Y-%m-%d") - + os.makedirs(config["data_cache_dir"], exist_ok=True) - + data_file = os.path.join( config["data_cache_dir"], f"{symbol}-YFin-data-{start_date_str}-{end_date_str}.csv", ) - + if os.path.exists(data_file): - data = pd.read_csv(data_file) - data["Date"] = pd.to_datetime(data["Date"]) + data = pd.read_csv(data_file, on_bad_lines="skip") else: - data = yf.download( + data = yf_retry(lambda: yf.download( symbol, start=start_date_str, end=end_date_str, multi_level_index=False, progress=False, auto_adjust=True, - ) + )) data = data.reset_index() data.to_csv(data_file, index=False) - - df = wrap(data) - df["Date"] = df["Date"].dt.strftime("%Y-%m-%d") + + data = _clean_dataframe(data) + df = wrap(data) + df["Date"] = df["Date"].dt.strftime("%Y-%m-%d") # Calculate the indicator for all rows at once df[indicator] # This 
triggers stockstats to calculate the indicator @@ -352,7 +360,7 @@ def get_fundamentals( """Get company fundamentals overview from yfinance.""" try: ticker_obj = yf.Ticker(ticker.upper()) - info = ticker_obj.info + info = yf_retry(lambda: ticker_obj.info) if not info: return f"No fundamentals data found for symbol '{ticker}'" @@ -410,11 +418,11 @@ def get_balance_sheet( """Get balance sheet data from yfinance.""" try: ticker_obj = yf.Ticker(ticker.upper()) - + if freq.lower() == "quarterly": - data = ticker_obj.quarterly_balance_sheet + data = yf_retry(lambda: ticker_obj.quarterly_balance_sheet) else: - data = ticker_obj.balance_sheet + data = yf_retry(lambda: ticker_obj.balance_sheet) if data.empty: return f"No balance sheet data found for symbol '{ticker}'" @@ -440,11 +448,11 @@ def get_cashflow( """Get cash flow data from yfinance.""" try: ticker_obj = yf.Ticker(ticker.upper()) - + if freq.lower() == "quarterly": - data = ticker_obj.quarterly_cashflow + data = yf_retry(lambda: ticker_obj.quarterly_cashflow) else: - data = ticker_obj.cashflow + data = yf_retry(lambda: ticker_obj.cashflow) if data.empty: return f"No cash flow data found for symbol '{ticker}'" @@ -470,11 +478,11 @@ def get_income_statement( """Get income statement data from yfinance.""" try: ticker_obj = yf.Ticker(ticker.upper()) - + if freq.lower() == "quarterly": - data = ticker_obj.quarterly_income_stmt + data = yf_retry(lambda: ticker_obj.quarterly_income_stmt) else: - data = ticker_obj.income_stmt + data = yf_retry(lambda: ticker_obj.income_stmt) if data.empty: return f"No income statement data found for symbol '{ticker}'" @@ -498,7 +506,7 @@ def get_insider_transactions( """Get insider transactions data from yfinance.""" try: ticker_obj = yf.Ticker(ticker.upper()) - data = ticker_obj.insider_transactions + data = yf_retry(lambda: ticker_obj.insider_transactions) if data is None or data.empty: return f"No insider transactions data found for symbol '{ticker}'" diff --git 
a/tradingagents/default_config.py b/tradingagents/default_config.py index ecf0dc29..898e1e1e 100644 --- a/tradingagents/default_config.py +++ b/tradingagents/default_config.py @@ -15,6 +15,7 @@ DEFAULT_CONFIG = { # Provider-specific thinking configuration "google_thinking_level": None, # "high", "minimal", etc. "openai_reasoning_effort": None, # "medium", "high", "low" + "anthropic_effort": None, # "high", "medium", "low" # Debate and discussion settings "max_debate_rounds": 1, "max_risk_discuss_rounds": 1, diff --git a/tradingagents/graph/conditional_logic.py b/tradingagents/graph/conditional_logic.py index 7b1b1f90..48371793 100644 --- a/tradingagents/graph/conditional_logic.py +++ b/tradingagents/graph/conditional_logic.py @@ -59,7 +59,7 @@ class ConditionalLogic: if ( state["risk_debate_state"]["count"] >= 3 * self.max_risk_discuss_rounds ): # 3 rounds of back-and-forth between 3 agents - return "Risk Judge" + return "Portfolio Manager" if state["risk_debate_state"]["latest_speaker"].startswith("Aggressive"): return "Conservative Analyst" if state["risk_debate_state"]["latest_speaker"].startswith("Conservative"): diff --git a/tradingagents/graph/propagation.py b/tradingagents/graph/propagation.py index 7aba5258..0fd10c0c 100644 --- a/tradingagents/graph/propagation.py +++ b/tradingagents/graph/propagation.py @@ -24,14 +24,26 @@ class Propagator: "company_of_interest": company_name, "trade_date": str(trade_date), "investment_debate_state": InvestDebateState( - {"history": "", "current_response": "", "count": 0} + { + "bull_history": "", + "bear_history": "", + "history": "", + "current_response": "", + "judge_decision": "", + "count": 0, + } ), "risk_debate_state": RiskDebateState( { + "aggressive_history": "", + "conservative_history": "", + "neutral_history": "", "history": "", + "latest_speaker": "", "current_aggressive_response": "", "current_conservative_response": "", "current_neutral_response": "", + "judge_decision": "", "count": 0, } ), diff --git 
a/tradingagents/graph/reflection.py b/tradingagents/graph/reflection.py index 33303231..85438595 100644 --- a/tradingagents/graph/reflection.py +++ b/tradingagents/graph/reflection.py @@ -110,12 +110,12 @@ Adhere strictly to these instructions, and ensure your output is detailed, accur ) invest_judge_memory.add_situations([(situation, result)]) - def reflect_risk_manager(self, current_state, returns_losses, risk_manager_memory): - """Reflect on risk manager's decision and update memory.""" + def reflect_portfolio_manager(self, current_state, returns_losses, portfolio_manager_memory): + """Reflect on portfolio manager's decision and update memory.""" situation = self._extract_current_situation(current_state) judge_decision = current_state["risk_debate_state"]["judge_decision"] result = self._reflect_on_component( - "RISK JUDGE", judge_decision, situation, returns_losses + "PORTFOLIO MANAGER", judge_decision, situation, returns_losses ) - risk_manager_memory.add_situations([(situation, result)]) + portfolio_manager_memory.add_situations([(situation, result)]) diff --git a/tradingagents/graph/setup.py b/tradingagents/graph/setup.py index 772efe7f..e0771c65 100644 --- a/tradingagents/graph/setup.py +++ b/tradingagents/graph/setup.py @@ -23,7 +23,7 @@ class GraphSetup: bear_memory, trader_memory, invest_judge_memory, - risk_manager_memory, + portfolio_manager_memory, conditional_logic: ConditionalLogic, ): """Initialize with required components.""" @@ -34,7 +34,7 @@ class GraphSetup: self.bear_memory = bear_memory self.trader_memory = trader_memory self.invest_judge_memory = invest_judge_memory - self.risk_manager_memory = risk_manager_memory + self.portfolio_manager_memory = portfolio_manager_memory self.conditional_logic = conditional_logic def setup_graph( @@ -101,8 +101,8 @@ class GraphSetup: aggressive_analyst = create_aggressive_debator(self.quick_thinking_llm) neutral_analyst = create_neutral_debator(self.quick_thinking_llm) conservative_analyst = 
create_conservative_debator(self.quick_thinking_llm) - risk_manager_node = create_risk_manager( - self.deep_thinking_llm, self.risk_manager_memory + portfolio_manager_node = create_portfolio_manager( + self.deep_thinking_llm, self.portfolio_manager_memory ) # Create workflow @@ -124,7 +124,7 @@ class GraphSetup: workflow.add_node("Aggressive Analyst", aggressive_analyst) workflow.add_node("Neutral Analyst", neutral_analyst) workflow.add_node("Conservative Analyst", conservative_analyst) - workflow.add_node("Risk Judge", risk_manager_node) + workflow.add_node("Portfolio Manager", portfolio_manager_node) # Define edges # Start with the first analyst @@ -176,7 +176,7 @@ class GraphSetup: self.conditional_logic.should_continue_risk_analysis, { "Conservative Analyst": "Conservative Analyst", - "Risk Judge": "Risk Judge", + "Portfolio Manager": "Portfolio Manager", }, ) workflow.add_conditional_edges( @@ -184,7 +184,7 @@ class GraphSetup: self.conditional_logic.should_continue_risk_analysis, { "Neutral Analyst": "Neutral Analyst", - "Risk Judge": "Risk Judge", + "Portfolio Manager": "Portfolio Manager", }, ) workflow.add_conditional_edges( @@ -192,11 +192,11 @@ class GraphSetup: self.conditional_logic.should_continue_risk_analysis, { "Aggressive Analyst": "Aggressive Analyst", - "Risk Judge": "Risk Judge", + "Portfolio Manager": "Portfolio Manager", }, ) - workflow.add_edge("Risk Judge", END) + workflow.add_edge("Portfolio Manager", END) # Compile and return return workflow.compile() diff --git a/tradingagents/graph/signal_processing.py b/tradingagents/graph/signal_processing.py index 903e8529..f96c1efa 100644 --- a/tradingagents/graph/signal_processing.py +++ b/tradingagents/graph/signal_processing.py @@ -18,12 +18,14 @@ class SignalProcessor: full_signal: Complete trading signal text Returns: - Extracted decision (BUY, SELL, or HOLD) + Extracted rating (BUY, OVERWEIGHT, HOLD, UNDERWEIGHT, or SELL) """ messages = [ ( "system", - "You are an efficient assistant designed 
to analyze paragraphs or financial reports provided by a group of analysts. Your task is to extract the investment decision: SELL, BUY, or HOLD. Provide only the extracted decision (SELL, BUY, or HOLD) as your output, without adding any additional text or information.", + "You are an efficient assistant that extracts the trading decision from analyst reports. " + "Extract the rating as exactly one of: BUY, OVERWEIGHT, HOLD, UNDERWEIGHT, SELL. " + "Output only the single rating word, nothing else.", ), ("human", full_signal), ] diff --git a/tradingagents/graph/trading_graph.py b/tradingagents/graph/trading_graph.py index 44ecca0c..c8cd7492 100644 --- a/tradingagents/graph/trading_graph.py +++ b/tradingagents/graph/trading_graph.py @@ -99,13 +99,16 @@ class TradingAgentsGraph: self.bear_memory = FinancialSituationMemory("bear_memory", self.config) self.trader_memory = FinancialSituationMemory("trader_memory", self.config) self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory", self.config) - self.risk_manager_memory = FinancialSituationMemory("risk_manager_memory", self.config) + self.portfolio_manager_memory = FinancialSituationMemory("portfolio_manager_memory", self.config) # Create tool nodes self.tool_nodes = self._create_tool_nodes() # Initialize components - self.conditional_logic = ConditionalLogic() + self.conditional_logic = ConditionalLogic( + max_debate_rounds=self.config["max_debate_rounds"], + max_risk_discuss_rounds=self.config["max_risk_discuss_rounds"], + ) self.graph_setup = GraphSetup( self.quick_thinking_llm, self.deep_thinking_llm, @@ -114,7 +117,7 @@ class TradingAgentsGraph: self.bear_memory, self.trader_memory, self.invest_judge_memory, - self.risk_manager_memory, + self.portfolio_manager_memory, self.conditional_logic, ) @@ -145,6 +148,11 @@ class TradingAgentsGraph: if reasoning_effort: kwargs["reasoning_effort"] = reasoning_effort + elif provider == "anthropic": + effort = self.config.get("anthropic_effort") + if effort: + 
kwargs["effort"] = effort + return kwargs def _create_tool_nodes(self) -> Dict[str, ToolNode]: @@ -257,6 +265,7 @@ class TradingAgentsGraph: with open( f"eval_results/{self.ticker}/TradingAgentsStrategy_logs/full_states_log_{trade_date}.json", "w", + encoding="utf-8", ) as f: json.dump(self.log_states_dict, f, indent=4) @@ -274,8 +283,8 @@ class TradingAgentsGraph: self.reflector.reflect_invest_judge( self.curr_state, returns_losses, self.invest_judge_memory ) - self.reflector.reflect_risk_manager( - self.curr_state, returns_losses, self.risk_manager_memory + self.reflector.reflect_portfolio_manager( + self.curr_state, returns_losses, self.portfolio_manager_memory ) def process_signal(self, full_signal): diff --git a/tradingagents/llm_clients/anthropic_client.py b/tradingagents/llm_clients/anthropic_client.py index e2f1abba..2c1e5a67 100644 --- a/tradingagents/llm_clients/anthropic_client.py +++ b/tradingagents/llm_clients/anthropic_client.py @@ -2,9 +2,26 @@ from typing import Any, Optional from langchain_anthropic import ChatAnthropic -from .base_client import BaseLLMClient +from .base_client import BaseLLMClient, normalize_content from .validators import validate_model +_PASSTHROUGH_KWARGS = ( + "timeout", "max_retries", "api_key", "max_tokens", + "callbacks", "http_client", "http_async_client", "effort", +) + + +class NormalizedChatAnthropic(ChatAnthropic): + """ChatAnthropic with normalized content output. + + Claude models with extended thinking or tool use return content as a + list of typed blocks. This normalizes to string for consistent + downstream handling. 
+ """ + + def invoke(self, input, config=None, **kwargs): + return normalize_content(super().invoke(input, config, **kwargs)) + class AnthropicClient(BaseLLMClient): """Client for Anthropic Claude models.""" @@ -16,11 +33,11 @@ class AnthropicClient(BaseLLMClient): """Return configured ChatAnthropic instance.""" llm_kwargs = {"model": self.model} - for key in ("timeout", "max_retries", "api_key", "max_tokens", "callbacks"): + for key in _PASSTHROUGH_KWARGS: if key in self.kwargs: llm_kwargs[key] = self.kwargs[key] - return ChatAnthropic(**llm_kwargs) + return NormalizedChatAnthropic(**llm_kwargs) def validate_model(self) -> bool: """Validate model for Anthropic.""" diff --git a/tradingagents/llm_clients/base_client.py b/tradingagents/llm_clients/base_client.py index 43845575..9c3dd17c 100644 --- a/tradingagents/llm_clients/base_client.py +++ b/tradingagents/llm_clients/base_client.py @@ -2,6 +2,25 @@ from abc import ABC, abstractmethod from typing import Any, Optional +def normalize_content(response): + """Normalize LLM response content to a plain string. + + Multiple providers (OpenAI Responses API, Google Gemini 3) return content + as a list of typed blocks, e.g. [{'type': 'reasoning', ...}, {'type': 'text', 'text': '...'}]. + Downstream agents expect response.content to be a string. This extracts + and joins the text blocks, discarding reasoning/metadata blocks. 
+ """ + content = response.content + if isinstance(content, list): + texts = [ + item.get("text", "") if isinstance(item, dict) and item.get("type") == "text" + else item if isinstance(item, str) else "" + for item in content + ] + response.content = "\n".join(t for t in texts if t) + return response + + class BaseLLMClient(ABC): """Abstract base class for LLM clients.""" diff --git a/tradingagents/llm_clients/factory.py b/tradingagents/llm_clients/factory.py index 028c88a2..93c2a7d3 100644 --- a/tradingagents/llm_clients/factory.py +++ b/tradingagents/llm_clients/factory.py @@ -19,6 +19,12 @@ def create_llm_client( model: Model name/identifier base_url: Optional base URL for API endpoint **kwargs: Additional provider-specific arguments + - http_client: Custom httpx.Client for SSL proxy or certificate customization + - http_async_client: Custom httpx.AsyncClient for async operations + - timeout: Request timeout in seconds + - max_retries: Maximum retry attempts + - api_key: API key for the provider + - callbacks: LangChain callbacks Returns: Configured BaseLLMClient instance diff --git a/tradingagents/llm_clients/google_client.py b/tradingagents/llm_clients/google_client.py index a1bd386b..7401df0e 100644 --- a/tradingagents/llm_clients/google_client.py +++ b/tradingagents/llm_clients/google_client.py @@ -2,30 +2,19 @@ from typing import Any, Optional from langchain_google_genai import ChatGoogleGenerativeAI -from .base_client import BaseLLMClient +from .base_client import BaseLLMClient, normalize_content from .validators import validate_model class NormalizedChatGoogleGenerativeAI(ChatGoogleGenerativeAI): """ChatGoogleGenerativeAI with normalized content output. - Gemini 3 models return content as list: [{'type': 'text', 'text': '...'}] + Gemini 3 models return content as list of typed blocks. This normalizes to string for consistent downstream handling. 
""" - def _normalize_content(self, response): - content = response.content - if isinstance(content, list): - texts = [ - item.get("text", "") if isinstance(item, dict) and item.get("type") == "text" - else item if isinstance(item, str) else "" - for item in content - ] - response.content = "\n".join(t for t in texts if t) - return response - def invoke(self, input, config=None, **kwargs): - return self._normalize_content(super().invoke(input, config, **kwargs)) + return normalize_content(super().invoke(input, config, **kwargs)) class GoogleClient(BaseLLMClient): @@ -38,7 +27,7 @@ class GoogleClient(BaseLLMClient): """Return configured ChatGoogleGenerativeAI instance.""" llm_kwargs = {"model": self.model} - for key in ("timeout", "max_retries", "google_api_key", "callbacks"): + for key in ("timeout", "max_retries", "google_api_key", "callbacks", "http_client", "http_async_client"): if key in self.kwargs: llm_kwargs[key] = self.kwargs[key] diff --git a/tradingagents/llm_clients/openai_client.py b/tradingagents/llm_clients/openai_client.py index 7011895f..fd9b4e33 100644 --- a/tradingagents/llm_clients/openai_client.py +++ b/tradingagents/llm_clients/openai_client.py @@ -3,33 +3,43 @@ from typing import Any, Optional from langchain_openai import ChatOpenAI -from .base_client import BaseLLMClient +from .base_client import BaseLLMClient, normalize_content from .validators import validate_model -class UnifiedChatOpenAI(ChatOpenAI): - """ChatOpenAI subclass that strips incompatible params for certain models.""" +class NormalizedChatOpenAI(ChatOpenAI): + """ChatOpenAI with normalized content output. - def __init__(self, **kwargs): - model = kwargs.get("model", "") - if self._is_reasoning_model(model): - kwargs.pop("temperature", None) - kwargs.pop("top_p", None) - super().__init__(**kwargs) + The Responses API returns content as a list of typed blocks + (reasoning, text, etc.). This normalizes to string for consistent + downstream handling. 
+ """ - @staticmethod - def _is_reasoning_model(model: str) -> bool: - """Check if model is a reasoning model that doesn't support temperature.""" - model_lower = model.lower() - return ( - model_lower.startswith("o1") - or model_lower.startswith("o3") - or "gpt-5" in model_lower - ) + def invoke(self, input, config=None, **kwargs): + return normalize_content(super().invoke(input, config, **kwargs)) + +# Kwargs forwarded from user config to ChatOpenAI +_PASSTHROUGH_KWARGS = ( + "timeout", "max_retries", "reasoning_effort", + "api_key", "callbacks", "http_client", "http_async_client", +) + +# Provider base URLs and API key env vars +_PROVIDER_CONFIG = { + "xai": ("https://api.x.ai/v1", "XAI_API_KEY"), + "openrouter": ("https://openrouter.ai/api/v1", "OPENROUTER_API_KEY"), + "ollama": ("http://localhost:11434/v1", None), +} class OpenAIClient(BaseLLMClient): - """Client for OpenAI, Ollama, OpenRouter, and xAI providers.""" + """Client for OpenAI, Ollama, OpenRouter, and xAI providers. + + For native OpenAI models, uses the Responses API (/v1/responses) which + supports reasoning_effort with function tools across all model families + (GPT-4.1, GPT-5). Third-party compatible providers (xAI, OpenRouter, + Ollama) use standard Chat Completions. 
+ """ def __init__( self, @@ -45,27 +55,30 @@ class OpenAIClient(BaseLLMClient): """Return configured ChatOpenAI instance.""" llm_kwargs = {"model": self.model} - if self.provider == "xai": - llm_kwargs["base_url"] = "https://api.x.ai/v1" - api_key = os.environ.get("XAI_API_KEY") - if api_key: - llm_kwargs["api_key"] = api_key - elif self.provider == "openrouter": - llm_kwargs["base_url"] = "https://openrouter.ai/api/v1" - api_key = os.environ.get("OPENROUTER_API_KEY") - if api_key: - llm_kwargs["api_key"] = api_key - elif self.provider == "ollama": - llm_kwargs["base_url"] = "http://localhost:11434/v1" - llm_kwargs["api_key"] = "ollama" # Ollama doesn't require auth + # Provider-specific base URL and auth + if self.provider in _PROVIDER_CONFIG: + base_url, api_key_env = _PROVIDER_CONFIG[self.provider] + llm_kwargs["base_url"] = base_url + if api_key_env: + api_key = os.environ.get(api_key_env) + if api_key: + llm_kwargs["api_key"] = api_key + else: + llm_kwargs["api_key"] = "ollama" elif self.base_url: llm_kwargs["base_url"] = self.base_url - for key in ("timeout", "max_retries", "reasoning_effort", "api_key", "callbacks"): + # Forward user-provided kwargs + for key in _PASSTHROUGH_KWARGS: if key in self.kwargs: llm_kwargs[key] = self.kwargs[key] - return UnifiedChatOpenAI(**llm_kwargs) + # Native OpenAI: use Responses API for consistent behavior across + # all model families. Third-party providers use Chat Completions. + if self.provider == "openai": + llm_kwargs["use_responses_api"] = True + + return NormalizedChatOpenAI(**llm_kwargs) def validate_model(self) -> bool: """Validate model for the provider.""" diff --git a/tradingagents/llm_clients/validators.py b/tradingagents/llm_clients/validators.py index 3c0f2290..1e2388b3 100644 --- a/tradingagents/llm_clients/validators.py +++ b/tradingagents/llm_clients/validators.py @@ -6,59 +6,44 @@ Let LLM providers use their own defaults for unspecified params. 
VALID_MODELS = { "openai": [ - # GPT-5 series (2025) + # GPT-5 series + "gpt-5.4-pro", + "gpt-5.4", "gpt-5.2", "gpt-5.1", "gpt-5", "gpt-5-mini", "gpt-5-nano", - # GPT-4.1 series (2025) + # GPT-4.1 series "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", - # o-series reasoning models - "o4-mini", - "o3", - "o3-mini", - "o1", - "o1-preview", - # GPT-4o series (legacy but still supported) - "gpt-4o", - "gpt-4o-mini", ], "anthropic": [ - # Claude 4.5 series (2025) + # Claude 4.6 series (latest) + "claude-opus-4-6", + "claude-sonnet-4-6", + # Claude 4.5 series "claude-opus-4-5", "claude-sonnet-4-5", "claude-haiku-4-5", - # Claude 4.x series - "claude-opus-4-1-20250805", - "claude-sonnet-4-20250514", - # Claude 3.7 series - "claude-3-7-sonnet-20250219", - # Claude 3.5 series (legacy) - "claude-3-5-haiku-20241022", - "claude-3-5-sonnet-20241022", ], "google": [ + # Gemini 3.1 series (preview) + "gemini-3.1-pro-preview", + "gemini-3.1-flash-lite-preview", # Gemini 3 series (preview) - "gemini-3-pro-preview", "gemini-3-flash-preview", # Gemini 2.5 series "gemini-2.5-pro", "gemini-2.5-flash", "gemini-2.5-flash-lite", - # Gemini 2.0 series - "gemini-2.0-flash", - "gemini-2.0-flash-lite", ], "xai": [ # Grok 4.1 series - "grok-4-1-fast", "grok-4-1-fast-reasoning", "grok-4-1-fast-non-reasoning", # Grok 4 series - "grok-4", "grok-4-0709", "grok-4-fast-reasoning", "grok-4-fast-non-reasoning",