Merge pull request #90 from aguzererler/copilot/check-upstream-changes

docs: upstream PR triage and evaluation (2026-03-23)
This commit is contained in:
ahmet guzererler 2026-03-23 17:48:24 +01:00 committed by GitHub
commit 5bdd42f818
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
34 changed files with 775 additions and 157 deletions

View File

@ -28,7 +28,7 @@
# TradingAgents: Multi-Agents LLM Financial Trading Framework # TradingAgents: Multi-Agents LLM Financial Trading Framework
## News ## News
- [2026-03] **TradingAgents v0.2.1** released with GPT-5.4, Gemini 3.1, Claude 4.6 model coverage and improved system stability. - [2026-03] **TradingAgents v0.2.2** released with GPT-5.4/Gemini 3.1/Claude 4.6 model coverage, five-tier rating scale, OpenAI Responses API, Anthropic effort control, and cross-platform stability.
- [2026-02] **TradingAgents v0.2.0** released with multi-provider LLM support (GPT-5.x, Gemini 3.x, Claude 4.x, Grok 4.x) and improved system architecture. - [2026-02] **TradingAgents v0.2.0** released with multi-provider LLM support (GPT-5.x, Gemini 3.x, Claude 4.x, Grok 4.x) and improved system architecture.
- [2026-01] **Trading-R1** [Technical Report](https://arxiv.org/abs/2509.11420) released, with [Terminal](https://github.com/TauricResearch/Trading-R1) expected to land soon. - [2026-01] **Trading-R1** [Technical Report](https://arxiv.org/abs/2509.11420) released, with [Terminal](https://github.com/TauricResearch/Trading-R1) expected to land soon.
@ -112,9 +112,9 @@ conda create -n tradingagents python=3.13
conda activate tradingagents conda activate tradingagents
``` ```
Install dependencies: Install the package and its dependencies:
```bash ```bash
pip install -r requirements.txt pip install .
``` ```
### Required APIs ### Required APIs
@ -139,11 +139,12 @@ cp .env.example .env
### CLI Usage ### CLI Usage
You can also try out the CLI directly by running: Launch the interactive CLI:
```bash ```bash
python -m cli.main tradingagents # installed command
python -m cli.main # alternative: run directly from source
``` ```
You will see a screen where you can select your desired tickers, date, LLMs, research depth, etc. You will see a screen where you can select your desired tickers, analysis date, LLM provider, research depth, and more.
<p align="center"> <p align="center">
<img src="assets/cli/cli_init.png" width="100%" style="display: inline-block; margin: 0 2%;"> <img src="assets/cli/cli_init.png" width="100%" style="display: inline-block; margin: 0 2%;">

View File

@ -492,7 +492,7 @@ def _ask_provider_thinking_config(provider: str):
def get_user_selections(): def get_user_selections():
"""Get all user selections before starting the analysis display.""" """Get all user selections before starting the analysis display."""
# Display ASCII art welcome message # Display ASCII art welcome message
with open("./cli/static/welcome.txt", "r", encoding="utf-8") as f: with open(Path(__file__).parent / "static" / "welcome.txt", "r", encoding="utf-8") as f:
welcome_ascii = f.read() welcome_ascii = f.read()
# Create welcome box content # Create welcome box content
@ -531,7 +531,9 @@ def get_user_selections():
# Step 1: Ticker symbol # Step 1: Ticker symbol
console.print( console.print(
create_question_box( create_question_box(
"Step 1: Ticker Symbol", "Enter the ticker symbol to analyze", "SPY" "Step 1: Ticker Symbol",
"Enter the exact ticker symbol to analyze, including exchange suffix when needed (examples: SPY, CNC.TO, 7203.T, 0700.HK)",
"SPY",
) )
) )
selected_ticker = get_ticker() selected_ticker = get_ticker()
@ -862,9 +864,11 @@ ANALYST_REPORT_MAP = {
def update_analyst_statuses(message_buffer, chunk): def update_analyst_statuses(message_buffer, chunk):
"""Update all analyst statuses based on current report state. """Update analyst statuses based on accumulated report state.
Logic: Logic:
- Store new report content from the current chunk if present
- Check accumulated report_sections (not just current chunk) for status
- Analysts with reports = completed - Analysts with reports = completed
- First analyst without report = in_progress - First analyst without report = in_progress
- Remaining analysts without reports = pending - Remaining analysts without reports = pending
@ -879,11 +883,16 @@ def update_analyst_statuses(message_buffer, chunk):
agent_name = ANALYST_AGENT_NAMES[analyst_key] agent_name = ANALYST_AGENT_NAMES[analyst_key]
report_key = ANALYST_REPORT_MAP[analyst_key] report_key = ANALYST_REPORT_MAP[analyst_key]
has_report = bool(chunk.get(report_key))
# Capture new report content from current chunk
if chunk.get(report_key):
message_buffer.update_report_section(report_key, chunk[report_key])
# Determine status from accumulated sections, not just current chunk
has_report = bool(message_buffer.report_sections.get(report_key))
if has_report: if has_report:
message_buffer.update_agent_status(agent_name, "completed") message_buffer.update_agent_status(agent_name, "completed")
message_buffer.update_report_section(report_key, chunk[report_key])
elif not found_active: elif not found_active:
message_buffer.update_agent_status(agent_name, "in_progress") message_buffer.update_agent_status(agent_name, "in_progress")
found_active = True found_active = True
@ -1103,9 +1112,9 @@ def run_analysis():
content = obj.report_sections[section_name] content = obj.report_sections[section_name]
if content: if content:
file_name = f"{section_name}.md" file_name = f"{section_name}.md"
text = "\n".join(str(item) for item in content) if isinstance(content, list) else content
with open(report_dir / file_name, "w", encoding="utf-8") as f: with open(report_dir / file_name, "w", encoding="utf-8") as f:
f.write(content) f.write(text)
return wrapper return wrapper
message_buffer.add_message = save_message_decorator(message_buffer, "add_message") message_buffer.add_message = save_message_decorator(message_buffer, "add_message")

View File

@ -9,6 +9,9 @@ from cli.models import AnalystType
console = Console() console = Console()
TICKER_INPUT_EXAMPLES = "Examples: SPY, CNC.TO, 7203.T, 0700.HK"
def _fetch_ollama_models(base_url: str = "http://localhost:11434") -> list[tuple[str, str]]: def _fetch_ollama_models(base_url: str = "http://localhost:11434") -> list[tuple[str, str]]:
"""Fetch available models from a running Ollama instance.""" """Fetch available models from a running Ollama instance."""
try: try:
@ -31,7 +34,7 @@ ANALYST_ORDER = [
def get_ticker() -> str: def get_ticker() -> str:
"""Prompt the user to enter a ticker symbol.""" """Prompt the user to enter a ticker symbol."""
ticker = questionary.text( ticker = questionary.text(
"Enter the ticker symbol to analyze:", f"Enter the exact ticker symbol to analyze ({TICKER_INPUT_EXAMPLES}):",
validate=lambda x: len(x.strip()) > 0 or "Please enter a valid ticker symbol.", validate=lambda x: len(x.strip()) > 0 or "Please enter a valid ticker symbol.",
style=questionary.Style( style=questionary.Style(
[ [
@ -45,6 +48,11 @@ def get_ticker() -> str:
console.print("\n[red]No ticker symbol provided. Exiting...[/red]") console.print("\n[red]No ticker symbol provided. Exiting...[/red]")
exit(1) exit(1)
return normalize_ticker_symbol(ticker)
def normalize_ticker_symbol(ticker: str) -> str:
"""Normalize ticker input while preserving exchange suffixes."""
return ticker.strip().upper() return ticker.strip().upper()
@ -410,6 +418,26 @@ def ask_openai_reasoning_effort() -> str:
).ask() ).ask()
def ask_anthropic_effort() -> str | None:
    """Prompt the user for an Anthropic effort level.

    The effort level controls token usage and response thoroughness on
    Claude 4.5+ and 4.6 models. Returns "high", "medium", or "low", or
    None if the prompt is dismissed without a selection.
    """
    # Label/value pairs shown in the selection menu.
    effort_options = [
        ("High (recommended)", "high"),
        ("Medium (balanced)", "medium"),
        ("Low (faster, cheaper)", "low"),
    ]
    # Cyan highlighting matches the styling of the other CLI prompts.
    cyan = "fg:cyan noinherit"
    prompt_style = questionary.Style(
        [("selected", cyan), ("highlighted", cyan), ("pointer", cyan)]
    )
    return questionary.select(
        "Select Effort Level:",
        choices=[questionary.Choice(label, value) for label, value in effort_options],
        style=prompt_style,
    ).ask()
def ask_gemini_thinking_config() -> str | None: def ask_gemini_thinking_config() -> str | None:
"""Ask for Gemini thinking configuration. """Ask for Gemini thinking configuration.

470
docs/upstream_pr_review.md Normal file
View File

@ -0,0 +1,470 @@
# Upstream PR Review — TauricResearch/TradingAgents
**Review Date**: 2026-03-23
**Upstream Repository**: [TauricResearch/TradingAgents](https://github.com/TauricResearch/TradingAgents)
**Latest Upstream Release**: v0.2.2 (2026-03-22)
**Fork**: aguzererler/TradingAgents
---
## Summary
This document reviews all 60+ open PRs on the upstream TauricResearch/TradingAgents repository, evaluates their relevance to our fork, and provides recommendations for which to consider merging or cherry-picking.
Our fork has significant custom work (AgentOS observability layer, scanner pipeline, portfolio management, 725+ unit tests) that the upstream doesn't have. The upstream has been actively evolving with v0.2.2 bringing a five-tier rating framework, OpenAI Responses API support, and various bug fixes.
### Priority Legend
| Priority | Meaning |
|----------|---------|
| 🔴 **HIGH** | Strongly recommended — fixes bugs or adds capabilities we need |
| 🟡 **MEDIUM** | Worth considering — useful features or improvements |
| 🟢 **LOW** | Nice to have but not urgent |
| ⚪ **SKIP** | Not relevant, too risky, or already addressed in our fork |
---
## Already-Merged Upstream Changes (v0.2.0 → v0.2.2)
These are changes already merged into upstream `main` that our fork should sync with. Review these first before looking at open PRs.
| Commit | Date | Description | Priority | Notes |
|--------|------|-------------|----------|-------|
| `589b351` | 2026-03-22 | TradingAgents v0.2.2 | 🔴 HIGH | Version bump + release |
| `6c9c9ce` | 2026-03-22 | fix: set process-level UTF-8 default | 🔴 HIGH | Cross-platform fix, prevents Windows encoding crashes |
| `b8b2825` | 2026-03-22 | refactor: standardize portfolio manager, five-tier rating scale | 🟡 MEDIUM | New rating scale (Buy/Overweight/Hold/Underweight/Sell) — significant prompt change |
| `318adda` | 2026-03-22 | refactor: five-tier rating scale and streamlined agent prompts | 🟡 MEDIUM | Paired with above |
| `7cca9c9` | 2026-03-22 | fix: add exponential backoff retry for yfinance rate limits | 🔴 HIGH | Directly relevant — we already handle this but their approach may be cleaner |
| `bd9b1e5` | 2026-03-22 | feat: add Anthropic effort level support for Claude models | 🟡 MEDIUM | Useful if using Claude |
| `7775500` | 2026-03-22 | chore: consolidate install, fix CLI portability | 🟡 MEDIUM | Build/install improvements |
| `0b13145` | 2026-03-22 | fix: handle list content when writing report sections | 🔴 HIGH | Bug fix for Gemini list-of-dicts responses |
| `3ff28f3` | 2026-03-22 | fix: use OpenAI Responses API for native models | 🔴 HIGH | Required for GPT-5+ models |
| `08bfe70` | 2026-03-21 | fix: preserve exchange-qualified tickers | 🟡 MEDIUM | International market support (CNC.TO, 7203.T) |
| `64f0767` | 2026-03-15 | fix: add http_client support for SSL cert customization | 🟡 MEDIUM | Corporate proxy environments |
| `551fd7f` | 2026-03-15 | chore: update model lists, bump to v0.2.1 | 🟢 LOW | Model list updates |
| `b0f9d18` | 2026-03-15 | fix: harden stock data parsing against malformed CSV/NaN | 🔴 HIGH | Data integrity fix |
| `9cc283a` | 2026-03-15 | fix: add missing console import to cli/utils.py | 🟢 LOW | Minor CLI fix |
| `fe9c8d5` | 2026-03-15 | fix: handle comma-separated indicators | 🟡 MEDIUM | Bug fix |
| `eec6ca4` | 2026-03-15 | fix: initialize all debate state fields | 🔴 HIGH | Prevents crashes in debate cycle |
| `3642f59` | 2026-03-15 | fix: add explicit UTF-8 encoding to all file open() calls | 🔴 HIGH | Windows compatibility |
| `907bc80` | 2026-03-15 | fix: pass debate round config to ConditionalLogic | 🔴 HIGH | Config was being ignored |
| `8a60662` | 2026-03-15 | chore: remove unused chainlit dependency (CVE-2026-22218) | 🔴 HIGH | Security fix |
| `35856ff` | 2026-02-09 | fix: risk manager fundamental report data source | 🔴 HIGH | Bug fix in risk manager |
| `5fec171` | 2026-02-07 | chore: add build-system config, update to v0.2.0 | 🟡 MEDIUM | Build system |
| `50c82a2` | 2026-02-07 | chore: consolidate deps to pyproject.toml | 🟡 MEDIUM | Cleanup |
| `66a02b3` | 2026-02-05 | security: patch LangGrinch vulnerability in langchain-core | 🔴 HIGH | Security patch |
| `e9470b6` | 2026-02-04 | TradingAgents v0.2.0: Multi-Provider LLM Support | 🔴 HIGH | Major release with multi-provider support |
**Recommendation**: Sync with upstream `main` to get all bug fixes, security patches, and the v0.2.2 release. The biggest changes are the five-tier rating scale (may conflict with our custom prompts), OpenAI Responses API support, and UTF-8 fixes.
---
## Open PRs — HIGH Priority (🔴 Recommended to Review)
### PR #427 — Respect Anthropic proxy base URL
- **Date**: 2026-03-22
- **Author**: lu-zhengda
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/427
- **Size**: +9 lines, 1 file changed
- **Mergeable**: ✅ Clean
- **Assessment**: Tiny, focused fix. Maps Anthropic client's generic `base_url` to LangChain's `anthropic_api_url`. Avoids overriding `ANTHROPIC_BASE_URL` when using default endpoint. Critical for anyone running behind a proxy.
- **Conflicts with our fork**: None expected — touches only `anthropic_client.py`
- **Recommendation**: 🔴 Review and cherry-pick. Small, safe, and useful.
### PR #389 — Warn on unknown models in LLM clients
- **Date**: 2026-03-17
- **Author**: HuYellow
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/389
- **Size**: +133/-8 lines, 6 files
- **Assessment**: Adds warning when using unknown models instead of silently proceeding or crashing. Includes unit tests. Defensive coding improvement.
- **Conflicts with our fork**: Low — touches LLM client files which we've extended
- **Recommendation**: 🔴 Review and adapt. Good defensive practice we should have.
### PR #399 — Optional social sentiment tool for social analyst
- **Date**: 2026-03-19
- **Author**: alexander-schneider
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/399
- **Size**: +326/-17 lines, 6 files
- **Assessment**: Adds optional `get_social_sentiment` tool using Adanos API for Reddit/X/Polymarket sentiment. Only activates when `ADANOS_API_KEY` is set. Addresses a real gap — our social analyst only has access to news data. Includes tests and graceful opt-in.
- **Conflicts with our fork**: Medium — touches `social_media_analyst.py` and `trading_graph.py` which we've modified
- **Recommendation**: 🔴 Review and consider adapting. The design (opt-in via env var, no changes to default flow) is clean.
---
## Open PRs — MEDIUM Priority (🟡 Worth Considering)
### PR #408 — Allow custom OpenRouter model IDs in CLI
- **Date**: 2026-03-21
- **Author**: CadeYu
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/408
- **Size**: +96/-13 lines, 2 files
- **Mergeable**: ✅ Clean
- **Assessment**: Adds "Custom OpenRouter model ID" option to CLI. Unblocks users with paid OpenRouter accounts from being limited to 2 free models. Includes unit tests.
- **Conflicts with our fork**: Low — CLI changes
- **Recommendation**: 🟡 Consider if OpenRouter is used. Nice UX improvement.
### PR #425 — Add popular paid models for OpenRouter
- **Date**: 2026-03-22
- **Author**: ctonneslan
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/425
- **Size**: +25 lines, 1 file
- **Mergeable**: ✅ Clean
- **Assessment**: Adds Claude Sonnet 4, Gemini 2.5, GPT-5 variants to OpenRouter dropdown. Plus custom model ID input. Overlaps with PR #408.
- **Recommendation**: 🟡 Pick one of #408 or #425 — they address the same issue (#337).
### PR #416 — Amazon Bedrock provider
- **Date**: 2026-03-22
- **Author**: cloudbeer
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/416
- **Size**: +63/-1 lines, 4 files
- **Mergeable**: ✅ Clean
- **Assessment**: Adds Amazon Bedrock as LLM provider via boto3 credential chain. Supports IAM Role, AKSK, and Bedrock API Key auth. Clean implementation, tested with multiple models.
- **Conflicts with our fork**: Low — adds new client file
- **Recommendation**: 🟡 Review if AWS deployment is planned. Good addition for enterprise use cases.
### PR #430 — Groq and Kilo Gateway LLM providers
- **Date**: 2026-03-23
- **Author**: deathvadeR-afk
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/430
- **Size**: +98/-32 lines, 7 files
- **Mergeable**: ✅ Clean
- **Assessment**: Adds Groq (fast inference) and Kilo Gateway providers. Both are OpenAI-compatible. Includes Windows UTF-8 fix.
- **Conflicts with our fork**: Low–Medium — touches factory and CLI files
- **Recommendation**: 🟡 Groq is popular for fast inference. Consider for latency-sensitive workflows.
### PR #355 — Azure Foundry support
- **Date**: 2026-03-02
- **Author**: yulinzhang96
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/355
- **Size**: +179/-27 lines, 8 files
- **Assessment**: Adds Azure Foundry as LLM provider. Supports any model in Azure's catalog. Includes Windows file I/O UTF-8 fixes.
- **Conflicts with our fork**: Medium — touches default_config.py and factory
- **Recommendation**: 🟡 Review if Azure deployment is planned.
### PR #359 — Optional factor rule analyst with manual rule injection
- **Date**: 2026-03-06
- **Author**: 69049ed6x
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/359
- **Size**: +959/-76 lines, 24 files, 40 commits
- **Assessment**: Adds an optional "Factor Rule Analyst" that loads user-defined factor rules from JSON and injects them into the bull/bear/research/trader/risk pipeline. Interesting concept for semi-systematic workflows. However: large PR (40 commits), touches many core files, and modifies our heavily-customized graph.
- **Conflicts with our fork**: HIGH — touches setup.py, trading_graph.py, agents, propagation
- **Recommendation**: 🟡 Interesting concept but too invasive to merge directly. Consider extracting the idea and implementing it ourselves in a way that fits our architecture.
### PR #347 — Fix yfinance rate limit/session issues + Windows encoding
- **Date**: 2026-02-14
- **Author**: Hewei603
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/347
- **Size**: +101/-41 lines, 2 files
- **Assessment**: Adds exponential backoff retry for yfinance, removes manual session params. However, upstream already merged a similar fix (`7cca9c9` on 2026-03-22), making this PR partially redundant.
- **Conflicts with our fork**: Medium — we have our own rate limiting
- **Recommendation**: 🟡 Review but likely superseded by upstream's own merge. Check if any unique approach here is better.
### PR #362 — Testing infrastructure and utility modules
- **Date**: 2026-03-07
- **Author**: newwan
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/362
- **Size**: +1750 lines, 20 files
- **Assessment**: Adds pytest, ruff, mypy config. Adds config validation, structured logging, TypedDict definitions. We already have extensive test infrastructure (725+ tests), but the config validation and logging modules could be useful.
- **Conflicts with our fork**: HIGH — we have our own test setup
- **Recommendation**: 🟡 Cherry-pick specific modules (config validation, TypedDict types) rather than the whole PR.
### PR #401 — Multi-LLM routing (stage & role-based)
- **Date**: 2026-03-19
- **Author**: mzamini92
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/401
- **Size**: +410/-214 lines, 5 files
- **Assessment**: Adds flexible LLM routing per pipeline stage/role. We already have 3-tier LLM routing (quick/mid/deep think), but this is more granular (per analyst, per researcher, etc.). Conceptually aligned with our architecture.
- **Conflicts with our fork**: HIGH — modifies default_config.py, trading_graph.py, setup.py
- **Recommendation**: 🟡 Good concept but our 3-tier system + per-tier provider overrides already covers most use cases. Consider for future if fine-grained routing is needed.
### PR #324 — Multi-provider support, retry logic, dynamic model fetching
- **Date**: 2026-01-17
- **Author**: MUmarJ
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/324
- **Size**: +1476/-71 lines, 19 files
- **Mergeable**: ❌ Dirty (conflicts)
- **Assessment**: Large PR with content normalization for Gemini, retry logic with backoff, OpenAI Responses API support, config validation, and dynamic model fetching. Much of this has been incrementally merged into upstream's main already.
- **Conflicts with our fork**: HIGH — large, touches many files, and partially superseded
- **Recommendation**: 🟡 Check if any unique pieces (e.g., the normalize_content utility) haven't been incorporated upstream yet. Likely mostly superseded by v0.2.2.
---
## Open PRs — LOW Priority (🟢 Nice to Have)
### PR #432 — Polymarket prediction market analysis module
- **Date**: 2026-03-23
- **Author**: InjayTseng
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/432
- **Size**: +7618/-31 lines, 53 files
- **Mergeable**: ✅ Clean
- **Assessment**: Adds an entire parallel module for Polymarket binary prediction market analysis. 4 specialized analysts, YES/NO debate, Kelly Criterion sizing. Impressive scope but large and unrelated to stock analysis. No external dependencies added. Analysis-only (no order placement).
- **Recommendation**: 🟢 Interesting expansion but out of scope for our current focus. Monitor — if upstream merges it, we can pick it up later.
### PR #394 — Multi-market support (Vietnam stock market)
- **Date**: 2026-03-18
- **Author**: VONUHAU
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/394
- **Size**: +1705/-51 lines, 15 files
- **Mergeable**: ❌ Dirty (conflicts)
- **Assessment**: Adds pluggable `MarketRegistry` + `MarketProvider` abstraction, with Vietnam as first non-US market. Good architecture for multi-market but has conflicts and adds `vnstock` dependency.
- **Recommendation**: 🟢 The `MarketRegistry` abstraction pattern is worth noting for future multi-market expansion. Not urgent now.
### PR #372 — Swing trading pipeline with auto stock screening
- **Date**: 2026-03-11
- **Author**: hyejwon
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/372
- **Size**: +5278/-1217 lines, 49 files
- **Assessment**: Removes debate/risk stages, simplifies to Screening→Analysts→Trader. Adds Korean market support. Very different philosophy from our approach. Destructive changes (removes core features).
- **Recommendation**: 🟢 Interesting alternative architecture but incompatible with our fork's approach. Skip for merging, but review screening pipeline ideas.
### PR #339 — Cross-Asset Correlation Engine
- **Date**: 2026-02-07
- **Author**: Insider77Circle
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/339
- **Size**: +1583 lines, 6 files
- **Assessment**: Adds correlation analysis module (Pearson, Spearman, DCC-GARCH, wavelet coherence, regime detection). Adds scipy, scikit-learn, PyWavelets, networkx dependencies. Standalone module.
- **Recommendation**: 🟢 Potentially useful for market analysis but adds heavy dependencies. Consider as a standalone research tool.
### PR #419 — Chinese translation for README
- **Date**: 2026-03-22
- **Author**: JasonYeYuhe
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/419
- **Assessment**: Adds README.zh.md with Chinese translation.
- **Recommendation**: 🟢 Skip — documentation only, not relevant to our fork.
### PR #410 — llama.cpp local LLM support
- **Date**: 2026-03-21
- **Author**: TPTBusiness
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/410
- **Assessment**: Adds 'llamacpp' provider for running fully offline with local llama-server.
- **Recommendation**: 🟢 We already support Ollama for local inference. Consider if llama.cpp direct support adds value.
### PR #407 — Z.AI glm-5 provider support
- **Date**: 2026-03-21
- **Author**: tanyudii
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/407
- **Assessment**: Adds Z.AI as LLM provider for stock research.
- **Recommendation**: 🟢 Niche provider. Skip unless needed.
### PR #395 — MiniMax as LLM provider
- **Date**: 2026-03-18
- **Author**: octo-patch
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/395
- **Assessment**: Adds MiniMax (OpenAI-compatible API) with M2.7 models.
- **Recommendation**: 🟢 Niche provider. Skip unless needed.
### PR #344 — Streamlit UI for Hugging Face Spaces
- **Date**: 2026-02-11
- **Author**: rajeshthangaraj1
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/344
- **Assessment**: Adds Streamlit web UI. We have AgentOS (React+FastAPI) which is far more capable.
- **Recommendation**: 🟢 Skip — we have a superior UI solution.
### PR #340 — Top 10 OpenRouter models in CLI
- **Date**: 2026-02-08
- **Author**: treasuraid
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/340
- **Assessment**: Adds top OpenRouter models to selection. Overlaps with #408 and #425.
- **Recommendation**: 🟢 Superseded by #408/#425.
---
## Open PRs — SKIP (⚪ Not Recommended)
### PR #435 — CLI change to correct files reading
- **Date**: 2026-03-23
- **Author**: BranJ2106
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/435
- **Size**: +4851/-16 lines, 31 files
- **Assessment**: Description in Spanish, unclear scope, 4851 lines added across 31 files for what's described as file reading and report generation fixes. Disproportionate change size for described fix.
- **Recommendation**: ⚪ Skip — unclear quality and scope.
### PR #431 — "Kim"
- **Date**: 2026-03-23
- **Author**: Kim-254-de
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/431
- **Assessment**: Empty body, title is just "Kim". No description of changes.
- **Recommendation**: ⚪ Skip — no description, likely test PR.
### PR #376 — ChatGPT OAuth login (codex_oauth)
- **Date**: 2026-03-14
- **Author**: CaiJichang212
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/376
- **Assessment**: Adds OAuth login via ChatGPT Plus/Pro to bypass API keys. Security concern — relies on browser OAuth flow scraping.
- **Recommendation**: ⚪ Skip — security concern, unofficial API access pattern.
### PR #374 — Feature/phase2 execution
- **Date**: 2026-03-11
- **Author**: BrunoNatalicio
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/374
- **Assessment**: Empty body, no description.
- **Recommendation**: ⚪ Skip — no description.
### PR #373 — Dex data layer
- **Date**: 2026-03-11
- **Author**: BrunoNatalicio
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/373
- **Assessment**: Empty body, no description. Likely DeFi/DEX data layer.
- **Recommendation**: ⚪ Skip — no description, unclear scope.
### PR #371 — Simplified Chinese option for reports
- **Date**: 2026-03-10
- **Author**: divingken
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/371
- **Assessment**: Adds Chinese language option for reports. Localization.
- **Recommendation**: ⚪ Skip — not relevant to our fork's needs.
### PR #370 — Fix SSL certificate error and ASCII bug
- **Date**: 2026-03-09
- **Author**: iamhenryhuang
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/370
- **Assessment**: SSL and ASCII fixes. Likely superseded by upstream's UTF-8 and SSL fixes already merged.
- **Recommendation**: ⚪ Skip — likely superseded.
### PR #368 — Add vLLM provider
- **Date**: 2026-03-09
- **Author**: flutist
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/368
- **Assessment**: Adds vLLM provider.
- **Recommendation**: ⚪ Skip — niche, and there's also a duplicate PR #358 (draft).
### PR #367 — Traditional Chinese output support
- **Date**: 2026-03-09
- **Author**: Jack0630
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/367
- **Assessment**: Adds Traditional Chinese instructions to all agent prompts.
- **Recommendation**: ⚪ Skip — localization, not relevant.
### PR #366 — System R external risk validation example
- **Date**: 2026-03-08
- **Author**: ashimnandi-trika
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/366
- **Assessment**: Adds example showing integration with System R risk validation API. Example file only.
- **Recommendation**: ⚪ Skip — example/integration with external product.
### PR #363 — Add Ollama cloud provider
- **Date**: 2026-03-07
- **Author**: simodev25
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/363
- **Assessment**: Adds Ollama Cloud support. We already have Ollama support configured per our architecture.
- **Recommendation**: ⚪ Skip — we already handle Ollama.
### PR #360 — Add DeepSeek support
- **Date**: 2026-03-06
- **Author**: null0NULL123
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/360
- **Assessment**: Adds DeepSeek provider. DeepSeek is OpenAI-compatible and works via OpenRouter in our setup.
- **Recommendation**: ⚪ Skip — already works via OpenRouter or direct OpenAI-compatible config.
### PR #358 — Add vLLM support (Draft)
- **Date**: 2026-03-06
- **Author**: flutist
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/358
- **Assessment**: Draft PR, duplicate of #368.
- **Recommendation**: ⚪ Skip — draft, duplicate.
### PR #333 — Azure OpenAI support
- **Date**: 2026-02-04
- **Author**: kazuma-424
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/333
- **Assessment**: Adds Azure OpenAI support. Overlaps with #355 (Azure Foundry).
- **Recommendation**: ⚪ Skip — #355 (Azure Foundry) is more comprehensive.
### PR #329 — Settings UI, Pipeline Visualization & Documentation (Nifty50)
- **Date**: 2026-01-31
- **Author**: hemangjoshi37a
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/329
- **Assessment**: Nifty50-specific dashboard. We have AgentOS.
- **Recommendation**: ⚪ Skip — we have a superior UI.
### PR #328 — New UI
- **Date**: 2026-01-30
- **Author**: qq173681019
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/328
- **Assessment**: Empty body. Alternative UI implementation.
- **Recommendation**: ⚪ Skip — no description, we have AgentOS.
### PR #320 — Physical URL verification for Fact Checker
- **Date**: 2026-01-04
- **Author**: jiwoomap
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/320
- **Assessment**: Adds URL verification to fact checker, checks if cited URLs actually exist.
- **Recommendation**: ⚪ Skip for now — interesting concept but adds network calls during analysis.
### PR #302 — ACE (Agentic Context Engineer)
- **Date**: 2025-12-25
- **Author**: EduardGilM
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/302
- **Assessment**: Integrates ACE framework for autonomous agent improvement through reflection. External dependency.
- **Recommendation**: ⚪ Skip — adds external framework dependency.
### PR #286 — Fix OpenRouter embeddings
- **Date**: 2025-11-20
- **Author**: 00make
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/286
- **Assessment**: OpenRouter embeddings fix. Empty body.
- **Recommendation**: ⚪ Skip — no description, old PR.
### PR #282 — Turns off upload files, addresses conflicts
- **Date**: 2025-11-19
- **Author**: jackspace
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/282
- **Assessment**: Cleanup PR related to #281. Empty body.
- **Recommendation**: ⚪ Skip — related to #281 which we're also skipping.
### PR #281 — Production-Ready Platform (Multi-LLM, Paper Trading, Web UI, Docker)
- **Date**: 2025-11-19
- **Author**: jackspace
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/281
- **Size**: +40032/-45 lines, 113 files
- **Mergeable**: ❌ Dirty (conflicts)
- **Assessment**: Massive PR (40k+ lines) that adds LLM factory, Alpaca paper trading, Chainlit web UI, Docker. Very ambitious but: enormous scope, 5 months old, has merge conflicts, and our fork already has AgentOS + portfolio management.
- **Recommendation**: ⚪ Skip — too large, too old, and our fork has surpassed many of these features.
### PR #278 — Add openai_compatible mode
- **Date**: 2025-11-15
- **Author**: grandgen
- **URL**: https://github.com/TauricResearch/TradingAgents/pull/278
- **Assessment**: Adds generic OpenAI-compatible provider mode. Our fork already supports this pattern.
- **Recommendation**: ⚪ Skip — already addressed.
### Older PRs (#146, #145, #144, #135, #134, #128, #125, #120, #117, #116, #115, #110, #105, #103, #101, #94, #61, #56, #48)
- **Date range**: 2025-06 to 2025-07
- **Assessment**: These PRs are 8–9 months old, target an older codebase, and many of their features have been addressed either by upstream's v0.2.x releases or by our fork's custom work.
- **Recommendation**: ⚪ Skip all — stale, likely superseded.
---
## Action Items — Recommended Review Order
### Phase 1: Sync with upstream main (🔴 Critical)
1. **Merge/rebase upstream v0.2.2 changes** into our fork. Key changes:
- Security: LangGrinch vulnerability patch, chainlit CVE removal
- Bug fixes: debate state init, debate round config, UTF-8 encoding, stock data parsing
- Features: OpenAI Responses API, five-tier rating scale, yfinance retry
- **⚠️ Potential conflicts**: Our custom prompts, graph setup, and CLI may need manual resolution
### Phase 2: Cherry-pick high-value PRs (🔴 High Priority)
2. **PR #427** — Anthropic proxy base URL fix (tiny, safe)
3. **PR #389** — Unknown model warnings (small, defensive)
4. **PR #399** — Social sentiment tool (medium, fills a real gap)
### Phase 3: Evaluate medium-priority PRs (🟡)
5. **PR #408 or #425** — OpenRouter model expansion (pick one)
6. **PR #416** — Bedrock provider (if AWS is planned)
7. **PR #430** — Groq provider (if fast inference is needed)
### Phase 4: Future consideration (🟢)
8. **PR #432** — Polymarket module (monitor if upstream merges)
9. **PR #394** — Multi-market architecture (note the MarketRegistry pattern)
10. **PR #339** — Cross-asset correlation (standalone research tool)
---
## Notes for Next Review
- The upstream is actively maintained by @Yijia-Xiao with frequent merges
- Our fork has diverged significantly with AgentOS, scanner pipeline, and 725+ unit tests
- The five-tier rating scale in v0.2.2 is a significant prompt change — test thoroughly before adopting
- Many community PRs are LLM provider additions — most are OpenAI-compatible and work via OpenRouter anyway
- Watch for v0.2.3+ releases — the upstream is shipping fast

View File

@ -4,14 +4,13 @@ build-backend = "setuptools.build_meta"
[project] [project]
name = "tradingagents" name = "tradingagents"
version = "0.2.1" version = "0.2.2"
description = "TradingAgents: Multi-Agents LLM Financial Trading Framework" description = "TradingAgents: Multi-Agents LLM Financial Trading Framework"
readme = "README.md" readme = "README.md"
requires-python = ">=3.10" requires-python = ">=3.10"
dependencies = [ dependencies = [
"langchain-core>=0.3.81", "langchain-core>=0.3.81",
"backtrader>=1.9.78.123", "backtrader>=1.9.78.123",
"chainlit>=2.5.5",
"langchain-anthropic>=0.3.15", "langchain-anthropic>=0.3.15",
"langchain-experimental>=0.3.4", "langchain-experimental>=0.3.4",
"langchain-google-genai>=2.1.5", "langchain-google-genai>=2.1.5",
@ -62,3 +61,6 @@ markers = [
"paid_tier: marks tests that require a paid Finnhub subscription (free tier returns HTTP 403)", "paid_tier: marks tests that require a paid Finnhub subscription (free tier returns HTTP 403)",
] ]
addopts = "--ignore=tests/integration --ignore=tests/e2e --disable-socket --allow-unix-socket -x -q" addopts = "--ignore=tests/integration --ignore=tests/e2e --disable-socket --allow-unix-socket -x -q"
[tool.setuptools.package-data]
cli = ["static/*"]

View File

@ -1,21 +1 @@
typing-extensions .
langchain-core
langchain-openai
langchain-experimental
pandas
yfinance
stockstats
langgraph
rank-bm25
setuptools
backtrader
parsel
requests
tqdm
pytz
redis
rich
typer
questionary
langchain_anthropic
langchain-google-genai

View File

@ -0,0 +1,18 @@
import unittest
from cli.utils import normalize_ticker_symbol
from tradingagents.agents.utils.agent_utils import build_instrument_context
class TickerSymbolHandlingTests(unittest.TestCase):
    """Regression tests for exchange-suffix handling of ticker symbols."""

    def test_normalize_ticker_symbol_preserves_exchange_suffix(self):
        # Surrounding whitespace is stripped and the symbol uppercased
        # without dropping the `.TO` exchange suffix.
        normalized = normalize_ticker_symbol(" cnc.to ")
        self.assertEqual(normalized, "CNC.TO")

    def test_build_instrument_context_mentions_exact_symbol(self):
        # The prompt context must echo the exact exchange-qualified ticker
        # and the instruction about preserving suffixes.
        context = build_instrument_context("7203.T")
        for fragment in ("7203.T", "exchange suffix"):
            self.assertIn(fragment, context)


if __name__ == "__main__":
    unittest.main()

View File

@ -97,7 +97,7 @@ class TestConditionalLogicWiring:
) )
} }
result = cl.should_continue_risk_analysis(state) result = cl.should_continue_risk_analysis(state)
assert result == "Risk Judge" assert result == "Portfolio Manager"
class TestNewModulesImportable: class TestNewModulesImportable:

View File

@ -142,10 +142,10 @@ class TestRiskDebateRounds2:
assert result == "Aggressive Analyst" assert result == "Aggressive Analyst"
def test_threshold_at_6(self): def test_threshold_at_6(self):
# count=6 == 3*2=6 → route to Risk Judge # count=6 == 3*2=6 → route to Portfolio Manager
state = _make_risk_state(count=6, latest_speaker="Aggressive") state = _make_risk_state(count=6, latest_speaker="Aggressive")
result = self.cl.should_continue_risk_analysis(state) result = self.cl.should_continue_risk_analysis(state)
assert result == "Risk Judge" assert result == "Portfolio Manager"
def test_continues_at_count_5(self): def test_continues_at_count_5(self):
state = _make_risk_state(count=5, latest_speaker="Aggressive") state = _make_risk_state(count=5, latest_speaker="Aggressive")

View File

@ -0,0 +1,2 @@
import os
# Default Python to UTF-8 mode so text I/O does not fall back to a legacy
# locale encoding (notably cp125x on Windows). setdefault keeps any value
# the user has already exported.
# NOTE(review): PYTHONUTF8 is read during interpreter startup, so from here
# it presumably only affects subprocesses that inherit the environment —
# confirm where this module is imported in the startup sequence.
os.environ.setdefault("PYTHONUTF8", "1")

View File

@ -15,7 +15,7 @@ from .risk_mgmt.conservative_debator import create_conservative_debator
from .risk_mgmt.neutral_debator import create_neutral_debator from .risk_mgmt.neutral_debator import create_neutral_debator
from .managers.research_manager import create_research_manager from .managers.research_manager import create_research_manager
from .managers.risk_manager import create_risk_manager from .managers.portfolio_manager import create_portfolio_manager
from .trader.trader import create_trader from .trader.trader import create_trader
@ -33,7 +33,7 @@ __all__ = [
"create_neutral_debator", "create_neutral_debator",
"create_news_analyst", "create_news_analyst",
"create_aggressive_debator", "create_aggressive_debator",
"create_risk_manager", "create_portfolio_manager",
"create_conservative_debator", "create_conservative_debator",
"create_social_media_analyst", "create_social_media_analyst",
"create_trader", "create_trader",

View File

@ -17,8 +17,7 @@ from tradingagents.dataflows.config import get_config
def create_fundamentals_analyst(llm): def create_fundamentals_analyst(llm):
def fundamentals_analyst_node(state): def fundamentals_analyst_node(state):
current_date = state["trade_date"] current_date = state["trade_date"]
ticker = state["company_of_interest"] instrument_context = build_instrument_context(state["company_of_interest"])
company_name = state["company_of_interest"]
tools = [ tools = [
get_ttm_analysis, get_ttm_analysis,
@ -54,7 +53,7 @@ def create_fundamentals_analyst(llm):
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable," " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop." " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
" You have access to the following tools: {tool_names}.\n{system_message}" " You have access to the following tools: {tool_names}.\n{system_message}"
"For your reference, the current date is {current_date}. The company we want to look at is {ticker}", "For your reference, the current date is {current_date}. {instrument_context}",
), ),
MessagesPlaceholder(variable_name="messages"), MessagesPlaceholder(variable_name="messages"),
] ]
@ -63,7 +62,7 @@ def create_fundamentals_analyst(llm):
prompt = prompt.partial(system_message=system_message) prompt = prompt.partial(system_message=system_message)
prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools])) prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
prompt = prompt.partial(current_date=current_date) prompt = prompt.partial(current_date=current_date)
prompt = prompt.partial(ticker=ticker) prompt = prompt.partial(instrument_context=instrument_context)
chain = prompt | llm.bind_tools(tools) chain = prompt | llm.bind_tools(tools)

View File

@ -10,8 +10,7 @@ def create_market_analyst(llm):
def market_analyst_node(state): def market_analyst_node(state):
current_date = state["trade_date"] current_date = state["trade_date"]
ticker = state["company_of_interest"] instrument_context = build_instrument_context(state["company_of_interest"])
company_name = state["company_of_interest"]
tools = [ tools = [
get_macro_regime, get_macro_regime,
@ -46,7 +45,7 @@ Volatility Indicators:
Volume-Based Indicators: Volume-Based Indicators:
- vwma: VWMA: A moving average weighted by volume. Usage: Confirm trends by integrating price action with volume data. Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses. - vwma: VWMA: A moving average weighted by volume. Usage: Confirm trends by integrating price action with volume data. Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses.
- Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_stock_data first to retrieve the CSV that is needed to generate indicators. Then use get_indicators with the specific indicator names. Write a very detailed and nuanced report of the trends you observe. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions.""" - Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_stock_data first to retrieve the CSV that is needed to generate indicators. Then use get_indicators with the specific indicator names. Write a very detailed and nuanced report of the trends you observe. Provide specific, actionable insights with supporting evidence to help traders make informed decisions."""
+ """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""" + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
) )
@ -61,7 +60,7 @@ Volume-Based Indicators:
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable," " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop." " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
" You have access to the following tools: {tool_names}.\n{system_message}" " You have access to the following tools: {tool_names}.\n{system_message}"
"For your reference, the current date is {current_date}. The company we want to look at is {ticker}", "For your reference, the current date is {current_date}. {instrument_context}",
), ),
MessagesPlaceholder(variable_name="messages"), MessagesPlaceholder(variable_name="messages"),
] ]
@ -70,7 +69,7 @@ Volume-Based Indicators:
prompt = prompt.partial(system_message=system_message) prompt = prompt.partial(system_message=system_message)
prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools])) prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
prompt = prompt.partial(current_date=current_date) prompt = prompt.partial(current_date=current_date)
prompt = prompt.partial(ticker=ticker) prompt = prompt.partial(instrument_context=instrument_context)
chain = prompt | llm.bind_tools(tools) chain = prompt | llm.bind_tools(tools)

View File

@ -7,7 +7,7 @@ from tradingagents.dataflows.config import get_config
def create_news_analyst(llm): def create_news_analyst(llm):
def news_analyst_node(state): def news_analyst_node(state):
current_date = state["trade_date"] current_date = state["trade_date"]
ticker = state["company_of_interest"] instrument_context = build_instrument_context(state["company_of_interest"])
tools = [ tools = [
get_news, get_news,
@ -15,7 +15,7 @@ def create_news_analyst(llm):
] ]
system_message = ( system_message = (
"You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. Use the available tools: get_news(query, start_date, end_date) for company-specific or targeted news searches, and get_global_news(curr_date, look_back_days, limit) for broader macroeconomic news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." "You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. Use the available tools: get_news(query, start_date, end_date) for company-specific or targeted news searches, and get_global_news(curr_date, look_back_days, limit) for broader macroeconomic news. Provide specific, actionable insights with supporting evidence to help traders make informed decisions."
+ """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""" + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
) )
@ -30,7 +30,7 @@ def create_news_analyst(llm):
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable," " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop." " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
" You have access to the following tools: {tool_names}.\n{system_message}" " You have access to the following tools: {tool_names}.\n{system_message}"
"For your reference, the current date is {current_date}. We are looking at the company {ticker}", "For your reference, the current date is {current_date}. {instrument_context}",
), ),
MessagesPlaceholder(variable_name="messages"), MessagesPlaceholder(variable_name="messages"),
] ]
@ -39,7 +39,7 @@ def create_news_analyst(llm):
prompt = prompt.partial(system_message=system_message) prompt = prompt.partial(system_message=system_message)
prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools])) prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
prompt = prompt.partial(current_date=current_date) prompt = prompt.partial(current_date=current_date)
prompt = prompt.partial(ticker=ticker) prompt = prompt.partial(instrument_context=instrument_context)
chain = prompt | llm.bind_tools(tools) chain = prompt | llm.bind_tools(tools)
result = chain.invoke(state["messages"]) result = chain.invoke(state["messages"])

View File

@ -8,16 +8,15 @@ from tradingagents.dataflows.config import get_config
def create_social_media_analyst(llm): def create_social_media_analyst(llm):
def social_media_analyst_node(state): def social_media_analyst_node(state):
current_date = state["trade_date"] current_date = state["trade_date"]
ticker = state["company_of_interest"] instrument_context = build_instrument_context(state["company_of_interest"])
company_name = state["company_of_interest"]
tools = [ tools = [
get_news, get_news,
] ]
system_message = ( system_message = (
"You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. Use the get_news(query, start_date, end_date) tool to search for company-specific news and social media discussions. Try to look at all sources possible from social media to sentiment to news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." "You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. Use the get_news(query, start_date, end_date) tool to search for company-specific news and social media discussions. Try to look at all sources possible from social media to sentiment to news. Provide specific, actionable insights with supporting evidence to help traders make informed decisions."
+ """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""", + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
) )
prompt = ChatPromptTemplate.from_messages( prompt = ChatPromptTemplate.from_messages(
@ -31,7 +30,7 @@ def create_social_media_analyst(llm):
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable," " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop." " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
" You have access to the following tools: {tool_names}.\n{system_message}" " You have access to the following tools: {tool_names}.\n{system_message}"
"For your reference, the current date is {current_date}. The current company we want to analyze is {ticker}", "For your reference, the current date is {current_date}. {instrument_context}",
), ),
MessagesPlaceholder(variable_name="messages"), MessagesPlaceholder(variable_name="messages"),
] ]
@ -40,7 +39,7 @@ def create_social_media_analyst(llm):
prompt = prompt.partial(system_message=system_message) prompt = prompt.partial(system_message=system_message)
prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools])) prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
prompt = prompt.partial(current_date=current_date) prompt = prompt.partial(current_date=current_date)
prompt = prompt.partial(ticker=ticker) prompt = prompt.partial(instrument_context=instrument_context)
chain = prompt | llm.bind_tools(tools) chain = prompt | llm.bind_tools(tools)

View File

@ -1,5 +1,10 @@
def create_risk_manager(llm, memory): from tradingagents.agents.utils.agent_utils import build_instrument_context
def risk_manager_node(state) -> dict:
def create_portfolio_manager(llm, memory):
def portfolio_manager_node(state) -> dict:
instrument_context = build_instrument_context(state["company_of_interest"])
history = state["risk_debate_state"]["history"] history = state["risk_debate_state"]["history"]
risk_debate_state = state["risk_debate_state"] risk_debate_state = state["risk_debate_state"]
@ -20,27 +25,37 @@ def create_risk_manager(llm, memory):
macro_context = f"\n\nCurrent Macro Regime:\n{macro_regime_report}\nEnsure your risk assessment reflects the macro environment — in risk-off regimes, apply higher standards for position entry and tighter risk controls.\n" if macro_regime_report else "" macro_context = f"\n\nCurrent Macro Regime:\n{macro_regime_report}\nEnsure your risk assessment reflects the macro environment — in risk-off regimes, apply higher standards for position entry and tighter risk controls.\n" if macro_regime_report else ""
prompt = f"""As the Risk Management Judge and Debate Facilitator, your goal is to evaluate the debate between three risk analysts—Aggressive, Neutral, and Conservative—and determine the best course of action for the trader. Your decision must result in a clear recommendation: Buy, Sell, or Hold. Choose Hold only if strongly justified by specific arguments, not as a fallback when all sides seem valid. Strive for clarity and decisiveness. prompt = f"""As the Portfolio Manager, synthesize the risk analysts' debate and deliver the final trading decision.
{macro_context} {macro_context}
Guidelines for Decision-Making: {instrument_context}
1. **Summarize Key Arguments**: Extract the strongest points from each analyst, focusing on relevance to the context.
2. **Provide Rationale**: Support your recommendation with direct quotes and counterarguments from the debate.
3. **Refine the Trader's Plan**: Start with the trader's original plan, **{trader_plan}**, and adjust it based on the analysts' insights.
4. **Learn from Past Mistakes**: Use lessons from **{past_memory_str}** to address prior misjudgments and improve the decision you are making now to make sure you don't make a wrong BUY/SELL/HOLD call that loses money.
Deliverables:
- A clear and actionable recommendation: Buy, Sell, or Hold.
- Detailed reasoning anchored in the debate and past reflections.
--- ---
**Analysts Debate History:** **Rating Scale** (use exactly one):
- **Buy**: Strong conviction to enter or add to position
- **Overweight**: Favorable outlook, gradually increase exposure
- **Hold**: Maintain current position, no action needed
- **Underweight**: Reduce exposure, take partial profits
- **Sell**: Exit position or avoid entry
**Context:**
- Trader's proposed plan: **{trader_plan}**
- Lessons from past decisions: **{past_memory_str}**
**Required Output Structure:**
1. **Rating**: State one of Buy / Overweight / Hold / Underweight / Sell.
2. **Executive Summary**: A concise action plan covering entry strategy, position sizing, key risk levels, and time horizon.
3. **Investment Thesis**: Detailed reasoning anchored in the analysts' debate and past reflections.
---
**Risk Analysts Debate History:**
{history} {history}
--- ---
Focus on actionable insights and continuous improvement. Build on past lessons, critically evaluate all perspectives, and ensure each decision advances better outcomes.""" Be decisive and ground every conclusion in specific evidence from the analysts."""
response = llm.invoke(prompt) response = llm.invoke(prompt)
@ -62,4 +77,4 @@ Focus on actionable insights and continuous improvement. Build on past lessons,
"final_trade_decision": response.content, "final_trade_decision": response.content,
} }
return risk_manager_node return portfolio_manager_node

View File

@ -1,8 +1,11 @@
import time import time
from tradingagents.agents.utils.agent_utils import build_instrument_context
def create_research_manager(llm, memory): def create_research_manager(llm, memory):
def research_manager_node(state) -> dict: def research_manager_node(state) -> dict:
instrument_context = build_instrument_context(state["company_of_interest"])
history = state["investment_debate_state"].get("history", "") history = state["investment_debate_state"].get("history", "")
market_research_report = state["market_report"] market_research_report = state["market_report"]
sentiment_report = state["sentiment_report"] sentiment_report = state["sentiment_report"]
@ -37,6 +40,8 @@ Take into account your past mistakes on similar situations. Use these insights t
Here are your past reflections on mistakes: Here are your past reflections on mistakes:
\"{past_memory_str}\" \"{past_memory_str}\"
{instrument_context}
Here is the debate: Here is the debate:
Debate History: Debate History:
{history}""" {history}"""

View File

@ -28,7 +28,7 @@ Market Research Report: {market_research_report}
Social Media Sentiment Report: {sentiment_report} Social Media Sentiment Report: {sentiment_report}
Latest World Affairs Report: {news_report} Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report} Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here are the last arguments from the conservative analyst: {current_conservative_response} Here are the last arguments from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point. Here is the current conversation history: {history} Here are the last arguments from the conservative analyst: {current_conservative_response} Here are the last arguments from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints yet, present your own argument based on the available data.
Engage actively by addressing any specific concerns raised, refuting the weaknesses in their logic, and asserting the benefits of risk-taking to outpace market norms. Maintain a focus on debating and persuading, not just presenting data. Challenge each counterpoint to underscore why a high-risk approach is optimal. Output conversationally as if you are speaking without any special formatting.""" Engage actively by addressing any specific concerns raised, refuting the weaknesses in their logic, and asserting the benefits of risk-taking to outpace market norms. Maintain a focus on debating and persuading, not just presenting data. Challenge each counterpoint to underscore why a high-risk approach is optimal. Output conversationally as if you are speaking without any special formatting."""

View File

@ -29,7 +29,7 @@ Market Research Report: {market_research_report}
Social Media Sentiment Report: {sentiment_report} Social Media Sentiment Report: {sentiment_report}
Latest World Affairs Report: {news_report} Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report} Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here is the last response from the aggressive analyst: {current_aggressive_response} Here is the last response from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point. Here is the current conversation history: {history} Here is the last response from the aggressive analyst: {current_aggressive_response} Here is the last response from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints yet, present your own argument based on the available data.
Engage by questioning their optimism and emphasizing the potential downsides they may have overlooked. Address each of their counterpoints to showcase why a conservative stance is ultimately the safest path for the firm's assets. Focus on debating and critiquing their arguments to demonstrate the strength of a low-risk strategy over their approaches. Output conversationally as if you are speaking without any special formatting.""" Engage by questioning their optimism and emphasizing the potential downsides they may have overlooked. Address each of their counterpoints to showcase why a conservative stance is ultimately the safest path for the firm's assets. Focus on debating and critiquing their arguments to demonstrate the strength of a low-risk strategy over their approaches. Output conversationally as if you are speaking without any special formatting."""

View File

@ -28,7 +28,7 @@ Market Research Report: {market_research_report}
Social Media Sentiment Report: {sentiment_report} Social Media Sentiment Report: {sentiment_report}
Latest World Affairs Report: {news_report} Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report} Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here is the last response from the aggressive analyst: {current_aggressive_response} Here is the last response from the conservative analyst: {current_conservative_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point. Here is the current conversation history: {history} Here is the last response from the aggressive analyst: {current_aggressive_response} Here is the last response from the conservative analyst: {current_conservative_response}. If there are no responses from the other viewpoints yet, present your own argument based on the available data.
Engage actively by analyzing both sides critically, addressing weaknesses in the aggressive and conservative arguments to advocate for a more balanced approach. Challenge each of their points to illustrate why a moderate risk strategy might offer the best of both worlds, providing growth potential while safeguarding against extreme volatility. Focus on debating rather than simply presenting data, aiming to show that a balanced view can lead to the most reliable outcomes. Output conversationally as if you are speaking without any special formatting.""" Engage actively by analyzing both sides critically, addressing weaknesses in the aggressive and conservative arguments to advocate for a more balanced approach. Challenge each of their points to illustrate why a moderate risk strategy might offer the best of both worlds, providing growth potential while safeguarding against extreme volatility. Focus on debating rather than simply presenting data, aiming to show that a balanced view can lead to the most reliable outcomes. Output conversationally as if you are speaking without any special formatting."""

View File

@ -2,10 +2,13 @@ import functools
import time import time
import json import json
from tradingagents.agents.utils.agent_utils import build_instrument_context
def create_trader(llm, memory): def create_trader(llm, memory):
def trader_node(state, name): def trader_node(state, name):
company_name = state["company_of_interest"] company_name = state["company_of_interest"]
instrument_context = build_instrument_context(company_name)
investment_plan = state["investment_plan"] investment_plan = state["investment_plan"]
market_research_report = state["market_report"] market_research_report = state["market_report"]
sentiment_report = state["sentiment_report"] sentiment_report = state["sentiment_report"]
@ -24,13 +27,13 @@ def create_trader(llm, memory):
context = { context = {
"role": "user", "role": "user",
"content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nLeverage these insights to make an informed and strategic decision.", "content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. {instrument_context} This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nLeverage these insights to make an informed and strategic decision.",
} }
messages = [ messages = [
{ {
"role": "system", "role": "system",
"content": f"""You are a trading agent analyzing market data to make investment decisions. Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. Do not forget to utilize lessons from past decisions to learn from your mistakes. Here is some reflections from similar situatiosn you traded in and the lessons learned: {past_memory_str}""", "content": f"""You are a trading agent analyzing market data to make investment decisions. Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. Apply lessons from past decisions to strengthen your analysis. Here are reflections from similar situations you traded in and the lessons learned: {past_memory_str}""",
}, },
context, context,
] ]

View File

@ -1,6 +1,15 @@
from langchain_core.messages import HumanMessage, RemoveMessage from langchain_core.messages import HumanMessage, RemoveMessage
def build_instrument_context(ticker: str) -> str:
    """Build a prompt snippet pinning agents to the exact ticker string.

    The snippet tells downstream agents to reuse the ticker verbatim —
    including any exchange suffix such as ``.TO`` or ``.T`` — in every
    tool call, report, and recommendation.
    """
    suffix_guidance = (
        "Use this exact ticker in every tool call, report, and recommendation, "
        "preserving any exchange suffix (e.g. `.TO`, `.L`, `.HK`, `.T`)."
    )
    return f"The instrument to analyze is `{ticker}`. " + suffix_guidance
def create_msg_delete(): def create_msg_delete():
def delete_messages(state): def delete_messages(state):
"""Clear messages and add placeholder for Anthropic compatibility""" """Clear messages and add placeholder for Anthropic compatibility"""

View File

@ -1,6 +1,9 @@
import time
import logging import logging
import pandas as pd import pandas as pd
import yfinance as yf import yfinance as yf
from yfinance.exceptions import YFRateLimitError
from stockstats import wrap from stockstats import wrap
from typing import Annotated from typing import Annotated
import os import os
@ -120,6 +123,25 @@ def _load_or_fetch_ohlcv(symbol: str) -> pd.DataFrame:
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------
def yf_retry(func, max_retries=3, base_delay=2.0):
"""Execute a yfinance call with exponential backoff on rate limits.
yfinance raises YFRateLimitError on HTTP 429 responses but does not
retry them internally. This wrapper adds retry logic specifically
for rate limits. Other exceptions propagate immediately.
"""
for attempt in range(max_retries + 1):
try:
return func()
except YFRateLimitError:
if attempt < max_retries:
delay = base_delay * (2 ** attempt)
logger.warning(f"Yahoo Finance rate limited, retrying in {delay:.0f}s (attempt {attempt + 1}/{max_retries})")
time.sleep(delay)
else:
raise
class StockstatsUtils: class StockstatsUtils:
@staticmethod @staticmethod
def get_stock_stats( def get_stock_stats(

View File

@ -5,7 +5,7 @@ from dateutil.relativedelta import relativedelta
import pandas as pd import pandas as pd
import yfinance as yf import yfinance as yf
import os import os
from .stockstats_utils import StockstatsUtils, YFinanceError, _clean_dataframe, _load_or_fetch_ohlcv from .stockstats_utils import StockstatsUtils, YFinanceError, _clean_dataframe, _load_or_fetch_ohlcv, yf_retry
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -23,7 +23,7 @@ def get_YFin_data_online(
ticker = yf.Ticker(symbol.upper()) ticker = yf.Ticker(symbol.upper())
# Fetch historical data for the specified date range # Fetch historical data for the specified date range
data = ticker.history(start=start_date, end=end_date) data = yf_retry(lambda: ticker.history(start=start_date, end=end_date))
# Check if data is empty # Check if data is empty
if data.empty: if data.empty:
@ -247,7 +247,7 @@ def get_fundamentals(
"""Get company fundamentals overview from yfinance.""" """Get company fundamentals overview from yfinance."""
try: try:
ticker_obj = yf.Ticker(ticker.upper()) ticker_obj = yf.Ticker(ticker.upper())
info = ticker_obj.info info = yf_retry(lambda: ticker_obj.info)
if not info: if not info:
return f"No fundamentals data found for symbol '{ticker}'" return f"No fundamentals data found for symbol '{ticker}'"
@ -307,9 +307,9 @@ def get_balance_sheet(
ticker_obj = yf.Ticker(ticker.upper()) ticker_obj = yf.Ticker(ticker.upper())
if freq.lower() == "quarterly": if freq.lower() == "quarterly":
data = ticker_obj.quarterly_balance_sheet data = yf_retry(lambda: ticker_obj.quarterly_balance_sheet)
else: else:
data = ticker_obj.balance_sheet data = yf_retry(lambda: ticker_obj.balance_sheet)
if data.empty: if data.empty:
return f"No balance sheet data found for symbol '{ticker}'" return f"No balance sheet data found for symbol '{ticker}'"
@ -337,9 +337,9 @@ def get_cashflow(
ticker_obj = yf.Ticker(ticker.upper()) ticker_obj = yf.Ticker(ticker.upper())
if freq.lower() == "quarterly": if freq.lower() == "quarterly":
data = ticker_obj.quarterly_cashflow data = yf_retry(lambda: ticker_obj.quarterly_cashflow)
else: else:
data = ticker_obj.cashflow data = yf_retry(lambda: ticker_obj.cashflow)
if data.empty: if data.empty:
return f"No cash flow data found for symbol '{ticker}'" return f"No cash flow data found for symbol '{ticker}'"
@ -367,9 +367,9 @@ def get_income_statement(
ticker_obj = yf.Ticker(ticker.upper()) ticker_obj = yf.Ticker(ticker.upper())
if freq.lower() == "quarterly": if freq.lower() == "quarterly":
data = ticker_obj.quarterly_income_stmt data = yf_retry(lambda: ticker_obj.quarterly_income_stmt)
else: else:
data = ticker_obj.income_stmt data = yf_retry(lambda: ticker_obj.income_stmt)
if data.empty: if data.empty:
return f"No income statement data found for symbol '{ticker}'" return f"No income statement data found for symbol '{ticker}'"
@ -393,7 +393,7 @@ def get_insider_transactions(
"""Get insider transactions data from yfinance.""" """Get insider transactions data from yfinance."""
try: try:
ticker_obj = yf.Ticker(ticker.upper()) ticker_obj = yf.Ticker(ticker.upper())
data = ticker_obj.insider_transactions data = yf_retry(lambda: ticker_obj.insider_transactions)
if data is None or data.empty: if data is None or data.empty:
return f"No insider transactions data found for symbol '{ticker}'" return f"No insider transactions data found for symbol '{ticker}'"

View File

@ -73,6 +73,7 @@ DEFAULT_CONFIG = {
# Provider-specific thinking configuration (applies to all roles unless overridden) # Provider-specific thinking configuration (applies to all roles unless overridden)
"google_thinking_level": _env("GOOGLE_THINKING_LEVEL"), # "high", "minimal", etc. "google_thinking_level": _env("GOOGLE_THINKING_LEVEL"), # "high", "minimal", etc.
"openai_reasoning_effort": _env("OPENAI_REASONING_EFFORT"), # "medium", "high", "low" "openai_reasoning_effort": _env("OPENAI_REASONING_EFFORT"), # "medium", "high", "low"
"anthropic_effort": _env("ANTHROPIC_EFFORT"), # "high", "medium", "low"
# Per-role provider-specific thinking configuration # Per-role provider-specific thinking configuration
"deep_think_google_thinking_level": _env("DEEP_THINK_GOOGLE_THINKING_LEVEL"), "deep_think_google_thinking_level": _env("DEEP_THINK_GOOGLE_THINKING_LEVEL"),
"deep_think_openai_reasoning_effort": _env("DEEP_THINK_OPENAI_REASONING_EFFORT"), "deep_think_openai_reasoning_effort": _env("DEEP_THINK_OPENAI_REASONING_EFFORT"),

View File

@ -59,7 +59,7 @@ class ConditionalLogic:
if ( if (
state["risk_debate_state"]["count"] >= 3 * self.max_risk_discuss_rounds state["risk_debate_state"]["count"] >= 3 * self.max_risk_discuss_rounds
): # 3 rounds of back-and-forth between 3 agents ): # 3 rounds of back-and-forth between 3 agents
return "Risk Judge" return "Portfolio Manager"
if state["risk_debate_state"]["latest_speaker"].startswith("Aggressive"): if state["risk_debate_state"]["latest_speaker"].startswith("Aggressive"):
return "Conservative Analyst" return "Conservative Analyst"
if state["risk_debate_state"]["latest_speaker"].startswith("Conservative"): if state["risk_debate_state"]["latest_speaker"].startswith("Conservative"):

View File

@ -110,12 +110,12 @@ Adhere strictly to these instructions, and ensure your output is detailed, accur
) )
invest_judge_memory.add_situations([(situation, result)]) invest_judge_memory.add_situations([(situation, result)])
def reflect_risk_manager(self, current_state, returns_losses, risk_manager_memory): def reflect_portfolio_manager(self, current_state, returns_losses, portfolio_manager_memory):
"""Reflect on risk manager's decision and update memory.""" """Reflect on portfolio manager's decision and update memory."""
situation = self._extract_current_situation(current_state) situation = self._extract_current_situation(current_state)
judge_decision = current_state["risk_debate_state"]["judge_decision"] judge_decision = current_state["risk_debate_state"]["judge_decision"]
result = self._reflect_on_component( result = self._reflect_on_component(
"RISK JUDGE", judge_decision, situation, returns_losses "PORTFOLIO MANAGER", judge_decision, situation, returns_losses
) )
risk_manager_memory.add_situations([(situation, result)]) portfolio_manager_memory.add_situations([(situation, result)])

View File

@ -24,7 +24,7 @@ class GraphSetup:
bear_memory, bear_memory,
trader_memory, trader_memory,
invest_judge_memory, invest_judge_memory,
risk_manager_memory, portfolio_manager_memory,
conditional_logic: ConditionalLogic, conditional_logic: ConditionalLogic,
): ):
"""Initialize with required components.""" """Initialize with required components."""
@ -36,7 +36,7 @@ class GraphSetup:
self.bear_memory = bear_memory self.bear_memory = bear_memory
self.trader_memory = trader_memory self.trader_memory = trader_memory
self.invest_judge_memory = invest_judge_memory self.invest_judge_memory = invest_judge_memory
self.risk_manager_memory = risk_manager_memory self.portfolio_manager_memory = portfolio_manager_memory
self.conditional_logic = conditional_logic self.conditional_logic = conditional_logic
def setup_graph( def setup_graph(
@ -103,8 +103,8 @@ class GraphSetup:
aggressive_analyst = create_aggressive_debator(self.quick_thinking_llm) aggressive_analyst = create_aggressive_debator(self.quick_thinking_llm)
neutral_analyst = create_neutral_debator(self.quick_thinking_llm) neutral_analyst = create_neutral_debator(self.quick_thinking_llm)
conservative_analyst = create_conservative_debator(self.quick_thinking_llm) conservative_analyst = create_conservative_debator(self.quick_thinking_llm)
risk_manager_node = create_risk_manager( portfolio_manager_node = create_portfolio_manager(
self.deep_thinking_llm, self.risk_manager_memory self.deep_thinking_llm, self.portfolio_manager_memory
) )
# Create workflow # Create workflow
@ -126,7 +126,7 @@ class GraphSetup:
workflow.add_node("Aggressive Analyst", aggressive_analyst) workflow.add_node("Aggressive Analyst", aggressive_analyst)
workflow.add_node("Neutral Analyst", neutral_analyst) workflow.add_node("Neutral Analyst", neutral_analyst)
workflow.add_node("Conservative Analyst", conservative_analyst) workflow.add_node("Conservative Analyst", conservative_analyst)
workflow.add_node("Risk Judge", risk_manager_node) workflow.add_node("Portfolio Manager", portfolio_manager_node)
# Define edges # Define edges
# Start with the first analyst # Start with the first analyst
@ -178,7 +178,7 @@ class GraphSetup:
self.conditional_logic.should_continue_risk_analysis, self.conditional_logic.should_continue_risk_analysis,
{ {
"Conservative Analyst": "Conservative Analyst", "Conservative Analyst": "Conservative Analyst",
"Risk Judge": "Risk Judge", "Portfolio Manager": "Portfolio Manager",
}, },
) )
workflow.add_conditional_edges( workflow.add_conditional_edges(
@ -186,7 +186,7 @@ class GraphSetup:
self.conditional_logic.should_continue_risk_analysis, self.conditional_logic.should_continue_risk_analysis,
{ {
"Neutral Analyst": "Neutral Analyst", "Neutral Analyst": "Neutral Analyst",
"Risk Judge": "Risk Judge", "Portfolio Manager": "Portfolio Manager",
}, },
) )
workflow.add_conditional_edges( workflow.add_conditional_edges(
@ -194,11 +194,11 @@ class GraphSetup:
self.conditional_logic.should_continue_risk_analysis, self.conditional_logic.should_continue_risk_analysis,
{ {
"Aggressive Analyst": "Aggressive Analyst", "Aggressive Analyst": "Aggressive Analyst",
"Risk Judge": "Risk Judge", "Portfolio Manager": "Portfolio Manager",
}, },
) )
workflow.add_edge("Risk Judge", END) workflow.add_edge("Portfolio Manager", END)
# Compile and return # Compile and return
return workflow.compile() return workflow.compile()

View File

@ -18,12 +18,14 @@ class SignalProcessor:
full_signal: Complete trading signal text full_signal: Complete trading signal text
Returns: Returns:
Extracted decision (BUY, SELL, or HOLD) Extracted rating (BUY, OVERWEIGHT, HOLD, UNDERWEIGHT, or SELL)
""" """
messages = [ messages = [
( (
"system", "system",
"You are an efficient assistant designed to analyze paragraphs or financial reports provided by a group of analysts. Your task is to extract the investment decision: SELL, BUY, or HOLD. Provide only the extracted decision (SELL, BUY, or HOLD) as your output, without adding any additional text or information.", "You are an efficient assistant that extracts the trading decision from analyst reports. "
"Extract the rating as exactly one of: BUY, OVERWEIGHT, HOLD, UNDERWEIGHT, SELL. "
"Output only the single rating word, nothing else.",
), ),
("human", full_signal), ("human", full_signal),
] ]

View File

@ -136,7 +136,7 @@ class TradingAgentsGraph:
self.bear_memory = FinancialSituationMemory("bear_memory", self.config) self.bear_memory = FinancialSituationMemory("bear_memory", self.config)
self.trader_memory = FinancialSituationMemory("trader_memory", self.config) self.trader_memory = FinancialSituationMemory("trader_memory", self.config)
self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory", self.config) self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory", self.config)
self.risk_manager_memory = FinancialSituationMemory("risk_manager_memory", self.config) self.portfolio_manager_memory = FinancialSituationMemory("portfolio_manager_memory", self.config)
# Create tool nodes # Create tool nodes
self.tool_nodes = self._create_tool_nodes() self.tool_nodes = self._create_tool_nodes()
@ -155,7 +155,7 @@ class TradingAgentsGraph:
self.bear_memory, self.bear_memory,
self.trader_memory, self.trader_memory,
self.invest_judge_memory, self.invest_judge_memory,
self.risk_manager_memory, self.portfolio_manager_memory,
self.conditional_logic, self.conditional_logic,
) )
@ -201,6 +201,11 @@ class TradingAgentsGraph:
if reasoning_effort: if reasoning_effort:
kwargs["reasoning_effort"] = reasoning_effort kwargs["reasoning_effort"] = reasoning_effort
elif provider == "anthropic":
effort = self.config.get("anthropic_effort")
if effort:
kwargs["effort"] = effort
return kwargs return kwargs
def _create_tool_nodes(self) -> Dict[str, ToolNode]: def _create_tool_nodes(self) -> Dict[str, ToolNode]:
@ -341,8 +346,8 @@ class TradingAgentsGraph:
self.reflector.reflect_invest_judge( self.reflector.reflect_invest_judge(
self.curr_state, returns_losses, self.invest_judge_memory self.curr_state, returns_losses, self.invest_judge_memory
) )
self.reflector.reflect_risk_manager( self.reflector.reflect_portfolio_manager(
self.curr_state, returns_losses, self.risk_manager_memory self.curr_state, returns_losses, self.portfolio_manager_memory
) )
def process_signal(self, full_signal): def process_signal(self, full_signal):

View File

@ -2,9 +2,26 @@ from typing import Any, Optional
from langchain_anthropic import ChatAnthropic from langchain_anthropic import ChatAnthropic
from .base_client import BaseLLMClient from .base_client import BaseLLMClient, normalize_content
from .validators import validate_model from .validators import validate_model
_PASSTHROUGH_KWARGS = (
"timeout", "max_retries", "api_key", "max_tokens",
"callbacks", "http_client", "http_async_client", "effort",
)
class NormalizedChatAnthropic(ChatAnthropic):
"""ChatAnthropic with normalized content output.
Claude models with extended thinking or tool use return content as a
list of typed blocks. This normalizes to string for consistent
downstream handling.
"""
def invoke(self, input, config=None, **kwargs):
return normalize_content(super().invoke(input, config, **kwargs))
class AnthropicClient(BaseLLMClient): class AnthropicClient(BaseLLMClient):
"""Client for Anthropic Claude models.""" """Client for Anthropic Claude models."""
@ -16,11 +33,11 @@ class AnthropicClient(BaseLLMClient):
"""Return configured ChatAnthropic instance.""" """Return configured ChatAnthropic instance."""
llm_kwargs = {"model": self.model} llm_kwargs = {"model": self.model}
for key in ("timeout", "max_retries", "api_key", "max_tokens", "callbacks", "http_client", "http_async_client"): for key in _PASSTHROUGH_KWARGS:
if key in self.kwargs: if key in self.kwargs:
llm_kwargs[key] = self.kwargs[key] llm_kwargs[key] = self.kwargs[key]
return ChatAnthropic(**llm_kwargs) return NormalizedChatAnthropic(**llm_kwargs)
def validate_model(self) -> bool: def validate_model(self) -> bool:
"""Validate model for Anthropic.""" """Validate model for Anthropic."""

View File

@ -2,6 +2,25 @@ from abc import ABC, abstractmethod
from typing import Any, Optional from typing import Any, Optional
def normalize_content(response):
"""Normalize LLM response content to a plain string.
Multiple providers (OpenAI Responses API, Google Gemini 3) return content
as a list of typed blocks, e.g. [{'type': 'reasoning', ...}, {'type': 'text', 'text': '...'}].
Downstream agents expect response.content to be a string. This extracts
and joins the text blocks, discarding reasoning/metadata blocks.
"""
content = response.content
if isinstance(content, list):
texts = [
item.get("text", "") if isinstance(item, dict) and item.get("type") == "text"
else item if isinstance(item, str) else ""
for item in content
]
response.content = "\n".join(t for t in texts if t)
return response
class BaseLLMClient(ABC): class BaseLLMClient(ABC):
"""Abstract base class for LLM clients.""" """Abstract base class for LLM clients."""

View File

@ -2,30 +2,19 @@ from typing import Any, Optional
from langchain_google_genai import ChatGoogleGenerativeAI from langchain_google_genai import ChatGoogleGenerativeAI
from .base_client import BaseLLMClient from .base_client import BaseLLMClient, normalize_content
from .validators import validate_model from .validators import validate_model
class NormalizedChatGoogleGenerativeAI(ChatGoogleGenerativeAI): class NormalizedChatGoogleGenerativeAI(ChatGoogleGenerativeAI):
"""ChatGoogleGenerativeAI with normalized content output. """ChatGoogleGenerativeAI with normalized content output.
Gemini 3 models return content as list: [{'type': 'text', 'text': '...'}] Gemini 3 models return content as list of typed blocks.
This normalizes to string for consistent downstream handling. This normalizes to string for consistent downstream handling.
""" """
def _normalize_content(self, response):
content = response.content
if isinstance(content, list):
texts = [
item.get("text", "") if isinstance(item, dict) and item.get("type") == "text"
else item if isinstance(item, str) else ""
for item in content
]
response.content = "\n".join(t for t in texts if t)
return response
def invoke(self, input, config=None, **kwargs): def invoke(self, input, config=None, **kwargs):
return self._normalize_content(super().invoke(input, config, **kwargs)) return normalize_content(super().invoke(input, config, **kwargs))
class GoogleClient(BaseLLMClient): class GoogleClient(BaseLLMClient):

View File

@ -3,20 +3,19 @@ from typing import Any, Optional
from langchain_openai import ChatOpenAI from langchain_openai import ChatOpenAI
from .base_client import BaseLLMClient from .base_client import BaseLLMClient, normalize_content
from .validators import validate_model from .validators import validate_model
class UnifiedChatOpenAI(ChatOpenAI): class NormalizedChatOpenAI(ChatOpenAI):
"""ChatOpenAI subclass that strips temperature/top_p for GPT-5 family models. """ChatOpenAI with normalized content output.
GPT-5 family models use reasoning natively. temperature/top_p are only The Responses API returns content as a list of typed blocks
accepted when reasoning.effort is 'none'; with any other effort level (reasoning, text, etc.). This normalizes to string for consistent
(or for older GPT-5/GPT-5-mini/GPT-5-nano which always reason) the API downstream handling.
rejects these params. Langchain defaults temperature=0.7, so we must
strip it to avoid errors.
Non-GPT-5 models (GPT-4.1, xAI, Ollama, etc.) are unaffected. Also strips temperature/top_p for GPT-5 family models which use
reasoning natively and reject these params.
""" """
def __init__(self, **kwargs): def __init__(self, **kwargs):
@ -25,9 +24,31 @@ class UnifiedChatOpenAI(ChatOpenAI):
kwargs.pop("top_p", None) kwargs.pop("top_p", None)
super().__init__(**kwargs) super().__init__(**kwargs)
def invoke(self, input, config=None, **kwargs):
return normalize_content(super().invoke(input, config, **kwargs))
# Kwargs forwarded from user config to ChatOpenAI
_PASSTHROUGH_KWARGS = (
"timeout", "max_retries", "reasoning_effort",
"api_key", "callbacks", "http_client", "http_async_client",
)
# Provider base URLs and API key env vars
_PROVIDER_CONFIG = {
"xai": ("https://api.x.ai/v1", "XAI_API_KEY"),
"openrouter": ("https://openrouter.ai/api/v1", "OPENROUTER_API_KEY"),
"ollama": (None, None), # base_url comes from config
}
class OpenAIClient(BaseLLMClient): class OpenAIClient(BaseLLMClient):
"""Client for OpenAI, Ollama, OpenRouter, and xAI providers.""" """Client for OpenAI, Ollama, OpenRouter, and xAI providers.
For native OpenAI models, uses the Responses API (/v1/responses) which
supports reasoning_effort with function tools across all model families
(GPT-4.1, GPT-5). Third-party compatible providers (xAI, OpenRouter,
Ollama) use standard Chat Completions.
"""
def __init__( def __init__(
self, self,
@ -43,31 +64,34 @@ class OpenAIClient(BaseLLMClient):
"""Return configured ChatOpenAI instance.""" """Return configured ChatOpenAI instance."""
llm_kwargs = {"model": self.model} llm_kwargs = {"model": self.model}
if self.provider == "xai": # Provider-specific base URL and auth
llm_kwargs["base_url"] = "https://api.x.ai/v1" if self.provider == "ollama":
api_key = os.environ.get("XAI_API_KEY")
if api_key:
llm_kwargs["api_key"] = api_key
elif self.provider == "openrouter":
llm_kwargs["base_url"] = "https://openrouter.ai/api/v1"
api_key = os.environ.get("OPENROUTER_API_KEY")
if api_key:
llm_kwargs["api_key"] = api_key
elif self.provider == "ollama":
host = self.base_url or "http://localhost:11434" host = self.base_url or "http://localhost:11434"
# Ensure the URL ends with /v1 for OpenAI-compatible endpoint
if not host.rstrip("/").endswith("/v1"): if not host.rstrip("/").endswith("/v1"):
host = host.rstrip("/") + "/v1" host = host.rstrip("/") + "/v1"
llm_kwargs["base_url"] = host llm_kwargs["base_url"] = host
llm_kwargs["api_key"] = "ollama" # Ollama doesn't require auth llm_kwargs["api_key"] = "ollama"
elif self.provider in _PROVIDER_CONFIG:
base_url, api_key_env = _PROVIDER_CONFIG[self.provider]
llm_kwargs["base_url"] = base_url
if api_key_env:
api_key = os.environ.get(api_key_env)
if api_key:
llm_kwargs["api_key"] = api_key
elif self.base_url: elif self.base_url:
llm_kwargs["base_url"] = self.base_url llm_kwargs["base_url"] = self.base_url
for key in ("timeout", "max_retries", "reasoning_effort", "api_key", "callbacks", "http_client", "http_async_client"): # Forward user-provided kwargs
for key in _PASSTHROUGH_KWARGS:
if key in self.kwargs: if key in self.kwargs:
llm_kwargs[key] = self.kwargs[key] llm_kwargs[key] = self.kwargs[key]
return UnifiedChatOpenAI(**llm_kwargs) # Native OpenAI: use Responses API for consistent behavior across
# all model families. Third-party providers use Chat Completions.
if self.provider == "openai":
llm_kwargs["use_responses_api"] = True
return NormalizedChatOpenAI(**llm_kwargs)
def validate_model(self) -> bool: def validate_model(self) -> bool:
"""Validate model for the provider.""" """Validate model for the provider."""