add new mode 'openai_compatible'

This commit is contained in:
Jitin Vishwakarma 2025-11-15 15:24:29 -06:00
parent 13b826a31d
commit 340dc5c05f
10 changed files with 402 additions and 6 deletions

View File

@ -1,2 +1,3 @@
ALPHA_VANTAGE_API_KEY=alpha_vantage_api_key_placeholder
OPENAI_API_KEY=openai_api_key_placeholder
OPENAI_API_KEY=openai_api_key_placeholder
GOOGLE_API_KEY=google_api_key_placeholder

View File

@ -114,11 +114,15 @@ pip install -r requirements.txt
### Required APIs
You will need the OpenAI API for all the agents, and [Alpha Vantage API](https://www.alphavantage.co/support/#api-key) for fundamental and news data (default configuration).
You will need the OpenAI API for all the agents, and [Alpha Vantage API](https://www.alphavantage.co/support/#api-key) for fundamental and news data (default configuration). You can also use any OpenAI-compatible API endpoint, such as a local LLM server or Google Vertex AI.
```bash
export OPENAI_API_KEY=$YOUR_OPENAI_API_KEY
export ALPHA_VANTAGE_API_KEY=$YOUR_ALPHA_VANTAGE_API_KEY
# For OpenAI-compatible endpoints (e.g., Vertex AI)
export OPENAI_COMPATIBLE_API_KEY=$YOUR_API_KEY
export OPENAI_COMPATIBLE_BASE_URL=$YOUR_API_BASE_URL
```
Alternatively, you can create a `.env` file in the project root with your API keys (see `.env.example` for reference):

View File

@ -144,6 +144,9 @@ def select_shallow_thinking_agent(provider) -> str:
("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"),
("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
],
"openai_compatible": [
("Gemini Pro", "gemini-pro"),
],
"openrouter": [
("Meta: Llama 4 Scout", "meta-llama/llama-4-scout:free"),
("Meta: Llama 3.3 8B Instruct - A lightweight and ultra-fast variant of Llama 3.3 70B", "meta-llama/llama-3.3-8b-instruct:free"),
@ -207,6 +210,9 @@ def select_deep_thinking_agent(provider) -> str:
("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
("Gemini 2.5 Pro", "gemini-2.5-pro-preview-06-05"),
],
"openai_compatible": [
("Gemini Pro", "gemini-pro"),
],
"openrouter": [
("DeepSeek V3 - a 685B-parameter, mixture-of-experts model", "deepseek/deepseek-chat-v3-0324:free"),
("Deepseek - latest iteration of the flagship chat model family from the DeepSeek team.", "deepseek/deepseek-chat-v3-0324:free"),
@ -246,6 +252,7 @@ def select_llm_provider() -> tuple[str, str]:
("OpenAI", "https://api.openai.com/v1"),
("Anthropic", "https://api.anthropic.com/"),
("Google", "https://generativelanguage.googleapis.com/v1"),
("OpenAI Compatible", "http://127.0.0.1:8080/v1"),
("Openrouter", "https://openrouter.ai/api/v1"),
("Ollama", "http://localhost:11434/v1"),
]

165
docs/GPT5-Arch-Summ.md Normal file
View File

@ -0,0 +1,165 @@
# SPEC-1-TradingAgents-Codebase-Review
## Background
A careful review was performed of the `tradingAgents` project provided in the uploaded ZIP. The codebase implements a multi-agent trading research framework that composes specialized analyst, researcher, risk-manager and trader components into a directed-graph execution pipeline built on top of `langgraph` and `langchain` (OpenAI connectors are used). The purpose is research and analysis (not financial advice) and the system designs an opinionated, debatable research process combining news, social sentiment, fundamentals, and technical analysis for investment recommendations.
## Files & repository layout (high level)
Key top-level entries:
* `README.md` — long project README with architecture overview, CLI screenshots, and examples.
* `main.py` — example runner that constructs a graph using `DEFAULT_CONFIG` and the `TradingAgentsGraph` abstraction.
* `tradingagents/` — main package with subpackages:
* `agents/` — agent factories and utilities (analysts, researchers, risk_mgmt, managers, trader, utils)
* `dataflows/` — vendor adapters (yfinance, alpha_vantage, google news, openai backends, reddit utils, etc.)
* `graph/` — the orchestration pieces built on `langgraph` (setup, conditional logic, propagation, reflection, trading_graph)
* `default_config.py` — default configuration and vendor selections
* `docs/` — `InitAnalysis.md` and `Prompts.md` containing higher-level analysis and prompt templates.
* `requirements.txt` / `pyproject.toml` — dependency lists (langchain-openai, langgraph, chromadb, yfinance, etc.)
## High-level architecture & execution model
1. **Graph-First Orchestration**: The core runtime is a stateful directed graph implemented with `langgraph`. The graph contains *tool nodes* (agents, data tools, wrappers) and *message states* which pass agent outputs forward. There are clear `START` / `END` states and per-node routing decisions.
2. **Agents-as-ToolNodes**: Agents are implemented as factory functions that accept an LLM (ChatOpenAI wrappers are used) and return functions that operate on the shared `state` dictionary. These are wired into `ToolNode`s in `GraphSetup`.
3. **Two-Tier LLM Strategy**: The code distinguishes a `quick_think_llm` for fast, less expensive calls and a `deep_think_llm` for longer, more contemplative tasks (e.g., judge/reflector). Configurable through `DEFAULT_CONFIG`.
4. **Specialized Agent Roles**:
* Analysts: `market`, `news`, `social_media`, `fundamentals` — collect & synthesize different modalities.
* Researchers: `bull` and `bear` researchers that debate investment theses.
* Risk Debators: `aggressive`, `conservative`, `neutral` — produce risk-framed arguments.
* Managers: `research_manager` (coordinates debate/judge flow) and `risk_manager` (integrates risk decisions).
* Trader: a `trader` node that translates recommendations into actions/instructions.
5. **Dataflow Layer**: `tradingagents/dataflows` contains vendor adapters and wrappers: yfinance, alpha_vantage, Google/GoogleNews scraping utilities, Reddit scrapers, an OpenAI-based news summarizer, and several helpers for indicators. The design allows swapping vendors via configuration.
6. **Memory / Long-Term Storage**: `agents/utils/memory.py` implements `FinancialSituationMemory` backed by `chromadb` and OpenAI embeddings. Memories can be queried to retrieve past situations and recommendations.
7. **Reflection & Propagation**: There are distinct components that manage state propagation across nodes (`Propagator`), reflection for learning from outcomes (`Reflector`), and signal processing for derived signals (`SignalProcessor`). These are orchestrated by `TradingAgentsGraph`.
8. **Prompt Templates**: A large number of prompt templates and instructions are baked into manager and agent node implementations (docs/Prompts.md contains many of them). The system depends heavily on prompting to shape agent behavior.
## Data flow (simplified)
1. Input (ticker + trade_date + config) -> graph `START`.
2. Analyst nodes call dataflow adapters to fetch: OHLC history (yfinance/alpha_vantage), indicators, Reddit posts, news articles, macro data.
3. Each analyst returns a report string placed into `state` (e.g., `market_report`, `news_report`, `sentiment_report`, `fundamentals_report`).
4. Researcher agents (bull/bear) read these reports and start a debate sequence coordinated by `research_manager` — repeated message passing through the graph with conditional branching.
5. Risk debators augment the debate with risk framing; `research_manager` uses `judge` logic to decide whether to continue debate or finalize.
6. `invest_judge` / `invest_manager` produce a recommendation and a trader plan; `trader` node converts that into final actions.
7. `Reflector` runs post-hoc analyses and stores reflections/memories in `chromadb`.
## Integrations & External Dependencies
* **LLM**: `langchain_openai.ChatOpenAI` is used (wrapper for OpenAI). The code also contains references to different model names (e.g., `gpt-4o-mini`). The `DEFAULT_CONFIG` keeps model names configurable.
* **langgraph**: Graph orchestration is built on `langgraph` and its `ToolNode`, `StateGraph` primitives.
* **Data vendors**: `yfinance`, `alpha_vantage` (adapter), `finnhub` wrappers (present in requirements), Google News scrapers, Reddit (`praw`) utilities, local offline adapters for reproducible runs.
* **Vector DB**: `chromadb` for memory; embeddings are fetched via the OpenAI client (abstracted in `memory.py`) — uses `text-embedding-3-small` or `nomic-embed-text` depending on backend.
* **Misc**: `stockstats` for indicators, `pandas`, `backtrader` listed as dependency (not deeply wired in core flow but present), `questionary` & CLI tools.
## Observability, Telemetry & Logging
* There is **no centralized telemetry or metrics system** in the codebase (no Prometheus, no Sentry, and no logging imports were found). Internal state is tracked in Python structures (`TradingAgentsGraph.log_states_dict`) and the final state is serializable to JSON.
* Short-term print/console oriented flows are used (CLI screenshots showing progress). This implies limited production-grade observability and minimal error/tracing capabilities.
## Security & Secrets
* The project relies on environment variables (an `.env.example` is present). Secrets are expected to be loaded via `dotenv` in `main.py`.
* Several vendor adapters will require API keys (Alpha Vantage, Finnhub, OpenAI, etc.). There is no central secrets manager integration or explicit advice for safely rotating keys.
## Design patterns & code organization
* **Graph / Node pattern**: Orchestration is expressed as ToolNode nodes; nodes operate on a shared mutable `state` dict. This is an event/state propagation pattern.
* **Adapter pattern**: The `dataflows` directory acts as vendor adapters with a unified interface (`get_YFin_data_online`, `get_stock`, `get_news`, etc.). This enables vendor swapping through `DEFAULT_CONFIG`.
* **Factory functions**: Agents are provided through `create_*` functions that accept an LLM and return node behavior; this makes injection of different LLM wrappers straightforward.
* **Strategy / Debate**: The debate mechanism between bull/bear researchers plus judge is implemented as a state machine using Graph conditional transitions.
* **Memory as a service**: Memory is encapsulated in `FinancialSituationMemory` with query & add methods, following a simple repository pattern.
## Strengths & Good choices
* Clear separation between **data adapters**, **agent logic**, and **orchestration** (graph) — makes the codebase understandable and swappable.
* Two-LLM tiering for cost/performance tradeoffs is pragmatic.
* Use of `chromadb` + embeddings to record and retrieve prior situations is a sensible approach for agent memory and long-term learning.
* `langgraph` + `langchain` give a native architectural fit for agentic message-passing workflows.
* Extensive prompt engineering (templates + docs) already present.
## Risks, gaps & weaknesses (observations)
1. **Observability & Logging**: No structured logging or metrics. Hard to debug runs in production or trace LLM costs and latencies.
2. **Error handling & retries**: Vendor adapters appear to be synchronous and may lack robust retry/backoff logic for network/API failures.
3. **Testing**: There is little evidence of unit or integration tests for critical components (no tests besides `test.py` placeholder).
4. **Concurrency & throughput**: The design uses synchronous LLM calls and blocking data fetches — scaling to many tickers or parallel research runs will be slow.
5. **Secrets management**: Keys fetched from `.env` — no mention of vault integration or per-environment configurations.
6. **Vector DB persistence**: `chromadb` client is created without persistent configuration. Depending on how chroma is set up, memory may be ephemeral. Also the embedding provider selection is hard-coded per `backend_url`.
7. **Resource usage & cost controls**: No built-in quota or estimation of LLM tokens, cost accounting, or budgets per run.
8. **Tight coupling to `langgraph`**: Heavy use of `langgraph` primitives makes migration to another orchestrator non-trivial.
9. **Missing telemetry for LLM calls**: No per-prompt logging of response latency, token usage, or errors.
10. **Limited type enforcement**: Project mixes typed dicts with untyped state dict usage which can lead to runtime errors.
## Concrete recommendations (quick wins)
### Short-term (low-effort, high-impact)
* Add structured logging (Python `logging`) at entrypoints: agent invocations, external vendor calls, and LLM call wrappers. Log context (ticker, date, node name).
* Add simple metrics counters (e.g., via Prometheus client or expose a `/metrics` endpoint) for counts of LLM calls, failures, vendor call latencies.
* Centralize LLM invocation through a single wrapper that records token usage, latency, and catches common API errors — makes adding retries and fallback models easier.
* Implement retry/backoff for external HTTP/API calls using `tenacity` or `httpx` with retry.
* Persist chromadb collection to disk or configure a persistent deployment so memories survive across runs.
* Add basic unit tests for `dataflows` adapters (mock vendor responses) and `agents` deterministic pieces.
### Medium-term
* Introduce asynchronous execution (async/await) and batch operations when requesting market data for multiple tickers.
* Add a plugin-style adapter registry for vendor integrations so new providers can be registered without touching internal imports.
* Introduce a pluggable telemetry backend with traces for each graph run (OpenTelemetry) and LLM spans.
* Add a configuration profile system for environments (dev/staging/prod) and secret management guidance (Vault, AWS Secrets Manager, or similar).
* Improve type coverage with `mypy` and stricter typed data models for the shared `state` (pydantic models could help).
### Refactors & larger changes
* Decouple orchestration from `langgraph` with an internal thin interface, so other orchestrators could be used in future.
* Abstract vector DB usage behind an interface with multiple implementations (Chroma, Pinecone, etc.) and configuration-driven provider selection.
* Add a cost-control and LLM budgeter that estimates tokens and optionally refuses expensive calls beyond configured budgets.
* Create end-to-end integration/infrastructure tests that run a short scenario with mocked LLMs to validate the graph transitions and state outputs.
## Suggested file-by-file highlights (non-exhaustive)
* `tradingagents/graph/setup.py` — central wiring that constructs `ToolNode`s; good place to centralize LLM wrapper.
* `tradingagents/agents/*` — agent factories: prefer to make these pure functions that only transform inputs and call an injected `llm_api` helper. Avoid reading global config inside the node body.
* `tradingagents/dataflows/*` — vendor adapters: add standardized error classes, timeouts and retry.
* `tradingagents/agents/utils/memory.py` — ensure Chroma collection initialization accepts persistence directory and does not create ephemeral in-memory collections silently.
* `tradingagents/graph/trading_graph.py` — the Graph runtime; add hooks for metrics, logging and structured state snapshots.
## PlantUML: Simplified component diagram
```plantuml
@startuml
package "Client / CLI" {
[User Input]
}
package "Orchestrator" {
[TradingAgentsGraph] --> [GraphSetup]
[TradingAgentsGraph] --> [Propagator]
[TradingAgentsGraph] --> [Reflector]
}
package "Agents" {
[Analysts] --> [Researcher(s)]
[Researcher(s)] --> [Research Manager]
[Risk Debators] --> [Risk Manager]
[Research Manager] --> [Trader]
}
package "Data & Services" {
[YFinance / AlphaVantage / Finnhub]
[Google News / Reddit]
[OpenAI (LLM & Embeddings)]
[ChromaDB]
}
[Analysts] --> [YFinance / AlphaVantage / Finnhub]
[Analysts] --> [Google News / Reddit]
[Research Manager] --> [OpenAI (LLM & Embeddings)]
[Agents] --> [ChromaDB]
@enduml
```

56
docs/InitAnalysis.md Normal file
View File

@ -0,0 +1,56 @@
# Codebase Analysis: TradingAgents
This document outlines the purpose, architecture, and key components of the "TradingAgents" codebase.
### Purpose and Approach
The "TradingAgents" project is a sophisticated, multi-agent framework designed for financial analysis and trading research. It simulates a trading firm's decision-making process by orchestrating a team of specialized AI agents, each with a distinct role.
The analysis workflow is structured as follows:
1. **Analyst Team**: A group of agents gathers and synthesizes data from various sources:
* **Market Analyst**: Focuses on market trends and technical indicators.
* **News Analyst**: Processes financial news.
* **Social Media Analyst**: Gathers sentiment from social platforms like Reddit.
* **Fundamentals Analyst**: Analyzes company fundamentals (e.g., earnings reports).
2. **Researcher Team**: Two agents, one with a "bullish" (optimistic) and one with a "bearish" (pessimistic) perspective, debate the findings of the analyst team to form a balanced investment thesis.
3. **Trader Agent**: Based on the research, this agent formulates a specific, actionable trading plan.
4. **Risk Management Team**: A team of debaters assesses the proposed trade's potential risks from aggressive, conservative, and neutral viewpoints.
5. **Portfolio Manager**: A final agent gives the ultimate approval or rejection for the trade.
A standout feature is the system's ability to **learn from its outcomes**. After a trade is executed (or simulated), the framework reflects on the resulting profit or loss and updates the long-term memory of the agents to refine future decisions.
### Tech Stack and Dependencies
The project is built on a modern Python stack, leveraging several powerful libraries and external services.
* **Core Framework**:
* **Python 3.10+**
* **`langgraph`**: The central library used to construct and manage the stateful directed graph of AI agents (the graph contains cycles for the multi-round debate flows, so it is not strictly acyclic).
* **LLM Integration**:
* **`langchain`**: Provides the core abstractions for interacting with Large Language Models.
* Integrations for multiple LLM providers are included: `langchain-openai`, `langchain-anthropic`, and `langchain-google-genai`.
* **Financial Data Sources**:
* The system is designed to be data-source agnostic. It integrates with a wide array of financial data APIs, including:
* `alpha_vantage`
* `yfinance` (Yahoo Finance)
* `praw` (Reddit)
* `feedparser` (for RSS news feeds)
* `eodhd`, `akshare`, `tushare`, `finnhub-python`
* **Command-Line Interface (CLI)**:
* An interactive and user-friendly CLI is built using:
* `typer`
* `rich` (for rich text and beautiful formatting in the terminal)
* `questionary` (for interactive prompts)
* **Data Handling & Storage**:
* **`pandas`**: Used for data manipulation and analysis.
* **`chromadb`**: Likely used for vector-based memory storage for the agents (e.g., for Retrieval Augmented Generation).
* **`redis`**: Used for caching or state management.
### Features and Usage
* **Multi-Agent System**: Decomposes the complex task of financial analysis into smaller, specialized roles, allowing for deeper and more nuanced insights.
* **High Configurability**: Key parameters, such as the LLMs to use, the preferred data vendors, and agent behaviors, are centralized in the `tradingagents/default_config.py` file, making the system easy to customize.
* **Interactive CLI**: The primary method of interaction is via the command line (`python -m cli.main`). This tool guides the user through setting up an analysis (e.g., selecting a stock ticker, date range, and agents) and displays a live dashboard showing the progress and reasoning of each agent in real-time.
* **Reflective Learning**: The framework includes a mechanism for the agents to learn from their successes and failures, creating a feedback loop for continuous improvement.
* **Modular and Extensible Architecture**: The codebase is well-structured, with a clear separation of concerns between the agent graph logic (`tradingagents/graph`), the agent definitions (`tradingagents/agents`), and the data fetching layer (`tradingagents/dataflows`). This modularity makes the system flexible and easier to extend with new agents or data sources.

3
docs/Prompts.md Normal file
View File

@ -0,0 +1,3 @@
Review this entire codebase in the tradingAgents project. Review both the code and documents such as readme.md. Understand the approach and design aspects, the capabilities, data flow, connectivity with other sub-systems and APIs, any specific agentic framework that is used, telemetry and observation methodologies if any, and any specific design patterns used.
Once you have a comprehensive knowledge and grasp on this project, we will start working on a few major enhancements.
No real code changes needed just yet, till I ask you for it. But list your understanding on this project in as much details as possible here so I benefit from it. Also list your observations in terms of upgrades and refactor that seems obvious.

142
docs/claude-sonnet-45.md Normal file
View File

@ -0,0 +1,142 @@
Here is an exhaustive breakdown and architectural overview of the **tradingAgents** project, based on the complete codebase and supporting files:
***
### 1. **High-Level Goal and Approach**
**TradingAgents** is an enterprise-ready, modular, multi-agent LLM financial trading framework. It orchestrates a team of autonomous agents specialized in financial analysis, market research, portfolio management, risk assessment, and decision making, using state-of-the-art agentic design patterns and LLM integration (OpenAI, Anthropic, Google Gemini, etc.).
***
### 2. **Architectural Structure**
#### **Agentic Framework & Orchestration**
- **Core Frameworks:** Utilizes **LangGraph** for agent graph orchestration (dynamic state management, conditional edge transitions).
- **Agents:** Each agent is a specialized node:
- **Market Analyst**
- **Social Analyst**
- **News Analyst**
- **Fundamentals Analyst**
- **Bull and Bear Researchers**
- **Research Manager**
- **Trader**
- **Risk Analysts (Risky/Neutral/Safe)**
- **Risk Manager/Portfolio Manager**
- **Team Workflow:**
- Teams are structured in functional, research, trading, risk management, and portfolio management hierarchies.
- Each agent/team performs a workflow step, aggregates and debates, then passes findings to the next team via well-defined graph transitions.
#### **Agent Design Patterns**
- **Debate Cycles:** Research and risk team debates are multi-round, with roles (Bull/Bear/Risk variants) using historical reasoning plus LLM-generated arguments.
- **Conditional Logic:** Dynamic state-driven transitions between team agents (e.g., debate not progressing → escalate to manager; risk cycles switch based on analyst type).
- **Reflection and Memory:** Dedicated modules for reflecting on decisions, updating agent/team memory for continual learning/improvement post-trade.
#### **Modularity**
- Each agent or module is independent yet shares a memory and API access layer, encouraging plug-and-play extensibility.
***
### 3. **Data Flow & Subsystem Integration**
#### **Data Source Integration**
- **APIs/Financial Vendors:** Pluggable adapters for yfinance, AlphaVantage, Finnhub, EODHD, Akshare, Tushare, and more.
- **Technical/Fundamental/News/Sentiment:** Each analyst agent fetches and processes different types of data (indicators, fundamentals, news, social sentiment) using abstracted data access utilities.
- **Config-Driven:** All data source/vendor selection is runtime-configurable via CLI or config files, supporting rapid swap-in of new sources.
#### **Agent Orchestration**
- **Graph Orchestration:** LangGraph's `StateGraph` compiles agent workflows, agent states, conditional transitions, and result routing.
- **State Propagation:** The `Propagator` initializes and transmits agent states, input data (company, date, debate state), and downstream results.
- **Signal Processing:** A signal processor extracts actionable trading signals (BUY/SELL/HOLD) from verbose multi-agent LLM output, enforcing clarity and standardization.
#### **Reflection and Memory Layer**
- Persistent agent memories (e.g., **FinancialSituationMemory**) ensure historical decisions, contexts, errors, and lessons learned are available for review and improvement.
- **Reflector** module: On every major decision, it records details for future lookup and learning.
***
### 4. **Agentic Capabilities**
- **Deep/Quick-Learning Models:** CLI lets users select providers/models for 'deep' vs. 'quick' thinkers (e.g., Claude Sonnet vs. GPT-4o-mini vs. Gemini Flash).
- **Multi-Round Debates & Voting:** Managers arbitrate debates, analysts debate and propose, risk analysts argue, trend toward consensus.
- **Comprehensive Reporting:** Each agent outputs fine-grained Markdown reports, tabulated key points, and a final proposal.
- **Human-in-the-Loop:** CLI includes steps for interactive analyst selection, research depth, and runtime parameter tweaking.
***
### 5. **Telemetry, Observation, and Evaluation**
- **Status Dashboards:** Rich CLI interfaces display team/agent status, recent messages, decisions, tool/LLM calls, and trace progression.
- **Trace and Reporting:** Full audit trail of message history, tool usage, debate, and decision reasoning.
- **Eval & Validation:** All agent outputs, decisions, trade signals, and reflections are tracked for post-hoc analysis, continuous improvement, and future retraining integration.
***
### 6. **Relevant Design Patterns**
| Pattern | Usage |
|----------------------------|--------------------------------------------|
| Multi-Agent Orchestration | Modular agents, each with focused roles |
| State Graph (LangGraph) | Dynamic workflow execution |
| Reflection & Memory | Self-correcting, continual learning agents |
| Debate & Majority Voting | For risk/research consensus building |
| Tool Abstraction | Vendor data sources, LLM models |
| Configurable Providers | CLI/config-driven agent selection |
| Separation of Concerns | Clear agent/sub-system boundaries |
| Observability/Tracing | Rich CLI, record status, audit, messages |
| Team-based Workflow | Analyst → Research → Trading → Risk → PM |
***
### 7. **Current Telemetry & Observability**
- **CLI Progress Panels:** Real-time view of teams/agents and statuses (pending, in-progress, completed, error).
- **Messages/Tool Calls Table:** Chronological tool and agent messages with truncation for context and traceability.
- **Report Panels:** Section-by-section breakdown of market, sentiment, news, fundamentals, plans, and final trade decision.
***
### 8. **Obvious Upgrades & Refactoring Opportunities**
- **Unified Telemetry API:** Abstract CLI telemetry into a backend service/API for seamless integration with dashboards or observability platforms (e.g., OpenTelemetry).
- **Agent Health Monitoring:** Add explicit agent 'heartbeat' and anomaly detection (timing, error patterns, decision volatility).
- **Async/Parallel Agent Execution:** Refactor synchronous blocks to enable agent process parallelization, improving performance during debates and analysis.
- **Plug-and-Play External Agent Registry:** Support runtime dynamic agent loading/unloading — e.g., new 3rd party LLM agent adapters.
- **Multi-agent Ensemble Voting:** Expand majority voting logic for agent outputs (especially for PM/risk evaluation).
- **Enhanced Memory Structuring:** Move from plain-text memory reflection to structured, queryable vector stores.
- **Explainability View:** Integrate explainability dashboards showing why agents made certain decisions at each debate/analysis step.
- **Unified Config Service:** Migrate CLI/config YAMLs into a discoverable configuration microservice.
- **Observability Hooks:** Insert lifecycle hooks to stream events to SIEM/logging tools.
- **Automated Evaluation Suites:** Complement report generation with automated scenario backtesting and error analysis.
***
### 9. **Summary Table — Modular System Mapping**
| Subsystem | Description | Refactor Opportunity |
|--------------------------|----------------------------------------------------------------|----------------------------------|
| Market/Fundamental/News | Data collection & analysis agents | Add real-time streaming support |
| Debate/Decision Agents | Encapsulates reasoning and multi-round voting | Optimize for parallel execution |
| Risk/Portfolio Agents | Makes multi-perspective risk decisions | Add automated scenario tests |
| Reflection/Memory | Records decisions, mistakes, lessons | Upgrade to vector DB |
| Telemetry/CLI UI | Provides report/status panels and traces | Abstract as headless API |
| Config/Providers | Dynamic agent/data source/LLM selection | Unified microservice config |
| Orchestrator/Graph | LangGraph-powered sequence/state driver | Optimize conditional edges logic |
***
## **Final Notes**
**TradingAgents** demonstrates a sophisticated, multi-agent, agentic pattern-driven LLM solution for financial analysis and trading within the enterprise domain. It combines modularity, orchestration, deep observability, and extensibility principles typical of next-gen agentic architectures.
**Major strengths:**
- Clean modular separation
- Advanced debate/reflection/memory logic
- Deep configurable LLM and tool support
- Robust CLI for human-in-the-loop use
**Obvious next steps:** Refactor for parallelism, telemetry decoupling, agent registry, explainability dashboards, and production-grade evaluation/monitoring.
**If you want to deep dive into any subsystem or propose a specific enhancement, let me know which direction to focus next!**
[1](https://ppl-ai-file-upload.s3.amazonaws.com/web/direct-files/attachments/33584582/428405df-6391-4ef6-ab2d-445079ecd48f/tradingAgents.txt)

6
package-lock.json generated Normal file
View File

@ -0,0 +1,6 @@
{
"name": "TradingAgents",
"lockfileVersion": 3,
"requires": true,
"packages": {}
}

View File

@ -9,10 +9,11 @@ DEFAULT_CONFIG = {
"dataflows/data_cache",
),
# LLM settings
"llm_provider": "openai",
"deep_think_llm": "o4-mini",
"quick_think_llm": "gpt-4o-mini",
"backend_url": "https://api.openai.com/v1",
"llm_provider": "openai_compatible",
"deep_think_llm": "gemini-pro",
"quick_think_llm": "gemini-pro",
"openai_compatible_api_key": os.getenv("OPENAI_COMPATIBLE_API_KEY"),
"openai_compatible_base_url": os.getenv("OPENAI_COMPATIBLE_BASE_URL"),
# Debate and discussion settings
"max_debate_rounds": 1,
"max_risk_discuss_rounds": 1,

View File

@ -75,6 +75,17 @@ class TradingAgentsGraph:
if self.config["llm_provider"].lower() == "openai" or self.config["llm_provider"] == "ollama" or self.config["llm_provider"] == "openrouter":
self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
self.quick_thinking_llm = ChatOpenAI(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
elif self.config["llm_provider"].lower() == "openai_compatible":
self.deep_thinking_llm = ChatOpenAI(
model=self.config["deep_think_llm"],
api_key=os.environ.get("OPENAI_COMPATIBLE_API_KEY"),
base_url=os.environ.get("OPENAI_COMPATIBLE_BASE_URL"),
)
self.quick_thinking_llm = ChatOpenAI(
model=self.config["quick_think_llm"],
api_key=os.environ.get("OPENAI_COMPATIBLE_API_KEY"),
base_url=os.environ.get("OPENAI_COMPATIBLE_BASE_URL"),
)
elif self.config["llm_provider"].lower() == "anthropic":
self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
self.quick_thinking_llm = ChatAnthropic(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])