This commit is contained in:
Chayton Bai 2026-04-13 07:11:49 +01:00 committed by GitHub
commit 2192546ad1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 339 additions and 1548 deletions

View File

@ -4,3 +4,7 @@ GOOGLE_API_KEY=
ANTHROPIC_API_KEY=
XAI_API_KEY=
OPENROUTER_API_KEY=
ZHIPU_API_KEY=
# Yahoo Finance proxy (required in regions where Yahoo Finance is blocked/throttled)
# YF_PROXY=http://127.0.0.1:7890

5
.gitignore vendored
View File

@ -217,3 +217,8 @@ __marimo__/
# Cache
**/data_cache/
# Reports
reports/
.claude/

87
CLAUDE.md Normal file
View File

@ -0,0 +1,87 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Project Overview
TradingAgents is a multi-agent LLM trading framework (by TauricResearch) that simulates a real trading firm using LangGraph. Agents are organized into teams: Analysts (4 types), Researchers (bull/bear debate), Trader, Risk Management (3-way debate), and Portfolio Manager.
## Build & Install
```bash
# Install (Python >=3.10, 3.13 recommended)
pip install .
# or with uv:
uv pip install .
```
Environment variables go in `.env` (see `.env.example` for required API keys).
## Running
```bash
# CLI (installed entry point)
tradingagents
# Programmatic usage (see main.py for example)
from tradingagents.graph.trading_graph import TradingAgentsGraph
ta = TradingAgentsGraph(debug=True, config=config)
_, decision = ta.propagate("NVDA", "2024-05-10")
```
## Testing
```bash
# Run all tests (unittest-based, no pytest)
python -m unittest discover -s tests
# Run a single test module
python -m unittest tests.test_model_validation
python -m unittest tests.test_ticker_symbol_handling
python -m unittest tests.test_google_api_key
```
No linter or formatter is formally configured (Ruff may be used informally).
## Architecture
### LangGraph State Machine
The core is a **StateGraph** compiled in `tradingagents/graph/setup.py`:
```
START -> Market Analyst -> Social Media Analyst -> News Analyst -> Fundamentals Analyst
-> Bull/Bear Researcher Debate (multi-round)
-> Research Manager (deep_think_llm)
-> Trader
-> Risk Debate: Aggressive/Conservative/Neutral (multi-round)
-> Portfolio Manager (deep_think_llm) -> END
```
State flows through `AgentState` (extends LangGraph `MessagesState`) defined in `tradingagents/agents/utils/agent_states.py`. Nested `InvestDebateState` and `RiskDebateState` track debate rounds.
### Two-LLM Tier System
- **`quick_think_llm`**: Analysts, researchers, risk debaters, trader
- **`deep_think_llm`**: Research Manager and Portfolio Manager (final decision-makers)
### Key Module Relationships
- **`tradingagents/graph/trading_graph.py`** — `TradingAgentsGraph` is the main orchestrator. Creates LLM clients, initializes agent nodes, compiles the graph, and exposes `propagate(ticker, date)`.
- **`tradingagents/graph/setup.py`** — `GraphSetup` wires agent nodes into the StateGraph with conditional edges from `conditional_logic.py`.
- **`tradingagents/agents/`** — Each agent type has a `create_*()` factory function returning a callable graph node. All re-exported from `__init__.py`.
- **`tradingagents/dataflows/interface.py`** — Vendor routing (strategy pattern) dispatches data calls to `yfinance` (default) or `alpha_vantage`, with automatic fallback on rate limits.
- **`tradingagents/llm_clients/factory.py`** — `create_llm_client()` dispatches to provider-specific clients (`OpenAIClient` handles openai/ollama/openrouter/xai, plus `AnthropicClient`, `GoogleClient`). All extend `BaseLLMClient` ABC.
- **`tradingagents/agents/utils/memory.py`** — `FinancialSituationMemory` uses BM25 retrieval for learning from past decisions via `reflect_and_remember()`.
### Configuration
`tradingagents/default_config.py` holds `DEFAULT_CONFIG` with: LLM provider/model settings, debate round limits, data vendor routing, results directory (overridable via `TRADINGAGENTS_RESULTS_DIR` env var), and provider-specific thinking configs.
### CLI
`cli/main.py` is a Typer app (~50K) providing an interactive Rich UI. Entry point registered as `tradingagents` console script.
### Docker
Multi-stage Dockerfile (python:3.12-slim). `docker-compose.yml` has optional Ollama profile (`docker compose --profile ollama up`).

View File

@ -240,6 +240,7 @@ def select_llm_provider() -> tuple[str, str | None]:
("Google", None), # google-genai SDK manages its own endpoint
("Anthropic", "https://api.anthropic.com/"),
("xAI", "https://api.x.ai/v1"),
("Zhipu", "https://open.bigmodel.cn/api/coding/paas/v4"),
("Openrouter", "https://openrouter.ai/api/v1"),
("Ollama", "http://localhost:11434/v1"),
]

View File

@ -23,6 +23,7 @@ from .alpha_vantage import (
get_global_news as get_alpha_vantage_global_news,
)
from .alpha_vantage_common import AlphaVantageRateLimitError
from yfinance.exceptions import YFRateLimitError
# Configuration and routing logic
from .config import get_config
@ -156,7 +157,7 @@ def route_to_vendor(method: str, *args, **kwargs):
try:
return impl_func(*args, **kwargs)
except AlphaVantageRateLimitError:
continue # Only rate limits trigger fallback
except (AlphaVantageRateLimitError, YFRateLimitError):
continue # Rate limits trigger fallback to next vendor
raise RuntimeError(f"No available vendor for '{method}'")

View File

@ -11,8 +11,13 @@ from .config import get_config
logger = logging.getLogger(__name__)
# Apply proxy from environment for yfinance requests
_yf_proxy = os.environ.get("YF_PROXY")
if _yf_proxy:
yf.set_config(proxy=_yf_proxy)
def yf_retry(func, max_retries=3, base_delay=2.0):
def yf_retry(func, max_retries=5, base_delay=5.0):
"""Execute a yfinance call with exponential backoff on rate limits.
yfinance raises YFRateLimitError on HTTP 429 responses but does not

View File

@ -15,7 +15,7 @@ def create_llm_client(
"""Create an LLM client for the specified provider.
Args:
provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter)
provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter, zhipu)
model: Model name/identifier
base_url: Optional base URL for API endpoint
**kwargs: Additional provider-specific arguments
@ -34,7 +34,7 @@ def create_llm_client(
"""
provider_lower = provider.lower()
if provider_lower in ("openai", "ollama", "openrouter"):
if provider_lower in ("openai", "ollama", "openrouter", "zhipu"):
return OpenAIClient(model, base_url, provider=provider_lower, **kwargs)
if provider_lower == "xai":

View File

@ -77,6 +77,18 @@ MODEL_OPTIONS: ProviderModeOptions = {
("Qwen3:latest (8B, local)", "qwen3:latest"),
],
},
"zhipu": {
"quick": [
("GLM-4.5-Air - Fast, cost-effective", "glm-4.5-air"),
("GLM-4.7 - Strong coding, 200K context", "glm-4.7"),
("GLM-5.1 - Latest flagship, reasoning", "glm-5.1"),
],
"deep": [
("GLM-5.1 - Latest flagship, reasoning", "glm-5.1"),
("GLM-4.7 - Strong coding, 200K context", "glm-4.7"),
("GLM-4.5-Air - Fast, cost-effective", "glm-4.5-air"),
],
},
}

View File

@ -29,11 +29,12 @@ _PROVIDER_CONFIG = {
"xai": ("https://api.x.ai/v1", "XAI_API_KEY"),
"openrouter": ("https://openrouter.ai/api/v1", "OPENROUTER_API_KEY"),
"ollama": ("http://localhost:11434/v1", None),
"zhipu": ("https://open.bigmodel.cn/api/coding/paas/v4", "ZHIPU_API_KEY"),
}
class OpenAIClient(BaseLLMClient):
"""Client for OpenAI, Ollama, OpenRouter, and xAI providers.
"""Client for OpenAI, Ollama, OpenRouter, xAI, and Zhipu providers.
For native OpenAI models, uses the Responses API (/v1/responses) which
supports reasoning_effort with function tools across all model families

1759
uv.lock

File diff suppressed because it is too large Load Diff