Compare commits
24 Commits
7958654394...6a3df3e65a
| Author | SHA1 | Date |
|---|---|---|
| | 6a3df3e65a | |
| | fa4d01c23a | |
| | b0f6058299 | |
| | 59d6b2152d | |
| | ccf375eafd | |
| | 138c077cc6 | |
| | 2648f91e09 | |
| | 59a2212ff7 | |
| | 5d09c4c984 | |
| | 6f5610d82b | |
| | 2ce7e2b6d0 | |
| | 3abff48c7d | |
| | 5ac72567be | |
| | f3d49335d1 | |
| | 698b4ede4a | |
| | 92b527b60a | |
| | 0c4a912b0a | |
| | b3a087286b | |
| | 95e10bd1fd | |
| | 2466ec3c90 | |
| | 85fbc48ede | |
| | dbd2c658e5 | |
| | 03d7752d46 | |
| | ae2c813d8a | |

@@ -0,0 +1,5 @@
+# Azure OpenAI
+AZURE_OPENAI_API_KEY=
+AZURE_OPENAI_ENDPOINT=https://your-resource-name.openai.azure.com/
+AZURE_OPENAI_DEPLOYMENT_NAME=
+# OPENAI_API_VERSION=2024-10-21  # optional, required for non-v1 API

@@ -3,4 +3,7 @@ OPENAI_API_KEY=
 GOOGLE_API_KEY=
 ANTHROPIC_API_KEY=
 XAI_API_KEY=
+DEEPSEEK_API_KEY=
+DASHSCOPE_API_KEY=
+ZHIPU_API_KEY=
 OPENROUTER_API_KEY=

@@ -140,10 +140,15 @@ export OPENAI_API_KEY=...      # OpenAI (GPT)
 export GOOGLE_API_KEY=...      # Google (Gemini)
 export ANTHROPIC_API_KEY=...   # Anthropic (Claude)
 export XAI_API_KEY=...         # xAI (Grok)
+export DEEPSEEK_API_KEY=...    # DeepSeek
+export DASHSCOPE_API_KEY=...   # Qwen (Alibaba DashScope)
+export ZHIPU_API_KEY=...       # GLM (Zhipu)
 export OPENROUTER_API_KEY=...  # OpenRouter
 export ALPHA_VANTAGE_API_KEY=... # Alpha Vantage
 ```
 
+For enterprise providers (e.g. Azure OpenAI, AWS Bedrock), copy `.env.enterprise.example` to `.env.enterprise` and fill in your credentials.
+
 For local models, configure Ollama with `llm_provider: "ollama"` in your config.
 
 Alternatively, copy `.env.example` to `.env` and fill in your keys:

cli/main.py (43 changes)

@@ -6,8 +6,9 @@ from functools import wraps
 from rich.console import Console
 from dotenv import load_dotenv
 
-# Load environment variables from .env file
+# Load environment variables
 load_dotenv()
+load_dotenv(".env.enterprise", override=False)
 from rich.panel import Panel
 from rich.spinner import Spinner
 from rich.live import Live

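The two `load_dotenv` calls layer the files deliberately: `.env` loads first, and because the second call passes `override=False` (python-dotenv's default), keys already present in the environment, whether from the shell or from `.env`, win over `.env.enterprise`. A minimal sketch of that precedence, with hypothetical file contents:

```python
# Sketch of python-dotenv layering semantics (file contents are hypothetical).
import os
from dotenv import load_dotenv

# Suppose .env contains:          AZURE_OPENAI_API_KEY=from-dotenv
# and .env.enterprise contains:   AZURE_OPENAI_API_KEY=from-enterprise

load_dotenv()                                   # populates os.environ from .env
load_dotenv(".env.enterprise", override=False)  # fills only keys still missing

# The .env value survives: override=False never replaces an existing key.
print(os.environ.get("AZURE_OPENAI_API_KEY"))   # -> "from-dotenv"
```
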
@@ -79,7 +80,7 @@ class MessageBuffer:
         self.current_agent = None
         self.report_sections = {}
         self.selected_analysts = []
-        self._last_message_id = None
+        self._processed_message_ids = set()
 
     def init_for_analysis(self, selected_analysts):
         """Initialize agent status and report sections based on selected analysts.

@@ -114,7 +115,7 @@ class MessageBuffer:
         self.current_agent = None
         self.messages.clear()
         self.tool_calls.clear()
-        self._last_message_id = None
+        self._processed_message_ids.clear()
 
     def get_completed_reports_count(self):
         """Count reports that are finalized (their finalizing agent is completed).

@@ -1052,28 +1053,24 @@ def run_analysis():
         # Stream the analysis
         trace = []
         for chunk in graph.graph.stream(init_agent_state, **args):
-            # Process messages if present (skip duplicates via message ID)
-            if len(chunk["messages"]) > 0:
-                last_message = chunk["messages"][-1]
-                msg_id = getattr(last_message, "id", None)
+            # Process all messages in chunk, deduplicating by message ID
+            for message in chunk.get("messages", []):
+                msg_id = getattr(message, "id", None)
+                if msg_id is not None:
+                    if msg_id in message_buffer._processed_message_ids:
+                        continue
+                    message_buffer._processed_message_ids.add(msg_id)
 
-                if msg_id != message_buffer._last_message_id:
-                    message_buffer._last_message_id = msg_id
+                msg_type, content = classify_message_type(message)
+                if content and content.strip():
+                    message_buffer.add_message(msg_type, content)
 
-                    # Add message to buffer
-                    msg_type, content = classify_message_type(last_message)
-                    if content and content.strip():
-                        message_buffer.add_message(msg_type, content)
-
-                    # Handle tool calls
-                    if hasattr(last_message, "tool_calls") and last_message.tool_calls:
-                        for tool_call in last_message.tool_calls:
-                            if isinstance(tool_call, dict):
-                                message_buffer.add_tool_call(
-                                    tool_call["name"], tool_call["args"]
-                                )
-                            else:
-                                message_buffer.add_tool_call(tool_call.name, tool_call.args)
+                # Handle tool calls
+                if hasattr(message, "tool_calls") and message.tool_calls:
+                    for tool_call in message.tool_calls:
+                        if isinstance(tool_call, dict):
+                            message_buffer.add_tool_call(tool_call["name"], tool_call["args"])
+                        else:
+                            message_buffer.add_tool_call(tool_call.name, tool_call.args)
+
+            # Update analyst statuses based on report state (runs on every chunk)
+            update_analyst_statuses(message_buffer, chunk)

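The rewritten loop guards against the same message object surfacing in several consecutive stream chunks, which is what produced duplicate CLI output when only the previous message ID was tracked. The pattern stands on its own; a self-contained sketch with stand-in message objects:

```python
# Minimal sketch of ID-based deduplication across overlapping stream chunks.
from dataclasses import dataclass

@dataclass
class Msg:  # stand-in for a LangChain message
    id: str
    content: str

chunks = [
    {"messages": [Msg("m1", "fetching data")]},
    {"messages": [Msg("m1", "fetching data"), Msg("m2", "analyst report")]},  # m1 repeats
]

processed_ids = set()
for chunk in chunks:
    for message in chunk.get("messages", []):
        msg_id = getattr(message, "id", None)
        if msg_id is not None:
            if msg_id in processed_ids:
                continue          # already rendered this message
            processed_ids.add(msg_id)
        print(message.content)    # each message printed exactly once
```

Tracking a set rather than the last ID also handles chunks that interleave messages from different agents, where the previous single-ID comparison could miss repeats.
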
cli/utils.py (92 changes)

@@ -174,17 +174,30 @@ def select_openrouter_model() -> str:
     return choice
 
 
-def select_shallow_thinking_agent(provider) -> str:
-    """Select shallow thinking llm engine using an interactive selection."""
+def _prompt_custom_model_id() -> str:
+    """Prompt user to type a custom model ID."""
+    return questionary.text(
+        "Enter model ID:",
+        validate=lambda x: len(x.strip()) > 0 or "Please enter a model ID.",
+    ).ask().strip()
+
+
+def _select_model(provider: str, mode: str) -> str:
+    """Select a model for the given provider and mode (quick/deep)."""
     if provider.lower() == "openrouter":
         return select_openrouter_model()
 
+    if provider.lower() == "azure":
+        return questionary.text(
+            f"Enter Azure deployment name ({mode}-thinking):",
+            validate=lambda x: len(x.strip()) > 0 or "Please enter a deployment name.",
+        ).ask().strip()
+
     choice = questionary.select(
-        "Select Your [Quick-Thinking LLM Engine]:",
+        f"Select Your [{mode.title()}-Thinking LLM Engine]:",
         choices=[
             questionary.Choice(display, value=value)
-            for display, value in get_model_options(provider, "quick")
+            for display, value in get_model_options(provider, mode)
         ],
         instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
         style=questionary.Style(

@@ -197,58 +210,45 @@ def select_shallow_thinking_agent(provider) -> str:
     ).ask()
 
     if choice is None:
-        console.print(
-            "\n[red]No shallow thinking llm engine selected. Exiting...[/red]"
-        )
+        console.print(f"\n[red]No {mode} thinking llm engine selected. Exiting...[/red]")
         exit(1)
 
+    if choice == "custom":
+        return _prompt_custom_model_id()
+
     return choice
 
 
+def select_shallow_thinking_agent(provider) -> str:
+    """Select shallow thinking llm engine using an interactive selection."""
+    return _select_model(provider, "quick")
+
+
 def select_deep_thinking_agent(provider) -> str:
     """Select deep thinking llm engine using an interactive selection."""
-
-    if provider.lower() == "openrouter":
-        return select_openrouter_model()
-
-    choice = questionary.select(
-        "Select Your [Deep-Thinking LLM Engine]:",
-        choices=[
-            questionary.Choice(display, value=value)
-            for display, value in get_model_options(provider, "deep")
-        ],
-        instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
-        style=questionary.Style(
-            [
-                ("selected", "fg:magenta noinherit"),
-                ("highlighted", "fg:magenta noinherit"),
-                ("pointer", "fg:magenta noinherit"),
-            ]
-        ),
-    ).ask()
-
-    if choice is None:
-        console.print("\n[red]No deep thinking llm engine selected. Exiting...[/red]")
-        exit(1)
-
-    return choice
+    return _select_model(provider, "deep")
 
 
 def select_llm_provider() -> tuple[str, str | None]:
     """Select the LLM provider and its API endpoint."""
-    BASE_URLS = [
-        ("OpenAI", "https://api.openai.com/v1"),
-        ("Google", None),  # google-genai SDK manages its own endpoint
-        ("Anthropic", "https://api.anthropic.com/"),
-        ("xAI", "https://api.x.ai/v1"),
-        ("Openrouter", "https://openrouter.ai/api/v1"),
-        ("Ollama", "http://localhost:11434/v1"),
+    # (display_name, provider_key, base_url)
+    PROVIDERS = [
+        ("OpenAI", "openai", "https://api.openai.com/v1"),
+        ("Google", "google", None),
+        ("Anthropic", "anthropic", "https://api.anthropic.com/"),
+        ("xAI", "xai", "https://api.x.ai/v1"),
+        ("DeepSeek", "deepseek", "https://api.deepseek.com"),
+        ("Qwen", "qwen", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
+        ("GLM", "glm", "https://open.bigmodel.cn/api/paas/v4/"),
+        ("OpenRouter", "openrouter", "https://openrouter.ai/api/v1"),
+        ("Azure OpenAI", "azure", None),
+        ("Ollama", "ollama", "http://localhost:11434/v1"),
     ]
 
     choice = questionary.select(
         "Select your LLM Provider:",
         choices=[
-            questionary.Choice(display, value=(display, value))
-            for display, value in BASE_URLS
+            questionary.Choice(display, value=(provider_key, url))
+            for display, provider_key, url in PROVIDERS
         ],
         instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
         style=questionary.Style(

@@ -261,13 +261,11 @@ def select_llm_provider() -> tuple[str, str | None]:
     ).ask()
 
     if choice is None:
-        console.print("\n[red]no OpenAI backend selected. Exiting...[/red]")
+        console.print("\n[red]No LLM provider selected. Exiting...[/red]")
         exit(1)
 
-    display_name, url = choice
-    print(f"You selected: {display_name}\tURL: {url}")
-
-    return display_name, url
+    provider, url = choice
+    return provider, url
 
 
 def ask_openai_reasoning_effort() -> str:

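`select_llm_provider` now returns a machine-friendly provider key (e.g. `"azure"`) rather than a display name, so callers no longer need to lower-case or re-map the label. A hedged sketch of how a caller might consume the new tuple; the function and config key names here are assumptions, not code from this diff:

```python
# Sketch: consuming the (provider_key, base_url) tuple (caller names assumed).
def configure(provider: str, url: str | None) -> dict:
    """Build a config fragment from the provider selection."""
    cfg = {"llm_provider": provider}
    if url is not None:  # Google and Azure entries carry None: their SDKs manage endpoints
        cfg["backend_url"] = url
    return cfg

print(configure("deepseek", "https://api.deepseek.com"))
print(configure("azure", None))
```
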
@@ -4,7 +4,7 @@ services:
     env_file:
       - .env
     volumes:
       - ./results:/home/appuser/app/results
+      - tradingagents_data:/home/appuser/.tradingagents
     tty: true
     stdin_open: true

@@ -22,7 +22,7 @@ services:
     environment:
       - LLM_PROVIDER=ollama
     volumes:
       - ./results:/home/appuser/app/results
+      - tradingagents_data:/home/appuser/.tradingagents
     depends_on:
       - ollama
     tty: true

@@ -31,4 +31,5 @@ services:
       - ollama
 
 volumes:
+  tradingagents_data:
   ollama_data:

@@ -0,0 +1,37 @@
+"""Portfolio analysis example.
+
+Analyzes multiple stocks in a portfolio and produces a comparative
+recommendation for each position (KEEP / REDUCE / EXIT).
+
+Related GitHub issues: #60, #406
+"""
+
+from tradingagents.graph.trading_graph import TradingAgentsGraph
+from tradingagents.default_config import DEFAULT_CONFIG
+from dotenv import load_dotenv
+
+load_dotenv()
+
+config = DEFAULT_CONFIG.copy()
+# Customize LLM provider and models as needed:
+# config["llm_provider"] = "anthropic"  # or "openai", "google"
+# config["deep_think_llm"] = "claude-sonnet-4-20250514"
+# config["quick_think_llm"] = "claude-haiku-4-5-20251001"
+config["max_debate_rounds"] = 1
+
+ta = TradingAgentsGraph(debug=False, config=config)
+
+# Define your portfolio
+portfolio = ["NVDA", "AAPL", "MSFT", "GOOGL", "AMZN"]
+
+# Run the portfolio analysis
+results = ta.propagate_portfolio(portfolio, "2025-03-23")
+
+# Print individual signals
+print("\n=== INDIVIDUAL SIGNALS ===")
+for ticker, result in results["individual_results"].items():
+    print(f"  {ticker}: {result['signal']}")
+
+# Print the comparative portfolio summary
+print("\n=== PORTFOLIO SUMMARY ===")
+print(results["portfolio_summary"])

@@ -78,7 +78,7 @@ class FinancialSituationMemory:
 
         # Build results
         results = []
-        max_score = max(scores) if max(scores) > 0 else 1  # Normalize scores
+        max_score = float(scores.max()) if len(scores) > 0 and scores.max() > 0 else 1.0
 
         for idx in top_indices:
             # Normalize score to 0-1 range for consistency
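The new guard matters because `scores` appears to be a NumPy-style array (it has a `.max()` method): the old expression evaluated `max(scores)` twice and raised on an empty array, while the rewrite checks emptiness first and casts to a plain `float` so downstream code sees a native number rather than a NumPy scalar. A small sketch of the corrected normalization, with hypothetical similarity scores:

```python
# Sketch of the normalization guard (scores are hypothetical similarity values).
import numpy as np

def normalized(scores: np.ndarray) -> np.ndarray:
    # Guard the empty case first: calling .max() on an empty array raises.
    max_score = float(scores.max()) if len(scores) > 0 and scores.max() > 0 else 1.0
    return scores / max_score

print(normalized(np.array([0.2, 0.5, 1.0])))  # [0.2 0.5 1. ]
print(normalized(np.array([])))               # [] -- no ValueError
print(normalized(np.array([0.0, 0.0])))       # zeros stay zeros (divide by 1.0)
```
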
@@ -1,12 +1,11 @@
 import os
 
+_TRADINGAGENTS_HOME = os.path.join(os.path.expanduser("~"), ".tradingagents")
+
 DEFAULT_CONFIG = {
     "project_dir": os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
-    "results_dir": os.getenv("TRADINGAGENTS_RESULTS_DIR", "./results"),
-    "data_cache_dir": os.path.join(
-        os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
-        "dataflows/data_cache",
-    ),
+    "results_dir": os.getenv("TRADINGAGENTS_RESULTS_DIR", os.path.join(_TRADINGAGENTS_HOME, "logs")),
+    "data_cache_dir": os.getenv("TRADINGAGENTS_CACHE_DIR", os.path.join(_TRADINGAGENTS_HOME, "cache")),
     # LLM settings
     "llm_provider": "openai",
     "deep_think_llm": "gpt-5.4",
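Results and cache now default to per-user paths under `~/.tradingagents` instead of the package directory, and both can be redirected with environment variables. A quick sketch of the resolution order, mirroring the config expressions above:

```python
# Sketch: how the two paths resolve (mirrors the DEFAULT_CONFIG expressions).
import os

_TRADINGAGENTS_HOME = os.path.join(os.path.expanduser("~"), ".tradingagents")

# Without overrides: ~/.tradingagents/logs and ~/.tradingagents/cache
print(os.getenv("TRADINGAGENTS_RESULTS_DIR", os.path.join(_TRADINGAGENTS_HOME, "logs")))

# With an override, the environment variable wins:
os.environ["TRADINGAGENTS_CACHE_DIR"] = "/tmp/ta-cache"
print(os.getenv("TRADINGAGENTS_CACHE_DIR", os.path.join(_TRADINGAGENTS_HOME, "cache")))  # /tmp/ta-cache
```
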
@@ -6,6 +6,7 @@ from .setup import GraphSetup
 from .propagation import Propagator
 from .reflection import Reflector
 from .signal_processing import SignalProcessor
+from .portfolio_analysis import PortfolioAnalyzer
 
 __all__ = [
     "TradingAgentsGraph",
@@ -14,4 +15,5 @@ __all__ = [
     "Propagator",
     "Reflector",
     "SignalProcessor",
+    "PortfolioAnalyzer",
 ]

@@ -0,0 +1,192 @@
+# TradingAgents/graph/portfolio_analysis.py
+
+import json
+import re
+import traceback
+from pathlib import Path
+from typing import Any, Callable, Dict, List, Tuple
+
+from langchain_core.language_models.chat_models import BaseChatModel
+
+
+class PortfolioAnalyzer:
+    """Analyzes multiple stocks and produces a comparative portfolio recommendation.
+
+    Follows the same delegation pattern as SignalProcessor and Reflector —
+    the orchestrator (TradingAgentsGraph) owns the graph and LLMs, this class
+    owns the portfolio-level prompt, comparison logic, and logging.
+    """
+
+    def __init__(self, deep_thinking_llm: BaseChatModel, config: Dict[str, Any]):
+        """Initialize with the deep thinking LLM for comparative analysis.
+
+        Args:
+            deep_thinking_llm: The LLM instance used for the portfolio summary.
+            config: The configuration dictionary for the application.
+        """
+        self.deep_thinking_llm = deep_thinking_llm
+        self.config = config
+
+    def analyze(
+        self,
+        tickers: List[str],
+        trade_date: str,
+        propagate_fn: Callable[[str, str], Tuple[Dict[str, Any], str]],
+        debug: bool = False,
+    ) -> Dict[str, Any]:
+        """Run analysis on multiple stocks and produce a comparative summary.
+
+        Args:
+            tickers: List of ticker symbols to analyze.
+            trade_date: The trade date string (e.g., "2026-03-23").
+            propagate_fn: The single-stock propagation function (typically
+                TradingAgentsGraph.propagate).
+            debug: Whether to print progress output.
+
+        Returns:
+            Dictionary with:
+            - "individual_results": dict mapping ticker to its decision and signal
+            - "portfolio_summary": the comparative LLM analysis
+
+        Raises:
+            ValueError: If tickers is empty.
+        """
+        if not tickers:
+            raise ValueError("tickers must be a non-empty list")
+
+        individual_results = self._analyze_individual(
+            tickers, trade_date, propagate_fn, debug
+        )
+
+        portfolio_summary = self._generate_summary(
+            individual_results, trade_date, debug
+        )
+
+        try:
+            self._log_portfolio(trade_date, tickers, individual_results, portfolio_summary)
+        except Exception as e:
+            if debug:
+                print(f"Warning: failed to save portfolio log: {e}")
+
+        return {
+            "individual_results": individual_results,
+            "portfolio_summary": portfolio_summary,
+        }
+
+    def _analyze_individual(
+        self,
+        tickers: List[str],
+        trade_date: str,
+        propagate_fn: Callable[[str, str], Tuple[Dict[str, Any], str]],
+        debug: bool,
+    ) -> Dict[str, Dict[str, str]]:
+        """Run the agent pipeline on each ticker, collecting results."""
+        individual_results = {}
+
+        for ticker in tickers:
+            if debug:
+                print(f"\n{'='*60}")
+                print(f"Analyzing {ticker}...")
+                print(f"{'='*60}\n")
+
+            try:
+                final_state, signal = propagate_fn(ticker, trade_date)
+                individual_results[ticker] = {
+                    "signal": signal,
+                    "final_trade_decision": final_state["final_trade_decision"],
+                }
+            except Exception as e:
+                if debug:
+                    print(f"Error analyzing {ticker}: {e}")
+                error_msg = f"Analysis failed: {e}"
+                if debug:
+                    error_msg += f"\n{traceback.format_exc()}"
+                individual_results[ticker] = {
+                    "signal": "ERROR",
+                    "final_trade_decision": error_msg,
+                }
+
+        return individual_results
+
+    def _generate_summary(
+        self,
+        individual_results: Dict[str, Dict[str, str]],
+        trade_date: str,
+        debug: bool = False,
+    ) -> str:
+        """Use the deep thinking LLM to compare all positions."""
+        # Skip summary if all tickers failed
+        successful = {
+            t: r for t, r in individual_results.items() if r["signal"] != "ERROR"
+        }
+        if not successful:
+            return "Portfolio summary unavailable — all individual analyses failed."
+
+        analyses_text = self._build_analyses_text(successful)
+        messages = [
+            ("system", self._get_system_prompt()),
+            (
+                "human",
+                f"Here are the individual analyses for my portfolio positions "
+                f"as of {trade_date}:\n{analyses_text}\n\n"
+                f"Please provide a comparative portfolio recommendation.",
+            ),
+        ]
+
+        try:
+            return self.deep_thinking_llm.invoke(messages).content
+        except Exception as e:
+            error_msg = f"Portfolio summary generation failed: {e}"
+            if debug:
+                error_msg += f"\n{traceback.format_exc()}"
+            signals = ", ".join(f"{t}: {r['signal']}" for t, r in individual_results.items())
+            return f"{error_msg}\nIndividual signals were: {signals}"
+
+    def _build_analyses_text(self, results: Dict[str, Dict[str, str]]) -> str:
+        """Format individual results into a text block for the LLM prompt."""
+        parts = []
+        for ticker, result in results.items():
+            parts.append(
+                f"--- {ticker} ---\n"
+                f"Rating: {result['signal']}\n"
+                f"Full Analysis:\n{result['final_trade_decision']}"
+            )
+        return "\n".join(parts)
+
+    def _get_system_prompt(self) -> str:
+        """Return the system prompt for the portfolio comparison LLM call."""
+        return (
+            "You are a senior portfolio strategist. You have received individual "
+            "stock analyses for all positions in a portfolio. Your job is to compare "
+            "them relative to each other and provide a clear, actionable portfolio "
+            "recommendation.\n\n"
+            "For each stock, assign one of: KEEP, REDUCE, or EXIT.\n\n"
+            "Structure your response as:\n"
+            "1. A ranked summary table (best to worst) with ticker, action, and "
+            "one-line rationale.\n"
+            "2. A brief portfolio-level commentary covering overall risk exposure, "
+            "sector concentration, and any suggested rebalancing.\n\n"
+            "Be direct and concise. This is for an experienced investor."
+        )
+
+    def _log_portfolio(
+        self,
+        trade_date: str,
+        tickers: List[str],
+        individual_results: Dict[str, Dict[str, str]],
+        portfolio_summary: str,
+    ) -> None:
+        """Log the portfolio analysis results to a JSON file."""
+        directory = Path(self.config.get("portfolio_log_dir", "eval_results/portfolio/"))
+        directory.mkdir(parents=True, exist_ok=True)
+
+        log_data = {
+            "trade_date": trade_date,
+            "tickers": tickers,
+            "individual_results": individual_results,
+            "portfolio_summary": portfolio_summary,
+        }
+
+        log_file = directory / f"portfolio_analysis_{re.sub(r'[^\w.-]', '_', trade_date)}.json"
+        with log_file.open("w", encoding="utf-8") as f:
+            json.dump(log_data, f, indent=4)
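Because `_analyze_individual` catches per-ticker exceptions and records an `ERROR` signal instead of re-raising, one bad ticker cannot sink the whole portfolio run. A standalone sketch of that isolation with a stubbed `propagate_fn` (a stand-in, not the real pipeline):

```python
# Standalone sketch of the per-ticker error isolation in _analyze_individual.
def propagate_stub(ticker: str, trade_date: str):
    """Stand-in for TradingAgentsGraph.propagate."""
    if ticker == "BAD":
        raise RuntimeError("data source unavailable")
    return {"final_trade_decision": f"HOLD {ticker}"}, "HOLD"

individual_results = {}
for ticker in ["NVDA", "BAD", "AAPL"]:
    try:
        final_state, signal = propagate_stub(ticker, "2025-03-23")
        individual_results[ticker] = {
            "signal": signal,
            "final_trade_decision": final_state["final_trade_decision"],
        }
    except Exception as e:
        # The failed ticker is recorded, not raised; the loop continues.
        individual_results[ticker] = {
            "signal": "ERROR",
            "final_trade_decision": f"Analysis failed: {e}",
        }

print({t: r["signal"] for t, r in individual_results.items()})
# {'NVDA': 'HOLD', 'BAD': 'ERROR', 'AAPL': 'HOLD'}
```

`_generate_summary` then filters the `ERROR` entries out of the LLM prompt, so the comparative summary only ranks positions that actually produced a decision.
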
@@ -38,6 +38,7 @@ from .setup import GraphSetup
 from .propagation import Propagator
 from .reflection import Reflector
 from .signal_processing import SignalProcessor
+from .portfolio_analysis import PortfolioAnalyzer
 
 
 class TradingAgentsGraph:
@@ -66,10 +67,8 @@ class TradingAgentsGraph:
         set_config(self.config)
 
         # Create necessary directories
-        os.makedirs(
-            os.path.join(self.config["project_dir"], "dataflows/data_cache"),
-            exist_ok=True,
-        )
+        os.makedirs(self.config["data_cache_dir"], exist_ok=True)
+        os.makedirs(self.config["results_dir"], exist_ok=True)
 
         # Initialize LLMs with provider-specific thinking configuration
         llm_kwargs = self._get_provider_kwargs()
@@ -124,6 +123,7 @@ class TradingAgentsGraph:
         self.propagator = Propagator()
         self.reflector = Reflector(self.quick_thinking_llm)
         self.signal_processor = SignalProcessor(self.quick_thinking_llm)
+        self.portfolio_analyzer = PortfolioAnalyzer(self.deep_thinking_llm, self.config)
 
         # State tracking
         self.curr_state = None
@@ -266,6 +266,26 @@ class TradingAgentsGraph:
         with open(log_path, "w", encoding="utf-8") as f:
             json.dump(self.log_states_dict[str(trade_date)], f, indent=4)
 
+    def propagate_portfolio(
+        self, tickers: List[str], trade_date: str
+    ) -> Dict[str, Any]:
+        """Run analysis on multiple stocks and produce a comparative portfolio summary.
+
+        Delegates to PortfolioAnalyzer.analyze — see that class for full details.
+
+        This method preserves the instance's ticker and curr_state attributes,
+        restoring them after the portfolio analysis is complete.
+        """
+        original_ticker = self.ticker
+        original_curr_state = self.curr_state
+        try:
+            return self.portfolio_analyzer.analyze(
+                tickers, trade_date, self.propagate, debug=self.debug
+            )
+        finally:
+            self.ticker = original_ticker
+            self.curr_state = original_curr_state
+
     def reflect_and_remember(self, returns_losses):
         """Reflect on decisions and update memory based on returns."""
         self.reflector.reflect_bull_researcher(

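The `try`/`finally` in `propagate_portfolio` guarantees the instance attributes are restored even if `analyze` raises, since each `propagate` call inside it mutates `self.ticker` and `self.curr_state`. The pattern in isolation, with a toy class standing in for the graph:

```python
# Sketch of the save/restore pattern used by propagate_portfolio (toy class).
class Graph:
    def __init__(self):
        self.ticker = None

    def propagate(self, ticker):
        self.ticker = ticker  # single-stock runs mutate instance state

    def propagate_portfolio(self, tickers):
        original_ticker = self.ticker
        try:
            for t in tickers:
                self.propagate(t)
        finally:
            self.ticker = original_ticker  # restored even on exceptions

g = Graph()
g.propagate("NVDA")
g.propagate_portfolio(["AAPL", "MSFT"])
print(g.ticker)  # NVDA -- the portfolio run left no residue
```
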
@@ -0,0 +1,52 @@
+import os
+from typing import Any, Optional
+
+from langchain_openai import AzureChatOpenAI
+
+from .base_client import BaseLLMClient, normalize_content
+from .validators import validate_model
+
+_PASSTHROUGH_KWARGS = (
+    "timeout", "max_retries", "api_key", "reasoning_effort",
+    "callbacks", "http_client", "http_async_client",
+)
+
+
+class NormalizedAzureChatOpenAI(AzureChatOpenAI):
+    """AzureChatOpenAI with normalized content output."""
+
+    def invoke(self, input, config=None, **kwargs):
+        return normalize_content(super().invoke(input, config, **kwargs))
+
+
+class AzureOpenAIClient(BaseLLMClient):
+    """Client for Azure OpenAI deployments.
+
+    Requires environment variables:
+        AZURE_OPENAI_API_KEY: API key
+        AZURE_OPENAI_ENDPOINT: Endpoint URL (e.g. https://<resource>.openai.azure.com/)
+        AZURE_OPENAI_DEPLOYMENT_NAME: Deployment name
+        OPENAI_API_VERSION: API version (e.g. 2025-03-01-preview)
+    """
+
+    def __init__(self, model: str, base_url: Optional[str] = None, **kwargs):
+        super().__init__(model, base_url, **kwargs)
+
+    def get_llm(self) -> Any:
+        """Return configured AzureChatOpenAI instance."""
+        self.warn_if_unknown_model()
+
+        llm_kwargs = {
+            "model": self.model,
+            "azure_deployment": os.environ.get("AZURE_OPENAI_DEPLOYMENT_NAME", self.model),
+        }
+
+        for key in _PASSTHROUGH_KWARGS:
+            if key in self.kwargs:
+                llm_kwargs[key] = self.kwargs[key]
+
+        return NormalizedAzureChatOpenAI(**llm_kwargs)
+
+    def validate_model(self) -> bool:
+        """Azure accepts any deployed model name."""
+        return True
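Per the class docstring, the client is driven by environment variables rather than constructor arguments: `AzureChatOpenAI` from `langchain-openai` picks up the key, endpoint, and API version from the environment, and the deployment name falls back to the model name when its variable is unset. A setup sketch with placeholder values only:

```python
# Sketch: environment-driven Azure configuration (all values are placeholders).
import os

os.environ.setdefault("AZURE_OPENAI_API_KEY", "<your-key>")
os.environ.setdefault("AZURE_OPENAI_ENDPOINT", "https://your-resource-name.openai.azure.com/")
os.environ.setdefault("AZURE_OPENAI_DEPLOYMENT_NAME", "my-gpt-deployment")
os.environ.setdefault("OPENAI_API_VERSION", "2025-03-01-preview")

# Mirrors the fallback in get_llm: deployment env var wins, model name otherwise.
model = "gpt-4o"  # hypothetical model name
deployment = os.environ.get("AZURE_OPENAI_DEPLOYMENT_NAME", model)
print(deployment)  # my-gpt-deployment
```
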
@@ -4,6 +4,12 @@ from .base_client import BaseLLMClient
 from .openai_client import OpenAIClient
 from .anthropic_client import AnthropicClient
 from .google_client import GoogleClient
+from .azure_client import AzureOpenAIClient
+
+# Providers that use the OpenAI-compatible chat completions API
+_OPENAI_COMPATIBLE = (
+    "openai", "xai", "deepseek", "qwen", "glm", "ollama", "openrouter",
+)
 
 
 def create_llm_client(
@@ -15,16 +21,10 @@ def create_llm_client(
     """Create an LLM client for the specified provider.
 
     Args:
-        provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter)
+        provider: LLM provider name
         model: Model name/identifier
         base_url: Optional base URL for API endpoint
         **kwargs: Additional provider-specific arguments
-            - http_client: Custom httpx.Client for SSL proxy or certificate customization
-            - http_async_client: Custom httpx.AsyncClient for async operations
-            - timeout: Request timeout in seconds
-            - max_retries: Maximum retry attempts
-            - api_key: API key for the provider
-            - callbacks: LangChain callbacks
 
     Returns:
         Configured BaseLLMClient instance
@@ -34,16 +34,16 @@ def create_llm_client(
     """
     provider_lower = provider.lower()
 
-    if provider_lower in ("openai", "ollama", "openrouter"):
+    if provider_lower in _OPENAI_COMPATIBLE:
         return OpenAIClient(model, base_url, provider=provider_lower, **kwargs)
 
-    if provider_lower == "xai":
-        return OpenAIClient(model, base_url, provider="xai", **kwargs)
-
     if provider_lower == "anthropic":
         return AnthropicClient(model, base_url, **kwargs)
 
     if provider_lower == "google":
         return GoogleClient(model, base_url, **kwargs)
 
+    if provider_lower == "azure":
+        return AzureOpenAIClient(model, base_url, **kwargs)
+
     raise ValueError(f"Unsupported LLM provider: {provider}")

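Collapsing the OpenAI-compatible providers into one tuple means adding a provider is a one-line change here plus its defaults elsewhere. A standalone sketch mirroring the dispatch logic (the returned strings are illustrative, not the real client objects):

```python
# Standalone sketch mirroring the factory's dispatch logic.
_OPENAI_COMPATIBLE = ("openai", "xai", "deepseek", "qwen", "glm", "ollama", "openrouter")

def dispatch(provider: str) -> str:
    p = provider.lower()
    if p in _OPENAI_COMPATIBLE:
        return f"OpenAIClient(provider={p!r})"  # one shared client class
    if p in ("anthropic", "google", "azure"):
        return f"dedicated client for {p}"      # provider-specific classes
    raise ValueError(f"Unsupported LLM provider: {provider}")

print(dispatch("DeepSeek"))  # OpenAIClient(provider='deepseek')
print(dispatch("azure"))     # dedicated client for azure
```
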
@@ -63,8 +63,43 @@ MODEL_OPTIONS: ProviderModeOptions = {
             ("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"),
         ],
     },
-    # OpenRouter models are fetched dynamically at CLI runtime.
-    # No static entries needed; any model ID is accepted by the validator.
+    "deepseek": {
+        "quick": [
+            ("DeepSeek V3.2", "deepseek-chat"),
+            ("Custom model ID", "custom"),
+        ],
+        "deep": [
+            ("DeepSeek V3.2 (thinking)", "deepseek-reasoner"),
+            ("DeepSeek V3.2", "deepseek-chat"),
+            ("Custom model ID", "custom"),
+        ],
+    },
+    "qwen": {
+        "quick": [
+            ("Qwen 3.5 Flash", "qwen3.5-flash"),
+            ("Qwen Plus", "qwen-plus"),
+            ("Custom model ID", "custom"),
+        ],
+        "deep": [
+            ("Qwen 3.6 Plus", "qwen3.6-plus"),
+            ("Qwen 3.5 Plus", "qwen3.5-plus"),
+            ("Qwen 3 Max", "qwen3-max"),
+            ("Custom model ID", "custom"),
+        ],
+    },
+    "glm": {
+        "quick": [
+            ("GLM-4.7", "glm-4.7"),
+            ("GLM-5", "glm-5"),
+            ("Custom model ID", "custom"),
+        ],
+        "deep": [
+            ("GLM-5.1", "glm-5.1"),
+            ("GLM-5", "glm-5"),
+            ("Custom model ID", "custom"),
+        ],
+    },
+    # OpenRouter: fetched dynamically. Azure: any deployed model name.
     "ollama": {
         "quick": [
             ("Qwen3:latest (8B, local)", "qwen3:latest"),

@@ -27,6 +27,9 @@ _PASSTHROUGH_KWARGS = (
 # Provider base URLs and API key env vars
 _PROVIDER_CONFIG = {
     "xai": ("https://api.x.ai/v1", "XAI_API_KEY"),
+    "deepseek": ("https://api.deepseek.com", "DEEPSEEK_API_KEY"),
+    "qwen": ("https://dashscope-intl.aliyuncs.com/compatible-mode/v1", "DASHSCOPE_API_KEY"),
+    "glm": ("https://api.z.ai/api/paas/v4/", "ZHIPU_API_KEY"),
     "openrouter": ("https://openrouter.ai/api/v1", "OPENROUTER_API_KEY"),
     "ollama": ("http://localhost:11434/v1", None),
 }
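Each entry pairs the provider's default base URL with the environment variable expected to hold its key (`None` for Ollama, which needs no key). A sketch of how a client might resolve both; the `resolve` helper is an assumption for illustration, not code from this diff:

```python
# Sketch: resolving base URL and API key from _PROVIDER_CONFIG (helper assumed).
import os

_PROVIDER_CONFIG = {
    "deepseek": ("https://api.deepseek.com", "DEEPSEEK_API_KEY"),
    "ollama": ("http://localhost:11434/v1", None),
}

def resolve(provider: str, base_url: str | None = None):
    default_url, key_env = _PROVIDER_CONFIG[provider]
    api_key = os.environ.get(key_env) if key_env else "ollama"  # dummy key for local
    return base_url or default_url, api_key

print(resolve("ollama"))       # ('http://localhost:11434/v1', 'ollama')
print(resolve("deepseek")[0])  # default URL unless an explicit base_url is passed
```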