swj.premkumar 2026-01-09 20:59:52 -06:00
parent a0ab1a9b3e
commit 9347a419e4
9 changed files with 166 additions and 19 deletions

CHANGELOG.md Normal file (+22 lines)

@@ -0,0 +1,22 @@
# Changelog

All notable changes to the **TradingAgents** project will be documented in this file.

## [Unreleased] - 2026-01-09

### Added
- **API Key Verification**: Added a `verify_google_key.py` script to isolate and verify Google API key functionality for embeddings (a sketch of such a script follows this file).
- **Environment Management**: Added `load_dotenv` to `cli/main.py` and `verify_google_key.py` so that `.env` variables are loaded reliably.
- **Start Script**: Added `start.sh`, which activates the virtual environment, loads `.env`, checks that `GOOGLE_API_KEY` exists, and warns the user if it does not.
- **Debug Logging**: Added temporary debug logging (commented out) in `memory.py` for API key verification.

### Fixed
- **Embedding Model Error**: Fixed `BadRequestError` / `404 Not Found` when using the Google (Gemini) provider by explicitly setting `text-embedding-004` and using Google's OpenAI-compatible endpoint (`generativelanguage.googleapis.com`).
- **Data Fetching Failure**: Resolved `RuntimeError: All vendor implementations failed for method 'get_fundamentals'` by implementing a `yfinance` fallback in `tradingagents/dataflows/y_finance.py` and registering it in `interface.py`.
- **Report Saving Crash**: Fixed `TypeError: write() argument must be str, not list` in `cli/main.py` by converting structured list content to a string before writing report files.
- **API Rate Limiting**: Added `max_retries` handling (exponential backoff) to both `ChatGoogleGenerativeAI` (10 retries) and the `OpenAI` embedding client (5 retries) to handle `429 RESOURCE_EXHAUSTED` errors robustly.
- **Payload Size Error**: Added input truncation (max 9000 chars) to `memory.py`'s `get_embedding` method so oversized payloads no longer crash the API call.

### Changed
- **LLM Configuration**: Updated `tradingagents/default_config.py` and `cli/utils.py` to use currently valid Gemini model names (e.g., `gemini-2.5-flash`, `gemini-2.5-pro`, `gemini-3-flash-preview`) and set the config defaults to `gemini-pro`.
- **Vendor Configuration**: Updated the default `fundamental_data` vendor to `"alpha_vantage, yfinance"` to ensure fallback availability.
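The `verify_google_key.py` script referenced above is not shown in this commit. A minimal sketch of what such a check might look like, assuming the same `text-embedding-004` model and OpenAI-compatible endpoint that `memory.py` uses below (the real script may differ):

```python
# Hypothetical sketch of verify_google_key.py -- the actual file is not in
# this diff. Requires the python-dotenv and openai packages.
import os

from dotenv import load_dotenv
from openai import OpenAI

load_dotenv()  # load GOOGLE_API_KEY from .env into the environment

api_key = os.getenv("GOOGLE_API_KEY")
if not api_key:
    raise SystemExit("GOOGLE_API_KEY is not set; add it to .env or export it.")

# Google's OpenAI-compatible endpoint, mirroring the memory.py configuration.
client = OpenAI(
    api_key=api_key,
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
)
response = client.embeddings.create(model="text-embedding-004", input="ping")
print(f"OK: embedding dimension {len(response.data[0].embedding)}")
```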

cli/main.py

@@ -789,11 +789,12 @@ def run_analysis():
         def wrapper(section_name, content):
             func(section_name, content)
             if section_name in obj.report_sections and obj.report_sections[section_name] is not None:
-                content = obj.report_sections[section_name]
-                if content:
+                raw_content = obj.report_sections[section_name]
+                if raw_content:
+                    content_str = extract_content_string(raw_content)
                     file_name = f"{section_name}.md"
                     with open(report_dir / file_name, "w") as f:
-                        f.write(content)
+                        f.write(content_str)
         return wrapper

     message_buffer.add_message = save_message_decorator(message_buffer, "add_message")
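The `extract_content_string` helper used above is defined elsewhere in the codebase and is not part of this hunk. A plausible sketch, assuming it flattens LangChain-style message content (a plain string, or a list of string/dict chunks) into text:

```python
# Hypothetical sketch of extract_content_string; the real definition is not
# shown in this diff. Assumes content is a str or a list of str / dict chunks.
def extract_content_string(content):
    if isinstance(content, str):
        return content
    if isinstance(content, list):
        parts = []
        for chunk in content:
            if isinstance(chunk, str):
                parts.append(chunk)
            elif isinstance(chunk, dict) and "text" in chunk:
                parts.append(chunk["text"])  # LangChain-style text block
            else:
                parts.append(str(chunk))
        return "\n".join(parts)
    return str(content)
```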

cli/utils.py

@@ -140,10 +140,11 @@ def select_shallow_thinking_agent(provider) -> str:
             ("Claude Sonnet 4 - High performance and excellent reasoning", "claude-sonnet-4-0"),
         ],
         "google": [
-            ("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"),
-            ("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"),
-            ("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
-        ],
+            ("Gemini 2.5 Flash-Lite - Cost efficiency and low latency", "gemini-2.5-flash-lite"),
+            ("Gemini 2.5 Flash - Next generation features, speed, and thinking", "gemini-2.5-flash"),
+            ("Gemini 3.0 Flash - Next generation features, speed, and thinking", "gemini-3-flash-preview"),
+            ("Gemini 3.0 Pro - Adaptive thinking, cost efficiency", "gemini-3-pro-preview"),
+            ("Gemini 2.5 Pro", "gemini-2.5-pro")],
         "openrouter": [
             ("Meta: Llama 4 Scout", "meta-llama/llama-4-scout:free"),
             ("Meta: Llama 3.3 8B Instruct - A lightweight and ultra-fast variant of Llama 3.3 70B", "meta-llama/llama-3.3-8b-instruct:free"),
@@ -202,10 +203,10 @@ def select_deep_thinking_agent(provider) -> str:
             ("Claude Opus 4 - Most powerful Anthropic model", " claude-opus-4-0"),
         ],
         "google": [
-            ("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"),
-            ("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"),
-            ("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
-            ("Gemini 2.5 Pro", "gemini-2.5-pro-preview-06-05"),
+            ("Gemini 2.5 Flash - Next generation features, speed, and thinking", "gemini-2.5-flash"),
+            ("Gemini 3.0 Flash - Next generation features, speed, and thinking", "gemini-3-flash-preview"),
+            ("Gemini 3.0 Pro - Adaptive thinking, cost efficiency", "gemini-3-pro-preview"),
+            ("Gemini 2.5 Pro", "gemini-2.5-pro"),
         ],
         "openrouter": [
             ("DeepSeek V3 - a 685B-parameter, mixture-of-experts model", "deepseek/deepseek-chat-v3-0324:free"),

start.sh Executable file (+37 lines)

@@ -0,0 +1,37 @@
#!/bin/bash

# 1. Activate Virtual Environment
if [ -d ".venv" ]; then
    source .venv/bin/activate
    echo "✅ Virtual Environment (.venv) Activated"
else
    echo "❌ Virtual Environment not found! Run 'uv venv --python 3.13' first."
    exit 1
fi

# 2. Export API Keys (PLACEHOLDERS - PLEASE UPDATE)
# You can also load these from a .env file if preferred
if [ -f ".env" ]; then
    export $(grep -v '^#' .env | xargs)
    echo "✅ Loaded keys from .env"
else
    echo "⚠️ No .env file found. Using default/exported keys."
    # START: REPLACE WITH YOUR ACTUAL KEYS IF NOT USING .ENV
    # export OPENAI_API_KEY="sk-your-key-here"
    # export ALPHA_VANTAGE_API_KEY="your-key-here"
    # export GOOGLE_API_KEY="your-key-here"
    # END
fi

# Check if keys are set
if [ -z "$OPENAI_API_KEY" ]; then
    echo "⚠️ OPENAI_API_KEY is missing! Set it if using OpenAI."
fi
if [ -z "$GOOGLE_API_KEY" ]; then
    echo "⚠️ GOOGLE_API_KEY is missing! Set it if using Gemini."
fi

# 3. Start the Shadow Run (Daily Execution)
echo "🚀 Starting Shadow Run Daily Execution..."
python3 -m cli.main
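Note that `export $(grep -v '^#' .env | xargs)` mis-parses values containing spaces or quotes; the CLI does not depend on it, since the changelog also adds `load_dotenv` to `cli/main.py`. A minimal sketch of that pattern (its exact placement in `cli/main.py` is not shown here):

```python
# Sketch of the load_dotenv pattern described in the changelog; exact
# placement within cli/main.py is not shown in this diff.
import os

from dotenv import load_dotenv

load_dotenv()  # reads .env from the working directory, if present

# Keys are now available regardless of how the shell exported them.
google_key = os.getenv("GOOGLE_API_KEY")
```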

tradingagents/agents/utils/memory.py

@@ -1,3 +1,4 @@
+import os
 import chromadb
 from chromadb.config import Settings
 from openai import OpenAI
@@ -5,17 +6,44 @@ from openai import OpenAI

 class FinancialSituationMemory:
     def __init__(self, name, config):
-        if config["backend_url"] == "http://localhost:11434/v1":
+        if config.get("llm_provider") == "google":
+            self.embedding = "text-embedding-004"
+            google_api_key = os.getenv("GOOGLE_API_KEY")
+            if not google_api_key:
+                raise ValueError("❌ GOOGLE_API_KEY not found in environment. Please add it to your .env file or export it.")
+            # Use Google's OpenAI-compatible endpoint with retries
+            self.client = OpenAI(
+                api_key=google_api_key,
+                base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
+                max_retries=5
+            )
+        elif config["backend_url"] == "http://localhost:11434/v1" or config.get("llm_provider") == "ollama":
             self.embedding = "nomic-embed-text"
+            self.client = OpenAI(base_url=config["backend_url"])
         else:
             self.embedding = "text-embedding-3-small"
-        self.client = OpenAI(base_url=config["backend_url"])
+            self.client = OpenAI(base_url=config["backend_url"])
         self.chroma_client = chromadb.Client(Settings(allow_reset=True))
         self.situation_collection = self.chroma_client.create_collection(name=name)

     def get_embedding(self, text):
         """Get OpenAI embedding for a text"""
+        # DEBUG: Check API Key
+        if hasattr(self, 'client') and self.client.api_key:
+            masked_key = self.client.api_key[:4] + "..."
+            # print(f"DEBUG: Using API Key: {masked_key}")
+
+        # Truncate text if too long (Google's limit is ~2048 tokens / 8k chars, allow buffer)
+        # OpenAI text-embedding-3 is 8191 tokens (~32k chars)
+        # Use safe limit of 9000 chars
+        if len(text) > 9000:
+            # print(f"WARNING: Truncating text for embedding. Length {len(text)} > 9000")
+            text = text[:9000]
         response = self.client.embeddings.create(
             model=self.embedding, input=text
         )
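With the client-level `max_retries=5` and the 9000-character cap, `get_embedding` degrades gracefully on `429` responses and oversized inputs. A usage sketch, assuming `GOOGLE_API_KEY` is set and that `get_embedding` returns the embedding vector (its final `return` falls outside this hunk):

```python
# Usage sketch -- the config dict and memory name are illustrative.
config = {
    "llm_provider": "google",
    "backend_url": "https://api.openai.com/v1",  # unused on the google branch
}
memory = FinancialSituationMemory("bull_memory", config)

# Inputs over 9000 chars are truncated before the API call; rate-limit errors
# are retried up to 5 times by the OpenAI client itself.
vector = memory.get_embedding("Markets rallied on soft CPI data. " * 300)
```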

tradingagents/dataflows/interface.py

@@ -2,7 +2,7 @@ from typing import Annotated

 # Import from vendor-specific modules
 from .local import get_YFin_data, get_finnhub_news, get_finnhub_company_insider_sentiment, get_finnhub_company_insider_transactions, get_simfin_balance_sheet, get_simfin_cashflow, get_simfin_income_statements, get_reddit_global_news, get_reddit_company_news
-from .y_finance import get_YFin_data_online, get_stock_stats_indicators_window, get_balance_sheet as get_yfinance_balance_sheet, get_cashflow as get_yfinance_cashflow, get_income_statement as get_yfinance_income_statement, get_insider_transactions as get_yfinance_insider_transactions
+from .y_finance import get_YFin_data_online, get_stock_stats_indicators_window, get_balance_sheet as get_yfinance_balance_sheet, get_cashflow as get_yfinance_cashflow, get_income_statement as get_yfinance_income_statement, get_insider_transactions as get_yfinance_insider_transactions, get_fundamentals as get_fundamentals_yfinance
 from .google import get_google_news
 from .openai import get_stock_news_openai, get_global_news_openai, get_fundamentals_openai
 from .alpha_vantage import (
@@ -78,6 +78,7 @@ VENDOR_METHODS = {
     # fundamental_data
     "get_fundamentals": {
         "alpha_vantage": get_alpha_vantage_fundamentals,
+        "yfinance": get_fundamentals_yfinance,
         "openai": get_fundamentals_openai,
     },
     "get_balance_sheet": {

tradingagents/dataflows/y_finance.py

@@ -404,4 +404,53 @@ def get_insider_transactions(
         return header + csv_string
     except Exception as e:
-        return f"Error retrieving insider transactions for {ticker}: {str(e)}"
+        return f"Error retrieving insider transactions for {ticker}: {str(e)}"
+
+
+def get_fundamentals(
+    ticker: Annotated[str, "ticker symbol of the company"],
+    curr_date: Annotated[str, "current date (not used for yfinance info)"] = None,
+):
+    """Get fundamental data from yfinance ticker.info."""
+    try:
+        ticker_obj = yf.Ticker(ticker.upper())
+        info = ticker_obj.info
+        if not info:
+            return f"No fundamental data found for symbol '{ticker}'"
+
+        keys_of_interest = [
+            "shortName", "longName", "sector", "industry", "fullTimeEmployees",
+            "marketCap", "enterpriseValue", "totalRevenue", "profitMargins",
+            "floatShares", "sharesOutstanding", "impliedSharesOutstanding",
+            "bookValue", "priceToBook", "trailingEps", "forwardEps",
+            "pegRatio", "priceToSalesTrailing12Months", "forwardPE", "trailingPE",
+            "dividendRate", "dividendYield", "payoutRatio",
+            "beta", "52WeekChange", "SnP52WeekChange", "lastDividendValue", "lastDividendDate",
+            "currentPrice", "targetHighPrice", "targetLowPrice", "targetMeanPrice",
+            "recommendationMean", "recommendationKey", "numberOfAnalystOpinions",
+            "totalCash", "totalCashPerShare", "ebitda", "totalDebt",
+            "quickRatio", "currentRatio", "revenueGrowth", "debtToEquity",
+            "returnOnAssets", "returnOnEquity", "grossProfits", "freeCashflow",
+            "operatingCashflow", "earningsGrowth", "grossMargins",
+            "ebitdaMargins", "operatingMargins", "auditRisk", "boardRisk",
+            "compensationRisk", "shareHolderRightsRisk", "overallRisk",
+        ]
+
+        report_lines = []
+        report_lines.append(f"# Fundamental Data for {ticker.upper()}")
+        report_lines.append(f"# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} (Real-time/Latest)")
+        report_lines.append("# Note: YFinance 'info' provides current data, not historical.")
+        report_lines.append("")
+
+        for key in keys_of_interest:
+            if key in info and info[key] is not None:
+                report_lines.append(f"{key}: {info[key]}")
+
+        if "longBusinessSummary" in info:
+            report_lines.append("")
+            report_lines.append("## Business Summary")
+            report_lines.append(info["longBusinessSummary"])
+
+        return "\n".join(report_lines)
+    except Exception as e:
+        return f"Error retrieving fundamentals for {ticker}: {str(e)}"

tradingagents/default_config.py

@@ -10,8 +10,8 @@ DEFAULT_CONFIG = {
     ),
     # LLM settings
     "llm_provider": "openai",
-    "deep_think_llm": "o4-mini",
-    "quick_think_llm": "gpt-4o-mini",
+    "deep_think_llm": "gemini-pro",
+    "quick_think_llm": "gemini-pro",
     "backend_url": "https://api.openai.com/v1",
     # Debate and discussion settings
     "max_debate_rounds": 1,

tradingagents/graph/trading_graph.py

@@ -79,8 +79,16 @@ class TradingAgentsGraph:
             self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
             self.quick_thinking_llm = ChatAnthropic(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
         elif self.config["llm_provider"].lower() == "google":
-            self.deep_thinking_llm = ChatGoogleGenerativeAI(model=self.config["deep_think_llm"])
-            self.quick_thinking_llm = ChatGoogleGenerativeAI(model=self.config["quick_think_llm"])
+            self.deep_thinking_llm = ChatGoogleGenerativeAI(
+                model=self.config["deep_think_llm"],
+                max_retries=10,
+                request_timeout=60
+            )
+            self.quick_thinking_llm = ChatGoogleGenerativeAI(
+                model=self.config["quick_think_llm"],
+                max_retries=10,
+                request_timeout=60
+            )
         else:
             raise ValueError(f"Unsupported LLM provider: {self.config['llm_provider']}")
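`max_retries` is a documented `ChatGoogleGenerativeAI` constructor parameter and governs retries on `429 RESOURCE_EXHAUSTED`; depending on the `langchain-google-genai` version, the timeout knob may be spelled `timeout` rather than `request_timeout`. A standalone sketch:

```python
# Standalone sketch -- assumes langchain-google-genai is installed and
# GOOGLE_API_KEY is set; the model name is one this commit adds to the menus.
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", max_retries=10)
print(llm.invoke("Summarize today's market mood in one sentence.").content)
```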