From 9647359246eb1d42ef86da345024e88823dbc4d6 Mon Sep 17 00:00:00 2001 From: ZeroAct Date: Thu, 12 Jun 2025 11:25:07 +0900 Subject: [PATCH 01/26] save reports & logs under results_dir --- cli/main.py | 22 ++++++++++++++++++++++ tradingagents/default_config.py | 1 + 2 files changed, 23 insertions(+) diff --git a/cli/main.py b/cli/main.py index e7bed4ee..462ad49f 100644 --- a/cli/main.py +++ b/cli/main.py @@ -1,6 +1,7 @@ from typing import Optional import datetime import typer +from pathlib import Path from rich.console import Console from rich.panel import Panel from rich.spinner import Spinner @@ -700,6 +701,10 @@ def run_analysis(): [analyst.value for analyst in selections["analysts"]], config=config, debug=True ) + # Create result directory + results_dir = Path(config["results_dir"]) / selections["ticker"] / datetime.datetime.now().strftime("%Y-%m-%d") + results_dir.mkdir(parents=True, exist_ok=True) + # Now start the display layout layout = create_layout() @@ -994,6 +999,23 @@ def run_analysis(): if section in final_state: message_buffer.update_report_section(section, final_state[section]) + # Save results to file + report_dir = results_dir / "reports" + report_dir.mkdir(parents=True, exist_ok=True) + + for section, content in message_buffer.report_sections.items(): + if content: + with open(report_dir / f"{section}.md", "w") as f: + f.write(content) + + for (timestamp, msg_type, content) in message_buffer.messages: + with open(results_dir / "messages.log", "a") as f: + f.write(f"{timestamp} [{msg_type}]: {content}\n") + + for (timestamp, tool_name, args) in message_buffer.tool_calls: + with open(results_dir / "tool_calls.log", "a") as f: + f.write(f"{timestamp} [Tool: {tool_name}]: {args}\n") + # Display the complete final report display_complete_report(final_state) diff --git a/tradingagents/default_config.py b/tradingagents/default_config.py index 5bb2548c..b5abf38e 100644 --- a/tradingagents/default_config.py +++ b/tradingagents/default_config.py @@ -2,6 
+2,7 @@ import os DEFAULT_CONFIG = { "project_dir": os.path.abspath(os.path.join(os.path.dirname(__file__), ".")), + "results_dir": os.getenv("TRADINGAGENTS_RESULTS_DIR", "./results"), "data_dir": "/Users/yluo/Documents/Code/ScAI/FR1-data", "data_cache_dir": os.path.join( os.path.abspath(os.path.join(os.path.dirname(__file__), ".")), From 417b09712ce1ea316e22f054f257888a985fa5f1 Mon Sep 17 00:00:00 2001 From: ZeroAct Date: Thu, 12 Jun 2025 13:53:28 +0900 Subject: [PATCH 02/26] refactor --- cli/main.py | 63 ++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 45 insertions(+), 18 deletions(-) diff --git a/cli/main.py b/cli/main.py index 462ad49f..92cfc92b 100644 --- a/cli/main.py +++ b/cli/main.py @@ -2,6 +2,7 @@ from typing import Optional import datetime import typer from pathlib import Path +from functools import wraps from rich.console import Console from rich.panel import Panel from rich.spinner import Spinner @@ -702,8 +703,51 @@ def run_analysis(): ) # Create result directory - results_dir = Path(config["results_dir"]) / selections["ticker"] / datetime.datetime.now().strftime("%Y-%m-%d") + results_dir = Path(config["results_dir"]) / selections["ticker"] / selections["analysis_date"] results_dir.mkdir(parents=True, exist_ok=True) + report_dir = results_dir / "reports" + report_dir.mkdir(parents=True, exist_ok=True) + log_file = results_dir / "message_tool.log" + log_file.touch(exist_ok=True) + + def save_message_decorator(obj, func_name): + func = getattr(obj, func_name) + @wraps(func) + def wrapper(*args, **kwargs): + func(*args, **kwargs) + timestamp, message_type, content = obj.messages[-1] + content = content.replace("\n", " ") # Replace newlines with spaces + with open(log_file, "a") as f: + f.write(f"{timestamp} [{message_type}] {content}\n") + return wrapper + + def save_tool_call_decorator(obj, func_name): + func = getattr(obj, func_name) + @wraps(func) + def wrapper(*args, **kwargs): + func(*args, **kwargs) + timestamp, tool_name, 
args = obj.tool_calls[-1] + args_str = ", ".join(f"{k}={v}" for k, v in args.items()) + with open(log_file, "a") as f: + f.write(f"{timestamp} [Tool Call] {tool_name}({args_str})\n") + return wrapper + + def save_report_section_decorator(obj, func_name): + func = getattr(obj, func_name) + @wraps(func) + def wrapper(section_name, content): + func(section_name, content) + if section_name in obj.report_sections and obj.report_sections[section_name] is not None: + content = obj.report_sections[section_name] + if content: + file_name = f"{section_name}.md" + with open(report_dir / file_name, "w") as f: + f.write(content) + return wrapper + + message_buffer.add_message = save_message_decorator(message_buffer, "add_message") + message_buffer.add_tool_call = save_tool_call_decorator(message_buffer, "add_tool_call") + message_buffer.update_report_section = save_report_section_decorator(message_buffer, "update_report_section") # Now start the display layout layout = create_layout() @@ -999,23 +1043,6 @@ def run_analysis(): if section in final_state: message_buffer.update_report_section(section, final_state[section]) - # Save results to file - report_dir = results_dir / "reports" - report_dir.mkdir(parents=True, exist_ok=True) - - for section, content in message_buffer.report_sections.items(): - if content: - with open(report_dir / f"{section}.md", "w") as f: - f.write(content) - - for (timestamp, msg_type, content) in message_buffer.messages: - with open(results_dir / "messages.log", "a") as f: - f.write(f"{timestamp} [{msg_type}]: {content}\n") - - for (timestamp, tool_name, args) in message_buffer.tool_calls: - with open(results_dir / "tool_calls.log", "a") as f: - f.write(f"{timestamp} [Tool: {tool_name}]: {args}\n") - # Display the complete final report display_complete_report(final_state) From 383deb72aac77d898c11e9bbbf5bfd17790a5371 Mon Sep 17 00:00:00 2001 From: Mithil Srungarapu Date: Wed, 18 Jun 2025 19:08:10 -0700 Subject: [PATCH 03/26] Updated README.md The 
diagrams were switched, so I fixed it. --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 9846761e..cac18691 100644 --- a/README.md +++ b/README.md @@ -80,7 +80,7 @@ Our framework decomposes complex trading tasks into specialized roles. This ensu - Composes reports from the analysts and researchers to make informed trading decisions. It determines the timing and magnitude of trades based on comprehensive market insights.

- +

### Risk Management and Portfolio Manager @@ -88,7 +88,7 @@ Our framework decomposes complex trading tasks into specialized roles. This ensu - The Portfolio Manager approves/rejects the transaction proposal. If approved, the order will be sent to the simulated exchange and executed.

- +

## Installation and CLI From 2af7ef3d798d8a94fe12129c4b85ce83e1126b8c Mon Sep 17 00:00:00 2001 From: Atharv Sabde <115389416+AtharvSabde@users.noreply.github.com> Date: Thu, 19 Jun 2025 21:48:16 +0530 Subject: [PATCH 04/26] fundamentals_analyst.py(spelling mistake.markdown) --- tradingagents/agents/analysts/fundamentals_analyst.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tradingagents/agents/analysts/fundamentals_analyst.py b/tradingagents/agents/analysts/fundamentals_analyst.py index 45ddb915..644d9f6b 100644 --- a/tradingagents/agents/analysts/fundamentals_analyst.py +++ b/tradingagents/agents/analysts/fundamentals_analyst.py @@ -22,7 +22,7 @@ def create_fundamentals_analyst(llm, toolkit): system_message = ( "You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, company financial history, insider sentiment and insider transactions to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." 
- + " Make sure to append a Makrdown table at the end of the report to organize key points in the report, organized and easy to read.", + + " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.", ) prompt = ChatPromptTemplate.from_messages( From 11804f88ffa8313b042f02161eb7afb6ea0f2910 Mon Sep 17 00:00:00 2001 From: Atharv Sabde <115389416+AtharvSabde@users.noreply.github.com> Date: Fri, 20 Jun 2025 15:58:22 +0530 Subject: [PATCH 05/26] Updated requirements.txt based on latest commit PULL REQUEST: Add support for other backends, such as OpenRouter and Ollama it had two requirments missing. added those --- requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/requirements.txt b/requirements.txt index 1c7c2818..a6154cd2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,3 +22,5 @@ redis chainlit rich questionary +langchain_anthropic +langchain-google-genai From 52284ce13c92d0abf68b32ed630ac8b31a81f97d Mon Sep 17 00:00:00 2001 From: Edward Sun Date: Sat, 21 Jun 2025 12:51:34 -0700 Subject: [PATCH 06/26] fixed anthropic support. Anthropic has different format of response when it has tool calls. 
Explicit handling added --- cli/main.py | 4 ++-- tradingagents/agents/analysts/fundamentals_analyst.py | 7 ++++++- tradingagents/agents/analysts/market_analyst.py | 7 ++++++- tradingagents/agents/analysts/news_analyst.py | 7 ++++++- tradingagents/agents/analysts/social_media_analyst.py | 7 ++++++- 5 files changed, 26 insertions(+), 6 deletions(-) diff --git a/cli/main.py b/cli/main.py index e6ff3095..3f42f2e2 100644 --- a/cli/main.py +++ b/cli/main.py @@ -97,7 +97,7 @@ class MessageBuffer: if content is not None: latest_section = section latest_content = content - + if latest_section and latest_content: # Format the current section for display section_titles = { @@ -808,7 +808,7 @@ def run_analysis(): msg_type = "System" # Add message to buffer - message_buffer.add_message(msg_type, content) + message_buffer.add_message(msg_type, content) # If it's a tool call, add it to tool calls if hasattr(last_message, "tool_calls"): diff --git a/tradingagents/agents/analysts/fundamentals_analyst.py b/tradingagents/agents/analysts/fundamentals_analyst.py index 45ddb915..dc8d4eb6 100644 --- a/tradingagents/agents/analysts/fundamentals_analyst.py +++ b/tradingagents/agents/analysts/fundamentals_analyst.py @@ -51,9 +51,14 @@ def create_fundamentals_analyst(llm, toolkit): result = chain.invoke(state["messages"]) + report = "" + + if len(result.tool_calls) == 0: + report = result.content + return { "messages": [result], - "fundamentals_report": result.content, + "fundamentals_report": report, } return fundamentals_analyst_node diff --git a/tradingagents/agents/analysts/market_analyst.py b/tradingagents/agents/analysts/market_analyst.py index 4a18761a..41ee944b 100644 --- a/tradingagents/agents/analysts/market_analyst.py +++ b/tradingagents/agents/analysts/market_analyst.py @@ -76,9 +76,14 @@ Volume-Based Indicators: result = chain.invoke(state["messages"]) + report = "" + + if len(result.tool_calls) == 0: + report = result.content + return { "messages": [result], - "market_report": 
result.content, + "market_report": report, } return market_analyst_node diff --git a/tradingagents/agents/analysts/news_analyst.py b/tradingagents/agents/analysts/news_analyst.py index 12222f5d..e1f03aa4 100644 --- a/tradingagents/agents/analysts/news_analyst.py +++ b/tradingagents/agents/analysts/news_analyst.py @@ -47,9 +47,14 @@ def create_news_analyst(llm, toolkit): chain = prompt | llm.bind_tools(tools) result = chain.invoke(state["messages"]) + report = "" + + if len(result.tool_calls) == 0: + report = result.content + return { "messages": [result], - "news_report": result.content, + "news_report": report, } return news_analyst_node diff --git a/tradingagents/agents/analysts/social_media_analyst.py b/tradingagents/agents/analysts/social_media_analyst.py index 42fb1f71..d556f73a 100644 --- a/tradingagents/agents/analysts/social_media_analyst.py +++ b/tradingagents/agents/analysts/social_media_analyst.py @@ -47,9 +47,14 @@ def create_social_media_analyst(llm, toolkit): result = chain.invoke(state["messages"]) + report = "" + + if len(result.tool_calls) == 0: + report = result.content + return { "messages": [result], - "sentiment_report": result.content, + "sentiment_report": report, } return social_media_analyst_node From b8f712b17030f5d447d775a70d928cf7bf55b803 Mon Sep 17 00:00:00 2001 From: Zhongyi Lu Date: Sat, 21 Jun 2025 23:29:26 -0700 Subject: [PATCH 07/26] Exclude `.env` from Git --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 8313619e..4ebf99e3 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ src/ eval_results/ eval_data/ *.egg-info/ +.env From 78ea029a0bbe3ea23c0016c1f54d022bc8c28459 Mon Sep 17 00:00:00 2001 From: Geeta Chauhan <4461127+chauhang@users.noreply.github.com> Date: Wed, 25 Jun 2025 20:57:05 -0700 Subject: [PATCH 08/26] Docker support and Ollama support (#47) - Added support for running CLI and Ollama server via Docker - Introduced tests for local embeddings model and standalone Docker 
setup - Enabled conditional Ollama server launch via LLM_PROVIDER --- .dockerignore | 125 +++++++ .env.example | 36 ++ .gitattributes | 2 + .gitignore | 7 + Docker-readme.md | 506 +++++++++++++++++++++++++++ Dockerfile | 65 ++++ README.md | 4 + build.bat | 176 ++++++++++ build.sh | 248 +++++++++++++ cli/utils.py | 6 +- docker-compose.yml | 74 ++++ init-ollama.bat | 97 +++++ init-ollama.sh | 78 +++++ main.py | 55 ++- requirements.txt | 7 +- tests/README.md | 185 ++++++++++ tests/__init__.py | 10 + tests/run_tests.py | 101 ++++++ tests/test_ollama_connection.py | 108 ++++++ tests/test_openai_connection.py | 142 ++++++++ tests/test_setup.py | 122 +++++++ tradingagents/agents/utils/memory.py | 1 + tradingagents/default_config.py | 5 +- 23 files changed, 2141 insertions(+), 19 deletions(-) create mode 100644 .dockerignore create mode 100644 .env.example create mode 100644 .gitattributes create mode 100644 Docker-readme.md create mode 100644 Dockerfile create mode 100644 build.bat create mode 100644 build.sh create mode 100644 docker-compose.yml create mode 100644 init-ollama.bat create mode 100644 init-ollama.sh create mode 100644 tests/README.md create mode 100644 tests/__init__.py create mode 100644 tests/run_tests.py create mode 100644 tests/test_ollama_connection.py create mode 100644 tests/test_openai_connection.py create mode 100644 tests/test_setup.py diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..5230626d --- /dev/null +++ b/.dockerignore @@ -0,0 +1,125 @@ +# Git specific +.git +.gitignore +.gitattributes +*.git + +# Python specific +__pycache__/ +*.pyc +*.pyo +*.pyd +*.egg-info/ +.Python +env/ +venv/ +.venv/ +.pytest_cache/ +.coverage +.coverage.* +htmlcov/ +.tox/ +.mypy_cache/ +.dmypy.json +dmypy.json + +# Environment files +.env +.env.* +!.env.example + +# IDE/Editor specific +.vscode/ +.idea/ +*.swp +*.swo +*.sublime-* +.spyderproject +.spyproject + +# Model cache directories (can be large) +.ollama/ +ollama_data/ +.cache/ 
+.local/ + +# Documentation and non-essential files +*.md +!README.md +docs/ +assets/ +*.png +*.jpg +*.jpeg +*.gif +*.svg +!assets/TauricResearch.png + +# Build artifacts and logs +build/ +dist/ +*.log +logs/ +*.tmp +*.temp + +# Test files (uncomment if you don't want tests in production image) +# tests/test_*.py +# test_*.py +# *_test.py + +# Docker and deployment files +Dockerfile* +docker-compose*.yml +.dockerignore +build*.sh +deploy*.sh +k8s/ +helm/ + +# Development tools +.devcontainer/ +.github/ +.gitlab-ci.yml +.travis.yml +.circleci/ +Makefile + +# Data files (can be large) +data/ +*.csv +*.json +*.xlsx +*.db +*.sqlite + +# Temporary and backup files +*.bak +*.backup +*.orig +*.rej +~* +.#* +\#*# + +# OS specific +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +ehthumbs.db +Thumbs.db +Desktop.ini + +# Node.js (if any frontend assets) +node_modules/ +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# Lock files (include them for reproducible builds) +# Uncomment if you want to exclude them +# uv.lock +# poetry.lock +# Pipfile.lock diff --git a/.env.example b/.env.example new file mode 100644 index 00000000..1868c5a8 --- /dev/null +++ b/.env.example @@ -0,0 +1,36 @@ +# This is an example .env file for the Trading Agent project. +# Copy this file to .env and fill in your API keys and environment configurations. +# "NOTE: When using for `docker` command do not use quotes around the values, otherwise environment variables will not be set." 
+ +# API Keys +# Set your OpenAI API key, for OpenAI, Ollama or other OpenAI-compatible models +OPENAI_API_KEY= +# Set your Finnhub API key +FINNHUB_API_KEY= + +#LLM Configuration for OpenAI +# Set LLM_Provider to one of: openai, anthropic, google, openrouter or ollama, +LLM_PROVIDER=openai +# Set the API URL for the LLM backend +LLM_BACKEND_URL=https://api.openai.com/v1 + + +# Uncomment for LLM Configuration for local ollama +#LLM_PROVIDER=ollama +## For Ollama running in the same container, /v1 added for OpenAI compatibility +#LLM_BACKEND_URL=http://localhost:11434/v1 +# Set name of the Deep think model +LLM_DEEP_THINK_MODEL=llama3.2 +## Setname of the quick think model +LLM_QUICK_THINK_MODEL=qwen3 +# Set the name of the embedding model +LLM_EMBEDDING_MODEL=nomic-embed-text + +# Agent Configuration +# Maximum number of debate rounds for the agent to engage in choose from 1, 3, 5 +MAX_DEBATE_ROUNDS=1 +# Set to False if you want to disable tools that access the internet +ONLINE_TOOLS=True + + + diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..6aec1b04 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +init-ollama.sh text eol=lf +build.sh text eol=lf \ No newline at end of file diff --git a/.gitignore b/.gitignore index 8313619e..cf5ea542 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,10 @@ src/ eval_results/ eval_data/ *.egg-info/ +.ollama/ +ollama_data/ +.local/ +.cache/ +.pytest_cache/ +.devcontainer/ +.env diff --git a/Docker-readme.md b/Docker-readme.md new file mode 100644 index 00000000..080c5660 --- /dev/null +++ b/Docker-readme.md @@ -0,0 +1,506 @@ +# πŸš€ Docker Setup for Trading Agents + +This guide provides instructions for running the Trading Agents application within a secure and reproducible Docker environment. Using Docker simplifies setup, manages dependencies, and ensures a consistent experience across different machines. 
+ +The recommended method is using `docker-compose`, which handles the entire stack, including the Ollama server and model downloads. + +## Prerequisites + +Before you begin, ensure you have the following installed: + +- [**Docker**](https://docs.docker.com/get-docker/) +- [**Docker Compose**](https://docs.docker.com/compose/install/) (usually included with Docker Desktop) + +## πŸ€” Which Option Should I Choose? + +| Feature | OpenAI | Local Ollama | +| ------------------------- | ------------------------- | ----------------------------- | +| **Setup Time** | 2-5 minutes | 15-30 minutes | +| **Cost** | ~$0.01-0.05 per query | Free after setup | +| **Quality** | GPT-4o (excellent) | Depends on model | +| **Privacy** | Data sent to OpenAI | Fully private | +| **Internet Required** | Yes | No (after setup) | +| **Hardware Requirements** | None | 4GB+ RAM recommended | +| **Model Downloads** | None | Depends on model | +| **Best For** | Quick testing, production | Privacy-focused, cost control | + +**πŸ’‘ Recommendation**: Start with OpenAI for quick testing, then switch to Ollama for production if privacy/cost is important. + +## ⚑ Quickstart + +### Option A: Using OpenAI (Recommended for beginners) + +```bash +# 1. Clone the repository +git clone https://github.com/TauricResearch/TradingAgents.git +cd TradingAgents + +# 2. Create and configure environment file +cp .env.example .env +# Edit .env: Set LLM_PROVIDER=openai and add your OPENAI_API_KEY + +# 3. Build and run with OpenAI +docker compose --profile openai build +docker compose --profile openai run -it app-openai +``` + +### Option B: Using Local Ollama (Free but requires more setup) + +```bash +# 1. Clone the repository +git clone https://github.com/TauricResearch/TradingAgents.git +cd TradingAgents + +# 2. Create environment file +cp .env.example .env +# Edit .env: Set LLM_PROVIDER=ollama + +# 3. Start Ollama service +docker compose --profile ollama up -d --build + +# 4. 
Initialize models (first time only) +# Linux/macOS: +./init-ollama.sh +# Windows Command Prompt: +init-ollama.bat + + +# 5. Run the command-line app +docker compose --profile ollama run -it app-ollama +``` + +## πŸ› οΈ Build Methods + +Choose your preferred build method: + +### Method 1: Quick Build (Recommended) + +```bash +# Standard Docker build +docker build -t trading-agents . + +# Or with docker-compose +docker compose build +``` + +### Method 2: Optimized Build (Advanced) + +For faster rebuilds with caching: + +**Linux/macOS:** + +```bash +# Build with BuildKit optimization +./build.sh + +# With testing +./build.sh --test + +# Clean cache and rebuild +./build.sh --clean --test +``` + +**Windows Command Prompt:** + +```cmd +REM Build with BuildKit optimization +build.bat + +REM With testing +build.bat --test + +REM Clean cache and rebuild +build.bat --clean --test +``` + +**Benefits of Optimized Build:** + +- ⚑ 60-90% faster rebuilds via BuildKit cache +- πŸ”„ Automatic fallback to simple build if needed +- πŸ“Š Cache statistics and build info +- πŸ§ͺ Built-in testing capabilities + +## Step-by-Step Instructions + +### Step 1: Clone the Repository + +```bash +git clone https://github.com/TauricResearch/TradingAgents.git +cd TradingAgents +``` + +### Step 2: Configure Your Environment (`.env` file) + +The application is configured using an environment file. Create your own `.env` file by copying the provided template. 
+ +```bash +cp .env.example .env +``` + +#### Option A: OpenAI Configuration (Recommended) + +Edit your `.env` file and set: + +```env +# LLM Provider Configuration +LLM_PROVIDER=openai +LLM_BACKEND_URL=https://api.openai.com/v1 + +# API Keys +OPENAI_API_KEY=your-actual-openai-api-key-here +FINNHUB_API_KEY=your-finnhub-api-key-here + +# Agent Configuration +MAX_DEBATE_ROUNDS=1 +ONLINE_TOOLS=True +``` + +**Benefits of OpenAI:** + +- βœ… No local setup required +- βœ… Higher quality responses (GPT-4o) +- βœ… Faster startup (no model downloads) +- βœ… No GPU/CPU requirements +- ❌ Requires API costs ($0.01-0.05 per query) + +#### Option B: Local Ollama Configuration (Free) + +Edit your `.env` file and set: + +```env +# LLM Provider Configuration +LLM_PROVIDER=ollama +LLM_BACKEND_URL=http://ollama:11434/v1 + +# Local Models +LLM_DEEP_THINK_MODEL=llama3.2 +LLM_QUICK_THINK_MODEL=qwen3 +LLM_EMBEDDING_MODEL=nomic-embed-text + +# API Keys (still need Finnhub for market data) +FINNHUB_API_KEY=your-finnhub-api-key-here + +# Agent Configuration +MAX_DEBATE_ROUNDS=1 +ONLINE_TOOLS=True +``` + +**Benefits of Ollama:** + +- βœ… Completely free after setup +- βœ… Data privacy (runs locally) +- βœ… Works offline +- ❌ Requires initial setup and model downloads +- ❌ Slower responses than cloud APIs + +### Step 3: Run with Docker Compose + +Choose the appropriate method based on your LLM provider configuration: + +#### Option A: Running with OpenAI + +```bash +# Build the app container +docker compose --profile openai build +# Or use optimized build: ./build.sh + +# Test OpenAI connection (optional) +docker compose --profile openai run --rm app-openai python tests/test_openai_connection.py + +# Run the trading agents +docker compose --profile openai run -it app-openai +``` + +**No additional services needed** - the app connects directly to OpenAI's API. 
+ +#### Option B: Running with Ollama (CPU) + +```bash +# Start the Ollama service +docker compose --profile ollama up -d --build +# Or use optimized build: ./build.sh + +# Initialize Ollama models (first time only) +# Linux/macOS: +./init-ollama.sh +# Windows Command Prompt: +init-ollama.bat + +# Test Ollama connection (optional) +docker compose --profile ollama exec app-ollama python tests/test_ollama_connection.py + +# Run the trading agents +docker compose --profile ollama run -it app-ollama +``` + +#### Option C: Running with Ollama (GPU) + +First, uncomment the GPU configuration in docker-compose.yml: + +```yaml +# deploy: +# resources: +# reservations: +# devices: +# - capabilities: ["gpu"] +``` + +Then run: + +```bash +# Start with GPU support +docker compose --profile ollama up -d --build +# Or use optimized build: ./build.sh + +# Initialize Ollama models (first time only) +# Linux/macOS: +./init-ollama.sh +# Windows Command Prompt: +init-ollama.bat + +# Run the trading agents +docker compose --profile ollama run -it app-ollama +``` + +#### View Logs + +To view the application logs in real-time, you can run: + +```bash +docker compose --profile ollama logs -f +``` + +#### Stop the Containers + +To stop and remove the containers: + +```bash +docker compose --profile ollama down +``` + +### Step 4: Verify Your Setup (Optional) + +#### For OpenAI Setup: + +```bash +# Test OpenAI API connection +docker compose --profile openai run --rm app-openai python tests/test_openai_connection.py + +# Run a quick trading analysis test +docker compose --profile openai run --rm app-openai python tests/test_setup.py + +# Run all tests automatically +docker compose --profile openai run --rm app-openai python tests/run_tests.py +``` + +#### For Ollama Setup: + +```bash +# Test Ollama connection +docker compose --profile ollama exec app-ollama python tests/test_ollama_connection.py + +# Run a quick trading analysis test +docker compose --profile ollama exec app-ollama python 
tests/test_setup.py + +# Run all tests automatically +docker compose --profile ollama exec app-ollama python tests/run_tests.py +``` + +### Step 5: Model Management (Optional) + +#### View and Manage Models + +```bash +# List all available models +docker compose --profile ollama exec ollama ollama list + +# Check model cache size +du -sh ./ollama_data + +# Pull additional models (cached locally) +docker compose --profile ollama exec ollama ollama pull llama3.2 + +# Remove a model (frees up cache space) +docker compose --profile ollama exec ollama ollama rm model-name +``` + +#### Model Cache Benefits + +- **Persistence**: Models downloaded once are reused across container restarts +- **Speed**: Subsequent startups are much faster (seconds vs minutes) +- **Bandwidth**: No need to re-download multi-GB models +- **Offline**: Once cached, models work without internet connection + +#### Troubleshooting Cache Issues + +```bash +# If models seem corrupted, clear cache and re-initialize +docker compose --profile ollama down +rm -rf ./ollama_data +docker compose --profile ollama up -d +# Linux/macOS: +./init-ollama.sh +# Windows Command Prompt: +init-ollama.bat +``` + +βœ… **Expected Output:** + +``` +Testing Ollama connection: + Backend URL: http://localhost:11434/v1 + Model: qwen3:0.6b + Embedding Model: nomic-embed-text +βœ… Ollama API is responding +βœ… Model 'qwen3:0.6b' is available +βœ… OpenAI-compatible API is working + Response: ... +``` + +--- + +## Alternative Method: Using `docker` Only + +If you prefer not to use `docker-compose`, you can build and run the container manually. + +**1. Build the Docker Image:** + +```bash +# Standard build +docker build -t trading-agents . + +# Or optimized build (recommended) +# Linux/macOS: +./build.sh +# Windows Command Prompt: +build.bat +``` + +**2. Test local ollama setup (Optional):** +Make sure you have a `.env` file configured as described in Step 2. 
If you are using `LLM_PROVIDER="ollama"`, you can verify that the Ollama server is running correctly and has the necessary models. + +```bash +docker run -it --network host --env-file .env trading-agents python tests/test_ollama_connection.py +``` + +The command above picks environment settings from the `.env` file. Alternatively, you can pass values directly using: + +```bash +docker run -it --network host \ + -e LLM_PROVIDER="ollama" \ + -e LLM_BACKEND_URL="http://localhost:11434/v1" \ + -e LLM_DEEP_THINK_MODEL="qwen3:0.6b" \ + -e LLM_EMBEDDING_MODEL="nomic-embed-text"\ + trading-agents \ + python tests/test_ollama_connection.py +``` + +To prevent re-downloading of Ollama models, mount a folder from your host and run as + +```bash +docker run -it --network host \ + -e LLM_PROVIDER="ollama" \ + -e LLM_BACKEND_URL="http://localhost:11434/v1" \ + -e LLM_DEEP_THINK_MODEL="qwen3:0.6b" \ + -e LLM_EMBEDDING_MODEL="nomic-embed-text"\ + -v ./ollama_cache:/app/.ollama \ + trading-agents \ + python tests/test_ollama_connection.py +``` + +**3. Run the Docker Container:** +Make sure you have a `.env` file configured as described in Step 2. + +```bash +docker run --rm -it \ + --network host \ + --env-file .env \ + -v ./data:/app/data \ + --name trading-agents \ + trading-agents +``` + +**4.
Run on GPU machine:** +For running on GPU machine, pass `--gpus=all` flag to the `docker run` command: + +```bash +docker run --rm -it \ + --gpus=all \ + --network host \ + --env-file .env \ + -v ./data:/app/data \ + --name trading-agents \ + trading-agents +``` + +## Configuration Details + +### Test Suite Organization + +All test scripts are organized in the `tests/` directory: + +``` +tests/ +β”œβ”€β”€ __init__.py # Python package initialization +β”œβ”€β”€ run_tests.py # Automated test runner +β”œβ”€β”€ test_openai_connection.py # OpenAI API connectivity tests +β”œβ”€β”€ test_ollama_connection.py # Ollama connectivity tests +└── test_setup.py # General setup and configuration tests +``` + +**Automated Testing:** + +```bash +# Run all tests automatically (detects provider) - from project root +python tests/run_tests.py + +# Run specific test - from project root +python tests/test_openai_connection.py +python tests/test_ollama_connection.py +python tests/test_setup.py +``` + +**⚠️ Important**: When running tests locally (outside Docker), always run from the **project root directory**, not from inside the `tests/` folder. The Docker commands automatically handle this. + +### Live Reloading + +The `app` directory is mounted as a volume into the container. This means any changes you make to the source code on your local machine will be reflected instantly in the running container without needing to rebuild the image. 
+ +### Persistent Data & Model Caching + +The following volumes are used to persist data between container runs: + +- **`./data`**: Stores application data, trading reports, and cached market data +- **`./ollama_data`**: Caches downloaded Ollama models (typically 1-4GB per model) + +#### Model Cache Management + +The Ollama models are automatically cached in `./ollama_data/` on your host machine: + +- **First run**: Models are downloaded automatically (may take 5-15 minutes depending on internet speed) +- **Subsequent runs**: Models are reused from cache, startup is much faster +- **Cache location**: `./ollama_data/` directory in your project folder +- **Cache size**: Typically 2-6GB total for the required models + +```bash +# Check cache size +du -sh ./ollama_data + +# Clean cache if needed (will require re-downloading models) +rm -rf ./ollama_data + +# List cached models +docker compose --profile ollama exec ollama ollama list +``` + +### GPU troubleshooting + +If you find the model is running very slowly on a GPU machine, make sure you have the latest GPU drivers installed and that the GPU is working correctly with Docker.
Eg you can check for Nvidia GPUs by running: + +```bash +docker run --rm -it --gpus=all nvcr.io/nvidia/k8s/cuda-sample:nbody nbody -gpu -benchmark + +or + +nvidia-smi +``` diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..56ada79b --- /dev/null +++ b/Dockerfile @@ -0,0 +1,65 @@ +# syntax=docker/dockerfile:1.4 + +# Build stage for dependencies +FROM python:3.9-slim-bookworm AS builder + +# Set environment variables for build +ENV PYTHONDONTWRITEBYTECODE=1 \ + PIP_DISABLE_PIP_VERSION_CHECK=on \ + PIP_DEFAULT_TIMEOUT=100 + +# Install build dependencies +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt-get update && apt-get install -y --no-install-recommends \ + curl \ + git \ + && apt-get clean + +# Create virtual environment +RUN python -m venv /opt/venv +ENV PATH="/opt/venv/bin:$PATH" + +# Copy requirements and install Python dependencies +COPY requirements.txt . +RUN --mount=type=cache,target=/root/.cache/pip \ + pip install --no-cache-dir -r requirements.txt + +# Runtime stage +FROM python:3.9-slim-bookworm AS runtime + +# Set environment variables +ENV PYTHONUNBUFFERED=1 \ + PYTHONDONTWRITEBYTECODE=1 \ + PIP_DISABLE_PIP_VERSION_CHECK=on \ + PIP_DEFAULT_TIMEOUT=100 + +# Install runtime dependencies +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt-get update && apt-get install -y --no-install-recommends \ + curl \ + git \ + && apt-get clean + +# Copy virtual environment from builder stage +COPY --from=builder /opt/venv /opt/venv +ENV PATH="/opt/venv/bin:$PATH" + +# Create a non-root user and group +RUN groupadd -r appuser && useradd -r -g appuser -s /bin/bash -d /app appuser + +# Create app directory +WORKDIR /app + +# Copy the application code +COPY . . 
+ +# Change ownership of the app directory to the non-root user +RUN chown -R appuser:appuser /app + +# Switch to non-root user +USER appuser + +# Default command (can be overridden, e.g., by pytest command in CI) +CMD ["python", "-m", "cli.main"] diff --git a/README.md b/README.md index cac18691..41cfc946 100644 --- a/README.md +++ b/README.md @@ -192,6 +192,10 @@ print(decision) You can view the full list of configurations in `tradingagents/default_config.py`. +## Docker usage and local ollama tests ## + +See [Docker Readme](./Docker-readme.md) for details. + ## Contributing We welcome contributions from the community! Whether it's fixing a bug, improving documentation, or suggesting a new feature, your input helps make this project better. If you are interested in this line of research, please consider joining our open-source financial AI research community [Tauric Research](https://tauric.ai/). diff --git a/build.bat b/build.bat new file mode 100644 index 00000000..19e4de97 --- /dev/null +++ b/build.bat @@ -0,0 +1,176 @@ +@echo off +REM πŸš€ Optimized BuildKit Docker Build Script for TradingAgents (Windows Batch) +REM This script uses Docker BuildKit for faster builds with advanced caching + +setlocal EnableDelayedExpansion + +REM Configuration +set "IMAGE_NAME=trading-agents" +set "CACHE_TAG=%IMAGE_NAME%:cache" +set "LATEST_TAG=%IMAGE_NAME%:latest" +set "REGISTRY=" +set "TARGET=production" +set "CLEAN_CACHE=" +set "RUN_TESTS=" +set "SHOW_STATS=" +set "SHOW_HELP=" + +REM Parse command line arguments +:parse_args +if "%~1"=="" goto end_parse +if /i "%~1"=="--clean" ( + set "CLEAN_CACHE=1" + shift + goto parse_args +) +if /i "%~1"=="--test" ( + set "RUN_TESTS=1" + shift + goto parse_args +) +if /i "%~1"=="--stats" ( + set "SHOW_STATS=1" + shift + goto parse_args +) +if /i "%~1"=="--help" ( + set "SHOW_HELP=1" + shift + goto parse_args +) +if /i "%~1"=="-h" ( + set "SHOW_HELP=1" + shift + goto parse_args +) +echo [ERROR] Unknown option: %~1 +exit /b 1 + 
+:end_parse + +REM Show help if requested +if defined SHOW_HELP ( + echo πŸš€ TradingAgents Optimized Docker Build ^(Windows^) + echo Usage: build-optimized.bat [OPTIONS] + echo. + echo Options: + echo --clean Clean build cache before building + echo --test Run tests after building + echo --stats Show cache statistics after building + echo --help, -h Show this help message + echo. + echo Examples: + echo build-optimized.bat # Build image + echo build-optimized.bat --clean --test # Clean cache, build, and test + exit /b 0 +) + +echo πŸš€ TradingAgents Optimized Docker Build ^(Windows^) +echo ========================================= + +REM Check if BuildKit is available +echo [INFO] Checking BuildKit availability... +docker buildx version >nul 2>&1 +if errorlevel 1 ( + echo [ERROR] Docker BuildKit ^(buildx^) is not available + echo [ERROR] Please install Docker BuildKit or update Docker to a newer version + exit /b 1 +) +echo [SUCCESS] BuildKit is available + +REM Create buildx builder if it doesn't exist +echo [INFO] Setting up BuildKit builder... +docker buildx inspect trading-agents-builder >nul 2>&1 +if errorlevel 1 ( + echo [INFO] Creating new buildx builder 'trading-agents-builder'... + docker buildx create --name trading-agents-builder --driver docker-container --bootstrap + if errorlevel 1 ( + echo [ERROR] Failed to create builder + exit /b 1 + ) +) + +REM Use our builder +docker buildx use trading-agents-builder +if errorlevel 1 ( + echo [ERROR] Failed to use builder + exit /b 1 +) +echo [SUCCESS] Builder 'trading-agents-builder' is ready + +REM Clean cache if requested +if defined CLEAN_CACHE ( + echo [INFO] Cleaning build cache... 
+ docker buildx prune -f + echo [SUCCESS] Build cache cleaned +) + +REM Show build information +echo [INFO] Build Information: +echo πŸ“¦ Image: %LATEST_TAG% +echo πŸ“Š Cache: Local BuildKit cache +echo πŸ”„ Multi-stage: Yes ^(builder β†’ runtime^) +echo 🌐 Network: Host networking mode + +REM Build the image +echo [INFO] Building image with BuildKit cache optimization... + +REM Get build metadata +for /f "tokens=*" %%i in ('powershell -Command "(Get-Date).ToUniversalTime().ToString('yyyy-MM-ddTHH:mm:ssZ')"') do set "BUILD_DATE=%%i" +for /f "tokens=*" %%i in ('git rev-parse --short HEAD 2^>nul') do set "GIT_HASH=%%i" +if "!GIT_HASH!"=="" set "GIT_HASH=unknown" + +REM Execute build +echo [INFO] Starting Docker build... +docker buildx build ^ + --file Dockerfile ^ + --tag %LATEST_TAG% ^ + --cache-from type=local,src=C:\tmp\.buildx-cache ^ + --cache-to type=local,dest=C:\tmp\.buildx-cache,mode=max ^ + --label build.date=%BUILD_DATE% ^ + --label build.version=%GIT_HASH% ^ + --load ^ + . + +if errorlevel 1 ( + echo [ERROR] ❌ Build failed! + exit /b 1 +) + +echo [SUCCESS] βœ… Build completed successfully! + +REM Test the image if requested +if defined RUN_TESTS ( + echo [INFO] Testing built image... + + REM Basic functionality test + docker run --rm %LATEST_TAG% python -c "print('βœ… Image test successful')" + if errorlevel 1 ( + echo [ERROR] Image test failed + exit /b 1 + ) + echo [SUCCESS] Image test passed + + REM Test import capabilities + docker run --rm %LATEST_TAG% python -c "from tradingagents.default_config import DEFAULT_CONFIG; print('βœ… Import test successful')" + if errorlevel 1 ( + echo [WARNING] Import test failed ^(this might be expected if dependencies are missing^) + ) else ( + echo [SUCCESS] Import test passed + ) +) + +REM Show cache statistics if requested +if defined SHOW_STATS ( + echo [INFO] Cache Statistics: + docker buildx du 2>nul || echo Cache statistics not available +) + +echo. +echo [SUCCESS] πŸŽ‰ Ready to use! 
Try: +echo docker run -it --network host %LATEST_TAG% +echo docker compose --profile openai run -it app-openai +echo docker compose --profile ollama up -d ^&^& docker compose --profile ollama exec app-ollama cmd +echo docker compose --profile default run -it app + +exit /b 0 \ No newline at end of file diff --git a/build.sh b/build.sh new file mode 100644 index 00000000..006e360c --- /dev/null +++ b/build.sh @@ -0,0 +1,248 @@ +#!/bin/bash + +# πŸš€ Optimized BuildKit Docker Build Script for TradingAgents +# This script uses Docker BuildKit for faster builds with advanced caching + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Configuration +IMAGE_NAME="trading-agents" +CACHE_TAG="${IMAGE_NAME}:cache" +LATEST_TAG="${IMAGE_NAME}:latest" +REGISTRY="" # Set this if you want to push to a registry + +# Function to print colored output +print_status() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +print_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +print_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Check if BuildKit is available +check_buildkit() { + print_status "Checking BuildKit availability..." + + if ! docker buildx version > /dev/null 2>&1; then + print_error "Docker BuildKit (buildx) is not available" + print_error "Please install Docker BuildKit or update Docker to a newer version" + exit 1 + fi + + print_success "BuildKit is available" +} + +# Create buildx builder if it doesn't exist +setup_builder() { + print_status "Setting up BuildKit builder..." + + # Check if our builder exists + if ! docker buildx inspect trading-agents-builder > /dev/null 2>&1; then + print_status "Creating new buildx builder 'trading-agents-builder'..." 
+ docker buildx create --name trading-agents-builder --driver docker-container --bootstrap + fi + + # Use our builder + docker buildx use trading-agents-builder + print_success "Builder 'trading-agents-builder' is ready" +} + +# Build with cache optimization +build_image() { + print_status "Building image with BuildKit cache optimization..." + + # Build arguments + local build_args=( + --file Dockerfile + --tag "$LATEST_TAG" + --cache-from "type=local,src=/tmp/.buildx-cache" + --cache-to "type=local,dest=/tmp/.buildx-cache,mode=max" + --load # Load into local Docker daemon + ) + + # Add build metadata + build_args+=( + --label "build.date=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" + --label "build.version=$(git rev-parse --short HEAD 2>/dev/null || echo 'unknown')" + --label "build.target=$target" + ) + + print_status "Build command: docker buildx build ${build_args[*]} ." + + # Execute build + if docker buildx build "${build_args[@]}" .; then + print_success "Build completed successfully!" + return 0 + else + print_error "Build failed!" + print_warning "Attempting fallback build with simple Dockerfile..." + return build_simple_fallback + fi +} + +# Fallback build function for when BuildKit fails +build_simple_fallback() { + print_status "Using simple Dockerfile as fallback..." + + if [ -f "Dockerfile.simple" ]; then + if docker build -f Dockerfile.simple -t "$LATEST_TAG" .; then + print_success "Fallback build completed successfully!" + print_warning "Note: Using simple build without advanced caching" + return 0 + else + print_error "Fallback build also failed!" 
+ return 1 + fi + else + print_error "Dockerfile.simple not found for fallback" + return 1 + fi +} + +# Show build info +show_build_info() { + print_status "Build Information:" + echo " πŸ“¦ Image: $LATEST_TAG" + echo " πŸ—οΈ Builder: $(docker buildx inspect --bootstrap | grep "Name:" | head -1 | cut -d: -f2 | xargs)" + echo " πŸ“Š Cache: Local BuildKit cache" + echo " πŸ”„ Multi-stage: Yes (builder β†’ runtime)" + echo " 🌐 Network: Host networking mode" +} + +# Test the built image +test_image() { + print_status "Testing built image..." + + # Basic functionality test + if docker run --rm "$LATEST_TAG" python -c "print('βœ… Image test successful')"; then + print_success "Image test passed" + else + print_error "Image test failed" + return 1 + fi + + # Test import capabilities + if docker run --rm "$LATEST_TAG" python -c "from tradingagents.default_config import DEFAULT_CONFIG; print('βœ… Import test successful')"; then + print_success "Import test passed" + else + print_warning "Import test failed (this might be expected if dependencies are missing)" + fi +} + +# Show cache statistics +show_cache_stats() { + print_status "Cache Statistics:" + + # Show buildx disk usage + if docker buildx du > /dev/null 2>&1; then + docker buildx du + else + echo " Cache statistics not available" + fi +} + +# Clean up build cache +clean_cache() { + print_status "Cleaning build cache..." 
+ docker buildx prune -f + print_success "Build cache cleaned" +} + +# Main function +main() { + echo "πŸš€ TradingAgents Optimized Docker Build" + echo "========================================" + + # Parse arguments + local clean=false + local test=false + local stats=false + + while [[ $# -gt 0 ]]; do + case $1 in + --clean) + clean=true + shift + ;; + --test) + test=true + shift + ;; + --stats) + stats=true + shift + ;; + --help|-h) + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Options:" + echo " --clean Clean build cache before building" + echo " --test Run tests after building" + echo " --stats Show cache statistics after building" + echo " --help, -h Show this help message" + echo "" + echo "Examples:" + echo " $0 # Build image" + echo " $0 --clean --test # Clean cache, build, and test" + echo " $0 --stats # Build and show cache stats" + exit 0 + ;; + *) + print_error "Unknown option: $1" + exit 1 + ;; + esac + done + + # Execute steps + check_buildkit + setup_builder + + if [ "$clean" = true ]; then + clean_cache + fi + + show_build_info + + if build_image; then + print_success "βœ… Build completed successfully!" + + if [ "$test" = true ]; then + test_image + fi + + if [ "$stats" = true ]; then + show_cache_stats + fi + + echo "" + print_success "πŸŽ‰ Ready to use! Try:" + echo " docker run -it --network host $LATEST_TAG" + echo " docker compose --profile openai run -it app-openai" + echo " docker compose --profile ollama up -d && docker compose --profile ollama exec app-ollama bash" + echo " docker compose --profile default run -it app" + + else + print_error "❌ Build failed!" 
+ exit 1 + fi +} + +# Run main function with all arguments +main "$@" \ No newline at end of file diff --git a/cli/utils.py b/cli/utils.py index d3873360..d56117d4 100644 --- a/cli/utils.py +++ b/cli/utils.py @@ -151,6 +151,8 @@ def select_shallow_thinking_agent(provider) -> str: ], "ollama": [ ("llama3.2 local", "llama3.2"), + ("qwen3 small local", "qwen3:0.6b"), + ("deepseek-r1 local", "deepseek-r1:1.5b"), ] } @@ -211,7 +213,9 @@ def select_deep_thinking_agent(provider) -> str: ("Deepseek - latest iteration of the flagship chat model family from the DeepSeek team.", "deepseek/deepseek-chat-v3-0324:free"), ], "ollama": [ - ("qwen3", "qwen3"), + ("qwen3 local", "qwen3"), + ("qwen3 small local", "qwen3:0.6b"), + ("deepseek-r1 local", "deepseek-r1:1.5b"), ] } diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..1ac75ea6 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,74 @@ +version: "3.8" + +services: + # Ollama service for local LLM + ollama: + image: ollama/ollama:latest + container_name: ollama + network_mode: host + volumes: + - ./ollama_data:/root/.ollama + # Uncomment for GPU support + # deploy: + # resources: + # reservations: + # devices: + # - capabilities: ["gpu"] + profiles: + - ollama + + # App container for Ollama setup + app-ollama: + build: + context: . + container_name: trading-agents-ollama + network_mode: host + volumes: + - .:/app + - ./data:/app/data + env_file: + - .env + environment: + - LLM_BACKEND_URL=http://localhost:11434/v1 + - LLM_PROVIDER=ollama + depends_on: + - ollama + tty: true + stdin_open: true + profiles: + - ollama + + # App container for OpenAI setup (no Ollama dependency) + app-openai: + build: + context: . 
+ container_name: trading-agents-openai + network_mode: host + volumes: + - .:/app + - ./data:/app/data + env_file: + - .env + environment: + - LLM_PROVIDER=openai + - LLM_BACKEND_URL=https://api.openai.com/v1 + tty: true + stdin_open: true + profiles: + - openai + + # Generic app container (uses .env settings as-is) + app: + build: + context: . + container_name: trading-agents + network_mode: host + volumes: + - .:/app + - ./data:/app/data + env_file: + - .env + tty: true + stdin_open: true + profiles: + - default diff --git a/init-ollama.bat b/init-ollama.bat new file mode 100644 index 00000000..818678c1 --- /dev/null +++ b/init-ollama.bat @@ -0,0 +1,97 @@ +@echo off +setlocal enabledelayedexpansion + +echo πŸš€ Initializing Ollama models... + +REM Define required models +set DEEP_THINK_MODEL=qwen3:0.6b +set EMBEDDING_MODEL=nomic-embed-text + +REM Wait for Ollama to be ready +echo ⏳ Waiting for Ollama service to start... +set max_attempts=30 +set attempt=0 + +:wait_loop +if %attempt% geq %max_attempts% goto timeout_error + +docker compose --profile ollama exec ollama ollama list >nul 2>&1 +if %errorlevel% equ 0 ( + echo βœ… Ollama is ready! + goto ollama_ready +) + +set /a attempt=%attempt%+1 +echo Waiting for Ollama... (attempt %attempt%/%max_attempts%) +timeout /t 2 /nobreak >nul +goto wait_loop + +:timeout_error +echo ❌ Error: Ollama failed to start within the expected time +exit /b 1 + +:ollama_ready + +REM Check cache directory +if exist ".\ollama_data" ( + echo πŸ“ Found existing ollama_data cache directory + for /f "tokens=*" %%a in ('dir ".\ollama_data" /s /-c ^| find "bytes"') do ( + echo Cache directory exists + ) +) else ( + echo πŸ“ Creating ollama_data cache directory... + mkdir ".\ollama_data" +) + +REM Get list of currently available models +echo πŸ” Checking for existing models... 
+docker compose --profile ollama exec ollama ollama list > temp_models.txt 2>nul +if %errorlevel% neq 0 ( + echo > temp_models.txt +) + +REM Check if deep thinking model exists +findstr /c:"%DEEP_THINK_MODEL%" temp_models.txt >nul +if %errorlevel% equ 0 ( + echo βœ… Deep thinking model '%DEEP_THINK_MODEL%' already available +) else ( + echo πŸ“₯ Pulling deep thinking model: %DEEP_THINK_MODEL%... + docker compose --profile ollama exec ollama ollama pull %DEEP_THINK_MODEL% + if %errorlevel% equ 0 ( + echo βœ… Model %DEEP_THINK_MODEL% pulled successfully + ) else ( + echo ❌ Failed to pull model %DEEP_THINK_MODEL% + goto cleanup + ) +) + +REM Check if embedding model exists +findstr /c:"%EMBEDDING_MODEL%" temp_models.txt >nul +if %errorlevel% equ 0 ( + echo βœ… Embedding model '%EMBEDDING_MODEL%' already available +) else ( + echo πŸ“₯ Pulling embedding model: %EMBEDDING_MODEL%... + docker compose --profile ollama exec ollama ollama pull %EMBEDDING_MODEL% + if %errorlevel% equ 0 ( + echo βœ… Model %EMBEDDING_MODEL% pulled successfully + ) else ( + echo ❌ Failed to pull model %EMBEDDING_MODEL% + goto cleanup + ) +) + +REM List all available models +echo πŸ“‹ Available models: +docker compose --profile ollama exec ollama ollama list + +REM Show cache info +if exist ".\ollama_data" ( + echo πŸ’Ύ Model cache directory: .\ollama_data +) + +echo πŸŽ‰ Ollama initialization complete! +echo πŸ’‘ Tip: Models are cached in .\ollama_data and will be reused on subsequent runs + +:cleanup +if exist temp_models.txt del temp_models.txt +exit /b 0 \ No newline at end of file diff --git a/init-ollama.sh b/init-ollama.sh new file mode 100644 index 00000000..ab8aa1cb --- /dev/null +++ b/init-ollama.sh @@ -0,0 +1,78 @@ +#!/bin/bash +set -e + +echo "πŸš€ Initializing Ollama models..." + +# Define required models +DEEP_THINK_MODEL="qwen3:0.6b" +EMBEDDING_MODEL="nomic-embed-text" + +# Wait for Ollama to be ready +echo "⏳ Waiting for Ollama service to start..." 
+max_attempts=30 +attempt=0 +while [ $attempt -lt $max_attempts ]; do + if docker compose --profile ollama exec ollama ollama list > /dev/null 2>&1; then + echo "βœ… Ollama is ready!" + break + fi + echo " Waiting for Ollama... (attempt $((attempt + 1))/$max_attempts)" + sleep 2 + attempt=$((attempt + 1)) +done + +if [ $attempt -eq $max_attempts ]; then + echo "❌ Error: Ollama failed to start within the expected time" + exit 1 +fi + +# Check cache directory +if [ -d "./ollama_data" ]; then + echo "πŸ“ Found existing ollama_data cache directory" + cache_size=$(du -sh ./ollama_data 2>/dev/null | cut -f1 || echo "0") + echo " Cache size: $cache_size" +else + echo "πŸ“ Creating ollama_data cache directory..." + mkdir -p ./ollama_data +fi + +# Get list of currently available models +echo "πŸ” Checking for existing models..." +available_models=$(docker compose --profile ollama exec ollama ollama list 2>/dev/null | tail -n +2 | awk '{print $1}' || echo "") + +# Function to check if model exists +model_exists() { + local model_name="$1" + echo "$available_models" | grep -q "^$model_name" +} + +# Pull deep thinking model if not present +if model_exists "$DEEP_THINK_MODEL"; then + echo "βœ… Deep thinking model '$DEEP_THINK_MODEL' already available" +else + echo "πŸ“₯ Pulling deep thinking model: $DEEP_THINK_MODEL..." + docker compose --profile ollama exec ollama ollama pull "$DEEP_THINK_MODEL" + echo "βœ… Model $DEEP_THINK_MODEL pulled successfully" +fi + +# Pull embedding model if not present +if model_exists "$EMBEDDING_MODEL"; then + echo "βœ… Embedding model '$EMBEDDING_MODEL' already available" +else + echo "πŸ“₯ Pulling embedding model: $EMBEDDING_MODEL..." 
+ docker compose --profile ollama exec ollama ollama pull "$EMBEDDING_MODEL" + echo "βœ… Model $EMBEDDING_MODEL pulled successfully" +fi + +# List all available models +echo "πŸ“‹ Available models:" +docker compose --profile ollama exec ollama ollama list + +# Show cache info +if [ -d "./ollama_data" ]; then + cache_size=$(du -sh ./ollama_data 2>/dev/null | cut -f1 || echo "unknown") + echo "πŸ’Ύ Model cache size: $cache_size (stored in ./ollama_data)" +fi + +echo "πŸŽ‰ Ollama initialization complete!" +echo "πŸ’‘ Tip: Models are cached in ./ollama_data and will be reused on subsequent runs" \ No newline at end of file diff --git a/main.py b/main.py index 6c8ae3d9..9434cce6 100644 --- a/main.py +++ b/main.py @@ -1,21 +1,46 @@ +import os from tradingagents.graph.trading_graph import TradingAgentsGraph from tradingagents.default_config import DEFAULT_CONFIG +from dotenv import load_dotenv -# Create a custom config -config = DEFAULT_CONFIG.copy() -config["llm_provider"] = "google" # Use a different model -config["backend_url"] = "https://generativelanguage.googleapis.com/v1" # Use a different backend -config["deep_think_llm"] = "gemini-2.0-flash" # Use a different model -config["quick_think_llm"] = "gemini-2.0-flash" # Use a different model -config["max_debate_rounds"] = 1 # Increase debate rounds -config["online_tools"] = True # Increase debate rounds +def run_analysis(config_overrides=None): + """ + Initializes and runs a trading cycle with configurable overrides. 
+ """ + load_dotenv() # Load .env file variables -# Initialize with custom config -ta = TradingAgentsGraph(debug=True, config=config) + config = DEFAULT_CONFIG.copy() -# forward propagate -_, decision = ta.propagate("NVDA", "2024-05-10") -print(decision) + # Override with environment variables if set + config["llm_provider"] = os.environ.get("LLM_PROVIDER", config.get("llm_provider", "google")) + config["backend_url"] = os.environ.get("LLM_BACKEND_URL", config.get("backend_url", "https://generativelanguage.googleapis.com/v1")) + config["deep_think_llm"] = os.environ.get("LLM_DEEP_THINK_MODEL", config.get("deep_think_llm", "gemini-2.0-flash")) + config["quick_think_llm"] = os.environ.get("LLM_QUICK_THINK_MODEL", config.get("quick_think_llm", "gemini-2.0-flash")) + config["max_debate_rounds"] = int(os.environ.get("MAX_DEBATE_ROUNDS", config.get("max_debate_rounds", 1))) + config["online_tools"] = os.environ.get("ONLINE_TOOLS", str(config.get("online_tools", True))).lower() == 'true' -# Memorize mistakes and reflect -# ta.reflect_and_remember(1000) # parameter is the position returns + + # Apply overrides from function argument + if config_overrides: + config.update(config_overrides) + + print("Using configuration:") + for key, value in config.items(): + print(f"{key}: {value}") + + # Initialize with the final config + ta = TradingAgentsGraph(debug=True, config=config) + + # Forward propagate + _, decision = ta.propagate("NVDA", "2024-05-10") + return decision + +if __name__ == "__main__": + # Example of running the trading analysis + # You can override specific configurations here if needed, e.g.: + # decision = run_trading_cyrun_analysiscle(config_overrides={"max_debate_rounds": 2}) + decision = run_analysis() + print(decision) + + # Memorize mistakes and reflect + # ta.reflect_and_remember(1000) # parameter is the position returns diff --git a/requirements.txt b/requirements.txt index a6154cd2..610dd7e4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 
+1,8 @@ typing-extensions langchain-openai langchain-experimental +langchain_anthropic +langchain_google_genai pandas yfinance praw @@ -22,5 +24,6 @@ redis chainlit rich questionary -langchain_anthropic -langchain-google-genai +ollama +pytest +python-dotenv diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 00000000..62ed215d --- /dev/null +++ b/tests/README.md @@ -0,0 +1,185 @@ +# TradingAgents Test Suite + +This directory contains all test scripts for validating the TradingAgents setup and configuration. + +## Test Scripts + +### πŸ§ͺ `run_tests.py` - Automated Test Runner +**Purpose**: Automatically detects your LLM provider and runs appropriate tests. + +**Usage**: +```bash +# Run all tests (auto-detects provider from LLM_PROVIDER env var) +# Always run from project root, not from tests/ directory +python tests/run_tests.py + +# In Docker +docker compose --profile openai run --rm app-openai python tests/run_tests.py +docker compose --profile ollama exec app-ollama python tests/run_tests.py +``` + +**Important**: Always run the test runner from the **project root directory**, not from inside the `tests/` directory. The runner automatically handles path resolution and changes to the correct working directory. + +**Features**: +- Auto-detects LLM provider from environment +- Runs provider-specific tests only +- Provides comprehensive test summary +- Handles timeouts and error reporting + +--- + +### πŸ”Œ `test_openai_connection.py` - OpenAI API Tests +**Purpose**: Validates OpenAI API connectivity and functionality. 
+ +**Tests**: +- βœ… API key validation +- βœ… Chat completion (using `gpt-4o-mini`) +- βœ… Embeddings (using `text-embedding-3-small`) +- βœ… Configuration validation + +**Usage**: +```bash +# From project root +python tests/test_openai_connection.py + +# In Docker +docker compose --profile openai run --rm app-openai python tests/test_openai_connection.py +``` + +**Requirements**: +- `OPENAI_API_KEY` environment variable +- `LLM_PROVIDER=openai` + +--- + +### πŸ¦™ `test_ollama_connection.py` - Ollama Connectivity Tests +**Purpose**: Validates Ollama server connectivity and model availability. + +**Tests**: +- βœ… Ollama API accessibility +- βœ… Model availability (`qwen3:0.6b`, `nomic-embed-text`) +- βœ… OpenAI-compatible API functionality +- βœ… Chat completion and embeddings + +**Usage**: +```bash +# From project root +python tests/test_ollama_connection.py + +# In Docker +docker compose --profile ollama exec app-ollama python tests/test_ollama_connection.py +``` + +**Requirements**: +- Ollama server running +- Required models downloaded +- `LLM_PROVIDER=ollama` + +--- + +### βš™οΈ `test_setup.py` - General Setup Validation +**Purpose**: Validates basic TradingAgents setup and configuration. 
+ +**Tests**: +- βœ… Python package imports +- βœ… Configuration loading +- βœ… TradingAgentsGraph initialization +- βœ… Data access capabilities + +**Usage**: +```bash +# From project root +python tests/test_setup.py + +# In Docker +docker compose --profile openai run --rm app-openai python tests/test_setup.py +docker compose --profile ollama exec app-ollama python tests/test_setup.py +``` + +**Requirements**: +- TradingAgents dependencies installed +- Basic environment configuration + +--- + +## Test Results Interpretation + +### βœ… Success Indicators +- All tests pass +- API connections established +- Models available and responding +- Configuration properly loaded + +### ❌ Common Issues + +**OpenAI Tests Failing**: +- Check `OPENAI_API_KEY` is set correctly +- Verify API key has sufficient quota +- Ensure internet connectivity + +**Ollama Tests Failing**: +- Verify Ollama service is running +- Check if models are downloaded (`./init-ollama.sh`) +- Confirm `ollama list` shows required models + +**Setup Tests Failing**: +- Check Python dependencies are installed +- Verify environment variables are set +- Ensure `.env` file is properly configured + +--- + +## Quick Testing Commands + +**⚠️ Important**: Always run these commands from the **project root directory** (not from inside `tests/`): + +```bash +# Test everything automatically (from project root) +python tests/run_tests.py + +# Test specific provider (from project root) +LLM_PROVIDER=openai python tests/run_tests.py +LLM_PROVIDER=ollama python tests/run_tests.py + +# Test individual components (from project root) +python tests/test_openai_connection.py +python tests/test_ollama_connection.py +python tests/test_setup.py +``` + +**Why from project root?** +- Tests need to import the `tradingagents` package +- The `tradingagents` package is located in the project root +- Running from `tests/` directory would cause import errors + +--- + +## Adding New Tests + +To add new tests: + +1. 
Create new test script in `tests/` directory +2. Follow the naming convention: `test_.py` +3. Include proper error handling and status reporting +4. Update `run_tests.py` if automatic detection is needed +5. Document the test in this README + +**Test Script Template**: +```python +#!/usr/bin/env python3 +"""Test script for """ + +def test_component(): + """Test functionality.""" + try: + # Test implementation + print("βœ… Test passed") + return True + except Exception as e: + print(f"❌ Test failed: {e}") + return False + +if __name__ == "__main__": + success = test_component() + exit(0 if success else 1) +``` \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..dfc7f701 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,10 @@ +""" +TradingAgents Test Suite + +This package contains all test scripts for the TradingAgents application: +- test_openai_connection.py: OpenAI API connectivity tests +- test_ollama_connection.py: Ollama connectivity tests +- test_setup.py: General setup and configuration tests +""" + +__version__ = "1.0.0" \ No newline at end of file diff --git a/tests/run_tests.py b/tests/run_tests.py new file mode 100644 index 00000000..92f44842 --- /dev/null +++ b/tests/run_tests.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +""" +Test runner script for TradingAgents + +This script automatically detects the LLM provider and runs appropriate tests. 
+""" + +import os +import sys +import subprocess + +def get_llm_provider(): + """Get the configured LLM provider from environment.""" + return os.environ.get("LLM_PROVIDER", "").lower() + +def run_test_script(script_name): + """Run a test script and return success status.""" + try: + print(f"πŸ§ͺ Running {script_name}...") + result = subprocess.run([sys.executable, script_name], + capture_output=True, text=True, timeout=120) + + if result.returncode == 0: + print(f"βœ… {script_name} passed") + if result.stdout: + print(f" Output: {result.stdout.strip()}") + return True + else: + print(f"❌ {script_name} failed") + if result.stderr: + print(f" Error: {result.stderr.strip()}") + return False + + except subprocess.TimeoutExpired: + print(f"⏰ {script_name} timed out") + return False + except Exception as e: + print(f"πŸ’₯ {script_name} crashed: {e}") + return False + +def main(): + """Main test runner function.""" + print("πŸš€ TradingAgents Test Runner") + print("=" * 50) + + # Get project root directory (parent of tests directory) + tests_dir = os.path.dirname(os.path.abspath(__file__)) + project_root = os.path.dirname(tests_dir) + os.chdir(project_root) + + provider = get_llm_provider() + print(f"πŸ“‹ Detected LLM Provider: {provider or 'not set'}") + + tests_run = [] + tests_passed = [] + + # Always run setup tests + if run_test_script("tests/test_setup.py"): + tests_passed.append("tests/test_setup.py") + tests_run.append("tests/test_setup.py") + + # Run provider-specific tests + if provider == "openai": + print("\nπŸ” Running OpenAI-specific tests...") + if run_test_script("tests/test_openai_connection.py"): + tests_passed.append("tests/test_openai_connection.py") + tests_run.append("tests/test_openai_connection.py") + + elif provider == "ollama": + print("\nπŸ” Running Ollama-specific tests...") + if run_test_script("tests/test_ollama_connection.py"): + tests_passed.append("tests/test_ollama_connection.py") + tests_run.append("tests/test_ollama_connection.py") + 
+ else: + print(f"\n⚠️ Unknown or unset LLM provider: '{provider}'") + print(" Running all connectivity tests...") + + for test_script in ["tests/test_openai_connection.py", "tests/test_ollama_connection.py"]: + if run_test_script(test_script): + tests_passed.append(test_script) + tests_run.append(test_script) + + # Summary + print("\n" + "=" * 50) + print(f"πŸ“Š Test Results: {len(tests_passed)}/{len(tests_run)} tests passed") + + for test in tests_run: + status = "βœ… PASS" if test in tests_passed else "❌ FAIL" + print(f" {test}: {status}") + + if len(tests_passed) == len(tests_run): + print("\nπŸŽ‰ All tests passed! TradingAgents is ready to use.") + return 0 + else: + print(f"\n⚠️ {len(tests_run) - len(tests_passed)} test(s) failed. Check configuration.") + return 1 + +if __name__ == "__main__": + exit_code = main() + sys.exit(exit_code) \ No newline at end of file diff --git a/tests/test_ollama_connection.py b/tests/test_ollama_connection.py new file mode 100644 index 00000000..c9a67cc7 --- /dev/null +++ b/tests/test_ollama_connection.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 +""" +Simple test script to verify Ollama connection is working. 
+""" + +import os +import requests +import time +from openai import OpenAI + +def test_ollama_connection(): + """Test if Ollama is accessible and responding.""" + + # Get configuration from environment + backend_url = os.environ.get("LLM_BACKEND_URL", "http://localhost:11434/v1") + model = os.environ.get("LLM_DEEP_THINK_MODEL", "qwen3:0.6b") + embedding_model = os.environ.get("LLM_EMBEDDING_MODEL", "nomic-embed-text") + + print(f"Testing Ollama connection:") + print(f" Backend URL: {backend_url}") + print(f" Model: {model}") + print(f" Embedding Model: {embedding_model}") + + # Test 1: Check if Ollama API is responding + try: + response = requests.get(f"{backend_url.replace('/v1', '')}/api/tags", timeout=10) + if response.status_code == 200: + print("βœ… Ollama API is responding") + else: + print(f"❌ Ollama API returned status code: {response.status_code}") + return False + except Exception as e: + print(f"❌ Failed to connect to Ollama API: {e}") + return False + + # Test 2: Check if the model is available + try: + response = requests.get(f"{backend_url.replace('/v1', '')}/api/tags", timeout=10) + models = response.json().get("models", []) + model_names = [m.get("name", "") for m in models] + + if any(name.startswith(model) for name in model_names): + print(f"βœ… Model '{model}' is available") + else: + print(f"❌ Model '{model}' not found. 
Available models: {model_names}") + return False + except Exception as e: + print(f"❌ Failed to check model availability: {e}") + return False + + # Test 3: Test OpenAI-compatible API + try: + client = OpenAI(base_url=backend_url, api_key="dummy") + response = client.chat.completions.create( + model=model, + messages=[{"role": "user", "content": "Hello, say 'test successful'"}], + max_tokens=50 + ) + print("βœ… OpenAI-compatible API is working") + print(f" Response: {response.choices[0].message.content}") + return True + except Exception as e: + print(f"❌ OpenAI-compatible API test failed: {e}") + return False + + # Test 4: Check if the embedding model is available + try: + response = requests.get(f"{backend_url.replace('/v1', '')}/api/tags", timeout=10) + models = response.json().get("models", []) + model_names = [m.get("name") for m in models if m.get("name")] + + # Check if any of the available models starts with the embedding model name + if any(name.startswith(embedding_model) for name in model_names): + print(f"βœ… Embedding Model '{embedding_model}' is available") + else: + print(f"❌ Embedding Model '{embedding_model}' not found. 
Available models: {model_names}") + return False + except Exception as e: + print(f"❌ Failed to check embedding model availability: {e}") + return False + + # Test 5: Test OpenAI-compatible embedding API + try: + client = OpenAI(base_url=backend_url, api_key="dummy") + response = client.embeddings.create( + model=embedding_model, + input="This is a test sentence.", + encoding_format="float" + ) + if response.data and len(response.data) > 0 and response.data[0].embedding: + print("βœ… OpenAI-compatible embedding API is working") + print(f" Successfully generated embedding of dimension: {len(response.data[0].embedding)}") + return True + else: + print("❌ Embedding API test failed: No embedding data in response") + return False + except Exception as e: + print(f"❌ OpenAI-compatible embedding API test failed: {e}") + return False + +if __name__ == "__main__": + success = test_ollama_connection() + if success: + print("\nπŸŽ‰ All tests passed! Ollama is ready.") + exit(0) + else: + print("\nπŸ’₯ Tests failed! Check Ollama configuration.") + exit(1) \ No newline at end of file diff --git a/tests/test_openai_connection.py b/tests/test_openai_connection.py new file mode 100644 index 00000000..9d0b8c08 --- /dev/null +++ b/tests/test_openai_connection.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python3 +""" +Test script to verify OpenAI API connection is working. 
+""" + +import os +import sys +from openai import OpenAI + +def test_openai_connection(): + """Test if OpenAI API is accessible and responding.""" + + # Get configuration from environment + api_key = os.environ.get("OPENAI_API_KEY") + backend_url = os.environ.get("LLM_BACKEND_URL", "https://api.openai.com/v1") + provider = os.environ.get("LLM_PROVIDER", "openai") + + print(f"Testing OpenAI API connection:") + print(f" Provider: {provider}") + print(f" Backend URL: {backend_url}") + print(f" API Key: {'βœ… Set' if api_key and api_key != '' else '❌ Not set or using placeholder'}") + + if not api_key or api_key == "": + print("❌ OPENAI_API_KEY is not set or still using placeholder value") + print(" Please set your OpenAI API key in the .env file") + return False + + # Test 1: Initialize OpenAI client + try: + client = OpenAI( + api_key=api_key, + base_url=backend_url + ) + print("βœ… OpenAI client initialized successfully") + except Exception as e: + print(f"❌ Failed to initialize OpenAI client: {e}") + return False + + # Test 2: Test chat completion with a simple query + try: + print("πŸ§ͺ Testing chat completion...") + response = client.chat.completions.create( + model="gpt-4o-mini", # Use the most cost-effective model for testing + messages=[ + {"role": "user", "content": "Hello! Please respond with exactly: 'OpenAI API test successful'"} + ], + max_tokens=50, + temperature=0 + ) + + if response.choices and response.choices[0].message.content: + content = response.choices[0].message.content.strip() + print(f"βœ… Chat completion successful") + print(f" Model: {response.model}") + print(f" Response: {content}") + print(f" Tokens used: {response.usage.total_tokens if response.usage else 'unknown'}") + else: + print("❌ Chat completion returned empty response") + return False + + except Exception as e: + print(f"❌ Chat completion test failed: {e}") + if "insufficient_quota" in str(e).lower(): + print(" πŸ’‘ This might be a quota/billing issue. 
Check your OpenAI account.") + elif "invalid_api_key" in str(e).lower(): + print(" πŸ’‘ Invalid API key. Please check your OPENAI_API_KEY.") + return False + + # Test 3: Test embeddings (optional, for completeness) + try: + print("πŸ§ͺ Testing embeddings...") + response = client.embeddings.create( + model="text-embedding-3-small", # Cost-effective embedding model + input="This is a test sentence for embeddings." + ) + + if response.data and len(response.data) > 0 and response.data[0].embedding: + embedding = response.data[0].embedding + print(f"βœ… Embeddings successful") + print(f" Model: {response.model}") + print(f" Embedding dimension: {len(embedding)}") + print(f" Tokens used: {response.usage.total_tokens if response.usage else 'unknown'}") + else: + print("❌ Embeddings returned empty response") + return False + + except Exception as e: + print(f"❌ Embeddings test failed: {e}") + print(" ⚠️ Embeddings test failed but chat completion worked. This is usually fine for basic usage.") + # Don't return False here as embeddings might not be critical for all use cases + + return True + +def test_config_validation(): + """Validate the configuration is properly set for OpenAI.""" + + provider = os.environ.get("LLM_PROVIDER", "").lower() + backend_url = os.environ.get("LLM_BACKEND_URL", "") + + print("\nπŸ”§ Configuration validation:") + + if provider != "openai": + print(f"⚠️ LLM_PROVIDER is '{provider}', expected 'openai'") + print(" The app might still work if the provider supports OpenAI-compatible API") + else: + print("βœ… LLM_PROVIDER correctly set to 'openai'") + + if "openai.com" in backend_url: + print("βœ… Using official OpenAI API endpoint") + elif backend_url: + print(f"ℹ️ Using custom endpoint: {backend_url}") + print(" Make sure this endpoint is OpenAI-compatible") + else: + print("⚠️ LLM_BACKEND_URL not set, using default") + + # Check for common environment issues + finnhub_key = os.environ.get("FINNHUB_API_KEY") + if not finnhub_key or finnhub_key == 
"": + print("⚠️ FINNHUB_API_KEY not set - financial data fetching may not work") + else: + print("βœ… FINNHUB_API_KEY is set") + + return True + +if __name__ == "__main__": + print("πŸ§ͺ OpenAI API Connection Test\n") + + config_ok = test_config_validation() + api_ok = test_openai_connection() + + print(f"\nπŸ“Š Test Results:") + print(f" Configuration: {'βœ… OK' if config_ok else '❌ Issues'}") + print(f" API Connection: {'βœ… OK' if api_ok else '❌ Failed'}") + + if config_ok and api_ok: + print("\nπŸŽ‰ All tests passed! OpenAI API is ready for TradingAgents.") + print("πŸ’‘ You can now run the trading agents with OpenAI as the LLM provider.") + else: + print("\nπŸ’₯ Some tests failed. Please check your configuration and API key.") + print("πŸ’‘ Make sure OPENAI_API_KEY is set correctly in your .env file.") + + sys.exit(0 if (config_ok and api_ok) else 1) \ No newline at end of file diff --git a/tests/test_setup.py b/tests/test_setup.py new file mode 100644 index 00000000..1df78fc8 --- /dev/null +++ b/tests/test_setup.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python3 +""" +Test script to verify the complete TradingAgents setup works end-to-end. 
+""" + +import os +import sys +from datetime import datetime, timedelta + +def test_basic_setup(): + """Test basic imports and configuration""" + try: + from tradingagents.graph.trading_graph import TradingAgentsGraph + from tradingagents.default_config import DEFAULT_CONFIG + print("βœ… Basic imports successful") + return True + except Exception as e: + print(f"❌ Basic import failed: {e}") + return False + +def test_config(): + """Test configuration loading""" + try: + from tradingagents.default_config import DEFAULT_CONFIG + + # Check required environment variables + required_vars = ['LLM_PROVIDER', 'OPENAI_API_KEY', 'FINNHUB_API_KEY'] + missing_vars = [] + + for var in required_vars: + if not os.environ.get(var): + missing_vars.append(var) + + if missing_vars: + print(f"⚠️ Missing environment variables: {missing_vars}") + print(" This may cause issues with data fetching or LLM calls") + else: + print("βœ… Required environment variables set") + + print(f"βœ… Configuration loaded successfully") + print(f" LLM Provider: {os.environ.get('LLM_PROVIDER', 'not set')}") + print(f" OPENAI API KEY: {os.environ.get('OPENAI_API_KEY', 'not set')}") + print(f" Backend URL: {os.environ.get('LLM_BACKEND_URL', 'not set')}") + return True + except Exception as e: + print(f"❌ Configuration test failed: {e}") + return False + +def test_trading_graph_init(): + """Test TradingAgentsGraph initialization""" + try: + from tradingagents.graph.trading_graph import TradingAgentsGraph + from tradingagents.default_config import DEFAULT_CONFIG + + # Create a minimal config for testing + config = DEFAULT_CONFIG.copy() + config["online_tools"] = False # Use cached data for testing + config["max_debate_rounds"] = 1 # Minimize API calls + + ta = TradingAgentsGraph(debug=True, config=config) + print("βœ… TradingAgentsGraph initialized successfully") + return True + except Exception as e: + print(f"❌ TradingAgentsGraph initialization failed: {e}") + return False + +def test_data_access(): + """Test 
if we can access basic data""" + try: + from tradingagents.dataflows.yfin_utils import get_stock_data + + # Test with a simple stock query + test_date = (datetime.now() - timedelta(days=30)).strftime('%Y-%m-%d') + + # This should work even without API keys if using cached data + data = get_stock_data("AAPL", test_date) + + if data: + print("βœ… Data access test successful") + return True + else: + print("⚠️ Data access returned empty results (may be expected with cached data)") + return True + except Exception as e: + print(f"❌ Data access test failed: {e}") + return False + +def run_all_tests(): + """Run all tests""" + print("πŸ§ͺ Running TradingAgents setup tests...\n") + + tests = [ + ("Basic Setup", test_basic_setup), + ("Configuration", test_config), + ("TradingGraph Init", test_trading_graph_init), + ("Data Access", test_data_access), + ] + + passed = 0 + total = len(tests) + + for test_name, test_func in tests: + print(f"Running {test_name} test...") + try: + if test_func(): + passed += 1 + print() + except Exception as e: + print(f"❌ {test_name} test crashed: {e}\n") + + print(f"πŸ“Š Test Results: {passed}/{total} tests passed") + + if passed == total: + print("πŸŽ‰ All tests passed! TradingAgents setup is working correctly.") + return True + else: + print("⚠️ Some tests failed. 
Check the output above for details.") + return False + +if __name__ == "__main__": + success = run_all_tests() + sys.exit(0 if success else 1) \ No newline at end of file diff --git a/tradingagents/agents/utils/memory.py b/tradingagents/agents/utils/memory.py index f3415765..c2119145 100644 --- a/tradingagents/agents/utils/memory.py +++ b/tradingagents/agents/utils/memory.py @@ -7,6 +7,7 @@ class FinancialSituationMemory: def __init__(self, name, config): if config["backend_url"] == "http://localhost:11434/v1": self.embedding = "nomic-embed-text" + self.client = OpenAI(base_url=config["backend_url"]) else: self.embedding = "text-embedding-3-small" self.client = OpenAI() diff --git a/tradingagents/default_config.py b/tradingagents/default_config.py index 2cf15b85..90914727 100644 --- a/tradingagents/default_config.py +++ b/tradingagents/default_config.py @@ -2,7 +2,10 @@ import os DEFAULT_CONFIG = { "project_dir": os.path.abspath(os.path.join(os.path.dirname(__file__), ".")), - "data_dir": "/Users/yluo/Documents/Code/ScAI/FR1-data", + "data_dir": os.path.join( + os.path.abspath(os.path.join(os.path.dirname(__file__), ".")), + "data", + ), "data_cache_dir": os.path.join( os.path.abspath(os.path.join(os.path.dirname(__file__), ".")), "dataflows/data_cache", From 26c5ba5a786e1da47c90b014faad889d759f6c76 Mon Sep 17 00:00:00 2001 From: Yijia Xiao <48253104+Yijia-Xiao@users.noreply.github.com> Date: Thu, 26 Jun 2025 00:07:58 -0400 Subject: [PATCH 09/26] Revert "Docker support and Ollama support (#47)" (#57) This reverts commit 78ea029a0bbe3ea23c0016c1f54d022bc8c28459. 
--- .dockerignore | 125 ------- .env.example | 36 -- .gitattributes | 2 - .gitignore | 7 - Docker-readme.md | 506 --------------------------- Dockerfile | 65 ---- README.md | 4 - build.bat | 176 ---------- build.sh | 248 ------------- cli/utils.py | 6 +- docker-compose.yml | 74 ---- init-ollama.bat | 97 ----- init-ollama.sh | 78 ----- main.py | 55 +-- requirements.txt | 7 +- tests/README.md | 185 ---------- tests/__init__.py | 10 - tests/run_tests.py | 101 ------ tests/test_ollama_connection.py | 108 ------ tests/test_openai_connection.py | 142 -------- tests/test_setup.py | 122 ------- tradingagents/agents/utils/memory.py | 1 - tradingagents/default_config.py | 5 +- 23 files changed, 19 insertions(+), 2141 deletions(-) delete mode 100644 .dockerignore delete mode 100644 .env.example delete mode 100644 .gitattributes delete mode 100644 Docker-readme.md delete mode 100644 Dockerfile delete mode 100644 build.bat delete mode 100644 build.sh delete mode 100644 docker-compose.yml delete mode 100644 init-ollama.bat delete mode 100644 init-ollama.sh delete mode 100644 tests/README.md delete mode 100644 tests/__init__.py delete mode 100644 tests/run_tests.py delete mode 100644 tests/test_ollama_connection.py delete mode 100644 tests/test_openai_connection.py delete mode 100644 tests/test_setup.py diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 5230626d..00000000 --- a/.dockerignore +++ /dev/null @@ -1,125 +0,0 @@ -# Git specific -.git -.gitignore -.gitattributes -*.git - -# Python specific -__pycache__/ -*.pyc -*.pyo -*.pyd -*.egg-info/ -.Python -env/ -venv/ -.venv/ -.pytest_cache/ -.coverage -.coverage.* -htmlcov/ -.tox/ -.mypy_cache/ -.dmypy.json -dmypy.json - -# Environment files -.env -.env.* -!.env.example - -# IDE/Editor specific -.vscode/ -.idea/ -*.swp -*.swo -*.sublime-* -.spyderproject -.spyproject - -# Model cache directories (can be large) -.ollama/ -ollama_data/ -.cache/ -.local/ - -# Documentation and non-essential files -*.md 
-!README.md -docs/ -assets/ -*.png -*.jpg -*.jpeg -*.gif -*.svg -!assets/TauricResearch.png - -# Build artifacts and logs -build/ -dist/ -*.log -logs/ -*.tmp -*.temp - -# Test files (uncomment if you don't want tests in production image) -# tests/test_*.py -# test_*.py -# *_test.py - -# Docker and deployment files -Dockerfile* -docker-compose*.yml -.dockerignore -build*.sh -deploy*.sh -k8s/ -helm/ - -# Development tools -.devcontainer/ -.github/ -.gitlab-ci.yml -.travis.yml -.circleci/ -Makefile - -# Data files (can be large) -data/ -*.csv -*.json -*.xlsx -*.db -*.sqlite - -# Temporary and backup files -*.bak -*.backup -*.orig -*.rej -~* -.#* -\#*# - -# OS specific -.DS_Store -.DS_Store? -._* -.Spotlight-V100 -.Trashes -ehthumbs.db -Thumbs.db -Desktop.ini - -# Node.js (if any frontend assets) -node_modules/ -npm-debug.log* -yarn-debug.log* -yarn-error.log* - -# Lock files (include them for reproducible builds) -# Uncomment if you want to exclude them -# uv.lock -# poetry.lock -# Pipfile.lock diff --git a/.env.example b/.env.example deleted file mode 100644 index 1868c5a8..00000000 --- a/.env.example +++ /dev/null @@ -1,36 +0,0 @@ -# This is an example .env file for the Trading Agent project. -# Copy this file to .env and fill in your API keys and environment configurations. -# "NOTE: When using for `docker` command do not use quotes around the values, otherwise environment variables will not be set." 
- -# API Keys -# Set your OpenAI API key, for OpenAI, Ollama or other OpenAI-compatible models -OPENAI_API_KEY= -# Set your Finnhub API key -FINNHUB_API_KEY= - -#LLM Configuration for OpenAI -# Set LLM_Provider to one of: openai, anthropic, google, openrouter or ollama, -LLM_PROVIDER=openai -# Set the API URL for the LLM backend -LLM_BACKEND_URL=https://api.openai.com/v1 - - -# Uncomment for LLM Configuration for local ollama -#LLM_PROVIDER=ollama -## For Ollama running in the same container, /v1 added for OpenAI compatibility -#LLM_BACKEND_URL=http://localhost:11434/v1 -# Set name of the Deep think model -LLM_DEEP_THINK_MODEL=llama3.2 -## Setname of the quick think model -LLM_QUICK_THINK_MODEL=qwen3 -# Set the name of the embedding model -LLM_EMBEDDING_MODEL=nomic-embed-text - -# Agent Configuration -# Maximum number of debate rounds for the agent to engage in choose from 1, 3, 5 -MAX_DEBATE_ROUNDS=1 -# Set to False if you want to disable tools that access the internet -ONLINE_TOOLS=True - - - diff --git a/.gitattributes b/.gitattributes deleted file mode 100644 index 6aec1b04..00000000 --- a/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -init-ollama.sh text eol=lf -build.sh text eol=lf \ No newline at end of file diff --git a/.gitignore b/.gitignore index cf5ea542..8313619e 100644 --- a/.gitignore +++ b/.gitignore @@ -6,10 +6,3 @@ src/ eval_results/ eval_data/ *.egg-info/ -.ollama/ -ollama_data/ -.local/ -.cache/ -.pytest_cache/ -.devcontainer/ -.env diff --git a/Docker-readme.md b/Docker-readme.md deleted file mode 100644 index 080c5660..00000000 --- a/Docker-readme.md +++ /dev/null @@ -1,506 +0,0 @@ -# πŸš€ Docker Setup for Trading Agents - -This guide provides instructions for running the Trading Agents application within a secure and reproducible Docker environment. Using Docker simplifies setup, manages dependencies, and ensures a consistent experience across different machines. 
- -The recommended method is using `docker-compose`, which handles the entire stack, including the Ollama server and model downloads. - -## Prerequisites - -Before you begin, ensure you have the following installed: - -- [**Docker**](https://docs.docker.com/get-docker/) -- [**Docker Compose**](https://docs.docker.com/compose/install/) (usually included with Docker Desktop) - -## πŸ€” Which Option Should I Choose? - -| Feature | OpenAI | Local Ollama | -| ------------------------- | ------------------------- | ----------------------------- | -| **Setup Time** | 2-5 minutes | 15-30 minutes | -| **Cost** | ~$0.01-0.05 per query | Free after setup | -| **Quality** | GPT-4o (excellent) | Depends on model | -| **Privacy** | Data sent to OpenAI | Fully private | -| **Internet Required** | Yes | No (after setup) | -| **Hardware Requirements** | None | 4GB+ RAM recommended | -| **Model Downloads** | None | Depends on model | -| **Best For** | Quick testing, production | Privacy-focused, cost control | - -**πŸ’‘ Recommendation**: Start with OpenAI for quick testing, then switch to Ollama for production if privacy/cost is important. - -## ⚑ Quickstart - -### Option A: Using OpenAI (Recommended for beginners) - -```bash -# 1. Clone the repository -git clone https://github.com/TauricResearch/TradingAgents.git -cd TradingAgents - -# 2. Create and configure environment file -cp .env.example .env -# Edit .env: Set LLM_PROVIDER=openai and add your OPENAI_API_KEY - -# 3. Build and run with OpenAI -docker compose --profile openai build -docker compose --profile openai run -it app-openai -``` - -### Option B: Using Local Ollama (Free but requires more setup) - -```bash -# 1. Clone the repository -git clone https://github.com/TauricResearch/TradingAgents.git -cd TradingAgents - -# 2. Create environment file -cp .env.example .env -# Edit .env: Set LLM_PROVIDER=ollama - -# 3. Start Ollama service -docker compose --profile ollama up -d --build - -# 4. 
Initialize models (first time only) -# Linux/macOS: -./init-ollama.sh -# Windows Command Prompt: -init-ollama.bat - - -# 5. Run the command-line app -docker compose --profile ollama run -it app-ollama -``` - -## πŸ› οΈ Build Methods - -Choose your preferred build method: - -### Method 1: Quick Build (Recommended) - -```bash -# Standard Docker build -docker build -t trading-agents . - -# Or with docker-compose -docker compose build -``` - -### Method 2: Optimized Build (Advanced) - -For faster rebuilds with caching: - -**Linux/macOS:** - -```bash -# Build with BuildKit optimization -./build.sh - -# With testing -./build.sh --test - -# Clean cache and rebuild -./build.sh --clean --test -``` - -**Windows Command Prompt:** - -```cmd -REM Build with BuildKit optimization -build.bat - -REM With testing -build.bat --test - -REM Clean cache and rebuild -build.bat --clean --test -``` - -**Benefits of Optimized Build:** - -- ⚑ 60-90% faster rebuilds via BuildKit cache -- πŸ”„ Automatic fallback to simple build if needed -- πŸ“Š Cache statistics and build info -- πŸ§ͺ Built-in testing capabilities - -## Step-by-Step Instructions - -### Step 1: Clone the Repository - -```bash -git clone https://github.com/TauricResearch/TradingAgents.git -cd TradingAgents -``` - -### Step 2: Configure Your Environment (`.env` file) - -The application is configured using an environment file. Create your own `.env` file by copying the provided template. 
- -```bash -cp .env.example .env -``` - -#### Option A: OpenAI Configuration (Recommended) - -Edit your `.env` file and set: - -```env -# LLM Provider Configuration -LLM_PROVIDER=openai -LLM_BACKEND_URL=https://api.openai.com/v1 - -# API Keys -OPENAI_API_KEY=your-actual-openai-api-key-here -FINNHUB_API_KEY=your-finnhub-api-key-here - -# Agent Configuration -MAX_DEBATE_ROUNDS=1 -ONLINE_TOOLS=True -``` - -**Benefits of OpenAI:** - -- βœ… No local setup required -- βœ… Higher quality responses (GPT-4o) -- βœ… Faster startup (no model downloads) -- βœ… No GPU/CPU requirements -- ❌ Requires API costs ($0.01-0.05 per query) - -#### Option B: Local Ollama Configuration (Free) - -Edit your `.env` file and set: - -```env -# LLM Provider Configuration -LLM_PROVIDER=ollama -LLM_BACKEND_URL=http://ollama:11434/v1 - -# Local Models -LLM_DEEP_THINK_MODEL=llama3.2 -LLM_QUICK_THINK_MODEL=qwen3 -LLM_EMBEDDING_MODEL=nomic-embed-text - -# API Keys (still need Finnhub for market data) -FINNHUB_API_KEY=your-finnhub-api-key-here - -# Agent Configuration -MAX_DEBATE_ROUNDS=1 -ONLINE_TOOLS=True -``` - -**Benefits of Ollama:** - -- βœ… Completely free after setup -- βœ… Data privacy (runs locally) -- βœ… Works offline -- ❌ Requires initial setup and model downloads -- ❌ Slower responses than cloud APIs - -### Step 3: Run with Docker Compose - -Choose the appropriate method based on your LLM provider configuration: - -#### Option A: Running with OpenAI - -```bash -# Build the app container -docker compose --profile openai build -# Or use optimized build: ./build.sh - -# Test OpenAI connection (optional) -docker compose --profile openai run --rm app-openai python tests/test_openai_connection.py - -# Run the trading agents -docker compose --profile openai run -it app-openai -``` - -**No additional services needed** - the app connects directly to OpenAI's API. 
- -#### Option B: Running with Ollama (CPU) - -```bash -# Start the Ollama service -docker compose --profile ollama up -d --build -# Or use optimized build: ./build.sh - -# Initialize Ollama models (first time only) -# Linux/macOS: -./init-ollama.sh -# Windows Command Prompt: -init-ollama.bat - -# Test Ollama connection (optional) -docker compose --profile ollama exec app-ollama python tests/test_ollama_connection.py - -# Run the trading agents -docker compose --profile ollama run -it app-ollama -``` - -#### Option C: Running with Ollama (GPU) - -First, uncomment the GPU configuration in docker-compose.yml: - -```yaml -# deploy: -# resources: -# reservations: -# devices: -# - capabilities: ["gpu"] -``` - -Then run: - -```bash -# Start with GPU support -docker compose --profile ollama up -d --build -# Or use optimized build: ./build.sh - -# Initialize Ollama models (first time only) -# Linux/macOS: -./init-ollama.sh -# Windows Command Prompt: -init-ollama.bat - -# Run the trading agents -docker compose --profile ollama run -it app-ollama -``` - -#### View Logs - -To view the application logs in real-time, you can run: - -```bash -docker compose --profile ollama logs -f -``` - -#### Stop the Containers - -To stop and remove the containers: - -```bash -docker compose --profile ollama down -``` - -### Step 4: Verify Your Setup (Optional) - -#### For OpenAI Setup: - -```bash -# Test OpenAI API connection -docker compose --profile openai run --rm app-openai python tests/test_openai_connection.py - -# Run a quick trading analysis test -docker compose --profile openai run --rm app-openai python tests/test_setup.py - -# Run all tests automatically -docker compose --profile openai run --rm app-openai python tests/run_tests.py -``` - -#### For Ollama Setup: - -```bash -# Test Ollama connection -docker compose --profile ollama exec app-ollama python tests/test_ollama_connection.py - -# Run a quick trading analysis test -docker compose --profile ollama exec app-ollama python 
tests/test_setup.py - -# Run all tests automatically -docker compose --profile ollama exec app-ollama python tests/run_tests.py -``` - -### Step 5: Model Management (Optional) - -#### View and Manage Models - -```bash -# List all available models -docker compose --profile ollama exec ollama ollama list - -# Check model cache size -du -sh ./ollama_data - -# Pull additional models (cached locally) -docker compose --profile ollama exec ollama ollama pull llama3.2 - -# Remove a model (frees up cache space) -docker compose --profile ollama exec ollama ollama rm model-name -``` - -#### Model Cache Benefits - -- **Persistence**: Models downloaded once are reused across container restarts -- **Speed**: Subsequent startups are much faster (seconds vs minutes) -- **Bandwidth**: No need to re-download multi-GB models -- **Offline**: Once cached, models work without internet connection - -#### Troubleshooting Cache Issues - -```bash -# If models seem corrupted, clear cache and re-initialize -docker compose --profile ollama down -rm -rf ./ollama_data -docker compose --profile ollama up -d -# Linux/macOS: -./init-ollama.sh -# Windows Command Prompt: -init-ollama.bat -``` - -βœ… **Expected Output:** - -``` -Testing Ollama connection: - Backend URL: http://localhost:11434/v1 - Model: qwen3:0.6b - Embedding Model: nomic-embed-text -βœ… Ollama API is responding -βœ… Model 'qwen3:0.6b' is available -βœ… OpenAI-compatible API is working - Response: ... -``` - ---- - -## Alternative Method: Using `docker` Only - -If you prefer not to use `docker-compose`, you can build and run the container manually. - -**1. Build the Docker Image:** - -```bash -# Standard build -docker build -t trading-agents . - -# Or optimized build (recommended) -# Linux/macOS: -./build.sh -# Windows Command Prompt: -build.bat -``` - -**2. Test local ollama setup (Optional):** -Make sure you have a `.env` file configured as described in Step 2. 
If you are using `LLM_PROVIDER="ollama"`, you can verify that the Ollama server is running correctly and has the necessary models. - -```bash -docker run -it --network host --env-file .env trading-agents python tests/test_ollama_connection.py -``` - -for picking environment settings from .env file. You can pass values directly using: - -```bash -docker run -it --network host \ - -e LLM_PROVIDER="ollama" \ - -e LLM_BACKEND_URL="http://localhost:11434/v1" \ - -e LLM_DEEP_THINK_MODEL="qwen3:0.6b" \ - -e LLM_EMBEDDING_MODEL="nomic-embed-text"\ - trading-agents \ - python tests/test_ollama_connection.py -``` - -To prevent re-downloading of Ollama models, mount folder from your host and run as - -```bash -docker run -it --network host \ - -e LLM_PROVIDER="ollama" \ - -e LLM_BACKEND_URL="http://localhost:11434/v1" \ - -e LLM_DEEP_THINK_MODEL="qwen3:0.6b" \ - -e LLM_EMBEDDING_MODEL="nomic-embed-text"\ - -v ./ollama_cache:/app/.ollama \ - trading-agents \ - python tests/test_ollama_connection.py -``` - -**3. Run the Docker Container:** -Make sure you have a `.env` file configured as described in Step 2. - -```bash -docker run --rm -it \ - --network host \ - --env-file .env \ - -v ./data:/app/data \ - --name trading-agents \ - trading-agents -``` - -**4. 
Run on GPU machine:** -For running on GPU machine, pass `--gpus=all` flag to the `docker run` command: - -```bash -docker run --rm -it \ - --gpus=all \ - --network host \ - --env-file .env \ - -v ./data:/app/data \ - --name trading-agents \ - trading-agents -``` - -## Configuration Details - -### Test Suite Organization - -All test scripts are organized in the `tests/` directory: - -``` -tests/ -β”œβ”€β”€ __init__.py # Python package initialization -β”œβ”€β”€ run_tests.py # Automated test runner -β”œβ”€β”€ test_openai_connection.py # OpenAI API connectivity tests -β”œβ”€β”€ test_ollama_connection.py # Ollama connectivity tests -└── test_setup.py # General setup and configuration tests -``` - -**Automated Testing:** - -```bash -# Run all tests automatically (detects provider) - from project root -python tests/run_tests.py - -# Run specific test - from project root -python tests/test_openai_connection.py -python tests/test_ollama_connection.py -python tests/test_setup.py -``` - -**⚠️ Important**: When running tests locally (outside Docker), always run from the **project root directory**, not from inside the `tests/` folder. The Docker commands automatically handle this. - -### Live Reloading - -The `app` directory is mounted as a volume into the container. This means any changes you make to the source code on your local machine will be reflected instantly in the running container without needing to rebuild the image. 
- -### Persistent Data & Model Caching - -The following volumes are used to persist data between container runs: - -- **`./data`**: Stores application data, trading reports, and cached market data -- **`./ollama_data`**: Caches downloaded Ollama models (typically 1-4GB per model) - -#### Model Cache Management - -The Ollama models are automatically cached in `./ollama_data/` on your host machine: - -- **First run**: Models are downloaded automatically (may take 5-15 minutes depending on internet speed) -- **Subsequent runs**: Models are reused from cache, startup is much faster -- **Cache location**: `./ollama_data/` directory in your project folder -- **Cache size**: Typically 2-6GB total for the required models - -```bash -# Check cache size -du -sh ./ollama_data - -# Clean cache if needed (will require re-downloading models) -rm -rf ./ollama_data - -# List cached models -docker compose --profile ollama exec ollama ollama list -``` - -### GPU troubleshooting - -If you find model is running very slow on GPU machine, make sur you the latest GPU drivers installed and GPU is working fine with docker. 
Eg you can check for Nvidia GPUs by running: - -```bash -docker run --rm -it --gpus=all nvcr.io/nvidia/k8s/cuda-sample:nbody nbody -gpu -benchmark - -or - -nvidia-smi -``` diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 56ada79b..00000000 --- a/Dockerfile +++ /dev/null @@ -1,65 +0,0 @@ -# syntax=docker/dockerfile:1.4 - -# Build stage for dependencies -FROM python:3.9-slim-bookworm AS builder - -# Set environment variables for build -ENV PYTHONDONTWRITEBYTECODE=1 \ - PIP_DISABLE_PIP_VERSION_CHECK=on \ - PIP_DEFAULT_TIMEOUT=100 - -# Install build dependencies -RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update && apt-get install -y --no-install-recommends \ - curl \ - git \ - && apt-get clean - -# Create virtual environment -RUN python -m venv /opt/venv -ENV PATH="/opt/venv/bin:$PATH" - -# Copy requirements and install Python dependencies -COPY requirements.txt . -RUN --mount=type=cache,target=/root/.cache/pip \ - pip install --no-cache-dir -r requirements.txt - -# Runtime stage -FROM python:3.9-slim-bookworm AS runtime - -# Set environment variables -ENV PYTHONUNBUFFERED=1 \ - PYTHONDONTWRITEBYTECODE=1 \ - PIP_DISABLE_PIP_VERSION_CHECK=on \ - PIP_DEFAULT_TIMEOUT=100 - -# Install runtime dependencies -RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update && apt-get install -y --no-install-recommends \ - curl \ - git \ - && apt-get clean - -# Copy virtual environment from builder stage -COPY --from=builder /opt/venv /opt/venv -ENV PATH="/opt/venv/bin:$PATH" - -# Create a non-root user and group -RUN groupadd -r appuser && useradd -r -g appuser -s /bin/bash -d /app appuser - -# Create app directory -WORKDIR /app - -# Copy the application code -COPY . . 
- -# Change ownership of the app directory to the non-root user -RUN chown -R appuser:appuser /app - -# Switch to non-root user -USER appuser - -# Default command (can be overridden, e.g., by pytest command in CI) -CMD ["python", "-m", "cli.main"] diff --git a/README.md b/README.md index 41cfc946..cac18691 100644 --- a/README.md +++ b/README.md @@ -192,10 +192,6 @@ print(decision) You can view the full list of configurations in `tradingagents/default_config.py`. -## Docker usage and local ollama tests ## - -See [Docker Readme](./Docker-readme.md) for details. - ## Contributing We welcome contributions from the community! Whether it's fixing a bug, improving documentation, or suggesting a new feature, your input helps make this project better. If you are interested in this line of research, please consider joining our open-source financial AI research community [Tauric Research](https://tauric.ai/). diff --git a/build.bat b/build.bat deleted file mode 100644 index 19e4de97..00000000 --- a/build.bat +++ /dev/null @@ -1,176 +0,0 @@ -@echo off -REM πŸš€ Optimized BuildKit Docker Build Script for TradingAgents (Windows Batch) -REM This script uses Docker BuildKit for faster builds with advanced caching - -setlocal EnableDelayedExpansion - -REM Configuration -set "IMAGE_NAME=trading-agents" -set "CACHE_TAG=%IMAGE_NAME%:cache" -set "LATEST_TAG=%IMAGE_NAME%:latest" -set "REGISTRY=" -set "TARGET=production" -set "CLEAN_CACHE=" -set "RUN_TESTS=" -set "SHOW_STATS=" -set "SHOW_HELP=" - -REM Parse command line arguments -:parse_args -if "%~1"=="" goto end_parse -if /i "%~1"=="--clean" ( - set "CLEAN_CACHE=1" - shift - goto parse_args -) -if /i "%~1"=="--test" ( - set "RUN_TESTS=1" - shift - goto parse_args -) -if /i "%~1"=="--stats" ( - set "SHOW_STATS=1" - shift - goto parse_args -) -if /i "%~1"=="--help" ( - set "SHOW_HELP=1" - shift - goto parse_args -) -if /i "%~1"=="-h" ( - set "SHOW_HELP=1" - shift - goto parse_args -) -echo [ERROR] Unknown option: %~1 -exit /b 1 - 
-:end_parse - -REM Show help if requested -if defined SHOW_HELP ( - echo πŸš€ TradingAgents Optimized Docker Build ^(Windows^) - echo Usage: build-optimized.bat [OPTIONS] - echo. - echo Options: - echo --clean Clean build cache before building - echo --test Run tests after building - echo --stats Show cache statistics after building - echo --help, -h Show this help message - echo. - echo Examples: - echo build-optimized.bat # Build image - echo build-optimized.bat --clean --test # Clean cache, build, and test - exit /b 0 -) - -echo πŸš€ TradingAgents Optimized Docker Build ^(Windows^) -echo ========================================= - -REM Check if BuildKit is available -echo [INFO] Checking BuildKit availability... -docker buildx version >nul 2>&1 -if errorlevel 1 ( - echo [ERROR] Docker BuildKit ^(buildx^) is not available - echo [ERROR] Please install Docker BuildKit or update Docker to a newer version - exit /b 1 -) -echo [SUCCESS] BuildKit is available - -REM Create buildx builder if it doesn't exist -echo [INFO] Setting up BuildKit builder... -docker buildx inspect trading-agents-builder >nul 2>&1 -if errorlevel 1 ( - echo [INFO] Creating new buildx builder 'trading-agents-builder'... - docker buildx create --name trading-agents-builder --driver docker-container --bootstrap - if errorlevel 1 ( - echo [ERROR] Failed to create builder - exit /b 1 - ) -) - -REM Use our builder -docker buildx use trading-agents-builder -if errorlevel 1 ( - echo [ERROR] Failed to use builder - exit /b 1 -) -echo [SUCCESS] Builder 'trading-agents-builder' is ready - -REM Clean cache if requested -if defined CLEAN_CACHE ( - echo [INFO] Cleaning build cache... 
- docker buildx prune -f - echo [SUCCESS] Build cache cleaned -) - -REM Show build information -echo [INFO] Build Information: -echo πŸ“¦ Image: %LATEST_TAG% -echo πŸ“Š Cache: Local BuildKit cache -echo πŸ”„ Multi-stage: Yes ^(builder β†’ runtime^) -echo 🌐 Network: Host networking mode - -REM Build the image -echo [INFO] Building image with BuildKit cache optimization... - -REM Get build metadata -for /f "tokens=*" %%i in ('powershell -Command "(Get-Date).ToUniversalTime().ToString('yyyy-MM-ddTHH:mm:ssZ')"') do set "BUILD_DATE=%%i" -for /f "tokens=*" %%i in ('git rev-parse --short HEAD 2^>nul') do set "GIT_HASH=%%i" -if "!GIT_HASH!"=="" set "GIT_HASH=unknown" - -REM Execute build -echo [INFO] Starting Docker build... -docker buildx build ^ - --file Dockerfile ^ - --tag %LATEST_TAG% ^ - --cache-from type=local,src=C:\tmp\.buildx-cache ^ - --cache-to type=local,dest=C:\tmp\.buildx-cache,mode=max ^ - --label build.date=%BUILD_DATE% ^ - --label build.version=%GIT_HASH% ^ - --load ^ - . - -if errorlevel 1 ( - echo [ERROR] ❌ Build failed! - exit /b 1 -) - -echo [SUCCESS] βœ… Build completed successfully! - -REM Test the image if requested -if defined RUN_TESTS ( - echo [INFO] Testing built image... - - REM Basic functionality test - docker run --rm %LATEST_TAG% python -c "print('βœ… Image test successful')" - if errorlevel 1 ( - echo [ERROR] Image test failed - exit /b 1 - ) - echo [SUCCESS] Image test passed - - REM Test import capabilities - docker run --rm %LATEST_TAG% python -c "from tradingagents.default_config import DEFAULT_CONFIG; print('βœ… Import test successful')" - if errorlevel 1 ( - echo [WARNING] Import test failed ^(this might be expected if dependencies are missing^) - ) else ( - echo [SUCCESS] Import test passed - ) -) - -REM Show cache statistics if requested -if defined SHOW_STATS ( - echo [INFO] Cache Statistics: - docker buildx du 2>nul || echo Cache statistics not available -) - -echo. -echo [SUCCESS] πŸŽ‰ Ready to use! 
Try: -echo docker run -it --network host %LATEST_TAG% -echo docker compose --profile openai run -it app-openai -echo docker compose --profile ollama up -d ^&^& docker compose --profile ollama exec app-ollama cmd -echo docker compose --profile default run -it app - -exit /b 0 \ No newline at end of file diff --git a/build.sh b/build.sh deleted file mode 100644 index 006e360c..00000000 --- a/build.sh +++ /dev/null @@ -1,248 +0,0 @@ -#!/bin/bash - -# πŸš€ Optimized BuildKit Docker Build Script for TradingAgents -# This script uses Docker BuildKit for faster builds with advanced caching - -set -e - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Configuration -IMAGE_NAME="trading-agents" -CACHE_TAG="${IMAGE_NAME}:cache" -LATEST_TAG="${IMAGE_NAME}:latest" -REGISTRY="" # Set this if you want to push to a registry - -# Function to print colored output -print_status() { - echo -e "${BLUE}[INFO]${NC} $1" -} - -print_success() { - echo -e "${GREEN}[SUCCESS]${NC} $1" -} - -print_warning() { - echo -e "${YELLOW}[WARNING]${NC} $1" -} - -print_error() { - echo -e "${RED}[ERROR]${NC} $1" -} - -# Check if BuildKit is available -check_buildkit() { - print_status "Checking BuildKit availability..." - - if ! docker buildx version > /dev/null 2>&1; then - print_error "Docker BuildKit (buildx) is not available" - print_error "Please install Docker BuildKit or update Docker to a newer version" - exit 1 - fi - - print_success "BuildKit is available" -} - -# Create buildx builder if it doesn't exist -setup_builder() { - print_status "Setting up BuildKit builder..." - - # Check if our builder exists - if ! docker buildx inspect trading-agents-builder > /dev/null 2>&1; then - print_status "Creating new buildx builder 'trading-agents-builder'..." 
- docker buildx create --name trading-agents-builder --driver docker-container --bootstrap - fi - - # Use our builder - docker buildx use trading-agents-builder - print_success "Builder 'trading-agents-builder' is ready" -} - -# Build with cache optimization -build_image() { - print_status "Building image with BuildKit cache optimization..." - - # Build arguments - local build_args=( - --file Dockerfile - --tag "$LATEST_TAG" - --cache-from "type=local,src=/tmp/.buildx-cache" - --cache-to "type=local,dest=/tmp/.buildx-cache,mode=max" - --load # Load into local Docker daemon - ) - - # Add build metadata - build_args+=( - --label "build.date=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" - --label "build.version=$(git rev-parse --short HEAD 2>/dev/null || echo 'unknown')" - --label "build.target=$target" - ) - - print_status "Build command: docker buildx build ${build_args[*]} ." - - # Execute build - if docker buildx build "${build_args[@]}" .; then - print_success "Build completed successfully!" - return 0 - else - print_error "Build failed!" - print_warning "Attempting fallback build with simple Dockerfile..." - return build_simple_fallback - fi -} - -# Fallback build function for when BuildKit fails -build_simple_fallback() { - print_status "Using simple Dockerfile as fallback..." - - if [ -f "Dockerfile.simple" ]; then - if docker build -f Dockerfile.simple -t "$LATEST_TAG" .; then - print_success "Fallback build completed successfully!" - print_warning "Note: Using simple build without advanced caching" - return 0 - else - print_error "Fallback build also failed!" 
- return 1 - fi - else - print_error "Dockerfile.simple not found for fallback" - return 1 - fi -} - -# Show build info -show_build_info() { - print_status "Build Information:" - echo " πŸ“¦ Image: $LATEST_TAG" - echo " πŸ—οΈ Builder: $(docker buildx inspect --bootstrap | grep "Name:" | head -1 | cut -d: -f2 | xargs)" - echo " πŸ“Š Cache: Local BuildKit cache" - echo " πŸ”„ Multi-stage: Yes (builder β†’ runtime)" - echo " 🌐 Network: Host networking mode" -} - -# Test the built image -test_image() { - print_status "Testing built image..." - - # Basic functionality test - if docker run --rm "$LATEST_TAG" python -c "print('βœ… Image test successful')"; then - print_success "Image test passed" - else - print_error "Image test failed" - return 1 - fi - - # Test import capabilities - if docker run --rm "$LATEST_TAG" python -c "from tradingagents.default_config import DEFAULT_CONFIG; print('βœ… Import test successful')"; then - print_success "Import test passed" - else - print_warning "Import test failed (this might be expected if dependencies are missing)" - fi -} - -# Show cache statistics -show_cache_stats() { - print_status "Cache Statistics:" - - # Show buildx disk usage - if docker buildx du > /dev/null 2>&1; then - docker buildx du - else - echo " Cache statistics not available" - fi -} - -# Clean up build cache -clean_cache() { - print_status "Cleaning build cache..." 
- docker buildx prune -f - print_success "Build cache cleaned" -} - -# Main function -main() { - echo "πŸš€ TradingAgents Optimized Docker Build" - echo "========================================" - - # Parse arguments - local clean=false - local test=false - local stats=false - - while [[ $# -gt 0 ]]; do - case $1 in - --clean) - clean=true - shift - ;; - --test) - test=true - shift - ;; - --stats) - stats=true - shift - ;; - --help|-h) - echo "Usage: $0 [OPTIONS]" - echo "" - echo "Options:" - echo " --clean Clean build cache before building" - echo " --test Run tests after building" - echo " --stats Show cache statistics after building" - echo " --help, -h Show this help message" - echo "" - echo "Examples:" - echo " $0 # Build image" - echo " $0 --clean --test # Clean cache, build, and test" - echo " $0 --stats # Build and show cache stats" - exit 0 - ;; - *) - print_error "Unknown option: $1" - exit 1 - ;; - esac - done - - # Execute steps - check_buildkit - setup_builder - - if [ "$clean" = true ]; then - clean_cache - fi - - show_build_info - - if build_image; then - print_success "βœ… Build completed successfully!" - - if [ "$test" = true ]; then - test_image - fi - - if [ "$stats" = true ]; then - show_cache_stats - fi - - echo "" - print_success "πŸŽ‰ Ready to use! Try:" - echo " docker run -it --network host $LATEST_TAG" - echo " docker compose --profile openai run -it app-openai" - echo " docker compose --profile ollama up -d && docker compose --profile ollama exec app-ollama bash" - echo " docker compose --profile default run -it app" - - else - print_error "❌ Build failed!" 
- exit 1 - fi -} - -# Run main function with all arguments -main "$@" \ No newline at end of file diff --git a/cli/utils.py b/cli/utils.py index d56117d4..d3873360 100644 --- a/cli/utils.py +++ b/cli/utils.py @@ -151,8 +151,6 @@ def select_shallow_thinking_agent(provider) -> str: ], "ollama": [ ("llama3.2 local", "llama3.2"), - ("qwen3 small local", "qwen3:0.6b"), - ("deepseek-r1 local", "deepseek-r1:1.5b"), ] } @@ -213,9 +211,7 @@ def select_deep_thinking_agent(provider) -> str: ("Deepseek - latest iteration of the flagship chat model family from the DeepSeek team.", "deepseek/deepseek-chat-v3-0324:free"), ], "ollama": [ - ("qwen3 local", "qwen3"), - ("qwen3 small local", "qwen3:0.6b"), - ("deepseek-r1 local", "deepseek-r1:1.5b"), + ("qwen3", "qwen3"), ] } diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 1ac75ea6..00000000 --- a/docker-compose.yml +++ /dev/null @@ -1,74 +0,0 @@ -version: "3.8" - -services: - # Ollama service for local LLM - ollama: - image: ollama/ollama:latest - container_name: ollama - network_mode: host - volumes: - - ./ollama_data:/root/.ollama - # Uncomment for GPU support - # deploy: - # resources: - # reservations: - # devices: - # - capabilities: ["gpu"] - profiles: - - ollama - - # App container for Ollama setup - app-ollama: - build: - context: . - container_name: trading-agents-ollama - network_mode: host - volumes: - - .:/app - - ./data:/app/data - env_file: - - .env - environment: - - LLM_BACKEND_URL=http://localhost:11434/v1 - - LLM_PROVIDER=ollama - depends_on: - - ollama - tty: true - stdin_open: true - profiles: - - ollama - - # App container for OpenAI setup (no Ollama dependency) - app-openai: - build: - context: . 
- container_name: trading-agents-openai - network_mode: host - volumes: - - .:/app - - ./data:/app/data - env_file: - - .env - environment: - - LLM_PROVIDER=openai - - LLM_BACKEND_URL=https://api.openai.com/v1 - tty: true - stdin_open: true - profiles: - - openai - - # Generic app container (uses .env settings as-is) - app: - build: - context: . - container_name: trading-agents - network_mode: host - volumes: - - .:/app - - ./data:/app/data - env_file: - - .env - tty: true - stdin_open: true - profiles: - - default diff --git a/init-ollama.bat b/init-ollama.bat deleted file mode 100644 index 818678c1..00000000 --- a/init-ollama.bat +++ /dev/null @@ -1,97 +0,0 @@ -@echo off -setlocal enabledelayedexpansion - -echo πŸš€ Initializing Ollama models... - -REM Define required models -set DEEP_THINK_MODEL=qwen3:0.6b -set EMBEDDING_MODEL=nomic-embed-text - -REM Wait for Ollama to be ready -echo ⏳ Waiting for Ollama service to start... -set max_attempts=30 -set attempt=0 - -:wait_loop -if %attempt% geq %max_attempts% goto timeout_error - -docker compose --profile ollama exec ollama ollama list >nul 2>&1 -if %errorlevel% equ 0 ( - echo βœ… Ollama is ready! - goto ollama_ready -) - -set /a attempt=%attempt%+1 -echo Waiting for Ollama... (attempt %attempt%/%max_attempts%) -timeout /t 2 /nobreak >nul -goto wait_loop - -:timeout_error -echo ❌ Error: Ollama failed to start within the expected time -exit /b 1 - -:ollama_ready - -REM Check cache directory -if exist ".\ollama_data" ( - echo πŸ“ Found existing ollama_data cache directory - for /f "tokens=*" %%a in ('dir ".\ollama_data" /s /-c ^| find "bytes"') do ( - echo Cache directory exists - ) -) else ( - echo πŸ“ Creating ollama_data cache directory... - mkdir ".\ollama_data" -) - -REM Get list of currently available models -echo πŸ” Checking for existing models... 
-docker compose --profile ollama exec ollama ollama list > temp_models.txt 2>nul -if %errorlevel% neq 0 ( - echo > temp_models.txt -) - -REM Check if deep thinking model exists -findstr /c:"%DEEP_THINK_MODEL%" temp_models.txt >nul -if %errorlevel% equ 0 ( - echo βœ… Deep thinking model '%DEEP_THINK_MODEL%' already available -) else ( - echo πŸ“₯ Pulling deep thinking model: %DEEP_THINK_MODEL%... - docker compose --profile ollama exec ollama ollama pull %DEEP_THINK_MODEL% - if %errorlevel% equ 0 ( - echo βœ… Model %DEEP_THINK_MODEL% pulled successfully - ) else ( - echo ❌ Failed to pull model %DEEP_THINK_MODEL% - goto cleanup - ) -) - -REM Check if embedding model exists -findstr /c:"%EMBEDDING_MODEL%" temp_models.txt >nul -if %errorlevel% equ 0 ( - echo βœ… Embedding model '%EMBEDDING_MODEL%' already available -) else ( - echo πŸ“₯ Pulling embedding model: %EMBEDDING_MODEL%... - docker compose --profile ollama exec ollama ollama pull %EMBEDDING_MODEL% - if %errorlevel% equ 0 ( - echo βœ… Model %EMBEDDING_MODEL% pulled successfully - ) else ( - echo ❌ Failed to pull model %EMBEDDING_MODEL% - goto cleanup - ) -) - -REM List all available models -echo πŸ“‹ Available models: -docker compose --profile ollama exec ollama ollama list - -REM Show cache info -if exist ".\ollama_data" ( - echo πŸ’Ύ Model cache directory: .\ollama_data -) - -echo πŸŽ‰ Ollama initialization complete! -echo πŸ’‘ Tip: Models are cached in .\ollama_data and will be reused on subsequent runs - -:cleanup -if exist temp_models.txt del temp_models.txt -exit /b 0 \ No newline at end of file diff --git a/init-ollama.sh b/init-ollama.sh deleted file mode 100644 index ab8aa1cb..00000000 --- a/init-ollama.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash -set -e - -echo "πŸš€ Initializing Ollama models..." - -# Define required models -DEEP_THINK_MODEL="qwen3:0.6b" -EMBEDDING_MODEL="nomic-embed-text" - -# Wait for Ollama to be ready -echo "⏳ Waiting for Ollama service to start..." 
-max_attempts=30 -attempt=0 -while [ $attempt -lt $max_attempts ]; do - if docker compose --profile ollama exec ollama ollama list > /dev/null 2>&1; then - echo "βœ… Ollama is ready!" - break - fi - echo " Waiting for Ollama... (attempt $((attempt + 1))/$max_attempts)" - sleep 2 - attempt=$((attempt + 1)) -done - -if [ $attempt -eq $max_attempts ]; then - echo "❌ Error: Ollama failed to start within the expected time" - exit 1 -fi - -# Check cache directory -if [ -d "./ollama_data" ]; then - echo "πŸ“ Found existing ollama_data cache directory" - cache_size=$(du -sh ./ollama_data 2>/dev/null | cut -f1 || echo "0") - echo " Cache size: $cache_size" -else - echo "πŸ“ Creating ollama_data cache directory..." - mkdir -p ./ollama_data -fi - -# Get list of currently available models -echo "πŸ” Checking for existing models..." -available_models=$(docker compose --profile ollama exec ollama ollama list 2>/dev/null | tail -n +2 | awk '{print $1}' || echo "") - -# Function to check if model exists -model_exists() { - local model_name="$1" - echo "$available_models" | grep -q "^$model_name" -} - -# Pull deep thinking model if not present -if model_exists "$DEEP_THINK_MODEL"; then - echo "βœ… Deep thinking model '$DEEP_THINK_MODEL' already available" -else - echo "πŸ“₯ Pulling deep thinking model: $DEEP_THINK_MODEL..." - docker compose --profile ollama exec ollama ollama pull "$DEEP_THINK_MODEL" - echo "βœ… Model $DEEP_THINK_MODEL pulled successfully" -fi - -# Pull embedding model if not present -if model_exists "$EMBEDDING_MODEL"; then - echo "βœ… Embedding model '$EMBEDDING_MODEL' already available" -else - echo "πŸ“₯ Pulling embedding model: $EMBEDDING_MODEL..." 
- docker compose --profile ollama exec ollama ollama pull "$EMBEDDING_MODEL" - echo "βœ… Model $EMBEDDING_MODEL pulled successfully" -fi - -# List all available models -echo "πŸ“‹ Available models:" -docker compose --profile ollama exec ollama ollama list - -# Show cache info -if [ -d "./ollama_data" ]; then - cache_size=$(du -sh ./ollama_data 2>/dev/null | cut -f1 || echo "unknown") - echo "πŸ’Ύ Model cache size: $cache_size (stored in ./ollama_data)" -fi - -echo "πŸŽ‰ Ollama initialization complete!" -echo "πŸ’‘ Tip: Models are cached in ./ollama_data and will be reused on subsequent runs" \ No newline at end of file diff --git a/main.py b/main.py index 9434cce6..6c8ae3d9 100644 --- a/main.py +++ b/main.py @@ -1,46 +1,21 @@ -import os from tradingagents.graph.trading_graph import TradingAgentsGraph from tradingagents.default_config import DEFAULT_CONFIG -from dotenv import load_dotenv -def run_analysis(config_overrides=None): - """ - Initializes and runs a trading cycle with configurable overrides. 
- """ - load_dotenv() # Load .env file variables +# Create a custom config +config = DEFAULT_CONFIG.copy() +config["llm_provider"] = "google" # Use a different model +config["backend_url"] = "https://generativelanguage.googleapis.com/v1" # Use a different backend +config["deep_think_llm"] = "gemini-2.0-flash" # Use a different model +config["quick_think_llm"] = "gemini-2.0-flash" # Use a different model +config["max_debate_rounds"] = 1 # Increase debate rounds +config["online_tools"] = True # Increase debate rounds - config = DEFAULT_CONFIG.copy() +# Initialize with custom config +ta = TradingAgentsGraph(debug=True, config=config) - # Override with environment variables if set - config["llm_provider"] = os.environ.get("LLM_PROVIDER", config.get("llm_provider", "google")) - config["backend_url"] = os.environ.get("LLM_BACKEND_URL", config.get("backend_url", "https://generativelanguage.googleapis.com/v1")) - config["deep_think_llm"] = os.environ.get("LLM_DEEP_THINK_MODEL", config.get("deep_think_llm", "gemini-2.0-flash")) - config["quick_think_llm"] = os.environ.get("LLM_QUICK_THINK_MODEL", config.get("quick_think_llm", "gemini-2.0-flash")) - config["max_debate_rounds"] = int(os.environ.get("MAX_DEBATE_ROUNDS", config.get("max_debate_rounds", 1))) - config["online_tools"] = os.environ.get("ONLINE_TOOLS", str(config.get("online_tools", True))).lower() == 'true' +# forward propagate +_, decision = ta.propagate("NVDA", "2024-05-10") +print(decision) - - # Apply overrides from function argument - if config_overrides: - config.update(config_overrides) - - print("Using configuration:") - for key, value in config.items(): - print(f"{key}: {value}") - - # Initialize with the final config - ta = TradingAgentsGraph(debug=True, config=config) - - # Forward propagate - _, decision = ta.propagate("NVDA", "2024-05-10") - return decision - -if __name__ == "__main__": - # Example of running the trading analysis - # You can override specific configurations here if needed, e.g.: - # 
decision = run_trading_cyrun_analysiscle(config_overrides={"max_debate_rounds": 2}) - decision = run_analysis() - print(decision) - - # Memorize mistakes and reflect - # ta.reflect_and_remember(1000) # parameter is the position returns +# Memorize mistakes and reflect +# ta.reflect_and_remember(1000) # parameter is the position returns diff --git a/requirements.txt b/requirements.txt index 610dd7e4..a6154cd2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,6 @@ typing-extensions langchain-openai langchain-experimental -langchain_anthropic -langchain_google_genai pandas yfinance praw @@ -24,6 +22,5 @@ redis chainlit rich questionary -ollama -pytest -python-dotenv +langchain_anthropic +langchain-google-genai diff --git a/tests/README.md b/tests/README.md deleted file mode 100644 index 62ed215d..00000000 --- a/tests/README.md +++ /dev/null @@ -1,185 +0,0 @@ -# TradingAgents Test Suite - -This directory contains all test scripts for validating the TradingAgents setup and configuration. - -## Test Scripts - -### πŸ§ͺ `run_tests.py` - Automated Test Runner -**Purpose**: Automatically detects your LLM provider and runs appropriate tests. - -**Usage**: -```bash -# Run all tests (auto-detects provider from LLM_PROVIDER env var) -# Always run from project root, not from tests/ directory -python tests/run_tests.py - -# In Docker -docker compose --profile openai run --rm app-openai python tests/run_tests.py -docker compose --profile ollama exec app-ollama python tests/run_tests.py -``` - -**Important**: Always run the test runner from the **project root directory**, not from inside the `tests/` directory. The runner automatically handles path resolution and changes to the correct working directory. 
- -**Features**: -- Auto-detects LLM provider from environment -- Runs provider-specific tests only -- Provides comprehensive test summary -- Handles timeouts and error reporting - ---- - -### πŸ”Œ `test_openai_connection.py` - OpenAI API Tests -**Purpose**: Validates OpenAI API connectivity and functionality. - -**Tests**: -- βœ… API key validation -- βœ… Chat completion (using `gpt-4o-mini`) -- βœ… Embeddings (using `text-embedding-3-small`) -- βœ… Configuration validation - -**Usage**: -```bash -# From project root -python tests/test_openai_connection.py - -# In Docker -docker compose --profile openai run --rm app-openai python tests/test_openai_connection.py -``` - -**Requirements**: -- `OPENAI_API_KEY` environment variable -- `LLM_PROVIDER=openai` - ---- - -### πŸ¦™ `test_ollama_connection.py` - Ollama Connectivity Tests -**Purpose**: Validates Ollama server connectivity and model availability. - -**Tests**: -- βœ… Ollama API accessibility -- βœ… Model availability (`qwen3:0.6b`, `nomic-embed-text`) -- βœ… OpenAI-compatible API functionality -- βœ… Chat completion and embeddings - -**Usage**: -```bash -# From project root -python tests/test_ollama_connection.py - -# In Docker -docker compose --profile ollama exec app-ollama python tests/test_ollama_connection.py -``` - -**Requirements**: -- Ollama server running -- Required models downloaded -- `LLM_PROVIDER=ollama` - ---- - -### βš™οΈ `test_setup.py` - General Setup Validation -**Purpose**: Validates basic TradingAgents setup and configuration. 
- -**Tests**: -- βœ… Python package imports -- βœ… Configuration loading -- βœ… TradingAgentsGraph initialization -- βœ… Data access capabilities - -**Usage**: -```bash -# From project root -python tests/test_setup.py - -# In Docker -docker compose --profile openai run --rm app-openai python tests/test_setup.py -docker compose --profile ollama exec app-ollama python tests/test_setup.py -``` - -**Requirements**: -- TradingAgents dependencies installed -- Basic environment configuration - ---- - -## Test Results Interpretation - -### βœ… Success Indicators -- All tests pass -- API connections established -- Models available and responding -- Configuration properly loaded - -### ❌ Common Issues - -**OpenAI Tests Failing**: -- Check `OPENAI_API_KEY` is set correctly -- Verify API key has sufficient quota -- Ensure internet connectivity - -**Ollama Tests Failing**: -- Verify Ollama service is running -- Check if models are downloaded (`./init-ollama.sh`) -- Confirm `ollama list` shows required models - -**Setup Tests Failing**: -- Check Python dependencies are installed -- Verify environment variables are set -- Ensure `.env` file is properly configured - ---- - -## Quick Testing Commands - -**⚠️ Important**: Always run these commands from the **project root directory** (not from inside `tests/`): - -```bash -# Test everything automatically (from project root) -python tests/run_tests.py - -# Test specific provider (from project root) -LLM_PROVIDER=openai python tests/run_tests.py -LLM_PROVIDER=ollama python tests/run_tests.py - -# Test individual components (from project root) -python tests/test_openai_connection.py -python tests/test_ollama_connection.py -python tests/test_setup.py -``` - -**Why from project root?** -- Tests need to import the `tradingagents` package -- The `tradingagents` package is located in the project root -- Running from `tests/` directory would cause import errors - ---- - -## Adding New Tests - -To add new tests: - -1. 
Create new test script in `tests/` directory -2. Follow the naming convention: `test_.py` -3. Include proper error handling and status reporting -4. Update `run_tests.py` if automatic detection is needed -5. Document the test in this README - -**Test Script Template**: -```python -#!/usr/bin/env python3 -"""Test script for """ - -def test_component(): - """Test functionality.""" - try: - # Test implementation - print("βœ… Test passed") - return True - except Exception as e: - print(f"❌ Test failed: {e}") - return False - -if __name__ == "__main__": - success = test_component() - exit(0 if success else 1) -``` \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index dfc7f701..00000000 --- a/tests/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -""" -TradingAgents Test Suite - -This package contains all test scripts for the TradingAgents application: -- test_openai_connection.py: OpenAI API connectivity tests -- test_ollama_connection.py: Ollama connectivity tests -- test_setup.py: General setup and configuration tests -""" - -__version__ = "1.0.0" \ No newline at end of file diff --git a/tests/run_tests.py b/tests/run_tests.py deleted file mode 100644 index 92f44842..00000000 --- a/tests/run_tests.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python3 -""" -Test runner script for TradingAgents - -This script automatically detects the LLM provider and runs appropriate tests. 
-""" - -import os -import sys -import subprocess - -def get_llm_provider(): - """Get the configured LLM provider from environment.""" - return os.environ.get("LLM_PROVIDER", "").lower() - -def run_test_script(script_name): - """Run a test script and return success status.""" - try: - print(f"πŸ§ͺ Running {script_name}...") - result = subprocess.run([sys.executable, script_name], - capture_output=True, text=True, timeout=120) - - if result.returncode == 0: - print(f"βœ… {script_name} passed") - if result.stdout: - print(f" Output: {result.stdout.strip()}") - return True - else: - print(f"❌ {script_name} failed") - if result.stderr: - print(f" Error: {result.stderr.strip()}") - return False - - except subprocess.TimeoutExpired: - print(f"⏰ {script_name} timed out") - return False - except Exception as e: - print(f"πŸ’₯ {script_name} crashed: {e}") - return False - -def main(): - """Main test runner function.""" - print("πŸš€ TradingAgents Test Runner") - print("=" * 50) - - # Get project root directory (parent of tests directory) - tests_dir = os.path.dirname(os.path.abspath(__file__)) - project_root = os.path.dirname(tests_dir) - os.chdir(project_root) - - provider = get_llm_provider() - print(f"πŸ“‹ Detected LLM Provider: {provider or 'not set'}") - - tests_run = [] - tests_passed = [] - - # Always run setup tests - if run_test_script("tests/test_setup.py"): - tests_passed.append("tests/test_setup.py") - tests_run.append("tests/test_setup.py") - - # Run provider-specific tests - if provider == "openai": - print("\nπŸ” Running OpenAI-specific tests...") - if run_test_script("tests/test_openai_connection.py"): - tests_passed.append("tests/test_openai_connection.py") - tests_run.append("tests/test_openai_connection.py") - - elif provider == "ollama": - print("\nπŸ” Running Ollama-specific tests...") - if run_test_script("tests/test_ollama_connection.py"): - tests_passed.append("tests/test_ollama_connection.py") - tests_run.append("tests/test_ollama_connection.py") - 
- else: - print(f"\n⚠️ Unknown or unset LLM provider: '{provider}'") - print(" Running all connectivity tests...") - - for test_script in ["tests/test_openai_connection.py", "tests/test_ollama_connection.py"]: - if run_test_script(test_script): - tests_passed.append(test_script) - tests_run.append(test_script) - - # Summary - print("\n" + "=" * 50) - print(f"πŸ“Š Test Results: {len(tests_passed)}/{len(tests_run)} tests passed") - - for test in tests_run: - status = "βœ… PASS" if test in tests_passed else "❌ FAIL" - print(f" {test}: {status}") - - if len(tests_passed) == len(tests_run): - print("\nπŸŽ‰ All tests passed! TradingAgents is ready to use.") - return 0 - else: - print(f"\n⚠️ {len(tests_run) - len(tests_passed)} test(s) failed. Check configuration.") - return 1 - -if __name__ == "__main__": - exit_code = main() - sys.exit(exit_code) \ No newline at end of file diff --git a/tests/test_ollama_connection.py b/tests/test_ollama_connection.py deleted file mode 100644 index c9a67cc7..00000000 --- a/tests/test_ollama_connection.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python3 -""" -Simple test script to verify Ollama connection is working. 
-""" - -import os -import requests -import time -from openai import OpenAI - -def test_ollama_connection(): - """Test if Ollama is accessible and responding.""" - - # Get configuration from environment - backend_url = os.environ.get("LLM_BACKEND_URL", "http://localhost:11434/v1") - model = os.environ.get("LLM_DEEP_THINK_MODEL", "qwen3:0.6b") - embedding_model = os.environ.get("LLM_EMBEDDING_MODEL", "nomic-embed-text") - - print(f"Testing Ollama connection:") - print(f" Backend URL: {backend_url}") - print(f" Model: {model}") - print(f" Embedding Model: {embedding_model}") - - # Test 1: Check if Ollama API is responding - try: - response = requests.get(f"{backend_url.replace('/v1', '')}/api/tags", timeout=10) - if response.status_code == 200: - print("βœ… Ollama API is responding") - else: - print(f"❌ Ollama API returned status code: {response.status_code}") - return False - except Exception as e: - print(f"❌ Failed to connect to Ollama API: {e}") - return False - - # Test 2: Check if the model is available - try: - response = requests.get(f"{backend_url.replace('/v1', '')}/api/tags", timeout=10) - models = response.json().get("models", []) - model_names = [m.get("name", "") for m in models] - - if any(name.startswith(model) for name in model_names): - print(f"βœ… Model '{model}' is available") - else: - print(f"❌ Model '{model}' not found. 
Available models: {model_names}") - return False - except Exception as e: - print(f"❌ Failed to check model availability: {e}") - return False - - # Test 3: Test OpenAI-compatible API - try: - client = OpenAI(base_url=backend_url, api_key="dummy") - response = client.chat.completions.create( - model=model, - messages=[{"role": "user", "content": "Hello, say 'test successful'"}], - max_tokens=50 - ) - print("βœ… OpenAI-compatible API is working") - print(f" Response: {response.choices[0].message.content}") - return True - except Exception as e: - print(f"❌ OpenAI-compatible API test failed: {e}") - return False - - # Test 4: Check if the embedding model is available - try: - response = requests.get(f"{backend_url.replace('/v1', '')}/api/tags", timeout=10) - models = response.json().get("models", []) - model_names = [m.get("name") for m in models if m.get("name")] - - # Check if any of the available models starts with the embedding model name - if any(name.startswith(embedding_model) for name in model_names): - print(f"βœ… Embedding Model '{embedding_model}' is available") - else: - print(f"❌ Embedding Model '{embedding_model}' not found. 
Available models: {model_names}") - return False - except Exception as e: - print(f"❌ Failed to check embedding model availability: {e}") - return False - - # Test 5: Test OpenAI-compatible embedding API - try: - client = OpenAI(base_url=backend_url, api_key="dummy") - response = client.embeddings.create( - model=embedding_model, - input="This is a test sentence.", - encoding_format="float" - ) - if response.data and len(response.data) > 0 and response.data[0].embedding: - print("βœ… OpenAI-compatible embedding API is working") - print(f" Successfully generated embedding of dimension: {len(response.data[0].embedding)}") - return True - else: - print("❌ Embedding API test failed: No embedding data in response") - return False - except Exception as e: - print(f"❌ OpenAI-compatible embedding API test failed: {e}") - return False - -if __name__ == "__main__": - success = test_ollama_connection() - if success: - print("\nπŸŽ‰ All tests passed! Ollama is ready.") - exit(0) - else: - print("\nπŸ’₯ Tests failed! Check Ollama configuration.") - exit(1) \ No newline at end of file diff --git a/tests/test_openai_connection.py b/tests/test_openai_connection.py deleted file mode 100644 index 9d0b8c08..00000000 --- a/tests/test_openai_connection.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/env python3 -""" -Test script to verify OpenAI API connection is working. 
-""" - -import os -import sys -from openai import OpenAI - -def test_openai_connection(): - """Test if OpenAI API is accessible and responding.""" - - # Get configuration from environment - api_key = os.environ.get("OPENAI_API_KEY") - backend_url = os.environ.get("LLM_BACKEND_URL", "https://api.openai.com/v1") - provider = os.environ.get("LLM_PROVIDER", "openai") - - print(f"Testing OpenAI API connection:") - print(f" Provider: {provider}") - print(f" Backend URL: {backend_url}") - print(f" API Key: {'βœ… Set' if api_key and api_key != '' else '❌ Not set or using placeholder'}") - - if not api_key or api_key == "": - print("❌ OPENAI_API_KEY is not set or still using placeholder value") - print(" Please set your OpenAI API key in the .env file") - return False - - # Test 1: Initialize OpenAI client - try: - client = OpenAI( - api_key=api_key, - base_url=backend_url - ) - print("βœ… OpenAI client initialized successfully") - except Exception as e: - print(f"❌ Failed to initialize OpenAI client: {e}") - return False - - # Test 2: Test chat completion with a simple query - try: - print("πŸ§ͺ Testing chat completion...") - response = client.chat.completions.create( - model="gpt-4o-mini", # Use the most cost-effective model for testing - messages=[ - {"role": "user", "content": "Hello! Please respond with exactly: 'OpenAI API test successful'"} - ], - max_tokens=50, - temperature=0 - ) - - if response.choices and response.choices[0].message.content: - content = response.choices[0].message.content.strip() - print(f"βœ… Chat completion successful") - print(f" Model: {response.model}") - print(f" Response: {content}") - print(f" Tokens used: {response.usage.total_tokens if response.usage else 'unknown'}") - else: - print("❌ Chat completion returned empty response") - return False - - except Exception as e: - print(f"❌ Chat completion test failed: {e}") - if "insufficient_quota" in str(e).lower(): - print(" πŸ’‘ This might be a quota/billing issue. 
Check your OpenAI account.") - elif "invalid_api_key" in str(e).lower(): - print(" πŸ’‘ Invalid API key. Please check your OPENAI_API_KEY.") - return False - - # Test 3: Test embeddings (optional, for completeness) - try: - print("πŸ§ͺ Testing embeddings...") - response = client.embeddings.create( - model="text-embedding-3-small", # Cost-effective embedding model - input="This is a test sentence for embeddings." - ) - - if response.data and len(response.data) > 0 and response.data[0].embedding: - embedding = response.data[0].embedding - print(f"βœ… Embeddings successful") - print(f" Model: {response.model}") - print(f" Embedding dimension: {len(embedding)}") - print(f" Tokens used: {response.usage.total_tokens if response.usage else 'unknown'}") - else: - print("❌ Embeddings returned empty response") - return False - - except Exception as e: - print(f"❌ Embeddings test failed: {e}") - print(" ⚠️ Embeddings test failed but chat completion worked. This is usually fine for basic usage.") - # Don't return False here as embeddings might not be critical for all use cases - - return True - -def test_config_validation(): - """Validate the configuration is properly set for OpenAI.""" - - provider = os.environ.get("LLM_PROVIDER", "").lower() - backend_url = os.environ.get("LLM_BACKEND_URL", "") - - print("\nπŸ”§ Configuration validation:") - - if provider != "openai": - print(f"⚠️ LLM_PROVIDER is '{provider}', expected 'openai'") - print(" The app might still work if the provider supports OpenAI-compatible API") - else: - print("βœ… LLM_PROVIDER correctly set to 'openai'") - - if "openai.com" in backend_url: - print("βœ… Using official OpenAI API endpoint") - elif backend_url: - print(f"ℹ️ Using custom endpoint: {backend_url}") - print(" Make sure this endpoint is OpenAI-compatible") - else: - print("⚠️ LLM_BACKEND_URL not set, using default") - - # Check for common environment issues - finnhub_key = os.environ.get("FINNHUB_API_KEY") - if not finnhub_key or finnhub_key == 
"": - print("⚠️ FINNHUB_API_KEY not set - financial data fetching may not work") - else: - print("βœ… FINNHUB_API_KEY is set") - - return True - -if __name__ == "__main__": - print("πŸ§ͺ OpenAI API Connection Test\n") - - config_ok = test_config_validation() - api_ok = test_openai_connection() - - print(f"\nπŸ“Š Test Results:") - print(f" Configuration: {'βœ… OK' if config_ok else '❌ Issues'}") - print(f" API Connection: {'βœ… OK' if api_ok else '❌ Failed'}") - - if config_ok and api_ok: - print("\nπŸŽ‰ All tests passed! OpenAI API is ready for TradingAgents.") - print("πŸ’‘ You can now run the trading agents with OpenAI as the LLM provider.") - else: - print("\nπŸ’₯ Some tests failed. Please check your configuration and API key.") - print("πŸ’‘ Make sure OPENAI_API_KEY is set correctly in your .env file.") - - sys.exit(0 if (config_ok and api_ok) else 1) \ No newline at end of file diff --git a/tests/test_setup.py b/tests/test_setup.py deleted file mode 100644 index 1df78fc8..00000000 --- a/tests/test_setup.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python3 -""" -Test script to verify the complete TradingAgents setup works end-to-end. 
-""" - -import os -import sys -from datetime import datetime, timedelta - -def test_basic_setup(): - """Test basic imports and configuration""" - try: - from tradingagents.graph.trading_graph import TradingAgentsGraph - from tradingagents.default_config import DEFAULT_CONFIG - print("βœ… Basic imports successful") - return True - except Exception as e: - print(f"❌ Basic import failed: {e}") - return False - -def test_config(): - """Test configuration loading""" - try: - from tradingagents.default_config import DEFAULT_CONFIG - - # Check required environment variables - required_vars = ['LLM_PROVIDER', 'OPENAI_API_KEY', 'FINNHUB_API_KEY'] - missing_vars = [] - - for var in required_vars: - if not os.environ.get(var): - missing_vars.append(var) - - if missing_vars: - print(f"⚠️ Missing environment variables: {missing_vars}") - print(" This may cause issues with data fetching or LLM calls") - else: - print("βœ… Required environment variables set") - - print(f"βœ… Configuration loaded successfully") - print(f" LLM Provider: {os.environ.get('LLM_PROVIDER', 'not set')}") - print(f" OPENAI API KEY: {os.environ.get('OPENAI_API_KEY', 'not set')}") - print(f" Backend URL: {os.environ.get('LLM_BACKEND_URL', 'not set')}") - return True - except Exception as e: - print(f"❌ Configuration test failed: {e}") - return False - -def test_trading_graph_init(): - """Test TradingAgentsGraph initialization""" - try: - from tradingagents.graph.trading_graph import TradingAgentsGraph - from tradingagents.default_config import DEFAULT_CONFIG - - # Create a minimal config for testing - config = DEFAULT_CONFIG.copy() - config["online_tools"] = False # Use cached data for testing - config["max_debate_rounds"] = 1 # Minimize API calls - - ta = TradingAgentsGraph(debug=True, config=config) - print("βœ… TradingAgentsGraph initialized successfully") - return True - except Exception as e: - print(f"❌ TradingAgentsGraph initialization failed: {e}") - return False - -def test_data_access(): - """Test 
if we can access basic data""" - try: - from tradingagents.dataflows.yfin_utils import get_stock_data - - # Test with a simple stock query - test_date = (datetime.now() - timedelta(days=30)).strftime('%Y-%m-%d') - - # This should work even without API keys if using cached data - data = get_stock_data("AAPL", test_date) - - if data: - print("βœ… Data access test successful") - return True - else: - print("⚠️ Data access returned empty results (may be expected with cached data)") - return True - except Exception as e: - print(f"❌ Data access test failed: {e}") - return False - -def run_all_tests(): - """Run all tests""" - print("πŸ§ͺ Running TradingAgents setup tests...\n") - - tests = [ - ("Basic Setup", test_basic_setup), - ("Configuration", test_config), - ("TradingGraph Init", test_trading_graph_init), - ("Data Access", test_data_access), - ] - - passed = 0 - total = len(tests) - - for test_name, test_func in tests: - print(f"Running {test_name} test...") - try: - if test_func(): - passed += 1 - print() - except Exception as e: - print(f"❌ {test_name} test crashed: {e}\n") - - print(f"πŸ“Š Test Results: {passed}/{total} tests passed") - - if passed == total: - print("πŸŽ‰ All tests passed! TradingAgents setup is working correctly.") - return True - else: - print("⚠️ Some tests failed. 
Check the output above for details.") - return False - -if __name__ == "__main__": - success = run_all_tests() - sys.exit(0 if success else 1) \ No newline at end of file diff --git a/tradingagents/agents/utils/memory.py b/tradingagents/agents/utils/memory.py index c2119145..f3415765 100644 --- a/tradingagents/agents/utils/memory.py +++ b/tradingagents/agents/utils/memory.py @@ -7,7 +7,6 @@ class FinancialSituationMemory: def __init__(self, name, config): if config["backend_url"] == "http://localhost:11434/v1": self.embedding = "nomic-embed-text" - self.client = OpenAI(base_url=config["backend_url"]) else: self.embedding = "text-embedding-3-small" self.client = OpenAI() diff --git a/tradingagents/default_config.py b/tradingagents/default_config.py index 90914727..2cf15b85 100644 --- a/tradingagents/default_config.py +++ b/tradingagents/default_config.py @@ -2,10 +2,7 @@ import os DEFAULT_CONFIG = { "project_dir": os.path.abspath(os.path.join(os.path.dirname(__file__), ".")), - "data_dir": os.path.join( - os.path.abspath(os.path.join(os.path.dirname(__file__), ".")), - "data", - ), + "data_dir": "/Users/yluo/Documents/Code/ScAI/FR1-data", "data_cache_dir": os.path.join( os.path.abspath(os.path.join(os.path.dirname(__file__), ".")), "dataflows/data_cache", From 43aa9c5d09baac7a4fda6d0fce82e9d21b0f3532 Mon Sep 17 00:00:00 2001 From: Max Wong Date: Thu, 26 Jun 2025 00:27:01 -0400 Subject: [PATCH 10/26] Local Ollama (#53) - Fix typo 'Start' 'End' - Add llama3.1 selection - Use 'quick_think_llm' model instead of hard-coding GPT --- cli/utils.py | 2 ++ tradingagents/agents/utils/agent_utils.py | 4 ++-- tradingagents/agents/utils/memory.py | 2 +- tradingagents/dataflows/interface.py | 16 ++++++++-------- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/cli/utils.py b/cli/utils.py index d3873360..7b9682a6 100644 --- a/cli/utils.py +++ b/cli/utils.py @@ -150,6 +150,7 @@ def select_shallow_thinking_agent(provider) -> str: ("google/gemini-2.0-flash-exp:free - 
Gemini Flash 2.0 offers a significantly faster time to first token", "google/gemini-2.0-flash-exp:free"), ], "ollama": [ + ("llama3.1 local", "llama3.1"), ("llama3.2 local", "llama3.2"), ] } @@ -211,6 +212,7 @@ def select_deep_thinking_agent(provider) -> str: ("Deepseek - latest iteration of the flagship chat model family from the DeepSeek team.", "deepseek/deepseek-chat-v3-0324:free"), ], "ollama": [ + ("llama3.1 local", "llama3.1"), ("qwen3", "qwen3"), ] } diff --git a/tradingagents/agents/utils/agent_utils.py b/tradingagents/agents/utils/agent_utils.py index b7313b71..0b07f044 100644 --- a/tradingagents/agents/utils/agent_utils.py +++ b/tradingagents/agents/utils/agent_utils.py @@ -124,7 +124,7 @@ class Toolkit: def get_YFin_data( symbol: Annotated[str, "ticker symbol of the company"], start_date: Annotated[str, "Start date in yyyy-mm-dd format"], - end_date: Annotated[str, "Start date in yyyy-mm-dd format"], + end_date: Annotated[str, "End date in yyyy-mm-dd format"], ) -> str: """ Retrieve the stock price data for a given ticker symbol from Yahoo Finance. @@ -145,7 +145,7 @@ class Toolkit: def get_YFin_data_online( symbol: Annotated[str, "ticker symbol of the company"], start_date: Annotated[str, "Start date in yyyy-mm-dd format"], - end_date: Annotated[str, "Start date in yyyy-mm-dd format"], + end_date: Annotated[str, "End date in yyyy-mm-dd format"], ) -> str: """ Retrieve the stock price data for a given ticker symbol from Yahoo Finance. 
diff --git a/tradingagents/agents/utils/memory.py b/tradingagents/agents/utils/memory.py index f3415765..69b8ab8c 100644 --- a/tradingagents/agents/utils/memory.py +++ b/tradingagents/agents/utils/memory.py @@ -9,7 +9,7 @@ class FinancialSituationMemory: self.embedding = "nomic-embed-text" else: self.embedding = "text-embedding-3-small" - self.client = OpenAI() + self.client = OpenAI(base_url=config["backend_url"]) self.chroma_client = chromadb.Client(Settings(allow_reset=True)) self.situation_collection = self.chroma_client.create_collection(name=name) diff --git a/tradingagents/dataflows/interface.py b/tradingagents/dataflows/interface.py index a0952945..7fffbb4f 100644 --- a/tradingagents/dataflows/interface.py +++ b/tradingagents/dataflows/interface.py @@ -628,7 +628,7 @@ def get_YFin_data_window( def get_YFin_data_online( symbol: Annotated[str, "ticker symbol of the company"], start_date: Annotated[str, "Start date in yyyy-mm-dd format"], - end_date: Annotated[str, "Start date in yyyy-mm-dd format"], + end_date: Annotated[str, "End date in yyyy-mm-dd format"], ): datetime.strptime(start_date, "%Y-%m-%d") @@ -670,7 +670,7 @@ def get_YFin_data_online( def get_YFin_data( symbol: Annotated[str, "ticker symbol of the company"], start_date: Annotated[str, "Start date in yyyy-mm-dd format"], - end_date: Annotated[str, "Start date in yyyy-mm-dd format"], + end_date: Annotated[str, "End date in yyyy-mm-dd format"], ) -> str: # read in data data = pd.read_csv( @@ -704,10 +704,10 @@ def get_YFin_data( def get_stock_news_openai(ticker, curr_date): config = get_config() - client = OpenAI() + client = OpenAI(base_url=config["backend_url"]) response = client.responses.create( - model="gpt-4.1-mini", + model=config["quick_think_llm"], input=[ { "role": "system", @@ -739,10 +739,10 @@ def get_stock_news_openai(ticker, curr_date): def get_global_news_openai(curr_date): config = get_config() - client = OpenAI() + client = OpenAI(base_url=config["backend_url"]) response = 
client.responses.create( - model="gpt-4.1-mini", + model=config["quick_think_llm"], input=[ { "role": "system", @@ -774,10 +774,10 @@ def get_global_news_openai(curr_date): def get_fundamentals_openai(ticker, curr_date): config = get_config() - client = OpenAI() + client = OpenAI(base_url=config["backend_url"]) response = client.responses.create( - model="gpt-4.1-mini", + model=config["quick_think_llm"], input=[ { "role": "system", From f704828f89d1166692e5075d32509ebb93560320 Mon Sep 17 00:00:00 2001 From: mirza-samad-ahmed-baig Date: Thu, 3 Jul 2025 17:43:40 +0500 Subject: [PATCH 11/26] Fix: Prevent infinite loops, enable reflection, and improve logging --- main.py | 2 +- tradingagents/agents/trader/trader.py | 7 +++++-- tradingagents/graph/trading_graph.py | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/main.py b/main.py index 6c8ae3d9..077dfb8e 100644 --- a/main.py +++ b/main.py @@ -18,4 +18,4 @@ _, decision = ta.propagate("NVDA", "2024-05-10") print(decision) # Memorize mistakes and reflect -# ta.reflect_and_remember(1000) # parameter is the position returns +ta.reflect_and_remember(1000) # parameter is the position returns diff --git a/tradingagents/agents/trader/trader.py b/tradingagents/agents/trader/trader.py index 282a8411..1b05c35d 100644 --- a/tradingagents/agents/trader/trader.py +++ b/tradingagents/agents/trader/trader.py @@ -16,8 +16,11 @@ def create_trader(llm, memory): past_memories = memory.get_memories(curr_situation, n_matches=2) past_memory_str = "" - for i, rec in enumerate(past_memories, 1): - past_memory_str += rec["recommendation"] + "\n\n" + if past_memories: + for i, rec in enumerate(past_memories, 1): + past_memory_str += rec["recommendation"] + "\n\n" + else: + past_memory_str = "No past memories found." 
context = { "role": "user", diff --git a/tradingagents/graph/trading_graph.py b/tradingagents/graph/trading_graph.py index eb06cf43..80a29e53 100644 --- a/tradingagents/graph/trading_graph.py +++ b/tradingagents/graph/trading_graph.py @@ -226,7 +226,7 @@ class TradingAgentsGraph: directory.mkdir(parents=True, exist_ok=True) with open( - f"eval_results/{self.ticker}/TradingAgentsStrategy_logs/full_states_log.json", + f"eval_results/{self.ticker}/TradingAgentsStrategy_logs/full_states_log_{trade_date}.json", "w", ) as f: json.dump(self.log_states_dict, f, indent=4) From c73e374e7c7229e3b731e961ce165a9b2da30916 Mon Sep 17 00:00:00 2001 From: Yijia Xiao <48253104+Yijia-Xiao@users.noreply.github.com> Date: Thu, 3 Jul 2025 10:14:06 -0400 Subject: [PATCH 12/26] Update main.py --- main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.py b/main.py index 077dfb8e..6c8ae3d9 100644 --- a/main.py +++ b/main.py @@ -18,4 +18,4 @@ _, decision = ta.propagate("NVDA", "2024-05-10") print(decision) # Memorize mistakes and reflect -ta.reflect_and_remember(1000) # parameter is the position returns +# ta.reflect_and_remember(1000) # parameter is the position returns From a6734d71bcca0543296bc2f202a2c98dbf74612f Mon Sep 17 00:00:00 2001 From: luohy15 Date: Fri, 26 Sep 2025 16:17:50 +0800 Subject: [PATCH 13/26] WIP --- .gitignore | 2 + README.md | 3 +- cli/main.py | 4 + main.py | 1 - tradingagents/agents/__init__.py | 3 +- .../agents/analysts/fundamentals_analyst.py | 23 +- .../agents/analysts/market_analyst.py | 20 +- tradingagents/agents/analysts/news_analyst.py | 20 +- .../agents/analysts/social_media_analyst.py | 17 +- tradingagents/agents/utils/agent_utils.py | 422 +------- .../agents/utils/core_stock_tools.py | 22 + .../agents/utils/fundamental_data_tools.py | 77 ++ tradingagents/agents/utils/news_data_tools.py | 71 ++ .../utils/technical_indicators_tools.py | 23 + tradingagents/dataflows/__init__.py | 46 - tradingagents/dataflows/finnhub_utils.py | 36 - 
tradingagents/dataflows/google.py | 30 + tradingagents/dataflows/interface.py | 951 +++--------------- tradingagents/dataflows/local.py | 475 +++++++++ tradingagents/dataflows/openai.py | 107 ++ tradingagents/dataflows/stockstats_utils.py | 14 +- tradingagents/dataflows/yahoo_finance.py | 186 ++++ tradingagents/default_config.py | 15 +- tradingagents/graph/setup.py | 11 +- tradingagents/graph/trading_graph.py | 61 +- 25 files changed, 1262 insertions(+), 1378 deletions(-) create mode 100644 tradingagents/agents/utils/core_stock_tools.py create mode 100644 tradingagents/agents/utils/fundamental_data_tools.py create mode 100644 tradingagents/agents/utils/news_data_tools.py create mode 100644 tradingagents/agents/utils/technical_indicators_tools.py delete mode 100644 tradingagents/dataflows/finnhub_utils.py create mode 100644 tradingagents/dataflows/google.py create mode 100644 tradingagents/dataflows/local.py create mode 100644 tradingagents/dataflows/openai.py create mode 100644 tradingagents/dataflows/yahoo_finance.py diff --git a/.gitignore b/.gitignore index 4ebf99e3..3369bad9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +.venv +results env/ __pycache__/ .DS_Store diff --git a/README.md b/README.md index cac18691..31286500 100644 --- a/README.md +++ b/README.md @@ -178,7 +178,6 @@ config = DEFAULT_CONFIG.copy() config["deep_think_llm"] = "gpt-4.1-nano" # Use a different model config["quick_think_llm"] = "gpt-4.1-nano" # Use a different model config["max_debate_rounds"] = 1 # Increase debate rounds -config["online_tools"] = True # Use online tools or cached data # Initialize with custom config ta = TradingAgentsGraph(debug=True, config=config) @@ -188,7 +187,7 @@ _, decision = ta.propagate("NVDA", "2024-05-10") print(decision) ``` -> For `online_tools`, we recommend enabling them for experimentation, as they provide access to real-time data. 
The agents' offline tools rely on cached data from our **Tauric TradingDB**, a curated dataset we use for backtesting. We're currently in the process of refining this dataset, and we plan to release it soon alongside our upcoming projects. Stay tuned! +> We recommend enabling them for experimentation, as they provide access to real-time data. The agents' offline tools rely on cached data from our **Tauric TradingDB**, a curated dataset we use for backtesting. We're currently in the process of refining this dataset, and we plan to release it soon alongside our upcoming projects. Stay tuned! You can view the full list of configurations in `tradingagents/default_config.py`. diff --git a/cli/main.py b/cli/main.py index 64616ee1..2e06d50c 100644 --- a/cli/main.py +++ b/cli/main.py @@ -4,6 +4,10 @@ import typer from pathlib import Path from functools import wraps from rich.console import Console +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() from rich.panel import Panel from rich.spinner import Spinner from rich.live import Live diff --git a/main.py b/main.py index 6c8ae3d9..1ce62cc6 100644 --- a/main.py +++ b/main.py @@ -8,7 +8,6 @@ config["backend_url"] = "https://generativelanguage.googleapis.com/v1" # Use a config["deep_think_llm"] = "gemini-2.0-flash" # Use a different model config["quick_think_llm"] = "gemini-2.0-flash" # Use a different model config["max_debate_rounds"] = 1 # Increase debate rounds -config["online_tools"] = True # Increase debate rounds # Initialize with custom config ta = TradingAgentsGraph(debug=True, config=config) diff --git a/tradingagents/agents/__init__.py b/tradingagents/agents/__init__.py index 6f507651..d84d9eb1 100644 --- a/tradingagents/agents/__init__.py +++ b/tradingagents/agents/__init__.py @@ -1,4 +1,4 @@ -from .utils.agent_utils import Toolkit, create_msg_delete +from .utils.agent_utils import create_msg_delete from .utils.agent_states import AgentState, InvestDebateState, 
RiskDebateState from .utils.memory import FinancialSituationMemory @@ -21,7 +21,6 @@ from .trader.trader import create_trader __all__ = [ "FinancialSituationMemory", - "Toolkit", "AgentState", "create_msg_delete", "InvestDebateState", diff --git a/tradingagents/agents/analysts/fundamentals_analyst.py b/tradingagents/agents/analysts/fundamentals_analyst.py index 716d4de1..6b8f286d 100644 --- a/tradingagents/agents/analysts/fundamentals_analyst.py +++ b/tradingagents/agents/analysts/fundamentals_analyst.py @@ -1,28 +1,27 @@ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder import time import json +from tradingagents.agents.utils.agent_utils import get_fundamentals, get_balance_sheet, get_cashflow, get_income_statement, get_insider_sentiment, get_insider_transactions +from tradingagents.dataflows.config import get_config -def create_fundamentals_analyst(llm, toolkit): +def create_fundamentals_analyst(llm): def fundamentals_analyst_node(state): current_date = state["trade_date"] ticker = state["company_of_interest"] company_name = state["company_of_interest"] - if toolkit.config["online_tools"]: - tools = [toolkit.get_fundamentals_openai] - else: - tools = [ - toolkit.get_finnhub_company_insider_sentiment, - toolkit.get_finnhub_company_insider_transactions, - toolkit.get_simfin_balance_sheet, - toolkit.get_simfin_cashflow, - toolkit.get_simfin_income_stmt, - ] + tools = [ + get_fundamentals, + get_balance_sheet, + get_cashflow, + get_income_statement, + ] system_message = ( "You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, company financial history, insider sentiment and insider transactions to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. 
Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." - + " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.", + + " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read." + + " Use the get_fundamental_data tool with different data_type parameters: 'full_fundamentals' for comprehensive analysis, 'balance_sheet', 'cashflow', 'income_statement' for specific financial statements, 'insider_sentiment' and 'insider_transactions' for insider information.", ) prompt = ChatPromptTemplate.from_messages( diff --git a/tradingagents/agents/analysts/market_analyst.py b/tradingagents/agents/analysts/market_analyst.py index 41ee944b..8695acc7 100644 --- a/tradingagents/agents/analysts/market_analyst.py +++ b/tradingagents/agents/analysts/market_analyst.py @@ -1,25 +1,21 @@ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder import time import json +from tradingagents.agents.utils.agent_utils import get_stock_data, get_indicators +from tradingagents.dataflows.config import get_config -def create_market_analyst(llm, toolkit): +def create_market_analyst(llm): def market_analyst_node(state): current_date = state["trade_date"] ticker = state["company_of_interest"] company_name = state["company_of_interest"] - if toolkit.config["online_tools"]: - tools = [ - toolkit.get_YFin_data_online, - toolkit.get_stockstats_indicators_report_online, - ] - else: - tools = [ - toolkit.get_YFin_data, - toolkit.get_stockstats_indicators_report, - ] + tools = [ + get_stock_data, + get_indicators, + ] system_message = ( """You are a trading assistant tasked with analyzing financial markets. Your role is to select the **most relevant indicators** for a given market condition or trading strategy from the following list. 
The goal is to choose up to **8 indicators** that provide complementary insights without redundancy. Categories and each category's indicators are: @@ -46,7 +42,7 @@ Volatility Indicators: Volume-Based Indicators: - vwma: VWMA: A moving average weighted by volume. Usage: Confirm trends by integrating price action with volume data. Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses. -- Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_YFin_data first to retrieve the CSV that is needed to generate indicators. Write a very detailed and nuanced report of the trends you observe. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions.""" +- Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_stock_data first to retrieve the CSV that is needed to generate indicators. Then use get_technical_indicators with the specific indicator names. Write a very detailed and nuanced report of the trends you observe. 
Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions.""" + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""" ) diff --git a/tradingagents/agents/analysts/news_analyst.py b/tradingagents/agents/analysts/news_analyst.py index e1f03aa4..2e227c93 100644 --- a/tradingagents/agents/analysts/news_analyst.py +++ b/tradingagents/agents/analysts/news_analyst.py @@ -1,25 +1,23 @@ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder import time import json +from tradingagents.agents.utils.agent_utils import get_news, get_global_news +from tradingagents.dataflows.config import get_config -def create_news_analyst(llm, toolkit): +def create_news_analyst(llm): def news_analyst_node(state): current_date = state["trade_date"] ticker = state["company_of_interest"] - if toolkit.config["online_tools"]: - tools = [toolkit.get_global_news_openai, toolkit.get_google_news] - else: - tools = [ - toolkit.get_finnhub_news, - toolkit.get_reddit_news, - toolkit.get_google_news, - ] + tools = [ + get_news, + get_global_news, + ] system_message = ( - "You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. Look at news from EODHD, and finnhub to be comprehensive. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." - + """ Make sure to append a Makrdown table at the end of the report to organize key points in the report, organized and easy to read.""" + "You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. 
Use the get_global_news tool for macroeconomic news and the get_news tool for company-specific or topic-based news searches. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." + + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""" ) prompt = ChatPromptTemplate.from_messages( diff --git a/tradingagents/agents/analysts/social_media_analyst.py b/tradingagents/agents/analysts/social_media_analyst.py index d556f73a..b1556fb6 100644 --- a/tradingagents/agents/analysts/social_media_analyst.py +++ b/tradingagents/agents/analysts/social_media_analyst.py @@ -1,24 +1,23 @@ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder import time import json +from tradingagents.agents.utils.agent_utils import get_news +from tradingagents.dataflows.config import get_config -def create_social_media_analyst(llm, toolkit): +def create_social_media_analyst(llm): def social_media_analyst_node(state): current_date = state["trade_date"] ticker = state["company_of_interest"] company_name = state["company_of_interest"] - if toolkit.config["online_tools"]: - tools = [toolkit.get_stock_news_openai] - else: - tools = [ - toolkit.get_reddit_stock_info, - ] + tools = [ + get_news, + ] system_message = ( - "You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. 
You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. Try to look at all sources possible from social media to sentiment to news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." - + """ Make sure to append a Makrdown table at the end of the report to organize key points in the report, organized and easy to read.""", + "You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. Use the get_news tool to look up company-specific news and social media discussions about the stock. Try to look at all sources possible from social media to sentiment to news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." 
+ + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""", ) prompt = ChatPromptTemplate.from_messages( diff --git a/tradingagents/agents/utils/agent_utils.py b/tradingagents/agents/utils/agent_utils.py index 0b07f044..6cf294a1 100644 --- a/tradingagents/agents/utils/agent_utils.py +++ b/tradingagents/agents/utils/agent_utils.py @@ -1,19 +1,24 @@ -from langchain_core.messages import BaseMessage, HumanMessage, ToolMessage, AIMessage -from typing import List -from typing import Annotated -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder -from langchain_core.messages import RemoveMessage -from langchain_core.tools import tool -from datetime import date, timedelta, datetime -import functools -import pandas as pd -import os -from dateutil.relativedelta import relativedelta -from langchain_openai import ChatOpenAI -import tradingagents.dataflows.interface as interface -from tradingagents.default_config import DEFAULT_CONFIG -from langchain_core.messages import HumanMessage +from langchain_core.messages import HumanMessage, RemoveMessage +# Import tools from separate utility files +from tradingagents.agents.utils.core_stock_tools import ( + get_stock_data +) +from tradingagents.agents.utils.technical_indicators_tools import ( + get_indicators +) +from tradingagents.agents.utils.fundamental_data_tools import ( + get_fundamentals, + get_balance_sheet, + get_cashflow, + get_income_statement +) +from tradingagents.agents.utils.news_data_tools import ( + get_news, + get_insider_sentiment, + get_insider_transactions, + get_global_news +) def create_msg_delete(): def delete_messages(state): @@ -31,389 +36,4 @@ def create_msg_delete(): return delete_messages -class Toolkit: - _config = DEFAULT_CONFIG.copy() - - @classmethod - def update_config(cls, config): - """Update the class-level configuration.""" - cls._config.update(config) - - @property - def config(self): - 
"""Access the configuration.""" - return self._config - - def __init__(self, config=None): - if config: - self.update_config(config) - - @staticmethod - @tool - def get_reddit_news( - curr_date: Annotated[str, "Date you want to get news for in yyyy-mm-dd format"], - ) -> str: - """ - Retrieve global news from Reddit within a specified time frame. - Args: - curr_date (str): Date you want to get news for in yyyy-mm-dd format - Returns: - str: A formatted dataframe containing the latest global news from Reddit in the specified time frame. - """ - - global_news_result = interface.get_reddit_global_news(curr_date, 7, 5) - - return global_news_result - - @staticmethod - @tool - def get_finnhub_news( - ticker: Annotated[ - str, - "Search query of a company, e.g. 'AAPL, TSM, etc.", - ], - start_date: Annotated[str, "Start date in yyyy-mm-dd format"], - end_date: Annotated[str, "End date in yyyy-mm-dd format"], - ): - """ - Retrieve the latest news about a given stock from Finnhub within a date range - Args: - ticker (str): Ticker of a company. e.g. AAPL, TSM - start_date (str): Start date in yyyy-mm-dd format - end_date (str): End date in yyyy-mm-dd format - Returns: - str: A formatted dataframe containing news about the company within the date range from start_date to end_date - """ - - end_date_str = end_date - - end_date = datetime.strptime(end_date, "%Y-%m-%d") - start_date = datetime.strptime(start_date, "%Y-%m-%d") - look_back_days = (end_date - start_date).days - - finnhub_news_result = interface.get_finnhub_news( - ticker, end_date_str, look_back_days - ) - - return finnhub_news_result - - @staticmethod - @tool - def get_reddit_stock_info( - ticker: Annotated[ - str, - "Ticker of a company. e.g. AAPL, TSM", - ], - curr_date: Annotated[str, "Current date you want to get news for"], - ) -> str: - """ - Retrieve the latest news about a given stock from Reddit, given the current date. - Args: - ticker (str): Ticker of a company. e.g. 
AAPL, TSM - curr_date (str): current date in yyyy-mm-dd format to get news for - Returns: - str: A formatted dataframe containing the latest news about the company on the given date - """ - - stock_news_results = interface.get_reddit_company_news(ticker, curr_date, 7, 5) - - return stock_news_results - - @staticmethod - @tool - def get_YFin_data( - symbol: Annotated[str, "ticker symbol of the company"], - start_date: Annotated[str, "Start date in yyyy-mm-dd format"], - end_date: Annotated[str, "End date in yyyy-mm-dd format"], - ) -> str: - """ - Retrieve the stock price data for a given ticker symbol from Yahoo Finance. - Args: - symbol (str): Ticker symbol of the company, e.g. AAPL, TSM - start_date (str): Start date in yyyy-mm-dd format - end_date (str): End date in yyyy-mm-dd format - Returns: - str: A formatted dataframe containing the stock price data for the specified ticker symbol in the specified date range. - """ - - result_data = interface.get_YFin_data(symbol, start_date, end_date) - - return result_data - - @staticmethod - @tool - def get_YFin_data_online( - symbol: Annotated[str, "ticker symbol of the company"], - start_date: Annotated[str, "Start date in yyyy-mm-dd format"], - end_date: Annotated[str, "End date in yyyy-mm-dd format"], - ) -> str: - """ - Retrieve the stock price data for a given ticker symbol from Yahoo Finance. - Args: - symbol (str): Ticker symbol of the company, e.g. AAPL, TSM - start_date (str): Start date in yyyy-mm-dd format - end_date (str): End date in yyyy-mm-dd format - Returns: - str: A formatted dataframe containing the stock price data for the specified ticker symbol in the specified date range. 
- """ - - result_data = interface.get_YFin_data_online(symbol, start_date, end_date) - - return result_data - - @staticmethod - @tool - def get_stockstats_indicators_report( - symbol: Annotated[str, "ticker symbol of the company"], - indicator: Annotated[ - str, "technical indicator to get the analysis and report of" - ], - curr_date: Annotated[ - str, "The current trading date you are trading on, YYYY-mm-dd" - ], - look_back_days: Annotated[int, "how many days to look back"] = 30, - ) -> str: - """ - Retrieve stock stats indicators for a given ticker symbol and indicator. - Args: - symbol (str): Ticker symbol of the company, e.g. AAPL, TSM - indicator (str): Technical indicator to get the analysis and report of - curr_date (str): The current trading date you are trading on, YYYY-mm-dd - look_back_days (int): How many days to look back, default is 30 - Returns: - str: A formatted dataframe containing the stock stats indicators for the specified ticker symbol and indicator. - """ - - result_stockstats = interface.get_stock_stats_indicators_window( - symbol, indicator, curr_date, look_back_days, False - ) - - return result_stockstats - - @staticmethod - @tool - def get_stockstats_indicators_report_online( - symbol: Annotated[str, "ticker symbol of the company"], - indicator: Annotated[ - str, "technical indicator to get the analysis and report of" - ], - curr_date: Annotated[ - str, "The current trading date you are trading on, YYYY-mm-dd" - ], - look_back_days: Annotated[int, "how many days to look back"] = 30, - ) -> str: - """ - Retrieve stock stats indicators for a given ticker symbol and indicator. - Args: - symbol (str): Ticker symbol of the company, e.g. 
AAPL, TSM - indicator (str): Technical indicator to get the analysis and report of - curr_date (str): The current trading date you are trading on, YYYY-mm-dd - look_back_days (int): How many days to look back, default is 30 - Returns: - str: A formatted dataframe containing the stock stats indicators for the specified ticker symbol and indicator. - """ - - result_stockstats = interface.get_stock_stats_indicators_window( - symbol, indicator, curr_date, look_back_days, True - ) - - return result_stockstats - - @staticmethod - @tool - def get_finnhub_company_insider_sentiment( - ticker: Annotated[str, "ticker symbol for the company"], - curr_date: Annotated[ - str, - "current date of you are trading at, yyyy-mm-dd", - ], - ): - """ - Retrieve insider sentiment information about a company (retrieved from public SEC information) for the past 30 days - Args: - ticker (str): ticker symbol of the company - curr_date (str): current date you are trading at, yyyy-mm-dd - Returns: - str: a report of the sentiment in the past 30 days starting at curr_date - """ - - data_sentiment = interface.get_finnhub_company_insider_sentiment( - ticker, curr_date, 30 - ) - - return data_sentiment - - @staticmethod - @tool - def get_finnhub_company_insider_transactions( - ticker: Annotated[str, "ticker symbol"], - curr_date: Annotated[ - str, - "current date you are trading at, yyyy-mm-dd", - ], - ): - """ - Retrieve insider transaction information about a company (retrieved from public SEC information) for the past 30 days - Args: - ticker (str): ticker symbol of the company - curr_date (str): current date you are trading at, yyyy-mm-dd - Returns: - str: a report of the company's insider transactions/trading information in the past 30 days - """ - - data_trans = interface.get_finnhub_company_insider_transactions( - ticker, curr_date, 30 - ) - - return data_trans - - @staticmethod - @tool - def get_simfin_balance_sheet( - ticker: Annotated[str, "ticker symbol"], - freq: Annotated[ - str, - 
"reporting frequency of the company's financial history: annual/quarterly", - ], - curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"], - ): - """ - Retrieve the most recent balance sheet of a company - Args: - ticker (str): ticker symbol of the company - freq (str): reporting frequency of the company's financial history: annual / quarterly - curr_date (str): current date you are trading at, yyyy-mm-dd - Returns: - str: a report of the company's most recent balance sheet - """ - - data_balance_sheet = interface.get_simfin_balance_sheet(ticker, freq, curr_date) - - return data_balance_sheet - - @staticmethod - @tool - def get_simfin_cashflow( - ticker: Annotated[str, "ticker symbol"], - freq: Annotated[ - str, - "reporting frequency of the company's financial history: annual/quarterly", - ], - curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"], - ): - """ - Retrieve the most recent cash flow statement of a company - Args: - ticker (str): ticker symbol of the company - freq (str): reporting frequency of the company's financial history: annual / quarterly - curr_date (str): current date you are trading at, yyyy-mm-dd - Returns: - str: a report of the company's most recent cash flow statement - """ - - data_cashflow = interface.get_simfin_cashflow(ticker, freq, curr_date) - - return data_cashflow - - @staticmethod - @tool - def get_simfin_income_stmt( - ticker: Annotated[str, "ticker symbol"], - freq: Annotated[ - str, - "reporting frequency of the company's financial history: annual/quarterly", - ], - curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"], - ): - """ - Retrieve the most recent income statement of a company - Args: - ticker (str): ticker symbol of the company - freq (str): reporting frequency of the company's financial history: annual / quarterly - curr_date (str): current date you are trading at, yyyy-mm-dd - Returns: - str: a report of the company's most recent income statement - """ - - 
data_income_stmt = interface.get_simfin_income_statements( - ticker, freq, curr_date - ) - - return data_income_stmt - - @staticmethod - @tool - def get_google_news( - query: Annotated[str, "Query to search with"], - curr_date: Annotated[str, "Curr date in yyyy-mm-dd format"], - ): - """ - Retrieve the latest news from Google News based on a query and date range. - Args: - query (str): Query to search with - curr_date (str): Current date in yyyy-mm-dd format - look_back_days (int): How many days to look back - Returns: - str: A formatted string containing the latest news from Google News based on the query and date range. - """ - - google_news_results = interface.get_google_news(query, curr_date, 7) - - return google_news_results - - @staticmethod - @tool - def get_stock_news_openai( - ticker: Annotated[str, "the company's ticker"], - curr_date: Annotated[str, "Current date in yyyy-mm-dd format"], - ): - """ - Retrieve the latest news about a given stock by using OpenAI's news API. - Args: - ticker (str): Ticker of a company. e.g. AAPL, TSM - curr_date (str): Current date in yyyy-mm-dd format - Returns: - str: A formatted string containing the latest news about the company on the given date. - """ - - openai_news_results = interface.get_stock_news_openai(ticker, curr_date) - - return openai_news_results - - @staticmethod - @tool - def get_global_news_openai( - curr_date: Annotated[str, "Current date in yyyy-mm-dd format"], - ): - """ - Retrieve the latest macroeconomics news on a given date using OpenAI's macroeconomics news API. - Args: - curr_date (str): Current date in yyyy-mm-dd format - Returns: - str: A formatted string containing the latest macroeconomic news on the given date. 
- """ - - openai_news_results = interface.get_global_news_openai(curr_date) - - return openai_news_results - - @staticmethod - @tool - def get_fundamentals_openai( - ticker: Annotated[str, "the company's ticker"], - curr_date: Annotated[str, "Current date in yyyy-mm-dd format"], - ): - """ - Retrieve the latest fundamental information about a given stock on a given date by using OpenAI's news API. - Args: - ticker (str): Ticker of a company. e.g. AAPL, TSM - curr_date (str): Current date in yyyy-mm-dd format - Returns: - str: A formatted string containing the latest fundamental information about the company on the given date. - """ - - openai_fundamentals_results = interface.get_fundamentals_openai( - ticker, curr_date - ) - - return openai_fundamentals_results + \ No newline at end of file diff --git a/tradingagents/agents/utils/core_stock_tools.py b/tradingagents/agents/utils/core_stock_tools.py new file mode 100644 index 00000000..02733c6c --- /dev/null +++ b/tradingagents/agents/utils/core_stock_tools.py @@ -0,0 +1,22 @@ +from langchain_core.tools import tool +from typing import Annotated +from tradingagents.dataflows.interface import route_to_vender + + +@tool +def get_stock_data( + symbol: Annotated[str, "ticker symbol of the company"], + start_date: Annotated[str, "Start date in yyyy-mm-dd format"], + end_date: Annotated[str, "End date in yyyy-mm-dd format"], +) -> str: + """ + Retrieve stock price data (OHLCV) for a given ticker symbol. + Uses the configured core_stock_apis vendor. + Args: + symbol (str): Ticker symbol of the company, e.g. AAPL, TSM + start_date (str): Start date in yyyy-mm-dd format + end_date (str): End date in yyyy-mm-dd format + Returns: + str: A formatted dataframe containing the stock price data for the specified ticker symbol in the specified date range. 
+ """ + return route_to_vender("get_stock_data", symbol, start_date, end_date) diff --git a/tradingagents/agents/utils/fundamental_data_tools.py b/tradingagents/agents/utils/fundamental_data_tools.py new file mode 100644 index 00000000..bbfd0153 --- /dev/null +++ b/tradingagents/agents/utils/fundamental_data_tools.py @@ -0,0 +1,77 @@ +from langchain_core.tools import tool +from typing import Annotated +from tradingagents.dataflows.interface import route_to_vender + + +@tool +def get_fundamentals( + ticker: Annotated[str, "ticker symbol"], + curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"], +) -> str: + """ + Retrieve comprehensive fundamental data for a given ticker symbol. + Uses the configured fundamental_data vendor. + Args: + ticker (str): Ticker symbol of the company + curr_date (str): Current date you are trading at, yyyy-mm-dd + Returns: + str: A formatted report containing comprehensive fundamental data + """ + return route_to_vender("get_fundamentals", ticker, curr_date) + + +@tool +def get_balance_sheet( + ticker: Annotated[str, "ticker symbol"], + freq: Annotated[str, "reporting frequency: annual/quarterly"] = "quarterly", + curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"] = None, +) -> str: + """ + Retrieve balance sheet data for a given ticker symbol. + Uses the configured fundamental_data vendor. 
+ Args: + ticker (str): Ticker symbol of the company + freq (str): Reporting frequency: annual/quarterly (default quarterly) + curr_date (str): Current date you are trading at, yyyy-mm-dd + Returns: + str: A formatted report containing balance sheet data + """ + return route_to_vender("get_balance_sheet", ticker, freq, curr_date) + + +@tool +def get_cashflow( + ticker: Annotated[str, "ticker symbol"], + freq: Annotated[str, "reporting frequency: annual/quarterly"] = "quarterly", + curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"] = None, +) -> str: + """ + Retrieve cash flow statement data for a given ticker symbol. + Uses the configured fundamental_data vendor. + Args: + ticker (str): Ticker symbol of the company + freq (str): Reporting frequency: annual/quarterly (default quarterly) + curr_date (str): Current date you are trading at, yyyy-mm-dd + Returns: + str: A formatted report containing cash flow statement data + """ + return route_to_vender("get_cashflow", ticker, freq, curr_date) + + +@tool +def get_income_statement( + ticker: Annotated[str, "ticker symbol"], + freq: Annotated[str, "reporting frequency: annual/quarterly"] = "quarterly", + curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"] = None, +) -> str: + """ + Retrieve income statement data for a given ticker symbol. + Uses the configured fundamental_data vendor. 
+ Args: + ticker (str): Ticker symbol of the company + freq (str): Reporting frequency: annual/quarterly (default quarterly) + curr_date (str): Current date you are trading at, yyyy-mm-dd + Returns: + str: A formatted report containing income statement data + """ + return route_to_vender("get_income_statement", ticker, freq, curr_date) \ No newline at end of file diff --git a/tradingagents/agents/utils/news_data_tools.py b/tradingagents/agents/utils/news_data_tools.py new file mode 100644 index 00000000..2949e904 --- /dev/null +++ b/tradingagents/agents/utils/news_data_tools.py @@ -0,0 +1,71 @@ +from langchain_core.tools import tool +from typing import Annotated +from tradingagents.dataflows.interface import route_to_vender + +@tool +def get_news( + query: Annotated[str, "Search query or ticker symbol"], + start_date: Annotated[str, "Start date in yyyy-mm-dd format"], + end_date: Annotated[str, "End date in yyyy-mm-dd format"], +) -> str: + """ + Retrieve news data for a given query or ticker symbol. + Uses the configured news_data vendor. + Args: + query (str): Search query or ticker symbol + start_date (str): Start date in yyyy-mm-dd format + end_date (str): End date in yyyy-mm-dd format + Returns: + str: A formatted string containing news data + """ + return route_to_vender("get_news", query, start_date, end_date) + +@tool +def get_global_news( + curr_date: Annotated[str, "Current date in yyyy-mm-dd format"], + look_back_days: Annotated[int, "Number of days to look back"] = 7, + limit: Annotated[int, "Maximum number of articles to return"] = 5, +) -> str: + """ + Retrieve global news data. + Uses the configured news_data vendor. 
+ Args: + curr_date (str): Current date in yyyy-mm-dd format + look_back_days (int): Number of days to look back (default 7) + limit (int): Maximum number of articles to return (default 5) + Returns: + str: A formatted string containing global news data + """ + return route_to_vender("get_global_news", curr_date, look_back_days, limit) + +@tool +def get_insider_sentiment( + ticker: Annotated[str, "ticker symbol for the company"], + curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"], +) -> str: + """ + Retrieve insider sentiment information about a company. + Uses the configured news_data vendor. + Args: + ticker (str): Ticker symbol of the company + curr_date (str): Current date you are trading at, yyyy-mm-dd + Returns: + str: A report of insider sentiment data + """ + return route_to_vender("get_insider_sentiment", ticker, curr_date) + +@tool +def get_insider_transactions( + ticker: Annotated[str, "ticker symbol"], + curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"], +) -> str: + """ + Retrieve insider transaction information about a company. + Uses the configured news_data vendor. 
+ Args: + ticker (str): Ticker symbol of the company + curr_date (str): Current date you are trading at, yyyy-mm-dd + Returns: + str: A report of insider transaction data + """ + return route_to_vender("get_insider_transactions", ticker, curr_date) diff --git a/tradingagents/agents/utils/technical_indicators_tools.py b/tradingagents/agents/utils/technical_indicators_tools.py new file mode 100644 index 00000000..86aba653 --- /dev/null +++ b/tradingagents/agents/utils/technical_indicators_tools.py @@ -0,0 +1,23 @@ +from langchain_core.tools import tool +from typing import Annotated +from tradingagents.dataflows.interface import route_to_vender + +@tool +def get_indicators( + symbol: Annotated[str, "ticker symbol of the company"], + indicator: Annotated[str, "technical indicator to get the analysis and report of"], + curr_date: Annotated[str, "The current trading date you are trading on, YYYY-mm-dd"], + look_back_days: Annotated[int, "how many days to look back"] = 30, +) -> str: + """ + Retrieve technical indicators for a given ticker symbol. + Uses the configured technical_indicators vendor. + Args: + symbol (str): Ticker symbol of the company, e.g. AAPL, TSM + indicator (str): Technical indicator to get the analysis and report of + curr_date (str): The current trading date you are trading on, YYYY-mm-dd + look_back_days (int): How many days to look back, default is 30 + Returns: + str: A formatted dataframe containing the technical indicators for the specified ticker symbol and indicator. 
+ """ + return route_to_vender("get_indicators", symbol, indicator, curr_date, look_back_days) \ No newline at end of file diff --git a/tradingagents/dataflows/__init__.py b/tradingagents/dataflows/__init__.py index b0c04d1d..e69de29b 100644 --- a/tradingagents/dataflows/__init__.py +++ b/tradingagents/dataflows/__init__.py @@ -1,46 +0,0 @@ -from .finnhub_utils import get_data_in_range -from .googlenews_utils import getNewsData -from .yfin_utils import YFinanceUtils -from .reddit_utils import fetch_top_from_category -from .stockstats_utils import StockstatsUtils -from .yfin_utils import YFinanceUtils - -from .interface import ( - # News and sentiment functions - get_finnhub_news, - get_finnhub_company_insider_sentiment, - get_finnhub_company_insider_transactions, - get_google_news, - get_reddit_global_news, - get_reddit_company_news, - # Financial statements functions - get_simfin_balance_sheet, - get_simfin_cashflow, - get_simfin_income_statements, - # Technical analysis functions - get_stock_stats_indicators_window, - get_stockstats_indicator, - # Market data functions - get_YFin_data_window, - get_YFin_data, -) - -__all__ = [ - # News and sentiment functions - "get_finnhub_news", - "get_finnhub_company_insider_sentiment", - "get_finnhub_company_insider_transactions", - "get_google_news", - "get_reddit_global_news", - "get_reddit_company_news", - # Financial statements functions - "get_simfin_balance_sheet", - "get_simfin_cashflow", - "get_simfin_income_statements", - # Technical analysis functions - "get_stock_stats_indicators_window", - "get_stockstats_indicator", - # Market data functions - "get_YFin_data_window", - "get_YFin_data", -] diff --git a/tradingagents/dataflows/finnhub_utils.py b/tradingagents/dataflows/finnhub_utils.py deleted file mode 100644 index e7c7103c..00000000 --- a/tradingagents/dataflows/finnhub_utils.py +++ /dev/null @@ -1,36 +0,0 @@ -import json -import os - - -def get_data_in_range(ticker, start_date, end_date, data_type, data_dir, 
period=None): - """ - Gets finnhub data saved and processed on disk. - Args: - start_date (str): Start date in YYYY-MM-DD format. - end_date (str): End date in YYYY-MM-DD format. - data_type (str): Type of data from finnhub to fetch. Can be insider_trans, SEC_filings, news_data, insider_senti, or fin_as_reported. - data_dir (str): Directory where the data is saved. - period (str): Default to none, if there is a period specified, should be annual or quarterly. - """ - - if period: - data_path = os.path.join( - data_dir, - "finnhub_data", - data_type, - f"{ticker}_{period}_data_formatted.json", - ) - else: - data_path = os.path.join( - data_dir, "finnhub_data", data_type, f"{ticker}_data_formatted.json" - ) - - data = open(data_path, "r") - data = json.load(data) - - # filter keys (date, str in format YYYY-MM-DD) by the date range (str, str in format YYYY-MM-DD) - filtered_data = {} - for key, value in data.items(): - if start_date <= key <= end_date and len(value) > 0: - filtered_data[key] = value - return filtered_data diff --git a/tradingagents/dataflows/google.py b/tradingagents/dataflows/google.py new file mode 100644 index 00000000..3fe20f3c --- /dev/null +++ b/tradingagents/dataflows/google.py @@ -0,0 +1,30 @@ +from typing import Annotated +from datetime import datetime +from dateutil.relativedelta import relativedelta +from .googlenews_utils import getNewsData + + +def get_google_news( + query: Annotated[str, "Query to search with"], + curr_date: Annotated[str, "Curr date in yyyy-mm-dd format"], + look_back_days: Annotated[int, "how many days to look back"], +) -> str: + query = query.replace(" ", "+") + + start_date = datetime.strptime(curr_date, "%Y-%m-%d") + before = start_date - relativedelta(days=look_back_days) + before = before.strftime("%Y-%m-%d") + + news_results = getNewsData(query, before, curr_date) + + news_str = "" + + for news in news_results: + news_str += ( + f"### {news['title']} (source: {news['source']}) \n\n{news['snippet']}\n\n" + ) + + 
if len(news_results) == 0: + return "" + + return f"## {query} Google News, from {before} to {curr_date}:\n\n{news_str}" \ No newline at end of file diff --git a/tradingagents/dataflows/interface.py b/tradingagents/dataflows/interface.py index 7fffbb4f..2b6b92de 100644 --- a/tradingagents/dataflows/interface.py +++ b/tradingagents/dataflows/interface.py @@ -1,807 +1,160 @@ -from typing import Annotated, Dict -from .reddit_utils import fetch_top_from_category -from .yfin_utils import * -from .stockstats_utils import * -from .googlenews_utils import * -from .finnhub_utils import get_data_in_range -from dateutil.relativedelta import relativedelta -from concurrent.futures import ThreadPoolExecutor -from datetime import datetime -import json -import os -import pandas as pd -from tqdm import tqdm -import yfinance as yf -from openai import OpenAI -from .config import get_config, set_config, DATA_DIR - - -def get_finnhub_news( - ticker: Annotated[ - str, - "Search query of a company's, e.g. 'AAPL, TSM, etc.", - ], - curr_date: Annotated[str, "Current date in yyyy-mm-dd format"], - look_back_days: Annotated[int, "how many days to look back"], -): - """ - Retrieve news about a company within a time frame - - Args - ticker (str): ticker for the company you are interested in - start_date (str): Start date in yyyy-mm-dd format - end_date (str): End date in yyyy-mm-dd format - Returns - str: dataframe containing the news of the company in the time frame - - """ - - start_date = datetime.strptime(curr_date, "%Y-%m-%d") - before = start_date - relativedelta(days=look_back_days) - before = before.strftime("%Y-%m-%d") - - result = get_data_in_range(ticker, before, curr_date, "news_data", DATA_DIR) - - if len(result) == 0: - return "" - - combined_result = "" - for day, data in result.items(): - if len(data) == 0: - continue - for entry in data: - current_news = ( - "### " + entry["headline"] + f" ({day})" + "\n" + entry["summary"] - ) - combined_result += current_news + "\n\n" - - 
def get_finnhub_company_insider_sentiment(
    ticker: Annotated[str, "ticker symbol for the company"],
    curr_date: Annotated[
        str,
        "current date of you are trading at, yyyy-mm-dd",
    ],
    look_back_days: Annotated[int, "number of days to look back"],
):
    """Build a markdown report of Finnhub insider sentiment for a window.

    The window covers ``look_back_days`` days ending at ``curr_date``.
    Returns "" when no data is on disk for the window.
    """
    window_end = datetime.strptime(curr_date, "%Y-%m-%d")
    before = (window_end - relativedelta(days=look_back_days)).strftime("%Y-%m-%d")

    data = get_data_in_range(ticker, before, curr_date, "insider_senti", DATA_DIR)
    if not data:
        return ""

    # Entries are plain dicts (unhashable), so de-duplicate with a list scan.
    seen_dicts = []
    sections = []
    for senti_list in data.values():
        for entry in senti_list:
            if entry in seen_dicts:
                continue
            seen_dicts.append(entry)
            sections.append(
                f"### {entry['year']}-{entry['month']}:\nChange: {entry['change']}\nMonthly Share Purchase Ratio: {entry['mspr']}\n\n"
            )

    return (
        f"## {ticker} Insider Sentiment Data for {before} to {curr_date}:\n"
        + "".join(sections)
        + "The change field refers to the net buying/selling from all insiders' transactions. The mspr field refers to monthly share purchase ratio."
    )
def get_simfin_balance_sheet(
    ticker: Annotated[str, "ticker symbol"],
    freq: Annotated[
        str,
        "reporting frequency of the company's financial history: annual / quarterly",
    ],
    curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"],
):
    """Return the most recent SimFin balance sheet published on or before curr_date.

    Reads the local SimFin CSV dump for the given frequency and returns a
    markdown report, or "" (after printing a notice) when nothing qualifies.
    """
    csv_path = os.path.join(
        DATA_DIR,
        "fundamental_data",
        "simfin_data_all",
        "balance_sheet",
        "companies",
        "us",
        f"us-balance-{freq}.csv",
    )
    df = pd.read_csv(csv_path, sep=";")

    # Normalise timestamps to midnight UTC so date-only comparison is sound.
    for col in ("Report Date", "Publish Date"):
        df[col] = pd.to_datetime(df[col], utc=True).dt.normalize()
    cutoff = pd.to_datetime(curr_date, utc=True).normalize()

    candidates = df[(df["Ticker"] == ticker) & (df["Publish Date"] <= cutoff)]
    if candidates.empty:
        print("No balance sheet available before the given current date.")
        return ""

    # Latest published statement wins; SimFinId is an internal identifier.
    latest_balance_sheet = candidates.loc[candidates["Publish Date"].idxmax()].drop(
        "SimFinId"
    )

    return (
        f"## {freq} balance sheet for {ticker} released on {str(latest_balance_sheet['Publish Date'])[0:10]}: \n"
        + str(latest_balance_sheet)
        + "\n\nThis includes metadata like reporting dates and currency, share details, and a breakdown of assets, liabilities, and equity. Assets are grouped as current (liquid items like cash and receivables) and noncurrent (long-term investments and property). Liabilities are split between short-term obligations and long-term debts, while equity reflects shareholder funds such as paid-in capital and retained earnings. Together, these components ensure that total assets equal the sum of liabilities and equity."
    )
def get_simfin_income_statements(
    ticker: Annotated[str, "ticker symbol"],
    freq: Annotated[
        str,
        "reporting frequency of the company's financial history: annual / quarterly",
    ],
    curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"],
):
    """Return the most recent SimFin income statement published on or before curr_date.

    Reads the local SimFin CSV dump for the given frequency and returns a
    markdown report, or "" (after printing a notice) when nothing qualifies.
    """
    csv_path = os.path.join(
        DATA_DIR,
        "fundamental_data",
        "simfin_data_all",
        "income_statements",
        "companies",
        "us",
        f"us-income-{freq}.csv",
    )
    df = pd.read_csv(csv_path, sep=";")

    # Normalise timestamps to midnight UTC so date-only comparison is sound.
    for col in ("Report Date", "Publish Date"):
        df[col] = pd.to_datetime(df[col], utc=True).dt.normalize()
    cutoff = pd.to_datetime(curr_date, utc=True).normalize()

    candidates = df[(df["Ticker"] == ticker) & (df["Publish Date"] <= cutoff)]
    if candidates.empty:
        print("No income statement available before the given current date.")
        return ""

    # Latest published statement wins; SimFinId is an internal identifier.
    latest_income = candidates.loc[candidates["Publish Date"].idxmax()].drop("SimFinId")

    return (
        f"## {freq} income statement for {ticker} released on {str(latest_income['Publish Date'])[0:10]}: \n"
        + str(latest_income)
        + "\n\nThis includes metadata like reporting dates and currency, share details, and a comprehensive breakdown of the company's financial performance. Starting with Revenue, it shows Cost of Revenue and resulting Gross Profit. Operating Expenses are detailed, including SG&A, R&D, and Depreciation. The statement then shows Operating Income, followed by non-operating items and Interest Expense, leading to Pretax Income. After accounting for Income Tax and any Extraordinary items, it concludes with Net Income, representing the company's bottom-line profit or loss for the period."
    )
def get_reddit_company_news(
    ticker: Annotated[str, "ticker symbol of the company"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    look_back_days: Annotated[int, "how many days to look back"],
    max_limit_per_day: Annotated[int, "Maximum number of news per day"],
) -> str:
    """Collect top reddit posts about *ticker* over a look-back window.

    Iterates day by day from ``start_date - look_back_days`` up to
    ``start_date`` and fetches posts from the local reddit dump.
    Returns "" when nothing was found, otherwise a markdown digest.
    """
    end_dt = datetime.strptime(start_date, "%Y-%m-%d")
    before = (end_dt - relativedelta(days=look_back_days)).strftime("%Y-%m-%d")

    posts = []
    cursor = datetime.strptime(before, "%Y-%m-%d")
    pbar = tqdm(
        desc=f"Getting Company News for {ticker} on {end_dt}",
        total=(end_dt - cursor).days + 1,
    )
    while cursor <= end_dt:
        posts.extend(
            fetch_top_from_category(
                "company_news",
                cursor.strftime("%Y-%m-%d"),
                max_limit_per_day,
                ticker,
                data_path=os.path.join(DATA_DIR, "reddit_data"),
            )
        )
        cursor += relativedelta(days=1)
        pbar.update(1)
    pbar.close()

    if not posts:
        return ""

    news_str = "".join(
        f"### {post['title']}\n\n"
        if post["content"] == ""
        else f"### {post['title']}\n\n{post['content']}\n\n"
        for post in posts
    )

    # NOTE: the trailing bound interpolates the post-loop datetime object
    # (one day past start_date), exactly as the original did.
    return f"##{ticker} News Reddit, from {before} to {cursor}:\n\n{news_str}"
" - "Tips: Prone to noise in choppy markets; use alongside longer averages for filtering false signals." - ), - # MACD Related - "macd": ( - "MACD: Computes momentum via differences of EMAs. " - "Usage: Look for crossovers and divergence as signals of trend changes. " - "Tips: Confirm with other indicators in low-volatility or sideways markets." - ), - "macds": ( - "MACD Signal: An EMA smoothing of the MACD line. " - "Usage: Use crossovers with the MACD line to trigger trades. " - "Tips: Should be part of a broader strategy to avoid false positives." - ), - "macdh": ( - "MACD Histogram: Shows the gap between the MACD line and its signal. " - "Usage: Visualize momentum strength and spot divergence early. " - "Tips: Can be volatile; complement with additional filters in fast-moving markets." - ), - # Momentum Indicators - "rsi": ( - "RSI: Measures momentum to flag overbought/oversold conditions. " - "Usage: Apply 70/30 thresholds and watch for divergence to signal reversals. " - "Tips: In strong trends, RSI may remain extreme; always cross-check with trend analysis." - ), - # Volatility Indicators - "boll": ( - "Bollinger Middle: A 20 SMA serving as the basis for Bollinger Bands. " - "Usage: Acts as a dynamic benchmark for price movement. " - "Tips: Combine with the upper and lower bands to effectively spot breakouts or reversals." - ), - "boll_ub": ( - "Bollinger Upper Band: Typically 2 standard deviations above the middle line. " - "Usage: Signals potential overbought conditions and breakout zones. " - "Tips: Confirm signals with other tools; prices may ride the band in strong trends." - ), - "boll_lb": ( - "Bollinger Lower Band: Typically 2 standard deviations below the middle line. " - "Usage: Indicates potential oversold conditions. " - "Tips: Use additional analysis to avoid false reversal signals." - ), - "atr": ( - "ATR: Averages true range to measure volatility. " - "Usage: Set stop-loss levels and adjust position sizes based on current market volatility. 
from typing import Annotated

# Import from vendor-specific modules
from .local import (
    get_YFin_data,
    get_finnhub_news,
    get_finnhub_company_insider_sentiment,
    get_finnhub_company_insider_transactions,
    get_simfin_balance_sheet,
    get_simfin_cashflow,
    get_simfin_income_statements,
    get_reddit_global_news,
    get_reddit_company_news,
)
from .yahoo_finance import get_YFin_data_online, get_stock_stats_indicators_window
from .google import get_google_news
from .openai import get_stock_news_openai, get_global_news_openai, get_fundamentals_openai

# Configuration and routing logic
from .config import get_config

# Tools organized by category; each tool name is routed through
# VENDOR_METHODS below to one or more vendor implementations.
TOOLS_CATEGORIES = {
    "core_stock_apis": {
        "description": "OHLCV stock price data",
        "tools": [
            "get_stock_data"
        ]
    },
    "technical_indicators": {
        "description": "Technical analysis indicators",
        "tools": [
            "get_indicators"
        ]
    },
    "fundamental_data": {
        "description": "Company fundamentals",
        "tools": [
            "get_fundamentals",
            "get_balance_sheet",
            "get_cashflow",
            "get_income_statement"
        ]
    },
    "news_data": {
        "description": "News (public/insiders, original/processed)",
        "tools": [
            "get_news",
            "get_global_news",
            "get_insider_sentiment",
            "get_insider_transactions",
        ]
    }
}

# All vendors a config may reference.
VENDOR_LIST = [
    "local",
    "yahoo_finance",
    "openai",
    "google"
]

# Mapping of methods to their vendor-specific implementations.
# A vendor entry may be a single callable or a list of callables that are
# all invoked and whose results are concatenated.
VENDOR_METHODS = {
    # core_stock_apis
    "get_stock_data": {
        "yahoo_finance": get_YFin_data_online,
        "local": get_YFin_data,
    },
    # technical_indicators
    "get_indicators": {
        "yahoo_finance": get_stock_stats_indicators_window,
        "local": get_stock_stats_indicators_window
    },
    # fundamental_data
    "get_fundamentals": {
        "openai": get_fundamentals_openai
    },
    "get_balance_sheet": {
        "local": get_simfin_balance_sheet,
    },
    "get_cashflow": {
        "local": get_simfin_cashflow,
    },
    "get_income_statement": {
        "local": get_simfin_income_statements,
    },
    # news_data
    "get_news": {
        "openai": get_stock_news_openai,
        "google": get_google_news,
        "local": [get_finnhub_news, get_reddit_company_news, get_google_news],
    },
    "get_global_news": {
        "openai": get_global_news_openai,
        "local": get_reddit_global_news
    },
    "get_insider_sentiment": {
        "local": get_finnhub_company_insider_sentiment
    },
    "get_insider_transactions": {
        "local": get_finnhub_company_insider_transactions,
    },
}


def get_category_for_method(method: str) -> str:
    """Get the category that contains the specified method.

    Raises:
        ValueError: if *method* is not listed in any TOOLS_CATEGORIES entry.
    """
    for category, info in TOOLS_CATEGORIES.items():
        if method in info["tools"]:
            return category
    raise ValueError(f"Method '{method}' not found in any category")


def get_vendor(category: str, method: str = None) -> str:
    """Get the configured vendor for a data category or specific tool method.

    Tool-level configuration ("tool_vendors") takes precedence over the
    category-level "data_vendors" map; "default" is returned when neither
    is configured (and is rejected later by route_to_vender).
    """
    config = get_config()

    # Check tool-level configuration first (if method provided)
    if method:
        tool_vendors = config.get("tool_vendors", {})
        if method in tool_vendors:
            return tool_vendors[method]

    # Fall back to category-level configuration
    return config.get("data_vendors", {}).get(category, "default")


def route_to_vender(method: str, *args, **kwargs):
    """Route a method call to the configured vendor implementation(s).

    The configured vendor string may be comma-separated; every listed
    vendor's implementation(s) are invoked. Individual failures are
    reported and skipped so the remaining implementations still run.

    Returns:
        The single result when exactly one implementation succeeded,
        "" when none succeeded, otherwise the newline-joined string
        concatenation of all results.

    Raises:
        ValueError: for an unknown method or an unsupported vendor.
    """
    category = get_category_for_method(method)
    if method not in VENDOR_METHODS:
        raise ValueError(f"Method '{method}' not supported")

    vendor_config = get_vendor(category, method)
    vendors = [v.strip() for v in vendor_config.split(',')]

    # Collect all implementations to run, expanding list-valued entries.
    methods_to_run = []
    for vendor in vendors:
        if vendor not in VENDOR_METHODS[method]:
            raise ValueError(f"Vendor '{vendor}' not supported for method '{method}'")
        vendor_impl = VENDOR_METHODS[method][vendor]
        if isinstance(vendor_impl, list):
            methods_to_run.extend(vendor_impl)
        else:
            methods_to_run.append(vendor_impl)

    # Run all implementations, keeping whatever succeeds.
    results = []
    for impl_func in methods_to_run:
        try:
            results.append(impl_func(*args, **kwargs))
        except Exception as e:
            # Log error but continue with other implementations.
            print(f"Warning: {impl_func.__name__} failed: {e}")

    if not results:
        # Previously this fell through to ''.join([]) silently; make the
        # total-failure case explicit for the caller's logs.
        print(f"Warning: no implementation of '{method}' produced a result")
        return ""
    if len(results) == 1:
        return results[0]
    # Convert all results to strings and concatenate.
    return '\n'.join(str(result) for result in results)


# Correctly spelled alias; the misspelled public name is kept so existing
# callers keep working.
route_to_vendor = route_to_vender
def get_YFin_data_online(
    symbol: Annotated[str, "ticker symbol of the company"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
):
    """Download OHLCV history for *symbol* from Yahoo Finance as CSV text.

    Both dates must be yyyy-mm-dd (validated up front). Returns a plain
    message string when Yahoo has no rows for the range.
    """
    # Fail fast on malformed dates before touching the network.
    datetime.strptime(start_date, "%Y-%m-%d")
    datetime.strptime(end_date, "%Y-%m-%d")

    history = yf.Ticker(symbol.upper()).history(start=start_date, end=end_date)
    if history.empty:
        return (
            f"No data found for symbol '{symbol}' between {start_date} and {end_date}"
        )

    # Strip timezone info from the index for cleaner output.
    if history.index.tz is not None:
        history.index = history.index.tz_localize(None)

    # Round price columns to 2 decimals for display.
    for col in ("Open", "High", "Low", "Close", "Adj Close"):
        if col in history.columns:
            history[col] = history[col].round(2)

    header = (
        f"# Stock data for {symbol.upper()} from {start_date} to {end_date}\n"
        f"# Total records: {len(history)}\n"
        f"# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
    )
    return header + history.to_csv()
Make sure you only get the data posted during that period.", - } - ], - } - ], - text={"format": {"type": "text"}}, - reasoning={}, - tools=[ - { - "type": "web_search_preview", - "user_location": {"type": "approximate"}, - "search_context_size": "low", - } - ], - temperature=1, - max_output_tokens=4096, - top_p=1, - store=True, - ) - - return response.output[1].content[0].text - - -def get_global_news_openai(curr_date): - config = get_config() - client = OpenAI(base_url=config["backend_url"]) - - response = client.responses.create( - model=config["quick_think_llm"], - input=[ - { - "role": "system", - "content": [ - { - "type": "input_text", - "text": f"Can you search global or macroeconomics news from 7 days before {curr_date} to {curr_date} that would be informative for trading purposes? Make sure you only get the data posted during that period.", - } - ], - } - ], - text={"format": {"type": "text"}}, - reasoning={}, - tools=[ - { - "type": "web_search_preview", - "user_location": {"type": "approximate"}, - "search_context_size": "low", - } - ], - temperature=1, - max_output_tokens=4096, - top_p=1, - store=True, - ) - - return response.output[1].content[0].text - - -def get_fundamentals_openai(ticker, curr_date): - config = get_config() - client = OpenAI(base_url=config["backend_url"]) - - response = client.responses.create( - model=config["quick_think_llm"], - input=[ - { - "role": "system", - "content": [ - { - "type": "input_text", - "text": f"Can you search Fundamental for discussions on {ticker} during of the month before {curr_date} to the month of {curr_date}. Make sure you only get the data posted during that period. 
def get_YFin_data_window(
    symbol: Annotated[str, "ticker symbol of the company"],
    curr_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    look_back_days: Annotated[int, "how many days to look back"],
) -> str:
    """Return a markdown table of cached Yahoo Finance rows for a window.

    The window covers ``look_back_days`` days ending at ``curr_date``.
    Reads the on-disk CSV dump under DATA_DIR (2015-01-01 .. 2025-03-25).
    """
    # Compute the inclusive window start.
    date_obj = datetime.strptime(curr_date, "%Y-%m-%d")
    start_date = (date_obj - relativedelta(days=look_back_days)).strftime("%Y-%m-%d")

    data = pd.read_csv(
        os.path.join(
            DATA_DIR,
            f"market_data/price_data/{symbol}-YFin-data-2015-01-01-2025-03-25.csv",
        )
    )

    # Compare on the date part only; the CSV's Date column carries a time.
    data["DateOnly"] = data["Date"].str[:10]
    filtered_data = data[
        (data["DateOnly"] >= start_date) & (data["DateOnly"] <= curr_date)
    ].drop("DateOnly", axis=1)

    # Render the full frame without pandas' row/column truncation.
    with pd.option_context(
        "display.max_rows", None, "display.max_columns", None, "display.width", None
    ):
        df_string = filtered_data.to_string()

    return (
        f"## Raw Market Data for {symbol} from {start_date} to {curr_date}:\n\n"
        + df_string
    )


def get_YFin_data(
    symbol: Annotated[str, "ticker symbol of the company"],
    start_date: Annotated[str, "Start date in yyyy-mm-dd format"],
    end_date: Annotated[str, "End date in yyyy-mm-dd format"],
) -> pd.DataFrame:
    """Return cached Yahoo Finance rows for *symbol* in [start_date, end_date].

    Note: returns a DataFrame (the previous ``-> str`` annotation was wrong).

    Raises:
        Exception: when end_date falls outside the cached 2015-01-01 to
            2025-03-25 range.
    """
    # Validate the range BEFORE the expensive CSV read.
    if end_date > "2025-03-25":
        raise Exception(
            f"Get_YFin_Data: {end_date} is outside of the data range of 2015-01-01 to 2025-03-25"
        )

    data = pd.read_csv(
        os.path.join(
            DATA_DIR,
            f"market_data/price_data/{symbol}-YFin-data-2015-01-01-2025-03-25.csv",
        )
    )

    # Compare on the date part only; the CSV's Date column carries a time.
    data["DateOnly"] = data["Date"].str[:10]
    filtered_data = data[
        (data["DateOnly"] >= start_date) & (data["DateOnly"] <= end_date)
    ].drop("DateOnly", axis=1)

    # Reindex so callers see a clean 0..n-1 index.
    return filtered_data.reset_index(drop=True)
combined_result += current_news + "\n\n" + + return f"## {query} News, from {start_date} to {end_date}:\n" + str(combined_result) + + +def get_finnhub_company_insider_sentiment( + ticker: Annotated[str, "ticker symbol for the company"], + curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"], +): + """ + Retrieve insider sentiment about a company (retrieved from public SEC information) for the past 15 days + Args: + ticker (str): ticker symbol of the company + curr_date (str): current date you are trading on, yyyy-mm-dd + Returns: + str: a report of the sentiment in the past 15 days starting at curr_date + """ + + date_obj = datetime.strptime(curr_date, "%Y-%m-%d") + before = date_obj - relativedelta(days=15) # Default 15 days lookback + before = before.strftime("%Y-%m-%d") + + data = get_data_in_range(ticker, before, curr_date, "insider_senti", DATA_DIR) + + if len(data) == 0: + return "" + + result_str = "" + seen_dicts = [] + for date, senti_list in data.items(): + for entry in senti_list: + if entry not in seen_dicts: + result_str += f"### {entry['year']}-{entry['month']}:\nChange: {entry['change']}\nMonthly Share Purchase Ratio: {entry['mspr']}\n\n" + seen_dicts.append(entry) + + return ( + f"## {ticker} Insider Sentiment Data for {before} to {curr_date}:\n" + + result_str + + "The change field refers to the net buying/selling from all insiders' transactions. The mspr field refers to monthly share purchase ratio." 
+ ) + + +def get_finnhub_company_insider_transactions( + ticker: Annotated[str, "ticker symbol"], + curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"], +): + """ + Retrieve insider transaction information about a company (retrieved from public SEC information) for the past 15 days + Args: + ticker (str): ticker symbol of the company + curr_date (str): current date you are trading at, yyyy-mm-dd + Returns: + str: a report of the company's insider transaction/trading information in the past 15 days + """ + + date_obj = datetime.strptime(curr_date, "%Y-%m-%d") + before = date_obj - relativedelta(days=15) # Default 15 days lookback + before = before.strftime("%Y-%m-%d") + + data = get_data_in_range(ticker, before, curr_date, "insider_trans", DATA_DIR) + + if len(data) == 0: + return "" + + result_str = "" + + seen_dicts = [] + for date, senti_list in data.items(): + for entry in senti_list: + if entry not in seen_dicts: + result_str += f"### Filing Date: {entry['filingDate']}, {entry['name']}:\nChange:{entry['change']}\nShares: {entry['share']}\nTransaction Price: {entry['transactionPrice']}\nTransaction Code: {entry['transactionCode']}\n\n" + seen_dicts.append(entry) + + return ( + f"## {ticker} insider transactions from {before} to {curr_date}:\n" + + result_str + + "The change field reflects the variation in share count—here a negative number indicates a reduction in holdings—while share specifies the total number of shares involved. The transactionPrice denotes the per-share price at which the trade was executed, and transactionDate marks when the transaction occurred. The name field identifies the insider making the trade, and transactionCode (e.g., S for sale) clarifies the nature of the transaction. FilingDate records when the transaction was officially reported, and the unique id links to the specific SEC filing, as indicated by the source.
Additionally, the symbol ties the transaction to a particular company, isDerivative flags whether the trade involves derivative securities, and currency notes the currency context of the transaction." + ) + +def get_data_in_range(ticker, start_date, end_date, data_type, data_dir, period=None): + """ + Gets finnhub data saved and processed on disk. + Args: + start_date (str): Start date in YYYY-MM-DD format. + end_date (str): End date in YYYY-MM-DD format. + data_type (str): Type of data from finnhub to fetch. Can be insider_trans, SEC_filings, news_data, insider_senti, or fin_as_reported. + data_dir (str): Directory where the data is saved. + period (str): Default to none, if there is a period specified, should be annual or quarterly. + """ + + if period: + data_path = os.path.join( + data_dir, + "finnhub_data", + data_type, + f"{ticker}_{period}_data_formatted.json", + ) + else: + data_path = os.path.join( + data_dir, "finnhub_data", data_type, f"{ticker}_data_formatted.json" + ) + + data = open(data_path, "r") + data = json.load(data) + + # filter keys (date, str in format YYYY-MM-DD) by the date range (str, str in format YYYY-MM-DD) + filtered_data = {} + for key, value in data.items(): + if start_date <= key <= end_date and len(value) > 0: + filtered_data[key] = value + return filtered_data + +def get_simfin_balance_sheet( + ticker: Annotated[str, "ticker symbol"], + freq: Annotated[ + str, + "reporting frequency of the company's financial history: annual / quarterly", + ], + curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"], +): + data_path = os.path.join( + DATA_DIR, + "fundamental_data", + "simfin_data_all", + "balance_sheet", + "companies", + "us", + f"us-balance-{freq}.csv", + ) + df = pd.read_csv(data_path, sep=";") + + # Convert date strings to datetime objects and remove any time components + df["Report Date"] = pd.to_datetime(df["Report Date"], utc=True).dt.normalize() + df["Publish Date"] = pd.to_datetime(df["Publish Date"], 
utc=True).dt.normalize() + + # Convert the current date to datetime and normalize + curr_date_dt = pd.to_datetime(curr_date, utc=True).normalize() + + # Filter the DataFrame for the given ticker and for reports that were published on or before the current date + filtered_df = df[(df["Ticker"] == ticker) & (df["Publish Date"] <= curr_date_dt)] + + # Check if there are any available reports; if not, return a notification + if filtered_df.empty: + print("No balance sheet available before the given current date.") + return "" + + # Get the most recent balance sheet by selecting the row with the latest Publish Date + latest_balance_sheet = filtered_df.loc[filtered_df["Publish Date"].idxmax()] + + # drop the SimFinID column + latest_balance_sheet = latest_balance_sheet.drop("SimFinId") + + return ( + f"## {freq} balance sheet for {ticker} released on {str(latest_balance_sheet['Publish Date'])[0:10]}: \n" + + str(latest_balance_sheet) + + "\n\nThis includes metadata like reporting dates and currency, share details, and a breakdown of assets, liabilities, and equity. Assets are grouped as current (liquid items like cash and receivables) and noncurrent (long-term investments and property). Liabilities are split between short-term obligations and long-term debts, while equity reflects shareholder funds such as paid-in capital and retained earnings. Together, these components ensure that total assets equal the sum of liabilities and equity." 
+ ) + + +def get_simfin_cashflow( + ticker: Annotated[str, "ticker symbol"], + freq: Annotated[ + str, + "reporting frequency of the company's financial history: annual / quarterly", + ], + curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"], +): + data_path = os.path.join( + DATA_DIR, + "fundamental_data", + "simfin_data_all", + "cash_flow", + "companies", + "us", + f"us-cashflow-{freq}.csv", + ) + df = pd.read_csv(data_path, sep=";") + + # Convert date strings to datetime objects and remove any time components + df["Report Date"] = pd.to_datetime(df["Report Date"], utc=True).dt.normalize() + df["Publish Date"] = pd.to_datetime(df["Publish Date"], utc=True).dt.normalize() + + # Convert the current date to datetime and normalize + curr_date_dt = pd.to_datetime(curr_date, utc=True).normalize() + + # Filter the DataFrame for the given ticker and for reports that were published on or before the current date + filtered_df = df[(df["Ticker"] == ticker) & (df["Publish Date"] <= curr_date_dt)] + + # Check if there are any available reports; if not, return a notification + if filtered_df.empty: + print("No cash flow statement available before the given current date.") + return "" + + # Get the most recent cash flow statement by selecting the row with the latest Publish Date + latest_cash_flow = filtered_df.loc[filtered_df["Publish Date"].idxmax()] + + # drop the SimFinID column + latest_cash_flow = latest_cash_flow.drop("SimFinId") + + return ( + f"## {freq} cash flow statement for {ticker} released on {str(latest_cash_flow['Publish Date'])[0:10]}: \n" + + str(latest_cash_flow) + + "\n\nThis includes metadata like reporting dates and currency, share details, and a breakdown of cash movements. Operating activities show cash generated from core business operations, including net income adjustments for non-cash items and working capital changes. Investing activities cover asset acquisitions/disposals and investments. 
Financing activities include debt transactions, equity issuances/repurchases, and dividend payments. The net change in cash represents the overall increase or decrease in the company's cash position during the reporting period." + ) + + +def get_simfin_income_statements( + ticker: Annotated[str, "ticker symbol"], + freq: Annotated[ + str, + "reporting frequency of the company's financial history: annual / quarterly", + ], + curr_date: Annotated[str, "current date you are trading at, yyyy-mm-dd"], +): + data_path = os.path.join( + DATA_DIR, + "fundamental_data", + "simfin_data_all", + "income_statements", + "companies", + "us", + f"us-income-{freq}.csv", + ) + df = pd.read_csv(data_path, sep=";") + + # Convert date strings to datetime objects and remove any time components + df["Report Date"] = pd.to_datetime(df["Report Date"], utc=True).dt.normalize() + df["Publish Date"] = pd.to_datetime(df["Publish Date"], utc=True).dt.normalize() + + # Convert the current date to datetime and normalize + curr_date_dt = pd.to_datetime(curr_date, utc=True).normalize() + + # Filter the DataFrame for the given ticker and for reports that were published on or before the current date + filtered_df = df[(df["Ticker"] == ticker) & (df["Publish Date"] <= curr_date_dt)] + + # Check if there are any available reports; if not, return a notification + if filtered_df.empty: + print("No income statement available before the given current date.") + return "" + + # Get the most recent income statement by selecting the row with the latest Publish Date + latest_income = filtered_df.loc[filtered_df["Publish Date"].idxmax()] + + # drop the SimFinID column + latest_income = latest_income.drop("SimFinId") + + return ( + f"## {freq} income statement for {ticker} released on {str(latest_income['Publish Date'])[0:10]}: \n" + + str(latest_income) + + "\n\nThis includes metadata like reporting dates and currency, share details, and a comprehensive breakdown of the company's financial performance. 
Starting with Revenue, it shows Cost of Revenue and resulting Gross Profit. Operating Expenses are detailed, including SG&A, R&D, and Depreciation. The statement then shows Operating Income, followed by non-operating items and Interest Expense, leading to Pretax Income. After accounting for Income Tax and any Extraordinary items, it concludes with Net Income, representing the company's bottom-line profit or loss for the period." + ) + + +def get_reddit_global_news( + curr_date: Annotated[str, "Current date in yyyy-mm-dd format"], + look_back_days: Annotated[int, "Number of days to look back"] = 7, + limit: Annotated[int, "Maximum number of articles to return"] = 5, +) -> str: + """ + Retrieve the latest top reddit news + Args: + curr_date: Current date in yyyy-mm-dd format + look_back_days: Number of days to look back (default 7) + limit: Maximum number of articles to return (default 5) + Returns: + str: A formatted string containing the latest news articles posts on reddit + """ + + curr_date_dt = datetime.strptime(curr_date, "%Y-%m-%d") + before = curr_date_dt - relativedelta(days=look_back_days) + before = before.strftime("%Y-%m-%d") + + posts = [] + # iterate from before to curr_date + curr_iter_date = datetime.strptime(before, "%Y-%m-%d") + + total_iterations = (curr_date_dt - curr_iter_date).days + 1 + pbar = tqdm(desc=f"Getting Global News on {curr_date}", total=total_iterations) + + while curr_iter_date <= curr_date_dt: + curr_date_str = curr_iter_date.strftime("%Y-%m-%d") + fetch_result = fetch_top_from_category( + "global_news", + curr_date_str, + limit, + data_path=os.path.join(DATA_DIR, "reddit_data"), + ) + posts.extend(fetch_result) + curr_iter_date += relativedelta(days=1) + pbar.update(1) + + pbar.close() + + if len(posts) == 0: + return "" + + news_str = "" + for post in posts: + if post["content"] == "": + news_str += f"### {post['title']}\n\n" + else: + news_str += f"### {post['title']}\n\n{post['content']}\n\n" + + return f"## Global News 
Reddit, from {before} to {curr_date}:\n{news_str}" + + +def get_reddit_company_news( + query: Annotated[str, "Search query or ticker symbol"], + start_date: Annotated[str, "Start date in yyyy-mm-dd format"], + end_date: Annotated[str, "End date in yyyy-mm-dd format"], +) -> str: + """ + Retrieve the latest top reddit news + Args: + query: Search query or ticker symbol + start_date: Start date in yyyy-mm-dd format + end_date: End date in yyyy-mm-dd format + Returns: + str: A formatted string containing news articles posts on reddit + """ + + start_date_dt = datetime.strptime(start_date, "%Y-%m-%d") + end_date_dt = datetime.strptime(end_date, "%Y-%m-%d") + + posts = [] + # iterate from start_date to end_date + curr_date = start_date_dt + + total_iterations = (end_date_dt - curr_date).days + 1 + pbar = tqdm( + desc=f"Getting Company News for {query} from {start_date} to {end_date}", + total=total_iterations, + ) + + while curr_date <= end_date_dt: + curr_date_str = curr_date.strftime("%Y-%m-%d") + fetch_result = fetch_top_from_category( + "company_news", + curr_date_str, + 10, # max limit per day + query, + data_path=os.path.join(DATA_DIR, "reddit_data"), + ) + posts.extend(fetch_result) + curr_date += relativedelta(days=1) + + pbar.update(1) + + pbar.close() + + if len(posts) == 0: + return "" + + news_str = "" + for post in posts: + if post["content"] == "": + news_str += f"### {post['title']}\n\n" + else: + news_str += f"### {post['title']}\n\n{post['content']}\n\n" + + return f"##{query} News Reddit, from {start_date} to {end_date}:\n\n{news_str}" \ No newline at end of file diff --git a/tradingagents/dataflows/openai.py b/tradingagents/dataflows/openai.py new file mode 100644 index 00000000..91a2258b --- /dev/null +++ b/tradingagents/dataflows/openai.py @@ -0,0 +1,107 @@ +from openai import OpenAI +from .config import get_config + + +def get_stock_news_openai(query, start_date, end_date): + config = get_config() + client = OpenAI(base_url=config["backend_url"]) + 
+ response = client.responses.create( + model=config["quick_think_llm"], + input=[ + { + "role": "system", + "content": [ + { + "type": "input_text", + "text": f"Can you search Social Media for {query} from {start_date} to {end_date}? Make sure you only get the data posted during that period.", + } + ], + } + ], + text={"format": {"type": "text"}}, + reasoning={}, + tools=[ + { + "type": "web_search_preview", + "user_location": {"type": "approximate"}, + "search_context_size": "low", + } + ], + temperature=1, + max_output_tokens=4096, + top_p=1, + store=True, + ) + + return response.output[1].content[0].text + + +def get_global_news_openai(curr_date, look_back_days=7, limit=5): + config = get_config() + client = OpenAI(base_url=config["backend_url"]) + + response = client.responses.create( + model=config["quick_think_llm"], + input=[ + { + "role": "system", + "content": [ + { + "type": "input_text", + "text": f"Can you search global or macroeconomics news from {look_back_days} days before {curr_date} to {curr_date} that would be informative for trading purposes? Make sure you only get the data posted during that period. Limit the results to {limit} articles.", + } + ], + } + ], + text={"format": {"type": "text"}}, + reasoning={}, + tools=[ + { + "type": "web_search_preview", + "user_location": {"type": "approximate"}, + "search_context_size": "low", + } + ], + temperature=1, + max_output_tokens=4096, + top_p=1, + store=True, + ) + + return response.output[1].content[0].text + + +def get_fundamentals_openai(ticker, curr_date): + config = get_config() + client = OpenAI(base_url=config["backend_url"]) + + response = client.responses.create( + model=config["quick_think_llm"], + input=[ + { + "role": "system", + "content": [ + { + "type": "input_text", + "text": f"Can you search Fundamental for discussions on {ticker} during of the month before {curr_date} to the month of {curr_date}. Make sure you only get the data posted during that period. 
List as a table, with PE/PS/Cash flow/ etc", + } + ], + } + ], + text={"format": {"type": "text"}}, + reasoning={}, + tools=[ + { + "type": "web_search_preview", + "user_location": {"type": "approximate"}, + "search_context_size": "low", + } + ], + temperature=1, + max_output_tokens=4096, + top_p=1, + store=True, + ) + + return response.output[1].content[0].text \ No newline at end of file diff --git a/tradingagents/dataflows/stockstats_utils.py b/tradingagents/dataflows/stockstats_utils.py index 78ffb220..ba547dc5 100644 --- a/tradingagents/dataflows/stockstats_utils.py +++ b/tradingagents/dataflows/stockstats_utils.py @@ -16,15 +16,12 @@ class StockstatsUtils: curr_date: Annotated[ str, "curr date for retrieving stock price data, YYYY-mm-dd" ], - data_dir: Annotated[ - str, - "directory where the stock data is stored.", - ], - online: Annotated[ - bool, - "whether to use online tools to fetch data or offline tools. If True, will use online tools.", - ] = False, ): + # Get config and set up data directory path + config = get_config() + data_dir = os.path.join(config["DATA_DIR"], "market_data", "price_data") + online = config["data_vendors"]["technical_indicators"] != "local" + df = None data = None @@ -50,7 +47,6 @@ class StockstatsUtils: end_date = end_date.strftime("%Y-%m-%d") # Get config and ensure cache directory exists - config = get_config() os.makedirs(config["data_cache_dir"], exist_ok=True) data_file = os.path.join( diff --git a/tradingagents/dataflows/yahoo_finance.py b/tradingagents/dataflows/yahoo_finance.py new file mode 100644 index 00000000..8189cc4d --- /dev/null +++ b/tradingagents/dataflows/yahoo_finance.py @@ -0,0 +1,186 @@ +from typing import Annotated +from datetime import datetime +from dateutil.relativedelta import relativedelta +import pandas as pd +import yfinance as yf +import os +from .config import DATA_DIR +from .stockstats_utils import StockstatsUtils + +def get_YFin_data_online( + symbol: Annotated[str, "ticker symbol of the 
company"], + start_date: Annotated[str, "Start date in yyyy-mm-dd format"], + end_date: Annotated[str, "End date in yyyy-mm-dd format"], +): + + datetime.strptime(start_date, "%Y-%m-%d") + datetime.strptime(end_date, "%Y-%m-%d") + + # Create ticker object + ticker = yf.Ticker(symbol.upper()) + + # Fetch historical data for the specified date range + data = ticker.history(start=start_date, end=end_date) + + # Check if data is empty + if data.empty: + return ( + f"No data found for symbol '{symbol}' between {start_date} and {end_date}" + ) + + # Remove timezone info from index for cleaner output + if data.index.tz is not None: + data.index = data.index.tz_localize(None) + + # Round numerical values to 2 decimal places for cleaner display + numeric_columns = ["Open", "High", "Low", "Close", "Adj Close"] + for col in numeric_columns: + if col in data.columns: + data[col] = data[col].round(2) + + # Convert DataFrame to CSV string + csv_string = data.to_csv() + + # Add header information + header = f"# Stock data for {symbol.upper()} from {start_date} to {end_date}\n" + header += f"# Total records: {len(data)}\n" + header += f"# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n" + + return header + csv_string + +def get_stock_stats_indicators_window( + symbol: Annotated[str, "ticker symbol of the company"], + indicator: Annotated[str, "technical indicator to get the analysis and report of"], + curr_date: Annotated[ + str, "The current trading date you are trading on, YYYY-mm-dd" + ], + look_back_days: Annotated[int, "how many days to look back"], +) -> str: + + best_ind_params = { + # Moving Averages + "close_50_sma": ( + "50 SMA: A medium-term trend indicator. " + "Usage: Identify trend direction and serve as dynamic support/resistance. " + "Tips: It lags price; combine with faster indicators for timely signals." + ), + "close_200_sma": ( + "200 SMA: A long-term trend benchmark. 
" + "Usage: Confirm overall market trend and identify golden/death cross setups. " + "Tips: It reacts slowly; best for strategic trend confirmation rather than frequent trading entries." + ), + "close_10_ema": ( + "10 EMA: A responsive short-term average. " + "Usage: Capture quick shifts in momentum and potential entry points. " + "Tips: Prone to noise in choppy markets; use alongside longer averages for filtering false signals." + ), + # MACD Related + "macd": ( + "MACD: Computes momentum via differences of EMAs. " + "Usage: Look for crossovers and divergence as signals of trend changes. " + "Tips: Confirm with other indicators in low-volatility or sideways markets." + ), + "macds": ( + "MACD Signal: An EMA smoothing of the MACD line. " + "Usage: Use crossovers with the MACD line to trigger trades. " + "Tips: Should be part of a broader strategy to avoid false positives." + ), + "macdh": ( + "MACD Histogram: Shows the gap between the MACD line and its signal. " + "Usage: Visualize momentum strength and spot divergence early. " + "Tips: Can be volatile; complement with additional filters in fast-moving markets." + ), + # Momentum Indicators + "rsi": ( + "RSI: Measures momentum to flag overbought/oversold conditions. " + "Usage: Apply 70/30 thresholds and watch for divergence to signal reversals. " + "Tips: In strong trends, RSI may remain extreme; always cross-check with trend analysis." + ), + # Volatility Indicators + "boll": ( + "Bollinger Middle: A 20 SMA serving as the basis for Bollinger Bands. " + "Usage: Acts as a dynamic benchmark for price movement. " + "Tips: Combine with the upper and lower bands to effectively spot breakouts or reversals." + ), + "boll_ub": ( + "Bollinger Upper Band: Typically 2 standard deviations above the middle line. " + "Usage: Signals potential overbought conditions and breakout zones. " + "Tips: Confirm signals with other tools; prices may ride the band in strong trends." 
+ ), + "boll_lb": ( + "Bollinger Lower Band: Typically 2 standard deviations below the middle line. " + "Usage: Indicates potential oversold conditions. " + "Tips: Use additional analysis to avoid false reversal signals." + ), + "atr": ( + "ATR: Averages true range to measure volatility. " + "Usage: Set stop-loss levels and adjust position sizes based on current market volatility. " + "Tips: It's a reactive measure, so use it as part of a broader risk management strategy." + ), + # Volume-Based Indicators + "vwma": ( + "VWMA: A moving average weighted by volume. " + "Usage: Confirm trends by integrating price action with volume data. " + "Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses." + ), + "mfi": ( + "MFI: The Money Flow Index is a momentum indicator that uses both price and volume to measure buying and selling pressure. " + "Usage: Identify overbought (>80) or oversold (<20) conditions and confirm the strength of trends or reversals. " + "Tips: Use alongside RSI or MACD to confirm signals; divergence between price and MFI can indicate potential reversals." + ), + } + + if indicator not in best_ind_params: + raise ValueError( + f"Indicator {indicator} is not supported. 
Please choose from: {list(best_ind_params.keys())}" + ) + + end_date = curr_date + curr_date_dt = datetime.strptime(curr_date, "%Y-%m-%d") + before = curr_date_dt - relativedelta(days=look_back_days) + + # online gathering only + ind_string = "" + while curr_date_dt >= before: + indicator_value = get_stockstats_indicator( + symbol, indicator, curr_date_dt.strftime("%Y-%m-%d") + ) + + ind_string += f"{curr_date_dt.strftime('%Y-%m-%d')}: {indicator_value}\n" + + curr_date_dt = curr_date_dt - relativedelta(days=1) + + result_str = ( + f"## {indicator} values from {before.strftime('%Y-%m-%d')} to {end_date}:\n\n" + + ind_string + + "\n\n" + + best_ind_params.get(indicator, "No description available.") + ) + + return result_str + + +def get_stockstats_indicator( + symbol: Annotated[str, "ticker symbol of the company"], + indicator: Annotated[str, "technical indicator to get the analysis and report of"], + curr_date: Annotated[ + str, "The current trading date you are trading on, YYYY-mm-dd" + ], +) -> str: + + curr_date_dt = datetime.strptime(curr_date, "%Y-%m-%d") + curr_date = curr_date_dt.strftime("%Y-%m-%d") + + try: + indicator_value = StockstatsUtils.get_stock_stats( + symbol, + indicator, + curr_date, + ) + except Exception as e: + print( + f"Error getting stockstats indicator data for indicator {indicator} on {curr_date}: {e}" + ) + return "" + + return str(indicator_value) \ No newline at end of file diff --git a/tradingagents/default_config.py b/tradingagents/default_config.py index 089e9c24..6134d2c1 100644 --- a/tradingagents/default_config.py +++ b/tradingagents/default_config.py @@ -17,6 +17,17 @@ DEFAULT_CONFIG = { "max_debate_rounds": 1, "max_risk_discuss_rounds": 1, "max_recur_limit": 100, - # Tool settings - "online_tools": True, + # Data vendor configuration + # Category-level configuration (default for all tools in category) + "data_vendors": { + "core_stock_apis": "yahoo_finance", # OHLCV data: yahoo_finance, local + "technical_indicators": 
"yahoo_finance", # Technical indicators: yahoo_finance, local + "fundamental_data": "openai", # Fundamentals: openai, local + "news_data": "openai,google", # News: openai, google, local + }, + # Tool-level configuration (takes precedence over category-level) + "tool_vendors": { + # Example: "get_stock_data": "alpha_vantage", # Override category default + # Example: "get_news": "openai", # Override category default + }, } diff --git a/tradingagents/graph/setup.py b/tradingagents/graph/setup.py index 847c429f..b270ffc0 100644 --- a/tradingagents/graph/setup.py +++ b/tradingagents/graph/setup.py @@ -7,7 +7,6 @@ from langgraph.prebuilt import ToolNode from tradingagents.agents import * from tradingagents.agents.utils.agent_states import AgentState -from tradingagents.agents.utils.agent_utils import Toolkit from .conditional_logic import ConditionalLogic @@ -19,7 +18,6 @@ class GraphSetup: self, quick_thinking_llm: ChatOpenAI, deep_thinking_llm: ChatOpenAI, - toolkit: Toolkit, tool_nodes: Dict[str, ToolNode], bull_memory, bear_memory, @@ -31,7 +29,6 @@ class GraphSetup: """Initialize with required components.""" self.quick_thinking_llm = quick_thinking_llm self.deep_thinking_llm = deep_thinking_llm - self.toolkit = toolkit self.tool_nodes = tool_nodes self.bull_memory = bull_memory self.bear_memory = bear_memory @@ -62,28 +59,28 @@ class GraphSetup: if "market" in selected_analysts: analyst_nodes["market"] = create_market_analyst( - self.quick_thinking_llm, self.toolkit + self.quick_thinking_llm ) delete_nodes["market"] = create_msg_delete() tool_nodes["market"] = self.tool_nodes["market"] if "social" in selected_analysts: analyst_nodes["social"] = create_social_media_analyst( - self.quick_thinking_llm, self.toolkit + self.quick_thinking_llm ) delete_nodes["social"] = create_msg_delete() tool_nodes["social"] = self.tool_nodes["social"] if "news" in selected_analysts: analyst_nodes["news"] = create_news_analyst( - self.quick_thinking_llm, self.toolkit + 
self.quick_thinking_llm ) delete_nodes["news"] = create_msg_delete() tool_nodes["news"] = self.tool_nodes["news"] if "fundamentals" in selected_analysts: analyst_nodes["fundamentals"] = create_fundamentals_analyst( - self.quick_thinking_llm, self.toolkit + self.quick_thinking_llm ) delete_nodes["fundamentals"] = create_msg_delete() tool_nodes["fundamentals"] = self.tool_nodes["fundamentals"] diff --git a/tradingagents/graph/trading_graph.py b/tradingagents/graph/trading_graph.py index 80a29e53..40cdff75 100644 --- a/tradingagents/graph/trading_graph.py +++ b/tradingagents/graph/trading_graph.py @@ -20,7 +20,21 @@ from tradingagents.agents.utils.agent_states import ( InvestDebateState, RiskDebateState, ) -from tradingagents.dataflows.interface import set_config +from tradingagents.dataflows.config import set_config + +# Import the new abstract tool methods from agent_utils +from tradingagents.agents.utils.agent_utils import ( + get_stock_data, + get_indicators, + get_fundamentals, + get_balance_sheet, + get_cashflow, + get_income_statement, + get_news, + get_insider_sentiment, + get_insider_transactions, + get_global_news +) from .conditional_logic import ConditionalLogic from .setup import GraphSetup @@ -70,8 +84,6 @@ class TradingAgentsGraph: else: raise ValueError(f"Unsupported LLM provider: {self.config['llm_provider']}") - self.toolkit = Toolkit(config=self.config) - # Initialize memories self.bull_memory = FinancialSituationMemory("bull_memory", self.config) self.bear_memory = FinancialSituationMemory("bear_memory", self.config) @@ -87,7 +99,6 @@ class TradingAgentsGraph: self.graph_setup = GraphSetup( self.quick_thinking_llm, self.deep_thinking_llm, - self.toolkit, self.tool_nodes, self.bull_memory, self.bear_memory, @@ -110,46 +121,38 @@ class TradingAgentsGraph: self.graph = self.graph_setup.setup_graph(selected_analysts) def _create_tool_nodes(self) -> Dict[str, ToolNode]: - """Create tool nodes for different data sources.""" + """Create tool nodes for 
different data sources using abstract methods.""" return { "market": ToolNode( [ - # online tools - self.toolkit.get_YFin_data_online, - self.toolkit.get_stockstats_indicators_report_online, - # offline tools - self.toolkit.get_YFin_data, - self.toolkit.get_stockstats_indicators_report, + # Core stock data tools + get_stock_data, + # Technical indicators + get_indicators, ] ), "social": ToolNode( [ - # online tools - self.toolkit.get_stock_news_openai, - # offline tools - self.toolkit.get_reddit_stock_info, + # News tools for social media analysis + get_news, ] ), "news": ToolNode( [ - # online tools - self.toolkit.get_global_news_openai, - self.toolkit.get_google_news, - # offline tools - self.toolkit.get_finnhub_news, - self.toolkit.get_reddit_news, + # News and insider information + get_news, + get_global_news, + get_insider_sentiment, + get_insider_transactions, ] ), "fundamentals": ToolNode( [ - # online tools - self.toolkit.get_fundamentals_openai, - # offline tools - self.toolkit.get_finnhub_company_insider_sentiment, - self.toolkit.get_finnhub_company_insider_transactions, - self.toolkit.get_simfin_balance_sheet, - self.toolkit.get_simfin_cashflow, - self.toolkit.get_simfin_income_stmt, + # Fundamental analysis tools + get_fundamentals, + get_balance_sheet, + get_cashflow, + get_income_statement, ] ), } From 0ab323c2c6575d474575a0a623654dec126c55ce Mon Sep 17 00:00:00 2001 From: luohy15 Date: Fri, 26 Sep 2025 22:57:50 +0800 Subject: [PATCH 14/26] Add Alpha Vantage API integration as primary data provider MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Replace FinnHub with Alpha Vantage API in README documentation - Implement comprehensive Alpha Vantage modules: - Stock data (daily OHLCV with date filtering) - Technical indicators (SMA, EMA, MACD, RSI, Bollinger Bands, ATR) - Fundamental data (overview, balance sheet, cashflow, income statement) - News and sentiment data with insider transactions - Update news 
analyst tools to use ticker-based news search - Integrate Alpha Vantage vendor methods into interface routing - Maintain backward compatibility with existing vendor system πŸ€– Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --- README.md | 4 +- tradingagents/agents/analysts/news_analyst.py | 2 +- .../agents/analysts/social_media_analyst.py | 2 +- tradingagents/agents/utils/news_data_tools.py | 8 +- tradingagents/dataflows/alpha_vantage.py | 5 + .../dataflows/alpha_vantage_common.py | 103 +++++++++ .../dataflows/alpha_vantage_fundamentals.py | 77 +++++++ .../dataflows/alpha_vantage_indicator.py | 218 ++++++++++++++++++ tradingagents/dataflows/alpha_vantage_news.py | 43 ++++ .../dataflows/alpha_vantage_stock.py | 38 +++ tradingagents/dataflows/interface.py | 23 +- tradingagents/dataflows/stockstats_utils.py | 5 +- tradingagents/dataflows/yahoo_finance.py | 2 - tradingagents/default_config.py | 8 +- 14 files changed, 519 insertions(+), 19 deletions(-) create mode 100644 tradingagents/dataflows/alpha_vantage.py create mode 100644 tradingagents/dataflows/alpha_vantage_common.py create mode 100644 tradingagents/dataflows/alpha_vantage_fundamentals.py create mode 100644 tradingagents/dataflows/alpha_vantage_indicator.py create mode 100644 tradingagents/dataflows/alpha_vantage_news.py create mode 100644 tradingagents/dataflows/alpha_vantage_stock.py diff --git a/README.md b/README.md index 31286500..cef678f6 100644 --- a/README.md +++ b/README.md @@ -114,9 +114,9 @@ pip install -r requirements.txt ### Required APIs -You will also need the FinnHub API for financial data. All of our code is implemented with the free tier. +You will also need the Alpha Vantage API for financial data. The free tier supports 25 API calls per day. ```bash -export FINNHUB_API_KEY=$YOUR_FINNHUB_API_KEY +export ALPHA_VANTAGE_API_KEY=$YOUR_ALPHA_VANTAGE_API_KEY ``` You will need the OpenAI API for all the agents. 
diff --git a/tradingagents/agents/analysts/news_analyst.py b/tradingagents/agents/analysts/news_analyst.py index 2e227c93..03b4fae4 100644 --- a/tradingagents/agents/analysts/news_analyst.py +++ b/tradingagents/agents/analysts/news_analyst.py @@ -16,7 +16,7 @@ def create_news_analyst(llm): ] system_message = ( - "You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. Use the get_news_data tool with different news_type parameters: 'global_news' for macroeconomic news, 'company_news' for company-specific news, 'reddit_global' for social sentiment, 'google' for general news searches. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." + "You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. Use the available tools: get_news(query, start_date, end_date) for company-specific or targeted news searches, and get_global_news(curr_date, look_back_days, limit) for broader macroeconomic news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." 
+ """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""" ) diff --git a/tradingagents/agents/analysts/social_media_analyst.py b/tradingagents/agents/analysts/social_media_analyst.py index b1556fb6..b25712d7 100644 --- a/tradingagents/agents/analysts/social_media_analyst.py +++ b/tradingagents/agents/analysts/social_media_analyst.py @@ -16,7 +16,7 @@ def create_social_media_analyst(llm): ] system_message = ( - "You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. Use get_news_data with news_type parameters: 'company_news' for company-specific news, 'reddit_stock' for Reddit discussions about the stock. Try to look at all sources possible from social media to sentiment to news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." + "You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. 
You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. Use the get_news(query, start_date, end_date) tool to search for company-specific news and social media discussions. Try to look at all sources possible from social media to sentiment to news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""", ) diff --git a/tradingagents/agents/utils/news_data_tools.py b/tradingagents/agents/utils/news_data_tools.py index 2949e904..e42b4dfe 100644 --- a/tradingagents/agents/utils/news_data_tools.py +++ b/tradingagents/agents/utils/news_data_tools.py @@ -4,21 +4,21 @@ from tradingagents.dataflows.interface import route_to_vender @tool def get_news( - query: Annotated[str, "Search query or ticker symbol"], + ticker: Annotated[str, "Ticker symbol"], start_date: Annotated[str, "Start date in yyyy-mm-dd format"], end_date: Annotated[str, "End date in yyyy-mm-dd format"], ) -> str: """ - Retrieve news data for a given query or ticker symbol. + Retrieve news data for a given ticker symbol. Uses the configured news_data vendor. 
Args: - query (str): Search query or ticker symbol + ticker (str): Ticker symbol start_date (str): Start date in yyyy-mm-dd format end_date (str): End date in yyyy-mm-dd format Returns: str: A formatted string containing news data """ - return route_to_vender("get_news", query, start_date, end_date) + return route_to_vender("get_news", ticker, start_date, end_date) @tool def get_global_news( diff --git a/tradingagents/dataflows/alpha_vantage.py b/tradingagents/dataflows/alpha_vantage.py new file mode 100644 index 00000000..c5177c29 --- /dev/null +++ b/tradingagents/dataflows/alpha_vantage.py @@ -0,0 +1,5 @@ +# Import functions from specialized modules +from .alpha_vantage_stock import get_stock +from .alpha_vantage_indicator import get_indicator +from .alpha_vantage_fundamentals import get_fundamentals, get_balance_sheet, get_cashflow, get_income_statement +from .alpha_vantage_news import get_news, get_insider_transactions \ No newline at end of file diff --git a/tradingagents/dataflows/alpha_vantage_common.py b/tradingagents/dataflows/alpha_vantage_common.py new file mode 100644 index 00000000..aeeb30ef --- /dev/null +++ b/tradingagents/dataflows/alpha_vantage_common.py @@ -0,0 +1,103 @@ +import os +import requests +import pandas as pd +from datetime import datetime +from io import StringIO + +API_BASE_URL = "https://www.alphavantage.co/query" + +def get_api_key() -> str: + """Retrieve the API key for Alpha Vantage from environment variables.""" + api_key = os.getenv("ALPHA_VANTAGE_API_KEY") + if not api_key: + raise ValueError("ALPHA_VANTAGE_API_KEY environment variable is not set.") + return api_key + +def format_datetime_for_api(date_input) -> str: + """Convert various date formats to YYYYMMDDTHHMM format required by Alpha Vantage API.""" + if isinstance(date_input, str): + # If already in correct format, return as-is + if len(date_input) == 13 and 'T' in date_input: + return date_input + # Try to parse common date formats + try: + dt = 
datetime.strptime(date_input, "%Y-%m-%d") + return dt.strftime("%Y%m%dT0000") + except ValueError: + try: + dt = datetime.strptime(date_input, "%Y-%m-%d %H:%M") + return dt.strftime("%Y%m%dT%H%M") + except ValueError: + raise ValueError(f"Unsupported date format: {date_input}") + elif isinstance(date_input, datetime): + return date_input.strftime("%Y%m%dT%H%M") + else: + raise ValueError(f"Date must be string or datetime object, got {type(date_input)}") + +def _make_api_request(function_name: str, params: dict) -> dict | str: + """Helper function to make API requests and handle responses. + + """ + # Create a copy of params to avoid modifying the original + api_params = params.copy() + api_params.update({ + "function": function_name, + "apikey": get_api_key(), + "source": "alphavangtagemcp" + }) + + # Handle entitlement parameter if present in params or global variable + current_entitlement = globals().get('_current_entitlement') + entitlement = api_params.get("entitlement") or current_entitlement + + if entitlement: + api_params["entitlement"] = entitlement + elif "entitlement" in api_params: + # Remove entitlement if it's None or empty + api_params.pop("entitlement", None) + + response = requests.get(API_BASE_URL, params=api_params) + response.raise_for_status() + + response_text = response.text + + return response_text + + + +def _filter_csv_by_date_range(csv_data: str, start_date: str, end_date: str) -> str: + """ + Filter CSV data to include only rows within the specified date range. 
+ + Args: + csv_data: CSV string from Alpha Vantage API + start_date: Start date in yyyy-mm-dd format + end_date: End date in yyyy-mm-dd format + + Returns: + Filtered CSV string + """ + if not csv_data or csv_data.strip() == "": + return csv_data + + try: + # Parse CSV data + df = pd.read_csv(StringIO(csv_data)) + + # Assume the first column is the date column (timestamp) + date_col = df.columns[0] + df[date_col] = pd.to_datetime(df[date_col]) + + # Filter by date range + start_dt = pd.to_datetime(start_date) + end_dt = pd.to_datetime(end_date) + + filtered_df = df[(df[date_col] >= start_dt) & (df[date_col] <= end_dt)] + + # Convert back to CSV string + return filtered_df.to_csv(index=False) + + except Exception as e: + # If filtering fails, return original data with a warning + print(f"Warning: Failed to filter CSV data by date range: {e}") + return csv_data diff --git a/tradingagents/dataflows/alpha_vantage_fundamentals.py b/tradingagents/dataflows/alpha_vantage_fundamentals.py new file mode 100644 index 00000000..8b92faa6 --- /dev/null +++ b/tradingagents/dataflows/alpha_vantage_fundamentals.py @@ -0,0 +1,77 @@ +from .alpha_vantage_common import _make_api_request + + +def get_fundamentals(ticker: str, curr_date: str = None) -> str: + """ + Retrieve comprehensive fundamental data for a given ticker symbol using Alpha Vantage. + + Args: + ticker (str): Ticker symbol of the company + curr_date (str): Current date you are trading at, yyyy-mm-dd (not used for Alpha Vantage) + + Returns: + str: Company overview data including financial ratios and key metrics + """ + params = { + "symbol": ticker, + } + + return _make_api_request("OVERVIEW", params) + + +def get_balance_sheet(ticker: str, freq: str = "quarterly", curr_date: str = None) -> str: + """ + Retrieve balance sheet data for a given ticker symbol using Alpha Vantage. 
+ + Args: + ticker (str): Ticker symbol of the company + freq (str): Reporting frequency: annual/quarterly (default quarterly) - not used for Alpha Vantage + curr_date (str): Current date you are trading at, yyyy-mm-dd (not used for Alpha Vantage) + + Returns: + str: Balance sheet data with normalized fields + """ + params = { + "symbol": ticker, + } + + return _make_api_request("BALANCE_SHEET", params) + + +def get_cashflow(ticker: str, freq: str = "quarterly", curr_date: str = None) -> str: + """ + Retrieve cash flow statement data for a given ticker symbol using Alpha Vantage. + + Args: + ticker (str): Ticker symbol of the company + freq (str): Reporting frequency: annual/quarterly (default quarterly) - not used for Alpha Vantage + curr_date (str): Current date you are trading at, yyyy-mm-dd (not used for Alpha Vantage) + + Returns: + str: Cash flow statement data with normalized fields + """ + params = { + "symbol": ticker, + } + + return _make_api_request("CASH_FLOW", params) + + +def get_income_statement(ticker: str, freq: str = "quarterly", curr_date: str = None) -> str: + """ + Retrieve income statement data for a given ticker symbol using Alpha Vantage. 
+ + Args: + ticker (str): Ticker symbol of the company + freq (str): Reporting frequency: annual/quarterly (default quarterly) - not used for Alpha Vantage + curr_date (str): Current date you are trading at, yyyy-mm-dd (not used for Alpha Vantage) + + Returns: + str: Income statement data with normalized fields + """ + params = { + "symbol": ticker, + } + + return _make_api_request("INCOME_STATEMENT", params) + diff --git a/tradingagents/dataflows/alpha_vantage_indicator.py b/tradingagents/dataflows/alpha_vantage_indicator.py new file mode 100644 index 00000000..c696091c --- /dev/null +++ b/tradingagents/dataflows/alpha_vantage_indicator.py @@ -0,0 +1,218 @@ +from .alpha_vantage_common import _make_api_request + +def get_indicator( + symbol: str, + indicator: str, + curr_date: str, + look_back_days: int, + interval: str = "daily", + time_period: int = 14, + series_type: str = "close" +) -> str: + """ + Returns Alpha Vantage technical indicator values over a time window. + + Args: + symbol: ticker symbol of the company + indicator: technical indicator to get the analysis and report of + curr_date: The current trading date you are trading on, YYYY-mm-dd + look_back_days: how many days to look back + interval: Time interval (daily, weekly, monthly) + time_period: Number of data points for calculation + series_type: The desired price type (close, open, high, low) + + Returns: + String containing indicator values and description + """ + from datetime import datetime + from dateutil.relativedelta import relativedelta + + supported_indicators = { + "close_50_sma": ("50 SMA", "close"), + "close_200_sma": ("200 SMA", "close"), + "close_10_ema": ("10 EMA", "close"), + "macd": ("MACD", "close"), + "macds": ("MACD Signal", "close"), + "macdh": ("MACD Histogram", "close"), + "rsi": ("RSI", "close"), + "boll": ("Bollinger Middle", "close"), + "boll_ub": ("Bollinger Upper Band", "close"), + "boll_lb": ("Bollinger Lower Band", "close"), + "atr": ("ATR", None), + "vwma": ("VWMA", 
"close") + } + + indicator_descriptions = { + "close_50_sma": "50 SMA: A medium-term trend indicator. Usage: Identify trend direction and serve as dynamic support/resistance. Tips: It lags price; combine with faster indicators for timely signals.", + "close_200_sma": "200 SMA: A long-term trend benchmark. Usage: Confirm overall market trend and identify golden/death cross setups. Tips: It reacts slowly; best for strategic trend confirmation rather than frequent trading entries.", + "close_10_ema": "10 EMA: A responsive short-term average. Usage: Capture quick shifts in momentum and potential entry points. Tips: Prone to noise in choppy markets; use alongside longer averages for filtering false signals.", + "macd": "MACD: Computes momentum via differences of EMAs. Usage: Look for crossovers and divergence as signals of trend changes. Tips: Confirm with other indicators in low-volatility or sideways markets.", + "macds": "MACD Signal: An EMA smoothing of the MACD line. Usage: Use crossovers with the MACD line to trigger trades. Tips: Should be part of a broader strategy to avoid false positives.", + "macdh": "MACD Histogram: Shows the gap between the MACD line and its signal. Usage: Visualize momentum strength and spot divergence early. Tips: Can be volatile; complement with additional filters in fast-moving markets.", + "rsi": "RSI: Measures momentum to flag overbought/oversold conditions. Usage: Apply 70/30 thresholds and watch for divergence to signal reversals. Tips: In strong trends, RSI may remain extreme; always cross-check with trend analysis.", + "boll": "Bollinger Middle: A 20 SMA serving as the basis for Bollinger Bands. Usage: Acts as a dynamic benchmark for price movement. Tips: Combine with the upper and lower bands to effectively spot breakouts or reversals.", + "boll_ub": "Bollinger Upper Band: Typically 2 standard deviations above the middle line. Usage: Signals potential overbought conditions and breakout zones. 
Tips: Confirm signals with other tools; prices may ride the band in strong trends.", + "boll_lb": "Bollinger Lower Band: Typically 2 standard deviations below the middle line. Usage: Indicates potential oversold conditions. Tips: Use additional analysis to avoid false reversal signals.", + "atr": "ATR: Averages true range to measure volatility. Usage: Set stop-loss levels and adjust position sizes based on current market volatility. Tips: It's a reactive measure, so use it as part of a broader risk management strategy.", + "vwma": "VWMA: A moving average weighted by volume. Usage: Confirm trends by integrating price action with volume data. Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses." + } + + if indicator not in supported_indicators: + raise ValueError( + f"Indicator {indicator} is not supported. Please choose from: {list(supported_indicators.keys())}" + ) + + curr_date_dt = datetime.strptime(curr_date, "%Y-%m-%d") + before = curr_date_dt - relativedelta(days=look_back_days) + + # Get the full data for the period instead of making individual calls + _, required_series_type = supported_indicators[indicator] + + # Use the provided series_type or fall back to the required one + if required_series_type: + series_type = required_series_type + + try: + # Get indicator data for the period + if indicator == "close_50_sma": + data = _make_api_request("SMA", { + "symbol": symbol, + "interval": interval, + "time_period": "50", + "series_type": series_type, + "datatype": "csv" + }) + elif indicator == "close_200_sma": + data = _make_api_request("SMA", { + "symbol": symbol, + "interval": interval, + "time_period": "200", + "series_type": series_type, + "datatype": "csv" + }) + elif indicator == "close_10_ema": + data = _make_api_request("EMA", { + "symbol": symbol, + "interval": interval, + "time_period": "10", + "series_type": series_type, + "datatype": "csv" + }) + elif indicator == "macd": + data = 
_make_api_request("MACD", { + "symbol": symbol, + "interval": interval, + "series_type": series_type, + "datatype": "csv" + }) + elif indicator == "macds": + data = _make_api_request("MACD", { + "symbol": symbol, + "interval": interval, + "series_type": series_type, + "datatype": "csv" + }) + elif indicator == "macdh": + data = _make_api_request("MACD", { + "symbol": symbol, + "interval": interval, + "series_type": series_type, + "datatype": "csv" + }) + elif indicator == "rsi": + data = _make_api_request("RSI", { + "symbol": symbol, + "interval": interval, + "time_period": str(time_period), + "series_type": series_type, + "datatype": "csv" + }) + elif indicator in ["boll", "boll_ub", "boll_lb"]: + data = _make_api_request("BBANDS", { + "symbol": symbol, + "interval": interval, + "time_period": "20", + "series_type": series_type, + "datatype": "csv" + }) + elif indicator == "atr": + data = _make_api_request("ATR", { + "symbol": symbol, + "interval": interval, + "time_period": str(time_period), + "datatype": "csv" + }) + elif indicator == "vwma": + # Alpha Vantage doesn't have direct VWMA, so we'll return an informative message + # In a real implementation, this would need to be calculated from OHLCV data + return f"## VWMA (Volume Weighted Moving Average) for {symbol}:\n\nVWMA calculation requires OHLCV data and is not directly available from Alpha Vantage API.\nThis indicator would need to be calculated from the raw stock data using volume-weighted price averaging.\n\n{indicator_descriptions.get('vwma', 'No description available.')}" + else: + return f"Error: Indicator {indicator} not implemented yet." 
+ + # Parse CSV data and extract values for the date range + lines = data.strip().split('\n') + if len(lines) < 2: + return f"Error: No data returned for {indicator}" + + # Parse header and data + header = lines[0].split(',') + date_col_idx = 0 # Assuming first column is date + value_col_idx = 1 # Default to second column + + # Handle specific indicator column mappings + if indicator == "macds": + # MACD Signal is typically in the third column + value_col_idx = 2 if len(header) > 2 else 1 + elif indicator == "macdh": + # MACD Histogram is typically in the fourth column + value_col_idx = 3 if len(header) > 3 else 1 + elif indicator == "boll_ub": + # Bollinger Upper Band is typically in the second column + value_col_idx = 1 + elif indicator == "boll": + # Bollinger Middle is typically in the third column + value_col_idx = 2 if len(header) > 2 else 1 + elif indicator == "boll_lb": + # Bollinger Lower Band is typically in the fourth column + value_col_idx = 3 if len(header) > 3 else 1 + + result_data = [] + for line in lines[1:]: + if not line.strip(): + continue + values = line.split(',') + if len(values) > value_col_idx: + try: + date_str = values[date_col_idx].strip() + # Parse the date + date_dt = datetime.strptime(date_str, "%Y-%m-%d") + + # Check if date is in our range + if before <= date_dt <= curr_date_dt: + value = values[value_col_idx].strip() + result_data.append((date_dt, value)) + except (ValueError, IndexError): + continue + + # Sort by date and format output + result_data.sort(key=lambda x: x[0]) + + ind_string = "" + for date_dt, value in result_data: + ind_string += f"{date_dt.strftime('%Y-%m-%d')}: {value}\n" + + if not ind_string: + ind_string = "No data available for the specified date range.\n" + + result_str = ( + f"## {indicator.upper()} values from {before.strftime('%Y-%m-%d')} to {curr_date}:\n\n" + + ind_string + + "\n\n" + + indicator_descriptions.get(indicator, "No description available.") + ) + + return result_str + + except Exception as 
e: + print(f"Error getting Alpha Vantage indicator data for {indicator}: {e}") + return f"Error retrieving {indicator} data: {str(e)}" diff --git a/tradingagents/dataflows/alpha_vantage_news.py b/tradingagents/dataflows/alpha_vantage_news.py new file mode 100644 index 00000000..8124fb45 --- /dev/null +++ b/tradingagents/dataflows/alpha_vantage_news.py @@ -0,0 +1,43 @@ +from .alpha_vantage_common import _make_api_request, format_datetime_for_api + +def get_news(ticker, start_date, end_date) -> dict[str, str] | str: + """Returns live and historical market news & sentiment data from premier news outlets worldwide. + + Covers stocks, cryptocurrencies, forex, and topics like fiscal policy, mergers & acquisitions, IPOs. + + Args: + ticker: Stock symbol for news articles. + start_date: Start date for news search. + end_date: End date for news search. + + Returns: + Dictionary containing news sentiment data or JSON string. + """ + + params = { + "tickers": ticker, + "time_from": format_datetime_for_api(start_date), + "time_to": format_datetime_for_api(end_date), + "sort": "LATEST", + "limit": "50", + } + + return _make_api_request("NEWS_SENTIMENT", params) + +def get_insider_transactions(symbol: str) -> dict[str, str] | str: + """Returns latest and historical insider transactions by key stakeholders. + + Covers transactions by founders, executives, board members, etc. + + Args: + symbol: Ticker symbol. Example: "IBM". + + Returns: + Dictionary containing insider transaction data or JSON string. 
+ """ + + params = { + "symbol": symbol, + } + + return _make_api_request("INSIDER_TRANSACTIONS", params) \ No newline at end of file diff --git a/tradingagents/dataflows/alpha_vantage_stock.py b/tradingagents/dataflows/alpha_vantage_stock.py new file mode 100644 index 00000000..ffd3570b --- /dev/null +++ b/tradingagents/dataflows/alpha_vantage_stock.py @@ -0,0 +1,38 @@ +from datetime import datetime +from .alpha_vantage_common import _make_api_request, _filter_csv_by_date_range + +def get_stock( + symbol: str, + start_date: str, + end_date: str +) -> str: + """ + Returns raw daily OHLCV values, adjusted close values, and historical split/dividend events + filtered to the specified date range. + + Args: + symbol: The name of the equity. For example: symbol=IBM + start_date: Start date in yyyy-mm-dd format + end_date: End date in yyyy-mm-dd format + + Returns: + CSV string containing the daily adjusted time series data filtered to the date range. + """ + # Parse dates to determine the range + start_dt = datetime.strptime(start_date, "%Y-%m-%d") + today = datetime.now() + + # Choose outputsize based on whether the requested range is within the latest 100 days + # Compact returns latest 100 data points, so check if start_date is recent enough + days_from_today_to_start = (today - start_dt).days + outputsize = "compact" if days_from_today_to_start < 100 else "full" + + params = { + "symbol": symbol, + "outputsize": outputsize, + "datatype": "csv", + } + + response = _make_api_request("TIME_SERIES_DAILY_ADJUSTED", params) + + return _filter_csv_by_date_range(response, start_date, end_date) \ No newline at end of file diff --git a/tradingagents/dataflows/interface.py b/tradingagents/dataflows/interface.py index 2b6b92de..3a23f4ce 100644 --- a/tradingagents/dataflows/interface.py +++ b/tradingagents/dataflows/interface.py @@ -5,6 +5,16 @@ from .local import get_YFin_data, get_finnhub_news, get_finnhub_company_insider_ from .yahoo_finance import get_YFin_data_online, 
get_stock_stats_indicators_window from .google import get_google_news from .openai import get_stock_news_openai, get_global_news_openai, get_fundamentals_openai +from .alpha_vantage import ( + get_stock as get_alpha_vantage_stock, + get_indicator as get_alpha_vantage_indicator, + get_fundamentals as get_alpha_vantage_fundamentals, + get_balance_sheet as get_alpha_vantage_balance_sheet, + get_cashflow as get_alpha_vantage_cashflow, + get_income_statement as get_alpha_vantage_income_statement, + get_insider_transactions as get_alpha_vantage_insider_transactions, + get_news as get_alpha_vantage_news +) # Configuration and routing logic from .config import get_config @@ -54,29 +64,36 @@ VENDOR_LIST = [ VENDOR_METHODS = { # core_stock_apis "get_stock_data": { + "alpha_vantage": get_alpha_vantage_stock, "yahoo_finance": get_YFin_data_online, "local": get_YFin_data, }, # technical_indicators "get_indicators": { + "alpha_vantage": get_alpha_vantage_indicator, "yahoo_finance": get_stock_stats_indicators_window, "local": get_stock_stats_indicators_window }, # fundamental_data "get_fundamentals": { - "openai": get_fundamentals_openai + "alpha_vantage": get_alpha_vantage_fundamentals, + "openai": get_fundamentals_openai, }, "get_balance_sheet": { + "alpha_vantage": get_alpha_vantage_balance_sheet, "local": get_simfin_balance_sheet, }, "get_cashflow": { + "alpha_vantage": get_alpha_vantage_cashflow, "local": get_simfin_cashflow, }, "get_income_statement": { + "alpha_vantage": get_alpha_vantage_income_statement, "local": get_simfin_income_statements, }, # news_data "get_news": { + "alpha_vantage": get_alpha_vantage_news, "openai": get_stock_news_openai, "google": get_google_news, "local": [get_finnhub_news, get_reddit_company_news, get_google_news], @@ -89,6 +106,7 @@ VENDOR_METHODS = { "local": get_finnhub_company_insider_sentiment }, "get_insider_transactions": { + "alpha_vantage": get_alpha_vantage_insider_transactions, "local": get_finnhub_company_insider_transactions, }, } 
@@ -131,7 +149,8 @@ def route_to_vender(method: str, *args, **kwargs): for vendor in vendors: if vendor not in VENDOR_METHODS[method]: - raise ValueError(f"Vendor '{vendor}' not supported for method '{method}'") + print(f"Info: Vendor '{vendor}' not supported for method '{method}', ignoring") + continue vendor_impl = VENDOR_METHODS[method][vendor] diff --git a/tradingagents/dataflows/stockstats_utils.py b/tradingagents/dataflows/stockstats_utils.py index ba547dc5..e81684e0 100644 --- a/tradingagents/dataflows/stockstats_utils.py +++ b/tradingagents/dataflows/stockstats_utils.py @@ -3,7 +3,7 @@ import yfinance as yf from stockstats import wrap from typing import Annotated import os -from .config import get_config +from .config import get_config, DATA_DIR class StockstatsUtils: @@ -19,7 +19,6 @@ class StockstatsUtils: ): # Get config and set up data directory path config = get_config() - data_dir = os.path.join(config["DATA_DIR"], "market_data", "price_data") online = config["data_vendors"]["technical_indicators"] != "local" df = None @@ -29,7 +28,7 @@ class StockstatsUtils: try: data = pd.read_csv( os.path.join( - data_dir, + DATA_DIR, f"{symbol}-YFin-data-2015-01-01-2025-03-25.csv", ) ) diff --git a/tradingagents/dataflows/yahoo_finance.py b/tradingagents/dataflows/yahoo_finance.py index 8189cc4d..049403cb 100644 --- a/tradingagents/dataflows/yahoo_finance.py +++ b/tradingagents/dataflows/yahoo_finance.py @@ -1,10 +1,8 @@ from typing import Annotated from datetime import datetime from dateutil.relativedelta import relativedelta -import pandas as pd import yfinance as yf import os -from .config import DATA_DIR from .stockstats_utils import StockstatsUtils def get_YFin_data_online( diff --git a/tradingagents/default_config.py b/tradingagents/default_config.py index 6134d2c1..d02411e0 100644 --- a/tradingagents/default_config.py +++ b/tradingagents/default_config.py @@ -20,10 +20,10 @@ DEFAULT_CONFIG = { # Data vendor configuration # Category-level configuration 
(default for all tools in category) "data_vendors": { - "core_stock_apis": "yahoo_finance", # OHLCV data: yahoo_finance, local - "technical_indicators": "yahoo_finance", # Technical indicators: yahoo_finance, local - "fundamental_data": "openai", # Fundamentals: openai, local - "news_data": "openai,google", # News: openai, google, local + "core_stock_apis": "alpha_vantage", # OHLCV data: alpha_vantage, yahoo_finance, local + "technical_indicators": "alpha_vantage", # Technical indicators: alpha_vantage, yahoo_finance, local + "fundamental_data": "alpha_vantage", # Fundamentals: alpha_vantage, openai, local + "news_data": "alpha_vantage", # News: alpha_vantage, openai, google, local }, # Tool-level configuration (takes precedence over category-level) "tool_vendors": { From 8b04ec307fa5cd8cb39a0cedce6260f3b1b3ee52 Mon Sep 17 00:00:00 2001 From: luohy15 Date: Fri, 26 Sep 2025 23:25:33 +0800 Subject: [PATCH 15/26] minor fix --- tradingagents/agents/analysts/market_analyst.py | 2 +- tradingagents/agents/utils/core_stock_tools.py | 4 ++-- tradingagents/agents/utils/fundamental_data_tools.py | 10 +++++----- tradingagents/agents/utils/news_data_tools.py | 10 +++++----- .../agents/utils/technical_indicators_tools.py | 4 ++-- tradingagents/dataflows/alpha_vantage_common.py | 2 +- tradingagents/dataflows/interface.py | 2 +- 7 files changed, 17 insertions(+), 17 deletions(-) diff --git a/tradingagents/agents/analysts/market_analyst.py b/tradingagents/agents/analysts/market_analyst.py index 8695acc7..c955dd76 100644 --- a/tradingagents/agents/analysts/market_analyst.py +++ b/tradingagents/agents/analysts/market_analyst.py @@ -42,7 +42,7 @@ Volatility Indicators: Volume-Based Indicators: - vwma: VWMA: A moving average weighted by volume. Usage: Confirm trends by integrating price action with volume data. Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses. -- Select indicators that provide diverse and complementary information. 
Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_stock_data first to retrieve the CSV that is needed to generate indicators. Then use get_technical_indicators with the specific indicator names. Write a very detailed and nuanced report of the trends you observe. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions.""" +- Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_stock_data first to retrieve the CSV that is needed to generate indicators. Then use get_indicators with the specific indicator names. Write a very detailed and nuanced report of the trends you observe. 
Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions.""" + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""" ) diff --git a/tradingagents/agents/utils/core_stock_tools.py b/tradingagents/agents/utils/core_stock_tools.py index 02733c6c..3a416622 100644 --- a/tradingagents/agents/utils/core_stock_tools.py +++ b/tradingagents/agents/utils/core_stock_tools.py @@ -1,6 +1,6 @@ from langchain_core.tools import tool from typing import Annotated -from tradingagents.dataflows.interface import route_to_vender +from tradingagents.dataflows.interface import route_to_vendor @tool @@ -19,4 +19,4 @@ def get_stock_data( Returns: str: A formatted dataframe containing the stock price data for the specified ticker symbol in the specified date range. """ - return route_to_vender("get_stock_data", symbol, start_date, end_date) + return route_to_vendor("get_stock_data", symbol, start_date, end_date) diff --git a/tradingagents/agents/utils/fundamental_data_tools.py b/tradingagents/agents/utils/fundamental_data_tools.py index bbfd0153..47f6f2eb 100644 --- a/tradingagents/agents/utils/fundamental_data_tools.py +++ b/tradingagents/agents/utils/fundamental_data_tools.py @@ -1,6 +1,6 @@ from langchain_core.tools import tool from typing import Annotated -from tradingagents.dataflows.interface import route_to_vender +from tradingagents.dataflows.interface import route_to_vendor @tool @@ -17,7 +17,7 @@ def get_fundamentals( Returns: str: A formatted report containing comprehensive fundamental data """ - return route_to_vender("get_fundamentals", ticker, curr_date) + return route_to_vendor("get_fundamentals", ticker, curr_date) @tool @@ -36,7 +36,7 @@ def get_balance_sheet( Returns: str: A formatted report containing balance sheet data """ - return route_to_vender("get_balance_sheet", ticker, freq, curr_date) + return 
route_to_vendor("get_balance_sheet", ticker, freq, curr_date) @tool @@ -55,7 +55,7 @@ def get_cashflow( Returns: str: A formatted report containing cash flow statement data """ - return route_to_vender("get_cashflow", ticker, freq, curr_date) + return route_to_vendor("get_cashflow", ticker, freq, curr_date) @tool @@ -74,4 +74,4 @@ def get_income_statement( Returns: str: A formatted report containing income statement data """ - return route_to_vender("get_income_statement", ticker, freq, curr_date) \ No newline at end of file + return route_to_vendor("get_income_statement", ticker, freq, curr_date) \ No newline at end of file diff --git a/tradingagents/agents/utils/news_data_tools.py b/tradingagents/agents/utils/news_data_tools.py index e42b4dfe..0df9d047 100644 --- a/tradingagents/agents/utils/news_data_tools.py +++ b/tradingagents/agents/utils/news_data_tools.py @@ -1,6 +1,6 @@ from langchain_core.tools import tool from typing import Annotated -from tradingagents.dataflows.interface import route_to_vender +from tradingagents.dataflows.interface import route_to_vendor @tool def get_news( @@ -18,7 +18,7 @@ def get_news( Returns: str: A formatted string containing news data """ - return route_to_vender("get_news", ticker, start_date, end_date) + return route_to_vendor("get_news", ticker, start_date, end_date) @tool def get_global_news( @@ -36,7 +36,7 @@ def get_global_news( Returns: str: A formatted string containing global news data """ - return route_to_vender("get_global_news", curr_date, look_back_days, limit) + return route_to_vendor("get_global_news", curr_date, look_back_days, limit) @tool def get_insider_sentiment( @@ -52,7 +52,7 @@ def get_insider_sentiment( Returns: str: A report of insider sentiment data """ - return route_to_vender("get_insider_sentiment", ticker, curr_date) + return route_to_vendor("get_insider_sentiment", ticker, curr_date) @tool def get_insider_transactions( @@ -68,4 +68,4 @@ def get_insider_transactions( Returns: str: A report of 
insider transaction data """ - return route_to_vender("get_insider_transactions", ticker, curr_date) + return route_to_vendor("get_insider_transactions", ticker, curr_date) diff --git a/tradingagents/agents/utils/technical_indicators_tools.py b/tradingagents/agents/utils/technical_indicators_tools.py index 86aba653..c6c08bca 100644 --- a/tradingagents/agents/utils/technical_indicators_tools.py +++ b/tradingagents/agents/utils/technical_indicators_tools.py @@ -1,6 +1,6 @@ from langchain_core.tools import tool from typing import Annotated -from tradingagents.dataflows.interface import route_to_vender +from tradingagents.dataflows.interface import route_to_vendor @tool def get_indicators( @@ -20,4 +20,4 @@ def get_indicators( Returns: str: A formatted dataframe containing the technical indicators for the specified ticker symbol and indicator. """ - return route_to_vender("get_indicators", symbol, indicator, curr_date, look_back_days) \ No newline at end of file + return route_to_vendor("get_indicators", symbol, indicator, curr_date, look_back_days) \ No newline at end of file diff --git a/tradingagents/dataflows/alpha_vantage_common.py b/tradingagents/dataflows/alpha_vantage_common.py index aeeb30ef..8fa17a7e 100644 --- a/tradingagents/dataflows/alpha_vantage_common.py +++ b/tradingagents/dataflows/alpha_vantage_common.py @@ -43,7 +43,7 @@ def _make_api_request(function_name: str, params: dict) -> dict | str: api_params.update({ "function": function_name, "apikey": get_api_key(), - "source": "alphavangtagemcp" + "source": "tradingagents", }) # Handle entitlement parameter if present in params or global variable diff --git a/tradingagents/dataflows/interface.py b/tradingagents/dataflows/interface.py index 3a23f4ce..43044149 100644 --- a/tradingagents/dataflows/interface.py +++ b/tradingagents/dataflows/interface.py @@ -133,7 +133,7 @@ def get_vendor(category: str, method: str = None) -> str: # Fall back to category-level configuration return config.get("data_vendors", 
{}).get(category, "default") -def route_to_vender(method: str, *args, **kwargs): +def route_to_vendor(method: str, *args, **kwargs): """Route method calls to appropriate vendor implementation.""" category = get_category_for_method(method) vendor_config = get_vendor(category, method) From 6211b1132ae8b0806977e59a6bba246796ec6af4 Mon Sep 17 00:00:00 2001 From: luohy15 Date: Fri, 26 Sep 2025 23:36:36 +0800 Subject: [PATCH 16/26] Improve Alpha Vantage indicator column parsing with robust mapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Replace hardcoded column indices with column name lookup - Add mapping for all supported indicators to their expected CSV column names - Handle missing columns gracefully with descriptive error messages - Strip whitespace from header parsing for reliability πŸ€– Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --- .../dataflows/alpha_vantage_indicator.py | 40 ++++++++++--------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/tradingagents/dataflows/alpha_vantage_indicator.py b/tradingagents/dataflows/alpha_vantage_indicator.py index c696091c..6225b9bb 100644 --- a/tradingagents/dataflows/alpha_vantage_indicator.py +++ b/tradingagents/dataflows/alpha_vantage_indicator.py @@ -155,26 +155,30 @@ def get_indicator( return f"Error: No data returned for {indicator}" # Parse header and data - header = lines[0].split(',') - date_col_idx = 0 # Assuming first column is date - value_col_idx = 1 # Default to second column + header = [col.strip() for col in lines[0].split(',')] + try: + date_col_idx = header.index('time') + except ValueError: + return f"Error: 'time' column not found in data for {indicator}. 
Available columns: {header}" - # Handle specific indicator column mappings - if indicator == "macds": - # MACD Signal is typically in the third column - value_col_idx = 2 if len(header) > 2 else 1 - elif indicator == "macdh": - # MACD Histogram is typically in the fourth column - value_col_idx = 3 if len(header) > 3 else 1 - elif indicator == "boll_ub": - # Bollinger Upper Band is typically in the second column + # Map internal indicator names to expected CSV column names from Alpha Vantage + col_name_map = { + "macd": "MACD", "macds": "MACD_Signal", "macdh": "MACD_Hist", + "boll": "Real Middle Band", "boll_ub": "Real Upper Band", "boll_lb": "Real Lower Band", + "rsi": "RSI", "atr": "ATR", "close_10_ema": "EMA", + "close_50_sma": "SMA", "close_200_sma": "SMA" + } + + target_col_name = col_name_map.get(indicator) + + if not target_col_name: + # Default to the second column if no specific mapping exists value_col_idx = 1 - elif indicator == "boll": - # Bollinger Middle is typically in the third column - value_col_idx = 2 if len(header) > 2 else 1 - elif indicator == "boll_lb": - # Bollinger Lower Band is typically in the fourth column - value_col_idx = 3 if len(header) > 3 else 1 + else: + try: + value_col_idx = header.index(target_col_name) + except ValueError: + return f"Error: Column '{target_col_name}' not found for indicator '{indicator}'. Available columns: {header}" result_data = [] for line in lines[1:]: From 7bcc2cbd8aa4ccf05e3a86ea7afae74e1aa2b649 Mon Sep 17 00:00:00 2001 From: luohy15 Date: Fri, 26 Sep 2025 23:52:26 +0800 Subject: [PATCH 17/26] Update configuration documentation for Alpha Vantage data vendor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add data vendor configuration examples in README and main.py showing how to configure Alpha Vantage as the primary data provider. Update documentation to reflect the current default behavior of using Alpha Vantage for real-time market data access. 
πŸ€– Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --- README.md | 10 +++++++++- main.py | 8 ++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index cef678f6..ad293904 100644 --- a/README.md +++ b/README.md @@ -179,6 +179,14 @@ config["deep_think_llm"] = "gpt-4.1-nano" # Use a different model config["quick_think_llm"] = "gpt-4.1-nano" # Use a different model config["max_debate_rounds"] = 1 # Increase debate rounds +# Configure data vendors (default uses Alpha Vantage for real-time data) +config["data_vendors"] = { + "core_stock_apis": "alpha_vantage", # Options: alpha_vantage, yahoo_finance, local + "technical_indicators": "alpha_vantage", # Options: alpha_vantage, yahoo_finance, local + "fundamental_data": "alpha_vantage", # Options: alpha_vantage, openai, local + "news_data": "alpha_vantage", # Options: alpha_vantage, openai, google, local +} + # Initialize with custom config ta = TradingAgentsGraph(debug=True, config=config) @@ -187,7 +195,7 @@ _, decision = ta.propagate("NVDA", "2024-05-10") print(decision) ``` -> We recommend enabling them for experimentation, as they provide access to real-time data. The agents' offline tools rely on cached data from our **Tauric TradingDB**, a curated dataset we use for backtesting. We're currently in the process of refining this dataset, and we plan to release it soon alongside our upcoming projects. Stay tuned! +> The default configuration now uses Alpha Vantage as the primary data provider, which provides access to real-time market data. For offline experimentation, there's a local data vendor option that uses our **Tauric TradingDB**, a curated dataset for backtesting, though this is still in development. We're currently refining this dataset and plan to release it soon alongside our upcoming projects. Stay tuned! You can view the full list of configurations in `tradingagents/default_config.py`. 
diff --git a/main.py b/main.py index 1ce62cc6..8bf3d3c7 100644 --- a/main.py +++ b/main.py @@ -9,6 +9,14 @@ config["deep_think_llm"] = "gemini-2.0-flash" # Use a different model config["quick_think_llm"] = "gemini-2.0-flash" # Use a different model config["max_debate_rounds"] = 1 # Increase debate rounds +# Configure data vendors (default uses Alpha Vantage for real-time data) +config["data_vendors"] = { + "core_stock_apis": "alpha_vantage", # Options: alpha_vantage, yahoo_finance, local + "technical_indicators": "alpha_vantage", # Options: alpha_vantage, yahoo_finance, local + "fundamental_data": "alpha_vantage", # Options: alpha_vantage, openai, local + "news_data": "alpha_vantage", # Options: alpha_vantage, openai, google, local +} + # Initialize with custom config ta = TradingAgentsGraph(debug=True, config=config) From 7fc9c28a94d177851c205d4958be88014d16eba6 Mon Sep 17 00:00:00 2001 From: luohy15 Date: Fri, 26 Sep 2025 23:58:51 +0800 Subject: [PATCH 18/26] Add environment variable configuration support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add .env.example file with API key placeholders - Update README.md with .env file setup instructions - Add dotenv loading in main.py for environment variables πŸ€– Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --- .env.example | 2 ++ README.md | 6 ++++++ main.py | 5 +++++ 3 files changed, 13 insertions(+) create mode 100644 .env.example diff --git a/.env.example b/.env.example new file mode 100644 index 00000000..1e257c3c --- /dev/null +++ b/.env.example @@ -0,0 +1,2 @@ +ALPHA_VANTAGE_API_KEY=alpha_vantage_api_key_placeholder +OPENAI_API_KEY=openai_api_key_placeholder \ No newline at end of file diff --git a/README.md b/README.md index ad293904..4557fc29 100644 --- a/README.md +++ b/README.md @@ -124,6 +124,12 @@ You will need the OpenAI API for all the agents. 
export OPENAI_API_KEY=$YOUR_OPENAI_API_KEY ``` +Alternatively, you can create a `.env` file in the project root with your API keys (see `.env.example` for reference): +```bash +cp .env.example .env +# Edit .env with your actual API keys +``` + ### CLI Usage You can also try out the CLI directly by running: diff --git a/main.py b/main.py index 8bf3d3c7..fe4e4cb3 100644 --- a/main.py +++ b/main.py @@ -1,6 +1,11 @@ from tradingagents.graph.trading_graph import TradingAgentsGraph from tradingagents.default_config import DEFAULT_CONFIG +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + # Create a custom config config = DEFAULT_CONFIG.copy() config["llm_provider"] = "google" # Use a different model From 86bc0e793fd0d7fca5b5925af6b3632b4ff75282 Mon Sep 17 00:00:00 2001 From: luohy15 Date: Sat, 27 Sep 2025 00:04:59 +0800 Subject: [PATCH 19/26] minor fix --- tradingagents/agents/analysts/fundamentals_analyst.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tradingagents/agents/analysts/fundamentals_analyst.py b/tradingagents/agents/analysts/fundamentals_analyst.py index 6b8f286d..e20139cb 100644 --- a/tradingagents/agents/analysts/fundamentals_analyst.py +++ b/tradingagents/agents/analysts/fundamentals_analyst.py @@ -19,9 +19,9 @@ def create_fundamentals_analyst(llm): ] system_message = ( - "You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, company financial history, insider sentiment and insider transactions to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." 
+ "You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, and company financial history to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." + " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read." - + " Use the get_fundamental_data tool with different data_type parameters: 'full_fundamentals' for comprehensive analysis, 'balance_sheet', 'cashflow', 'income_statement' for specific financial statements, 'insider_sentiment' and 'insider_transactions' for insider information.", + + " Use the available tools: `get_fundamentals` for comprehensive company analysis, `get_balance_sheet`, `get_cashflow`, and `get_income_statement` for specific financial statements.", ) prompt = ChatPromptTemplate.from_messages( From 8fdbbcca3d9aabc7315e6e7659567ac39af9b337 Mon Sep 17 00:00:00 2001 From: luohy15 Date: Mon, 29 Sep 2025 14:08:29 +0800 Subject: [PATCH 20/26] alpha vantage api key url --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4557fc29..bc4e3579 100644 --- a/README.md +++ b/README.md @@ -114,7 +114,7 @@ pip install -r requirements.txt ### Required APIs -You will also need the Alpha Vantage API for financial data. The free tier supports 25 API calls per day. +You will also need the [Alpha Vantage API](https://www.alphavantage.co/support/#api-key) for financial data. The free tier supports 25 API calls per day. 
```bash export ALPHA_VANTAGE_API_KEY=$YOUR_ALPHA_VANTAGE_API_KEY ``` From b01051b9f478fb054f4c6e93598322df74bd8a46 Mon Sep 17 00:00:00 2001 From: luohy15 Date: Tue, 30 Sep 2025 11:11:05 +0800 Subject: [PATCH 21/26] Switch default data vendor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit πŸ€– Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- README.md | 21 +++++++++---------- main.py | 16 +++++++------- tradingagents/dataflows/interface.py | 8 +++---- .../{yahoo_finance.py => y_finance.py} | 0 tradingagents/default_config.py | 8 +++---- 5 files changed, 25 insertions(+), 28 deletions(-) rename tradingagents/dataflows/{yahoo_finance.py => y_finance.py} (100%) diff --git a/README.md b/README.md index bc4e3579..f774ec2a 100644 --- a/README.md +++ b/README.md @@ -114,14 +114,11 @@ pip install -r requirements.txt ### Required APIs -You will also need the [Alpha Vantage API](https://www.alphavantage.co/support/#api-key) for financial data. The free tier supports 25 API calls per day. -```bash -export ALPHA_VANTAGE_API_KEY=$YOUR_ALPHA_VANTAGE_API_KEY -``` +You will need the OpenAI API for all the agents, and [Alpha Vantage API](https://www.alphavantage.co/support/#api-key) for fundamental and news data (default configuration). -You will need the OpenAI API for all the agents. ```bash export OPENAI_API_KEY=$YOUR_OPENAI_API_KEY +export ALPHA_VANTAGE_API_KEY=$YOUR_ALPHA_VANTAGE_API_KEY ``` Alternatively, you can create a `.env` file in the project root with your API keys (see `.env.example` for reference): @@ -130,6 +127,8 @@ cp .env.example .env # Edit .env with your actual API keys ``` +**Note:** The default configuration uses [Alpha Vantage](https://www.alphavantage.co/) for fundamental and news data. You can get a free API key from their website, or upgrade to [Alpha Vantage Premium](https://www.alphavantage.co/premium/) for higher rate limits and more stable access. 
If you prefer to use OpenAI for these data sources instead, you can modify the data vendor settings in `tradingagents/default_config.py`. + ### CLI Usage You can also try out the CLI directly by running: @@ -185,12 +184,12 @@ config["deep_think_llm"] = "gpt-4.1-nano" # Use a different model config["quick_think_llm"] = "gpt-4.1-nano" # Use a different model config["max_debate_rounds"] = 1 # Increase debate rounds -# Configure data vendors (default uses Alpha Vantage for real-time data) +# Configure data vendors (default uses yfinance and Alpha Vantage) config["data_vendors"] = { - "core_stock_apis": "alpha_vantage", # Options: alpha_vantage, yahoo_finance, local - "technical_indicators": "alpha_vantage", # Options: alpha_vantage, yahoo_finance, local - "fundamental_data": "alpha_vantage", # Options: alpha_vantage, openai, local - "news_data": "alpha_vantage", # Options: alpha_vantage, openai, google, local + "core_stock_apis": "yfinance", # Options: yfinance, alpha_vantage, local + "technical_indicators": "yfinance", # Options: yfinance, alpha_vantage, local + "fundamental_data": "alpha_vantage", # Options: openai, alpha_vantage, local + "news_data": "alpha_vantage", # Options: openai, alpha_vantage, google, local } # Initialize with custom config @@ -201,7 +200,7 @@ _, decision = ta.propagate("NVDA", "2024-05-10") print(decision) ``` -> The default configuration now uses Alpha Vantage as the primary data provider, which provides access to real-time market data. For offline experimentation, there's a local data vendor option that uses our **Tauric TradingDB**, a curated dataset for backtesting, though this is still in development. We're currently refining this dataset and plan to release it soon alongside our upcoming projects. Stay tuned! +> The default configuration uses yfinance for stock price and technical data, and Alpha Vantage for fundamental and news data. 
For production use or if you encounter rate limits, consider upgrading to [Alpha Vantage Premium](https://www.alphavantage.co/premium/) for more stable and reliable data access. For offline experimentation, there's a local data vendor option that uses our **Tauric TradingDB**, a curated dataset for backtesting, though this is still in development. We're currently refining this dataset and plan to release it soon alongside our upcoming projects. Stay tuned! You can view the full list of configurations in `tradingagents/default_config.py`. diff --git a/main.py b/main.py index fe4e4cb3..a85ee6ec 100644 --- a/main.py +++ b/main.py @@ -8,18 +8,16 @@ load_dotenv() # Create a custom config config = DEFAULT_CONFIG.copy() -config["llm_provider"] = "google" # Use a different model -config["backend_url"] = "https://generativelanguage.googleapis.com/v1" # Use a different backend -config["deep_think_llm"] = "gemini-2.0-flash" # Use a different model -config["quick_think_llm"] = "gemini-2.0-flash" # Use a different model +config["deep_think_llm"] = "gpt-4o-mini" # Use a different model +config["quick_think_llm"] = "gpt-4o-mini" # Use a different model config["max_debate_rounds"] = 1 # Increase debate rounds -# Configure data vendors (default uses Alpha Vantage for real-time data) +# Configure data vendors (default uses yfinance and alpha_vantage) config["data_vendors"] = { - "core_stock_apis": "alpha_vantage", # Options: alpha_vantage, yahoo_finance, local - "technical_indicators": "alpha_vantage", # Options: alpha_vantage, yahoo_finance, local - "fundamental_data": "alpha_vantage", # Options: alpha_vantage, openai, local - "news_data": "alpha_vantage", # Options: alpha_vantage, openai, google, local + "core_stock_apis": "yfinance", # Options: yfinance, alpha_vantage, local + "technical_indicators": "yfinance", # Options: yfinance, alpha_vantage, local + "fundamental_data": "alpha_vantage", # Options: openai, alpha_vantage, local + "news_data": "alpha_vantage", # Options: 
openai, alpha_vantage, google, local } # Initialize with custom config diff --git a/tradingagents/dataflows/interface.py b/tradingagents/dataflows/interface.py index 43044149..bc65df9c 100644 --- a/tradingagents/dataflows/interface.py +++ b/tradingagents/dataflows/interface.py @@ -2,7 +2,7 @@ from typing import Annotated # Import from vendor-specific modules from .local import get_YFin_data, get_finnhub_news, get_finnhub_company_insider_sentiment, get_finnhub_company_insider_transactions, get_simfin_balance_sheet, get_simfin_cashflow, get_simfin_income_statements, get_reddit_global_news, get_reddit_company_news -from .yahoo_finance import get_YFin_data_online, get_stock_stats_indicators_window +from .y_finance import get_YFin_data_online, get_stock_stats_indicators_window from .google import get_google_news from .openai import get_stock_news_openai, get_global_news_openai, get_fundamentals_openai from .alpha_vantage import ( @@ -55,7 +55,7 @@ TOOLS_CATEGORIES = { VENDOR_LIST = [ "local", - "yahoo_finance", + "yfinance", "openai", "google" ] @@ -65,13 +65,13 @@ VENDOR_METHODS = { # core_stock_apis "get_stock_data": { "alpha_vantage": get_alpha_vantage_stock, - "yahoo_finance": get_YFin_data_online, + "yfinance": get_YFin_data_online, "local": get_YFin_data, }, # technical_indicators "get_indicators": { "alpha_vantage": get_alpha_vantage_indicator, - "yahoo_finance": get_stock_stats_indicators_window, + "yfinance": get_stock_stats_indicators_window, "local": get_stock_stats_indicators_window }, # fundamental_data diff --git a/tradingagents/dataflows/yahoo_finance.py b/tradingagents/dataflows/y_finance.py similarity index 100% rename from tradingagents/dataflows/yahoo_finance.py rename to tradingagents/dataflows/y_finance.py diff --git a/tradingagents/default_config.py b/tradingagents/default_config.py index d02411e0..1f40a2a2 100644 --- a/tradingagents/default_config.py +++ b/tradingagents/default_config.py @@ -20,10 +20,10 @@ DEFAULT_CONFIG = { # Data vendor 
configuration # Category-level configuration (default for all tools in category) "data_vendors": { - "core_stock_apis": "alpha_vantage", # OHLCV data: alpha_vantage, yahoo_finance, local - "technical_indicators": "alpha_vantage", # Technical indicators: alpha_vantage, yahoo_finance, local - "fundamental_data": "alpha_vantage", # Fundamentals: alpha_vantage, openai, local - "news_data": "alpha_vantage", # News: alpha_vantage, openai, google, local + "core_stock_apis": "yfinance", # Options: yfinance, alpha_vantage, local + "technical_indicators": "yfinance", # Options: yfinance, alpha_vantage, local + "fundamental_data": "alpha_vantage", # Options: openai, alpha_vantage, local + "news_data": "alpha_vantage", # Options: openai, alpha_vantage, google, local }, # Tool-level configuration (takes precedence over category-level) "tool_vendors": { From d23fb539e963c5ed7f473799cdac183b3ba75eee Mon Sep 17 00:00:00 2001 From: luohy15 Date: Tue, 30 Sep 2025 13:27:48 +0800 Subject: [PATCH 22/26] minor fix --- tradingagents/dataflows/alpha_vantage_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tradingagents/dataflows/alpha_vantage_common.py b/tradingagents/dataflows/alpha_vantage_common.py index 8fa17a7e..f8cc9749 100644 --- a/tradingagents/dataflows/alpha_vantage_common.py +++ b/tradingagents/dataflows/alpha_vantage_common.py @@ -43,7 +43,7 @@ def _make_api_request(function_name: str, params: dict) -> dict | str: api_params.update({ "function": function_name, "apikey": get_api_key(), - "source": "tradingagents", + "source": "trading_agents", }) # Handle entitlement parameter if present in params or global variable From c07dcf026b1daf5a89a80bf80a2664a1088709f2 Mon Sep 17 00:00:00 2001 From: Edward Sun Date: Fri, 3 Oct 2025 22:40:09 -0700 Subject: [PATCH 23/26] added fallbacks for tools --- .../dataflows/alpha_vantage_common.py | 19 +++ tradingagents/dataflows/interface.py | 103 +++++++++++++--- tradingagents/dataflows/y_finance.py | 116 
+++++++++++++++++- 3 files changed, 218 insertions(+), 20 deletions(-) diff --git a/tradingagents/dataflows/alpha_vantage_common.py b/tradingagents/dataflows/alpha_vantage_common.py index f8cc9749..409ff29e 100644 --- a/tradingagents/dataflows/alpha_vantage_common.py +++ b/tradingagents/dataflows/alpha_vantage_common.py @@ -1,6 +1,7 @@ import os import requests import pandas as pd +import json from datetime import datetime from io import StringIO @@ -34,9 +35,15 @@ def format_datetime_for_api(date_input) -> str: else: raise ValueError(f"Date must be string or datetime object, got {type(date_input)}") +class AlphaVantageRateLimitError(Exception): + """Exception raised when Alpha Vantage API rate limit is exceeded.""" + pass + def _make_api_request(function_name: str, params: dict) -> dict | str: """Helper function to make API requests and handle responses. + Raises: + AlphaVantageRateLimitError: When API rate limit is exceeded """ # Create a copy of params to avoid modifying the original api_params = params.copy() @@ -60,6 +67,18 @@ def _make_api_request(function_name: str, params: dict) -> dict | str: response.raise_for_status() response_text = response.text + + # Check if response is JSON (error responses are typically JSON) + try: + response_json = json.loads(response_text) + # Check for rate limit error + if "Information" in response_json: + info_message = response_json["Information"] + if "rate limit" in info_message.lower() or "api key" in info_message.lower(): + raise AlphaVantageRateLimitError(f"Alpha Vantage rate limit exceeded: {info_message}") + except json.JSONDecodeError: + # Response is not JSON (likely CSV data), which is normal + pass return response_text diff --git a/tradingagents/dataflows/interface.py b/tradingagents/dataflows/interface.py index bc65df9c..4cd5ddef 100644 --- a/tradingagents/dataflows/interface.py +++ b/tradingagents/dataflows/interface.py @@ -2,7 +2,7 @@ from typing import Annotated # Import from vendor-specific modules from 
.local import get_YFin_data, get_finnhub_news, get_finnhub_company_insider_sentiment, get_finnhub_company_insider_transactions, get_simfin_balance_sheet, get_simfin_cashflow, get_simfin_income_statements, get_reddit_global_news, get_reddit_company_news -from .y_finance import get_YFin_data_online, get_stock_stats_indicators_window +from .y_finance import get_YFin_data_online, get_stock_stats_indicators_window, get_balance_sheet as get_yfinance_balance_sheet, get_cashflow as get_yfinance_cashflow, get_income_statement as get_yfinance_income_statement, get_insider_transactions as get_yfinance_insider_transactions from .google import get_google_news from .openai import get_stock_news_openai, get_global_news_openai, get_fundamentals_openai from .alpha_vantage import ( @@ -15,6 +15,7 @@ from .alpha_vantage import ( get_insider_transactions as get_alpha_vantage_insider_transactions, get_news as get_alpha_vantage_news ) +from .alpha_vantage_common import AlphaVantageRateLimitError # Configuration and routing logic from .config import get_config @@ -81,14 +82,17 @@ VENDOR_METHODS = { }, "get_balance_sheet": { "alpha_vantage": get_alpha_vantage_balance_sheet, + "yfinance": get_yfinance_balance_sheet, "local": get_simfin_balance_sheet, }, "get_cashflow": { "alpha_vantage": get_alpha_vantage_cashflow, + "yfinance": get_yfinance_cashflow, "local": get_simfin_cashflow, }, "get_income_statement": { "alpha_vantage": get_alpha_vantage_income_statement, + "yfinance": get_yfinance_income_statement, "local": get_simfin_income_statements, }, # news_data @@ -107,6 +111,7 @@ VENDOR_METHODS = { }, "get_insider_transactions": { "alpha_vantage": get_alpha_vantage_insider_transactions, + "yfinance": get_yfinance_insider_transactions, "local": get_finnhub_company_insider_transactions, }, } @@ -134,42 +139,102 @@ def get_vendor(category: str, method: str = None) -> str: return config.get("data_vendors", {}).get(category, "default") def route_to_vendor(method: str, *args, **kwargs): - """Route 
method calls to appropriate vendor implementation.""" + """Route method calls to appropriate vendor implementation with fallback support.""" category = get_category_for_method(method) vendor_config = get_vendor(category, method) # Handle comma-separated vendors - vendors = [v.strip() for v in vendor_config.split(',')] + primary_vendors = [v.strip() for v in vendor_config.split(',')] if method not in VENDOR_METHODS: raise ValueError(f"Method '{method}' not supported") - # Collect all methods to run - methods_to_run = [] + # Get all available vendors for this method for fallback + all_available_vendors = list(VENDOR_METHODS[method].keys()) + + # Create fallback vendor list: primary vendors first, then remaining vendors as fallbacks + fallback_vendors = primary_vendors.copy() + for vendor in all_available_vendors: + if vendor not in fallback_vendors: + fallback_vendors.append(vendor) - for vendor in vendors: + # Debug: Print fallback ordering + primary_str = " β†’ ".join(primary_vendors) + fallback_str = " β†’ ".join(fallback_vendors) + print(f"DEBUG: {method} - Primary: [{primary_str}] | Full fallback order: [{fallback_str}]") + + # Track results and execution state + results = [] + vendor_attempt_count = 0 + any_primary_vendor_attempted = False + successful_vendor = None + + for vendor in fallback_vendors: if vendor not in VENDOR_METHODS[method]: - print(f"Info: Vendor '{vendor}' not supported for method '{method}', ignoring") + if vendor in primary_vendors: + print(f"INFO: Vendor '{vendor}' not supported for method '{method}', falling back to next vendor") continue vendor_impl = VENDOR_METHODS[method][vendor] + is_primary_vendor = vendor in primary_vendors + vendor_attempt_count += 1 + + # Track if we attempted any primary vendor + if is_primary_vendor: + any_primary_vendor_attempted = True + + # Debug: Print current attempt + vendor_type = "PRIMARY" if is_primary_vendor else "FALLBACK" + print(f"DEBUG: Attempting {vendor_type} vendor '{vendor}' for {method} 
(attempt #{vendor_attempt_count})") # Handle list of methods for a vendor if isinstance(vendor_impl, list): - methods_to_run.extend(vendor_impl) + vendor_methods = [(impl, vendor) for impl in vendor_impl] + print(f"DEBUG: Vendor '{vendor}' has multiple implementations: {len(vendor_methods)} functions") else: - # Single method implementation - methods_to_run.append(vendor_impl) + vendor_methods = [(vendor_impl, vendor)] - # Run all methods and collect results - results = [] - for impl_func in methods_to_run: - try: - result = impl_func(*args, **kwargs) - results.append(result) - except Exception as e: - # Log error but continue with other implementations - print(f"Warning: {impl_func.__name__} failed: {e}") + # Run methods for this vendor + vendor_results = [] + for impl_func, vendor_name in vendor_methods: + try: + print(f"DEBUG: Calling {impl_func.__name__} from vendor '{vendor_name}'...") + result = impl_func(*args, **kwargs) + vendor_results.append(result) + print(f"SUCCESS: {impl_func.__name__} from vendor '{vendor_name}' completed successfully") + + except AlphaVantageRateLimitError as e: + if vendor == "alpha_vantage": + print(f"RATE_LIMIT: Alpha Vantage rate limit exceeded, falling back to next available vendor") + print(f"DEBUG: Rate limit details: {e}") + # Continue to next vendor for fallback + continue + except Exception as e: + # Log error but continue with other implementations + print(f"FAILED: {impl_func.__name__} from vendor '{vendor_name}' failed: {e}") + continue + + # Add this vendor's results + if vendor_results: + results.extend(vendor_results) + successful_vendor = vendor + result_summary = f"Got {len(vendor_results)} result(s)" + print(f"SUCCESS: Vendor '{vendor}' succeeded - {result_summary}") + + # Stopping logic: Stop after first successful vendor for single-vendor configs + # Multiple vendor configs (comma-separated) may want to collect from multiple sources + if len(primary_vendors) == 1: + print(f"DEBUG: Stopping after successful vendor 
'{vendor}' (single-vendor config)") + break + else: + print(f"FAILED: Vendor '{vendor}' produced no results") + + # Final result summary + if not results: + print(f"FAILURE: All {vendor_attempt_count} vendor attempts failed for method '{method}'") + raise RuntimeError(f"All vendor implementations failed for method '{method}'") + else: + print(f"FINAL: Method '{method}' completed with {len(results)} result(s) from {vendor_attempt_count} vendor attempt(s)") # Return single result if only one, otherwise concatenate as string if len(results) == 1: diff --git a/tradingagents/dataflows/y_finance.py b/tradingagents/dataflows/y_finance.py index 049403cb..76b6cf4d 100644 --- a/tradingagents/dataflows/y_finance.py +++ b/tradingagents/dataflows/y_finance.py @@ -181,4 +181,118 @@ def get_stockstats_indicator( ) return "" - return str(indicator_value) \ No newline at end of file + return str(indicator_value) + + +def get_balance_sheet( + ticker: Annotated[str, "ticker symbol of the company"], + freq: Annotated[str, "frequency of data: 'annual' or 'quarterly'"] = "quarterly", + curr_date: Annotated[str, "current date (not used for yfinance)"] = None +): + """Get balance sheet data from yfinance.""" + try: + ticker_obj = yf.Ticker(ticker.upper()) + + if freq.lower() == "quarterly": + data = ticker_obj.quarterly_balance_sheet + else: + data = ticker_obj.balance_sheet + + if data.empty: + return f"No balance sheet data found for symbol '{ticker}'" + + # Convert to CSV string for consistency with other functions + csv_string = data.to_csv() + + # Add header information + header = f"# Balance Sheet data for {ticker.upper()} ({freq})\n" + header += f"# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n" + + return header + csv_string + + except Exception as e: + return f"Error retrieving balance sheet for {ticker}: {str(e)}" + + +def get_cashflow( + ticker: Annotated[str, "ticker symbol of the company"], + freq: Annotated[str, "frequency of data: 'annual' or 
'quarterly'"] = "quarterly", + curr_date: Annotated[str, "current date (not used for yfinance)"] = None +): + """Get cash flow data from yfinance.""" + try: + ticker_obj = yf.Ticker(ticker.upper()) + + if freq.lower() == "quarterly": + data = ticker_obj.quarterly_cashflow + else: + data = ticker_obj.cashflow + + if data.empty: + return f"No cash flow data found for symbol '{ticker}'" + + # Convert to CSV string for consistency with other functions + csv_string = data.to_csv() + + # Add header information + header = f"# Cash Flow data for {ticker.upper()} ({freq})\n" + header += f"# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n" + + return header + csv_string + + except Exception as e: + return f"Error retrieving cash flow for {ticker}: {str(e)}" + + +def get_income_statement( + ticker: Annotated[str, "ticker symbol of the company"], + freq: Annotated[str, "frequency of data: 'annual' or 'quarterly'"] = "quarterly", + curr_date: Annotated[str, "current date (not used for yfinance)"] = None +): + """Get income statement data from yfinance.""" + try: + ticker_obj = yf.Ticker(ticker.upper()) + + if freq.lower() == "quarterly": + data = ticker_obj.quarterly_income_stmt + else: + data = ticker_obj.income_stmt + + if data.empty: + return f"No income statement data found for symbol '{ticker}'" + + # Convert to CSV string for consistency with other functions + csv_string = data.to_csv() + + # Add header information + header = f"# Income Statement data for {ticker.upper()} ({freq})\n" + header += f"# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n" + + return header + csv_string + + except Exception as e: + return f"Error retrieving income statement for {ticker}: {str(e)}" + + +def get_insider_transactions( + ticker: Annotated[str, "ticker symbol of the company"] +): + """Get insider transactions data from yfinance.""" + try: + ticker_obj = yf.Ticker(ticker.upper()) + data = ticker_obj.insider_transactions + + if data is None or 
data.empty: + return f"No insider transactions data found for symbol '{ticker}'" + + # Convert to CSV string for consistency with other functions + csv_string = data.to_csv() + + # Add header information + header = f"# Insider Transactions data for {ticker.upper()}\n" + header += f"# Data retrieved on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n" + + return header + csv_string + + except Exception as e: + return f"Error retrieving insider transactions for {ticker}: {str(e)}" \ No newline at end of file From 7bb2941b07b091ab8327bdce1aaafafef3447ddd Mon Sep 17 00:00:00 2001 From: Edward Sun Date: Mon, 6 Oct 2025 19:58:01 -0700 Subject: [PATCH 24/26] optimized yfin fetching to be much faster --- test.py | 11 +++ tradingagents/dataflows/y_finance.py | 129 ++++++++++++++++++++++++--- 2 files changed, 130 insertions(+), 10 deletions(-) create mode 100644 test.py diff --git a/test.py b/test.py new file mode 100644 index 00000000..b73783e1 --- /dev/null +++ b/test.py @@ -0,0 +1,11 @@ +import time +from tradingagents.dataflows.y_finance import get_YFin_data_online, get_stock_stats_indicators_window, get_balance_sheet as get_yfinance_balance_sheet, get_cashflow as get_yfinance_cashflow, get_income_statement as get_yfinance_income_statement, get_insider_transactions as get_yfinance_insider_transactions + +print("Testing optimized implementation with 30-day lookback:") +start_time = time.time() +result = get_stock_stats_indicators_window("AAPL", "macd", "2024-11-01", 30) +end_time = time.time() + +print(f"Execution time: {end_time - start_time:.2f} seconds") +print(f"Result length: {len(result)} characters") +print(result) diff --git a/tradingagents/dataflows/y_finance.py b/tradingagents/dataflows/y_finance.py index 76b6cf4d..da7273d5 100644 --- a/tradingagents/dataflows/y_finance.py +++ b/tradingagents/dataflows/y_finance.py @@ -137,16 +137,42 @@ def get_stock_stats_indicators_window( curr_date_dt = datetime.strptime(curr_date, "%Y-%m-%d") before = curr_date_dt - 
relativedelta(days=look_back_days) - # online gathering only - ind_string = "" - while curr_date_dt >= before: - indicator_value = get_stockstats_indicator( - symbol, indicator, curr_date_dt.strftime("%Y-%m-%d") - ) - - ind_string += f"{curr_date_dt.strftime('%Y-%m-%d')}: {indicator_value}\n" - - curr_date_dt = curr_date_dt - relativedelta(days=1) + # Optimized: Get stock data once and calculate indicators for all dates + try: + indicator_data = _get_stock_stats_bulk(symbol, indicator, curr_date) + + # Generate the date range we need + current_dt = curr_date_dt + date_values = [] + + while current_dt >= before: + date_str = current_dt.strftime('%Y-%m-%d') + + # Look up the indicator value for this date + if date_str in indicator_data: + indicator_value = indicator_data[date_str] + else: + indicator_value = "N/A: Not a trading day (weekend or holiday)" + + date_values.append((date_str, indicator_value)) + current_dt = current_dt - relativedelta(days=1) + + # Build the result string + ind_string = "" + for date_str, value in date_values: + ind_string += f"{date_str}: {value}\n" + + except Exception as e: + print(f"Error getting bulk stockstats data: {e}") + # Fallback to original implementation if bulk method fails + ind_string = "" + curr_date_dt = datetime.strptime(curr_date, "%Y-%m-%d") + while curr_date_dt >= before: + indicator_value = get_stockstats_indicator( + symbol, indicator, curr_date_dt.strftime("%Y-%m-%d") + ) + ind_string += f"{curr_date_dt.strftime('%Y-%m-%d')}: {indicator_value}\n" + curr_date_dt = curr_date_dt - relativedelta(days=1) result_str = ( f"## {indicator} values from {before.strftime('%Y-%m-%d')} to {end_date}:\n\n" @@ -158,6 +184,89 @@ def get_stock_stats_indicators_window( return result_str +def _get_stock_stats_bulk( + symbol: Annotated[str, "ticker symbol of the company"], + indicator: Annotated[str, "technical indicator to calculate"], + curr_date: Annotated[str, "current date for reference"] +) -> dict: + """ + Optimized bulk 
calculation of stock stats indicators. + Fetches data once and calculates indicator for all available dates. + Returns dict mapping date strings to indicator values. + """ + from .config import get_config + import pandas as pd + from stockstats import wrap + import os + + config = get_config() + online = config["data_vendors"]["technical_indicators"] != "local" + + if not online: + # Local data path + try: + data = pd.read_csv( + os.path.join( + config.get("data_cache_dir", "data"), + f"{symbol}-YFin-data-2015-01-01-2025-03-25.csv", + ) + ) + df = wrap(data) + except FileNotFoundError: + raise Exception("Stockstats fail: Yahoo Finance data not fetched yet!") + else: + # Online data fetching with caching + today_date = pd.Timestamp.today() + curr_date_dt = pd.to_datetime(curr_date) + + end_date = today_date + start_date = today_date - pd.DateOffset(years=15) + start_date_str = start_date.strftime("%Y-%m-%d") + end_date_str = end_date.strftime("%Y-%m-%d") + + os.makedirs(config["data_cache_dir"], exist_ok=True) + + data_file = os.path.join( + config["data_cache_dir"], + f"{symbol}-YFin-data-{start_date_str}-{end_date_str}.csv", + ) + + if os.path.exists(data_file): + data = pd.read_csv(data_file) + data["Date"] = pd.to_datetime(data["Date"]) + else: + data = yf.download( + symbol, + start=start_date_str, + end=end_date_str, + multi_level_index=False, + progress=False, + auto_adjust=True, + ) + data = data.reset_index() + data.to_csv(data_file, index=False) + + df = wrap(data) + df["Date"] = df["Date"].dt.strftime("%Y-%m-%d") + + # Calculate the indicator for all rows at once + df[indicator] # This triggers stockstats to calculate the indicator + + # Create a dictionary mapping date strings to indicator values + result_dict = {} + for _, row in df.iterrows(): + date_str = row["Date"] + indicator_value = row[indicator] + + # Handle NaN/None values + if pd.isna(indicator_value): + result_dict[date_str] = "N/A" + else: + result_dict[date_str] = str(indicator_value) + + 
return result_dict + + def get_stockstats_indicator( symbol: Annotated[str, "ticker symbol of the company"], indicator: Annotated[str, "technical indicator to get the analysis and report of"], From a5dcc7da452e2840fedea4a4beede3175c8ccd62 Mon Sep 17 00:00:00 2001 From: Edward Sun Date: Mon, 6 Oct 2025 20:33:12 -0700 Subject: [PATCH 25/26] update readme --- README.md | 2 +- pyproject.toml | 1 + uv.lock | 92 ++++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 92 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index f774ec2a..356ad5bc 100644 --- a/README.md +++ b/README.md @@ -127,7 +127,7 @@ cp .env.example .env # Edit .env with your actual API keys ``` -**Note:** The default configuration uses [Alpha Vantage](https://www.alphavantage.co/) for fundamental and news data. You can get a free API key from their website, or upgrade to [Alpha Vantage Premium](https://www.alphavantage.co/premium/) for higher rate limits and more stable access. If you prefer to use OpenAI for these data sources instead, you can modify the data vendor settings in `tradingagents/default_config.py`. +**Note:** We are happy to partner with Alpha Vantage to provide robust API support for TradingAgents. You can get a free AlphaVantage API [here](https://www.alphavantage.co/support/#api-key). Typically the quota is sufficient for performing complex tasks with TradingAgents thanks to Alpha Vantage’s open-source support program. If you prefer to use OpenAI for these data sources instead, you can modify the data vendor settings in `tradingagents/default_config.py`. 
### CLI Usage diff --git a/pyproject.toml b/pyproject.toml index 4b5793d1..63af4721 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,6 +12,7 @@ dependencies = [ "eodhd>=1.0.32", "feedparser>=6.0.11", "finnhub-python>=2.4.23", + "grip>=4.6.2", "langchain-anthropic>=0.3.15", "langchain-experimental>=0.3.4", "langchain-google-genai>=2.1.5", diff --git a/uv.lock b/uv.lock index e57ce7e5..e4a5030c 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.10" resolution-markers = [ "python_full_version >= '3.13'", @@ -358,6 +358,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/99/37/e8730c3587a65eb5645d4aba2d27aae48e8003614d6aaf15dda67f702f1f/bidict-0.23.1-py3-none-any.whl", hash = "sha256:5dae8d4d79b552a71cbabc7deb25dfe8ce710b17ff41711e13010ead2abfc3e5", size = 32764, upload-time = "2024-02-18T19:09:04.156Z" }, ] +[[package]] +name = "blinker" +version = "1.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/28/9b3f50ce0e048515135495f198351908d99540d69bfdc8c1d15b73dc55ce/blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf", size = 22460, upload-time = "2024-11-08T17:25:47.436Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/10/cb/f2ad4230dc2eb1a74edf38f1a38b9b52277f75bef262d8908e60d957e13c/blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc", size = 8458, upload-time = "2024-11-08T17:25:46.184Z" }, +] + [[package]] name = "bs4" version = "0.0.2" @@ -791,6 +800,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" }, ] +[[package]] +name = "docopt" +version 
= "0.6.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/55/8f8cab2afd404cf578136ef2cc5dfb50baa1761b68c9da1fb1e4eed343c9/docopt-0.6.2.tar.gz", hash = "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491", size = 25901, upload-time = "2014-06-16T11:18:57.406Z" } + [[package]] name = "durationpy" version = "0.10" @@ -896,6 +911,23 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/17/09/9240b2a222717e7bda81f954047b662f2744aaeb6b29d62e89bb5c49dd16/finnhub_python-2.4.23-py3-none-any.whl", hash = "sha256:27585dfa32a92b435bd69bfbc9062bcf41a3b35302b654062816640f67b89eea", size = 11897, upload-time = "2025-03-16T06:11:36.085Z" }, ] +[[package]] +name = "flask" +version = "3.1.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "blinker" }, + { name = "click" }, + { name = "itsdangerous" }, + { name = "jinja2" }, + { name = "markupsafe" }, + { name = "werkzeug" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/6d/cfe3c0fcc5e477df242b98bfe186a4c34357b4847e87ecaef04507332dab/flask-3.1.2.tar.gz", hash = "sha256:bf656c15c80190ed628ad08cdfd3aaa35beb087855e2f494910aa3774cc4fd87", size = 720160, upload-time = "2025-08-19T21:03:21.205Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/f9/7f9263c5695f4bd0023734af91bedb2ff8209e8de6ead162f35d8dc762fd/flask-3.1.2-py3-none-any.whl", hash = "sha256:ca1d8112ec8a6158cc29ea4858963350011b5c846a414cdb7a954aa9e967d03c", size = 103308, upload-time = "2025-08-19T21:03:19.499Z" }, +] + [[package]] name = "flatbuffers" version = "25.2.10" @@ -1182,6 +1214,24 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5c/4f/aab73ecaa6b3086a4c89863d94cf26fa84cbff63f52ce9bc4342b3087a06/greenlet-3.2.3-cp314-cp314-win_amd64.whl", hash = "sha256:8c47aae8fbbfcf82cc13327ae802ba13c9c36753b67e760023fd116bc124a62a", size = 301236, upload-time = "2025-06-05T16:15:20.111Z" }, ] +[[package]] +name 
= "grip" +version = "4.6.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docopt" }, + { name = "flask" }, + { name = "markdown" }, + { name = "path-and-address" }, + { name = "pygments" }, + { name = "requests" }, + { name = "werkzeug" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f4/3f/e8bc3ea1f24877292fa3962ad9e0234ad4bc787dc1eb5bd08c35afd0ceca/grip-4.6.2.tar.gz", hash = "sha256:3cf6dce0aa06edd663176914069af83f19dcb90f3a9c401271acfa71872f8ce3", size = 152280, upload-time = "2023-10-12T05:08:02.272Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/2c/ed06b092d21c66ea3b42408aecb62c3b16748f34205c27e6072186626088/grip-4.6.2-py3-none-any.whl", hash = "sha256:f2192e9d75b603d3de4a2c80ba70d82c7d9ebaade650306e41a7583966d0ed88", size = 138494, upload-time = "2023-10-12T05:07:59.686Z" }, +] + [[package]] name = "grpcio" version = "1.73.0" @@ -1424,6 +1474,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/59/91/aa6bde563e0085a02a435aa99b49ef75b0a4b062635e606dab23ce18d720/inflection-0.5.1-py2.py3-none-any.whl", hash = "sha256:f38b2b640938a4f35ade69ac3d053042959b62a0f1076a5bbaa1b9526605a8a2", size = 9454, upload-time = "2020-08-22T08:16:27.816Z" }, ] +[[package]] +name = "itsdangerous" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9c/cb/8ac0172223afbccb63986cc25049b154ecfb5e85932587206f42317be31d/itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173", size = 54410, upload-time = "2024-04-16T21:28:15.614Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/96/92447566d16df59b2a776c0fb82dbc4d9e07cd95062562af01e408583fc4/itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef", size = 16234, upload-time = "2024-04-16T21:28:14.499Z" }, +] + [[package]] name = "jinja2" version = 
"3.1.6" @@ -1831,7 +1890,7 @@ name = "langgraph-checkpoint" version = "2.0.26" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "langchain-core", marker = "python_full_version < '4.0'" }, + { name = "langchain-core", marker = "python_full_version < '4'" }, { name = "ormsgpack" }, ] sdist = { url = "https://files.pythonhosted.org/packages/c5/61/e2518ac9216a4e9f4efda3ac61595e3c9e9ac00833141c9688e8d56bd7eb/langgraph_checkpoint-2.0.26.tar.gz", hash = "sha256:2b800195532d5efb079db9754f037281225ae175f7a395523f4bf41223cbc9d6", size = 37874, upload-time = "2025-05-15T17:31:22.466Z" } @@ -1987,6 +2046,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f1/ab/fdbbd91d8d82bf1a723ba88ec3e3d76c022b53c391b0c13cad441cdb8f9e/lxml-5.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b12cb6527599808ada9eb2cd6e0e7d3d8f13fe7bbb01c6311255a15ded4c7ab4", size = 3487862, upload-time = "2025-04-23T01:49:36.296Z" }, ] +[[package]] +name = "markdown" +version = "3.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8d/37/02347f6d6d8279247a5837082ebc26fc0d5aaeaf75aa013fcbb433c777ab/markdown-3.9.tar.gz", hash = "sha256:d2900fe1782bd33bdbbd56859defef70c2e78fc46668f8eb9df3128138f2cb6a", size = 364585, upload-time = "2025-09-04T20:25:22.885Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/ae/44c4a6a4cbb496d93c6257954260fe3a6e91b7bed2240e5dad2a717f5111/markdown-3.9-py3-none-any.whl", hash = "sha256:9f4d91ed810864ea88a6f32c07ba8bee1346c0cc1f6b1f9f6c822f2a9667d280", size = 107441, upload-time = "2025-09-04T20:25:21.784Z" }, +] + [[package]] name = "markdown-it-py" version = "3.0.0" @@ -3429,6 +3497,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/12/18/35d1d947553d24909dca37e2ff11720eecb601360d1bac8d7a9a1bc7eb08/parsel-1.10.0-py2.py3-none-any.whl", hash = "sha256:6a0c28bd81f9df34ba665884c88efa0b18b8d2c44c81f64e27f2f0cb37d46169", size = 17266, upload-time = 
"2025-01-17T15:38:27.83Z" }, ] +[[package]] +name = "path-and-address" +version = "2.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2b/b5/749fab14d9e84257f3b0583eedb54e013422b6c240491a4ae48d9ea5e44f/path-and-address-2.0.1.zip", hash = "sha256:e96363d982b3a2de8531f4cd5f086b51d0248b58527227d43cf5014d045371b7", size = 6503, upload-time = "2016-07-21T02:56:09.794Z" } + [[package]] name = "peewee" version = "3.18.1" @@ -4700,6 +4774,7 @@ dependencies = [ { name = "eodhd" }, { name = "feedparser" }, { name = "finnhub-python" }, + { name = "grip" }, { name = "langchain-anthropic" }, { name = "langchain-experimental" }, { name = "langchain-google-genai" }, @@ -4730,6 +4805,7 @@ requires-dist = [ { name = "eodhd", specifier = ">=1.0.32" }, { name = "feedparser", specifier = ">=6.0.11" }, { name = "finnhub-python", specifier = ">=2.4.23" }, + { name = "grip", specifier = ">=4.6.2" }, { name = "langchain-anthropic", specifier = ">=0.3.15" }, { name = "langchain-experimental", specifier = ">=0.3.4" }, { name = "langchain-google-genai", specifier = ">=2.1.5" }, @@ -5039,6 +5115,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" }, ] +[[package]] +name = "werkzeug" +version = "3.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/69/83029f1f6300c5fb2471d621ab06f6ec6b3324685a2ce0f9777fd4a8b71e/werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746", size = 806925, upload-time = "2024-11-08T15:52:18.093Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/52/24/ab44c871b0f07f491e5d2ad12c9bd7358e527510618cb1b803a88e986db1/werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e", size = 224498, upload-time = "2024-11-08T15:52:16.132Z" }, +] + [[package]] name = "wrapt" version = "1.17.2" From b2ef960da7b82152b043a99e9de25277eb3a5b0e Mon Sep 17 00:00:00 2001 From: Edward Sun Date: Thu, 9 Oct 2025 00:32:04 -0700 Subject: [PATCH 26/26] updated readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 356ad5bc..7e90c60f 100644 --- a/README.md +++ b/README.md @@ -127,7 +127,7 @@ cp .env.example .env # Edit .env with your actual API keys ``` -**Note:** We are happy to partner with Alpha Vantage to provide robust API support for TradingAgents. You can get a free AlphaVantage API [here](https://www.alphavantage.co/support/#api-key). Typically the quota is sufficient for performing complex tasks with TradingAgents thanks to Alpha Vantage’s open-source support program. If you prefer to use OpenAI for these data sources instead, you can modify the data vendor settings in `tradingagents/default_config.py`. +**Note:** We are happy to partner with Alpha Vantage to provide robust API support for TradingAgents. You can get a free AlphaVantage API [here](https://www.alphavantage.co/support/#api-key), TradingAgents-sourced requests also have increased rate limits to 60 requests per minute with no daily limits. Typically the quota is sufficient for performing complex tasks with TradingAgents thanks to Alpha Vantage’s open-source support program. If you prefer to use OpenAI for these data sources instead, you can modify the data vendor settings in `tradingagents/default_config.py`. ### CLI Usage