commit
afa2ab461c
|
|
@ -0,0 +1,100 @@
|
|||
# TradingAgents Environment Configuration
|
||||
# Copy this file to .env and fill in your values
|
||||
|
||||
# =============================================================================
|
||||
# API Keys (Required)
|
||||
# =============================================================================
|
||||
OPENAI_API_KEY=your_openai_api_key_here
|
||||
FINNHUB_API_KEY=your_finnhub_api_key_here
|
||||
|
||||
# Optional API Keys
|
||||
OPENROUTER_API_KEY=your_openrouter_api_key_here
|
||||
GOOGLE_API_KEY=your_google_api_key_here
|
||||
ANTHROPIC_API_KEY=your_anthropic_api_key_here
|
||||
|
||||
# Reddit API (Optional - for social media analysis)
|
||||
REDDIT_CLIENT_ID=your_reddit_client_id
|
||||
REDDIT_CLIENT_SECRET=your_reddit_client_secret
|
||||
REDDIT_USER_AGENT=TradingAgents/1.0
|
||||
|
||||
# =============================================================================
|
||||
# SSL/TLS Certificate Configuration (OPTIONAL)
|
||||
# =============================================================================
|
||||
|
||||
# Certificate Bundle Path (ONLY set if you need a custom certificate bundle)
|
||||
# If not set, system default SSL behavior is used
|
||||
# Common locations:
|
||||
# - macOS: /etc/ssl/cert.pem
|
||||
# - Ubuntu/Debian: /etc/ssl/certs/ca-certificates.crt
|
||||
# - CentOS/RHEL: /etc/pki/tls/certs/ca-bundle.crt
|
||||
# - Custom: /path/to/your/custom-ca-bundle.crt
|
||||
# REQUESTS_CA_BUNDLE=/etc/ssl/cert.pem
|
||||
# CURL_CA_BUNDLE=/etc/ssl/cert.pem
|
||||
|
||||
# SSL Verification (ONLY set to false if needed for development/testing)
|
||||
# If not set, SSL verification is enabled by default (recommended)
|
||||
# SSL_VERIFY=false
|
||||
|
||||
# HTTP Timeout (ONLY set if default timeout is insufficient)
|
||||
# If not set, uses reasonable defaults
|
||||
# HTTP_TIMEOUT=60
|
||||
|
||||
# =============================================================================
|
||||
# Proxy Configuration (ONLY if behind corporate firewall)
|
||||
# =============================================================================
|
||||
|
||||
# HTTP/HTTPS Proxy Settings (ONLY set if required by your network)
|
||||
# If not set, direct connections are used
|
||||
# HTTP_PROXY=http://proxy.company.com:8080
|
||||
# HTTPS_PROXY=https://proxy.company.com:8080
|
||||
|
||||
# Proxy with authentication
|
||||
# HTTP_PROXY=http://username:password@proxy.company.com:8080
|
||||
# HTTPS_PROXY=https://username:password@proxy.company.com:8080
|
||||
|
||||
# =============================================================================
|
||||
# Application Settings
|
||||
# =============================================================================
|
||||
|
||||
# Results Directory
|
||||
TRADINGAGENTS_RESULTS_DIR=./results
|
||||
|
||||
# Ollama Configuration (if using local Ollama)
|
||||
OLLAMA_HOST=localhost
|
||||
|
||||
# Optional Configuration
|
||||
# DEBUG=True
|
||||
# LOG_LEVEL=INFO
|
||||
|
||||
# =============================================================================
|
||||
# SSL Certificate Examples for Common Enterprise Environments
|
||||
# =============================================================================
|
||||
|
||||
# Example 1: Using system certificate store (macOS)
|
||||
# REQUESTS_CA_BUNDLE=/System/Library/OpenSSL/certs/cert.pem
|
||||
|
||||
# Example 2: Using system certificate store (Ubuntu/Debian)
|
||||
# REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
|
||||
|
||||
# Example 3: Using custom corporate certificate bundle
|
||||
# REQUESTS_CA_BUNDLE=/usr/local/share/ca-certificates/corporate-ca-bundle.crt
|
||||
|
||||
# Example 4: Disabling SSL verification (development only)
|
||||
# SSL_VERIFY=false
|
||||
|
||||
# =============================================================================
|
||||
# Troubleshooting SSL Issues
|
||||
# =============================================================================
|
||||
|
||||
# If you encounter SSL certificate errors:
|
||||
# 1. Run the diagnostic tool: python diagnose_ssl.py
|
||||
# 2. Check if your organization uses a custom CA
|
||||
# 3. Ask your IT department for the corporate certificate bundle
|
||||
# 4. Try using certifi's bundle: pip install certifi
|
||||
# 5. Set REQUESTS_CA_BUNDLE to certifi's location (usually shown by diagnose_ssl.py)
|
||||
|
||||
# Common SSL Error Solutions:
|
||||
# - "certificate verify failed": Set REQUESTS_CA_BUNDLE to correct cert bundle
|
||||
# - "SSL: WRONG_VERSION_NUMBER": Check if you're behind a proxy
|
||||
# - "Connection timeout": Increase HTTP_TIMEOUT or check proxy settings
|
||||
# - "Name or service not known": Check DNS settings and proxy configuration
|
||||
42
README.md
42
README.md
|
|
@ -49,7 +49,7 @@
|
|||
|
||||
## TradingAgents Framework
|
||||
|
||||
TradingAgents is a multi-agent trading framework that mirrors the dynamics of real-world trading firms. By deploying specialized LLM-powered agents: from fundamental analysts, sentiment experts, and technical analysts, to trader, risk management team, the platform collaboratively evaluates market conditions and informs trading decisions. Moreover, these agents engage in dynamic discussions to pinpoint the optimal strategy.
|
||||
TradingAgents is a multi-agent trading framework that mirrors the dynamics of real-world trading firms. By deploying specialized LLM-powered agents — from fundamental analysts, sentiment experts, and trade planners to traders and the risk management team — the platform collaboratively evaluates market conditions and informs trading decisions. Moreover, these agents engage in dynamic discussions to pinpoint the optimal strategy.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/schema.png" style="width: 100%; height: auto;">
|
||||
|
|
@ -63,7 +63,7 @@ Our framework decomposes complex trading tasks into specialized roles. This ensu
|
|||
- Fundamentals Analyst: Evaluates company financials and performance metrics, identifying intrinsic values and potential red flags.
|
||||
- Sentiment Analyst: Analyzes social media and public sentiment using sentiment scoring algorithms to gauge short-term market mood.
|
||||
- News Analyst: Monitors global news and macroeconomic indicators, interpreting the impact of events on market conditions.
|
||||
- Technical Analyst: Utilizes technical indicators (like MACD and RSI) to detect trading patterns and forecast price movements.
|
||||
- Trade Planner: Synthesizes the other analysts' findings into a concrete trade plan, weighing entries, position sizing, and exit considerations.
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/analyst.png" width="100%" style="display: inline-block; margin: 0 2%;">
|
||||
|
|
@ -101,15 +101,19 @@ git clone https://github.com/TauricResearch/TradingAgents.git
|
|||
cd TradingAgents
|
||||
```
|
||||
|
||||
Create a virtual environment in any of your favorite environment managers:
|
||||
Create a virtual environment in any of your favorite environment managers. Here are some indications if you've installed `uv`:
|
||||
```bash
|
||||
conda create -n tradingagents python=3.13
|
||||
conda activate tradingagents
|
||||
uv venv
|
||||
```
|
||||
|
||||
Activate the virtual environment:
|
||||
```bash
|
||||
source .venv/bin/activate  # macOS/Linux — on Windows use: .venv\Scripts\activate
|
||||
```
|
||||
|
||||
Install dependencies:
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
uv sync
|
||||
```
|
||||
|
||||
### Required APIs
|
||||
|
|
@ -151,6 +155,32 @@ An interface will appear showing results as they load, letting you track the age
|
|||
<img src="assets/cli/cli_transaction.png" width="100%" style="display: inline-block; margin: 0 2%;">
|
||||
</p>
|
||||
|
||||
## Web Frontend (HTMX/FastAPI)
|
||||
|
||||
In addition to the CLI, a new web-based frontend is available to visualize the agent communication process in real-time. It allows you to set configuration parameters, start the trading analysis, and observe the step-by-step execution of agents and tools, including their outputs and any errors.
|
||||
|
||||
### Running the Web Frontend
|
||||
|
||||
1. Ensure you have installed all dependencies using `uv sync`.
|
||||
2. Navigate to the project root directory in your terminal.
|
||||
3. Start the FastAPI server:
|
||||
```bash
|
||||
uvicorn webapp.main:app --reload
|
||||
```
|
||||
4. Open your web browser and go to `http://127.0.0.1:8000`.
|
||||
5. Enter a company symbol (e.g., `AAPL`) in the configuration form and click "Start Process" to begin the analysis.
|
||||
|
||||
### Rendered Reports (Markdown Support)
|
||||
|
||||
Agent-generated reports (analysis summaries, debate histories, plans, and risk assessments) are produced in Markdown. The web frontend now renders these Markdown documents as styled HTML instead of showing raw markup. This includes support for:
|
||||
|
||||
- Headings, emphasis, lists, and blockquotes
|
||||
- Tables (for structured metrics)
|
||||
- Fenced code blocks and inline code
|
||||
|
||||
Security: Markdown is sanitized server‑side using `bleach` to strip unsafe tags/attributes while preserving semantic structure. If you need to extend allowed tags (e.g., to permit additional formatting), modify `ALLOWED_TAGS` / `ALLOWED_ATTRIBUTES` in `webapp/main.py`.
|
||||
|
||||
|
||||
## TradingAgents Package
|
||||
|
||||
### Implementation Details
|
||||
|
|
|
|||
|
|
@ -0,0 +1,117 @@
|
|||
# SSL Certificate Bundle Configuration for TradingAgents
|
||||
|
||||
## Overview
|
||||
|
||||
This implementation provides flexible SSL/TLS certificate configuration for TradingAgents while maintaining backward compatibility. The system only applies custom SSL settings when explicitly configured via environment variables.
|
||||
|
||||
## Key Features
|
||||
|
||||
### 1. Environment Variable Based Configuration
|
||||
- `REQUESTS_CA_BUNDLE` or `CURL_CA_BUNDLE`: Path to custom certificate bundle
|
||||
- `SSL_VERIFY`: Enable/disable SSL verification (true/false)
|
||||
- `HTTP_TIMEOUT`: Custom timeout for HTTP requests (seconds)
|
||||
- `HTTP_PROXY`: HTTP proxy server
|
||||
- `HTTPS_PROXY`: HTTPS proxy server
|
||||
|
||||
### 2. Default Behavior Preservation
|
||||
- **If no environment variables are set**: Uses system default SSL behavior
|
||||
- **Only applies custom settings when explicitly configured**
|
||||
- **Empty or undefined variables are ignored**
|
||||
|
||||
### 3. Comprehensive Coverage
|
||||
- **LangChain LLM clients**: Custom SSL configuration for OpenAI, OpenRouter, etc.
|
||||
- **HTTP requests**: Custom configuration for Google News, Reddit APIs
|
||||
- **Global SSL setup**: Sets environment variables for libraries that respect them
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Basic Usage (No Custom SSL)
|
||||
```bash
|
||||
# No SSL environment variables set
|
||||
# Uses system default SSL behavior
|
||||
python webapp/main.py
|
||||
```
|
||||
|
||||
### Custom Certificate Bundle
|
||||
```bash
|
||||
# Use custom corporate certificate bundle
|
||||
export REQUESTS_CA_BUNDLE=/path/to/corporate-ca-bundle.crt
|
||||
python webapp/main.py
|
||||
```
|
||||
|
||||
### Development/Testing (Disable SSL Verification)
|
||||
```bash
|
||||
# Disable SSL verification (NOT recommended for production)
|
||||
export SSL_VERIFY=false
|
||||
python webapp/main.py
|
||||
```
|
||||
|
||||
### Behind Corporate Proxy
|
||||
```bash
|
||||
# Configure proxy settings
|
||||
export HTTP_PROXY=http://proxy.company.com:8080
|
||||
export HTTPS_PROXY=https://proxy.company.com:8080
|
||||
export REQUESTS_CA_BUNDLE=/etc/ssl/corporate-ca-bundle.crt
|
||||
python webapp/main.py
|
||||
```
|
||||
|
||||
## Files Modified
|
||||
|
||||
### Core Configuration
|
||||
- `tradingagents/default_config.py`: Added SSL configuration parameters
|
||||
- `tradingagents/dataflows/ssl_utils.py`: SSL utility functions (NEW)
|
||||
|
||||
### Integration Points
|
||||
- `tradingagents/graph/trading_graph.py`: LLM client SSL configuration
|
||||
- `tradingagents/dataflows/googlenews_utils.py`: HTTP requests SSL configuration
|
||||
- `tradingagents/dataflows/interface.py`: Integration with SSL configuration
|
||||
|
||||
### Documentation and Tools
|
||||
- `.env.example`: Updated with SSL configuration examples
|
||||
- `diagnose_ssl.py`: SSL diagnostic tool (NEW)
|
||||
- `test_ssl_config.py`: SSL configuration test suite (NEW)
|
||||
|
||||
## Testing
|
||||
|
||||
Run the diagnostic tool to check your SSL configuration:
|
||||
```bash
|
||||
python diagnose_ssl.py
|
||||
```
|
||||
|
||||
Run the test suite to verify SSL configuration behavior:
|
||||
```bash
|
||||
python test_ssl_config.py
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common SSL Errors and Solutions
|
||||
|
||||
1. **Certificate verification failed**
|
||||
- Set `REQUESTS_CA_BUNDLE` to correct certificate bundle path
|
||||
- Check if your organization uses custom CA certificates
|
||||
|
||||
2. **SSL: WRONG_VERSION_NUMBER**
|
||||
- Usually indicates proxy configuration issues
|
||||
- Set appropriate `HTTP_PROXY` and `HTTPS_PROXY` variables
|
||||
|
||||
3. **Connection timeout**
|
||||
- Increase `HTTP_TIMEOUT` value
|
||||
- Check network connectivity and proxy settings
|
||||
|
||||
4. **Name or service not known**
|
||||
- Check DNS settings
|
||||
- Verify proxy configuration
|
||||
|
||||
### Getting Help
|
||||
|
||||
1. Run `python diagnose_ssl.py` for comprehensive SSL diagnostics
|
||||
2. Check your organization's IT documentation for certificate bundles
|
||||
3. Contact your IT department for corporate proxy and certificate information
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- **Never disable SSL verification in production**
|
||||
- **Use custom certificate bundles for corporate environments**
|
||||
- **Keep certificate bundles updated**
|
||||
- **Secure proxy credentials if using authenticated proxies**
|
||||
|
|
@ -0,0 +1,55 @@
|
|||
#!/usr/bin/env python3
"""
TradingAgents Web Application Launcher

Convenience entry point that boots the FastAPI app (webapp.main:app)
under uvicorn with development-friendly defaults (auto-reload, info-level
logging, access log enabled).
"""

import os
import sys
from pathlib import Path

import uvicorn


def main():
    """Start the TradingAgents webapp with uvicorn."""
    # The directory containing this launcher doubles as the project root
    # for both import resolution and relative paths.
    project_root = Path(__file__).parent.absolute()
    root_str = str(project_root)

    # Make `webapp.*` importable no matter how the script was invoked.
    if root_str not in sys.path:
        sys.path.insert(0, root_str)

    # Relative paths (templates, static assets, results dir) assume the
    # current working directory is the project root.
    os.chdir(project_root)

    # Keyword arguments forwarded to uvicorn (development defaults).
    server_options = {
        "app": "webapp.main:app",
        "host": "localhost",
        "port": 8000,
        "reload": True,  # Enable auto-reload for development
        "reload_dirs": [root_str],  # Watch for changes in project directory
        "log_level": "info",
        "access_log": True,
    }

    print("🚀 Starting TradingAgents WebApp...")
    print(f"📁 Project root: {project_root}")
    print(f"🌐 Server will be available at: http://localhost:{server_options['port']}")
    print("🔄 Auto-reload is enabled for development")
    print("⚠️ Make sure you have set up your .env file with required API keys")
    print("-" * 60)

    try:
        # Blocks until the server is stopped.
        uvicorn.run(**server_options)
    except KeyboardInterrupt:
        # Ctrl-C: exit cleanly without a traceback.
        print("\n👋 Shutting down TradingAgents WebApp...")
    except Exception as e:
        print(f"❌ Error starting the application: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||
|
|
@ -788,7 +788,12 @@ def run_analysis():
|
|||
config["llm_provider"] = selections["llm_provider"].lower()
|
||||
config["user_position"] = selections["user_position"]
|
||||
config["cost_per_trade"] = selections["cost_per_trade"]
|
||||
|
||||
|
||||
print("\nConfiguration:")
|
||||
for key, value in config.items():
|
||||
print(f" {key}: {value}")
|
||||
print("")
|
||||
|
||||
# Initialize the graph
|
||||
graph = TradingAgentsGraph(
|
||||
[analyst.value for analyst in selections["analysts"]], config=config, debug=True
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
import os
|
||||
import questionary
|
||||
from typing import List, Optional, Tuple, Dict
|
||||
|
||||
|
|
@ -147,11 +148,13 @@ def select_shallow_thinking_agent(provider) -> str:
|
|||
"openrouter": [
|
||||
("xAI: Grok 4 Fast (free)", "x-ai/grok-4-fast:free"),
|
||||
("DeepSeek: DeepSeek V3.1 (free)", "deepseek/deepseek-chat-v3.1:free"),
|
||||
("Z.AI: GLM 4 32B", "z-ai/glm-4-32b"),
|
||||
("Meta: Llama 4 Scout", "meta-llama/llama-4-scout:free"),
|
||||
("Meta: Llama 3.3 8B Instruct - A lightweight and ultra-fast variant of Llama 3.3 70B", "meta-llama/llama-3.3-8b-instruct:free"),
|
||||
("google/gemini-2.0-flash-exp:free - Gemini Flash 2.0 offers a significantly faster time to first token", "google/gemini-2.0-flash-exp:free"),
|
||||
],
|
||||
"ollama": [
|
||||
("Granite 3.3 2B", "granite3.3:2b"),
|
||||
("llama3.1 local", "llama3.1"),
|
||||
("llama3.2 local", "llama3.2"),
|
||||
]
|
||||
|
|
@ -212,10 +215,12 @@ def select_deep_thinking_agent(provider) -> str:
|
|||
"openrouter": [
|
||||
("Qwen: Qwen3 235B A22B (free)", "qwen/qwen3-235b-a22b:free"),
|
||||
("OpenAI: gpt-oss-120b (free)", "openai/gpt-oss-120b:free"),
|
||||
("Z.AI: GLM 4 32B", "z-ai/glm-4-32b"),
|
||||
("DeepSeek V3 - a 685B-parameter, mixture-of-experts model", "deepseek/deepseek-chat-v3-0324:free"),
|
||||
("Deepseek - latest iteration of the flagship chat model family from the DeepSeek team.", "deepseek/deepseek-chat-v3-0324:free"),
|
||||
],
|
||||
"ollama": [
|
||||
("Granite 3.3 2B", "granite3.3:2b"),
|
||||
("llama3.1 local", "llama3.1"),
|
||||
("qwen3", "qwen3"),
|
||||
]
|
||||
|
|
@ -251,7 +256,7 @@ def select_llm_provider() -> tuple[str, str]:
|
|||
("Anthropic", "https://api.anthropic.com/"),
|
||||
("Google", "https://generativelanguage.googleapis.com/v1"),
|
||||
("OpenRouter", "https://openrouter.ai/api/v1"),
|
||||
("Ollama", "http://localhost:11434/v1"),
|
||||
("Ollama", f"http://{os.getenv('OLLAMA_HOST', 'localhost')}:11434/v1"),
|
||||
]
|
||||
|
||||
choice = questionary.select(
|
||||
|
|
|
|||
|
|
@ -0,0 +1,106 @@
|
|||
#!/usr/bin/env python3
"""
Certificate Bundle Combiner for TradingAgents

This script combines your corporate certificate bundle (Netskope) with
the certifi certificate bundle to ensure all certificates are available.

Paths are overridable via the CORPORATE_CA_BUNDLE and COMBINED_CA_BUNDLE
environment variables; the defaults preserve the original behavior.
"""

import os
import shutil
from pathlib import Path

# Default location used by the original author's machine; override via env
# vars so the script is usable on any host.
_DEFAULT_CERT_DIR = "/Users/kevin.bruton/netskope-certificates"
CORPORATE_BUNDLE = os.getenv(
    "CORPORATE_CA_BUNDLE", f"{_DEFAULT_CERT_DIR}/netskope-cert-bundle.pem"
)
COMBINED_BUNDLE = os.getenv(
    "COMBINED_CA_BUNDLE", f"{_DEFAULT_CERT_DIR}/combined-cert-bundle.pem"
)


def combine_certificate_bundles():
    """Combine corporate and certifi certificate bundles.

    Writes CORPORATE_BUNDLE followed by certifi's bundle into
    COMBINED_BUNDLE. Returns True on success, False otherwise.
    """
    print("🔗 Certificate Bundle Combiner")
    print("=" * 40)

    # certifi is optional at import time; fail with guidance if missing.
    try:
        import certifi
        certifi_bundle = certifi.where()
    except ImportError:
        print("❌ certifi package not found. Please install it: pip install certifi")
        return False

    print(f"📋 Corporate bundle: {CORPORATE_BUNDLE}")
    print(f"📋 Certifi bundle: {certifi_bundle}")
    print(f"📋 Combined bundle: {COMBINED_BUNDLE}")

    if not os.path.exists(CORPORATE_BUNDLE):
        print(f"❌ Corporate certificate bundle not found: {CORPORATE_BUNDLE}")
        return False

    try:
        with open(COMBINED_BUNDLE, 'w') as combined_file:
            # Corporate certificates first so they take precedence for
            # tools that stop at the first match.
            print("📝 Adding corporate certificates...")
            with open(CORPORATE_BUNDLE, 'r') as corp_file:
                combined_file.write(corp_file.read())

            combined_file.write("\n# Certifi certificates below\n")

            print("📝 Adding certifi certificates...")
            with open(certifi_bundle, 'r') as certifi_file:
                combined_file.write(certifi_file.read())

        print(f"✅ Combined certificate bundle created: {COMBINED_BUNDLE}")

        # World-readable so any local process can use it for verification.
        os.chmod(COMBINED_BUNDLE, 0o644)

        print("\n💡 Usage Instructions:")
        print("   Add this to your .env file:")
        print(f"   REQUESTS_CA_BUNDLE={COMBINED_BUNDLE}")
        print(f"   CURL_CA_BUNDLE={COMBINED_BUNDLE}")

        print("\n   Or export in your shell:")
        print(f"   export REQUESTS_CA_BUNDLE={COMBINED_BUNDLE}")
        print(f"   export CURL_CA_BUNDLE={COMBINED_BUNDLE}")

        return True

    except Exception as e:
        print(f"❌ Error creating combined bundle: {e}")
        return False


def test_combined_bundle():
    """Test the combined certificate bundle against a few HTTPS endpoints.

    Returns False if the bundle file does not exist; otherwise attempts
    each request and reports per-URL success/failure, returning True.
    """
    if not os.path.exists(COMBINED_BUNDLE):
        print("❌ Combined bundle not found. Run combine_certificate_bundles() first.")
        return False

    print(f"\n🧪 Testing combined certificate bundle: {COMBINED_BUNDLE}")

    import requests
    test_urls = [
        "https://www.google.com",
        "https://api.openai.com/v1/models",
        "https://openrouter.ai/api/v1/models",
    ]

    for url in test_urls:
        try:
            response = requests.get(url, verify=COMBINED_BUNDLE, timeout=10)
            print(f"✅ {url} - Status: {response.status_code}")
        except Exception as e:
            print(f"❌ {url} - Error: {e}")

    return True


if __name__ == "__main__":
    if combine_certificate_bundles():
        test_combined_bundle()
|
||||
|
|
@ -0,0 +1,42 @@
|
|||
"""Standalone diagnostic script to test a single LLM call with resilience.
|
||||
Run: python debug_llm_call.py --provider openai --model gpt-4o-mini --message "Test message".
|
||||
It will respect environment variables for keys and SSL the same way the graph does.
|
||||
"""
|
||||
import argparse
|
||||
import os
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
from tradingagents.graph.trading_graph import TradingAgentsGraph
|
||||
from langchain_core.messages import HumanMessage
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--provider', default=DEFAULT_CONFIG['llm_provider'])
|
||||
parser.add_argument('--model', default=DEFAULT_CONFIG['quick_think_llm'])
|
||||
parser.add_argument('--message', default='Say hello and include a short market summary placeholder.')
|
||||
args = parser.parse_args()
|
||||
|
||||
cfg = DEFAULT_CONFIG.copy()
|
||||
cfg['llm_provider'] = args.provider
|
||||
cfg['quick_think_llm'] = args.model
|
||||
cfg['deep_think_llm'] = args.model
|
||||
|
||||
graph = TradingAgentsGraph(config=cfg)
|
||||
# Build a minimal state for market analyst
|
||||
state = {
|
||||
'trade_date': '2025-09-29',
|
||||
'company_of_interest': 'AAPL',
|
||||
'messages': [HumanMessage(content=args.message)],
|
||||
}
|
||||
market_node = graph.graph_setup.analyst_nodes.get('market')
|
||||
if not market_node:
|
||||
print('Market node not found in graph setup.')
|
||||
return
|
||||
# Directly invoke underlying function if possible
|
||||
result_state = market_node(state)
|
||||
print('Result keys:', list(result_state.keys()))
|
||||
print('Market report snippet:', str(result_state.get('market_report',''))[:500])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
#!/usr/bin/env python3
"""
Debug script to test the TradingAgentsGraph streaming behavior
"""

import os
from dotenv import load_dotenv
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG

# Load environment variables
load_dotenv()

def debug_callback(state):
    """Debug callback to see what state is being passed"""
    keys_repr = list(state.keys()) if isinstance(state, dict) else 'Not a dict'
    print(f"\n🔍 CALLBACK RECEIVED:")
    print(f" State type: {type(state)}")
    print(f" State keys: {keys_repr}")

    if isinstance(state, dict):
        # Skip framework bookkeeping keys; truncate values for readability.
        for key, value in state.items():
            if key in ["__end__", "messages"]:
                continue
            print(f" {key}: {type(value)} - {str(value)[:100]}...")
    print("-" * 50)

def test_streaming():
    """Test the streaming functionality"""
    print("🚀 Testing TradingAgentsGraph streaming...")

    # Minimal configuration for a quick smoke test.
    config = DEFAULT_CONFIG.copy()
    config["llm_provider"] = "openai"
    config["quick_think_llm"] = "gpt-3.5-turbo"
    config["deep_think_llm"] = "gpt-4"

    try:
        print("📊 Initializing TradingAgentsGraph...")
        graph = TradingAgentsGraph(config=config)

        # Run one propagation, routing each intermediate state through
        # the debug callback above.
        print("🔄 Starting propagation with callback...")
        final_state, signal = graph.propagate(
            company_name="AAPL",
            trade_date="2024-01-01",
            on_step_callback=debug_callback,
        )

        print(f"✅ Propagation completed!")
        print(f"📈 Final signal: {signal}")
        print(f"🎯 Final state keys: {list(final_state.keys())}")

    except Exception as e:
        print(f"❌ Error during streaming test: {e}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    test_streaming()
|
||||
|
|
@ -0,0 +1,159 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
SSL Certificate Diagnostic Tool for TradingAgents
|
||||
|
||||
This script helps diagnose SSL/TLS certificate issues and provides guidance
|
||||
on how to configure certificate bundles properly.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import ssl
|
||||
import socket
|
||||
import requests
|
||||
from urllib.parse import urlparse
|
||||
from dotenv import load_dotenv
|
||||
load_dotenv()
|
||||
from tradingagents.dataflows.ssl_utils import get_certificate_info, get_ssl_config
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
|
||||
def test_ssl_connection(hostname, port=443):
    """Open a TLS connection to hostname:port and report the peer certificate.

    Returns True when the handshake succeeds, False on any failure.
    """
    print(f"\n🔒 Testing SSL connection to {hostname}:{port}")

    try:
        # Default context: system trust store, hostname checking enabled.
        ctx = ssl.create_default_context()
        with socket.create_connection((hostname, port), timeout=10) as raw_sock:
            with ctx.wrap_socket(raw_sock, server_hostname=hostname) as tls_sock:
                peer_cert = tls_sock.getpeercert()
                print(f"✅ SSL connection successful")
                print(f" Subject: {peer_cert.get('subject', 'Unknown')}")
                print(f" Issuer: {peer_cert.get('issuer', 'Unknown')}")
                print(f" Version: {peer_cert.get('version', 'Unknown')}")
                return True

    except Exception as e:
        print(f"❌ SSL connection failed: {e}")
        return False
|
||||
|
||||
|
||||
def test_requests_connection(url):
    """Test HTTP request with requests library.

    Returns True when the GET succeeds (even if the peer certificate could
    not be introspected), False on SSL or transport errors.
    """
    print(f"\n🌐 Testing HTTP request to {url}")

    try:
        response = requests.get(url, timeout=10)
    except requests.exceptions.SSLError as e:
        print(f"❌ SSL Error: {e}")
        return False
    except Exception as e:
        print(f"❌ Request failed: {e}")
        return False

    print(f"✅ HTTP request successful")
    print(f" Status: {response.status_code}")

    # Peer-certificate introspection via response.raw is best-effort:
    # urllib3 may have released the connection (or closed its socket)
    # once the body was read, so guard every attribute hop instead of
    # letting an AttributeError turn a successful request into a failure.
    try:
        sock = getattr(getattr(response.raw, 'connection', None), 'sock', None)
        cert_subject = sock.getpeercert().get('subject', 'Unknown') if sock else 'Unknown'
    except Exception:
        cert_subject = 'Unknown'
    print(f" SSL Cert: {cert_subject}")
    return True
|
||||
|
||||
|
||||
def test_with_custom_cert_bundle(url, cert_bundle_path):
    """Test HTTP request with custom certificate bundle.

    Returns False if the bundle file is missing or the request fails,
    True when the request succeeds using the bundle for verification.
    """
    print(f"\n🔐 Testing with custom cert bundle: {cert_bundle_path}")

    # Bail out early when the bundle path does not exist on disk.
    if not os.path.exists(cert_bundle_path):
        print(f"❌ Certificate bundle not found: {cert_bundle_path}")
        return False

    try:
        resp = requests.get(url, verify=cert_bundle_path, timeout=10)
        print(f"✅ Request with custom cert bundle successful")
        print(f" Status: {resp.status_code}")
        return True

    except Exception as e:
        print(f"❌ Request with custom cert bundle failed: {e}")
        return False
|
||||
|
||||
|
||||
def main():
    """Run the full SSL diagnostic: bundle info, config, live connection tests."""
    print("🔍 TradingAgents SSL Certificate Diagnostic Tool")
    print("=" * 50)

    # Report which certificate bundles are visible to this process.
    print("\n📋 Certificate Bundle Information:")
    cert_info = get_certificate_info()
    for key, value in cert_info.items():
        if isinstance(value, list):
            print(f" {key}: {', '.join(value) if value else 'None found'}")
        else:
            print(f" {key}: {value}")

    # Show the SSL settings the application would actually use.
    print(f"\n⚙️ Current SSL Configuration:")
    for key, value in get_ssl_config(DEFAULT_CONFIG).items():
        print(f" {key}: {value}")

    # Raw TLS handshakes against the endpoints the agents talk to.
    print(f"\n🎯 Testing SSL connections:")
    for hostname, port in (
        ("api.openai.com", 443),
        ("openrouter.ai", 443),
        ("generativelanguage.googleapis.com", 443),
        ("www.google.com", 443),
    ):
        test_ssl_connection(hostname, port)

    # Full HTTP requests through the requests library.
    print(f"\n🌍 Testing HTTP requests:")
    for url in (
        "https://api.openai.com/v1/models",
        "https://www.google.com/search?q=test",
        "https://openrouter.ai/api/v1/models",
    ):
        test_requests_connection(url)

    # If certifi is installed, verify its bundle works too.
    certifi_bundle = cert_info.get("certifi_bundle")
    if certifi_bundle and certifi_bundle != "Not available (certifi not installed)":
        print(f"\n🧪 Testing with certifi bundle:")
        test_with_custom_cert_bundle("https://www.google.com", certifi_bundle)

    # Static guidance, printed verbatim.
    for line in (
        "\n💡 Recommendations:",
        " 📋 Certificate Bundle Configuration:",
        " • Only set if you need a custom certificate bundle",
        " • If not set, system default SSL behavior is used",
        " export REQUESTS_CA_BUNDLE=/path/to/your/ca-bundle.crt",
        " export CURL_CA_BUNDLE=/path/to/your/ca-bundle.crt",
        "\n ⚠️ SSL Verification (use with caution):",
        " • Only disable for development/testing",
        " • If not set, SSL verification is enabled by default",
        " export SSL_VERIFY=false",
        "\n ⏱️ Timeout Configuration:",
        " • Only set if default timeout is insufficient",
        " export HTTP_TIMEOUT=60",
        "\n 🌐 Proxy Configuration:",
        " • Only required if behind corporate firewall",
        " export HTTP_PROXY=http://proxy.company.com:8080",
        " export HTTPS_PROXY=https://proxy.company.com:8080",
        "\n 📝 Configuration:",
        " • Add these to your .env file or export in shell",
        " • Leave unset to use system defaults",
        " • Only configure what you actually need",
    ):
        print(line)


if __name__ == "__main__":
    main()
|
||||
|
|
@ -0,0 +1,235 @@
|
|||
# HTMX Frontend Implementation Plan
|
||||
|
||||
This document outlines the architecture and step-by-step plan for building a new HTMX-based frontend for the TradingAgents project.
|
||||
|
||||
## 1. General Architecture
|
||||
|
||||
The frontend will be a single-page web application served by a lightweight Python backend (FastAPI). This backend will be responsible for serving the HTML, handling user requests to start the agent process, and providing real-time status updates. The frontend and backend code will be housed in a new top-level `webapp` directory to keep it separate from the core agent logic.
|
||||
|
||||
### Core Components:
|
||||
|
||||
* **FastAPI Backend:** A Python web server that will:
|
||||
* Serve the main `index.html` file.
|
||||
* Provide API endpoints for the frontend to interact with.
|
||||
* Run the `TradingAgentsGraph` in a background thread.
|
||||
* Maintain and serve the state of the execution process.
|
||||
* **HTMX Frontend:** The user interface, which will:
|
||||
* Display the configuration form and start button.
|
||||
* Show a hierarchical view of the agent execution process.
|
||||
* Poll the backend for status updates.
|
||||
* Display the content of selected process steps (reports, messages, errors) on the right side of the screen.
|
||||
* **Communication:** The frontend will communicate with the backend using a simple polling mechanism. The HTMX frontend will periodically request a status update from a `/status` endpoint. The backend will return a JSON object representing the current state of the execution tree. For displaying detailed content, the frontend will make specific requests to a `/content/{item_id}` endpoint.
|
||||
|
||||
## 2. Proposed Project Structure
|
||||
|
||||
To maintain separation of concerns, the new frontend code will live in a `webapp` directory.
|
||||
|
||||
```
|
||||
C:\Users\kevin\repo\TradingAgents\
|
||||
├───... (existing project files)
|
||||
└───webapp/
|
||||
├───main.py # FastAPI application
|
||||
├───static/
|
||||
│ └───styles.css # CSS for styling
|
||||
└───templates/
|
||||
├───index.html # Main HTML file
|
||||
└───_partials/
|
||||
├───left_panel.html # HTMX partial for the execution tree
|
||||
└───right_panel.html # HTMX partial for the content view
|
||||
```
|
||||
|
||||
## 3. Backend Implementation (FastAPI)
|
||||
|
||||
The `webapp/main.py` file will define the FastAPI application and its endpoints.
|
||||
|
||||
### API Endpoints:
|
||||
|
||||
* **`GET /`**: Serves the main `templates/index.html` page.
|
||||
* **`POST /start`**:
|
||||
* Accepts a JSON payload with the run configuration (`company_symbol`, etc.).
|
||||
* Initializes the `TradingAgentsGraph`.
|
||||
* Starts the `graph.propagate()` method in a background thread.
|
||||
* Returns an initial response that replaces the config form with the main progress bar.
|
||||
* **`GET /status`**:
|
||||
* This is the main polling endpoint for HTMX.
|
||||
* It will return an HTML partial (`_partials/left_panel.html`) rendered with the current state of the execution tree. The state will be stored in memory.
|
||||
* **`GET /content/{item_id}`**:
|
||||
* When a user clicks an item in the left panel, HTMX will call this endpoint.
|
||||
* It will retrieve the specific content for that `item_id` from the in-memory state.
|
||||
* It will return an HTML partial (`_partials/right_panel.html`) with the formatted content (e.g., a formatted report, a code block for a message, or a stack trace for an error).
|
||||
|
||||
### State Management & Integration:
|
||||
|
||||
To get real-time updates from the `TradingAgentsGraph`, we will need to instrument its execution. The plan is to modify the `TradingAgentsGraph` class slightly to accept a callback function.
|
||||
|
||||
1. **Modify `TradingAgentsGraph.__init__`**: Add an optional `on_step_end` callback parameter.
|
||||
2. **Callback Execution**: Inside the graph's execution logic (after each agent or tool runs), this callback will be invoked with the details of the completed step (e.g., node name, output, status).
|
||||
3. **Update Global State**: The callback function, defined in `webapp/main.py`, will update a global in-memory dictionary that represents the hierarchical execution tree. This tree will store the status, content, and relationships of all steps.
|
||||
|
||||
This approach avoids tight coupling and allows the web application to listen to the progress of the core agent logic.
|
||||
|
||||
## 4. Frontend Implementation (HTMX)
|
||||
|
||||
The frontend will be built using HTMX attributes directly in the HTML templates.
|
||||
|
||||
* **`templates/index.html`**:
|
||||
* Contains the basic page structure: a top bar for the overall progress, a left panel for the execution tree, and a right panel for content.
|
||||
* Includes the HTMX library.
|
||||
* Contains the initial configuration form. The form will have an `hx-post="/start"` attribute to trigger the process.
|
||||
|
||||
* **Left Panel (`_partials/left_panel.html`)**:
|
||||
* This partial will be the target of the status polling. The main container will have `hx-get="/status"` and `hx-trigger="load, every 5s"`.
|
||||
* It will use a template loop (Jinja2) to render the hierarchical tree from the state object provided by the backend.
|
||||
* Each item in the tree will be a clickable element with an `hx-get="/content/{item_id}"` attribute and an `hx-target="#right-panel"` attribute to load its content on the right side.
|
||||
* The status of each item (pending, in-progress, completed, error) will be reflected using different CSS classes and icons:
|
||||
- **Pending**: ⏸️ (paused icon, gray color)
|
||||
- **In Progress**: ⏳ (hourglass icon, blue color)
|
||||
- **Completed**: ✅ (check mark, green color)
|
||||
- **Error**: ❌ (X mark, red color)
|
||||
|
||||
* **Right Panel (`_partials/right_panel.html`)**:
|
||||
* A simple container (`<div id="right-panel">`) that gets its content replaced by HTMX when a user clicks an item on the left.
|
||||
* Content will be pre-formatted by the backend (e.g., using Markdown-to-HTML conversion or syntax highlighting for code/errors).
|
||||
|
||||
* **Progress Bar**:
|
||||
* The response from the initial `POST /start` call will replace the configuration form with a global progress bar.
|
||||
* This progress bar's value will be updated as part of the `/status` polling response, by targeting its element ID with an `hx-swap-oob="true"` (Out of Band swap).
|
||||
|
||||
### Execution Tree Structure
|
||||
|
||||
The left panel should display a hierarchical tree structure as follows:
|
||||
|
||||
```
|
||||
📈 Trading Analysis for [SYMBOL]
|
||||
├── 📊 Data Collection Phase
|
||||
│ ├── 📈 Market Analyst
|
||||
│ │ ├── 📄 Market Analysis Report
|
||||
│ │ └── 💬 Agent Messages
|
||||
│ ├── 📱 Social Media Analyst
|
||||
│ │ ├── 📄 Sentiment Analysis Report
|
||||
│ │ └── 💬 Agent Messages
|
||||
│ ├── 📰 News Analyst
|
||||
│ │ ├── 📄 News Analysis Report
|
||||
│ │ └── 💬 Agent Messages
|
||||
│ └── 📊 Fundamentals Analyst
|
||||
│ ├── 📄 Fundamentals Report
|
||||
│ └── 💬 Agent Messages
|
||||
├── 🔍 Research Phase
|
||||
│ ├── 🐂 Bull Researcher
|
||||
│ │ ├── 📄 Bull Case Analysis
|
||||
│ │ └── 💬 Agent Messages
|
||||
│ ├── 🐻 Bear Researcher
|
||||
│ │ ├── 📄 Bear Case Analysis
|
||||
│ │ └── 💬 Agent Messages
|
||||
│ └── 🔍 Research Manager
|
||||
│ ├── 📄 Research Synthesis
|
||||
│ └── 💬 Agent Messages
|
||||
├── 📋 Planning Phase
|
||||
│ └── 📋 Trade Planner
|
||||
│ ├── 📄 Trading Plan
|
||||
│ └── 💬 Agent Messages
|
||||
├── ⚡ Execution Phase
|
||||
│ └── ⚡ Trader
|
||||
│ ├── 📄 Execution Report
|
||||
│ └── 💬 Agent Messages
|
||||
└── ⚠️ Risk Management Phase
|
||||
├── 🚨 Aggressive Risk Analyst
|
||||
│ ├── 📄 Risk Assessment (Aggressive)
|
||||
│ └── 💬 Agent Messages
|
||||
├── ⚖️ Neutral Risk Analyst
|
||||
│ ├── 📄 Risk Assessment (Neutral)
|
||||
│ └── 💬 Agent Messages
|
||||
├── 🛡️ Conservative Risk Analyst
|
||||
│ ├── 📄 Risk Assessment (Conservative)
|
||||
│ └── 💬 Agent Messages
|
||||
└── 🧠 Portfolio Manager (Final Decision)
|
||||
├── 📄 Portfolio Manager's Decision
|
||||
└── 💬 Agent Messages
|
||||
```
|
||||
|
||||
Each agent should have:
|
||||
1. **Status Icon**: Shows current execution state (pending, in-progress, completed, error)
|
||||
2. **Report Sub-item**: Shows the specific report generated by that agent
|
||||
3. **Messages Sub-item**: Shows messages to/from that agent during execution
|
||||
|
||||
The tree structure should be initialized at the start showing all agents in "pending" state, then update their status as execution progresses.
|
||||
|
||||
## 5. Detailed Implementation Steps
|
||||
|
||||
1. **Setup Environment**:
|
||||
* Create the `webapp` directory and the file structure outlined above.
|
||||
* Add `fastapi`, `uvicorn`, and `python-multipart` to the `requirements.txt` file and install them.
|
||||
|
||||
2. **Backend - Basic Server**:
|
||||
* Create the initial FastAPI app in `webapp/main.py`.
|
||||
* Implement the `GET /` endpoint to serve `templates/index.html`.
|
||||
* Create a basic `index.html` with the two-panel layout.
|
||||
|
||||
3. **Backend - State & Integration**:
|
||||
* Define the Python data classes for the execution state (e.g., `ProcessStep`, `RunState`).
|
||||
* Modify `tradingagents/graph/trading_graph.py` to include the `on_step_end` callback mechanism.
|
||||
* In `webapp/main.py`, implement the callback function that builds the hierarchical state tree in memory.
|
||||
|
||||
4. **Backend - Endpoints**:
|
||||
* Implement the `/start` endpoint to receive configuration and launch the `propagate` method in a background thread, passing the callback function.
|
||||
* Implement the `/status` endpoint to render and return the `_partials/left_panel.html` partial.
|
||||
* Implement the `/content/{item_id}` endpoint to render and return the `_partials/right_panel.html` partial.
|
||||
|
||||
5. **Frontend - HTMX**:
|
||||
* Develop the configuration form in `index.html` with `hx-post` to start the process.
|
||||
* Create the `_partials/left_panel.html` template with the Jinja2 loop and the `hx-get` attributes for clicking on items.
|
||||
* Add the polling mechanism to the main container in `index.html`.
|
||||
* Style the different states (pending, completed, error) using CSS in `static/styles.css`.
|
||||
|
||||
6. **Error Handling**:
|
||||
* When the callback receives an error, it will update the corresponding item's status to "error" and store the stack trace.
|
||||
* The frontend will visually flag the item as an error.
|
||||
* When clicked, the `/content/{item_id}` endpoint will return the formatted stack trace to be displayed in the right panel.
|
||||
|
||||
7. **Refinement**:
|
||||
* Add a loading indicator for HTMX requests.
|
||||
* Refine the CSS to ensure the application is visually appealing and user-friendly.
|
||||
* Ensure the background process is managed correctly, especially in case of errors or server shutdown.
|
||||
|
||||
## 6. Current Implementation Issues & Solutions
|
||||
|
||||
### Issues Identified:
|
||||
|
||||
1. **Incomplete Agent Tree Structure**: The current implementation only shows a single top-level item "Trading Analysis for [SYMBOL]" with limited sub-items, instead of the full agent hierarchy.
|
||||
|
||||
2. **Improper Status Tracking**: Agents don't show proper execution status (pending, in-progress, completed, error) with appropriate icons.
|
||||
|
||||
3. **Missing Reports and Messages**: Sub-items for individual agent reports and messages are not being created or displayed.
|
||||
|
||||
4. **Callback State Detection**: The `update_execution_state` callback in `webapp/main.py` is not properly detecting and organizing the execution flow of all agents.
|
||||
|
||||
### Solutions Implemented:
|
||||
|
||||
#### Backend Changes (`webapp/main.py`):
|
||||
|
||||
1. **Initialize Complete Tree Structure**: Pre-populate the execution tree with all agents in "pending" state at the start of execution.
|
||||
|
||||
2. **Improved State Detection**: Enhanced the callback function to:
|
||||
- Detect agent execution start/completion more reliably
|
||||
- Track both agent status and their generated reports/messages
|
||||
- Maintain proper phase organization (Data Collection, Research, Planning, Execution, Risk Management)
|
||||
|
||||
3. **Agent Sub-items**: Each agent now has sub-items for:
|
||||
- **Report**: The specific analysis/report generated by the agent
|
||||
- **Messages**: Communication to/from the agent during execution
|
||||
|
||||
#### Frontend Changes (`_partials/left_panel.html`):
|
||||
|
||||
1. **Enhanced Status Icons**: Clear visual indicators for each execution state
|
||||
2. **Hierarchical Display**: Proper nesting of phases, agents, and their sub-items
|
||||
3. **Clickable Content**: All items are clickable to show detailed content in the right panel
|
||||
|
||||
#### State Management:
|
||||
|
||||
The execution tree now properly reflects:
|
||||
- **Phases**: Logical grouping of related agents (Data Collection, Research, etc.)
|
||||
- **Agents**: Individual agents with their execution status
|
||||
- **Sub-items**: Reports and messages for each agent
|
||||
- **Real-time Updates**: Status changes as execution progresses
|
||||
|
||||
This provides users with complete visibility into the trading analysis process, allowing them to track which agents are running, completed, or encountering issues, and access detailed reports and communications from each agent.
|
||||
43
main.py
43
main.py
|
|
@ -1,22 +1,55 @@
|
|||
from dotenv import load_dotenv
|
||||
load_dotenv()
|
||||
|
||||
from rich.panel import Panel
|
||||
from rich.console import Console
|
||||
from rich.align import Align
|
||||
from tradingagents.graph.trading_graph import TradingAgentsGraph
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
# Create a custom config
|
||||
config = DEFAULT_CONFIG.copy()
|
||||
config["llm_provider"] = "google" # Use a different model
|
||||
config["backend_url"] = "https://generativelanguage.googleapis.com/v1" # Use a different backend
|
||||
config["deep_think_llm"] = "gemini-2.0-flash" # Use a different model
|
||||
config["quick_think_llm"] = "gemini-2.0-flash" # Use a different model
|
||||
config["ticker"] = "F"
|
||||
config['analysis_date'] = "2025-09-28"
|
||||
config["llm_provider"] = "openrouter" # Use a different model
|
||||
#config["backend_url"] = "https://generativelanguage.googleapis.com/v1" # Use a different backend
|
||||
config["backend_url"] = "https://openrouter.ai/api/v1"
|
||||
config["deep_think_llm"] = "qwen/qwen3-235b-a22b:free" # Use a different model
|
||||
config["quick_think_llm"] = "x-ai/grok-4-fast:free" # Use a different model
|
||||
config["max_debate_rounds"] = 1 # Increase debate rounds
|
||||
config["online_tools"] = True
|
||||
config["cost_per_trade"] = 0.0
|
||||
|
||||
|
||||
with open("./cli/static/welcome.txt", "r", encoding="utf-8") as f:
|
||||
welcome_ascii = f.read()
|
||||
|
||||
# Create welcome box content
|
||||
welcome_content = f"{welcome_ascii}\n"
|
||||
welcome_content += "[bold green]TradingAgents: Multi-Agents LLM Financial Trading Framework - CLI[/bold green]\n\n"
|
||||
welcome_content += "[bold]Workflow Steps:[/bold]\n"
|
||||
welcome_content += "I. Analyst Team -> II. Research Team -> III. Trader -> IV. Risk Management -> V. Portfolio Management\n\n"
|
||||
welcome_content += (
|
||||
"[dim]Built by [Tauric Research](https://github.com/TauricResearch)[/dim]"
|
||||
)
|
||||
|
||||
# Create and center the welcome box
|
||||
welcome_box = Panel(
|
||||
welcome_content,
|
||||
border_style="green",
|
||||
padding=(1, 2),
|
||||
title="Welcome to TradingAgents",
|
||||
subtitle="Multi-Agents LLM Financial Trading Framework",
|
||||
)
|
||||
console = Console()
|
||||
console.print(Align.center(welcome_box))
|
||||
console.print() # Add a blank line after the welcome box
|
||||
|
||||
# Initialize with custom config
|
||||
ta = TradingAgentsGraph(debug=True, config=config)
|
||||
|
||||
# forward propagate
|
||||
_, decision = ta.propagate("NVDA", "2024-05-10")
|
||||
_, decision = ta.propagate(config["ticker"], config["analysis_date"])
|
||||
print(decision)
|
||||
|
||||
# Memorize mistakes and reflect
|
||||
|
|
|
|||
Binary file not shown.
|
|
@ -33,4 +33,10 @@ dependencies = [
|
|||
"tushare>=1.4.21",
|
||||
"typing-extensions>=4.14.0",
|
||||
"yfinance>=0.2.63",
|
||||
"fastapi",
|
||||
"uvicorn",
|
||||
"python-multipart",
|
||||
"jinja2",
|
||||
"markdown>=3.6",
|
||||
"bleach>=6.1.0",
|
||||
]
|
||||
|
|
|
|||
|
|
@ -24,3 +24,9 @@ rich
|
|||
questionary
|
||||
langchain_anthropic
|
||||
langchain-google-genai
|
||||
fastapi
|
||||
uvicorn
|
||||
python-multipart
|
||||
jinja2
|
||||
markdown
|
||||
bleach
|
||||
|
|
|
|||
|
|
@ -0,0 +1,66 @@
|
|||
|
||||
# Stop-Loss Feature Implementation Plan
|
||||
|
||||
This document outlines the plan for implementing a stop-loss feature in the TradingAgents project.
|
||||
|
||||
## 1. Overview
|
||||
|
||||
The goal is to enhance the trading agents' capabilities by requiring a stop-loss price level for every trade recommendation. This will improve risk management and provide more concrete trading plans. An optional take-profit level can also be included.
|
||||
|
||||
## 2. Recommended Architecture: New Trade Planner Agent
|
||||
|
||||
After investigating the existing architecture, the recommended approach is to introduce a new, dedicated **Trade Planner Agent**. This approach is favored over modifying existing agents for the following reasons:
|
||||
|
||||
* **Modularity and Separation of Concerns:** It keeps the responsibilities of each agent clear. The new agent will specialize in technical analysis, while other agents, like the `risk_manager`, can focus on their core competencies.
|
||||
* **Expertise:** A dedicated agent can be specifically prompted and potentially fine-tuned to become an expert in technical analysis, leading to more accurate stop-loss and take-profit levels.
|
||||
* **Scalability:** It will be easier to add more sophisticated technical analysis logic in the future without complicating the existing agents.
|
||||
|
||||
The new workflow will be as follows:
|
||||
|
||||
1. **Analyst Team:** Gathers and analyzes data (no changes).
|
||||
2. **Researcher Team:** Debates the findings and creates an investment plan (no changes).
|
||||
3. **Trade Planner Agent (New):** Receives the market data and investment plan, and calculates the stop-loss and (optionally) take-profit levels.
|
||||
4. **Risk Management Team:** Assesses the risk of the proposed trade, now also considering the stop-loss level.
|
||||
5. **Trader Agent:** Makes the final trading decision, incorporating the stop-loss and take-profit levels into the final transaction proposal.
|
||||
|
||||
## 3. Implementation Details
|
||||
|
||||
### 3.1. Create the Trade Planner Agent
|
||||
|
||||
* **File:** `tradingagents/agents/managers/trade_planner.py`
|
||||
* **Function:** `create_trade_planner_agent`
|
||||
* **Logic:**
|
||||
* The agent will take the `market_report` and `investment_plan` from the state as input.
|
||||
* It will use a detailed prompt that instructs the LLM to act as a trade planner.
|
||||
* The prompt will guide the LLM to determine stop-loss and take-profit levels based on technical indicators such as:
|
||||
* Support and resistance levels
|
||||
* Moving averages
|
||||
* Fibonacci retracement levels
|
||||
* Volume analysis
|
||||
* The prompt will specify the desired output format, which should be a JSON object with `stop_loss` and `take_profit` keys.
|
||||
|
||||
### 3.2. Update the Graph
|
||||
|
||||
* **File:** `tradingagents/graph/trading_graph.py`
|
||||
* **Changes:**
|
||||
* Instantiate the new `trade_planner_agent`.
|
||||
* Add a new node for the agent in the `LangGraph` setup.
|
||||
* The new node will be placed after the `research_manager` and before the `risk_manager`.
|
||||
|
||||
### 3.3. Update Existing Agents
|
||||
|
||||
* **`risk_manager.py`:**
|
||||
* The prompt for the `risk_manager` will be updated to include the `stop_loss` level in its context. This will allow the risk manager to provide a more comprehensive risk assessment.
|
||||
* **`trader.py`:**
|
||||
* The prompt for the `trader` agent will be updated to include the `stop_loss` and `take_profit` levels.
|
||||
* The final output of the trader agent, the "FINAL TRANSACTION PROPOSAL", must include the stop-loss level.
|
||||
|
||||
### 3.4. Update Agent State
|
||||
|
||||
* **File:** `tradingagents/agents/utils/agent_states.py`
|
||||
* **Changes:**
|
||||
* Add `stop_loss: float` and `take_profit: float` fields to the `AgentState` dataclass. This will allow the new price levels to be passed between agents in the graph.
|
||||
|
||||
## 4. Next Steps
|
||||
|
||||
The next step is to implement the changes described in this document. This will involve creating the new agent, updating the graph, and modifying the existing agents and state.
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
from webapp.main import render_markdown
|
||||
|
||||
def test_markdown_basic_headers():
|
||||
md_text = "# Title\n\nSome **bold** text and a table:\n\n| Col1 | Col2 |\n| ---- | ---- |\n| A | B |\n"
|
||||
html = render_markdown(md_text)
|
||||
assert '<h1>' in html and 'Title' in html
|
||||
assert '<strong>' in html and 'bold' in html
|
||||
assert '<table>' in html
|
||||
|
||||
def test_markdown_code_block():
|
||||
md_text = "```python\nprint('hi')\n```"
|
||||
html = render_markdown(md_text)
|
||||
# Sanitized but should keep code element
|
||||
assert 'print' in html and '<code' in html
|
||||
|
|
@ -0,0 +1,97 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test SSL configuration behavior
|
||||
"""
|
||||
|
||||
import os
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
from tradingagents.dataflows.ssl_utils import get_ssl_config, setup_global_ssl_config
|
||||
|
||||
def test_ssl_config():
|
||||
"""Test SSL configuration with different environment variable settings"""
|
||||
|
||||
print("🧪 Testing SSL Configuration Behavior")
|
||||
print("=" * 50)
|
||||
|
||||
# Test 1: No environment variables set
|
||||
print("\n1️⃣ Test: No SSL environment variables set")
|
||||
os.environ.pop("REQUESTS_CA_BUNDLE", None)
|
||||
os.environ.pop("CURL_CA_BUNDLE", None)
|
||||
os.environ.pop("SSL_VERIFY", None)
|
||||
os.environ.pop("HTTP_TIMEOUT", None)
|
||||
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
config = DEFAULT_CONFIG.copy()
|
||||
ssl_config = get_ssl_config(config)
|
||||
print(f" SSL Config: {ssl_config}")
|
||||
print(f" Expected: Empty or minimal config (default behavior)")
|
||||
|
||||
# Test 2: Custom certificate bundle set
|
||||
print("\n2️⃣ Test: Custom certificate bundle set")
|
||||
os.environ["REQUESTS_CA_BUNDLE"] = "/custom/path/ca-bundle.crt"
|
||||
|
||||
# Re-import to get updated config
|
||||
from importlib import reload
|
||||
import tradingagents.default_config
|
||||
reload(tradingagents.default_config)
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
config = DEFAULT_CONFIG.copy()
|
||||
ssl_config = get_ssl_config(config)
|
||||
print(f" Config ssl_cert_bundle: {config.get('ssl_cert_bundle')}")
|
||||
print(f" SSL Config: {ssl_config}")
|
||||
print(f" Expected: cert_bundle and verify set to custom path")
|
||||
|
||||
# Test 3: SSL verification disabled
|
||||
print("\n3️⃣ Test: SSL verification disabled")
|
||||
os.environ.pop("REQUESTS_CA_BUNDLE", None)
|
||||
os.environ["SSL_VERIFY"] = "false"
|
||||
|
||||
reload(tradingagents.default_config)
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
config = DEFAULT_CONFIG.copy()
|
||||
ssl_config = get_ssl_config(config)
|
||||
print(f" Config ssl_verify: {config.get('ssl_verify')}")
|
||||
print(f" SSL Config: {ssl_config}")
|
||||
print(f" Expected: verify set to False")
|
||||
|
||||
# Test 4: Timeout and proxy settings
|
||||
print("\n4️⃣ Test: Timeout and proxy settings")
|
||||
os.environ["HTTP_TIMEOUT"] = "60"
|
||||
os.environ["HTTP_PROXY"] = "http://proxy.example.com:8080"
|
||||
os.environ["HTTPS_PROXY"] = "https://proxy.example.com:8080"
|
||||
|
||||
reload(tradingagents.default_config)
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
config = DEFAULT_CONFIG.copy()
|
||||
ssl_config = get_ssl_config(config)
|
||||
print(f" Config timeout: {config.get('http_timeout')}")
|
||||
print(f" Config proxies: HTTP={config.get('http_proxy')}, HTTPS={config.get('https_proxy')}")
|
||||
print(f" SSL Config: {ssl_config}")
|
||||
print(f" Expected: timeout and proxies in ssl_config")
|
||||
|
||||
# Test 5: Empty environment variables (should not be used)
|
||||
print("\n5️⃣ Test: Empty environment variables")
|
||||
os.environ["REQUESTS_CA_BUNDLE"] = ""
|
||||
os.environ["HTTP_TIMEOUT"] = ""
|
||||
|
||||
reload(tradingagents.default_config)
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
config = DEFAULT_CONFIG.copy()
|
||||
ssl_config = get_ssl_config(config)
|
||||
print(f" Config ssl_cert_bundle: '{config.get('ssl_cert_bundle')}'")
|
||||
print(f" Config http_timeout: {config.get('http_timeout')}")
|
||||
print(f" SSL Config: {ssl_config}")
|
||||
print(f" Expected: Empty values should not be used")
|
||||
|
||||
# Clean up
|
||||
for var in ["REQUESTS_CA_BUNDLE", "CURL_CA_BUNDLE", "SSL_VERIFY", "HTTP_TIMEOUT", "HTTP_PROXY", "HTTPS_PROXY"]:
|
||||
os.environ.pop(var, None)
|
||||
|
||||
print("\n✅ SSL configuration tests completed")
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_ssl_config()
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test SSL connectivity for TradingAgents components
|
||||
"""
|
||||
|
||||
import os
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Load environment variables
|
||||
load_dotenv()
|
||||
|
||||
def test_api_connections():
|
||||
"""Test API connections that TradingAgents will use"""
|
||||
|
||||
print("🔍 Testing TradingAgents API Connections")
|
||||
print("=" * 50)
|
||||
|
||||
# Test 1: Basic HTTP requests with proper SSL
|
||||
print("\n1️⃣ Testing HTTP requests with SSL configuration:")
|
||||
|
||||
import requests
|
||||
|
||||
test_endpoints = [
|
||||
("OpenAI API", "https://api.openai.com/v1/models"),
|
||||
("Google Search", "https://www.google.com/search?q=AAPL"),
|
||||
("OpenRouter API", "https://openrouter.ai/api/v1/models"),
|
||||
]
|
||||
|
||||
for name, url in test_endpoints:
|
||||
try:
|
||||
response = requests.get(url, timeout=10)
|
||||
print(f" ✅ {name}: Status {response.status_code}")
|
||||
except Exception as e:
|
||||
print(f" ❌ {name}: {e}")
|
||||
|
||||
# Test 2: LangChain LLM initialization
|
||||
print("\n2️⃣ Testing LangChain LLM initialization:")
|
||||
|
||||
try:
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
# Test with the SSL configuration
|
||||
llm = ChatOpenAI(
|
||||
model="gpt-3.5-turbo",
|
||||
api_key=os.getenv("OPENAI_API_KEY", "test-key")
|
||||
)
|
||||
print(" ✅ ChatOpenAI initialization successful")
|
||||
|
||||
# Test a simple API call (this might fail due to API key, but SSL should work)
|
||||
try:
|
||||
# This will test SSL connectivity
|
||||
response = llm.invoke("Hello")
|
||||
print(" ✅ ChatOpenAI API call successful")
|
||||
except Exception as e:
|
||||
if "401" in str(e) or "Unauthorized" in str(e):
|
||||
print(" ✅ ChatOpenAI SSL working (401 = API key issue, not SSL)")
|
||||
else:
|
||||
print(f" ⚠️ ChatOpenAI API call error: {e}")
|
||||
|
||||
except Exception as e:
|
||||
print(f" ❌ ChatOpenAI initialization failed: {e}")
|
||||
|
||||
# Test 3: TradingAgents configuration
|
||||
print("\n3️⃣ Testing TradingAgents SSL configuration:")
|
||||
|
||||
try:
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
from tradingagents.dataflows.ssl_utils import get_ssl_config, setup_global_ssl_config
|
||||
|
||||
print(f" 📋 SSL cert bundle: {DEFAULT_CONFIG.get('ssl_cert_bundle')}")
|
||||
print(f" 📋 SSL verify: {DEFAULT_CONFIG.get('ssl_verify')}")
|
||||
|
||||
ssl_config = get_ssl_config(DEFAULT_CONFIG)
|
||||
print(f" 📋 SSL config: {ssl_config}")
|
||||
|
||||
# Set up global SSL configuration
|
||||
setup_global_ssl_config(DEFAULT_CONFIG)
|
||||
print(" ✅ Global SSL configuration applied")
|
||||
|
||||
except Exception as e:
|
||||
print(f" ❌ TradingAgents SSL configuration failed: {e}")
|
||||
|
||||
# Test 4: Google News functionality
|
||||
print("\n4️⃣ Testing Google News data retrieval:")
|
||||
|
||||
try:
|
||||
from tradingagents.dataflows.googlenews_utils import getNewsData
|
||||
from tradingagents.dataflows.ssl_utils import get_ssl_config
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
ssl_config = get_ssl_config(DEFAULT_CONFIG)
|
||||
|
||||
# Test news retrieval with SSL config
|
||||
news_results = getNewsData("AAPL", "2024-01-01", "2024-01-02", ssl_config)
|
||||
print(f" ✅ Google News retrieval successful, got {len(news_results)} results")
|
||||
|
||||
except Exception as e:
|
||||
print(f" ❌ Google News retrieval failed: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_api_connections()
|
||||
|
|
@ -0,0 +1,138 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test TradingAgents SSL connections specifically
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Add project root to path
|
||||
project_root = Path(__file__).parent.absolute()
|
||||
if str(project_root) not in sys.path:
|
||||
sys.path.insert(0, str(project_root))
|
||||
|
||||
from dotenv import load_dotenv
|
||||
load_dotenv()
|
||||
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
from tradingagents.dataflows.ssl_utils import get_ssl_config, setup_global_ssl_config, get_certificate_info
|
||||
from tradingagents.graph.trading_graph import TradingAgentsGraph
|
||||
import requests
|
||||
|
||||
def test_certificate_issues():
|
||||
"""Test SSL certificate issues that might occur in TradingAgents"""
|
||||
|
||||
print("🔍 Testing TradingAgents SSL Certificate Issues")
|
||||
print("=" * 55)
|
||||
|
||||
# Show environment variables
|
||||
print("\n📋 Environment Variables:")
|
||||
ssl_vars = ["REQUESTS_CA_BUNDLE", "CURL_CA_BUNDLE", "SSL_VERIFY", "HTTP_TIMEOUT",
|
||||
"HTTP_PROXY", "HTTPS_PROXY", "OPENAI_API_KEY", "FINNHUB_API_KEY"]
|
||||
for var in ssl_vars:
|
||||
value = os.getenv(var)
|
||||
if value:
|
||||
if "API_KEY" in var:
|
||||
print(f" {var}: {'*' * min(8, len(value))}...")
|
||||
else:
|
||||
print(f" {var}: {value}")
|
||||
else:
|
||||
print(f" {var}: Not set")
|
||||
|
||||
# Show certificate info
|
||||
print("\n📋 Certificate Bundle Information:")
|
||||
cert_info = get_certificate_info()
|
||||
for key, value in cert_info.items():
|
||||
if isinstance(value, list):
|
||||
print(f" {key}: {', '.join(value) if value else 'None found'}")
|
||||
else:
|
||||
print(f" {key}: {value}")
|
||||
|
||||
# Test SSL config
|
||||
print("\n⚙️ Current SSL Configuration:")
|
||||
config = DEFAULT_CONFIG.copy()
|
||||
ssl_config = get_ssl_config(config)
|
||||
print(f" SSL Config: {ssl_config}")
|
||||
|
||||
# Set up global SSL config
|
||||
print("\n🔧 Setting up global SSL configuration...")
|
||||
setup_global_ssl_config(config)
|
||||
|
||||
# Test basic HTTPS connections
|
||||
test_urls = [
|
||||
"https://api.openai.com/",
|
||||
"https://www.google.com/",
|
||||
"https://openrouter.ai/",
|
||||
"https://finnhub.io/"
|
||||
]
|
||||
|
||||
print(f"\n🌐 Testing HTTPS connections:")
|
||||
for url in test_urls:
|
||||
try:
|
||||
print(f" Testing {url}...")
|
||||
response = requests.get(url, timeout=10)
|
||||
print(f" ✅ {url}: Status {response.status_code}")
|
||||
except requests.exceptions.SSLError as e:
|
||||
print(f" ❌ SSL Error for {url}: {e}")
|
||||
except Exception as e:
|
||||
print(f" ❌ Connection error for {url}: {e}")
|
||||
|
||||
# Test TradingAgentsGraph initialization
|
||||
print(f"\n🤖 Testing TradingAgentsGraph initialization:")
|
||||
try:
|
||||
# Create minimal config
|
||||
test_config = DEFAULT_CONFIG.copy()
|
||||
test_config["llm_provider"] = "openai"
|
||||
test_config["quick_think_llm"] = "gpt-3.5-turbo"
|
||||
test_config["deep_think_llm"] = "gpt-4"
|
||||
|
||||
print(" Creating TradingAgentsGraph...")
|
||||
graph = TradingAgentsGraph(config=test_config)
|
||||
print(" ✅ TradingAgentsGraph created successfully")
|
||||
|
||||
# Test if we can make a simple LLM call
|
||||
print(" Testing LLM connection...")
|
||||
try:
|
||||
# This won't actually make an API call but will test the LLM initialization
|
||||
llm = graph.quick_thinking_llm
|
||||
print(f" ✅ LLM initialized: {llm}")
|
||||
except Exception as e:
|
||||
print(f" ❌ LLM initialization error: {e}")
|
||||
|
||||
except Exception as e:
|
||||
print(f" ❌ TradingAgentsGraph initialization error: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
|
||||
# Recommendations based on findings
|
||||
print(f"\n💡 Troubleshooting Recommendations:")
|
||||
|
||||
# Check if we're on macOS and suggest system certificates
|
||||
if sys.platform == "darwin":
|
||||
macos_cert = "/etc/ssl/cert.pem"
|
||||
if os.path.exists(macos_cert):
|
||||
print(f" 📱 macOS detected - try: export REQUESTS_CA_BUNDLE={macos_cert}")
|
||||
else:
|
||||
print(f" 📱 macOS detected but {macos_cert} not found")
|
||||
|
||||
# Check for certifi
|
||||
try:
|
||||
import certifi
|
||||
print(f" 🔐 Certifi available - try: export REQUESTS_CA_BUNDLE={certifi.where()}")
|
||||
except ImportError:
|
||||
print(f" ❌ Certifi not installed - try: pip install certifi")
|
||||
|
||||
# Corporate environment suggestions
|
||||
print(f" 🏢 If behind corporate firewall:")
|
||||
print(f" • Contact IT for corporate certificate bundle")
|
||||
print(f" • Check if HTTP_PROXY/HTTPS_PROXY needed")
|
||||
print(f" • Ask about custom CA certificates")
|
||||
|
||||
# Temporary workaround (not recommended for production)
|
||||
print(f" 🚨 Temporary workaround (development only):")
|
||||
print(f" export SSL_VERIFY=false")
|
||||
print(f" ⚠️ This disables SSL verification - use with caution!")
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_certificate_issues()
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test script to verify LangGraph streaming behavior
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
from datetime import date
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Add the project root to the path
|
||||
sys.path.insert(0, '/Users/kevin.bruton/repo2/TradingAgents')
|
||||
|
||||
# Load environment variables
|
||||
load_dotenv()
|
||||
|
||||
def test_callback(state):
|
||||
"""Test callback to understand state structure"""
|
||||
print(f"\n🔍 CALLBACK RECEIVED:")
|
||||
print(f" Type: {type(state)}")
|
||||
print(f" Keys: {list(state.keys()) if isinstance(state, dict) else 'Not a dict'}")
|
||||
if isinstance(state, dict):
|
||||
for key, value in state.items():
|
||||
if key not in ["__end__", "messages"]:
|
||||
print(f" {key}: {type(value)} - {'Has content' if value else 'Empty'}")
|
||||
|
||||
def main():
|
||||
"""Test the TradingAgentsGraph streaming"""
|
||||
try:
|
||||
from tradingagents.graph.trading_graph import TradingAgentsGraph
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
print("🚀 Testing TradingAgentsGraph streaming...")
|
||||
|
||||
# Create a minimal config for testing
|
||||
config = DEFAULT_CONFIG.copy()
|
||||
config["llm_provider"] = "openai"
|
||||
config["quick_think_llm"] = "gpt-3.5-turbo"
|
||||
config["deep_think_llm"] = "gpt-4"
|
||||
|
||||
# Create graph with debug mode
|
||||
graph = TradingAgentsGraph(config=config, debug=True)
|
||||
|
||||
print("📊 Starting propagation with callback...")
|
||||
|
||||
# Test with a simple company
|
||||
final_state, signal = graph.propagate(
|
||||
company_name="AAPL",
|
||||
trade_date=str(date.today()),
|
||||
on_step_callback=test_callback
|
||||
)
|
||||
|
||||
print(f"\n✅ Propagation completed!")
|
||||
print(f" Final signal: {signal}")
|
||||
|
||||
except Exception as e:
|
||||
import traceback
|
||||
print(f"❌ Error: {e}")
|
||||
print(traceback.format_exc())
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -6,6 +6,7 @@ from .analysts.fundamentals_analyst import create_fundamentals_analyst
|
|||
from .analysts.market_analyst import create_market_analyst
|
||||
from .analysts.news_analyst import create_news_analyst
|
||||
from .analysts.social_media_analyst import create_social_media_analyst
|
||||
from .managers.trade_planner import create_trade_planner_agent
|
||||
|
||||
from .researchers.bear_researcher import create_bear_researcher
|
||||
from .researchers.bull_researcher import create_bull_researcher
|
||||
|
|
@ -37,5 +38,6 @@ __all__ = [
|
|||
"create_risk_manager",
|
||||
"create_safe_debator",
|
||||
"create_social_media_analyst",
|
||||
"create_trade_planner_agent",
|
||||
"create_trader",
|
||||
]
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
||||
import time
|
||||
import json
|
||||
from tradingagents.agents.utils.llm_resilience import invoke_with_retries
|
||||
|
||||
|
||||
def create_fundamentals_analyst(llm, toolkit):
|
||||
|
|
@ -21,8 +22,15 @@ def create_fundamentals_analyst(llm, toolkit):
|
|||
]
|
||||
|
||||
system_message = (
|
||||
"You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, company financial history, insider sentiment and insider transactions to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
|
||||
+ " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.",
|
||||
"You are a researcher tasked with analyzing fundamental information over the past week about a company. "
|
||||
"Please write a comprehensive report of the company's fundamental information such as "
|
||||
"financial documents, company profile, basic company financials, company financial history, "
|
||||
"insider sentiment and insider transactions to gain a full view of the company's fundamental information to inform traders. "
|
||||
"Make sure to include as much detail as possible. "
|
||||
"Do not simply state the trends are mixed. "
|
||||
"Provide detailed and finegrained analysis and insights that may help traders make decisions. "
|
||||
"Make sure to append a Markdown table at the end of the report to organize key points in the report, "
|
||||
"organized and easy to read.",
|
||||
)
|
||||
|
||||
prompt = ChatPromptTemplate.from_messages(
|
||||
|
|
@ -48,13 +56,19 @@ def create_fundamentals_analyst(llm, toolkit):
|
|||
prompt = prompt.partial(ticker=ticker)
|
||||
|
||||
chain = prompt | llm.bind_tools(tools)
|
||||
|
||||
result = chain.invoke(state["messages"])
|
||||
try:
|
||||
result = invoke_with_retries(chain, state["messages"], toolkit.config)
|
||||
except Exception as e: # noqa: BLE001
|
||||
class DummyResult:
|
||||
def __init__(self, content):
|
||||
self.content = content
|
||||
self.tool_calls = []
|
||||
result = DummyResult(f"Fundamentals analyst failed after retries. Error: {e}")
|
||||
|
||||
report = ""
|
||||
|
||||
if len(result.tool_calls) == 0:
|
||||
report = result.content
|
||||
if getattr(result, 'tool_calls', []) == []:
|
||||
report = getattr(result, 'content', '')
|
||||
|
||||
return {
|
||||
"messages": [result],
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
||||
import time
|
||||
import json
|
||||
from tradingagents.agents.utils.llm_resilience import invoke_with_retries
|
||||
|
||||
|
||||
def create_market_analyst(llm, toolkit):
|
||||
|
|
@ -22,7 +23,8 @@ def create_market_analyst(llm, toolkit):
|
|||
]
|
||||
|
||||
system_message = (
|
||||
"""You are a trading assistant tasked with analyzing financial markets. Your role is to select the **most relevant indicators** for a given market condition or trading strategy from the following list. The goal is to choose up to **8 indicators** that provide complementary insights without redundancy. Categories and each category's indicators are:
|
||||
"""
|
||||
You are a trading assistant tasked with analyzing financial markets. Your role is to select the **most relevant indicators** for a given market condition or trading strategy from the following list. The goal is to choose the indicators that provide complementary insights without redundancy. Categories and each category's indicators are:
|
||||
|
||||
Moving Averages:
|
||||
- close_50_sma: 50 SMA: A medium-term trend indicator. Usage: Identify trend direction and serve as dynamic support/resistance. Tips: It lags price; combine with faster indicators for timely signals.
|
||||
|
|
@ -46,6 +48,16 @@ Volatility Indicators:
|
|||
Volume-Based Indicators:
|
||||
- vwma: VWMA: A moving average weighted by volume. Usage: Confirm trends by integrating price action with volume data. Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses.
|
||||
|
||||
Support and Resistance Indicators:
|
||||
- supertrend_lower: Lower Band of the SuperTrend. Usage: Identify trend direction and serve as dynamic support/resistance. Tips: It lags price; combine with faster indicators for timely signals.
|
||||
- supertrend_upper: Upper Band of the SuperTrend. Usage: Identify trend direction and serve as dynamic support/resistance. Tips: It lags price; combine with faster indicators for timely signals.
|
||||
- Pivot Points: Pivot Points: Key points used to identify potential entry points. Usage: Identify potential entry points for long/short positions. Tips: Watch for confirmation with other indicators.
|
||||
- Donchian Chanells: Donchian Channels: A range of high and low prices over a specified period. Usage: Identify potential entry points for long/short positions. Tips: Watch for confirmation with other indicators.
|
||||
|
||||
Bullish and Bearish Candlestick Patterns:
|
||||
- bullish_candlestick: Bullish Candlestick Pattern: A bullish candlestick pattern. Usage: Identify potential entry points for long positions. Tips: Watch for confirmation with other indicators.
|
||||
- bearish_candlestick: Bearish Candlestick Pattern: A bearish candlestick pattern. Usage: Identify potential entry points for short positions. Tips: Watch for confirmation with other indicators.
|
||||
|
||||
- Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_YFin_data first to retrieve the CSV that is needed to generate indicators. Write a very detailed and nuanced report of the trends you observe. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."""
|
||||
+ """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
|
||||
)
|
||||
|
|
@ -74,12 +86,22 @@ Volume-Based Indicators:
|
|||
|
||||
chain = prompt | llm.bind_tools(tools)
|
||||
|
||||
result = chain.invoke(state["messages"])
|
||||
# Resilient invocation with retries
|
||||
try:
|
||||
result = invoke_with_retries(chain, state["messages"], toolkit.config)
|
||||
except Exception as e: # noqa: BLE001
|
||||
# Provide a graceful degraded response so graph can continue / be logged
|
||||
fallback_content = f"Market analyst failed to retrieve a model response after retries. Error: {e}"
|
||||
class DummyResult:
|
||||
def __init__(self, content):
|
||||
self.content = content
|
||||
self.tool_calls = []
|
||||
result = DummyResult(fallback_content)
|
||||
|
||||
report = ""
|
||||
|
||||
if len(result.tool_calls) == 0:
|
||||
report = result.content
|
||||
if getattr(result, 'tool_calls', []) == []:
|
||||
report = getattr(result, 'content', '')
|
||||
|
||||
return {
|
||||
"messages": [result],
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
||||
import time
|
||||
import json
|
||||
from tradingagents.agents.utils.llm_resilience import invoke_with_retries
|
||||
|
||||
|
||||
def create_news_analyst(llm, toolkit):
|
||||
|
|
@ -45,12 +46,19 @@ def create_news_analyst(llm, toolkit):
|
|||
prompt = prompt.partial(ticker=ticker)
|
||||
|
||||
chain = prompt | llm.bind_tools(tools)
|
||||
result = chain.invoke(state["messages"])
|
||||
try:
|
||||
result = invoke_with_retries(chain, state["messages"], toolkit.config)
|
||||
except Exception as e: # noqa: BLE001
|
||||
class DummyResult:
|
||||
def __init__(self, content):
|
||||
self.content = content
|
||||
self.tool_calls = []
|
||||
result = DummyResult(f"News analyst failed after retries. Error: {e}")
|
||||
|
||||
report = ""
|
||||
|
||||
if len(result.tool_calls) == 0:
|
||||
report = result.content
|
||||
if getattr(result, 'tool_calls', []) == []:
|
||||
report = getattr(result, 'content', '')
|
||||
|
||||
return {
|
||||
"messages": [result],
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
||||
import time
|
||||
import json
|
||||
from tradingagents.agents.utils.llm_resilience import invoke_with_retries
|
||||
|
||||
|
||||
def create_social_media_analyst(llm, toolkit):
|
||||
|
|
@ -44,13 +45,19 @@ def create_social_media_analyst(llm, toolkit):
|
|||
prompt = prompt.partial(ticker=ticker)
|
||||
|
||||
chain = prompt | llm.bind_tools(tools)
|
||||
|
||||
result = chain.invoke(state["messages"])
|
||||
try:
|
||||
result = invoke_with_retries(chain, state["messages"], toolkit.config)
|
||||
except Exception as e: # noqa: BLE001
|
||||
class DummyResult:
|
||||
def __init__(self, content):
|
||||
self.content = content
|
||||
self.tool_calls = []
|
||||
result = DummyResult(f"Social media analyst failed after retries. Error: {e}")
|
||||
|
||||
report = ""
|
||||
|
||||
if len(result.tool_calls) == 0:
|
||||
report = result.content
|
||||
if getattr(result, 'tool_calls', []) == []:
|
||||
report = getattr(result, 'content', '')
|
||||
|
||||
return {
|
||||
"messages": [result],
|
||||
|
|
|
|||
|
|
@ -14,6 +14,8 @@ def create_risk_manager(llm, memory):
|
|||
fundamentals_report = state["news_report"]
|
||||
sentiment_report = state["sentiment_report"]
|
||||
trader_plan = state["investment_plan"]
|
||||
stop_loss = state.get("stop_loss")
|
||||
take_profit = state.get("take_profit")
|
||||
|
||||
curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
|
||||
past_memories = memory.get_memories(curr_situation, n_matches=2)
|
||||
|
|
@ -38,7 +40,8 @@ Guidelines for Decision-Making:
|
|||
1. **Summarize Key Arguments**: Extract the strongest points from each analyst, focusing on relevance to the context.
|
||||
2. **Provide Rationale**: Support your recommendation with direct quotes and counterarguments from the debate.
|
||||
3. **Refine the Trader's Plan**: Start with the trader's original plan, **{trader_plan}**, and adjust it based on the analysts' insights.
|
||||
4. **Learn from Past Mistakes**: Use lessons from **{past_memory_str}** to address prior misjudgments and improve the decision you are making now to make sure you don't make a wrong decision that loses money.
|
||||
4. **Incorporate Technical Analysis**: The Trade Planner has proposed a stop-loss of **{stop_loss}** and a take-profit of **{take_profit}**. You must consider these levels in your final recommendation.
|
||||
5. **Learn from Past Mistakes**: Use lessons from **{past_memory_str}** to address prior misjudgments and improve the decision you are making now to make sure you don't make a wrong decision that loses money.
|
||||
|
||||
Deliverables:
|
||||
- A clear and actionable recommendation.
|
||||
|
|
|
|||
|
|
@ -0,0 +1,57 @@
|
|||
import json
|
||||
|
||||
def create_trade_planner_agent(llm, toolkit):
|
||||
def trade_planner_node(state) -> dict:
|
||||
market_report = state["market_report"]
|
||||
investment_plan = state["investment_plan"]
|
||||
|
||||
if toolkit.config["online_tools"]:
|
||||
tools = [
|
||||
toolkit.get_YFin_data_online,
|
||||
toolkit.get_stockstats_indicators_report_online,
|
||||
]
|
||||
else:
|
||||
tools = [
|
||||
toolkit.get_YFin_data,
|
||||
toolkit.get_stockstats_indicators_report,
|
||||
]
|
||||
|
||||
prompt = f'''You are a trade planner. Your role is to determine the stop-loss and take-profit levels for a given investment plan.
|
||||
|
||||
Analyze the following market report and investment plan to determine the optimal stop-loss and take-profit levels. You should use the available tools to get the latest market data and calculate technical indicators.
|
||||
|
||||
**Market Report:**
|
||||
{market_report}
|
||||
|
||||
**Investment Plan:**
|
||||
{investment_plan}
|
||||
|
||||
Use technical indicators such as Pivots, ATR, support and resistance levels, Donchian Channels, SuperTrend, etc., as well as risk factors to determine the stop-loss and take-profit levels.
|
||||
|
||||
Based on your analysis, provide the stop-loss and take-profit levels in a JSON format. For example:
|
||||
{{
|
||||
"stop_loss": 150.00,
|
||||
"take_profit": 180.00
|
||||
}}
|
||||
|
||||
The stop-loss level is mandatory. The take-profit level is optional.
|
||||
Do not provide any other information or explanation.
|
||||
'''
|
||||
|
||||
response = llm.invoke(prompt)
|
||||
|
||||
try:
|
||||
levels = json.loads(response.content)
|
||||
stop_loss = levels.get("stop_loss")
|
||||
take_profit = levels.get("take_profit")
|
||||
except (json.JSONDecodeError, AttributeError):
|
||||
stop_loss = None
|
||||
take_profit = None
|
||||
|
||||
|
||||
return {
|
||||
"stop_loss": stop_loss,
|
||||
"take_profit": take_profit,
|
||||
}
|
||||
|
||||
return trade_planner_node
|
||||
|
|
@ -25,9 +25,12 @@ def create_trader(llm, memory):
|
|||
user_position = state.get("user_position", "none")
|
||||
cost_per_trade = state.get("cost_per_trade", 0.0)
|
||||
|
||||
stop_loss = state.get("stop_loss")
|
||||
take_profit = state.get("take_profit")
|
||||
|
||||
context = {
|
||||
"role": "user",
|
||||
"content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nLeverage these insights to make an informed and strategic decision.",
|
||||
"content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nThe Trade Planner has proposed a stop-loss of **{stop_loss}** and a take-profit of **{take_profit}**. You must consider these levels in your final recommendation.\n\nLeverage these insights to make an informed and strategic decision.",
|
||||
}
|
||||
|
||||
messages = [
|
||||
|
|
|
|||
|
|
@ -76,3 +76,5 @@ class AgentState(MessagesState):
|
|||
RiskDebateState, "Current state of the debate on evaluating risk"
|
||||
]
|
||||
final_trade_decision: Annotated[str, "Final decision made by the Risk Analysts"]
|
||||
stop_loss: Annotated[Optional[float], "Stop loss price level"] = None
|
||||
take_profit: Annotated[Optional[float], "Take profit price level"] = None
|
||||
|
|
|
|||
|
|
@ -0,0 +1,46 @@
|
|||
import time
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Callable, Dict
|
||||
from json import JSONDecodeError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def invoke_with_retries(chain: Any, messages: Any, config: Dict[str, Any]):
|
||||
"""Invoke a langchain chain with retries and detailed logging.
|
||||
|
||||
Handles transient HTTP issues and JSON decode errors coming from provider SDKs.
|
||||
"""
|
||||
max_retries = config.get("llm_max_retries", 3)
|
||||
backoff = config.get("llm_retry_backoff", 2.0)
|
||||
|
||||
last_err = None
|
||||
for attempt in range(1, max_retries + 1):
|
||||
try:
|
||||
result = chain.invoke(messages)
|
||||
return result
|
||||
except JSONDecodeError as e:
|
||||
last_err = e
|
||||
logger.warning(
|
||||
"JSONDecodeError on attempt %s/%s: %s", attempt, max_retries, e
|
||||
)
|
||||
except Exception as e: # noqa: BLE001
|
||||
# Capture common transient network / HTTP errors keywords
|
||||
transient = any(
|
||||
kw in str(e).lower() for kw in [
|
||||
"timeout", "temporarily", "rate limit", "connection reset", "503", "502", "jsondecodeerror"
|
||||
]
|
||||
)
|
||||
last_err = e
|
||||
logger.warning(
|
||||
"LLM invocation error (transient=%s) attempt %s/%s: %s", transient, attempt, max_retries, e
|
||||
)
|
||||
if not transient and not isinstance(e, JSONDecodeError):
|
||||
# Non transient -> abort early
|
||||
break
|
||||
# Exponential backoff
|
||||
sleep_for = backoff ** (attempt - 1)
|
||||
time.sleep(sleep_for)
|
||||
# All attempts failed
|
||||
raise last_err # propagate last error
|
||||
|
|
@ -12,7 +12,7 @@ class FinancialSituationMemory:
|
|||
# Use a good general-purpose model for financial text
|
||||
self.embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
|
||||
self.embedding_type = "local"
|
||||
print(f"✅ Using local embeddings with sentence-transformers for {name}")
|
||||
print(f" Using local embeddings with sentence-transformers for {name}") # ✅
|
||||
except ImportError:
|
||||
print("⚠️ sentence-transformers not found. Install with: pip install sentence-transformers")
|
||||
print("Falling back to ChromaDB's default embeddings...")
|
||||
|
|
|
|||
|
|
@ -23,20 +23,36 @@ def is_rate_limited(response):
|
|||
wait=wait_exponential(multiplier=1, min=4, max=60),
|
||||
stop=stop_after_attempt(5),
|
||||
)
|
||||
def make_request(url, headers):
|
||||
def make_request(url, headers, ssl_config=None):
|
||||
"""Make a request with retry logic for rate limiting"""
|
||||
# Random delay before each request to avoid detection
|
||||
time.sleep(random.uniform(2, 6))
|
||||
response = requests.get(url, headers=headers)
|
||||
|
||||
# Prepare SSL configuration - only use if explicitly configured
|
||||
kwargs = {}
|
||||
if ssl_config:
|
||||
if ssl_config.get("cert_bundle"):
|
||||
kwargs["verify"] = ssl_config["cert_bundle"]
|
||||
elif "verify" in ssl_config:
|
||||
kwargs["verify"] = ssl_config["verify"]
|
||||
|
||||
if ssl_config.get("timeout"):
|
||||
kwargs["timeout"] = ssl_config["timeout"]
|
||||
|
||||
if ssl_config.get("proxies"):
|
||||
kwargs["proxies"] = ssl_config["proxies"]
|
||||
|
||||
response = requests.get(url, headers=headers, **kwargs)
|
||||
return response
|
||||
|
||||
|
||||
def getNewsData(query, start_date, end_date):
|
||||
def getNewsData(query, start_date, end_date, ssl_config=None):
|
||||
"""
|
||||
Scrape Google News search results for a given query and date range.
|
||||
query: str - search query
|
||||
start_date: str - start date in the format yyyy-mm-dd or mm/dd/yyyy
|
||||
end_date: str - end date in the format yyyy-mm-dd or mm/dd/yyyy
|
||||
ssl_config: dict - SSL configuration including cert_bundle, verify, timeout, proxies
|
||||
"""
|
||||
if "-" in start_date:
|
||||
start_date = datetime.strptime(start_date, "%Y-%m-%d")
|
||||
|
|
@ -64,7 +80,7 @@ def getNewsData(query, start_date, end_date):
|
|||
)
|
||||
|
||||
try:
|
||||
response = make_request(url, headers)
|
||||
response = make_request(url, headers, ssl_config)
|
||||
soup = BeautifulSoup(response.content, "html.parser")
|
||||
results_on_page = soup.select("div.SoaBEf")
|
||||
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ from .yfin_utils import *
|
|||
from .stockstats_utils import *
|
||||
from .googlenews_utils import *
|
||||
from .finnhub_utils import get_data_in_range
|
||||
from .ssl_utils import get_ssl_config, setup_global_ssl_config
|
||||
from dateutil.relativedelta import relativedelta
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from datetime import datetime
|
||||
|
|
@ -294,7 +295,13 @@ def get_google_news(
|
|||
before = start_date - relativedelta(days=look_back_days)
|
||||
before = before.strftime("%Y-%m-%d")
|
||||
|
||||
news_results = getNewsData(query, before, curr_date)
|
||||
config = get_config()
|
||||
ssl_config = get_ssl_config(config)
|
||||
# Only pass ssl_config if it has actual configuration
|
||||
if ssl_config:
|
||||
news_results = getNewsData(query, before, curr_date, ssl_config)
|
||||
else:
|
||||
news_results = getNewsData(query, before, curr_date)
|
||||
|
||||
news_str = ""
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,149 @@
|
|||
"""
|
||||
SSL/TLS configuration utilities for TradingAgents
|
||||
"""
|
||||
|
||||
import os
|
||||
import ssl
|
||||
import certifi
|
||||
from typing import Dict, Any, Optional
|
||||
|
||||
|
||||
def get_ssl_config(config: Dict[str, Any]) -> Dict[str, Any]:
|
||||
"""
|
||||
Create SSL configuration dictionary from the main config.
|
||||
|
||||
Args:
|
||||
config: Main configuration dictionary
|
||||
|
||||
Returns:
|
||||
SSL configuration dictionary with cert_bundle, verify, timeout, proxies
|
||||
"""
|
||||
ssl_config = {}
|
||||
|
||||
# Certificate bundle configuration - only use if explicitly specified
|
||||
cert_bundle = config.get("ssl_cert_bundle")
|
||||
if cert_bundle and cert_bundle.strip():
|
||||
# Use explicitly specified certificate bundle
|
||||
ssl_config["cert_bundle"] = cert_bundle
|
||||
ssl_config["verify"] = cert_bundle
|
||||
elif not config.get("ssl_verify", True):
|
||||
# Only disable SSL verification if explicitly set to false
|
||||
ssl_config["verify"] = False
|
||||
|
||||
# If no explicit cert bundle and ssl_verify is true (default),
|
||||
# don't set anything - use default behavior
|
||||
|
||||
# Timeout configuration
|
||||
if config.get("http_timeout"):
|
||||
ssl_config["timeout"] = config["http_timeout"]
|
||||
|
||||
# Proxy configuration
|
||||
proxies = {}
|
||||
if config.get("http_proxy"):
|
||||
proxies["http"] = config["http_proxy"]
|
||||
if config.get("https_proxy"):
|
||||
proxies["https"] = config["https_proxy"]
|
||||
if proxies:
|
||||
ssl_config["proxies"] = proxies
|
||||
|
||||
return ssl_config
|
||||
|
||||
|
||||
def setup_global_ssl_config(config: Dict[str, Any]) -> None:
|
||||
"""
|
||||
Set up global SSL configuration for the application.
|
||||
This affects all SSL connections made by requests and other libraries.
|
||||
Only sets configuration if explicitly specified in environment variables.
|
||||
|
||||
Args:
|
||||
config: Main configuration dictionary
|
||||
"""
|
||||
# Set environment variables for requests library only if explicitly configured
|
||||
cert_bundle = config.get("ssl_cert_bundle")
|
||||
if cert_bundle and cert_bundle.strip():
|
||||
os.environ["REQUESTS_CA_BUNDLE"] = cert_bundle
|
||||
os.environ["CURL_CA_BUNDLE"] = cert_bundle
|
||||
print(f"🔒 Using custom SSL certificate bundle: {cert_bundle}")
|
||||
|
||||
# Set SSL verification for requests only if explicitly disabled
|
||||
if not config.get("ssl_verify", True):
|
||||
# Disable SSL warnings when verification is disabled
|
||||
import urllib3
|
||||
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
||||
print("⚠️ SSL certificate verification disabled")
|
||||
|
||||
# Set proxy environment variables if specified
|
||||
if config.get("http_proxy"):
|
||||
os.environ["HTTP_PROXY"] = config["http_proxy"]
|
||||
print(f"🌐 Using HTTP proxy: {config['http_proxy']}")
|
||||
if config.get("https_proxy"):
|
||||
os.environ["HTTPS_PROXY"] = config["https_proxy"]
|
||||
print(f"🌐 Using HTTPS proxy: {config['https_proxy']}")
|
||||
|
||||
# Set timeout if specified
|
||||
if config.get("http_timeout"):
|
||||
print(f"⏱️ HTTP timeout set to: {config['http_timeout']} seconds")
|
||||
|
||||
|
||||
def create_ssl_context(cert_bundle: Optional[str] = None, verify_ssl: bool = True) -> ssl.SSLContext:
|
||||
"""
|
||||
Create a custom SSL context with specified certificate bundle.
|
||||
|
||||
Args:
|
||||
cert_bundle: Path to certificate bundle file
|
||||
verify_ssl: Whether to verify SSL certificates
|
||||
|
||||
Returns:
|
||||
Configured SSL context
|
||||
"""
|
||||
if not verify_ssl:
|
||||
# Create unverified context (not recommended for production)
|
||||
context = ssl._create_unverified_context()
|
||||
else:
|
||||
# Create default context
|
||||
context = ssl.create_default_context()
|
||||
|
||||
if cert_bundle:
|
||||
# Load custom certificate bundle
|
||||
context.load_verify_locations(cafile=cert_bundle)
|
||||
|
||||
return context
|
||||
|
||||
|
||||
def get_certificate_info() -> Dict[str, str]:
|
||||
"""
|
||||
Get information about available certificate bundles.
|
||||
|
||||
Returns:
|
||||
Dictionary with certificate bundle information
|
||||
"""
|
||||
info = {}
|
||||
|
||||
# Check certifi bundle
|
||||
try:
|
||||
import certifi
|
||||
info["certifi_bundle"] = certifi.where()
|
||||
except ImportError:
|
||||
info["certifi_bundle"] = "Not available (certifi not installed)"
|
||||
|
||||
# Check environment variables
|
||||
info["env_ca_bundle"] = os.getenv("REQUESTS_CA_BUNDLE", "Not set")
|
||||
info["env_curl_bundle"] = os.getenv("CURL_CA_BUNDLE", "Not set")
|
||||
|
||||
# Check system certificate stores
|
||||
common_cert_paths = [
|
||||
"/etc/ssl/certs/ca-certificates.crt", # Debian/Ubuntu
|
||||
"/etc/pki/tls/certs/ca-bundle.crt", # RedHat/CentOS
|
||||
"/usr/local/share/certs/ca-root-nss.crt", # FreeBSD
|
||||
"/etc/ssl/cert.pem", # OpenBSD
|
||||
"/System/Library/OpenSSL/certs/cert.pem", # macOS
|
||||
]
|
||||
|
||||
available_system_certs = []
|
||||
for path in common_cert_paths:
|
||||
if os.path.exists(path):
|
||||
available_system_certs.append(path)
|
||||
|
||||
info["system_cert_bundles"] = available_system_certs
|
||||
|
||||
return info
|
||||
|
|
@ -3,13 +3,13 @@ import os
|
|||
DEFAULT_CONFIG = {
|
||||
"project_dir": os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
|
||||
"results_dir": os.getenv("TRADINGAGENTS_RESULTS_DIR", "./results"),
|
||||
"data_dir": "/Users/yluo/Documents/Code/ScAI/FR1-data",
|
||||
"data_dir": "./data",
|
||||
"data_cache_dir": os.path.join(
|
||||
os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
|
||||
"dataflows/data_cache",
|
||||
),
|
||||
# LLM settings
|
||||
"llm_provider": "openai", # "openai" or "gemini"
|
||||
"llm_provider": "openai", # "openai"/"gemini"/"openrouter"/"ollama"
|
||||
"deep_think_llm": "o4-mini",
|
||||
"quick_think_llm": "gpt-4o-mini",
|
||||
"backend_url": "https://api.openai.com/v1",
|
||||
|
|
@ -26,4 +26,15 @@ DEFAULT_CONFIG = {
|
|||
"online_tools": True,
|
||||
"user_position": "none",
|
||||
"cost_per_trade": 0.0,
|
||||
# SSL/TLS Certificate settings - only use if explicitly set
|
||||
"ssl_cert_bundle": os.getenv("REQUESTS_CA_BUNDLE") or os.getenv("CURL_CA_BUNDLE"),
|
||||
"ssl_verify": os.getenv("SSL_VERIFY", "true").lower() in ("true", "1", "yes"),
|
||||
"http_timeout": int(os.getenv("HTTP_TIMEOUT")) if os.getenv("HTTP_TIMEOUT") else None,
|
||||
# Proxy settings (if needed)
|
||||
"http_proxy": os.getenv("HTTP_PROXY"),
|
||||
"https_proxy": os.getenv("HTTPS_PROXY"),
|
||||
# LLM resilience settings
|
||||
"llm_max_retries": int(os.getenv("LLM_MAX_RETRIES", "3")),
|
||||
"llm_retry_backoff": float(os.getenv("LLM_RETRY_BACKOFF", "2")), # seconds exponential base
|
||||
"debug_http": os.getenv("DEBUG_HTTP", "false").lower() in ("1", "true", "yes"),
|
||||
}
|
||||
|
|
|
|||
|
|
@ -43,6 +43,14 @@ class ConditionalLogic:
|
|||
return "tools_fundamentals"
|
||||
return "Msg Clear Fundamentals"
|
||||
|
||||
def should_continue_technical(self, state: AgentState):
|
||||
"""Determine if technical analysis should continue."""
|
||||
messages = state["messages"]
|
||||
last_message = messages[-1]
|
||||
if last_message.tool_calls:
|
||||
return "tools_technical"
|
||||
return "Msg Clear Technical"
|
||||
|
||||
def should_continue_debate(self, state: AgentState) -> str:
|
||||
"""Determine if debate should continue."""
|
||||
|
||||
|
|
|
|||
|
|
@ -98,6 +98,7 @@ class GraphSetup:
|
|||
research_manager_node = create_research_manager(
|
||||
self.deep_thinking_llm, self.invest_judge_memory
|
||||
)
|
||||
trade_planner_node = create_trade_planner_agent(self.quick_thinking_llm, self.toolkit)
|
||||
trader_node = create_trader(self.quick_thinking_llm, self.trader_memory)
|
||||
|
||||
# Create risk analysis nodes
|
||||
|
|
@ -123,6 +124,7 @@ class GraphSetup:
|
|||
workflow.add_node("Bull Researcher", bull_researcher_node)
|
||||
workflow.add_node("Bear Researcher", bear_researcher_node)
|
||||
workflow.add_node("Research Manager", research_manager_node)
|
||||
workflow.add_node("Trade Planner", trade_planner_node)
|
||||
workflow.add_node("Trader", trader_node)
|
||||
workflow.add_node("Risky Analyst", risky_analyst)
|
||||
workflow.add_node("Neutral Analyst", neutral_analyst)
|
||||
|
|
@ -172,7 +174,8 @@ class GraphSetup:
|
|||
"Research Manager": "Research Manager",
|
||||
},
|
||||
)
|
||||
workflow.add_edge("Research Manager", "Trader")
|
||||
workflow.add_edge("Research Manager", "Trade Planner")
|
||||
workflow.add_edge("Trade Planner", "Trader")
|
||||
workflow.add_edge("Trader", "Risky Analyst")
|
||||
workflow.add_conditional_edges(
|
||||
"Risky Analyst",
|
||||
|
|
|
|||
|
|
@ -21,6 +21,7 @@ from tradingagents.agents.utils.agent_states import (
|
|||
RiskDebateState,
|
||||
)
|
||||
from tradingagents.dataflows.interface import set_config
|
||||
from tradingagents.dataflows.ssl_utils import setup_global_ssl_config
|
||||
|
||||
from .conditional_logic import ConditionalLogic
|
||||
from .setup import GraphSetup
|
||||
|
|
@ -50,6 +51,9 @@ class TradingAgentsGraph:
|
|||
|
||||
# Update the interface's config
|
||||
set_config(self.config)
|
||||
|
||||
# Set up global SSL configuration
|
||||
setup_global_ssl_config(self.config)
|
||||
|
||||
# Create necessary directories
|
||||
os.makedirs(
|
||||
|
|
@ -79,15 +83,48 @@ class TradingAgentsGraph:
|
|||
"export OPENAI_API_KEY=your_openai_key_here"
|
||||
)
|
||||
|
||||
# Prepare SSL configuration for HTTP client - only if explicitly configured
|
||||
http_client_kwargs = {}
|
||||
cert_bundle = self.config.get("ssl_cert_bundle")
|
||||
|
||||
if cert_bundle and cert_bundle.strip():
|
||||
import httpx
|
||||
http_client_kwargs["verify"] = cert_bundle
|
||||
elif not self.config.get("ssl_verify", True):
|
||||
import httpx
|
||||
http_client_kwargs["verify"] = False
|
||||
|
||||
if self.config.get("http_timeout"):
|
||||
import httpx
|
||||
http_client_kwargs["timeout"] = self.config["http_timeout"]
|
||||
|
||||
# Add proxy configuration if specified
|
||||
if self.config.get("http_proxy") or self.config.get("https_proxy"):
|
||||
import httpx
|
||||
proxies = {}
|
||||
if self.config.get("http_proxy"):
|
||||
proxies["http://"] = self.config["http_proxy"]
|
||||
if self.config.get("https_proxy"):
|
||||
proxies["https://"] = self.config["https_proxy"]
|
||||
http_client_kwargs["proxies"] = proxies
|
||||
|
||||
# Create HTTP client only if we have custom settings
|
||||
http_client = None
|
||||
if http_client_kwargs:
|
||||
import httpx
|
||||
http_client = httpx.Client(**http_client_kwargs)
|
||||
|
||||
self.deep_thinking_llm = ChatOpenAI(
|
||||
model=self.config["deep_think_llm"],
|
||||
base_url=self.config["backend_url"],
|
||||
api_key=api_key
|
||||
api_key=api_key,
|
||||
http_client=http_client
|
||||
)
|
||||
self.quick_thinking_llm = ChatOpenAI(
|
||||
model=self.config["quick_think_llm"],
|
||||
base_url=self.config["backend_url"],
|
||||
api_key=api_key
|
||||
api_key=api_key,
|
||||
http_client=http_client
|
||||
)
|
||||
elif self.config["llm_provider"].lower() == "anthropic":
|
||||
self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
|
||||
|
|
@ -180,9 +217,19 @@ class TradingAgentsGraph:
|
|||
self.toolkit.get_simfin_income_stmt,
|
||||
]
|
||||
),
|
||||
"technical": ToolNode(
|
||||
[
|
||||
# online tools
|
||||
self.toolkit.get_YFin_data_online,
|
||||
self.toolkit.get_stockstats_indicators_report_online,
|
||||
# offline tools
|
||||
self.toolkit.get_YFin_data,
|
||||
self.toolkit.get_stockstats_indicators_report,
|
||||
]
|
||||
),
|
||||
}
|
||||
|
||||
def propagate(self, company_name, trade_date, user_position="none", cost_per_trade=0.0):
|
||||
def propagate(self, company_name, trade_date, user_position="none", cost_per_trade=0.0, on_step_callback=None):
|
||||
"""Run the trading agents graph for a company on a specific date."""
|
||||
|
||||
self.ticker = company_name
|
||||
|
|
@ -193,17 +240,14 @@ class TradingAgentsGraph:
|
|||
)
|
||||
args = self.propagator.get_graph_args()
|
||||
|
||||
if self.debug:
|
||||
# Debug mode with tracing
|
||||
if on_step_callback or self.debug:
|
||||
# Stream mode for callbacks or debug mode
|
||||
trace = []
|
||||
for chunk in self.graph.stream(init_agent_state, **args):
|
||||
if len(chunk["messages"]) == 0:
|
||||
pass
|
||||
else:
|
||||
chunk["messages"][-1].pretty_print()
|
||||
trace.append(chunk)
|
||||
|
||||
final_state = trace[-1]
|
||||
for s in self.graph.stream(init_agent_state, **args):
|
||||
trace.append(s)
|
||||
if on_step_callback:
|
||||
on_step_callback(s)
|
||||
final_state = trace[-1] if trace else {}
|
||||
else:
|
||||
# Standard mode without tracing
|
||||
final_state = self.graph.invoke(init_agent_state, **args)
|
||||
|
|
|
|||
33
uv.lock
33
uv.lock
|
|
@ -358,6 +358,18 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/99/37/e8730c3587a65eb5645d4aba2d27aae48e8003614d6aaf15dda67f702f1f/bidict-0.23.1-py3-none-any.whl", hash = "sha256:5dae8d4d79b552a71cbabc7deb25dfe8ce710b17ff41711e13010ead2abfc3e5", size = 32764, upload-time = "2024-02-18T19:09:04.156Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bleach"
|
||||
version = "6.2.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "webencodings" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/76/9a/0e33f5054c54d349ea62c277191c020c2d6ef1d65ab2cb1993f91ec846d1/bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f", size = 203083, upload-time = "2024-10-29T18:30:40.477Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/55/96142937f66150805c25c4d0f31ee4132fd33497753400734f9dfdcbdc66/bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e", size = 163406, upload-time = "2024-10-29T18:30:38.186Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bs4"
|
||||
version = "0.0.2"
|
||||
|
|
@ -1996,6 +2008,15 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/f1/ab/fdbbd91d8d82bf1a723ba88ec3e3d76c022b53c391b0c13cad441cdb8f9e/lxml-5.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b12cb6527599808ada9eb2cd6e0e7d3d8f13fe7bbb01c6311255a15ded4c7ab4", size = 3487862, upload-time = "2025-04-23T01:49:36.296Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "markdown"
|
||||
version = "3.9"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/8d/37/02347f6d6d8279247a5837082ebc26fc0d5aaeaf75aa013fcbb433c777ab/markdown-3.9.tar.gz", hash = "sha256:d2900fe1782bd33bdbbd56859defef70c2e78fc46668f8eb9df3128138f2cb6a", size = 364585, upload-time = "2025-09-04T20:25:22.885Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/70/ae/44c4a6a4cbb496d93c6257954260fe3a6e91b7bed2240e5dad2a717f5111/markdown-3.9-py3-none-any.whl", hash = "sha256:9f4d91ed810864ea88a6f32c07ba8bee1346c0cc1f6b1f9f6c822f2a9667d280", size = 107441, upload-time = "2025-09-04T20:25:21.784Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "markdown-it-py"
|
||||
version = "3.0.0"
|
||||
|
|
@ -5142,20 +5163,25 @@ source = { virtual = "." }
|
|||
dependencies = [
|
||||
{ name = "akshare" },
|
||||
{ name = "backtrader" },
|
||||
{ name = "bleach" },
|
||||
{ name = "chainlit" },
|
||||
{ name = "chromadb" },
|
||||
{ name = "eodhd" },
|
||||
{ name = "fastapi" },
|
||||
{ name = "feedparser" },
|
||||
{ name = "finnhub-python" },
|
||||
{ name = "jinja2" },
|
||||
{ name = "langchain-anthropic" },
|
||||
{ name = "langchain-experimental" },
|
||||
{ name = "langchain-google-genai" },
|
||||
{ name = "langchain-openai" },
|
||||
{ name = "langgraph" },
|
||||
{ name = "markdown" },
|
||||
{ name = "pandas" },
|
||||
{ name = "parsel" },
|
||||
{ name = "praw" },
|
||||
{ name = "python-dotenv" },
|
||||
{ name = "python-multipart" },
|
||||
{ name = "pytz" },
|
||||
{ name = "questionary" },
|
||||
{ name = "redis" },
|
||||
|
|
@ -5167,6 +5193,7 @@ dependencies = [
|
|||
{ name = "tqdm" },
|
||||
{ name = "tushare" },
|
||||
{ name = "typing-extensions" },
|
||||
{ name = "uvicorn" },
|
||||
{ name = "yfinance" },
|
||||
]
|
||||
|
||||
|
|
@ -5174,20 +5201,25 @@ dependencies = [
|
|||
requires-dist = [
|
||||
{ name = "akshare", specifier = ">=1.16.98" },
|
||||
{ name = "backtrader", specifier = ">=1.9.78.123" },
|
||||
{ name = "bleach", specifier = ">=6.1.0" },
|
||||
{ name = "chainlit", specifier = ">=2.5.5" },
|
||||
{ name = "chromadb", specifier = ">=1.0.12" },
|
||||
{ name = "eodhd", specifier = ">=1.0.32" },
|
||||
{ name = "fastapi" },
|
||||
{ name = "feedparser", specifier = ">=6.0.11" },
|
||||
{ name = "finnhub-python", specifier = ">=2.4.23" },
|
||||
{ name = "jinja2" },
|
||||
{ name = "langchain-anthropic", specifier = ">=0.3.15" },
|
||||
{ name = "langchain-experimental", specifier = ">=0.3.4" },
|
||||
{ name = "langchain-google-genai", specifier = ">=2.1.5" },
|
||||
{ name = "langchain-openai", specifier = ">=0.3.23" },
|
||||
{ name = "langgraph", specifier = ">=0.4.8" },
|
||||
{ name = "markdown", specifier = ">=3.6" },
|
||||
{ name = "pandas", specifier = ">=2.3.0" },
|
||||
{ name = "parsel", specifier = ">=1.10.0" },
|
||||
{ name = "praw", specifier = ">=7.8.1" },
|
||||
{ name = "python-dotenv", specifier = ">=1.1.0" },
|
||||
{ name = "python-multipart" },
|
||||
{ name = "pytz", specifier = ">=2025.2" },
|
||||
{ name = "questionary", specifier = ">=2.1.0" },
|
||||
{ name = "redis", specifier = ">=6.2.0" },
|
||||
|
|
@ -5199,6 +5231,7 @@ requires-dist = [
|
|||
{ name = "tqdm", specifier = ">=4.67.1" },
|
||||
{ name = "tushare", specifier = ">=1.4.21" },
|
||||
{ name = "typing-extensions", specifier = ">=4.14.0" },
|
||||
{ name = "uvicorn" },
|
||||
{ name = "yfinance", specifier = ">=0.2.63" },
|
||||
]
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,684 @@
|
|||
from fastapi import FastAPI, Request, Form, BackgroundTasks, HTTPException
|
||||
from fastapi.responses import HTMLResponse
|
||||
from fastapi.staticfiles import StaticFiles
|
||||
import jinja2
|
||||
import markdown as md
|
||||
import bleach
|
||||
import os
|
||||
from typing import Dict, Any
|
||||
import threading
|
||||
import time
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Load environment variables from .env file
|
||||
load_dotenv()
|
||||
|
||||
# Check required environment variables
|
||||
required_env_vars = [
|
||||
'FINNHUB_API_KEY',
|
||||
'OPENAI_API_KEY',
|
||||
#'REDDIT_CLIENT_ID',
|
||||
#'REDDIT_CLIENT_SECRET',
|
||||
#'REDDIT_USER_AGENT'
|
||||
]
|
||||
|
||||
missing_vars = [var for var in required_env_vars if not os.getenv(var)]
|
||||
if missing_vars:
|
||||
print(f"Error: Missing required environment variables: {', '.join(missing_vars)}")
|
||||
print("Please create a .env file with these variables or set them in your environment.")
|
||||
|
||||
from tradingagents.graph.trading_graph import TradingAgentsGraph
|
||||
|
||||
app = FastAPI()
|
||||
|
||||
# In-memory storage for the process state
|
||||
# Using a lock for thread-safe access to app_state
|
||||
app_state_lock = threading.Lock()
|
||||
app_state: Dict[str, Any] = {
|
||||
"process_running": False,
|
||||
"company_symbol": None,
|
||||
"execution_tree": [],
|
||||
"overall_status": "idle", # idle, in_progress, completed, error
|
||||
"overall_progress": 0 # 0-100
|
||||
}
|
||||
|
||||
# Define the strict sequential phase execution order
|
||||
PHASE_SEQUENCE = [
|
||||
"data_collection_phase",
|
||||
"research_phase",
|
||||
"planning_phase",
|
||||
"execution_phase",
|
||||
"risk_analysis_phase",
|
||||
# New dedicated top-level phase for the final portfolio decision
|
||||
"final_decision_phase"
|
||||
]
|
||||
|
||||
# Mount the static directory to serve CSS, JS, etc.
|
||||
app.mount("/static", StaticFiles(directory="webapp/static"), name="static")
|
||||
|
||||
# Setup Jinja2 for templating
|
||||
template_dir = os.path.join(os.path.dirname(__file__), "templates")
|
||||
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
|
||||
|
||||
# Allowed tags and attributes for sanitized markdown rendering
|
||||
ALLOWED_TAGS = list(bleach.sanitizer.ALLOWED_TAGS) + [
|
||||
"p", "pre", "span", "h1", "h2", "h3", "h4", "h5", "h6", "table", "thead", "tbody", "tr", "th", "td", "blockquote", "code"
|
||||
]
|
||||
ALLOWED_ATTRIBUTES = {**bleach.sanitizer.ALLOWED_ATTRIBUTES, "span": ["class"], "code": ["class"], "th": ["align"], "td": ["align"]}
|
||||
|
||||
def render_markdown(value: str) -> str:
|
||||
"""Convert markdown text to sanitized HTML."""
|
||||
if not isinstance(value, str):
|
||||
value = str(value)
|
||||
html = md.markdown(
|
||||
value,
|
||||
extensions=["fenced_code", "tables", "codehilite", "toc", "sane_lists"],
|
||||
output_format="html5"
|
||||
)
|
||||
cleaned = bleach.clean(html, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES, strip=True)
|
||||
return cleaned
|
||||
|
||||
jinja_env.filters['markdown'] = render_markdown
|
||||
|
||||
def update_execution_state(state: Dict[str, Any]):
|
||||
"""Callback function to update the app_state based on LangGraph's state."""
|
||||
print(f"📡 Callback received state keys: {list(state.keys())}")
|
||||
|
||||
with app_state_lock:
|
||||
# Ensure execution tree is initialized
|
||||
if not app_state["execution_tree"]:
|
||||
app_state["execution_tree"] = initialize_complete_execution_tree()
|
||||
|
||||
# Map LangGraph node names to our tracking system
|
||||
agent_state_mapping = {
|
||||
"Market Analyst": {
|
||||
"phase": "data_collection",
|
||||
"agent_id": "market_analyst",
|
||||
"report_key": "market_report",
|
||||
"report_name": "Market Analysis Report"
|
||||
},
|
||||
"Social Analyst": {
|
||||
"phase": "data_collection",
|
||||
"agent_id": "social_analyst",
|
||||
"report_key": "sentiment_report",
|
||||
"report_name": "Sentiment Analysis Report"
|
||||
},
|
||||
"News Analyst": {
|
||||
"phase": "data_collection",
|
||||
"agent_id": "news_analyst",
|
||||
"report_key": "news_report",
|
||||
"report_name": "News Analysis Report"
|
||||
},
|
||||
"Fundamentals Analyst": {
|
||||
"phase": "data_collection",
|
||||
"agent_id": "fundamentals_analyst",
|
||||
"report_key": "fundamentals_report",
|
||||
"report_name": "Fundamentals Report"
|
||||
},
|
||||
"Bull Researcher": {
|
||||
"phase": "research",
|
||||
"agent_id": "bull_researcher",
|
||||
"report_key": "investment_debate_state.bull_history",
|
||||
"report_name": "Bull Case Analysis"
|
||||
},
|
||||
"Bear Researcher": {
|
||||
"phase": "research",
|
||||
"agent_id": "bear_researcher",
|
||||
"report_key": "investment_debate_state.bear_history",
|
||||
"report_name": "Bear Case Analysis"
|
||||
},
|
||||
"Research Manager": {
|
||||
"phase": "research",
|
||||
"agent_id": "research_manager",
|
||||
"report_key": "investment_debate_state.judge_decision",
|
||||
"report_name": "Research Synthesis"
|
||||
},
|
||||
"Trade Planner": {
|
||||
"phase": "planning",
|
||||
"agent_id": "trade_planner",
|
||||
"report_key": "trader_investment_plan",
|
||||
"report_name": "Trading Plan"
|
||||
},
|
||||
"Trader": {
|
||||
"phase": "execution",
|
||||
"agent_id": "trader",
|
||||
"report_key": "investment_plan",
|
||||
"report_name": "Execution Report"
|
||||
},
|
||||
"Risky Analyst": {
|
||||
"phase": "risk_analysis",
|
||||
"agent_id": "risky_analyst",
|
||||
"report_key": "risk_debate_state.risky_history",
|
||||
"report_name": "Risk Assessment (Aggressive)"
|
||||
},
|
||||
"Neutral Analyst": {
|
||||
"phase": "risk_analysis",
|
||||
"agent_id": "neutral_analyst",
|
||||
"report_key": "risk_debate_state.neutral_history",
|
||||
"report_name": "Risk Assessment (Neutral)"
|
||||
},
|
||||
"Safe Analyst": {
|
||||
"phase": "risk_analysis",
|
||||
"agent_id": "safe_analyst",
|
||||
"report_key": "risk_debate_state.safe_history",
|
||||
"report_name": "Risk Assessment (Conservative)"
|
||||
},
|
||||
"Risk Judge": {
|
||||
# Moved to its own dedicated phase for prominence
|
||||
"phase": "final_decision",
|
||||
"agent_id": "risk_judge",
|
||||
"report_key": "final_trade_decision",
|
||||
"report_name": "Portfolio Manager's Decision"
|
||||
}
|
||||
}
|
||||
|
||||
# Update agent statuses based on available reports
|
||||
for agent_name, agent_info in agent_state_mapping.items():
|
||||
# Check if this agent has completed (has report data)
|
||||
report_data = get_nested_value(state, agent_info["report_key"])
|
||||
if report_data:
|
||||
update_agent_status(agent_info, "completed", report_data, state)
|
||||
|
||||
# Mark in-progress agent(s) sequentially BEFORE recalculating phase status
|
||||
mark_in_progress_agents(app_state["execution_tree"])
|
||||
# Recalculate phase statuses after setting agent in-progress markers
|
||||
recalc_phase_statuses(app_state["execution_tree"])
|
||||
# Update overall progress
|
||||
execution_tree = app_state["execution_tree"]
|
||||
total_agents = len(agent_state_mapping)
|
||||
completed_agents = count_completed_agents(execution_tree)
|
||||
app_state["overall_progress"] = min(100, int((completed_agents / max(total_agents, 1)) * 100))
|
||||
|
||||
print(f"📊 Progress updated: {app_state['overall_progress']}% ({completed_agents}/{total_agents} agents)")
|
||||
|
||||
def initialize_complete_execution_tree():
|
||||
"""Initialize the complete execution tree with all agents in pending state."""
|
||||
return [
|
||||
{
|
||||
"id": "data_collection_phase",
|
||||
"name": "📊 Data Collection Phase",
|
||||
"status": "pending",
|
||||
"content": "Collecting market data and analysis from various sources",
|
||||
"children": [
|
||||
create_agent_node("market_analyst", "📈 Market Analyst"),
|
||||
create_agent_node("social_analyst", "📱 Social Media Analyst"),
|
||||
create_agent_node("news_analyst", "📰 News Analyst"),
|
||||
create_agent_node("fundamentals_analyst", "📊 Fundamentals Analyst")
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "research_phase",
|
||||
"name": "🔍 Research Phase",
|
||||
"status": "pending",
|
||||
"content": "Research and debate investment perspectives",
|
||||
"children": [
|
||||
create_agent_node("bull_researcher", "🐂 Bull Researcher"),
|
||||
create_agent_node("bear_researcher", "🐻 Bear Researcher"),
|
||||
create_agent_node("research_manager", "🔍 Research Manager")
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "planning_phase",
|
||||
"name": "📋 Planning Phase",
|
||||
"status": "pending",
|
||||
"content": "Develop trading strategy and execution plan",
|
||||
"children": [
|
||||
create_agent_node("trade_planner", "📋 Trade Planner")
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "execution_phase",
|
||||
"name": "⚡ Execution Phase",
|
||||
"status": "pending",
|
||||
"content": "Execute trades based on analysis and planning",
|
||||
"children": [
|
||||
create_agent_node("trader", "⚡ Trader")
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "risk_analysis_phase",
|
||||
"name": "⚠️ Risk Management Phase",
|
||||
"status": "pending",
|
||||
"content": "Assess and manage investment risks",
|
||||
"children": [
|
||||
create_agent_node("risky_analyst", "🚨 Aggressive Risk Analyst"),
|
||||
create_agent_node("neutral_analyst", "⚖️ Neutral Risk Analyst"),
|
||||
create_agent_node("safe_analyst", "🛡️ Conservative Risk Analyst")
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "final_decision_phase",
|
||||
"name": "🧠 Portfolio Manager's Decision",
|
||||
"status": "pending",
|
||||
"content": "Final portfolio / trade decision synthesized from all prior phases",
|
||||
"children": [
|
||||
create_agent_node("risk_judge", "🧠 Portfolio Manager")
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
def create_agent_node(agent_id: str, agent_name: str):
|
||||
"""Create a standardized agent node with report and messages sub-items."""
|
||||
return {
|
||||
"id": agent_id,
|
||||
"name": agent_name,
|
||||
"status": "pending",
|
||||
"content": f"Agent: {agent_name} - Awaiting execution",
|
||||
"children": [
|
||||
{
|
||||
"id": f"{agent_id}_messages",
|
||||
"name": "<EFBFBD> Messages",
|
||||
"status": "pending",
|
||||
"content": "No messages yet",
|
||||
"children": [],
|
||||
"timestamp": time.time()
|
||||
},
|
||||
{
|
||||
"id": f"{agent_id}_report",
|
||||
"name": "<EFBFBD> Report",
|
||||
"status": "pending",
|
||||
"content": "Report not yet generated",
|
||||
"children": [],
|
||||
"timestamp": time.time()
|
||||
}
|
||||
],
|
||||
"timestamp": time.time()
|
||||
}
|
||||
|
||||
def get_nested_value(data: dict, key_path: str):
|
||||
"""Get value from nested dict using dot notation (e.g., 'investment_debate_state.bull_history')."""
|
||||
keys = key_path.split('.')
|
||||
value = data
|
||||
for key in keys:
|
||||
if isinstance(value, dict) and key in value:
|
||||
value = value[key]
|
||||
else:
|
||||
return None
|
||||
return value
|
||||
|
||||
def update_agent_status(agent_info: dict, status: str, report_data: any, full_state: dict):
|
||||
"""Update an agent's status and content in the execution tree."""
|
||||
execution_tree = app_state["execution_tree"]
|
||||
|
||||
# Find the agent in the tree
|
||||
agent_node = find_agent_in_tree(agent_info["agent_id"], execution_tree)
|
||||
if not agent_node:
|
||||
return
|
||||
|
||||
# Update agent status
|
||||
if agent_node["status"] != "completed":
|
||||
agent_node["status"] = status
|
||||
agent_node["content"] = f"✅ {agent_node['name']} - Analysis completed"
|
||||
|
||||
# Update report sub-item
|
||||
report_node = find_item_by_id(f"{agent_info['agent_id']}_report", agent_node["children"])
|
||||
if report_node:
|
||||
report_node["status"] = "completed"
|
||||
report_node["content"] = format_report_content(agent_info["report_name"], report_data)
|
||||
|
||||
# Update messages sub-item (extract from state if available)
|
||||
messages_node = find_item_by_id(f"{agent_info['agent_id']}_messages", agent_node["children"])
|
||||
if messages_node:
|
||||
messages_node["status"] = "completed"
|
||||
messages_node["content"] = extract_agent_messages(full_state, agent_info["agent_id"])
|
||||
|
||||
# Phase status recalculated globally in recalc_phase_statuses
|
||||
|
||||
def find_agent_in_tree(agent_id: str, tree: list):
|
||||
"""Find an agent node in the execution tree."""
|
||||
for phase in tree:
|
||||
if phase.get("children"):
|
||||
for agent in phase["children"]:
|
||||
if agent["id"] == agent_id:
|
||||
return agent
|
||||
return None
|
||||
|
||||
def find_item_by_id(item_id: str, items: list):
|
||||
"""Find an item by ID in a list of items."""
|
||||
for item in items:
|
||||
if item["id"] == item_id:
|
||||
return item
|
||||
return None
|
||||
|
||||
def format_report_content(report_name: str, report_data: any) -> str:
|
||||
"""Format report data for display."""
|
||||
if isinstance(report_data, str):
|
||||
return f"📄 {report_name}\n\n{report_data}"
|
||||
elif isinstance(report_data, dict):
|
||||
return f"📄 {report_name}\n\n{str(report_data)}"
|
||||
elif isinstance(report_data, list) and report_data:
|
||||
# For debate histories, show the latest message
|
||||
latest = report_data[-1] if report_data else "No data"
|
||||
return f"📄 {report_name}\n\n{str(latest)}"
|
||||
else:
|
||||
return f"📄 {report_name}\n\nReport generated successfully"
|
||||
|
||||
def extract_agent_messages(state: dict, agent_id: str) -> str:
|
||||
"""Extract relevant messages for an agent from the state."""
|
||||
# Expecting state['messages'] to be a list of dicts with optional keys like
|
||||
# 'role', 'content', 'timestamp'. We'll display each in an expandable box.
|
||||
messages = state.get("messages", []) or []
|
||||
if not messages:
|
||||
return "💬 Agent Messages\n\nNo messages recorded for this agent."
|
||||
|
||||
# Filter messages for this agent if agent_id field present
|
||||
filtered = []
|
||||
for m in messages:
|
||||
if isinstance(m, dict):
|
||||
msg_agent = m.get("agent_id") or m.get("agent")
|
||||
if msg_agent and msg_agent != agent_id:
|
||||
continue
|
||||
filtered.append(m)
|
||||
else:
|
||||
# Try common attributes used by message objects (e.g., langchain HumanMessage / AIMessage)
|
||||
msg_agent = getattr(m, "agent_id", None) or getattr(m, "agent", None)
|
||||
if msg_agent and msg_agent != agent_id:
|
||||
continue
|
||||
filtered.append(m)
|
||||
if not filtered:
|
||||
filtered = messages # fallback to all if no agent-specific match
|
||||
|
||||
parts = ["💬 Agent Messages", "", f"Total messages: {len(filtered)}", ""]
|
||||
for idx, m in enumerate(filtered, start=1):
|
||||
if isinstance(m, dict):
|
||||
role = m.get("role") or m.get("type") or "message"
|
||||
ts = m.get("timestamp")
|
||||
content = m.get("content") or m.get("text") or "(no content)"
|
||||
else:
|
||||
# Object-based message
|
||||
role = getattr(m, "role", None) or getattr(m, "type", None) or m.__class__.__name__
|
||||
ts = getattr(m, "timestamp", None)
|
||||
# LangChain messages often have a .content attribute
|
||||
content = getattr(m, "content", None) or getattr(m, "text", None) or str(m)
|
||||
# Escape triple backticks to avoid markdown parser confusion
|
||||
if isinstance(content, str):
|
||||
content = content.replace('```', '\u0060\u0060\u0060')
|
||||
header = f"{idx}. {role.title()}" + (f" – {ts}" if ts else "")
|
||||
# Use HTML <details> so user can expand long messages
|
||||
parts.append(
|
||||
f"<details class=\"message-box\" {'open' if idx <= 3 else ''}>")
|
||||
parts.append(f" <summary>{header}</summary>")
|
||||
# Wrap content in pre for formatting
|
||||
parts.append(" <pre class=\"message-content\">" + str(content) + "</pre>")
|
||||
parts.append("</details>")
|
||||
|
||||
return "\n".join(parts)
|
||||
|
||||
def recalc_phase_statuses(execution_tree: list):
|
||||
"""Recalculate each phase's status: pending (no started), in_progress (some running/completed but not all), completed (all done), error if any child error."""
|
||||
for phase in execution_tree:
|
||||
if not phase.get("children"):
|
||||
continue
|
||||
child_statuses = [c["status"] for c in phase["children"]]
|
||||
if any(s == "error" for s in child_statuses):
|
||||
phase["status"] = "error"
|
||||
phase["content"] = f"❌ {phase['name']} - Error in sub-task"
|
||||
elif all(s == "completed" for s in child_statuses):
|
||||
phase["status"] = "completed"
|
||||
phase["content"] = f"✅ {phase['name']} - All agents completed successfully"
|
||||
elif any(s in ("in_progress", "completed") for s in child_statuses):
|
||||
# At least one started but not all done
|
||||
if phase["status"] != "in_progress":
|
||||
phase["status"] = "in_progress"
|
||||
phase["content"] = f"⏳ {phase['name']} - Running..."
|
||||
else:
|
||||
# All pending
|
||||
phase["status"] = "pending"
|
||||
|
||||
|
||||
def count_completed_agents(execution_tree: list) -> int:
|
||||
"""Count the number of completed agents across all phases."""
|
||||
count = 0
|
||||
for phase in execution_tree:
|
||||
if phase.get("children"):
|
||||
for agent in phase["children"]:
|
||||
if agent["status"] == "completed":
|
||||
count += 1
|
||||
return count
|
||||
|
||||
def mark_in_progress_agents(execution_tree: list):
|
||||
"""Sequentially activate only the earliest phase that still has pending agents.
|
||||
Rules:
|
||||
- A phase becomes active when all prior phases are completed.
|
||||
- Only the first such phase can have an in_progress agent.
|
||||
- Within that phase, mark exactly one first pending agent as in_progress.
|
||||
"""
|
||||
# Build quick lookup by id
|
||||
phase_map = {p["id"]: p for p in execution_tree}
|
||||
|
||||
# Determine which phase should be active
|
||||
active_phase = None
|
||||
for phase_id in PHASE_SEQUENCE:
|
||||
phase = phase_map.get(phase_id)
|
||||
if not phase:
|
||||
continue
|
||||
# If all previous phases completed, and this phase not fully completed, it's the active one
|
||||
prev_completed = all(
|
||||
(phase_map.get(prev_id) and all(c["status"] == "completed" for c in phase_map[prev_id].get("children", [])))
|
||||
for prev_id in PHASE_SEQUENCE[:PHASE_SEQUENCE.index(phase_id)]
|
||||
)
|
||||
phase_done = all(c["status"] == "completed" for c in phase.get("children", []))
|
||||
if prev_completed and not phase_done:
|
||||
active_phase = phase
|
||||
break
|
||||
|
||||
if not active_phase:
|
||||
return
|
||||
|
||||
# If an agent already in progress in the active phase, leave as-is
|
||||
if any(a["status"] == "in_progress" for a in active_phase.get("children", [])):
|
||||
return
|
||||
|
||||
# Otherwise pick first pending agent
|
||||
for agent in active_phase.get("children", []):
|
||||
if agent["status"] == "pending":
|
||||
agent["status"] = "in_progress"
|
||||
agent["content"] = f"⏳ {agent['name']} - Running analysis..."
|
||||
for child in agent.get("children", []):
|
||||
if child["status"] == "pending":
|
||||
child["status"] = "in_progress"
|
||||
break
|
||||
|
||||
def run_trading_process(company_symbol: str, config: Dict[str, Any]):
|
||||
"""Runs the TradingAgentsGraph in a separate thread."""
|
||||
with app_state_lock:
|
||||
app_state["overall_status"] = "in_progress"
|
||||
app_state["overall_progress"] = 0
|
||||
|
||||
try:
|
||||
# Import and create custom config
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
# Create custom configuration with user selections
|
||||
custom_config = DEFAULT_CONFIG.copy()
|
||||
custom_config["llm_provider"] = config["llm_provider"]
|
||||
custom_config["max_debate_rounds"] = config["max_debate_rounds"]
|
||||
custom_config["cost_per_trade"] = config["cost_per_trade"]
|
||||
|
||||
# Set the appropriate LLM models based on provider
|
||||
if config["llm_provider"] == "google":
|
||||
custom_config["gemini_quick_think_llm"] = config["quick_think_llm"]
|
||||
custom_config["gemini_deep_think_llm"] = config["deep_think_llm"]
|
||||
else:
|
||||
custom_config["quick_think_llm"] = config["quick_think_llm"]
|
||||
custom_config["deep_think_llm"] = config["deep_think_llm"]
|
||||
|
||||
# Set backend URL based on provider
|
||||
if config["llm_provider"] == "openrouter":
|
||||
custom_config["backend_url"] = "https://openrouter.ai/api/v1"
|
||||
elif config["llm_provider"] == "google":
|
||||
custom_config["backend_url"] = "https://generativelanguage.googleapis.com/v1"
|
||||
elif config["llm_provider"] == "anthropic":
|
||||
custom_config["backend_url"] = "https://api.anthropic.com/"
|
||||
elif config["llm_provider"] == "ollama":
|
||||
custom_config["backend_url"] = f"http://{os.getenv('OLLAMA_HOST', 'localhost')}:11434/v1"
|
||||
else: # openai
|
||||
custom_config["backend_url"] = "https://api.openai.com/v1"
|
||||
|
||||
print(f"🚀 Initializing TradingAgentsGraph for {company_symbol}")
|
||||
graph = TradingAgentsGraph(config=custom_config)
|
||||
analysis_date = config["analysis_date"] # Use user-selected date
|
||||
print(f"🔄 Starting propagation for {company_symbol} on {analysis_date}")
|
||||
|
||||
# The propagate method now accepts the callback and trade_date
|
||||
final_state, processed_signal = graph.propagate(company_symbol, trade_date=analysis_date, on_step_callback=update_execution_state)
|
||||
print(f"✅ Propagation completed for {company_symbol}")
|
||||
|
||||
with app_state_lock:
|
||||
app_state["overall_status"] = "completed"
|
||||
app_state["overall_progress"] = 100
|
||||
# Update the root node status to completed
|
||||
if app_state["execution_tree"]:
|
||||
app_state["execution_tree"][0]["status"] = "completed"
|
||||
app_state["execution_tree"][0]["content"] = f"✅ Analysis completed successfully!\n\nFinal Decision: {processed_signal}\n\nFull State: {str(final_state)}"
|
||||
|
||||
except Exception as e:
|
||||
import traceback
|
||||
error_detail = traceback.format_exc()
|
||||
with app_state_lock:
|
||||
app_state["overall_status"] = "error"
|
||||
app_state["overall_progress"] = 100
|
||||
if app_state["execution_tree"]:
|
||||
app_state["execution_tree"][0]["status"] = "error"
|
||||
app_state["execution_tree"][0]["content"] = f"Error during execution: {str(e)}\n\n{error_detail}"
|
||||
# Add a specific error item to the tree
|
||||
app_state["execution_tree"].append({
|
||||
"id": "error",
|
||||
"name": "Process Error",
|
||||
"status": "error",
|
||||
"content": f"Error during execution: {str(e)}\n\n{error_detail}",
|
||||
"children": [],
|
||||
"timestamp": time.time()
|
||||
})
|
||||
finally:
|
||||
with app_state_lock:
|
||||
app_state["process_running"] = False
|
||||
|
||||
|
||||
@app.get("/", response_class=HTMLResponse)
async def read_root():
    """Serve the main page, pre-filling the analysis-date field with today.

    Returns:
        Rendered ``index.html`` with the current ``app_state`` and today's
        ISO date as ``default_date``.
    """
    from datetime import date

    page = jinja_env.get_template("index.html")
    return page.render(
        app_state=app_state,
        default_date=date.today().isoformat(),
    )
|
||||
|
||||
@app.post("/start", response_class=HTMLResponse)
async def start_process(
    background_tasks: BackgroundTasks,
    company_symbol: str = Form(...),
    llm_provider: str = Form(...),
    quick_think_llm: str = Form(...),
    deep_think_llm: str = Form(...),
    max_debate_rounds: int = Form(...),
    cost_per_trade: float = Form(...),
    analysis_date: str = Form(...)
):
    """Validate the environment, store the submitted configuration, and
    launch the trading analysis as a FastAPI background task.

    Always returns the left-panel partial so HTMX can swap in the execution
    tree. If required environment variables are missing, or a run is already
    in progress, the partial is returned without starting a new task.
    """
    # Check if all required environment variables are set
    missing_vars = [var for var in required_env_vars if not os.getenv(var)]
    if missing_vars:
        # BUGFIX: these writes previously happened WITHOUT app_state_lock,
        # racing against the background task; every other app_state writer
        # in this module holds the lock, so do the same here.
        with app_state_lock:
            app_state["overall_status"] = "error"
            app_state["execution_tree"] = [{
                "id": "error",
                "name": "Configuration Error",
                "status": "error",
                "content": f"Missing required environment variables: {', '.join(missing_vars)}. Please check .env.example file.",
                "children": [],
                "timestamp": time.time()
            }]
            template = jinja_env.get_template("_partials/left_panel.html")
            return template.render(tree=app_state["execution_tree"], app_state=app_state)

    with app_state_lock:
        if app_state["process_running"]:
            # A run is already active: re-render current state instead of
            # starting a second, conflicting analysis.
            template = jinja_env.get_template("_partials/left_panel.html")
            return template.render(tree=app_state["execution_tree"], app_state=app_state)

        app_state["process_running"] = True
        app_state["company_symbol"] = company_symbol
        app_state["overall_status"] = "in_progress"
        app_state["overall_progress"] = 5  # Show initial progress

        # Store all configuration parameters
        app_state["config"] = {
            "llm_provider": llm_provider,
            "quick_think_llm": quick_think_llm,
            "deep_think_llm": deep_think_llm,
            "max_debate_rounds": max_debate_rounds,
            "cost_per_trade": cost_per_trade,
            "analysis_date": analysis_date
        }

        # Initialize execution tree with complete structure
        app_state["execution_tree"] = initialize_complete_execution_tree()

        # Schedule the long-running analysis after the response is sent.
        background_tasks.add_task(run_trading_process, company_symbol, app_state["config"])

        template = jinja_env.get_template("_partials/left_panel.html")
        return template.render(tree=app_state["execution_tree"], app_state=app_state)
|
||||
|
||||
@app.get("/status", response_class=HTMLResponse)
async def get_status():
    """Render the left-panel partial reflecting the current execution state."""
    with app_state_lock:
        partial = jinja_env.get_template("_partials/left_panel.html")
        return partial.render(
            tree=app_state["execution_tree"],
            app_state=app_state,
        )
|
||||
|
||||
|
||||
@app.get("/status-updates")
async def get_status_updates():
    """Return only the status updates as JSON for targeted updates.

    Walks the execution tree depth-first and maps each node id to its
    current status plus display icon, alongside the overall progress.
    """
    with app_state_lock:
        status_updates = {}

        # NOTE: the previous version carried an unused `prefix` parameter;
        # it was never supplied and never read, so it has been removed.
        def extract_status_info(items):
            for item in items:
                item_id = item["id"]
                status_updates[item_id] = {
                    "status": item["status"],
                    "status_icon": get_status_icon(item["status"])
                }
                if item.get("children"):
                    extract_status_info(item["children"])

        extract_status_info(app_state["execution_tree"])

        return {
            "status_updates": status_updates,
            "overall_progress": app_state["overall_progress"],
            "overall_status": app_state["overall_status"]
        }
|
||||
|
||||
def get_status_icon(status: str) -> str:
    """Get the status icon for a given status.

    Any status other than the three known ones falls back to the
    paused icon.
    """
    icons = {
        'completed': '✅',
        'in_progress': '⏳',
        'error': '❌',
    }
    return icons.get(status, '⏸️')
|
||||
|
||||
def find_item_in_tree(item_id: str, tree: list) -> Dict[str, Any] | None:
|
||||
"""Recursively searches the execution tree for an item by its ID."""
|
||||
for item in tree:
|
||||
if item["id"] == item_id:
|
||||
return item
|
||||
if item["children"]:
|
||||
found_child = find_item_in_tree(item_id, item["children"])
|
||||
if found_child:
|
||||
return found_child
|
||||
return None
|
||||
|
||||
@app.get("/content/{item_id}", response_class=HTMLResponse)
async def get_item_content(item_id: str):
    """Return the rendered content of one tree node, or a 404 snippet."""
    with app_state_lock:
        node = find_item_in_tree(item_id, app_state["execution_tree"])
        if not node:
            return HTMLResponse(content="<p>Item not found.</p>", status_code=404)
        partial = jinja_env.get_template("_partials/right_panel.html")
        return partial.render(content=node.get("content", "No content available."))
|
||||
|
||||
# To run this app:
|
||||
# uvicorn webapp.main:app --reload
|
||||
|
|
@ -0,0 +1,544 @@
|
|||
/* Dark theme variables */
:root {
    --bg-primary: #1a1a1a;
    --bg-secondary: #242424;
    --text-primary: #e0e0e0;
    --text-secondary: #a0a0a0;
    --accent-color: #4CAF50;
    --border-color: #333;
    --input-bg: #2a2a2a;
    --hover-color: #333;
}

/* Basic styles for the webapp */
body {
    font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif;
    display: flex;
    flex-direction: column;
    height: 100vh;
    margin: 0;
    background-color: var(--bg-primary);
    color: var(--text-primary);
}

/* Left Panel - Configuration and Controls.
   FIX: #left-panel was declared twice with overlapping properties;
   merged into one rule (union of both, incl. box-sizing). */
#left-panel {
    width: 30%;
    border-right: 1px solid var(--border-color);
    padding: 20px;
    overflow-y: auto;
    box-sizing: border-box;
    background-color: var(--bg-secondary);
}

/* Right Panel.
   FIX: #right-panel was declared twice; the later rule's padding: 30px
   won the cascade, so the merged rule keeps 30px. */
#right-panel {
    width: 70%;
    padding: 30px;
    overflow-y: auto;
    background-color: var(--bg-primary);
}

/* Overall Progress Bar */
#overall-progress-container {
    width: 100%;
    background-color: var(--bg-secondary);
    border-bottom: 1px solid var(--border-color);
    display: flex;
    align-items: center;
    padding: 10px 20px;
    box-sizing: border-box;
}

#overall-progress-bar {
    height: 6px;
    background-color: var(--accent-color);
    width: 0%; /* Initial width */
    transition: width 0.5s ease-in-out, background-color 0.3s ease;
    border-radius: 3px;
    box-shadow: 0 0 10px rgba(76, 175, 80, 0.3);
}

#overall-progress-text {
    margin-left: 15px;
    font-size: 0.9em;
    color: var(--text-secondary);
    font-weight: 500;
}

/* Main Content Layout */
#main-content {
    display: flex;
    flex-grow: 1;
    height: calc(100vh - 46px); /* Adjust for progress bar height */
    background-color: var(--bg-primary);
}

#left-panel h2 {
    margin-top: 0;
    margin-bottom: 20px;
    color: var(--text-primary);
    font-size: 1.5em;
    font-weight: 600;
}

#config-form {
    background-color: var(--bg-primary);
    padding: 20px;
    border-radius: 8px;
    box-shadow: 0 2px 8px rgba(0, 0, 0, 0.2);
}

#config-form label {
    display: block;
    margin-bottom: 8px;
    color: var(--text-secondary);
    font-size: 0.9em;
}

#config-form input,
#config-form select {
    width: 100%;
    padding: 10px;
    margin-bottom: 15px;
    background-color: var(--input-bg);
    border: 1px solid var(--border-color);
    border-radius: 4px;
    color: var(--text-primary);
    font-size: 1em;
    transition: border-color 0.3s ease, box-shadow 0.3s ease;
    box-sizing: border-box;
}

#config-form input:focus,
#config-form select:focus {
    outline: none;
    border-color: var(--accent-color);
    box-shadow: 0 0 0 2px rgba(76, 175, 80, 0.2);
}

/* Specific styling for select dropdowns */
#config-form select {
    cursor: pointer;
    background-image: url("data:image/svg+xml;charset=UTF-8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23a0a0a0' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='6,9 12,15 18,9'%3E%3C/polyline%3E%3C/svg%3E");
    background-repeat: no-repeat;
    background-position: right 10px center;
    background-size: 16px;
    padding-right: 40px;
    appearance: none;
    -webkit-appearance: none;
    -moz-appearance: none;
}

#config-form select option {
    background-color: var(--input-bg);
    color: var(--text-primary);
    padding: 8px;
}

#config-form button {
    width: 100%;
    padding: 12px;
    background-color: var(--accent-color);
    color: white;
    border: none;
    border-radius: 4px;
    font-size: 1em;
    font-weight: 500;
    cursor: pointer;
    transition: background-color 0.3s ease, transform 0.1s ease;
}

#config-form button:hover {
    background-color: #45a049;
    transform: translateY(-1px);
}

#config-form button:active {
    transform: translateY(0);
}

#left-panel ul {
    list-style: none;
    padding-left: 15px;
    margin-top: 20px;
}

#left-panel li {
    margin-bottom: 8px;
    cursor: pointer;
    transition: transform 0.2s ease;
}

#left-panel li .item-name {
    padding: 8px 12px;
    border-radius: 4px;
    display: inline-block;
    transition: background-color 0.2s ease;
}

#left-panel li .item-name:hover {
    background-color: var(--hover-color);
}

/* Status Indicators */
.status-pending .item-name {
    color: var(--text-secondary);
    border-left: 3px solid var(--text-secondary);
    padding-left: 10px;
}

.status-in_progress .item-name {
    color: #3b82f6;
    font-weight: 600;
    border-left: 3px solid #3b82f6;
    padding-left: 10px;
    animation: pulse 2s infinite;
}

.status-completed .item-name {
    color: var(--accent-color);
    border-left: 3px solid var(--accent-color);
    padding-left: 10px;
}

.status-error .item-name {
    color: #ef4444;
    font-weight: 600;
    border-left: 3px solid #ef4444;
    padding-left: 10px;
}

#right-panel p {
    color: var(--text-secondary);
    line-height: 1.6;
    font-size: 1.1em;
}

/* Execution Tree Styles */
.execution-tree {
    margin-top: 20px;
}

.execution-tree ul {
    margin-left: 20px;
    border-left: 2px solid var(--border-color);
    padding-left: 15px;
}

.item-children {
    margin-left: 20px;
    border-left: 2px solid var(--border-color);
    padding-left: 15px;
    margin-top: 8px;
    overflow: hidden;
    transition: max-height 0.3s ease-in-out, opacity 0.2s ease-in-out;
}

/* Collapsed state - hide children by default */
.item-children.collapsed {
    max-height: 0;
    opacity: 0;
    margin-top: 0;
    margin-bottom: 0;
}

/* Expanded state */
.item-children.expanded {
    max-height: 1000px; /* Large enough to accommodate content */
    opacity: 1;
}

.process-item {
    position: relative;
    margin-bottom: 5px;
}

.process-item::before {
    content: '';
    position: absolute;
    left: -18px;
    top: 12px;
    width: 8px;
    height: 2px;
    background-color: var(--border-color);
}

/* Item header container */
.item-header {
    display: flex;
    align-items: center;
    gap: 4px;
}

/* Toggle button styles */
.toggle-btn {
    background: none;
    border: none;
    color: var(--text-secondary);
    cursor: pointer;
    padding: 4px;
    border-radius: 4px;
    display: flex;
    align-items: center;
    justify-content: center;
    width: 20px;
    height: 20px;
    transition: all 0.2s ease;
    font-size: 10px;
    line-height: 1;
}

.toggle-btn:hover {
    background-color: var(--hover-color);
    color: var(--text-primary);
    transform: scale(1.1);
}

/* Remove default focus highlight on mouse focus but keep accessible outline for keyboard users */
.toggle-btn:focus {
    outline: none;
}
.toggle-btn:focus-visible {
    outline: 2px solid var(--accent-color);
    outline-offset: 2px;
}

/* Toggle icon rotation */
.toggle-icon {
    transition: transform 0.2s ease;
    display: inline-block;
}

.toggle-btn.expanded .toggle-icon {
    transform: rotate(90deg);
}

/* Spacer for items without children */
.toggle-spacer {
    width: 20px;
    height: 20px;
    display: inline-block;
}

.clickable {
    cursor: pointer;
    transition: all 0.2s ease;
}

.clickable:hover {
    transform: translateX(3px);
}

.status-icon {
    margin-right: 8px;
    font-size: 0.9em;
    transition: all 0.3s ease;
    display: inline-block;
}

/* Specific styling for different item types */
.process-item[class*="_phase"] .item-name {
    font-weight: bold;
    font-size: 1.1em;
    color: var(--text-primary);
}

.process-item[class*="analyst"] .item-name,
.process-item[class*="researcher"] .item-name,
.process-item[class*="planner"] .item-name,
.process-item[class*="trader"] .item-name,
.process-item[class*="judge"] .item-name {
    font-weight: 500;
    color: var(--text-primary);
    padding-left: 8px;
}

.process-item[class*="_report"] .item-name,
.process-item[class*="_messages"] .item-name {
    font-size: 0.9em;
    color: var(--text-secondary);
    font-style: italic;
    padding-left: 16px;
}

/* Loading indicator */
.htmx-indicator {
    display: none;
    color: var(--accent-color);
    font-size: 0.9em;
    margin-top: 10px;
    animation: pulse 2s infinite;
}

.htmx-request .htmx-indicator {
    display: block;
}

/* Content display improvements */
#right-panel pre {
    background-color: var(--bg-secondary);
    padding: 20px;
    border-radius: 8px;
    border: 1px solid var(--border-color);
    color: var(--text-primary);
    font-family: 'Monaco', 'Menlo', 'Consolas', monospace;
    font-size: 0.9em;
    line-height: 1.4;
    overflow-x: auto;
    white-space: pre-wrap;
    word-wrap: break-word;
}

/* Markdown content styling */
.markdown-body {
    line-height: 1.6;
    font-size: 0.95rem;
}
.markdown-body h1, .markdown-body h2, .markdown-body h3 {
    border-bottom: 1px solid var(--border-color);
    padding-bottom: 0.3em;
    margin-top: 1.2em;
}
.markdown-body code {
    background: var(--bg-secondary);
    padding: 2px 5px;
    border-radius: 4px;
    font-family: 'Monaco', 'Menlo', 'Consolas', monospace;
}
.markdown-body pre code {
    display: block;
    padding: 12px;
    overflow-x: auto;
}

/* Expandable message boxes for Messages view */
details.message-box {
    background: var(--bg-secondary);
    border: 1px solid var(--border-color);
    border-radius: 6px;
    margin-bottom: 10px;
    padding: 6px 10px;
}

details.message-box > summary {
    cursor: pointer;
    font-weight: 600;
    list-style: none;
}

details.message-box > summary::-webkit-details-marker {
    display: none;
}

details.message-box[open] {
    box-shadow: 0 2px 4px rgba(0,0,0,0.25);
}

pre.message-content {
    background: var(--bg-primary);
    border: 1px solid var(--border-color);
    border-radius: 4px;
    padding: 10px;
    margin: 8px 0 4px 0;
    max-height: 400px;
    overflow: auto;
    font-size: 0.85rem;
}
.markdown-body table {
    width: 100%;
    border-collapse: collapse;
    margin: 1em 0;
}
.markdown-body th, .markdown-body td {
    border: 1px solid var(--border-color);
    padding: 6px 10px;
    text-align: left;
}
.markdown-body blockquote {
    border-left: 4px solid var(--accent-color);
    margin: 1em 0;
    padding: 0.5em 1em;
    background: var(--bg-secondary);
    border-radius: 4px;
}

#right-panel h3 {
    color: var(--text-primary);
    margin-bottom: 15px;
    font-size: 1.3em;
    font-weight: 600;
}

/* Animations */
@keyframes pulse {
    0% {
        opacity: 1;
    }
    50% {
        opacity: 0.7;
    }
    100% {
        opacity: 1;
    }
}

/* -------------------------------------------------- */
/* Custom Scrollbars (Dark Theme) */
/* -------------------------------------------------- */
/* WebKit Browsers (Chrome, Edge, Safari) */
::-webkit-scrollbar {
    width: 10px;
    height: 10px;
}

::-webkit-scrollbar-track {
    background: var(--bg-secondary);
    border-left: 1px solid var(--border-color);
}

::-webkit-scrollbar-thumb {
    background: linear-gradient(var(--hover-color), var(--bg-primary));
    border: 1px solid var(--border-color);
    border-radius: 8px;
    box-shadow: inset 0 0 4px rgba(0,0,0,0.4);
}

::-webkit-scrollbar-thumb:hover {
    background: linear-gradient(#3a3a3a, var(--hover-color));
}

::-webkit-scrollbar-corner {
    background: var(--bg-secondary);
}

/* Firefox */
* {
    scrollbar-width: thin;
    scrollbar-color: var(--hover-color) var(--bg-secondary);
}

/* Make pre/code blocks and panels consistent.
   NOTE(review): redundant with the universal selector above (same values);
   kept for explicitness. */
#left-panel, #right-panel, pre, .item-children, pre.message-content {
    scrollbar-width: thin;
    scrollbar-color: var(--hover-color) var(--bg-secondary);
}

/* Reduced motion preference: simplify visuals */
@media (prefers-reduced-motion: reduce) {
    ::-webkit-scrollbar-thumb {
        box-shadow: none;
    }
}
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
{# Left-panel partial: execution tree plus out-of-band progress updates.
   Context: `tree` (list of node dicts) and `app_state`. #}
{# render_item: one tree node — toggle button when it has children, status
   icon, a clickable name that HTMX-loads the node's content into the right
   panel, and a collapsed child list rendered recursively. #}
{% macro render_item(item) %}
<li class="process-item status-{{ item.status }}" id="item-{{ item.id }}">
    <div class="item-header">
        {% if item.children %}
        <button class="toggle-btn" onclick="toggleNode(this)" aria-label="Toggle children">
            <span class="toggle-icon">▶</span>
        </button>
        {% else %}
        <span class="toggle-spacer"></span>
        {% endif %}
        <span hx-get="/content/{{ item.id }}" hx-target="#right-panel" hx-swap="innerHTML" class="item-name clickable">
            <span class="status-icon" id="status-icon-{{ item.id }}">
                {% if item.status == 'completed' %}✅
                {% elif item.status == 'in_progress' %}⏳
                {% elif item.status == 'error' %}❌
                {% else %}⏸️
                {% endif %}
            </span>
            {{ item.name }}
        </span>
    </div>
    {% if item.children %}
    <ul class="item-children collapsed" id="children-{{ item.id }}">
        {% for child in item.children %}
        {{ render_item(child) }}
        {% endfor %}
    </ul>
    {% endif %}
</li>
{% endmacro %}

{# Out-of-band swaps: htmx replaces these elements wherever they live in the page. #}
<div id="overall-progress-bar" hx-swap-oob="true" style="width:{{ app_state.overall_progress }}%;"></div>
<span id="overall-progress-text" hx-swap-oob="true">{{ app_state.overall_progress }}% ({{ app_state.overall_status }})</span>

<h2>
{% if app_state.company_symbol %}
Trading Analysis for {{ app_state.company_symbol }}
{% else %}
Execution Status
{% endif %}
</h2>

{% if tree %}
<ul class="execution-tree" id="execution-tree">
    {% for item in tree %}
    {{ render_item(item) }}
    {% endfor %}
</ul>
{% else %}
<p id="execution-tree">No process running. Start a new one from the configuration.</p>
{% endif %}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{# Right-panel partial: renders a tree node's content as Markdown.
   Relies on a custom `markdown` Jinja filter; `safe` trusts its HTML output. #}
<div>
    <h3>Content Details</h3>
    <div class="markdown-body">{{ content | markdown | safe }}</div>
</div>
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
{# Standalone tree partial: renders `tree` as nested list items.
   NOTE(review): render_item duplicates the macro in left_panel.html —
   consider extracting a shared macro file and importing it in both. #}
{% macro render_item(item) %}
<li class="process-item status-{{ item.status }}" id="item-{{ item.id }}">
    <div class="item-header">
        {% if item.children %}
        <button class="toggle-btn" onclick="toggleNode(this)" aria-label="Toggle children">
            <span class="toggle-icon">▶</span>
        </button>
        {% else %}
        <span class="toggle-spacer"></span>
        {% endif %}
        <span hx-get="/content/{{ item.id }}" hx-target="#right-panel" hx-swap="innerHTML" class="item-name clickable">
            <span class="status-icon" id="status-icon-{{ item.id }}">
                {% if item.status == 'completed' %}✅
                {% elif item.status == 'in_progress' %}⏳
                {% elif item.status == 'error' %}❌
                {% else %}⏸️
                {% endif %}
            </span>
            {{ item.name }}
        </span>
    </div>
    {% if item.children %}
    <ul class="item-children collapsed" id="children-{{ item.id }}">
        {% for child in item.children %}
        {{ render_item(child) }}
        {% endfor %}
    </ul>
    {% endif %}
</li>
{% endmacro %}

{% for item in tree %}
{{ render_item(item) }}
{% endfor %}
|
||||
|
|
@ -0,0 +1,370 @@
|
|||
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>TradingAgents</title>
    <link rel="preconnect" href="https://fonts.googleapis.com">
    <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
    <link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600&display=swap" rel="stylesheet">
    <link rel="stylesheet" href="/static/styles.css">
    <script src="https://unpkg.com/htmx.org@1.9.10"></script>
</head>
<body>
    <!-- Thin progress bar updated out-of-band by the left_panel partial -->
    <div id="overall-progress-container">
        <div id="overall-progress-bar" role="progressbar" aria-valuenow="0" aria-valuemin="0" aria-valuemax="100"></div>
        <span id="overall-progress-text">0%</span>
    </div>
    <div id="main-content">
        <!-- Left: configuration form; replaced by the execution tree after /start -->
        <div id="left-panel">
            <h2>Configuration</h2>
            <div id="config-form">
                <form hx-post="/start" hx-target="#left-panel" hx-swap="innerHTML" hx-indicator="#loading">
                    <label for="company_symbol">Company Symbol:</label>
                    <input type="text" id="company_symbol" name="company_symbol" value="AAPL" required>

                    <label for="llm_provider">LLM Provider:</label>
                    <select id="llm_provider" name="llm_provider" onchange="updateModelOptions()" required>
                        <option value="openai">OpenAI</option>
                        <option value="openrouter" selected>OpenRouter</option>
                        <option value="google">Google (Gemini)</option>
                        <option value="anthropic">Anthropic</option>
                        <option value="ollama">Ollama</option>
                    </select>

                    <!-- Initial options match the default provider (OpenRouter);
                         updateModelOptions() rebuilds them on provider change. -->
                    <label for="quick_think_llm">Quick Think LLM:</label>
                    <select id="quick_think_llm" name="quick_think_llm" required>
                        {% if not app_state.get('config') %}
                        <option value="x-ai/grok-4-fast:free">xAI: Grok 4 Fast (free)</option>
                        <option value="deepseek/deepseek-chat-v3.1:free">DeepSeek: DeepSeek V3.1 (free)</option>
                        <option value="meta-llama/llama-4-scout:free">Meta: Llama 4 Scout</option>
                        {% endif %}
                    </select>

                    <label for="deep_think_llm">Deep Think LLM:</label>
                    <select id="deep_think_llm" name="deep_think_llm" required>
                        {% if not app_state.get('config') %}
                        <option value="qwen/qwen3-235b-a22b:free">Qwen: Qwen3 235B A22B (free)</option>
                        <option value="openai/gpt-oss-120b:free">OpenAI: gpt-oss-120b (free)</option>
                        <option value="deepseek/deepseek-chat-v3-0324:free">DeepSeek V3 - 685B</option>
                        {% endif %}
                    </select>

                    <label for="max_debate_rounds">Max Debate Rounds:</label>
                    <select id="max_debate_rounds" name="max_debate_rounds" required>
                        <option value="1" selected>1</option>
                        <option value="2">2</option>
                        <option value="3">3</option>
                        <option value="4">4</option>
                        <option value="5">5</option>
                    </select>

                    <label for="cost_per_trade">Cost Per Trade ($):</label>
                    <input type="number" id="cost_per_trade" name="cost_per_trade" value="2.0" step="0.1" min="0" required>

                    <!-- default_date is filled server-side; setCurrentDate() is the client fallback -->
                    <label for="analysis_date">Analysis Date:</label>
                    <input type="date" id="analysis_date" name="analysis_date" value="{{ default_date }}" required>

                    <button type="submit">Start Process</button>
                    <div id="loading" class="htmx-indicator">Starting process...</div>
                </form>
            </div>
        </div>
        <!-- Right: node content loaded via hx-get /content/{id} -->
        <div id="right-panel">
            <p>Welcome! Please set your configuration and start the process.</p>
            <p>Enter a company symbol (e.g., AAPL, MSFT, GOOGL) and click "Start Process" to begin the trading analysis.</p>
        </div>
    </div>

    <script>
|
||||
// Model options for each provider.
// Shape: { [provider]: { quick: {value, text}[], deep: {value, text}[] } }.
// `value` is the model identifier submitted to the backend; `text` is the
// human-readable label shown in the dropdowns. Consumed by updateModelOptions().
const modelOptions = {
  "openai": {
    "quick": [
      { value: "gpt-4o-mini", text: "GPT-4o-mini - Fast and efficient for quick tasks" },
      { value: "gpt-4.1-nano", text: "GPT-4.1-nano - Ultra-lightweight model for basic operations" },
      { value: "gpt-4.1-mini", text: "GPT-4.1-mini - Compact model with good performance" },
      { value: "gpt-4o", text: "GPT-4o - Standard model with solid capabilities" }
    ],
    "deep": [
      { value: "gpt-4.1-nano", text: "GPT-4.1-nano - Ultra-lightweight model for basic operations" },
      { value: "gpt-4.1-mini", text: "GPT-4.1-mini - Compact model with good performance" },
      { value: "gpt-4o", text: "GPT-4o - Standard model with solid capabilities" },
      { value: "o4-mini", text: "o4-mini - Specialized reasoning model (compact)" },
      { value: "o3-mini", text: "o3-mini - Advanced reasoning model (lightweight)" },
      { value: "o3", text: "o3 - Full advanced reasoning model" },
      { value: "o1", text: "o1 - Premier reasoning and problem-solving model" }
    ]
  },
  "openrouter": {
    "quick": [
      { value: "x-ai/grok-4-fast:free", text: "xAI: Grok 4 Fast (free)" },
      { value: "deepseek/deepseek-chat-v3.1:free", text: "DeepSeek: DeepSeek V3.1 (free)" },
      { value: "z-ai/glm-4-32b", text: "Z.AI: GLM 4 32B" },
      { value: "meta-llama/llama-4-scout:free", text: "Meta: Llama 4 Scout" },
      { value: "meta-llama/llama-3.3-8b-instruct:free", text: "Meta: Llama 3.3 8B Instruct" },
      { value: "google/gemini-2.0-flash-exp:free", text: "Google: Gemini 2.0 Flash (free)" }
    ],
    "deep": [
      { value: "qwen/qwen3-235b-a22b:free", text: "Qwen: Qwen3 235B A22B (free)" },
      { value: "openai/gpt-oss-120b:free", text: "OpenAI: gpt-oss-120b (free)" },
      { value: "z-ai/glm-4-32b", text: "Z.AI: GLM 4 32B" },
      { value: "deepseek/deepseek-chat-v3-0324:free", text: "DeepSeek V3 - 685B-parameter model" }
    ]
  },
  "google": {
    "quick": [
      { value: "gemini-2.0-flash-lite", text: "Gemini 2.0 Flash-Lite - Cost efficiency and low latency" },
      { value: "gemini-2.0-flash", text: "Gemini 2.0 Flash - Next generation features, speed, and thinking" },
      { value: "gemini-2.5-flash-preview-05-20", text: "Gemini 2.5 Flash - Adaptive thinking, cost efficiency" }
    ],
    "deep": [
      { value: "gemini-2.0-flash-lite", text: "Gemini 2.0 Flash-Lite - Cost efficiency and low latency" },
      { value: "gemini-2.0-flash", text: "Gemini 2.0 Flash - Next generation features, speed, and thinking" },
      { value: "gemini-2.5-flash-preview-05-20", text: "Gemini 2.5 Flash - Adaptive thinking, cost efficiency" },
      { value: "gemini-2.5-pro-preview-06-05", text: "Gemini 2.5 Pro" }
    ]
  },
  "anthropic": {
    "quick": [
      { value: "claude-3-5-haiku-latest", text: "Claude Haiku 3.5 - Fast inference and standard capabilities" },
      { value: "claude-3-5-sonnet-latest", text: "Claude Sonnet 3.5 - Highly capable standard model" },
      { value: "claude-3-7-sonnet-latest", text: "Claude Sonnet 3.7 - Exceptional hybrid reasoning" },
      { value: "claude-sonnet-4-0", text: "Claude Sonnet 4 - High performance and excellent reasoning" }
    ],
    "deep": [
      { value: "claude-3-5-haiku-latest", text: "Claude Haiku 3.5 - Fast inference and standard capabilities" },
      { value: "claude-3-5-sonnet-latest", text: "Claude Sonnet 3.5 - Highly capable standard model" },
      { value: "claude-3-7-sonnet-latest", text: "Claude Sonnet 3.7 - Exceptional hybrid reasoning" },
      { value: "claude-sonnet-4-0", text: "Claude Sonnet 4 - High performance and excellent reasoning" },
      { value: "claude-opus-4-0", text: "Claude Opus 4 - Most powerful Anthropic model" }
    ]
  },
  "ollama": {
    "quick": [
      { value: "granite3.3:2b", text: "Granite 3.3 2B" },
      { value: "llama3.1", text: "llama3.1 local" },
      { value: "llama3.2", text: "llama3.2 local" }
    ],
    "deep": [
      { value: "granite3.3:2b", text: "Granite 3.3 2B" },
      { value: "llama3.1", text: "llama3.1 local" },
      { value: "qwen3", text: "qwen3" }
    ]
  }
};
|
||||
|
||||
/**
 * Rebuild both model dropdowns to match the currently selected provider.
 *
 * Reads the provider from the #llm_provider select and repopulates
 * #quick_think_llm and #deep_think_llm from the `modelOptions` table.
 * Unknown providers (or missing quick/deep lists) leave the dropdown empty.
 *
 * FIX: the populate loop was copy-pasted twice (quick + deep); extracted
 * into a single helper.
 */
function updateModelOptions() {
  const provider = document.getElementById('llm_provider').value;
  const options = modelOptions[provider] ?? {};

  // Replace a select's options with the given {value, text} entries.
  const populate = (select, models) => {
    select.innerHTML = '';
    for (const { value, text } of models ?? []) {
      const option = document.createElement('option');
      option.value = value;
      option.textContent = text;
      select.appendChild(option);
    }
  };

  populate(document.getElementById('quick_think_llm'), options.quick);
  populate(document.getElementById('deep_think_llm'), options.deep);
}
|
||||
|
||||
// Default the analysis_date input to today's date (YYYY-MM-DD).
// Only fills the field when the server did not already provide a value.
function setCurrentDate() {
  const el = document.getElementById('analysis_date');
  if (el && !el.value) { // only set if not already provided by server
    // Build the string from local date components: toISOString() is UTC-based
    // and can yield yesterday's/tomorrow's date depending on the user's
    // timezone when called near local midnight.
    const today = new Date();
    const yyyy = today.getFullYear();
    const mm = String(today.getMonth() + 1).padStart(2, '0');
    const dd = String(today.getDate()).padStart(2, '0');
    el.value = `${yyyy}-${mm}-${dd}`;
  }
}
|
||||
|
||||
// Expand or collapse the child list of a collapsible tree node.
// `button` is the toggle control inside an <li class="process-item">;
// does nothing when the item has no child list.
function toggleNode(button) {
  const item = button.closest('li.process-item');
  const childList = item ? item.querySelector(':scope > .item-children') : null;
  if (!childList) return;

  // Flip the current state and mirror it on both the child list and the
  // button (including the aria-expanded attribute for accessibility).
  const expanding = !childList.classList.contains('expanded');
  childList.classList.toggle('expanded', expanding);
  childList.classList.toggle('collapsed', !expanding);
  button.classList.toggle('expanded', expanding);
  button.setAttribute('aria-expanded', String(expanding));
}
|
||||
|
||||
// Snapshot which toggleable tree items are currently expanded.
// Returns an object mapping itemId -> boolean (true when expanded);
// empty when no execution tree is present.
function saveExpansionState() {
  const state = {};
  const tree = document.querySelector('.execution-tree');
  if (!tree) return state;

  for (const item of tree.querySelectorAll('.process-item')) {
    const children = item.querySelector('.item-children');
    const button = item.querySelector('.toggle-btn');
    if (!children || !button) continue; // not a toggleable item

    const itemId = getItemId(item);
    if (itemId) {
      state[itemId] = children.classList.contains('expanded');
    }
  }
  return state;
}
|
||||
|
||||
// Re-apply a previously saved expansion snapshot (see saveExpansionState)
// to the current execution tree, e.g. after HTMX swapped in new content.
// Items missing from the snapshot are left untouched.
function restoreExpansionState(savedState) {
  if (!savedState) return;

  const tree = document.querySelector('.execution-tree');
  if (!tree) return;

  for (const item of tree.querySelectorAll('.process-item')) {
    const children = item.querySelector('.item-children');
    const button = item.querySelector('.toggle-btn');
    if (!children || !button) continue; // not a toggleable item

    const itemId = getItemId(item);
    if (!itemId || !Object.prototype.hasOwnProperty.call(savedState, itemId)) continue;

    // Force the saved state onto both the child list and the toggle button.
    const expanded = savedState[itemId];
    children.classList.toggle('expanded', expanded);
    children.classList.toggle('collapsed', !expanded);
    button.classList.toggle('expanded', expanded);
  }
}
|
||||
|
||||
// Derive an item's id from the hx-get URL ("/content/<id>") on its
// clickable name span. Returns null when the span or URL is missing
// or the URL does not match the expected pattern.
function getItemId(processItem) {
  const clickable = processItem.querySelector('.item-name.clickable');
  const hxGet = clickable ? clickable.getAttribute('hx-get') : null;
  if (!hxGet) return null;

  const match = hxGet.match(/\/content\/(.+)$/);
  return match ? match[1] : null;
}
|
||||
|
||||
// Before HTMX swaps the left panel, remember which tree nodes are open so
// the freshly rendered markup can be restored to the same expansion state.
document.addEventListener('htmx:beforeSwap', (event) => {
  if (event.target.id === 'left-panel') {
    window.savedExpansionState = saveExpansionState();
  }
});
|
||||
|
||||
// Handle HTMX after settle to restore state
// NOTE(review): a second 'htmx:afterSettle' listener further down in this
// script also calls restoreExpansionState for '#left-panel', so the restore
// runs twice per settle — harmless but redundant; consider consolidating
// the two listeners.
document.addEventListener('htmx:afterSettle', function(event) {
    if (event.target.id === 'left-panel' && window.savedExpansionState) {
        restoreExpansionState(window.savedExpansionState);
    }
});
|
||||
|
||||
// Poll /status-updates and patch the progress bar and per-item status icons
// in place (instead of a full HTMX swap) to prevent flickering.
// Expects JSON of shape:
//   { overall_progress, overall_status, status_updates: { itemId: { status, status_icon } } }
function updateStatusIndicators() {
  fetch('/status-updates')
    .then((response) => {
      // Surface HTTP errors instead of feeding an error page to .json().
      if (!response.ok) {
        throw new Error(`status-updates request failed: ${response.status}`);
      }
      return response.json();
    })
    .then((data) => {
      // Update the overall progress bar and its text label.
      const progressBar = document.getElementById('overall-progress-bar');
      const progressText = document.getElementById('overall-progress-text');
      if (progressBar) {
        progressBar.style.width = data.overall_progress + '%';
      }
      if (progressText) {
        progressText.textContent = data.overall_progress + '% (' + data.overall_status + ')';
      }

      // Update individual status icons; only touch the DOM when the icon
      // actually changed, to avoid needless repaints.
      for (const [itemId, statusInfo] of Object.entries(data.status_updates)) {
        // CSS.escape guards against itemIds containing selector metacharacters.
        const statusIcon = document.querySelector(`[hx-get="/content/${CSS.escape(itemId)}"] .status-icon`);
        if (statusIcon && statusIcon.textContent !== statusInfo.status_icon) {
          statusIcon.textContent = statusInfo.status_icon;

          // Keep the parent item's status-* CSS class in sync with the icon.
          const processItem = statusIcon.closest('.process-item');
          if (processItem) {
            // Remove old status classes before adding the new one.
            processItem.classList.remove('status-pending', 'status-in_progress', 'status-completed', 'status-error');
            processItem.classList.add('status-' + statusInfo.status);
          }
        }
      }
    })
    .catch((error) => {
      console.log('Status update failed:', error);
    });
}
|
||||
|
||||
// Begin polling for status updates (every 2s) while an execution tree is
// visible. No-op when there is no tree or a poll interval is already running,
// so repeated calls never stack intervals.
function startTargetedUpdates() {
  if (!document.querySelector('.execution-tree')) return;
  if (window.statusUpdateInterval) return;

  console.debug('[status] starting interval');
  window.statusUpdateInterval = setInterval(updateStatusIndicators, 2000);
}
|
||||
|
||||
// Cancel the status polling interval, if one is running, and clear the
// window-level handle so startTargetedUpdates can start a fresh one.
function stopTargetedUpdates() {
  const interval = window.statusUpdateInterval;
  if (!interval) return;

  clearInterval(interval);
  window.statusUpdateInterval = null;
}
|
||||
|
||||
// Initialize the page: default the date field, seed the model dropdowns for
// the initially selected provider, and begin status polling if an execution
// tree is already rendered.
document.addEventListener('DOMContentLoaded', () => {
  setCurrentDate();
  updateModelOptions();
  startTargetedUpdates();
});
|
||||
|
||||
// Handle when new content is loaded (like when starting a process)
// NOTE(review): an earlier 'htmx:afterSettle' listener in this script
// already restores the expansion state for '#left-panel'; this one repeats
// that restore and then restarts the status polling interval. Consider
// merging the two listeners into one.
document.addEventListener('htmx:afterSettle', function(event) {
    if (event.target.id === 'left-panel') {
        if (window.savedExpansionState) {
            restoreExpansionState(window.savedExpansionState);
        }
        // Start or restart targeted updates when new content is loaded
        stopTargetedUpdates();
        startTargetedUpdates();
    }
});
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
Loading…
Reference in New Issue