diff --git a/.env.example b/.env.example
new file mode 100644
index 00000000..a48e11b4
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,100 @@
+# TradingAgents Environment Configuration
+# Copy this file to .env and fill in your values
+
+# =============================================================================
+# API Keys (Required)
+# =============================================================================
+OPENAI_API_KEY=your_openai_api_key_here
+FINNHUB_API_KEY=your_finnhub_api_key_here
+
+# Optional API Keys
+OPENROUTER_API_KEY=your_openrouter_api_key_here
+GOOGLE_API_KEY=your_google_api_key_here
+ANTHROPIC_API_KEY=your_anthropic_api_key_here
+
+# Reddit API (Optional - for social media analysis)
+REDDIT_CLIENT_ID=your_reddit_client_id
+REDDIT_CLIENT_SECRET=your_reddit_client_secret
+REDDIT_USER_AGENT=TradingAgents/1.0
+
+# =============================================================================
+# SSL/TLS Certificate Configuration (OPTIONAL)
+# =============================================================================
+
+# Certificate Bundle Path (ONLY set if you need a custom certificate bundle)
+# If not set, system default SSL behavior is used
+# Common locations:
+# - macOS: /etc/ssl/cert.pem
+# - Ubuntu/Debian: /etc/ssl/certs/ca-certificates.crt
+# - CentOS/RHEL: /etc/pki/tls/certs/ca-bundle.crt
+# - Custom: /path/to/your/custom-ca-bundle.crt
+# REQUESTS_CA_BUNDLE=/etc/ssl/cert.pem
+# CURL_CA_BUNDLE=/etc/ssl/cert.pem
+
+# SSL Verification (ONLY set to false if needed for development/testing)
+# If not set, SSL verification is enabled by default (recommended)
+# SSL_VERIFY=false
+
+# HTTP Timeout (ONLY set if default timeout is insufficient)
+# If not set, uses reasonable defaults
+# HTTP_TIMEOUT=60
+
+# =============================================================================
+# Proxy Configuration (ONLY if behind corporate firewall)
+# =============================================================================
+
+# HTTP/HTTPS Proxy Settings (ONLY set if required by your network)
+# If not set, direct connections are used
+# HTTP_PROXY=http://proxy.company.com:8080
+# HTTPS_PROXY=https://proxy.company.com:8080
+
+# Proxy with authentication
+# HTTP_PROXY=http://username:password@proxy.company.com:8080
+# HTTPS_PROXY=https://username:password@proxy.company.com:8080
+
+# =============================================================================
+# Application Settings
+# =============================================================================
+
+# Results Directory
+TRADINGAGENTS_RESULTS_DIR=./results
+
+# Ollama Configuration (if using local Ollama)
+OLLAMA_HOST=localhost
+
+# Optional Configuration
+# DEBUG=True
+# LOG_LEVEL=INFO
+
+# =============================================================================
+# SSL Certificate Examples for Common Enterprise Environments
+# =============================================================================
+
+# Example 1: Using system certificate store (macOS)
+# REQUESTS_CA_BUNDLE=/System/Library/OpenSSL/certs/cert.pem
+
+# Example 2: Using system certificate store (Ubuntu/Debian)
+# REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
+
+# Example 3: Using custom corporate certificate bundle
+# REQUESTS_CA_BUNDLE=/usr/local/share/ca-certificates/corporate-ca-bundle.crt
+
+# Example 4: Disabling SSL verification (development only)
+# SSL_VERIFY=false
+
+# =============================================================================
+# Troubleshooting SSL Issues
+# =============================================================================
+
+# If you encounter SSL certificate errors:
+# 1. Run the diagnostic tool: python diagnose_ssl.py
+# 2. Check if your organization uses a custom CA
+# 3. Ask your IT department for the corporate certificate bundle
+# 4. Try using certifi's bundle: pip install certifi
+# 5. Set REQUESTS_CA_BUNDLE to certifi's location (usually shown by diagnose_ssl.py)
+
+# Common SSL Error Solutions:
+# - "certificate verify failed": Set REQUESTS_CA_BUNDLE to correct cert bundle
+# - "SSL: WRONG_VERSION_NUMBER": Check if you're behind a proxy
+# - "Connection timeout": Increase HTTP_TIMEOUT or check proxy settings
+# - "Name or service not known": Check DNS settings and proxy configuration
\ No newline at end of file
diff --git a/README.md b/README.md
index 410229f4..673ede5f 100644
--- a/README.md
+++ b/README.md
@@ -49,7 +49,7 @@
## TradingAgents Framework
-TradingAgents is a multi-agent trading framework that mirrors the dynamics of real-world trading firms. By deploying specialized LLM-powered agents: from fundamental analysts, sentiment experts, and technical analysts, to trader, risk management team, the platform collaboratively evaluates market conditions and informs trading decisions. Moreover, these agents engage in dynamic discussions to pinpoint the optimal strategy.
+TradingAgents is a multi-agent trading framework that mirrors the dynamics of real-world trading firms. By deploying specialized LLM-powered agents: from fundamental analysts, sentiment experts, and trade planners, to trader, risk management team, the platform collaboratively evaluates market conditions and informs trading decisions. Moreover, these agents engage in dynamic discussions to pinpoint the optimal strategy.
@@ -63,7 +63,7 @@ Our framework decomposes complex trading tasks into specialized roles. This ensu
- Fundamentals Analyst: Evaluates company financials and performance metrics, identifying intrinsic values and potential red flags.
- Sentiment Analyst: Analyzes social media and public sentiment using sentiment scoring algorithms to gauge short-term market mood.
- News Analyst: Monitors global news and macroeconomic indicators, interpreting the impact of events on market conditions.
-- Technical Analyst: Utilizes technical indicators (like MACD and RSI) to detect trading patterns and forecast price movements.
+- Trade Planner: Utilizes technical indicators (like MACD and RSI) to detect trading patterns and forecast price movements.
@@ -101,15 +101,19 @@ git clone https://github.com/TauricResearch/TradingAgents.git
cd TradingAgents
```
-Create a virtual environment in any of your favorite environment managers:
+Create a virtual environment in any of your favorite environment managers. For example, if you have `uv` installed:
```bash
-conda create -n tradingagents python=3.13
-conda activate tradingagents
+uv venv
+```
+
+Activate the virtual environment:
+```bash
+.venv\Scripts\activate
```
Install dependencies:
```bash
-pip install -r requirements.txt
+uv sync
```
### Required APIs
@@ -151,6 +155,32 @@ An interface will appear showing results as they load, letting you track the age
+## Web Frontend (HTMX/FastAPI)
+
+In addition to the CLI, a new web-based frontend is available to visualize the agent communication process in real-time. It allows you to set configuration parameters, start the trading analysis, and observe the step-by-step execution of agents and tools, including their outputs and any errors.
+
+### Running the Web Frontend
+
+1. Ensure you have installed all dependencies using `uv sync`.
+2. Navigate to the project root directory in your terminal.
+3. Start the FastAPI server:
+ ```bash
+ uvicorn webapp.main:app --reload
+ ```
+4. Open your web browser and go to `http://127.0.0.1:8000`.
+5. Enter a company symbol (e.g., `AAPL`) in the configuration form and click "Start Process" to begin the analysis.
+
+### Rendered Reports (Markdown Support)
+
+Agent-generated reports (analysis summaries, debate histories, plans, and risk assessments) are produced in Markdown. The web frontend now renders these Markdown documents as styled HTML instead of showing raw markup. This includes support for:
+
+- Headings, emphasis, lists, and blockquotes
+- Tables (for structured metrics)
+- Fenced code blocks and inline code
+
+Security: Markdown is sanitized server-side using `bleach` to strip unsafe tags/attributes while preserving semantic structure. If you need to extend allowed tags (e.g., to permit additional formatting), modify `ALLOWED_TAGS` / `ALLOWED_ATTRIBUTES` in `webapp/main.py`.
+
+
## TradingAgents Package
### Implementation Details
diff --git a/SSL_CONFIGURATION.md b/SSL_CONFIGURATION.md
new file mode 100644
index 00000000..39957ef3
--- /dev/null
+++ b/SSL_CONFIGURATION.md
@@ -0,0 +1,117 @@
+# SSL Certificate Bundle Configuration for TradingAgents
+
+## Overview
+
+This implementation provides flexible SSL/TLS certificate configuration for TradingAgents while maintaining backward compatibility. The system only applies custom SSL settings when explicitly configured via environment variables.
+
+## Key Features
+
+### 1. Environment Variable Based Configuration
+- `REQUESTS_CA_BUNDLE` or `CURL_CA_BUNDLE`: Path to custom certificate bundle
+- `SSL_VERIFY`: Enable/disable SSL verification (true/false)
+- `HTTP_TIMEOUT`: Custom timeout for HTTP requests (seconds)
+- `HTTP_PROXY`: HTTP proxy server
+- `HTTPS_PROXY`: HTTPS proxy server
+
+### 2. Default Behavior Preservation
+- **If no environment variables are set**: Uses system default SSL behavior
+- **Only applies custom settings when explicitly configured**
+- **Empty or undefined variables are ignored**
+
+### 3. Comprehensive Coverage
+- **LangChain LLM clients**: Custom SSL configuration for OpenAI, OpenRouter, etc.
+- **HTTP requests**: Custom configuration for Google News, Reddit APIs
+- **Global SSL setup**: Sets environment variables for libraries that respect them
+
+## Usage Examples
+
+### Basic Usage (No Custom SSL)
+```bash
+# No SSL environment variables set
+# Uses system default SSL behavior
+python webapp/main.py
+```
+
+### Custom Certificate Bundle
+```bash
+# Use custom corporate certificate bundle
+export REQUESTS_CA_BUNDLE=/path/to/corporate-ca-bundle.crt
+python webapp/main.py
+```
+
+### Development/Testing (Disable SSL Verification)
+```bash
+# Disable SSL verification (NOT recommended for production)
+export SSL_VERIFY=false
+python webapp/main.py
+```
+
+### Behind Corporate Proxy
+```bash
+# Configure proxy settings
+export HTTP_PROXY=http://proxy.company.com:8080
+export HTTPS_PROXY=https://proxy.company.com:8080
+export REQUESTS_CA_BUNDLE=/etc/ssl/corporate-ca-bundle.crt
+python webapp/main.py
+```
+
+## Files Modified
+
+### Core Configuration
+- `tradingagents/default_config.py`: Added SSL configuration parameters
+- `tradingagents/dataflows/ssl_utils.py`: SSL utility functions (NEW)
+
+### Integration Points
+- `tradingagents/graph/trading_graph.py`: LLM client SSL configuration
+- `tradingagents/dataflows/googlenews_utils.py`: HTTP requests SSL configuration
+- `tradingagents/dataflows/interface.py`: Integration with SSL configuration
+
+### Documentation and Tools
+- `.env.example`: Updated with SSL configuration examples
+- `diagnose_ssl.py`: SSL diagnostic tool (NEW)
+- `test_ssl_config.py`: SSL configuration test suite (NEW)
+
+## Testing
+
+Run the diagnostic tool to check your SSL configuration:
+```bash
+python diagnose_ssl.py
+```
+
+Run the test suite to verify SSL configuration behavior:
+```bash
+python test_ssl_config.py
+```
+
+## Troubleshooting
+
+### Common SSL Errors and Solutions
+
+1. **Certificate verification failed**
+ - Set `REQUESTS_CA_BUNDLE` to correct certificate bundle path
+ - Check if your organization uses custom CA certificates
+
+2. **SSL: WRONG_VERSION_NUMBER**
+ - Usually indicates proxy configuration issues
+ - Set appropriate `HTTP_PROXY` and `HTTPS_PROXY` variables
+
+3. **Connection timeout**
+ - Increase `HTTP_TIMEOUT` value
+ - Check network connectivity and proxy settings
+
+4. **Name or service not known**
+ - Check DNS settings
+ - Verify proxy configuration
+
+### Getting Help
+
+1. Run `python diagnose_ssl.py` for comprehensive SSL diagnostics
+2. Check your organization's IT documentation for certificate bundles
+3. Contact your IT department for corporate proxy and certificate information
+
+## Security Considerations
+
+- **Never disable SSL verification in production**
+- **Use custom certificate bundles for corporate environments**
+- **Keep certificate bundles updated**
+- **Secure proxy credentials if using authenticated proxies**
\ No newline at end of file
diff --git a/app.py b/app.py
new file mode 100755
index 00000000..2a40c9c2
--- /dev/null
+++ b/app.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+"""
+TradingAgents Web Application Launcher
+
+This script starts the TradingAgents webapp using uvicorn.
+It provides a convenient entry point to run the FastAPI application.
+"""
+
+import uvicorn
+import os
+import sys
+from pathlib import Path
+
+def main():
+ """Start the TradingAgents webapp with uvicorn."""
+
+ # Get the project root directory
+ project_root = Path(__file__).parent.absolute()
+
+ # Add the project root to Python path so imports work correctly
+ if str(project_root) not in sys.path:
+ sys.path.insert(0, str(project_root))
+
+ # Change to the project directory to ensure relative paths work
+ os.chdir(project_root)
+
+ # Configuration for uvicorn
+ config = {
+ "app": "webapp.main:app",
+ "host": "localhost",
+ "port": 8000,
+ "reload": True, # Enable auto-reload for development
+ "reload_dirs": [str(project_root)], # Watch for changes in project directory
+ "log_level": "info",
+ "access_log": True,
+ }
+
+ print("π Starting TradingAgents WebApp...")
+ print(f"π Project root: {project_root}")
+ print(f"π Server will be available at: http://localhost:{config['port']}")
+ print("π Auto-reload is enabled for development")
+ print("β οΈ Make sure you have set up your .env file with required API keys")
+ print("-" * 60)
+
+ try:
+ # Start the uvicorn server
+ uvicorn.run(**config)
+ except KeyboardInterrupt:
+ print("\nπ Shutting down TradingAgents WebApp...")
+ except Exception as e:
+ print(f"β Error starting the application: {e}")
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/cli/main.py b/cli/main.py
index 03c1858b..d3b25844 100644
--- a/cli/main.py
+++ b/cli/main.py
@@ -788,7 +788,12 @@ def run_analysis():
config["llm_provider"] = selections["llm_provider"].lower()
config["user_position"] = selections["user_position"]
config["cost_per_trade"] = selections["cost_per_trade"]
-
+
+ print("\nConfiguration:")
+ for key, value in config.items():
+ print(f" {key}: {value}")
+ print("")
+
# Initialize the graph
graph = TradingAgentsGraph(
[analyst.value for analyst in selections["analysts"]], config=config, debug=True
diff --git a/cli/utils.py b/cli/utils.py
index bfa9beff..0ecf405d 100644
--- a/cli/utils.py
+++ b/cli/utils.py
@@ -1,3 +1,4 @@
+import os
import questionary
from typing import List, Optional, Tuple, Dict
@@ -147,11 +148,13 @@ def select_shallow_thinking_agent(provider) -> str:
"openrouter": [
("xAI: Grok 4 Fast (free)", "x-ai/grok-4-fast:free"),
("DeepSeek: DeepSeek V3.1 (free)", "deepseek/deepseek-chat-v3.1:free"),
+ ("Z.AI: GLM 4 32B", "z-ai/glm-4-32b"),
("Meta: Llama 4 Scout", "meta-llama/llama-4-scout:free"),
("Meta: Llama 3.3 8B Instruct - A lightweight and ultra-fast variant of Llama 3.3 70B", "meta-llama/llama-3.3-8b-instruct:free"),
("google/gemini-2.0-flash-exp:free - Gemini Flash 2.0 offers a significantly faster time to first token", "google/gemini-2.0-flash-exp:free"),
],
"ollama": [
+ ("Granite 3.3 2B", "granite3.3:2b"),
("llama3.1 local", "llama3.1"),
("llama3.2 local", "llama3.2"),
]
@@ -212,10 +215,12 @@ def select_deep_thinking_agent(provider) -> str:
"openrouter": [
("Qwen: Qwen3 235B A22B (free)", "qwen/qwen3-235b-a22b:free"),
("OpenAI: gpt-oss-120b (free)", "openai/gpt-oss-120b:free"),
+ ("Z.AI: GLM 4 32B", "z-ai/glm-4-32b"),
("DeepSeek V3 - a 685B-parameter, mixture-of-experts model", "deepseek/deepseek-chat-v3-0324:free"),
("Deepseek - latest iteration of the flagship chat model family from the DeepSeek team.", "deepseek/deepseek-chat-v3-0324:free"),
],
"ollama": [
+ ("Granite 3.3 2B", "granite3.3:2b"),
("llama3.1 local", "llama3.1"),
("qwen3", "qwen3"),
]
@@ -251,7 +256,7 @@ def select_llm_provider() -> tuple[str, str]:
("Anthropic", "https://api.anthropic.com/"),
("Google", "https://generativelanguage.googleapis.com/v1"),
("OpenRouter", "https://openrouter.ai/api/v1"),
- ("Ollama", "http://localhost:11434/v1"),
+ ("Ollama", f"http://{os.getenv('OLLAMA_HOST', 'localhost')}:11434/v1"),
]
choice = questionary.select(
diff --git a/combine_certificates.py b/combine_certificates.py
new file mode 100644
index 00000000..1040171a
--- /dev/null
+++ b/combine_certificates.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+"""
+Certificate Bundle Combiner for TradingAgents
+
+This script combines your corporate certificate bundle (Netskope) with
+the certifi certificate bundle to ensure all certificates are available.
+"""
+
+import os
+import shutil
+from pathlib import Path
+
+def combine_certificate_bundles():
+ """Combine corporate and certifi certificate bundles"""
+
+ print("π Certificate Bundle Combiner")
+ print("=" * 40)
+
+ # Paths
+ corporate_bundle = "/Users/kevin.bruton/netskope-certificates/netskope-cert-bundle.pem"
+
+ try:
+ import certifi
+ certifi_bundle = certifi.where()
+ except ImportError:
+ print("β certifi package not found. Please install it: pip install certifi")
+ return False
+
+ combined_bundle = "/Users/kevin.bruton/netskope-certificates/combined-cert-bundle.pem"
+
+ print(f"π Corporate bundle: {corporate_bundle}")
+ print(f"π Certifi bundle: {certifi_bundle}")
+ print(f"π Combined bundle: {combined_bundle}")
+
+ # Check if corporate bundle exists
+ if not os.path.exists(corporate_bundle):
+ print(f"β Corporate certificate bundle not found: {corporate_bundle}")
+ return False
+
+ # Create combined bundle
+ try:
+ with open(combined_bundle, 'w') as combined_file:
+ # Write corporate certificates first
+ print("π Adding corporate certificates...")
+ with open(corporate_bundle, 'r') as corp_file:
+ combined_file.write(corp_file.read())
+
+ # Add separator
+ combined_file.write("\n# Certifi certificates below\n")
+
+ # Write certifi certificates
+ print("π Adding certifi certificates...")
+ with open(certifi_bundle, 'r') as certifi_file:
+ certifi_content = certifi_file.read()
+ combined_file.write(certifi_content)
+
+ print(f"β Combined certificate bundle created: {combined_bundle}")
+
+ # Set permissions
+ os.chmod(combined_bundle, 0o644)
+
+ # Show usage instructions
+ print("\nπ‘ Usage Instructions:")
+ print(f" Add this to your .env file:")
+ print(f" REQUESTS_CA_BUNDLE={combined_bundle}")
+ print(f" CURL_CA_BUNDLE={combined_bundle}")
+
+ print("\n Or export in your shell:")
+ print(f" export REQUESTS_CA_BUNDLE={combined_bundle}")
+ print(f" export CURL_CA_BUNDLE={combined_bundle}")
+
+ return True
+
+ except Exception as e:
+ print(f"β Error creating combined bundle: {e}")
+ return False
+
+def test_combined_bundle():
+ """Test the combined certificate bundle"""
+ combined_bundle = "/Users/kevin.bruton/netskope-certificates/combined-cert-bundle.pem"
+
+ if not os.path.exists(combined_bundle):
+ print("β Combined bundle not found. Run combine_certificate_bundles() first.")
+ return False
+
+ print(f"\nπ§ͺ Testing combined certificate bundle: {combined_bundle}")
+
+ import requests
+ test_urls = [
+ "https://www.google.com",
+ "https://api.openai.com/v1/models",
+ "https://openrouter.ai/api/v1/models"
+ ]
+
+ for url in test_urls:
+ try:
+ response = requests.get(url, verify=combined_bundle, timeout=10)
+ print(f"β {url} - Status: {response.status_code}")
+ except Exception as e:
+ print(f"β {url} - Error: {e}")
+
+ return True
+
+if __name__ == "__main__":
+ if combine_certificate_bundles():
+ test_combined_bundle()
\ No newline at end of file
diff --git a/debug_llm_call.py b/debug_llm_call.py
new file mode 100644
index 00000000..4396a66e
--- /dev/null
+++ b/debug_llm_call.py
@@ -0,0 +1,42 @@
+"""Standalone diagnostic script to test a single LLM call with resilience.
+Run: python debug_llm_call.py --provider openai --model gpt-4o-mini --message "Test message".
+It will respect environment variables for keys and SSL the same way the graph does.
+"""
+import argparse
+import os
+from tradingagents.default_config import DEFAULT_CONFIG
+from tradingagents.graph.trading_graph import TradingAgentsGraph
+from langchain_core.messages import HumanMessage
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--provider', default=DEFAULT_CONFIG['llm_provider'])
+ parser.add_argument('--model', default=DEFAULT_CONFIG['quick_think_llm'])
+ parser.add_argument('--message', default='Say hello and include a short market summary placeholder.')
+ args = parser.parse_args()
+
+ cfg = DEFAULT_CONFIG.copy()
+ cfg['llm_provider'] = args.provider
+ cfg['quick_think_llm'] = args.model
+ cfg['deep_think_llm'] = args.model
+
+ graph = TradingAgentsGraph(config=cfg)
+ # Build a minimal state for market analyst
+ state = {
+ 'trade_date': '2025-09-29',
+ 'company_of_interest': 'AAPL',
+ 'messages': [HumanMessage(content=args.message)],
+ }
+ market_node = graph.graph_setup.analyst_nodes.get('market')
+ if not market_node:
+ print('Market node not found in graph setup.')
+ return
+ # Directly invoke underlying function if possible
+ result_state = market_node(state)
+ print('Result keys:', list(result_state.keys()))
+ print('Market report snippet:', str(result_state.get('market_report',''))[:500])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/debug_streaming.py b/debug_streaming.py
new file mode 100644
index 00000000..82c5e2cd
--- /dev/null
+++ b/debug_streaming.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python3
+"""
+Debug script to test the TradingAgentsGraph streaming behavior
+"""
+
+import os
+from dotenv import load_dotenv
+from tradingagents.graph.trading_graph import TradingAgentsGraph
+from tradingagents.default_config import DEFAULT_CONFIG
+
+# Load environment variables
+load_dotenv()
+
+def debug_callback(state):
+ """Debug callback to see what state is being passed"""
+ print(f"\nπ CALLBACK RECEIVED:")
+ print(f" State type: {type(state)}")
+ print(f" State keys: {list(state.keys()) if isinstance(state, dict) else 'Not a dict'}")
+
+ if isinstance(state, dict):
+ for key, value in state.items():
+ if key in ["__end__", "messages"]:
+ continue
+ print(f" {key}: {type(value)} - {str(value)[:100]}...")
+ print("-" * 50)
+
+def test_streaming():
+ """Test the streaming functionality"""
+ print("π Testing TradingAgentsGraph streaming...")
+
+ # Create a minimal config for testing
+ config = DEFAULT_CONFIG.copy()
+ config["llm_provider"] = "openai"
+ config["quick_think_llm"] = "gpt-3.5-turbo"
+ config["deep_think_llm"] = "gpt-4"
+
+ try:
+ # Initialize the graph
+ print("π Initializing TradingAgentsGraph...")
+ graph = TradingAgentsGraph(config=config)
+
+ # Test propagation with callback
+ print("π Starting propagation with callback...")
+ final_state, signal = graph.propagate(
+ company_name="AAPL",
+ trade_date="2024-01-01",
+ on_step_callback=debug_callback
+ )
+
+ print(f"β Propagation completed!")
+ print(f"π Final signal: {signal}")
+ print(f"π― Final state keys: {list(final_state.keys())}")
+
+ except Exception as e:
+ print(f"β Error during streaming test: {e}")
+ import traceback
+ traceback.print_exc()
+
+if __name__ == "__main__":
+ test_streaming()
\ No newline at end of file
diff --git a/diagnose_ssl.py b/diagnose_ssl.py
new file mode 100644
index 00000000..e9759502
--- /dev/null
+++ b/diagnose_ssl.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python3
+"""
+SSL Certificate Diagnostic Tool for TradingAgents
+
+This script helps diagnose SSL/TLS certificate issues and provides guidance
+on how to configure certificate bundles properly.
+"""
+
+import os
+import sys
+import ssl
+import socket
+import requests
+from urllib.parse import urlparse
+from dotenv import load_dotenv
+load_dotenv()
+from tradingagents.dataflows.ssl_utils import get_certificate_info, get_ssl_config
+from tradingagents.default_config import DEFAULT_CONFIG
+
+
+def test_ssl_connection(hostname, port=443):
+ """Test SSL connection to a specific hostname."""
+ print(f"\nπ Testing SSL connection to {hostname}:{port}")
+
+ try:
+ # Create SSL context
+ context = ssl.create_default_context()
+
+ # Connect and get certificate info
+ with socket.create_connection((hostname, port), timeout=10) as sock:
+ with context.wrap_socket(sock, server_hostname=hostname) as ssock:
+ cert = ssock.getpeercert()
+ print(f"β SSL connection successful")
+ print(f" Subject: {cert.get('subject', 'Unknown')}")
+ print(f" Issuer: {cert.get('issuer', 'Unknown')}")
+ print(f" Version: {cert.get('version', 'Unknown')}")
+ return True
+
+ except Exception as e:
+ print(f"β SSL connection failed: {e}")
+ return False
+
+
+def test_requests_connection(url):
+ """Test HTTP request with requests library."""
+ print(f"\nπ Testing HTTP request to {url}")
+
+ try:
+ response = requests.get(url, timeout=10)
+ print(f"β HTTP request successful")
+ print(f" Status: {response.status_code}")
+ print(f" SSL Cert: {response.raw.connection.sock.getpeercert().get('subject', 'Unknown') if hasattr(response.raw.connection, 'sock') else 'Unknown'}")
+ return True
+
+ except requests.exceptions.SSLError as e:
+ print(f"β SSL Error: {e}")
+ return False
+ except Exception as e:
+ print(f"β Request failed: {e}")
+ return False
+
+
+def test_with_custom_cert_bundle(url, cert_bundle_path):
+ """Test HTTP request with custom certificate bundle."""
+ print(f"\nπ Testing with custom cert bundle: {cert_bundle_path}")
+
+ if not os.path.exists(cert_bundle_path):
+ print(f"β Certificate bundle not found: {cert_bundle_path}")
+ return False
+
+ try:
+ response = requests.get(url, verify=cert_bundle_path, timeout=10)
+ print(f"β Request with custom cert bundle successful")
+ print(f" Status: {response.status_code}")
+ return True
+
+ except Exception as e:
+ print(f"β Request with custom cert bundle failed: {e}")
+ return False
+
+
+def main():
+ """Main diagnostic function."""
+ print("π TradingAgents SSL Certificate Diagnostic Tool")
+ print("=" * 50)
+
+ # Get certificate information
+ print("\nπ Certificate Bundle Information:")
+ cert_info = get_certificate_info()
+ for key, value in cert_info.items():
+ if isinstance(value, list):
+ print(f" {key}: {', '.join(value) if value else 'None found'}")
+ else:
+ print(f" {key}: {value}")
+
+ # Test SSL configuration
+ print(f"\nβοΈ Current SSL Configuration:")
+ ssl_config = get_ssl_config(DEFAULT_CONFIG)
+ for key, value in ssl_config.items():
+ print(f" {key}: {value}")
+
+ # Test common endpoints
+ test_endpoints = [
+ ("api.openai.com", 443),
+ ("openrouter.ai", 443),
+ ("generativelanguage.googleapis.com", 443),
+ ("www.google.com", 443)
+ ]
+
+ print(f"\nπ― Testing SSL connections:")
+ for hostname, port in test_endpoints:
+ test_ssl_connection(hostname, port)
+
+ # Test HTTP requests
+ test_urls = [
+ "https://api.openai.com/v1/models",
+ "https://www.google.com/search?q=test",
+ "https://openrouter.ai/api/v1/models"
+ ]
+
+ print(f"\nπ Testing HTTP requests:")
+ for url in test_urls:
+ test_requests_connection(url)
+
+ # Test with different certificate bundles
+ if cert_info.get("certifi_bundle") and cert_info["certifi_bundle"] != "Not available (certifi not installed)":
+ print(f"\nπ§ͺ Testing with certifi bundle:")
+ test_with_custom_cert_bundle("https://www.google.com", cert_info["certifi_bundle"])
+
+ # Provide recommendations
+ print(f"\nπ‘ Recommendations:")
+ print(" π Certificate Bundle Configuration:")
+ print(" β’ Only set if you need a custom certificate bundle")
+ print(" β’ If not set, system default SSL behavior is used")
+ print(" export REQUESTS_CA_BUNDLE=/path/to/your/ca-bundle.crt")
+ print(" export CURL_CA_BUNDLE=/path/to/your/ca-bundle.crt")
+
+ print("\n β οΈ SSL Verification (use with caution):")
+ print(" β’ Only disable for development/testing")
+ print(" β’ If not set, SSL verification is enabled by default")
+ print(" export SSL_VERIFY=false")
+
+ print("\n β±οΈ Timeout Configuration:")
+ print(" β’ Only set if default timeout is insufficient")
+ print(" export HTTP_TIMEOUT=60")
+
+ print("\n π Proxy Configuration:")
+ print(" β’ Only required if behind corporate firewall")
+ print(" export HTTP_PROXY=http://proxy.company.com:8080")
+ print(" export HTTPS_PROXY=https://proxy.company.com:8080")
+
+ print("\n π Configuration:")
+ print(" β’ Add these to your .env file or export in shell")
+ print(" β’ Leave unset to use system defaults")
+ print(" β’ Only configure what you actually need")
+
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/frontend_implementation_plan.md b/frontend_implementation_plan.md
new file mode 100644
index 00000000..ea09127f
--- /dev/null
+++ b/frontend_implementation_plan.md
@@ -0,0 +1,235 @@
+# HTMX Frontend Implementation Plan
+
+This document outlines the architecture and step-by-step plan for building a new HTMX-based frontend for the TradingAgents project.
+
+## 1. General Architecture
+
+The frontend will be a single-page web application served by a lightweight Python backend (FastAPI). This backend will be responsible for serving the HTML, handling user requests to start the agent process, and providing real-time status updates. The frontend and backend code will be housed in a new top-level `webapp` directory to keep it separate from the core agent logic.
+
+### Core Components:
+
+* **FastAPI Backend:** A Python web server that will:
+ * Serve the main `index.html` file.
+ * Provide API endpoints for the frontend to interact with.
+ * Run the `TradingAgentsGraph` in a background thread.
+ * Maintain and serve the state of the execution process.
+* **HTMX Frontend:** The user interface, which will:
+ * Display the configuration form and start button.
+ * Show a hierarchical view of the agent execution process.
+ * Poll the backend for status updates.
+ * Display the content of selected process steps (reports, messages, errors) on the right side of the screen.
+* **Communication:** The frontend will communicate with the backend using a simple polling mechanism. The HTMX frontend will periodically request a status update from a `/status` endpoint. The backend will return a JSON object representing the current state of the execution tree. For displaying detailed content, the frontend will make specific requests to a `/content/{item_id}` endpoint.
+
+## 2. Proposed Project Structure
+
+To maintain separation of concerns, the new frontend code will live in a `webapp` directory.
+
+```
+C:\Users\kevin\repo\TradingAgents\
+├───... (existing project files)
+└───webapp/
+    ├───main.py                 # FastAPI application
+    ├───static/
+    │   └───styles.css          # CSS for styling
+    └───templates/
+        ├───index.html          # Main HTML file
+        └───_partials/
+            ├───left_panel.html     # HTMX partial for the execution tree
+            └───right_panel.html    # HTMX partial for the content view
+```
+
+## 3. Backend Implementation (FastAPI)
+
+The `webapp/main.py` file will define the FastAPI application and its endpoints.
+
+### API Endpoints:
+
+* **`GET /`**: Serves the main `templates/index.html` page.
+* **`POST /start`**:
+ * Accepts a JSON payload with the run configuration (`company_symbol`, etc.).
+ * Initializes the `TradingAgentsGraph`.
+ * Starts the `graph.propagate()` method in a background thread.
+ * Returns an initial response that replaces the config form with the main progress bar.
+* **`GET /status`**:
+ * This is the main polling endpoint for HTMX.
+ * It will return an HTML partial (`_partials/left_panel.html`) rendered with the current state of the execution tree. The state will be stored in memory.
+* **`GET /content/{item_id}`**:
+ * When a user clicks an item in the left panel, HTMX will call this endpoint.
+ * It will retrieve the specific content for that `item_id` from the in-memory state.
+ * It will return an HTML partial (`_partials/right_panel.html`) with the formatted content (e.g., a formatted report, a code block for a message, or a stack trace for an error).
+
+### State Management & Integration:
+
+To get real-time updates from the `TradingAgentsGraph`, we will need to instrument its execution. The plan is to modify the `TradingAgentsGraph` class slightly to accept a callback function.
+
+1. **Modify `TradingAgentsGraph.__init__`**: Add an optional `on_step_end` callback parameter.
+2. **Callback Execution**: Inside the graph's execution logic (after each agent or tool runs), this callback will be invoked with the details of the completed step (e.g., node name, output, status).
+3. **Update Global State**: The callback function, defined in `webapp/main.py`, will update a global in-memory dictionary that represents the hierarchical execution tree. This tree will store the status, content, and relationships of all steps.
+
+This approach avoids tight coupling and allows the web application to listen to the progress of the core agent logic.
+
+## 4. Frontend Implementation (HTMX)
+
+The frontend will be built using HTMX attributes directly in the HTML templates.
+
+* **`templates/index.html`**:
+ * Contains the basic page structure: a top bar for the overall progress, a left panel for the execution tree, and a right panel for content.
+ * Includes the HTMX library.
+ * Contains the initial configuration form. The form will have an `hx-post="/start"` attribute to trigger the process.
+
+* **Left Panel (`_partials/left_panel.html`)**:
+ * This partial will be the target of the status polling. The main container will have `hx-get="/status"` and `hx-trigger="load, every 5s"`.
+ * It will use a template loop (Jinja2) to render the hierarchical tree from the state object provided by the backend.
+ * Each item in the tree will be a clickable element with an `hx-get="/content/{item_id}"` attribute and an `hx-target="#right-panel"` attribute to load its content on the right side.
+ * The status of each item (pending, in-progress, completed, error) will be reflected using different CSS classes and icons:
+        - **Pending**: ⏸️ (paused icon, gray color)
+        - **In Progress**: ⏳ (hourglass icon, blue color)
+        - **Completed**: ✅ (check mark, green color)
+        - **Error**: ❌ (X mark, red color)
+
+* **Right Panel (`_partials/right_panel.html`)**:
+    * A simple container (`<div>`) that gets its content replaced by HTMX when a user clicks an item on the left.
+ * Content will be pre-formatted by the backend (e.g., using Markdown-to-HTML conversion or syntax highlighting for code/errors).
+
+* **Progress Bar**:
+ * The response from the initial `POST /start` call will replace the configuration form with a global progress bar.
+ * This progress bar's value will be updated as part of the `/status` polling response, by targeting its element ID with an `hx-swap-oob="true"` (Out of Band swap).
+
+### Execution Tree Structure
+
+The left panel should display a hierarchical tree structure as follows:
+
+```
+π Trading Analysis for [SYMBOL]
+βββ π Data Collection Phase
+β βββ π Market Analyst
+β β βββ π Market Analysis Report
+β β βββ π¬ Agent Messages
+β βββ π± Social Media Analyst
+β β βββ π Sentiment Analysis Report
+β β βββ π¬ Agent Messages
+β βββ π° News Analyst
+β β βββ π News Analysis Report
+β β βββ π¬ Agent Messages
+β βββ π Fundamentals Analyst
+β βββ π Fundamentals Report
+β βββ π¬ Agent Messages
+βββ π Research Phase
+β βββ π Bull Researcher
+β β βββ π Bull Case Analysis
+β β βββ π¬ Agent Messages
+β βββ π» Bear Researcher
+β β βββ π Bear Case Analysis
+β β βββ π¬ Agent Messages
+β βββ π Research Manager
+β βββ π Research Synthesis
+β βββ π¬ Agent Messages
+βββ π Planning Phase
+β βββ π Trade Planner
+β βββ π Trading Plan
+β βββ π¬ Agent Messages
+βββ β‘ Execution Phase
+β βββ β‘ Trader
+β βββ π Execution Report
+β βββ π¬ Agent Messages
+βββ β οΈ Risk Management Phase
+ βββ π¨ Aggressive Risk Analyst
+ β βββ π Risk Assessment (Aggressive)
+ β βββ π¬ Agent Messages
+ βββ βοΈ Neutral Risk Analyst
+ β βββ π Risk Assessment (Neutral)
+ β βββ π¬ Agent Messages
+ βββ π‘οΈ Conservative Risk Analyst
+ β βββ π Risk Assessment (Conservative)
+ β βββ π¬ Agent Messages
+ βββ π§ Portfolio Manager (Final Decision)
+ βββ π Portfolio Manager's Decision
+ βββ π¬ Agent Messages
+```
+
+Each agent should have:
+1. **Status Icon**: Shows current execution state (pending, in-progress, completed, error)
+2. **Report Sub-item**: Shows the specific report generated by that agent
+3. **Messages Sub-item**: Shows messages to/from that agent during execution
+
+The tree structure should be initialized at the start showing all agents in "pending" state, then update their status as execution progresses.
+
+## 5. Detailed Implementation Steps
+
+1. **Setup Environment**:
+ * Create the `webapp` directory and the file structure outlined above.
+ * Add `fastapi`, `uvicorn`, and `python-multipart` to the `requirements.txt` file and install them.
+
+2. **Backend - Basic Server**:
+ * Create the initial FastAPI app in `webapp/main.py`.
+ * Implement the `GET /` endpoint to serve `templates/index.html`.
+ * Create a basic `index.html` with the two-panel layout.
+
+3. **Backend - State & Integration**:
+ * Define the Python data classes for the execution state (e.g., `ProcessStep`, `RunState`).
+ * Modify `tradingagents/graph/trading_graph.py` to include the `on_step_end` callback mechanism.
+ * In `webapp/main.py`, implement the callback function that builds the hierarchical state tree in memory.
+
+4. **Backend - Endpoints**:
+ * Implement the `/start` endpoint to receive configuration and launch the `propagate` method in a background thread, passing the callback function.
+ * Implement the `/status` endpoint to render and return the `_partials/left_panel.html` partial.
+ * Implement the `/content/{item_id}` endpoint to render and return the `_partials/right_panel.html` partial.
+
+5. **Frontend - HTMX**:
+ * Develop the configuration form in `index.html` with `hx-post` to start the process.
+ * Create the `_partials/left_panel.html` template with the Jinja2 loop and the `hx-get` attributes for clicking on items.
+ * Add the polling mechanism to the main container in `index.html`.
+ * Style the different states (pending, completed, error) using CSS in `static/styles.css`.
+
+6. **Error Handling**:
+ * When the callback receives an error, it will update the corresponding item's status to "error" and store the stack trace.
+ * The frontend will visually flag the item as an error.
+ * When clicked, the `/content/{item_id}` endpoint will return the formatted stack trace to be displayed in the right panel.
+
+7. **Refinement**:
+ * Add a loading indicator for HTMX requests.
+ * Refine the CSS to ensure the application is visually appealing and user-friendly.
+ * Ensure the background process is managed correctly, especially in case of errors or server shutdown.
+
+## 6. Current Implementation Issues & Solutions
+
+### Issues Identified:
+
+1. **Incomplete Agent Tree Structure**: The current implementation only shows a single top-level item "Trading Analysis for [SYMBOL]" with limited sub-items, instead of the full agent hierarchy.
+
+2. **Improper Status Tracking**: Agents don't show proper execution status (pending, in-progress, completed, error) with appropriate icons.
+
+3. **Missing Reports and Messages**: Sub-items for individual agent reports and messages are not being created or displayed.
+
+4. **Callback State Detection**: The `update_execution_state` callback in `webapp/main.py` is not properly detecting and organizing the execution flow of all agents.
+
+### Solutions Implemented:
+
+#### Backend Changes (`webapp/main.py`):
+
+1. **Initialize Complete Tree Structure**: Pre-populate the execution tree with all agents in "pending" state at the start of execution.
+
+2. **Improved State Detection**: Enhanced the callback function to:
+ - Detect agent execution start/completion more reliably
+ - Track both agent status and their generated reports/messages
+ - Maintain proper phase organization (Data Collection, Research, Planning, Execution, Risk Management)
+
+3. **Agent Sub-items**: Each agent now has sub-items for:
+ - **Report**: The specific analysis/report generated by the agent
+ - **Messages**: Communication to/from the agent during execution
+
+#### Frontend Changes (`_partials/left_panel.html`):
+
+1. **Enhanced Status Icons**: Clear visual indicators for each execution state
+2. **Hierarchical Display**: Proper nesting of phases, agents, and their sub-items
+3. **Clickable Content**: All items are clickable to show detailed content in the right panel
+
+#### State Management:
+
+The execution tree now properly reflects:
+- **Phases**: Logical grouping of related agents (Data Collection, Research, etc.)
+- **Agents**: Individual agents with their execution status
+- **Sub-items**: Reports and messages for each agent
+- **Real-time Updates**: Status changes as execution progresses
+
+This provides users with complete visibility into the trading analysis process, allowing them to track which agents are running, completed, or encountering issues, and access detailed reports and communications from each agent.
diff --git a/main.py b/main.py
index 3cb5790d..06beece7 100644
--- a/main.py
+++ b/main.py
@@ -1,22 +1,55 @@
from dotenv import load_dotenv
load_dotenv()
+
+from rich.panel import Panel
+from rich.console import Console
+from rich.align import Align
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
# Create a custom config
config = DEFAULT_CONFIG.copy()
-config["llm_provider"] = "google" # Use a different model
-config["backend_url"] = "https://generativelanguage.googleapis.com/v1" # Use a different backend
-config["deep_think_llm"] = "gemini-2.0-flash" # Use a different model
-config["quick_think_llm"] = "gemini-2.0-flash" # Use a different model
+config["ticker"] = "F"
+config['analysis_date'] = "2025-09-28"
+config["llm_provider"] = "openrouter" # Use a different model
+#config["backend_url"] = "https://generativelanguage.googleapis.com/v1" # Use a different backend
+config["backend_url"] = "https://openrouter.ai/api/v1"
+config["deep_think_llm"] = "qwen/qwen3-235b-a22b:free" # Use a different model
+config["quick_think_llm"] = "x-ai/grok-4-fast:free" # Use a different model
config["max_debate_rounds"] = 1 # Increase debate rounds
config["online_tools"] = True
+config["cost_per_trade"] = 0.0
+
+with open("./cli/static/welcome.txt", "r", encoding="utf-8") as f:
+ welcome_ascii = f.read()
+
+# Create welcome box content
+welcome_content = f"{welcome_ascii}\n"
+welcome_content += "[bold green]TradingAgents: Multi-Agents LLM Financial Trading Framework - CLI[/bold green]\n\n"
+welcome_content += "[bold]Workflow Steps:[/bold]\n"
+welcome_content += "I. Analyst Team -> II. Research Team -> III. Trader -> IV. Risk Management -> V. Portfolio Management\n\n"
+welcome_content += (
+ "[dim]Built by [Tauric Research](https://github.com/TauricResearch)[/dim]"
+)
+
+# Create and center the welcome box
+welcome_box = Panel(
+ welcome_content,
+ border_style="green",
+ padding=(1, 2),
+ title="Welcome to TradingAgents",
+ subtitle="Multi-Agents LLM Financial Trading Framework",
+)
+console = Console()
+console.print(Align.center(welcome_box))
+console.print() # Add a blank line after the welcome box
+
# Initialize with custom config
ta = TradingAgentsGraph(debug=True, config=config)
# forward propagate
-_, decision = ta.propagate("NVDA", "2024-05-10")
+_, decision = ta.propagate(config["ticker"], config["analysis_date"])
print(decision)
# Memorize mistakes and reflect
diff --git a/memory_store/chroma.sqlite3 b/memory_store/chroma.sqlite3
index 61f5629e..4fbec5df 100644
Binary files a/memory_store/chroma.sqlite3 and b/memory_store/chroma.sqlite3 differ
diff --git a/pyproject.toml b/pyproject.toml
index beb1235b..a1647e8a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -33,4 +33,10 @@ dependencies = [
"tushare>=1.4.21",
"typing-extensions>=4.14.0",
"yfinance>=0.2.63",
+ "fastapi",
+ "uvicorn",
+ "python-multipart",
+ "jinja2",
+ "markdown>=3.6",
+ "bleach>=6.1.0",
]
diff --git a/requirements.txt b/requirements.txt
index a6154cd2..75f580f3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -24,3 +24,9 @@ rich
questionary
langchain_anthropic
langchain-google-genai
+fastapi
+uvicorn
+python-multipart
+jinja2
+markdown
+bleach
diff --git a/stop_loss_feature_implementation.md b/stop_loss_feature_implementation.md
new file mode 100644
index 00000000..c75c6bbd
--- /dev/null
+++ b/stop_loss_feature_implementation.md
@@ -0,0 +1,66 @@
+
+# Stop-Loss Feature Implementation Plan
+
+This document outlines the plan for implementing a stop-loss feature in the TradingAgents project.
+
+## 1. Overview
+
+The goal is to enhance the trading agents' capabilities by requiring a stop-loss price level for every trade recommendation. This will improve risk management and provide more concrete trading plans. An optional take-profit level can also be included.
+
+## 2. Recommended Architecture: New Trade Planner Agent
+
+After investigating the existing architecture, the recommended approach is to introduce a new, dedicated **Trade Planner Agent**. This approach is favored over modifying existing agents for the following reasons:
+
+* **Modularity and Separation of Concerns:** It keeps the responsibilities of each agent clear. The new agent will specialize in technical analysis, while other agents, like the `risk_manager`, can focus on their core competencies.
+* **Expertise:** A dedicated agent can be specifically prompted and potentially fine-tuned to become an expert in technical analysis, leading to more accurate stop-loss and take-profit levels.
+* **Scalability:** It will be easier to add more sophisticated technical analysis logic in the future without complicating the existing agents.
+
+The new workflow will be as follows:
+
+1. **Analyst Team:** Gathers and analyzes data (no changes).
+2. **Researcher Team:** Debates the findings and creates an investment plan (no changes).
+3. **Trade Planner Agent (New):** Receives the market data and investment plan, and calculates the stop-loss and (optionally) take-profit levels.
+4. **Risk Management Team:** Assesses the risk of the proposed trade, now also considering the stop-loss level.
+5. **Trader Agent:** Makes the final trading decision, incorporating the stop-loss and take-profit levels into the final transaction proposal.
+
+## 3. Implementation Details
+
+### 3.1. Create the Trade Planner Agent
+
+* **File:** `tradingagents/agents/managers/trade_planner.py`
+* **Function:** `create_trade_planner_agent`
+* **Logic:**
+ * The agent will take the `market_report` and `investment_plan` from the state as input.
+ * It will use a detailed prompt that instructs the LLM to act as a trade planner.
+ * The prompt will guide the LLM to determine stop-loss and take-profit levels based on technical indicators such as:
+ * Support and resistance levels
+ * Moving averages
+ * Fibonacci retracement levels
+ * Volume analysis
+ * The prompt will specify the desired output format, which should be a JSON object with `stop_loss` and `take_profit` keys.
+
+### 3.2. Update the Graph
+
+* **File:** `tradingagents/graph/trading_graph.py`
+* **Changes:**
+ * Instantiate the new `trade_planner_agent`.
+ * Add a new node for the agent in the `LangGraph` setup.
+ * The new node will be placed after the `research_manager` and before the `risk_manager`.
+
+### 3.3. Update Existing Agents
+
+* **`risk_manager.py`:**
+ * The prompt for the `risk_manager` will be updated to include the `stop_loss` level in its context. This will allow the risk manager to provide a more comprehensive risk assessment.
+* **`trader.py`:**
+ * The prompt for the `trader` agent will be updated to include the `stop_loss` and `take_profit` levels.
+ * The final output of the trader agent, the "FINAL TRANSACTION PROPOSAL", must include the stop-loss level.
+
+### 3.4. Update Agent State
+
+* **File:** `tradingagents/agents/utils/agent_states.py`
+* **Changes:**
+ * Add `stop_loss: float` and `take_profit: float` fields to the `AgentState` dataclass. This will allow the new price levels to be passed between agents in the graph.
+
+## 4. Next Steps
+
+The next step is to implement the changes described in this document. This will involve creating the new agent, updating the graph, and modifying the existing agents and state.
diff --git a/test_markdown_rendering.py b/test_markdown_rendering.py
new file mode 100644
index 00000000..f406c883
--- /dev/null
+++ b/test_markdown_rendering.py
@@ -0,0 +1,14 @@
+from webapp.main import render_markdown
+
+def test_markdown_basic_headers():
+ md_text = "# Title\n\nSome **bold** text and a table:\n\n| Col1 | Col2 |\n| ---- | ---- |\n| A | B |\n"
+ html = render_markdown(md_text)
+ assert '
' in html and 'Title' in html
+ assert '' in html and 'bold' in html
+ assert '
' in html
+
+def test_markdown_code_block():
+ md_text = "```python\nprint('hi')\n```"
+ html = render_markdown(md_text)
+ # Sanitized but should keep code element
+ assert 'print' in html and ' dict:
+ market_report = state["market_report"]
+ investment_plan = state["investment_plan"]
+
+ if toolkit.config["online_tools"]:
+ tools = [
+ toolkit.get_YFin_data_online,
+ toolkit.get_stockstats_indicators_report_online,
+ ]
+ else:
+ tools = [
+ toolkit.get_YFin_data,
+ toolkit.get_stockstats_indicators_report,
+ ]
+
+ prompt = f'''You are a trade planner. Your role is to determine the stop-loss and take-profit levels for a given investment plan.
+
+Analyze the following market report and investment plan to determine the optimal stop-loss and take-profit levels. You should use the available tools to get the latest market data and calculate technical indicators.
+
+**Market Report:**
+{market_report}
+
+**Investment Plan:**
+{investment_plan}
+
+Use technical indicators such as Pivots, ATR, support and resistance levels, Donchian Channels, SuperTrend, etc., as well as risk factors to determine the stop-loss and take-profit levels.
+
+Based on your analysis, provide the stop-loss and take-profit levels in a JSON format. For example:
+{{
+ "stop_loss": 150.00,
+ "take_profit": 180.00
+}}
+
+The stop-loss level is mandatory. The take-profit level is optional.
+Do not provide any other information or explanation.
+'''
+
+ response = llm.invoke(prompt)
+
+ try:
+ levels = json.loads(response.content)
+ stop_loss = levels.get("stop_loss")
+ take_profit = levels.get("take_profit")
+ except (json.JSONDecodeError, AttributeError):
+ stop_loss = None
+ take_profit = None
+
+
+ return {
+ "stop_loss": stop_loss,
+ "take_profit": take_profit,
+ }
+
+ return trade_planner_node
\ No newline at end of file
diff --git a/tradingagents/agents/trader/trader.py b/tradingagents/agents/trader/trader.py
index c4d600e7..9a0aa51d 100644
--- a/tradingagents/agents/trader/trader.py
+++ b/tradingagents/agents/trader/trader.py
@@ -25,9 +25,12 @@ def create_trader(llm, memory):
user_position = state.get("user_position", "none")
cost_per_trade = state.get("cost_per_trade", 0.0)
+ stop_loss = state.get("stop_loss")
+ take_profit = state.get("take_profit")
+
context = {
"role": "user",
- "content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nLeverage these insights to make an informed and strategic decision.",
+ "content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nThe Trade Planner has proposed a stop-loss of **{stop_loss}** and a take-profit of **{take_profit}**. You must consider these levels in your final recommendation.\n\nLeverage these insights to make an informed and strategic decision.",
}
messages = [
diff --git a/tradingagents/agents/utils/agent_states.py b/tradingagents/agents/utils/agent_states.py
index bca6fa34..490a893b 100644
--- a/tradingagents/agents/utils/agent_states.py
+++ b/tradingagents/agents/utils/agent_states.py
@@ -76,3 +76,5 @@ class AgentState(MessagesState):
RiskDebateState, "Current state of the debate on evaluating risk"
]
final_trade_decision: Annotated[str, "Final decision made by the Risk Analysts"]
+ stop_loss: Annotated[Optional[float], "Stop loss price level"] = None
+ take_profit: Annotated[Optional[float], "Take profit price level"] = None
diff --git a/tradingagents/agents/utils/llm_resilience.py b/tradingagents/agents/utils/llm_resilience.py
new file mode 100644
index 00000000..29912b54
--- /dev/null
+++ b/tradingagents/agents/utils/llm_resilience.py
@@ -0,0 +1,46 @@
+import time
+import json
+import logging
+from typing import Any, Callable, Dict
+from json import JSONDecodeError
+
+logger = logging.getLogger(__name__)
+
+
+def invoke_with_retries(chain: Any, messages: Any, config: Dict[str, Any]):
+ """Invoke a langchain chain with retries and detailed logging.
+
+ Handles transient HTTP issues and JSON decode errors coming from provider SDKs.
+ """
+ max_retries = config.get("llm_max_retries", 3)
+ backoff = config.get("llm_retry_backoff", 2.0)
+
+ last_err = None
+ for attempt in range(1, max_retries + 1):
+ try:
+ result = chain.invoke(messages)
+ return result
+ except JSONDecodeError as e:
+ last_err = e
+ logger.warning(
+ "JSONDecodeError on attempt %s/%s: %s", attempt, max_retries, e
+ )
+ except Exception as e: # noqa: BLE001
+ # Capture common transient network / HTTP errors keywords
+ transient = any(
+ kw in str(e).lower() for kw in [
+ "timeout", "temporarily", "rate limit", "connection reset", "503", "502", "jsondecodeerror"
+ ]
+ )
+ last_err = e
+ logger.warning(
+ "LLM invocation error (transient=%s) attempt %s/%s: %s", transient, attempt, max_retries, e
+ )
+ if not transient and not isinstance(e, JSONDecodeError):
+ # Non transient -> abort early
+ break
+ # Exponential backoff
+ sleep_for = backoff ** (attempt - 1)
+ time.sleep(sleep_for)
+ # All attempts failed
+ raise last_err # propagate last error
diff --git a/tradingagents/agents/utils/memory.py b/tradingagents/agents/utils/memory.py
index 546af58c..016abf3d 100644
--- a/tradingagents/agents/utils/memory.py
+++ b/tradingagents/agents/utils/memory.py
@@ -12,7 +12,7 @@ class FinancialSituationMemory:
# Use a good general-purpose model for financial text
self.embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
self.embedding_type = "local"
- print(f"β Using local embeddings with sentence-transformers for {name}")
+ print(f" Using local embeddings with sentence-transformers for {name}") # β
except ImportError:
print("β οΈ sentence-transformers not found. Install with: pip install sentence-transformers")
print("Falling back to ChromaDB's default embeddings...")
diff --git a/tradingagents/dataflows/googlenews_utils.py b/tradingagents/dataflows/googlenews_utils.py
index bdc6124d..4e775585 100644
--- a/tradingagents/dataflows/googlenews_utils.py
+++ b/tradingagents/dataflows/googlenews_utils.py
@@ -23,20 +23,36 @@ def is_rate_limited(response):
wait=wait_exponential(multiplier=1, min=4, max=60),
stop=stop_after_attempt(5),
)
-def make_request(url, headers):
+def make_request(url, headers, ssl_config=None):
"""Make a request with retry logic for rate limiting"""
# Random delay before each request to avoid detection
time.sleep(random.uniform(2, 6))
- response = requests.get(url, headers=headers)
+
+ # Prepare SSL configuration - only use if explicitly configured
+ kwargs = {}
+ if ssl_config:
+ if ssl_config.get("cert_bundle"):
+ kwargs["verify"] = ssl_config["cert_bundle"]
+ elif "verify" in ssl_config:
+ kwargs["verify"] = ssl_config["verify"]
+
+ if ssl_config.get("timeout"):
+ kwargs["timeout"] = ssl_config["timeout"]
+
+ if ssl_config.get("proxies"):
+ kwargs["proxies"] = ssl_config["proxies"]
+
+ response = requests.get(url, headers=headers, **kwargs)
return response
-def getNewsData(query, start_date, end_date):
+def getNewsData(query, start_date, end_date, ssl_config=None):
"""
Scrape Google News search results for a given query and date range.
query: str - search query
start_date: str - start date in the format yyyy-mm-dd or mm/dd/yyyy
end_date: str - end date in the format yyyy-mm-dd or mm/dd/yyyy
+ ssl_config: dict - SSL configuration including cert_bundle, verify, timeout, proxies
"""
if "-" in start_date:
start_date = datetime.strptime(start_date, "%Y-%m-%d")
@@ -64,7 +80,7 @@ def getNewsData(query, start_date, end_date):
)
try:
- response = make_request(url, headers)
+ response = make_request(url, headers, ssl_config)
soup = BeautifulSoup(response.content, "html.parser")
results_on_page = soup.select("div.SoaBEf")
diff --git a/tradingagents/dataflows/interface.py b/tradingagents/dataflows/interface.py
index 92e91450..4afc8600 100644
--- a/tradingagents/dataflows/interface.py
+++ b/tradingagents/dataflows/interface.py
@@ -4,6 +4,7 @@ from .yfin_utils import *
from .stockstats_utils import *
from .googlenews_utils import *
from .finnhub_utils import get_data_in_range
+from .ssl_utils import get_ssl_config, setup_global_ssl_config
from dateutil.relativedelta import relativedelta
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
@@ -294,7 +295,13 @@ def get_google_news(
before = start_date - relativedelta(days=look_back_days)
before = before.strftime("%Y-%m-%d")
- news_results = getNewsData(query, before, curr_date)
+ config = get_config()
+ ssl_config = get_ssl_config(config)
+ # Only pass ssl_config if it has actual configuration
+ if ssl_config:
+ news_results = getNewsData(query, before, curr_date, ssl_config)
+ else:
+ news_results = getNewsData(query, before, curr_date)
news_str = ""
diff --git a/tradingagents/dataflows/ssl_utils.py b/tradingagents/dataflows/ssl_utils.py
new file mode 100644
index 00000000..8e9a8177
--- /dev/null
+++ b/tradingagents/dataflows/ssl_utils.py
@@ -0,0 +1,149 @@
+"""
+SSL/TLS configuration utilities for TradingAgents
+"""
+
+import os
+import ssl
+import certifi
+from typing import Dict, Any, Optional
+
+
+def get_ssl_config(config: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ Create SSL configuration dictionary from the main config.
+
+ Args:
+ config: Main configuration dictionary
+
+ Returns:
+ SSL configuration dictionary with cert_bundle, verify, timeout, proxies
+ """
+ ssl_config = {}
+
+ # Certificate bundle configuration - only use if explicitly specified
+ cert_bundle = config.get("ssl_cert_bundle")
+ if cert_bundle and cert_bundle.strip():
+ # Use explicitly specified certificate bundle
+ ssl_config["cert_bundle"] = cert_bundle
+ ssl_config["verify"] = cert_bundle
+ elif not config.get("ssl_verify", True):
+ # Only disable SSL verification if explicitly set to false
+ ssl_config["verify"] = False
+
+ # If no explicit cert bundle and ssl_verify is true (default),
+ # don't set anything - use default behavior
+
+ # Timeout configuration
+ if config.get("http_timeout"):
+ ssl_config["timeout"] = config["http_timeout"]
+
+ # Proxy configuration
+ proxies = {}
+ if config.get("http_proxy"):
+ proxies["http"] = config["http_proxy"]
+ if config.get("https_proxy"):
+ proxies["https"] = config["https_proxy"]
+ if proxies:
+ ssl_config["proxies"] = proxies
+
+ return ssl_config
+
+
+def setup_global_ssl_config(config: Dict[str, Any]) -> None:
+ """
+ Set up global SSL configuration for the application.
+ This affects all SSL connections made by requests and other libraries.
+ Only sets configuration if explicitly specified in environment variables.
+
+ Args:
+ config: Main configuration dictionary
+ """
+ # Set environment variables for requests library only if explicitly configured
+ cert_bundle = config.get("ssl_cert_bundle")
+ if cert_bundle and cert_bundle.strip():
+ os.environ["REQUESTS_CA_BUNDLE"] = cert_bundle
+ os.environ["CURL_CA_BUNDLE"] = cert_bundle
+ print(f"π Using custom SSL certificate bundle: {cert_bundle}")
+
+ # Set SSL verification for requests only if explicitly disabled
+ if not config.get("ssl_verify", True):
+ # Disable SSL warnings when verification is disabled
+ import urllib3
+ urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+ print("β οΈ SSL certificate verification disabled")
+
+ # Set proxy environment variables if specified
+ if config.get("http_proxy"):
+ os.environ["HTTP_PROXY"] = config["http_proxy"]
+ print(f"π Using HTTP proxy: {config['http_proxy']}")
+ if config.get("https_proxy"):
+ os.environ["HTTPS_PROXY"] = config["https_proxy"]
+ print(f"π Using HTTPS proxy: {config['https_proxy']}")
+
+ # Set timeout if specified
+ if config.get("http_timeout"):
+ print(f"β±οΈ HTTP timeout set to: {config['http_timeout']} seconds")
+
+
+def create_ssl_context(cert_bundle: Optional[str] = None, verify_ssl: bool = True) -> ssl.SSLContext:
+ """
+ Create a custom SSL context with specified certificate bundle.
+
+ Args:
+ cert_bundle: Path to certificate bundle file
+ verify_ssl: Whether to verify SSL certificates
+
+ Returns:
+ Configured SSL context
+ """
+ if not verify_ssl:
+ # Create unverified context (not recommended for production)
+ context = ssl._create_unverified_context()
+ else:
+ # Create default context
+ context = ssl.create_default_context()
+
+ if cert_bundle:
+ # Load custom certificate bundle
+ context.load_verify_locations(cafile=cert_bundle)
+
+ return context
+
+
+def get_certificate_info() -> Dict[str, str]:
+ """
+ Get information about available certificate bundles.
+
+ Returns:
+ Dictionary with certificate bundle information
+ """
+ info = {}
+
+ # Check certifi bundle
+ try:
+ import certifi
+ info["certifi_bundle"] = certifi.where()
+ except ImportError:
+ info["certifi_bundle"] = "Not available (certifi not installed)"
+
+ # Check environment variables
+ info["env_ca_bundle"] = os.getenv("REQUESTS_CA_BUNDLE", "Not set")
+ info["env_curl_bundle"] = os.getenv("CURL_CA_BUNDLE", "Not set")
+
+ # Check system certificate stores
+ common_cert_paths = [
+ "/etc/ssl/certs/ca-certificates.crt", # Debian/Ubuntu
+ "/etc/pki/tls/certs/ca-bundle.crt", # RedHat/CentOS
+ "/usr/local/share/certs/ca-root-nss.crt", # FreeBSD
+ "/etc/ssl/cert.pem", # OpenBSD
+ "/System/Library/OpenSSL/certs/cert.pem", # macOS
+ ]
+
+ available_system_certs = []
+ for path in common_cert_paths:
+ if os.path.exists(path):
+ available_system_certs.append(path)
+
+ info["system_cert_bundles"] = available_system_certs
+
+ return info
\ No newline at end of file
diff --git a/tradingagents/default_config.py b/tradingagents/default_config.py
index 2cd6559c..d9e9b99a 100644
--- a/tradingagents/default_config.py
+++ b/tradingagents/default_config.py
@@ -3,13 +3,13 @@ import os
DEFAULT_CONFIG = {
"project_dir": os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
"results_dir": os.getenv("TRADINGAGENTS_RESULTS_DIR", "./results"),
- "data_dir": "/Users/yluo/Documents/Code/ScAI/FR1-data",
+ "data_dir": "./data",
"data_cache_dir": os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
"dataflows/data_cache",
),
# LLM settings
- "llm_provider": "openai", # "openai" or "gemini"
+ "llm_provider": "openai", # "openai"/"gemini"/"openrouter"/"ollama"
"deep_think_llm": "o4-mini",
"quick_think_llm": "gpt-4o-mini",
"backend_url": "https://api.openai.com/v1",
@@ -26,4 +26,15 @@ DEFAULT_CONFIG = {
"online_tools": True,
"user_position": "none",
"cost_per_trade": 0.0,
+ # SSL/TLS Certificate settings - only use if explicitly set
+ "ssl_cert_bundle": os.getenv("REQUESTS_CA_BUNDLE") or os.getenv("CURL_CA_BUNDLE"),
+ "ssl_verify": os.getenv("SSL_VERIFY", "true").lower() in ("true", "1", "yes"),
+ "http_timeout": int(os.getenv("HTTP_TIMEOUT")) if os.getenv("HTTP_TIMEOUT") else None,
+ # Proxy settings (if needed)
+ "http_proxy": os.getenv("HTTP_PROXY"),
+ "https_proxy": os.getenv("HTTPS_PROXY"),
+ # LLM resilience settings
+ "llm_max_retries": int(os.getenv("LLM_MAX_RETRIES", "3")),
+ "llm_retry_backoff": float(os.getenv("LLM_RETRY_BACKOFF", "2")), # seconds exponential base
+ "debug_http": os.getenv("DEBUG_HTTP", "false").lower() in ("1", "true", "yes"),
}
diff --git a/tradingagents/graph/conditional_logic.py b/tradingagents/graph/conditional_logic.py
index e7c87859..300fc106 100644
--- a/tradingagents/graph/conditional_logic.py
+++ b/tradingagents/graph/conditional_logic.py
@@ -43,6 +43,14 @@ class ConditionalLogic:
return "tools_fundamentals"
return "Msg Clear Fundamentals"
+ def should_continue_technical(self, state: AgentState):
+ """Determine if technical analysis should continue."""
+ messages = state["messages"]
+ last_message = messages[-1]
+ if last_message.tool_calls:
+ return "tools_technical"
+ return "Msg Clear Technical"
+
def should_continue_debate(self, state: AgentState) -> str:
"""Determine if debate should continue."""
diff --git a/tradingagents/graph/setup.py b/tradingagents/graph/setup.py
index 847c429f..9badfa3e 100644
--- a/tradingagents/graph/setup.py
+++ b/tradingagents/graph/setup.py
@@ -98,6 +98,7 @@ class GraphSetup:
research_manager_node = create_research_manager(
self.deep_thinking_llm, self.invest_judge_memory
)
+ trade_planner_node = create_trade_planner_agent(self.quick_thinking_llm, self.toolkit)
trader_node = create_trader(self.quick_thinking_llm, self.trader_memory)
# Create risk analysis nodes
@@ -123,6 +124,7 @@ class GraphSetup:
workflow.add_node("Bull Researcher", bull_researcher_node)
workflow.add_node("Bear Researcher", bear_researcher_node)
workflow.add_node("Research Manager", research_manager_node)
+ workflow.add_node("Trade Planner", trade_planner_node)
workflow.add_node("Trader", trader_node)
workflow.add_node("Risky Analyst", risky_analyst)
workflow.add_node("Neutral Analyst", neutral_analyst)
@@ -172,7 +174,8 @@ class GraphSetup:
"Research Manager": "Research Manager",
},
)
- workflow.add_edge("Research Manager", "Trader")
+ workflow.add_edge("Research Manager", "Trade Planner")
+ workflow.add_edge("Trade Planner", "Trader")
workflow.add_edge("Trader", "Risky Analyst")
workflow.add_conditional_edges(
"Risky Analyst",
diff --git a/tradingagents/graph/trading_graph.py b/tradingagents/graph/trading_graph.py
index e007bf9b..03508cc7 100644
--- a/tradingagents/graph/trading_graph.py
+++ b/tradingagents/graph/trading_graph.py
@@ -21,6 +21,7 @@ from tradingagents.agents.utils.agent_states import (
RiskDebateState,
)
from tradingagents.dataflows.interface import set_config
+from tradingagents.dataflows.ssl_utils import setup_global_ssl_config
from .conditional_logic import ConditionalLogic
from .setup import GraphSetup
@@ -50,6 +51,9 @@ class TradingAgentsGraph:
# Update the interface's config
set_config(self.config)
+
+ # Set up global SSL configuration
+ setup_global_ssl_config(self.config)
# Create necessary directories
os.makedirs(
@@ -79,15 +83,48 @@ class TradingAgentsGraph:
"export OPENAI_API_KEY=your_openai_key_here"
)
+ # Prepare SSL configuration for HTTP client - only if explicitly configured
+ http_client_kwargs = {}
+ cert_bundle = self.config.get("ssl_cert_bundle")
+
+ if cert_bundle and cert_bundle.strip():
+ import httpx
+ http_client_kwargs["verify"] = cert_bundle
+ elif not self.config.get("ssl_verify", True):
+ import httpx
+ http_client_kwargs["verify"] = False
+
+ if self.config.get("http_timeout"):
+ import httpx
+ http_client_kwargs["timeout"] = self.config["http_timeout"]
+
+ # Add proxy configuration if specified
+ if self.config.get("http_proxy") or self.config.get("https_proxy"):
+ import httpx
+ proxies = {}
+ if self.config.get("http_proxy"):
+ proxies["http://"] = self.config["http_proxy"]
+ if self.config.get("https_proxy"):
+ proxies["https://"] = self.config["https_proxy"]
+ http_client_kwargs["proxies"] = proxies
+
+ # Create HTTP client only if we have custom settings
+ http_client = None
+ if http_client_kwargs:
+ import httpx
+ http_client = httpx.Client(**http_client_kwargs)
+
self.deep_thinking_llm = ChatOpenAI(
model=self.config["deep_think_llm"],
base_url=self.config["backend_url"],
- api_key=api_key
+ api_key=api_key,
+ http_client=http_client
)
self.quick_thinking_llm = ChatOpenAI(
model=self.config["quick_think_llm"],
base_url=self.config["backend_url"],
- api_key=api_key
+ api_key=api_key,
+ http_client=http_client
)
elif self.config["llm_provider"].lower() == "anthropic":
self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
@@ -180,9 +217,19 @@ class TradingAgentsGraph:
self.toolkit.get_simfin_income_stmt,
]
),
+ "technical": ToolNode(
+ [
+ # online tools
+ self.toolkit.get_YFin_data_online,
+ self.toolkit.get_stockstats_indicators_report_online,
+ # offline tools
+ self.toolkit.get_YFin_data,
+ self.toolkit.get_stockstats_indicators_report,
+ ]
+ ),
}
- def propagate(self, company_name, trade_date, user_position="none", cost_per_trade=0.0):
+ def propagate(self, company_name, trade_date, user_position="none", cost_per_trade=0.0, on_step_callback=None):
"""Run the trading agents graph for a company on a specific date."""
self.ticker = company_name
@@ -193,17 +240,14 @@ class TradingAgentsGraph:
)
args = self.propagator.get_graph_args()
- if self.debug:
- # Debug mode with tracing
+ if on_step_callback or self.debug:
+ # Stream mode for callbacks or debug mode
trace = []
- for chunk in self.graph.stream(init_agent_state, **args):
- if len(chunk["messages"]) == 0:
- pass
- else:
- chunk["messages"][-1].pretty_print()
- trace.append(chunk)
-
- final_state = trace[-1]
+ for s in self.graph.stream(init_agent_state, **args):
+ trace.append(s)
+ if on_step_callback:
+ on_step_callback(s)
+ final_state = trace[-1] if trace else {}
else:
# Standard mode without tracing
final_state = self.graph.invoke(init_agent_state, **args)
diff --git a/uv.lock b/uv.lock
index 2a29181d..68fc7a34 100644
--- a/uv.lock
+++ b/uv.lock
@@ -358,6 +358,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/99/37/e8730c3587a65eb5645d4aba2d27aae48e8003614d6aaf15dda67f702f1f/bidict-0.23.1-py3-none-any.whl", hash = "sha256:5dae8d4d79b552a71cbabc7deb25dfe8ce710b17ff41711e13010ead2abfc3e5", size = 32764, upload-time = "2024-02-18T19:09:04.156Z" },
]
+[[package]]
+name = "bleach"
+version = "6.2.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "webencodings" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/76/9a/0e33f5054c54d349ea62c277191c020c2d6ef1d65ab2cb1993f91ec846d1/bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f", size = 203083, upload-time = "2024-10-29T18:30:40.477Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/fc/55/96142937f66150805c25c4d0f31ee4132fd33497753400734f9dfdcbdc66/bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e", size = 163406, upload-time = "2024-10-29T18:30:38.186Z" },
+]
+
[[package]]
name = "bs4"
version = "0.0.2"
@@ -1996,6 +2008,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/f1/ab/fdbbd91d8d82bf1a723ba88ec3e3d76c022b53c391b0c13cad441cdb8f9e/lxml-5.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b12cb6527599808ada9eb2cd6e0e7d3d8f13fe7bbb01c6311255a15ded4c7ab4", size = 3487862, upload-time = "2025-04-23T01:49:36.296Z" },
]
+[[package]]
+name = "markdown"
+version = "3.9"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/8d/37/02347f6d6d8279247a5837082ebc26fc0d5aaeaf75aa013fcbb433c777ab/markdown-3.9.tar.gz", hash = "sha256:d2900fe1782bd33bdbbd56859defef70c2e78fc46668f8eb9df3128138f2cb6a", size = 364585, upload-time = "2025-09-04T20:25:22.885Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/70/ae/44c4a6a4cbb496d93c6257954260fe3a6e91b7bed2240e5dad2a717f5111/markdown-3.9-py3-none-any.whl", hash = "sha256:9f4d91ed810864ea88a6f32c07ba8bee1346c0cc1f6b1f9f6c822f2a9667d280", size = 107441, upload-time = "2025-09-04T20:25:21.784Z" },
+]
+
[[package]]
name = "markdown-it-py"
version = "3.0.0"
@@ -5142,20 +5163,25 @@ source = { virtual = "." }
dependencies = [
{ name = "akshare" },
{ name = "backtrader" },
+ { name = "bleach" },
{ name = "chainlit" },
{ name = "chromadb" },
{ name = "eodhd" },
+ { name = "fastapi" },
{ name = "feedparser" },
{ name = "finnhub-python" },
+ { name = "jinja2" },
{ name = "langchain-anthropic" },
{ name = "langchain-experimental" },
{ name = "langchain-google-genai" },
{ name = "langchain-openai" },
{ name = "langgraph" },
+ { name = "markdown" },
{ name = "pandas" },
{ name = "parsel" },
{ name = "praw" },
{ name = "python-dotenv" },
+ { name = "python-multipart" },
{ name = "pytz" },
{ name = "questionary" },
{ name = "redis" },
@@ -5167,6 +5193,7 @@ dependencies = [
{ name = "tqdm" },
{ name = "tushare" },
{ name = "typing-extensions" },
+ { name = "uvicorn" },
{ name = "yfinance" },
]
@@ -5174,20 +5201,25 @@ dependencies = [
requires-dist = [
{ name = "akshare", specifier = ">=1.16.98" },
{ name = "backtrader", specifier = ">=1.9.78.123" },
+ { name = "bleach", specifier = ">=6.1.0" },
{ name = "chainlit", specifier = ">=2.5.5" },
{ name = "chromadb", specifier = ">=1.0.12" },
{ name = "eodhd", specifier = ">=1.0.32" },
+ { name = "fastapi" },
{ name = "feedparser", specifier = ">=6.0.11" },
{ name = "finnhub-python", specifier = ">=2.4.23" },
+ { name = "jinja2" },
{ name = "langchain-anthropic", specifier = ">=0.3.15" },
{ name = "langchain-experimental", specifier = ">=0.3.4" },
{ name = "langchain-google-genai", specifier = ">=2.1.5" },
{ name = "langchain-openai", specifier = ">=0.3.23" },
{ name = "langgraph", specifier = ">=0.4.8" },
+ { name = "markdown", specifier = ">=3.6" },
{ name = "pandas", specifier = ">=2.3.0" },
{ name = "parsel", specifier = ">=1.10.0" },
{ name = "praw", specifier = ">=7.8.1" },
{ name = "python-dotenv", specifier = ">=1.1.0" },
+ { name = "python-multipart" },
{ name = "pytz", specifier = ">=2025.2" },
{ name = "questionary", specifier = ">=2.1.0" },
{ name = "redis", specifier = ">=6.2.0" },
@@ -5199,6 +5231,7 @@ requires-dist = [
{ name = "tqdm", specifier = ">=4.67.1" },
{ name = "tushare", specifier = ">=1.4.21" },
{ name = "typing-extensions", specifier = ">=4.14.0" },
+ { name = "uvicorn" },
{ name = "yfinance", specifier = ">=0.2.63" },
]
diff --git a/webapp/main.py b/webapp/main.py
new file mode 100644
index 00000000..0958e647
--- /dev/null
+++ b/webapp/main.py
@@ -0,0 +1,684 @@
+from fastapi import FastAPI, Request, Form, BackgroundTasks, HTTPException
+from fastapi.responses import HTMLResponse
+from fastapi.staticfiles import StaticFiles
+import jinja2
+import markdown as md
+import bleach
+import os
+from typing import Dict, Any
+import threading
+import time
+from dotenv import load_dotenv
+
+# Load environment variables from .env file
+load_dotenv()
+
+# Check required environment variables
+required_env_vars = [
+ 'FINNHUB_API_KEY',
+ 'OPENAI_API_KEY',
+ #'REDDIT_CLIENT_ID',
+ #'REDDIT_CLIENT_SECRET',
+ #'REDDIT_USER_AGENT'
+]
+
+missing_vars = [var for var in required_env_vars if not os.getenv(var)]
+if missing_vars:
+ print(f"Error: Missing required environment variables: {', '.join(missing_vars)}")
+ print("Please create a .env file with these variables or set them in your environment.")
+
+from tradingagents.graph.trading_graph import TradingAgentsGraph
+
+app = FastAPI()
+
+# In-memory storage for the process state
+# Using a lock for thread-safe access to app_state
+app_state_lock = threading.Lock()
+app_state: Dict[str, Any] = {
+ "process_running": False,
+ "company_symbol": None,
+ "execution_tree": [],
+ "overall_status": "idle", # idle, in_progress, completed, error
+ "overall_progress": 0 # 0-100
+}
+
+# Define the strict sequential phase execution order
+PHASE_SEQUENCE = [
+ "data_collection_phase",
+ "research_phase",
+ "planning_phase",
+ "execution_phase",
+ "risk_analysis_phase",
+ # New dedicated top-level phase for the final portfolio decision
+ "final_decision_phase"
+]
+
+# Mount the static directory to serve CSS, JS, etc.
+app.mount("/static", StaticFiles(directory="webapp/static"), name="static")
+
+# Setup Jinja2 for templating
+template_dir = os.path.join(os.path.dirname(__file__), "templates")
+jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
+
+# Allowed tags and attributes for sanitized markdown rendering
+ALLOWED_TAGS = list(bleach.sanitizer.ALLOWED_TAGS) + [
+ "p", "pre", "span", "h1", "h2", "h3", "h4", "h5", "h6", "table", "thead", "tbody", "tr", "th", "td", "blockquote", "code"
+]
+ALLOWED_ATTRIBUTES = {**bleach.sanitizer.ALLOWED_ATTRIBUTES, "span": ["class"], "code": ["class"], "th": ["align"], "td": ["align"]}
+
+def render_markdown(value: str) -> str:
+ """Convert markdown text to sanitized HTML."""
+ if not isinstance(value, str):
+ value = str(value)
+ html = md.markdown(
+ value,
+ extensions=["fenced_code", "tables", "codehilite", "toc", "sane_lists"],
+ output_format="html5"
+ )
+ cleaned = bleach.clean(html, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES, strip=True)
+ return cleaned
+
+jinja_env.filters['markdown'] = render_markdown
+
+def update_execution_state(state: Dict[str, Any]):
+ """Callback function to update the app_state based on LangGraph's state."""
+ print(f"π‘ Callback received state keys: {list(state.keys())}")
+
+ with app_state_lock:
+ # Ensure execution tree is initialized
+ if not app_state["execution_tree"]:
+ app_state["execution_tree"] = initialize_complete_execution_tree()
+
+ # Map LangGraph node names to our tracking system
+ agent_state_mapping = {
+ "Market Analyst": {
+ "phase": "data_collection",
+ "agent_id": "market_analyst",
+ "report_key": "market_report",
+ "report_name": "Market Analysis Report"
+ },
+ "Social Analyst": {
+ "phase": "data_collection",
+ "agent_id": "social_analyst",
+ "report_key": "sentiment_report",
+ "report_name": "Sentiment Analysis Report"
+ },
+ "News Analyst": {
+ "phase": "data_collection",
+ "agent_id": "news_analyst",
+ "report_key": "news_report",
+ "report_name": "News Analysis Report"
+ },
+ "Fundamentals Analyst": {
+ "phase": "data_collection",
+ "agent_id": "fundamentals_analyst",
+ "report_key": "fundamentals_report",
+ "report_name": "Fundamentals Report"
+ },
+ "Bull Researcher": {
+ "phase": "research",
+ "agent_id": "bull_researcher",
+ "report_key": "investment_debate_state.bull_history",
+ "report_name": "Bull Case Analysis"
+ },
+ "Bear Researcher": {
+ "phase": "research",
+ "agent_id": "bear_researcher",
+ "report_key": "investment_debate_state.bear_history",
+ "report_name": "Bear Case Analysis"
+ },
+ "Research Manager": {
+ "phase": "research",
+ "agent_id": "research_manager",
+ "report_key": "investment_debate_state.judge_decision",
+ "report_name": "Research Synthesis"
+ },
+ "Trade Planner": {
+ "phase": "planning",
+ "agent_id": "trade_planner",
+ "report_key": "trader_investment_plan",
+ "report_name": "Trading Plan"
+ },
+ "Trader": {
+ "phase": "execution",
+ "agent_id": "trader",
+ "report_key": "investment_plan",
+ "report_name": "Execution Report"
+ },
+ "Risky Analyst": {
+ "phase": "risk_analysis",
+ "agent_id": "risky_analyst",
+ "report_key": "risk_debate_state.risky_history",
+ "report_name": "Risk Assessment (Aggressive)"
+ },
+ "Neutral Analyst": {
+ "phase": "risk_analysis",
+ "agent_id": "neutral_analyst",
+ "report_key": "risk_debate_state.neutral_history",
+ "report_name": "Risk Assessment (Neutral)"
+ },
+ "Safe Analyst": {
+ "phase": "risk_analysis",
+ "agent_id": "safe_analyst",
+ "report_key": "risk_debate_state.safe_history",
+ "report_name": "Risk Assessment (Conservative)"
+ },
+ "Risk Judge": {
+ # Moved to its own dedicated phase for prominence
+ "phase": "final_decision",
+ "agent_id": "risk_judge",
+ "report_key": "final_trade_decision",
+ "report_name": "Portfolio Manager's Decision"
+ }
+ }
+
+ # Update agent statuses based on available reports
+ for agent_name, agent_info in agent_state_mapping.items():
+ # Check if this agent has completed (has report data)
+ report_data = get_nested_value(state, agent_info["report_key"])
+ if report_data:
+ update_agent_status(agent_info, "completed", report_data, state)
+
+ # Mark in-progress agent(s) sequentially BEFORE recalculating phase status
+ mark_in_progress_agents(app_state["execution_tree"])
+ # Recalculate phase statuses after setting agent in-progress markers
+ recalc_phase_statuses(app_state["execution_tree"])
+ # Update overall progress
+ execution_tree = app_state["execution_tree"]
+ total_agents = len(agent_state_mapping)
+ completed_agents = count_completed_agents(execution_tree)
+ app_state["overall_progress"] = min(100, int((completed_agents / max(total_agents, 1)) * 100))
+
+ print(f"π Progress updated: {app_state['overall_progress']}% ({completed_agents}/{total_agents} agents)")
+
+def initialize_complete_execution_tree():
+ """Initialize the complete execution tree with all agents in pending state."""
+ return [
+ {
+ "id": "data_collection_phase",
+ "name": "π Data Collection Phase",
+ "status": "pending",
+ "content": "Collecting market data and analysis from various sources",
+ "children": [
+ create_agent_node("market_analyst", "π Market Analyst"),
+ create_agent_node("social_analyst", "π± Social Media Analyst"),
+ create_agent_node("news_analyst", "π° News Analyst"),
+ create_agent_node("fundamentals_analyst", "π Fundamentals Analyst")
+ ]
+ },
+ {
+ "id": "research_phase",
+ "name": "π Research Phase",
+ "status": "pending",
+ "content": "Research and debate investment perspectives",
+ "children": [
+ create_agent_node("bull_researcher", "π Bull Researcher"),
+ create_agent_node("bear_researcher", "π» Bear Researcher"),
+ create_agent_node("research_manager", "π Research Manager")
+ ]
+ },
+ {
+ "id": "planning_phase",
+ "name": "π Planning Phase",
+ "status": "pending",
+ "content": "Develop trading strategy and execution plan",
+ "children": [
+ create_agent_node("trade_planner", "π Trade Planner")
+ ]
+ },
+ {
+ "id": "execution_phase",
+ "name": "β‘ Execution Phase",
+ "status": "pending",
+ "content": "Execute trades based on analysis and planning",
+ "children": [
+ create_agent_node("trader", "β‘ Trader")
+ ]
+ },
+ {
+ "id": "risk_analysis_phase",
+ "name": "β οΈ Risk Management Phase",
+ "status": "pending",
+ "content": "Assess and manage investment risks",
+ "children": [
+ create_agent_node("risky_analyst", "π¨ Aggressive Risk Analyst"),
+ create_agent_node("neutral_analyst", "βοΈ Neutral Risk Analyst"),
+ create_agent_node("safe_analyst", "π‘οΈ Conservative Risk Analyst")
+ ]
+ },
+ {
+ "id": "final_decision_phase",
+ "name": "π§ Portfolio Manager's Decision",
+ "status": "pending",
+ "content": "Final portfolio / trade decision synthesized from all prior phases",
+ "children": [
+ create_agent_node("risk_judge", "π§ Portfolio Manager")
+ ]
+ }
+ ]
+
+def create_agent_node(agent_id: str, agent_name: str):
+ """Create a standardized agent node with report and messages sub-items."""
+ return {
+ "id": agent_id,
+ "name": agent_name,
+ "status": "pending",
+ "content": f"Agent: {agent_name} - Awaiting execution",
+ "children": [
+ {
+ "id": f"{agent_id}_messages",
+ "name": "💬 Messages",
+ "status": "pending",
+ "content": "No messages yet",
+ "children": [],
+ "timestamp": time.time()
+ },
+ {
+ "id": f"{agent_id}_report",
+ "name": "📄 Report",
+ "status": "pending",
+ "content": "Report not yet generated",
+ "children": [],
+ "timestamp": time.time()
+ }
+ ],
+ "timestamp": time.time()
+ }
+
+def get_nested_value(data: dict, key_path: str):
+ """Get value from nested dict using dot notation (e.g., 'investment_debate_state.bull_history')."""
+ keys = key_path.split('.')
+ value = data
+ for key in keys:
+ if isinstance(value, dict) and key in value:
+ value = value[key]
+ else:
+ return None
+ return value
+
+def update_agent_status(agent_info: dict, status: str, report_data: any, full_state: dict):
+ """Update an agent's status and content in the execution tree."""
+ execution_tree = app_state["execution_tree"]
+
+ # Find the agent in the tree
+ agent_node = find_agent_in_tree(agent_info["agent_id"], execution_tree)
+ if not agent_node:
+ return
+
+ # Update agent status
+ if agent_node["status"] != "completed":
+ agent_node["status"] = status
+ agent_node["content"] = f"β {agent_node['name']} - Analysis completed"
+
+ # Update report sub-item
+ report_node = find_item_by_id(f"{agent_info['agent_id']}_report", agent_node["children"])
+ if report_node:
+ report_node["status"] = "completed"
+ report_node["content"] = format_report_content(agent_info["report_name"], report_data)
+
+ # Update messages sub-item (extract from state if available)
+ messages_node = find_item_by_id(f"{agent_info['agent_id']}_messages", agent_node["children"])
+ if messages_node:
+ messages_node["status"] = "completed"
+ messages_node["content"] = extract_agent_messages(full_state, agent_info["agent_id"])
+
+ # Phase status recalculated globally in recalc_phase_statuses
+
+def find_agent_in_tree(agent_id: str, tree: list):
+ """Find an agent node in the execution tree."""
+ for phase in tree:
+ if phase.get("children"):
+ for agent in phase["children"]:
+ if agent["id"] == agent_id:
+ return agent
+ return None
+
+def find_item_by_id(item_id: str, items: list):
+ """Find an item by ID in a list of items."""
+ for item in items:
+ if item["id"] == item_id:
+ return item
+ return None
+
+def format_report_content(report_name: str, report_data: any) -> str:
+ """Format report data for display."""
+ if isinstance(report_data, str):
+ return f"π {report_name}\n\n{report_data}"
+ elif isinstance(report_data, dict):
+ return f"π {report_name}\n\n{str(report_data)}"
+ elif isinstance(report_data, list) and report_data:
+ # For debate histories, show the latest message
+ latest = report_data[-1] if report_data else "No data"
+ return f"π {report_name}\n\n{str(latest)}"
+ else:
+ return f"π {report_name}\n\nReport generated successfully"
+
+def extract_agent_messages(state: dict, agent_id: str) -> str:
+ """Extract relevant messages for an agent from the state."""
+ # Expecting state['messages'] to be a list of dicts with optional keys like
+ # 'role', 'content', 'timestamp'. We'll display each in an expandable box.
+ messages = state.get("messages", []) or []
+ if not messages:
+ return "π¬ Agent Messages\n\nNo messages recorded for this agent."
+
+ # Filter messages for this agent if agent_id field present
+ filtered = []
+ for m in messages:
+ if isinstance(m, dict):
+ msg_agent = m.get("agent_id") or m.get("agent")
+ if msg_agent and msg_agent != agent_id:
+ continue
+ filtered.append(m)
+ else:
+ # Try common attributes used by message objects (e.g., langchain HumanMessage / AIMessage)
+ msg_agent = getattr(m, "agent_id", None) or getattr(m, "agent", None)
+ if msg_agent and msg_agent != agent_id:
+ continue
+ filtered.append(m)
+ if not filtered:
+ filtered = messages # fallback to all if no agent-specific match
+
+ parts = ["π¬ Agent Messages", "", f"Total messages: {len(filtered)}", ""]
+ for idx, m in enumerate(filtered, start=1):
+ if isinstance(m, dict):
+ role = m.get("role") or m.get("type") or "message"
+ ts = m.get("timestamp")
+ content = m.get("content") or m.get("text") or "(no content)"
+ else:
+ # Object-based message
+ role = getattr(m, "role", None) or getattr(m, "type", None) or m.__class__.__name__
+ ts = getattr(m, "timestamp", None)
+ # LangChain messages often have a .content attribute
+ content = getattr(m, "content", None) or getattr(m, "text", None) or str(m)
+ # Escape triple backticks to avoid markdown parser confusion
+ if isinstance(content, str):
+ content = content.replace('```', '\u0060\u0060\u0060')
+ header = f"{idx}. {role.title()}" + (f" β {ts}" if ts else "")
+ # Use HTML so user can expand long messages
+ parts.append("<details>")
+ parts.append(f"<summary>{header}</summary>")
+ # Wrap content in pre for formatting
+ parts.append("<pre>" + str(content) + "</pre>")
+ parts.append("</details>")
+
+ return "\n".join(parts)
+
+def recalc_phase_statuses(execution_tree: list):
+ """Recalculate each phase's status: pending (no started), in_progress (some running/completed but not all), completed (all done), error if any child error."""
+ for phase in execution_tree:
+ if not phase.get("children"):
+ continue
+ child_statuses = [c["status"] for c in phase["children"]]
+ if any(s == "error" for s in child_statuses):
+ phase["status"] = "error"
+ phase["content"] = f"β {phase['name']} - Error in sub-task"
+ elif all(s == "completed" for s in child_statuses):
+ phase["status"] = "completed"
+ phase["content"] = f"β {phase['name']} - All agents completed successfully"
+ elif any(s in ("in_progress", "completed") for s in child_statuses):
+ # At least one started but not all done
+ if phase["status"] != "in_progress":
+ phase["status"] = "in_progress"
+ phase["content"] = f"β³ {phase['name']} - Running..."
+ else:
+ # All pending
+ phase["status"] = "pending"
+
+
+def count_completed_agents(execution_tree: list) -> int:
+ """Count the number of completed agents across all phases."""
+ count = 0
+ for phase in execution_tree:
+ if phase.get("children"):
+ for agent in phase["children"]:
+ if agent["status"] == "completed":
+ count += 1
+ return count
+
+def mark_in_progress_agents(execution_tree: list):
+ """Sequentially activate only the earliest phase that still has pending agents.
+ Rules:
+ - A phase becomes active when all prior phases are completed.
+ - Only the first such phase can have an in_progress agent.
+ - Within that phase, mark exactly one first pending agent as in_progress.
+ """
+ # Build quick lookup by id
+ phase_map = {p["id"]: p for p in execution_tree}
+
+ # Determine which phase should be active
+ active_phase = None
+ for phase_id in PHASE_SEQUENCE:
+ phase = phase_map.get(phase_id)
+ if not phase:
+ continue
+ # If all previous phases completed, and this phase not fully completed, it's the active one
+ prev_completed = all(
+ (phase_map.get(prev_id) and all(c["status"] == "completed" for c in phase_map[prev_id].get("children", [])))
+ for prev_id in PHASE_SEQUENCE[:PHASE_SEQUENCE.index(phase_id)]
+ )
+ phase_done = all(c["status"] == "completed" for c in phase.get("children", []))
+ if prev_completed and not phase_done:
+ active_phase = phase
+ break
+
+ if not active_phase:
+ return
+
+ # If an agent already in progress in the active phase, leave as-is
+ if any(a["status"] == "in_progress" for a in active_phase.get("children", [])):
+ return
+
+ # Otherwise pick first pending agent
+ for agent in active_phase.get("children", []):
+ if agent["status"] == "pending":
+ agent["status"] = "in_progress"
+ agent["content"] = f"β³ {agent['name']} - Running analysis..."
+ for child in agent.get("children", []):
+ if child["status"] == "pending":
+ child["status"] = "in_progress"
+ break
+
+def run_trading_process(company_symbol: str, config: Dict[str, Any]):
+ """Runs the TradingAgentsGraph in a separate thread."""
+ with app_state_lock:
+ app_state["overall_status"] = "in_progress"
+ app_state["overall_progress"] = 0
+
+ try:
+ # Import and create custom config
+ from tradingagents.default_config import DEFAULT_CONFIG
+
+ # Create custom configuration with user selections
+ custom_config = DEFAULT_CONFIG.copy()
+ custom_config["llm_provider"] = config["llm_provider"]
+ custom_config["max_debate_rounds"] = config["max_debate_rounds"]
+ custom_config["cost_per_trade"] = config["cost_per_trade"]
+
+ # Set the appropriate LLM models based on provider
+ if config["llm_provider"] == "google":
+ custom_config["gemini_quick_think_llm"] = config["quick_think_llm"]
+ custom_config["gemini_deep_think_llm"] = config["deep_think_llm"]
+ else:
+ custom_config["quick_think_llm"] = config["quick_think_llm"]
+ custom_config["deep_think_llm"] = config["deep_think_llm"]
+
+ # Set backend URL based on provider
+ if config["llm_provider"] == "openrouter":
+ custom_config["backend_url"] = "https://openrouter.ai/api/v1"
+ elif config["llm_provider"] == "google":
+ custom_config["backend_url"] = "https://generativelanguage.googleapis.com/v1"
+ elif config["llm_provider"] == "anthropic":
+ custom_config["backend_url"] = "https://api.anthropic.com/"
+ elif config["llm_provider"] == "ollama":
+ custom_config["backend_url"] = f"http://{os.getenv('OLLAMA_HOST', 'localhost')}:11434/v1"
+ else: # openai
+ custom_config["backend_url"] = "https://api.openai.com/v1"
+
+ print(f"π Initializing TradingAgentsGraph for {company_symbol}")
+ graph = TradingAgentsGraph(config=custom_config)
+ analysis_date = config["analysis_date"] # Use user-selected date
+ print(f"π Starting propagation for {company_symbol} on {analysis_date}")
+
+ # The propagate method now accepts the callback and trade_date
+ final_state, processed_signal = graph.propagate(company_symbol, trade_date=analysis_date, on_step_callback=update_execution_state)
+ print(f"β Propagation completed for {company_symbol}")
+
+ with app_state_lock:
+ app_state["overall_status"] = "completed"
+ app_state["overall_progress"] = 100
+ # Update the root node status to completed
+ if app_state["execution_tree"]:
+ app_state["execution_tree"][0]["status"] = "completed"
+ app_state["execution_tree"][0]["content"] = f"β Analysis completed successfully!\n\nFinal Decision: {processed_signal}\n\nFull State: {str(final_state)}"
+
+ except Exception as e:
+ import traceback
+ error_detail = traceback.format_exc()
+ with app_state_lock:
+ app_state["overall_status"] = "error"
+ app_state["overall_progress"] = 100
+ if app_state["execution_tree"]:
+ app_state["execution_tree"][0]["status"] = "error"
+ app_state["execution_tree"][0]["content"] = f"Error during execution: {str(e)}\n\n{error_detail}"
+ # Add a specific error item to the tree
+ app_state["execution_tree"].append({
+ "id": "error",
+ "name": "Process Error",
+ "status": "error",
+ "content": f"Error during execution: {str(e)}\n\n{error_detail}",
+ "children": [],
+ "timestamp": time.time()
+ })
+ finally:
+ with app_state_lock:
+ app_state["process_running"] = False
+
+
+@app.get("/", response_class=HTMLResponse)
+async def read_root():
+ from datetime import date
+ template = jinja_env.get_template("index.html")
+ today_str = date.today().isoformat()
+ return template.render(app_state=app_state, default_date=today_str)
+
+@app.post("/start", response_class=HTMLResponse)
+async def start_process(
+ background_tasks: BackgroundTasks,
+ company_symbol: str = Form(...),
+ llm_provider: str = Form(...),
+ quick_think_llm: str = Form(...),
+ deep_think_llm: str = Form(...),
+ max_debate_rounds: int = Form(...),
+ cost_per_trade: float = Form(...),
+ analysis_date: str = Form(...)
+):
+ # Check if all required environment variables are set
+ missing_vars = [var for var in required_env_vars if not os.getenv(var)]
+ if missing_vars:
+ app_state["overall_status"] = "error"
+ app_state["execution_tree"] = [{
+ "id": "error",
+ "name": "Configuration Error",
+ "status": "error",
+ "content": f"Missing required environment variables: {', '.join(missing_vars)}. Please check .env.example file.",
+ "children": [],
+ "timestamp": time.time()
+ }]
+ template = jinja_env.get_template("_partials/left_panel.html")
+ return template.render(tree=app_state["execution_tree"], app_state=app_state)
+
+ with app_state_lock:
+ if app_state["process_running"]:
+ # Optionally, return an error or a message that a process is already running
+ template = jinja_env.get_template("_partials/left_panel.html")
+ return template.render(tree=app_state["execution_tree"], app_state=app_state)
+
+ app_state["process_running"] = True
+ app_state["company_symbol"] = company_symbol
+ app_state["overall_status"] = "in_progress"
+ app_state["overall_progress"] = 5 # Show initial progress
+
+ # Store all configuration parameters
+ app_state["config"] = {
+ "llm_provider": llm_provider,
+ "quick_think_llm": quick_think_llm,
+ "deep_think_llm": deep_think_llm,
+ "max_debate_rounds": max_debate_rounds,
+ "cost_per_trade": cost_per_trade,
+ "analysis_date": analysis_date
+ }
+
+ # Initialize execution tree with complete structure
+ app_state["execution_tree"] = initialize_complete_execution_tree()
+
+ background_tasks.add_task(run_trading_process, company_symbol, app_state["config"])
+
+ template = jinja_env.get_template("_partials/left_panel.html")
+ return template.render(tree=app_state["execution_tree"], app_state=app_state)
+
+@app.get("/status", response_class=HTMLResponse)
+async def get_status():
+ with app_state_lock:
+ template = jinja_env.get_template("_partials/left_panel.html")
+ return template.render(tree=app_state["execution_tree"], app_state=app_state)
+
+
+@app.get("/status-updates")
+async def get_status_updates():
+ """Return only the status updates as JSON for targeted updates."""
+ with app_state_lock:
+ status_updates = {}
+
+ def extract_status_info(items, prefix=""):
+ for item in items:
+ item_id = item["id"]
+ status_updates[item_id] = {
+ "status": item["status"],
+ "status_icon": get_status_icon(item["status"])
+ }
+ if item.get("children"):
+ extract_status_info(item["children"])
+
+ extract_status_info(app_state["execution_tree"])
+
+ return {
+ "status_updates": status_updates,
+ "overall_progress": app_state["overall_progress"],
+ "overall_status": app_state["overall_status"]
+ }
+
+def get_status_icon(status: str) -> str:
+ """Get the status icon for a given status."""
+ if status == 'completed':
+ return 'β '
+ elif status == 'in_progress':
+ return 'β³'
+ elif status == 'error':
+ return 'β'
+ else:
+ return 'βΈοΈ'
+
+def find_item_in_tree(item_id: str, tree: list) -> Dict[str, Any] | None:
+ """Recursively searches the execution tree for an item by its ID."""
+ for item in tree:
+ if item["id"] == item_id:
+ return item
+ if item["children"]:
+ found_child = find_item_in_tree(item_id, item["children"])
+ if found_child:
+ return found_child
+ return None
+
+@app.get("/content/{item_id}", response_class=HTMLResponse)
+async def get_item_content(item_id: str):
+ with app_state_lock:
+ item = find_item_in_tree(item_id, app_state["execution_tree"])
+ if item:
+ template = jinja_env.get_template("_partials/right_panel.html")
+ return template.render(content=item.get("content", "No content available."))
+ else:
+ return HTMLResponse(content="<p>Content not found.</p>", status_code=404)
\ No newline at end of file
diff --git a/webapp/templates/_partials/tree_content.html b/webapp/templates/_partials/tree_content.html
new file mode 100644
index 00000000..f0cfdc86
--- /dev/null
+++ b/webapp/templates/_partials/tree_content.html
@@ -0,0 +1,34 @@
+{% macro render_item(item) %}
+