fix: allow .env variables to override DEFAULT_CONFIG values

Merged origin/main and resolved all 8 conflicting files:
- CLAUDE.md: merged MISTAKES.md ref + Project Tracking section + env override docs
- cli/main.py: kept top-level json import, kept try/except in run_pipeline
- tool_runner.py: kept descriptive comments for MAX_TOOL_ROUNDS
- alpha_vantage_common.py: kept thread-safe rate limiter, robust error handling
- interface.py: kept broader exception catch (AlphaVantageError + ConnectionError + TimeoutError)
- default_config.py: kept _env()/_env_int() env var overrides with load_dotenv() at module level
- scanner_graph.py: kept debug mode fix (stream for debug, invoke for result)
- macro_bridge.py: kept get_running_loop() over deprecated get_event_loop()

Co-authored-by: aguzererler <6199053+aguzererler@users.noreply.github.com>
copilot-swe-agent[bot] committed 2026-03-17 14:25:10 +00:00
commit 2193ff3fa1 (parent 9ac773a69d)
12 changed files with 254 additions and 61 deletions

.env.example

@@ -1,6 +1,23 @@
# LLM Providers (set the one you use)
# LLM Provider API Keys (set the ones you use)
OPENAI_API_KEY=
GOOGLE_API_KEY=
ANTHROPIC_API_KEY=
XAI_API_KEY=
OPENROUTER_API_KEY=
# Data Provider API Keys
ALPHA_VANTAGE_API_KEY=
# ── Configuration overrides ──────────────────────────────────────────
# Any setting in DEFAULT_CONFIG can be overridden with a
# TRADINGAGENTS_<KEY> environment variable. Unset or empty values
# are ignored (the hardcoded default is kept).
#
# Examples:
# TRADINGAGENTS_LLM_PROVIDER=openrouter
# TRADINGAGENTS_QUICK_THINK_LLM=deepseek/deepseek-chat-v3-0324
# TRADINGAGENTS_DEEP_THINK_LLM=deepseek/deepseek-r1-0528
# TRADINGAGENTS_BACKEND_URL=https://openrouter.ai/api/v1
# TRADINGAGENTS_RESULTS_DIR=./my_results
# TRADINGAGENTS_MAX_DEBATE_ROUNDS=2
# TRADINGAGENTS_VENDOR_SCANNER_DATA=alpha_vantage

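As a usage sketch of the overrides described above, assuming only the `TRADINGAGENTS_MAX_DEBATE_ROUNDS=2` example from this file is set:

```python
from dotenv import load_dotenv

load_dotenv()  # default_config.py also calls this at import time

from tradingagents.default_config import DEFAULT_CONFIG

DEFAULT_CONFIG["max_debate_rounds"]  # -> 2 ("2" from .env, coerced to int)
DEFAULT_CONFIG["llm_provider"]       # -> "openai" (no override set, hardcoded default kept)
```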
CLAUDE.md

@@ -102,16 +102,13 @@ OpenAI, Anthropic, Google, xAI, OpenRouter, Ollama
- `PROGRESS.md` — Feature progress, what works, TODOs
- `MISTAKES.md` — Past bugs and lessons learned (9 documented mistakes)
## Current LLM Configuration (Hybrid)
## LLM Configuration
```
quick_think: qwen3.5:27b via Ollama (http://192.168.50.76:11434)
mid_think: qwen3.5:27b via Ollama (http://192.168.50.76:11434)
deep_think: deepseek/deepseek-r1-0528 via OpenRouter
```
Config: `tradingagents/default_config.py` (per-tier `_llm_provider` keys)
Keys: `.env` file (`OPENROUTER_API_KEY`, `ALPHA_VANTAGE_API_KEY`)
Per-tier provider overrides in `tradingagents/default_config.py`:
- Each tier (`quick_think`, `mid_think`, `deep_think`) can have its own `_llm_provider` and `_backend_url`
- Falls back to top-level `llm_provider` and `backend_url` when per-tier values are None
- All config values overridable via `TRADINGAGENTS_<KEY>` env vars
- API keys (LLM and data providers) go in the `.env` file (e.g., `OPENROUTER_API_KEY`, `ALPHA_VANTAGE_API_KEY`)
## Running the Scanner

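The per-tier fallback described in the bullets above amounts to "per-tier value if set, otherwise the top-level value". A minimal sketch, where `resolve_tier` is a hypothetical helper rather than code from this commit:

```python
from tradingagents.default_config import DEFAULT_CONFIG

def resolve_tier(cfg: dict, tier: str):
    """Hypothetical helper: prefer the per-tier setting, else fall back to the top level."""
    provider = cfg.get(f"{tier}_llm_provider") or cfg["llm_provider"]
    backend_url = cfg.get(f"{tier}_backend_url") or cfg["backend_url"]
    return provider, backend_url

resolve_tier(DEFAULT_CONFIG, "deep_think")  # ("openai", "https://api.openai.com/v1") with stock defaults
```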
cli/main.py

@@ -1,5 +1,6 @@
from typing import Optional
import datetime
import json
import typer
from pathlib import Path
from functools import wraps
@@ -1201,8 +1202,6 @@ def run_scan(date: Optional[str] = None):
raise typer.Exit(1)
# Save reports
import json as _json
for key in ["geopolitical_report", "market_movers_report", "sector_performance_report",
"industry_deep_dive_report", "macro_scan_summary"]:
content = result.get(key, "")
@@ -1217,7 +1216,7 @@ def run_scan(date: Optional[str] = None):
# Try to parse and show watchlist table
try:
summary_data = _json.loads(summary)
summary_data = json.loads(summary)
stocks = summary_data.get("stocks_to_investigate", [])
if stocks:
table = Table(title="Stocks to Investigate", box=box.ROUNDED)
@@ -1235,16 +1234,16 @@ def run_scan(date: Optional[str] = None):
s.get("thesis_angle", ""),
)
console.print(table)
except (_json.JSONDecodeError, KeyError):
except (json.JSONDecodeError, KeyError):
pass # Summary wasn't valid JSON — already printed as markdown
console.print(f"\n[green]Results saved to {save_dir}[/green]")
def run_pipeline():
"""Full pipeline: scan -> filter -> per-ticker deep dive."""
import asyncio
import json as _json
from tradingagents.pipeline.macro_bridge import (
parse_macro_output,
filter_candidates,
@@ -1293,10 +1292,14 @@ def run_pipeline():
output_dir = Path("results/macro_pipeline")
console.print(f"\n[cyan]Running TradingAgents for {len(candidates)} tickers...[/cyan]")
with Live(Spinner("dots", text="Analyzing..."), console=console, transient=True):
results = asyncio.run(
run_all_tickers(candidates, macro_context, config, analysis_date)
)
try:
with Live(Spinner("dots", text="Analyzing..."), console=console, transient=True):
results = asyncio.run(
run_all_tickers(candidates, macro_context, config, analysis_date)
)
except Exception as e:
console.print(f"[red]Pipeline failed: {e}[/red]")
raise typer.Exit(1)
save_results(results, macro_context, output_dir)

main.py

@@ -1,11 +1,13 @@
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
from dotenv import load_dotenv
# Load environment variables from .env file
# Load environment variables from .env file BEFORE importing any
# tradingagents modules so TRADINGAGENTS_* vars are visible to
# DEFAULT_CONFIG at import time.
load_dotenv()
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
# Create a custom config
config = DEFAULT_CONFIG.copy()
config["deep_think_llm"] = "gpt-5-mini" # Use a different model

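One consequence of calling `load_dotenv()` this early is precedence: `load_dotenv()` does not overwrite variables that already exist in the process environment, so shell exports win over `.env`, which wins over the hardcoded defaults. A small sketch (the exported value is hypothetical):

```python
import os
from dotenv import load_dotenv

os.environ["TRADINGAGENTS_LLM_PROVIDER"] = "anthropic"  # simulate a shell export
load_dotenv()  # a conflicting TRADINGAGENTS_LLM_PROVIDER in .env is NOT applied

from tradingagents.default_config import DEFAULT_CONFIG
print(DEFAULT_CONFIG["llm_provider"])  # -> "anthropic"
```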
pyproject.toml

@@ -19,6 +19,7 @@ dependencies = [
"langgraph>=0.4.8",
"pandas>=2.3.0",
"parsel>=1.10.0",
"python-dotenv>=1.0.0",
"pytz>=2025.2",
"questionary>=2.1.0",
"rank-bm25>=0.2.2",

tests/test_env_override.py (new file)

@@ -0,0 +1,108 @@
"""Tests that TRADINGAGENTS_* environment variables override DEFAULT_CONFIG."""
import importlib
import os
from unittest.mock import patch
import pytest
class TestEnvOverridesDefaults:
"""Verify that setting TRADINGAGENTS_<KEY> env vars changes DEFAULT_CONFIG."""
def _reload_config(self):
"""Force-reimport default_config so the module-level dict is rebuilt."""
import tradingagents.default_config as mod
importlib.reload(mod)
return mod.DEFAULT_CONFIG
def test_llm_provider_override(self):
with patch.dict(os.environ, {"TRADINGAGENTS_LLM_PROVIDER": "openrouter"}):
cfg = self._reload_config()
assert cfg["llm_provider"] == "openrouter"
def test_backend_url_override(self):
with patch.dict(os.environ, {"TRADINGAGENTS_BACKEND_URL": "http://localhost:1234"}):
cfg = self._reload_config()
assert cfg["backend_url"] == "http://localhost:1234"
def test_deep_think_llm_override(self):
with patch.dict(os.environ, {"TRADINGAGENTS_DEEP_THINK_LLM": "deepseek/deepseek-r1"}):
cfg = self._reload_config()
assert cfg["deep_think_llm"] == "deepseek/deepseek-r1"
def test_quick_think_llm_override(self):
with patch.dict(os.environ, {"TRADINGAGENTS_QUICK_THINK_LLM": "gpt-4o-mini"}):
cfg = self._reload_config()
assert cfg["quick_think_llm"] == "gpt-4o-mini"
def test_mid_think_llm_none_by_default(self):
"""mid_think_llm defaults to None (falls back to quick_think_llm)."""
with patch.dict(os.environ, {}, clear=False):
# Remove the env var if it happens to be set
os.environ.pop("TRADINGAGENTS_MID_THINK_LLM", None)
cfg = self._reload_config()
assert cfg["mid_think_llm"] is None
def test_mid_think_llm_override(self):
with patch.dict(os.environ, {"TRADINGAGENTS_MID_THINK_LLM": "gpt-4o"}):
cfg = self._reload_config()
assert cfg["mid_think_llm"] == "gpt-4o"
def test_empty_env_var_keeps_default(self):
"""An empty string is treated the same as unset (keeps the default)."""
with patch.dict(os.environ, {"TRADINGAGENTS_LLM_PROVIDER": ""}):
cfg = self._reload_config()
assert cfg["llm_provider"] == "openai"
def test_empty_env_var_keeps_none_default(self):
"""An empty string for a None-default field stays None."""
with patch.dict(os.environ, {"TRADINGAGENTS_DEEP_THINK_LLM_PROVIDER": ""}):
cfg = self._reload_config()
assert cfg["deep_think_llm_provider"] is None
def test_per_tier_provider_override(self):
with patch.dict(os.environ, {"TRADINGAGENTS_DEEP_THINK_LLM_PROVIDER": "anthropic"}):
cfg = self._reload_config()
assert cfg["deep_think_llm_provider"] == "anthropic"
def test_per_tier_backend_url_override(self):
with patch.dict(os.environ, {"TRADINGAGENTS_MID_THINK_BACKEND_URL": "http://my-ollama:11434"}):
cfg = self._reload_config()
assert cfg["mid_think_backend_url"] == "http://my-ollama:11434"
def test_max_debate_rounds_int(self):
with patch.dict(os.environ, {"TRADINGAGENTS_MAX_DEBATE_ROUNDS": "3"}):
cfg = self._reload_config()
assert cfg["max_debate_rounds"] == 3
def test_max_debate_rounds_bad_value(self):
"""Non-numeric string falls back to hardcoded default."""
with patch.dict(os.environ, {"TRADINGAGENTS_MAX_DEBATE_ROUNDS": "abc"}):
cfg = self._reload_config()
assert cfg["max_debate_rounds"] == 1
def test_results_dir_override(self):
with patch.dict(os.environ, {"TRADINGAGENTS_RESULTS_DIR": "/tmp/my_results"}):
cfg = self._reload_config()
assert cfg["results_dir"] == "/tmp/my_results"
def test_vendor_scanner_data_override(self):
with patch.dict(os.environ, {"TRADINGAGENTS_VENDOR_SCANNER_DATA": "alpha_vantage"}):
cfg = self._reload_config()
assert cfg["data_vendors"]["scanner_data"] == "alpha_vantage"
def test_defaults_unchanged_when_no_env_set(self):
"""Without any TRADINGAGENTS_* vars, defaults are the original hardcoded values."""
# Clear all TRADINGAGENTS_ vars
env_clean = {k: v for k, v in os.environ.items() if not k.startswith("TRADINGAGENTS_")}
with patch.dict(os.environ, env_clean, clear=True):
cfg = self._reload_config()
assert cfg["llm_provider"] == "openai"
assert cfg["deep_think_llm"] == "gpt-5.2"
assert cfg["mid_think_llm"] is None
assert cfg["quick_think_llm"] == "gpt-5-mini"
assert cfg["backend_url"] == "https://api.openai.com/v1"
assert cfg["max_debate_rounds"] == 1
assert cfg["data_vendors"]["scanner_data"] == "yfinance"

tool_runner.py

@@ -12,7 +12,9 @@ from typing import Any, List
from langchain_core.messages import AIMessage, ToolMessage
MAX_TOOL_ROUNDS = 5 # safety limit to avoid infinite loops
# Most LLM tool-calling patterns resolve within 2-3 rounds;
# 5 provides headroom for complex scenarios while preventing runaway loops.
MAX_TOOL_ROUNDS = 5
def run_tool_loop(

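For context on what a "round" means here, a hedged sketch of the kind of loop MAX_TOOL_ROUNDS bounds; `run_tool_loop`'s real body is not shown in this hunk, and `llm_with_tools` / `execute_tool_calls` are assumed names:

```python
from langchain_core.messages import AIMessage

def bounded_tool_loop(llm_with_tools, messages, execute_tool_calls, max_rounds: int = 5):
    """Hypothetical sketch: each round is one model call plus its tool executions."""
    for _ in range(max_rounds):
        ai_msg: AIMessage = llm_with_tools.invoke(messages)
        messages.append(ai_msg)
        if not ai_msg.tool_calls:  # model answered without requesting more tools
            break
        messages.extend(execute_tool_calls(ai_msg.tool_calls))  # ToolMessage results
    return messages
```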
alpha_vantage_common.py

@@ -2,6 +2,8 @@ import os
import requests
import pandas as pd
import json
import threading
import time as _time
from datetime import datetime
from io import StringIO
@@ -73,8 +75,6 @@ class ThirdPartyParseError(AlphaVantageError):
# ─── Rate-limited request helper ─────────────────────────────────────────────
import threading
import time as _time
_rate_lock = threading.Lock()
_call_timestamps: list[float] = []
@@ -83,14 +83,30 @@ _RATE_LIMIT = 75 # calls per minute (Alpha Vantage premium)
def _rate_limited_request(function_name: str, params: dict, timeout: int = 30) -> dict | str:
"""Make an API request with rate limiting (75 calls/min for premium key)."""
sleep_time = 0.0
with _rate_lock:
now = _time.time()
# Remove timestamps older than 60 seconds
_call_timestamps[:] = [t for t in _call_timestamps if now - t < 60]
if len(_call_timestamps) >= _RATE_LIMIT:
sleep_time = 60 - (now - _call_timestamps[0]) + 0.1
_time.sleep(sleep_time)
# Sleep outside the lock to avoid blocking other threads
if sleep_time > 0:
_time.sleep(sleep_time)
# Re-check and register under lock to avoid races where multiple
# threads calculate similar sleep times and then all fire at once.
with _rate_lock:
now = _time.time()
_call_timestamps[:] = [t for t in _call_timestamps if now - t < 60]
if len(_call_timestamps) >= _RATE_LIMIT:
# Another thread filled the window while we slept — wait again
extra_sleep = 60 - (now - _call_timestamps[0]) + 0.1
_time.sleep(extra_sleep)
_call_timestamps.append(_time.time())
return _make_api_request(function_name, params, timeout=timeout)
@@ -131,6 +147,8 @@ def _make_api_request(function_name: str, params: dict, timeout: int = 30) -> di
)
except requests.exceptions.ConnectionError as exc:
raise ThirdPartyError(f"Connection error: function={function_name}, error={exc}")
except requests.exceptions.RequestException as exc:
raise ThirdPartyError(f"Request failed: function={function_name}, error={exc}")
# HTTP-level errors
if response.status_code == 401:
@@ -146,7 +164,13 @@ def _make_api_request(function_name: str, params: dict, timeout: int = 30) -> di
f"Server error: status={response.status_code}, function={function_name}, "
f"body={response.text[:200]}"
)
response.raise_for_status()
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
raise ThirdPartyError(
f"HTTP error: status={response.status_code}, function={function_name}, "
f"body={response.text[:200]}"
) from exc
response_text = response.text

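A hypothetical call site for the helpers above; the function name and params are illustrative, and it assumes the module's ThirdPartyError types derive from AlphaVantageError, as ThirdPartyParseError does in the hunk above:

```python
try:
    quote = _rate_limited_request("GLOBAL_QUOTE", {"symbol": "AAPL"})
except AlphaVantageError as exc:  # connection, HTTP, and parse failures all land here
    quote = None
    print(f"Alpha Vantage request failed: {exc}")
```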
interface.py

@@ -201,7 +201,7 @@ def route_to_vendor(method: str, *args, **kwargs):
try:
return impl_func(*args, **kwargs)
except AlphaVantageError:
continue # Any AV error triggers fallback to next vendor
except (AlphaVantageError, ConnectionError, TimeoutError):
continue # Any AV error or connection/timeout triggers fallback to next vendor
raise RuntimeError(f"No available vendor for '{method}'")

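Callers are unchanged by this fix; a sketch of the fallback behaviour, where "get_stock_data" and its arguments are assumed for illustration:

```python
# Tries the configured vendor first; an AlphaVantageError, ConnectionError, or
# TimeoutError moves on to the next vendor instead of failing the call.
data = route_to_vendor("get_stock_data", "AAPL", "2026-03-01", "2026-03-17")
```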
tradingagents/default_config.py

@@ -1,45 +1,83 @@
import os
from pathlib import Path
from dotenv import load_dotenv
# Load .env so that TRADINGAGENTS_* variables are available before
# DEFAULT_CONFIG is evaluated. CWD is checked first, then the project
# root (two levels up from this file). load_dotenv never overwrites
# variables that are already present in the environment.
load_dotenv()
load_dotenv(Path(__file__).resolve().parent.parent / ".env")
def _env(key: str, default=None):
"""Read ``TRADINGAGENTS_<KEY>`` from the environment.
Returns *default* when the variable is unset **or** empty, so that
``TRADINGAGENTS_MID_THINK_LLM=`` in a ``.env`` file is treated the
same as not setting it at all (preserving the ``None`` semantics for
"fall back to the parent setting").
"""
val = os.getenv(f"TRADINGAGENTS_{key.upper()}")
if not val: # None or ""
return default
return val
def _env_int(key: str, default=None):
"""Like :func:`_env` but coerces the value to ``int``."""
val = _env(key)
if val is None:
return default
try:
return int(val)
except (ValueError, TypeError):
return default
DEFAULT_CONFIG = {
"project_dir": os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
"results_dir": os.getenv("TRADINGAGENTS_RESULTS_DIR", "./results"),
"results_dir": _env("RESULTS_DIR", "./results"),
"data_cache_dir": os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
"dataflows/data_cache",
),
# LLM settings
"mid_think_llm": "qwen3.5:27b", # falls back to quick_think_llm when None
"quick_think_llm": "qwen3.5:27b",
# LLM settings — all overridable via TRADINGAGENTS_<KEY> env vars
"llm_provider": _env("LLM_PROVIDER", "openai"),
"deep_think_llm": _env("DEEP_THINK_LLM", "gpt-5.2"),
"mid_think_llm": _env("MID_THINK_LLM"), # falls back to quick_think_llm when None
"quick_think_llm": _env("QUICK_THINK_LLM", "gpt-5-mini"),
"backend_url": _env("BACKEND_URL", "https://api.openai.com/v1"),
# Per-role provider overrides (fall back to llm_provider / backend_url when None)
"deep_think_llm_provider": "openrouter",
"deep_think_llm": "deepseek/deepseek-r1-0528",
"deep_think_backend_url": None, # uses OpenRouter's default URL
"mid_think_llm_provider": "ollama", # falls back to ollama
"mid_think_backend_url": "http://192.168.50.76:11434", # falls back to backend_url (ollama host)
"quick_think_llm_provider": "ollama", # falls back to ollama
"quick_think_backend_url": "http://192.168.50.76:11434", # falls back to backend_url (ollama host)
"deep_think_llm_provider": _env("DEEP_THINK_LLM_PROVIDER"), # e.g. "google", "anthropic", "openrouter"
"deep_think_backend_url": _env("DEEP_THINK_BACKEND_URL"), # override backend URL for deep-think model
"mid_think_llm_provider": _env("MID_THINK_LLM_PROVIDER"), # e.g. "ollama"
"mid_think_backend_url": _env("MID_THINK_BACKEND_URL"), # override backend URL for mid-think model
"quick_think_llm_provider": _env("QUICK_THINK_LLM_PROVIDER"), # e.g. "openai", "ollama"
"quick_think_backend_url": _env("QUICK_THINK_BACKEND_URL"), # override backend URL for quick-think model
# Provider-specific thinking configuration (applies to all roles unless overridden)
"google_thinking_level": None, # "high", "minimal", etc.
"openai_reasoning_effort": None, # "medium", "high", "low"
"google_thinking_level": _env("GOOGLE_THINKING_LEVEL"), # "high", "minimal", etc.
"openai_reasoning_effort": _env("OPENAI_REASONING_EFFORT"), # "medium", "high", "low"
# Per-role provider-specific thinking configuration
"deep_think_google_thinking_level": None,
"deep_think_openai_reasoning_effort": None,
"mid_think_google_thinking_level": None,
"mid_think_openai_reasoning_effort": None,
"quick_think_google_thinking_level": None,
"quick_think_openai_reasoning_effort": None,
"deep_think_google_thinking_level": _env("DEEP_THINK_GOOGLE_THINKING_LEVEL"),
"deep_think_openai_reasoning_effort": _env("DEEP_THINK_OPENAI_REASONING_EFFORT"),
"mid_think_google_thinking_level": _env("MID_THINK_GOOGLE_THINKING_LEVEL"),
"mid_think_openai_reasoning_effort": _env("MID_THINK_OPENAI_REASONING_EFFORT"),
"quick_think_google_thinking_level": _env("QUICK_THINK_GOOGLE_THINKING_LEVEL"),
"quick_think_openai_reasoning_effort": _env("QUICK_THINK_OPENAI_REASONING_EFFORT"),
# Debate and discussion settings
"max_debate_rounds": 1,
"max_risk_discuss_rounds": 1,
"max_recur_limit": 100,
"max_debate_rounds": _env_int("MAX_DEBATE_ROUNDS", 1),
"max_risk_discuss_rounds": _env_int("MAX_RISK_DISCUSS_ROUNDS", 1),
"max_recur_limit": _env_int("MAX_RECUR_LIMIT", 100),
# Data vendor configuration
# Category-level configuration (default for all tools in category)
"data_vendors": {
"core_stock_apis": "yfinance", # Options: alpha_vantage, yfinance
"technical_indicators": "yfinance", # Options: alpha_vantage, yfinance
"fundamental_data": "yfinance", # Options: alpha_vantage, yfinance
"news_data": "yfinance", # Options: alpha_vantage, yfinance
"scanner_data": "alpha_vantage", # Options: alpha_vantage (primary), yfinance (fallback)
"core_stock_apis": _env("VENDOR_CORE_STOCK_APIS", "yfinance"),
"technical_indicators": _env("VENDOR_TECHNICAL_INDICATORS", "yfinance"),
"fundamental_data": _env("VENDOR_FUNDAMENTAL_DATA", "yfinance"),
"news_data": _env("VENDOR_NEWS_DATA", "yfinance"),
"scanner_data": _env("VENDOR_SCANNER_DATA", "yfinance"),
},
# Tool-level configuration (takes precedence over category-level)
"tool_vendors": {

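Behaviour sketch for `_env()` / `_env_int()` as defined above; the env values are hypothetical, and the outcomes match the docstrings and `tests/test_env_override.py`:

```python
import os

os.environ["TRADINGAGENTS_QUICK_THINK_LLM"] = ""       # empty -> treated as unset
os.environ["TRADINGAGENTS_MAX_DEBATE_ROUNDS"] = "abc"  # not an int -> ignored

_env("QUICK_THINK_LLM", "gpt-5-mini")  # -> "gpt-5-mini" (default kept)
_env_int("MAX_DEBATE_ROUNDS", 1)       # -> 1 (ValueError falls back to default)
_env("RESULTS_DIR", "./results")       # -> "./results" unless TRADINGAGENTS_RESULTS_DIR is set
```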
scanner_graph.py

@@ -139,9 +139,10 @@ class ScannerGraph:
}
if self.debug:
trace = []
# stream() yields partial state updates; use invoke() for the
# full accumulated state and print chunks for debugging only.
for chunk in self.graph.stream(initial_state):
trace.append(chunk)
return trace[-1] if trace else initial_state
print(f"[scanner debug] chunk keys: {list(chunk.keys())}")
# Fall through to invoke() for the correct accumulated result
return self.graph.invoke(initial_state)

tradingagents/pipeline/macro_bridge.py

@@ -238,10 +238,10 @@ async def run_all_tickers(
List of TickerResult in completion order.
"""
semaphore = asyncio.Semaphore(max_concurrent)
loop = asyncio.get_event_loop()
async def _run_one(candidate: StockCandidate) -> TickerResult:
async with semaphore:
loop = asyncio.get_running_loop()
# TradingAgentsGraph is synchronous — run it in a thread pool
return await loop.run_in_executor(
None,