diff --git a/.env.example b/.env.example
index 1328b838..e547b91c 100644
--- a/.env.example
+++ b/.env.example
@@ -1,6 +1,22 @@
-# LLM Providers (set the one you use)
+# ─── Cloud Providers ──────────────────────────────────────────────────────────
 OPENAI_API_KEY=
 GOOGLE_API_KEY=
 ANTHROPIC_API_KEY=
 XAI_API_KEY=
 OPENROUTER_API_KEY=
+
+# ─── Local LLM via llama.cpp ──────────────────────────────────────────────────
+# 1. Start llama-server:
+#    llama-server --model ~/models/my-model.gguf --port 8081 --host 0.0.0.0
+# 2. Copy this file to .env and uncomment + fill in the values below
+# 3. Find your model ID with: curl http://localhost:8081/v1/models
+
+# LLM_PROVIDER=llamacpp
+# BACKEND_URL=http://localhost:8081/v1
+# DEEP_THINK_LLM=your-model-name-here
+# QUICK_THINK_LLM=your-model-name-here
+
+# ─── Any OpenAI-compatible local server (LM Studio, vLLM, etc.) ───────────────
+# LLM_PROVIDER=openai
+# BACKEND_URL=http://localhost:1234/v1
+# OPENAI_API_KEY=dummy
diff --git a/.gitignore b/.gitignore
index 9a2904a9..1bb7fd64 100644
--- a/.gitignore
+++ b/.gitignore
@@ -217,3 +217,4 @@ __marimo__/
 
 # Cache
 **/data_cache/
+!.env.example
diff --git a/main.py b/main.py
index c94fde32..88794896 100644
--- a/main.py
+++ b/main.py
@@ -1,31 +1,20 @@
 from tradingagents.graph.trading_graph import TradingAgentsGraph
 from tradingagents.default_config import DEFAULT_CONFIG
 
-from dotenv import load_dotenv
-
-# Load environment variables from .env file
-load_dotenv()
-
-# Create a custom config
+# DEFAULT_CONFIG already loads .env via python-dotenv
+# All LLM settings can be overridden via environment variables:
+#   LLM_PROVIDER, BACKEND_URL, DEEP_THINK_LLM, QUICK_THINK_LLM
 config = DEFAULT_CONFIG.copy()
-config["deep_think_llm"] = "gpt-5.4-mini"  # Use a different model
-config["quick_think_llm"] = "gpt-5.4-mini"  # Use a different model
-config["max_debate_rounds"] = 1  # Increase debate rounds
+config["max_debate_rounds"] = 1
 
+# Configure data vendors (default uses yfinance, no extra API keys needed)
 config["data_vendors"] = {
-    "core_stock_apis": "yfinance",  # Options: alpha_vantage, yfinance
-    "technical_indicators": "yfinance",  # Options: alpha_vantage, yfinance
-    "fundamental_data": "yfinance",  # Options: alpha_vantage, yfinance
-    "news_data": "yfinance",  # Options: alpha_vantage, yfinance
+    "core_stock_apis": "yfinance",
+    "technical_indicators": "yfinance",
+    "fundamental_data": "yfinance",
+    "news_data": "yfinance",
 }
 
-# Initialize with custom config
 ta = TradingAgentsGraph(debug=True, config=config)
-
-# forward propagate
 _, decision = ta.propagate("NVDA", "2024-05-10")
 print(decision)
-
-# Memorize mistakes and reflect
-# ta.reflect_and_remember(1000)  # parameter is the position returns
diff --git a/tradingagents/default_config.py b/tradingagents/default_config.py
index 26a4e4d2..ce4cafb8 100644
--- a/tradingagents/default_config.py
+++ b/tradingagents/default_config.py
@@ -1,5 +1,11 @@
 import os
 
+try:
+    from dotenv import load_dotenv
+    load_dotenv()
+except ImportError:
+    pass  # python-dotenv optional — falls back to environment variables
+
 DEFAULT_CONFIG = {
     "project_dir": os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
     "results_dir": os.getenv("TRADINGAGENTS_RESULTS_DIR", "./results"),
@@ -8,10 +14,10 @@ DEFAULT_CONFIG = {
         "dataflows/data_cache",
     ),
     # LLM settings
-    "llm_provider": "openai",
-    "deep_think_llm": "gpt-5.4",
-    "quick_think_llm": "gpt-5.4-mini",
-    "backend_url": "https://api.openai.com/v1",
+    "llm_provider": os.environ.get("LLM_PROVIDER", "openai"),
+    "deep_think_llm": os.environ.get("DEEP_THINK_LLM", "gpt-5.4"),
+    "quick_think_llm": os.environ.get("QUICK_THINK_LLM", "gpt-5.4-mini"),
+    "backend_url": os.environ.get("BACKEND_URL", "https://api.openai.com/v1"),
     # Provider-specific thinking configuration
     "google_thinking_level": None,  # "high", "minimal", etc.
     "openai_reasoning_effort": None,  # "medium", "high", "low"
diff --git a/tradingagents/llm_clients/factory.py b/tradingagents/llm_clients/factory.py
index 93c2a7d3..af593d9f 100644
--- a/tradingagents/llm_clients/factory.py
+++ b/tradingagents/llm_clients/factory.py
@@ -34,7 +34,7 @@ def create_llm_client(
     """
     provider_lower = provider.lower()
 
-    if provider_lower in ("openai", "ollama", "openrouter"):
+    if provider_lower in ("openai", "ollama", "openrouter", "llamacpp"):
         return OpenAIClient(model, base_url, provider=provider_lower, **kwargs)
 
     if provider_lower == "xai":
diff --git a/tradingagents/llm_clients/openai_client.py b/tradingagents/llm_clients/openai_client.py
index 4f2e1b32..84eb306c 100644
--- a/tradingagents/llm_clients/openai_client.py
+++ b/tradingagents/llm_clients/openai_client.py
@@ -66,6 +66,10 @@ class OpenAIClient(BaseLLMClient):
                 llm_kwargs["api_key"] = api_key
             else:
                 llm_kwargs["api_key"] = "ollama"
+        elif self.provider == "llamacpp":
+            base_url = os.environ.get("BACKEND_URL") or os.environ.get("LLAMACPP_BASE_URL", "http://localhost:8080/v1")
+            llm_kwargs["base_url"] = base_url
+            llm_kwargs["api_key"] = "no-key-needed"  # llama-server doesn't require auth
         elif self.base_url:
             llm_kwargs["base_url"] = self.base_url
 
diff --git a/tradingagents/llm_clients/validators.py b/tradingagents/llm_clients/validators.py
index 4e6d457b..b90ecac4 100644
--- a/tradingagents/llm_clients/validators.py
+++ b/tradingagents/llm_clients/validators.py
@@ -17,7 +17,7 @@ def validate_model(provider: str, model: str) -> bool:
     """
     provider_lower = provider.lower()
 
-    if provider_lower in ("ollama", "openrouter"):
+    if provider_lower in ("ollama", "openrouter", "llamacpp"):
        return True

    if provider_lower not in VALID_MODELS: