This commit is contained in:
Trading Prediction Technology 2026-04-10 10:11:37 +02:00 committed by GitHub
commit 49c978ba05
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 42 additions and 26 deletions

View File

@ -1,6 +1,22 @@
# LLM Providers (set the one you use)
# ─── Cloud Providers ──────────────────────────────────────────────────────────
OPENAI_API_KEY=
GOOGLE_API_KEY=
ANTHROPIC_API_KEY=
XAI_API_KEY=
OPENROUTER_API_KEY=
# ─── Local LLM via llama.cpp ──────────────────────────────────────────────────
# 1. Start llama-server:
# llama-server --model ~/models/my-model.gguf --port 8081 --host 0.0.0.0
# 2. Copy this file to .env and uncomment + fill in the values below
# 3. Find your model ID with: curl http://localhost:8081/v1/models
# LLM_PROVIDER=llamacpp
# BACKEND_URL=http://localhost:8081/v1
# DEEP_THINK_LLM=your-model-name-here
# QUICK_THINK_LLM=your-model-name-here
# ─── Any OpenAI-compatible local server (LM Studio, vLLM, etc.) ───────────────
# LLM_PROVIDER=openai
# BACKEND_URL=http://localhost:1234/v1
# OPENAI_API_KEY=dummy

1
.gitignore vendored
View File

@ -217,3 +217,4 @@ __marimo__/
# Cache
**/data_cache/
!.env.example

27
main.py
View File

@ -1,31 +1,20 @@
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG

# DEFAULT_CONFIG already loads .env via python-dotenv, so no explicit
# load_dotenv() call is needed here.
# All LLM settings can be overridden via environment variables:
#   LLM_PROVIDER, BACKEND_URL, DEEP_THINK_LLM, QUICK_THINK_LLM
config = DEFAULT_CONFIG.copy()
config["deep_think_llm"] = "gpt-5.4-mini"
config["quick_think_llm"] = "gpt-5.4-mini"
config["max_debate_rounds"] = 1  # single statement — was duplicated

# Configure data vendors (default uses yfinance, no extra API keys needed).
# Each entry accepts: "alpha_vantage" or "yfinance".
# NOTE: dict keys deduplicated — the rendered diff repeated each key, and
# duplicate literal keys would silently keep only the last value.
config["data_vendors"] = {
    "core_stock_apis": "yfinance",
    "technical_indicators": "yfinance",
    "fundamental_data": "yfinance",
    "news_data": "yfinance",
}

if __name__ == "__main__":
    # Guarded entry point: importing this module no longer kicks off a
    # full (and expensive) graph run as a side effect.
    ta = TradingAgentsGraph(debug=True, config=config)

    # Forward propagate a single decision for one ticker/date.
    _, decision = ta.propagate("NVDA", "2024-05-10")
    print(decision)

    # Memorize mistakes and reflect:
    # ta.reflect_and_remember(1000)  # parameter is the position returns

View File

@ -1,5 +1,11 @@
import os
try:
from dotenv import load_dotenv
load_dotenv()
except ImportError:
pass # python-dotenv optional — falls back to environment variables
DEFAULT_CONFIG = {
"project_dir": os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
"results_dir": os.getenv("TRADINGAGENTS_RESULTS_DIR", "./results"),
@ -8,10 +14,10 @@ DEFAULT_CONFIG = {
"dataflows/data_cache",
),
# LLM settings
"llm_provider": "openai",
"deep_think_llm": "gpt-5.4",
"quick_think_llm": "gpt-5.4-mini",
"backend_url": "https://api.openai.com/v1",
"llm_provider": os.environ.get("LLM_PROVIDER", "openai"),
"deep_think_llm": os.environ.get("DEEP_THINK_LLM", "gpt-5.4"),
"quick_think_llm": os.environ.get("QUICK_THINK_LLM", "gpt-5.4-mini"),
"backend_url": os.environ.get("BACKEND_URL", "https://api.openai.com/v1"),
# Provider-specific thinking configuration
"google_thinking_level": None, # "high", "minimal", etc.
"openai_reasoning_effort": None, # "medium", "high", "low"

View File

@ -34,7 +34,7 @@ def create_llm_client(
"""
provider_lower = provider.lower()
if provider_lower in ("openai", "ollama", "openrouter"):
if provider_lower in ("openai", "ollama", "openrouter", "llamacpp"):
return OpenAIClient(model, base_url, provider=provider_lower, **kwargs)
if provider_lower == "xai":

View File

@ -66,6 +66,10 @@ class OpenAIClient(BaseLLMClient):
llm_kwargs["api_key"] = api_key
else:
llm_kwargs["api_key"] = "ollama"
elif self.provider == "llamacpp":
base_url = os.environ.get("BACKEND_URL") or os.environ.get("LLAMACPP_BASE_URL", "http://localhost:8080/v1")
llm_kwargs["base_url"] = base_url
llm_kwargs["api_key"] = "no-key-needed" # llama-server doesn't require auth
elif self.base_url:
llm_kwargs["base_url"] = self.base_url

View File

@ -17,7 +17,7 @@ def validate_model(provider: str, model: str) -> bool:
"""
provider_lower = provider.lower()
if provider_lower in ("ollama", "openrouter"):
if provider_lower in ("ollama", "openrouter", "llamacpp"):
return True
if provider_lower not in VALID_MODELS: