This commit is contained in:
Jeff Hicken 2026-03-25 21:21:12 -07:00 committed by GitHub
commit c9fe5d6846
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 238 additions and 1311 deletions

View File

@ -1,6 +1,52 @@
# LLM Providers (set the one you use)
# =============================================================================
# TradingAgents Environment Configuration
# =============================================================================
# Copy this file to .env and fill in your API keys and configuration
# =============================================================================
# LLM Provider API Keys
# =============================================================================
# Set the API key for the LLM provider you want to use
# The CLI will prompt you to select a provider when running analysis
# OpenAI (GPT-4, GPT-5 models)
OPENAI_API_KEY=
# Google (Gemini models)
GOOGLE_API_KEY=
# Anthropic (Claude models)
ANTHROPIC_API_KEY=
# xAI (Grok models)
XAI_API_KEY=
# OpenRouter (Access to multiple models)
OPENROUTER_API_KEY=
# =============================================================================
# LM Studio Configuration (Local Models)
# =============================================================================
# LM Studio allows you to run models locally on your machine
# The CLI will automatically detect available models when you select LM Studio
# Base URL for LM Studio server (optional, defaults to http://localhost:1234/v1)
# Change this if you're running LM Studio on a different port or remote server
LMSTUDIO_BASE_URL=http://localhost:1234/v1
# API Key for LM Studio (optional, only needed if you secured your instance)
# Most local LM Studio instances don't require an API key
LMSTUDIO_API_KEY=
# =============================================================================
# Data Provider API Keys
# =============================================================================
# Alpha Vantage - Stock market data, news, and fundamentals
# Get your free API key at: https://www.alphavantage.co/support/#api-key
ALPHA_VANTAGE_API_KEY=
# =============================================================================
# Advanced Configuration (Optional)
# =============================================================================
# Custom results directory (defaults to ./results)
# TRADINGAGENTS_RESULTS_DIR=./results

4
.gitignore vendored
View File

@ -217,3 +217,7 @@ __marimo__/
# Cache
**/data_cache/
# Results
results/*
reports/*

View File

@ -585,6 +585,7 @@ def get_user_selections():
)
)
anthropic_effort = ask_anthropic_effort()
# LM Studio, Ollama, xAI, and Openrouter don't need additional configuration
return {
"ticker": selected_ticker,

View File

@ -1,3 +1,5 @@
import os
import requests
import questionary
from typing import List, Optional, Tuple, Dict
@ -133,6 +135,31 @@ def select_research_depth() -> int:
return choice
def fetch_lmstudio_models(base_url: str) -> Optional[List[str]]:
    """Fetch available model IDs from a running LM Studio server.

    Args:
        base_url: LM Studio base URL (e.g., http://localhost:1234/v1)

    Returns:
        List of model IDs if the server responds with at least one model,
        None if the server is unreachable or the response is malformed/empty.
    """
    try:
        # Normalize any trailing slash, then hit the OpenAI-compatible
        # GET /models endpoint (".../v1" becomes ".../v1/models").
        api_url = base_url.rstrip('/') + '/models'
        # Short timeout: LM Studio is typically local; don't hang the CLI
        # for long if the server is down.
        response = requests.get(api_url, timeout=2)
        response.raise_for_status()
        data = response.json()
        # Expected OpenAI-compatible shape: {"data": [{"id": "..."}, ...]}
        if 'data' in data and isinstance(data['data'], list):
            # Guard with isinstance so a malformed (non-dict) entry yields
            # None instead of raising an uncaught TypeError.
            models = [
                model['id']
                for model in data['data']
                if isinstance(model, dict) and 'id' in model
            ]
            return models if models else None
        return None
    except (requests.RequestException, KeyError, ValueError):
        # Connection failed or invalid response — callers fall back to
        # manual model-name entry.
        return None
def select_shallow_thinking_agent(provider) -> str:
"""Select shallow thinking llm engine using an interactive selection."""
@ -171,13 +198,78 @@ def select_shallow_thinking_agent(provider) -> str:
("GPT-OSS:latest (20B, local)", "gpt-oss:latest"),
("GLM-4.7-Flash:latest (30B, local)", "glm-4.7-flash:latest"),
],
"lm studio": [
("Custom model (enter model name)", "custom"),
],
}
provider_key = provider.lower()
# For LM Studio, fetch models dynamically from API
if provider_key == "lm studio":
lmstudio_url = os.getenv("LMSTUDIO_BASE_URL", "http://localhost:1234/v1")
console.print(f"[dim]Fetching models from LM Studio at {lmstudio_url}...[/dim]")
available_models = fetch_lmstudio_models(lmstudio_url)
if available_models:
console.print(f"[green]✓ Found {len(available_models)} model(s)[/green]")
# Create choices from available models
choices = [questionary.Choice(model, value=model) for model in available_models]
# Add manual entry option at the end
choices.append(questionary.Choice("Enter model name manually", value="__manual__"))
choice = questionary.select(
"Select Your [Quick-Thinking LLM Engine]:",
choices=choices,
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
style=questionary.Style([
("selected", "fg:magenta noinherit"),
("highlighted", "fg:magenta noinherit"),
("pointer", "fg:magenta noinherit"),
]),
).ask()
if choice is None:
console.print("\n[red]No model selected. Exiting...[/red]")
exit(1)
# If manual entry selected, prompt for model name
if choice == "__manual__":
model_name = questionary.text(
"Enter the model name:",
instruction="\n- This should match the model ID from LM Studio",
).ask()
if not model_name:
console.print("\n[red]No model name entered. Exiting...[/red]")
exit(1)
return model_name
return choice
else:
# Connection failed, show error and fall back to manual entry
console.print(f"[yellow]⚠ Could not connect to LM Studio at {lmstudio_url}[/yellow]")
console.print("[yellow]Please make sure LM Studio is running and a model is loaded.[/yellow]")
model_name = questionary.text(
"Enter the model name manually:",
instruction="\n- This should match the model ID from LM Studio",
).ask()
if not model_name:
console.print("\n[red]No model name entered. Exiting...[/red]")
exit(1)
return model_name
choice = questionary.select(
"Select Your [Quick-Thinking LLM Engine]:",
choices=[
questionary.Choice(display, value=value)
for display, value in SHALLOW_AGENT_OPTIONS[provider.lower()]
for display, value in SHALLOW_AGENT_OPTIONS[provider_key]
],
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
style=questionary.Style(
@ -238,13 +330,78 @@ def select_deep_thinking_agent(provider) -> str:
("GPT-OSS:latest (20B, local)", "gpt-oss:latest"),
("Qwen3:latest (8B, local)", "qwen3:latest"),
],
"lm studio": [
("Custom model (enter model name)", "custom"),
],
}
provider_key = provider.lower()
# For LM Studio, fetch models dynamically from API
if provider_key == "lm studio":
lmstudio_url = os.getenv("LMSTUDIO_BASE_URL", "http://localhost:1234/v1")
console.print(f"[dim]Fetching models from LM Studio at {lmstudio_url}...[/dim]")
available_models = fetch_lmstudio_models(lmstudio_url)
if available_models:
console.print(f"[green]✓ Found {len(available_models)} model(s)[/green]")
# Create choices from available models
choices = [questionary.Choice(model, value=model) for model in available_models]
# Add manual entry option at the end
choices.append(questionary.Choice("Enter model name manually", value="__manual__"))
choice = questionary.select(
"Select Your [Deep-Thinking LLM Engine]:",
choices=choices,
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
style=questionary.Style([
("selected", "fg:magenta noinherit"),
("highlighted", "fg:magenta noinherit"),
("pointer", "fg:magenta noinherit"),
]),
).ask()
if choice is None:
console.print("\n[red]No model selected. Exiting...[/red]")
exit(1)
# If manual entry selected, prompt for model name
if choice == "__manual__":
model_name = questionary.text(
"Enter the model name:",
instruction="\n- This should match the model ID from LM Studio",
).ask()
if not model_name:
console.print("\n[red]No model name entered. Exiting...[/red]")
exit(1)
return model_name
return choice
else:
# Connection failed, show error and fall back to manual entry
console.print(f"[yellow]⚠ Could not connect to LM Studio at {lmstudio_url}[/yellow]")
console.print("[yellow]Please make sure LM Studio is running and a model is loaded.[/yellow]")
model_name = questionary.text(
"Enter the model name manually:",
instruction="\n- This should match the model ID from LM Studio",
).ask()
if not model_name:
console.print("\n[red]No model name entered. Exiting...[/red]")
exit(1)
return model_name
choice = questionary.select(
"Select Your [Deep-Thinking LLM Engine]:",
choices=[
questionary.Choice(display, value=value)
for display, value in DEEP_AGENT_OPTIONS[provider.lower()]
for display, value in DEEP_AGENT_OPTIONS[provider_key]
],
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
style=questionary.Style(
@ -264,6 +421,9 @@ def select_deep_thinking_agent(provider) -> str:
def select_llm_provider() -> tuple[str, str]:
"""Select the OpenAI api url using interactive selection."""
# Get LM Studio URL from environment or use default
lmstudio_url = os.getenv("LMSTUDIO_BASE_URL", "http://localhost:1234/v1")
# Define OpenAI api options with their corresponding endpoints
BASE_URLS = [
("OpenAI", "https://api.openai.com/v1"),
@ -272,6 +432,7 @@ def select_llm_provider() -> tuple[str, str]:
("xAI", "https://api.x.ai/v1"),
("Openrouter", "https://openrouter.ai/api/v1"),
("Ollama", "http://localhost:11434/v1"),
("LM Studio", lmstudio_url),
]
choice = questionary.select(

View File

@ -15,7 +15,7 @@ def create_llm_client(
"""Create an LLM client for the specified provider.
Args:
provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter)
provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter, lm studio)
model: Model name/identifier
base_url: Optional base URL for API endpoint
**kwargs: Additional provider-specific arguments
@ -34,7 +34,7 @@ def create_llm_client(
"""
provider_lower = provider.lower()
if provider_lower in ("openai", "ollama", "openrouter"):
if provider_lower in ("openai", "ollama", "openrouter", "lm studio"):
return OpenAIClient(model, base_url, provider=provider_lower, **kwargs)
if provider_lower == "xai":

View File

@ -25,20 +25,22 @@ _PASSTHROUGH_KWARGS = (
)
# Provider base URLs and API key env vars
# Format: (default_base_url, api_key_env_var)
_PROVIDER_CONFIG = {
"xai": ("https://api.x.ai/v1", "XAI_API_KEY"),
"openrouter": ("https://openrouter.ai/api/v1", "OPENROUTER_API_KEY"),
"ollama": ("http://localhost:11434/v1", None),
"lm studio": (None, "LMSTUDIO_API_KEY"), # Base URL from LMSTUDIO_BASE_URL
}
class OpenAIClient(BaseLLMClient):
"""Client for OpenAI, Ollama, OpenRouter, and xAI providers.
"""Client for OpenAI, Ollama, OpenRouter, LM Studio, and xAI providers.
For native OpenAI models, uses the Responses API (/v1/responses) which
supports reasoning_effort with function tools across all model families
(GPT-4.1, GPT-5). Third-party compatible providers (xAI, OpenRouter,
Ollama) use standard Chat Completions.
Ollama, LM Studio) use standard Chat Completions.
"""
def __init__(
@ -58,13 +60,27 @@ class OpenAIClient(BaseLLMClient):
# Provider-specific base URL and auth
if self.provider in _PROVIDER_CONFIG:
base_url, api_key_env = _PROVIDER_CONFIG[self.provider]
llm_kwargs["base_url"] = base_url
# Special handling for LM Studio - get base URL from env
if self.provider == "lm studio":
base_url = os.environ.get("LMSTUDIO_BASE_URL", "http://localhost:1234/v1")
# Set base URL if available
if base_url:
llm_kwargs["base_url"] = base_url
# Handle API key
if api_key_env:
api_key = os.environ.get(api_key_env)
if api_key:
llm_kwargs["api_key"] = api_key
else:
# For LM Studio, use placeholder if no API key set
if self.provider == "lm studio":
llm_kwargs["api_key"] = "not-needed"
else:
llm_kwargs["api_key"] = "ollama"
# For providers with no API key env var (like ollama)
llm_kwargs["api_key"] = "not-needed"
elif self.base_url:
llm_kwargs["base_url"] = self.base_url

1303
uv.lock

File diff suppressed because it is too large Load Diff