This commit is contained in:
kazuma 2026-02-07 17:37:36 +05:30 committed by GitHub
commit 58ed6112a0
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
9 changed files with 164 additions and 20 deletions

View File

@ -1,5 +1,8 @@
# LLM Providers (set the one you use)
OPENAI_API_KEY=
AZURE_OPENAI_API_KEY=
AZURE_OPENAI_ENDPOINT=
AZURE_OPENAI_API_VERSION=2024-10-21
GOOGLE_API_KEY=
ANTHROPIC_API_KEY=
XAI_API_KEY=

View File

@ -162,7 +162,7 @@ An interface will appear showing results as they load, letting you track the age
### Implementation Details
We built TradingAgents with LangGraph to ensure flexibility and modularity. The framework supports multiple LLM providers: OpenAI, Google, Anthropic, xAI, OpenRouter, and Ollama.
We built TradingAgents with LangGraph to ensure flexibility and modularity. The framework supports multiple LLM providers: OpenAI, Azure OpenAI, Google, Anthropic, xAI, OpenRouter, and Ollama.
### Python Usage
@ -186,7 +186,7 @@ from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
config = DEFAULT_CONFIG.copy()
config["llm_provider"] = "openai" # openai, google, anthropic, xai, openrouter, ollama
config["llm_provider"] = "openai" # openai, azure, google, anthropic, xai, openrouter, ollama
config["deep_think_llm"] = "gpt-5.2" # Model for complex reasoning
config["quick_think_llm"] = "gpt-5-mini" # Model for quick tasks
config["max_debate_rounds"] = 2

View File

@ -536,10 +536,10 @@ def get_user_selections():
)
selected_research_depth = select_research_depth()
# Step 5: OpenAI backend
# Step 5: LLM provider
console.print(
create_question_box(
"Step 5: OpenAI backend", "Select which service to talk to"
"Step 5: LLM Provider", "Select which service to talk to"
)
)
selected_llm_provider, backend_url = select_llm_provider()
@ -555,7 +555,9 @@ def get_user_selections():
# Step 7: Provider-specific thinking configuration
thinking_level = None
reasoning_effort = None
llm_reasoning_effort = None
azure_endpoint = None
azure_api_version = None
provider_lower = selected_llm_provider.lower()
if provider_lower == "google":
@ -570,10 +572,29 @@ def get_user_selections():
console.print(
create_question_box(
"Step 7: Reasoning Effort",
"Configure OpenAI reasoning effort level"
"Configure reasoning effort level"
)
)
reasoning_effort = ask_openai_reasoning_effort()
llm_reasoning_effort = ask_reasoning_effort()
elif provider_lower == "azure":
console.print(
create_question_box(
"Step 7: Azure OpenAI",
"Configure endpoint, API version, and deployment names"
)
)
azure_endpoint = ask_azure_endpoint(backend_url)
azure_api_version = ask_azure_api_version()
selected_shallow_thinker = ask_azure_deployment_name(
"Quick-Thinking LLM",
selected_shallow_thinker,
)
selected_deep_thinker = ask_azure_deployment_name(
"Deep-Thinking LLM",
selected_deep_thinker,
)
backend_url = azure_endpoint
llm_reasoning_effort = ask_reasoning_effort()
return {
"ticker": selected_ticker,
@ -585,7 +606,9 @@ def get_user_selections():
"shallow_thinker": selected_shallow_thinker,
"deep_thinker": selected_deep_thinker,
"google_thinking_level": thinking_level,
"openai_reasoning_effort": reasoning_effort,
"llm_reasoning_effort": llm_reasoning_effort,
"azure_endpoint": azure_endpoint,
"azure_api_version": azure_api_version,
}
@ -910,7 +933,10 @@ def run_analysis():
config["llm_provider"] = selections["llm_provider"].lower()
# Provider-specific thinking configuration
config["google_thinking_level"] = selections.get("google_thinking_level")
config["openai_reasoning_effort"] = selections.get("openai_reasoning_effort")
config["llm_reasoning_effort"] = selections.get("llm_reasoning_effort")
config["openai_reasoning_effort"] = selections.get("llm_reasoning_effort")
config["azure_endpoint"] = selections.get("azure_endpoint")
config["azure_api_version"] = selections.get("azure_api_version")
# Create stats callback handler for tracking LLM/tool calls
stats_handler = StatsCallbackHandler()

View File

@ -134,6 +134,13 @@ def select_shallow_thinking_agent(provider) -> str:
("GPT-5.1 - Flexible reasoning", "gpt-5.1"),
("GPT-4.1 - Smartest non-reasoning, 1M context", "gpt-4.1"),
],
"azure": [
("GPT-5 Mini deployment", "gpt-5-mini"),
("GPT-5 Nano deployment", "gpt-5-nano"),
("GPT-5.2 deployment", "gpt-5.2"),
("GPT-5.1 deployment", "gpt-5.1"),
("GPT-4.1 deployment", "gpt-4.1"),
],
"anthropic": [
("Claude Haiku 4.5 - Fast + extended thinking", "claude-haiku-4-5"),
("Claude Sonnet 4.5 - Best for agents/coding", "claude-sonnet-4-5"),
@ -200,6 +207,14 @@ def select_deep_thinking_agent(provider) -> str:
("GPT-5 Mini - Cost-optimized reasoning", "gpt-5-mini"),
("GPT-5 Nano - Ultra-fast, high-throughput", "gpt-5-nano"),
],
"azure": [
("GPT-5.2 deployment", "gpt-5.2"),
("GPT-5.1 deployment", "gpt-5.1"),
("GPT-5 deployment", "gpt-5"),
("GPT-4.1 deployment", "gpt-4.1"),
("GPT-5 Mini deployment", "gpt-5-mini"),
("GPT-5 Nano deployment", "gpt-5-nano"),
],
"anthropic": [
("Claude Sonnet 4.5 - Best for agents/coding", "claude-sonnet-4-5"),
("Claude Opus 4.5 - Premium, max intelligence", "claude-opus-4-5"),
@ -253,10 +268,11 @@ def select_deep_thinking_agent(provider) -> str:
return choice
def select_llm_provider() -> tuple[str, str]:
"""Select the OpenAI api url using interactive selection."""
# Define OpenAI api options with their corresponding endpoints
"""Select an LLM provider and default API endpoint."""
# Define provider options with their corresponding endpoints
BASE_URLS = [
("OpenAI", "https://api.openai.com/v1"),
("Azure", "https://YOUR-RESOURCE.openai.azure.com/"),
("Google", "https://generativelanguage.googleapis.com/v1"),
("Anthropic", "https://api.anthropic.com/"),
("xAI", "https://api.x.ai/v1"),
@ -281,7 +297,7 @@ def select_llm_provider() -> tuple[str, str]:
).ask()
if choice is None:
console.print("\n[red]no OpenAI backend selected. Exiting...[/red]")
console.print("\n[red]No LLM provider selected. Exiting...[/red]")
exit(1)
display_name, url = choice
@ -290,8 +306,8 @@ def select_llm_provider() -> tuple[str, str]:
return display_name, url
def ask_openai_reasoning_effort() -> str:
"""Ask for OpenAI reasoning effort level."""
def ask_reasoning_effort() -> str:
"""Ask for reasoning effort level for supported providers."""
choices = [
questionary.Choice("Medium (Default)", "medium"),
questionary.Choice("High (More thorough)", "high"),
@ -308,6 +324,11 @@ def ask_openai_reasoning_effort() -> str:
).ask()
def ask_openai_reasoning_effort() -> str:
    """Deprecated name kept for backward compatibility.

    Delegates to :func:`ask_reasoning_effort`; existing callers that still
    import the old OpenAI-specific name continue to work unchanged.
    """
    effort = ask_reasoning_effort()
    return effort
def ask_gemini_thinking_config() -> str | None:
"""Ask for Gemini thinking configuration.
@ -326,3 +347,30 @@ def ask_gemini_thinking_config() -> str | None:
("pointer", "fg:green noinherit"),
]),
).ask()
def ask_azure_endpoint(default: str = "https://YOUR-RESOURCE.openai.azure.com/") -> str:
    """Prompt interactively for the Azure OpenAI endpoint URL.

    Args:
        default: Pre-filled endpoint suggestion shown in the prompt.

    Returns:
        The endpoint URL entered by the user.
    """
    def _require_text(answer: str):
        # questionary convention: True means valid, a string is the error shown.
        return len(answer.strip()) > 0 or "Endpoint URL is required."

    prompt = questionary.text(
        "Enter Azure OpenAI Endpoint URL:",
        default=default,
        validate=_require_text,
    )
    return prompt.ask()
def ask_azure_api_version(default: str = "2024-10-21") -> str:
    """Prompt interactively for the Azure OpenAI API version string.

    Args:
        default: Pre-filled API version suggestion shown in the prompt.

    Returns:
        The API version entered by the user.
    """
    def _require_text(answer: str):
        # questionary convention: True means valid, a string is the error shown.
        return len(answer.strip()) > 0 or "API version is required."

    prompt = questionary.text(
        "Enter Azure OpenAI API Version:",
        default=default,
        validate=_require_text,
    )
    return prompt.ask()
def ask_azure_deployment_name(label: str, default: str) -> str:
    """Prompt interactively for an Azure OpenAI deployment name.

    Args:
        label: Human-readable role of the deployment (e.g. "Deep-Thinking LLM"),
            interpolated into the question text.
        default: Pre-filled deployment name shown in the prompt.

    Returns:
        The deployment name entered by the user.
    """
    def _require_text(answer: str):
        # questionary convention: True means valid, a string is the error shown.
        return len(answer.strip()) > 0 or "Deployment name is required."

    prompt = questionary.text(
        f"Enter Azure deployment for {label}:",
        default=default,
        validate=_require_text,
    )
    return prompt.ask()

View File

@ -14,7 +14,10 @@ DEFAULT_CONFIG = {
"backend_url": "https://api.openai.com/v1",
# Provider-specific thinking configuration
"google_thinking_level": None, # "high", "minimal", etc.
"openai_reasoning_effort": None, # "medium", "high", "low"
"llm_reasoning_effort": None, # "medium", "high", "low"
"openai_reasoning_effort": None, # Legacy key (backward compatible fallback)
"azure_endpoint": None, # e.g. https://<resource>.openai.azure.com/
"azure_api_version": "2024-10-21",
# Debate and discussion settings
"max_debate_rounds": 1,
"max_risk_discuss_rounds": 1,

View File

@ -140,8 +140,21 @@ class TradingAgentsGraph:
if thinking_level:
kwargs["thinking_level"] = thinking_level
elif provider == "openai":
reasoning_effort = self.config.get("openai_reasoning_effort")
elif provider == "azure":
azure_endpoint = self.config.get("azure_endpoint") or self.config.get("backend_url")
if azure_endpoint:
kwargs["azure_endpoint"] = azure_endpoint
api_version = self.config.get("azure_api_version")
if api_version:
kwargs["api_version"] = api_version
if provider in ("openai", "azure"):
# Backward compatibility: prefer llm_reasoning_effort, then legacy key.
reasoning_effort = (
self.config.get("llm_reasoning_effort")
or self.config.get("openai_reasoning_effort")
)
if reasoning_effort:
kwargs["reasoning_effort"] = reasoning_effort

View File

@ -0,0 +1,47 @@
from typing import Any, Optional
from langchain_openai import AzureChatOpenAI
from .base_client import BaseLLMClient
from .validators import validate_model
class AzureOpenAIClient(BaseLLMClient):
    """Client for Azure OpenAI models via AzureChatOpenAI.

    Azure addresses models by *deployment name*, so ``model`` is forwarded
    both as ``azure_deployment`` and ``model`` when building the LLM.

    Note: the previous explicit ``__init__`` override only delegated to
    ``super().__init__`` with the identical ``(model, base_url, **kwargs)``
    signature, so it has been removed; construction behavior is unchanged.
    """

    def get_llm(self) -> Any:
        """Return a configured AzureChatOpenAI instance.

        Endpoint resolution order: explicit ``azure_endpoint`` kwarg first,
        then ``base_url``; if neither is set, AzureChatOpenAI resolves its
        own environment-variable fallbacks.
        """
        llm_kwargs: dict[str, Any] = {
            "azure_deployment": self.model,
            "model": self.model,
        }
        azure_endpoint = self.kwargs.get("azure_endpoint") or self.base_url
        if azure_endpoint:
            llm_kwargs["azure_endpoint"] = azure_endpoint
        # Forward only optional settings that were explicitly provided (and
        # non-None); everything omitted is left for AzureChatOpenAI to
        # resolve from its defaults or environment.
        for key in (
            "api_version",
            "api_key",
            "timeout",
            "max_retries",
            "reasoning_effort",
            "callbacks",
        ):
            value = self.kwargs.get(key)
            if value is not None:
                llm_kwargs[key] = value
        return AzureChatOpenAI(**llm_kwargs)

    def validate_model(self) -> bool:
        """Validate the deployment name for Azure OpenAI.

        Azure deployment names are user-chosen, so the shared validator
        accepts any non-registry name for the "azure" provider.
        """
        return validate_model("azure", self.model)

View File

@ -2,6 +2,7 @@ from typing import Optional
from .base_client import BaseLLMClient
from .openai_client import OpenAIClient
from .azure_openai_client import AzureOpenAIClient
from .anthropic_client import AnthropicClient
from .google_client import GoogleClient
@ -15,7 +16,7 @@ def create_llm_client(
"""Create an LLM client for the specified provider.
Args:
provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter)
provider: LLM provider (openai, azure, anthropic, google, xai, ollama, openrouter)
model: Model name/identifier
base_url: Optional base URL for API endpoint
**kwargs: Additional provider-specific arguments
@ -31,6 +32,9 @@ def create_llm_client(
if provider_lower in ("openai", "ollama", "openrouter"):
return OpenAIClient(model, base_url, provider=provider_lower, **kwargs)
if provider_lower == "azure":
return AzureOpenAIClient(model, base_url, **kwargs)
if provider_lower == "xai":
return OpenAIClient(model, base_url, provider="xai", **kwargs)

View File

@ -69,11 +69,11 @@ VALID_MODELS = {
def validate_model(provider: str, model: str) -> bool:
"""Check if model name is valid for the given provider.
For ollama, openrouter - any model is accepted.
For ollama, openrouter, azure - any model/deployment is accepted.
"""
provider_lower = provider.lower()
if provider_lower in ("ollama", "openrouter"):
if provider_lower in ("ollama", "openrouter", "azure"):
return True
if provider_lower not in VALID_MODELS: