From ae25b1601a8451ef48a323cb1ed30819e166e69b Mon Sep 17 00:00:00 2001 From: kazuma Date: Wed, 4 Feb 2026 21:30:24 +0900 Subject: [PATCH] fix code review --- cli/main.py | 13 +++++---- cli/utils.py | 9 ++++-- tradingagents/default_config.py | 3 +- tradingagents/graph/trading_graph.py | 11 ++++---- .../llm_clients/azure_openai_client.py | 28 ++++++++----------- 5 files changed, 33 insertions(+), 31 deletions(-) diff --git a/cli/main.py b/cli/main.py index ee5abcf5..877c6b1e 100644 --- a/cli/main.py +++ b/cli/main.py @@ -555,7 +555,7 @@ def get_user_selections(): # Step 7: Provider-specific thinking configuration thinking_level = None - reasoning_effort = None + llm_reasoning_effort = None azure_endpoint = None azure_api_version = None @@ -572,10 +572,10 @@ def get_user_selections(): console.print( create_question_box( "Step 7: Reasoning Effort", - "Configure OpenAI reasoning effort level" + "Configure reasoning effort level" ) ) - reasoning_effort = ask_openai_reasoning_effort() + llm_reasoning_effort = ask_reasoning_effort() elif provider_lower == "azure": console.print( create_question_box( @@ -594,7 +594,7 @@ def get_user_selections(): selected_deep_thinker, ) backend_url = azure_endpoint - reasoning_effort = ask_openai_reasoning_effort() + llm_reasoning_effort = ask_reasoning_effort() return { "ticker": selected_ticker, @@ -606,7 +606,7 @@ def get_user_selections(): "shallow_thinker": selected_shallow_thinker, "deep_thinker": selected_deep_thinker, "google_thinking_level": thinking_level, - "openai_reasoning_effort": reasoning_effort, + "llm_reasoning_effort": llm_reasoning_effort, "azure_endpoint": azure_endpoint, "azure_api_version": azure_api_version, } @@ -933,7 +933,8 @@ def run_analysis(): config["llm_provider"] = selections["llm_provider"].lower() # Provider-specific thinking configuration config["google_thinking_level"] = selections.get("google_thinking_level") - config["openai_reasoning_effort"] = selections.get("openai_reasoning_effort") + 
config["llm_reasoning_effort"] = selections.get("llm_reasoning_effort") + config["openai_reasoning_effort"] = selections.get("llm_reasoning_effort") config["azure_endpoint"] = selections.get("azure_endpoint") config["azure_api_version"] = selections.get("azure_api_version") diff --git a/cli/utils.py b/cli/utils.py index 4e01f3b5..6d7336ed 100644 --- a/cli/utils.py +++ b/cli/utils.py @@ -306,8 +306,8 @@ def select_llm_provider() -> tuple[str, str]: return display_name, url -def ask_openai_reasoning_effort() -> str: - """Ask for OpenAI reasoning effort level.""" +def ask_reasoning_effort() -> str: + """Ask for reasoning effort level for supported providers.""" choices = [ questionary.Choice("Medium (Default)", "medium"), questionary.Choice("High (More thorough)", "high"), @@ -324,6 +324,11 @@ def ask_openai_reasoning_effort() -> str: ).ask() +def ask_openai_reasoning_effort() -> str: + """Backward-compatible alias.""" + return ask_reasoning_effort() + + def ask_gemini_thinking_config() -> str | None: """Ask for Gemini thinking configuration. diff --git a/tradingagents/default_config.py b/tradingagents/default_config.py index 70c7797e..4dc591c4 100644 --- a/tradingagents/default_config.py +++ b/tradingagents/default_config.py @@ -14,7 +14,8 @@ DEFAULT_CONFIG = { "backend_url": "https://api.openai.com/v1", # Provider-specific thinking configuration "google_thinking_level": None, # "high", "minimal", etc. - "openai_reasoning_effort": None, # "medium", "high", "low" + "llm_reasoning_effort": None, # "medium", "high", "low" + "openai_reasoning_effort": None, # Legacy key (backward compatible fallback) "azure_endpoint": None, # e.g. 
https://<resource>.openai.azure.com/ "azure_api_version": "2024-10-21", # Debate and discussion settings diff --git a/tradingagents/graph/trading_graph.py b/tradingagents/graph/trading_graph.py index 17a771ec..eee8343b 100644 --- a/tradingagents/graph/trading_graph.py +++ b/tradingagents/graph/trading_graph.py @@ -140,10 +140,6 @@ class TradingAgentsGraph: if thinking_level: kwargs["thinking_level"] = thinking_level - elif provider == "openai": - reasoning_effort = self.config.get("openai_reasoning_effort") - if reasoning_effort: - kwargs["reasoning_effort"] = reasoning_effort elif provider == "azure": azure_endpoint = self.config.get("azure_endpoint") or self.config.get("backend_url") if azure_endpoint: @@ -153,7 +149,12 @@ class TradingAgentsGraph: if api_version: kwargs["api_version"] = api_version - reasoning_effort = self.config.get("openai_reasoning_effort") + if provider in ("openai", "azure"): + # Backward compatibility: prefer llm_reasoning_effort, then legacy key. + reasoning_effort = ( + self.config.get("llm_reasoning_effort") + or self.config.get("openai_reasoning_effort") + ) if reasoning_effort: kwargs["reasoning_effort"] = reasoning_effort diff --git a/tradingagents/llm_clients/azure_openai_client.py b/tradingagents/llm_clients/azure_openai_client.py index cf5a3802..16304c71 100644 --- a/tradingagents/llm_clients/azure_openai_client.py +++ b/tradingagents/llm_clients/azure_openai_client.py @@ -1,4 +1,3 @@ -import os from typing import Any, Optional from langchain_openai import AzureChatOpenAI @@ -20,30 +19,25 @@ class AzureOpenAIClient(BaseLLMClient): def get_llm(self) -> Any: """Return configured AzureChatOpenAI instance.""" - azure_endpoint = ( - self.kwargs.get("azure_endpoint") - or self.base_url - or os.environ.get("AZURE_OPENAI_ENDPOINT") - ) - api_version = self.kwargs.get("api_version") or os.environ.get( - "AZURE_OPENAI_API_VERSION", - "2024-10-21", - ) - api_key = self.kwargs.get("api_key") or os.environ.get("AZURE_OPENAI_API_KEY") - llm_kwargs = { 
"azure_deployment": self.model, "model": self.model, - "api_version": api_version, } + # Prefer explicit config sources and let AzureChatOpenAI resolve env fallbacks. + azure_endpoint = self.kwargs.get("azure_endpoint") or self.base_url if azure_endpoint: llm_kwargs["azure_endpoint"] = azure_endpoint - if api_key: - llm_kwargs["api_key"] = api_key - for key in ("timeout", "max_retries", "reasoning_effort", "callbacks"): - if key in self.kwargs: + for key in ( + "api_version", + "api_key", + "timeout", + "max_retries", + "reasoning_effort", + "callbacks", + ): + if key in self.kwargs and self.kwargs[key] is not None: llm_kwargs[key] = self.kwargs[key] return AzureChatOpenAI(**llm_kwargs)