fix code review: rename openai_reasoning_effort to llm_reasoning_effort (keep backward-compatible fallbacks)

kazuma 2026-02-04 21:30:24 +09:00
parent 051a2c052a
commit ae25b1601a
5 changed files with 33 additions and 31 deletions

View File

@@ -555,7 +555,7 @@ def get_user_selections():
     # Step 7: Provider-specific thinking configuration
     thinking_level = None
-    reasoning_effort = None
+    llm_reasoning_effort = None
     azure_endpoint = None
     azure_api_version = None
@@ -572,10 +572,10 @@ def get_user_selections():
         console.print(
             create_question_box(
                 "Step 7: Reasoning Effort",
-                "Configure OpenAI reasoning effort level"
+                "Configure reasoning effort level"
             )
         )
-        reasoning_effort = ask_openai_reasoning_effort()
+        llm_reasoning_effort = ask_reasoning_effort()
     elif provider_lower == "azure":
         console.print(
             create_question_box(
@@ -594,7 +594,7 @@ def get_user_selections():
             selected_deep_thinker,
         )
         backend_url = azure_endpoint
-        reasoning_effort = ask_openai_reasoning_effort()
+        llm_reasoning_effort = ask_reasoning_effort()
     return {
         "ticker": selected_ticker,
@@ -606,7 +606,7 @@ def get_user_selections():
         "shallow_thinker": selected_shallow_thinker,
         "deep_thinker": selected_deep_thinker,
         "google_thinking_level": thinking_level,
-        "openai_reasoning_effort": reasoning_effort,
+        "llm_reasoning_effort": llm_reasoning_effort,
         "azure_endpoint": azure_endpoint,
         "azure_api_version": azure_api_version,
     }
@@ -933,7 +933,8 @@ def run_analysis():
     config["llm_provider"] = selections["llm_provider"].lower()
     # Provider-specific thinking configuration
     config["google_thinking_level"] = selections.get("google_thinking_level")
-    config["openai_reasoning_effort"] = selections.get("openai_reasoning_effort")
+    config["llm_reasoning_effort"] = selections.get("llm_reasoning_effort")
+    config["openai_reasoning_effort"] = selections.get("llm_reasoning_effort")
     config["azure_endpoint"] = selections.get("azure_endpoint")
     config["azure_api_version"] = selections.get("azure_api_version")
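To make the mirroring explicit: a minimal standalone sketch (the selections values below are invented for illustration) showing that older readers of config["openai_reasoning_effort"] still see the chosen effort while new code reads config["llm_reasoning_effort"]:

# Hypothetical selections dict; the key names follow the diff above.
selections = {"llm_provider": "OpenAI", "llm_reasoning_effort": "high"}

config = {}
config["llm_provider"] = selections["llm_provider"].lower()
# New canonical key plus the legacy key, both filled from the same selection.
config["llm_reasoning_effort"] = selections.get("llm_reasoning_effort")
config["openai_reasoning_effort"] = selections.get("llm_reasoning_effort")

assert config["llm_reasoning_effort"] == config["openai_reasoning_effort"] == "high"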

View File

@@ -306,8 +306,8 @@ def select_llm_provider() -> tuple[str, str]:
     return display_name, url
 
 
-def ask_openai_reasoning_effort() -> str:
-    """Ask for OpenAI reasoning effort level."""
+def ask_reasoning_effort() -> str:
+    """Ask for reasoning effort level for supported providers."""
     choices = [
         questionary.Choice("Medium (Default)", "medium"),
         questionary.Choice("High (More thorough)", "high"),
@@ -324,6 +324,11 @@ def ask_openai_reasoning_effort() -> str:
     ).ask()
 
 
+def ask_openai_reasoning_effort() -> str:
+    """Backward-compatible alias."""
+    return ask_reasoning_effort()
+
+
 def ask_gemini_thinking_config() -> str | None:
     """Ask for Gemini thinking configuration.

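For reference, the alias pattern in isolation; this is a standalone sketch with the questionary prompt stubbed out, not the repo's code:

# Standalone sketch of the backward-compatible alias pattern.
def ask_reasoning_effort() -> str:
    # The real CLI shows a questionary.select(...) prompt here; hard-coded for illustration.
    return "medium"


def ask_openai_reasoning_effort() -> str:
    """Backward-compatible alias for callers that still import the old name."""
    return ask_reasoning_effort()


assert ask_openai_reasoning_effort() == ask_reasoning_effort() == "medium"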
View File

@@ -14,7 +14,8 @@ DEFAULT_CONFIG = {
     "backend_url": "https://api.openai.com/v1",
     # Provider-specific thinking configuration
     "google_thinking_level": None,  # "high", "minimal", etc.
-    "openai_reasoning_effort": None,  # "medium", "high", "low"
+    "llm_reasoning_effort": None,  # "medium", "high", "low"
+    "openai_reasoning_effort": None,  # Legacy key (backward compatible fallback)
     "azure_endpoint": None,  # e.g. https://<resource>.openai.azure.com/
     "azure_api_version": "2024-10-21",
     # Debate and discussion settings
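A sketch of how a consumer of these defaults might resolve the effective effort; effective_reasoning_effort is an illustrative helper (not part of this repo) that applies the same new-key-then-legacy-key order used in the graph change below:

# Illustrative defaults; only the two effort keys from the diff are shown.
DEFAULTS = {
    "llm_reasoning_effort": None,      # new canonical key
    "openai_reasoning_effort": None,   # legacy key kept for existing user configs
}

def effective_reasoning_effort(config: dict) -> str | None:
    # Prefer the new key, then fall back to the legacy one.
    return config.get("llm_reasoning_effort") or config.get("openai_reasoning_effort")

old_style = {**DEFAULTS, "openai_reasoning_effort": "high"}  # pre-rename user config
new_style = {**DEFAULTS, "llm_reasoning_effort": "low"}      # post-rename user config
assert effective_reasoning_effort(old_style) == "high"
assert effective_reasoning_effort(new_style) == "low"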

View File

@@ -140,10 +140,6 @@ class TradingAgentsGraph:
             if thinking_level:
                 kwargs["thinking_level"] = thinking_level
-        elif provider == "openai":
-            reasoning_effort = self.config.get("openai_reasoning_effort")
-            if reasoning_effort:
-                kwargs["reasoning_effort"] = reasoning_effort
         elif provider == "azure":
             azure_endpoint = self.config.get("azure_endpoint") or self.config.get("backend_url")
             if azure_endpoint:
@@ -153,7 +149,12 @@ class TradingAgentsGraph:
             if api_version:
                 kwargs["api_version"] = api_version
-            reasoning_effort = self.config.get("openai_reasoning_effort")
+        if provider in ("openai", "azure"):
+            # Backward compatibility: prefer llm_reasoning_effort, then legacy key.
+            reasoning_effort = (
+                self.config.get("llm_reasoning_effort")
+                or self.config.get("openai_reasoning_effort")
+            )
             if reasoning_effort:
                 kwargs["reasoning_effort"] = reasoning_effort
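The resolution order in this hunk, restated as a runnable sketch; build_llm_kwargs is a simplified stand-in for the graph logic above, not the repo's actual method:

def build_llm_kwargs(provider: str, config: dict) -> dict:
    # Simplified stand-in; Azure endpoint handling is reduced to the parts visible in this diff.
    kwargs: dict = {}
    if provider == "azure":
        endpoint = config.get("azure_endpoint") or config.get("backend_url")
        if endpoint:
            kwargs["azure_endpoint"] = endpoint
    if provider in ("openai", "azure"):
        # Prefer the new key, then the legacy key, as the diff does.
        effort = config.get("llm_reasoning_effort") or config.get("openai_reasoning_effort")
        if effort:
            kwargs["reasoning_effort"] = effort
    return kwargs

# A legacy config that only sets the old key still produces reasoning_effort.
assert build_llm_kwargs("openai", {"openai_reasoning_effort": "high"}) == {"reasoning_effort": "high"}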

View File

@@ -1,4 +1,3 @@
-import os
 from typing import Any, Optional
 
 from langchain_openai import AzureChatOpenAI
@@ -20,30 +19,25 @@ class AzureOpenAIClient(BaseLLMClient):
     def get_llm(self) -> Any:
         """Return configured AzureChatOpenAI instance."""
-        azure_endpoint = (
-            self.kwargs.get("azure_endpoint")
-            or self.base_url
-            or os.environ.get("AZURE_OPENAI_ENDPOINT")
-        )
-        api_version = self.kwargs.get("api_version") or os.environ.get(
-            "AZURE_OPENAI_API_VERSION",
-            "2024-10-21",
-        )
-        api_key = self.kwargs.get("api_key") or os.environ.get("AZURE_OPENAI_API_KEY")
         llm_kwargs = {
             "azure_deployment": self.model,
             "model": self.model,
-            "api_version": api_version,
         }
+        # Prefer explicit config sources and let AzureChatOpenAI resolve env fallbacks.
+        azure_endpoint = self.kwargs.get("azure_endpoint") or self.base_url
         if azure_endpoint:
             llm_kwargs["azure_endpoint"] = azure_endpoint
-        if api_key:
-            llm_kwargs["api_key"] = api_key
-        for key in ("timeout", "max_retries", "reasoning_effort", "callbacks"):
-            if key in self.kwargs:
+        for key in (
+            "api_version",
+            "api_key",
+            "timeout",
+            "max_retries",
+            "reasoning_effort",
+            "callbacks",
+        ):
+            if key in self.kwargs and self.kwargs[key] is not None:
                 llm_kwargs[key] = self.kwargs[key]
         return AzureChatOpenAI(**llm_kwargs)
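A standalone sketch of the new forwarding behaviour; collect_llm_kwargs is an illustrative helper (not part of the repo) and the model and endpoint values are made up. None-valued kwargs are dropped so AzureChatOpenAI can apply its own defaults and environment fallbacks:

def collect_llm_kwargs(model: str, base_url: str | None, extra: dict) -> dict:
    # Mirrors the forwarding logic above without constructing AzureChatOpenAI.
    llm_kwargs = {"azure_deployment": model, "model": model}
    azure_endpoint = extra.get("azure_endpoint") or base_url
    if azure_endpoint:
        llm_kwargs["azure_endpoint"] = azure_endpoint
    for key in ("api_version", "api_key", "timeout", "max_retries", "reasoning_effort", "callbacks"):
        # Skip missing or None values so the library can resolve them itself.
        if key in extra and extra[key] is not None:
            llm_kwargs[key] = extra[key]
    return llm_kwargs

kwargs = collect_llm_kwargs(
    "gpt-4o",  # example deployment/model name
    None,
    {"azure_endpoint": "https://example.openai.azure.com/", "api_version": None},
)
assert "api_version" not in kwargs  # left for AzureChatOpenAI to resolve on its own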