diff --git a/cli/main.py b/cli/main.py index a706f11d..f6e2c44a 100644 --- a/cli/main.py +++ b/cli/main.py @@ -556,6 +556,7 @@ def get_user_selections(): # Step 7: Provider-specific thinking configuration thinking_level = None reasoning_effort = None + anthropic_effort = None provider_lower = selected_llm_provider.lower() if provider_lower == "google": @@ -574,6 +575,14 @@ def get_user_selections(): ) ) reasoning_effort = ask_openai_reasoning_effort() + elif provider_lower == "anthropic": + console.print( + create_question_box( + "Step 7: Effort Level", + "Configure Claude effort level" + ) + ) + anthropic_effort = ask_anthropic_effort() return { "ticker": selected_ticker, @@ -586,6 +595,7 @@ def get_user_selections(): "deep_thinker": selected_deep_thinker, "google_thinking_level": thinking_level, "openai_reasoning_effort": reasoning_effort, + "anthropic_effort": anthropic_effort, } @@ -911,6 +921,7 @@ def run_analysis(): # Provider-specific thinking configuration config["google_thinking_level"] = selections.get("google_thinking_level") config["openai_reasoning_effort"] = selections.get("openai_reasoning_effort") + config["anthropic_effort"] = selections.get("anthropic_effort") # Create stats callback handler for tracking LLM/tool calls stats_handler = StatsCallbackHandler() diff --git a/cli/utils.py b/cli/utils.py index 5a8ec16c..18efe1d6 100644 --- a/cli/utils.py +++ b/cli/utils.py @@ -311,6 +311,26 @@ def ask_openai_reasoning_effort() -> str: ).ask() +def ask_anthropic_effort() -> str | None: + """Ask for Anthropic effort level. + + Controls token usage and response thoroughness on Claude 4.5 and newer models.
+ """ + return questionary.select( + "Select Effort Level:", + choices=[ + questionary.Choice("High (recommended)", "high"), + questionary.Choice("Medium (balanced)", "medium"), + questionary.Choice("Low (faster, cheaper)", "low"), + ], + style=questionary.Style([ + ("selected", "fg:cyan noinherit"), + ("highlighted", "fg:cyan noinherit"), + ("pointer", "fg:cyan noinherit"), + ]), + ).ask() + + def ask_gemini_thinking_config() -> str | None: """Ask for Gemini thinking configuration. diff --git a/tradingagents/default_config.py b/tradingagents/default_config.py index ecf0dc29..898e1e1e 100644 --- a/tradingagents/default_config.py +++ b/tradingagents/default_config.py @@ -15,6 +15,7 @@ DEFAULT_CONFIG = { # Provider-specific thinking configuration "google_thinking_level": None, # "high", "minimal", etc. "openai_reasoning_effort": None, # "medium", "high", "low" + "anthropic_effort": None, # "high", "medium", "low" # Debate and discussion settings "max_debate_rounds": 1, "max_risk_discuss_rounds": 1, diff --git a/tradingagents/graph/trading_graph.py b/tradingagents/graph/trading_graph.py index c7ef0f98..306f7f38 100644 --- a/tradingagents/graph/trading_graph.py +++ b/tradingagents/graph/trading_graph.py @@ -148,6 +148,11 @@ class TradingAgentsGraph: if reasoning_effort: kwargs["reasoning_effort"] = reasoning_effort + elif provider == "anthropic": + effort = self.config.get("anthropic_effort") + if effort: + kwargs["effort"] = effort + return kwargs def _create_tool_nodes(self) -> Dict[str, ToolNode]: diff --git a/tradingagents/llm_clients/anthropic_client.py b/tradingagents/llm_clients/anthropic_client.py index 8539c752..2c1e5a67 100644 --- a/tradingagents/llm_clients/anthropic_client.py +++ b/tradingagents/llm_clients/anthropic_client.py @@ -2,9 +2,26 @@ from typing import Any, Optional from langchain_anthropic import ChatAnthropic -from .base_client import BaseLLMClient +from .base_client import BaseLLMClient, normalize_content from .validators import validate_model 
+_PASSTHROUGH_KWARGS = ( + "timeout", "max_retries", "api_key", "max_tokens", + "callbacks", "http_client", "http_async_client", "effort", +) + + +class NormalizedChatAnthropic(ChatAnthropic): + """ChatAnthropic with normalized content output. + + Claude models with extended thinking or tool use return content as a + list of typed blocks. This normalizes to string for consistent + downstream handling. + """ + + def invoke(self, input, config=None, **kwargs): + return normalize_content(super().invoke(input, config, **kwargs)) + class AnthropicClient(BaseLLMClient): """Client for Anthropic Claude models.""" @@ -16,11 +33,11 @@ class AnthropicClient(BaseLLMClient): """Return configured ChatAnthropic instance.""" llm_kwargs = {"model": self.model} - for key in ("timeout", "max_retries", "api_key", "max_tokens", "callbacks", "http_client", "http_async_client"): + for key in _PASSTHROUGH_KWARGS: if key in self.kwargs: llm_kwargs[key] = self.kwargs[key] - return ChatAnthropic(**llm_kwargs) + return NormalizedChatAnthropic(**llm_kwargs) def validate_model(self) -> bool: """Validate model for Anthropic."""