feat: add Anthropic effort level support for Claude models

Add effort parameter (high/medium/low) for Claude 4.5+ and 4.6 models,
consistent with OpenAI reasoning_effort and Google thinking_level.
Also add content normalization for Anthropic responses.
This commit is contained in:
Yijia-Xiao 2026-03-22 21:57:05 +00:00
parent 77755f0431
commit bd9b1e5efa
5 changed files with 57 additions and 3 deletions

View File

@@ -556,6 +556,7 @@ def get_user_selections():
# Step 7: Provider-specific thinking configuration
thinking_level = None
reasoning_effort = None
anthropic_effort = None
provider_lower = selected_llm_provider.lower()
if provider_lower == "google":
@@ -574,6 +575,14 @@ def get_user_selections():
)
)
reasoning_effort = ask_openai_reasoning_effort()
elif provider_lower == "anthropic":
console.print(
create_question_box(
"Step 7: Effort Level",
"Configure Claude effort level"
)
)
anthropic_effort = ask_anthropic_effort()
return {
"ticker": selected_ticker,
@@ -586,6 +595,7 @@ def get_user_selections():
"deep_thinker": selected_deep_thinker,
"google_thinking_level": thinking_level,
"openai_reasoning_effort": reasoning_effort,
"anthropic_effort": anthropic_effort,
}
@@ -911,6 +921,7 @@ def run_analysis():
# Provider-specific thinking configuration
config["google_thinking_level"] = selections.get("google_thinking_level")
config["openai_reasoning_effort"] = selections.get("openai_reasoning_effort")
config["anthropic_effort"] = selections.get("anthropic_effort")
# Create stats callback handler for tracking LLM/tool calls
stats_handler = StatsCallbackHandler()

View File

@@ -311,6 +311,26 @@ def ask_openai_reasoning_effort() -> str:
).ask()
def ask_anthropic_effort() -> str | None:
    """Prompt the user to pick an Anthropic effort level.

    The selected value ("high", "medium", or "low") controls token usage
    and response thoroughness on Claude 4.5+ and 4.6 models. Returns None
    if the prompt is cancelled (questionary's behaviour on Ctrl-C).
    """
    # Shared style string so all three highlight roles render identically.
    cyan_style = "fg:cyan noinherit"
    effort_choices = [
        questionary.Choice("High (recommended)", "high"),
        questionary.Choice("Medium (balanced)", "medium"),
        questionary.Choice("Low (faster, cheaper)", "low"),
    ]
    prompt = questionary.select(
        "Select Effort Level:",
        choices=effort_choices,
        style=questionary.Style(
            [
                ("selected", cyan_style),
                ("highlighted", cyan_style),
                ("pointer", cyan_style),
            ]
        ),
    )
    return prompt.ask()
def ask_gemini_thinking_config() -> str | None:
"""Ask for Gemini thinking configuration.

View File

@@ -15,6 +15,7 @@ DEFAULT_CONFIG = {
# Provider-specific thinking configuration
"google_thinking_level": None, # "high", "minimal", etc.
"openai_reasoning_effort": None, # "medium", "high", "low"
"anthropic_effort": None, # "high", "medium", "low"
# Debate and discussion settings
"max_debate_rounds": 1,
"max_risk_discuss_rounds": 1,

View File

@@ -148,6 +148,11 @@ class TradingAgentsGraph:
if reasoning_effort:
kwargs["reasoning_effort"] = reasoning_effort
elif provider == "anthropic":
effort = self.config.get("anthropic_effort")
if effort:
kwargs["effort"] = effort
return kwargs
def _create_tool_nodes(self) -> Dict[str, ToolNode]:

View File

@@ -2,9 +2,26 @@ from typing import Any, Optional
from langchain_anthropic import ChatAnthropic
from .base_client import BaseLLMClient
from .base_client import BaseLLMClient, normalize_content
from .validators import validate_model
_PASSTHROUGH_KWARGS = (
"timeout", "max_retries", "api_key", "max_tokens",
"callbacks", "http_client", "http_async_client", "effort",
)
class NormalizedChatAnthropic(ChatAnthropic):
    """ChatAnthropic variant that flattens structured content output.

    Claude responses produced with extended thinking or tool use carry
    their content as a list of typed blocks; passing the result through
    ``normalize_content`` collapses it to a string so downstream callers
    always see one shape.

    NOTE(review): only the synchronous ``invoke`` path is normalized here —
    ``ainvoke``/streaming callers would still receive raw block lists;
    confirm whether those paths need the same treatment.
    """

    def invoke(self, input, config=None, **kwargs):
        raw_response = super().invoke(input, config, **kwargs)
        return normalize_content(raw_response)
class AnthropicClient(BaseLLMClient):
"""Client for Anthropic Claude models."""
@@ -16,11 +33,11 @@ class AnthropicClient(BaseLLMClient):
"""Return configured ChatAnthropic instance."""
llm_kwargs = {"model": self.model}
for key in ("timeout", "max_retries", "api_key", "max_tokens", "callbacks", "http_client", "http_async_client"):
for key in _PASSTHROUGH_KWARGS:
if key in self.kwargs:
llm_kwargs[key] = self.kwargs[key]
return ChatAnthropic(**llm_kwargs)
return NormalizedChatAnthropic(**llm_kwargs)
def validate_model(self) -> bool:
"""Validate model for Anthropic."""