"""Anthropic Claude client with normalized content output."""
from typing import Any, Optional
from langchain_anthropic import ChatAnthropic
from .base_client import BaseLLMClient, normalize_content
from .validators import validate_model
_PASSTHROUGH_KWARGS = (
|
|
"timeout", "max_retries", "api_key", "max_tokens",
|
|
"callbacks", "http_client", "http_async_client", "effort",
|
|
)
class NormalizedChatAnthropic(ChatAnthropic):
    """ChatAnthropic with normalized content output.

    Claude models with extended thinking or tool use return content as a
    list of typed blocks. This normalizes to string for consistent
    downstream handling.
    """

    def invoke(self, input, config=None, **kwargs):
        """Invoke the model and normalize the response content to a string."""
        return normalize_content(super().invoke(input, config, **kwargs))

    async def ainvoke(self, input, config=None, **kwargs):
        """Async invoke with the same content normalization as ``invoke``.

        Without this override, async callers would receive the raw
        block-list content that the sync path normalizes away.
        """
        return normalize_content(await super().ainvoke(input, config, **kwargs))
class AnthropicClient(BaseLLMClient):
    """Client for Anthropic Claude models."""

    def __init__(self, model: str, base_url: Optional[str] = None, **kwargs):
        """Store the model name, optional base URL, and extra options."""
        super().__init__(model, base_url, **kwargs)

    def get_llm(self) -> Any:
        """Return configured ChatAnthropic instance."""
        # Forward only the recognized passthrough options that were supplied.
        passthrough = {
            name: value
            for name, value in self.kwargs.items()
            if name in _PASSTHROUGH_KWARGS
        }
        return NormalizedChatAnthropic(model=self.model, **passthrough)

    def validate_model(self) -> bool:
        """Validate model for Anthropic."""
        return validate_model("anthropic", self.model)