This commit is contained in:
Cloudbeer 2026-04-02 00:06:52 +09:00 committed by GitHub
commit 8989b95b57
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 63 additions and 1 deletions

View File

@ -13,6 +13,7 @@ dependencies = [
"backtrader>=1.9.78.123",
"langchain-anthropic>=0.3.15",
"langchain-experimental>=0.3.4",
"langchain-aws>=0.2.0",
"langchain-google-genai>=2.1.5",
"langchain-openai>=0.3.23",
"langgraph>=0.4.8",

View File

@ -1,4 +1,5 @@
# Public API re-exports for the LLM-client package.
from .base_client import BaseLLMClient
from .factory import create_llm_client
from .bedrock_client import BedrockClient
# NOTE(review): diff-rendering artifact — both the pre-change and post-change
# __all__ assignments appear below; only the second (adding "BedrockClient")
# is the current state of the file. The first is dead (immediately rebound).
__all__ = ["BaseLLMClient", "create_llm_client"]
__all__ = ["BaseLLMClient", "create_llm_client", "BedrockClient"]

View File

@ -0,0 +1,56 @@
from typing import Any, Optional
from langchain_aws import ChatBedrockConverse
from .base_client import BaseLLMClient
class BedrockClient(BaseLLMClient):
    """Client for Amazon Bedrock models.

    Supports any model available on Bedrock via IAM Role (no API key needed),
    including Claude, Amazon Nova, Kimi, Qwen, GLM, DeepSeek, MiniMax, and more.

    Authentication:
        Uses boto3 default credential chain: IAM Role (EC2/Lambda), environment
        variables (AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY), or ~/.aws/credentials.

    Model ID formats:
        - Cross-region inference profile (recommended):
          ``global.anthropic.claude-sonnet-4-6``
          ``eu.anthropic.claude-3-5-sonnet-20240620-v1:0``
        - Direct on-demand (us-east-1 default region only):
          ``amazon.nova-lite-v1:0``
          ``moonshotai.kimi-k2.5``
          ``qwen.qwen3-32b-v1:0``
          ``zai.glm-4.7-flash``
          ``deepseek.v3.2``

    Note:
        When specifying a non-default ``region_name``, use region-specific
        inference profile IDs (e.g. ``us-west-2.anthropic.claude-...``),
        as direct model IDs only support on-demand throughput in us-east-1.

    Example::

        config["llm_provider"] = "bedrock"
        config["deep_think_llm"] = "global.anthropic.claude-sonnet-4-6"
        config["quick_think_llm"] = "amazon.nova-micro-v1:0"
    """

    # NOTE: the redundant ``__init__`` override (which only delegated to
    # ``super().__init__`` with an identical signature) was removed; the
    # inherited BaseLLMClient constructor is used directly.

    # Constructor kwargs forwarded verbatim to ChatBedrockConverse; any
    # other entries in self.kwargs are deliberately ignored.
    _FORWARDED_KWARGS = ("region_name", "max_tokens", "callbacks", "timeout")

    def get_llm(self) -> Any:
        """Return a configured ``ChatBedrockConverse`` instance.

        Returns:
            A ``ChatBedrockConverse`` bound to ``self.model``, with any of
            the whitelisted optional kwargs (see ``_FORWARDED_KWARGS``)
            passed through from ``self.kwargs``.
        """
        llm_kwargs: dict = {"model_id": self.model}
        llm_kwargs.update(
            {k: self.kwargs[k] for k in self._FORWARDED_KWARGS if k in self.kwargs}
        )
        return ChatBedrockConverse(**llm_kwargs)

    def validate_model(self) -> bool:
        """Bedrock model IDs are dynamic; skip static validation.

        Returns:
            Always ``True`` — validity is determined by the Bedrock service
            at invocation time, not statically.
        """
        return True

View File

@ -4,6 +4,7 @@ from .base_client import BaseLLMClient
from .openai_client import OpenAIClient
from .anthropic_client import AnthropicClient
from .google_client import GoogleClient
from .bedrock_client import BedrockClient
def create_llm_client(
@ -46,4 +47,7 @@ def create_llm_client(
if provider_lower == "google":
return GoogleClient(model, base_url, **kwargs)
if provider_lower == "bedrock":
return BedrockClient(model, base_url, **kwargs)
raise ValueError(f"Unsupported LLM provider: {provider}")