From 76876f8cc59961f53c7ce862893145f16d4db607 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michelle=20=28=E8=9C=9C=E9=9B=AA=29?= Date: Sun, 22 Mar 2026 13:33:18 +0800 Subject: [PATCH 1/3] feat: add Amazon Bedrock provider support Add BedrockClient using langchain-aws ChatBedrockConverse, enabling TradingAgents to use Bedrock-hosted models (Claude, Kimi, Qwen, GLM, etc.) via IAM Role authentication without API keys. Usage: config['llm_provider'] = 'bedrock' config['deep_think_llm'] = 'us.anthropic.claude-sonnet-4-5-20251001-v1:0' config['quick_think_llm'] = 'us.amazon.nova-lite-v1:0' Requires: langchain-aws>=0.2.0 --- pyproject.toml | 1 + tradingagents/llm_clients/bedrock_client.py | 32 +++++++++++++++++++++ tradingagents/llm_clients/factory.py | 4 +++ 3 files changed, 37 insertions(+) create mode 100644 tradingagents/llm_clients/bedrock_client.py diff --git a/pyproject.toml b/pyproject.toml index de27a2b9..a0444655 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,6 +13,7 @@ dependencies = [ "backtrader>=1.9.78.123", "langchain-anthropic>=0.3.15", "langchain-experimental>=0.3.4", + "langchain-aws>=0.2.0", "langchain-google-genai>=2.1.5", "langchain-openai>=0.3.23", "langgraph>=0.4.8", diff --git a/tradingagents/llm_clients/bedrock_client.py b/tradingagents/llm_clients/bedrock_client.py new file mode 100644 index 00000000..dd59d9a8 --- /dev/null +++ b/tradingagents/llm_clients/bedrock_client.py @@ -0,0 +1,32 @@ +from typing import Any, Optional + +from langchain_aws import ChatBedrockConverse + +from .base_client import BaseLLMClient +from .validators import validate_model + + +class BedrockClient(BaseLLMClient): + """Client for Amazon Bedrock models (Claude, Kimi, Qwen, GLM, etc.).""" + + def __init__(self, model: str, base_url: Optional[str] = None, **kwargs): + super().__init__(model, base_url, **kwargs) + + def get_llm(self) -> Any: + """Return configured ChatBedrockConverse instance.""" + llm_kwargs = {"model_id": self.model} + + if "region_name" in 
self.kwargs: + llm_kwargs["region_name"] = self.kwargs["region_name"] + if "max_tokens" in self.kwargs: + llm_kwargs["max_tokens"] = self.kwargs["max_tokens"] + if "callbacks" in self.kwargs: + llm_kwargs["callbacks"] = self.kwargs["callbacks"] + if "timeout" in self.kwargs: + llm_kwargs["timeout"] = self.kwargs["timeout"] + + return ChatBedrockConverse(**llm_kwargs) + + def validate_model(self) -> bool: + """Validate model for Bedrock (pass-through, model IDs are flexible).""" + return True diff --git a/tradingagents/llm_clients/factory.py b/tradingagents/llm_clients/factory.py index 93c2a7d3..24109640 100644 --- a/tradingagents/llm_clients/factory.py +++ b/tradingagents/llm_clients/factory.py @@ -4,6 +4,7 @@ from .base_client import BaseLLMClient from .openai_client import OpenAIClient from .anthropic_client import AnthropicClient from .google_client import GoogleClient +from .bedrock_client import BedrockClient def create_llm_client( @@ -46,4 +47,7 @@ def create_llm_client( if provider_lower == "google": return GoogleClient(model, base_url, **kwargs) + if provider_lower == "bedrock": + return BedrockClient(model, base_url, **kwargs) + raise ValueError(f"Unsupported LLM provider: {provider}") From d10f2fe5f0a0579390d9b3aed84db8f145e89320 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michelle=20=28=E8=9C=9C=E9=9B=AA=29?= Date: Sun, 22 Mar 2026 13:38:43 +0800 Subject: [PATCH 2/3] refactor: improve BedrockClient docs and export from __init__ - Add comprehensive docstring with model ID formats and auth notes - Document region_name behavior with inference profiles - Export BedrockClient from llm_clients package --- tradingagents/llm_clients/__init__.py | 3 +- tradingagents/llm_clients/bedrock_client.py | 48 +++++++++++++++------ 2 files changed, 38 insertions(+), 13 deletions(-) diff --git a/tradingagents/llm_clients/__init__.py b/tradingagents/llm_clients/__init__.py index e528eabe..b1f32afc 100644 --- a/tradingagents/llm_clients/__init__.py +++ 
b/tradingagents/llm_clients/__init__.py @@ -1,4 +1,5 @@ from .base_client import BaseLLMClient from .factory import create_llm_client +from .bedrock_client import BedrockClient -__all__ = ["BaseLLMClient", "create_llm_client"] +__all__ = ["BaseLLMClient", "create_llm_client", "BedrockClient"] diff --git a/tradingagents/llm_clients/bedrock_client.py b/tradingagents/llm_clients/bedrock_client.py index dd59d9a8..268582b3 100644 --- a/tradingagents/llm_clients/bedrock_client.py +++ b/tradingagents/llm_clients/bedrock_client.py @@ -3,30 +3,54 @@ from typing import Any, Optional from langchain_aws import ChatBedrockConverse from .base_client import BaseLLMClient -from .validators import validate_model class BedrockClient(BaseLLMClient): - """Client for Amazon Bedrock models (Claude, Kimi, Qwen, GLM, etc.).""" + """Client for Amazon Bedrock models. + + Supports any model available on Bedrock via IAM Role (no API key needed), + including Claude, Amazon Nova, Kimi, Qwen, GLM, DeepSeek, MiniMax, and more. + + Authentication: + Uses boto3 default credential chain: IAM Role (EC2/Lambda), environment + variables (AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY), or ~/.aws/credentials. + + Model ID formats: + - Cross-region inference profile (recommended): + ``us.anthropic.claude-haiku-4-5-20251001-v1:0`` + ``eu.anthropic.claude-3-5-sonnet-20240620-v1:0`` + - Direct on-demand (us-east-1 default region only): + ``amazon.nova-lite-v1:0`` + ``moonshotai.kimi-k2.5`` + ``qwen.qwen3-32b-v1:0`` + ``zai.glm-4.7-flash`` + ``deepseek.v3.2`` + + Note: + When specifying a non-default ``region_name``, use a geography-prefixed + inference profile ID (e.g. ``eu.anthropic.claude-...`` for EU regions), + as direct model IDs only support on-demand throughput in us-east-1. 
+ + Example:: + + config["llm_provider"] = "bedrock" + config["deep_think_llm"] = "us.anthropic.claude-haiku-4-5-20251001-v1:0" + config["quick_think_llm"] = "amazon.nova-micro-v1:0" + """ def __init__(self, model: str, base_url: Optional[str] = None, **kwargs): super().__init__(model, base_url, **kwargs) def get_llm(self) -> Any: """Return configured ChatBedrockConverse instance.""" - llm_kwargs = {"model_id": self.model} + llm_kwargs: dict = {"model_id": self.model} - if "region_name" in self.kwargs: - llm_kwargs["region_name"] = self.kwargs["region_name"] - if "max_tokens" in self.kwargs: - llm_kwargs["max_tokens"] = self.kwargs["max_tokens"] - if "callbacks" in self.kwargs: - llm_kwargs["callbacks"] = self.kwargs["callbacks"] - if "timeout" in self.kwargs: - llm_kwargs["timeout"] = self.kwargs["timeout"] + for key in ("region_name", "max_tokens", "callbacks", "timeout"): + if key in self.kwargs: + llm_kwargs[key] = self.kwargs[key] return ChatBedrockConverse(**llm_kwargs) def validate_model(self) -> bool: - """Validate model for Bedrock (pass-through, model IDs are flexible).""" + """Bedrock model IDs are dynamic; skip static validation.""" return True From a78157434eedcb88b4351037310921f22eaa6587 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michelle=20=28=E8=9C=9C=E9=9B=AA=29?= Date: Sun, 22 Mar 2026 13:42:46 +0800 Subject: [PATCH 3/3] docs: update BedrockClient example to use global inference profile Use global.anthropic.claude-sonnet-4-6 as the recommended example, which supports cross-region routing without region-specific prefix. 
--- tradingagents/llm_clients/bedrock_client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tradingagents/llm_clients/bedrock_client.py b/tradingagents/llm_clients/bedrock_client.py index 268582b3..6b1800d8 100644 --- a/tradingagents/llm_clients/bedrock_client.py +++ b/tradingagents/llm_clients/bedrock_client.py @@ -17,7 +17,7 @@ class BedrockClient(BaseLLMClient): Model ID formats: - Cross-region inference profile (recommended): - ``us.anthropic.claude-haiku-4-5-20251001-v1:0`` + ``global.anthropic.claude-sonnet-4-6`` ``eu.anthropic.claude-3-5-sonnet-20240620-v1:0`` - Direct on-demand (us-east-1 default region only): ``amazon.nova-lite-v1:0`` @@ -34,7 +34,7 @@ class BedrockClient(BaseLLMClient): Example:: config["llm_provider"] = "bedrock" - config["deep_think_llm"] = "us.anthropic.claude-haiku-4-5-20251001-v1:0" + config["deep_think_llm"] = "global.anthropic.claude-sonnet-4-6" config["quick_think_llm"] = "amazon.nova-micro-v1:0" """