From 64f07671b9736f44abdcc22eb33ef64ccdea7167 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=98=B3=E8=99=8E?=
Date: Mon, 16 Mar 2026 07:41:20 +0800
Subject: [PATCH] fix: add http_client support for SSL certificate
 customization

- Add http_client and http_async_client parameters to all LLM clients
- OpenAIClient, GoogleClient, AnthropicClient now support custom httpx clients
- Fixes SSL certificate verification errors on Windows Conda environments
- Users can now pass custom httpx.Client with verify=False or custom certs

Fixes #369
---
 tradingagents/llm_clients/anthropic_client.py | 2 +-
 tradingagents/llm_clients/factory.py          | 6 ++++++
 tradingagents/llm_clients/google_client.py    | 2 +-
 tradingagents/llm_clients/openai_client.py    | 2 +-
 4 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/tradingagents/llm_clients/anthropic_client.py b/tradingagents/llm_clients/anthropic_client.py
index e2f1abba..8539c752 100644
--- a/tradingagents/llm_clients/anthropic_client.py
+++ b/tradingagents/llm_clients/anthropic_client.py
@@ -16,7 +16,7 @@ class AnthropicClient(BaseLLMClient):
         """Return configured ChatAnthropic instance."""
         llm_kwargs = {"model": self.model}
 
-        for key in ("timeout", "max_retries", "api_key", "max_tokens", "callbacks"):
+        for key in ("timeout", "max_retries", "api_key", "max_tokens", "callbacks", "http_client", "http_async_client"):
             if key in self.kwargs:
                 llm_kwargs[key] = self.kwargs[key]
diff --git a/tradingagents/llm_clients/factory.py b/tradingagents/llm_clients/factory.py
index 028c88a2..93c2a7d3 100644
--- a/tradingagents/llm_clients/factory.py
+++ b/tradingagents/llm_clients/factory.py
@@ -19,6 +19,12 @@ def create_llm_client(
         model: Model name/identifier
         base_url: Optional base URL for API endpoint
         **kwargs: Additional provider-specific arguments
+            - http_client: Custom httpx.Client for SSL proxy or certificate customization
+            - http_async_client: Custom httpx.AsyncClient for async operations
+            - timeout: Request timeout in seconds
+            - max_retries: Maximum retry attempts
+            - api_key: API key for the provider
+            - callbacks: LangChain callbacks
 
     Returns:
         Configured BaseLLMClient instance
diff --git a/tradingagents/llm_clients/google_client.py b/tradingagents/llm_clients/google_client.py
index a1bd386b..3dd85e3f 100644
--- a/tradingagents/llm_clients/google_client.py
+++ b/tradingagents/llm_clients/google_client.py
@@ -38,7 +38,7 @@ class GoogleClient(BaseLLMClient):
         """Return configured ChatGoogleGenerativeAI instance."""
         llm_kwargs = {"model": self.model}
 
-        for key in ("timeout", "max_retries", "google_api_key", "callbacks"):
+        for key in ("timeout", "max_retries", "google_api_key", "callbacks", "http_client", "http_async_client"):
             if key in self.kwargs:
                 llm_kwargs[key] = self.kwargs[key]
diff --git a/tradingagents/llm_clients/openai_client.py b/tradingagents/llm_clients/openai_client.py
index 924f24b0..4605c1f9 100644
--- a/tradingagents/llm_clients/openai_client.py
+++ b/tradingagents/llm_clients/openai_client.py
@@ -59,7 +59,7 @@ class OpenAIClient(BaseLLMClient):
         elif self.base_url:
             llm_kwargs["base_url"] = self.base_url
 
-        for key in ("timeout", "max_retries", "reasoning_effort", "api_key", "callbacks"):
+        for key in ("timeout", "max_retries", "reasoning_effort", "api_key", "callbacks", "http_client", "http_async_client"):
             if key in self.kwargs:
                 llm_kwargs[key] = self.kwargs[key]