fix: add http_client support for SSL certificate customization
- Add `http_client` and `http_async_client` parameters to all LLM clients.
- `OpenAIClient`, `GoogleClient`, and `AnthropicClient` now support custom httpx clients.
- Fixes SSL certificate verification errors in Windows Conda environments.
- Users can now pass a custom `httpx.Client` with `verify=False` or custom certificates.

Fixes #369
This commit is contained in:
parent
551fd7f074
commit
64f07671b9
|
|
@ -16,7 +16,7 @@ class AnthropicClient(BaseLLMClient):
|
|||
"""Return configured ChatAnthropic instance."""
|
||||
llm_kwargs = {"model": self.model}
|
||||
|
||||
for key in ("timeout", "max_retries", "api_key", "max_tokens", "callbacks"):
|
||||
for key in ("timeout", "max_retries", "api_key", "max_tokens", "callbacks", "http_client", "http_async_client"):
|
||||
if key in self.kwargs:
|
||||
llm_kwargs[key] = self.kwargs[key]
|
||||
|
||||
|
|
|
|||
|
|
@ -19,6 +19,12 @@ def create_llm_client(
|
|||
model: Model name/identifier
|
||||
base_url: Optional base URL for API endpoint
|
||||
**kwargs: Additional provider-specific arguments
|
||||
- http_client: Custom httpx.Client for SSL, proxy, or certificate customization
|
||||
- http_async_client: Custom httpx.AsyncClient for async operations
|
||||
- timeout: Request timeout in seconds
|
||||
- max_retries: Maximum retry attempts
|
||||
- api_key: API key for the provider
|
||||
- callbacks: LangChain callbacks
|
||||
|
||||
Returns:
|
||||
Configured BaseLLMClient instance
|
||||
|
|
|
|||
|
|
@ -38,7 +38,7 @@ class GoogleClient(BaseLLMClient):
|
|||
"""Return configured ChatGoogleGenerativeAI instance."""
|
||||
llm_kwargs = {"model": self.model}
|
||||
|
||||
for key in ("timeout", "max_retries", "google_api_key", "callbacks"):
|
||||
for key in ("timeout", "max_retries", "google_api_key", "callbacks", "http_client", "http_async_client"):
|
||||
if key in self.kwargs:
|
||||
llm_kwargs[key] = self.kwargs[key]
|
||||
|
||||
|
|
|
|||
|
|
@ -59,7 +59,7 @@ class OpenAIClient(BaseLLMClient):
|
|||
elif self.base_url:
|
||||
llm_kwargs["base_url"] = self.base_url
|
||||
|
||||
for key in ("timeout", "max_retries", "reasoning_effort", "api_key", "callbacks"):
|
||||
for key in ("timeout", "max_retries", "reasoning_effort", "api_key", "callbacks", "http_client", "http_async_client"):
|
||||
if key in self.kwargs:
|
||||
llm_kwargs[key] = self.kwargs[key]
|
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue