import os
from typing import Any, Optional

from langchain_openai import ChatOpenAI

from .base_client import BaseLLMClient
from .validators import validate_model


class UnifiedChatOpenAI(ChatOpenAI):
    """ChatOpenAI subclass that strips incompatible params for certain models."""

    def __init__(self, **kwargs):
        model = kwargs.get("model", "")
        if self._is_reasoning_model(model):
            # Reasoning models reject sampling parameters, so drop them.
            kwargs.pop("temperature", None)
            kwargs.pop("top_p", None)
        super().__init__(**kwargs)

    @staticmethod
    def _is_reasoning_model(model: str) -> bool:
        """Return True if the model is a reasoning model that doesn't support temperature."""
        model_lower = model.lower()
        return (
            model_lower.startswith("o1")
            or model_lower.startswith("o3")
            or "gpt-5" in model_lower
        )


class OpenAIClient(BaseLLMClient):
    """Client for OpenAI, Ollama, OpenRouter, and xAI providers."""

    def __init__(
        self,
        model: str,
        base_url: Optional[str] = None,
        provider: str = "openai",
        **kwargs,
    ):
        super().__init__(model, base_url, **kwargs)
        self.provider = provider.lower()

    def get_llm(self) -> Any:
        """Return a configured ChatOpenAI instance."""
        import certifi

        # Fix SSL certificate path issue on Windows with conda: conda can set
        # SSL_CERT_FILE to a non-existent path, so point it at certifi's CA
        # bundle instead.
        ssl_cert_file = os.environ.get("SSL_CERT_FILE", "")
        if ssl_cert_file and not os.path.exists(ssl_cert_file):
            os.environ["SSL_CERT_FILE"] = certifi.where()

        llm_kwargs = {"model": self.model}
        if self.provider == "xai":
            llm_kwargs["base_url"] = "https://api.x.ai/v1"
            api_key = os.environ.get("XAI_API_KEY")
            if api_key:
                llm_kwargs["api_key"] = api_key
        elif self.provider == "openrouter":
            llm_kwargs["base_url"] = "https://openrouter.ai/api/v1"
            api_key = os.environ.get("OPENROUTER_API_KEY")
            if api_key:
                llm_kwargs["api_key"] = api_key
        elif self.provider == "ollama":
            llm_kwargs["base_url"] = "http://localhost:11434/v1"
            llm_kwargs["api_key"] = "ollama"  # Ollama doesn't require auth
        elif self.base_url:
            llm_kwargs["base_url"] = self.base_url

        # Pass through any explicitly supplied client options.
        for key in ("timeout", "max_retries", "api_key", "callbacks"):
            if key in self.kwargs:
                llm_kwargs[key] = self.kwargs[key]

        # Only add reasoning_effort for o1/o3 models that support it.
        if "reasoning_effort" in self.kwargs:
            model_lower = self.model.lower()
            if model_lower.startswith("o1") or model_lower.startswith("o3"):
                llm_kwargs["reasoning_effort"] = self.kwargs["reasoning_effort"]

        return UnifiedChatOpenAI(**llm_kwargs)

    def validate_model(self) -> bool:
        """Validate the model name for the configured provider."""
        return validate_model(self.provider, self.model)
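

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the client API). This assumes
# BaseLLMClient stores extra keyword arguments on `self.kwargs` (which
# get_llm() above relies on) and that the relevant API key for the chosen
# provider (e.g. OPENAI_API_KEY or XAI_API_KEY) is set in the environment:
#
#     client = OpenAIClient(model="o3-mini", provider="openai",
#                           reasoning_effort="high", timeout=60)
#     if client.validate_model():
#         llm = client.get_llm()  # UnifiedChatOpenAI strips temperature/top_p for o3
#         print(llm.invoke("Hello!").content)
# ---------------------------------------------------------------------------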