import os
from typing import Any, Optional

from langchain_openai import ChatOpenAI

from .base_client import BaseLLMClient, normalize_content
from .validators import validate_model
from ..auth import get_codex_token, get_github_token, get_copilot_api_url, COPILOT_HEADERS


class NormalizedChatOpenAI(ChatOpenAI):
    """ChatOpenAI with normalized content output.

    The Responses API returns content as a list of typed blocks (reasoning,
    text, etc.). This normalizes it to a string for consistent downstream
    handling.
    """

    def invoke(self, input, config=None, **kwargs):
        return normalize_content(super().invoke(input, config, **kwargs))


# Kwargs forwarded from user config to ChatOpenAI.
_PASSTHROUGH_KWARGS = (
    "timeout",
    "max_retries",
    "reasoning_effort",
    "api_key",
    "callbacks",
    "http_client",
    "http_async_client",
)

# Provider base URLs and API key env vars.
# Copilot uses the GitHub Copilot inference API, authenticated via the ``gh``
# CLI token with Copilot-specific headers, so no env var is needed.
_PROVIDER_CONFIG = {
    "xai": ("https://api.x.ai/v1", "XAI_API_KEY"),
    "openrouter": ("https://openrouter.ai/api/v1", "OPENROUTER_API_KEY"),
    "ollama": ("http://localhost:11434/v1", None),
    "copilot": (None, None),  # base_url resolved at runtime via GraphQL
}


class OpenAIClient(BaseLLMClient):
    """Client for OpenAI, Ollama, OpenRouter, xAI, and GitHub Copilot providers.

    For native OpenAI models, uses the Responses API (/v1/responses), which
    supports reasoning_effort with function tools across all model families
    (GPT-4.1, GPT-5). Third-party compatible providers (xAI, OpenRouter,
    Ollama) use standard Chat Completions. GitHub Copilot uses the Copilot
    inference API with special headers.
    """

    def __init__(
        self,
        model: str,
        base_url: Optional[str] = None,
        provider: str = "openai",
        **kwargs,
    ):
        super().__init__(model, base_url, **kwargs)
        self.provider = provider.lower()

    def get_llm(self) -> Any:
        """Return a configured ChatOpenAI instance."""
        llm_kwargs = {"model": self.model}

        # Provider-specific base URL and auth.
        if self.provider == "copilot":
            # GitHub Copilot: resolve base URL and inject required headers.
            copilot_url = get_copilot_api_url()
            llm_kwargs["base_url"] = copilot_url
            token = get_github_token()
            if token:
                llm_kwargs["api_key"] = token
            llm_kwargs["default_headers"] = dict(COPILOT_HEADERS)
        elif self.provider in _PROVIDER_CONFIG:
            base_url, api_key_env = _PROVIDER_CONFIG[self.provider]
            if base_url:
                llm_kwargs["base_url"] = base_url
            if api_key_env:
                api_key = os.environ.get(api_key_env)
                if api_key:
                    llm_kwargs["api_key"] = api_key
            else:
                # Ollama has no auth, but the OpenAI client requires a key.
                llm_kwargs["api_key"] = "ollama"
        elif self.base_url:
            llm_kwargs["base_url"] = self.base_url

        # Forward user-provided kwargs (these take precedence over
        # auto-resolved tokens).
        for key in _PASSTHROUGH_KWARGS:
            if key in self.kwargs:
                llm_kwargs[key] = self.kwargs[key]

        # Native OpenAI: use the Responses API for consistent behavior across
        # all model families. Third-party providers use Chat Completions.
        if self.provider == "openai":
            llm_kwargs["use_responses_api"] = True
            # If no explicit api_key was provided, fall back to the Codex
            # OAuth token.
            if "api_key" not in llm_kwargs:
                codex_token = get_codex_token()
                if codex_token:
                    llm_kwargs["api_key"] = codex_token

        return NormalizedChatOpenAI(**llm_kwargs)

    def validate_model(self) -> bool:
        """Validate the model name for the provider."""
        return validate_model(self.provider, self.model)
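
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; the model names and reasoning_effort value
# below are assumptions, not defaults shipped with this module):
#
#     # Native OpenAI via the Responses API; reasoning_effort is forwarded
#     # because it appears in _PASSTHROUGH_KWARGS:
#     client = OpenAIClient("gpt-5", provider="openai", reasoning_effort="low")
#     llm = client.get_llm()  # NormalizedChatOpenAI with use_responses_api=True
#
#     # Local Ollama: base_url and the placeholder api_key are resolved
#     # automatically from _PROVIDER_CONFIG:
#     local = OpenAIClient("llama3.1", provider="ollama")
#     response = local.get_llm().invoke("Hello")  # output run through normalize_content
# ---------------------------------------------------------------------------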