feat: add GitHub Copilot provider with OAuth auth via gh CLI

Replace GitHub Models (models.github.ai) with GitHub Copilot inference API
(api.individual.githubcopilot.com). Auth uses gh CLI token with required
Copilot headers (Copilot-Integration-Id, X-GitHub-Api-Version).

- Add tradingagents/auth/ module: gh token retrieval, Copilot URL resolution
  via GraphQL, Codex OAuth token with auto-refresh
- Add "copilot" provider to OpenAIClient with dynamic base URL and headers
- Add live model listing from Copilot /models endpoint (27 models)
- Add perform_copilot_oauth() with Copilot access verification
- Remove all GitHub Models (models.github.ai) references

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
This commit is contained in:
Jiaxu Liu 2026-03-23 13:18:55 +00:00
parent 589b351f2a
commit d8ac212253
10 changed files with 379 additions and 28 deletions

View File

@ -4,3 +4,7 @@ GOOGLE_API_KEY=
ANTHROPIC_API_KEY=
XAI_API_KEY=
OPENROUTER_API_KEY=
# GitHub Copilot authenticates via the GitHub CLI (`gh auth login`).
# No API key needed — the token from `gh auth token` is used automatically.
# Requires an active Copilot subscription (Pro/Pro+).

View File

@ -538,29 +538,42 @@ def get_user_selections():
)
selected_research_depth = select_research_depth()
# Step 5: OpenAI backend
# Step 5: LLM Provider
console.print(
create_question_box(
"Step 5: OpenAI backend", "Select which service to talk to"
"Step 5: LLM Provider", "Select which service to talk to"
)
)
selected_llm_provider, backend_url = select_llm_provider()
provider_id = selected_llm_provider.lower()
# GitHub Copilot: run OAuth before proceeding
if provider_id == "copilot":
console.print(
create_question_box(
"Step 5b: Copilot Auth",
"Authenticate with the GitHub CLI to use GitHub Copilot",
)
)
if not perform_copilot_oauth():
exit(1)
# Step 6: Thinking agents
console.print(
create_question_box(
"Step 6: Thinking Agents", "Select your thinking agents for analysis"
)
)
selected_shallow_thinker = select_shallow_thinking_agent(selected_llm_provider)
selected_deep_thinker = select_deep_thinking_agent(selected_llm_provider)
selected_shallow_thinker = select_shallow_thinking_agent(provider_id)
selected_deep_thinker = select_deep_thinking_agent(provider_id)
# Step 7: Provider-specific thinking configuration
thinking_level = None
reasoning_effort = None
anthropic_effort = None
provider_lower = selected_llm_provider.lower()
provider_lower = provider_id
if provider_lower == "google":
console.print(
create_question_box(
@ -591,7 +604,7 @@ def get_user_selections():
"analysis_date": analysis_date,
"analysts": selected_analysts,
"research_depth": selected_research_depth,
"llm_provider": selected_llm_provider.lower(),
"llm_provider": provider_id,
"backend_url": backend_url,
"shallow_thinker": selected_shallow_thinker,
"deep_thinker": selected_deep_thinker,

View File

@ -1,3 +1,4 @@
import subprocess
import questionary
from typing import List, Optional, Tuple, Dict
@ -136,9 +137,7 @@ def select_research_depth() -> int:
def select_shallow_thinking_agent(provider) -> str:
"""Select shallow thinking llm engine using an interactive selection."""
# Define shallow thinking llm engine options with their corresponding model names
# Ordering: medium → light → heavy (balanced first for quick tasks)
# Within same tier, newer models first
SHALLOW_AGENT_OPTIONS = {
"openai": [
("GPT-5 Mini - Balanced speed, cost, and capability", "gpt-5-mini"),
@ -171,13 +170,25 @@ def select_shallow_thinking_agent(provider) -> str:
("GPT-OSS:latest (20B, local)", "gpt-oss:latest"),
("GLM-4.7-Flash:latest (30B, local)", "glm-4.7-flash:latest"),
],
"copilot": [], # populated dynamically by fetch_copilot_models()
}
if provider.lower() == "copilot":
options = fetch_copilot_models()
if not options:
console.print("[red]No Copilot models available. Exiting...[/red]")
exit(1)
else:
options = SHALLOW_AGENT_OPTIONS.get(provider.lower())
if not options:
console.print(f"[red]No models available for provider '{provider}'. Exiting...[/red]")
exit(1)
choice = questionary.select(
"Select Your [Quick-Thinking LLM Engine]:",
choices=[
questionary.Choice(display, value=value)
for display, value in SHALLOW_AGENT_OPTIONS[provider.lower()]
for display, value in options
],
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
style=questionary.Style(
@ -201,9 +212,7 @@ def select_shallow_thinking_agent(provider) -> str:
def select_deep_thinking_agent(provider) -> str:
"""Select deep thinking llm engine using an interactive selection."""
# Define deep thinking llm engine options with their corresponding model names
# Ordering: heavy → medium → light (most capable first for deep tasks)
# Within same tier, newer models first
DEEP_AGENT_OPTIONS = {
"openai": [
("GPT-5.4 - Latest frontier, 1M context", "gpt-5.4"),
@ -238,13 +247,25 @@ def select_deep_thinking_agent(provider) -> str:
("GPT-OSS:latest (20B, local)", "gpt-oss:latest"),
("Qwen3:latest (8B, local)", "qwen3:latest"),
],
"copilot": [], # populated dynamically by fetch_copilot_models()
}
if provider.lower() == "copilot":
options = fetch_copilot_models()
if not options:
console.print("[red]No Copilot models available. Exiting...[/red]")
exit(1)
else:
options = DEEP_AGENT_OPTIONS.get(provider.lower())
if not options:
console.print(f"[red]No models available for provider '{provider}'. Exiting...[/red]")
exit(1)
choice = questionary.select(
"Select Your [Deep-Thinking LLM Engine]:",
choices=[
questionary.Choice(display, value=value)
for display, value in DEEP_AGENT_OPTIONS[provider.lower()]
for display, value in options
],
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
style=questionary.Style(
@ -262,9 +283,53 @@ def select_deep_thinking_agent(provider) -> str:
return choice
def fetch_copilot_models() -> list[tuple[str, str]]:
    """Fetch models from the GitHub Copilot inference API.

    Returns a list of (display_label, model_id) tuples sorted by model ID.
    Requires authentication via ``gh auth login`` with a Copilot subscription.
    Returns an empty list when no token is available or the request fails.
    """
    import requests

    from tradingagents.auth import get_github_token, COPILOT_HEADERS, get_copilot_api_url

    token = get_github_token()
    if not token:
        console.print("[red]No GitHub token available. Run `gh auth login` first.[/red]")
        return []

    try:
        console.print("[dim]Fetching available Copilot models...[/dim]")
        response = requests.get(
            f"{get_copilot_api_url()}/models",
            headers={"Authorization": f"Bearer {token}", **COPILOT_HEADERS},
            timeout=10,
        )
        response.raise_for_status()
        payload = response.json()
        # The endpoint may return either {"data": [...]} or a bare list.
        model_list = payload.get("data", payload) if isinstance(payload, dict) else payload
        # Keep only chat-capable entries (embedding models are excluded).
        sorted_ids = sorted(
            entry["id"]
            for entry in model_list
            if not entry.get("id", "").startswith("text-embedding")
        )
        return [(model_id, model_id) for model_id in sorted_ids]
    except Exception as exc:
        console.print(f"[yellow]Warning: Could not fetch Copilot models: {exc}[/yellow]")
        return []
def select_llm_provider() -> tuple[str, str]:
"""Select the OpenAI api url using interactive selection."""
# Define OpenAI api options with their corresponding endpoints
"""Select the LLM provider using interactive selection."""
BASE_URLS = [
("OpenAI", "https://api.openai.com/v1"),
("Google", "https://generativelanguage.googleapis.com/v1"),
@ -272,6 +337,7 @@ def select_llm_provider() -> tuple[str, str]:
("xAI", "https://api.x.ai/v1"),
("Openrouter", "https://openrouter.ai/api/v1"),
("Ollama", "http://localhost:11434/v1"),
("Copilot", ""), # resolved at runtime via GraphQL
]
choice = questionary.select(
@ -291,7 +357,7 @@ def select_llm_provider() -> tuple[str, str]:
).ask()
if choice is None:
console.print("\n[red]no OpenAI backend selected. Exiting...[/red]")
console.print("\n[red]No LLM provider selected. Exiting...[/red]")
exit(1)
display_name, url = choice
@ -300,6 +366,68 @@ def select_llm_provider() -> tuple[str, str]:
return display_name, url
def perform_copilot_oauth() -> bool:
    """Ensure the user is authenticated with the GitHub CLI for Copilot.

    Checks for an existing token and verifies Copilot access. If the token
    is missing, offers to run ``gh auth login`` interactively.

    Returns:
        True if a valid token with Copilot access is available, False otherwise.
    """
    from tradingagents.auth import get_github_token

    token = get_github_token()
    if token:
        # Verify Copilot access by probing the /models endpoint.
        import requests
        from tradingagents.auth import COPILOT_HEADERS, get_copilot_api_url

        try:
            copilot_url = get_copilot_api_url()
            resp = requests.get(
                f"{copilot_url}/models",
                headers={"Authorization": f"Bearer {token}", **COPILOT_HEADERS},
                timeout=5,
            )
            if resp.status_code == 200:
                console.print("[green]✓ Authenticated with GitHub Copilot[/green]")
                return True
            else:
                console.print(
                    f"[yellow]⚠ GitHub token found but Copilot access failed "
                    f"(HTTP {resp.status_code}). Check your Copilot subscription.[/yellow]"
                )
                return False
        except Exception:
            # Network error — accept the token optimistically so offline
            # setups are not blocked; the real API call will surface errors.
            console.print("[green]✓ Authenticated with GitHub CLI (Copilot access not verified)[/green]")
            return True

    console.print(
        "[yellow]⚠ No GitHub token found.[/yellow] "
        "You need to authenticate to use GitHub Copilot."
    )
    should_login = questionary.confirm(
        "Run `gh auth login` now?", default=True
    ).ask()
    if not should_login:
        console.print("[red]GitHub authentication skipped. Exiting...[/red]")
        return False

    # `gh` may not be installed at all — report that instead of crashing
    # with an unhandled FileNotFoundError from subprocess.run.
    try:
        result = subprocess.run(["gh", "auth", "login"])
    except (FileNotFoundError, OSError):
        console.print("[red]GitHub CLI (`gh`) not found. Install it and retry.[/red]")
        return False
    if result.returncode != 0:
        console.print("[red]`gh auth login` failed.[/red]")
        return False

    token = get_github_token()
    if token:
        console.print("[green]✓ GitHub authentication successful![/green]")
        return True
    console.print("[red]Could not retrieve token after login.[/red]")
    return False
def ask_openai_reasoning_effort() -> str:
"""Ask for OpenAI reasoning effort level."""
choices = [

View File

@ -0,0 +1,4 @@
from .codex_token import get_codex_token
from .github_token import get_github_token, get_copilot_api_url, COPILOT_HEADERS
__all__ = ["get_codex_token", "get_github_token", "get_copilot_api_url", "COPILOT_HEADERS"]

View File

@ -0,0 +1,109 @@
"""OpenAI Codex OAuth token reader with auto-refresh.
Reads credentials stored by the OpenAI Codex CLI at ~/.codex/auth.json.
Checks expiry and refreshes automatically via the OpenAI token endpoint
before returning a valid access token — the same pattern OpenClaw uses
with its auth-profiles.json token sink.
Token refresh invalidates the previous refresh token, so only one tool
should hold the Codex credentials at a time (same caveat as OpenClaw).
"""
import json
import time
from pathlib import Path
from typing import Optional
import requests
_AUTH_FILE = Path.home() / ".codex" / "auth.json"
_TOKEN_URL = "https://auth.openai.com/oauth/token"
# Refresh this many seconds before actual expiry to avoid edge-case failures.
_EXPIRY_BUFFER_SECS = 60
def _load_auth() -> Optional[dict]:
    """Load the Codex auth file, return None if missing or malformed."""
    try:
        raw = _AUTH_FILE.read_text()
    except OSError:
        # Missing file or unreadable file — no credentials available.
        return None
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return None
def _save_auth(data: dict) -> None:
    """Persist the auth structure back to ~/.codex/auth.json.

    NOTE(review): this file holds OAuth tokens — presumably the Codex CLI
    creates it with restrictive permissions, but ``write_text`` on a fresh
    file uses the default umask; confirm before relying on this to create
    the file from scratch.
    """
    _AUTH_FILE.write_text(json.dumps(data, indent=2))
def _is_expired(auth: dict) -> bool:
    """Return True if the access token is expired (or close to expiring).

    Prefers an explicit ``expires_at`` (top-level or under ``tokens``);
    when absent, falls back to decoding the JWT ``exp`` claim from the
    access token. Returns False when expiry cannot be determined.
    """
    expires = auth.get("expires_at") or auth.get("tokens", {}).get("expires_at")
    if expires is None:
        # Fall back to decoding the JWT exp claim.
        try:
            import base64

            token = auth["tokens"]["access_token"]
            payload = token.split(".")[1]
            # JWT segments use the *url-safe* base64 alphabet with padding
            # stripped. Restore exact padding and decode with the url-safe
            # variant: plain b64decode fails on '-'/'_' characters, and a
            # blanket "==" suffix over-pads 4-aligned payloads.
            padded = payload + "=" * (-len(payload) % 4)
            decoded = json.loads(base64.urlsafe_b64decode(padded))
            expires = decoded.get("exp")
        except Exception:
            return False  # Can't determine — assume valid.
        if expires is None:
            # Token carries no exp claim — assume valid rather than raising
            # TypeError on the numeric comparison below.
            return False
    return time.time() >= (expires - _EXPIRY_BUFFER_SECS)
def _refresh(auth: dict) -> dict:
    """Exchange the refresh token for a new token pair and persist it."""
    old_refresh = auth["tokens"]["refresh_token"]
    resp = requests.post(
        _TOKEN_URL,
        json={"grant_type": "refresh_token", "refresh_token": old_refresh},
        headers={"Content-Type": "application/json"},
        timeout=15,
    )
    resp.raise_for_status()
    fresh = resp.json()

    # Merge the new token pair back into the auth structure and persist.
    # The server may omit refresh_token (keep the old one) or expires_in
    # (expires_at then becomes falsy and the JWT fallback applies).
    expires_in = fresh.get("expires_in")
    auth["tokens"].update({
        "access_token": fresh["access_token"],
        "refresh_token": fresh.get("refresh_token", old_refresh),
        "expires_at": expires_in and int(time.time()) + int(expires_in),
    })
    auth["last_refresh"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    _save_auth(auth)
    return auth
def get_codex_token() -> Optional[str]:
"""Return a valid OpenAI access token from the Codex CLI auth file.
Resolution order:
1. OPENAI_API_KEY environment variable (explicit key always wins)
2. ~/.codex/auth.json auto-refreshes if the access token is expired
Returns None if no credentials are found.
"""
import os
explicit = os.environ.get("OPENAI_API_KEY")
if explicit:
return explicit
auth = _load_auth()
if not auth or "tokens" not in auth:
return None
# Refresh if expired.
if _is_expired(auth):
try:
auth = _refresh(auth)
except Exception:
# Refresh failed — return whatever token we have and let the
# API call surface a clearer error.
pass
return auth["tokens"].get("access_token")

View File

@ -0,0 +1,68 @@
"""GitHub token retrieval for the GitHub Copilot API.
Uses the ``gh`` CLI exclusively — no explicit API token or env var.
Run ``gh auth login`` once to authenticate; this module handles the rest.
"""
import subprocess
from typing import Optional
def get_github_token() -> Optional[str]:
    """Return a GitHub token obtained via the GitHub CLI (``gh auth token``).

    Returns None if the CLI is unavailable or the user is not logged in.
    """
    try:
        proc = subprocess.run(
            ["gh", "auth", "token"],
            capture_output=True,
            text=True,
            timeout=5,
        )
    except (FileNotFoundError, subprocess.TimeoutExpired):
        # gh not installed, or it hung — treat both as "no token".
        return None

    token = proc.stdout.strip()
    if proc.returncode == 0 and token:
        return token
    return None
def get_copilot_api_url() -> str:
    """Resolve the Copilot inference base URL.

    Queries the GitHub GraphQL API for the user's Copilot endpoints.
    Falls back to the standard individual endpoint on failure.
    """
    import requests

    fallback = "https://api.individual.githubcopilot.com"
    token = get_github_token()
    if not token:
        return fallback

    try:
        resp = requests.post(
            "https://api.github.com/graphql",
            headers={
                "Authorization": f"Bearer {token}",
                "Content-Type": "application/json",
            },
            json={"query": "{ viewer { copilotEndpoints { api } } }"},
            timeout=5,
        )
        if resp.status_code == 200:
            endpoint = resp.json()["data"]["viewer"]["copilotEndpoints"]["api"]
            if endpoint:
                return endpoint.rstrip("/")
    except Exception:
        # Network or response-shape problems are non-fatal — use the default.
        pass
    return fallback
# Required headers for the Copilot inference API (reverse-engineered from the
# Copilot CLI at /usr/local/lib/node_modules/@github/copilot).
COPILOT_HEADERS = {
"Copilot-Integration-Id": "copilot-developer-cli",
"X-GitHub-Api-Version": "2025-05-01",
"Openai-Intent": "conversation-agent",
}

View File

@ -8,6 +8,9 @@ DEFAULT_CONFIG = {
"dataflows/data_cache",
),
# LLM settings
# Set llm_provider to "copilot" to use GitHub Copilot (no explicit API key
# needed — authenticates via `gh auth token` from the GitHub CLI).
# Available models are fetched dynamically from the Copilot inference API.
"llm_provider": "openai",
"deep_think_llm": "gpt-5.2",
"quick_think_llm": "gpt-5-mini",

View File

@ -15,7 +15,7 @@ def create_llm_client(
"""Create an LLM client for the specified provider.
Args:
provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter)
provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter, copilot)
model: Model name/identifier
base_url: Optional base URL for API endpoint
**kwargs: Additional provider-specific arguments
@ -34,7 +34,7 @@ def create_llm_client(
"""
provider_lower = provider.lower()
if provider_lower in ("openai", "ollama", "openrouter"):
if provider_lower in ("openai", "ollama", "openrouter", "copilot"):
return OpenAIClient(model, base_url, provider=provider_lower, **kwargs)
if provider_lower == "xai":

View File

@ -5,6 +5,7 @@ from langchain_openai import ChatOpenAI
from .base_client import BaseLLMClient, normalize_content
from .validators import validate_model
from ..auth import get_codex_token, get_github_token, get_copilot_api_url, COPILOT_HEADERS
class NormalizedChatOpenAI(ChatOpenAI):
@ -24,21 +25,25 @@ _PASSTHROUGH_KWARGS = (
"api_key", "callbacks", "http_client", "http_async_client",
)
# Provider base URLs and API key env vars
# Provider base URLs and API key env vars.
# Copilot: uses the GitHub Copilot inference API, authenticated via ``gh``
# CLI token with Copilot-specific headers. No env var needed.
_PROVIDER_CONFIG = {
"xai": ("https://api.x.ai/v1", "XAI_API_KEY"),
"openrouter": ("https://openrouter.ai/api/v1", "OPENROUTER_API_KEY"),
"ollama": ("http://localhost:11434/v1", None),
"copilot": (None, None), # base_url resolved at runtime via GraphQL
}
class OpenAIClient(BaseLLMClient):
"""Client for OpenAI, Ollama, OpenRouter, and xAI providers.
"""Client for OpenAI, Ollama, OpenRouter, xAI, and GitHub Copilot providers.
For native OpenAI models, uses the Responses API (/v1/responses) which
supports reasoning_effort with function tools across all model families
(GPT-4.1, GPT-5). Third-party compatible providers (xAI, OpenRouter,
Ollama) use standard Chat Completions.
Ollama) use standard Chat Completions. GitHub Copilot uses the Copilot
inference API with special headers.
"""
def __init__(
@ -56,8 +61,17 @@ class OpenAIClient(BaseLLMClient):
llm_kwargs = {"model": self.model}
# Provider-specific base URL and auth
if self.provider in _PROVIDER_CONFIG:
if self.provider == "copilot":
# GitHub Copilot: resolve base URL and inject required headers
copilot_url = get_copilot_api_url()
llm_kwargs["base_url"] = copilot_url
token = get_github_token()
if token:
llm_kwargs["api_key"] = token
llm_kwargs["default_headers"] = dict(COPILOT_HEADERS)
elif self.provider in _PROVIDER_CONFIG:
base_url, api_key_env = _PROVIDER_CONFIG[self.provider]
if base_url:
llm_kwargs["base_url"] = base_url
if api_key_env:
api_key = os.environ.get(api_key_env)
@ -68,7 +82,7 @@ class OpenAIClient(BaseLLMClient):
elif self.base_url:
llm_kwargs["base_url"] = self.base_url
# Forward user-provided kwargs
# Forward user-provided kwargs (takes precedence over auto-resolved tokens)
for key in _PASSTHROUGH_KWARGS:
if key in self.kwargs:
llm_kwargs[key] = self.kwargs[key]
@ -77,6 +91,11 @@ class OpenAIClient(BaseLLMClient):
# all model families. Third-party providers use Chat Completions.
if self.provider == "openai":
llm_kwargs["use_responses_api"] = True
# If no explicit api_key in kwargs, fall back to Codex OAuth token.
if "api_key" not in llm_kwargs:
codex_token = get_codex_token()
if codex_token:
llm_kwargs["api_key"] = codex_token
return NormalizedChatOpenAI(**llm_kwargs)

View File

@ -48,17 +48,20 @@ VALID_MODELS = {
"grok-4-fast-reasoning",
"grok-4-fast-non-reasoning",
],
# GitHub Copilot: model availability depends on plan and may change.
# Accept any model ID and let the API validate it.
"copilot": [],
}
def validate_model(provider: str, model: str) -> bool:
"""Check if model name is valid for the given provider.
For ollama, openrouter - any model is accepted.
For ollama, openrouter, copilot - any model is accepted.
"""
provider_lower = provider.lower()
if provider_lower in ("ollama", "openrouter"):
if provider_lower in ("ollama", "openrouter", "copilot"):
return True
if provider_lower not in VALID_MODELS: