refactor: address all 7 PR review comments

1. Fix header comment — document correct env var precedence
   (apiKey → GITHUB_COPILOT_TOKEN → GITHUB_TOKEN → device flow)
2. Use application/x-www-form-urlencoded for device code endpoint
3. Use application/x-www-form-urlencoded for poll endpoint
4. Add mutex (promise-based) on #getSessionToken to prevent
   concurrent token refreshes and duplicate device flow prompts
5. Add DeviceCodeCallback + CopilotAdapterOptions so callers can
   control device flow output instead of hardcoded console.log
6. Extract shared OpenAI wire-format helpers into openai-common.ts,
   imported by both openai.ts and copilot.ts (-142 lines net)
7. Update createAdapter JSDoc to mention copilot env vars
This commit is contained in:
Deathwing 2026-04-02 02:19:06 +02:00
parent eedfeb17a2
commit 8371cdb7c0
4 changed files with 346 additions and 488 deletions

View File

@ -42,8 +42,12 @@ export type SupportedProvider = 'anthropic' | 'copilot' | 'openai'
/**
* Instantiate the appropriate {@link LLMAdapter} for the given provider.
*
* API keys fall back to the standard environment variables
* (`ANTHROPIC_API_KEY` / `OPENAI_API_KEY`) when not supplied explicitly.
* API keys fall back to the standard environment variables when not supplied
* explicitly:
* - `anthropic` → `ANTHROPIC_API_KEY`
* - `openai` → `OPENAI_API_KEY`
* - `copilot` → `GITHUB_COPILOT_TOKEN` / `GITHUB_TOKEN`, or interactive
* OAuth2 device flow if neither is set
*
* Adapters are imported lazily so that projects using only one provider
* are not forced to install the SDK for the other.

View File

@ -3,18 +3,20 @@
*
* Uses the OpenAI-compatible Copilot Chat Completions endpoint at
* `https://api.githubcopilot.com`. Authentication requires a GitHub token
* (e.g. from `gh auth token`) which is exchanged for a short-lived Copilot
* session token via the internal token endpoint.
* which is exchanged for a short-lived Copilot session token via the
* internal token endpoint.
*
* API key resolution order:
* 1. `apiKey` constructor argument
* 2. `GITHUB_TOKEN` environment variable
* 2. `GITHUB_COPILOT_TOKEN` environment variable
* 3. `GITHUB_TOKEN` environment variable
* 4. Interactive OAuth2 device flow (prompts the user to sign in)
*
* @example
* ```ts
* import { CopilotAdapter } from './copilot.js'
*
* const adapter = new CopilotAdapter() // uses GITHUB_TOKEN env var
* const adapter = new CopilotAdapter() // uses GITHUB_COPILOT_TOKEN, falling back to GITHUB_TOKEN
* const response = await adapter.chat(messages, {
* model: 'claude-sonnet-4',
* maxTokens: 4096,
@ -24,14 +26,7 @@
import OpenAI from 'openai'
import type {
ChatCompletion,
ChatCompletionAssistantMessageParam,
ChatCompletionChunk,
ChatCompletionMessageParam,
ChatCompletionMessageToolCall,
ChatCompletionTool,
ChatCompletionToolMessageParam,
ChatCompletionUserMessageParam,
} from 'openai/resources/chat/completions/index.js'
import type {
@ -47,6 +42,13 @@ import type {
ToolUseBlock,
} from '../types.js'
import {
toOpenAITool,
fromOpenAICompletion,
normalizeFinishReason,
buildOpenAIMessageList,
} from './openai-common.js'
// ---------------------------------------------------------------------------
// Copilot auth — OAuth2 device flow + token exchange
// ---------------------------------------------------------------------------
@ -81,22 +83,38 @@ interface PollResponse {
error_description?: string
}
/**
* Callback invoked when the OAuth2 device flow needs the user to authorize.
* Receives the verification URI and user code. If not provided, defaults to
* printing them to stdout.
*/
export type DeviceCodeCallback = (verificationUri: string, userCode: string) => void
// Default presentation for the OAuth2 device flow: draw a boxed sign-in
// prompt on stdout. Used when the caller does not supply `onDeviceCode`.
// NOTE(review): `padEnd(35)` assumes the URI and code fit inside the box —
// longer values will run past the right border; confirm that is acceptable.
const defaultDeviceCodeCallback: DeviceCodeCallback = (uri, code) => {
console.log(`\n┌─────────────────────────────────────────────┐`)
console.log(`│ GitHub Copilot — Sign in │`)
console.log(`│ │`)
console.log(`│ Open: ${uri.padEnd(35)}`)
console.log(`│ Code: ${code.padEnd(35)}`)
console.log(`└─────────────────────────────────────────────┘\n`)
}
/**
* Start the GitHub OAuth2 device code flow with the Copilot client ID.
*
* Prints a user code and URL to stdout, then polls until the user completes
* authorization in their browser. Returns a GitHub OAuth token scoped for
* Copilot access.
* Calls `onDeviceCode` with the verification URI and user code, then polls
* until the user completes authorization. Returns a GitHub OAuth token
* scoped for Copilot access.
*/
async function deviceCodeLogin(): Promise<string> {
async function deviceCodeLogin(onDeviceCode: DeviceCodeCallback): Promise<string> {
// Step 1: Request a device code
const codeRes = await fetch(DEVICE_CODE_URL, {
method: 'POST',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
},
body: JSON.stringify({ client_id: COPILOT_CLIENT_ID, scope: 'copilot' }),
body: new URLSearchParams({ client_id: COPILOT_CLIENT_ID, scope: 'copilot' }),
})
if (!codeRes.ok) {
@ -106,13 +124,8 @@ async function deviceCodeLogin(): Promise<string> {
const codeData = (await codeRes.json()) as DeviceCodeResponse
// Step 2: Prompt the user
console.log(`\n┌─────────────────────────────────────────────┐`)
console.log(`│ GitHub Copilot — Sign in │`)
console.log(`│ │`)
console.log(`│ Open: ${codeData.verification_uri.padEnd(35)}`)
console.log(`│ Code: ${codeData.user_code.padEnd(35)}`)
console.log(`└─────────────────────────────────────────────┘\n`)
// Step 2: Prompt the user via callback
onDeviceCode(codeData.verification_uri, codeData.user_code)
// Step 3: Poll for the user to complete auth
const interval = (codeData.interval || 5) * 1000
@ -125,9 +138,9 @@ async function deviceCodeLogin(): Promise<string> {
method: 'POST',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
},
body: JSON.stringify({
body: new URLSearchParams({
client_id: COPILOT_CLIENT_ID,
device_code: codeData.device_code,
grant_type: 'urn:ietf:params:oauth:grant-type:device_code',
@ -182,189 +195,35 @@ async function fetchCopilotToken(githubToken: string): Promise<CopilotTokenRespo
return (await res.json()) as CopilotTokenResponse
}
// ---------------------------------------------------------------------------
// Internal helpers — framework → OpenAI (shared with openai.ts pattern)
// ---------------------------------------------------------------------------
function toOpenAITool(tool: LLMToolDef): ChatCompletionTool {
return {
type: 'function',
function: {
name: tool.name,
description: tool.description,
parameters: tool.inputSchema as Record<string, unknown>,
},
}
}
function hasToolResults(msg: LLMMessage): boolean {
return msg.content.some((b) => b.type === 'tool_result')
}
function toOpenAIMessages(messages: LLMMessage[]): ChatCompletionMessageParam[] {
const result: ChatCompletionMessageParam[] = []
for (const msg of messages) {
if (msg.role === 'assistant') {
result.push(toOpenAIAssistantMessage(msg))
} else {
if (!hasToolResults(msg)) {
result.push(toOpenAIUserMessage(msg))
} else {
const nonToolBlocks = msg.content.filter((b) => b.type !== 'tool_result')
if (nonToolBlocks.length > 0) {
result.push(toOpenAIUserMessage({ role: 'user', content: nonToolBlocks }))
}
for (const block of msg.content) {
if (block.type === 'tool_result') {
const toolMsg: ChatCompletionToolMessageParam = {
role: 'tool',
tool_call_id: block.tool_use_id,
content: block.content,
}
result.push(toolMsg)
}
}
}
}
}
return result
}
function toOpenAIUserMessage(msg: LLMMessage): ChatCompletionUserMessageParam {
if (msg.content.length === 1 && msg.content[0]?.type === 'text') {
return { role: 'user', content: msg.content[0].text }
}
type ContentPart = OpenAI.Chat.ChatCompletionContentPartText | OpenAI.Chat.ChatCompletionContentPartImage
const parts: ContentPart[] = []
for (const block of msg.content) {
if (block.type === 'text') {
parts.push({ type: 'text', text: block.text })
} else if (block.type === 'image') {
parts.push({
type: 'image_url',
image_url: {
url: `data:${block.source.media_type};base64,${block.source.data}`,
},
})
}
}
return { role: 'user', content: parts }
}
function toOpenAIAssistantMessage(msg: LLMMessage): ChatCompletionAssistantMessageParam {
const toolCalls: ChatCompletionMessageToolCall[] = []
const textParts: string[] = []
for (const block of msg.content) {
if (block.type === 'tool_use') {
toolCalls.push({
id: block.id,
type: 'function',
function: {
name: block.name,
arguments: JSON.stringify(block.input),
},
})
} else if (block.type === 'text') {
textParts.push(block.text)
}
}
const assistantMsg: ChatCompletionAssistantMessageParam = {
role: 'assistant',
content: textParts.length > 0 ? textParts.join('') : null,
}
if (toolCalls.length > 0) {
assistantMsg.tool_calls = toolCalls
}
return assistantMsg
}
// ---------------------------------------------------------------------------
// Internal helpers — OpenAI → framework
// ---------------------------------------------------------------------------
function fromOpenAICompletion(completion: ChatCompletion): LLMResponse {
const choice = completion.choices[0]
if (choice === undefined) {
throw new Error('Copilot returned a completion with no choices')
}
const content: ContentBlock[] = []
const message = choice.message
if (message.content !== null && message.content !== undefined) {
const textBlock: TextBlock = { type: 'text', text: message.content }
content.push(textBlock)
}
for (const toolCall of message.tool_calls ?? []) {
let parsedInput: Record<string, unknown> = {}
try {
const parsed: unknown = JSON.parse(toolCall.function.arguments)
if (parsed !== null && typeof parsed === 'object' && !Array.isArray(parsed)) {
parsedInput = parsed as Record<string, unknown>
}
} catch {
// Malformed arguments — surface as empty object.
}
const toolUseBlock: ToolUseBlock = {
type: 'tool_use',
id: toolCall.id,
name: toolCall.function.name,
input: parsedInput,
}
content.push(toolUseBlock)
}
const stopReason = normalizeFinishReason(choice.finish_reason ?? 'stop')
return {
id: completion.id,
content,
model: completion.model,
stop_reason: stopReason,
usage: {
input_tokens: completion.usage?.prompt_tokens ?? 0,
output_tokens: completion.usage?.completion_tokens ?? 0,
},
}
}
function normalizeFinishReason(reason: string): string {
switch (reason) {
case 'stop': return 'end_turn'
case 'tool_calls': return 'tool_use'
case 'length': return 'max_tokens'
case 'content_filter': return 'content_filter'
default: return reason
}
}
// ---------------------------------------------------------------------------
// Adapter implementation
// ---------------------------------------------------------------------------
/** Options for the {@link CopilotAdapter} constructor. */
export interface CopilotAdapterOptions {
/** GitHub OAuth token already scoped for Copilot. Falls back to env vars. */
apiKey?: string
/**
* Callback invoked when the OAuth2 device flow needs user action.
* Defaults to printing the verification URI and user code to stdout.
*/
onDeviceCode?: DeviceCodeCallback
}
/**
* LLM adapter backed by the GitHub Copilot Chat Completions API.
*
* Authentication options (tried in order):
* 1. `apiKey` constructor arg — a GitHub OAuth token already scoped for Copilot
* 2. `GITHUB_COPILOT_TOKEN` env var — same as above
* 3. Interactive OAuth2 device flow — prompts the user to sign in via browser
* 2. `GITHUB_COPILOT_TOKEN` env var
* 3. `GITHUB_TOKEN` env var
* 4. Interactive OAuth2 device flow
*
* The GitHub token is exchanged for a short-lived Copilot session token, which
* is cached and auto-refreshed.
*
* Thread-safe — a single instance may be shared across concurrent agent runs.
* Concurrent token refreshes are serialised via an internal mutex.
*/
export class CopilotAdapter implements LLMAdapter {
readonly name = 'copilot'
@ -372,17 +231,25 @@ export class CopilotAdapter implements LLMAdapter {
#githubToken: string | null
#cachedToken: string | null = null
#tokenExpiresAt = 0
#refreshPromise: Promise<string> | null = null
readonly #onDeviceCode: DeviceCodeCallback
constructor(apiKey?: string) {
this.#githubToken = apiKey
constructor(apiKeyOrOptions?: string | CopilotAdapterOptions) {
const opts = typeof apiKeyOrOptions === 'string'
? { apiKey: apiKeyOrOptions }
: apiKeyOrOptions ?? {}
this.#githubToken = opts.apiKey
?? process.env['GITHUB_COPILOT_TOKEN']
?? process.env['GITHUB_TOKEN']
?? null
this.#onDeviceCode = opts.onDeviceCode ?? defaultDeviceCodeCallback
}
/**
* Return a valid Copilot session token, refreshing if necessary.
* If no GitHub token is available, triggers the interactive device flow.
* Concurrent calls share a single in-flight refresh to avoid races.
*/
async #getSessionToken(): Promise<string> {
const now = Math.floor(Date.now() / 1000)
@ -390,9 +257,22 @@ export class CopilotAdapter implements LLMAdapter {
return this.#cachedToken
}
// If we don't have a GitHub token yet, do the device flow
// If another call is already refreshing, piggyback on that promise
if (this.#refreshPromise) {
return this.#refreshPromise
}
this.#refreshPromise = this.#doRefresh()
try {
return await this.#refreshPromise
} finally {
this.#refreshPromise = null
}
}
async #doRefresh(): Promise<string> {
if (!this.#githubToken) {
this.#githubToken = await deviceCodeLogin()
this.#githubToken = await deviceCodeLogin(this.#onDeviceCode)
}
const resp = await fetchCopilotToken(this.#githubToken)
@ -568,36 +448,6 @@ export class CopilotAdapter implements LLMAdapter {
}
}
// ---------------------------------------------------------------------------
// Private utility
// ---------------------------------------------------------------------------
function buildOpenAIMessageList(
messages: LLMMessage[],
systemPrompt: string | undefined,
): ChatCompletionMessageParam[] {
const result: ChatCompletionMessageParam[] = []
if (systemPrompt !== undefined && systemPrompt.length > 0) {
result.push({ role: 'system', content: systemPrompt })
}
result.push(...toOpenAIMessages(messages))
return result
}
// Re-export types that consumers of this module commonly need alongside the adapter.
export type {
ContentBlock,
LLMAdapter,
LLMChatOptions,
LLMMessage,
LLMResponse,
LLMStreamOptions,
LLMToolDef,
StreamEvent,
}
// ---------------------------------------------------------------------------
// Premium request multipliers
// ---------------------------------------------------------------------------

255
src/llm/openai-common.ts Normal file
View File

@ -0,0 +1,255 @@
/**
* @fileoverview Shared OpenAI wire-format conversion helpers.
*
* Both the OpenAI and Copilot adapters use the OpenAI Chat Completions API
* format. This module contains the common conversion logic so it isn't
* duplicated across adapters.
*/
import OpenAI from 'openai'
import type {
ChatCompletion,
ChatCompletionAssistantMessageParam,
ChatCompletionMessageParam,
ChatCompletionMessageToolCall,
ChatCompletionTool,
ChatCompletionToolMessageParam,
ChatCompletionUserMessageParam,
} from 'openai/resources/chat/completions/index.js'
import type {
ContentBlock,
LLMMessage,
LLMResponse,
LLMToolDef,
TextBlock,
ToolUseBlock,
} from '../types.js'
// ---------------------------------------------------------------------------
// Framework → OpenAI
// ---------------------------------------------------------------------------
/**
 * Convert a framework {@link LLMToolDef} into the OpenAI
 * {@link ChatCompletionTool} envelope (`type: 'function'` wrapper).
 *
 * The tool's `inputSchema` is already a JSON Schema object and is forwarded
 * to `parameters` unchanged.
 */
export function toOpenAITool(tool: LLMToolDef): ChatCompletionTool {
  const { name, description, inputSchema } = tool
  return {
    type: 'function',
    function: {
      name,
      description,
      parameters: inputSchema as Record<string, unknown>,
    },
  }
}
/**
 * True when the message carries at least one `tool_result` content block.
 * Such messages must be split into dedicated OpenAI `tool`-role messages
 * rather than serialised as a single user message.
 */
function hasToolResults(msg: LLMMessage): boolean {
  for (const block of msg.content) {
    if (block.type === 'tool_result') {
      return true
    }
  }
  return false
}
/**
 * Convert framework {@link LLMMessage}s into OpenAI
 * {@link ChatCompletionMessageParam} entries.
 *
 * OpenAI represents tool results as top-level messages with a dedicated
 * `tool` role, whereas the framework embeds them as content blocks inside a
 * `user` message — so each `tool_result` block is expanded into its own
 * `tool`-role message, with any remaining text/image blocks folded into a
 * user message that precedes the group.
 */
export function toOpenAIMessages(messages: LLMMessage[]): ChatCompletionMessageParam[] {
  const out: ChatCompletionMessageParam[] = []
  for (const msg of messages) {
    if (msg.role === 'assistant') {
      out.push(toOpenAIAssistantMessage(msg))
      continue
    }
    // user role
    if (!hasToolResults(msg)) {
      out.push(toOpenAIUserMessage(msg))
      continue
    }
    // Text/image blocks (if any) become one user message…
    const plainBlocks = msg.content.filter((b) => b.type !== 'tool_result')
    if (plainBlocks.length > 0) {
      out.push(toOpenAIUserMessage({ role: 'user', content: plainBlocks }))
    }
    // …then each tool_result block becomes an independent tool message.
    for (const block of msg.content) {
      if (block.type === 'tool_result') {
        out.push({
          role: 'tool',
          tool_call_id: block.tool_use_id,
          content: block.content,
        })
      }
    }
  }
  return out
}
/**
 * Convert a `user`-role framework message into an OpenAI user message.
 *
 * A lone text block uses the compact string form; otherwise each block is
 * mapped to an OpenAI content part, with image blocks encoded as data-URL
 * `image_url` parts.
 */
function toOpenAIUserMessage(msg: LLMMessage): ChatCompletionUserMessageParam {
  const [first] = msg.content
  if (msg.content.length === 1 && first?.type === 'text') {
    return { role: 'user', content: first.text }
  }
  type ContentPart = OpenAI.Chat.ChatCompletionContentPartText | OpenAI.Chat.ChatCompletionContentPartImage
  const parts = msg.content.flatMap((block): ContentPart[] => {
    switch (block.type) {
      case 'text':
        return [{ type: 'text', text: block.text }]
      case 'image':
        return [{
          type: 'image_url',
          image_url: {
            url: `data:${block.source.media_type};base64,${block.source.data}`,
          },
        }]
      default:
        // tool_result blocks are expanded by toOpenAIMessages; skip here.
        return []
    }
  })
  return { role: 'user', content: parts }
}
/**
 * Convert an `assistant`-role framework message into an OpenAI assistant
 * message. `text` blocks are concatenated into the message content and
 * `tool_use` blocks become `tool_calls` (with JSON-encoded arguments).
 */
function toOpenAIAssistantMessage(msg: LLMMessage): ChatCompletionAssistantMessageParam {
  const textParts: string[] = []
  const toolCalls: ChatCompletionMessageToolCall[] = []
  for (const block of msg.content) {
    if (block.type === 'text') {
      textParts.push(block.text)
    } else if (block.type === 'tool_use') {
      toolCalls.push({
        id: block.id,
        type: 'function',
        function: {
          name: block.name,
          arguments: JSON.stringify(block.input),
        },
      })
    }
  }
  const result: ChatCompletionAssistantMessageParam = {
    role: 'assistant',
    // OpenAI expects null (not '') when there is no text content.
    content: textParts.length === 0 ? null : textParts.join(''),
  }
  if (toolCalls.length !== 0) {
    result.tool_calls = toolCalls
  }
  return result
}
// ---------------------------------------------------------------------------
// OpenAI → Framework
// ---------------------------------------------------------------------------
/**
 * Convert an OpenAI {@link ChatCompletion} into a framework {@link LLMResponse}.
 *
 * Only the first choice (index 0) is consumed, consistent with the
 * framework's single-output agent design.
 *
 * @throws Error when the completion contains no choices.
 */
export function fromOpenAICompletion(completion: ChatCompletion): LLMResponse {
  const [choice] = completion.choices
  if (choice == null) {
    throw new Error('OpenAI returned a completion with no choices')
  }
  const message = choice.message
  const content: ContentBlock[] = []
  if (message.content != null) {
    const textBlock: TextBlock = { type: 'text', text: message.content }
    content.push(textBlock)
  }
  for (const call of message.tool_calls ?? []) {
    const toolUseBlock: ToolUseBlock = {
      type: 'tool_use',
      id: call.id,
      name: call.function.name,
      input: parseToolArguments(call.function.arguments),
    }
    content.push(toolUseBlock)
  }
  return {
    id: completion.id,
    content,
    model: completion.model,
    stop_reason: normalizeFinishReason(choice.finish_reason ?? 'stop'),
    usage: {
      input_tokens: completion.usage?.prompt_tokens ?? 0,
      output_tokens: completion.usage?.completion_tokens ?? 0,
    },
  }
}

/**
 * Parse a tool call's JSON `arguments` string into a plain object.
 * Non-object or malformed payloads from the model yield an empty object.
 */
function parseToolArguments(raw: string): Record<string, unknown> {
  try {
    const parsed: unknown = JSON.parse(raw)
    if (parsed !== null && typeof parsed === 'object' && !Array.isArray(parsed)) {
      return parsed as Record<string, unknown>
    }
  } catch {
    // Malformed arguments from the model — surface as empty object.
  }
  return {}
}
/**
 * Lookup table backing {@link normalizeFinishReason}. A `Map` is used (rather
 * than a plain object) so prototype keys like `'toString'` cannot collide.
 */
const FINISH_REASON_TO_STOP_REASON: ReadonlyMap<string, string> = new Map([
  ['stop', 'end_turn'],
  ['tool_calls', 'tool_use'],
  ['length', 'max_tokens'],
  ['content_filter', 'content_filter'],
])

/**
 * Normalize an OpenAI `finish_reason` string to the framework's canonical
 * stop-reason vocabulary.
 *
 * Mapping:
 * - `'stop'` → `'end_turn'`
 * - `'tool_calls'` → `'tool_use'`
 * - `'length'` → `'max_tokens'`
 * - `'content_filter'` → `'content_filter'`
 * - anything else → passed through unchanged
 */
export function normalizeFinishReason(reason: string): string {
  return FINISH_REASON_TO_STOP_REASON.get(reason) ?? reason
}
/**
 * Build the final OpenAI message array: an optional leading `system` message
 * (when `systemPrompt` is a non-empty string) followed by the converted
 * conversation messages.
 */
export function buildOpenAIMessageList(
  messages: LLMMessage[],
  systemPrompt: string | undefined,
): ChatCompletionMessageParam[] {
  const converted = toOpenAIMessages(messages)
  if (systemPrompt === undefined || systemPrompt.length === 0) {
    return converted
  }
  return [{ role: 'system', content: systemPrompt }, ...converted]
}

View File

@ -32,14 +32,7 @@
import OpenAI from 'openai'
import type {
ChatCompletion,
ChatCompletionAssistantMessageParam,
ChatCompletionChunk,
ChatCompletionMessageParam,
ChatCompletionMessageToolCall,
ChatCompletionTool,
ChatCompletionToolMessageParam,
ChatCompletionUserMessageParam,
} from 'openai/resources/chat/completions/index.js'
import type {
@ -55,231 +48,12 @@ import type {
ToolUseBlock,
} from '../types.js'
// ---------------------------------------------------------------------------
// Internal helpers — framework → OpenAI
// ---------------------------------------------------------------------------
/**
* Convert a framework {@link LLMToolDef} to an OpenAI {@link ChatCompletionTool}.
*
* OpenAI wraps the function definition inside a `function` key and a `type`
* discriminant. The `inputSchema` is already a JSON Schema object.
*/
function toOpenAITool(tool: LLMToolDef): ChatCompletionTool {
return {
type: 'function',
function: {
name: tool.name,
description: tool.description,
parameters: tool.inputSchema as Record<string, unknown>,
},
}
}
/**
* Determine whether a framework message contains any `tool_result` content
* blocks, which must be serialised as separate OpenAI `tool`-role messages.
*/
function hasToolResults(msg: LLMMessage): boolean {
return msg.content.some((b) => b.type === 'tool_result')
}
/**
* Convert a single framework {@link LLMMessage} into one or more OpenAI
* {@link ChatCompletionMessageParam} entries.
*
* The expansion is necessary because OpenAI represents tool results as
* top-level messages with role `tool`, whereas in our model they are content
* blocks inside a `user` message.
*
* Expansion rules:
* - A `user` message containing only text/image blocks → single user message
* - A `user` message containing `tool_result` blocks → one `tool` message per
* tool_result block; any remaining text/image blocks are folded into an
* additional user message prepended to the group
* - An `assistant` message → single assistant message with optional tool_calls
*/
function toOpenAIMessages(messages: LLMMessage[]): ChatCompletionMessageParam[] {
const result: ChatCompletionMessageParam[] = []
for (const msg of messages) {
if (msg.role === 'assistant') {
result.push(toOpenAIAssistantMessage(msg))
} else {
// user role
if (!hasToolResults(msg)) {
result.push(toOpenAIUserMessage(msg))
} else {
// Split: text/image blocks become a user message (if any exist), then
// each tool_result block becomes an independent tool message.
const nonToolBlocks = msg.content.filter((b) => b.type !== 'tool_result')
if (nonToolBlocks.length > 0) {
result.push(toOpenAIUserMessage({ role: 'user', content: nonToolBlocks }))
}
for (const block of msg.content) {
if (block.type === 'tool_result') {
const toolMsg: ChatCompletionToolMessageParam = {
role: 'tool',
tool_call_id: block.tool_use_id,
content: block.content,
}
result.push(toolMsg)
}
}
}
}
}
return result
}
/**
* Convert a `user`-role framework message into an OpenAI user message.
* Image blocks are converted to the OpenAI image_url content part format.
*/
function toOpenAIUserMessage(msg: LLMMessage): ChatCompletionUserMessageParam {
// If the entire content is a single text block, use the compact string form
// to keep the request payload smaller.
if (msg.content.length === 1 && msg.content[0]?.type === 'text') {
return { role: 'user', content: msg.content[0].text }
}
type ContentPart = OpenAI.Chat.ChatCompletionContentPartText | OpenAI.Chat.ChatCompletionContentPartImage
const parts: ContentPart[] = []
for (const block of msg.content) {
if (block.type === 'text') {
parts.push({ type: 'text', text: block.text })
} else if (block.type === 'image') {
parts.push({
type: 'image_url',
image_url: {
url: `data:${block.source.media_type};base64,${block.source.data}`,
},
})
}
// tool_result blocks are handled by the caller (toOpenAIMessages); skip here.
}
return { role: 'user', content: parts }
}
/**
* Convert an `assistant`-role framework message into an OpenAI assistant message.
*
* Any `tool_use` blocks become `tool_calls`; `text` blocks become the message content.
*/
function toOpenAIAssistantMessage(msg: LLMMessage): ChatCompletionAssistantMessageParam {
const toolCalls: ChatCompletionMessageToolCall[] = []
const textParts: string[] = []
for (const block of msg.content) {
if (block.type === 'tool_use') {
toolCalls.push({
id: block.id,
type: 'function',
function: {
name: block.name,
arguments: JSON.stringify(block.input),
},
})
} else if (block.type === 'text') {
textParts.push(block.text)
}
}
const assistantMsg: ChatCompletionAssistantMessageParam = {
role: 'assistant',
content: textParts.length > 0 ? textParts.join('') : null,
}
if (toolCalls.length > 0) {
assistantMsg.tool_calls = toolCalls
}
return assistantMsg
}
// ---------------------------------------------------------------------------
// Internal helpers — OpenAI → framework
// ---------------------------------------------------------------------------
/**
* Convert an OpenAI {@link ChatCompletion} into a framework {@link LLMResponse}.
*
* We take only the first choice (index 0), consistent with how the framework
* is designed for single-output agents.
*/
function fromOpenAICompletion(completion: ChatCompletion): LLMResponse {
const choice = completion.choices[0]
if (choice === undefined) {
throw new Error('OpenAI returned a completion with no choices')
}
const content: ContentBlock[] = []
const message = choice.message
if (message.content !== null && message.content !== undefined) {
const textBlock: TextBlock = { type: 'text', text: message.content }
content.push(textBlock)
}
for (const toolCall of message.tool_calls ?? []) {
let parsedInput: Record<string, unknown> = {}
try {
const parsed: unknown = JSON.parse(toolCall.function.arguments)
if (parsed !== null && typeof parsed === 'object' && !Array.isArray(parsed)) {
parsedInput = parsed as Record<string, unknown>
}
} catch {
// Malformed arguments from the model — surface as empty object.
}
const toolUseBlock: ToolUseBlock = {
type: 'tool_use',
id: toolCall.id,
name: toolCall.function.name,
input: parsedInput,
}
content.push(toolUseBlock)
}
const stopReason = normalizeFinishReason(choice.finish_reason ?? 'stop')
return {
id: completion.id,
content,
model: completion.model,
stop_reason: stopReason,
usage: {
input_tokens: completion.usage?.prompt_tokens ?? 0,
output_tokens: completion.usage?.completion_tokens ?? 0,
},
}
}
/**
* Normalize an OpenAI `finish_reason` string to the framework's canonical
* stop-reason vocabulary so consumers never need to branch on provider-specific
* strings.
*
* Mapping:
* - `'stop'` → `'end_turn'`
* - `'tool_calls'` → `'tool_use'`
* - `'length'` → `'max_tokens'`
* - `'content_filter'` → `'content_filter'`
* - anything else → passed through unchanged
*/
function normalizeFinishReason(reason: string): string {
switch (reason) {
case 'stop': return 'end_turn'
case 'tool_calls': return 'tool_use'
case 'length': return 'max_tokens'
case 'content_filter': return 'content_filter'
default: return reason
}
}
import {
toOpenAITool,
fromOpenAICompletion,
normalizeFinishReason,
buildOpenAIMessageList,
} from './openai-common.js'
// ---------------------------------------------------------------------------
// Adapter implementation
@ -484,31 +258,6 @@ export class OpenAIAdapter implements LLMAdapter {
}
}
// ---------------------------------------------------------------------------
// Private utility
// ---------------------------------------------------------------------------
/**
* Prepend a system message when `systemPrompt` is provided, then append the
* converted conversation messages.
*
* OpenAI represents system instructions as a message with `role: 'system'`
* at the top of the array, not as a separate API parameter.
*/
function buildOpenAIMessageList(
messages: LLMMessage[],
systemPrompt: string | undefined,
): ChatCompletionMessageParam[] {
const result: ChatCompletionMessageParam[] = []
if (systemPrompt !== undefined && systemPrompt.length > 0) {
result.push({ role: 'system', content: systemPrompt })
}
result.push(...toOpenAIMessages(messages))
return result
}
// Re-export types that consumers of this module commonly need alongside the adapter.
export type {
ContentBlock,