diff --git a/examples/04-multi-model-team.ts b/examples/04-multi-model-team.ts index 8642b80..684ce5d 100644 --- a/examples/04-multi-model-team.ts +++ b/examples/04-multi-model-team.ts @@ -136,6 +136,7 @@ function buildCustomAgent( // --------------------------------------------------------------------------- const useOpenAI = Boolean(process.env.OPENAI_API_KEY) +const useOllama = Boolean(process.env.OLLAMA_BASE_URL); const researcherConfig: AgentConfig = { name: 'researcher', @@ -151,7 +152,8 @@ Return the raw rates as a JSON object keyed by pair, e.g. { "USD/EUR": 0.91, "US const analystConfig: AgentConfig = { name: 'analyst', - model: useOllama ? 'llama3.1' : useOpenAI ? 'gpt-4o-mini' : 'claude-3-5-sonnet-20240620',\n provider: useOllama ? 'ollama' : useOpenAI ? 'openai' : 'anthropic', + model: useOllama ? 'llama3.1' : useOpenAI ? 'gpt-4o-mini' : 'claude-3-5-sonnet-20240620', + provider: useOllama ? 'ollama' : useOpenAI ? 'openai' : 'anthropic', systemPrompt: `You are a foreign exchange analyst. You receive exchange rate data and produce a short briefing. Use format_currency to show example conversions. diff --git a/package-lock.json b/package-lock.json index 06af6f5..b74dcd0 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,15 +1,16 @@ { - "name": "maestro-agents", + "name": "@jackchen_me/open-multi-agent", "version": "0.1.0", "lockfileVersion": 3, "requires": true, "packages": { "": { - "name": "maestro-agents", + "name": "@jackchen_me/open-multi-agent", "version": "0.1.0", "license": "MIT", "dependencies": { "@anthropic-ai/sdk": "^0.52.0", + "ollama": "^0.3.0", "openai": "^4.73.0", "zod": "^3.23.0" }, diff --git a/package.json b/package.json index ee0e26a..c9910a8 100644 --- a/package.json +++ b/package.json @@ -1,49 +1 @@ -{ - "name": "@jackchen_me/open-multi-agent", - "version": "0.1.0", - "description": "Production-grade multi-agent orchestration framework. 
Model-agnostic, supports team collaboration, task scheduling, and inter-agent communication.", - "type": "module", - "main": "dist/index.js", - "types": "dist/index.d.ts", - "exports": { - ".": { - "types": "./dist/index.d.ts", - "import": "./dist/index.js" - } - }, - "scripts": { - "build": "tsc", - "dev": "tsc --watch", - "test": "vitest run", - "test:watch": "vitest", - "lint": "tsc --noEmit", - "prepublishOnly": "npm run build" - }, - "keywords": [ - "ai", - "agent", - "multi-agent", - "orchestration", - "llm", - "claude", - "openai", - "mcp", - "tool-use", - "agent-framework" - ], - "author": "", - "license": "MIT", - "engines": { - "node": ">=18.0.0" - }, - "dependencies": { - "@anthropic-ai/sdk": "^0.52.0", - "openai": "^4.73.0", - "zod": "^3.23.0" - }, - "devDependencies": { - "typescript": "^5.6.0", - "vitest": "^2.1.0", - "@types/node": "^22.0.0" - } -} +{"name":"@jackchen_me/open-multi-agent","version":"0.1.0","description":"Production-grade multi-agent orchestration framework. 
Model-agnostic, supports team collaboration, task scheduling, and inter-agent communication.","type":"module","main":"dist/index.js","types":"dist/index.d.ts","exports":{".":{"types":"./dist/index.d.ts","import":"./dist/index.js"}},"scripts":{"build":"tsc","dev":"tsc --watch","test":"vitest run","test:watch":"vitest","lint":"tsc --noEmit","prepublishOnly":"npm run build"},"keywords":["ai","agent","multi-agent","orchestration","llm","claude","openai","ollama","mcp","tool-use","agent-framework"],"author":"","license":"MIT","engines":{"node":">=18.0.0"},"dependencies":{"@anthropic-ai/sdk":"^0.52.0","openai":"^4.73.0","zod":"^3.23.0"},"devDependencies":{"typescript":"^5.6.0","vitest":"^2.1.0","@types/node":"^22.0.0"}} diff --git a/src/llm/adapter.ts b/src/llm/adapter.ts index 1ed605f..d5ec557 100644 --- a/src/llm/adapter.ts +++ b/src/llm/adapter.ts @@ -78,4 +78,3 @@ export async function createAdapter( } } } - diff --git a/src/llm/ollama.ts b/src/llm/ollama.ts index 876a8ea..6802f69 100644 --- a/src/llm/ollama.ts +++ b/src/llm/ollama.ts @@ -56,172 +56,15 @@ import type { // Internal helpers — framework → Ollama (same as OpenAI) // --------------------------------------------------------------------------- -/** - * Convert a framework {@link LLMToolDef} to an Ollama {@link ChatCompletionTool}. 
- */ -function toOpenAITool(tool: LLMToolDef): ChatCompletionTool { - return { - type: 'function', - function: { - name: tool.name, - description: tool.description, - parameters: tool.inputSchema as Record, - }, - } -} - -function hasToolResults(msg: LLMMessage): boolean { - return msg.content.some((b) => b.type === 'tool_result') -} - -function toOpenAIMessages(messages: LLMMessage[]): ChatCompletionMessageParam[] { - const result: ChatCompletionMessageParam[] = [] - - for (const msg of messages) { - if (msg.role === 'assistant') { - result.push(toOpenAIAssistantMessage(msg)) - } else { - if (!hasToolResults(msg)) { - result.push(toOpenAIUserMessage(msg)) - } else { - const nonToolBlocks = msg.content.filter((b) => b.type !== 'tool_result') - if (nonToolBlocks.length > 0) { - result.push(toOpenAIUserMessage({ role: 'user', content: nonToolBlocks })) - } - - for (const block of msg.content) { - if (block.type === 'tool_result') { - const toolMsg: ChatCompletionToolMessageParam = { - role: 'tool', - tool_call_id: block.tool_use_id, - content: block.content, - } - result.push(toolMsg) - } - } - } - } - } - - return result -} - -function toOpenAIUserMessage(msg: LLMMessage): ChatCompletionUserMessageParam { - if (msg.content.length === 1 && msg.content[0]?.type === 'text') { - return { role: 'user', content: msg.content[0].text } - } - - type ContentPart = OpenAI.Chat.ChatCompletionContentPartText | OpenAI.Chat.ChatCompletionContentPartImage - const parts: ContentPart[] = [] - - for (const block of msg.content) { - if (block.type === 'text') { - parts.push({ type: 'text', text: block.text }) - } else if (block.type === 'image') { - parts.push({ - type: 'image_url', - image_url: { - url: `data:${block.source.media_type};base64,${block.source.data}`, - }, - }) - } - } - - return { role: 'user', content: parts } -} - -function toOpenAIAssistantMessage(msg: LLMMessage): ChatCompletionAssistantMessageParam { - const toolCalls: ChatCompletionMessageToolCall[] = [] - const 
textParts: string[] = [] - - for (const block of msg.content) { - if (block.type === 'tool_use') { - toolCalls.push({ - id: block.id, - type: 'function', - function: { - name: block.name, - arguments: JSON.stringify(block.input), - }, - }) - } else if (block.type === 'text') { - textParts.push(block.text) - } - } - - const assistantMsg: ChatCompletionAssistantMessageParam = { - role: 'assistant', - content: textParts.length > 0 ? textParts.join('') : null, - } - - if (toolCalls.length > 0) { - assistantMsg.tool_calls = toolCalls - } - - return assistantMsg -} - -// --------------------------------------------------------------------------- -// Internal helpers — Ollama → framework (same as OpenAI) -// --------------------------------------------------------------------------- - -function fromOpenAICompletion(completion: ChatCompletion): LLMResponse { - const choice = completion.choices[0] - if (choice === undefined) { - throw new Error('Ollama returned a completion with no choices') - } - - const content: ContentBlock[] = [] - const message = choice.message - - if (message.content !== null && message.content !== undefined) { - const textBlock: TextBlock = { type: 'text', text: message.content } - content.push(textBlock) - } - - for (const toolCall of message.tool_calls ?? []) { - let parsedInput: Record = {} - try { - const parsed: unknown = JSON.parse(toolCall.function.arguments) - if (parsed !== null && typeof parsed === 'object' && !Array.isArray(parsed)) { - parsedInput = parsed as Record - } - } catch { - // Malformed arguments from the model — surface as empty object. - } - - const toolUseBlock: ToolUseBlock = { - type: 'tool_use', - id: toolCall.id, - name: toolCall.function.name, - input: parsedInput, - } - content.push(toolUseBlock) - } - - const stopReason = normalizeFinishReason(choice.finish_reason ?? 
'stop') - - return { - id: completion.id, - content, - model: completion.model, - stop_reason: stopReason, - usage: { - input_tokens: completion.usage?.prompt_tokens ?? 0, - output_tokens: completion.usage?.completion_tokens ?? 0, - }, - } -} - -function normalizeFinishReason(reason: string): string { - switch (reason) { - case 'stop': return 'end_turn' - case 'tool_calls': return 'tool_use' - case 'length': return 'max_tokens' - case 'content_filter': return 'content_filter' - default: return reason - } -} +import { + toOpenAITool, + hasToolResults, + toOpenAIMessages, + toOpenAIUserMessage, + toOpenAIAssistantMessage, + fromOpenAICompletion, + normalizeFinishReason, +} from './openai-common.js' // --------------------------------------------------------------------------- // Adapter implementation @@ -240,7 +83,8 @@ export class OllamaAdapter implements LLMAdapter { constructor() { this.#client = new OpenAI({ - baseURL: process.env.OLLAMA_BASE_URL ?? 'http://localhost:11434', + baseURL: process.env.OLLAMA_BASE_URL ?? 'http://localhost:11434/v1', + apiKey: 'ollama', // dummy API key }) } diff --git a/src/llm/openai-common.ts b/src/llm/openai-common.ts new file mode 100644 index 0000000..08a77f0 --- /dev/null +++ b/src/llm/openai-common.ts @@ -0,0 +1,231 @@ +/** + * @fileoverview Shared OpenAI wire-format helpers for Ollama and OpenAI adapters. + * + * These functions convert between the framework's internal types and the + * OpenAI/Ollama Chat Completions wire format. Both adapters should import + * from here rather than duplicating the conversion logic. 
+ */ + +import type { + ChatCompletion, + ChatCompletionAssistantMessageParam, + ChatCompletionChunk, + ChatCompletionMessageParam, + ChatCompletionMessageToolCall, + ChatCompletionTool, + ChatCompletionToolMessageParam, + ChatCompletionUserMessageParam, +} from 'openai/resources/chat/completions/index.js' + +import type { + ContentBlock, + LLMMessage, + LLMResponse, + LLMToolDef, + StreamEvent, + TextBlock, + ToolUseBlock, +} from '../types.js' + +/** + * Convert a framework {@link LLMToolDef} to an OpenAI/Ollama {@link ChatCompletionTool}. + */ +export function toOpenAITool(tool: LLMToolDef): ChatCompletionTool { + return { + type: 'function', + function: { + name: tool.name, + description: tool.description, + parameters: tool.inputSchema as Record<string, unknown>, + }, + } +} + +/** + * Determine whether a framework message contains any `tool_result` content + * blocks, which must be serialised as separate OpenAI/Ollama `tool`-role messages. + */ +export function hasToolResults(msg: LLMMessage): boolean { + return msg.content.some((b) => b.type === 'tool_result') +} + +/** + * Convert a single framework {@link LLMMessage} into one or more OpenAI/Ollama + * {@link ChatCompletionMessageParam} entries. 
+ */ +export function toOpenAIMessages(messages: LLMMessage[]): ChatCompletionMessageParam[] { + const result: ChatCompletionMessageParam[] = [] + + for (const msg of messages) { + if (msg.role === 'assistant') { + result.push(toOpenAIAssistantMessage(msg)) + } else { + if (!hasToolResults(msg)) { + result.push(toOpenAIUserMessage(msg)) + } else { + const nonToolBlocks = msg.content.filter((b) => b.type !== 'tool_result') + if (nonToolBlocks.length > 0) { + result.push(toOpenAIUserMessage({ role: 'user', content: nonToolBlocks })) + } + + for (const block of msg.content) { + if (block.type === 'tool_result') { + const toolMsg: ChatCompletionToolMessageParam = { + role: 'tool', + tool_call_id: block.tool_use_id, + content: block.content, + } + result.push(toolMsg) + } + } + } + } + } + + return result +} + +export function toOpenAIUserMessage(msg: LLMMessage): ChatCompletionUserMessageParam { + if (msg.content.length === 1 && msg.content[0]?.type === 'text') { + return { role: 'user', content: msg.content[0].text } + } + + const parts: Array<{ type: 'text', text: string } | { type: 'image_url', image_url: { url: string } }> = [] + + for (const block of msg.content) { + if (block.type === 'text') { + parts.push({ type: 'text', text: block.text }) + } else if (block.type === 'image') { + parts.push({ + type: 'image_url', + image_url: { + url: `data:${block.source.media_type};base64,${block.source.data}`, + }, + }) + } + } + + return { role: 'user', content: parts } +} + +export function toOpenAIAssistantMessage(msg: LLMMessage): ChatCompletionAssistantMessageParam { + const toolCalls: ChatCompletionMessageToolCall[] = [] + const textParts: string[] = [] + + for (const block of msg.content) { + if (block.type === 'tool_use') { + toolCalls.push({ + id: block.id, + type: 'function', + function: { + name: block.name, + arguments: JSON.stringify(block.input), + }, + }) + } else if (block.type === 'text') { + textParts.push(block.text) + } + } + + const assistantMsg: 
ChatCompletionAssistantMessageParam = { + role: 'assistant', + content: textParts.length > 0 ? textParts.join('') : null, + } + + if (toolCalls.length > 0) { + assistantMsg.tool_calls = toolCalls + } + + return assistantMsg +} + +/** + * Convert an OpenAI/Ollama {@link ChatCompletion} into a framework {@link LLMResponse}. + */ +export function fromOpenAICompletion(completion: ChatCompletion): LLMResponse { + const choice = completion.choices[0] + if (choice === undefined) { + throw new Error('Completion returned with no choices') + } + + const content: ContentBlock[] = [] + const message = choice.message + + if (message.content !== null && message.content !== undefined) { + const textBlock: TextBlock = { type: 'text', text: message.content } + content.push(textBlock) + } + + for (const toolCall of message.tool_calls ?? []) { + let parsedInput: Record<string, unknown> = {} + try { + const parsed: unknown = JSON.parse(toolCall.function.arguments) + if (parsed !== null && typeof parsed === 'object' && !Array.isArray(parsed)) { + parsedInput = parsed as Record<string, unknown> + } + } catch { + // Malformed arguments from the model — surface as empty object. + } + + const toolUseBlock: ToolUseBlock = { + type: 'tool_use', + id: toolCall.id, + name: toolCall.function.name, + input: parsedInput, + } + content.push(toolUseBlock) + } + + const stopReason = normalizeFinishReason(choice.finish_reason ?? 'stop') + + return { + id: completion.id, + content, + model: completion.model, + stop_reason: stopReason, + usage: { + input_tokens: completion.usage?.prompt_tokens ?? 0, + output_tokens: completion.usage?.completion_tokens ?? 0, + }, + } +} + +/** + * Normalize an OpenAI/Ollama `finish_reason` string to the framework's canonical + * stop-reason vocabulary. 
+ */ +export function normalizeFinishReason(reason: string): string { + switch (reason) { + case 'stop': return 'end_turn' + case 'tool_calls': return 'tool_use' + case 'length': return 'max_tokens' + case 'content_filter': return 'content_filter' + default: return reason + } +} + +/** + * Prepend a system message when `systemPrompt` is provided, then append the + * converted conversation messages. + */ +export function buildOpenAIMessageList( + messages: LLMMessage[], + systemPrompt: string | undefined, +): ChatCompletionMessageParam[] { + const result: ChatCompletionMessageParam[] = [] + + if (systemPrompt !== undefined && systemPrompt.length > 0) { + result.push({ role: 'system', content: systemPrompt }) + } + + result.push(...toOpenAIMessages(messages)) + return result +} + +// Re-export types that consumers of this module commonly need alongside the helpers. +export type { + ContentBlock, + LLMMessage, + LLMToolDef, + ToolUseBlock, +} diff --git a/src/types.ts b/src/types.ts index 8d6fdbe..ef6f005 100644 --- a/src/types.ts +++ b/src/types.ts @@ -186,7 +186,7 @@ export interface ToolDefinition> { export interface AgentConfig { readonly name: string readonly model: string -provider?: 'anthropic' | 'openai' | 'ollama' + readonly provider?: 'anthropic' | 'ollama' | 'openai' readonly systemPrompt?: string /** Names of tools (from the tool registry) available to this agent. */ readonly tools?: readonly string[] @@ -285,7 +285,7 @@ export interface OrchestratorEvent { export interface OrchestratorConfig { readonly maxConcurrency?: number readonly defaultModel?: string -defaultProvider?: 'anthropic' | 'openai' | 'ollama' + readonly defaultProvider?: 'anthropic' | 'ollama' | 'openai' onProgress?: (event: OrchestratorEvent) => void }