adding ollama adapter

This commit is contained in:
Rakesh 2026-04-02 10:49:10 +05:30
parent a6244cfe64
commit 53e2d11c4e
9 changed files with 730 additions and 15 deletions

README.md
View File

@@ -187,6 +187,33 @@ const team = orchestrator.createTeam('mixed-team', {
const result = await orchestrator.runTeam(team, 'Build a CLI tool that converts JSON to CSV.')
```
### Local Ollama Support
```typescript
const orchestrator = new OpenMultiAgent({
defaultProvider: 'ollama',
defaultModel: 'llama2',
})
const localAgent: AgentConfig = {
name: 'assistant',
model: 'llama2',
provider: 'ollama',
systemPrompt: 'You are a local assistant running on Ollama.',
tools: ['bash', 'file_read', 'file_write'],
}
const team = orchestrator.createTeam('local-team', {
name: 'local-team',
agents: [localAgent],
sharedMemory: true,
})
const result = await orchestrator.runTeam(team, 'Create a small script that lists files in the current directory.')
```
Set `OLLAMA_API_KEY` when your local Ollama instance requires authentication. The adapter defaults to `http://localhost:11434`.
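
If the server is remote or sits behind an authenticating proxy, the adapter can also be constructed directly. A minimal sketch, assuming the package-level export added in this commit and the `new OllamaAdapter(apiKey?, baseUrl?)` constructor from `src/llm/ollama.ts` (the host name is illustrative):

```typescript
import { OllamaAdapter } from 'open-multi-agent'

// Bearer auth is only attached when an API key is provided.
const adapter = new OllamaAdapter(process.env.OLLAMA_API_KEY, 'http://gpu-box.local:11434')

const response = await adapter.chat(
  [{ role: 'user', content: [{ type: 'text', text: 'Say hello.' }] }],
  { model: 'llama2' },
)
console.log(response.content)
```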
### Streaming Output
```typescript

examples/01-single-agent.ts
View File

@@ -8,18 +8,22 @@
* npx tsx examples/01-single-agent.ts
*
* Prerequisites:
- * ANTHROPIC_API_KEY env var must be set.
+ * Ollama server running at http://localhost:11434
+ * OLLAMA_API_KEY env var may be set if your server requires auth.
*/
import { OpenMultiAgent, Agent, ToolRegistry, ToolExecutor, registerBuiltInTools } from '../src/index.js'
import type { OrchestratorEvent } from '../src/types.js'
const OLLAMA_MODEL = process.env.OLLAMA_MODEL ?? 'llama2'
// ---------------------------------------------------------------------------
// Part 1: Single agent via OpenMultiAgent (simplest path)
// ---------------------------------------------------------------------------
const orchestrator = new OpenMultiAgent({
- defaultModel: 'claude-sonnet-4-6',
+ defaultProvider: 'ollama',
+ defaultModel: OLLAMA_MODEL,
onProgress: (event: OrchestratorEvent) => {
if (event.type === 'agent_start') {
console.log(`[start] agent=${event.agent}`)
@@ -34,10 +38,11 @@ console.log('Part 1: runAgent() — single one-shot task\n')
const result = await orchestrator.runAgent(
{
name: 'coder',
- model: 'claude-sonnet-4-6',
- systemPrompt: `You are a focused TypeScript developer.
- When asked to implement something, write clean, minimal code with no extra commentary.
- Use the bash tool to run commands and the file tools to read/write files.`,
+ provider: 'ollama',
+ model: OLLAMA_MODEL,
+ systemPrompt: `You are a digital marketing agency.
+ When asked to get clients for our cybersecurity solutions company, find startups with funding, web-scrape their niche, and collect their contact info.
+ Use that info to send a cold email.`,
tools: ['bash', 'file_read', 'file_write'],
maxTurns: 8,
},
@@ -80,7 +85,8 @@ const executor = new ToolExecutor(registry)
const streamingAgent = new Agent(
{
name: 'explainer',
- model: 'claude-sonnet-4-6',
+ provider: 'ollama',
+ model: OLLAMA_MODEL,
systemPrompt: 'You are a concise technical writer. Keep explanations brief.',
maxTurns: 3,
},
@@ -111,7 +117,8 @@ console.log('\nPart 3: Agent.prompt() — multi-turn conversation\n')
const conversationAgent = new Agent(
{
name: 'tutor',
- model: 'claude-sonnet-4-6',
+ provider: 'ollama',
+ model: OLLAMA_MODEL,
systemPrompt: 'You are a TypeScript tutor. Give short, direct answers.',
maxTurns: 2,
},
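
With the `OLLAMA_MODEL` fallback above, the example targets any locally pulled model, e.g. `OLLAMA_MODEL=qwen npx tsx examples/01-single-agent.ts`.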

package-lock.json (generated, 4 changes)
View File

@@ -1,11 +1,11 @@
{
- "name": "maestro-agents",
+ "name": "open-multi-agent",
"version": "0.1.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
- "name": "maestro-agents",
+ "name": "open-multi-agent",
"version": "0.1.0",
"license": "MIT",
"dependencies": {

src/index.ts
View File

@@ -104,6 +104,7 @@ export {
// ---------------------------------------------------------------------------
export { createAdapter } from './llm/adapter.js'
export { OllamaAdapter } from './llm/ollama.js'
export type { SupportedProvider } from './llm/adapter.js'
// ---------------------------------------------------------------------------

src/llm/adapter.ts
View File

@@ -37,13 +37,13 @@ import type { LLMAdapter } from '../types.js'
* Additional providers can be integrated by implementing {@link LLMAdapter}
* directly and bypassing this factory.
*/
- export type SupportedProvider = 'anthropic' | 'openai'
+ export type SupportedProvider = 'anthropic' | 'openai' | 'ollama'
/**
* Instantiate the appropriate {@link LLMAdapter} for the given provider.
*
* API keys fall back to the standard environment variables
- * (`ANTHROPIC_API_KEY` / `OPENAI_API_KEY`) when not supplied explicitly.
+ * (`ANTHROPIC_API_KEY` / `OPENAI_API_KEY` / `OLLAMA_API_KEY`) when not supplied explicitly.
*
* Adapters are imported lazily so that projects using only one provider
* are not forced to install the SDK for the other.
@@ -65,6 +65,10 @@ export async function createAdapter(
const { OpenAIAdapter } = await import('./openai.js')
return new OpenAIAdapter(apiKey)
}
case 'ollama': {
const { OllamaAdapter } = await import('./ollama.js')
return new OllamaAdapter(apiKey)
}
default: {
// The `never` cast here makes TypeScript enforce exhaustiveness.
const _exhaustive: never = provider
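
For reference, a minimal sketch of the new code path (message and model are illustrative; `createAdapter('ollama')` lazily imports `./ollama.js` as shown above):

```typescript
import { createAdapter } from './src/llm/adapter.js'

const adapter = await createAdapter('ollama')

const reply = await adapter.chat(
  [{ role: 'user', content: [{ type: 'text', text: 'ping' }] }],
  { model: 'llama2' },
)
// finish_reason 'stop' is normalized to 'end_turn' by the adapter.
console.log(reply.stop_reason)
```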

src/llm/ollama.test.ts (new file, 141 lines)
View File

@@ -0,0 +1,141 @@
import { afterEach, describe, expect, it, vi } from 'vitest'
import { OllamaAdapter } from './ollama.js'
import { createAdapter } from './adapter.js'
const encoder = new TextEncoder()
function createFetchMock(response: unknown): ReturnType<typeof vi.fn> {
return vi.fn(async () => ({
ok: true,
status: 200,
json: async () => response,
text: async () => JSON.stringify(response),
body: new ReadableStream<Uint8Array>({
start(controller) {
controller.close()
},
}),
}))
}
describe('OllamaAdapter', () => {
afterEach(() => {
vi.restoreAllMocks()
})
it('creates an Ollama adapter through createAdapter()', async () => {
const adapter = await createAdapter('ollama')
expect(adapter.name).toBe('ollama')
})
it('sends chat requests to the local Ollama endpoint', async () => {
const adapter = new OllamaAdapter(undefined, 'http://localhost:11434')
const mockResponse = {
id: 'abc123',
model: 'qwen',
choices: [
{
message: {
role: 'assistant',
content: [{ type: 'text', text: 'hello' }],
},
finish_reason: 'stop',
},
],
usage: {
prompt_tokens: 1,
completion_tokens: 2,
},
}
const fetchMock = vi.fn(async (_url: string, init: RequestInit) => ({
ok: true,
status: 200,
json: async () => mockResponse,
text: async () => JSON.stringify(mockResponse),
body: new ReadableStream<Uint8Array>({
start(controller) {
controller.close()
},
}),
}))
vi.stubGlobal('fetch', fetchMock)
const result = await adapter.chat(
[{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],
{ model: 'qwen' },
)
expect(fetchMock).toHaveBeenCalledOnce()
const [, init] = fetchMock.mock.calls[0] as [string, RequestInit]
expect(init.method).toBe('POST')
expect(init?.headers).toEqual({ 'Content-Type': 'application/json' })
expect(typeof init?.body).toBe('string')
expect(JSON.parse(String(init?.body))).toMatchObject({
model: 'qwen',
})
expect(result.content).toEqual([{ type: 'text', text: 'hello' }])
expect(result.model).toBe('qwen')
})
it('parses streaming-style chat responses and ignores final empty chunks', async () => {
const adapter = new OllamaAdapter(undefined, 'http://localhost:11434')
const chunk = '{"model":"llama2","message":{"role":"assistant","content":"Hello"}}\n'
+ '{"model":"llama2","message":{"role":"assistant","content":""}}\n'
const fetchMock = vi.fn(async () => ({
ok: true,
status: 200,
json: async () => ({}),
text: async () => chunk,
body: new ReadableStream<Uint8Array>({
start(controller) {
controller.close()
},
}),
}))
vi.stubGlobal('fetch', fetchMock)
const result = await adapter.chat(
[{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],
{ model: 'llama2' },
)
expect(result.content).toEqual([{ type: 'text', text: 'Hello' }])
expect(result.model).toBe('llama2')
})
it('streams SSE events from Ollama and emits done', async () => {
const adapter = new OllamaAdapter(undefined, 'http://localhost:11434')
const chunk = `data: {"choices":[{"delta":{"content":"hi"}}]}\n\n` +
`data: [DONE]\n\n`
const fetchMock = vi.fn(async () => ({
ok: true,
status: 200,
text: async () => chunk,
body: new ReadableStream<Uint8Array>({
start(controller) {
controller.enqueue(encoder.encode(chunk))
controller.close()
},
}),
}))
vi.stubGlobal('fetch', fetchMock)
const events = [] as Array<unknown>
for await (const event of adapter.stream(
[{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],
{ model: 'qwen' },
)) {
events.push(event)
}
expect(events.length).toBeGreaterThanOrEqual(2)
expect(events[0]).toEqual({ type: 'text', data: 'hi' })
expect((events[events.length - 1] as any).type).toBe('done')
})
})
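
One path the suite does not yet exercise is tool calling. A sketch of such a test, assuming the `tool_calls` handling in `parseToolCall` (the mocked response shape is illustrative):

```typescript
it('surfaces tool calls as tool_use content blocks', async () => {
  const adapter = new OllamaAdapter(undefined, 'http://localhost:11434')
  const mockResponse = {
    model: 'llama2',
    message: {
      role: 'assistant',
      content: '',
      tool_calls: [{ id: 'call_1', function: { name: 'bash', arguments: { cmd: 'ls' } } }],
    },
  }
  // chat() only reads response.text(), so the mock can stay minimal.
  vi.stubGlobal('fetch', vi.fn(async () => ({
    ok: true,
    status: 200,
    text: async () => JSON.stringify(mockResponse),
  })))
  const result = await adapter.chat(
    [{ role: 'user', content: [{ type: 'text', text: 'List files' }] }],
    { model: 'llama2' },
  )
  expect(result.content).toContainEqual({
    type: 'tool_use',
    id: 'call_1',
    name: 'bash',
    input: { cmd: 'ls' },
  })
})
```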

src/llm/ollama.ts (new file, 534 lines)
View File

@@ -0,0 +1,534 @@
/**
* @fileoverview Ollama adapter implementing {@link LLMAdapter}.
*
* Supports local Ollama servers via `/api/chat` and handles function calling
* with Ollama's OpenAI-compatible tool definition format.
*
* The adapter is intentionally lightweight: it uses the native Fetch API and
* parses both regular JSON responses and SSE streams from Ollama.
*/
import type {
ContentBlock,
LLMAdapter,
LLMChatOptions,
LLMMessage,
LLMResponse,
LLMStreamOptions,
LLMToolDef,
StreamEvent,
TextBlock,
ToolUseBlock,
ToolResultBlock,
} from '../types.js'
const DEFAULT_BASE_URL = 'http://localhost:11434'
function hasToolResults(msg: LLMMessage): boolean {
return msg.content.some((block) => block.type === 'tool_result')
}
function toOllamaTextContent(msg: LLMMessage): string {
return msg.content
.filter((block) => block.type === 'text')
.map((block) => block.text)
.join('')
}
function toOllamaUserMessage(msg: LLMMessage): Record<string, unknown> {
const text = toOllamaTextContent(msg)
return {
role: 'user',
content: text || undefined,
}
}
function toOllamaToolMessages(msg: LLMMessage): Array<Record<string, unknown>> {
const messages: Array<Record<string, unknown>> = []
for (const block of msg.content) {
if (block.type !== 'tool_result') continue
messages.push({
role: 'tool',
tool_call_id: block.tool_use_id,
content: block.content,
})
}
return messages
}
function toOllamaAssistantMessage(msg: LLMMessage): Record<string, unknown> {
const text = toOllamaTextContent(msg)
const assistantMessage: Record<string, unknown> = {
role: 'assistant',
content: text || undefined,
}
const toolCalls = msg.content
.filter((block) => block.type === 'tool_use')
.map((block) => ({
id: block.id,
type: 'function',
function: {
name: block.name,
arguments: JSON.stringify(block.input),
},
}))
if (toolCalls.length > 0) {
assistantMessage.tool_calls = toolCalls
}
return assistantMessage
}
function toOllamaMessages(messages: LLMMessage[], systemPrompt?: string): Record<string, unknown>[] {
const result: Record<string, unknown>[] = []
if (systemPrompt) {
result.push({ role: 'system', content: systemPrompt })
}
for (const msg of messages) {
if (msg.role === 'assistant') {
result.push(toOllamaAssistantMessage(msg))
continue
}
if (!hasToolResults(msg)) {
result.push(toOllamaUserMessage(msg))
continue
}
const text = toOllamaTextContent(msg)
if (text.length > 0) {
result.push({ role: 'user', content: text })
}
result.push(...toOllamaToolMessages(msg))
}
return result
}
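// Shape example (illustrative): a user turn carrying a tool result,
//   { role: 'user', content: [{ type: 'tool_result', tool_use_id: 't1', content: 'ok' }] }
// is emitted as a standalone Ollama `tool` message,
//   { role: 'tool', tool_call_id: 't1', content: 'ok' },
// with any text blocks from the same turn sent first as a `user` message.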
function toOllamaTool(tool: LLMToolDef): Record<string, unknown> {
// Ollama expects OpenAI-style tool definitions wrapped in a
// `{ type: 'function', function: {...} }` envelope.
return {
type: 'function',
function: {
name: tool.name,
description: tool.description,
parameters: tool.inputSchema,
},
}
}
function normalizeFinishReason(reason: unknown): string {
if (typeof reason !== 'string') {
return 'end_turn'
}
switch (reason) {
case 'stop':
case 'end_turn':
return 'end_turn'
case 'length':
return 'max_tokens'
case 'tool_call':
case 'function_call':
return 'tool_use'
default:
return reason
}
}
function isMeaningfulOllamaMessage(message: any): boolean {
if (!message || typeof message !== 'object') {
return false
}
const content = message.content
if (typeof content === 'string') {
return content.trim().length > 0
}
if (Array.isArray(content)) {
return content.length > 0
}
return false
}
function chooseBestResponseObject(objects: any[]): any {
let best: any = null
for (const obj of objects) {
const message = obj.message ?? obj.choices?.[0]?.message
if (isMeaningfulOllamaMessage(message)) {
best = obj
}
}
return best ?? objects[objects.length - 1]
}
function parseTextAsJson(text: string): any {
const trimmed = text.trim()
if (!trimmed) {
return null
}
try {
return JSON.parse(trimmed)
} catch {
// Fall through to SSE / NDJSON parsing.
}
const objects: any[] = []
// Try server-sent events blocks.
for (const chunk of trimmed.split(/\r?\n\r?\n/)) {
let payload = ''
for (const line of chunk.split(/\r?\n/)) {
if (line.startsWith('data:')) {
const value = line.slice(5).trim()
if (value === '[DONE]') {
continue
}
payload += value
}
}
if (payload.length === 0) {
continue
}
try {
objects.push(JSON.parse(payload))
} catch {
// ignore non-JSON payloads
}
}
if (objects.length > 0) {
return chooseBestResponseObject(objects)
}
// Try line-delimited JSON fallback.
for (const line of trimmed.split(/\r?\n/)) {
const candidate = line.trim()
if (!candidate) continue
try {
objects.push(JSON.parse(candidate))
} catch {
// ignore
}
}
if (objects.length > 0) {
return chooseBestResponseObject(objects)
}
return null
}
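// Recovery example (illustrative): the NDJSON body
//   '{"message":{"content":"Hi"}}\n{"message":{"content":""}}'
// fails the whole-body JSON.parse, yields nothing from the SSE pass, and is
// rescued by the line-delimited pass; chooseBestResponseObject() then keeps
// the first object because the trailing chunk has no meaningful content.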
function parseToolCall(message: any): ToolUseBlock | null {
// Ollama surfaces tool calls either as an OpenAI-style `tool_calls` array
// (with a nested `function` object) or, in some responses, as a singular
// `tool_call` carrying `name`/`arguments` directly.
const raw = message?.tool_calls?.[0] ?? message?.tool_call
const toolCall = raw?.function ?? raw
if (!toolCall || typeof toolCall.name !== 'string') {
return null
}
let input: Record<string, unknown> = {}
if (typeof toolCall.arguments === 'string') {
try {
const parsed = JSON.parse(toolCall.arguments)
if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) {
input = parsed as Record<string, unknown>
}
} catch {
// ignore malformed JSON
}
} else if (toolCall.arguments && typeof toolCall.arguments === 'object') {
input = toolCall.arguments as Record<string, unknown>
}
return {
type: 'tool_use',
id: raw.id ?? toolCall.id ?? `${toolCall.name}:${Math.random().toString(16).slice(2)}`,
name: toolCall.name,
input,
}
}
function parseOllamaContent(message: any): ContentBlock[] {
if (typeof message === 'string') {
return [{ type: 'text', text: message }]
}
const content: ContentBlock[] = []
const items = Array.isArray(message.content) ? message.content : []
for (const item of items) {
if (item?.type === 'text' && typeof item.text === 'string') {
content.push({ type: 'text', text: item.text })
} else if (item?.type === 'tool_use' && typeof item.id === 'string' && typeof item.name === 'string') {
content.push({
type: 'tool_use',
id: item.id,
name: item.name,
input: item.input ?? {},
})
} else if (item?.type === 'tool_result' && typeof item.tool_use_id === 'string') {
content.push({
type: 'tool_result',
tool_use_id: item.tool_use_id,
content: typeof item.content === 'string' ? item.content : String(item.content ?? ''),
is_error: Boolean(item.is_error),
})
} else if (item?.type === 'image' && item.source) {
content.push({
type: 'image',
source: {
type: 'base64',
media_type: item.source.media_type ?? 'image/png',
data: item.source.data ?? '',
},
})
} else if (typeof item === 'string') {
content.push({ type: 'text', text: item })
}
}
return content.length > 0
? content
: [{ type: 'text', text: String(message.content ?? '') }]
}
function buildOllamaResponse(body: any): LLMResponse {
const choice = Array.isArray(body.choices) && body.choices.length > 0
? body.choices[0]
: null
const message = choice?.message ?? body.message ?? body
const content = parseOllamaContent(message)
const toolUse = parseToolCall(message)
if (toolUse) {
content.push(toolUse)
}
return {
id: body.id ?? choice?.id ?? '',
content,
model: body.model ?? choice?.model ?? 'ollama',
stop_reason: normalizeFinishReason(choice?.finish_reason ?? body.finish_reason),
usage: {
input_tokens: body.usage?.prompt_tokens ?? body.usage?.input_tokens ?? 0,
output_tokens: body.usage?.completion_tokens ?? body.usage?.output_tokens ?? 0,
},
}
}
function stripTrailingSlash(url: string): string {
return url.replace(/\/+$/, '')
}
/**
* Lightweight adapter for Ollama's local `/api/chat` endpoint.
*/
export class OllamaAdapter implements LLMAdapter {
readonly name = 'ollama'
readonly #baseUrl: string
readonly #apiKey?: string
constructor(apiKey?: string, baseUrl = DEFAULT_BASE_URL) {
const envApiKey = (globalThis as any).process?.env?.OLLAMA_API_KEY
this.#apiKey = apiKey ?? (typeof envApiKey === 'string' ? envApiKey : undefined)
this.#baseUrl = stripTrailingSlash(baseUrl)
}
private buildRequestBody(messages: LLMMessage[], options: LLMChatOptions): Record<string, unknown> {
const body: Record<string, unknown> = {
model: options.model,
messages: toOllamaMessages(messages, options.systemPrompt),
// Ollama's native /api/chat reads sampling parameters from `options`
// (`num_predict` is its name for the max-token cap).
options: {
temperature: options.temperature ?? 1,
num_predict: options.maxTokens,
},
}
if (options.tools) {
body.tools = options.tools.map(toOllamaTool)
}
return body
}
private buildHeaders(): Record<string, string> {
const headers: Record<string, string> = {
'Content-Type': 'application/json',
}
if (this.#apiKey) {
headers.Authorization = `Bearer ${this.#apiKey}`
}
return headers
}
// -------------------------------------------------------------------------
// chat()
// -------------------------------------------------------------------------
async chat(messages: LLMMessage[], options: LLMChatOptions): Promise<LLMResponse> {
const url = `${this.#baseUrl}/api/chat`
const requestBody = this.buildRequestBody(messages, options)
requestBody.stream = false
const response = await fetch(url, {
method: 'POST',
headers: this.buildHeaders(),
body: JSON.stringify(requestBody),
signal: options.abortSignal,
})
if (!response.ok) {
const text = await response.text()
throw new Error(`Ollama API request failed (${response.status}): ${text}`)
}
const text = await response.text()
const body = parseTextAsJson(text)
if (body === null) {
throw new Error(
`Ollama API returned invalid JSON response: ${text.slice(0, 200)}`,
)
}
return buildOllamaResponse(body)
}
// -------------------------------------------------------------------------
// stream()
// -------------------------------------------------------------------------
async *stream(messages: LLMMessage[], options: LLMStreamOptions): AsyncIterable<StreamEvent> {
const url = `${this.#baseUrl}/api/chat`
const requestBody = this.buildRequestBody(messages, options)
requestBody.stream = true
const response = await fetch(url, {
method: 'POST',
headers: this.buildHeaders(),
body: JSON.stringify(requestBody),
signal: options.abortSignal,
})
if (!response.ok) {
const text = await response.text()
throw new Error(`Ollama API request failed (${response.status}): ${text}`)
}
const reader = response.body?.getReader()
if (!reader) {
throw new Error('Ollama stream response has no body')
}
const decoder = new TextDecoder()
let buffer = ''
let lastMessage: any = null
let accumulatedText = ''
try {
while (true) {
const { value, done } = await reader.read()
if (done) {
break
}
buffer += decoder.decode(value, { stream: true })
while (true) {
const boundary = buffer.indexOf('\n\n')
if (boundary === -1) {
break
}
const packet = buffer.slice(0, boundary)
buffer = buffer.slice(boundary + 2)
const lines = packet.split(/\r?\n/)
let data = ''
for (const line of lines) {
if (line.startsWith('data:')) {
data += line.slice(5).trim()
}
}
if (data === '[DONE]') {
buffer = ''
break
}
if (!data) {
continue
}
try {
const payload = JSON.parse(data)
const choice = Array.isArray(payload.choices) && payload.choices.length > 0
? payload.choices[0]
: null
const delta = choice?.delta ?? payload.delta
if (delta?.content) {
const text = String(delta.content)
accumulatedText += text
yield { type: 'text', data: text }
}
const toolDelta = delta?.tool_call ?? payload.tool_call
if (toolDelta || choice?.message?.tool_call || payload.message?.tool_call) {
const toolCallMessage = toolDelta ?? choice?.message?.tool_call ?? payload.message?.tool_call
const toolUse = parseToolCall({ tool_call: toolCallMessage })
if (toolUse) {
yield { type: 'tool_use', data: toolUse }
}
}
if (payload.message) {
lastMessage = payload.message
}
if (choice?.message) {
lastMessage = choice.message
}
} catch {
// Ignore malformed SSE payloads.
}
}
}
// `lastMessage` is the raw message object, so wrap it in the response
// shape that buildOllamaResponse() and the empty-message check expect.
let finalMessage: any = lastMessage
? { id: '', model: options.model, message: lastMessage, finish_reason: 'stop' }
: {
id: '',
model: options.model,
message: { content: [{ type: 'text', text: accumulatedText }] },
usage: { prompt_tokens: 0, completion_tokens: 0 },
finish_reason: 'stop',
}
const isEmptyMessage = finalMessage?.message &&
((typeof finalMessage.message.content === 'string' && finalMessage.message.content.trim() === '') ||
(Array.isArray(finalMessage.message.content) && finalMessage.message.content.length === 0))
if (isEmptyMessage && accumulatedText.length > 0) {
finalMessage = {
...finalMessage,
message: { content: [{ type: 'text', text: accumulatedText }] },
}
}
const finalResponse: LLMResponse = buildOllamaResponse(finalMessage)
yield { type: 'done', data: finalResponse }
} catch (err) {
const error = err instanceof Error ? err : new Error(String(err))
yield { type: 'error', data: error }
}
}
}
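
A minimal consumption sketch for the streaming path (model name illustrative):

```typescript
import { OllamaAdapter } from './src/llm/ollama.js'

const adapter = new OllamaAdapter()

// Text deltas arrive as 'text' events; the generator always ends with a
// 'done' event carrying the assembled LLMResponse ('error' on failure).
for await (const event of adapter.stream(
  [{ role: 'user', content: [{ type: 'text', text: 'Stream a haiku.' }] }],
  { model: 'llama2' },
)) {
  if (event.type === 'text') process.stdout.write(String(event.data))
  else if (event.type === 'done') console.log('\n[done]')
}
```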

src/types.ts
View File

@@ -186,7 +186,7 @@ export interface ToolDefinition<TInput = Record<string, unknown>> {
export interface AgentConfig {
readonly name: string
readonly model: string
- readonly provider?: 'anthropic' | 'openai'
+ readonly provider?: 'anthropic' | 'openai' | 'ollama'
readonly systemPrompt?: string
/** Names of tools (from the tool registry) available to this agent. */
readonly tools?: readonly string[]
@@ -285,7 +285,7 @@ export interface OrchestratorEvent {
export interface OrchestratorConfig {
readonly maxConcurrency?: number
readonly defaultModel?: string
- readonly defaultProvider?: 'anthropic' | 'openai'
+ readonly defaultProvider?: 'anthropic' | 'openai' | 'ollama'
onProgress?: (event: OrchestratorEvent) => void
}

tsconfig.json
View File

@@ -3,7 +3,8 @@
"target": "ES2022",
"module": "ESNext",
"moduleResolution": "bundler",
- "lib": ["ES2022"],
+ "lib": ["ES2022", "DOM"],
+ "types": ["node"],
"outDir": "dist",
"rootDir": "src",
"declaration": true,