docs: add examples for local models (Ollama) and fan-out/aggregate pattern

- 06-local-model.ts: mix Ollama (local) + Claude (cloud) in a runTasks pipeline,
  demonstrating baseURL and apiKey placeholder for OpenAI-compatible servers
- 07-fan-out-aggregate.ts: MapReduce pattern using AgentPool.runParallel() to
  fan out analysis to 3 perspective agents, then aggregate via a synthesizer
This commit is contained in:
JackChen 2026-04-03 02:12:05 +08:00
parent 6e6a85178b
commit 31a0fa4951
2 changed files with 408 additions and 0 deletions

199
examples/06-local-model.ts Normal file
View File

@ -0,0 +1,199 @@
/**
* Example 06 — Local Model + Cloud Model Team (Ollama + Claude)
*
* Demonstrates mixing a local model served by Ollama with a cloud model
* (Claude) in the same task pipeline. The key technique is using
* `provider: 'openai'` with a custom `baseURL` pointing at Ollama's
* OpenAI-compatible endpoint.
*
* This pattern works with ANY OpenAI-compatible local server:
* - Ollama http://localhost:11434/v1
* - vLLM http://localhost:8000/v1
* - LM Studio http://localhost:1234/v1
* - llama.cpp http://localhost:8080/v1
* Just change the baseURL and model name below.
*
* Run:
* npx tsx examples/06-local-model.ts
*
* Prerequisites:
* 1. Ollama installed and running: https://ollama.com
* 2. Pull the model: ollama pull llama3.1
* 3. ANTHROPIC_API_KEY env var must be set.
*/
import { OpenMultiAgent } from '../src/index.js'
import type { AgentConfig, OrchestratorEvent, Task } from '../src/types.js'
// ---------------------------------------------------------------------------
// Agents
// ---------------------------------------------------------------------------
/**
* Coder uses Claude (Anthropic) for high-quality code generation.
*/
const coder: AgentConfig = {
name: 'coder',
model: 'claude-sonnet-4-6',
provider: 'anthropic',
systemPrompt: `You are a senior TypeScript developer. Write clean, well-typed,
production-quality code. Use the tools to write files to /tmp/local-model-demo/.
Always include brief JSDoc comments on exported functions.`,
tools: ['bash', 'file_write'],
maxTurns: 6,
}
/**
* Reviewer uses a local Ollama model via the OpenAI-compatible API.
* The apiKey is required by the OpenAI SDK but Ollama ignores it,
* so we pass the placeholder string 'ollama'.
*/
const reviewer: AgentConfig = {
name: 'reviewer',
model: 'llama3.1',
provider: 'openai', // 'openai' here means "OpenAI-compatible protocol", not the OpenAI cloud
baseURL: 'http://localhost:11434/v1',
apiKey: 'ollama',
systemPrompt: `You are a code reviewer. You read source files and produce a structured review.
Your review MUST include these sections:
- Summary (2-3 sentences)
- Strengths (bullet list)
- Issues (bullet list or "None found" if the code is clean)
- Verdict: SHIP or NEEDS WORK
Be specific and constructive. Reference line numbers or function names when possible.`,
tools: ['file_read'],
maxTurns: 4,
}
// ---------------------------------------------------------------------------
// Progress handler
// ---------------------------------------------------------------------------
const taskTimes = new Map<string, number>()
function handleProgress(event: OrchestratorEvent): void {
const ts = new Date().toISOString().slice(11, 23)
switch (event.type) {
case 'task_start': {
taskTimes.set(event.task ?? '', Date.now())
const task = event.data as Task | undefined
console.log(`[${ts}] TASK READY "${task?.title ?? event.task}" → ${task?.assignee ?? '?'}`)
break
}
case 'task_complete': {
const elapsed = Date.now() - (taskTimes.get(event.task ?? '') ?? Date.now())
console.log(`[${ts}] TASK DONE task=${event.task} in ${elapsed}ms`)
break
}
case 'agent_start':
console.log(`[${ts}] AGENT START ${event.agent}`)
break
case 'agent_complete':
console.log(`[${ts}] AGENT DONE ${event.agent}`)
break
case 'error':
console.error(`[${ts}] ERROR ${event.agent ?? ''} task=${event.task ?? '?'}`)
break
}
}
// ---------------------------------------------------------------------------
// Orchestrator + Team
// ---------------------------------------------------------------------------
const orchestrator = new OpenMultiAgent({
defaultModel: 'claude-sonnet-4-6',
maxConcurrency: 2,
onProgress: handleProgress,
})
const team = orchestrator.createTeam('local-cloud-team', {
name: 'local-cloud-team',
agents: [coder, reviewer],
sharedMemory: true,
})
// ---------------------------------------------------------------------------
// Task pipeline: code → review
// ---------------------------------------------------------------------------
const OUTPUT_DIR = '/tmp/local-model-demo'
const tasks: Array<{
title: string
description: string
assignee?: string
dependsOn?: string[]
}> = [
{
title: 'Write: retry utility',
description: `Write a small but complete TypeScript utility to ${OUTPUT_DIR}/retry.ts.
The module should export:
1. A \`RetryOptions\` interface with: maxRetries (number), delayMs (number),
backoffFactor (optional number, default 2), shouldRetry (optional predicate
taking the error and returning boolean).
2. An async \`retry<T>(fn: () => Promise<T>, options: RetryOptions): Promise<T>\`
function that retries \`fn\` with exponential backoff.
3. A convenience \`withRetry\` wrapper that returns a new function with retry
behaviour baked in.
Include JSDoc comments. No external dependencies use only Node built-ins.
After writing the file, also create a small test script at ${OUTPUT_DIR}/retry-test.ts
that exercises the happy path and a failure case, then run it with \`npx tsx\`.`,
assignee: 'coder',
},
{
title: 'Review: retry utility',
description: `Read the files at ${OUTPUT_DIR}/retry.ts and ${OUTPUT_DIR}/retry-test.ts.
Produce a structured code review covering:
- Summary (2-3 sentences describing the module)
- Strengths (bullet list)
- Issues (bullet list be specific about what and why)
- Verdict: SHIP or NEEDS WORK`,
assignee: 'reviewer',
dependsOn: ['Write: retry utility'],
},
]
// ---------------------------------------------------------------------------
// Run
// ---------------------------------------------------------------------------
console.log('Local + Cloud model team')
console.log(` coder → Claude (${coder.model}) via Anthropic API`)
console.log(` reviewer → Ollama (${reviewer.model}) at ${reviewer.baseURL}`)
console.log()
console.log('Pipeline: coder writes code → local model reviews it')
console.log('='.repeat(60))
const result = await orchestrator.runTasks(team, tasks)
// ---------------------------------------------------------------------------
// Summary
// ---------------------------------------------------------------------------
console.log('\n' + '='.repeat(60))
console.log('Pipeline complete.\n')
console.log(`Overall success: ${result.success}`)
console.log(`Tokens — input: ${result.totalTokenUsage.input_tokens}, output: ${result.totalTokenUsage.output_tokens}`)
console.log('\nPer-agent summary:')
for (const [name, r] of result.agentResults) {
const icon = r.success ? 'OK ' : 'FAIL'
const provider = name === 'coder' ? 'anthropic' : 'ollama (local)'
const tools = r.toolCalls.map(c => c.toolName).join(', ')
console.log(` [${icon}] ${name.padEnd(10)} (${provider.padEnd(16)}) tools: ${tools || '(none)'}`)
}
// Print the reviewer's output
const review = result.agentResults.get('reviewer')
if (review?.success) {
console.log('\nCode review (from local model):')
console.log('─'.repeat(60))
console.log(review.output)
console.log('─'.repeat(60))
}

View File

@ -0,0 +1,209 @@
/**
* Example 07 — Fan-Out / Aggregate (MapReduce) Pattern
*
* Demonstrates:
* - Fan-out: send the same question to N "analyst" agents in parallel
* - Aggregate: a "synthesizer" agent reads all analyst outputs and produces
* a balanced final report
* - AgentPool with runParallel() for concurrent fan-out
* - No tools needed — pure LLM reasoning to keep the focus on the pattern
*
* Run:
* npx tsx examples/07-fan-out-aggregate.ts
*
* Prerequisites:
* ANTHROPIC_API_KEY env var must be set.
*/
import { Agent, AgentPool, ToolRegistry, ToolExecutor, registerBuiltInTools } from '../src/index.js'
import type { AgentConfig, AgentRunResult } from '../src/types.js'
// ---------------------------------------------------------------------------
// Analysis topic
// ---------------------------------------------------------------------------
const TOPIC = `Should a solo developer build a SaaS product that uses AI agents
for automated customer support? Consider the current state of AI technology,
market demand, competition, costs, and the unique constraints of being a solo
founder with limited time (~6 hours/day of productive work).`
// ---------------------------------------------------------------------------
// Analyst agent configs — three perspectives on the same question
// ---------------------------------------------------------------------------
const optimistConfig: AgentConfig = {
name: 'optimist',
model: 'claude-sonnet-4-6',
systemPrompt: `You are an optimistic technology analyst who focuses on
opportunities, upside potential, and emerging trends. You see possibilities
where others see obstacles. Back your optimism with concrete reasoning
cite market trends, cost curves, and real capabilities. Keep your analysis
to 200-300 words.`,
maxTurns: 1,
temperature: 0.4,
}
const skepticConfig: AgentConfig = {
name: 'skeptic',
model: 'claude-sonnet-4-6',
systemPrompt: `You are a skeptical technology analyst who focuses on risks,
challenges, failure modes, and hidden costs. You stress-test assumptions and
ask "what could go wrong?" Back your skepticism with concrete reasoning
cite failure rates, technical limitations, and market realities. Keep your
analysis to 200-300 words.`,
maxTurns: 1,
temperature: 0.4,
}
const pragmatistConfig: AgentConfig = {
name: 'pragmatist',
model: 'claude-sonnet-4-6',
systemPrompt: `You are a pragmatic technology analyst who focuses on practical
feasibility, execution complexity, and resource requirements. You care about
what works today, not what might work someday. You think in terms of MVPs,
timelines, and concrete tradeoffs. Keep your analysis to 200-300 words.`,
maxTurns: 1,
temperature: 0.4,
}
const synthesizerConfig: AgentConfig = {
name: 'synthesizer',
model: 'claude-sonnet-4-6',
systemPrompt: `You are a senior strategy advisor who synthesizes multiple
perspectives into a balanced, actionable recommendation. You do not simply
summarise you weigh the arguments, identify where they agree and disagree,
and produce a clear verdict with next steps. Structure your output as:
1. Key agreements across perspectives
2. Key disagreements and how you weigh them
3. Verdict (go / no-go / conditional go)
4. Recommended next steps (3-5 bullet points)
Keep the final report to 300-400 words.`,
maxTurns: 1,
temperature: 0.3,
}
// ---------------------------------------------------------------------------
// Build agents — no tools needed for pure reasoning
// ---------------------------------------------------------------------------
function buildAgent(config: AgentConfig): Agent {
const registry = new ToolRegistry()
registerBuiltInTools(registry) // not needed here, but safe if tools are added later
const executor = new ToolExecutor(registry)
return new Agent(config, registry, executor)
}
const optimist = buildAgent(optimistConfig)
const skeptic = buildAgent(skepticConfig)
const pragmatist = buildAgent(pragmatistConfig)
const synthesizer = buildAgent(synthesizerConfig)
// ---------------------------------------------------------------------------
// Set up the pool
// ---------------------------------------------------------------------------
const pool = new AgentPool(3) // 3 analysts can run simultaneously
pool.add(optimist)
pool.add(skeptic)
pool.add(pragmatist)
pool.add(synthesizer)
console.log('Fan-Out / Aggregate (MapReduce) Pattern')
console.log('='.repeat(60))
console.log(`\nTopic: ${TOPIC.replace(/\n/g, ' ').trim()}\n`)
// ---------------------------------------------------------------------------
// Step 1: Fan-out — run all 3 analysts in parallel
// ---------------------------------------------------------------------------
console.log('[Step 1] Fan-out: 3 analysts running in parallel...\n')
const analystResults: Map<string, AgentRunResult> = await pool.runParallel([
{ agent: 'optimist', prompt: TOPIC },
{ agent: 'skeptic', prompt: TOPIC },
{ agent: 'pragmatist', prompt: TOPIC },
])
// Print each analyst's output (truncated)
const analysts = ['optimist', 'skeptic', 'pragmatist'] as const
for (const name of analysts) {
const result = analystResults.get(name)!
const status = result.success ? 'OK' : 'FAILED'
console.log(` ${name} [${status}] — ${result.tokenUsage.output_tokens} output tokens`)
console.log(` ${result.output.slice(0, 150).replace(/\n/g, ' ')}...`)
console.log()
}
// Check all analysts succeeded
for (const name of analysts) {
if (!analystResults.get(name)!.success) {
console.error(`Analyst '${name}' failed: ${analystResults.get(name)!.output}`)
process.exit(1)
}
}
// ---------------------------------------------------------------------------
// Step 2: Aggregate — synthesizer reads all 3 analyses
// ---------------------------------------------------------------------------
console.log('[Step 2] Aggregate: synthesizer producing final report...\n')
const synthesizerPrompt = `Three analysts have independently evaluated the same question.
Read their analyses below and produce your synthesis report.
--- OPTIMIST ---
${analystResults.get('optimist')!.output}
--- SKEPTIC ---
${analystResults.get('skeptic')!.output}
--- PRAGMATIST ---
${analystResults.get('pragmatist')!.output}
Now synthesize these three perspectives into a balanced recommendation.`
const synthResult = await pool.run('synthesizer', synthesizerPrompt)
if (!synthResult.success) {
console.error('Synthesizer failed:', synthResult.output)
process.exit(1)
}
// ---------------------------------------------------------------------------
// Final output
// ---------------------------------------------------------------------------
console.log('='.repeat(60))
console.log('SYNTHESIZED REPORT')
console.log('='.repeat(60))
console.log()
console.log(synthResult.output)
console.log()
console.log('-'.repeat(60))
// ---------------------------------------------------------------------------
// Token usage comparison
// ---------------------------------------------------------------------------
console.log('\nToken Usage Summary:')
console.log('-'.repeat(60))
let totalInput = 0
let totalOutput = 0
for (const name of analysts) {
const r = analystResults.get(name)!
totalInput += r.tokenUsage.input_tokens
totalOutput += r.tokenUsage.output_tokens
console.log(` ${name.padEnd(12)} — input: ${r.tokenUsage.input_tokens}, output: ${r.tokenUsage.output_tokens}`)
}
totalInput += synthResult.tokenUsage.input_tokens
totalOutput += synthResult.tokenUsage.output_tokens
console.log(` ${'synthesizer'.padEnd(12)} — input: ${synthResult.tokenUsage.input_tokens}, output: ${synthResult.tokenUsage.output_tokens}`)
console.log('-'.repeat(60))
console.log(` ${'TOTAL'.padEnd(12)} — input: ${totalInput}, output: ${totalOutput}`)
console.log('\nDone.')