Merge remote-tracking branch 'upstream/main'

This commit is contained in:
MrAvalonApple 2026-04-02 21:56:17 +03:00
commit ec6be79dc4
6 changed files with 1079 additions and 740 deletions

183
README.md
View File

@ -26,7 +26,7 @@ Requires Node.js >= 18.
npm install @jackchen_me/open-multi-agent
```
Set the API key for your provider:
Set the API key for your provider. Local models via Ollama require no API key — see [example 06](examples/06-local-model.ts).
- `ANTHROPIC_API_KEY`
- `OPENAI_API_KEY`
@ -109,165 +109,23 @@ Tokens: 12847 output tokens
<img src="https://contrib.rocks/image?repo=JackChen-me/open-multi-agent" />
</a>
## More Examples
## Examples
<details>
<summary><b>Single Agent</b> — one agent, one prompt</summary>
All examples are runnable scripts in [`examples/`](./examples/). Run any of them with `npx tsx`:
```typescript
import { OpenMultiAgent } from '@jackchen_me/open-multi-agent'
const orchestrator = new OpenMultiAgent({ defaultModel: 'claude-sonnet-4-6' })
const result = await orchestrator.runAgent(
{
name: 'coder',
model: 'claude-sonnet-4-6',
tools: ['bash', 'file_write'],
},
'Write a TypeScript function that reverses a string, save it to /tmp/reverse.ts, and run it.',
)
console.log(result.output)
```bash
npx tsx examples/01-single-agent.ts
```
</details>
<details>
<summary><b>Task Pipeline</b> — explicit control over task graph and assignments</summary>
```typescript
const result = await orchestrator.runTasks(team, [
{
title: 'Design the data model',
description: 'Write a TypeScript interface spec to /tmp/spec.md',
assignee: 'architect',
},
{
title: 'Implement the module',
description: 'Read /tmp/spec.md and implement the module in /tmp/src/',
assignee: 'developer',
dependsOn: ['Design the data model'], // blocked until design completes
},
{
title: 'Write tests',
description: 'Read the implementation and write Vitest tests.',
assignee: 'developer',
dependsOn: ['Implement the module'],
},
{
title: 'Review code',
description: 'Review /tmp/src/ and produce a structured code review.',
assignee: 'reviewer',
dependsOn: ['Implement the module'], // can run in parallel with tests
},
])
```
</details>
<details>
<summary><b>Custom Tools</b> — define tools with Zod schemas</summary>
```typescript
import { z } from 'zod'
import { defineTool, Agent, ToolRegistry, ToolExecutor, registerBuiltInTools } from '@jackchen_me/open-multi-agent'
const searchTool = defineTool({
name: 'web_search',
description: 'Search the web and return the top results.',
inputSchema: z.object({
query: z.string().describe('The search query.'),
maxResults: z.number().optional().describe('Number of results (default 5).'),
}),
execute: async ({ query, maxResults = 5 }) => {
const results = await mySearchProvider(query, maxResults)
return { data: JSON.stringify(results), isError: false }
},
})
const registry = new ToolRegistry()
registerBuiltInTools(registry)
registry.register(searchTool)
const executor = new ToolExecutor(registry)
const agent = new Agent(
{ name: 'researcher', model: 'claude-sonnet-4-6', tools: ['web_search'] },
registry,
executor,
)
const result = await agent.run('Find the three most recent TypeScript releases.')
```
</details>
<details>
<summary><b>Multi-Model Teams</b> — mix Claude, GPT, Gemini, and local models in one workflow</summary>
```typescript
const claudeAgent: AgentConfig = {
name: 'strategist',
model: 'claude-opus-4-6',
provider: 'anthropic',
systemPrompt: 'You plan high-level approaches.',
tools: ['file_write'],
}
const gptAgent: AgentConfig = {
name: 'implementer',
model: 'gpt-5.4',
provider: 'openai',
systemPrompt: 'You implement plans as working code.',
tools: ['bash', 'file_read', 'file_write'],
}
// Any OpenAI-compatible API — Ollama, vLLM, LM Studio, etc.
const localAgent: AgentConfig = {
name: 'reviewer',
model: 'llama3.1',
provider: 'openai',
baseURL: 'http://localhost:11434/v1',
apiKey: 'ollama',
systemPrompt: 'You review code for correctness and clarity.',
tools: ['file_read', 'grep'],
}
const team = orchestrator.createTeam('mixed-team', {
name: 'mixed-team',
agents: [claudeAgent, gptAgent, localAgent],
sharedMemory: true,
})
const result = await orchestrator.runTeam(team, 'Build a CLI tool that converts JSON to CSV.')
```
</details>
<details>
<summary><b>Streaming Output</b></summary>
```typescript
import { Agent, ToolRegistry, ToolExecutor, registerBuiltInTools } from '@jackchen_me/open-multi-agent'
const registry = new ToolRegistry()
registerBuiltInTools(registry)
const executor = new ToolExecutor(registry)
const agent = new Agent(
{ name: 'writer', model: 'claude-sonnet-4-6', maxTurns: 3 },
registry,
executor,
)
for await (const event of agent.stream('Explain monads in two sentences.')) {
if (event.type === 'text' && typeof event.data === 'string') {
process.stdout.write(event.data)
}
}
```
</details>
| Example | What it shows |
|---------|---------------|
| [01 — Single Agent](examples/01-single-agent.ts) | `runAgent()` one-shot, `stream()` streaming, `prompt()` multi-turn |
| [02 — Team Collaboration](examples/02-team-collaboration.ts) | `runTeam()` auto-orchestration with coordinator pattern |
| [03 — Task Pipeline](examples/03-task-pipeline.ts) | `runTasks()` explicit dependency graph (design → implement → test + review) |
| [04 — Multi-Model Team](examples/04-multi-model-team.ts) | `defineTool()` custom tools, mixed Anthropic + OpenAI providers, `AgentPool` |
| [05 — Copilot](examples/05-copilot-test.ts) | GitHub Copilot as an LLM provider |
| [06 — Local Model](examples/06-local-model.ts) | Ollama + Claude in one pipeline via `baseURL` (works with vLLM, LM Studio, etc.) |
| [07 — Fan-Out / Aggregate](examples/07-fan-out-aggregate.ts) | `runParallel()` MapReduce — 3 analysts in parallel, then synthesize |
## Architecture
@ -321,11 +179,22 @@ for await (const event of agent.stream('Explain monads in two sentences.')) {
| `file_edit` | Edit a file by replacing an exact string match. |
| `grep` | Search file contents with regex. Uses ripgrep when available, falls back to Node.js. |
## Supported Providers
| Provider | Config | Env var | Status |
|----------|--------|---------|--------|
| Anthropic (Claude) | `provider: 'anthropic'` | `ANTHROPIC_API_KEY` | Verified |
| OpenAI (GPT) | `provider: 'openai'` | `OPENAI_API_KEY` | Verified |
| GitHub Copilot | `provider: 'copilot'` | `GITHUB_TOKEN` | Verified |
| Ollama / vLLM / LM Studio | `provider: 'openai'` + `baseURL` | — | Verified |
Any OpenAI-compatible API should work via `provider: 'openai'` + `baseURL` (DeepSeek, Groq, Mistral, Qwen, MiniMax, etc.). These providers have not been fully verified yet — contributions welcome via [#25](https://github.com/JackChen-me/open-multi-agent/issues/25).
## Contributing
Issues, feature requests, and PRs are welcome. Some areas where contributions would be especially valuable:
- **LLM Adapters** — Anthropic, OpenAI, and Copilot are supported out of the box. Any OpenAI-compatible API (Ollama, vLLM, LM Studio, etc.) works via `baseURL`. Additional adapters for other providers are welcome. The `LLMAdapter` interface requires just two methods: `chat()` and `stream()`.
- **Provider integrations** — Verify and document OpenAI-compatible providers (DeepSeek, Groq, Qwen, MiniMax, etc.) via `baseURL`. See [#25](https://github.com/JackChen-me/open-multi-agent/issues/25). For providers that are NOT OpenAI-compatible (e.g. Gemini), a new `LLMAdapter` implementation is welcome — the interface requires just two methods: `chat()` and `stream()`.
- **Examples** — Real-world workflows and use cases.
- **Documentation** — Guides, tutorials, and API docs.

View File

@ -26,7 +26,7 @@
npm install @jackchen_me/open-multi-agent
```
在环境变量中设置 `ANTHROPIC_API_KEY`(以及可选的 `OPENAI_API_KEY` 或用于 Copilot 的 `GITHUB_TOKEN`)。
在环境变量中设置 `ANTHROPIC_API_KEY`(以及可选的 `OPENAI_API_KEY` 或用于 Copilot 的 `GITHUB_TOKEN`)。通过 Ollama 使用本地模型无需 API key — 参见 [example 06](examples/06-local-model.ts)。
三个智能体,一个目标——框架处理剩下的一切:
@ -108,165 +108,23 @@ Tokens: 12847 output tokens
<img src="https://contrib.rocks/image?repo=JackChen-me/open-multi-agent" />
</a>
## 更多示例
## 示例
<details>
<summary><b>单智能体</b> — 一个智能体,一个提示词</summary>
所有示例都是可运行脚本,位于 [`examples/`](./examples/) 目录。使用 `npx tsx` 运行:
```typescript
import { OpenMultiAgent } from '@jackchen_me/open-multi-agent'
const orchestrator = new OpenMultiAgent({ defaultModel: 'claude-sonnet-4-6' })
const result = await orchestrator.runAgent(
{
name: 'coder',
model: 'claude-sonnet-4-6',
tools: ['bash', 'file_write'],
},
'Write a TypeScript function that reverses a string, save it to /tmp/reverse.ts, and run it.',
)
console.log(result.output)
```bash
npx tsx examples/01-single-agent.ts
```
</details>
<details>
<summary><b>任务流水线</b> — 显式控制任务图和分配</summary>
```typescript
const result = await orchestrator.runTasks(team, [
{
title: 'Design the data model',
description: 'Write a TypeScript interface spec to /tmp/spec.md',
assignee: 'architect',
},
{
title: 'Implement the module',
description: 'Read /tmp/spec.md and implement the module in /tmp/src/',
assignee: 'developer',
dependsOn: ['Design the data model'], // 等待设计完成后才开始
},
{
title: 'Write tests',
description: 'Read the implementation and write Vitest tests.',
assignee: 'developer',
dependsOn: ['Implement the module'],
},
{
title: 'Review code',
description: 'Review /tmp/src/ and produce a structured code review.',
assignee: 'reviewer',
dependsOn: ['Implement the module'], // 可以和测试并行执行
},
])
```
</details>
<details>
<summary><b>自定义工具</b> — 使用 Zod schema 定义工具</summary>
```typescript
import { z } from 'zod'
import { defineTool, Agent, ToolRegistry, ToolExecutor, registerBuiltInTools } from '@jackchen_me/open-multi-agent'
const searchTool = defineTool({
name: 'web_search',
description: 'Search the web and return the top results.',
inputSchema: z.object({
query: z.string().describe('The search query.'),
maxResults: z.number().optional().describe('Number of results (default 5).'),
}),
execute: async ({ query, maxResults = 5 }) => {
const results = await mySearchProvider(query, maxResults)
return { data: JSON.stringify(results), isError: false }
},
})
const registry = new ToolRegistry()
registerBuiltInTools(registry)
registry.register(searchTool)
const executor = new ToolExecutor(registry)
const agent = new Agent(
{ name: 'researcher', model: 'claude-sonnet-4-6', tools: ['web_search'] },
registry,
executor,
)
const result = await agent.run('Find the three most recent TypeScript releases.')
```
</details>
<details>
<summary><b>多模型团队</b> — 在一个工作流中混合使用 Claude、GPT 和本地模型</summary>
```typescript
const claudeAgent: AgentConfig = {
name: 'strategist',
model: 'claude-opus-4-6',
provider: 'anthropic',
systemPrompt: 'You plan high-level approaches.',
tools: ['file_write'],
}
const gptAgent: AgentConfig = {
name: 'implementer',
model: 'gpt-5.4',
provider: 'openai',
systemPrompt: 'You implement plans as working code.',
tools: ['bash', 'file_read', 'file_write'],
}
// 任何 OpenAI 兼容 API — Ollama、vLLM、LM Studio 等
const localAgent: AgentConfig = {
name: 'reviewer',
model: 'llama3.1',
provider: 'openai',
baseURL: 'http://localhost:11434/v1',
apiKey: 'ollama',
systemPrompt: 'You review code for correctness and clarity.',
tools: ['file_read', 'grep'],
}
const team = orchestrator.createTeam('mixed-team', {
name: 'mixed-team',
agents: [claudeAgent, gptAgent, localAgent],
sharedMemory: true,
})
const result = await orchestrator.runTeam(team, 'Build a CLI tool that converts JSON to CSV.')
```
</details>
<details>
<summary><b>流式输出</b></summary>
```typescript
import { Agent, ToolRegistry, ToolExecutor, registerBuiltInTools } from '@jackchen_me/open-multi-agent'
const registry = new ToolRegistry()
registerBuiltInTools(registry)
const executor = new ToolExecutor(registry)
const agent = new Agent(
{ name: 'writer', model: 'claude-sonnet-4-6', maxTurns: 3 },
registry,
executor,
)
for await (const event of agent.stream('Explain monads in two sentences.')) {
if (event.type === 'text' && typeof event.data === 'string') {
process.stdout.write(event.data)
}
}
```
</details>
| 示例 | 展示内容 |
|------|----------|
| [01 — 单智能体](examples/01-single-agent.ts) | `runAgent()` 单次调用、`stream()` 流式输出、`prompt()` 多轮对话 |
| [02 — 团队协作](examples/02-team-collaboration.ts) | `runTeam()` 自动编排 + 协调者模式 |
| [03 — 任务流水线](examples/03-task-pipeline.ts) | `runTasks()` 显式依赖图(设计 → 实现 → 测试 + 评审) |
| [04 — 多模型团队](examples/04-multi-model-team.ts) | `defineTool()` 自定义工具、Anthropic + OpenAI 混合、`AgentPool` |
| [05 — Copilot](examples/05-copilot-test.ts) | GitHub Copilot 作为 LLM 提供者 |
| [06 — 本地模型](examples/06-local-model.ts) | Ollama + Claude 混合流水线,通过 `baseURL` 接入(兼容 vLLM、LM Studio 等) |
| [07 — 扇出聚合](examples/07-fan-out-aggregate.ts) | `runParallel()` MapReduce — 3 个分析师并行,然后综合 |
## 架构
@ -319,11 +177,22 @@ for await (const event of agent.stream('Explain monads in two sentences.')) {
| `file_edit` | 通过精确字符串匹配编辑文件。 |
| `grep` | 使用正则表达式搜索文件内容。优先使用 ripgrep回退到 Node.js 实现。 |
## 支持的 Provider
| Provider | 配置 | 环境变量 | 状态 |
|----------|------|----------|------|
| Anthropic (Claude) | `provider: 'anthropic'` | `ANTHROPIC_API_KEY` | 已验证 |
| OpenAI (GPT) | `provider: 'openai'` | `OPENAI_API_KEY` | 已验证 |
| GitHub Copilot | `provider: 'copilot'` | `GITHUB_TOKEN` | 已验证 |
| Ollama / vLLM / LM Studio | `provider: 'openai'` + `baseURL` | — | 已验证 |
任何 OpenAI 兼容 API 均可通过 `provider: 'openai'` + `baseURL` 接入DeepSeek、Groq、Mistral、Qwen、MiniMax 等)。这些 Provider 尚未完整验证——欢迎通过 [#25](https://github.com/JackChen-me/open-multi-agent/issues/25) 贡献验证。
## 参与贡献
欢迎提 Issue、功能需求和 PR。以下方向的贡献尤其有价值
- **LLM 适配器** — Anthropic、OpenAI、Copilot 已原生支持。任何 OpenAI 兼容 APIOllama、vLLM、LM Studio 等)可通过 `baseURL` 直接使用。欢迎贡献 Gemini 等其他适配器。`LLMAdapter` 接口只需实现两个方法:`chat()` 和 `stream()`
- **Provider 集成** — 验证并文档化 OpenAI 兼容 ProviderDeepSeek、Groq、Qwen、MiniMax 等)通过 `baseURL` 接入。详见 [#25](https://github.com/JackChen-me/open-multi-agent/issues/25)。对于非 OpenAI 兼容的 Provider如 Gemini欢迎贡献新的 `LLMAdapter` 实现——接口只需两个方法:`chat()` 和 `stream()`
- **示例** — 真实场景的工作流和用例。
- **文档** — 指南、教程和 API 文档。

199
examples/06-local-model.ts Normal file
View File

@ -0,0 +1,199 @@
/**
* Example 06 — Local Model + Cloud Model Team (Ollama + Claude)
*
* Demonstrates mixing a local model served by Ollama with a cloud model
* (Claude) in the same task pipeline. The key technique is using
* `provider: 'openai'` with a custom `baseURL` pointing at Ollama's
* OpenAI-compatible endpoint.
*
* This pattern works with ANY OpenAI-compatible local server:
* - Ollama — http://localhost:11434/v1
* - vLLM — http://localhost:8000/v1
* - LM Studio — http://localhost:1234/v1
* - llama.cpp — http://localhost:8080/v1
* Just change the baseURL and model name below.
*
* Run:
* npx tsx examples/06-local-model.ts
*
* Prerequisites:
* 1. Ollama installed and running: https://ollama.com
* 2. Pull the model: ollama pull llama3.1
* 3. ANTHROPIC_API_KEY env var must be set.
*/
import { OpenMultiAgent } from '../src/index.js'
import type { AgentConfig, OrchestratorEvent, Task } from '../src/types.js'
// ---------------------------------------------------------------------------
// Agents
// ---------------------------------------------------------------------------
/**
 * Coder uses Claude (Anthropic) for high-quality code generation.
 */
const coder: AgentConfig = {
  name: 'coder',
  model: 'claude-sonnet-4-6',
  provider: 'anthropic',
  systemPrompt: `You are a senior TypeScript developer. Write clean, well-typed,
production-quality code. Use the tools to write files to /tmp/local-model-demo/.
Always include brief JSDoc comments on exported functions.`,
  tools: ['bash', 'file_write'], // bash is needed to run the generated test script with npx tsx
  maxTurns: 6, // NOTE(review): presumably caps the agent's tool-use loop — confirm in src/types
}
/**
 * Reviewer uses a local Ollama model via the OpenAI-compatible API.
 * The apiKey is required by the OpenAI SDK but Ollama ignores it,
 * so we pass the placeholder string 'ollama'.
 */
const reviewer: AgentConfig = {
  name: 'reviewer',
  model: 'llama3.1', // must be pulled beforehand: `ollama pull llama3.1` (see header comment)
  provider: 'openai', // 'openai' here means "OpenAI-compatible protocol", not the OpenAI cloud
  baseURL: 'http://localhost:11434/v1', // Ollama's OpenAI-compatible endpoint
  apiKey: 'ollama', // placeholder — Ollama ignores the key, the SDK just requires one
  // A rigid section list keeps small local models on a predictable output format.
  systemPrompt: `You are a code reviewer. You read source files and produce a structured review.
Your review MUST include these sections:
- Summary (2-3 sentences)
- Strengths (bullet list)
- Issues (bullet list or "None found" if the code is clean)
- Verdict: SHIP or NEEDS WORK
Be specific and constructive. Reference line numbers or function names when possible.`,
  tools: ['file_read'], // read-only: the reviewer inspects but never modifies files
  maxTurns: 4,
}
// ---------------------------------------------------------------------------
// Progress handler
// ---------------------------------------------------------------------------
// Start timestamps per task title, used to report elapsed time on completion.
const taskTimes = new Map<string, number>()

/**
 * Logs orchestrator lifecycle events with an HH:MM:SS.mmm timestamp prefix.
 *
 * Records each task's start time in `taskTimes` so `task_complete` can report
 * wall-clock duration, and deletes the entry once reported so the map cannot
 * grow without bound over long-running pipelines.
 */
function handleProgress(event: OrchestratorEvent): void {
  const ts = new Date().toISOString().slice(11, 23)
  switch (event.type) {
    case 'task_start': {
      taskTimes.set(event.task ?? '', Date.now())
      const task = event.data as Task | undefined
      console.log(`[${ts}] TASK READY "${task?.title ?? event.task}" → ${task?.assignee ?? '?'}`)
      break
    }
    case 'task_complete': {
      const key = event.task ?? ''
      // Fall back to "now" (i.e. 0ms) if the start event was never observed.
      const elapsed = Date.now() - (taskTimes.get(key) ?? Date.now())
      taskTimes.delete(key) // release the entry once its duration is reported
      console.log(`[${ts}] TASK DONE task=${event.task} in ${elapsed}ms`)
      break
    }
    case 'agent_start':
      console.log(`[${ts}] AGENT START ${event.agent}`)
      break
    case 'agent_complete':
      console.log(`[${ts}] AGENT DONE ${event.agent}`)
      break
    case 'error':
      console.error(`[${ts}] ERROR ${event.agent ?? ''} task=${event.task ?? '?'}`)
      break
  }
}
// ---------------------------------------------------------------------------
// Orchestrator + Team
// ---------------------------------------------------------------------------
// Orchestrator wiring: progress events from every run are routed to handleProgress.
const orchestrator = new OpenMultiAgent({
  defaultModel: 'claude-sonnet-4-6',
  maxConcurrency: 2, // at most two tasks in flight at once
  onProgress: handleProgress,
})
// Team of the cloud coder + local reviewer.
// sharedMemory: true — presumably shares context between agents; confirm in src.
const team = orchestrator.createTeam('local-cloud-team', {
  name: 'local-cloud-team',
  agents: [coder, reviewer],
  sharedMemory: true,
})
// ---------------------------------------------------------------------------
// Task pipeline: code → review
// ---------------------------------------------------------------------------
// All generated artifacts live under this directory, keeping the demo self-contained.
const OUTPUT_DIR = '/tmp/local-model-demo'

/**
 * Two-step pipeline: the cloud model writes a module plus a test script, then
 * the local model reviews both files. `dependsOn` references tasks by their
 * exact `title`, so the review cannot start before the write task completes.
 */
const tasks: Array<{
  title: string
  description: string
  assignee?: string
  dependsOn?: string[]
}> = [
  {
    title: 'Write: retry utility',
    description: `Write a small but complete TypeScript utility to ${OUTPUT_DIR}/retry.ts.
The module should export:
1. A \`RetryOptions\` interface with: maxRetries (number), delayMs (number),
backoffFactor (optional number, default 2), shouldRetry (optional predicate
taking the error and returning boolean).
2. An async \`retry<T>(fn: () => Promise<T>, options: RetryOptions): Promise<T>\`
function that retries \`fn\` with exponential backoff.
3. A convenience \`withRetry\` wrapper that returns a new function with retry
behaviour baked in.
Include JSDoc comments. No external dependencies — use only Node built-ins.
After writing the file, also create a small test script at ${OUTPUT_DIR}/retry-test.ts
that exercises the happy path and a failure case, then run it with \`npx tsx\`.`,
    assignee: 'coder',
  },
  {
    title: 'Review: retry utility',
    description: `Read the files at ${OUTPUT_DIR}/retry.ts and ${OUTPUT_DIR}/retry-test.ts.
Produce a structured code review covering:
- Summary (2-3 sentences describing the module)
- Strengths (bullet list)
- Issues (bullet list — be specific about what and why)
- Verdict: SHIP or NEEDS WORK`,
    assignee: 'reviewer',
    dependsOn: ['Write: retry utility'],
  },
]
// ---------------------------------------------------------------------------
// Run
// ---------------------------------------------------------------------------
// Print a banner mapping each role to its backing model before the run starts.
console.log('Local + Cloud model team')
console.log(`  coder    → Claude (${coder.model}) via Anthropic API`)
console.log(`  reviewer → Ollama (${reviewer.model}) at ${reviewer.baseURL}`)
console.log()
console.log('Pipeline: coder writes code → local model reviews it')
console.log('='.repeat(60))
// Execute the dependency-ordered pipeline; resolves when all tasks settle.
const result = await orchestrator.runTasks(team, tasks)
// ---------------------------------------------------------------------------
// Summary
// ---------------------------------------------------------------------------
console.log('\n' + '='.repeat(60))
console.log('Pipeline complete.\n')
console.log(`Overall success: ${result.success}`)
console.log(`Tokens — input: ${result.totalTokenUsage.input_tokens}, output: ${result.totalTokenUsage.output_tokens}`)
console.log('\nPer-agent summary:')
// One status row per agent: success flag, provider label, and tools invoked.
for (const [agentName, agentResult] of result.agentResults) {
  const statusIcon = agentResult.success ? 'OK ' : 'FAIL'
  const providerLabel = agentName === 'coder' ? 'anthropic' : 'ollama (local)'
  const usedTools = agentResult.toolCalls.map(call => call.toolName).join(', ')
  console.log(`  [${statusIcon}] ${agentName.padEnd(10)} (${providerLabel.padEnd(16)}) tools: ${usedTools || '(none)'}`)
}
// Show the local model's review verbatim when it completed successfully.
const reviewerResult = result.agentResults.get('reviewer')
if (reviewerResult?.success) {
  console.log('\nCode review (from local model):')
  console.log('─'.repeat(60))
  console.log(reviewerResult.output)
  console.log('─'.repeat(60))
}

View File

@ -0,0 +1,209 @@
/**
* Example 07 — Fan-Out / Aggregate (MapReduce) Pattern
*
* Demonstrates:
* - Fan-out: send the same question to N "analyst" agents in parallel
* - Aggregate: a "synthesizer" agent reads all analyst outputs and produces
* a balanced final report
* - AgentPool with runParallel() for concurrent fan-out
* - No tools needed — pure LLM reasoning to keep the focus on the pattern
*
* Run:
* npx tsx examples/07-fan-out-aggregate.ts
*
* Prerequisites:
* ANTHROPIC_API_KEY env var must be set.
*/
import { Agent, AgentPool, ToolRegistry, ToolExecutor, registerBuiltInTools } from '../src/index.js'
import type { AgentConfig, AgentRunResult } from '../src/types.js'
// ---------------------------------------------------------------------------
// Analysis topic
// ---------------------------------------------------------------------------
// The single question that is fanned out verbatim to all three analysts.
const TOPIC = `Should a solo developer build a SaaS product that uses AI agents
for automated customer support? Consider the current state of AI technology,
market demand, competition, costs, and the unique constraints of being a solo
founder with limited time (~6 hours/day of productive work).`
// ---------------------------------------------------------------------------
// Analyst agent configs — three perspectives on the same question
// ---------------------------------------------------------------------------
/** Analyst #1 — argues the upside case for the topic. */
const optimistConfig: AgentConfig = {
  name: 'optimist',
  model: 'claude-sonnet-4-6',
  systemPrompt: `You are an optimistic technology analyst who focuses on
opportunities, upside potential, and emerging trends. You see possibilities
where others see obstacles. Back your optimism with concrete reasoning —
cite market trends, cost curves, and real capabilities. Keep your analysis
to 200-300 words.`,
  maxTurns: 1, // single-shot: one prompt in, one analysis out
  temperature: 0.4,
}
/** Analyst #2 — stress-tests assumptions and surfaces risks. */
const skepticConfig: AgentConfig = {
  name: 'skeptic',
  model: 'claude-sonnet-4-6',
  systemPrompt: `You are a skeptical technology analyst who focuses on risks,
challenges, failure modes, and hidden costs. You stress-test assumptions and
ask "what could go wrong?" Back your skepticism with concrete reasoning —
cite failure rates, technical limitations, and market realities. Keep your
analysis to 200-300 words.`,
  maxTurns: 1,
  temperature: 0.4,
}
/** Analyst #3 — evaluates practical feasibility and execution cost. */
const pragmatistConfig: AgentConfig = {
  name: 'pragmatist',
  model: 'claude-sonnet-4-6',
  systemPrompt: `You are a pragmatic technology analyst who focuses on practical
feasibility, execution complexity, and resource requirements. You care about
what works today, not what might work someday. You think in terms of MVPs,
timelines, and concrete tradeoffs. Keep your analysis to 200-300 words.`,
  maxTurns: 1,
  temperature: 0.4,
}
/**
 * Aggregator — reads all three analyses and produces the final recommendation.
 * Slightly lower temperature than the analysts for a more measured synthesis.
 */
const synthesizerConfig: AgentConfig = {
  name: 'synthesizer',
  model: 'claude-sonnet-4-6',
  systemPrompt: `You are a senior strategy advisor who synthesizes multiple
perspectives into a balanced, actionable recommendation. You do not simply
summarise — you weigh the arguments, identify where they agree and disagree,
and produce a clear verdict with next steps. Structure your output as:
1. Key agreements across perspectives
2. Key disagreements and how you weigh them
3. Verdict (go / no-go / conditional go)
4. Recommended next steps (3-5 bullet points)
Keep the final report to 300-400 words.`,
  maxTurns: 1,
  temperature: 0.3,
}
// ---------------------------------------------------------------------------
// Build agents — no tools needed for pure reasoning
// ---------------------------------------------------------------------------
function buildAgent(config: AgentConfig): Agent {
const registry = new ToolRegistry()
registerBuiltInTools(registry) // not needed here, but safe if tools are added later
const executor = new ToolExecutor(registry)
return new Agent(config, registry, executor)
}
const optimist = buildAgent(optimistConfig)
const skeptic = buildAgent(skepticConfig)
const pragmatist = buildAgent(pragmatistConfig)
const synthesizer = buildAgent(synthesizerConfig)
// ---------------------------------------------------------------------------
// Set up the pool
// ---------------------------------------------------------------------------
// Concurrency of 3 lets all three analysts run simultaneously in Step 1.
const pool = new AgentPool(3)
for (const member of [optimist, skeptic, pragmatist, synthesizer]) {
  pool.add(member)
}
console.log('Fan-Out / Aggregate (MapReduce) Pattern')
console.log('='.repeat(60))
console.log(`\nTopic: ${TOPIC.replace(/\n/g, ' ').trim()}\n`)
// ---------------------------------------------------------------------------
// Step 1: Fan-out — run all 3 analysts in parallel
// ---------------------------------------------------------------------------
console.log('[Step 1] Fan-out: 3 analysts running in parallel...\n')
// The three analyst names, used both to build the jobs and to read results.
const analysts = ['optimist', 'skeptic', 'pragmatist'] as const
// Fan-out: the identical prompt goes to every analyst concurrently.
const analystResults: Map<string, AgentRunResult> = await pool.runParallel(
  analysts.map(agent => ({ agent, prompt: TOPIC })),
)
// Print each analyst's output (truncated to the first 150 characters).
for (const analystName of analysts) {
  const analysis = analystResults.get(analystName)!
  const status = analysis.success ? 'OK' : 'FAILED'
  console.log(`  ${analystName} [${status}] — ${analysis.tokenUsage.output_tokens} output tokens`)
  console.log(`    ${analysis.output.slice(0, 150).replace(/\n/g, ' ')}...`)
  console.log()
}
// Abort with a non-zero exit code if any analyst failed.
for (const analystName of analysts) {
  const analysis = analystResults.get(analystName)!
  if (!analysis.success) {
    console.error(`Analyst '${analystName}' failed: ${analysis.output}`)
    process.exit(1)
  }
}
// ---------------------------------------------------------------------------
// Step 2: Aggregate — synthesizer reads all 3 analyses
// ---------------------------------------------------------------------------
console.log('[Step 2] Aggregate: synthesizer producing final report...\n')
// The reduce step: inline all three analyses verbatim into one prompt so the
// synthesizer sees every perspective in full.
const synthesizerPrompt = `Three analysts have independently evaluated the same question.
Read their analyses below and produce your synthesis report.
--- OPTIMIST ---
${analystResults.get('optimist')!.output}
--- SKEPTIC ---
${analystResults.get('skeptic')!.output}
--- PRAGMATIST ---
${analystResults.get('pragmatist')!.output}
Now synthesize these three perspectives into a balanced recommendation.`
const synthResult = await pool.run('synthesizer', synthesizerPrompt)
// Abort with a non-zero exit code if the synthesis step failed.
if (!synthResult.success) {
  console.error('Synthesizer failed:', synthResult.output)
  process.exit(1)
}
// ---------------------------------------------------------------------------
// Final output
// ---------------------------------------------------------------------------
console.log('='.repeat(60))
console.log('SYNTHESIZED REPORT')
console.log('='.repeat(60))
console.log()
console.log(synthResult.output)
console.log()
console.log('-'.repeat(60))
// ---------------------------------------------------------------------------
// Token usage comparison
// ---------------------------------------------------------------------------
console.log('\nToken Usage Summary:')
console.log('-'.repeat(60))
// Tally one row per analyst, then the synthesizer, then the grand total.
let totalInput = 0
let totalOutput = 0
for (const analystName of analysts) {
  const usage = analystResults.get(analystName)!.tokenUsage
  totalInput += usage.input_tokens
  totalOutput += usage.output_tokens
  console.log(`  ${analystName.padEnd(12)} — input: ${usage.input_tokens}, output: ${usage.output_tokens}`)
}
totalInput += synthResult.tokenUsage.input_tokens
totalOutput += synthResult.tokenUsage.output_tokens
console.log(`  ${'synthesizer'.padEnd(12)} — input: ${synthResult.tokenUsage.input_tokens}, output: ${synthResult.tokenUsage.output_tokens}`)
console.log('-'.repeat(60))
console.log(`  ${'TOTAL'.padEnd(12)} — input: ${totalInput}, output: ${totalOutput}`)
console.log('\nDone.')

1044
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -44,6 +44,7 @@
},
"devDependencies": {
"@types/node": "^22.0.0",
"tsx": "^4.21.0",
"typescript": "^5.6.0",
"vitest": "^2.1.0"
}