examples: add Engram integration (memory store, toolkit, two demos) (#160)

This commit is contained in:
Joshua Brown 2026-04-23 14:48:44 -04:00 committed by GitHub
parent 8e6bf9bde1
commit a6d3c3877f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 836 additions and 0 deletions

View File

@ -0,0 +1,187 @@
/**
* Engram Memory Store
*
* A {@link MemoryStore} implementation backed by Engram's REST API.
* Engram provides shared team memory for AI agents — facts committed by one
* agent are visible to all others in the workspace.
*
* Run:
* npx tsx examples/integrations/with-engram/research-team.ts
*
* Prerequisites:
* - Engram server running at http://localhost:7474 (or custom baseUrl)
* - ENGRAM_INVITE_KEY env var (or passed via constructor)
*/
import type { MemoryEntry, MemoryStore } from '../../../src/types.js'
// ---------------------------------------------------------------------------
// Engram fact shape (as returned by the API)
// ---------------------------------------------------------------------------
interface EngramFact {
fact_id: string
lineage_id: string
content: string
scope: string
agent_id?: string
committed_at: string
}
// ---------------------------------------------------------------------------
// Configuration
// ---------------------------------------------------------------------------
export interface EngramStoreOptions {
/** Engram server URL. Defaults to `http://localhost:7474`. */
baseUrl?: string
/** Workspace invite key. Falls back to `ENGRAM_INVITE_KEY` env var. */
inviteKey?: string
/** Default confidence for commits. Defaults to `0.9`. */
confidence?: number
}
// ---------------------------------------------------------------------------
// EngramMemoryStore
// ---------------------------------------------------------------------------
export class EngramMemoryStore implements MemoryStore {
private readonly baseUrl: string
private readonly inviteKey: string
private readonly confidence: number
constructor(options: EngramStoreOptions = {}) {
this.baseUrl = (options.baseUrl ?? 'http://localhost:7474').replace(/\/+$/, '')
this.inviteKey = options.inviteKey ?? process.env.ENGRAM_INVITE_KEY ?? ''
this.confidence = options.confidence ?? 0.9
}
// ---------------------------------------------------------------------------
// MemoryStore interface
// ---------------------------------------------------------------------------
/**
* Store a value under `key` by committing a fact with `scope=key`.
* Uses `operation: "update"` so repeated writes to the same key supersede
* the previous value rather than creating duplicates.
*/
async set(key: string, value: string, metadata?: Record<string, unknown>): Promise<void> {
await this.post('/api/commit', {
scope: key,
content: value,
confidence: this.confidence,
agent_id: metadata?.agent ?? undefined,
operation: 'update',
})
}
/**
* Retrieve the most recent fact for `key` (scope).
* Returns `null` when no matching fact exists.
*/
async get(key: string): Promise<MemoryEntry | null> {
const url = `${this.baseUrl}/api/facts?scope=${encodeURIComponent(key)}&limit=1`
const res = await fetch(url, { headers: this.headers() })
if (!res.ok) return null
const facts: EngramFact[] = await res.json()
if (facts.length === 0) return null
return this.toMemoryEntry(facts[0])
}
/**
* List all facts in the workspace (up to 200).
* Each fact is mapped to a {@link MemoryEntry} using `scope` as the key.
*/
async list(): Promise<MemoryEntry[]> {
const url = `${this.baseUrl}/api/facts?limit=200`
const res = await fetch(url, { headers: this.headers() })
if (!res.ok) return []
const facts: EngramFact[] = await res.json()
return facts.map((f) => this.toMemoryEntry(f))
}
/**
* Retire the most recent fact for `key` (scope) by its lineage ID.
*
* Engram's `delete` operation requires `corrects_lineage` it retires a
* specific lineage rather than deleting by scope. We look up the latest
* fact first to obtain its `lineage_id`, then issue the delete.
*
* No-op when no fact exists for the key.
*/
async delete(key: string): Promise<void> {
// Look up the latest fact to get its lineage_id.
const entry = await this.getFact(key)
if (!entry) return
await this.post('/api/commit', {
scope: key,
content: `Retired by MemoryStore.delete("${key}")`,
confidence: this.confidence,
operation: 'delete',
corrects_lineage: entry.lineage_id,
})
}
/**
* No-op. Engram preserves full audit history by design bulk erasure is
* not supported and would violate the append-only contract.
*/
async clear(): Promise<void> {
// Intentional no-op: Engram preserves audit history.
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
private headers(): Record<string, string> {
return {
Authorization: `Bearer ${this.inviteKey}`,
'Content-Type': 'application/json',
}
}
/**
* Fetch the most recent raw fact for a scope.
* Used internally by `delete()` to obtain the `lineage_id`.
*/
private async getFact(scope: string): Promise<EngramFact | null> {
const url = `${this.baseUrl}/api/facts?scope=${encodeURIComponent(scope)}&limit=1`
const res = await fetch(url, { headers: this.headers() })
if (!res.ok) return null
const facts: EngramFact[] = await res.json()
return facts.length > 0 ? facts[0] : null
}
private async post(path: string, body: Record<string, unknown>): Promise<void> {
const res = await fetch(`${this.baseUrl}${path}`, {
method: 'POST',
headers: this.headers(),
body: JSON.stringify(body),
})
if (!res.ok) {
const text = await res.text().catch(() => '<no body>')
throw new Error(`Engram ${path} failed (${res.status}): ${text}`)
}
}
private toMemoryEntry(fact: EngramFact): MemoryEntry {
return {
key: fact.scope,
value: fact.content,
metadata: {
fact_id: fact.fact_id,
lineage_id: fact.lineage_id,
agent_id: fact.agent_id,
},
createdAt: new Date(fact.committed_at),
}
}
}

View File

@ -0,0 +1,193 @@
/**
* Engram Toolkit
*
* Registers four Engram tools with a {@link ToolRegistry} so any agent can
* commit facts, query shared memory, audit conflict resolutions, and override
* auto-resolutions.
*
* Run:
* npx tsx examples/integrations/with-engram/research-team.ts
*
* Prerequisites:
* - Engram server running at http://localhost:7474 (or custom baseUrl)
* - ENGRAM_INVITE_KEY env var (or passed via constructor)
*/
import { z } from 'zod'
import { defineTool, ToolRegistry } from '../../../src/index.js'
// ---------------------------------------------------------------------------
// Configuration
// ---------------------------------------------------------------------------
export interface EngramToolkitOptions {
/** Engram server URL. Defaults to `http://localhost:7474`. */
baseUrl?: string
/** Workspace invite key. Falls back to `ENGRAM_INVITE_KEY` env var. */
inviteKey?: string
}
// ---------------------------------------------------------------------------
// EngramToolkit
// ---------------------------------------------------------------------------
export class EngramToolkit {
private readonly baseUrl: string
private readonly inviteKey: string
constructor(options: EngramToolkitOptions = {}) {
this.baseUrl = (options.baseUrl ?? 'http://localhost:7474').replace(/\/+$/, '')
this.inviteKey = options.inviteKey ?? process.env.ENGRAM_INVITE_KEY ?? ''
}
/**
* Register all four Engram tools with the given registry.
*/
registerAll(registry: ToolRegistry): void {
for (const tool of this.getTools()) {
registry.register(tool)
}
}
/**
* Returns all four Engram tool definitions as an array.
* Use this with `AgentConfig.customTools` so the orchestrator's per-agent
* registry picks them up automatically (instead of a shared outer registry
* that `runTeam` / `buildPool` never sees).
*/
getTools() {
return [this.commitTool(), this.queryTool(), this.conflictsTool(), this.resolveTool()]
}
// ---------------------------------------------------------------------------
// Tool definitions
// ---------------------------------------------------------------------------
private commitTool() {
return defineTool({
name: 'engram_commit',
description:
'Commit a verified fact to Engram shared team memory. ' +
'Use this to record discoveries, decisions, or corrections that other agents should see.',
inputSchema: z.object({
content: z.string().describe('The fact to commit'),
scope: z.string().describe('Context scope (e.g. "research", "architecture")'),
confidence: z.number().min(0).max(1).describe('Confidence level 0-1'),
operation: z
.enum(['add', 'update', 'delete', 'none'])
.optional()
.describe('Memory operation. Use "update" when correcting a prior fact. Default: add.'),
fact_type: z
.enum(['observation', 'decision', 'constraint', 'warning', 'inference'])
.optional()
.describe('Category of the fact'),
agent_id: z.string().optional().describe('Identifier of the committing agent'),
ttl_days: z.number().optional().describe('Auto-expire after N days'),
}),
execute: async (input) => {
const res = await fetch(`${this.baseUrl}/api/commit`, {
method: 'POST',
headers: this.headers(),
body: JSON.stringify(input),
})
const data = await res.text()
return { data, isError: !res.ok }
},
})
}
private queryTool() {
return defineTool({
name: 'engram_query',
description:
'Query Engram shared memory for facts about a topic. ' +
'Call this before starting any task to see what the team already knows.',
inputSchema: z.object({
topic: z.string().describe('What to search for'),
scope: z.string().optional().describe('Filter by scope'),
limit: z.number().optional().describe('Max results (default 10)'),
fact_type: z
.enum(['observation', 'decision', 'constraint', 'warning', 'inference'])
.optional()
.describe('Filter by fact type'),
}),
execute: async (input) => {
const res = await fetch(`${this.baseUrl}/api/query`, {
method: 'POST',
headers: this.headers(),
body: JSON.stringify(input),
})
const data = await res.text()
return { data, isError: !res.ok }
},
})
}
private conflictsTool() {
return defineTool({
name: 'engram_conflicts',
description:
'List conflicts between facts in Engram shared memory. ' +
'Conflicts are auto-resolved by Claude (with ANTHROPIC_API_KEY) or heuristic — ' +
'this tool is for auditing resolutions, not triggering them.',
inputSchema: z.object({
scope: z.string().optional().describe('Filter by scope'),
status: z
.enum(['open', 'resolved', 'dismissed'])
.optional()
.describe('Filter by status (default: open)'),
}),
execute: async (input) => {
const params = new URLSearchParams()
if (input.scope) params.set('scope', input.scope)
if (input.status) params.set('status', input.status)
const qs = params.toString()
const url = `${this.baseUrl}/api/conflicts${qs ? `?${qs}` : ''}`
const res = await fetch(url, { headers: this.headers() })
const data = await res.text()
return { data, isError: !res.ok }
},
})
}
private resolveTool() {
return defineTool({
name: 'engram_resolve',
description:
'Override an auto-resolution for a conflict between facts. ' +
'Use this when the automatic resolution was incorrect and you need to pick a different winner or merge.',
inputSchema: z.object({
conflict_id: z.string().describe('ID of the conflict to resolve'),
resolution_type: z
.enum(['winner', 'merge', 'dismissed'])
.describe('How to resolve: pick a winner, merge both, or dismiss'),
resolution: z.string().describe('Explanation of the resolution'),
winning_claim_id: z
.string()
.optional()
.describe('fact_id of the correct fact (required for winner type)'),
}),
execute: async (input) => {
const res = await fetch(`${this.baseUrl}/api/resolve`, {
method: 'POST',
headers: this.headers(),
body: JSON.stringify(input),
})
const data = await res.text()
return { data, isError: !res.ok }
},
})
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
private headers(): Record<string, string> {
return {
Authorization: `Bearer ${this.inviteKey}`,
'Content-Type': 'application/json',
}
}
}

View File

@ -0,0 +1,225 @@
/**
* Engram Research Team
*
* Three agents collaborate on a research topic using Engram shared memory:
*
* 1. **Researcher** explores the topic and commits findings as facts
* 2. **Fact-checker** verifies claims, commits corrections, and audits
* any auto-resolved conflicts
* 3. **Writer** queries settled facts and produces a briefing document
*
* Works with every provider the framework supports. Set the provider and model
* via environment variables:
*
* AGENT_PROVIDER anthropic | openai | gemini | grok | copilot | deepseek | minimax | azure-openai
* AGENT_MODEL model name for the chosen provider
*
* Defaults to anthropic / claude-sonnet-4-6 when unset.
*
* Run:
* npx tsx examples/integrations/with-engram/research-team.ts
*
* Examples:
* # Anthropic (default)
* ANTHROPIC_API_KEY=sk-... ENGRAM_INVITE_KEY=ek_live_... npx tsx examples/integrations/with-engram/research-team.ts
*
* # OpenAI
* AGENT_PROVIDER=openai AGENT_MODEL=gpt-4o OPENAI_API_KEY=sk-... ENGRAM_INVITE_KEY=ek_live_... npx tsx examples/integrations/with-engram/research-team.ts
*
* # Gemini
* AGENT_PROVIDER=gemini AGENT_MODEL=gemini-2.5-flash GEMINI_API_KEY=... ENGRAM_INVITE_KEY=ek_live_... npx tsx examples/integrations/with-engram/research-team.ts
*
* # Grok
* AGENT_PROVIDER=grok AGENT_MODEL=grok-3 XAI_API_KEY=... ENGRAM_INVITE_KEY=ek_live_... npx tsx examples/integrations/with-engram/research-team.ts
*
* # DeepSeek
* AGENT_PROVIDER=deepseek AGENT_MODEL=deepseek-chat DEEPSEEK_API_KEY=... ENGRAM_INVITE_KEY=ek_live_... npx tsx examples/integrations/with-engram/research-team.ts
*
* Prerequisites:
* - API key env var for your chosen provider
* - Engram server running at http://localhost:7474
* - ENGRAM_INVITE_KEY env var
*/
import {
Agent,
ToolExecutor,
ToolRegistry,
registerBuiltInTools,
} from '../../../src/index.js'
import type { SupportedProvider } from '../../../src/index.js'
import { EngramToolkit } from './engram-toolkit.js'
// ---------------------------------------------------------------------------
// Provider / model configuration
// ---------------------------------------------------------------------------
const PROVIDER = (process.env.AGENT_PROVIDER ?? 'anthropic') as SupportedProvider
const MODEL = process.env.AGENT_MODEL ?? 'claude-sonnet-4-6'
const PROVIDER_ENV_KEYS: Record<string, string> = {
anthropic: 'ANTHROPIC_API_KEY',
openai: 'OPENAI_API_KEY',
gemini: 'GEMINI_API_KEY',
grok: 'XAI_API_KEY',
copilot: 'GITHUB_TOKEN',
deepseek: 'DEEPSEEK_API_KEY',
minimax: 'MINIMAX_API_KEY',
'azure-openai': 'AZURE_OPENAI_API_KEY',
}
const envKey = PROVIDER_ENV_KEYS[PROVIDER]
if (envKey && !process.env[envKey]?.trim()) {
console.error(`Missing ${envKey}: required for provider "${PROVIDER}".`)
process.exit(1)
}
if (!process.env.ENGRAM_INVITE_KEY?.trim()) {
console.error('Missing ENGRAM_INVITE_KEY: set your Engram workspace invite key in the environment.')
process.exit(1)
}
// ---------------------------------------------------------------------------
// Shared setup
// ---------------------------------------------------------------------------
const TOPIC = 'the current state of AI agent memory systems'
const engramTools = ['engram_commit', 'engram_query', 'engram_conflicts', 'engram_resolve']
function buildAgent(config: {
name: string
systemPrompt: string
}): Agent {
const registry = new ToolRegistry()
registerBuiltInTools(registry)
new EngramToolkit().registerAll(registry)
const executor = new ToolExecutor(registry)
return new Agent(
{
name: config.name,
model: MODEL,
provider: PROVIDER,
tools: engramTools,
systemPrompt: config.systemPrompt,
},
registry,
executor,
)
}
// ---------------------------------------------------------------------------
// Agents
// ---------------------------------------------------------------------------
const researcher = buildAgent({
name: 'researcher',
systemPrompt: `You are a research agent investigating: "${TOPIC}".
Your job:
1. Think through the key dimensions of this topic (architectures, open problems,
leading projects, recent breakthroughs).
2. For each finding, use engram_commit to record it as a shared fact with
scope="research" and an appropriate confidence level.
3. Commit at least 5 distinct facts covering different aspects.
Be specific and cite concrete systems or papers where possible.`,
})
const factChecker = buildAgent({
name: 'fact-checker',
systemPrompt: `You are a fact-checking agent. Your job:
1. Use engram_query with topic="${TOPIC}" to retrieve what the researcher committed.
2. Evaluate each fact for accuracy and completeness.
3. If a fact is wrong or misleading, use engram_commit with operation="update"
to commit a corrected version in the same scope.
4. After committing corrections, call engram_conflicts to review any
auto-resolved conflicts. You are auditing the resolutions do NOT manually
resolve them unless an auto-resolution is clearly wrong.
5. Summarize your findings at the end.`,
})
const writer = buildAgent({
name: 'writer',
systemPrompt: `You are a technical writer. Your job:
1. Use engram_query with topic="${TOPIC}" to retrieve all settled facts.
2. Synthesize the facts into a concise executive briefing (300-500 words).
3. Structure the briefing with clear sections: Overview, Key Systems,
Open Challenges, and Outlook.
4. Only include claims that are grounded in the queried facts do not
fabricate or speculate beyond what the team has verified.
5. Output the briefing as your final response.`,
})
// ---------------------------------------------------------------------------
// Sequential execution
// ---------------------------------------------------------------------------
console.log('Engram Research Team')
console.log('='.repeat(60))
console.log(`Provider: ${PROVIDER}`)
console.log(`Model: ${MODEL}`)
console.log(`Topic: ${TOPIC}\n`)
// Step 1: Research
console.log('[1/3] Researcher is exploring the topic...')
const researchResult = await researcher.run(
`Research "${TOPIC}" and commit your findings to Engram shared memory.`,
)
console.log(` Done — ${researchResult.toolCalls.length} tool calls, ` +
`${researchResult.tokenUsage.output_tokens} output tokens\n`)
// Step 2: Fact-check
console.log('[2/3] Fact-checker is verifying claims...')
const checkResult = await factChecker.run(
`Review and fact-check the research on "${TOPIC}" in Engram shared memory. ` +
`Commit corrections and audit any auto-resolved conflicts.`,
)
console.log(` Done — ${checkResult.toolCalls.length} tool calls, ` +
`${checkResult.tokenUsage.output_tokens} output tokens\n`)
// Step 3: Write briefing
console.log('[3/3] Writer is producing the briefing...')
const writeResult = await writer.run(
`Query Engram for settled facts on "${TOPIC}" and write an executive briefing.`,
)
console.log(` Done — ${writeResult.toolCalls.length} tool calls, ` +
`${writeResult.tokenUsage.output_tokens} output tokens\n`)
// ---------------------------------------------------------------------------
// Output
// ---------------------------------------------------------------------------
console.log('='.repeat(60))
console.log('EXECUTIVE BRIEFING')
console.log('='.repeat(60))
console.log()
console.log(writeResult.output)
console.log()
console.log('-'.repeat(60))
// Token summary
const agents = [
{ name: 'researcher', result: researchResult },
{ name: 'fact-checker', result: checkResult },
{ name: 'writer', result: writeResult },
]
let totalInput = 0
let totalOutput = 0
console.log('\nToken Usage:')
for (const { name, result } of agents) {
totalInput += result.tokenUsage.input_tokens
totalOutput += result.tokenUsage.output_tokens
console.log(
` ${name.padEnd(14)} — input: ${result.tokenUsage.input_tokens}, output: ${result.tokenUsage.output_tokens}`,
)
}
console.log('-'.repeat(60))
console.log(` ${'TOTAL'.padEnd(14)} — input: ${totalInput}, output: ${totalOutput}`)
console.log(`\nView shared memory and conflicts: http://localhost:7474/dashboard`)

View File

@ -0,0 +1,231 @@
/**
* Engram Team Research (orchestrated)
*
* Same research pipeline as research-team.ts, but driven by the orchestrator
* via `runTeam()` with `EngramMemoryStore` plugged in as the team's
* `sharedMemoryStore`. This means the orchestrator's built-in shared-memory
* plumbing (task-result injection, coordinator summaries) flows through
* Engram automatically — no manual engram_commit/engram_query calls needed
* for inter-task context.
*
* The Engram toolkit tools are still registered so agents can query or audit
* conflicts when they choose to.
*
* Works with every provider the framework supports. Set the provider and model
* via environment variables:
*
* AGENT_PROVIDER anthropic | openai | gemini | grok | copilot | deepseek | minimax | azure-openai
* AGENT_MODEL model name for the chosen provider
*
* Defaults to anthropic / claude-sonnet-4-6 when unset.
*
* Run:
* npx tsx examples/integrations/with-engram/team-research.ts
*
* Prerequisites:
* - API key env var for your chosen provider
* - Engram server running at http://localhost:7474
* - ENGRAM_INVITE_KEY env var
*/
import { OpenMultiAgent } from '../../../src/index.js'
import type {
AgentConfig,
OrchestratorEvent,
SupportedProvider,
} from '../../../src/index.js'
import { EngramMemoryStore } from './engram-store.js'
import { EngramToolkit } from './engram-toolkit.js'
// ---------------------------------------------------------------------------
// Provider / model configuration
// ---------------------------------------------------------------------------
const PROVIDER = (process.env.AGENT_PROVIDER ?? 'anthropic') as SupportedProvider
const MODEL = process.env.AGENT_MODEL ?? 'claude-sonnet-4-6'
const PROVIDER_ENV_KEYS: Record<string, string> = {
anthropic: 'ANTHROPIC_API_KEY',
openai: 'OPENAI_API_KEY',
gemini: 'GEMINI_API_KEY',
grok: 'XAI_API_KEY',
copilot: 'GITHUB_TOKEN',
deepseek: 'DEEPSEEK_API_KEY',
minimax: 'MINIMAX_API_KEY',
'azure-openai': 'AZURE_OPENAI_API_KEY',
}
const envKey = PROVIDER_ENV_KEYS[PROVIDER]
if (envKey && !process.env[envKey]?.trim()) {
console.error(`Missing ${envKey}: required for provider "${PROVIDER}".`)
process.exit(1)
}
if (!process.env.ENGRAM_INVITE_KEY?.trim()) {
console.error('Missing ENGRAM_INVITE_KEY: set your Engram workspace invite key in the environment.')
process.exit(1)
}
// ---------------------------------------------------------------------------
// Engram-backed shared memory store
// ---------------------------------------------------------------------------
const engramStore = new EngramMemoryStore()
// ---------------------------------------------------------------------------
// Engram tools via customTools so the orchestrator's per-agent registry
// picks them up (runTeam builds its own registry per agent from built-ins
// plus AgentConfig.customTools — an outer ToolRegistry is never seen).
// ---------------------------------------------------------------------------
const engramTools = new EngramToolkit().getTools()
// ---------------------------------------------------------------------------
// Agent configs
// ---------------------------------------------------------------------------
const TOPIC = 'the current state of AI agent memory systems'
const researcher: AgentConfig = {
name: 'researcher',
model: MODEL,
provider: PROVIDER,
systemPrompt: `You are a research agent investigating: "${TOPIC}".
Your job:
1. Think through the key dimensions of this topic (architectures, open problems,
leading projects, recent breakthroughs).
2. For each finding, use engram_commit to record it as a shared fact with
scope="research" and an appropriate confidence level.
3. Commit at least 5 distinct facts covering different aspects.
Be specific and cite concrete systems or papers where possible.`,
customTools: engramTools,
maxTurns: 10,
}
const factChecker: AgentConfig = {
name: 'fact-checker',
model: MODEL,
provider: PROVIDER,
systemPrompt: `You are a fact-checking agent. Your job:
1. Use engram_query with topic="${TOPIC}" to retrieve what the researcher committed.
2. Evaluate each fact for accuracy and completeness.
3. If a fact is wrong or misleading, use engram_commit with operation="update"
to commit a corrected version in the same scope.
4. After committing corrections, call engram_conflicts to review any
auto-resolved conflicts. You are auditing the resolutions do NOT manually
resolve them unless an auto-resolution is clearly wrong.
5. Summarize your findings at the end.`,
customTools: engramTools,
maxTurns: 10,
}
const writer: AgentConfig = {
name: 'writer',
model: MODEL,
provider: PROVIDER,
systemPrompt: `You are a technical writer. Your job:
1. Use engram_query with topic="${TOPIC}" to retrieve all settled facts.
2. Synthesize the facts into a concise executive briefing (300-500 words).
3. Structure the briefing with clear sections: Overview, Key Systems,
Open Challenges, and Outlook.
4. Only include claims that are grounded in the queried facts do not
fabricate or speculate beyond what the team has verified.
5. Output the briefing as your final response.`,
customTools: engramTools,
maxTurns: 6,
}
// ---------------------------------------------------------------------------
// Progress tracking
// ---------------------------------------------------------------------------
function handleProgress(event: OrchestratorEvent): void {
const ts = new Date().toISOString().slice(11, 23)
switch (event.type) {
case 'agent_start':
console.log(`[${ts}] AGENT START → ${event.agent}`)
break
case 'agent_complete':
console.log(`[${ts}] AGENT DONE ← ${event.agent}`)
break
case 'task_start':
console.log(`[${ts}] TASK START ↓ ${event.task}`)
break
case 'task_complete':
console.log(`[${ts}] TASK DONE ↑ ${event.task}`)
break
case 'error':
console.error(`[${ts}] ERROR ✗ agent=${event.agent} task=${event.task}`)
break
}
}
// ---------------------------------------------------------------------------
// Orchestrate
// ---------------------------------------------------------------------------
console.log('Engram Team Research (orchestrated)')
console.log('='.repeat(60))
console.log(`Provider: ${PROVIDER}`)
console.log(`Model: ${MODEL}`)
console.log(`Topic: ${TOPIC}`)
console.log(`Store: EngramMemoryStore → http://localhost:7474\n`)
const orchestrator = new OpenMultiAgent({
defaultModel: MODEL,
defaultProvider: PROVIDER,
maxConcurrency: 1,
onProgress: handleProgress,
})
const team = orchestrator.createTeam('engram-research', {
name: 'engram-research',
agents: [researcher, factChecker, writer],
sharedMemory: true,
sharedMemoryStore: engramStore,
maxConcurrency: 1,
})
const result = await orchestrator.runTeam(
team,
`Research "${TOPIC}". The researcher explores and commits facts, the fact-checker ` +
`verifies and corrects them (auditing any auto-resolved conflicts), and the writer ` +
`produces an executive briefing from the settled facts.`,
)
// ---------------------------------------------------------------------------
// Output
// ---------------------------------------------------------------------------
console.log('\n' + '='.repeat(60))
console.log('RESULTS')
console.log('='.repeat(60))
console.log(`\nSuccess: ${result.success}`)
console.log('\nPer-agent results:')
for (const [name, agentResult] of result.agentResults) {
const status = agentResult.success ? 'OK' : 'FAILED'
const tools = agentResult.toolCalls.length
console.log(` ${name.padEnd(14)} [${status}] tool_calls=${tools}`)
}
// Print the writer's briefing if available
const writerResult = result.agentResults.get('writer')
if (writerResult?.success) {
console.log('\n' + '='.repeat(60))
console.log('EXECUTIVE BRIEFING')
console.log('='.repeat(60))
console.log()
console.log(writerResult.output)
}
// Token summary
console.log('\n' + '-'.repeat(60))
console.log('Token Usage:')
console.log(` Total — input: ${result.totalTokenUsage.input_tokens}, output: ${result.totalTokenUsage.output_tokens}`)
console.log(`\nView shared memory and conflicts: http://localhost:7474/dashboard`)