parent
6de7bbd41f
commit
4714dd1d4c
|
|
@ -295,13 +295,14 @@ Notes:
| GitHub Copilot | `provider: 'copilot'` | `GITHUB_TOKEN` | Verified |
| Gemini | `provider: 'gemini'` | `GEMINI_API_KEY` | Verified |
| Ollama / vLLM / LM Studio | `provider: 'openai'` + `baseURL` | — | Verified |
| Groq | `provider: 'openai'` + `baseURL` | `GROQ_API_KEY` | Verified |
| llama.cpp server | `provider: 'openai'` + `baseURL` | — | Verified |

Gemini requires `npm install @google/genai` (optional peer dependency).

Verified local models with tool-calling: **Gemma 4** (see [example 08](examples/08-gemma4-local.ts)).

Any OpenAI-compatible API should work via `provider: 'openai'` + `baseURL` (Mistral, Qwen, Moonshot, Doubao, etc.). Groq is now verified in [example 19](examples/19-groq.ts). **Grok, MiniMax, and DeepSeek now have first-class support** via `provider: 'grok'`, `provider: 'minimax'`, and `provider: 'deepseek'`.

### Local Model Tool-Calling
|
|
@ -0,0 +1,165 @@
|
||||||
|
/**
|
||||||
|
* Example 19 — Multi-Agent Team Collaboration with Groq
|
||||||
|
*
|
||||||
|
* Three specialized agents (architect, developer, reviewer) collaborate via `runTeam()`
|
||||||
|
* to build a minimal Express.js REST API. Every agent uses Groq via the OpenAI-compatible adapter.
|
||||||
|
*
|
||||||
|
* Run:
|
||||||
|
* npx tsx examples/19-groq.ts
|
||||||
|
*
|
||||||
|
* Prerequisites:
|
||||||
|
* GROQ_API_KEY environment variable must be set.
|
||||||
|
*
|
||||||
|
* Available models:
|
||||||
|
* llama-3.3-70b-versatile — Groq production model (recommended for coding tasks)
|
||||||
|
* mixtral-8x7b-32768 — Groq fast mixture-of-experts model
|
||||||
|
* deepseek-r1-distill-llama-70b — Groq reasoning model
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { OpenMultiAgent } from '../src/index.js'
|
||||||
|
import type { AgentConfig, OrchestratorEvent } from '../src/types.js'
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Agent definitions (all using Groq via the OpenAI-compatible adapter)
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
const architect: AgentConfig = {
|
||||||
|
name: 'architect',
|
||||||
|
model: 'deepseek-r1-distill-llama-70b',
|
||||||
|
provider: 'openai',
|
||||||
|
baseURL: 'https://api.groq.com/openai/v1',
|
||||||
|
apiKey: process.env.GROQ_API_KEY,
|
||||||
|
systemPrompt: `You are a software architect with deep experience in Node.js and REST API design.
|
||||||
|
Your job is to design clear, production-quality API contracts and file/directory structures.
|
||||||
|
Output concise plans in markdown — no unnecessary prose.`,
|
||||||
|
tools: ['bash', 'file_write'],
|
||||||
|
maxTurns: 5,
|
||||||
|
temperature: 0.2,
|
||||||
|
}
|
||||||
|
|
||||||
|
const developer: AgentConfig = {
|
||||||
|
name: 'developer',
|
||||||
|
model: 'llama-3.3-70b-versatile',
|
||||||
|
provider: 'openai',
|
||||||
|
baseURL: 'https://api.groq.com/openai/v1',
|
||||||
|
apiKey: process.env.GROQ_API_KEY,
|
||||||
|
systemPrompt: `You are a TypeScript/Node.js developer. You implement what the architect specifies.
|
||||||
|
Write clean, runnable code with proper error handling. Use the tools to write files and run tests.`,
|
||||||
|
tools: ['bash', 'file_read', 'file_write', 'file_edit'],
|
||||||
|
maxTurns: 12,
|
||||||
|
temperature: 0.1,
|
||||||
|
}
|
||||||
|
|
||||||
|
const reviewer: AgentConfig = {
|
||||||
|
name: 'reviewer',
|
||||||
|
model: 'llama-3.3-70b-versatile',
|
||||||
|
provider: 'openai',
|
||||||
|
baseURL: 'https://api.groq.com/openai/v1',
|
||||||
|
apiKey: process.env.GROQ_API_KEY,
|
||||||
|
systemPrompt: `You are a senior code reviewer. Review code for correctness, security, and clarity.
|
||||||
|
Provide a structured review with: LGTM items, suggestions, and any blocking issues.
|
||||||
|
Read files using the tools before reviewing.`,
|
||||||
|
tools: ['bash', 'file_read', 'grep'],
|
||||||
|
maxTurns: 5,
|
||||||
|
temperature: 0.3,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Progress tracking
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
const startTimes = new Map<string, number>()
|
||||||
|
|
||||||
|
function handleProgress(event: OrchestratorEvent): void {
|
||||||
|
const ts = new Date().toISOString().slice(11, 23) // HH:MM:SS.mmm
|
||||||
|
switch (event.type) {
|
||||||
|
case 'agent_start':
|
||||||
|
startTimes.set(event.agent ?? '', Date.now())
|
||||||
|
console.log(`[${ts}] AGENT START → ${event.agent}`)
|
||||||
|
break
|
||||||
|
case 'agent_complete': {
|
||||||
|
const elapsed = Date.now() - (startTimes.get(event.agent ?? '') ?? Date.now())
|
||||||
|
console.log(`[${ts}] AGENT DONE ← ${event.agent} (${elapsed}ms)`)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
case 'task_start':
|
||||||
|
console.log(`[${ts}] TASK START ↓ ${event.task}`)
|
||||||
|
break
|
||||||
|
case 'task_complete':
|
||||||
|
console.log(`[${ts}] TASK DONE ↑ ${event.task}`)
|
||||||
|
break
|
||||||
|
case 'message':
|
||||||
|
console.log(`[${ts}] MESSAGE • ${event.agent} → (team)`)
|
||||||
|
break
|
||||||
|
case 'error':
|
||||||
|
console.error(`[${ts}] ERROR ✗ agent=${event.agent} task=${event.task}`)
|
||||||
|
if (event.data instanceof Error) console.error(` ${event.data.message}`)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Orchestrate
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
const orchestrator = new OpenMultiAgent({
|
||||||
|
defaultModel: 'llama-3.3-70b-versatile',
|
||||||
|
defaultProvider: 'openai',
|
||||||
|
maxConcurrency: 1, // sequential for readable output
|
||||||
|
onProgress: handleProgress,
|
||||||
|
})
|
||||||
|
|
||||||
|
const team = orchestrator.createTeam('api-team', {
|
||||||
|
name: 'api-team',
|
||||||
|
agents: [architect, developer, reviewer],
|
||||||
|
sharedMemory: true,
|
||||||
|
maxConcurrency: 1,
|
||||||
|
})
|
||||||
|
|
||||||
|
console.log(`Team "${team.name}" created with agents: ${team.getAgents().map(a => a.name).join(', ')}`)
|
||||||
|
console.log('\nStarting team run...\n')
|
||||||
|
console.log('='.repeat(60))
|
||||||
|
|
||||||
|
const goal = `Create a minimal Express.js REST API in /tmp/express-api/ with:
|
||||||
|
- GET /health → { status: "ok" }
|
||||||
|
- GET /users → returns a hardcoded array of 2 user objects
|
||||||
|
- POST /users → accepts { name, email } body, logs it, returns 201
|
||||||
|
- Proper error handling middleware
|
||||||
|
- The server should listen on port 3001
|
||||||
|
- Include a package.json with the required dependencies`
|
||||||
|
|
||||||
|
const result = await orchestrator.runTeam(team, goal)
|
||||||
|
|
||||||
|
console.log('\n' + '='.repeat(60))
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Results
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
console.log('\nTeam run complete.')
|
||||||
|
console.log(`Success: ${result.success}`)
|
||||||
|
console.log(`Total tokens — input: ${result.totalTokenUsage.input_tokens}, output: ${result.totalTokenUsage.output_tokens}`)
|
||||||
|
|
||||||
|
console.log('\nPer-agent results:')
|
||||||
|
for (const [agentName, agentResult] of result.agentResults) {
|
||||||
|
const status = agentResult.success ? 'OK' : 'FAILED'
|
||||||
|
const tools = agentResult.toolCalls.length
|
||||||
|
console.log(` ${agentName.padEnd(12)} [${status}] tool_calls=${tools}`)
|
||||||
|
if (!agentResult.success) {
|
||||||
|
console.log(` Error: ${agentResult.output.slice(0, 120)}`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sample outputs
|
||||||
|
const developerResult = result.agentResults.get('developer')
|
||||||
|
if (developerResult?.success) {
|
||||||
|
console.log('\nDeveloper output (last 600 chars):')
|
||||||
|
console.log('─'.repeat(60))
|
||||||
|
const out = developerResult.output
|
||||||
|
console.log(out.length > 600 ? '...' + out.slice(-600) : out)
|
||||||
|
console.log('─'.repeat(60))
|
||||||
|
}
|
||||||
|
|
||||||
|
const reviewerResult = result.agentResults.get('reviewer')
|
||||||
|
if (reviewerResult?.success) {
|
||||||
|
console.log('\nReviewer output:')
|
||||||
|
console.log('─'.repeat(60))
|
||||||
|
console.log(reviewerResult.output)
|
||||||
|
console.log('─'.repeat(60))
|
||||||
|
}
|
||||||
Loading…
Reference in New Issue