feat: add support for Gemini model

This commit is contained in:
MrAvalonApple 2026-04-02 21:41:56 +03:00
parent 6e6a85178b
commit 2bb220fc63
6 changed files with 834 additions and 12 deletions

View File

@ -15,7 +15,7 @@ Build AI agent teams that decompose goals into tasks automatically. Define agent
- **Auto Task Decomposition** — Describe a goal in plain text. A built-in coordinator agent breaks it into a task DAG with dependencies and assignees — no manual orchestration needed.
- **Multi-Agent Teams** — Define agents with different roles, tools, and even different models. They collaborate through a message bus and shared memory.
- **Task DAG Scheduling** — Tasks have dependencies. The framework resolves them topologically — dependent tasks wait, independent tasks run in parallel.
- **Model Agnostic** — Claude, GPT, and local models (Ollama, vLLM, LM Studio) in the same team. Swap models per agent via `baseURL`.
- **Model Agnostic** — Claude, GPT, Gemini, and local models (Ollama, vLLM, LM Studio) in the same team. Swap models per agent via `baseURL`.
- **In-Process Execution** — No subprocess overhead. Everything runs in one Node.js process. Deploy to serverless, Docker, CI/CD.
## Quick Start
@ -26,7 +26,12 @@ Requires Node.js >= 18.
npm install @jackchen_me/open-multi-agent
```
Set `ANTHROPIC_API_KEY` (and optionally `OPENAI_API_KEY` or `GITHUB_TOKEN` for Copilot) in your environment.
Set the API key for your provider:
- `ANTHROPIC_API_KEY`
- `OPENAI_API_KEY`
- `GEMINI_API_KEY`
- `GITHUB_TOKEN` (for Copilot)
Three agents, one goal — the framework handles the rest:
@ -198,7 +203,7 @@ const result = await agent.run('Find the three most recent TypeScript releases.'
</details>
<details>
<summary><b>Multi-Model Teams</b> — mix Claude, GPT, and local models in one workflow</summary>
<summary><b>Multi-Model Teams</b> — mix Claude, GPT, Gemini, and local models in one workflow</summary>
```typescript
const claudeAgent: AgentConfig = {
@ -296,6 +301,7 @@ for await (const event of agent.stream('Explain monads in two sentences.')) {
│ - stream() │ │ - AnthropicAdapter │
└────────┬──────────┘ │ - OpenAIAdapter │
│ │ - CopilotAdapter │
│ │ - GeminiAdapter │
│ └──────────────────────┘
┌────────▼──────────┐
│ AgentRunner │ ┌──────────────────────┐
@ -319,7 +325,7 @@ for await (const event of agent.stream('Explain monads in two sentences.')) {
Issues, feature requests, and PRs are welcome. Some areas where contributions would be especially valuable:
- **LLM Adapters** — Anthropic, OpenAI, and Copilot are supported out of the box. Any OpenAI-compatible API (Ollama, vLLM, LM Studio, etc.) works via `baseURL`. Additional adapters for Gemini and other providers are welcome. The `LLMAdapter` interface requires just two methods: `chat()` and `stream()`.
- **LLM Adapters** — Anthropic, OpenAI, and Copilot are supported out of the box. Any OpenAI-compatible API (Ollama, vLLM, LM Studio, etc.) works via `baseURL`. Additional adapters for other providers are welcome. The `LLMAdapter` interface requires just two methods: `chat()` and `stream()`.
- **Examples** — Real-world workflows and use cases.
- **Documentation** — Guides, tutorials, and API docs.

417
package-lock.json generated
View File

@ -10,6 +10,7 @@
"license": "MIT",
"dependencies": {
"@anthropic-ai/sdk": "^0.52.0",
"@google/genai": "^1.48.0",
"openai": "^4.73.0",
"zod": "^3.23.0"
},
@ -19,7 +20,7 @@
"vitest": "^2.1.0"
},
"engines": {
"node": ">=18.0.0"
"node": ">=20.0.0"
}
},
"node_modules/@anthropic-ai/sdk": {
@ -422,6 +423,29 @@
"node": ">=12"
}
},
"node_modules/@google/genai": {
"version": "1.48.0",
"resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.48.0.tgz",
"integrity": "sha512-plonYK4ML2PrxsRD9SeqmFt76eREWkQdPCglOA6aYDzL1AAbE+7PUnT54SvpWGfws13L0AZEqGSpL7+1IPnTxQ==",
"license": "Apache-2.0",
"dependencies": {
"google-auth-library": "^10.3.0",
"p-retry": "^4.6.2",
"protobufjs": "^7.5.4",
"ws": "^8.18.0"
},
"engines": {
"node": ">=20.0.0"
},
"peerDependencies": {
"@modelcontextprotocol/sdk": "^1.25.2"
},
"peerDependenciesMeta": {
"@modelcontextprotocol/sdk": {
"optional": true
}
}
},
"node_modules/@jridgewell/sourcemap-codec": {
"version": "1.5.5",
"resolved": "https://registry.npmmirror.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
@ -429,6 +453,70 @@
"dev": true,
"license": "MIT"
},
"node_modules/@protobufjs/aspromise": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
"integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/base64": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz",
"integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/codegen": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz",
"integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/eventemitter": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz",
"integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/fetch": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz",
"integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==",
"license": "BSD-3-Clause",
"dependencies": {
"@protobufjs/aspromise": "^1.1.1",
"@protobufjs/inquire": "^1.1.0"
}
},
"node_modules/@protobufjs/float": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz",
"integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/inquire": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz",
"integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/path": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz",
"integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/pool": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz",
"integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/utf8": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz",
"integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==",
"license": "BSD-3-Clause"
},
"node_modules/@rollup/rollup-android-arm-eabi": {
"version": "4.60.1",
"resolved": "https://registry.npmmirror.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.1.tgz",
@ -805,6 +893,12 @@
"form-data": "^4.0.4"
}
},
"node_modules/@types/retry": {
"version": "0.12.0",
"resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz",
"integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==",
"license": "MIT"
},
"node_modules/@vitest/expect": {
"version": "2.1.9",
"resolved": "https://registry.npmmirror.com/@vitest/expect/-/expect-2.1.9.tgz",
@ -930,6 +1024,15 @@
"node": ">=6.5"
}
},
"node_modules/agent-base": {
"version": "7.1.4",
"resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz",
"integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==",
"license": "MIT",
"engines": {
"node": ">= 14"
}
},
"node_modules/agentkeepalive": {
"version": "4.6.0",
"resolved": "https://registry.npmmirror.com/agentkeepalive/-/agentkeepalive-4.6.0.tgz",
@ -958,6 +1061,41 @@
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
"license": "MIT"
},
"node_modules/base64-js": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT"
},
"node_modules/bignumber.js": {
"version": "9.3.1",
"resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.3.1.tgz",
"integrity": "sha512-Ko0uX15oIUS7wJ3Rb30Fs6SkVbLmPBAKdlm7q9+ak9bbIeFf0MwuBsQV6z7+X768/cHsfg+WlysDWJcmthjsjQ==",
"license": "MIT",
"engines": {
"node": "*"
}
},
"node_modules/buffer-equal-constant-time": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz",
"integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==",
"license": "BSD-3-Clause"
},
"node_modules/cac": {
"version": "6.7.14",
"resolved": "https://registry.npmmirror.com/cac/-/cac-6.7.14.tgz",
@ -1020,11 +1158,19 @@
"node": ">= 0.8"
}
},
"node_modules/data-uri-to-buffer": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz",
"integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==",
"license": "MIT",
"engines": {
"node": ">= 12"
}
},
"node_modules/debug": {
"version": "4.4.3",
"resolved": "https://registry.npmmirror.com/debug/-/debug-4.4.3.tgz",
"integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
"dev": true,
"license": "MIT",
"dependencies": {
"ms": "^2.1.3"
@ -1071,6 +1217,15 @@
"node": ">= 0.4"
}
},
"node_modules/ecdsa-sig-formatter": {
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz",
"integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==",
"license": "Apache-2.0",
"dependencies": {
"safe-buffer": "^5.0.1"
}
},
"node_modules/es-define-property": {
"version": "1.0.1",
"resolved": "https://registry.npmmirror.com/es-define-property/-/es-define-property-1.0.1.tgz",
@ -1191,6 +1346,44 @@
"node": ">=12.0.0"
}
},
"node_modules/extend": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
"integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==",
"license": "MIT"
},
"node_modules/fetch-blob": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.2.0.tgz",
"integrity": "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/jimmywarting"
},
{
"type": "paypal",
"url": "https://paypal.me/jimmywarting"
}
],
"license": "MIT",
"dependencies": {
"node-domexception": "^1.0.0",
"web-streams-polyfill": "^3.0.3"
},
"engines": {
"node": "^12.20 || >= 14.13"
}
},
"node_modules/fetch-blob/node_modules/web-streams-polyfill": {
"version": "3.3.3",
"resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz",
"integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==",
"license": "MIT",
"engines": {
"node": ">= 8"
}
},
"node_modules/form-data": {
"version": "4.0.5",
"resolved": "https://registry.npmmirror.com/form-data/-/form-data-4.0.5.tgz",
@ -1226,6 +1419,18 @@
"node": ">= 12.20"
}
},
"node_modules/formdata-polyfill": {
"version": "4.0.10",
"resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz",
"integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==",
"license": "MIT",
"dependencies": {
"fetch-blob": "^3.1.2"
},
"engines": {
"node": ">=12.20.0"
}
},
"node_modules/fsevents": {
"version": "2.3.3",
"resolved": "https://registry.npmmirror.com/fsevents/-/fsevents-2.3.3.tgz",
@ -1250,6 +1455,52 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/gaxios": {
"version": "7.1.4",
"resolved": "https://registry.npmjs.org/gaxios/-/gaxios-7.1.4.tgz",
"integrity": "sha512-bTIgTsM2bWn3XklZISBTQX7ZSddGW+IO3bMdGaemHZ3tbqExMENHLx6kKZ/KlejgrMtj8q7wBItt51yegqalrA==",
"license": "Apache-2.0",
"dependencies": {
"extend": "^3.0.2",
"https-proxy-agent": "^7.0.1",
"node-fetch": "^3.3.2"
},
"engines": {
"node": ">=18"
}
},
"node_modules/gaxios/node_modules/node-fetch": {
"version": "3.3.2",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz",
"integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==",
"license": "MIT",
"dependencies": {
"data-uri-to-buffer": "^4.0.0",
"fetch-blob": "^3.1.4",
"formdata-polyfill": "^4.0.10"
},
"engines": {
"node": "^12.20.0 || ^14.13.1 || >=16.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/node-fetch"
}
},
"node_modules/gcp-metadata": {
"version": "8.1.2",
"resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-8.1.2.tgz",
"integrity": "sha512-zV/5HKTfCeKWnxG0Dmrw51hEWFGfcF2xiXqcA3+J90WDuP0SvoiSO5ORvcBsifmx/FoIjgQN3oNOGaQ5PhLFkg==",
"license": "Apache-2.0",
"dependencies": {
"gaxios": "^7.0.0",
"google-logging-utils": "^1.0.0",
"json-bigint": "^1.0.0"
},
"engines": {
"node": ">=18"
}
},
"node_modules/get-intrinsic": {
"version": "1.3.0",
"resolved": "https://registry.npmmirror.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
@ -1287,6 +1538,32 @@
"node": ">= 0.4"
}
},
"node_modules/google-auth-library": {
"version": "10.6.2",
"resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-10.6.2.tgz",
"integrity": "sha512-e27Z6EThmVNNvtYASwQxose/G57rkRuaRbQyxM2bvYLLX/GqWZ5chWq2EBoUchJbCc57eC9ArzO5wMsEmWftCw==",
"license": "Apache-2.0",
"dependencies": {
"base64-js": "^1.3.0",
"ecdsa-sig-formatter": "^1.0.11",
"gaxios": "^7.1.4",
"gcp-metadata": "8.1.2",
"google-logging-utils": "1.1.3",
"jws": "^4.0.0"
},
"engines": {
"node": ">=18"
}
},
"node_modules/google-logging-utils": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/google-logging-utils/-/google-logging-utils-1.1.3.tgz",
"integrity": "sha512-eAmLkjDjAFCVXg7A1unxHsLf961m6y17QFqXqAXGj/gVkKFrEICfStRfwUlGNfeCEjNRa32JEWOUTlYXPyyKvA==",
"license": "Apache-2.0",
"engines": {
"node": ">=14"
}
},
"node_modules/gopd": {
"version": "1.2.0",
"resolved": "https://registry.npmmirror.com/gopd/-/gopd-1.2.0.tgz",
@ -1338,6 +1615,19 @@
"node": ">= 0.4"
}
},
"node_modules/https-proxy-agent": {
"version": "7.0.6",
"resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz",
"integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==",
"license": "MIT",
"dependencies": {
"agent-base": "^7.1.2",
"debug": "4"
},
"engines": {
"node": ">= 14"
}
},
"node_modules/humanize-ms": {
"version": "1.2.1",
"resolved": "https://registry.npmmirror.com/humanize-ms/-/humanize-ms-1.2.1.tgz",
@ -1347,6 +1637,42 @@
"ms": "^2.0.0"
}
},
"node_modules/json-bigint": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz",
"integrity": "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==",
"license": "MIT",
"dependencies": {
"bignumber.js": "^9.0.0"
}
},
"node_modules/jwa": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.1.tgz",
"integrity": "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg==",
"license": "MIT",
"dependencies": {
"buffer-equal-constant-time": "^1.0.1",
"ecdsa-sig-formatter": "1.0.11",
"safe-buffer": "^5.0.1"
}
},
"node_modules/jws": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/jws/-/jws-4.0.1.tgz",
"integrity": "sha512-EKI/M/yqPncGUUh44xz0PxSidXFr/+r0pA70+gIYhjv+et7yxM+s29Y+VGDkovRofQem0fs7Uvf4+YmAdyRduA==",
"license": "MIT",
"dependencies": {
"jwa": "^2.0.1",
"safe-buffer": "^5.0.1"
}
},
"node_modules/long": {
"version": "5.3.2",
"resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz",
"integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==",
"license": "Apache-2.0"
},
"node_modules/loupe": {
"version": "3.2.1",
"resolved": "https://registry.npmmirror.com/loupe/-/loupe-3.2.1.tgz",
@ -1504,6 +1830,19 @@
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
"license": "MIT"
},
"node_modules/p-retry": {
"version": "4.6.2",
"resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz",
"integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==",
"license": "MIT",
"dependencies": {
"@types/retry": "0.12.0",
"retry": "^0.13.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/pathe": {
"version": "1.1.2",
"resolved": "https://registry.npmmirror.com/pathe/-/pathe-1.1.2.tgz",
@ -1557,6 +1896,39 @@
"node": "^10 || ^12 || >=14"
}
},
"node_modules/protobufjs": {
"version": "7.5.4",
"resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.4.tgz",
"integrity": "sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==",
"hasInstallScript": true,
"license": "BSD-3-Clause",
"dependencies": {
"@protobufjs/aspromise": "^1.1.2",
"@protobufjs/base64": "^1.1.2",
"@protobufjs/codegen": "^2.0.4",
"@protobufjs/eventemitter": "^1.1.0",
"@protobufjs/fetch": "^1.1.0",
"@protobufjs/float": "^1.0.2",
"@protobufjs/inquire": "^1.1.0",
"@protobufjs/path": "^1.1.2",
"@protobufjs/pool": "^1.1.0",
"@protobufjs/utf8": "^1.1.0",
"@types/node": ">=13.7.0",
"long": "^5.0.0"
},
"engines": {
"node": ">=12.0.0"
}
},
"node_modules/retry": {
"version": "0.13.1",
"resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz",
"integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==",
"license": "MIT",
"engines": {
"node": ">= 4"
}
},
"node_modules/rollup": {
"version": "4.60.1",
"resolved": "https://registry.npmmirror.com/rollup/-/rollup-4.60.1.tgz",
@ -1602,6 +1974,26 @@
"fsevents": "~2.3.2"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT"
},
"node_modules/siginfo": {
"version": "2.0.0",
"resolved": "https://registry.npmmirror.com/siginfo/-/siginfo-2.0.0.tgz",
@ -1894,6 +2286,27 @@
"node": ">=8"
}
},
"node_modules/ws": {
"version": "8.20.0",
"resolved": "https://registry.npmjs.org/ws/-/ws-8.20.0.tgz",
"integrity": "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA==",
"license": "MIT",
"engines": {
"node": ">=10.0.0"
},
"peerDependencies": {
"bufferutil": "^4.0.1",
"utf-8-validate": ">=5.0.2"
},
"peerDependenciesMeta": {
"bufferutil": {
"optional": true
},
"utf-8-validate": {
"optional": true
}
}
},
"node_modules/zod": {
"version": "3.25.76",
"resolved": "https://registry.npmmirror.com/zod/-/zod-3.25.76.tgz",

View File

@ -34,16 +34,17 @@
"author": "",
"license": "MIT",
"engines": {
"node": ">=18.0.0"
"node": ">=20.0.0"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.52.0",
"@google/genai": "^1.48.0",
"openai": "^4.73.0",
"zod": "^3.23.0"
},
"devDependencies": {
"@types/node": "^22.0.0",
"typescript": "^5.6.0",
"vitest": "^2.1.0",
"@types/node": "^22.0.0"
"vitest": "^2.1.0"
}
}

View File

@ -11,6 +11,7 @@
*
* const anthropic = createAdapter('anthropic')
* const openai = createAdapter('openai', process.env.OPENAI_API_KEY)
* const gemini = createAdapter('gemini', process.env.GEMINI_API_KEY)
* ```
*/
@ -37,7 +38,7 @@ import type { LLMAdapter } from '../types.js'
* Additional providers can be integrated by implementing {@link LLMAdapter}
* directly and bypassing this factory.
*/
export type SupportedProvider = 'anthropic' | 'copilot' | 'openai'
export type SupportedProvider = 'anthropic' | 'copilot' | 'gemini' | 'openai'
/**
* Instantiate the appropriate {@link LLMAdapter} for the given provider.
@ -46,6 +47,7 @@ export type SupportedProvider = 'anthropic' | 'copilot' | 'openai'
* explicitly:
 * - `anthropic` → `ANTHROPIC_API_KEY`
 * - `openai` → `OPENAI_API_KEY`
 * - `gemini` → `GEMINI_API_KEY` / `GOOGLE_API_KEY`
 * - `copilot` → `GITHUB_COPILOT_TOKEN` / `GITHUB_TOKEN`, or interactive
* OAuth2 device flow if neither is set
*
@ -74,6 +76,10 @@ export async function createAdapter(
const { CopilotAdapter } = await import('./copilot.js')
return new CopilotAdapter(apiKey)
}
case 'gemini': {
const { GeminiAdapter } = await import('./gemini.js')
return new GeminiAdapter(apiKey)
}
case 'openai': {
const { OpenAIAdapter } = await import('./openai.js')
return new OpenAIAdapter(apiKey, baseURL)

396
src/llm/gemini.ts Normal file
View File

@ -0,0 +1,396 @@
/**
* @fileoverview Google Gemini adapter implementing {@link LLMAdapter}.
*
* Built for `@google/genai` (the unified Google Gen AI SDK, v1.x), NOT the
* legacy `@google/generative-ai` package.
*
* Converts between the framework's internal {@link ContentBlock} types and the
* `@google/genai` SDK's wire format, handling tool definitions, system prompts,
* and both batch and streaming response paths.
*
* API key resolution order:
* 1. `apiKey` constructor argument
* 2. `GEMINI_API_KEY` environment variable
*
* @example
* ```ts
* import { GeminiAdapter } from './gemini.js'
*
* const adapter = new GeminiAdapter()
* const response = await adapter.chat(messages, {
* model: 'gemini-2.5-flash',
* maxTokens: 1024,
* })
* ```
*/
import {
GoogleGenAI,
FunctionCallingConfigMode,
type Content,
type FunctionDeclaration,
type GenerateContentConfig,
type GenerateContentResponse,
type Part,
type Tool as GeminiTool,
} from '@google/genai'
import type {
ContentBlock,
ImageBlock,
LLMAdapter,
LLMChatOptions,
LLMMessage,
LLMResponse,
LLMStreamOptions,
LLMToolDef,
StreamEvent,
TextBlock,
ToolResultBlock,
ToolUseBlock,
} from '../types.js'
// ---------------------------------------------------------------------------
// Internal helpers
// ---------------------------------------------------------------------------
/**
 * Map a framework role name onto the Gemini wire-format role.
 *
 * Gemini's API uses `"model"` where this framework (and Anthropic) use
 * `"assistant"`; `"user"` maps straight through.
 *
 * @param role - Framework role for a conversation turn.
 * @returns The equivalent Gemini role literal.
 */
function toGeminiRole(role: 'user' | 'assistant'): 'user' | 'model' {
  return role === 'assistant' ? 'model' : 'user'
}
/**
 * Translate framework messages into Gemini {@link Content}[] form.
 *
 * Notable mapping rules (vs. Anthropic's format):
 * - The assistant role is spelled `"model"` on the Gemini side.
 * - Tool results travel as `functionResponse` parts inside `"user"` turns,
 *   while `functionCall` parts live in `"model"` turns.
 * - Gemini's `functionResponse` requires the function *name*, which our
 *   tool_result blocks don't carry — so all tool_use blocks are pre-scanned
 *   into an id → name lookup that tool results resolve through.
 */
function toGeminiContents(messages: LLMMessage[]): Content[] {
  // Pre-scan: remember which function name each tool-call id belongs to.
  const callNames = new Map<string, string>()
  for (const message of messages) {
    for (const item of message.content) {
      if (item.type === 'tool_use') {
        callNames.set(item.id, item.name)
      }
    }
  }
  const contents: Content[] = []
  for (const message of messages) {
    const parts: Part[] = []
    for (const item of message.content) {
      switch (item.type) {
        case 'text': {
          parts.push({ text: item.text })
          break
        }
        case 'tool_use': {
          parts.push({
            functionCall: { id: item.id, name: item.name, args: item.input },
          })
          break
        }
        case 'tool_result': {
          const payload =
            typeof item.content === 'string'
              ? item.content
              : JSON.stringify(item.content)
          parts.push({
            functionResponse: {
              id: item.tool_use_id,
              // Fall back to the id itself when the matching tool_use block
              // is not present in the conversation window.
              name: callNames.get(item.tool_use_id) ?? item.tool_use_id,
              response: { content: payload, isError: item.is_error ?? false },
            },
          })
          break
        }
        case 'image': {
          parts.push({
            inlineData: {
              mimeType: item.source.media_type,
              data: item.source.data,
            },
          })
          break
        }
        default: {
          const _exhaustive: never = item
          throw new Error(`Unhandled content block type: ${JSON.stringify(_exhaustive)}`)
        }
      }
    }
    contents.push({ role: toGeminiRole(message.role), parts })
  }
  return contents
}
/**
 * Build the Gemini `tools` configuration from framework tool definitions.
 *
 * `@google/genai` expects JSON-schema parameters under the
 * `parametersJsonSchema` key (not `parameters`/`input_schema`), and lets many
 * function declarations share one tool entry — so everything is collapsed
 * into a single-element array.
 */
function toGeminiTools(tools: readonly LLMToolDef[]): GeminiTool[] {
  const declarations: FunctionDeclaration[] = []
  for (const tool of tools) {
    declarations.push({
      name: tool.name,
      description: tool.description,
      parametersJsonSchema: tool.inputSchema as Record<string, unknown>,
    })
  }
  return [{ functionDeclarations: declarations }]
}
/**
 * Assemble the {@link GenerateContentConfig} shared by chat() and stream(),
 * so both request paths stay in lockstep.
 *
 * Tool configuration is only attached when the caller supplied tools;
 * function calling then runs in AUTO mode (the model decides when to call).
 */
function buildConfig(
  options: LLMChatOptions | LLMStreamOptions,
): GenerateContentConfig {
  const toolList = options.tools
  return {
    // Default cap mirrors the other adapters in this package.
    maxOutputTokens: options.maxTokens ?? 4096,
    temperature: options.temperature,
    systemInstruction: options.systemPrompt,
    tools: toolList ? toGeminiTools(toolList) : undefined,
    toolConfig: toolList
      ? { functionCallingConfig: { mode: FunctionCallingConfigMode.AUTO } }
      : undefined,
  }
}
/**
 * Mint a unique-enough ID string for tool use blocks.
 *
 * Gemini doesn't reliably attach call IDs (particularly on the streaming
 * path), yet the framework's {@link ToolUseBlock} contract requires one —
 * so an ID is fabricated from the current time plus a random base-36 suffix.
 */
function generateId(): string {
  const suffix = Math.random().toString(36).slice(2, 9)
  return `gemini-${Date.now()}-${suffix}`
}
/**
 * Pull the function-call ID off a Gemini part, minting one if missing.
 *
 * The `id` field exists in newer API versions but may be absent in older
 * responses, so it is read through a conservative cast with a generated
 * fallback — keeping the {@link ToolUseBlock} contract satisfied either way.
 */
function getFunctionCallId(part: Part): string {
  const call = part.functionCall as { id?: string } | undefined
  return call?.id ?? generateId()
}
/**
 * Translate a Gemini {@link GenerateContentResponse} into the framework's
 * {@link LLMResponse} shape.
 *
 * Only the first candidate is considered; text and functionCall parts are
 * mapped, everything else (e.g. inlineData echoes) is intentionally dropped.
 */
function fromGeminiResponse(
  response: GenerateContentResponse,
  id: string,
  model: string,
): LLMResponse {
  const primary = response.candidates?.[0]
  const blocks: ContentBlock[] = []
  for (const part of primary?.content?.parts ?? []) {
    if (part.text !== undefined && part.text !== '') {
      blocks.push({ type: 'text', text: part.text })
      continue
    }
    if (part.functionCall !== undefined) {
      blocks.push({
        type: 'tool_use',
        id: getFunctionCallId(part),
        name: part.functionCall.name ?? '',
        input: (part.functionCall.args ?? {}) as Record<string, unknown>,
      })
    }
  }
  // Map Gemini finish reasons onto the framework's stop_reason vocabulary.
  // Gemini may report STOP even when it returned function calls, so the
  // presence of tool_use blocks takes precedence over the raw reason.
  const reason = primary?.finishReason as string | undefined
  let stop_reason: LLMResponse['stop_reason'] = 'end_turn'
  if (reason === 'MAX_TOKENS') {
    stop_reason = 'max_tokens'
  } else if (blocks.some((b) => b.type === 'tool_use')) {
    stop_reason = 'tool_use'
  }
  return {
    id,
    content: blocks,
    model,
    stop_reason,
    usage: {
      input_tokens: response.usageMetadata?.promptTokenCount ?? 0,
      output_tokens: response.usageMetadata?.candidatesTokenCount ?? 0,
    },
  }
}
// ---------------------------------------------------------------------------
// Adapter implementation
// ---------------------------------------------------------------------------
/**
 * LLM adapter backed by the Google Gemini API via `@google/genai`.
 *
 * Thread-safe — a single instance may be shared across concurrent agent
 * runs. The underlying SDK client is stateless across requests.
 *
 * API key resolution order:
 * 1. `apiKey` constructor argument
 * 2. `GEMINI_API_KEY` environment variable
 * 3. `GOOGLE_API_KEY` environment variable
 */
export class GeminiAdapter implements LLMAdapter {
  readonly name = 'gemini'
  readonly #client: GoogleGenAI
  /**
   * @param apiKey - Explicit API key. When omitted, falls back to the
   *   `GEMINI_API_KEY` then `GOOGLE_API_KEY` environment variables, matching
   *   the resolution order documented on the adapter factory.
   */
  constructor(apiKey?: string) {
    this.#client = new GoogleGenAI({
      apiKey:
        apiKey ??
        process.env['GEMINI_API_KEY'] ??
        process.env['GOOGLE_API_KEY'],
    })
  }
  // -------------------------------------------------------------------------
  // chat()
  // -------------------------------------------------------------------------
  /**
   * Send a synchronous (non-streaming) chat request and return the complete
   * {@link LLMResponse}.
   *
   * Uses `ai.models.generateContent()` with the full conversation as
   * `contents`, which is the idiomatic pattern for `@google/genai`.
   */
  async chat(messages: LLMMessage[], options: LLMChatOptions): Promise<LLMResponse> {
    const id = generateId()
    const contents = toGeminiContents(messages)
    const response = await this.#client.models.generateContent({
      model: options.model,
      contents,
      config: buildConfig(options),
    })
    return fromGeminiResponse(response, id, options.model)
  }
  // -------------------------------------------------------------------------
  // stream()
  // -------------------------------------------------------------------------
  /**
   * Send a streaming chat request and yield {@link StreamEvent}s as they
   * arrive from the API.
   *
   * Uses `ai.models.generateContentStream()` which returns an
   * `AsyncGenerator<GenerateContentResponse>`. Each yielded chunk has the
   * same shape as a full response but contains only the delta for that chunk.
   *
   * Because `@google/genai` doesn't expose a `finalMessage()` helper like the
   * Anthropic SDK, content and token counts are accumulated while streaming
   * so the terminal `done` event carries a complete and accurate
   * {@link LLMResponse}. Adjacent text deltas are coalesced into a single
   * text block so the `done` payload matches the shape chat() produces.
   *
   * Sequence guarantees (matching the Anthropic adapter):
   * - Zero or more `text` events with incremental deltas
   * - Zero or more `tool_use` events (one per call; Gemini doesn't stream args)
   * - Exactly one terminal event: `done` or `error`
   */
  async *stream(
    messages: LLMMessage[],
    options: LLMStreamOptions,
  ): AsyncIterable<StreamEvent> {
    const id = generateId()
    const contents = toGeminiContents(messages)
    try {
      const streamResponse = await this.#client.models.generateContentStream({
        model: options.model,
        contents,
        config: buildConfig(options),
      })
      // Accumulators for building the done payload.
      const accumulatedContent: ContentBlock[] = []
      let inputTokens = 0
      let outputTokens = 0
      let lastFinishReason: string | undefined
      for await (const chunk of streamResponse) {
        const candidate = chunk.candidates?.[0]
        // Accumulate token counts — the API emits these on the final chunk.
        if (chunk.usageMetadata) {
          inputTokens = chunk.usageMetadata.promptTokenCount ?? inputTokens
          outputTokens = chunk.usageMetadata.candidatesTokenCount ?? outputTokens
        }
        if (candidate?.finishReason) {
          lastFinishReason = candidate.finishReason as string
        }
        for (const part of candidate?.content?.parts ?? []) {
          if (part.text) {
            // Merge consecutive text deltas into one block; per-chunk
            // fragments would otherwise leak into the final response and
            // diverge from the non-streaming chat() shape.
            const last = accumulatedContent[accumulatedContent.length - 1]
            if (last !== undefined && last.type === 'text') {
              accumulatedContent[accumulatedContent.length - 1] = {
                type: 'text',
                text: last.text + part.text,
              }
            } else {
              accumulatedContent.push({ type: 'text', text: part.text })
            }
            yield { type: 'text', data: part.text } satisfies StreamEvent
          } else if (part.functionCall) {
            const toolId = getFunctionCallId(part)
            const toolUseBlock: ToolUseBlock = {
              type: 'tool_use',
              id: toolId,
              name: part.functionCall.name ?? '',
              input: (part.functionCall.args ?? {}) as Record<string, unknown>,
            }
            accumulatedContent.push(toolUseBlock)
            yield { type: 'tool_use', data: toolUseBlock } satisfies StreamEvent
          }
        }
      }
      // Determine stop_reason from the accumulated response. As in
      // fromGeminiResponse(), tool calls take precedence over a plain STOP.
      const hasToolUse = accumulatedContent.some((b) => b.type === 'tool_use')
      let stop_reason: LLMResponse['stop_reason'] = 'end_turn'
      if (lastFinishReason === 'MAX_TOKENS') {
        stop_reason = 'max_tokens'
      } else if (hasToolUse) {
        stop_reason = 'tool_use'
      }
      const finalResponse: LLMResponse = {
        id,
        content: accumulatedContent,
        model: options.model,
        stop_reason,
        usage: { input_tokens: inputTokens, output_tokens: outputTokens },
      }
      yield { type: 'done', data: finalResponse } satisfies StreamEvent
    } catch (err) {
      const error = err instanceof Error ? err : new Error(String(err))
      yield { type: 'error', data: error } satisfies StreamEvent
    }
  }
}
// Re-export types that consumers of this module commonly need alongside the adapter.
export type {
ContentBlock,
ImageBlock,
LLMAdapter,
LLMChatOptions,
LLMMessage,
LLMResponse,
LLMStreamOptions,
LLMToolDef,
StreamEvent,
TextBlock,
ToolResultBlock,
ToolUseBlock,
}

View File

@ -186,7 +186,7 @@ export interface ToolDefinition<TInput = Record<string, unknown>> {
export interface AgentConfig {
readonly name: string
readonly model: string
readonly provider?: 'anthropic' | 'copilot' | 'openai'
readonly provider?: 'anthropic' | 'copilot' | 'gemini' | 'openai'
/**
* Custom base URL for OpenAI-compatible APIs (Ollama, vLLM, LM Studio, etc.).
* Note: local servers that don't require auth still need `apiKey` set to a
@ -293,7 +293,7 @@ export interface OrchestratorEvent {
export interface OrchestratorConfig {
readonly maxConcurrency?: number
readonly defaultModel?: string
readonly defaultProvider?: 'anthropic' | 'copilot' | 'openai'
readonly defaultProvider?: 'anthropic' | 'copilot' | 'gemini' | 'openai'
readonly defaultBaseURL?: string
readonly defaultApiKey?: string
onProgress?: (event: OrchestratorEvent) => void