From 2bb220fc63d510f565b554d83f978b061de2305f Mon Sep 17 00:00:00 2001 From: MrAvalonApple <74775400+ibrahimkazimov@users.noreply.github.com> Date: Thu, 2 Apr 2026 21:41:56 +0300 Subject: [PATCH 1/9] feat: add support for Gemini model --- README.md | 14 +- package-lock.json | 417 ++++++++++++++++++++++++++++++++++++++++++++- package.json | 7 +- src/llm/adapter.ts | 8 +- src/llm/gemini.ts | 396 ++++++++++++++++++++++++++++++++++++++++++ src/types.ts | 4 +- 6 files changed, 834 insertions(+), 12 deletions(-) create mode 100644 src/llm/gemini.ts diff --git a/README.md b/README.md index 34b0a1b..f8ccd6c 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ Build AI agent teams that decompose goals into tasks automatically. Define agent - **Auto Task Decomposition** — Describe a goal in plain text. A built-in coordinator agent breaks it into a task DAG with dependencies and assignees — no manual orchestration needed. - **Multi-Agent Teams** — Define agents with different roles, tools, and even different models. They collaborate through a message bus and shared memory. - **Task DAG Scheduling** — Tasks have dependencies. The framework resolves them topologically — dependent tasks wait, independent tasks run in parallel. -- **Model Agnostic** — Claude, GPT, and local models (Ollama, vLLM, LM Studio) in the same team. Swap models per agent via `baseURL`. +- **Model Agnostic** — Claude, GPT, Gemini, and local models (Ollama, vLLM, LM Studio) in the same team. Swap models per agent via `baseURL`. - **In-Process Execution** — No subprocess overhead. Everything runs in one Node.js process. Deploy to serverless, Docker, CI/CD. ## Quick Start @@ -26,7 +26,12 @@ Requires Node.js >= 18. npm install @jackchen_me/open-multi-agent ``` -Set `ANTHROPIC_API_KEY` (and optionally `OPENAI_API_KEY` or `GITHUB_TOKEN` for Copilot) in your environment. 
+Set the API key for your provider: + +- `ANTHROPIC_API_KEY` +- `OPENAI_API_KEY` +- `GEMINI_API_KEY` +- `GITHUB_TOKEN` (for Copilot) Three agents, one goal — the framework handles the rest: @@ -198,7 +203,7 @@ const result = await agent.run('Find the three most recent TypeScript releases.'
-Multi-Model Teams — mix Claude, GPT, and local models in one workflow +Multi-Model Teams — mix Claude, GPT, Gemini, and local models in one workflow ```typescript const claudeAgent: AgentConfig = { @@ -296,6 +301,7 @@ for await (const event of agent.stream('Explain monads in two sentences.')) { │ - stream() │ │ - AnthropicAdapter │ └────────┬──────────┘ │ - OpenAIAdapter │ │ │ - CopilotAdapter │ + │ │ - GeminiAdapter │ │ └──────────────────────┘ ┌────────▼──────────┐ │ AgentRunner │ ┌──────────────────────┐ @@ -319,7 +325,7 @@ for await (const event of agent.stream('Explain monads in two sentences.')) { Issues, feature requests, and PRs are welcome. Some areas where contributions would be especially valuable: -- **LLM Adapters** — Anthropic, OpenAI, and Copilot are supported out of the box. Any OpenAI-compatible API (Ollama, vLLM, LM Studio, etc.) works via `baseURL`. Additional adapters for Gemini and other providers are welcome. The `LLMAdapter` interface requires just two methods: `chat()` and `stream()`. +- **LLM Adapters** — Anthropic, OpenAI, and Copilot are supported out of the box. Any OpenAI-compatible API (Ollama, vLLM, LM Studio, etc.) works via `baseURL`. Additional adapters for other providers are welcome. The `LLMAdapter` interface requires just two methods: `chat()` and `stream()`. - **Examples** — Real-world workflows and use cases. - **Documentation** — Guides, tutorials, and API docs. 
diff --git a/package-lock.json b/package-lock.json index 96f1dec..3df1ac0 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,6 +10,7 @@ "license": "MIT", "dependencies": { "@anthropic-ai/sdk": "^0.52.0", + "@google/genai": "^1.48.0", "openai": "^4.73.0", "zod": "^3.23.0" }, @@ -19,7 +20,7 @@ "vitest": "^2.1.0" }, "engines": { - "node": ">=18.0.0" + "node": ">=20.0.0" } }, "node_modules/@anthropic-ai/sdk": { @@ -422,6 +423,29 @@ "node": ">=12" } }, + "node_modules/@google/genai": { + "version": "1.48.0", + "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.48.0.tgz", + "integrity": "sha512-plonYK4ML2PrxsRD9SeqmFt76eREWkQdPCglOA6aYDzL1AAbE+7PUnT54SvpWGfws13L0AZEqGSpL7+1IPnTxQ==", + "license": "Apache-2.0", + "dependencies": { + "google-auth-library": "^10.3.0", + "p-retry": "^4.6.2", + "protobufjs": "^7.5.4", + "ws": "^8.18.0" + }, + "engines": { + "node": ">=20.0.0" + }, + "peerDependencies": { + "@modelcontextprotocol/sdk": "^1.25.2" + }, + "peerDependenciesMeta": { + "@modelcontextprotocol/sdk": { + "optional": true + } + } + }, "node_modules/@jridgewell/sourcemap-codec": { "version": "1.5.5", "resolved": "https://registry.npmmirror.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", @@ -429,6 +453,70 @@ "dev": true, "license": "MIT" }, + "node_modules/@protobufjs/aspromise": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", + "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/base64": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", + "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/codegen": { + "version": "2.0.4", + "resolved": 
"https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", + "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/eventemitter": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", + "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/fetch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", + "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + "license": "BSD-3-Clause", + "dependencies": { + "@protobufjs/aspromise": "^1.1.1", + "@protobufjs/inquire": "^1.1.0" + } + }, + "node_modules/@protobufjs/float": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", + "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/inquire": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", + "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/path": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", + "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/pool": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", + "integrity": 
"sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/utf8": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", + "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==", + "license": "BSD-3-Clause" + }, "node_modules/@rollup/rollup-android-arm-eabi": { "version": "4.60.1", "resolved": "https://registry.npmmirror.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.1.tgz", @@ -805,6 +893,12 @@ "form-data": "^4.0.4" } }, + "node_modules/@types/retry": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", + "license": "MIT" + }, "node_modules/@vitest/expect": { "version": "2.1.9", "resolved": "https://registry.npmmirror.com/@vitest/expect/-/expect-2.1.9.tgz", @@ -930,6 +1024,15 @@ "node": ">=6.5" } }, + "node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, "node_modules/agentkeepalive": { "version": "4.6.0", "resolved": "https://registry.npmmirror.com/agentkeepalive/-/agentkeepalive-4.6.0.tgz", @@ -958,6 +1061,41 @@ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", "license": "MIT" }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": 
"https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/bignumber.js": { + "version": "9.3.1", + "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.3.1.tgz", + "integrity": "sha512-Ko0uX15oIUS7wJ3Rb30Fs6SkVbLmPBAKdlm7q9+ak9bbIeFf0MwuBsQV6z7+X768/cHsfg+WlysDWJcmthjsjQ==", + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/buffer-equal-constant-time": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", + "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==", + "license": "BSD-3-Clause" + }, "node_modules/cac": { "version": "6.7.14", "resolved": "https://registry.npmmirror.com/cac/-/cac-6.7.14.tgz", @@ -1020,11 +1158,19 @@ "node": ">= 0.8" } }, + "node_modules/data-uri-to-buffer": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", + "integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, "node_modules/debug": { "version": "4.4.3", "resolved": "https://registry.npmmirror.com/debug/-/debug-4.4.3.tgz", "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", - "dev": true, "license": "MIT", "dependencies": { "ms": "^2.1.3" @@ -1071,6 +1217,15 @@ "node": ">= 0.4" } }, + "node_modules/ecdsa-sig-formatter": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", + "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", + "license": "Apache-2.0", + "dependencies": { + 
"safe-buffer": "^5.0.1" + } + }, "node_modules/es-define-property": { "version": "1.0.1", "resolved": "https://registry.npmmirror.com/es-define-property/-/es-define-property-1.0.1.tgz", @@ -1191,6 +1346,44 @@ "node": ">=12.0.0" } }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "license": "MIT" + }, + "node_modules/fetch-blob": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.2.0.tgz", + "integrity": "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "paypal", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "dependencies": { + "node-domexception": "^1.0.0", + "web-streams-polyfill": "^3.0.3" + }, + "engines": { + "node": "^12.20 || >= 14.13" + } + }, + "node_modules/fetch-blob/node_modules/web-streams-polyfill": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", + "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, "node_modules/form-data": { "version": "4.0.5", "resolved": "https://registry.npmmirror.com/form-data/-/form-data-4.0.5.tgz", @@ -1226,6 +1419,18 @@ "node": ">= 12.20" } }, + "node_modules/formdata-polyfill": { + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", + "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", + "license": "MIT", + "dependencies": { + "fetch-blob": "^3.1.2" + }, + "engines": { + "node": ">=12.20.0" + } + }, 
"node_modules/fsevents": { "version": "2.3.3", "resolved": "https://registry.npmmirror.com/fsevents/-/fsevents-2.3.3.tgz", @@ -1250,6 +1455,52 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/gaxios": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-7.1.4.tgz", + "integrity": "sha512-bTIgTsM2bWn3XklZISBTQX7ZSddGW+IO3bMdGaemHZ3tbqExMENHLx6kKZ/KlejgrMtj8q7wBItt51yegqalrA==", + "license": "Apache-2.0", + "dependencies": { + "extend": "^3.0.2", + "https-proxy-agent": "^7.0.1", + "node-fetch": "^3.3.2" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/gaxios/node_modules/node-fetch": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz", + "integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==", + "license": "MIT", + "dependencies": { + "data-uri-to-buffer": "^4.0.0", + "fetch-blob": "^3.1.4", + "formdata-polyfill": "^4.0.10" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/node-fetch" + } + }, + "node_modules/gcp-metadata": { + "version": "8.1.2", + "resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-8.1.2.tgz", + "integrity": "sha512-zV/5HKTfCeKWnxG0Dmrw51hEWFGfcF2xiXqcA3+J90WDuP0SvoiSO5ORvcBsifmx/FoIjgQN3oNOGaQ5PhLFkg==", + "license": "Apache-2.0", + "dependencies": { + "gaxios": "^7.0.0", + "google-logging-utils": "^1.0.0", + "json-bigint": "^1.0.0" + }, + "engines": { + "node": ">=18" + } + }, "node_modules/get-intrinsic": { "version": "1.3.0", "resolved": "https://registry.npmmirror.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz", @@ -1287,6 +1538,32 @@ "node": ">= 0.4" } }, + "node_modules/google-auth-library": { + "version": "10.6.2", + "resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-10.6.2.tgz", + "integrity": 
"sha512-e27Z6EThmVNNvtYASwQxose/G57rkRuaRbQyxM2bvYLLX/GqWZ5chWq2EBoUchJbCc57eC9ArzO5wMsEmWftCw==", + "license": "Apache-2.0", + "dependencies": { + "base64-js": "^1.3.0", + "ecdsa-sig-formatter": "^1.0.11", + "gaxios": "^7.1.4", + "gcp-metadata": "8.1.2", + "google-logging-utils": "1.1.3", + "jws": "^4.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/google-logging-utils": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/google-logging-utils/-/google-logging-utils-1.1.3.tgz", + "integrity": "sha512-eAmLkjDjAFCVXg7A1unxHsLf961m6y17QFqXqAXGj/gVkKFrEICfStRfwUlGNfeCEjNRa32JEWOUTlYXPyyKvA==", + "license": "Apache-2.0", + "engines": { + "node": ">=14" + } + }, "node_modules/gopd": { "version": "1.2.0", "resolved": "https://registry.npmmirror.com/gopd/-/gopd-1.2.0.tgz", @@ -1338,6 +1615,19 @@ "node": ">= 0.4" } }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, "node_modules/humanize-ms": { "version": "1.2.1", "resolved": "https://registry.npmmirror.com/humanize-ms/-/humanize-ms-1.2.1.tgz", @@ -1347,6 +1637,42 @@ "ms": "^2.0.0" } }, + "node_modules/json-bigint": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz", + "integrity": "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==", + "license": "MIT", + "dependencies": { + "bignumber.js": "^9.0.0" + } + }, + "node_modules/jwa": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.1.tgz", + "integrity": "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg==", + "license": "MIT", + 
"dependencies": { + "buffer-equal-constant-time": "^1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jws": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.1.tgz", + "integrity": "sha512-EKI/M/yqPncGUUh44xz0PxSidXFr/+r0pA70+gIYhjv+et7yxM+s29Y+VGDkovRofQem0fs7Uvf4+YmAdyRduA==", + "license": "MIT", + "dependencies": { + "jwa": "^2.0.1", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/long": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz", + "integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==", + "license": "Apache-2.0" + }, "node_modules/loupe": { "version": "3.2.1", "resolved": "https://registry.npmmirror.com/loupe/-/loupe-3.2.1.tgz", @@ -1504,6 +1830,19 @@ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", "license": "MIT" }, + "node_modules/p-retry": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", + "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", + "license": "MIT", + "dependencies": { + "@types/retry": "0.12.0", + "retry": "^0.13.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/pathe": { "version": "1.1.2", "resolved": "https://registry.npmmirror.com/pathe/-/pathe-1.1.2.tgz", @@ -1557,6 +1896,39 @@ "node": "^10 || ^12 || >=14" } }, + "node_modules/protobufjs": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.4.tgz", + "integrity": "sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==", + "hasInstallScript": true, + "license": "BSD-3-Clause", + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + 
"@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/node": ">=13.7.0", + "long": "^5.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/retry": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", + "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, "node_modules/rollup": { "version": "4.60.1", "resolved": "https://registry.npmmirror.com/rollup/-/rollup-4.60.1.tgz", @@ -1602,6 +1974,26 @@ "fsevents": "~2.3.2" } }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, "node_modules/siginfo": { "version": "2.0.0", "resolved": "https://registry.npmmirror.com/siginfo/-/siginfo-2.0.0.tgz", @@ -1894,6 +2286,27 @@ "node": ">=8" } }, + "node_modules/ws": { + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.20.0.tgz", + "integrity": "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, "node_modules/zod": { "version": "3.25.76", "resolved": 
"https://registry.npmmirror.com/zod/-/zod-3.25.76.tgz", diff --git a/package.json b/package.json index ee0e26a..04ed0d9 100644 --- a/package.json +++ b/package.json @@ -34,16 +34,17 @@ "author": "", "license": "MIT", "engines": { - "node": ">=18.0.0" + "node": ">=20.0.0" }, "dependencies": { "@anthropic-ai/sdk": "^0.52.0", + "@google/genai": "^1.48.0", "openai": "^4.73.0", "zod": "^3.23.0" }, "devDependencies": { + "@types/node": "^22.0.0", "typescript": "^5.6.0", - "vitest": "^2.1.0", - "@types/node": "^22.0.0" + "vitest": "^2.1.0" } } diff --git a/src/llm/adapter.ts b/src/llm/adapter.ts index cbe5b4f..1283d90 100644 --- a/src/llm/adapter.ts +++ b/src/llm/adapter.ts @@ -11,6 +11,7 @@ * * const anthropic = createAdapter('anthropic') * const openai = createAdapter('openai', process.env.OPENAI_API_KEY) + * const gemini = createAdapter('gemini', process.env.GEMINI_API_KEY) * ``` */ @@ -37,7 +38,7 @@ import type { LLMAdapter } from '../types.js' * Additional providers can be integrated by implementing {@link LLMAdapter} * directly and bypassing this factory. */ -export type SupportedProvider = 'anthropic' | 'copilot' | 'openai' +export type SupportedProvider = 'anthropic' | 'copilot' | 'gemini' | 'openai' /** * Instantiate the appropriate {@link LLMAdapter} for the given provider. 
@@ -46,6 +47,7 @@ export type SupportedProvider = 'anthropic' | 'copilot' | 'openai' * explicitly: * - `anthropic` → `ANTHROPIC_API_KEY` * - `openai` → `OPENAI_API_KEY` + * - `gemini` → `GEMINI_API_KEY` / `GOOGLE_API_KEY` * - `copilot` → `GITHUB_COPILOT_TOKEN` / `GITHUB_TOKEN`, or interactive * OAuth2 device flow if neither is set * @@ -74,6 +76,10 @@ export async function createAdapter( const { CopilotAdapter } = await import('./copilot.js') return new CopilotAdapter(apiKey) } + case 'gemini': { + const { GeminiAdapter } = await import('./gemini.js') + return new GeminiAdapter(apiKey) + } case 'openai': { const { OpenAIAdapter } = await import('./openai.js') return new OpenAIAdapter(apiKey, baseURL) diff --git a/src/llm/gemini.ts b/src/llm/gemini.ts new file mode 100644 index 0000000..a618ff3 --- /dev/null +++ b/src/llm/gemini.ts @@ -0,0 +1,396 @@ +/** + * @fileoverview Google Gemini adapter implementing {@link LLMAdapter}. + * + * Built for `@google/genai` (the unified Google Gen AI SDK, v1.x), NOT the + * legacy `@google/generative-ai` package. + * + * Converts between the framework's internal {@link ContentBlock} types and the + * `@google/genai` SDK's wire format, handling tool definitions, system prompts, + * and both batch and streaming response paths. + * + * API key resolution order: + * 1. `apiKey` constructor argument + * 2. 
`GEMINI_API_KEY` environment variable + * + * @example + * ```ts + * import { GeminiAdapter } from './gemini.js' + * + * const adapter = new GeminiAdapter() + * const response = await adapter.chat(messages, { + * model: 'gemini-2.5-flash', + * maxTokens: 1024, + * }) + * ``` + */ + +import { + GoogleGenAI, + FunctionCallingConfigMode, + type Content, + type FunctionDeclaration, + type GenerateContentConfig, + type GenerateContentResponse, + type Part, + type Tool as GeminiTool, +} from '@google/genai' + +import type { + ContentBlock, + ImageBlock, + LLMAdapter, + LLMChatOptions, + LLMMessage, + LLMResponse, + LLMStreamOptions, + LLMToolDef, + StreamEvent, + TextBlock, + ToolResultBlock, + ToolUseBlock, +} from '../types.js' + +// --------------------------------------------------------------------------- +// Internal helpers +// --------------------------------------------------------------------------- + +/** + * Map framework role names to Gemini role names. + * + * Gemini uses `"model"` instead of `"assistant"`. + */ +function toGeminiRole(role: 'user' | 'assistant'): string { + return role === 'assistant' ? 'model' : 'user' +} + +/** + * Convert framework messages into Gemini's {@link Content}[] format. + * + * Key differences from Anthropic: + * - Gemini uses `"model"` instead of `"assistant"`. + * - `functionResponse` parts (tool results) must appear in `"user"` turns. + * - `functionCall` parts appear in `"model"` turns. + * - We build a name lookup map from tool_use blocks so tool_result blocks + * can resolve the function name required by Gemini's `functionResponse`. + */ +function toGeminiContents(messages: LLMMessage[]): Content[] { + // First pass: build id → name map for resolving tool results. 
+ const toolNameById = new Map() + for (const msg of messages) { + for (const block of msg.content) { + if (block.type === 'tool_use') { + toolNameById.set(block.id, block.name) + } + } + } + + return messages.map((msg): Content => { + const parts: Part[] = msg.content.map((block): Part => { + switch (block.type) { + case 'text': + return { text: block.text } + + case 'tool_use': + return { + functionCall: { + id: block.id, + name: block.name, + args: block.input, + }, + } + + case 'tool_result': { + const name = toolNameById.get(block.tool_use_id) ?? block.tool_use_id + return { + functionResponse: { + id: block.tool_use_id, + name, + response: { + content: + typeof block.content === 'string' + ? block.content + : JSON.stringify(block.content), + isError: block.is_error ?? false, + }, + }, + } + } + + case 'image': + return { + inlineData: { + mimeType: block.source.media_type, + data: block.source.data, + }, + } + + default: { + const _exhaustive: never = block + throw new Error(`Unhandled content block type: ${JSON.stringify(_exhaustive)}`) + } + } + }) + + return { role: toGeminiRole(msg.role), parts } + }) +} + +/** + * Convert framework {@link LLMToolDef}s into a Gemini `tools` config array. + * + * In `@google/genai`, function declarations use `parametersJsonSchema` (not + * `parameters` or `input_schema`). All declarations are grouped under a single + * tool entry. + */ +function toGeminiTools(tools: readonly LLMToolDef[]): GeminiTool[] { + const functionDeclarations: FunctionDeclaration[] = tools.map((t) => ({ + name: t.name, + description: t.description, + parametersJsonSchema: t.inputSchema as Record, + })) + return [{ functionDeclarations }] +} + +/** + * Build the {@link GenerateContentConfig} shared by chat() and stream(). + */ +function buildConfig( + options: LLMChatOptions | LLMStreamOptions, +): GenerateContentConfig { + return { + maxOutputTokens: options.maxTokens ?? 
4096, + temperature: options.temperature, + systemInstruction: options.systemPrompt, + tools: options.tools ? toGeminiTools(options.tools) : undefined, + toolConfig: options.tools + ? { functionCallingConfig: { mode: FunctionCallingConfigMode.AUTO } } + : undefined, + } +} + +/** + * Generate a stable pseudo-random ID string for tool use blocks. + * + * Gemini may not always return call IDs (especially in streaming), so we + * fabricate them when absent to satisfy the framework's {@link ToolUseBlock} + * contract. + */ +function generateId(): string { + return `gemini-${Date.now()}-${Math.random().toString(36).slice(2, 9)}` +} + +/** + * Extract the function call ID from a Gemini part, or generate one. + * + * The `id` field exists in newer API versions but may be absent in older + * responses, so we cast conservatively and fall back to a generated ID. + */ +function getFunctionCallId(part: Part): string { + return (part.functionCall as { id?: string } | undefined)?.id ?? generateId() +} + +/** + * Convert a Gemini {@link GenerateContentResponse} into a framework + * {@link LLMResponse}. + */ +function fromGeminiResponse( + response: GenerateContentResponse, + id: string, + model: string, +): LLMResponse { + const candidate = response.candidates?.[0] + const content: ContentBlock[] = [] + + for (const part of candidate?.content?.parts ?? []) { + if (part.text !== undefined && part.text !== '') { + content.push({ type: 'text', text: part.text }) + } else if (part.functionCall !== undefined) { + content.push({ + type: 'tool_use', + id: getFunctionCallId(part), + name: part.functionCall.name ?? '', + input: (part.functionCall.args ?? {}) as Record, + }) + } + // inlineData echoes and other part types are silently ignored. + } + + // Map Gemini finish reasons to framework stop_reason vocabulary. 
+ const finishReason = candidate?.finishReason as string | undefined + let stop_reason: LLMResponse['stop_reason'] = 'end_turn' + if (finishReason === 'MAX_TOKENS') { + stop_reason = 'max_tokens' + } else if (content.some((b) => b.type === 'tool_use')) { + // Gemini may report STOP even when it returned function calls. + stop_reason = 'tool_use' + } + + const usage = response.usageMetadata + return { + id, + content, + model, + stop_reason, + usage: { + input_tokens: usage?.promptTokenCount ?? 0, + output_tokens: usage?.candidatesTokenCount ?? 0, + }, + } +} + +// --------------------------------------------------------------------------- +// Adapter implementation +// --------------------------------------------------------------------------- + +/** + * LLM adapter backed by the Google Gemini API via `@google/genai`. + * + * Thread-safe — a single instance may be shared across concurrent agent runs. + * The underlying SDK client is stateless across requests. + */ +export class GeminiAdapter implements LLMAdapter { + readonly name = 'gemini' + + readonly #client: GoogleGenAI + + constructor(apiKey?: string) { + this.#client = new GoogleGenAI({ + apiKey: apiKey ?? process.env['GEMINI_API_KEY'], + }) + } + + // ------------------------------------------------------------------------- + // chat() + // ------------------------------------------------------------------------- + + /** + * Send a synchronous (non-streaming) chat request and return the complete + * {@link LLMResponse}. + * + * Uses `ai.models.generateContent()` with the full conversation as `contents`, + * which is the idiomatic pattern for `@google/genai`. 
+ */ + async chat(messages: LLMMessage[], options: LLMChatOptions): Promise { + const id = generateId() + const contents = toGeminiContents(messages) + + const response = await this.#client.models.generateContent({ + model: options.model, + contents, + config: buildConfig(options), + }) + + return fromGeminiResponse(response, id, options.model) + } + + // ------------------------------------------------------------------------- + // stream() + // ------------------------------------------------------------------------- + + /** + * Send a streaming chat request and yield {@link StreamEvent}s as they + * arrive from the API. + * + * Uses `ai.models.generateContentStream()` which returns an + * `AsyncGenerator`. Each yielded chunk has the same + * shape as a full response but contains only the delta for that chunk. + * + * Because `@google/genai` doesn't expose a `finalMessage()` helper like the + * Anthropic SDK, we accumulate content and token counts as we stream so that + * the terminal `done` event carries a complete and accurate {@link LLMResponse}. + * + * Sequence guarantees (matching the Anthropic adapter): + * - Zero or more `text` events with incremental deltas + * - Zero or more `tool_use` events (one per call; Gemini doesn't stream args) + * - Exactly one terminal event: `done` or `error` + */ + async *stream( + messages: LLMMessage[], + options: LLMStreamOptions, + ): AsyncIterable { + const id = generateId() + const contents = toGeminiContents(messages) + + try { + const streamResponse = await this.#client.models.generateContentStream({ + model: options.model, + contents, + config: buildConfig(options), + }) + + // Accumulators for building the done payload. + const accumulatedContent: ContentBlock[] = [] + let inputTokens = 0 + let outputTokens = 0 + let lastFinishReason: string | undefined + + for await (const chunk of streamResponse) { + const candidate = chunk.candidates?.[0] + + // Accumulate token counts — the API emits these on the final chunk. 
+ if (chunk.usageMetadata) {
+ inputTokens = chunk.usageMetadata.promptTokenCount ?? inputTokens
+ outputTokens = chunk.usageMetadata.candidatesTokenCount ?? outputTokens
+ }
+ if (candidate?.finishReason) {
+ lastFinishReason = candidate.finishReason as string
+ }
+
+ for (const part of candidate?.content?.parts ?? []) {
+ if (part.text) {
+ accumulatedContent.push({ type: 'text', text: part.text })
+ yield { type: 'text', data: part.text } satisfies StreamEvent
+ } else if (part.functionCall) {
+ const toolId = getFunctionCallId(part)
+ const toolUseBlock: ToolUseBlock = {
+ type: 'tool_use',
+ id: toolId,
+ name: part.functionCall.name ?? '',
+ input: (part.functionCall.args ?? {}) as Record<string, unknown>,
+ }
+ accumulatedContent.push(toolUseBlock)
+ yield { type: 'tool_use', data: toolUseBlock } satisfies StreamEvent
+ }
+ }
+ }
+
+ // Determine stop_reason from the accumulated response.
+ const hasToolUse = accumulatedContent.some((b) => b.type === 'tool_use')
+ let stop_reason: LLMResponse['stop_reason'] = 'end_turn'
+ if (lastFinishReason === 'MAX_TOKENS') {
+ stop_reason = 'max_tokens'
+ } else if (hasToolUse) {
+ stop_reason = 'tool_use'
+ }
+
+ const finalResponse: LLMResponse = {
+ id,
+ content: accumulatedContent,
+ model: options.model,
+ stop_reason,
+ usage: { input_tokens: inputTokens, output_tokens: outputTokens },
+ }
+
+ yield { type: 'done', data: finalResponse } satisfies StreamEvent
+ } catch (err) {
+ const error = err instanceof Error ? err : new Error(String(err))
+ yield { type: 'error', data: error } satisfies StreamEvent
+ }
+ }
+}
+
+// Re-export types that consumers of this module commonly need alongside the adapter. 
+export type { + ContentBlock, + ImageBlock, + LLMAdapter, + LLMChatOptions, + LLMMessage, + LLMResponse, + LLMStreamOptions, + LLMToolDef, + StreamEvent, + TextBlock, + ToolResultBlock, + ToolUseBlock, +} \ No newline at end of file diff --git a/src/types.ts b/src/types.ts index bd44065..af876dc 100644 --- a/src/types.ts +++ b/src/types.ts @@ -186,7 +186,7 @@ export interface ToolDefinition> { export interface AgentConfig { readonly name: string readonly model: string - readonly provider?: 'anthropic' | 'copilot' | 'openai' + readonly provider?: 'anthropic' | 'copilot' | 'gemini' | 'openai' /** * Custom base URL for OpenAI-compatible APIs (Ollama, vLLM, LM Studio, etc.). * Note: local servers that don't require auth still need `apiKey` set to a @@ -293,7 +293,7 @@ export interface OrchestratorEvent { export interface OrchestratorConfig { readonly maxConcurrency?: number readonly defaultModel?: string - readonly defaultProvider?: 'anthropic' | 'copilot' | 'openai' + readonly defaultProvider?: 'anthropic' | 'copilot' | 'gemini' | 'openai' readonly defaultBaseURL?: string readonly defaultApiKey?: string onProgress?: (event: OrchestratorEvent) => void From 4e4226783d11d0a90a47060c5f757843d34dae0c Mon Sep 17 00:00:00 2001 From: MrAvalonApple <74775400+ibrahimkazimov@users.noreply.github.com> Date: Thu, 2 Apr 2026 22:06:55 +0300 Subject: [PATCH 2/9] docs: update README to include Gemini model support --- README.md | 1 + README_zh.md | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index a02f47a..e3e4bef 100644 --- a/README.md +++ b/README.md @@ -186,6 +186,7 @@ npx tsx examples/01-single-agent.ts | Anthropic (Claude) | `provider: 'anthropic'` | `ANTHROPIC_API_KEY` | Verified | | OpenAI (GPT) | `provider: 'openai'` | `OPENAI_API_KEY` | Verified | | GitHub Copilot | `provider: 'copilot'` | `GITHUB_TOKEN` | Verified | +| Gemini | `provider: 'gemini'` | `GEMINI_API_KEY` | Verified | | Ollama / vLLM / LM Studio | `provider: 
'openai'` + `baseURL` | — | Verified | Any OpenAI-compatible API should work via `provider: 'openai'` + `baseURL` (DeepSeek, Groq, Mistral, Qwen, MiniMax, etc.). These providers have not been fully verified yet — contributions welcome via [#25](https://github.com/JackChen-me/open-multi-agent/issues/25). diff --git a/README_zh.md b/README_zh.md index 4cf7a00..6a0b46d 100644 --- a/README_zh.md +++ b/README_zh.md @@ -15,7 +15,7 @@ - **自动任务拆解** — 用自然语言描述目标,内置的协调者智能体自动将其拆解为带依赖关系和分配的任务图——无需手动编排。 - **多智能体团队** — 定义不同角色、工具甚至不同模型的智能体。它们通过消息总线和共享内存协作。 - **任务 DAG 调度** — 任务之间存在依赖关系。框架进行拓扑排序——有依赖的任务等待,无依赖的任务并行执行。 -- **模型无关** — Claude、GPT 和本地模型(Ollama、vLLM、LM Studio)可以在同一个团队中使用。通过 `baseURL` 即可接入任何 OpenAI 兼容服务。 +- **模型无关** — Claude、GPT、Gemini 和本地模型(Ollama、vLLM、LM Studio)可以在同一个团队中使用。通过 `baseURL` 即可接入任何 OpenAI 兼容服务。 - **进程内执行** — 没有子进程开销。所有内容在一个 Node.js 进程中运行。可部署到 Serverless、Docker、CI/CD。 ## 快速开始 @@ -158,6 +158,7 @@ npx tsx examples/01-single-agent.ts │ - stream() │ │ - AnthropicAdapter │ └────────┬──────────┘ │ - OpenAIAdapter │ │ │ - CopilotAdapter │ + │ │ - GeminiAdapter │ │ └──────────────────────┘ ┌────────▼──────────┐ │ AgentRunner │ ┌──────────────────────┐ @@ -184,6 +185,7 @@ npx tsx examples/01-single-agent.ts | Anthropic (Claude) | `provider: 'anthropic'` | `ANTHROPIC_API_KEY` | 已验证 | | OpenAI (GPT) | `provider: 'openai'` | `OPENAI_API_KEY` | 已验证 | | GitHub Copilot | `provider: 'copilot'` | `GITHUB_TOKEN` | 已验证 | +| Gemini | `provider: 'gemini'` | `GEMINI_API_KEY` | 已验证 | | Ollama / vLLM / LM Studio | `provider: 'openai'` + `baseURL` | — | 已验证 | 任何 OpenAI 兼容 API 均可通过 `provider: 'openai'` + `baseURL` 接入(DeepSeek、Groq、Mistral、Qwen、MiniMax 等)。这些 Provider 尚未完整验证——欢迎通过 [#25](https://github.com/JackChen-me/open-multi-agent/issues/25) 贡献验证。 From 91826b5c22969ea56cec8eb754ae1ea299019d5d Mon Sep 17 00:00:00 2001 From: MrAvalonApple <74775400+ibrahimkazimov@users.noreply.github.com> Date: Thu, 2 Apr 2026 22:16:16 +0300 Subject: [PATCH 3/9] docs: add example for Gemini model --- 
examples/08-gemini-test.ts | 49 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 examples/08-gemini-test.ts diff --git a/examples/08-gemini-test.ts b/examples/08-gemini-test.ts new file mode 100644 index 0000000..42ae816 --- /dev/null +++ b/examples/08-gemini-test.ts @@ -0,0 +1,49 @@ +/** + * Quick smoke test for the Copilot adapter. + * + * Run: + * npx tsx examples/05-copilot-test.ts + * + * If GITHUB_COPILOT_TOKEN is not set, the adapter will start an interactive + * OAuth2 device flow — you'll be prompted to sign in via your browser. + */ + +import { OpenMultiAgent } from '../src/index.js' +import type { OrchestratorEvent } from '../src/types.js' + +const orchestrator = new OpenMultiAgent({ + defaultModel: 'gemini-2.5-flash', + defaultProvider: 'gemini', + onProgress: (event: OrchestratorEvent) => { + if (event.type === 'agent_start') { + console.log(`[start] agent=${event.agent}`) + } else if (event.type === 'agent_complete') { + console.log(`[complete] agent=${event.agent}`) + } + }, +}) + +console.log('Testing Gemini adapter with gemini-2.5-flash...\n') + +const result = await orchestrator.runAgent( + { + name: 'assistant', + model: 'gemini-2.5-flash', + provider: 'gemini', + systemPrompt: 'You are a helpful assistant. Keep answers brief.', + maxTurns: 1, + maxTokens: 256, + }, + 'What is 2 + 2? 
Reply in one sentence.', +) + +if (result.success) { + console.log('\nAgent output:') + console.log('─'.repeat(60)) + console.log(result.output) + console.log('─'.repeat(60)) + console.log(`\nTokens: input=${result.tokenUsage.input_tokens}, output=${result.tokenUsage.output_tokens}`) +} else { + console.error('Agent failed:', result.output) + process.exit(1) +} From 9f3e4751aa50ec655bcb66488c8c23d7dd0c495c Mon Sep 17 00:00:00 2001 From: MrAvalonApple <74775400+ibrahimkazimov@users.noreply.github.com> Date: Thu, 2 Apr 2026 22:17:54 +0300 Subject: [PATCH 4/9] docs: update example for Gemini adapter with correct usage instructions --- examples/08-gemini-test.ts | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/examples/08-gemini-test.ts b/examples/08-gemini-test.ts index 42ae816..a1f5435 100644 --- a/examples/08-gemini-test.ts +++ b/examples/08-gemini-test.ts @@ -1,11 +1,10 @@ /** - * Quick smoke test for the Copilot adapter. + * Quick smoke test for the Gemini adapter. * * Run: - * npx tsx examples/05-copilot-test.ts + * npx tsx examples/08-gemini-test.ts * - * If GITHUB_COPILOT_TOKEN is not set, the adapter will start an interactive - * OAuth2 device flow — you'll be prompted to sign in via your browser. + * If GEMINI_API_KEY is not set, the adapter will not work. 
*/ import { OpenMultiAgent } from '../src/index.js' From 553bf24e61f44ac1985e07a6545f3016f1082f92 Mon Sep 17 00:00:00 2001 From: MrAvalonApple <74775400+ibrahimkazimov@users.noreply.github.com> Date: Sat, 4 Apr 2026 22:12:28 +0300 Subject: [PATCH 5/9] chore: support Node >=18, add optional @google/genai peer dependency and API key fallback --- examples/{08-gemini-test.ts => 13-gemini.ts} | 2 +- package-lock.json | 120 ++++++++++++++++--- package.json | 11 +- src/llm/gemini.ts | 22 +--- tests/gemini-adapter.test.ts | 97 +++++++++++++++ 5 files changed, 210 insertions(+), 42 deletions(-) rename examples/{08-gemini-test.ts => 13-gemini.ts} (96%) create mode 100644 tests/gemini-adapter.test.ts diff --git a/examples/08-gemini-test.ts b/examples/13-gemini.ts similarity index 96% rename from examples/08-gemini-test.ts rename to examples/13-gemini.ts index a1f5435..ddbf0c1 100644 --- a/examples/08-gemini-test.ts +++ b/examples/13-gemini.ts @@ -2,7 +2,7 @@ * Quick smoke test for the Gemini adapter. * * Run: - * npx tsx examples/08-gemini-test.ts + * npx tsx examples/13-gemini.ts * * If GEMINI_API_KEY is not set, the adapter will not work. 
*/ diff --git a/package-lock.json b/package-lock.json index c98f848..9c25491 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,16 +1,15 @@ { "name": "@jackchen_me/open-multi-agent", - "version": "0.1.0", + "version": "0.2.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@jackchen_me/open-multi-agent", - "version": "0.1.0", + "version": "0.2.0", "license": "MIT", "dependencies": { "@anthropic-ai/sdk": "^0.52.0", - "@google/genai": "^1.48.0", "openai": "^4.73.0", "zod": "^3.23.0" }, @@ -21,7 +20,15 @@ "vitest": "^2.1.0" }, "engines": { - "node": ">=20.0.0" + "node": ">=18.0.0" + }, + "peerDependencies": { + "@google/genai": "^1.48.0" + }, + "peerDependenciesMeta": { + "@google/genai": { + "optional": true + } } }, "node_modules/@anthropic-ai/sdk": { @@ -480,6 +487,8 @@ "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.48.0.tgz", "integrity": "sha512-plonYK4ML2PrxsRD9SeqmFt76eREWkQdPCglOA6aYDzL1AAbE+7PUnT54SvpWGfws13L0AZEqGSpL7+1IPnTxQ==", "license": "Apache-2.0", + "optional": true, + "peer": true, "dependencies": { "google-auth-library": "^10.3.0", "p-retry": "^4.6.2", @@ -509,31 +518,41 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==", - "license": "BSD-3-Clause" + "license": "BSD-3-Clause", + "optional": true, + "peer": true }, "node_modules/@protobufjs/base64": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==", - "license": "BSD-3-Clause" + "license": "BSD-3-Clause", + "optional": true, + "peer": true }, "node_modules/@protobufjs/codegen": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", "integrity": 
"sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==", - "license": "BSD-3-Clause" + "license": "BSD-3-Clause", + "optional": true, + "peer": true }, "node_modules/@protobufjs/eventemitter": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==", - "license": "BSD-3-Clause" + "license": "BSD-3-Clause", + "optional": true, + "peer": true }, "node_modules/@protobufjs/fetch": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", "license": "BSD-3-Clause", + "optional": true, + "peer": true, "dependencies": { "@protobufjs/aspromise": "^1.1.1", "@protobufjs/inquire": "^1.1.0" @@ -543,31 +562,41 @@ "version": "1.0.2", "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==", - "license": "BSD-3-Clause" + "license": "BSD-3-Clause", + "optional": true, + "peer": true }, "node_modules/@protobufjs/inquire": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==", - "license": "BSD-3-Clause" + "license": "BSD-3-Clause", + "optional": true, + "peer": true }, "node_modules/@protobufjs/path": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==", - "license": "BSD-3-Clause" + "license": "BSD-3-Clause", + "optional": true, + "peer": true }, "node_modules/@protobufjs/pool": { 
"version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==", - "license": "BSD-3-Clause" + "license": "BSD-3-Clause", + "optional": true, + "peer": true }, "node_modules/@protobufjs/utf8": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==", - "license": "BSD-3-Clause" + "license": "BSD-3-Clause", + "optional": true, + "peer": true }, "node_modules/@rollup/rollup-darwin-arm64": { "version": "4.60.1", @@ -613,7 +642,9 @@ "version": "0.12.0", "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", - "license": "MIT" + "license": "MIT", + "optional": true, + "peer": true }, "node_modules/@vitest/expect": { "version": "2.1.9", @@ -745,6 +776,8 @@ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", "license": "MIT", + "optional": true, + "peer": true, "engines": { "node": ">= 14" } @@ -795,13 +828,17 @@ "url": "https://feross.org/support" } ], - "license": "MIT" + "license": "MIT", + "optional": true, + "peer": true }, "node_modules/bignumber.js": { "version": "9.3.1", "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.3.1.tgz", "integrity": "sha512-Ko0uX15oIUS7wJ3Rb30Fs6SkVbLmPBAKdlm7q9+ak9bbIeFf0MwuBsQV6z7+X768/cHsfg+WlysDWJcmthjsjQ==", "license": "MIT", + "optional": true, + "peer": true, "engines": { "node": "*" } @@ -810,7 +847,9 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", "integrity": 
"sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==", - "license": "BSD-3-Clause" + "license": "BSD-3-Clause", + "optional": true, + "peer": true }, "node_modules/cac": { "version": "6.7.14", @@ -879,6 +918,8 @@ "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", "integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==", "license": "MIT", + "optional": true, + "peer": true, "engines": { "node": ">= 12" } @@ -887,6 +928,7 @@ "version": "4.4.3", "resolved": "https://registry.npmmirror.com/debug/-/debug-4.4.3.tgz", "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "devOptional": true, "license": "MIT", "dependencies": { "ms": "^2.1.3" @@ -938,6 +980,8 @@ "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", "license": "Apache-2.0", + "optional": true, + "peer": true, "dependencies": { "safe-buffer": "^5.0.1" } @@ -1440,7 +1484,9 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", - "license": "MIT" + "license": "MIT", + "optional": true, + "peer": true }, "node_modules/fetch-blob": { "version": "3.2.0", @@ -1457,6 +1503,8 @@ } ], "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "node-domexception": "^1.0.0", "web-streams-polyfill": "^3.0.3" @@ -1470,6 +1518,8 @@ "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", "license": "MIT", + "optional": true, + "peer": true, "engines": { "node": ">= 8" } 
@@ -1514,6 +1564,8 @@ "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "fetch-blob": "^3.1.2" }, @@ -1550,6 +1602,8 @@ "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-7.1.4.tgz", "integrity": "sha512-bTIgTsM2bWn3XklZISBTQX7ZSddGW+IO3bMdGaemHZ3tbqExMENHLx6kKZ/KlejgrMtj8q7wBItt51yegqalrA==", "license": "Apache-2.0", + "optional": true, + "peer": true, "dependencies": { "extend": "^3.0.2", "https-proxy-agent": "^7.0.1", @@ -1564,6 +1618,8 @@ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz", "integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==", "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "data-uri-to-buffer": "^4.0.0", "fetch-blob": "^3.1.4", @@ -1582,6 +1638,8 @@ "resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-8.1.2.tgz", "integrity": "sha512-zV/5HKTfCeKWnxG0Dmrw51hEWFGfcF2xiXqcA3+J90WDuP0SvoiSO5ORvcBsifmx/FoIjgQN3oNOGaQ5PhLFkg==", "license": "Apache-2.0", + "optional": true, + "peer": true, "dependencies": { "gaxios": "^7.0.0", "google-logging-utils": "^1.0.0", @@ -1646,6 +1704,8 @@ "resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-10.6.2.tgz", "integrity": "sha512-e27Z6EThmVNNvtYASwQxose/G57rkRuaRbQyxM2bvYLLX/GqWZ5chWq2EBoUchJbCc57eC9ArzO5wMsEmWftCw==", "license": "Apache-2.0", + "optional": true, + "peer": true, "dependencies": { "base64-js": "^1.3.0", "ecdsa-sig-formatter": "^1.0.11", @@ -1663,6 +1723,8 @@ "resolved": "https://registry.npmjs.org/google-logging-utils/-/google-logging-utils-1.1.3.tgz", "integrity": "sha512-eAmLkjDjAFCVXg7A1unxHsLf961m6y17QFqXqAXGj/gVkKFrEICfStRfwUlGNfeCEjNRa32JEWOUTlYXPyyKvA==", "license": "Apache-2.0", + "optional": true, + "peer": true, 
"engines": { "node": ">=14" } @@ -1723,6 +1785,8 @@ "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "agent-base": "^7.1.2", "debug": "4" @@ -1745,6 +1809,8 @@ "resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz", "integrity": "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==", "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "bignumber.js": "^9.0.0" } @@ -1754,6 +1820,8 @@ "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.1.tgz", "integrity": "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg==", "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "buffer-equal-constant-time": "^1.0.1", "ecdsa-sig-formatter": "1.0.11", @@ -1765,6 +1833,8 @@ "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.1.tgz", "integrity": "sha512-EKI/M/yqPncGUUh44xz0PxSidXFr/+r0pA70+gIYhjv+et7yxM+s29Y+VGDkovRofQem0fs7Uvf4+YmAdyRduA==", "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "jwa": "^2.0.1", "safe-buffer": "^5.0.1" @@ -1774,7 +1844,9 @@ "version": "5.3.2", "resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz", "integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==", - "license": "Apache-2.0" + "license": "Apache-2.0", + "optional": true, + "peer": true }, "node_modules/loupe": { "version": "3.2.1", @@ -1938,6 +2010,8 @@ "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", "license": "MIT", + "optional": true, + "peer": true, "dependencies": { "@types/retry": "0.12.0", "retry": "^0.13.1" 
@@ -2005,6 +2079,8 @@ "integrity": "sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==", "hasInstallScript": true, "license": "BSD-3-Clause", + "optional": true, + "peer": true, "dependencies": { "@protobufjs/aspromise": "^1.1.2", "@protobufjs/base64": "^1.1.2", @@ -2038,6 +2114,8 @@ "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", "license": "MIT", + "optional": true, + "peer": true, "engines": { "node": ">= 4" } @@ -2105,7 +2183,9 @@ "url": "https://feross.org/support" } ], - "license": "MIT" + "license": "MIT", + "optional": true, + "peer": true }, "node_modules/siginfo": { "version": "2.0.0", @@ -2483,6 +2563,8 @@ "resolved": "https://registry.npmjs.org/ws/-/ws-8.20.0.tgz", "integrity": "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA==", "license": "MIT", + "optional": true, + "peer": true, "engines": { "node": ">=10.0.0" }, diff --git a/package.json b/package.json index 287dba9..c14a184 100644 --- a/package.json +++ b/package.json @@ -34,14 +34,21 @@ "author": "", "license": "MIT", "engines": { - "node": ">=20.0.0" + "node": ">=18.0.0" }, "dependencies": { "@anthropic-ai/sdk": "^0.52.0", - "@google/genai": "^1.48.0", "openai": "^4.73.0", "zod": "^3.23.0" }, + "peerDependencies": { + "@google/genai": "^1.48.0" + }, + "peerDependenciesMeta": { + "@google/genai": { + "optional": true + } + }, "devDependencies": { "@types/node": "^22.0.0", "tsx": "^4.21.0", diff --git a/src/llm/gemini.ts b/src/llm/gemini.ts index a618ff3..f68d981 100644 --- a/src/llm/gemini.ts +++ b/src/llm/gemini.ts @@ -11,6 +11,7 @@ * API key resolution order: * 1. `apiKey` constructor argument * 2. `GEMINI_API_KEY` environment variable + * 3. 
`GOOGLE_API_KEY` environment variable * * @example * ```ts @@ -37,7 +38,6 @@ import { import type { ContentBlock, - ImageBlock, LLMAdapter, LLMChatOptions, LLMMessage, @@ -45,8 +45,6 @@ import type { LLMStreamOptions, LLMToolDef, StreamEvent, - TextBlock, - ToolResultBlock, ToolUseBlock, } from '../types.js' @@ -255,7 +253,7 @@ export class GeminiAdapter implements LLMAdapter { constructor(apiKey?: string) { this.#client = new GoogleGenAI({ - apiKey: apiKey ?? process.env['GEMINI_API_KEY'], + apiKey: apiKey ?? process.env['GEMINI_API_KEY'] ?? process.env['GOOGLE_API_KEY'], }) } @@ -378,19 +376,3 @@ export class GeminiAdapter implements LLMAdapter { } } } - -// Re-export types that consumers of this module commonly need alongside the adapter. -export type { - ContentBlock, - ImageBlock, - LLMAdapter, - LLMChatOptions, - LLMMessage, - LLMResponse, - LLMStreamOptions, - LLMToolDef, - StreamEvent, - TextBlock, - ToolResultBlock, - ToolUseBlock, -} \ No newline at end of file diff --git a/tests/gemini-adapter.test.ts b/tests/gemini-adapter.test.ts new file mode 100644 index 0000000..7402bba --- /dev/null +++ b/tests/gemini-adapter.test.ts @@ -0,0 +1,97 @@ +import { describe, it, expect, vi, beforeEach } from 'vitest' + +// --------------------------------------------------------------------------- +// Mock GoogleGenAI constructor (must be hoisted for Vitest) +// --------------------------------------------------------------------------- +const GoogleGenAIMock = vi.hoisted(() => vi.fn()) + +vi.mock('@google/genai', () => ({ + GoogleGenAI: GoogleGenAIMock, + FunctionCallingConfigMode: { AUTO: 'AUTO' }, +})) + +import { GeminiAdapter } from '../src/llm/gemini.js' +import { createAdapter } from '../src/llm/adapter.js' + +// --------------------------------------------------------------------------- +// GeminiAdapter tests +// --------------------------------------------------------------------------- + +describe('GeminiAdapter', () => { + beforeEach(() => { + 
GoogleGenAIMock.mockClear() + }) + + it('has name "gemini"', () => { + const adapter = new GeminiAdapter() + expect(adapter.name).toBe('gemini') + }) + + it('uses GEMINI_API_KEY by default', () => { + const originalGemini = process.env['GEMINI_API_KEY'] + const originalGoogle = process.env['GOOGLE_API_KEY'] + process.env['GEMINI_API_KEY'] = 'gemini-env-key' + delete process.env['GOOGLE_API_KEY'] + + try { + new GeminiAdapter() + expect(GoogleGenAIMock).toHaveBeenCalledWith( + expect.objectContaining({ + apiKey: 'gemini-env-key', + }), + ) + } finally { + if (originalGemini === undefined) { + delete process.env['GEMINI_API_KEY'] + } else { + process.env['GEMINI_API_KEY'] = originalGemini + } + if (originalGoogle === undefined) { + delete process.env['GOOGLE_API_KEY'] + } else { + process.env['GOOGLE_API_KEY'] = originalGoogle + } + } + }) + + it('falls back to GOOGLE_API_KEY when GEMINI_API_KEY is unset', () => { + const originalGemini = process.env['GEMINI_API_KEY'] + const originalGoogle = process.env['GOOGLE_API_KEY'] + delete process.env['GEMINI_API_KEY'] + process.env['GOOGLE_API_KEY'] = 'google-env-key' + + try { + new GeminiAdapter() + expect(GoogleGenAIMock).toHaveBeenCalledWith( + expect.objectContaining({ + apiKey: 'google-env-key', + }), + ) + } finally { + if (originalGemini === undefined) { + delete process.env['GEMINI_API_KEY'] + } else { + process.env['GEMINI_API_KEY'] = originalGemini + } + if (originalGoogle === undefined) { + delete process.env['GOOGLE_API_KEY'] + } else { + process.env['GOOGLE_API_KEY'] = originalGoogle + } + } + }) + + it('allows overriding apiKey explicitly', () => { + new GeminiAdapter('explicit-key') + expect(GoogleGenAIMock).toHaveBeenCalledWith( + expect.objectContaining({ + apiKey: 'explicit-key', + }), + ) + }) + + it('createAdapter("gemini") returns GeminiAdapter instance', async () => { + const adapter = await createAdapter('gemini') + expect(adapter).toBeInstanceOf(GeminiAdapter) + }) +}) From 
bc31008f4e7222b5f69c5615b894ef64911bfa6c Mon Sep 17 00:00:00 2001 From: JackChen Date: Sun, 5 Apr 2026 03:20:20 +0800 Subject: [PATCH 6/9] feat(llm): add fallback tool-call extraction for local models (#15) Local models (Ollama, vLLM) sometimes return tool calls as text instead of using the native tool_calls wire format. This adds a safety-net extractor that parses tool calls from model text output when native tool_calls is empty. - Add text-tool-extractor with support for bare JSON, code fences, and Hermes tags - Wire fallback into OpenAI adapter chat() and stream() paths - Add onWarning callback when model ignores configured tools - Add timeoutMs on AgentConfig for per-run abort (local models can be slow) - Add 26 tests for extractor and fallback behavior - Document local model compatibility in README --- README.md | 27 ++++ examples/06-local-model.ts | 1 + src/agent/agent.ts | 12 +- src/agent/runner.ts | 26 +++- src/llm/copilot.ts | 3 +- src/llm/openai-common.ts | 43 +++++- src/llm/openai.ts | 24 +++- src/tool/text-tool-extractor.ts | 228 ++++++++++++++++++++++++++++++ src/types.ts | 6 + tests/openai-fallback.test.ts | 159 +++++++++++++++++++++ tests/text-tool-extractor.test.ts | 170 ++++++++++++++++++++++ 11 files changed, 691 insertions(+), 8 deletions(-) create mode 100644 src/tool/text-tool-extractor.ts create mode 100644 tests/openai-fallback.test.ts create mode 100644 tests/text-tool-extractor.test.ts diff --git a/README.md b/README.md index 103bdb9..b762ae6 100644 --- a/README.md +++ b/README.md @@ -197,6 +197,33 @@ Verified local models with tool-calling: **Gemma 4** (see [example 08](examples/ Any OpenAI-compatible API should work via `provider: 'openai'` + `baseURL` (DeepSeek, Groq, Mistral, Qwen, MiniMax, etc.). **Grok now has first-class support** via `provider: 'grok'`. +### Local Model Tool-Calling + +The framework supports tool-calling with local models served by Ollama, vLLM, LM Studio, or llama.cpp. 
Tool-calling is handled natively by these servers via the OpenAI-compatible API. + +**Verified models:** Gemma 4, Llama 3.1, Qwen 3, Mistral, Phi-4. See the full list at [ollama.com/search?c=tools](https://ollama.com/search?c=tools). + +**Fallback extraction:** If a local model returns tool calls as text instead of using the `tool_calls` wire format (common with thinking models or misconfigured servers), the framework automatically extracts them from the text output. + +**Timeout:** Local inference can be slow. Use `timeoutMs` on `AgentConfig` to prevent indefinite hangs: + +```typescript +const localAgent: AgentConfig = { + name: 'local', + model: 'llama3.1', + provider: 'openai', + baseURL: 'http://localhost:11434/v1', + apiKey: 'ollama', + tools: ['bash', 'file_read'], + timeoutMs: 120_000, // abort after 2 minutes +} +``` + +**Troubleshooting:** +- Model not calling tools? Ensure it appears in Ollama's [Tools category](https://ollama.com/search?c=tools). Not all models support tool-calling. +- Using Ollama? Update to the latest version (`ollama update`) — older versions have known tool-calling bugs. +- Proxy interfering? Use `no_proxy=localhost` when running against local servers. + ### LLM Configuration Examples ```typescript diff --git a/examples/06-local-model.ts b/examples/06-local-model.ts index d7cf292..977950b 100644 --- a/examples/06-local-model.ts +++ b/examples/06-local-model.ts @@ -64,6 +64,7 @@ Your review MUST include these sections: Be specific and constructive. 
Reference line numbers or function names when possible.`, tools: ['file_read'], maxTurns: 4, + timeoutMs: 120_000, // 2 min — local models can be slow } // --------------------------------------------------------------------------- diff --git a/src/agent/agent.ts b/src/agent/agent.ts index caf5a9c..0d7f665 100644 --- a/src/agent/agent.ts +++ b/src/agent/agent.ts @@ -293,10 +293,16 @@ export class Agent { } // Auto-generate runId when onTrace is provided but runId is missing const needsRunId = callerOptions?.onTrace && !callerOptions.runId + // Create a fresh timeout signal per run (not per runner) so that + // each run() / prompt() call gets its own timeout window. + const timeoutSignal = this.config.timeoutMs !== undefined && this.config.timeoutMs > 0 + ? AbortSignal.timeout(this.config.timeoutMs) + : undefined const runOptions: RunOptions = { ...callerOptions, onMessage: internalOnMessage, ...(needsRunId ? { runId: generateRunId() } : undefined), + ...(timeoutSignal ? { abortSignal: timeoutSignal } : undefined), } const result = await runner.run(messages, runOptions) @@ -466,8 +472,12 @@ export class Agent { } const runner = await this.getRunner() + // Fresh timeout per stream call, same as executeRun. + const timeoutSignal = this.config.timeoutMs !== undefined && this.config.timeoutMs > 0 + ? AbortSignal.timeout(this.config.timeoutMs) + : undefined - for await (const event of runner.stream(messages)) { + for await (const event of runner.stream(messages, timeoutSignal ? 
{ abortSignal: timeoutSignal } : {})) { if (event.type === 'done') { const result = event.data as import('./runner.js').RunResult this.state.tokenUsage = addUsage(this.state.tokenUsage, result.tokenUsage) diff --git a/src/agent/runner.ts b/src/agent/runner.ts index 113f93c..5e74254 100644 --- a/src/agent/runner.ts +++ b/src/agent/runner.ts @@ -78,6 +78,11 @@ export interface RunOptions { readonly onToolResult?: (name: string, result: ToolResult) => void /** Fired after each complete {@link LLMMessage} is appended. */ readonly onMessage?: (message: LLMMessage) => void + /** + * Fired when the runner detects a potential configuration issue. + * For example, when a model appears to ignore tool definitions. + */ + readonly onWarning?: (message: string) => void /** Trace callback for observability spans. Async callbacks are safe. */ readonly onTrace?: (event: TraceEvent) => void | Promise /** Run ID for trace correlation. */ @@ -86,6 +91,11 @@ export interface RunOptions { readonly taskId?: string /** Agent name for trace correlation (overrides RunnerOptions.agentName). */ readonly traceAgent?: string + /** + * Per-call abort signal. When set, takes precedence over the static + * {@link RunnerOptions.abortSignal}. Useful for per-run timeouts. + */ + readonly abortSignal?: AbortSignal } /** The aggregated result returned when a full run completes. */ @@ -235,13 +245,16 @@ export class AgentRunner { ? allDefs.filter(d => this.options.allowedTools!.includes(d.name)) : allDefs + // Per-call abortSignal takes precedence over the static one. + const effectiveAbortSignal = options.abortSignal ?? this.options.abortSignal + const baseChatOptions: LLMChatOptions = { model: this.options.model, tools: toolDefs.length > 0 ? 
toolDefs : undefined, maxTokens: this.options.maxTokens, temperature: this.options.temperature, systemPrompt: this.options.systemPrompt, - abortSignal: this.options.abortSignal, + abortSignal: effectiveAbortSignal, } try { @@ -250,7 +263,7 @@ export class AgentRunner { // ----------------------------------------------------------------- while (true) { // Respect abort before each LLM call. - if (this.options.abortSignal?.aborted) { + if (effectiveAbortSignal?.aborted) { break } @@ -311,6 +324,15 @@ export class AgentRunner { // Step 3: Decide whether to continue looping. // ------------------------------------------------------------------ if (toolUseBlocks.length === 0) { + // Warn on first turn if tools were provided but model didn't use them. + if (turns === 1 && toolDefs.length > 0 && options.onWarning) { + const agentName = this.options.agentName ?? 'unknown' + options.onWarning( + `Agent "${agentName}" has ${toolDefs.length} tool(s) available but the model ` + + `returned no tool calls. If using a local model, verify it supports tool calling ` + + `(see https://ollama.com/search?c=tools).`, + ) + } // No tools requested — this is the terminal assistant turn. 
finalOutput = turnText break diff --git a/src/llm/copilot.ts b/src/llm/copilot.ts index 7e829fe..44349f8 100644 --- a/src/llm/copilot.ts +++ b/src/llm/copilot.ts @@ -313,7 +313,8 @@ export class CopilotAdapter implements LLMAdapter { }, ) - return fromOpenAICompletion(completion) + const toolNames = options.tools?.map(t => t.name) + return fromOpenAICompletion(completion, toolNames) } // ------------------------------------------------------------------------- diff --git a/src/llm/openai-common.ts b/src/llm/openai-common.ts index 46fc67a..cdb16a0 100644 --- a/src/llm/openai-common.ts +++ b/src/llm/openai-common.ts @@ -25,6 +25,7 @@ import type { TextBlock, ToolUseBlock, } from '../types.js' +import { extractToolCallsFromText } from '../tool/text-tool-extractor.js' // --------------------------------------------------------------------------- // Framework → OpenAI @@ -166,8 +167,18 @@ function toOpenAIAssistantMessage(msg: LLMMessage): ChatCompletionAssistantMessa * * Takes only the first choice (index 0), consistent with how the framework * is designed for single-output agents. + * + * @param completion - The raw OpenAI completion. + * @param knownToolNames - Optional whitelist of tool names. When the model + * returns no `tool_calls` but the text contains JSON + * that looks like a tool call, the fallback extractor + * uses this list to validate matches. Pass the names + * of tools sent in the request for best results. */ -export function fromOpenAICompletion(completion: ChatCompletion): LLMResponse { +export function fromOpenAICompletion( + completion: ChatCompletion, + knownToolNames?: string[], +): LLMResponse { const choice = completion.choices[0] if (choice === undefined) { throw new Error('OpenAI returned a completion with no choices') @@ -201,7 +212,35 @@ export function fromOpenAICompletion(completion: ChatCompletion): LLMResponse { content.push(toolUseBlock) } - const stopReason = normalizeFinishReason(choice.finish_reason ?? 
'stop') + // --------------------------------------------------------------------------- + // Fallback: extract tool calls from text when native tool_calls is empty. + // + // Some local models (Ollama thinking models, misconfigured vLLM) return tool + // calls as plain text instead of using the tool_calls wire format. When we + // have text but no tool_calls, try to extract them from the text. + // --------------------------------------------------------------------------- + const hasNativeToolCalls = (message.tool_calls ?? []).length > 0 + if ( + !hasNativeToolCalls && + knownToolNames !== undefined && + knownToolNames.length > 0 && + message.content !== null && + message.content !== undefined && + message.content.length > 0 + ) { + const extracted = extractToolCallsFromText(message.content, knownToolNames) + if (extracted.length > 0) { + content.push(...extracted) + } + } + + const hasToolUseBlocks = content.some(b => b.type === 'tool_use') + const rawStopReason = choice.finish_reason ?? 'stop' + // If we extracted tool calls from text but the finish_reason was 'stop', + // correct it to 'tool_use' so the agent runner continues the loop. + const stopReason = hasToolUseBlocks && rawStopReason === 'stop' + ? 
'tool_use' + : normalizeFinishReason(rawStopReason) return { id: completion.id, diff --git a/src/llm/openai.ts b/src/llm/openai.ts index e3f166f..cd48086 100644 --- a/src/llm/openai.ts +++ b/src/llm/openai.ts @@ -54,6 +54,7 @@ import { normalizeFinishReason, buildOpenAIMessageList, } from './openai-common.js' +import { extractToolCallsFromText } from '../tool/text-tool-extractor.js' // --------------------------------------------------------------------------- // Adapter implementation @@ -104,7 +105,8 @@ export class OpenAIAdapter implements LLMAdapter { }, ) - return fromOpenAICompletion(completion) + const toolNames = options.tools?.map(t => t.name) + return fromOpenAICompletion(completion, toolNames) } // ------------------------------------------------------------------------- @@ -241,11 +243,29 @@ export class OpenAIAdapter implements LLMAdapter { } doneContent.push(...finalToolUseBlocks) + // Fallback: extract tool calls from text when streaming produced no + // native tool_calls (same logic as fromOpenAICompletion). + if (finalToolUseBlocks.length === 0 && fullText.length > 0 && options.tools) { + const toolNames = options.tools.map(t => t.name) + const extracted = extractToolCallsFromText(fullText, toolNames) + if (extracted.length > 0) { + doneContent.push(...extracted) + for (const block of extracted) { + yield { type: 'tool_use', data: block } satisfies StreamEvent + } + } + } + + const hasToolUseBlocks = doneContent.some(b => b.type === 'tool_use') + const resolvedStopReason = hasToolUseBlocks && finalFinishReason === 'stop' + ? 
'tool_use' + : normalizeFinishReason(finalFinishReason) + const finalResponse: LLMResponse = { id: completionId, content: doneContent, model: completionModel, - stop_reason: normalizeFinishReason(finalFinishReason), + stop_reason: resolvedStopReason, usage: { input_tokens: inputTokens, output_tokens: outputTokens }, } diff --git a/src/tool/text-tool-extractor.ts b/src/tool/text-tool-extractor.ts new file mode 100644 index 0000000..79e3197 --- /dev/null +++ b/src/tool/text-tool-extractor.ts @@ -0,0 +1,228 @@ +/** + * @fileoverview Fallback tool-call extractor for local models. + * + * When a local model (Ollama, vLLM, LM Studio) returns tool calls as plain + * text instead of using the OpenAI `tool_calls` wire format, this module + * attempts to extract them from the text output. + * + * Common scenarios: + * - Ollama thinking-model bug: tool call JSON ends up inside unclosed `` tags + * - Model outputs raw JSON tool calls without the server parsing them + * - Model wraps tool calls in markdown code fences + * - Hermes-format `` tags + * + * This is a **safety net**, not the primary path. Native `tool_calls` from + * the server are always preferred. + */ + +import type { ToolUseBlock } from '../types.js' + +// --------------------------------------------------------------------------- +// ID generation +// --------------------------------------------------------------------------- + +let callCounter = 0 + +/** Generate a unique tool-call ID for extracted calls. */ +function generateToolCallId(): string { + return `extracted_call_${Date.now()}_${++callCounter}` +} + +// --------------------------------------------------------------------------- +// Internal parsers +// --------------------------------------------------------------------------- + +/** + * Try to parse a single JSON object as a tool call. 
+ *
+ * Accepted shapes:
+ * ```json
+ * { "name": "bash", "arguments": { "command": "ls" } }
+ * { "name": "bash", "parameters": { "command": "ls" } }
+ * { "function": { "name": "bash", "arguments": { "command": "ls" } } }
+ * ```
+ */
+function parseToolCallJSON(
+  json: unknown,
+  knownToolNames: ReadonlySet<string>,
+): ToolUseBlock | null {
+  if (json === null || typeof json !== 'object' || Array.isArray(json)) {
+    return null
+  }
+
+  const obj = json as Record<string, unknown>
+
+  // Shape: { function: { name, arguments } }
+  if (typeof obj['function'] === 'object' && obj['function'] !== null) {
+    const fn = obj['function'] as Record<string, unknown>
+    return parseFlat(fn, knownToolNames)
+  }
+
+  // Shape: { name, arguments|parameters }
+  return parseFlat(obj, knownToolNames)
+}
+
+function parseFlat(
+  obj: Record<string, unknown>,
+  knownToolNames: ReadonlySet<string>,
+): ToolUseBlock | null {
+  const name = obj['name']
+  if (typeof name !== 'string' || name.length === 0) return null
+
+  // Whitelist check — don't treat arbitrary JSON as a tool call
+  if (knownToolNames.size > 0 && !knownToolNames.has(name)) return null
+
+  let input: Record<string, unknown> = {}
+  const args = obj['arguments'] ?? obj['parameters'] ?? obj['input']
+  if (args !== null && args !== undefined) {
+    if (typeof args === 'string') {
+      try {
+        const parsed = JSON.parse(args)
+        if (typeof parsed === 'object' && parsed !== null && !Array.isArray(parsed)) {
+          input = parsed as Record<string, unknown>
+        }
+      } catch {
+        // Malformed — use empty input
+      }
+    } else if (typeof args === 'object' && !Array.isArray(args)) {
+      input = args as Record<string, unknown>
+    }
+  }
+
+  return {
+    type: 'tool_use',
+    id: generateToolCallId(),
+    name,
+    input,
+  }
+}
+
+// ---------------------------------------------------------------------------
+// JSON extraction from text
+// ---------------------------------------------------------------------------
+
+/**
+ * Find all top-level JSON objects in a string by tracking brace depth.
+ * Returns the parsed objects (not sub-objects).
+ */
+function extractJSONObjects(text: string): unknown[] {
+  const results: unknown[] = []
+  let depth = 0
+  let start = -1
+  let inString = false
+  let escape = false
+
+  for (let i = 0; i < text.length; i++) {
+    const ch = text[i]!
+
+    if (escape) {
+      escape = false
+      continue
+    }
+
+    if (ch === '\\' && inString) {
+      escape = true
+      continue
+    }
+
+    if (ch === '"') {
+      inString = !inString
+      continue
+    }
+
+    if (inString) continue
+
+    if (ch === '{') {
+      if (depth === 0) start = i
+      depth++
+    } else if (ch === '}') {
+      depth--
+      if (depth === 0 && start !== -1) {
+        const candidate = text.slice(start, i + 1)
+        try {
+          results.push(JSON.parse(candidate))
+        } catch {
+          // Not valid JSON — skip
+        }
+        start = -1
+      }
+    }
+  }
+
+  return results
+}
+
+// ---------------------------------------------------------------------------
+// Hermes format: <tool_call>...</tool_call>
+// ---------------------------------------------------------------------------
+
+function extractHermesToolCalls(
+  text: string,
+  knownToolNames: ReadonlySet<string>,
+): ToolUseBlock[] {
+  const results: ToolUseBlock[] = []
+
+  for (const match of text.matchAll(/<tool_call>\s*([\s\S]*?)\s*<\/tool_call>/g)) {
+    const inner = match[1]!.trim()
+    try {
+      const parsed: unknown = JSON.parse(inner)
+      const block = parseToolCallJSON(parsed, knownToolNames)
+      if (block !== null) results.push(block)
+    } catch {
+      // Malformed hermes content — skip
+    }
+  }
+
+  return results
+}
+
+// ---------------------------------------------------------------------------
+// Public API
+// ---------------------------------------------------------------------------
+
+/**
+ * Attempt to extract tool calls from a model's text output.
+ *
+ * Tries multiple strategies in order:
+ * 1. Hermes `<tool_call>` tags
+ * 2. JSON objects in text (bare or inside code fences)
+ *
+ * @param text - The model's text output.
+ * @param knownToolNames - Whitelist of registered tool names.
When non-empty, + * only JSON objects whose `name` matches a known tool + * are treated as tool calls. + * @returns Extracted {@link ToolUseBlock}s, or an empty array if none found. + */ +export function extractToolCallsFromText( + text: string, + knownToolNames: string[], +): ToolUseBlock[] { + if (text.length === 0) return [] + + const nameSet = new Set(knownToolNames) + + // Strategy 1: Hermes format + const hermesResults = extractHermesToolCalls(text, nameSet) + if (hermesResults.length > 0) return hermesResults + + // Strategy 2: Strip code fences, then extract JSON objects + const stripped = text.replace(/```(?:json)?\s*\n?([\s\S]*?)\n?\s*```/g, '$1') + const jsonObjects = extractJSONObjects(stripped) + + const results: ToolUseBlock[] = [] + for (const obj of jsonObjects) { + // Handle array of tool calls + if (Array.isArray(obj)) { + for (const item of obj) { + const block = parseToolCallJSON(item, nameSet) + if (block !== null) results.push(block) + } + continue + } + + const block = parseToolCallJSON(obj, nameSet) + if (block !== null) results.push(block) + } + + return results +} diff --git a/src/types.ts b/src/types.ts index 2887b6c..e7e1b6b 100644 --- a/src/types.ts +++ b/src/types.ts @@ -209,6 +209,12 @@ export interface AgentConfig { readonly maxTurns?: number readonly maxTokens?: number readonly temperature?: number + /** + * Maximum wall-clock time (in milliseconds) for the entire agent run. + * When exceeded, the run is aborted via `AbortSignal.timeout()`. + * Useful for local models where inference can be unpredictably slow. + */ + readonly timeoutMs?: number /** * Optional Zod schema for structured output. When set, the agent's final * output is parsed as JSON and validated against this schema. 
A single diff --git a/tests/openai-fallback.test.ts b/tests/openai-fallback.test.ts new file mode 100644 index 0000000..6200146 --- /dev/null +++ b/tests/openai-fallback.test.ts @@ -0,0 +1,159 @@ +import { describe, it, expect } from 'vitest' +import { fromOpenAICompletion } from '../src/llm/openai-common.js' +import type { ChatCompletion } from 'openai/resources/chat/completions/index.js' + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +function makeCompletion(overrides: { + content?: string | null + tool_calls?: ChatCompletion.Choice['message']['tool_calls'] + finish_reason?: string +}): ChatCompletion { + return { + id: 'chatcmpl-test', + object: 'chat.completion', + created: Date.now(), + model: 'test-model', + choices: [ + { + index: 0, + message: { + role: 'assistant', + content: overrides.content ?? null, + tool_calls: overrides.tool_calls, + refusal: null, + }, + finish_reason: (overrides.finish_reason ?? 
'stop') as 'stop' | 'tool_calls', + logprobs: null, + }, + ], + usage: { + prompt_tokens: 10, + completion_tokens: 20, + total_tokens: 30, + }, + } +} + +const TOOL_NAMES = ['bash', 'file_read', 'file_write'] + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +describe('fromOpenAICompletion fallback extraction', () => { + it('returns normal tool_calls when present (no fallback)', () => { + const completion = makeCompletion({ + content: 'Let me run a command.', + tool_calls: [ + { + id: 'call_123', + type: 'function', + function: { + name: 'bash', + arguments: '{"command": "ls"}', + }, + }, + ], + finish_reason: 'tool_calls', + }) + + const response = fromOpenAICompletion(completion, TOOL_NAMES) + const toolBlocks = response.content.filter(b => b.type === 'tool_use') + expect(toolBlocks).toHaveLength(1) + expect(toolBlocks[0]!.type === 'tool_use' && toolBlocks[0]!.name).toBe('bash') + expect(toolBlocks[0]!.type === 'tool_use' && toolBlocks[0]!.id).toBe('call_123') + expect(response.stop_reason).toBe('tool_use') + }) + + it('extracts tool calls from text when tool_calls is absent', () => { + const completion = makeCompletion({ + content: 'I will run this:\n{"name": "bash", "arguments": {"command": "pwd"}}', + finish_reason: 'stop', + }) + + const response = fromOpenAICompletion(completion, TOOL_NAMES) + const toolBlocks = response.content.filter(b => b.type === 'tool_use') + expect(toolBlocks).toHaveLength(1) + expect(toolBlocks[0]!.type === 'tool_use' && toolBlocks[0]!.name).toBe('bash') + expect(toolBlocks[0]!.type === 'tool_use' && toolBlocks[0]!.input).toEqual({ command: 'pwd' }) + // stop_reason should be corrected to tool_use + expect(response.stop_reason).toBe('tool_use') + }) + + it('does not fallback when knownToolNames is not provided', () => { + const completion = makeCompletion({ + content: '{"name": "bash", "arguments": {"command": 
"ls"}}', + finish_reason: 'stop', + }) + + const response = fromOpenAICompletion(completion) + const toolBlocks = response.content.filter(b => b.type === 'tool_use') + expect(toolBlocks).toHaveLength(0) + expect(response.stop_reason).toBe('end_turn') + }) + + it('does not fallback when knownToolNames is empty', () => { + const completion = makeCompletion({ + content: '{"name": "bash", "arguments": {"command": "ls"}}', + finish_reason: 'stop', + }) + + const response = fromOpenAICompletion(completion, []) + const toolBlocks = response.content.filter(b => b.type === 'tool_use') + expect(toolBlocks).toHaveLength(0) + expect(response.stop_reason).toBe('end_turn') + }) + + it('returns plain text when no tool calls found in text', () => { + const completion = makeCompletion({ + content: 'Hello! How can I help you today?', + finish_reason: 'stop', + }) + + const response = fromOpenAICompletion(completion, TOOL_NAMES) + const toolBlocks = response.content.filter(b => b.type === 'tool_use') + expect(toolBlocks).toHaveLength(0) + expect(response.stop_reason).toBe('end_turn') + }) + + it('preserves text block alongside extracted tool blocks', () => { + const completion = makeCompletion({ + content: 'Let me check:\n{"name": "file_read", "arguments": {"path": "/tmp/x"}}', + finish_reason: 'stop', + }) + + const response = fromOpenAICompletion(completion, TOOL_NAMES) + const textBlocks = response.content.filter(b => b.type === 'text') + const toolBlocks = response.content.filter(b => b.type === 'tool_use') + expect(textBlocks).toHaveLength(1) + expect(toolBlocks).toHaveLength(1) + }) + + it('does not double-extract when native tool_calls already present', () => { + // Text also contains a tool call JSON, but native tool_calls is populated. + // The fallback should NOT run. 
+ const completion = makeCompletion({ + content: '{"name": "file_read", "arguments": {"path": "/tmp/y"}}', + tool_calls: [ + { + id: 'call_native', + type: 'function', + function: { + name: 'bash', + arguments: '{"command": "ls"}', + }, + }, + ], + finish_reason: 'tool_calls', + }) + + const response = fromOpenAICompletion(completion, TOOL_NAMES) + const toolBlocks = response.content.filter(b => b.type === 'tool_use') + // Should only have the native one, not the text-extracted one + expect(toolBlocks).toHaveLength(1) + expect(toolBlocks[0]!.type === 'tool_use' && toolBlocks[0]!.id).toBe('call_native') + }) +}) diff --git a/tests/text-tool-extractor.test.ts b/tests/text-tool-extractor.test.ts new file mode 100644 index 0000000..dba185e --- /dev/null +++ b/tests/text-tool-extractor.test.ts @@ -0,0 +1,170 @@ +import { describe, it, expect } from 'vitest' +import { extractToolCallsFromText } from '../src/tool/text-tool-extractor.js' + +const TOOLS = ['bash', 'file_read', 'file_write'] + +describe('extractToolCallsFromText', () => { + // ------------------------------------------------------------------------- + // No tool calls + // ------------------------------------------------------------------------- + + it('returns empty array for empty text', () => { + expect(extractToolCallsFromText('', TOOLS)).toEqual([]) + }) + + it('returns empty array for plain text with no JSON', () => { + expect(extractToolCallsFromText('Hello, I am a helpful assistant.', TOOLS)).toEqual([]) + }) + + it('returns empty array for JSON that does not match any known tool', () => { + const text = '{"name": "unknown_tool", "arguments": {"x": 1}}' + expect(extractToolCallsFromText(text, TOOLS)).toEqual([]) + }) + + // ------------------------------------------------------------------------- + // Bare JSON + // ------------------------------------------------------------------------- + + it('extracts a bare JSON tool call with "arguments"', () => { + const text = 'I will run this 
command:\n{"name": "bash", "arguments": {"command": "ls -la"}}' + const result = extractToolCallsFromText(text, TOOLS) + expect(result).toHaveLength(1) + expect(result[0]!.type).toBe('tool_use') + expect(result[0]!.name).toBe('bash') + expect(result[0]!.input).toEqual({ command: 'ls -la' }) + expect(result[0]!.id).toMatch(/^extracted_call_/) + }) + + it('extracts a bare JSON tool call with "parameters"', () => { + const text = '{"name": "file_read", "parameters": {"path": "/tmp/test.txt"}}' + const result = extractToolCallsFromText(text, TOOLS) + expect(result).toHaveLength(1) + expect(result[0]!.name).toBe('file_read') + expect(result[0]!.input).toEqual({ path: '/tmp/test.txt' }) + }) + + it('extracts a bare JSON tool call with "input"', () => { + const text = '{"name": "bash", "input": {"command": "pwd"}}' + const result = extractToolCallsFromText(text, TOOLS) + expect(result).toHaveLength(1) + expect(result[0]!.name).toBe('bash') + expect(result[0]!.input).toEqual({ command: 'pwd' }) + }) + + it('extracts { function: { name, arguments } } shape', () => { + const text = '{"function": {"name": "bash", "arguments": {"command": "echo hi"}}}' + const result = extractToolCallsFromText(text, TOOLS) + expect(result).toHaveLength(1) + expect(result[0]!.name).toBe('bash') + expect(result[0]!.input).toEqual({ command: 'echo hi' }) + }) + + it('handles string-encoded arguments', () => { + const text = '{"name": "bash", "arguments": "{\\"command\\": \\"ls\\"}"}' + const result = extractToolCallsFromText(text, TOOLS) + expect(result).toHaveLength(1) + expect(result[0]!.input).toEqual({ command: 'ls' }) + }) + + // ------------------------------------------------------------------------- + // Multiple tool calls + // ------------------------------------------------------------------------- + + it('extracts multiple tool calls from text', () => { + const text = `Let me do two things: +{"name": "bash", "arguments": {"command": "ls"}} +And then: +{"name": "file_read", 
"arguments": {"path": "/tmp/x"}}`
+    const result = extractToolCallsFromText(text, TOOLS)
+    expect(result).toHaveLength(2)
+    expect(result[0]!.name).toBe('bash')
+    expect(result[1]!.name).toBe('file_read')
+  })
+
+  // -------------------------------------------------------------------------
+  // Code fence wrapped
+  // -------------------------------------------------------------------------
+
+  it('extracts tool call from markdown code fence', () => {
+    const text = 'Here is the tool call:\n```json\n{"name": "bash", "arguments": {"command": "whoami"}}\n```'
+    const result = extractToolCallsFromText(text, TOOLS)
+    expect(result).toHaveLength(1)
+    expect(result[0]!.name).toBe('bash')
+    expect(result[0]!.input).toEqual({ command: 'whoami' })
+  })
+
+  it('extracts tool call from code fence without language tag', () => {
+    const text = '```\n{"name": "file_write", "arguments": {"path": "/tmp/a.txt", "content": "hi"}}\n```'
+    const result = extractToolCallsFromText(text, TOOLS)
+    expect(result).toHaveLength(1)
+    expect(result[0]!.name).toBe('file_write')
+  })
+
+  // -------------------------------------------------------------------------
+  // Hermes format
+  // -------------------------------------------------------------------------
+
+  it('extracts tool call from <tool_call> tags', () => {
+    const text = '<tool_call>\n{"name": "bash", "arguments": {"command": "date"}}\n</tool_call>'
+    const result = extractToolCallsFromText(text, TOOLS)
+    expect(result).toHaveLength(1)
+    expect(result[0]!.name).toBe('bash')
+    expect(result[0]!.input).toEqual({ command: 'date' })
+  })
+
+  it('extracts multiple hermes tool calls', () => {
+    const text = `<tool_call>{"name": "bash", "arguments": {"command": "ls"}}</tool_call>
+Some text in between
+<tool_call>{"name": "file_read", "arguments": {"path": "/tmp/x"}}</tool_call>`
+    const result = extractToolCallsFromText(text, TOOLS)
+    expect(result).toHaveLength(2)
+    expect(result[0]!.name).toBe('bash')
+    expect(result[1]!.name).toBe('file_read')
+  })
+
+  // 
------------------------------------------------------------------------- + // Edge cases + // ------------------------------------------------------------------------- + + it('skips malformed JSON gracefully', () => { + const text = '{"name": "bash", "arguments": {invalid json}}' + const result = extractToolCallsFromText(text, TOOLS) + expect(result).toEqual([]) + }) + + it('skips JSON objects without a name field', () => { + const text = '{"command": "ls", "arguments": {"x": 1}}' + const result = extractToolCallsFromText(text, TOOLS) + expect(result).toEqual([]) + }) + + it('works with empty knownToolNames (no whitelist filtering)', () => { + const text = '{"name": "anything", "arguments": {"x": 1}}' + const result = extractToolCallsFromText(text, []) + expect(result).toHaveLength(1) + expect(result[0]!.name).toBe('anything') + }) + + it('generates unique IDs for each extracted call', () => { + const text = `{"name": "bash", "arguments": {"command": "a"}} +{"name": "bash", "arguments": {"command": "b"}}` + const result = extractToolCallsFromText(text, TOOLS) + expect(result).toHaveLength(2) + expect(result[0]!.id).not.toBe(result[1]!.id) + }) + + it('handles tool call with no arguments', () => { + const text = '{"name": "bash"}' + const result = extractToolCallsFromText(text, TOOLS) + expect(result).toHaveLength(1) + expect(result[0]!.input).toEqual({}) + }) + + it('handles text with nested JSON objects that are not tool calls', () => { + const text = `Here is some config: {"port": 3000, "host": "localhost"} +And a tool call: {"name": "bash", "arguments": {"command": "ls"}}` + const result = extractToolCallsFromText(text, TOOLS) + expect(result).toHaveLength(1) + expect(result[0]!.name).toBe('bash') + }) +}) From a4a1add8cae0c6553f698d44c0775000822adb6e Mon Sep 17 00:00:00 2001 From: JackChen Date: Sun, 5 Apr 2026 12:00:16 +0800 Subject: [PATCH 7/9] fix(agent): merge abort signals instead of overriding caller's signal When both timeoutMs and a caller-provided 
abortSignal were set, the timeout signal silently replaced the caller's signal. Now they are combined via mergeAbortSignals() so either source can cancel the run. Also removes dead array-handling branch in text-tool-extractor.ts (extractJSONObjects only returns objects, never arrays). --- src/agent/agent.ts | 21 ++++++++++++++++++++- src/tool/text-tool-extractor.ts | 9 --------- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/src/agent/agent.ts b/src/agent/agent.ts index 0d7f665..3290347 100644 --- a/src/agent/agent.ts +++ b/src/agent/agent.ts @@ -50,6 +50,19 @@ import { const ZERO_USAGE: TokenUsage = { input_tokens: 0, output_tokens: 0 } +/** + * Combine two {@link AbortSignal}s so that aborting either one cancels the + * returned signal. Works on Node 18+ (no `AbortSignal.any` required). + */ +function mergeAbortSignals(a: AbortSignal, b: AbortSignal): AbortSignal { + const controller = new AbortController() + if (a.aborted || b.aborted) { controller.abort(); return controller.signal } + const abort = () => controller.abort() + a.addEventListener('abort', abort, { once: true }) + b.addEventListener('abort', abort, { once: true }) + return controller.signal +} + function addUsage(a: TokenUsage, b: TokenUsage): TokenUsage { return { input_tokens: a.input_tokens + b.input_tokens, @@ -298,11 +311,17 @@ export class Agent { const timeoutSignal = this.config.timeoutMs !== undefined && this.config.timeoutMs > 0 ? AbortSignal.timeout(this.config.timeoutMs) : undefined + // Merge caller-provided abortSignal with the timeout signal so that + // either cancellation source is respected. + const callerAbort = callerOptions?.abortSignal + const effectiveAbort = timeoutSignal && callerAbort + ? mergeAbortSignals(timeoutSignal, callerAbort) + : timeoutSignal ?? callerAbort const runOptions: RunOptions = { ...callerOptions, onMessage: internalOnMessage, ...(needsRunId ? { runId: generateRunId() } : undefined), - ...(timeoutSignal ? 
{ abortSignal: timeoutSignal } : undefined), + ...(effectiveAbort ? { abortSignal: effectiveAbort } : undefined), } const result = await runner.run(messages, runOptions) diff --git a/src/tool/text-tool-extractor.ts b/src/tool/text-tool-extractor.ts index 79e3197..8c64d1d 100644 --- a/src/tool/text-tool-extractor.ts +++ b/src/tool/text-tool-extractor.ts @@ -211,15 +211,6 @@ export function extractToolCallsFromText( const results: ToolUseBlock[] = [] for (const obj of jsonObjects) { - // Handle array of tool calls - if (Array.isArray(obj)) { - for (const item of obj) { - const block = parseToolCallJSON(item, nameSet) - if (block !== null) results.push(block) - } - continue - } - const block = parseToolCallJSON(obj, nameSet) if (block !== null) results.push(block) } From a68d961379321f1e20912e8e880a8bd86d847742 Mon Sep 17 00:00:00 2001 From: JackChen Date: Sun, 5 Apr 2026 12:05:48 +0800 Subject: [PATCH 8/9] ci: use npm install instead of npm ci for cross-platform compat npm ci fails on Linux CI when package-lock.json was generated on macOS, because platform-specific optional deps (@rollup/rollup-linux-x64-gnu) are missing from the lockfile. This is a known npm bug (#4828). 
--- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6f5b577..b38c38e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,6 +18,6 @@ jobs: with: node-version: ${{ matrix.node-version }} cache: npm - - run: npm ci + - run: npm install - run: npm run lint - run: npm test From ed3753c1f497d9b14234a67740729e472195e036 Mon Sep 17 00:00:00 2001 From: JackChen Date: Sun, 5 Apr 2026 12:09:31 +0800 Subject: [PATCH 9/9] ci: fix cross-platform CI failures after Gemini adapter merge - Add @google/genai to devDependencies so types are available for lint/test in CI (stays as optional peerDependency for consumers) - Delete package-lock.json in CI before npm install to avoid Mac-generated lockfile missing Linux platform-specific rollup binaries --- .github/workflows/ci.yml | 2 +- package-lock.json | 144 +++++++++++++++------------------------ package.json | 1 + 3 files changed, 56 insertions(+), 91 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b38c38e..39c0cc7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,6 +18,6 @@ jobs: with: node-version: ${{ matrix.node-version }} cache: npm - - run: npm install + - run: rm -f package-lock.json && npm install - run: npm run lint - run: npm test diff --git a/package-lock.json b/package-lock.json index 9c25491..55afff8 100644 --- a/package-lock.json +++ b/package-lock.json @@ -14,6 +14,7 @@ "zod": "^3.23.0" }, "devDependencies": { + "@google/genai": "^1.48.0", "@types/node": "^22.0.0", "tsx": "^4.21.0", "typescript": "^5.6.0", @@ -486,9 +487,8 @@ "version": "1.48.0", "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.48.0.tgz", "integrity": "sha512-plonYK4ML2PrxsRD9SeqmFt76eREWkQdPCglOA6aYDzL1AAbE+7PUnT54SvpWGfws13L0AZEqGSpL7+1IPnTxQ==", + "dev": true, "license": "Apache-2.0", - "optional": true, - "peer": true, "dependencies": { 
"google-auth-library": "^10.3.0", "p-retry": "^4.6.2", @@ -518,41 +518,36 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==", - "license": "BSD-3-Clause", - "optional": true, - "peer": true + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/base64": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==", - "license": "BSD-3-Clause", - "optional": true, - "peer": true + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/codegen": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==", - "license": "BSD-3-Clause", - "optional": true, - "peer": true + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/eventemitter": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==", - "license": "BSD-3-Clause", - "optional": true, - "peer": true + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/fetch": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + "dev": true, "license": "BSD-3-Clause", - "optional": true, - "peer": true, "dependencies": { "@protobufjs/aspromise": "^1.1.1", "@protobufjs/inquire": "^1.1.0" @@ -562,41 +557,36 @@ "version": "1.0.2", "resolved": 
"https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==", - "license": "BSD-3-Clause", - "optional": true, - "peer": true + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/inquire": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==", - "license": "BSD-3-Clause", - "optional": true, - "peer": true + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/path": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==", - "license": "BSD-3-Clause", - "optional": true, - "peer": true + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/pool": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==", - "license": "BSD-3-Clause", - "optional": true, - "peer": true + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/@protobufjs/utf8": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==", - "license": "BSD-3-Clause", - "optional": true, - "peer": true + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/@rollup/rollup-darwin-arm64": { "version": "4.60.1", @@ -642,9 +632,8 @@ "version": "0.12.0", "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", "integrity": 
"sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", - "license": "MIT", - "optional": true, - "peer": true + "dev": true, + "license": "MIT" }, "node_modules/@vitest/expect": { "version": "2.1.9", @@ -775,9 +764,8 @@ "version": "7.1.4", "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "dev": true, "license": "MIT", - "optional": true, - "peer": true, "engines": { "node": ">= 14" } @@ -814,6 +802,7 @@ "version": "1.5.1", "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "dev": true, "funding": [ { "type": "github", @@ -828,17 +817,14 @@ "url": "https://feross.org/support" } ], - "license": "MIT", - "optional": true, - "peer": true + "license": "MIT" }, "node_modules/bignumber.js": { "version": "9.3.1", "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.3.1.tgz", "integrity": "sha512-Ko0uX15oIUS7wJ3Rb30Fs6SkVbLmPBAKdlm7q9+ak9bbIeFf0MwuBsQV6z7+X768/cHsfg+WlysDWJcmthjsjQ==", + "dev": true, "license": "MIT", - "optional": true, - "peer": true, "engines": { "node": "*" } @@ -847,9 +833,8 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==", - "license": "BSD-3-Clause", - "optional": true, - "peer": true + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/cac": { "version": "6.7.14", @@ -917,9 +902,8 @@ "version": "4.0.1", "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", "integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==", + "dev": true, 
"license": "MIT", - "optional": true, - "peer": true, "engines": { "node": ">= 12" } @@ -928,7 +912,7 @@ "version": "4.4.3", "resolved": "https://registry.npmmirror.com/debug/-/debug-4.4.3.tgz", "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", - "devOptional": true, + "dev": true, "license": "MIT", "dependencies": { "ms": "^2.1.3" @@ -979,9 +963,8 @@ "version": "1.0.11", "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", + "dev": true, "license": "Apache-2.0", - "optional": true, - "peer": true, "dependencies": { "safe-buffer": "^5.0.1" } @@ -1484,14 +1467,14 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", - "license": "MIT", - "optional": true, - "peer": true + "dev": true, + "license": "MIT" }, "node_modules/fetch-blob": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.2.0.tgz", "integrity": "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==", + "dev": true, "funding": [ { "type": "github", @@ -1503,8 +1486,6 @@ } ], "license": "MIT", - "optional": true, - "peer": true, "dependencies": { "node-domexception": "^1.0.0", "web-streams-polyfill": "^3.0.3" @@ -1517,9 +1498,8 @@ "version": "3.3.3", "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + "dev": true, "license": "MIT", - "optional": true, - "peer": true, "engines": { "node": ">= 8" } @@ -1563,9 +1543,8 @@ "version": "4.0.10", "resolved": 
"https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", + "dev": true, "license": "MIT", - "optional": true, - "peer": true, "dependencies": { "fetch-blob": "^3.1.2" }, @@ -1601,9 +1580,8 @@ "version": "7.1.4", "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-7.1.4.tgz", "integrity": "sha512-bTIgTsM2bWn3XklZISBTQX7ZSddGW+IO3bMdGaemHZ3tbqExMENHLx6kKZ/KlejgrMtj8q7wBItt51yegqalrA==", + "dev": true, "license": "Apache-2.0", - "optional": true, - "peer": true, "dependencies": { "extend": "^3.0.2", "https-proxy-agent": "^7.0.1", @@ -1617,9 +1595,8 @@ "version": "3.3.2", "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz", "integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==", + "dev": true, "license": "MIT", - "optional": true, - "peer": true, "dependencies": { "data-uri-to-buffer": "^4.0.0", "fetch-blob": "^3.1.4", @@ -1637,9 +1614,8 @@ "version": "8.1.2", "resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-8.1.2.tgz", "integrity": "sha512-zV/5HKTfCeKWnxG0Dmrw51hEWFGfcF2xiXqcA3+J90WDuP0SvoiSO5ORvcBsifmx/FoIjgQN3oNOGaQ5PhLFkg==", + "dev": true, "license": "Apache-2.0", - "optional": true, - "peer": true, "dependencies": { "gaxios": "^7.0.0", "google-logging-utils": "^1.0.0", @@ -1703,9 +1679,8 @@ "version": "10.6.2", "resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-10.6.2.tgz", "integrity": "sha512-e27Z6EThmVNNvtYASwQxose/G57rkRuaRbQyxM2bvYLLX/GqWZ5chWq2EBoUchJbCc57eC9ArzO5wMsEmWftCw==", + "dev": true, "license": "Apache-2.0", - "optional": true, - "peer": true, "dependencies": { "base64-js": "^1.3.0", "ecdsa-sig-formatter": "^1.0.11", @@ -1722,9 +1697,8 @@ "version": "1.1.3", "resolved": "https://registry.npmjs.org/google-logging-utils/-/google-logging-utils-1.1.3.tgz", "integrity": 
"sha512-eAmLkjDjAFCVXg7A1unxHsLf961m6y17QFqXqAXGj/gVkKFrEICfStRfwUlGNfeCEjNRa32JEWOUTlYXPyyKvA==", + "dev": true, "license": "Apache-2.0", - "optional": true, - "peer": true, "engines": { "node": ">=14" } @@ -1784,9 +1758,8 @@ "version": "7.0.6", "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "dev": true, "license": "MIT", - "optional": true, - "peer": true, "dependencies": { "agent-base": "^7.1.2", "debug": "4" @@ -1808,9 +1781,8 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz", "integrity": "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==", + "dev": true, "license": "MIT", - "optional": true, - "peer": true, "dependencies": { "bignumber.js": "^9.0.0" } @@ -1819,9 +1791,8 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.1.tgz", "integrity": "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg==", + "dev": true, "license": "MIT", - "optional": true, - "peer": true, "dependencies": { "buffer-equal-constant-time": "^1.0.1", "ecdsa-sig-formatter": "1.0.11", @@ -1832,9 +1803,8 @@ "version": "4.0.1", "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.1.tgz", "integrity": "sha512-EKI/M/yqPncGUUh44xz0PxSidXFr/+r0pA70+gIYhjv+et7yxM+s29Y+VGDkovRofQem0fs7Uvf4+YmAdyRduA==", + "dev": true, "license": "MIT", - "optional": true, - "peer": true, "dependencies": { "jwa": "^2.0.1", "safe-buffer": "^5.0.1" @@ -1844,9 +1814,8 @@ "version": "5.3.2", "resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz", "integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==", - "license": "Apache-2.0", - "optional": true, - "peer": true + "dev": true, + "license": "Apache-2.0" }, "node_modules/loupe": { "version": 
"3.2.1", @@ -2009,9 +1978,8 @@ "version": "4.6.2", "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", + "dev": true, "license": "MIT", - "optional": true, - "peer": true, "dependencies": { "@types/retry": "0.12.0", "retry": "^0.13.1" @@ -2077,10 +2045,9 @@ "version": "7.5.4", "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.4.tgz", "integrity": "sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==", + "dev": true, "hasInstallScript": true, "license": "BSD-3-Clause", - "optional": true, - "peer": true, "dependencies": { "@protobufjs/aspromise": "^1.1.2", "@protobufjs/base64": "^1.1.2", @@ -2113,9 +2080,8 @@ "version": "0.13.1", "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", + "dev": true, "license": "MIT", - "optional": true, - "peer": true, "engines": { "node": ">= 4" } @@ -2169,6 +2135,7 @@ "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, "funding": [ { "type": "github", @@ -2183,9 +2150,7 @@ "url": "https://feross.org/support" } ], - "license": "MIT", - "optional": true, - "peer": true + "license": "MIT" }, "node_modules/siginfo": { "version": "2.0.0", @@ -2562,9 +2527,8 @@ "version": "8.20.0", "resolved": "https://registry.npmjs.org/ws/-/ws-8.20.0.tgz", "integrity": "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA==", + "devOptional": true, "license": "MIT", - "optional": true, - "peer": true, "engines": { "node": ">=10.0.0" }, diff --git a/package.json b/package.json index c14a184..94d8a7c 100644 --- a/package.json +++ 
b/package.json @@ -50,6 +50,7 @@ } }, "devDependencies": { + "@google/genai": "^1.48.0", "@types/node": "^22.0.0", "tsx": "^4.21.0", "typescript": "^5.6.0",