feat: add MiniMax as a first-class LLM provider
Add MiniMaxAdapter implementing LLMAdapter, backed by the MiniMax OpenAI-compatible Chat Completions API (api.minimax.io/v1). Changes: - src/llm/minimax.ts — full MiniMaxAdapter with chat() + stream(), temperature clamping to (0, 1], tool-call round-trip support - src/llm/minimax.test.ts — 22 unit tests (mocked) covering all paths - src/llm/minimax.integration.test.ts — 3 live-API integration tests - src/llm/adapter.ts — add 'minimax' to SupportedProvider union + factory - src/types.ts — add 'minimax' to provider field in AgentConfig / OrchestratorConfig - src/index.ts — re-export MiniMaxAdapter for library consumers - README.md / README_zh.md — document MINIMAX_API_KEY, add MiniMax to architecture diagram and multi-model team example Set MINIMAX_API_KEY to use MiniMax-M2.7 (204K context) in any agent.
This commit is contained in:
parent
376785c3fa
commit
e3695873c7
15
README.md
15
README.md
|
|
@ -21,7 +21,7 @@ Build AI agent teams that work together. One agent plans, another implements, a
|
|||
npm install @jackchen_me/open-multi-agent
|
||||
```
|
||||
|
||||
Set `ANTHROPIC_API_KEY` (and optionally `OPENAI_API_KEY`) in your environment.
|
||||
Set `ANTHROPIC_API_KEY` (and optionally `OPENAI_API_KEY` or `MINIMAX_API_KEY`) in your environment.
|
||||
|
||||
```typescript
|
||||
import { OpenMultiAgent } from '@jackchen_me/open-multi-agent'
|
||||
|
|
@ -179,9 +179,17 @@ const gptAgent: AgentConfig = {
|
|||
tools: ['bash', 'file_read', 'file_write'],
|
||||
}
|
||||
|
||||
const minimaxAgent: AgentConfig = {
|
||||
name: 'reviewer',
|
||||
model: 'MiniMax-M2.7',
|
||||
provider: 'minimax',
|
||||
systemPrompt: 'You review code for correctness and clarity.',
|
||||
tools: ['file_read', 'grep'],
|
||||
}
|
||||
|
||||
const team = orchestrator.createTeam('mixed-team', {
|
||||
name: 'mixed-team',
|
||||
agents: [claudeAgent, gptAgent],
|
||||
agents: [claudeAgent, gptAgent, minimaxAgent],
|
||||
sharedMemory: true,
|
||||
})
|
||||
|
||||
|
|
@ -246,6 +254,7 @@ for await (const event of agent.stream('Explain monads in two sentences.')) {
|
|||
│ - prompt() │───►│ LLMAdapter │
|
||||
│ - stream() │ │ - AnthropicAdapter │
|
||||
└────────┬──────────┘ │ - OpenAIAdapter │
|
||||
│ │ - MiniMaxAdapter │
|
||||
│ └──────────────────────┘
|
||||
┌────────▼──────────┐
|
||||
│ AgentRunner │ ┌──────────────────────┐
|
||||
|
|
@ -269,7 +278,7 @@ for await (const event of agent.stream('Explain monads in two sentences.')) {
|
|||
|
||||
Issues, feature requests, and PRs are welcome. Some areas where contributions would be especially valuable:
|
||||
|
||||
- **LLM Adapters** — Ollama, llama.cpp, vLLM, Gemini. The `LLMAdapter` interface requires just two methods: `chat()` and `stream()`.
|
||||
- **LLM Adapters** — MiniMax is now supported out of the box. Additional adapters for Ollama, llama.cpp, vLLM, and Gemini are welcome. The `LLMAdapter` interface requires just two methods: `chat()` and `stream()`.
|
||||
- **Examples** — Real-world workflows and use cases.
|
||||
- **Documentation** — Guides, tutorials, and API docs.
|
||||
|
||||
|
|
|
|||
17
README_zh.md
17
README_zh.md
|
|
@ -21,7 +21,7 @@
|
|||
npm install @jackchen_me/open-multi-agent
|
||||
```
|
||||
|
||||
在环境变量中设置 `ANTHROPIC_API_KEY`(以及可选的 `OPENAI_API_KEY`)。
|
||||
在环境变量中设置 `ANTHROPIC_API_KEY`(以及可选的 `OPENAI_API_KEY` 或 `MINIMAX_API_KEY`)。
|
||||
|
||||
```typescript
|
||||
import { OpenMultiAgent } from '@jackchen_me/open-multi-agent'
|
||||
|
|
@ -160,7 +160,7 @@ const result = await agent.run('Find the three most recent TypeScript releases.'
|
|||
</details>
|
||||
|
||||
<details>
|
||||
<summary><b>多模型团队</b> — 在一个工作流中混合使用 Claude 和 GPT</summary>
|
||||
<summary><b>多模型团队</b> — 在一个工作流中混合使用 Claude、GPT 和 MiniMax</summary>
|
||||
|
||||
```typescript
|
||||
const claudeAgent: AgentConfig = {
|
||||
|
|
@ -179,9 +179,17 @@ const gptAgent: AgentConfig = {
|
|||
tools: ['bash', 'file_read', 'file_write'],
|
||||
}
|
||||
|
||||
const minimaxAgent: AgentConfig = {
|
||||
name: 'reviewer',
|
||||
model: 'MiniMax-M2.7',
|
||||
provider: 'minimax',
|
||||
systemPrompt: 'You review code for correctness and clarity.',
|
||||
tools: ['file_read', 'grep'],
|
||||
}
|
||||
|
||||
const team = orchestrator.createTeam('mixed-team', {
|
||||
name: 'mixed-team',
|
||||
agents: [claudeAgent, gptAgent],
|
||||
agents: [claudeAgent, gptAgent, minimaxAgent],
|
||||
sharedMemory: true,
|
||||
})
|
||||
|
||||
|
|
@ -246,6 +254,7 @@ for await (const event of agent.stream('Explain monads in two sentences.')) {
|
|||
│ - prompt() │───►│ LLMAdapter │
|
||||
│ - stream() │ │ - AnthropicAdapter │
|
||||
└────────┬──────────┘ │ - OpenAIAdapter │
|
||||
│ │ - MiniMaxAdapter │
|
||||
│ └──────────────────────┘
|
||||
┌────────▼──────────┐
|
||||
│ AgentRunner │ ┌──────────────────────┐
|
||||
|
|
@ -269,7 +278,7 @@ for await (const event of agent.stream('Explain monads in two sentences.')) {
|
|||
|
||||
欢迎提 Issue、功能需求和 PR。以下方向的贡献尤其有价值:
|
||||
|
||||
- **LLM 适配器** — Ollama、llama.cpp、vLLM、Gemini。`LLMAdapter` 接口只需实现两个方法:`chat()` 和 `stream()`。
|
||||
- **LLM 适配器** — MiniMax 已原生支持。欢迎继续贡献 Ollama、llama.cpp、vLLM、Gemini 等适配器。`LLMAdapter` 接口只需实现两个方法:`chat()` 和 `stream()`。
|
||||
- **示例** — 真实场景的工作流和用例。
|
||||
- **文档** — 指南、教程和 API 文档。
|
||||
|
||||
|
|
|
|||
|
|
@ -105,6 +105,7 @@ export {
|
|||
|
||||
export { createAdapter } from './llm/adapter.js'
|
||||
export type { SupportedProvider } from './llm/adapter.js'
|
||||
export { MiniMaxAdapter } from './llm/minimax.js'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Memory
|
||||
|
|
|
|||
|
|
@ -37,13 +37,14 @@ import type { LLMAdapter } from '../types.js'
|
|||
* Additional providers can be integrated by implementing {@link LLMAdapter}
|
||||
* directly and bypassing this factory.
|
||||
*/
|
||||
export type SupportedProvider = 'anthropic' | 'openai'
|
||||
export type SupportedProvider = 'anthropic' | 'openai' | 'minimax'
|
||||
|
||||
/**
|
||||
* Instantiate the appropriate {@link LLMAdapter} for the given provider.
|
||||
*
|
||||
* API keys fall back to the standard environment variables
|
||||
* (`ANTHROPIC_API_KEY` / `OPENAI_API_KEY`) when not supplied explicitly.
|
||||
* (`ANTHROPIC_API_KEY` / `OPENAI_API_KEY` / `MINIMAX_API_KEY`) when not
|
||||
* supplied explicitly.
|
||||
*
|
||||
* Adapters are imported lazily so that projects using only one provider
|
||||
* are not forced to install the SDK for the other.
|
||||
|
|
@ -65,6 +66,10 @@ export async function createAdapter(
|
|||
const { OpenAIAdapter } = await import('./openai.js')
|
||||
return new OpenAIAdapter(apiKey)
|
||||
}
|
||||
case 'minimax': {
|
||||
const { MiniMaxAdapter } = await import('./minimax.js')
|
||||
return new MiniMaxAdapter(apiKey)
|
||||
}
|
||||
default: {
|
||||
// The `never` cast here makes TypeScript enforce exhaustiveness.
|
||||
const _exhaustive: never = provider
|
||||
|
|
|
|||
|
|
@ -0,0 +1,326 @@
|
|||
/**
|
||||
* @fileoverview Tests for the MiniMax LLM adapter.
|
||||
*
|
||||
* Unit tests exercise temperature clamping, message conversion, and response
|
||||
* normalisation without making real API calls (the OpenAI client is mocked).
|
||||
*
|
||||
* Integration tests hit the live MiniMax API and are skipped automatically
|
||||
* when MINIMAX_API_KEY is not set.
|
||||
*/
|
||||
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest'
|
||||
import type { MockInstance } from 'vitest'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers — re-implement the clamping logic so tests don't import internals
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function clampTemperature(temperature: number | undefined): number | undefined {
|
||||
if (temperature === undefined) return undefined
|
||||
if (temperature <= 0) return 0.01
|
||||
if (temperature > 1.0) return 1.0
|
||||
return temperature
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Unit tests — temperature clamping
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('clampTemperature', () => {
|
||||
it('returns undefined when temperature is undefined', () => {
|
||||
expect(clampTemperature(undefined)).toBeUndefined()
|
||||
})
|
||||
|
||||
it('clamps 0 to 0.01', () => {
|
||||
expect(clampTemperature(0)).toBe(0.01)
|
||||
})
|
||||
|
||||
it('clamps negative values to 0.01', () => {
|
||||
expect(clampTemperature(-1)).toBe(0.01)
|
||||
expect(clampTemperature(-0.5)).toBe(0.01)
|
||||
})
|
||||
|
||||
it('clamps values above 1 to 1.0', () => {
|
||||
expect(clampTemperature(1.5)).toBe(1.0)
|
||||
expect(clampTemperature(2)).toBe(1.0)
|
||||
})
|
||||
|
||||
it('passes valid values through unchanged', () => {
|
||||
expect(clampTemperature(0.5)).toBe(0.5)
|
||||
expect(clampTemperature(0.01)).toBe(0.01)
|
||||
expect(clampTemperature(1.0)).toBe(1.0)
|
||||
expect(clampTemperature(0.7)).toBe(0.7)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Unit tests — MiniMaxAdapter (mocked OpenAI client)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// We mock the openai module before importing MiniMaxAdapter so the adapter
|
||||
// never creates a real HTTP client.
|
||||
const mockCreate = vi.fn()
|
||||
|
||||
vi.mock('openai', () => {
|
||||
return {
|
||||
default: vi.fn().mockImplementation(() => ({
|
||||
chat: {
|
||||
completions: {
|
||||
create: mockCreate,
|
||||
},
|
||||
},
|
||||
})),
|
||||
}
|
||||
})
|
||||
|
||||
// Import after mocking.
|
||||
const { MiniMaxAdapter } = await import('./minimax.js')
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Shared fixtures
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** A minimal chat completion response from the MiniMax API. */
|
||||
function makeCompletion(text: string, model = 'MiniMax-M2.7') {
|
||||
return {
|
||||
id: 'cmpl-test',
|
||||
model,
|
||||
choices: [
|
||||
{
|
||||
index: 0,
|
||||
message: { role: 'assistant', content: text, tool_calls: undefined },
|
||||
finish_reason: 'stop',
|
||||
},
|
||||
],
|
||||
usage: { prompt_tokens: 10, completion_tokens: 5 },
|
||||
}
|
||||
}
|
||||
|
||||
/** A minimal user message. */
|
||||
function userMsg(text: string) {
|
||||
return { role: 'user' as const, content: [{ type: 'text' as const, text }] }
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// chat() unit tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('MiniMaxAdapter.chat()', () => {
|
||||
let adapter: InstanceType<typeof MiniMaxAdapter>
|
||||
|
||||
beforeEach(() => {
|
||||
mockCreate.mockReset()
|
||||
adapter = new MiniMaxAdapter('test-key')
|
||||
})
|
||||
|
||||
it('calls the API with the correct model and messages', async () => {
|
||||
mockCreate.mockResolvedValue(makeCompletion('hello'))
|
||||
|
||||
const messages = [userMsg('hi')]
|
||||
const response = await adapter.chat(messages, { model: 'MiniMax-M2.7' })
|
||||
|
||||
expect(mockCreate).toHaveBeenCalledOnce()
|
||||
const call = mockCreate.mock.calls[0]?.[0]
|
||||
expect(call.model).toBe('MiniMax-M2.7')
|
||||
expect(call.stream).toBe(false)
|
||||
expect(response.content[0]).toMatchObject({ type: 'text', text: 'hello' })
|
||||
})
|
||||
|
||||
it('clamps temperature=0 to 0.01 before sending', async () => {
|
||||
mockCreate.mockResolvedValue(makeCompletion('ok'))
|
||||
await adapter.chat([userMsg('hi')], { model: 'MiniMax-M2.7', temperature: 0 })
|
||||
|
||||
const call = mockCreate.mock.calls[0]?.[0]
|
||||
expect(call.temperature).toBe(0.01)
|
||||
})
|
||||
|
||||
it('clamps temperature=2 to 1.0 before sending', async () => {
|
||||
mockCreate.mockResolvedValue(makeCompletion('ok'))
|
||||
await adapter.chat([userMsg('hi')], { model: 'MiniMax-M2.7', temperature: 2 })
|
||||
|
||||
const call = mockCreate.mock.calls[0]?.[0]
|
||||
expect(call.temperature).toBe(1.0)
|
||||
})
|
||||
|
||||
it('omits temperature when not provided', async () => {
|
||||
mockCreate.mockResolvedValue(makeCompletion('ok'))
|
||||
await adapter.chat([userMsg('hi')], { model: 'MiniMax-M2.7' })
|
||||
|
||||
const call = mockCreate.mock.calls[0]?.[0]
|
||||
expect(call.temperature).toBeUndefined()
|
||||
})
|
||||
|
||||
it('prepends a system message when systemPrompt is provided', async () => {
|
||||
mockCreate.mockResolvedValue(makeCompletion('ok'))
|
||||
await adapter.chat([userMsg('hi')], {
|
||||
model: 'MiniMax-M2.7',
|
||||
systemPrompt: 'Be concise.',
|
||||
})
|
||||
|
||||
const call = mockCreate.mock.calls[0]?.[0]
|
||||
expect(call.messages[0]).toMatchObject({ role: 'system', content: 'Be concise.' })
|
||||
expect(call.messages[1]).toMatchObject({ role: 'user', content: 'hi' })
|
||||
})
|
||||
|
||||
it('normalises finish_reason "stop" to "end_turn"', async () => {
|
||||
mockCreate.mockResolvedValue(makeCompletion('ok'))
|
||||
const response = await adapter.chat([userMsg('hi')], { model: 'MiniMax-M2.7' })
|
||||
expect(response.stop_reason).toBe('end_turn')
|
||||
})
|
||||
|
||||
it('normalises finish_reason "tool_calls" to "tool_use"', async () => {
|
||||
const completion = makeCompletion('')
|
||||
completion.choices[0]!.finish_reason = 'tool_calls'
|
||||
completion.choices[0]!.message.tool_calls = [
|
||||
{
|
||||
id: 'call_1',
|
||||
type: 'function',
|
||||
function: { name: 'my_tool', arguments: '{"x":1}' },
|
||||
},
|
||||
] as any
|
||||
mockCreate.mockResolvedValue(completion)
|
||||
|
||||
const response = await adapter.chat([userMsg('hi')], { model: 'MiniMax-M2.7' })
|
||||
expect(response.stop_reason).toBe('tool_use')
|
||||
const toolUseBlock = response.content.find((b) => b.type === 'tool_use')
|
||||
expect(toolUseBlock).toMatchObject({
|
||||
type: 'tool_use',
|
||||
name: 'my_tool',
|
||||
input: { x: 1 },
|
||||
})
|
||||
})
|
||||
|
||||
it('returns usage token counts', async () => {
|
||||
mockCreate.mockResolvedValue(makeCompletion('ok'))
|
||||
const response = await adapter.chat([userMsg('hi')], { model: 'MiniMax-M2.7' })
|
||||
expect(response.usage.input_tokens).toBe(10)
|
||||
expect(response.usage.output_tokens).toBe(5)
|
||||
})
|
||||
|
||||
it('includes tools in the API call when provided', async () => {
|
||||
mockCreate.mockResolvedValue(makeCompletion('ok'))
|
||||
const tools = [
|
||||
{
|
||||
name: 'echo',
|
||||
description: 'Echo input',
|
||||
inputSchema: { type: 'object', properties: { text: { type: 'string' } } },
|
||||
},
|
||||
]
|
||||
|
||||
await adapter.chat([userMsg('hi')], { model: 'MiniMax-M2.7', tools })
|
||||
const call = mockCreate.mock.calls[0]?.[0]
|
||||
expect(call.tools).toHaveLength(1)
|
||||
expect(call.tools[0].function.name).toBe('echo')
|
||||
})
|
||||
|
||||
it('exposes adapter name as "minimax"', () => {
|
||||
expect(adapter.name).toBe('minimax')
|
||||
})
|
||||
|
||||
it('throws when the API returns no choices', async () => {
|
||||
mockCreate.mockResolvedValue({ id: 'x', model: 'MiniMax-M2.7', choices: [], usage: null })
|
||||
await expect(
|
||||
adapter.chat([userMsg('hi')], { model: 'MiniMax-M2.7' }),
|
||||
).rejects.toThrow('no choices')
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// adapter factory unit test
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('createAdapter("minimax")', () => {
|
||||
it('returns a MiniMaxAdapter instance', async () => {
|
||||
const { createAdapter } = await import('./adapter.js')
|
||||
const adapter = await createAdapter('minimax', 'test-key')
|
||||
expect(adapter.name).toBe('minimax')
|
||||
})
|
||||
|
||||
it('throws for an unknown provider', async () => {
|
||||
const { createAdapter } = await import('./adapter.js')
|
||||
await expect(
|
||||
createAdapter('unknown' as any),
|
||||
).rejects.toThrow('Unsupported LLM provider')
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// stream() unit tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('MiniMaxAdapter.stream()', () => {
|
||||
let adapter: InstanceType<typeof MiniMaxAdapter>
|
||||
|
||||
beforeEach(() => {
|
||||
mockCreate.mockReset()
|
||||
adapter = new MiniMaxAdapter('test-key')
|
||||
})
|
||||
|
||||
/** Build an async iterable that yields the supplied chunks in order. */
|
||||
function makeStream(chunks: object[]) {
|
||||
return {
|
||||
[Symbol.asyncIterator]: async function* () {
|
||||
for (const chunk of chunks) yield chunk
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
it('yields text events and a final done event', async () => {
|
||||
const chunks = [
|
||||
{ id: 's1', model: 'MiniMax-M2.7', choices: [{ delta: { content: 'hel' }, finish_reason: null }], usage: null },
|
||||
{ id: 's1', model: 'MiniMax-M2.7', choices: [{ delta: { content: 'lo' }, finish_reason: null }], usage: null },
|
||||
{ id: 's1', model: 'MiniMax-M2.7', choices: [{ delta: {}, finish_reason: 'stop' }], usage: { prompt_tokens: 5, completion_tokens: 3 } },
|
||||
]
|
||||
mockCreate.mockResolvedValue(makeStream(chunks))
|
||||
|
||||
const events: any[] = []
|
||||
for await (const event of adapter.stream([userMsg('hi')], { model: 'MiniMax-M2.7' })) {
|
||||
events.push(event)
|
||||
}
|
||||
|
||||
const textEvents = events.filter((e) => e.type === 'text')
|
||||
const doneEvent = events.find((e) => e.type === 'done')
|
||||
|
||||
expect(textEvents.map((e) => e.data).join('')).toBe('hello')
|
||||
expect(doneEvent).toBeDefined()
|
||||
expect((doneEvent.data as any).stop_reason).toBe('end_turn')
|
||||
expect((doneEvent.data as any).usage.input_tokens).toBe(5)
|
||||
})
|
||||
|
||||
it('yields an error event when the API throws', async () => {
|
||||
mockCreate.mockRejectedValue(new Error('network failure'))
|
||||
|
||||
const events: any[] = []
|
||||
for await (const event of adapter.stream([userMsg('hi')], { model: 'MiniMax-M2.7' })) {
|
||||
events.push(event)
|
||||
}
|
||||
|
||||
expect(events[0]).toMatchObject({ type: 'error' })
|
||||
expect((events[0].data as Error).message).toContain('network failure')
|
||||
})
|
||||
|
||||
it('clamps temperature before streaming', async () => {
|
||||
const chunks = [
|
||||
{ id: 's1', model: 'MiniMax-M2.7', choices: [{ delta: { content: 'ok' }, finish_reason: 'stop' }], usage: { prompt_tokens: 1, completion_tokens: 1 } },
|
||||
]
|
||||
mockCreate.mockResolvedValue(makeStream(chunks))
|
||||
|
||||
// eslint-disable-next-line @typescript-eslint/no-unused-vars
|
||||
for await (const _ of adapter.stream([userMsg('hi')], { model: 'MiniMax-M2.7', temperature: 0 })) {
|
||||
// drain
|
||||
}
|
||||
|
||||
const call = mockCreate.mock.calls[0]?.[0]
|
||||
expect(call.temperature).toBe(0.01)
|
||||
expect(call.stream).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Integration tests — run in a separate file (minimax.integration.test.ts)
|
||||
// to avoid conflicts with the global vi.mock('openai') above.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe.skip('MiniMaxAdapter (integration — see minimax.integration.test.ts)', () => {
|
||||
it('placeholder', () => { /* no-op */ })
|
||||
})
|
||||
|
|
@ -0,0 +1,478 @@
|
|||
/**
|
||||
* @fileoverview MiniMax adapter implementing {@link LLMAdapter}.
|
||||
*
|
||||
* MiniMax provides an OpenAI-compatible Chat Completions API at
|
||||
* `https://api.minimax.io/v1`, so this adapter delegates to the `openai` SDK
|
||||
* with a custom `baseURL` and handles MiniMax-specific constraints:
|
||||
*
|
||||
* - **Temperature** must be in the open interval (0, 1]. A caller-supplied
|
||||
* value of `0` is clamped to `0.01` (deterministic-ish) and values above
|
||||
* `1` are clamped to `1`. When temperature is omitted the API default
|
||||
* applies (the SDK omits the field rather than sending `undefined`).
|
||||
*
|
||||
* API key resolution order:
|
||||
* 1. `apiKey` constructor argument
|
||||
* 2. `MINIMAX_API_KEY` environment variable
|
||||
*
|
||||
* Supported models (204 K context window):
|
||||
* - `MiniMax-M2.7` — latest, highest capability
|
||||
* - `MiniMax-M2.7-highspeed` — faster, lower latency
|
||||
* - `MiniMax-M2.5` — previous generation
|
||||
* - `MiniMax-M2.5-highspeed` — previous generation, faster
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* import { MiniMaxAdapter } from './minimax.js'
|
||||
*
|
||||
* const adapter = new MiniMaxAdapter()
|
||||
* const response = await adapter.chat(messages, {
|
||||
* model: 'MiniMax-M2.7',
|
||||
* maxTokens: 2048,
|
||||
* })
|
||||
* ```
|
||||
*/
|
||||
|
||||
import OpenAI from 'openai'
|
||||
import type {
|
||||
ChatCompletion,
|
||||
ChatCompletionAssistantMessageParam,
|
||||
ChatCompletionChunk,
|
||||
ChatCompletionMessageParam,
|
||||
ChatCompletionMessageToolCall,
|
||||
ChatCompletionTool,
|
||||
ChatCompletionToolMessageParam,
|
||||
ChatCompletionUserMessageParam,
|
||||
} from 'openai/resources/chat/completions/index.js'
|
||||
|
||||
import type {
|
||||
ContentBlock,
|
||||
LLMAdapter,
|
||||
LLMChatOptions,
|
||||
LLMMessage,
|
||||
LLMResponse,
|
||||
LLMStreamOptions,
|
||||
LLMToolDef,
|
||||
StreamEvent,
|
||||
TextBlock,
|
||||
ToolUseBlock,
|
||||
} from '../types.js'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Constants
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Base URL for the MiniMax OpenAI-compatible Chat Completions API. */
|
||||
const MINIMAX_BASE_URL = 'https://api.minimax.io/v1'
|
||||
|
||||
/**
|
||||
* MiniMax requires temperature in the open interval (0, 1].
|
||||
* Clamp zero → this floor and values above 1 → 1.
|
||||
*/
|
||||
const TEMP_FLOOR = 0.01
|
||||
const TEMP_CEIL = 1.0
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Temperature helper
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Clamp a temperature value to MiniMax's accepted range (0, 1].
|
||||
*
|
||||
* - `undefined` → `undefined` (let the API use its default)
|
||||
* - `<= 0` → {@link TEMP_FLOOR} (0.01)
|
||||
* - `> 1` → {@link TEMP_CEIL} (1.0)
|
||||
* - otherwise → unchanged
|
||||
*/
|
||||
function clampTemperature(temperature: number | undefined): number | undefined {
|
||||
if (temperature === undefined) return undefined
|
||||
if (temperature <= 0) return TEMP_FLOOR
|
||||
if (temperature > TEMP_CEIL) return TEMP_CEIL
|
||||
return temperature
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Internal helpers — framework → OpenAI wire format
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function toMiniMaxTool(tool: LLMToolDef): ChatCompletionTool {
|
||||
return {
|
||||
type: 'function',
|
||||
function: {
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
parameters: tool.inputSchema as Record<string, unknown>,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
function hasToolResults(msg: LLMMessage): boolean {
|
||||
return msg.content.some((b) => b.type === 'tool_result')
|
||||
}
|
||||
|
||||
function toMiniMaxMessages(messages: LLMMessage[]): ChatCompletionMessageParam[] {
|
||||
const result: ChatCompletionMessageParam[] = []
|
||||
|
||||
for (const msg of messages) {
|
||||
if (msg.role === 'assistant') {
|
||||
result.push(toMiniMaxAssistantMessage(msg))
|
||||
} else {
|
||||
if (!hasToolResults(msg)) {
|
||||
result.push(toMiniMaxUserMessage(msg))
|
||||
} else {
|
||||
const nonToolBlocks = msg.content.filter((b) => b.type !== 'tool_result')
|
||||
if (nonToolBlocks.length > 0) {
|
||||
result.push(toMiniMaxUserMessage({ role: 'user', content: nonToolBlocks }))
|
||||
}
|
||||
|
||||
for (const block of msg.content) {
|
||||
if (block.type === 'tool_result') {
|
||||
const toolMsg: ChatCompletionToolMessageParam = {
|
||||
role: 'tool',
|
||||
tool_call_id: block.tool_use_id,
|
||||
content: block.content,
|
||||
}
|
||||
result.push(toolMsg)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
function toMiniMaxUserMessage(msg: LLMMessage): ChatCompletionUserMessageParam {
|
||||
if (msg.content.length === 1 && msg.content[0]?.type === 'text') {
|
||||
return { role: 'user', content: msg.content[0].text }
|
||||
}
|
||||
|
||||
type ContentPart = OpenAI.Chat.ChatCompletionContentPartText | OpenAI.Chat.ChatCompletionContentPartImage
|
||||
const parts: ContentPart[] = []
|
||||
|
||||
for (const block of msg.content) {
|
||||
if (block.type === 'text') {
|
||||
parts.push({ type: 'text', text: block.text })
|
||||
} else if (block.type === 'image') {
|
||||
parts.push({
|
||||
type: 'image_url',
|
||||
image_url: {
|
||||
url: `data:${block.source.media_type};base64,${block.source.data}`,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return { role: 'user', content: parts }
|
||||
}
|
||||
|
||||
function toMiniMaxAssistantMessage(msg: LLMMessage): ChatCompletionAssistantMessageParam {
|
||||
const toolCalls: ChatCompletionMessageToolCall[] = []
|
||||
const textParts: string[] = []
|
||||
|
||||
for (const block of msg.content) {
|
||||
if (block.type === 'tool_use') {
|
||||
toolCalls.push({
|
||||
id: block.id,
|
||||
type: 'function',
|
||||
function: {
|
||||
name: block.name,
|
||||
arguments: JSON.stringify(block.input),
|
||||
},
|
||||
})
|
||||
} else if (block.type === 'text') {
|
||||
textParts.push(block.text)
|
||||
}
|
||||
}
|
||||
|
||||
const assistantMsg: ChatCompletionAssistantMessageParam = {
|
||||
role: 'assistant',
|
||||
content: textParts.length > 0 ? textParts.join('') : null,
|
||||
}
|
||||
|
||||
if (toolCalls.length > 0) {
|
||||
assistantMsg.tool_calls = toolCalls
|
||||
}
|
||||
|
||||
return assistantMsg
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Internal helpers — OpenAI wire format → framework
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function fromMiniMaxCompletion(completion: ChatCompletion): LLMResponse {
|
||||
const choice = completion.choices[0]
|
||||
if (choice === undefined) {
|
||||
throw new Error('MiniMax returned a completion with no choices')
|
||||
}
|
||||
|
||||
const content: ContentBlock[] = []
|
||||
const message = choice.message
|
||||
|
||||
if (message.content !== null && message.content !== undefined) {
|
||||
const textBlock: TextBlock = { type: 'text', text: message.content }
|
||||
content.push(textBlock)
|
||||
}
|
||||
|
||||
for (const toolCall of message.tool_calls ?? []) {
|
||||
let parsedInput: Record<string, unknown> = {}
|
||||
try {
|
||||
const parsed: unknown = JSON.parse(toolCall.function.arguments)
|
||||
if (parsed !== null && typeof parsed === 'object' && !Array.isArray(parsed)) {
|
||||
parsedInput = parsed as Record<string, unknown>
|
||||
}
|
||||
} catch {
|
||||
// Malformed arguments — surface as empty object.
|
||||
}
|
||||
|
||||
const toolUseBlock: ToolUseBlock = {
|
||||
type: 'tool_use',
|
||||
id: toolCall.id,
|
||||
name: toolCall.function.name,
|
||||
input: parsedInput,
|
||||
}
|
||||
content.push(toolUseBlock)
|
||||
}
|
||||
|
||||
return {
|
||||
id: completion.id,
|
||||
content,
|
||||
model: completion.model,
|
||||
stop_reason: normalizeFinishReason(choice.finish_reason ?? 'stop'),
|
||||
usage: {
|
||||
input_tokens: completion.usage?.prompt_tokens ?? 0,
|
||||
output_tokens: completion.usage?.completion_tokens ?? 0,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
function normalizeFinishReason(reason: string): string {
|
||||
switch (reason) {
|
||||
case 'stop': return 'end_turn'
|
||||
case 'tool_calls': return 'tool_use'
|
||||
case 'length': return 'max_tokens'
|
||||
case 'content_filter': return 'content_filter'
|
||||
default: return reason
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Adapter implementation
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* LLM adapter backed by the MiniMax Chat Completions API.
|
||||
*
|
||||
* Uses the OpenAI SDK pointed at `https://api.minimax.io/v1`.
|
||||
* Thread-safe — a single instance may be shared across concurrent agent runs.
|
||||
*/
|
||||
export class MiniMaxAdapter implements LLMAdapter {
|
||||
readonly name = 'minimax'
|
||||
|
||||
readonly #client: OpenAI
|
||||
|
||||
constructor(apiKey?: string) {
|
||||
this.#client = new OpenAI({
|
||||
apiKey: apiKey ?? process.env['MINIMAX_API_KEY'],
|
||||
baseURL: MINIMAX_BASE_URL,
|
||||
})
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// chat()
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Send a synchronous (non-streaming) chat request and return the complete
|
||||
* {@link LLMResponse}.
|
||||
*
|
||||
* Temperature is clamped to MiniMax's accepted range (0, 1] before sending.
|
||||
*/
|
||||
async chat(messages: LLMMessage[], options: LLMChatOptions): Promise<LLMResponse> {
|
||||
const miniMaxMessages = buildMiniMaxMessageList(messages, options.systemPrompt)
|
||||
|
||||
const completion = await this.#client.chat.completions.create(
|
||||
{
|
||||
model: options.model,
|
||||
messages: miniMaxMessages,
|
||||
max_tokens: options.maxTokens,
|
||||
temperature: clampTemperature(options.temperature),
|
||||
tools: options.tools ? options.tools.map(toMiniMaxTool) : undefined,
|
||||
stream: false,
|
||||
},
|
||||
{
|
||||
signal: options.abortSignal,
|
||||
},
|
||||
)
|
||||
|
||||
return fromMiniMaxCompletion(completion)
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// stream()
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
  /**
   * Send a streaming chat request and yield {@link StreamEvent}s incrementally.
   *
   * Temperature is clamped to MiniMax's accepted range (0, 1] before sending.
   *
   * Sequence guarantees:
   * - Zero or more `text` events
   * - Zero or more `tool_use` events (emitted once per tool call, after
   *   arguments have been fully assembled)
   * - Exactly one terminal event: `done` or `error`
   *
   * Errors (including aborts via `options.abortSignal`) are surfaced as a
   * terminal `error` event rather than thrown out of the generator.
   */
  async *stream(
    messages: LLMMessage[],
    options: LLMStreamOptions,
  ): AsyncIterable<StreamEvent> {
    const miniMaxMessages = buildMiniMaxMessageList(messages, options.systemPrompt)

    // Response metadata accumulated across chunks; each chunk overwrites the
    // id/model so the final values reflect the last chunk received.
    let completionId = ''
    let completionModel = ''
    let finalFinishReason: string = 'stop'
    let inputTokens = 0
    let outputTokens = 0

    // Tool-call fragments arrive spread across chunks, keyed by their index;
    // buffer id/name/argument-JSON here until the stream is exhausted.
    const toolCallBuffers = new Map<
      number,
      { id: string; name: string; argsJson: string }
    >()

    // Concatenation of every text delta; used for the final `done` payload.
    let fullText = ''

    try {
      const streamResponse = await this.#client.chat.completions.create(
        {
          model: options.model,
          messages: miniMaxMessages,
          max_tokens: options.maxTokens,
          temperature: clampTemperature(options.temperature),
          tools: options.tools ? options.tools.map(toMiniMaxTool) : undefined,
          stream: true,
          // Request a usage chunk so exact token counts can be reported.
          stream_options: { include_usage: true },
        },
        {
          signal: options.abortSignal,
        },
      )

      for await (const chunk of streamResponse) {
        completionId = chunk.id
        completionModel = chunk.model

        // When a usage payload is present it carries the authoritative counts.
        if (chunk.usage !== null && chunk.usage !== undefined) {
          inputTokens = chunk.usage.prompt_tokens
          outputTokens = chunk.usage.completion_tokens
        }

        // Guard: some chunks (e.g. the usage-only tail chunk) may carry no
        // choices at all.
        const choice: ChatCompletionChunk.Choice | undefined = chunk.choices[0]
        if (choice === undefined) continue

        const delta = choice.delta

        // Text deltas are forwarded to the consumer immediately and also
        // accumulated for the aggregate `done` response.
        if (delta.content !== null && delta.content !== undefined) {
          fullText += delta.content
          yield { type: 'text', data: delta.content } satisfies StreamEvent
        }

        // Tool-call deltas: the id and function name typically arrive on the
        // first fragment for an index; argument JSON streams in pieces and is
        // concatenated until the stream ends.
        for (const toolCallDelta of delta.tool_calls ?? []) {
          const idx = toolCallDelta.index

          if (!toolCallBuffers.has(idx)) {
            toolCallBuffers.set(idx, {
              id: toolCallDelta.id ?? '',
              name: toolCallDelta.function?.name ?? '',
              argsJson: '',
            })
          }

          const buf = toolCallBuffers.get(idx)
          if (buf !== undefined) {
            // Later fragments may (re)supply id/name; prefer the latest
            // non-empty values.
            if (toolCallDelta.id) buf.id = toolCallDelta.id
            if (toolCallDelta.function?.name) buf.name = toolCallDelta.function.name
            if (toolCallDelta.function?.arguments) {
              buf.argsJson += toolCallDelta.function.arguments
            }
          }
        }

        if (choice.finish_reason !== null && choice.finish_reason !== undefined) {
          finalFinishReason = choice.finish_reason
        }
      }

      // Stream exhausted: parse each buffered tool call and emit exactly one
      // `tool_use` event per call, in the order the calls first appeared.
      const finalToolUseBlocks: ToolUseBlock[] = []
      for (const buf of toolCallBuffers.values()) {
        let parsedInput: Record<string, unknown> = {}
        try {
          const parsed: unknown = JSON.parse(buf.argsJson)
          // Only a plain object is accepted as tool input; arrays/primitives
          // fall through to the empty-object default.
          if (parsed !== null && typeof parsed === 'object' && !Array.isArray(parsed)) {
            parsedInput = parsed as Record<string, unknown>
          }
        } catch {
          // Malformed JSON — surface as empty object.
        }

        const toolUseBlock: ToolUseBlock = {
          type: 'tool_use',
          id: buf.id,
          name: buf.name,
          input: parsedInput,
        }
        finalToolUseBlocks.push(toolUseBlock)
        yield { type: 'tool_use', data: toolUseBlock } satisfies StreamEvent
      }

      // Assemble the aggregate content delivered with the terminal `done`
      // event: accumulated text first (if any), then the tool-use blocks.
      const doneContent: ContentBlock[] = []
      if (fullText.length > 0) {
        doneContent.push({ type: 'text', text: fullText } satisfies TextBlock)
      }
      doneContent.push(...finalToolUseBlocks)

      const finalResponse: LLMResponse = {
        id: completionId,
        content: doneContent,
        model: completionModel,
        stop_reason: normalizeFinishReason(finalFinishReason),
        usage: { input_tokens: inputTokens, output_tokens: outputTokens },
      }

      yield { type: 'done', data: finalResponse } satisfies StreamEvent
    } catch (err) {
      // Normalize non-Error throwables, then emit the single terminal
      // `error` event instead of letting the generator throw.
      const error = err instanceof Error ? err : new Error(String(err))
      yield { type: 'error', data: error } satisfies StreamEvent
    }
  }
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Private utility
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function buildMiniMaxMessageList(
|
||||
messages: LLMMessage[],
|
||||
systemPrompt: string | undefined,
|
||||
): ChatCompletionMessageParam[] {
|
||||
const result: ChatCompletionMessageParam[] = []
|
||||
|
||||
if (systemPrompt !== undefined && systemPrompt.length > 0) {
|
||||
result.push({ role: 'system', content: systemPrompt })
|
||||
}
|
||||
|
||||
result.push(...toMiniMaxMessages(messages))
|
||||
return result
|
||||
}
|
||||
|
||||
// Re-export types that consumers of this module commonly need alongside the adapter.
|
||||
export type {
|
||||
ContentBlock,
|
||||
LLMAdapter,
|
||||
LLMChatOptions,
|
||||
LLMMessage,
|
||||
LLMResponse,
|
||||
LLMStreamOptions,
|
||||
LLMToolDef,
|
||||
StreamEvent,
|
||||
}
|
||||
|
|
@ -186,7 +186,7 @@ export interface ToolDefinition<TInput = Record<string, unknown>> {
|
|||
export interface AgentConfig {
|
||||
readonly name: string
|
||||
readonly model: string
|
||||
readonly provider?: 'anthropic' | 'openai'
|
||||
readonly provider?: 'anthropic' | 'openai' | 'minimax'
|
||||
readonly systemPrompt?: string
|
||||
/** Names of tools (from the tool registry) available to this agent. */
|
||||
readonly tools?: readonly string[]
|
||||
|
|
@ -285,7 +285,7 @@ export interface OrchestratorEvent {
|
|||
export interface OrchestratorConfig {
|
||||
readonly maxConcurrency?: number
|
||||
readonly defaultModel?: string
|
||||
readonly defaultProvider?: 'anthropic' | 'openai'
|
||||
readonly defaultProvider?: 'anthropic' | 'openai' | 'minimax'
|
||||
onProgress?: (event: OrchestratorEvent) => void
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue