Skip to content

Commit 866264d

Browse files
author
PR Bot
committed
feat: add MiniMax as first-class LLM provider
Add MiniMax (https://www.minimaxi.com) as a dedicated LLM provider using the OpenAI-compatible API via @ai-sdk/openai-compatible. Users can now configure MiniMax models (MiniMax-M2.7, MiniMax-M2.5, MiniMax-M2.5-highspeed) with provider: "minimax" instead of using the generic openai-compatible provider. Changes: - Add "minimax" to LLMprovider type union - Add minimax provider branch in getLLM() with default base URL - Add MiniMax example in README quickstart - Add 8 unit tests + 3 integration tests
1 parent c3de315 commit 866264d

File tree

4 files changed

+287
-0
lines changed

4 files changed

+287
-0
lines changed

README.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -105,6 +105,12 @@ const llms: LLMs = {
105105
config: {
106106
baseURL: "https://ark.cn-beijing.volces.com/api/v3" // Volcengine endpoint
107107
}
108+
},
109+
// MiniMax
110+
minimax: {
111+
provider: "minimax",
112+
model: "MiniMax-M2.7", // or MiniMax-M2.5, MiniMax-M2.5-highspeed
113+
apiKey: "your-minimax-api-key"
108114
}
109115

110116
};

packages/eko-core/src/llm/rlm.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -358,6 +358,14 @@ export class RetryLanguageModel {
358358
fetch: llm.fetch,
359359
headers: llm.config?.headers,
360360
}).languageModel(llm.model);
361+
} else if (llm.provider == "minimax") {
362+
return createOpenAICompatible({
363+
name: llm.config?.name || "minimax",
364+
apiKey: apiKey,
365+
baseURL: baseURL || "https://api.minimax.io/v1",
366+
fetch: llm.fetch,
367+
headers: llm.config?.headers,
368+
}).languageModel(llm.model);
361369
} else {
362370
return llm.provider.languageModel(llm.model);
363371
}

packages/eko-core/src/types/llm.types.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ export type LLMprovider =
3030
| "openrouter"
3131
| "openai-compatible"
3232
| "modelscope"
33+
| "minimax"
3334
| ProviderV2;
3435

3536
export type LLMConfig = {
Lines changed: 272 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,272 @@
1+
/**
2+
* MiniMax LLM Provider Tests
3+
*
4+
* Unit tests verify configuration, type integration, and model creation.
5+
* Integration tests require MINIMAX_API_KEY and a Node.js environment with
6+
* full Web API support (fetch, Headers, TransformStream). Run them with
7+
 * Node 18+ outside of Jest if the test runner sandbox lacks these globals.
8+
*/
9+
import dotenv from "dotenv";
10+
import type { LLMs } from "../../src/types/llm.types";
11+
12+
dotenv.config();
13+
14+
const apiKey = process.env.MINIMAX_API_KEY;
15+
16+
// ─── Unit Tests (no API calls) ──────────────────────────────────────────────
17+
18+
describe("MiniMax provider unit tests", () => {
19+
test("LLMprovider type accepts 'minimax'", () => {
20+
const llms: LLMs = {
21+
default: {
22+
provider: "minimax",
23+
model: "MiniMax-M2.7",
24+
apiKey: "test-key",
25+
},
26+
};
27+
expect(llms.default.provider).toBe("minimax");
28+
});
29+
30+
test("MiniMax config with custom baseURL", () => {
31+
const llms: LLMs = {
32+
default: {
33+
provider: "minimax",
34+
model: "MiniMax-M2.5",
35+
apiKey: "test-key",
36+
config: {
37+
baseURL: "https://custom-proxy.example.com/v1",
38+
},
39+
},
40+
};
41+
expect(llms.default.config?.baseURL).toBe(
42+
"https://custom-proxy.example.com/v1"
43+
);
44+
});
45+
46+
test("MiniMax config with async apiKey", async () => {
47+
const asyncKey = async () => "async-test-key";
48+
const llms: LLMs = {
49+
default: {
50+
provider: "minimax",
51+
model: "MiniMax-M2.7",
52+
apiKey: asyncKey,
53+
},
54+
};
55+
expect(typeof llms.default.apiKey).toBe("function");
56+
const key = await (llms.default.apiKey as () => Promise<string>)();
57+
expect(key).toBe("async-test-key");
58+
});
59+
60+
test("MiniMax provider with custom headers", () => {
61+
const llms: LLMs = {
62+
default: {
63+
provider: "minimax",
64+
model: "MiniMax-M2.7",
65+
apiKey: "test-key",
66+
config: {
67+
headers: { "X-Custom-Header": "value" },
68+
},
69+
},
70+
};
71+
expect(llms.default.config?.headers).toEqual({
72+
"X-Custom-Header": "value",
73+
});
74+
});
75+
76+
test("MiniMax provider with custom name in config", () => {
77+
const llms: LLMs = {
78+
default: {
79+
provider: "minimax",
80+
model: "MiniMax-M2.7",
81+
apiKey: "test-key",
82+
config: {
83+
name: "my-minimax",
84+
},
85+
},
86+
};
87+
expect(llms.default.config?.name).toBe("my-minimax");
88+
});
89+
90+
test("MiniMax provider with options for providerOptions passthrough", () => {
91+
const llms: LLMs = {
92+
default: {
93+
provider: "minimax",
94+
model: "MiniMax-M2.7",
95+
apiKey: "test-key",
96+
options: {
97+
temperature: 0.5,
98+
},
99+
},
100+
};
101+
expect(llms.default.options?.temperature).toBe(0.5);
102+
});
103+
104+
test("MiniMax M2.7 and M2.5-highspeed model configs", () => {
105+
const llms: LLMs = {
106+
default: {
107+
provider: "minimax",
108+
model: "MiniMax-M2.7",
109+
apiKey: "test-key",
110+
},
111+
fast: {
112+
provider: "minimax",
113+
model: "MiniMax-M2.5-highspeed",
114+
apiKey: "test-key",
115+
},
116+
};
117+
expect(llms.default.model).toBe("MiniMax-M2.7");
118+
expect(llms.fast.model).toBe("MiniMax-M2.5-highspeed");
119+
});
120+
121+
test("MiniMax provider alongside other providers", () => {
122+
const llms: LLMs = {
123+
default: {
124+
provider: "openai",
125+
model: "gpt-5",
126+
apiKey: "openai-key",
127+
},
128+
minimax: {
129+
provider: "minimax",
130+
model: "MiniMax-M2.7",
131+
apiKey: "minimax-key",
132+
},
133+
claude: {
134+
provider: "anthropic",
135+
model: "claude-sonnet-4-5-20250929",
136+
apiKey: "anthropic-key",
137+
},
138+
};
139+
expect(llms.default.provider).toBe("openai");
140+
expect(llms.minimax.provider).toBe("minimax");
141+
expect(llms.claude.provider).toBe("anthropic");
142+
});
143+
});
144+
145+
// ─── Integration Tests (require MINIMAX_API_KEY + Node.js with Web APIs) ────
146+
// These tests call the real MiniMax API and require:
147+
// 1. MINIMAX_API_KEY environment variable
148+
// 2. Node.js 18+ with global fetch/Headers (run with: node --test or tsx)
149+
//
150+
// Note: Jest's test environment may not expose all Web APIs (fetch, Headers)
151+
// needed by @ai-sdk/openai-compatible, so integration tests are skipped in Jest.
152+
153+
const hasWebAPIs =
154+
typeof globalThis.fetch === "function" &&
155+
typeof globalThis.Headers === "function";
156+
157+
const describeIntegration = apiKey && hasWebAPIs ? describe : describe.skip;
158+
159+
describeIntegration("MiniMax provider integration tests", () => {
160+
let RetryLanguageModel: any;
161+
const llms: LLMs = {
162+
default: {
163+
provider: "minimax",
164+
model: "MiniMax-M2.5-highspeed",
165+
apiKey: apiKey!,
166+
},
167+
};
168+
169+
beforeAll(async () => {
170+
const mod = await import("../../src/llm");
171+
RetryLanguageModel = mod.RetryLanguageModel;
172+
});
173+
174+
test("MiniMax non-streaming generate", async () => {
175+
const rlm = new RetryLanguageModel(llms);
176+
const result = await rlm.call({
177+
messages: [
178+
{
179+
role: "user",
180+
content: [{ type: "text", text: "Say hello in one word." }],
181+
},
182+
],
183+
maxOutputTokens: 64,
184+
temperature: 0.1,
185+
});
186+
187+
expect(result).toBeDefined();
188+
expect(result.text).toBeDefined();
189+
expect(result.text!.length).toBeGreaterThan(0);
190+
expect(result.finishReason).toBe("stop");
191+
expect(result.llm).toBe("default");
192+
expect(result.llmConfig.provider).toBe("minimax");
193+
}, 30000);
194+
195+
test("MiniMax streaming generate", async () => {
196+
const rlm = new RetryLanguageModel(llms);
197+
const result = await rlm.callStream({
198+
messages: [
199+
{
200+
role: "user",
201+
content: [{ type: "text", text: "Say hi in one word." }],
202+
},
203+
],
204+
maxOutputTokens: 64,
205+
temperature: 0.1,
206+
});
207+
208+
expect(result).toBeDefined();
209+
expect(result.stream).toBeDefined();
210+
211+
const reader = result.stream.getReader();
212+
let text = "";
213+
try {
214+
while (true) {
215+
const { done, value } = await reader.read();
216+
if (done) break;
217+
if (value.type === "text-delta") {
218+
text += value.delta;
219+
}
220+
}
221+
} finally {
222+
reader.releaseLock();
223+
}
224+
225+
expect(text.length).toBeGreaterThan(0);
226+
expect(result.llm).toBe("default");
227+
}, 30000);
228+
229+
test("MiniMax with tool calling", async () => {
230+
const rlm = new RetryLanguageModel(llms);
231+
const result = await rlm.call({
232+
tools: [
233+
{
234+
type: "function",
235+
name: "get_weather",
236+
description: "Get weather for a city",
237+
inputSchema: {
238+
type: "object",
239+
properties: {
240+
city: {
241+
type: "string",
242+
description: "City name",
243+
},
244+
},
245+
required: ["city"],
246+
},
247+
},
248+
],
249+
toolChoice: { type: "auto" },
250+
messages: [
251+
{
252+
role: "user",
253+
content: [
254+
{ type: "text", text: "What is the weather in Tokyo?" },
255+
],
256+
},
257+
],
258+
maxOutputTokens: 256,
259+
temperature: 0.1,
260+
});
261+
262+
expect(result).toBeDefined();
263+
const toolCall = result.content.find(
264+
(c: any) => c.type === "tool-call"
265+
);
266+
expect(toolCall).toBeDefined();
267+
if (toolCall && toolCall.type === "tool-call") {
268+
expect(toolCall.toolName).toBe("get_weather");
269+
expect(toolCall.input).toHaveProperty("city");
270+
}
271+
}, 30000);
272+
});

0 commit comments

Comments
 (0)