feat(minimax): support fast mode and sync pi defaults

This commit is contained in:
Vincent Koc
2026-03-22 14:50:56 -07:00
parent 57267b23d5
commit f7bc9818b5
8 changed files with 128 additions and 11 deletions

View File

@@ -29,6 +29,9 @@ Docs: https://docs.openclaw.ai
- Android/nodes: add `callLog.search` plus shared Call Log permission wiring so Android nodes can search recent call history through the gateway. (#44073) Thanks @lixuankai.
- Android/nodes: add `sms.search` plus shared SMS permission wiring so Android nodes can search device text messages through the gateway. (#48299) Thanks @lixuankai.
- Plugins/MiniMax: merge the bundled MiniMax API and MiniMax OAuth plugin surfaces into a single default-on `minimax` plugin, while keeping legacy `minimax-portal-auth` config ids aliased for compatibility.
- Agents/Pi compatibility: align OpenClaw's bundled MiniMax runtime behavior with the current upstream Pi 0.61.1 release so embedded runs stay in sync with the latest published Pi SDK semantics. Thanks @vincentkoc.
- Models/MiniMax defaults: raise bundled MiniMax M2.5/M2.7 context-window, max-token, and pricing metadata to the higher defaults shipped by the current upstream Pi SDK. Thanks @vincentkoc.
- MiniMax/fast mode: map shared `/fast` and `params.fastMode` to MiniMax `-highspeed` models for M2.1, M2.5, and M2.7 API-key and OAuth runs. Thanks @vincentkoc.
- Telegram/actions: add `topic-edit` for forum-topic renames and icon updates while sharing the same Telegram topic-edit transport used by the plugin runtime. (#47798) Thanks @obviyus.
- Telegram/error replies: add a default-off `channels.telegram.silentErrorReplies` setting so bot error replies can be delivered silently across regular replies, native commands, and fallback sends. (#19776) Thanks @ImLukeF.
- Doctor/refactor: start splitting doctor provider checks into `src/commands/doctor/providers/*` by extracting Telegram first-run and group allowlist warnings into a provider-specific module, keeping the current setup guidance and warning behavior intact. Thanks @vincentkoc.

View File

@@ -13,6 +13,17 @@ describe("minimax model definitions", () => {
expect(MINIMAX_HOSTED_MODEL_ID).toBe("MiniMax-M2.7");
});
// Pins the bumped MiniMax defaults (context window, max tokens, per-token
// pricing) so a drift from the upstream Pi SDK values fails loudly here.
it("uses the higher upstream MiniMax context and token defaults", () => {
expect(DEFAULT_MINIMAX_CONTEXT_WINDOW).toBe(204800);
expect(DEFAULT_MINIMAX_MAX_TOKENS).toBe(131072);
// Costs are USD per million tokens for the MiniMax API-key plan.
expect(MINIMAX_API_COST).toEqual({
input: 0.3,
output: 1.2,
cacheRead: 0.06,
cacheWrite: 0.375,
});
});
it("builds catalog model with name and reasoning from catalog", () => {
const model = buildMinimaxModelDefinition({
id: "MiniMax-M2.7",

View File

@@ -5,14 +5,14 @@ export const MINIMAX_API_BASE_URL = "https://api.minimax.io/anthropic";
export const MINIMAX_CN_API_BASE_URL = "https://api.minimaxi.com/anthropic";
export const MINIMAX_HOSTED_MODEL_ID = "MiniMax-M2.7";
export const MINIMAX_HOSTED_MODEL_REF = `minimax/${MINIMAX_HOSTED_MODEL_ID}`;
export const DEFAULT_MINIMAX_CONTEXT_WINDOW = 200000;
export const DEFAULT_MINIMAX_MAX_TOKENS = 8192;
export const DEFAULT_MINIMAX_CONTEXT_WINDOW = 204800;
export const DEFAULT_MINIMAX_MAX_TOKENS = 131072;
export const MINIMAX_API_COST = {
input: 0.3,
output: 1.2,
cacheRead: 0.03,
cacheWrite: 0.12,
cacheRead: 0.06,
cacheWrite: 0.375,
};
export const MINIMAX_HOSTED_COST = {
input: 0,

View File

@@ -6,13 +6,13 @@ import type {
const MINIMAX_PORTAL_BASE_URL = "https://api.minimax.io/anthropic";
export const MINIMAX_DEFAULT_MODEL_ID = "MiniMax-M2.7";
const MINIMAX_DEFAULT_VISION_MODEL_ID = "MiniMax-VL-01";
const MINIMAX_DEFAULT_CONTEXT_WINDOW = 200000;
const MINIMAX_DEFAULT_MAX_TOKENS = 8192;
const MINIMAX_DEFAULT_CONTEXT_WINDOW = 204800;
const MINIMAX_DEFAULT_MAX_TOKENS = 131072;
const MINIMAX_API_COST = {
input: 0.3,
output: 1.2,
cacheRead: 0.03,
cacheWrite: 0.12,
cacheRead: 0.06,
cacheWrite: 0.375,
};
function buildMinimaxModel(params: {

View File

@@ -310,6 +310,31 @@ describe("applyExtraParamsToAgent", () => {
return payload;
}
/**
 * Applies extra params to a stub agent and reports which model id the
 * (possibly wrapped) streamFn ultimately receives when invoked.
 *
 * Used to verify fast-mode wrappers that swap the model id before streaming.
 */
function runResolvedModelIdCase(params: {
applyProvider: string;
applyModelId: string;
model: Model<"anthropic-messages">;
cfg?: Record<string, unknown>;
extraParamsOverride?: Record<string, unknown>;
}): string {
// Default to the input id; the recording streamFn overwrites it on call.
let observedModelId = params.model.id;
const recordingStreamFn: StreamFn = (streamedModel) => {
observedModelId = String(streamedModel.id ?? "");
// The stream result is never consumed by these cases.
return {} as ReturnType<StreamFn>;
};
const agent = { streamFn: recordingStreamFn };
applyExtraParamsToAgent(
agent,
params.cfg as Parameters<typeof applyExtraParamsToAgent>[1],
params.applyProvider,
params.applyModelId,
params.extraParamsOverride,
);
const emptyContext: Context = { messages: [] };
void agent.streamFn?.(params.model, emptyContext, {});
return observedModelId;
}
function runParallelToolCallsPayloadMutationCase(params: {
applyProvider: string;
applyModelId: string;
@@ -1827,6 +1852,38 @@ describe("applyExtraParamsToAgent", () => {
expect(payload.service_tier).toBe("default");
});
// fastMode=true on a base MiniMax model should resolve to its -highspeed twin.
it("maps MiniMax /fast to the matching highspeed model", () => {
const resolvedModelId = runResolvedModelIdCase({
applyProvider: "minimax",
applyModelId: "MiniMax-M2.7",
extraParamsOverride: { fastMode: true },
model: {
api: "anthropic-messages",
provider: "minimax",
id: "MiniMax-M2.7",
baseUrl: "https://api.minimax.io/anthropic",
} as Model<"anthropic-messages">,
});
expect(resolvedModelId).toBe("MiniMax-M2.7-highspeed");
});
// fastMode=false must not rewrite an id the user already pinned to highspeed
// (highspeed ids have no entry in the fast-model map, so they pass through).
it("keeps explicit MiniMax highspeed models unchanged when /fast is off", () => {
const resolvedModelId = runResolvedModelIdCase({
applyProvider: "minimax-portal",
applyModelId: "MiniMax-M2.7-highspeed",
extraParamsOverride: { fastMode: false },
model: {
api: "anthropic-messages",
provider: "minimax-portal",
id: "MiniMax-M2.7-highspeed",
baseUrl: "https://api.minimax.io/anthropic",
} as Model<"anthropic-messages">,
});
expect(resolvedModelId).toBe("MiniMax-M2.7-highspeed");
});
it("injects service_tier=auto for Anthropic fast mode on direct API-key models", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "anthropic",

View File

@@ -16,6 +16,7 @@ import {
resolveCacheRetention,
} from "./anthropic-stream-wrappers.js";
import { log } from "./logger.js";
import { createMinimaxFastModeWrapper } from "./minimax-stream-wrappers.js";
import {
createMoonshotThinkingWrapper,
resolveMoonshotThinkingType,
@@ -275,6 +276,13 @@ export function applyExtraParamsToAgent(
agent.streamFn = createAnthropicFastModeWrapper(agent.streamFn, anthropicFastMode);
}
if (typeof effectiveExtraParams?.fastMode === "boolean") {
log.debug(
`applying MiniMax fast mode=${effectiveExtraParams.fastMode} for ${provider}/${modelId}`,
);
agent.streamFn = createMinimaxFastModeWrapper(agent.streamFn, effectiveExtraParams.fastMode);
}
const openAIFastMode = resolveOpenAIFastMode(effectiveExtraParams);
if (openAIFastMode) {
log.debug(`applying OpenAI fast mode for ${provider}/${modelId}`);

View File

@@ -0,0 +1,38 @@
import type { StreamFn } from "@mariozechner/pi-agent-core";
import { streamSimple } from "@mariozechner/pi-ai";
// Base MiniMax model ids paired with their "-highspeed" (fast mode) variants.
const MINIMAX_FAST_MODEL_IDS = new Map<string, string>([
["MiniMax-M2.1", "MiniMax-M2.1-highspeed"],
["MiniMax-M2.5", "MiniMax-M2.5-highspeed"],
["MiniMax-M2.7", "MiniMax-M2.7-highspeed"],
]);
/**
 * Resolves the highspeed counterpart of a MiniMax model id.
 *
 * @param modelId - Candidate model id; may be any runtime value.
 * @returns The "-highspeed" id, or undefined when the input is not a string
 *          or has no fast variant (e.g. an id that is already highspeed).
 */
function resolveMinimaxFastModelId(modelId: unknown): string | undefined {
// Trim so ids with stray whitespace still match the map keys.
return typeof modelId === "string"
? MINIMAX_FAST_MODEL_IDS.get(modelId.trim())
: undefined;
}
/**
 * Wraps a StreamFn so that, when fast mode is on, base MiniMax models are
 * streamed under their "-highspeed" id instead.
 *
 * @param baseStreamFn - StreamFn to delegate to; falls back to streamSimple.
 * @param fastMode - Whether the /fast (params.fastMode) flag is enabled.
 * @returns A StreamFn that swaps the model id only for anthropic-messages
 *          models on the "minimax" or "minimax-portal" providers; all other
 *          calls pass through untouched.
 */
export function createMinimaxFastModeWrapper(
baseStreamFn: StreamFn | undefined,
fastMode: boolean,
): StreamFn {
const delegate = baseStreamFn ?? streamSimple;
return (model, context, options) => {
const isMinimaxAnthropic =
model.api === "anthropic-messages" &&
(model.provider === "minimax" || model.provider === "minimax-portal");
if (fastMode && isMinimaxAnthropic) {
const highspeedId = resolveMinimaxFastModelId(model.id);
if (highspeedId !== undefined) {
// Shallow-copy the model so the caller's object is never mutated.
return delegate({ ...model, id: highspeedId }, context, options);
}
}
return delegate(model, context, options);
};
}

View File

@@ -16,9 +16,9 @@ const MINIMAX_API_BASE_URL = "https://api.minimax.io/anthropic";
const MINIMAX_CN_API_BASE_URL = "https://api.minimaxi.com/anthropic";
const MINIMAX_HOSTED_MODEL_ID = "MiniMax-M2.7";
const MINIMAX_HOSTED_MODEL_REF = `minimax/${MINIMAX_HOSTED_MODEL_ID}`;
const DEFAULT_MINIMAX_CONTEXT_WINDOW = 200000;
const DEFAULT_MINIMAX_MAX_TOKENS = 8192;
const MINIMAX_API_COST = { input: 0.3, output: 1.2, cacheRead: 0.03, cacheWrite: 0.12 };
const DEFAULT_MINIMAX_CONTEXT_WINDOW = 204800;
const DEFAULT_MINIMAX_MAX_TOKENS = 131072;
const MINIMAX_API_COST = { input: 0.3, output: 1.2, cacheRead: 0.06, cacheWrite: 0.375 };
const MINIMAX_HOSTED_COST = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 };
const MINIMAX_LM_STUDIO_COST = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 };
const MINIMAX_MODEL_CATALOG = {