test(agents): share extra params payload helper

Vincent Koc
2026-04-12 11:01:08 +01:00
parent 279f82ba5f
commit 0450f98157
3 changed files with 58 additions and 90 deletions


@@ -1,8 +1,6 @@
-import type { StreamFn } from "@mariozechner/pi-agent-core";
-import type { Context, Model } from "@mariozechner/pi-ai";
 import { afterEach, beforeEach, describe, expect, it } from "vitest";
+import { runExtraParamsPayloadCase } from "./pi-embedded-runner-extraparams.test-support.js";
 import { __testing as extraParamsTesting } from "./pi-embedded-runner/extra-params.js";
-import { applyExtraParamsToAgent } from "./pi-embedded-runner/extra-params.js";
 import {
 	createMoonshotThinkingWrapper,
 	resolveMoonshotThinkingType,
@@ -40,45 +38,8 @@ afterEach(() => {
 });
 describe("applyExtraParamsToAgent Moonshot and Ollama Kimi", () => {
-	function runPayloadCase(params: {
-		provider: "moonshot" | "ollama";
-		modelId: string;
-		thinkingLevel?: "off" | "low" | "medium" | "high";
-		payload?: Record<string, unknown>;
-		cfg?: Record<string, unknown>;
-	}) {
-		const payloads: Record<string, unknown>[] = [];
-		const baseStreamFn: StreamFn = (model, _context, options) => {
-			const payload = { ...params.payload };
-			options?.onPayload?.(payload, model);
-			payloads.push(payload);
-			return {} as ReturnType<StreamFn>;
-		};
-		const agent = { streamFn: baseStreamFn };
-		applyExtraParamsToAgent(
-			agent,
-			params.cfg as Parameters<typeof applyExtraParamsToAgent>[1],
-			params.provider,
-			params.modelId,
-			undefined,
-			params.thinkingLevel,
-		);
-		const model = {
-			api: "openai-completions",
-			provider: params.provider,
-			id: params.modelId,
-		} as Model<"openai-completions">;
-		const context: Context = { messages: [] };
-		void agent.streamFn?.(model, context, {});
-		expect(payloads).toHaveLength(1);
-		return payloads[0] ?? {};
-	}
 	it("maps thinkingLevel=off to Moonshot thinking.type=disabled", () => {
-		const payload = runPayloadCase({
+		const payload = runExtraParamsPayloadCase({
 			provider: "moonshot",
 			modelId: "kimi-k2.5",
 			thinkingLevel: "off",
@@ -88,7 +49,7 @@ describe("applyExtraParamsToAgent Moonshot and Ollama Kimi", () => {
 	});
 	it("maps non-off thinking levels to Moonshot thinking.type=enabled and normalizes tool_choice", () => {
-		const payload = runPayloadCase({
+		const payload = runExtraParamsPayloadCase({
 			provider: "moonshot",
 			modelId: "kimi-k2.5",
 			thinkingLevel: "low",
@@ -100,7 +61,7 @@ describe("applyExtraParamsToAgent Moonshot and Ollama Kimi", () => {
 	});
 	it("disables thinking instead of broadening pinned Moonshot tool_choice", () => {
-		const payload = runPayloadCase({
+		const payload = runExtraParamsPayloadCase({
 			provider: "moonshot",
 			modelId: "kimi-k2.5",
 			thinkingLevel: "low",
@@ -112,7 +73,7 @@ describe("applyExtraParamsToAgent Moonshot and Ollama Kimi", () => {
 	});
 	it("respects explicit Moonshot thinking param from model config", () => {
-		const payload = runPayloadCase({
+		const payload = runExtraParamsPayloadCase({
 			provider: "moonshot",
 			modelId: "kimi-k2.5",
 			thinkingLevel: "high",
@@ -135,7 +96,7 @@ describe("applyExtraParamsToAgent Moonshot and Ollama Kimi", () => {
 	});
 	it("applies Moonshot payload compatibility to Ollama Kimi cloud models", () => {
-		const payload = runPayloadCase({
+		const payload = runExtraParamsPayloadCase({
 			provider: "ollama",
 			modelId: "kimi-k2.5:cloud",
 			thinkingLevel: "low",
@@ -147,7 +108,7 @@ describe("applyExtraParamsToAgent Moonshot and Ollama Kimi", () => {
 	});
 	it("maps thinkingLevel=off for Ollama Kimi cloud models through Moonshot compatibility", () => {
-		const payload = runPayloadCase({
+		const payload = runExtraParamsPayloadCase({
 			provider: "ollama",
 			modelId: "kimi-k2.5:cloud",
 			thinkingLevel: "off",
@@ -157,7 +118,7 @@ describe("applyExtraParamsToAgent Moonshot and Ollama Kimi", () => {
 	});
 	it("disables thinking instead of broadening pinned Ollama Kimi cloud tool_choice", () => {
-		const payload = runPayloadCase({
+		const payload = runExtraParamsPayloadCase({
 			provider: "ollama",
 			modelId: "kimi-k2.5:cloud",
 			thinkingLevel: "low",


@@ -1,8 +1,6 @@
-import type { StreamFn } from "@mariozechner/pi-agent-core";
-import type { Context, Model } from "@mariozechner/pi-ai";
 import { afterEach, beforeEach, describe, expect, it } from "vitest";
+import { runExtraParamsPayloadCase } from "./pi-embedded-runner-extraparams.test-support.js";
 import { __testing as extraParamsTesting } from "./pi-embedded-runner/extra-params.js";
-import { applyExtraParamsToAgent } from "./pi-embedded-runner/extra-params.js";
 import {
 	createOpenRouterSystemCacheWrapper,
 	createOpenRouterWrapper,
@@ -49,43 +47,9 @@ afterEach(() => {
 });
 describe("applyExtraParamsToAgent OpenRouter reasoning", () => {
-	function runPayloadCase(params: {
-		modelId: string;
-		thinkingLevel?: "off" | "low" | "medium" | "high";
-		payload?: Record<string, unknown>;
-	}) {
-		const payloads: Record<string, unknown>[] = [];
-		const baseStreamFn: StreamFn = (model, _context, options) => {
-			const payload = { ...params.payload };
-			options?.onPayload?.(payload, model);
-			payloads.push(payload);
-			return {} as ReturnType<StreamFn>;
-		};
-		const agent = { streamFn: baseStreamFn };
-		applyExtraParamsToAgent(
-			agent,
-			undefined,
-			"openrouter",
-			params.modelId,
-			undefined,
-			params.thinkingLevel,
-		);
-		const model = {
-			api: "openai-completions",
-			provider: "openrouter",
-			id: params.modelId,
-		} as Model<"openai-completions">;
-		const context: Context = { messages: [] };
-		void agent.streamFn?.(model, context, {});
-		expect(payloads).toHaveLength(1);
-		return payloads[0] ?? {};
-	}
 	it("does not inject reasoning when thinkingLevel is off (default) for OpenRouter", () => {
-		const payload = runPayloadCase({
+		const payload = runExtraParamsPayloadCase({
+			provider: "openrouter",
 			modelId: "deepseek/deepseek-r1",
 			thinkingLevel: "off",
 			payload: { model: "deepseek/deepseek-r1" },
@@ -96,7 +60,8 @@ describe("applyExtraParamsToAgent OpenRouter reasoning", () => {
 	});
 	it("injects reasoning.effort when thinkingLevel is non-off for OpenRouter", () => {
-		const payload = runPayloadCase({
+		const payload = runExtraParamsPayloadCase({
+			provider: "openrouter",
 			modelId: "openrouter/auto",
 			thinkingLevel: "low",
 		});
@@ -105,7 +70,8 @@ describe("applyExtraParamsToAgent OpenRouter reasoning", () => {
 	});
 	it("removes legacy reasoning_effort and keeps reasoning unset when thinkingLevel is off", () => {
-		const payload = runPayloadCase({
+		const payload = runExtraParamsPayloadCase({
+			provider: "openrouter",
 			modelId: "openrouter/auto",
 			thinkingLevel: "off",
 			payload: { reasoning_effort: "high" },
@@ -116,7 +82,8 @@ describe("applyExtraParamsToAgent OpenRouter reasoning", () => {
 	});
 	it("does not inject effort when payload already has reasoning.max_tokens", () => {
-		const payload = runPayloadCase({
+		const payload = runExtraParamsPayloadCase({
+			provider: "openrouter",
 			modelId: "openrouter/auto",
 			thinkingLevel: "low",
 			payload: { reasoning: { max_tokens: 256 } },
@@ -126,7 +93,8 @@ describe("applyExtraParamsToAgent OpenRouter reasoning", () => {
 	});
 	it("does not inject reasoning.effort for x-ai/grok models on OpenRouter (#32039)", () => {
-		const payload = runPayloadCase({
+		const payload = runExtraParamsPayloadCase({
+			provider: "openrouter",
 			modelId: "x-ai/grok-4.1-fast",
 			thinkingLevel: "medium",
 			payload: { reasoning_effort: "medium" },


@@ -0,0 +1,39 @@
+import type { StreamFn } from "@mariozechner/pi-agent-core";
+import type { Context, Model } from "@mariozechner/pi-ai";
+import { applyExtraParamsToAgent } from "./pi-embedded-runner/extra-params.js";
+export function runExtraParamsPayloadCase(params: {
+	provider: string;
+	modelId: string;
+	thinkingLevel?: "off" | "low" | "medium" | "high";
+	payload?: Record<string, unknown>;
+	cfg?: Record<string, unknown>;
+}) {
+	const payloads: Record<string, unknown>[] = [];
+	const baseStreamFn: StreamFn = (model, _context, options) => {
+		const payload = { ...params.payload };
+		options?.onPayload?.(payload, model);
+		payloads.push(payload);
+		return {} as ReturnType<StreamFn>;
+	};
+	const agent = { streamFn: baseStreamFn };
+	applyExtraParamsToAgent(
+		agent,
+		params.cfg as Parameters<typeof applyExtraParamsToAgent>[1],
+		params.provider,
+		params.modelId,
+		undefined,
+		params.thinkingLevel,
+	);
+	const model = {
+		api: "openai-completions",
+		provider: params.provider,
+		id: params.modelId,
+	} as Model<"openai-completions">;
+	const context: Context = { messages: [] };
+	void agent.streamFn?.(model, context, {});
+	return payloads[0] ?? {};
+}
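
For reference, a minimal sketch of how a spec consumes the shared helper after this change. It assumes the vitest setup and relative import path shown in the diffs above; the describe title and the asserted thinking shape are illustrative only, taken from the "maps thinkingLevel=off to Moonshot thinking.type=disabled" test name rather than from assertion code visible in this commit.

import { describe, expect, it } from "vitest";
import { runExtraParamsPayloadCase } from "./pi-embedded-runner-extraparams.test-support.js";

describe("runExtraParamsPayloadCase (usage sketch)", () => {
	it("returns the payload observed by the wrapped streamFn", () => {
		// Drive applyExtraParamsToAgent through the shared helper and capture
		// the payload that the wrapped streamFn would send.
		const payload = runExtraParamsPayloadCase({
			provider: "moonshot",
			modelId: "kimi-k2.5",
			thinkingLevel: "off",
		});
		// Illustrative assertion: per the test title above, thinkingLevel=off is
		// expected to map to Moonshot thinking.type=disabled.
		expect(payload).toMatchObject({ thinking: { type: "disabled" } });
	});
});

Centralizing the payload capture in one exported helper keeps the per-provider specs focused on inputs (provider, modelId, thinkingLevel, seed payload, cfg) and the expected payload shape, at the cost of dropping the old inline expect(payloads).toHaveLength(1) guard, which each spec can reassert if needed.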