fix(agents): omit gpt-5.4-mini chat reasoning effort

This commit is contained in:
Peter Steinberger
2026-05-03 15:26:13 +01:00
parent 0949f4fe51
commit c1deaface2
4 changed files with 55 additions and 0 deletions

View File

@@ -25,6 +25,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Discord/status: honor explicit `messages.statusReactions.enabled: true` in tool-only guild channels so queued ack reactions can progress through thinking/done lifecycle reactions instead of stopping at the initial emoji. Thanks @Marvinthebored.
- Agents/OpenAI: omit Chat Completions `reasoning_effort` for `gpt-5.4-mini` while preserving Responses reasoning support, preventing Telegram-routed fallback runs from hanging after OpenAI rejects tool payloads. Fixes #76176. Thanks @ThisIsAdilah and @chinar-amrutkar.
- Agents/models: forward model `maxTokens` as the default output-token limit for OpenAI-compatible Responses and Completions transports when no runtime override is provided, preventing provider defaults from silently truncating larger outputs. (#76645) Thanks @joeyfrasier.
- Control UI/Skills: fix skill detail modal silently failing to open in all browsers by deferring `showModal()` until the dialog element is connected to the DOM; the Lit `ref` callback fired before connection causing a `DOMException: HTMLDialogElement.showModal: Dialog element is not connected` on every skill click. Thanks @nickmopen.
- Gateway/update: run `doctor --non-interactive --fix` after Control UI global package updates before reporting success, so legacy config is migrated before the gateway restart. Thanks @stevenchouai.

View File

@@ -13,6 +13,18 @@ describe("OpenAI reasoning effort support", () => {
expect(resolveOpenAIReasoningEffortForModel({ model, effort: "xhigh" })).toBe("xhigh");
});
// Chat Completions must not receive reasoning_effort for gpt-5.4-mini:
// no effort levels are advertised, and an explicit request resolves to undefined.
it("omits reasoning_effort for gpt-5.4-mini in Chat Completions", () => {
  const chatModel = { provider: "openai", id: "gpt-5.4-mini", api: "openai-completions" };
  const supported = resolveOpenAISupportedReasoningEfforts(chatModel);
  expect(supported).toHaveLength(0);
  const resolved = resolveOpenAIReasoningEffortForModel({ model: chatModel, effort: "medium" });
  expect(resolved).toBeUndefined();
});
// The Responses API keeps reasoning support for gpt-5.4-mini: "medium" is
// both advertised as supported and passed through unchanged.
it("preserves reasoning_effort for gpt-5.4-mini in Responses", () => {
  const responsesModel = { provider: "openai", id: "gpt-5.4-mini", api: "openai-responses" };
  const supported = resolveOpenAISupportedReasoningEfforts(responsesModel);
  expect(supported).toContain("medium");
  const resolved = resolveOpenAIReasoningEffortForModel({ model: responsesModel, effort: "medium" });
  expect(resolved).toBe("medium");
});
it("does not downgrade xhigh when Pi compat metadata declares it explicitly", () => {
const model = {
provider: "openai-codex",

View File

@@ -80,6 +80,14 @@ export function resolveOpenAISupportedReasoningEfforts(
// Pro-tier gpt-5.2+ models (e.g. "gpt-5.2-pro") get the dedicated pro effort set.
if (/^gpt-5\.[2-9](?:\.\d+)?-pro(?:-|$)/u.test(id)) {
  return GPT_PRO_REASONING_EFFORTS;
}
// Normalize the transport identifier; non-string `api` values collapse to "".
const api = normalizeLowercaseStringOrEmpty(typeof model.api === "string" ? model.api : "");
if (api === "openai-responses" || api === "openai-codex-responses") {
  // Responses transports keep reasoning support for gpt-5.4-mini
  // (same effort set as the generic gpt-5.2+ branch below, made explicit here).
  if (/^gpt-5\.4-mini(?:-|$)/u.test(id)) {
    return GPT_52_REASONING_EFFORTS;
  }
} else if (/^gpt-5\.4-mini(?:-|$)/u.test(id)) {
  // Any non-Responses transport (notably Chat Completions): advertise no efforts
  // so reasoning_effort is omitted — presumably because OpenAI rejects tool
  // payloads carrying it for this model (see #76176); confirm against changelog.
  return [];
}
// Generic gpt-5.2 through gpt-5.9 (optionally "gpt-5.x.y") fall back to the
// standard effort set; the mini early-returns above take precedence.
if (/^gpt-5\.[2-9](?:\.\d+)?(?:-|$)/u.test(id)) {
  return GPT_52_REASONING_EFFORTS;
}

View File

@@ -2050,6 +2050,40 @@ describe("openai transport stream", () => {
expect(params.reasoning_effort).toBe("high");
});
// Even with tools attached (the payload shape that triggered the hang in
// #76176), Chat Completions params built for gpt-5.4-mini must carry the
// tool definitions but never a reasoning_effort key.
it("omits reasoning_effort for gpt-5.4-mini Chat Completions tool payloads", () => {
  const miniModel = {
    id: "gpt-5.4-mini",
    name: "GPT-5.4 mini",
    api: "openai-completions",
    provider: "openai",
    baseUrl: "https://api.openai.com/v1",
    reasoning: true,
    input: ["text"],
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
    contextWindow: 400000,
    maxTokens: 128000,
  } satisfies Model<"openai-completions">;
  const weatherTool = {
    name: "lookup_weather",
    description: "Get forecast",
    parameters: { type: "object", properties: {}, additionalProperties: false },
  };
  const context = {
    systemPrompt: "system",
    messages: [],
    tools: [weatherTool],
  } as never;
  const options = { reasoning: "medium" } as never;
  const params = buildOpenAICompletionsParams(miniModel, context, options) as {
    reasoning_effort?: unknown;
    tools?: unknown;
  };
  expect(params.tools).toBeDefined();
  expect(params).not.toHaveProperty("reasoning_effort");
});
it("uses provider-native reasoning effort values declared by model compat", () => {
const baseModel = {
id: "qwen/qwen3-32b",