fix(agents): scope gpt-5.4-mini chat reasoning fallback (#76727)

Fixes #76176.

OpenAI live verification showed `gpt-5.4-mini` supports reasoning effort generally, but rejects `/v1/chat/completions` payloads that combine function tools with `reasoning_effort`. This keeps reasoning effort for tool-free Chat Completions and Responses, and omits it only for the rejected Chat Completions + function tools combination.

Validation:
- Live OpenAI API matrix on 2026-05-03
- pnpm test src/agents/openai-reasoning-effort.test.ts src/agents/openai-transport-stream.test.ts -- --reporter=verbose
- GitHub PR CI green on ea3915308c

Thanks @ThisIsAdilah and @chinar-amrutkar.
This commit is contained in:
Peter Steinberger
2026-05-03 16:02:15 +01:00
committed by GitHub
parent aeac10f5ce
commit 11e05e86a2
5 changed files with 85 additions and 1 deletions

View File

@@ -25,6 +25,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Discord/status: honor explicit `messages.statusReactions.enabled: true` in tool-only guild channels so queued ack reactions can progress through thinking/done lifecycle reactions instead of stopping at the initial emoji. Thanks @Marvinthebored.
- Agents/OpenAI: omit Chat Completions `reasoning_effort` for `gpt-5.4-mini` only when function tools are present while preserving tool-free Chat and Responses reasoning support, preventing Telegram-routed fallback runs from hanging after OpenAI rejects tool payloads. Fixes #76176. Thanks @ThisIsAdilah and @chinar-amrutkar.
- Agents/models: forward model `maxTokens` as the default output-token limit for OpenAI-compatible Responses and Completions transports when no runtime override is provided, preventing provider defaults from silently truncating larger outputs. (#76645) Thanks @joeyfrasier.
- Control UI/Skills: fix skill detail modal silently failing to open in all browsers by deferring `showModal()` until the dialog element is connected to the DOM; the Lit `ref` callback fired before connection causing a `DOMException: HTMLDialogElement.showModal: Dialog element is not connected` on every skill click. Thanks @nickmopen.
- Gateway/update: run `doctor --non-interactive --fix` after Control UI global package updates before reporting success, so legacy config is migrated before the gateway restart. Thanks @stevenchouai.

View File

@@ -13,6 +13,18 @@ describe("OpenAI reasoning effort support", () => {
expect(resolveOpenAIReasoningEffortForModel({ model, effort: "xhigh" })).toBe("xhigh");
});
it("preserves reasoning_effort metadata for gpt-5.4-mini in Chat Completions", () => {
  // Chat Completions metadata for gpt-5.4-mini must still advertise and resolve efforts.
  const chatModel = { provider: "openai", id: "gpt-5.4-mini", api: "openai-completions" };
  const supported = resolveOpenAISupportedReasoningEfforts(chatModel);
  expect(supported).toContain("medium");
  const resolved = resolveOpenAIReasoningEffortForModel({ model: chatModel, effort: "medium" });
  expect(resolved).toBe("medium");
});
it("preserves reasoning_effort for gpt-5.4-mini in Responses", () => {
  // The Responses API path for gpt-5.4-mini keeps full reasoning-effort support.
  const responsesModel = { provider: "openai", id: "gpt-5.4-mini", api: "openai-responses" };
  const supported = resolveOpenAISupportedReasoningEfforts(responsesModel);
  expect(supported).toContain("medium");
  const resolved = resolveOpenAIReasoningEffortForModel({ model: responsesModel, effort: "medium" });
  expect(resolved).toBe("medium");
});
it("does not downgrade xhigh when Pi compat metadata declares it explicitly", () => {
const model = {
provider: "openai-codex",

View File

@@ -26,6 +26,11 @@ function normalizeModelId(id: string | null | undefined): string {
return normalizeLowercaseStringOrEmpty(id ?? "").replace(/-\d{4}-\d{2}-\d{2}$/u, "");
}
/**
 * Returns true when the model's id — after `normalizeModelId` lowercases it and
 * strips a trailing `-YYYY-MM-DD` date suffix — is exactly `gpt-5.4-mini` or a
 * `gpt-5.4-mini-*` variant. Non-string ids normalize to "" and never match.
 */
export function isOpenAIGpt54MiniModel(model: OpenAIReasoningModel): boolean {
  const rawId = typeof model.id === "string" ? model.id : undefined;
  const id = normalizeModelId(rawId);
  // Equivalent to the anchored regex /^gpt-5\.4-mini(?:-|$)/u on the normalized id.
  return id === "gpt-5.4-mini" || id.startsWith("gpt-5.4-mini-");
}
/**
 * Normalize a caller-supplied reasoning-effort string to the value sent to OpenAI.
 *
 * Every effort level (including "minimal") is forwarded unchanged. The previous
 * conditional (`effort === "minimal" ? "minimal" : effort`) was an identity in
 * both branches, so the dead ternary is removed; this function remains the
 * single hook should any effort value ever need remapping.
 *
 * @param effort - The effort level as provided by the caller.
 * @returns The effort value to place in the outgoing request, unchanged.
 */
export function normalizeOpenAIReasoningEffort(effort: string): string {
  return effort;
}

View File

@@ -2050,6 +2050,68 @@ describe("openai transport stream", () => {
expect(params.reasoning_effort).toBe("high");
});
it("omits reasoning_effort for gpt-5.4-mini Chat Completions tool payloads", () => {
  // When function tools are present, the builder must drop reasoning_effort for gpt-5.4-mini.
  const miniModel = {
    id: "gpt-5.4-mini",
    name: "GPT-5.4 mini",
    api: "openai-completions",
    provider: "openai",
    baseUrl: "https://api.openai.com/v1",
    reasoning: true,
    input: ["text"],
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
    contextWindow: 400000,
    maxTokens: 128000,
  } satisfies Model<"openai-completions">;
  const weatherTool = {
    name: "lookup_weather",
    description: "Get forecast",
    parameters: { type: "object", properties: {}, additionalProperties: false },
  };
  const request = {
    systemPrompt: "system",
    messages: [],
    tools: [weatherTool],
  } as never;
  const built = buildOpenAICompletionsParams(miniModel, request, {
    reasoning: "medium",
  } as never) as { reasoning_effort?: unknown; tools?: unknown };
  expect(built.tools).toBeDefined();
  expect(built).not.toHaveProperty("reasoning_effort");
});
it("keeps reasoning_effort for gpt-5.4-mini Chat Completions payloads without tools", () => {
  // With an empty tools array the reasoning_effort must survive into the payload.
  const miniModel = {
    id: "gpt-5.4-mini",
    name: "GPT-5.4 mini",
    api: "openai-completions",
    provider: "openai",
    baseUrl: "https://api.openai.com/v1",
    reasoning: true,
    input: ["text"],
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
    contextWindow: 400000,
    maxTokens: 128000,
  } satisfies Model<"openai-completions">;
  const toolFreeRequest = {
    systemPrompt: "system",
    messages: [],
    tools: [],
  } as never;
  const built = buildOpenAICompletionsParams(miniModel, toolFreeRequest, {
    reasoning: "medium",
  } as never) as { reasoning_effort?: unknown; tools?: unknown };
  expect(built.tools).toEqual([]);
  expect(built.reasoning_effort).toBe("medium");
});
it("uses provider-native reasoning effort values declared by model compat", () => {
const baseModel = {
id: "qwen/qwen3-32b",

View File

@@ -28,6 +28,7 @@ import { detectOpenAICompletionsCompat } from "./openai-completions-compat.js";
import { flattenCompletionMessagesToStringContent } from "./openai-completions-string-content.js";
import { resolveOpenAIReasoningEffortMap } from "./openai-reasoning-compat.js";
import {
isOpenAIGpt54MiniModel,
normalizeOpenAIReasoningEffort,
resolveOpenAIReasoningEffortForModel,
type OpenAIApiReasoningEffort,
@@ -1899,6 +1900,8 @@ export function buildOpenAICompletionsParams(
fallbackMap: compat.reasoningEffortMap,
})
: undefined;
const omitGpt54MiniToolReasoningEffort =
isOpenAIGpt54MiniModel(model) && Array.isArray(params.tools) && params.tools.length > 0;
if (
compat.thinkingFormat === "openrouter" &&
model.reasoning &&
@@ -1910,7 +1913,8 @@ export function buildOpenAICompletionsParams(
} else if (
resolvedCompletionsReasoningEffort &&
model.reasoning &&
compat.supportsReasoningEffort
compat.supportsReasoningEffort &&
!omitGpt54MiniToolReasoningEffort
) {
params.reasoning_effort = resolvedCompletionsReasoningEffort;
}