fix(agents): scope gpt-5.4-mini chat reasoning fallback

This commit is contained in:
Peter Steinberger
2026-05-03 15:56:06 +01:00
parent c1deaface2
commit ea3915308c
5 changed files with 42 additions and 13 deletions

View File

@@ -25,7 +25,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Discord/status: honor explicit `messages.statusReactions.enabled: true` in tool-only guild channels so queued ack reactions can progress through thinking/done lifecycle reactions instead of stopping at the initial emoji. Thanks @Marvinthebored.
- Agents/OpenAI: omit Chat Completions `reasoning_effort` for `gpt-5.4-mini` while preserving Responses reasoning support, preventing Telegram-routed fallback runs from hanging after OpenAI rejects tool payloads. Fixes #76176. Thanks @ThisIsAdilah and @chinar-amrutkar.
- Agents/OpenAI: omit Chat Completions `reasoning_effort` for `gpt-5.4-mini` only when function tools are present while preserving tool-free Chat and Responses reasoning support, preventing Telegram-routed fallback runs from hanging after OpenAI rejects tool payloads. Fixes #76176. Thanks @ThisIsAdilah and @chinar-amrutkar.
- Agents/models: forward model `maxTokens` as the default output-token limit for OpenAI-compatible Responses and Completions transports when no runtime override is provided, preventing provider defaults from silently truncating larger outputs. (#76645) Thanks @joeyfrasier.
- Control UI/Skills: fix skill detail modal silently failing to open in all browsers by deferring `showModal()` until the dialog element is connected to the DOM; the Lit `ref` callback fired before connection causing a `DOMException: HTMLDialogElement.showModal: Dialog element is not connected` on every skill click. Thanks @nickmopen.
- Gateway/update: run `doctor --non-interactive --fix` after Control UI global package updates before reporting success, so legacy config is migrated before the gateway restart. Thanks @stevenchouai.

View File

@@ -13,10 +13,10 @@ describe("OpenAI reasoning effort support", () => {
expect(resolveOpenAIReasoningEffortForModel({ model, effort: "xhigh" })).toBe("xhigh");
});
it("omits reasoning_effort for gpt-5.4-mini in Chat Completions", () => {
it("preserves reasoning_effort metadata for gpt-5.4-mini in Chat Completions", () => {
const model = { provider: "openai", id: "gpt-5.4-mini", api: "openai-completions" };
expect(resolveOpenAISupportedReasoningEfforts(model)).toHaveLength(0);
expect(resolveOpenAIReasoningEffortForModel({ model, effort: "medium" })).toBeUndefined();
expect(resolveOpenAISupportedReasoningEfforts(model)).toContain("medium");
expect(resolveOpenAIReasoningEffortForModel({ model, effort: "medium" })).toBe("medium");
});
it("preserves reasoning_effort for gpt-5.4-mini in Responses", () => {

View File

@@ -26,6 +26,11 @@ function normalizeModelId(id: string | null | undefined): string {
return normalizeLowercaseStringOrEmpty(id ?? "").replace(/-\d{4}-\d{2}-\d{2}$/u, "");
}
/**
 * Reports whether `model` is a GPT-5.4-mini variant.
 *
 * The id is lowercased and has any trailing `-YYYY-MM-DD` date suffix removed
 * via `normalizeModelId` before matching, so dated snapshots such as
 * `gpt-5.4-mini-2026-01-15` and dash-suffixed variants both match.
 */
export function isOpenAIGpt54MiniModel(model: OpenAIReasoningModel): boolean {
  const rawId = typeof model.id === "string" ? model.id : undefined;
  return /^gpt-5\.4-mini(?:-|$)/u.test(normalizeModelId(rawId));
}
/**
 * Normalizes a reasoning-effort value for the OpenAI API.
 *
 * Currently every effort level — including `"minimal"` — is passed through
 * unchanged; the original `effort === "minimal" ? "minimal" : effort` ternary
 * was a tautology and has been simplified. This hook is kept so provider-level
 * remapping of effort names can be added in one place later.
 *
 * @param effort - Raw effort value (e.g. "minimal", "medium", "xhigh").
 * @returns The effort value to send to the API, unchanged.
 */
export function normalizeOpenAIReasoningEffort(effort: string): string {
  return effort;
}
@@ -80,14 +85,6 @@ export function resolveOpenAISupportedReasoningEfforts(
if (/^gpt-5\.[2-9](?:\.\d+)?-pro(?:-|$)/u.test(id)) {
return GPT_PRO_REASONING_EFFORTS;
}
const api = normalizeLowercaseStringOrEmpty(typeof model.api === "string" ? model.api : "");
if (api === "openai-responses" || api === "openai-codex-responses") {
if (/^gpt-5\.4-mini(?:-|$)/u.test(id)) {
return GPT_52_REASONING_EFFORTS;
}
} else if (/^gpt-5\.4-mini(?:-|$)/u.test(id)) {
return [];
}
if (/^gpt-5\.[2-9](?:\.\d+)?(?:-|$)/u.test(id)) {
return GPT_52_REASONING_EFFORTS;
}

View File

@@ -2084,6 +2084,34 @@ describe("openai transport stream", () => {
expect(params).not.toHaveProperty("reasoning_effort");
});
it("keeps reasoning_effort for gpt-5.4-mini Chat Completions payloads without tools", () => {
const params = buildOpenAICompletionsParams(
{
id: "gpt-5.4-mini",
name: "GPT-5.4 mini",
api: "openai-completions",
provider: "openai",
baseUrl: "https://api.openai.com/v1",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
{
systemPrompt: "system",
messages: [],
tools: [],
} as never,
{
reasoning: "medium",
} as never,
) as { reasoning_effort?: unknown; tools?: unknown };
expect(params.tools).toEqual([]);
expect(params.reasoning_effort).toBe("medium");
});
it("uses provider-native reasoning effort values declared by model compat", () => {
const baseModel = {
id: "qwen/qwen3-32b",

View File

@@ -28,6 +28,7 @@ import { detectOpenAICompletionsCompat } from "./openai-completions-compat.js";
import { flattenCompletionMessagesToStringContent } from "./openai-completions-string-content.js";
import { resolveOpenAIReasoningEffortMap } from "./openai-reasoning-compat.js";
import {
isOpenAIGpt54MiniModel,
normalizeOpenAIReasoningEffort,
resolveOpenAIReasoningEffortForModel,
type OpenAIApiReasoningEffort,
@@ -1899,6 +1900,8 @@ export function buildOpenAICompletionsParams(
fallbackMap: compat.reasoningEffortMap,
})
: undefined;
const omitGpt54MiniToolReasoningEffort =
isOpenAIGpt54MiniModel(model) && Array.isArray(params.tools) && params.tools.length > 0;
if (
compat.thinkingFormat === "openrouter" &&
model.reasoning &&
@@ -1910,7 +1913,8 @@ export function buildOpenAICompletionsParams(
} else if (
resolvedCompletionsReasoningEffort &&
model.reasoning &&
compat.supportsReasoningEffort
compat.supportsReasoningEffort &&
!omitGpt54MiniToolReasoningEffort
) {
params.reasoning_effort = resolvedCompletionsReasoningEffort;
}