fix: align OpenAI reasoning effort handling

This commit is contained in:
Peter Steinberger
2026-04-21 04:54:17 +01:00
parent e1d7e2e8a2
commit 2641b052dc
12 changed files with 289 additions and 78 deletions

View File

@@ -18,6 +18,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- OpenAI/Responses: resolve `/think` levels against each GPT model's supported reasoning efforts so `/think off` no longer becomes high reasoning or sends unsupported `reasoning.effort: "none"` payloads.
- Setup/TUI: relaunch the setup hatch TUI in a fresh process while preserving the configured gateway target and auth source, so onboarding recovers terminal state cleanly without exposing gateway secrets on command-line args. (#69524) Thanks @shakkernerd.
- Codex: avoid re-exposing the image-generation tool on native vision turns with inbound images, and keep bare image-model overrides on the configured image provider. (#65061) Thanks @zhulijin1991.
- Sessions/reset: clear auto-sourced model, provider, and auth-profile overrides on `/new` and `/reset` while preserving explicit user selections, so channel sessions stop staying pinned to runtime fallback choices. (#69419) Thanks @sk7n4k3d.

View File

@@ -514,7 +514,8 @@ Values are case-insensitive at runtime, so `"Off"` and `"off"` both disable the
OpenClaw treats direct OpenAI, Codex, and Azure OpenAI endpoints differently from generic OpenAI-compatible `/v1` proxies:
**Native routes** (`openai/*`, `openai-codex/*`, Azure OpenAI):
- Keep `reasoning: { effort: "none" }` intact when reasoning is explicitly disabled
- Keep `reasoning: { effort: "none" }` only for models that support the OpenAI `none` effort
- Omit disabled reasoning for models or proxies that reject `reasoning.effort: "none"`
- Default tool schemas to strict mode
- Attach hidden attribution headers on verified native hosts only
- Keep OpenAI-only request shaping (`service_tier`, `store`, reasoning-compat, prompt-cache hints)

View File

@@ -23,6 +23,7 @@ title: "Thinking Levels"
- Anthropic Claude 4.6 models default to `adaptive` when no explicit thinking level is set.
- Anthropic Claude Opus 4.7 does not default to adaptive thinking. Its API effort default remains provider-owned unless you explicitly set a thinking level.
- Anthropic Claude Opus 4.7 maps `/think xhigh` to adaptive thinking plus `output_config.effort: "xhigh"`, because `/think` is a thinking directive and `xhigh` is the Opus 4.7 effort setting.
- OpenAI GPT models map `/think` through model-specific Responses API effort support. `/think off` sends `reasoning.effort: "none"` only when the target model supports it; otherwise OpenClaw omits the disabled reasoning payload instead of sending an unsupported value.
- MiniMax (`minimax/*`) on the Anthropic-compatible streaming path defaults to `thinking: { type: "disabled" }` unless you explicitly set thinking in model params or request params. This avoids leaked `reasoning_content` deltas from MiniMax's non-native Anthropic stream format.
- Z.AI (`zai/*`) only supports binary thinking (`on`/`off`). Any non-`off` level is treated as `on` (mapped to `low`).
- Moonshot (`moonshot/*`) maps `/think off` to `thinking: { type: "disabled" }` and any non-`off` level to `thinking: { type: "enabled" }`. When thinking is enabled, Moonshot only accepts `tool_choice` `auto|none`; OpenClaw normalizes incompatible values to `auto`.

View File

@@ -1,4 +1,5 @@
import { normalizeLowercaseStringOrEmpty } from "../shared/string-coerce.js";
import { resolveOpenAIReasoningEffortForModel } from "./openai-reasoning-effort.js";
type OpenAIReasoningCompatModel = {
provider?: string | null;
@@ -6,7 +7,7 @@ type OpenAIReasoningCompatModel = {
compat?: unknown;
};
const OPENAI_MEDIUM_ONLY_REASONING_MODEL_IDS = new Set(["gpt-5.1-codex-mini", "gpt-5.4-mini"]);
const OPENAI_MEDIUM_ONLY_REASONING_MODEL_IDS = new Set(["gpt-5.1-codex-mini"]);
function readCompatReasoningEffortMap(compat: unknown): Record<string, string> {
if (!compat || typeof compat !== "object") {
@@ -48,9 +49,12 @@ export function mapOpenAIReasoningEffortForModel(params: {
fallbackMap?: Record<string, string>;
}): string | undefined {
const { effort } = params;
if (effort === undefined || effort === "none") {
if (effort === undefined) {
return effort;
}
const reasoningEffortMap = resolveOpenAIReasoningEffortMap(params.model, params.fallbackMap);
return reasoningEffortMap[effort] ?? effort;
return resolveOpenAIReasoningEffortForModel({
model: params.model,
effort,
fallbackMap: resolveOpenAIReasoningEffortMap(params.model, params.fallbackMap),
});
}

View File

@@ -1,7 +1,129 @@
export type OpenAIReasoningEffort = "minimal" | "low" | "medium" | "high" | "xhigh";
import { normalizeLowercaseStringOrEmpty } from "../shared/string-coerce.js";
export type OpenAIApiReasoningEffort = "none" | "low" | "medium" | "high" | "xhigh";
export type OpenAIReasoningEffort = "none" | "minimal" | "low" | "medium" | "high" | "xhigh";
export type OpenAIApiReasoningEffort = OpenAIReasoningEffort;
// Loosely-typed view of a model config entry. Fields are `unknown` because
// callers may pass registry objects of varying shape; every reader narrows
// with `typeof` checks before use (see resolveOpenAISupportedReasoningEfforts).
type OpenAIReasoningModel = {
  provider?: unknown;
  id?: unknown;
  api?: unknown;
  baseUrl?: unknown;
  compat?: unknown;
};
// Every effort value recognized anywhere in this module; used to validate
// compat-supplied lists. `satisfies` checks each entry against
// OpenAIApiReasoningEffort while keeping the tuple's literal types intact.
const ALL_OPENAI_REASONING_EFFORTS = [
  "none",
  "minimal",
  "low",
  "medium",
  "high",
  "xhigh",
] as const satisfies readonly OpenAIApiReasoningEffort[];

// Hand-maintained per-model-family support tables, consulted by
// resolveOpenAISupportedReasoningEfforts. NOTE(review): these are assumed to
// mirror each model's Responses API support matrix — verify against current
// OpenAI docs whenever new models or efforts ship.
const GPT_5_REASONING_EFFORTS = ["minimal", "low", "medium", "high"] as const;
const GPT_51_REASONING_EFFORTS = ["none", "low", "medium", "high"] as const;
const GPT_52_REASONING_EFFORTS = ["none", "low", "medium", "high", "xhigh"] as const;
const GPT_CODEX_REASONING_EFFORTS = ["low", "medium", "high", "xhigh"] as const;
const GPT_PRO_REASONING_EFFORTS = ["medium", "high", "xhigh"] as const;
// gpt-5-pro accepts exactly one effort.
const GPT_5_PRO_REASONING_EFFORTS = ["high"] as const;
const GPT_51_CODEX_MAX_REASONING_EFFORTS = ["none", "medium", "high", "xhigh"] as const;
// gpt-5.1-codex-mini accepts exactly one effort.
const GPT_51_CODEX_MINI_REASONING_EFFORTS = ["medium"] as const;
// Fallback for models/providers no other rule recognizes.
const GENERIC_REASONING_EFFORTS = ["low", "medium", "high"] as const;
// Lowercases the model id and strips a trailing `-YYYY-MM-DD`-shaped suffix
// (presumably a dated snapshot id, e.g. "gpt-5.1-2025-11-13" -> "gpt-5.1")
// so lookups match on the base model id. Null/undefined ids become "".
function normalizeModelId(id: string | null | undefined): string {
  return normalizeLowercaseStringOrEmpty(id ?? "").replace(/-\d{4}-\d{2}-\d{2}$/u, "");
}
export function normalizeOpenAIReasoningEffort(effort: string): string {
return effort === "minimal" ? "low" : effort;
return effort === "minimal" ? "minimal" : effort;
}
// Reads `supportedReasoningEfforts` from a loosely-typed compat object.
// Returns undefined — never an empty array — when the field is absent,
// malformed, or yields no valid entries, so callers fall back to the
// built-in per-model tables instead of treating the model as unsupported.
function readCompatReasoningEfforts(compat: unknown): OpenAIApiReasoningEffort[] | undefined {
  if (!compat || typeof compat !== "object") {
    return undefined;
  }
  const raw = (compat as { supportedReasoningEfforts?: unknown }).supportedReasoningEfforts;
  if (!Array.isArray(raw)) {
    return undefined;
  }
  // Unknown entries are silently dropped rather than failing the whole list.
  const supported = raw.filter((value): value is OpenAIApiReasoningEffort =>
    ALL_OPENAI_REASONING_EFFORTS.includes(value as OpenAIApiReasoningEffort),
  );
  return supported.length > 0 ? supported : undefined;
}
// Returns the reasoning efforts the model accepts, resolved in priority
// order: explicit compat override, exact model ids, model-family regexes,
// then a generic default.
export function resolveOpenAISupportedReasoningEfforts(
  model: OpenAIReasoningModel,
): readonly OpenAIApiReasoningEffort[] {
  // An explicit compat list always wins over the built-in tables.
  const compatEfforts = readCompatReasoningEfforts(model.compat);
  if (compatEfforts) {
    return compatEfforts;
  }
  const provider = normalizeLowercaseStringOrEmpty(
    typeof model.provider === "string" ? model.provider : "",
  );
  // normalizeModelId lowercases and strips dated suffixes, so the exact-id
  // comparisons below also match dated snapshot ids.
  const id = normalizeModelId(typeof model.id === "string" ? model.id : undefined);
  // Exact ids first: the broader codex regex below would otherwise swallow them.
  if (id === "gpt-5.1-codex-mini") {
    return GPT_51_CODEX_MINI_REASONING_EFFORTS;
  }
  if (id === "gpt-5.1-codex-max") {
    return GPT_51_CODEX_MAX_REASONING_EFFORTS;
  }
  // Any gpt-5*-codex model, or anything routed via the openai-codex provider.
  if (/^gpt-5(?:\.\d+)?-codex(?:-|$)/u.test(id) || provider === "openai-codex") {
    return GPT_CODEX_REASONING_EFFORTS;
  }
  if (id === "gpt-5-pro") {
    return GPT_5_PRO_REASONING_EFFORTS;
  }
  // gpt-5.2-pro through gpt-5.9-pro. NOTE(review): `[2-9]` will not match a
  // hypothetical gpt-5.10 family — confirm this is intended before minor
  // versions pass 9 (unmatched ids fall through to the generic set).
  if (/^gpt-5\.[2-9](?:\.\d+)?-pro(?:-|$)/u.test(id)) {
    return GPT_PRO_REASONING_EFFORTS;
  }
  if (/^gpt-5\.[2-9](?:\.\d+)?(?:-|$)/u.test(id)) {
    return GPT_52_REASONING_EFFORTS;
  }
  if (/^gpt-5\.1(?:-|$)/u.test(id)) {
    return GPT_51_REASONING_EFFORTS;
  }
  if (/^gpt-5(?:-|$)/u.test(id)) {
    return GPT_5_REASONING_EFFORTS;
  }
  // Unknown model/provider: conservative common set.
  return GENERIC_REASONING_EFFORTS;
}
// True when `effort` (after normalization) is in the model's supported set.
export function supportsOpenAIReasoningEffort(
  model: OpenAIReasoningModel,
  effort: string,
): boolean {
  const candidate = normalizeOpenAIReasoningEffort(effort) as OpenAIApiReasoningEffort;
  const supported = resolveOpenAISupportedReasoningEfforts(model);
  return supported.includes(candidate);
}
// Resolves the effort value to actually send for `model`, or `undefined` to
// omit the reasoning payload entirely. The fallback ladder below is
// order-sensitive: exact support wins, then "none" maps to omission, then
// nearest-neighbor substitutions, then the first enabled supported effort.
export function resolveOpenAIReasoningEffortForModel(params: {
  model: OpenAIReasoningModel;
  effort: string;
  fallbackMap?: Record<string, string>;
}): OpenAIApiReasoningEffort | undefined {
  // Normalize, apply any compat remap, then normalize the remapped value as
  // well — the map holds arbitrary strings that may themselves need it.
  const requested = normalizeOpenAIReasoningEffort(params.effort);
  const mapped = params.fallbackMap?.[requested] ?? requested;
  const normalized = normalizeOpenAIReasoningEffort(mapped);
  const supported = resolveOpenAISupportedReasoningEfforts(params.model);
  if (supported.includes(normalized as OpenAIApiReasoningEffort)) {
    return normalized as OpenAIApiReasoningEffort;
  }
  // "none" has no substitute: when the model rejects effort "none", omitting
  // the payload is the only safe way to express "reasoning disabled".
  if (requested === "none") {
    return undefined;
  }
  // Unsupported enabled efforts get the nearest supported neighbor.
  if (requested === "minimal" && supported.includes("low")) {
    return "low";
  }
  if ((requested === "minimal" || requested === "low") && supported.includes("medium")) {
    return "medium";
  }
  if (requested === "xhigh" && supported.includes("high")) {
    return "high";
  }
  // Last resort: first enabled (non-"none") supported effort. May still be
  // undefined if the model's set contains only "none".
  return supported.find((effort) => effort !== "none");
}

View File

@@ -58,7 +58,7 @@ describe("openai responses payload policy", () => {
expect(payload).not.toHaveProperty("prompt_cache_retention");
});
it("keeps disabled reasoning payloads on native OpenAI responses routes", () => {
it("keeps disabled reasoning payloads on native OpenAI responses models that support none", () => {
const payload = {
reasoning: {
effort: "none",
@@ -71,6 +71,7 @@ describe("openai responses payload policy", () => {
{
api: "openai-responses",
provider: "openai",
id: "gpt-5.4",
baseUrl: "https://api.openai.com/v1",
},
{ storeMode: "disable" },
@@ -85,6 +86,31 @@ describe("openai responses payload policy", () => {
});
});
it("strips disabled reasoning payloads on native OpenAI responses models that do not support none", () => {
const payload = {
reasoning: {
effort: "none",
},
} satisfies Record<string, unknown>;
applyOpenAIResponsesPayloadPolicy(
payload,
resolveOpenAIResponsesPayloadPolicy(
{
api: "openai-responses",
provider: "openai",
id: "gpt-5",
baseUrl: "https://api.openai.com/v1",
},
{ storeMode: "disable" },
),
);
expect(payload).toEqual({
store: false,
});
});
it("strips disabled reasoning payloads for proxy-like OpenAI responses routes", () => {
const payload = {
reasoning: {

View File

@@ -1,10 +1,12 @@
import { readStringValue } from "../shared/string-coerce.js";
import { supportsOpenAIReasoningEffort } from "./openai-reasoning-effort.js";
import { isOpenAIResponsesApi } from "./provider-attribution.js";
import { resolveProviderRequestPolicyConfig } from "./provider-request-config.js";
type OpenAIResponsesPayloadModel = {
api?: unknown;
baseUrl?: unknown;
id?: unknown;
provider?: unknown;
contextWindow?: unknown;
compat?: { supportsStore?: boolean };
@@ -76,8 +78,8 @@ function stripDisabledOpenAIReasoningPayload(payloadObj: Record<string, unknown>
return;
}
// Proxy/OpenAI-compat routes can reject `reasoning.effort: "none"`. Treat the
// disabled effort as "reasoning omitted" instead of forwarding an unsupported value.
// Some Responses models and OpenAI-compatible proxies reject
// `reasoning.effort: "none"`. Treat unsupported disabled effort as omitted.
const reasoningObj = reasoning as Record<string, unknown>;
if (reasoningObj.effort === "none") {
delete payloadObj.reasoning;
@@ -108,6 +110,9 @@ export function resolveOpenAIResponsesPayloadPolicy(
? true
: undefined;
const isResponsesApi = isOpenAIResponsesApi(readStringValue(model.api));
const shouldStripDisabledReasoningPayload =
isResponsesApi &&
(!capabilities.usesKnownNativeOpenAIRoute || !supportsOpenAIReasoningEffort(model, "none"));
return {
allowsServiceTier: capabilities.allowsOpenAIServiceTier,
@@ -115,7 +120,7 @@ export function resolveOpenAIResponsesPayloadPolicy(
parsePositiveInteger(options.extraParams?.responsesCompactThreshold) ??
resolveOpenAIResponsesCompactThreshold(model),
explicitStore,
shouldStripDisabledReasoningPayload: isResponsesApi && !capabilities.usesKnownNativeOpenAIRoute,
shouldStripDisabledReasoningPayload,
shouldStripPromptCache:
options.enablePromptCacheStripping === true && capabilities.shouldStripResponsesPromptCache,
shouldStripStore:

View File

@@ -566,6 +566,62 @@ describe("openai transport stream", () => {
expect(params.reasoning).toEqual({ effort: "high", summary: "auto" });
});
it("uses disabled OpenAI Responses reasoning when the model supports none", () => {
const params = buildOpenAIResponsesParams(
{
id: "gpt-5.4",
name: "GPT-5.4",
api: "openai-responses",
provider: "openai",
baseUrl: "https://api.openai.com/v1",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"openai-responses">,
{
systemPrompt: "system",
messages: [],
tools: [],
} as never,
{
reasoningEffort: "none",
} as never,
) as { reasoning?: unknown; include?: unknown };
expect(params.reasoning).toEqual({ effort: "none" });
expect(params).not.toHaveProperty("include");
});
it("omits disabled OpenAI Responses reasoning when the model does not support none", () => {
const params = buildOpenAIResponsesParams(
{
id: "gpt-5",
name: "GPT-5",
api: "openai-responses",
provider: "openai",
baseUrl: "https://api.openai.com/v1",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"openai-responses">,
{
systemPrompt: "system",
messages: [],
tools: [],
} as never,
{
reasoningEffort: "none",
} as never,
) as { reasoning?: unknown; include?: unknown };
expect(params).not.toHaveProperty("reasoning");
expect(params).not.toHaveProperty("include");
});
it("maps minimal shared reasoning to low for OpenAI Responses", () => {
const params = buildOpenAIResponsesParams(
{

View File

@@ -24,12 +24,10 @@ import { resolveProviderTransportTurnStateWithPlugin } from "../plugins/provider
import { buildCopilotDynamicHeaders, hasCopilotVisionInput } from "./copilot-dynamic-headers.js";
import { detectOpenAICompletionsCompat } from "./openai-completions-compat.js";
import { flattenCompletionMessagesToStringContent } from "./openai-completions-string-content.js";
import {
mapOpenAIReasoningEffortForModel,
resolveOpenAIReasoningEffortMap,
} from "./openai-reasoning-compat.js";
import { resolveOpenAIReasoningEffortMap } from "./openai-reasoning-compat.js";
import {
normalizeOpenAIReasoningEffort,
resolveOpenAIReasoningEffortForModel,
type OpenAIApiReasoningEffort,
type OpenAIReasoningEffort,
} from "./openai-reasoning-effort.js";
@@ -748,24 +746,10 @@ function getPromptCacheRetention(
function resolveOpenAIReasoningEffort(
options: OpenAIResponsesOptions | undefined,
): Exclude<OpenAIApiReasoningEffort, "none"> {
): OpenAIApiReasoningEffort {
return normalizeOpenAIReasoningEffort(
options?.reasoningEffort ?? options?.reasoning ?? "high",
) as Exclude<OpenAIApiReasoningEffort, "none">;
}
function coerceOpenAIApiReasoningEffort(effort: string): OpenAIApiReasoningEffort {
const normalized = normalizeOpenAIReasoningEffort(effort);
switch (normalized) {
case "none":
case "low":
case "medium":
case "high":
case "xhigh":
return normalized;
default:
return "high";
}
) as OpenAIApiReasoningEffort;
}
export function buildOpenAIResponsesParams(
@@ -814,22 +798,33 @@ export function buildOpenAIResponsesParams(
if (model.reasoning) {
if (options?.reasoningEffort || options?.reasoning || options?.reasoningSummary) {
const requestedReasoningEffort = resolveOpenAIReasoningEffort(options);
const reasoningEffort = coerceOpenAIApiReasoningEffort(
mapOpenAIReasoningEffortForModel({
model,
effort: requestedReasoningEffort,
}) ?? requestedReasoningEffort,
);
const normalizedReasoningEffort: Exclude<OpenAIApiReasoningEffort, "none"> =
reasoningEffort === "none" ? "high" : reasoningEffort;
params.reasoning = {
effort: normalizedReasoningEffort,
summary: options?.reasoningSummary || "auto",
};
params.include = ["reasoning.encrypted_content"];
const reasoningEffort = resolveOpenAIReasoningEffortForModel({
model,
effort: requestedReasoningEffort,
});
if (reasoningEffort) {
params.reasoning = {
effort: reasoningEffort,
...(reasoningEffort === "none" ? {} : { summary: options?.reasoningSummary || "auto" }),
};
if (reasoningEffort !== "none") {
params.include = ["reasoning.encrypted_content"];
}
}
} else if (model.provider !== "github-copilot") {
params.reasoning = { effort: "high", summary: "auto" };
params.include = ["reasoning.encrypted_content"];
const reasoningEffort = resolveOpenAIReasoningEffortForModel({
model,
effort: "high",
});
if (reasoningEffort) {
params.reasoning = {
effort: reasoningEffort,
...(reasoningEffort === "none" ? {} : { summary: "auto" }),
};
if (reasoningEffort !== "none") {
params.include = ["reasoning.encrypted_content"];
}
}
}
}
applyOpenAIResponsesPayloadPolicy(params as Record<string, unknown>, payloadPolicy);
@@ -1437,29 +1432,18 @@ type OpenAIResponsesRequestParams = {
service_tier?: ResponseCreateParamsStreaming["service_tier"];
tools?: FunctionTool[];
reasoning?:
| { effort: "none" }
| { effort: OpenAIApiReasoningEffort }
| {
effort: Exclude<OpenAIApiReasoningEffort, "none">;
effort: OpenAIApiReasoningEffort;
summary: NonNullable<OpenAIResponsesOptions["reasoningSummary"]>;
};
include?: string[];
};
function mapReasoningEffort(effort: string, reasoningEffortMap: Record<string, string>): string {
return reasoningEffortMap[effort] ?? effort;
}
function resolveOpenAICompletionsReasoningEffort(options: OpenAICompletionsOptions | undefined) {
return options?.reasoningEffort ?? options?.reasoning ?? "high";
}
function mapNativeOpenAIReasoningEffort(
effort: string,
reasoningEffortMap: Record<string, string>,
): string {
return normalizeOpenAIReasoningEffort(mapReasoningEffort(effort, reasoningEffortMap));
}
function convertTools(
tools: NonNullable<Context["tools"]>,
compat: ReturnType<typeof getCompat>,
@@ -1526,15 +1510,27 @@ export function buildOpenAICompletionsParams(
params.tools = [];
}
const completionsReasoningEffort = resolveOpenAICompletionsReasoningEffort(options);
if (compat.thinkingFormat === "openrouter" && model.reasoning && completionsReasoningEffort) {
const resolvedCompletionsReasoningEffort = completionsReasoningEffort
? resolveOpenAIReasoningEffortForModel({
model,
effort: completionsReasoningEffort,
fallbackMap: compat.reasoningEffortMap,
})
: undefined;
if (
compat.thinkingFormat === "openrouter" &&
model.reasoning &&
resolvedCompletionsReasoningEffort
) {
params.reasoning = {
effort: mapReasoningEffort(completionsReasoningEffort, compat.reasoningEffortMap),
effort: resolvedCompletionsReasoningEffort,
};
} else if (completionsReasoningEffort && model.reasoning && compat.supportsReasoningEffort) {
params.reasoning_effort = mapNativeOpenAIReasoningEffort(
completionsReasoningEffort,
compat.reasoningEffortMap,
);
} else if (
resolvedCompletionsReasoningEffort &&
model.reasoning &&
compat.supportsReasoningEffort
) {
params.reasoning_effort = resolvedCompletionsReasoningEffort;
}
return params;
}

View File

@@ -79,12 +79,12 @@ export function buildOpenAIWebSocketResponseCreatePayload(params: {
streamOpts?.reasoning ??
(params.model.reasoning ? "high" : undefined),
});
if (reasoningEffort !== "none" && (reasoningEffort || streamOpts?.reasoningSummary)) {
if (reasoningEffort || streamOpts?.reasoningSummary) {
const reasoning: { effort?: string; summary?: string } = {};
if (reasoningEffort !== undefined) {
reasoning.effort = normalizeOpenAIReasoningEffort(reasoningEffort);
}
if (streamOpts?.reasoningSummary !== undefined) {
if (reasoningEffort !== "none" && streamOpts?.reasoningSummary !== undefined) {
reasoning.summary = streamOpts.reasoningSummary;
}
extraParams.reasoning = reasoning;

View File

@@ -3191,7 +3191,7 @@ describe("createOpenAIWebSocketStreamFn", () => {
expect(sent.reasoning).toEqual({ effort: "medium" });
});
it("omits response.create reasoning when reasoningEffort is none", async () => {
it("sends response.create reasoning none when the model supports it", async () => {
const streamFn = createOpenAIWebSocketStreamFn("sk-test", "sess-reason-none");
const opts = { reasoningEffort: "none" };
const stream = streamFn(
@@ -3218,7 +3218,7 @@ describe("createOpenAIWebSocketStreamFn", () => {
});
const sent = MockManager.lastInstance!.sentEvents[0] as Record<string, unknown>;
expect(sent.type).toBe("response.create");
expect(sent).not.toHaveProperty("reasoning");
expect(sent.reasoning).toEqual({ effort: "none" });
});
it("applies onPayload mutations before sending response.create", async () => {

View File

@@ -658,7 +658,7 @@ describe("applyExtraParamsToAgent", () => {
expect(payload).not.toHaveProperty("reasoning_effort");
});
it("keeps disabled reasoning payloads for native OpenAI responses routes", () => {
it("strips disabled reasoning payloads for native OpenAI responses models that do not support none", () => {
const payloads: Record<string, unknown>[] = [];
const baseStreamFn: StreamFn = (_model, _context, options) => {
const payload: Record<string, unknown> = {
@@ -685,13 +685,12 @@ describe("applyExtraParamsToAgent", () => {
expect(payloads[0]).toEqual({
context_management: [{ type: "compaction", compact_threshold: 80000 }],
parallel_tool_calls: true,
reasoning: { effort: "none", summary: "auto" },
store: true,
text: { verbosity: "low" },
});
});
it("keeps disabled reasoning payloads for proxied OpenAI responses routes", () => {
it("strips disabled reasoning payloads for proxied OpenAI responses routes", () => {
const payloads: Record<string, unknown>[] = [];
const baseStreamFn: StreamFn = (_model, _context, options) => {
const payload: Record<string, unknown> = {
@@ -2158,7 +2157,7 @@ describe("applyExtraParamsToAgent", () => {
expect(payload.store).toBe(true);
});
it("keeps disabled OpenAI reasoning payloads on native Responses routes", () => {
it("strips disabled OpenAI reasoning payloads on native Responses models that do not support none", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "openai",
applyModelId: "gpt-5-mini",
@@ -2173,10 +2172,10 @@ describe("applyExtraParamsToAgent", () => {
reasoning: { effort: "none" },
},
});
expect(payload.reasoning).toEqual({ effort: "none" });
expect(payload).not.toHaveProperty("reasoning");
});
it("keeps disabled Azure OpenAI Responses reasoning payloads", () => {
it("strips disabled Azure OpenAI Responses reasoning payloads for models that do not support none", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "azure-openai-responses",
applyModelId: "gpt-5-mini",
@@ -2191,7 +2190,7 @@ describe("applyExtraParamsToAgent", () => {
reasoning: { effort: "none" },
},
});
expect(payload.reasoning).toEqual({ effort: "none" });
expect(payload).not.toHaveProperty("reasoning");
});
it("injects configured OpenAI service_tier into Responses payloads", () => {