test: cover OpenAI GPT xhigh reasoning

This commit is contained in:
Peter Steinberger
2026-04-24 01:30:44 +01:00
parent f04a3dced0
commit d4d307e07a
3 changed files with 49 additions and 1 deletion

View File

@@ -13,7 +13,7 @@ title: "Thinking levels"
- low → “think hard”
- medium → “think harder”
- high → “ultrathink” (max budget)
- xhigh → “ultrathink+” (GPT-5.2 + Codex models and Anthropic Claude Opus 4.7 effort)
- xhigh → “ultrathink+” (GPT-5.2+ and Codex models, plus Anthropic Claude Opus 4.7 effort)
- adaptive → provider-managed adaptive thinking (supported for Claude 4.6 on Anthropic/Bedrock and Anthropic Claude Opus 4.7)
- max → provider max reasoning (currently Anthropic Claude Opus 4.7)
- `x-high`, `x_high`, `extra-high`, `extra high`, and `extra_high` map to `xhigh`.

View File

@@ -0,0 +1,27 @@
import { describe, expect, it } from "vitest";
import {
resolveOpenAIReasoningEffortForModel,
resolveOpenAISupportedReasoningEfforts,
} from "./openai-reasoning-effort.js";
// Verifies that the "xhigh" reasoning effort is advertised and resolved
// unchanged for OpenAI-family models instead of being downgraded.
describe("OpenAI reasoning effort support", () => {
  const xhighCapableModels = [
    { provider: "openai", id: "gpt-5.5" },
    { provider: "openai-codex", id: "gpt-5.5" },
  ];

  for (const model of xhighCapableModels) {
    it(`preserves xhigh for ${model.provider}/${model.id}`, () => {
      // The model must both list "xhigh" as supported and resolve it as-is.
      const supported = resolveOpenAISupportedReasoningEfforts(model);
      expect(supported).toContain("xhigh");
      const resolved = resolveOpenAIReasoningEffortForModel({ model, effort: "xhigh" });
      expect(resolved).toBe("xhigh");
    });
  }

  it("does not downgrade xhigh when Pi compat metadata declares it explicitly", () => {
    // Compat metadata explicitly lists "xhigh", so resolution must honor it.
    const model = {
      provider: "openai-codex",
      id: "gpt-5.5",
      compat: {
        supportedReasoningEfforts: ["low", "medium", "high", "xhigh"],
      },
    };
    const resolved = resolveOpenAIReasoningEffortForModel({ model, effort: "xhigh" });
    expect(resolved).toBe("xhigh");
  });
});

View File

@@ -158,4 +158,25 @@ describe("createOpenAIThinkingLevelWrapper", () => {
expect(payloads[0]?.reasoning).toEqual({ effort: level });
}
});
// Both Responses-style APIs should pass "xhigh" through to the payload
// rather than replacing it with a lower effort. The `as const` tuple keeps
// each case's `api` as a literal type so `Model<typeof model.api>` resolves
// per-case.
it.each([
{
api: "openai-responses",
provider: "openai",
id: "gpt-5.5",
},
{
api: "openai-codex-responses",
provider: "openai-codex",
id: "gpt-5.5",
},
] as const)("preserves xhigh for $provider/$id", (model) => {
// Captured payload starts at effort "high"; the wrapper is expected to
// overwrite it with the requested "xhigh".
const { baseStreamFn, payloads } = createPayloadCapture({
initialReasoning: { effort: "high" },
});
const wrapped = createOpenAIThinkingLevelWrapper(baseStreamFn, "xhigh");
// Fire-and-forget call: only the synchronously captured payload is asserted.
void wrapped(model as Model<typeof model.api>, { messages: [] }, {});
expect(payloads[0]?.reasoning).toEqual({ effort: "xhigh" });
});
});