fix: align native openai transport defaults

This commit is contained in:
Peter Steinberger
2026-04-04 01:19:00 +01:00
parent bc16b9dccf
commit 628c71103e
12 changed files with 245 additions and 23 deletions

View File

@@ -33,7 +33,7 @@ import { wrapOpenAICodexProviderStream } from "./stream-hooks.js";
const PROVIDER_ID = "openai-codex";
const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api";
const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
-const OPENAI_CODEX_GPT_54_CONTEXT_TOKENS = 272_000;
+const OPENAI_CODEX_GPT_54_CONTEXT_TOKENS = 400_000;
const OPENAI_CODEX_GPT_54_MAX_TOKENS = 128_000;
const OPENAI_CODEX_GPT_54_COST = {
input: 2.5,

View File

@@ -163,6 +163,39 @@ describe("buildOpenAIProvider", () => {
);
});
+it("keeps GPT-5.4 family metadata aligned with native OpenAI docs", () => {
+const provider = buildOpenAIProvider();
+const codexProvider = buildOpenAICodexProviderPlugin();
+const openaiModel = provider.resolveDynamicModel?.({
+provider: "openai",
+modelId: "gpt-5.4",
+modelRegistry: { find: () => null },
+} as never);
+const codexModel = codexProvider.resolveDynamicModel?.({
+provider: "openai-codex",
+modelId: "gpt-5.4",
+modelRegistry: { find: () => null },
+} as never);
+expect(openaiModel).toMatchObject({
+provider: "openai",
+id: "gpt-5.4",
+api: "openai-responses",
+baseUrl: "https://api.openai.com/v1",
+contextWindow: 1_050_000,
+maxTokens: 128_000,
+});
+expect(codexModel).toMatchObject({
+provider: "openai-codex",
+id: "gpt-5.4",
+api: "openai-codex-responses",
+baseUrl: "https://chatgpt.com/backend-api",
+contextWindow: 400_000,
+maxTokens: 128_000,
+});
+});
it("keeps modern live selection on OpenAI 5.2+ and Codex 5.2+", () => {
const provider = buildOpenAIProvider();
const codexProvider = buildOpenAICodexProviderPlugin();
@@ -289,11 +322,11 @@ describe("buildOpenAIProvider", () => {
expect(extraParams).toMatchObject({
transport: "auto",
-openaiWsWarmup: false,
+openaiWsWarmup: true,
});
expect(result.payload.service_tier).toBe("priority");
expect(result.payload.text).toEqual({ verbosity: "low" });
-expect(result.payload).not.toHaveProperty("reasoning");
+expect(result.payload.reasoning).toEqual({ effort: "none" });
});
it("owns Azure OpenAI reasoning compatibility without forcing OpenAI transport defaults", () => {
@@ -315,7 +348,7 @@ describe("buildOpenAIProvider", () => {
expect(result.options?.transport).toBeUndefined();
expect(result.options?.openaiWsWarmup).toBeUndefined();
-expect(result.payload).not.toHaveProperty("reasoning");
+expect(result.payload.reasoning).toEqual({ effort: "none" });
});
it("owns Codex wrapper composition for responses payloads", () => {

View File

@@ -24,7 +24,7 @@ const OPENAI_GPT_54_MODEL_ID = "gpt-5.4";
const OPENAI_GPT_54_PRO_MODEL_ID = "gpt-5.4-pro";
const OPENAI_GPT_54_MINI_MODEL_ID = "gpt-5.4-mini";
const OPENAI_GPT_54_NANO_MODEL_ID = "gpt-5.4-nano";
-const OPENAI_GPT_54_CONTEXT_TOKENS = 272_000;
+const OPENAI_GPT_54_CONTEXT_TOKENS = 1_050_000;
const OPENAI_GPT_54_PRO_CONTEXT_TOKENS = 1_050_000;
const OPENAI_GPT_54_MINI_CONTEXT_TOKENS = 400_000;
const OPENAI_GPT_54_NANO_CONTEXT_TOKENS = 400_000;
@@ -248,7 +248,7 @@ export function buildOpenAIProvider(): ProviderPlugin {
return {
...ctx.extraParams,
...(hasSupportedTransport ? {} : { transport: "auto" }),
-...(hasExplicitWarmup ? {} : { openaiWsWarmup: false }),
+...(hasExplicitWarmup ? {} : { openaiWsWarmup: true }),
};
},
wrapStreamFn: (ctx) =>