feat(codex): add gpt-5.4-pro forward compat (#66453)

* feat(openai-codex): add gpt-5.4-pro forward-compat #63404

* feat(openai-codex): add gpt-5.4-pro forward-compat #63404

* openai-codex: use patch.cost when forward-compat falls back to normalizeModelCompat

* feat(codex): add gpt-5.4-pro forward compat

* fix(codex): reuse gpt-5.4 fallback for gpt-5.4-pro

---------

Co-authored-by: jepson-liu <jepsonliu@gmail.com>
This commit is contained in:
Vincent Koc
2026-04-14 11:05:24 +01:00
committed by GitHub
parent 8820a43818
commit 5a5ca6d62c
12 changed files with 235 additions and 13 deletions

View File

@@ -6,6 +6,8 @@ Docs: https://docs.openclaw.ai
### Changes
- OpenAI Codex/models: add forward-compat support for `gpt-5.4-pro`, including Codex pricing/limits and list/status visibility before the upstream catalog catches up. (#66453) Thanks @jepson-liu.
### Fixes
- Agents/Ollama: forward the configured embedded-run timeout into the global undici stream timeout tuning, so slow local Ollama runs honor the operator-set run timeout instead of being cut off by the default stream timeout. (#63175) Thanks @mindcraftreader and @vincentkoc.

View File

@@ -145,7 +145,9 @@ describe("discord native /think autocomplete", () => {
providerThinkingMocks.resolveProviderBinaryThinking.mockReturnValue(undefined);
providerThinkingMocks.resolveProviderDefaultThinkingLevel.mockReturnValue(undefined);
providerThinkingMocks.resolveProviderXHighThinking.mockImplementation(({ provider, context }) =>
provider === "openai-codex" && context.modelId === "gpt-5.4" ? true : undefined,
provider === "openai-codex" && ["gpt-5.4", "gpt-5.4-pro"].includes(context.modelId)
? true
: undefined,
);
buildModelsProviderDataMock.mockResolvedValue({
byProvider: new Map<string, Set<string>>(),
@@ -172,7 +174,9 @@ describe("discord native /think autocomplete", () => {
providerThinkingMocks.resolveProviderDefaultThinkingLevel.mockReturnValue(undefined);
providerThinkingMocks.resolveProviderXHighThinking.mockReset();
providerThinkingMocks.resolveProviderXHighThinking.mockImplementation(({ provider, context }) =>
provider === "openai-codex" && context.modelId === "gpt-5.4" ? true : undefined,
provider === "openai-codex" && ["gpt-5.4", "gpt-5.4-pro"].includes(context.modelId)
? true
: undefined,
);
fs.mkdirSync(path.dirname(STORE_PATH), { recursive: true });
fs.writeFileSync(

View File

@@ -134,6 +134,81 @@ describe("openai codex provider", () => {
});
});
it("resolves gpt-5.4-pro with pro pricing and codex-sized limits", () => {
const provider = buildOpenAICodexProviderPlugin();
const model = provider.resolveDynamicModel?.({
provider: "openai-codex",
modelId: "gpt-5.4-pro",
modelRegistry: {
find: (providerId: string, modelId: string) => {
if (providerId === "openai-codex" && modelId === "gpt-5.3-codex") {
return {
id: "gpt-5.3-codex",
name: "gpt-5.3-codex",
provider: "openai-codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
input: ["text", "image"] as const,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 272_000,
maxTokens: 128_000,
};
}
return undefined;
},
} as never,
});
expect(model).toMatchObject({
id: "gpt-5.4-pro",
contextWindow: 1_050_000,
contextTokens: 272_000,
maxTokens: 128_000,
cost: { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 },
});
});
it("resolves gpt-5.4-pro from a gpt-5.4 runtime template when legacy codex rows are absent", () => {
const provider = buildOpenAICodexProviderPlugin();
const model = provider.resolveDynamicModel?.({
provider: "openai-codex",
modelId: "gpt-5.4-pro",
modelRegistry: {
find: (providerId: string, modelId: string) => {
if (providerId === "openai-codex" && modelId === "gpt-5.4") {
return {
id: "gpt-5.4",
name: "gpt-5.4",
provider: "openai-codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
input: ["text", "image"] as const,
cost: { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 },
contextWindow: 1_050_000,
contextTokens: 272_000,
maxTokens: 128_000,
};
}
return undefined;
},
} as never,
});
expect(model).toMatchObject({
id: "gpt-5.4-pro",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
contextWindow: 1_050_000,
contextTokens: 272_000,
maxTokens: 128_000,
cost: { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 },
});
});
it("resolves the legacy gpt-5.4-codex alias to canonical gpt-5.4", () => {
const provider = buildOpenAICodexProviderPlugin();
@@ -228,12 +303,49 @@ describe("openai codex provider", () => {
id: "gpt-5.4",
contextWindow: 1_050_000,
contextTokens: 272_000,
cost: { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 },
}),
);
expect(entries).toContainEqual(
expect.objectContaining({
id: "gpt-5.4-pro",
contextWindow: 1_050_000,
contextTokens: 272_000,
cost: { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 },
}),
);
expect(entries).toContainEqual(
expect.objectContaining({
id: "gpt-5.4-mini",
contextWindow: 272_000,
cost: { input: 0.75, output: 4.5, cacheRead: 0.075, cacheWrite: 0 },
}),
);
});
it("augments gpt-5.4-pro from catalog gpt-5.4 when legacy codex rows are absent", () => {
const provider = buildOpenAICodexProviderPlugin();
const entries = provider.augmentModelCatalog?.({
env: process.env,
entries: [
{
id: "gpt-5.4",
name: "gpt-5.4",
provider: "openai-codex",
reasoning: true,
input: ["text", "image"],
contextWindow: 272_000,
},
],
} as never);
expect(entries).toContainEqual(
expect.objectContaining({
id: "gpt-5.4-pro",
contextWindow: 1_050_000,
contextTokens: 272_000,
cost: { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 },
}),
);
});

View File

@@ -42,6 +42,7 @@ const PROVIDER_ID = "openai-codex";
const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api";
const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
const OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID = "gpt-5.4-codex";
const OPENAI_CODEX_GPT_54_PRO_MODEL_ID = "gpt-5.4-pro";
const OPENAI_CODEX_GPT_54_MINI_MODEL_ID = "gpt-5.4-mini";
const OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS = 1_050_000;
const OPENAI_CODEX_GPT_54_DEFAULT_CONTEXT_TOKENS = 272_000;
@@ -53,6 +54,12 @@ const OPENAI_CODEX_GPT_54_COST = {
cacheRead: 0.25,
cacheWrite: 0,
} as const;
const OPENAI_CODEX_GPT_54_PRO_COST = {
input: 30,
output: 180,
cacheRead: 0,
cacheWrite: 0,
} as const;
const OPENAI_CODEX_GPT_54_MINI_COST = {
input: 0.75,
output: 4.5,
@@ -60,6 +67,11 @@ const OPENAI_CODEX_GPT_54_MINI_COST = {
cacheWrite: 0,
} as const;
const OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const;
/** Legacy codex rows first; fall back to catalog `gpt-5.4` when the API omits 5.3/5.2. */
const OPENAI_CODEX_GPT_54_CATALOG_SYNTH_TEMPLATE_MODEL_IDS = [
...OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS,
OPENAI_CODEX_GPT_54_MODEL_ID,
] as const;
const OPENAI_CODEX_GPT_54_MINI_TEMPLATE_MODEL_IDS = [
OPENAI_CODEX_GPT_54_MODEL_ID,
"gpt-5.1-codex-mini",
@@ -72,6 +84,7 @@ const OPENAI_CODEX_GPT_53_SPARK_MAX_TOKENS = 128_000;
const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
const OPENAI_CODEX_XHIGH_MODEL_IDS = [
OPENAI_CODEX_GPT_54_MODEL_ID,
OPENAI_CODEX_GPT_54_PRO_MODEL_ID,
OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
OPENAI_CODEX_GPT_53_MODEL_ID,
OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,
@@ -80,6 +93,7 @@ const OPENAI_CODEX_XHIGH_MODEL_IDS = [
] as const;
const OPENAI_CODEX_MODERN_MODEL_IDS = [
OPENAI_CODEX_GPT_54_MODEL_ID,
OPENAI_CODEX_GPT_54_PRO_MODEL_ID,
OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
"gpt-5.2",
"gpt-5.2-codex",
@@ -128,13 +142,21 @@ function resolveCodexForwardCompatModel(ctx: ProviderResolveDynamicModelContext)
let templateIds: readonly string[];
let patch: Parameters<typeof cloneFirstTemplateModel>[0]["patch"];
if (lower === OPENAI_CODEX_GPT_54_MODEL_ID || lower === OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID) {
templateIds = OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS;
templateIds = OPENAI_CODEX_GPT_54_CATALOG_SYNTH_TEMPLATE_MODEL_IDS;
patch = {
contextWindow: OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS,
contextTokens: OPENAI_CODEX_GPT_54_DEFAULT_CONTEXT_TOKENS,
maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
cost: OPENAI_CODEX_GPT_54_COST,
};
} else if (lower === OPENAI_CODEX_GPT_54_PRO_MODEL_ID) {
templateIds = OPENAI_CODEX_GPT_54_CATALOG_SYNTH_TEMPLATE_MODEL_IDS;
patch = {
contextWindow: OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS,
contextTokens: OPENAI_CODEX_GPT_54_DEFAULT_CONTEXT_TOKENS,
maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
cost: OPENAI_CODEX_GPT_54_PRO_COST,
};
} else if (lower === OPENAI_CODEX_GPT_54_MINI_MODEL_ID) {
templateIds = OPENAI_CODEX_GPT_54_MINI_TEMPLATE_MODEL_IDS;
patch = {
@@ -306,9 +328,13 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
supportsXHighThinking: ({ modelId }) =>
matchesExactOrPrefix(modelId, OPENAI_CODEX_XHIGH_MODEL_IDS),
isModernModelRef: ({ modelId }) => matchesExactOrPrefix(modelId, OPENAI_CODEX_MODERN_MODEL_IDS),
preferRuntimeResolvedModel: (ctx) =>
normalizeProviderId(ctx.provider) === PROVIDER_ID &&
ctx.modelId.trim().toLowerCase() === OPENAI_CODEX_GPT_54_MODEL_ID,
preferRuntimeResolvedModel: (ctx) => {
if (normalizeProviderId(ctx.provider) !== PROVIDER_ID) {
return false;
}
const id = ctx.modelId.trim().toLowerCase();
return id === OPENAI_CODEX_GPT_54_MODEL_ID || id === OPENAI_CODEX_GPT_54_PRO_MODEL_ID;
},
buildReplayPolicy: buildOpenAIReplayPolicy,
prepareExtraParams: (ctx) => {
const transport = ctx.extraParams?.transport;
@@ -338,7 +364,7 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
const gpt54Template = findCatalogTemplate({
entries: ctx.entries,
providerId: PROVIDER_ID,
templateIds: OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS,
templateIds: OPENAI_CODEX_GPT_54_CATALOG_SYNTH_TEMPLATE_MODEL_IDS,
});
const gpt54MiniTemplate = findCatalogTemplate({
entries: ctx.entries,
@@ -357,12 +383,22 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
input: ["text", "image"],
contextWindow: OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS,
contextTokens: OPENAI_CODEX_GPT_54_DEFAULT_CONTEXT_TOKENS,
cost: OPENAI_CODEX_GPT_54_COST,
}),
buildOpenAISyntheticCatalogEntry(gpt54Template, {
id: OPENAI_CODEX_GPT_54_PRO_MODEL_ID,
reasoning: true,
input: ["text", "image"],
contextWindow: OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS,
contextTokens: OPENAI_CODEX_GPT_54_DEFAULT_CONTEXT_TOKENS,
cost: OPENAI_CODEX_GPT_54_PRO_COST,
}),
buildOpenAISyntheticCatalogEntry(gpt54MiniTemplate, {
id: OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
reasoning: true,
input: ["text", "image"],
contextWindow: OPENAI_CODEX_GPT_54_MINI_CONTEXT_TOKENS,
cost: OPENAI_CODEX_GPT_54_MINI_COST,
}),
buildOpenAISyntheticCatalogEntry(sparkTemplate, {
id: OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,

View File

@@ -6,6 +6,13 @@ import {
} from "openclaw/plugin-sdk/provider-model-shared";
import { normalizeOptionalString } from "openclaw/plugin-sdk/text-runtime";
type SyntheticOpenAIModelCatalogCost = {
input: number;
output: number;
cacheRead: number;
cacheWrite: number;
};
type SyntheticOpenAIModelCatalogEntry = {
provider: string;
id: string;
@@ -14,6 +21,7 @@ type SyntheticOpenAIModelCatalogEntry = {
input?: ("text" | "image")[];
contextWindow?: number;
contextTokens?: number;
cost?: SyntheticOpenAIModelCatalogCost;
};
export const OPENAI_API_BASE_URL = "https://api.openai.com/v1";
@@ -50,6 +58,7 @@ export function buildOpenAISyntheticCatalogEntry(
input: readonly ("text" | "image")[];
contextWindow: number;
contextTokens?: number;
cost?: SyntheticOpenAIModelCatalogCost;
},
): SyntheticOpenAIModelCatalogEntry | undefined {
if (!template) {
@@ -63,6 +72,7 @@ export function buildOpenAISyntheticCatalogEntry(
input: [...entry.input],
contextWindow: entry.contextWindow,
...(entry.contextTokens === undefined ? {} : { contextTokens: entry.contextTokens }),
...(entry.cost === undefined ? {} : { cost: entry.cost }),
};
}

View File

@@ -401,7 +401,8 @@ describe("isModernModelRef", () => {
provider === "openai" &&
["gpt-5.4", "gpt-5.4-pro", "gpt-5.4-mini", "gpt-5.4-nano"].includes(context.modelId)
? true
: provider === "openai-codex" && ["gpt-5.4", "gpt-5.4-mini"].includes(context.modelId)
: provider === "openai-codex" &&
["gpt-5.4", "gpt-5.4-pro", "gpt-5.4-mini"].includes(context.modelId)
? true
: provider === "opencode" && ["claude-opus-4-6", "gemini-3-pro"].includes(context.modelId)
? true
@@ -415,6 +416,7 @@ describe("isModernModelRef", () => {
expect(isModernModelRef({ provider: "openai", id: "gpt-5.4-mini" })).toBe(true);
expect(isModernModelRef({ provider: "openai", id: "gpt-5.4-nano" })).toBe(true);
expect(isModernModelRef({ provider: "openai-codex", id: "gpt-5.4" })).toBe(true);
expect(isModernModelRef({ provider: "openai-codex", id: "gpt-5.4-pro" })).toBe(true);
expect(isModernModelRef({ provider: "openai-codex", id: "gpt-5.4-mini" })).toBe(true);
expect(isModernModelRef({ provider: "opencode", id: "claude-opus-4-6" })).toBe(true);
expect(isModernModelRef({ provider: "opencode", id: "gemini-3-pro" })).toBe(true);

View File

@@ -184,17 +184,17 @@ function buildDynamicModel(
case "openai-codex": {
const isLegacyGpt54Alias = lower === "gpt-5.4-codex";
const template =
lower === "gpt-5.4" || isLegacyGpt54Alias
? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.4"])
lower === "gpt-5.4" || isLegacyGpt54Alias || lower === "gpt-5.4-pro"
? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.3-codex", "gpt-5.2-codex"])
: lower === "gpt-5.4-mini"
? findTemplate(params, "openai-codex", [
"gpt-5.4",
"gpt-5.1-codex-mini",
"gpt-5.3-codex",
"gpt-5.4",
"gpt-5.2-codex",
])
: lower === "gpt-5.3-codex-spark"
? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.4"])
? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.3-codex", "gpt-5.2-codex"])
: findTemplate(params, "openai-codex", ["gpt-5.4"]);
const fallback = {
provider: "openai-codex",
@@ -222,6 +222,22 @@ function buildDynamicModel(
fallback,
);
}
if (lower === "gpt-5.4-pro") {
return cloneTemplate(
template,
modelId,
{
provider: "openai-codex",
api: "openai-codex-responses",
baseUrl: OPENAI_CODEX_BASE_URL,
cost: { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1_050_000,
contextTokens: 272_000,
maxTokens: 128_000,
},
fallback,
);
}
if (lower === "gpt-5.4-mini") {
return cloneTemplate(
template,

View File

@@ -39,6 +39,7 @@ export const DEFAULT_TEST_MODEL_CATALOG: Array<{
{ id: "gpt-5.4-mini", name: "GPT-5.4 Mini", provider: "openai" },
{ id: "gpt-5.4-nano", name: "GPT-5.4 Nano", provider: "openai" },
{ id: "gpt-5.4", name: "GPT-5.4 (Codex)", provider: "openai-codex" },
{ id: "gpt-5.4-pro", name: "GPT-5.4 Pro (Codex)", provider: "openai-codex" },
{ id: "gpt-5.4-mini", name: "GPT-5.4 Mini (Codex)", provider: "openai-codex" },
{ id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
];
@@ -55,6 +56,7 @@ const OPENAI_XHIGH_MODEL_IDS = [
const OPENAI_CODEX_XHIGH_MODEL_IDS = [
"gpt-5.4",
"gpt-5.4-pro",
"gpt-5.4-mini",
"gpt-5.3-codex",
"gpt-5.3-codex-spark",

View File

@@ -85,7 +85,7 @@ describe("listThinkingLevels", () => {
providerRuntimeMocks.resolveProviderXHighThinking.mockImplementation(({ provider, context }) =>
(provider === "openai" && ["gpt-5.4", "gpt-5.4", "gpt-5.4-pro"].includes(context.modelId)) ||
(provider === "openai-codex" &&
["gpt-5.4", "gpt-5.4", "gpt-5.3-codex-spark"].includes(context.modelId)) ||
["gpt-5.4", "gpt-5.4-pro", "gpt-5.3-codex-spark"].includes(context.modelId)) ||
(provider === "github-copilot" && ["gpt-5.4", "gpt-5.4"].includes(context.modelId))
? true
: undefined,
@@ -94,6 +94,7 @@ describe("listThinkingLevels", () => {
expect(listThinkingLevels("openai-codex", "gpt-5.4")).toContain("xhigh");
expect(listThinkingLevels("openai-codex", "gpt-5.4")).toContain("xhigh");
expect(listThinkingLevels("openai-codex", "gpt-5.3-codex-spark")).toContain("xhigh");
expect(listThinkingLevels("openai-codex", "gpt-5.4-pro")).toContain("xhigh");
expect(listThinkingLevels("openai", "gpt-5.4")).toContain("xhigh");
expect(listThinkingLevels("openai", "gpt-5.4")).toContain("xhigh");
expect(listThinkingLevels("openai", "gpt-5.4-pro")).toContain("xhigh");

View File

@@ -19,6 +19,12 @@ const OPENAI_CODEX_MINI_MODEL = {
contextWindow: 272_000,
};
const OPENAI_CODEX_PRO_MODEL = {
...OPENAI_CODEX_MODEL,
id: "gpt-5.4-pro",
name: "GPT-5.4 Pro",
};
const OPENAI_CODEX_53_MODEL = {
...OPENAI_CODEX_MODEL,
id: "gpt-5.4",
@@ -234,6 +240,35 @@ describe("modelsListCommand forward-compat", () => {
expect(codexMini?.tags).not.toContain("missing");
});
it("does not mark configured codex gpt-5.4-pro as missing when forward-compat can build a fallback", async () => {
mocks.resolveConfiguredEntries.mockReturnValueOnce({
entries: [
{
key: "openai-codex/gpt-5.4-pro",
ref: { provider: "openai-codex", model: "gpt-5.4-pro" },
tags: new Set(["configured"]),
aliases: [],
},
],
});
mocks.resolveModelWithRegistry.mockReturnValueOnce({ ...OPENAI_CODEX_PRO_MODEL });
const runtime = createRuntime();
await modelsListCommand({ json: true }, runtime as never);
expect(mocks.printModelTable).toHaveBeenCalled();
const rows = lastPrintedRows<{
key: string;
tags: string[];
missing: boolean;
}>();
const codexPro = rows.find((row) => row.key === "openai-codex/gpt-5.4-pro");
expect(codexPro).toBeTruthy();
expect(codexPro?.missing).toBe(false);
expect(codexPro?.tags).not.toContain("missing");
});
it("passes source config to model registry loading for persistence safety", async () => {
const runtime = createRuntime();

View File

@@ -14,6 +14,7 @@ export const expectedAugmentedOpenaiCodexCatalogEntries = [
{ provider: "openai", id: "gpt-5.4-mini", name: "gpt-5.4-mini" },
{ provider: "openai", id: "gpt-5.4-nano", name: "gpt-5.4-nano" },
{ provider: "openai-codex", id: "gpt-5.4", name: "gpt-5.4" },
{ provider: "openai-codex", id: "gpt-5.4-pro", name: "gpt-5.4-pro" },
{ provider: "openai-codex", id: "gpt-5.4-mini", name: "gpt-5.4-mini" },
{
provider: "openai-codex",

View File

@@ -132,6 +132,7 @@ function createOpenAiCatalogProviderPlugin(
{ provider: "openai", id: "gpt-5.4-mini", name: "gpt-5.4-mini" },
{ provider: "openai", id: "gpt-5.4-nano", name: "gpt-5.4-nano" },
{ provider: "openai-codex", id: "gpt-5.4", name: "gpt-5.4" },
{ provider: "openai-codex", id: "gpt-5.4-pro", name: "gpt-5.4-pro" },
{ provider: "openai-codex", id: "gpt-5.4-mini", name: "gpt-5.4-mini" },
{
provider: "openai-codex",