fix(models): use 1M context for openai-codex gpt-5.4 (#37876)

Merged via squash.

Prepared head SHA: c41020779e
Co-authored-by: yuweuii <82372187+yuweuii@users.noreply.github.com>
Co-authored-by: jalehman <550978+jalehman@users.noreply.github.com>
Reviewed-by: @jalehman
This commit is contained in:
yuweuii
2026-03-09 09:23:49 +08:00
committed by GitHub
parent b34158086a
commit f6243916b5
6 changed files with 26 additions and 15 deletions

View File

@@ -48,6 +48,7 @@ Docs: https://docs.openclaw.ai
- Config/runtime snapshots: keep secrets-runtime-resolved config and auth-profile snapshots intact after config writes so follow-up reads still see file-backed secret values while picking up the persisted config update. (#37313) thanks @bbblending.
- Gateway/Control UI: resolve bundled dashboard assets through symlinked global wrappers and auto-detected package roots, while keeping configured and custom roots on the strict hardlink boundary. (#40385) Thanks @LarytheLord.
- Docs/Changelog: correct the contributor credit for the bundled Control UI global-install fix to @LarytheLord. (#40420) Thanks @velvet-shark.
- Models/openai-codex GPT-5.4 forward-compat: use the GPT-5.4 1,050,000-token context window and 128,000 max tokens for `openai-codex/gpt-5.4` instead of inheriting stale legacy Codex limits in resolver fallbacks and model listing. (#37876) Thanks @yuweuii.
## 2026.3.7

View File

@@ -363,7 +363,7 @@ describe("resolveForwardCompatModel", () => {
expectResolvedForwardCompat(model, { provider: "openai-codex", id: "gpt-5.4" });
expect(model?.api).toBe("openai-codex-responses");
expect(model?.baseUrl).toBe("https://chatgpt.com/backend-api");
expect(model?.contextWindow).toBe(272_000);
expect(model?.contextWindow).toBe(1_050_000);
expect(model?.maxTokens).toBe(128_000);
});

View File

@@ -12,6 +12,8 @@ const OPENAI_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.2"] as const;
const OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS = ["gpt-5.2-pro", "gpt-5.2"] as const;
const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
const OPENAI_CODEX_GPT_54_CONTEXT_TOKENS = 1_050_000;
const OPENAI_CODEX_GPT_54_MAX_TOKENS = 128_000;
const OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const;
const OPENAI_CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex";
const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
@@ -123,9 +125,14 @@ function resolveOpenAICodexForwardCompatModel(
let templateIds: readonly string[];
let eligibleProviders: Set<string>;
let patch: Partial<Model<Api>> | undefined;
if (lower === OPENAI_CODEX_GPT_54_MODEL_ID) {
templateIds = OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS;
eligibleProviders = CODEX_GPT54_ELIGIBLE_PROVIDERS;
patch = {
contextWindow: OPENAI_CODEX_GPT_54_CONTEXT_TOKENS,
maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
};
} else if (lower === OPENAI_CODEX_GPT_53_MODEL_ID) {
templateIds = OPENAI_CODEX_TEMPLATE_MODEL_IDS;
eligibleProviders = CODEX_GPT53_ELIGIBLE_PROVIDERS;
@@ -146,6 +153,7 @@ function resolveOpenAICodexForwardCompatModel(
...template,
id: trimmedModelId,
name: trimmedModelId,
...patch,
} as Model<Api>);
}
@@ -158,8 +166,8 @@ function resolveOpenAICodexForwardCompatModel(
reasoning: true,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: DEFAULT_CONTEXT_TOKENS,
maxTokens: DEFAULT_CONTEXT_TOKENS,
contextWindow: patch?.contextWindow ?? DEFAULT_CONTEXT_TOKENS,
maxTokens: patch?.maxTokens ?? DEFAULT_CONTEXT_TOKENS,
} as Model<Api>);
}

View File

@@ -36,13 +36,14 @@ export function mockOpenAICodexTemplateModel(): void {
/**
 * Builds the expected model shape for an openai-codex forward-compat
 * resolution in tests.
 *
 * @param id - Model id being resolved; defaults to "gpt-5.3-codex".
 * @returns Partial model expectation carrying the provider/id plus the
 *   limits the resolver should apply. For "gpt-5.4" the context window is
 *   the GPT-5.4 limit (1,050,000 tokens); all other ids keep the legacy
 *   Codex 272,000-token window. maxTokens is 128,000 in both cases.
 */
export function buildOpenAICodexForwardCompatExpectation(
  id: string = "gpt-5.3-codex",
): Partial<typeof OPENAI_CODEX_TEMPLATE_MODEL> & { provider: string; id: string } {
  const isGpt54 = id === "gpt-5.4";
  return {
    provider: "openai-codex",
    id,
    api: "openai-codex-responses",
    baseUrl: "https://chatgpt.com/backend-api",
    reasoning: true,
    // GPT-5.4 gets its own 1.05M context; older Codex ids keep 272k.
    contextWindow: isGpt54 ? 1_050_000 : 272000,
    maxTokens: 128000,
  };
}

View File

@@ -36,16 +36,17 @@ const renderGatewayPortHealthDiagnostics = vi.fn(() => ["diag: unhealthy port"])
const renderRestartDiagnostics = vi.fn(() => ["diag: unhealthy runtime"]);
const resolveGatewayPort = vi.fn(() => 18789);
const findGatewayPidsOnPortSync = vi.fn<(port: number) => number[]>(() => []);
// Typed vitest mock for the gateway health probe. The call signature mirrors
// the real probe: takes the gateway URL, optional auth, and a timeout, and
// resolves with the probe result plus the config snapshot it observed.
// NOTE: the source showed TWO `const probeGateway` declarations (the pre- and
// post-reformat versions of the same statement, a flattened-diff artifact);
// a `const` cannot be redeclared, so only the single declaration is kept.
const probeGateway = vi.fn<
  (opts: {
    url: string;
    auth?: { token?: string; password?: string };
    timeoutMs: number;
  }) => Promise<{
    ok: boolean;
    configSnapshot: unknown;
  }>
>();
const isRestartEnabled = vi.fn<(config?: { commands?: unknown }) => boolean>(() => true);
const loadConfig = vi.fn(() => ({}));

View File

@@ -7,7 +7,7 @@ const OPENAI_CODEX_MODEL = {
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
input: ["text"],
contextWindow: 272000,
contextWindow: 1_050_000,
maxTokens: 128000,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
};