diff --git a/CHANGELOG.md b/CHANGELOG.md index a05934e5fd1..1610b81e2ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ Docs: https://docs.openclaw.ai - Config/runtime snapshots: keep secrets-runtime-resolved config and auth-profile snapshots intact after config writes so follow-up reads still see file-backed secret values while picking up the persisted config update. (#37313) thanks @bbblending. - Gateway/Control UI: resolve bundled dashboard assets through symlinked global wrappers and auto-detected package roots, while keeping configured and custom roots on the strict hardlink boundary. (#40385) Thanks @LarytheLord. - Docs/Changelog: correct the contributor credit for the bundled Control UI global-install fix to @LarytheLord. (#40420) Thanks @velvet-shark. +- Models/openai-codex GPT-5.4 forward-compat: use the GPT-5.4 1,050,000-token context window and 128,000 max tokens for `openai-codex/gpt-5.4` instead of inheriting stale legacy Codex limits in resolver fallbacks and model listing. (#37876) thanks @yuweuii. 
## 2026.3.7 diff --git a/src/agents/model-compat.test.ts b/src/agents/model-compat.test.ts index 24361c0a534..3c1894bb390 100644 --- a/src/agents/model-compat.test.ts +++ b/src/agents/model-compat.test.ts @@ -363,7 +363,7 @@ describe("resolveForwardCompatModel", () => { expectResolvedForwardCompat(model, { provider: "openai-codex", id: "gpt-5.4" }); expect(model?.api).toBe("openai-codex-responses"); expect(model?.baseUrl).toBe("https://chatgpt.com/backend-api"); - expect(model?.contextWindow).toBe(272_000); + expect(model?.contextWindow).toBe(1_050_000); expect(model?.maxTokens).toBe(128_000); }); diff --git a/src/agents/model-forward-compat.ts b/src/agents/model-forward-compat.ts index e27260db832..8735193346e 100644 --- a/src/agents/model-forward-compat.ts +++ b/src/agents/model-forward-compat.ts @@ -12,6 +12,8 @@ const OPENAI_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.2"] as const; const OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS = ["gpt-5.2-pro", "gpt-5.2"] as const; const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4"; +const OPENAI_CODEX_GPT_54_CONTEXT_TOKENS = 1_050_000; +const OPENAI_CODEX_GPT_54_MAX_TOKENS = 128_000; const OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const; const OPENAI_CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex"; const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const; @@ -123,9 +125,14 @@ function resolveOpenAICodexForwardCompatModel( let templateIds: readonly string[]; let eligibleProviders: Set<string>; + let patch: Partial<Pick<Model, "contextWindow" | "maxTokens">> | undefined; if (lower === OPENAI_CODEX_GPT_54_MODEL_ID) { templateIds = OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS; eligibleProviders = CODEX_GPT54_ELIGIBLE_PROVIDERS; + patch = { + contextWindow: OPENAI_CODEX_GPT_54_CONTEXT_TOKENS, + maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS, + }; } else if (lower === OPENAI_CODEX_GPT_53_MODEL_ID) { templateIds = OPENAI_CODEX_TEMPLATE_MODEL_IDS; eligibleProviders = CODEX_GPT53_ELIGIBLE_PROVIDERS; @@ -146,6 +153,7 @@ function resolveOpenAICodexForwardCompatModel( 
...template, id: trimmedModelId, name: trimmedModelId, + ...patch, } as Model); } @@ -158,8 +166,8 @@ function resolveOpenAICodexForwardCompatModel( reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, - contextWindow: DEFAULT_CONTEXT_TOKENS, - maxTokens: DEFAULT_CONTEXT_TOKENS, + contextWindow: patch?.contextWindow ?? DEFAULT_CONTEXT_TOKENS, + maxTokens: patch?.maxTokens ?? DEFAULT_CONTEXT_TOKENS, } as Model); } diff --git a/src/agents/pi-embedded-runner/model.test-harness.ts b/src/agents/pi-embedded-runner/model.test-harness.ts index c28210b1921..58d724307de 100644 --- a/src/agents/pi-embedded-runner/model.test-harness.ts +++ b/src/agents/pi-embedded-runner/model.test-harness.ts @@ -36,13 +36,14 @@ export function mockOpenAICodexTemplateModel(): void { export function buildOpenAICodexForwardCompatExpectation( id: string = "gpt-5.3-codex", ): Partial<Model> & { provider: string; id: string } { + const isGpt54 = id === "gpt-5.4"; return { provider: "openai-codex", id, api: "openai-codex-responses", baseUrl: "https://chatgpt.com/backend-api", reasoning: true, - contextWindow: 272000, + contextWindow: isGpt54 ? 
1_050_000 : 272000, maxTokens: 128000, }; } diff --git a/src/cli/daemon-cli/lifecycle.test.ts b/src/cli/daemon-cli/lifecycle.test.ts index f1e87fc4938..3f0ed6d531c 100644 --- a/src/cli/daemon-cli/lifecycle.test.ts +++ b/src/cli/daemon-cli/lifecycle.test.ts @@ -36,16 +36,17 @@ const renderGatewayPortHealthDiagnostics = vi.fn(() => ["diag: unhealthy port"]) const renderRestartDiagnostics = vi.fn(() => ["diag: unhealthy runtime"]); const resolveGatewayPort = vi.fn(() => 18789); const findGatewayPidsOnPortSync = vi.fn<(port: number) => number[]>(() => []); -const probeGateway = vi.fn< - (opts: { - url: string; - auth?: { token?: string; password?: string }; - timeoutMs: number; - }) => Promise<{ - ok: boolean; - configSnapshot: unknown; - }> ->(); +const probeGateway = + vi.fn< + (opts: { + url: string; + auth?: { token?: string; password?: string }; + timeoutMs: number; + }) => Promise<{ + ok: boolean; + configSnapshot: unknown; + }> + >(); const isRestartEnabled = vi.fn<(config?: { commands?: unknown }) => boolean>(() => true); const loadConfig = vi.fn(() => ({})); diff --git a/src/commands/models/list.list-command.forward-compat.test.ts b/src/commands/models/list.list-command.forward-compat.test.ts index 9dd7f55f797..eafe6a1cb01 100644 --- a/src/commands/models/list.list-command.forward-compat.test.ts +++ b/src/commands/models/list.list-command.forward-compat.test.ts @@ -7,7 +7,7 @@ const OPENAI_CODEX_MODEL = { api: "openai-codex-responses", baseUrl: "https://chatgpt.com/backend-api", input: ["text"], - contextWindow: 272000, + contextWindow: 1_050_000, maxTokens: 128000, cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, };