From 79066f5cab855963e99579a7a7a6a5b5b74a9b38 Mon Sep 17 00:00:00 2001
From: Peter Steinberger
Date: Fri, 24 Apr 2026 01:49:00 +0100
Subject: [PATCH] fix(openai): synthesize codex gpt-5.5 oauth model

---
 CHANGELOG.md                                 |  1 +
 .../openai/openai-codex-provider.test.ts     | 21 +++++++++++++++++++
 extensions/openai/openai-codex-provider.ts   | 19 ++++++++++++++++-
 .../model.provider-runtime.test-support.ts   | 17 ++++++++++++++-
 src/agents/pi-embedded-runner/model.test.ts  | 17 +++++++++++++++
 5 files changed, 73 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6acb4a485a4..b12c5b57b2c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,7 @@ Docs: https://docs.openclaw.ai
 ### Fixes
 
 - Codex/media understanding: support `codex/*` image models through bounded Codex app-server image turns, while keeping `openai-codex/*` on the OpenAI Codex OAuth route and validating app-server responses against generated protocol contracts. Fixes #70201.
+- Providers/OpenAI Codex: synthesize the `openai-codex/gpt-5.5` OAuth model row when Codex catalog discovery omits it, so cron and subagent runs do not fail with `Unknown model` while the account is authenticated.
 - Providers/Google: honor the private-network SSRF opt-in for Gemini image generation requests, so trusted proxy setups that resolve Google API hosts to private addresses can use `image_generate`. Fixes #67216.
 - Agents/transport: stop embedded runs from lowering the process-wide undici stream timeouts, so slow Gemini image generation and other long-running provider requests no longer inherit short run-attempt headers timeouts. Fixes #70423. Thanks @giangthb.
 - Providers/OpenRouter: send image-understanding prompts as user text before image parts, restoring non-empty vision responses for OpenRouter multimodal models. Fixes #70410.
diff --git a/extensions/openai/openai-codex-provider.test.ts b/extensions/openai/openai-codex-provider.test.ts
index 212f5cb6e48..0ffac736c59 100644
--- a/extensions/openai/openai-codex-provider.test.ts
+++ b/extensions/openai/openai-codex-provider.test.ts
@@ -368,6 +368,27 @@ describe("openai codex provider", () => {
     });
   });
 
+  it("synthesizes gpt-5.5 when the Codex catalog omits the OAuth row", () => {
+    const provider = buildOpenAICodexProviderPlugin();
+
+    const model = provider.resolveDynamicModel?.({
+      provider: "openai-codex",
+      modelId: "gpt-5.5",
+      modelRegistry: createSingleModelRegistry(createCodexTemplate({}), null) as never,
+    });
+
+    expect(model).toMatchObject({
+      id: "gpt-5.5",
+      api: "openai-codex-responses",
+      baseUrl: "https://chatgpt.com/backend-api/codex",
+      reasoning: true,
+      input: ["text", "image"],
+      contextWindow: 1_000_000,
+      contextTokens: 272_000,
+      maxTokens: 128_000,
+    });
+  });
+
   it("resolves gpt-5.4-pro from a gpt-5.4 runtime template when legacy codex rows are absent", () => {
     const provider = buildOpenAICodexProviderPlugin();
 
diff --git a/extensions/openai/openai-codex-provider.ts b/extensions/openai/openai-codex-provider.ts
index 424578ccd98..53b0ad417de 100644
--- a/extensions/openai/openai-codex-provider.ts
+++ b/extensions/openai/openai-codex-provider.ts
@@ -50,6 +50,8 @@ const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
 const OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID = "gpt-5.4-codex";
 const OPENAI_CODEX_GPT_54_PRO_MODEL_ID = "gpt-5.4-pro";
 const OPENAI_CODEX_GPT_54_MINI_MODEL_ID = "gpt-5.4-mini";
+const OPENAI_CODEX_GPT_55_NATIVE_CONTEXT_TOKENS = 1_000_000;
+const OPENAI_CODEX_GPT_55_DEFAULT_CONTEXT_TOKENS = 272_000;
 const OPENAI_CODEX_GPT_55_PRO_NATIVE_CONTEXT_TOKENS = 1_000_000;
 const OPENAI_CODEX_GPT_55_PRO_DEFAULT_CONTEXT_TOKENS = 272_000;
 const OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS = 1_050_000;
@@ -185,7 +187,22 @@ function resolveCodexForwardCompatModel(ctx: ProviderResolveDynamicModelContext)
   const model = ctx.modelRegistry.find(PROVIDER_ID, trimmedModelId) as
     | ProviderRuntimeModel
     | undefined;
-  return model;
+  return (
+    model ??
+    normalizeModelCompat({
+      id: trimmedModelId,
+      name: trimmedModelId,
+      api: "openai-codex-responses",
+      provider: PROVIDER_ID,
+      baseUrl: OPENAI_CODEX_BASE_URL,
+      reasoning: true,
+      input: ["text", "image"],
+      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+      contextWindow: OPENAI_CODEX_GPT_55_NATIVE_CONTEXT_TOKENS,
+      contextTokens: OPENAI_CODEX_GPT_55_DEFAULT_CONTEXT_TOKENS,
+      maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
+    } as ProviderRuntimeModel)
+  );
 }
 
 let templateIds: readonly string[];
diff --git a/src/agents/pi-embedded-runner/model.provider-runtime.test-support.ts b/src/agents/pi-embedded-runner/model.provider-runtime.test-support.ts
index 982e0c08384..4e3413856ae 100644
--- a/src/agents/pi-embedded-runner/model.provider-runtime.test-support.ts
+++ b/src/agents/pi-embedded-runner/model.provider-runtime.test-support.ts
@@ -233,7 +233,22 @@ function buildDynamicModel(
   if (lower === "gpt-5.5") {
     return (
       (params.modelRegistry.find("openai-codex", modelId) as ResolvedModelLike | null) ??
-      undefined
+      cloneTemplate(
+        undefined,
+        modelId,
+        {
+          provider: "openai-codex",
+          api: "openai-codex-responses",
+          baseUrl: OPENAI_CODEX_BASE_URL,
+          reasoning: true,
+          input: ["text", "image"],
+          cost: OPENROUTER_FALLBACK_COST,
+          contextWindow: 1_000_000,
+          contextTokens: 272_000,
+          maxTokens: 128_000,
+        },
+        {},
+      )
     );
   }
   const template =
diff --git a/src/agents/pi-embedded-runner/model.test.ts b/src/agents/pi-embedded-runner/model.test.ts
index 02ddda4c506..0e18e02615e 100644
--- a/src/agents/pi-embedded-runner/model.test.ts
+++ b/src/agents/pi-embedded-runner/model.test.ts
@@ -1145,6 +1145,23 @@ describe("resolveModel", () => {
     });
   });
 
+  it("resolves openai-codex gpt-5.5 even when discovery omits the OAuth catalog row", () => {
+    const result = resolveModelForTest("openai-codex", "gpt-5.5");
+
+    expect(result.error).toBeUndefined();
+    expect(result.model).toMatchObject({
+      provider: "openai-codex",
+      id: "gpt-5.5",
+      api: "openai-codex-responses",
+      baseUrl: "https://chatgpt.com/backend-api",
+      reasoning: true,
+      input: ["text", "image"],
+      contextWindow: 1_000_000,
+      contextTokens: 272_000,
+      maxTokens: 128_000,
+    });
+  });
+
   it("preserves unmarked manual openai-codex metadata overrides", () => {
     mockDiscoveredModel(discoverModels, {
       provider: "openai-codex",
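
Review note: the core of this change is a lookup-with-fallback. When Codex catalog discovery leaves no `openai-codex/gpt-5.5` row in the registry, resolveCodexForwardCompatModel now fabricates a conservative default descriptor instead of returning undefined, which is what previously surfaced as `Unknown model` in cron and subagent runs. The minimal standalone TypeScript sketch below illustrates that pattern only; the ModelRow interface, Registry type, and resolveWithFallback helper are hypothetical stand-ins, and just the literal values mirror the hunks above.

    // Illustrative sketch only; types and helper names are hypothetical, not repo code.
    interface ModelRow {
      id: string;
      name: string;
      api: string;
      provider: string;
      baseUrl: string;
      reasoning: boolean;
      input: Array<"text" | "image">;
      cost: { input: number; output: number; cacheRead: number; cacheWrite: number };
      contextWindow: number;
      contextTokens: number;
      maxTokens: number;
    }

    // Assumed registry shape, keyed by "<provider>/<modelId>".
    type Registry = Map<string, ModelRow>;

    function resolveWithFallback(registry: Registry, provider: string, modelId: string): ModelRow {
      // Prefer the row that catalog discovery produced, when it exists.
      const discovered = registry.get(`${provider}/${modelId}`);
      if (discovered) {
        return discovered;
      }
      // Otherwise synthesize a default row so an authenticated account whose
      // catalog omitted the entry does not fail with "Unknown model".
      return {
        id: modelId,
        name: modelId,
        api: "openai-codex-responses",
        provider,
        baseUrl: "https://chatgpt.com/backend-api/codex",
        reasoning: true,
        input: ["text", "image"],
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
        contextWindow: 1_000_000,
        contextTokens: 272_000,
        maxTokens: 128_000,
      };
    }

    // An empty registry still resolves, mirroring the new tests.
    const fallbackRow = resolveWithFallback(new Map(), "openai-codex", "gpt-5.5");
    console.log(fallbackRow.contextTokens); // 272000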