fix(openai): synthesize codex gpt-5.5 oauth model

This commit is contained in:
Peter Steinberger
2026-04-24 01:49:00 +01:00
parent cec3482175
commit 79066f5cab
5 changed files with 73 additions and 2 deletions

View File

@@ -17,6 +17,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Codex/media understanding: support `codex/*` image models through bounded Codex app-server image turns, while keeping `openai-codex/*` on the OpenAI Codex OAuth route and validating app-server responses against generated protocol contracts. Fixes #70201.
- Providers/OpenAI Codex: synthesize the `openai-codex/gpt-5.5` OAuth model row when Codex catalog discovery omits it, so cron and subagent runs do not fail with `Unknown model` while the account is authenticated.
- Providers/Google: honor the private-network SSRF opt-in for Gemini image generation requests, so trusted proxy setups that resolve Google API hosts to private addresses can use `image_generate`. Fixes #67216.
- Agents/transport: stop embedded runs from lowering the process-wide undici stream timeouts, so slow Gemini image generation and other long-running provider requests no longer inherit short run-attempt header timeouts. Fixes #70423. Thanks @giangthb.
- Providers/OpenRouter: send image-understanding prompts as user text before image parts, restoring non-empty vision responses for OpenRouter multimodal models. Fixes #70410.

View File

@@ -368,6 +368,27 @@ describe("openai codex provider", () => {
});
});
it("synthesizes gpt-5.5 when the Codex catalog omits the OAuth row", () => {
  // Registry contains only the runtime template — no OAuth catalog row for gpt-5.5 —
  // so the provider must fabricate the model instead of reporting "Unknown model".
  const plugin = buildOpenAICodexProviderPlugin();
  const registry = createSingleModelRegistry(createCodexTemplate({}), null) as never;
  const resolved = plugin.resolveDynamicModel?.({
    provider: "openai-codex",
    modelId: "gpt-5.5",
    modelRegistry: registry,
  });
  // Shape of the synthesized OAuth model row (Codex backend base URL, vision input,
  // 1M-token native window clamped to a 272K default).
  const expectedShape = {
    id: "gpt-5.5",
    api: "openai-codex-responses",
    baseUrl: "https://chatgpt.com/backend-api/codex",
    reasoning: true,
    input: ["text", "image"],
    contextWindow: 1_000_000,
    contextTokens: 272_000,
    maxTokens: 128_000,
  };
  expect(resolved).toMatchObject(expectedShape);
});
it("resolves gpt-5.4-pro from a gpt-5.4 runtime template when legacy codex rows are absent", () => {
const provider = buildOpenAICodexProviderPlugin();

View File

@@ -50,6 +50,8 @@ const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
// Model-id aliases for the gpt-5.4 family exposed through the Codex OAuth route.
const OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID = "gpt-5.4-codex";
const OPENAI_CODEX_GPT_54_PRO_MODEL_ID = "gpt-5.4-pro";
const OPENAI_CODEX_GPT_54_MINI_MODEL_ID = "gpt-5.4-mini";
// gpt-5.5 context sizing: NATIVE is the model's advertised window, DEFAULT is the
// smaller budget actually offered to runs (presumably a safety/cost clamp — the
// same 1_000_000/272_000 split is asserted by the provider tests in this commit).
const OPENAI_CODEX_GPT_55_NATIVE_CONTEXT_TOKENS = 1_000_000;
const OPENAI_CODEX_GPT_55_DEFAULT_CONTEXT_TOKENS = 272_000;
// gpt-5.5-pro currently shares the same native/default split as the base model.
const OPENAI_CODEX_GPT_55_PRO_NATIVE_CONTEXT_TOKENS = 1_000_000;
const OPENAI_CODEX_GPT_55_PRO_DEFAULT_CONTEXT_TOKENS = 272_000;
// NOTE(review): gpt-5.4's native window (1_050_000) differs from gpt-5.5's — confirm
// this asymmetry is intentional rather than a stale value.
const OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS = 1_050_000;
@@ -185,7 +187,22 @@ function resolveCodexForwardCompatModel(ctx: ProviderResolveDynamicModelContext)
const model = ctx.modelRegistry.find(PROVIDER_ID, trimmedModelId) as
| ProviderRuntimeModel
| undefined;
return model;
return (
model ??
normalizeModelCompat({
id: trimmedModelId,
name: trimmedModelId,
api: "openai-codex-responses",
provider: PROVIDER_ID,
baseUrl: OPENAI_CODEX_BASE_URL,
reasoning: true,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: OPENAI_CODEX_GPT_55_NATIVE_CONTEXT_TOKENS,
contextTokens: OPENAI_CODEX_GPT_55_DEFAULT_CONTEXT_TOKENS,
maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
} as ProviderRuntimeModel)
);
}
let templateIds: readonly string[];

View File

@@ -233,7 +233,22 @@ function buildDynamicModel(
if (lower === "gpt-5.5") {
return (
(params.modelRegistry.find("openai-codex", modelId) as ResolvedModelLike | null) ??
undefined
cloneTemplate(
undefined,
modelId,
{
provider: "openai-codex",
api: "openai-codex-responses",
baseUrl: OPENAI_CODEX_BASE_URL,
reasoning: true,
input: ["text", "image"],
cost: OPENROUTER_FALLBACK_COST,
contextWindow: 1_000_000,
contextTokens: 272_000,
maxTokens: 128_000,
},
{},
)
);
}
const template =

View File

@@ -1145,6 +1145,23 @@ describe("resolveModel", () => {
});
});
// Verifies the resolver layer surfaces the synthesized gpt-5.5 OAuth model when
// catalog discovery returns no row for it (no mockDiscoveredModel call here).
it("resolves openai-codex gpt-5.5 even when discovery omits the OAuth catalog row", () => {
const result = resolveModelForTest("openai-codex", "gpt-5.5");
expect(result.error).toBeUndefined();
expect(result.model).toMatchObject({
provider: "openai-codex",
id: "gpt-5.5",
api: "openai-codex-responses",
// NOTE(review): the sibling provider test asserts "https://chatgpt.com/backend-api/codex"
// for the same synthesized model — confirm whether the resolver intentionally
// normalizes the base URL or this expectation drops the "/codex" suffix by mistake.
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
input: ["text", "image"],
contextWindow: 1_000_000,
contextTokens: 272_000,
maxTokens: 128_000,
});
});
it("preserves unmarked manual openai-codex metadata overrides", () => {
mockDiscoveredModel(discoverModels, {
provider: "openai-codex",