fix: canonicalize Codex image base URLs

This commit is contained in:
Peter Steinberger
2026-04-25 08:45:33 +01:00
parent 5163a2fbf7
commit 0f4b6f81d9
8 changed files with 89 additions and 10 deletions

View File

@@ -13,6 +13,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- OpenAI/Codex image generation: canonicalize legacy `openai-codex.baseUrl` values such as `https://chatgpt.com/backend-api` to the Codex Responses backend before calling `gpt-image-2`, matching the chat transport. Fixes #71460.
- Telegram/webhook: acknowledge validated webhook updates before running bot middleware, keeping slow agent turns from tripping Telegram delivery retries while preserving per-chat processing lanes. Fixes #71392.
- MCP: retire one-shot embedded bundled MCP runtimes at run end, skip bundle-MCP startup when a runtime tool allowlist cannot reach bundle-MCP tools, and add `mcp.sessionIdleTtlMs` idle eviction for leaked session runtimes. Fixes #71106, #71110, #70389, and #70808.
- MCP/config reload: hot-apply `mcp.*` changes by disposing cached session MCP runtimes, and dispose bundled MCP runtimes during gateway shutdown so removed `mcp.servers` entries reap child processes promptly. Fixes #60656.

View File

@@ -221,10 +221,12 @@ OpenClaw forwards `prompt`, `count`, reference images, and Gemini-compatible `as
OpenAI image generation defaults to `openai/gpt-image-2`. If an
`openai-codex` OAuth profile is configured, OpenClaw reuses the same OAuth
profile used by Codex subscription chat models and sends the image request
through the Codex Responses backend; it does not silently fall back to
`OPENAI_API_KEY` for that request. To force direct OpenAI Images API routing,
configure `models.providers.openai` explicitly with an API key, custom base URL,
or Azure endpoint. The older
through the Codex Responses backend. Legacy Codex base URLs such as
`https://chatgpt.com/backend-api` are canonicalized to
`https://chatgpt.com/backend-api/codex` for image requests. It does not
silently fall back to `OPENAI_API_KEY` for that request. To force direct OpenAI
Images API routing, configure `models.providers.openai` explicitly with an API
key, custom base URL, or Azure endpoint. The older
`openai/gpt-image-1` model can still be selected explicitly, but new OpenAI
image-generation and image-editing requests should use `gpt-image-2`.

View File

@@ -1,5 +1,10 @@
import { describe, expect, it } from "vitest";
import { isOpenAIApiBaseUrl, isOpenAICodexBaseUrl } from "./base-url.js";
import {
canonicalizeCodexResponsesBaseUrl,
isOpenAIApiBaseUrl,
isOpenAICodexBaseUrl,
OPENAI_CODEX_RESPONSES_BASE_URL,
} from "./base-url.js";
describe("openai base URL helpers", () => {
it("recognizes direct OpenAI API routes", () => {
@@ -36,4 +41,20 @@ describe("openai base URL helpers", () => {
expect(isOpenAICodexBaseUrl("https://chatgpt.com/backend-api/codex/v2")).toBe(false);
expect(isOpenAICodexBaseUrl(undefined)).toBe(false);
});
// Legacy ChatGPT backend-api URL variants (bare, `/v1`-suffixed, or
// `/codex/v1`) must all collapse to the canonical Codex Responses base URL,
// while unrelated hosts pass through unchanged and `undefined` stays
// `undefined` (no default is invented for an unset config value).
it("canonicalizes legacy Codex Responses base URLs", () => {
expect(canonicalizeCodexResponsesBaseUrl("https://chatgpt.com/backend-api")).toBe(
OPENAI_CODEX_RESPONSES_BASE_URL,
);
expect(canonicalizeCodexResponsesBaseUrl("https://chatgpt.com/backend-api/v1")).toBe(
OPENAI_CODEX_RESPONSES_BASE_URL,
);
expect(canonicalizeCodexResponsesBaseUrl("https://chatgpt.com/backend-api/codex/v1")).toBe(
OPENAI_CODEX_RESPONSES_BASE_URL,
);
// Non-ChatGPT hosts (e.g. a self-hosted proxy) are NOT rewritten.
expect(canonicalizeCodexResponsesBaseUrl("https://proxy.example.com/v1")).toBe(
"https://proxy.example.com/v1",
);
expect(canonicalizeCodexResponsesBaseUrl(undefined)).toBeUndefined();
});
});

View File

@@ -1,5 +1,7 @@
import { normalizeOptionalString } from "openclaw/plugin-sdk/text-runtime";
export const OPENAI_CODEX_RESPONSES_BASE_URL = "https://chatgpt.com/backend-api/codex";
export function isOpenAIApiBaseUrl(baseUrl?: string): boolean {
const trimmed = normalizeOptionalString(baseUrl);
if (!trimmed) {
@@ -15,3 +17,7 @@ export function isOpenAICodexBaseUrl(baseUrl?: string): boolean {
}
return /^https?:\/\/chatgpt\.com\/backend-api(?:\/codex)?(?:\/v1)?\/?$/i.test(trimmed);
}
/**
 * Normalize a configured Codex base URL to the canonical Responses backend.
 *
 * Any recognized ChatGPT backend-api variant (with or without a `/codex`
 * segment or a trailing `/v1`) collapses to
 * {@link OPENAI_CODEX_RESPONSES_BASE_URL}; every other value — including
 * `undefined` — is returned untouched so custom proxies keep working.
 */
export function canonicalizeCodexResponsesBaseUrl(baseUrl?: string): string | undefined {
  if (isOpenAICodexBaseUrl(baseUrl)) {
    return OPENAI_CODEX_RESPONSES_BASE_URL;
  }
  return baseUrl;
}

View File

@@ -921,6 +921,49 @@ describe("openai image generation provider", () => {
expect(result.images[0]?.buffer).toEqual(Buffer.from("codex-image"));
});
// End-to-end check that a legacy `openai-codex.baseUrl` configured in
// `models.providers` is canonicalized before the image request is built:
// both the resolved HTTP request config and the final POST URL must target
// the `/backend-api/codex` Responses backend regardless of which legacy
// variant the user configured.
it.each([
"https://chatgpt.com/backend-api",
"https://chatgpt.com/backend-api/",
"https://chatgpt.com/backend-api/v1",
"https://chatgpt.com/backend-api/codex/v1",
])("canonicalizes configured Codex OAuth image baseUrl %s", async (configuredBaseUrl) => {
// Codex OAuth is the only available auth, so the Codex path must be taken
// (no silent fallback to an OPENAI_API_KEY-based route).
mockCodexAuthOnly();
mockCodexImageStream({ imageData: "codex-image" });
const provider = buildOpenAIImageGenerationProvider();
await provider.generateImage({
provider: "openai",
model: "gpt-image-2",
prompt: "Draw through a legacy configured Codex endpoint",
cfg: {
models: {
providers: {
"openai-codex": {
// The legacy URL under test; expected to be rewritten below.
baseUrl: configuredBaseUrl,
api: "openai-codex-responses",
models: [],
},
},
},
},
authStore: createCodexOAuthAuthStore(),
});
// The canonical base URL must already be in place when the provider
// resolves its HTTP request config...
expect(resolveProviderHttpRequestConfigMock).toHaveBeenCalledWith(
expect.objectContaining({
baseUrl: "https://chatgpt.com/backend-api/codex",
provider: "openai-codex",
api: "openai-codex-responses",
capability: "image",
}),
);
// ...and the actual POST must hit the canonical `/responses` endpoint.
expect(postJsonRequestMock).toHaveBeenCalledWith(
expect.objectContaining({
url: "https://chatgpt.com/backend-api/codex/responses",
}),
);
});
it("uses direct OpenAI auth when custom OpenAI image config is explicit", async () => {
mockGeneratedPngResponse();
resolveApiKeyForProviderMock.mockImplementation(async (params?: { provider?: string }) => {

View File

@@ -22,11 +22,12 @@ import {
sanitizeConfiguredModelProviderRequest,
} from "openclaw/plugin-sdk/provider-http";
import { isPrivateNetworkOptInEnabled } from "openclaw/plugin-sdk/ssrf-runtime";
import { canonicalizeCodexResponsesBaseUrl, OPENAI_CODEX_RESPONSES_BASE_URL } from "./base-url.js";
import { OPENAI_DEFAULT_IMAGE_MODEL as DEFAULT_OPENAI_IMAGE_MODEL } from "./default-models.js";
import { resolveConfiguredOpenAIBaseUrl } from "./shared.js";
const DEFAULT_OPENAI_IMAGE_BASE_URL = "https://api.openai.com/v1";
const DEFAULT_OPENAI_CODEX_IMAGE_BASE_URL = "https://chatgpt.com/backend-api/codex";
const DEFAULT_OPENAI_CODEX_IMAGE_BASE_URL = OPENAI_CODEX_RESPONSES_BASE_URL;
const OPENAI_CODEX_IMAGE_INSTRUCTIONS = "You are an image generation assistant.";
const DEFAULT_OPENAI_IMAGE_TIMEOUT_MS = 180_000;
const DEFAULT_OUTPUT_MIME = "image/png";
@@ -534,7 +535,7 @@ async function generateOpenAICodexImage(params: {
const codexProviderConfig = req.cfg?.models?.providers?.["openai-codex"];
const { baseUrl, allowPrivateNetwork, headers, dispatcherPolicy } =
resolveProviderHttpRequestConfig({
baseUrl: codexProviderConfig?.baseUrl,
baseUrl: canonicalizeCodexResponsesBaseUrl(codexProviderConfig?.baseUrl),
defaultBaseUrl: DEFAULT_OPENAI_CODEX_IMAGE_BASE_URL,
defaultHeaders: {
Authorization: `Bearer ${apiKey}`,

View File

@@ -1,6 +1,7 @@
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-model-shared";
import { OPENAI_CODEX_RESPONSES_BASE_URL } from "./base-url.js";
export const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api/codex";
export const OPENAI_CODEX_BASE_URL = OPENAI_CODEX_RESPONSES_BASE_URL;
export function buildOpenAICodexProvider(): ModelProviderConfig {
return {

View File

@@ -27,7 +27,11 @@ import {
OPENAI_CODEX_LOGIN_LABEL,
OPENAI_CODEX_WIZARD_GROUP,
} from "./auth-choice-copy.js";
import { isOpenAIApiBaseUrl, isOpenAICodexBaseUrl } from "./base-url.js";
import {
isOpenAIApiBaseUrl,
isOpenAICodexBaseUrl,
OPENAI_CODEX_RESPONSES_BASE_URL,
} from "./base-url.js";
import { OPENAI_CODEX_DEFAULT_MODEL } from "./default-models.js";
import { resolveCodexAuthIdentity } from "./openai-codex-auth-identity.js";
import { buildOpenAICodexProvider } from "./openai-codex-catalog.js";
@@ -41,7 +45,7 @@ import {
} from "./shared.js";
const PROVIDER_ID = "openai-codex";
const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api/codex";
const OPENAI_CODEX_BASE_URL = OPENAI_CODEX_RESPONSES_BASE_URL;
const OPENAI_CODEX_LOGIN_ASSISTANT_PRIORITY = -30;
const OPENAI_CODEX_DEVICE_PAIRING_ASSISTANT_PRIORITY = -10;
const OPENAI_CODEX_GPT_55_MODEL_ID = "gpt-5.5";