fix(openai-codex): honor providerConfig.baseUrl in dynamic-model synthesis fallback (#76428)

* fix(openai-codex): honor providerConfig.baseUrl in dynamic-model synthesis fallback

The synthesis fallback in resolveCodexForwardCompatModel hardcoded
OPENAI_CODEX_BASE_URL when the model registry had no template row to
clone, which meant openai-codex providers configured with a custom
baseUrl (e.g. a local proxy that forwards Codex traffic) silently
fell back to api.openai.com / chatgpt.com — bypassing the proxy and
typically failing the auth contract.

Synthesis now reads ctx.providerConfig.baseUrl when present, with the
existing OPENAI_CODEX_BASE_URL constant as the fallback. No effect on
template-clone or registry-find paths, which already inherit the
configured baseUrl through the cloned template.

* docs(changelog): add Unreleased Fixes entry for #76428 codex synthesis baseUrl honor
This commit is contained in:
Arnab Saha
2026-05-02 22:52:29 -07:00
committed by GitHub
parent 73a95d3af4
commit b83b2e3f1c
3 changed files with 41 additions and 2 deletions

View File

@@ -68,6 +68,7 @@ Docs: https://docs.openclaw.ai
- Plugins/externalization: keep official external install docs, update examples, and live Codex npm checks on default npm tags instead of `@beta`. Thanks @vincentkoc.
- Plugins/externalization: keep ACPX, Google Chat, and LINE publishable plugin dist trees out of the core npm package file list.
- Plugins/ClawHub: fall back to version metadata when the artifact resolver route is missing and keep the Docker ClawHub fixture aligned with npm-pack artifact resolution, avoiding false version-not-found failures during plugin install validation. Thanks @vincentkoc.
- Providers/openai-codex: honor `providerConfig.baseUrl` in the dynamic-model synthesis fallback so codex providers configured with a custom upstream (for example a forwarding proxy) no longer silently bypass the configured URL when the registry has no template row to clone for the requested model id. (#76428) Thanks @arniesaha.
- Status/channels: show configured channels in `openclaw status` and config-only `openclaw channels status` output even when the Gateway is unreachable, avoiding empty Channels tables on WSL and other no-Gateway paths. Thanks @vincentkoc.
- Plugins/ClawHub: explain unavailable explicit ClawHub ClawPack artifact downloads with a temporary npm install hint while ClawHub artifact routing rolls out. Thanks @vincentkoc.
- Media: accept home-relative `MEDIA:~/...` attachment paths while preserving existing file-read policy, traversal checks, and media type validation. Fixes #73796. Thanks @fabkury.

View File

@@ -405,6 +405,43 @@ describe("openai codex provider", () => {
});
});
// Regression coverage for #76428: when the gpt-5.5 row is synthesized (no
// clonable registry row), a configured custom upstream must be honored.
it("honors providerConfig.baseUrl in the gpt-5.5 synthesis fallback", () => {
  const customBaseUrl = "http://proxy.local:30400";
  const plugin = buildOpenAICodexProviderPlugin();
  const registry = createSingleModelRegistry(createCodexTemplate({}), null);
  const resolved = plugin.resolveDynamicModel?.({
    provider: "openai-codex",
    modelId: "gpt-5.5",
    modelRegistry: registry as never,
    providerConfig: { baseUrl: customBaseUrl },
  });
  expect(resolved).toMatchObject({
    id: "gpt-5.5",
    api: "openai-codex-responses",
    baseUrl: customBaseUrl,
  });
});
// Same regression as the gpt-5.5 case, via an entirely empty registry so the
// resolver can only take the pure-synthesis path; also pins the synthesized
// window/token limits.
it("honors providerConfig.baseUrl in the gpt-5.4 synthesis fallback", () => {
  const customBaseUrl = "http://proxy.local:30400";
  const plugin = buildOpenAICodexProviderPlugin();
  // A registry whose find() always misses forces synthesis for any model id.
  const bareRegistry = { find: () => null };
  const resolved = plugin.resolveDynamicModel?.({
    provider: "openai-codex",
    modelId: "gpt-5.4",
    modelRegistry: bareRegistry as never,
    providerConfig: { baseUrl: customBaseUrl },
  });
  expect(resolved).toMatchObject({
    id: "gpt-5.4",
    api: "openai-codex-responses",
    baseUrl: customBaseUrl,
    contextWindow: 1_050_000,
    maxTokens: 128_000,
  });
});
it("resolves gpt-5.4-pro from a gpt-5.4 runtime template when legacy codex rows are absent", () => {
const provider = buildOpenAICodexProviderPlugin();

View File

@@ -172,6 +172,7 @@ function normalizeCodexTransport(model: ProviderRuntimeModel): ProviderRuntimeMo
function resolveCodexForwardCompatModel(ctx: ProviderResolveDynamicModelContext) {
const trimmedModelId = ctx.modelId.trim();
const lower = normalizeLowercaseStringOrEmpty(trimmedModelId);
const synthBaseUrl = ctx.providerConfig?.baseUrl ?? OPENAI_CODEX_BASE_URL;
if (lower === OPENAI_CODEX_GPT_55_MODEL_ID) {
const model = ctx.modelRegistry.find(PROVIDER_ID, trimmedModelId) as
@@ -188,7 +189,7 @@ function resolveCodexForwardCompatModel(ctx: ProviderResolveDynamicModelContext)
name: trimmedModelId,
api: "openai-codex-responses",
provider: PROVIDER_ID,
baseUrl: OPENAI_CODEX_BASE_URL,
baseUrl: synthBaseUrl,
reasoning: true,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
@@ -264,7 +265,7 @@ function resolveCodexForwardCompatModel(ctx: ProviderResolveDynamicModelContext)
: trimmedModelId,
api: "openai-codex-responses",
provider: PROVIDER_ID,
baseUrl: OPENAI_CODEX_BASE_URL,
baseUrl: synthBaseUrl,
reasoning: true,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },