Mirror of https://github.com/openclaw/openclaw.git, synced 2026-05-06 12:20:44 +00:00
fix(codex): canonicalize the gpt-5.4-codex alias (#66438)
* fix(codex): canonicalize the gpt-5.4-codex alias
* Update CHANGELOG.md
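As a rough sketch of the intent behind the diff below (illustrative only: the helper names `canonicalizeCodexModelId` and `pickOverride` are made up for this note and simplify the plugin's actual hooks), assuming the legacy alias should always collapse to the canonical id while per-model overrides keep working:

    // Illustrative sketch, not the plugin source: collapse the legacy alias to
    // the canonical model id, leaving every other id untouched.
    const LEGACY_ALIAS = "gpt-5.4-codex";
    const CANONICAL_ID = "gpt-5.4";

    function canonicalizeCodexModelId(modelId: string): string {
      return modelId.trim().toLowerCase() === LEGACY_ALIAS ? CANONICAL_ID : modelId;
    }

    // Override lookup mirrors the precedence exercised by the tests below: an
    // entry configured for the alias id wins, otherwise a canonical-id entry
    // still applies to requests that used the alias.
    function pickOverride<T extends { id: string }>(
      overrides: readonly T[],
      requestedId: string,
    ): T | undefined {
      return (
        overrides.find((m) => m.id === requestedId) ??
        overrides.find((m) => m.id === canonicalizeCodexModelId(requestedId))
      );
    }

The actual change applies this idea at both points where the model id surfaces: dynamic model resolution (resolveCodexForwardCompatModel / buildDynamicModel) and resolved-model normalization (canonicalizeLegacyResolvedModel), so both paths report the canonical gpt-5.4 id.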
@@ -19,6 +19,7 @@ Docs: https://docs.openclaw.ai
 - Doctor/plugins: cache external `preferOver` catalog lookups within each plugin auto-enable pass so large `agents.list` configs no longer peg CPU and repeatedly reread plugin catalogs during doctor/plugins resolution. (#66246) Thanks @yfge.
 - Agents/local models: clarify low-context preflight hints for self-hosted models, point config-backed caps at the relevant OpenClaw setting, and stop suggesting larger models when `agents.defaults.contextTokens` is the real limit. (#66236) Thanks @ImLukeF.
 - Browser/SSRF: restore hostname navigation under the default browser SSRF policy while keeping explicit strict mode reachable from config, and keep managed loopback CDP `/json/new` fallback requests on the local CDP control policy so browser follow-up fixes stop regressing normal navigation or self-blocking local CDP control. (#66386) Thanks @obviyus.
+- Models/Codex: canonicalize the legacy `openai-codex/gpt-5.4-codex` runtime alias to `openai-codex/gpt-5.4` while still honoring alias-specific and canonical per-model overrides. (#43060) Thanks @Sapientropic and @vincentkoc.
 - Browser/SSRF: preserve explicit strict browser navigation mode for legacy `browser.ssrfPolicy.allowPrivateNetwork: false` configs by normalizing the legacy alias to the canonical strict marker instead of silently widening those installs to the default non-strict hostname-navigation path.
 - Agents/subagents: emit the subagent registry lazy-runtime stub on the stable dist path that both source and bundled runtime imports resolve, so the follow-up dist fix no longer still fails with `ERR_MODULE_NOT_FOUND` at runtime. (#66420) Thanks @obviyus.
 - Browser: keep loopback CDP readiness checks reachable under strict SSRF defaults so OpenClaw can reconnect to locally started managed Chrome. (#66354) Thanks @hxy91819.

@@ -134,6 +134,42 @@ describe("openai codex provider", () => {
     });
   });
 
+  it("resolves the legacy gpt-5.4-codex alias to canonical gpt-5.4", () => {
+    const provider = buildOpenAICodexProviderPlugin();
+
+    const model = provider.resolveDynamicModel?.({
+      provider: "openai-codex",
+      modelId: "gpt-5.4-codex",
+      modelRegistry: {
+        find: (providerId: string, modelId: string) => {
+          if (providerId === "openai-codex" && modelId === "gpt-5.3-codex") {
+            return {
+              id: "gpt-5.3-codex",
+              name: "gpt-5.3-codex",
+              provider: "openai-codex",
+              api: "openai-codex-responses",
+              baseUrl: "https://chatgpt.com/backend-api",
+              reasoning: true,
+              input: ["text", "image"] as const,
+              cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+              contextWindow: 272_000,
+              maxTokens: 128_000,
+            };
+          }
+          return undefined;
+        },
+      } as never,
+    });
+
+    expect(model).toMatchObject({
+      id: "gpt-5.4",
+      name: "gpt-5.4",
+      contextWindow: 1_050_000,
+      contextTokens: 272_000,
+      maxTokens: 128_000,
+    });
+  });
+
   it("resolves gpt-5.4-mini from codex templates with codex-sized limits", () => {
     const provider = buildOpenAICodexProviderPlugin();
 

@@ -201,4 +237,30 @@ describe("openai codex provider", () => {
       }),
     );
   });
+
+  it("canonicalizes legacy gpt-5.4-codex models during resolved-model normalization", () => {
+    const provider = buildOpenAICodexProviderPlugin();
+
+    const model = provider.normalizeResolvedModel?.({
+      provider: "openai-codex",
+      model: {
+        id: "gpt-5.4-codex",
+        name: "gpt-5.4-codex",
+        provider: "openai-codex",
+        api: "openai-codex-responses",
+        baseUrl: "https://chatgpt.com/backend-api",
+        reasoning: true,
+        input: ["text", "image"],
+        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+        contextWindow: 1_050_000,
+        contextTokens: 272_000,
+        maxTokens: 128_000,
+      },
+    } as never);
+
+    expect(model).toMatchObject({
+      id: "gpt-5.4",
+      name: "gpt-5.4",
+    });
+  });
 });

@@ -41,6 +41,7 @@ import {
 const PROVIDER_ID = "openai-codex";
 const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api";
 const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
+const OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID = "gpt-5.4-codex";
 const OPENAI_CODEX_GPT_54_MINI_MODEL_ID = "gpt-5.4-mini";
 const OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS = 1_050_000;
 const OPENAI_CODEX_GPT_54_DEFAULT_CONTEXT_TOKENS = 272_000;

@@ -88,6 +89,13 @@ const OPENAI_CODEX_MODERN_MODEL_IDS = [
 const OPENAI_RESPONSES_STREAM_HOOKS = buildProviderStreamFamilyHooks("openai-responses-defaults");
 
 function normalizeCodexTransport(model: ProviderRuntimeModel): ProviderRuntimeModel {
+  const lowerModelId = normalizeLowercaseStringOrEmpty(model.id);
+  const canonicalModelId =
+    lowerModelId === OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID ? OPENAI_CODEX_GPT_54_MODEL_ID : model.id;
+  const canonicalName =
+    normalizeLowercaseStringOrEmpty(model.name) === OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID
+      ? OPENAI_CODEX_GPT_54_MODEL_ID
+      : model.name;
   const useCodexTransport =
     !model.baseUrl || isOpenAIApiBaseUrl(model.baseUrl) || isOpenAICodexBaseUrl(model.baseUrl);
   const api =

@@ -96,25 +104,30 @@ function normalizeCodexTransport(model: ProviderRuntimeModel): ProviderRuntimeModel {
     api === "openai-codex-responses" && (!model.baseUrl || isOpenAIApiBaseUrl(model.baseUrl))
       ? OPENAI_CODEX_BASE_URL
       : model.baseUrl;
-  if (api === model.api && baseUrl === model.baseUrl) {
+  if (
+    api === model.api &&
+    baseUrl === model.baseUrl &&
+    canonicalModelId === model.id &&
+    canonicalName === model.name
+  ) {
     return model;
   }
   return {
     ...model,
+    id: canonicalModelId,
+    name: canonicalName,
     api,
     baseUrl,
   };
 }
 
-function resolveCodexForwardCompatModel(
-  ctx: ProviderResolveDynamicModelContext,
-): ProviderRuntimeModel | undefined {
+function resolveCodexForwardCompatModel(ctx: ProviderResolveDynamicModelContext) {
   const trimmedModelId = ctx.modelId.trim();
   const lower = normalizeLowercaseStringOrEmpty(trimmedModelId);
 
   let templateIds: readonly string[];
-  let patch: Partial<ProviderRuntimeModel> | undefined;
-  if (lower === OPENAI_CODEX_GPT_54_MODEL_ID) {
+  let patch: Parameters<typeof cloneFirstTemplateModel>[0]["patch"];
+  if (lower === OPENAI_CODEX_GPT_54_MODEL_ID || lower === OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID) {
     templateIds = OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS;
     patch = {
       contextWindow: OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS,

@@ -150,14 +163,23 @@ function resolveCodexForwardCompatModel(
   return (
     cloneFirstTemplateModel({
       providerId: PROVIDER_ID,
-      modelId: trimmedModelId,
+      modelId:
+        lower === OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID
+          ? OPENAI_CODEX_GPT_54_MODEL_ID
+          : trimmedModelId,
       templateIds,
       ctx,
       patch,
     }) ??
     normalizeModelCompat({
-      id: trimmedModelId,
-      name: trimmedModelId,
+      id:
+        lower === OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID
+          ? OPENAI_CODEX_GPT_54_MODEL_ID
+          : trimmedModelId,
+      name:
+        lower === OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID
+          ? OPENAI_CODEX_GPT_54_MODEL_ID
+          : trimmedModelId,
       api: "openai-codex-responses",
       provider: PROVIDER_ID,
       baseUrl: OPENAI_CODEX_BASE_URL,

@@ -182,8 +182,9 @@ function buildDynamicModel(
       };
     }
     case "openai-codex": {
+      const isLegacyGpt54Alias = lower === "gpt-5.4-codex";
       const template =
-        lower === "gpt-5.4"
+        lower === "gpt-5.4" || isLegacyGpt54Alias
           ? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.4"])
           : lower === "gpt-5.4-mini"
             ? findTemplate(params, "openai-codex", [

@@ -205,10 +206,10 @@ function buildDynamicModel(
         contextWindow: DEFAULT_CONTEXT_WINDOW,
         maxTokens: DEFAULT_CONTEXT_WINDOW,
       };
-      if (lower === "gpt-5.4") {
+      if (lower === "gpt-5.4" || isLegacyGpt54Alias) {
         return cloneTemplate(
           template,
           modelId,
           "gpt-5.4",
           {
             provider: "openai-codex",
             api: "openai-codex-responses",

@@ -827,6 +827,91 @@ describe("resolveModel", () => {
     expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4"));
   });
 
+  it("canonicalizes the legacy openai-codex gpt-5.4-codex alias at runtime", () => {
+    mockOpenAICodexTemplateModel(discoverModels);
+
+    const result = resolveModelForTest("openai-codex", "gpt-5.4-codex", "/tmp/agent");
+
+    expect(result.error).toBeUndefined();
+    expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4"));
+    expect(result.model?.id).toBe("gpt-5.4");
+    expect(result.model?.name).toBe("gpt-5.4");
+  });
+
+  it("applies canonical openai-codex overrides when resolving the gpt-5.4-codex alias", () => {
+    mockOpenAICodexTemplateModel(discoverModels);
+
+    const cfg = {
+      models: {
+        providers: {
+          "openai-codex": {
+            baseUrl: "https://proxy.example.com/backend-api",
+            api: "openai-codex-responses",
+            models: [
+              {
+                ...makeModel("gpt-5.4"),
+                contextWindow: 123456,
+                contextTokens: 65432,
+                maxTokens: 7777,
+                reasoning: false,
+              },
+            ],
+          },
+        },
+      },
+    } as unknown as OpenClawConfig;
+
+    const result = resolveModelForTest("openai-codex", "gpt-5.4-codex", "/tmp/agent", cfg);
+
+    expect(result.error).toBeUndefined();
+    expect(result.model).toMatchObject({
+      provider: "openai-codex",
+      id: "gpt-5.4",
+      api: "openai-codex-responses",
+      baseUrl: "https://proxy.example.com/backend-api",
+      contextWindow: 123456,
+      contextTokens: 65432,
+      maxTokens: 7777,
+      reasoning: false,
+    });
+  });
+
+  it("prefers alias-specific overrides over canonical ones for gpt-5.4-codex", () => {
+    mockOpenAICodexTemplateModel(discoverModels);
+
+    const cfg = {
+      models: {
+        providers: {
+          "openai-codex": {
+            api: "openai-codex-responses",
+            models: [
+              {
+                ...makeModel("gpt-5.4"),
+                contextWindow: 222222,
+                maxTokens: 22222,
+              },
+              {
+                ...makeModel("gpt-5.4-codex"),
+                contextWindow: 111111,
+                maxTokens: 11111,
+              },
+            ],
+          },
+        },
+      },
+    } as unknown as OpenClawConfig;
+
+    const result = resolveModelForTest("openai-codex", "gpt-5.4-codex", "/tmp/agent", cfg);
+
+    expect(result.error).toBeUndefined();
+    expect(result.model).toMatchObject({
+      provider: "openai-codex",
+      id: "gpt-5.4",
+      contextWindow: 111111,
+      maxTokens: 11111,
+    });
+  });
+
   it("builds an openai-codex fallback for gpt-5.4-mini", () => {
     mockOpenAICodexTemplateModel(discoverModels);
 

@@ -96,6 +96,24 @@ function resolveRuntimeHooks(params?: {
   return params?.runtimeHooks ?? DEFAULT_PROVIDER_RUNTIME_HOOKS;
 }
 
+function canonicalizeLegacyResolvedModel(params: {
+  provider: string;
+  model: Model<Api>;
+}): Model<Api> {
+  if (
+    normalizeProviderId(params.provider) !== "openai-codex" ||
+    params.model.id.trim().toLowerCase() !== "gpt-5.4-codex"
+  ) {
+    return params.model;
+  }
+  return {
+    ...params.model,
+    id: "gpt-5.4",
+    name:
+      params.model.name.trim().toLowerCase() === "gpt-5.4-codex" ? "gpt-5.4" : params.model.name,
+  };
+}
+
 function applyResolvedTransportFallback(params: {
   provider: string;
   cfg?: OpenClawConfig;

@@ -184,10 +202,13 @@ function normalizeResolvedModel(params: {
     runtimeHooks,
     model: compatNormalized ?? pluginNormalized ?? normalizedInputModel,
   });
-  return normalizeResolvedProviderModel({
+  return canonicalizeLegacyResolvedModel({
     provider: params.provider,
-    model:
-      fallbackTransportNormalized ?? compatNormalized ?? pluginNormalized ?? normalizedInputModel,
+    model: normalizeResolvedProviderModel({
+      provider: params.provider,
+      model:
+        fallbackTransportNormalized ?? compatNormalized ?? pluginNormalized ?? normalizedInputModel,
+    }),
   });
 }
 
@@ -270,7 +291,11 @@ function applyConfiguredProviderOverrides(params: {
       headers: sanitizeModelHeaders(discoveredModel.headers, { stripSecretRefMarkers: true }),
     };
   }
-  const configuredModel = providerConfig.models?.find((candidate) => candidate.id === modelId);
+  const configuredModel =
+    providerConfig.models?.find((candidate) => candidate.id === modelId) ??
+    (discoveredModel.id !== modelId
+      ? providerConfig.models?.find((candidate) => candidate.id === discoveredModel.id)
+      : undefined);
   const discoveredHeaders = sanitizeModelHeaders(discoveredModel.headers, {
     stripSecretRefMarkers: true,
   });