mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-06 14:20:44 +00:00
fix(codex): canonicalize the gpt-5.4-codex alias (#66438)
* fix(codex): canonicalize the gpt-5.4-codex alias
* Update CHANGELOG.md
This commit is contained in:
@@ -182,8 +182,9 @@ function buildDynamicModel(
|
||||
};
|
||||
}
|
||||
case "openai-codex": {
|
||||
const isLegacyGpt54Alias = lower === "gpt-5.4-codex";
|
||||
const template =
|
||||
lower === "gpt-5.4"
|
||||
lower === "gpt-5.4" || isLegacyGpt54Alias
|
||||
? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.4"])
|
||||
: lower === "gpt-5.4-mini"
|
||||
? findTemplate(params, "openai-codex", [
|
||||
@@ -205,10 +206,10 @@ function buildDynamicModel(
|
||||
contextWindow: DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: DEFAULT_CONTEXT_WINDOW,
|
||||
};
|
||||
if (lower === "gpt-5.4") {
|
||||
if (lower === "gpt-5.4" || isLegacyGpt54Alias) {
|
||||
return cloneTemplate(
|
||||
template,
|
||||
modelId,
|
||||
"gpt-5.4",
|
||||
{
|
||||
provider: "openai-codex",
|
||||
api: "openai-codex-responses",
|
||||
|
||||
@@ -827,6 +827,91 @@ describe("resolveModel", () => {
|
||||
expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4"));
|
||||
});
|
||||
|
||||
it("canonicalizes the legacy openai-codex gpt-5.4-codex alias at runtime", () => {
|
||||
mockOpenAICodexTemplateModel(discoverModels);
|
||||
|
||||
const result = resolveModelForTest("openai-codex", "gpt-5.4-codex", "/tmp/agent");
|
||||
|
||||
expect(result.error).toBeUndefined();
|
||||
expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4"));
|
||||
expect(result.model?.id).toBe("gpt-5.4");
|
||||
expect(result.model?.name).toBe("gpt-5.4");
|
||||
});
|
||||
|
||||
it("applies canonical openai-codex overrides when resolving the gpt-5.4-codex alias", () => {
|
||||
mockOpenAICodexTemplateModel(discoverModels);
|
||||
|
||||
const cfg = {
|
||||
models: {
|
||||
providers: {
|
||||
"openai-codex": {
|
||||
baseUrl: "https://proxy.example.com/backend-api",
|
||||
api: "openai-codex-responses",
|
||||
models: [
|
||||
{
|
||||
...makeModel("gpt-5.4"),
|
||||
contextWindow: 123456,
|
||||
contextTokens: 65432,
|
||||
maxTokens: 7777,
|
||||
reasoning: false,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
} as unknown as OpenClawConfig;
|
||||
|
||||
const result = resolveModelForTest("openai-codex", "gpt-5.4-codex", "/tmp/agent", cfg);
|
||||
|
||||
expect(result.error).toBeUndefined();
|
||||
expect(result.model).toMatchObject({
|
||||
provider: "openai-codex",
|
||||
id: "gpt-5.4",
|
||||
api: "openai-codex-responses",
|
||||
baseUrl: "https://proxy.example.com/backend-api",
|
||||
contextWindow: 123456,
|
||||
contextTokens: 65432,
|
||||
maxTokens: 7777,
|
||||
reasoning: false,
|
||||
});
|
||||
});
|
||||
|
||||
it("prefers alias-specific overrides over canonical ones for gpt-5.4-codex", () => {
|
||||
mockOpenAICodexTemplateModel(discoverModels);
|
||||
|
||||
const cfg = {
|
||||
models: {
|
||||
providers: {
|
||||
"openai-codex": {
|
||||
api: "openai-codex-responses",
|
||||
models: [
|
||||
{
|
||||
...makeModel("gpt-5.4"),
|
||||
contextWindow: 222222,
|
||||
maxTokens: 22222,
|
||||
},
|
||||
{
|
||||
...makeModel("gpt-5.4-codex"),
|
||||
contextWindow: 111111,
|
||||
maxTokens: 11111,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
} as unknown as OpenClawConfig;
|
||||
|
||||
const result = resolveModelForTest("openai-codex", "gpt-5.4-codex", "/tmp/agent", cfg);
|
||||
|
||||
expect(result.error).toBeUndefined();
|
||||
expect(result.model).toMatchObject({
|
||||
provider: "openai-codex",
|
||||
id: "gpt-5.4",
|
||||
contextWindow: 111111,
|
||||
maxTokens: 11111,
|
||||
});
|
||||
});
|
||||
|
||||
it("builds an openai-codex fallback for gpt-5.4-mini", () => {
|
||||
mockOpenAICodexTemplateModel(discoverModels);
|
||||
|
||||
|
||||
@@ -96,6 +96,24 @@ function resolveRuntimeHooks(params?: {
|
||||
return params?.runtimeHooks ?? DEFAULT_PROVIDER_RUNTIME_HOOKS;
|
||||
}
|
||||
|
||||
function canonicalizeLegacyResolvedModel(params: {
|
||||
provider: string;
|
||||
model: Model<Api>;
|
||||
}): Model<Api> {
|
||||
if (
|
||||
normalizeProviderId(params.provider) !== "openai-codex" ||
|
||||
params.model.id.trim().toLowerCase() !== "gpt-5.4-codex"
|
||||
) {
|
||||
return params.model;
|
||||
}
|
||||
return {
|
||||
...params.model,
|
||||
id: "gpt-5.4",
|
||||
name:
|
||||
params.model.name.trim().toLowerCase() === "gpt-5.4-codex" ? "gpt-5.4" : params.model.name,
|
||||
};
|
||||
}
|
||||
|
||||
function applyResolvedTransportFallback(params: {
|
||||
provider: string;
|
||||
cfg?: OpenClawConfig;
|
||||
@@ -184,10 +202,13 @@ function normalizeResolvedModel(params: {
|
||||
runtimeHooks,
|
||||
model: compatNormalized ?? pluginNormalized ?? normalizedInputModel,
|
||||
});
|
||||
return normalizeResolvedProviderModel({
|
||||
return canonicalizeLegacyResolvedModel({
|
||||
provider: params.provider,
|
||||
model:
|
||||
fallbackTransportNormalized ?? compatNormalized ?? pluginNormalized ?? normalizedInputModel,
|
||||
model: normalizeResolvedProviderModel({
|
||||
provider: params.provider,
|
||||
model:
|
||||
fallbackTransportNormalized ?? compatNormalized ?? pluginNormalized ?? normalizedInputModel,
|
||||
}),
|
||||
});
|
||||
}
|
||||
|
||||
@@ -270,7 +291,11 @@ function applyConfiguredProviderOverrides(params: {
|
||||
headers: sanitizeModelHeaders(discoveredModel.headers, { stripSecretRefMarkers: true }),
|
||||
};
|
||||
}
|
||||
const configuredModel = providerConfig.models?.find((candidate) => candidate.id === modelId);
|
||||
const configuredModel =
|
||||
providerConfig.models?.find((candidate) => candidate.id === modelId) ??
|
||||
(discoveredModel.id !== modelId
|
||||
? providerConfig.models?.find((candidate) => candidate.id === discoveredModel.id)
|
||||
: undefined);
|
||||
const discoveredHeaders = sanitizeModelHeaders(discoveredModel.headers, {
|
||||
stripSecretRefMarkers: true,
|
||||
});
|
||||
|
||||
Reference in New Issue
Block a user