Models: augment raw registry GPT-5.4 fallbacks

This commit is contained in:
Vincent Koc
2026-03-05 23:42:05 -05:00
parent 22043197ba
commit 0f4950dad6
3 changed files with 175 additions and 4 deletions

View File

@@ -1,14 +1,16 @@
// Lazy-load pi-coding-agent model metadata so we can infer context windows when
// the agent reports a model id. This includes custom models.json entries.
import type { Api, Model } from "@mariozechner/pi-ai";
import { loadConfig } from "../config/config.js";
import type { OpenClawConfig } from "../config/config.js";
import { computeBackoff, type BackoffPolicy } from "../infra/backoff.js";
import { consumeRootOptionToken, FLAG_TERMINATOR } from "../infra/cli-root-options.js";
import { resolveOpenClawAgentDir } from "./agent-paths.js";
import { augmentKnownForwardCompatModels } from "./model-forward-compat.js";
import { ensureOpenClawModelsJson } from "./models-config.js";
type ModelEntry = { id: string; contextWindow?: number };
type ModelEntry = { id: string; provider?: string; contextWindow?: number };
type ModelRegistryLike = {
getAvailable?: () => ModelEntry[];
getAll: () => ModelEntry[];
@@ -156,10 +158,11 @@ function ensureContextWindowCacheLoaded(): Promise<void> {
const agentDir = resolveOpenClawAgentDir();
const authStorage = discoverAuthStorage(agentDir);
const modelRegistry = discoverModels(authStorage, agentDir) as unknown as ModelRegistryLike;
const models =
const models = augmentKnownForwardCompatModels(
typeof modelRegistry.getAvailable === "function"
? modelRegistry.getAvailable()
: modelRegistry.getAll();
: modelRegistry.getAll(),
);
applyDiscoveredContextWindows({
cache: MODEL_CACHE,
models,

View File

@@ -14,6 +14,7 @@ const OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS = ["gpt-5.2-pro", "gpt-5.2"] as const
const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
const OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const;
const OPENAI_CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex";
const OPENAI_CODEX_GPT_53_SPARK_MODEL_ID = "gpt-5.3-codex-spark";
const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
const ANTHROPIC_OPUS_46_MODEL_ID = "claude-opus-4-6";
@@ -109,6 +110,172 @@ function cloneFirstTemplateModel(params: {
return undefined;
}
/**
 * Clone the first available template model into a synthetic entry for
 * `trimmedModelId`, optionally overriding fields via `patch`.
 *
 * Template ids are tried in the order given (duplicates and empty ids are
 * skipped). A template matches when its normalized provider equals
 * `normalizedProvider` and its trimmed id equals the template id
 * case-insensitively. The clone keeps every field of the template except
 * `id`/`name`, which are set to `trimmedModelId`, with `patch` applied last.
 *
 * @returns the normalized synthetic model, or `undefined` when no template
 *   model exists in `models`.
 */
function cloneSyntheticTemplateModel(params: {
	models: Model<Api>[];
	normalizedProvider: string;
	trimmedModelId: string;
	templateIds: readonly string[];
	patch?: Partial<Model<Api>>;
}): Model<Api> | undefined {
	const { models, normalizedProvider, trimmedModelId, templateIds, patch } = params;
	for (const templateId of [...new Set(templateIds)].filter(Boolean)) {
		// Hoist the lowercased target so the find callback does not
		// re-lowercase the same template id for every candidate model.
		const targetId = templateId.toLowerCase();
		// Array#find already yields undefined on a miss; no `?? null` needed.
		const template = models.find(
			(model) =>
				normalizeProviderId(model.provider) === normalizedProvider &&
				model.id.trim().toLowerCase() === targetId,
		);
		if (!template) {
			continue;
		}
		return normalizeModelCompat({
			...template,
			id: trimmedModelId,
			name: trimmedModelId,
			...patch,
		} as Model<Api>);
	}
	return undefined;
}
/**
 * Augment a discovered model list with known forward-compatible model ids so
 * newer model names resolve even when the local registry predates them.
 *
 * For each known-new id we first try to clone an existing sibling model as a
 * template (via cloneSyntheticTemplateModel); when no template is present we
 * fall back to a hard-coded zero-cost entry. Entries are only synthesized for
 * providers that already appear in the input, and ids already present
 * (matched by normalized provider + case-insensitive id) are never duplicated.
 *
 * @param models discovered registry models; the array is not mutated.
 * @returns a new array containing the input models plus any synthesized entries.
 */
export function augmentKnownForwardCompatModels(models: Model<Api>[]): Model<Api>[] {
	const next = [...models];
	// Dedup keys are "provider::id", provider-normalized and id-lowercased.
	const existing = new Set(
		next.map((model) => `${normalizeProviderId(model.provider)}::${model.id.trim().toLowerCase()}`),
	);
	const hasProvider = (provider: string) =>
		next.some((model) => normalizeProviderId(model.provider) === provider);
	const pushIfMissing = (provider: string, id: string, model: Model<Api> | undefined) => {
		const key = `${normalizeProviderId(provider)}::${id.trim().toLowerCase()}`;
		if (existing.has(key) || !model) {
			return;
		}
		next.push(model);
		existing.add(key);
	};
	// Shared builder for the hard-coded fallback entries: a zero-cost,
	// reasoning-capable, text+image model. Factors out the four previously
	// duplicated object literals.
	const buildFallback = (spec: {
		id: string;
		api: string;
		provider: string;
		baseUrl: string;
		contextWindow: number;
		maxTokens: number;
	}): Model<Api> =>
		normalizeModelCompat({
			id: spec.id,
			name: spec.id,
			api: spec.api,
			provider: spec.provider,
			baseUrl: spec.baseUrl,
			reasoning: true,
			input: ["text", "image"],
			cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
			contextWindow: spec.contextWindow,
			maxTokens: spec.maxTokens,
		} as Model<Api>);
	if (hasProvider("openai")) {
		// Both GPT-5.4 variants share the same responses-API override patch.
		const openaiPatch: Partial<Model<Api>> = {
			api: "openai-responses",
			provider: "openai",
			baseUrl: "https://api.openai.com/v1",
			reasoning: true,
			input: ["text", "image"],
			contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
			maxTokens: OPENAI_GPT_54_MAX_TOKENS,
		};
		const openaiSpecs: { id: string; templateIds: readonly string[] }[] = [
			{ id: OPENAI_GPT_54_MODEL_ID, templateIds: OPENAI_GPT_54_TEMPLATE_MODEL_IDS },
			{ id: OPENAI_GPT_54_PRO_MODEL_ID, templateIds: OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS },
		];
		// Push in order: the base model lands in `next` before the pro lookup
		// runs, matching the original sequential behavior.
		for (const { id, templateIds } of openaiSpecs) {
			pushIfMissing(
				"openai",
				id,
				cloneSyntheticTemplateModel({
					models: next,
					normalizedProvider: "openai",
					trimmedModelId: id,
					templateIds,
					patch: openaiPatch,
				}) ??
					buildFallback({
						id,
						api: "openai-responses",
						provider: "openai",
						baseUrl: "https://api.openai.com/v1",
						contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
						maxTokens: OPENAI_GPT_54_MAX_TOKENS,
					}),
			);
		}
	}
	if (hasProvider("openai-codex")) {
		pushIfMissing(
			"openai-codex",
			OPENAI_CODEX_GPT_54_MODEL_ID,
			cloneSyntheticTemplateModel({
				models: next,
				normalizedProvider: "openai-codex",
				trimmedModelId: OPENAI_CODEX_GPT_54_MODEL_ID,
				templateIds: OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS,
			}) ??
				buildFallback({
					id: OPENAI_CODEX_GPT_54_MODEL_ID,
					api: "openai-codex-responses",
					provider: "openai-codex",
					baseUrl: "https://chatgpt.com/backend-api",
					contextWindow: DEFAULT_CONTEXT_TOKENS,
					maxTokens: DEFAULT_CONTEXT_TOKENS,
				}),
		);
		pushIfMissing(
			"openai-codex",
			OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,
			cloneSyntheticTemplateModel({
				models: next,
				normalizedProvider: "openai-codex",
				trimmedModelId: OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,
				// Prefer the 5.3 codex model as template, then older codex ids.
				templateIds: [OPENAI_CODEX_GPT_53_MODEL_ID, ...OPENAI_CODEX_TEMPLATE_MODEL_IDS],
				patch: {
					api: "openai-codex-responses",
					provider: "openai-codex",
					baseUrl: "https://chatgpt.com/backend-api",
					reasoning: true,
					input: ["text", "image"],
				},
			}) ??
				buildFallback({
					id: OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,
					api: "openai-codex-responses",
					provider: "openai-codex",
					baseUrl: "https://chatgpt.com/backend-api",
					contextWindow: DEFAULT_CONTEXT_TOKENS,
					maxTokens: DEFAULT_CONTEXT_TOKENS,
				}),
		);
	}
	return next;
}
// NOTE(review): consumers of these sets are outside this view — presumably
// they gate which providers are eligible for the GPT-5.4 / GPT-5.3 codex
// handling above; confirm against the rest of the file before relying on this.
const CODEX_GPT54_ELIGIBLE_PROVIDERS = new Set(["openai-codex"]);
// GPT-5.3 codex eligibility additionally includes GitHub Copilot registries.
const CODEX_GPT53_ELIGIBLE_PROVIDERS = new Set(["openai-codex", "github-copilot"]);

View File

@@ -8,6 +8,7 @@ import {
resolveAwsSdkEnvVarName,
resolveEnvApiKey,
} from "../../agents/model-auth.js";
import { augmentKnownForwardCompatModels } from "../../agents/model-forward-compat.js";
import { ensureOpenClawModelsJson } from "../../agents/models-config.js";
import { discoverAuthStorage, discoverModels } from "../../agents/pi-model-discovery.js";
import type { OpenClawConfig } from "../../config/config.js";
@@ -99,7 +100,7 @@ export async function loadModelRegistry(cfg: OpenClawConfig) {
const agentDir = resolveOpenClawAgentDir();
const authStorage = discoverAuthStorage(agentDir);
const registry = discoverModels(authStorage, agentDir);
const models = registry.getAll();
const models = augmentKnownForwardCompatModels(registry.getAll());
let availableKeys: Set<string> | undefined;
let availabilityErrorMessage: string | undefined;