fix(models): support Codex model add metadata (#70820)
@@ -482,7 +482,12 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
       return false;
     }
     const id = ctx.modelId.trim().toLowerCase();
-    return id === OPENAI_CODEX_GPT_54_MODEL_ID || id === OPENAI_CODEX_GPT_54_PRO_MODEL_ID;
+    return [
+      OPENAI_CODEX_GPT_55_MODEL_ID,
+      OPENAI_CODEX_GPT_55_PRO_MODEL_ID,
+      OPENAI_CODEX_GPT_54_MODEL_ID,
+      OPENAI_CODEX_GPT_54_PRO_MODEL_ID,
+    ].includes(id);
   },
   ...buildOpenAIResponsesProviderHooks(),
   resolveReasoningOutputMode: () => "native",
src/agents/openai-codex-models-add-legacy.ts (new file, 33 lines)
@@ -0,0 +1,33 @@
+import type { ModelDefinitionConfig } from "../config/types.models.js";
+import { normalizeProviderId } from "./provider-id.js";
+
+const LEGACY_MODELS_ADD_CODEX_MODEL_IDS = new Set(["gpt-5.5", "gpt-5.5-pro"]);
+
+export function isLegacyModelsAddCodexMetadataModel(params: {
+  provider: string;
+  model: Partial<ModelDefinitionConfig> | undefined;
+}): boolean {
+  const model = params.model;
+  if (normalizeProviderId(params.provider) !== "openai-codex" || !model) {
+    return false;
+  }
+  const id = model.id?.trim().toLowerCase();
+  if (!id || !LEGACY_MODELS_ADD_CODEX_MODEL_IDS.has(id)) {
+    return false;
+  }
+  return (
+    model.api === "openai-codex-responses" &&
+    model.reasoning === true &&
+    Array.isArray(model.input) &&
+    model.input.length === 2 &&
+    model.input[0] === "text" &&
+    model.input[1] === "image" &&
+    model.cost?.input === 5 &&
+    model.cost.output === 30 &&
+    model.cost.cacheRead === 0.5 &&
+    model.cost.cacheWrite === 0 &&
+    model.contextWindow === 400_000 &&
+    model.contextTokens === 272_000 &&
+    model.maxTokens === 128_000
+  );
+}
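For orientation, a short usage sketch of the detector above (not part of the diff; the values mirror this commit's test fixtures). Only an exact match of the old /models add defaults counts as legacy output; any hand-edited field breaks the fingerprint:

import { isLegacyModelsAddCodexMetadataModel } from "./openai-codex-models-add-legacy.js";

// Exact legacy bootstrap metadata for gpt-5.5: treated as /models add output.
isLegacyModelsAddCodexMetadataModel({
  provider: "openai-codex",
  model: {
    id: "gpt-5.5",
    api: "openai-codex-responses",
    reasoning: true,
    input: ["text", "image"],
    cost: { input: 5, output: 30, cacheRead: 0.5, cacheWrite: 0 },
    contextWindow: 400_000,
    contextTokens: 272_000,
    maxTokens: 128_000,
  },
}); // => true

// A hand-edited price breaks the fingerprint: treated as a deliberate override.
isLegacyModelsAddCodexMetadataModel({
  provider: "openai-codex",
  model: {
    id: "gpt-5.5",
    api: "openai-codex-responses",
    reasoning: true,
    input: ["text", "image"],
    cost: { input: 9, output: 99, cacheRead: 0.9, cacheWrite: 0 },
    contextWindow: 555_555,
    contextTokens: 111_111,
    maxTokens: 22_222,
  },
}); // => false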
@@ -231,18 +231,24 @@ function buildDynamicModel(
     case "openai-codex": {
       const isLegacyGpt54Alias = lower === "gpt-5.4-codex";
       const template =
-        lower === "gpt-5.4" || isLegacyGpt54Alias || lower === "gpt-5.4-pro"
-          ? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.3-codex", "gpt-5.2-codex"])
-          : lower === "gpt-5.4-mini"
-            ? findTemplate(params, "openai-codex", [
-                "gpt-5.4",
-                "gpt-5.1-codex-mini",
-                "gpt-5.3-codex",
-                "gpt-5.2-codex",
-              ])
-            : lower === "gpt-5.3-codex-spark"
-              ? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.3-codex", "gpt-5.2-codex"])
-              : findTemplate(params, "openai-codex", ["gpt-5.4"]);
+        lower === "gpt-5.5" || lower === "gpt-5.5-pro"
+          ? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.4-pro", "gpt-5.3-codex"])
+          : lower === "gpt-5.4" || isLegacyGpt54Alias || lower === "gpt-5.4-pro"
+            ? findTemplate(params, "openai-codex", ["gpt-5.4", "gpt-5.3-codex", "gpt-5.2-codex"])
+            : lower === "gpt-5.4-mini"
+              ? findTemplate(params, "openai-codex", [
+                  "gpt-5.4",
+                  "gpt-5.1-codex-mini",
+                  "gpt-5.3-codex",
+                  "gpt-5.2-codex",
+                ])
+              : lower === "gpt-5.3-codex-spark"
+                ? findTemplate(params, "openai-codex", [
+                    "gpt-5.4",
+                    "gpt-5.3-codex",
+                    "gpt-5.2-codex",
+                  ])
+                : findTemplate(params, "openai-codex", ["gpt-5.4"]);
       const fallback = {
         provider: "openai-codex",
         api: "openai-codex-responses",
@@ -253,6 +259,25 @@ function buildDynamicModel(
         contextWindow: DEFAULT_CONTEXT_WINDOW,
         maxTokens: DEFAULT_CONTEXT_WINDOW,
       };
+      if (lower === "gpt-5.5" || lower === "gpt-5.5-pro") {
+        return cloneTemplate(
+          template,
+          modelId,
+          {
+            provider: "openai-codex",
+            api: "openai-codex-responses",
+            baseUrl: OPENAI_CODEX_BASE_URL,
+            cost:
+              lower === "gpt-5.5-pro"
+                ? { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 }
+                : { input: 5, output: 30, cacheRead: 0, cacheWrite: 0 },
+            contextWindow: 1_000_000,
+            contextTokens: 272_000,
+            maxTokens: 128_000,
+          },
+          fallback,
+        );
+      }
       if (lower === "gpt-5.4" || isLegacyGpt54Alias) {
         return cloneTemplate(
           template,
@@ -556,7 +581,9 @@ export function createProviderRuntimeTestMock(options: ProviderRuntimeTestMockOp
       context: { modelId: string };
     }) =>
       params.provider === "openai-codex" &&
-      params.context.modelId.trim().toLowerCase() === "gpt-5.4",
+      ["gpt-5.5", "gpt-5.5-pro", "gpt-5.4", "gpt-5.4-pro"].includes(
+        params.context.modelId.trim().toLowerCase(),
+      ),
     prepareProviderDynamicModel: async (params: {
       provider: string;
       context: { modelId: string };
@@ -1047,6 +1047,149 @@ describe("resolveModel", () => {
     });
   });

+  it("lets official openai-codex metadata override stale configured model rows", () => {
+    mockDiscoveredModel(discoverModels, {
+      provider: "openai-codex",
+      modelId: "gpt-5.4",
+      templateModel: {
+        ...buildOpenAICodexForwardCompatExpectation("gpt-5.4"),
+        name: "GPT-5.4",
+      },
+    });
+
+    const cfg = {
+      models: {
+        providers: {
+          "openai-codex": {
+            baseUrl: "https://chatgpt.com/backend-api",
+            api: "openai-codex-responses",
+            models: [
+              {
+                ...makeModel("gpt-5.5-pro"),
+                api: "openai-codex-responses",
+                reasoning: false,
+                input: ["text"],
+                cost: { input: 5, output: 30, cacheRead: 0.5, cacheWrite: 0 },
+                contextWindow: 400_000,
+                contextTokens: 64_000,
+                maxTokens: 32_000,
+                metadataSource: "models-add",
+              },
+            ],
+          },
+        },
+      },
+    } as unknown as OpenClawConfig;
+
+    const result = resolveModelForTest("openai-codex", "gpt-5.5-pro", "/tmp/agent", cfg);
+
+    expect(result.error).toBeUndefined();
+    expect(result.model).toMatchObject({
+      provider: "openai-codex",
+      id: "gpt-5.5-pro",
+      api: "openai-codex-responses",
+      baseUrl: "https://chatgpt.com/backend-api",
+      reasoning: true,
+      input: ["text", "image"],
+      cost: { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 },
+      contextWindow: 1_000_000,
+      contextTokens: 272_000,
+      maxTokens: 128_000,
+    });
+  });
+
+  it("lets official openai-codex metadata override legacy unmarked models-add rows", () => {
+    mockDiscoveredModel(discoverModels, {
+      provider: "openai-codex",
+      modelId: "gpt-5.4",
+      templateModel: {
+        ...buildOpenAICodexForwardCompatExpectation("gpt-5.4"),
+        name: "GPT-5.4",
+      },
+    });
+
+    const cfg = {
+      models: {
+        providers: {
+          "openai-codex": {
+            baseUrl: "https://chatgpt.com/backend-api",
+            api: "openai-codex-responses",
+            models: [
+              {
+                ...makeModel("gpt-5.5"),
+                api: "openai-codex-responses",
+                reasoning: true,
+                input: ["text", "image"],
+                cost: { input: 5, output: 30, cacheRead: 0.5, cacheWrite: 0 },
+                contextWindow: 400_000,
+                contextTokens: 272_000,
+                maxTokens: 128_000,
+              },
+            ],
+          },
+        },
+      },
+    } as unknown as OpenClawConfig;
+
+    const result = resolveModelForTest("openai-codex", "gpt-5.5", "/tmp/agent", cfg);
+
+    expect(result.error).toBeUndefined();
+    expect(result.model).toMatchObject({
+      provider: "openai-codex",
+      id: "gpt-5.5",
+      cost: { input: 5, output: 30, cacheRead: 0, cacheWrite: 0 },
+      contextWindow: 1_000_000,
+      contextTokens: 272_000,
+      maxTokens: 128_000,
+    });
+  });
+
+  it("preserves unmarked manual openai-codex metadata overrides", () => {
+    mockDiscoveredModel(discoverModels, {
+      provider: "openai-codex",
+      modelId: "gpt-5.4",
+      templateModel: {
+        ...buildOpenAICodexForwardCompatExpectation("gpt-5.4"),
+        name: "GPT-5.4",
+      },
+    });
+
+    const cfg = {
+      models: {
+        providers: {
+          "openai-codex": {
+            baseUrl: "https://chatgpt.com/backend-api",
+            api: "openai-codex-responses",
+            models: [
+              {
+                ...makeModel("gpt-5.5"),
+                api: "openai-codex-responses",
+                reasoning: true,
+                input: ["text", "image"],
+                cost: { input: 9, output: 99, cacheRead: 0.9, cacheWrite: 0 },
+                contextWindow: 555_555,
+                contextTokens: 111_111,
+                maxTokens: 22_222,
+              },
+            ],
+          },
+        },
+      },
+    } as unknown as OpenClawConfig;
+
+    const result = resolveModelForTest("openai-codex", "gpt-5.5", "/tmp/agent", cfg);
+
+    expect(result.error).toBeUndefined();
+    expect(result.model).toMatchObject({
+      provider: "openai-codex",
+      id: "gpt-5.5",
+      cost: { input: 9, output: 99, cacheRead: 0.9, cacheWrite: 0 },
+      contextWindow: 555_555,
+      contextTokens: 111_111,
+      maxTokens: 22_222,
+    });
+  });
+
   it("prefers runtime-resolved openai-codex gpt-5.4 metadata during async resolution too", async () => {
     mockDiscoveredModel(discoverModels, {
       provider: "openai-codex",
@@ -22,6 +22,7 @@ import {
   buildSuppressedBuiltInModelError,
   shouldSuppressBuiltInModel,
 } from "../model-suppression.js";
+import { isLegacyModelsAddCodexMetadataModel } from "../openai-codex-models-add-legacy.js";
 import { discoverAuthStorage, discoverModels } from "../pi-model-discovery.js";
 import {
   attachModelProviderRequestTransport,
@@ -275,6 +276,16 @@ function resolveConfiguredProviderConfig(
   return findNormalizedProviderValue(configuredProviders, provider);
 }

+function isModelsAddMetadataModel(params: {
+  provider: string;
+  model: NonNullable<InlineProviderConfig["models"]>[number] | undefined;
+}) {
+  return (
+    (params.model as { metadataSource?: unknown } | undefined)?.metadataSource === "models-add" ||
+    isLegacyModelsAddCodexMetadataModel(params)
+  );
+}
+
 function applyConfiguredProviderOverrides(params: {
   provider: string;
   discoveredModel: ProviderRuntimeModel;
@@ -282,6 +293,7 @@ function applyConfiguredProviderOverrides(params: {
   modelId: string;
   cfg?: OpenClawConfig;
   runtimeHooks?: ProviderRuntimeHooks;
+  preferDiscoveredModelMetadata?: boolean;
 }): ProviderRuntimeModel {
   const { discoveredModel, providerConfig, modelId } = params;
   if (!providerConfig) {
@@ -296,6 +308,11 @@ function applyConfiguredProviderOverrides(params: {
     (discoveredModel.id !== modelId
       ? providerConfig.models?.find((candidate) => candidate.id === discoveredModel.id)
       : undefined);
+  const metadataOverrideModel =
+    params.preferDiscoveredModelMetadata &&
+    isModelsAddMetadataModel({ provider: params.provider, model: configuredModel })
+      ? undefined
+      : configuredModel;
   const discoveredHeaders = sanitizeModelHeaders(discoveredModel.headers, {
     stripSecretRefMarkers: true,
   });
@@ -321,14 +338,14 @@ function applyConfiguredProviderOverrides(params: {
   const normalizedInput = resolveProviderModelInput({
     provider: params.provider,
     modelId,
-    modelName: configuredModel?.name ?? discoveredModel.name,
-    input: configuredModel?.input,
+    modelName: metadataOverrideModel?.name ?? discoveredModel.name,
+    input: metadataOverrideModel?.input,
     fallbackInput: discoveredModel.input,
   });

   const resolvedTransport = resolveProviderTransport({
     provider: params.provider,
-    api: configuredModel?.api ?? providerConfig.api ?? discoveredModel.api,
+    api: metadataOverrideModel?.api ?? providerConfig.api ?? discoveredModel.api,
     baseUrl: providerConfig.baseUrl ?? discoveredModel.baseUrl,
     cfg: params.cfg,
     runtimeHooks: params.runtimeHooks,
@@ -353,14 +370,14 @@ function applyConfiguredProviderOverrides(params: {
       ...discoveredModel,
       api: requestConfig.api ?? "openai-responses",
       baseUrl: requestConfig.baseUrl ?? discoveredModel.baseUrl,
-      reasoning: configuredModel?.reasoning ?? discoveredModel.reasoning,
+      reasoning: metadataOverrideModel?.reasoning ?? discoveredModel.reasoning,
       input: normalizedInput,
-      cost: configuredModel?.cost ?? discoveredModel.cost,
-      contextWindow: configuredModel?.contextWindow ?? discoveredModel.contextWindow,
-      contextTokens: configuredModel?.contextTokens ?? discoveredModel.contextTokens,
-      maxTokens: configuredModel?.maxTokens ?? discoveredModel.maxTokens,
+      cost: metadataOverrideModel?.cost ?? discoveredModel.cost,
+      contextWindow: metadataOverrideModel?.contextWindow ?? discoveredModel.contextWindow,
+      contextTokens: metadataOverrideModel?.contextTokens ?? discoveredModel.contextTokens,
+      maxTokens: metadataOverrideModel?.maxTokens ?? discoveredModel.maxTokens,
       headers: requestConfig.headers,
-      compat: configuredModel?.compat ?? discoveredModel.compat,
+      compat: metadataOverrideModel?.compat ?? discoveredModel.compat,
     },
     providerRequest,
   );
@@ -458,6 +475,14 @@ function resolvePluginDynamicModelWithRegistry(params: {
   const { provider, modelId, modelRegistry, cfg, agentDir, workspaceDir } = params;
   const runtimeHooks = params.runtimeHooks ?? DEFAULT_PROVIDER_RUNTIME_HOOKS;
   const providerConfig = resolveConfiguredProviderConfig(cfg, provider);
+  const preferDiscoveredModelMetadata = shouldCompareProviderRuntimeResolvedModel({
+    provider,
+    modelId,
+    cfg,
+    agentDir,
+    workspaceDir,
+    runtimeHooks,
+  });
   const pluginDynamicModel = runtimeHooks.runProviderDynamicModel({
     provider,
     config: cfg,
@@ -481,6 +506,7 @@ function resolvePluginDynamicModelWithRegistry(params: {
     modelId,
     cfg,
     runtimeHooks,
+    preferDiscoveredModelMetadata,
   });
   return normalizeResolvedModel({
     provider,
@@ -593,10 +619,7 @@ function preferProviderRuntimeResolvedModel(params: {
   explicitModel: Model<Api>;
   runtimeResolvedModel?: Model<Api>;
 }): Model<Api> {
-  if (
-    params.runtimeResolvedModel &&
-    params.runtimeResolvedModel.contextWindow > params.explicitModel.contextWindow
-  ) {
+  if (params.runtimeResolvedModel) {
     return params.runtimeResolvedModel;
   }
   return params.explicitModel;
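The hunk above changes behavior, not just shape: the context-window comparison is gone, so a runtime-resolved model now takes precedence whenever it exists. A minimal standalone restatement of the new rule (illustrative, not the real function or import):

// Hypothetical helper mirroring the new precedence logic.
function preferRuntimeResolved<T>(explicitModel: T, runtimeResolvedModel?: T): T {
  return runtimeResolvedModel ?? explicitModel;
}

// Previously the runtime-resolved model only won with a larger context window;
// now it wins even when its window is smaller:
preferRuntimeResolved({ contextWindow: 400_000 }, { contextWindow: 272_000 });
// => { contextWindow: 272_000 }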
@@ -22,7 +22,11 @@ const modelsAddMocks = vi.hoisted(() => ({
   listAddableProviders: vi.fn<(params: unknown) => string[]>(),
   validateAddProvider:
     vi.fn<
-      (params: unknown) => { ok: true; provider: string } | { ok: false; providers: string[] }
+      (
+        params: unknown,
+      ) =>
+        | { ok: true; provider: string }
+        | { ok: false; providers: string[]; knownProvider?: string }
     >(),
 }));

@@ -339,6 +343,24 @@ describe("handleModelsCommand", () => {
     expect(result?.reply?.text).toContain("```text\n/models ollama\n```");
   });

+  it("explains when a selectable provider does not support /models add", async () => {
+    modelsAddMocks.validateAddProvider.mockReturnValueOnce({
+      ok: false,
+      providers: ["lmstudio", "ollama"],
+      knownProvider: "openai",
+    });
+
+    const result = await handleModelsCommand(buildParams("/models add openai gpt-5.5"), true);
+
+    expect(result?.reply?.text).toContain(
+      "openai is available for model selection, but /models add cannot create models for this provider from chat.",
+    );
+    expect(result?.reply?.text).toContain("/models openai");
+    expect(result?.reply?.text).toContain("/model openai/<modelId>");
+    expect(result?.reply?.text).toContain("openclaw configure");
+    expect(result?.reply?.text).not.toContain("Unknown provider");
+  });
+
   it("adds a model and points users back to browse or switch", async () => {
     const result = await handleModelsCommand(buildParams("/models add ollama glm-5.1:cloud"), true);

@@ -409,6 +409,22 @@ export async function resolveModelsCommandReply(params: {
     discoveredProviders: providers,
   });
   if (!validatedProvider.ok) {
+    if (validatedProvider.knownProvider) {
+      return {
+        text: [
+          `${validatedProvider.knownProvider} is available for model selection, but /models add cannot create models for this provider from chat.`,
+          "",
+          "Browse:",
+          `/models ${validatedProvider.knownProvider}`,
+          "",
+          "Switch:",
+          `/model ${validatedProvider.knownProvider}/<modelId>`,
+          "",
+          "To configure providers or auth, run:",
+          "openclaw configure",
+        ].join("\n"),
+      };
+    }
     return {
       text: [
         `Unknown provider: ${parsed.provider}`,
@@ -110,6 +110,59 @@ describe("models-add", () => {
         queryOllamaModelShowInfo: ollamaMocks.queryOllamaModelShowInfo,
       };
     }
+    if (
+      params &&
+      typeof params === "object" &&
+      "dirName" in params &&
+      params.dirName === "openai" &&
+      "artifactBasename" in params &&
+      params.artifactBasename === "api.js"
+    ) {
+      return {
+        buildOpenAICodexProvider: () => ({
+          baseUrl: "https://chatgpt.com/backend-api",
+          api: "openai-codex-responses",
+          models: [],
+        }),
+        buildOpenAICodexProviderPlugin: () => ({
+          resolveDynamicModel: ({ modelId }: { modelId: string }) => {
+            const common = {
+              id: modelId,
+              name: modelId,
+              api: "openai-codex-responses",
+              provider: "openai-codex",
+              baseUrl: "https://chatgpt.com/backend-api/codex",
+              reasoning: true,
+              input: ["text", "image"],
+              contextTokens: 272_000,
+              maxTokens: 128_000,
+            } as const;
+            switch (modelId) {
+              case "gpt-5.4":
+                return {
+                  ...common,
+                  contextWindow: 1_050_000,
+                  cost: { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 },
+                };
+              case "gpt-5.5":
+                return {
+                  ...common,
+                  contextWindow: 1_000_000,
+                  cost: { input: 5, output: 30, cacheRead: 0, cacheWrite: 0 },
+                };
+              case "gpt-5.5-pro":
+                return {
+                  ...common,
+                  contextWindow: 1_000_000,
+                  cost: { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 },
+                };
+              default:
+                return undefined;
+            }
+          },
+        }),
+      };
+    }
     throw new Error(`Unexpected facade load: ${JSON.stringify(params)}`);
   });
   ollamaMocks.buildOllamaModelDefinition.mockClear();
@@ -132,9 +185,9 @@ describe("models-add", () => {
     expect(
       listAddableProviders({
         cfg,
-        discoveredProviders: ["openai", "ollama"],
+        discoveredProviders: ["openai", "openai-codex", "ollama"],
       }),
-    ).toEqual(["lmstudio", "ollama"]);
+    ).toEqual(["lmstudio", "ollama", "openai-codex"]);
   });

   it("validates add providers against addable providers", () => {
@@ -149,6 +202,27 @@ describe("models-add", () => {
     });
   });

+  it("only bootstraps openai-codex when the provider is discovered", () => {
+    const cfg = {} as OpenClawConfig;
+
+    expect(validateAddProvider({ cfg, provider: "openai-codex", discoveredProviders: [] })).toEqual(
+      {
+        ok: false,
+        providers: ["lmstudio", "ollama"],
+      },
+    );
+    expect(
+      validateAddProvider({
+        cfg,
+        provider: "openai-codex",
+        discoveredProviders: ["openai-codex"],
+      }),
+    ).toEqual({
+      ok: true,
+      provider: "openai-codex",
+    });
+  });
+
   it("rejects discovered providers that are not configured for custom models", () => {
     const cfg = {} as OpenClawConfig;

@@ -161,6 +235,7 @@ describe("models-add", () => {
     ).toEqual({
       ok: false,
       providers: ["lmstudio", "ollama"],
+      knownProvider: "openai",
     });
   });

@@ -373,6 +448,89 @@ describe("models-add", () => {
     ]);
   });

+  it.each([
+    [
+      "gpt-5.4",
+      {
+        contextWindow: 1_050_000,
+        cost: { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 },
+      },
+    ],
+    [
+      "gpt-5.5",
+      {
+        contextWindow: 1_000_000,
+        cost: { input: 5, output: 30, cacheRead: 0, cacheWrite: 0 },
+      },
+    ],
+    [
+      "gpt-5.5-pro",
+      {
+        contextWindow: 1_000_000,
+        cost: { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 },
+      },
+    ],
+  ])(
+    "bootstraps openai-codex metadata for %s from the provider plugin",
+    async (modelId, expected) => {
+      const cfg = {
+        agents: {
+          defaults: {
+            model: { primary: "openai-codex/gpt-5.4" },
+            models: {
+              "openai-codex/gpt-5.3": {},
+            },
+          },
+        },
+        models: { providers: {} },
+      } as OpenClawConfig;
+      configMocks.readConfigFileSnapshot.mockResolvedValue({
+        valid: true,
+        parsed: cfg,
+      });
+      configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
+        ok: true,
+        config,
+      }));
+
+      const result = await addModelToConfig({
+        cfg,
+        provider: "openai-codex",
+        modelId,
+      });
+
+      expect(result.ok).toBe(true);
+      if (!result.ok) {
+        return;
+      }
+      expect(result.result.allowlistAdded).toBe(true);
+      expect(result.result.warnings).toEqual([
+        "OpenAI Codex model metadata was saved from provider defaults; provider availability still depends on your Codex account.",
+      ]);
+      const written = configMocks.replaceConfigFile.mock.calls[0]?.[0]
+        ?.nextConfig as OpenClawConfig;
+      expect(written.models?.providers?.["openai-codex"]).toMatchObject({
+        baseUrl: "https://chatgpt.com/backend-api",
+        api: "openai-codex-responses",
+        models: [
+          expect.objectContaining({
+            id: modelId,
+            api: "openai-codex-responses",
+            baseUrl: "https://chatgpt.com/backend-api/codex",
+            reasoning: true,
+            input: ["text", "image"],
+            contextWindow: expected.contextWindow,
+            contextTokens: 272_000,
+            maxTokens: 128_000,
+            cost: expected.cost,
+            metadataSource: "models-add",
+          }),
+        ],
+      });
+      expect(written.agents?.defaults?.models?.[`openai-codex/${modelId}`]).toEqual({});
+    },
+  );
+
   it("returns a generic validation error when config validation fails without issue details", async () => {
     const cfg = {
       models: {
@@ -33,6 +33,7 @@ import {
   resolveLmstudioInferenceBase,
   resolveLmstudioRequestContext,
 } from "../../plugin-sdk/lmstudio-runtime.js";
+import type { ProviderRuntimeModel } from "../../plugins/provider-runtime-model.types.js";
 import { isLoopbackIpAddress } from "../../shared/net/ip.js";
 import {
   normalizeLowercaseStringOrEmpty,
@@ -41,6 +42,7 @@

 export type ModelAddAdapter = {
   providerId: string;
+  bootstrapMode?: "always" | "discovered";
   bootstrapProviderConfig?: (cfg: OpenClawConfig) => ModelProviderConfig | null;
   detect?: (params: {
     cfg: OpenClawConfig;
@@ -61,6 +63,10 @@ type AddModelOutcome = {
   warnings: string[];
 };

+export type ValidateAddProviderResult =
+  | { ok: true; provider: string }
+  | { ok: false; providers: string[]; knownProvider?: string };
+
 type OllamaModelShowInfo = {
   contextWindow?: number;
   capabilities?: string[];
@@ -75,6 +81,17 @@ type OllamaApiFacade = {
   queryOllamaModelShowInfo: (apiBase: string, modelName: string) => Promise<OllamaModelShowInfo>;
 };

+type OpenAIApiFacade = {
+  buildOpenAICodexProvider: () => ModelProviderConfig;
+  buildOpenAICodexProviderPlugin: () => {
+    resolveDynamicModel?: (ctx: {
+      provider: string;
+      modelId: string;
+      modelRegistry: { find: () => null };
+    }) => ProviderRuntimeModel | null | undefined;
+  };
+};
+
 const log = createSubsystemLogger("models-add");
 const OLLAMA_DEFAULT_BASE_URL = "http://127.0.0.1:11434";

@@ -85,12 +102,25 @@ function loadOllamaApiFacade(): OllamaApiFacade {
   });
 }

+function loadOpenAIApiFacade(): OpenAIApiFacade {
+  return loadBundledPluginPublicSurfaceModuleSync<OpenAIApiFacade>({
+    dirName: "openai",
+    artifactBasename: "api.js",
+  });
+}
+
 const buildOllamaModelDefinition: OllamaApiFacade["buildOllamaModelDefinition"] =
   createLazyFacadeValue(loadOllamaApiFacade, "buildOllamaModelDefinition");
 const queryOllamaModelShowInfo: OllamaApiFacade["queryOllamaModelShowInfo"] = createLazyFacadeValue(
   loadOllamaApiFacade,
   "queryOllamaModelShowInfo",
 );
+const buildOpenAICodexProvider: OpenAIApiFacade["buildOpenAICodexProvider"] = createLazyFacadeValue(
+  loadOpenAIApiFacade,
+  "buildOpenAICodexProvider",
+);
+const buildOpenAICodexProviderPlugin: OpenAIApiFacade["buildOpenAICodexProviderPlugin"] =
+  createLazyFacadeValue(loadOpenAIApiFacade, "buildOpenAICodexProviderPlugin");

 function sanitizeUrlForLogs(raw: string | undefined): string | undefined {
   const trimmed = normalizeOptionalString(raw);
@@ -121,6 +151,42 @@ function buildDefaultModelDefinition(modelId: string): ModelDefinitionConfig {
   };
 }

+function buildOpenAICodexModelDefinition(modelId: string): ModelDefinitionConfig {
+  const dynamicModel = buildOpenAICodexProviderPlugin().resolveDynamicModel?.({
+    provider: "openai-codex",
+    modelId,
+    modelRegistry: { find: () => null },
+  });
+  if (dynamicModel) {
+    return {
+      id: dynamicModel.id,
+      name: dynamicModel.name,
+      api: "openai-codex-responses",
+      baseUrl: dynamicModel.baseUrl,
+      reasoning: dynamicModel.reasoning,
+      input: [...dynamicModel.input],
+      cost: dynamicModel.cost,
+      contextWindow: dynamicModel.contextWindow,
+      ...(dynamicModel.contextTokens ? { contextTokens: dynamicModel.contextTokens } : {}),
+      maxTokens: dynamicModel.maxTokens,
+      ...(dynamicModel.headers ? { headers: dynamicModel.headers } : {}),
+      ...(dynamicModel.compat ? { compat: dynamicModel.compat } : {}),
+      metadataSource: "models-add",
+    };
+  }
+  return {
+    id: modelId,
+    name: modelId,
+    api: "openai-codex-responses",
+    reasoning: true,
+    input: ["text", "image"],
+    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+    contextWindow: SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
+    maxTokens: SELF_HOSTED_DEFAULT_MAX_TOKENS,
+    metadataSource: "models-add",
+  };
+}
+
 function resolveConfiguredProvider(
   cfg: OpenClawConfig,
   providerId: string,
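For illustration, this is the definition the helper above would persist for gpt-5.5 when the plugin resolves the model (values taken from this commit's test fixtures, not from running the code):

const gpt55Definition: ModelDefinitionConfig = {
  id: "gpt-5.5",
  name: "gpt-5.5",
  api: "openai-codex-responses",
  baseUrl: "https://chatgpt.com/backend-api/codex",
  reasoning: true,
  input: ["text", "image"],
  cost: { input: 5, output: 30, cacheRead: 0, cacheWrite: 0 },
  contextWindow: 1_000_000,
  contextTokens: 272_000,
  maxTokens: 128_000,
  // Marks the row as bootstrap output, so official metadata may later override it.
  metadataSource: "models-add",
};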
@@ -176,6 +242,21 @@ function isLocalLmstudioBaseUrl(baseUrl: string | undefined): boolean {
 }

 const MODEL_ADD_ADAPTERS: Record<string, ModelAddAdapter> = {
+  "openai-codex": {
+    providerId: "openai-codex",
+    bootstrapMode: "discovered",
+    bootstrapProviderConfig: () => ({
+      ...buildOpenAICodexProvider(),
+      models: [],
+    }),
+    detect: async ({ modelId }) => ({
+      found: true,
+      model: buildOpenAICodexModelDefinition(modelId),
+      warnings: [
+        "OpenAI Codex model metadata was saved from provider defaults; provider availability still depends on your Codex account.",
+      ],
+    }),
+  },
   ollama: {
     providerId: "ollama",
     bootstrapProviderConfig: () => ({
@@ -260,7 +341,11 @@ const MODEL_ADD_ADAPTERS: Record<string, ModelAddAdapter> = {
   },
 };

-function canAddProvider(params: { cfg: OpenClawConfig; provider: string }): boolean {
+function canAddProvider(params: {
+  cfg: OpenClawConfig;
+  provider: string;
+  allowDiscoveredBootstrap?: boolean;
+}): boolean {
   const provider = normalizeProviderId(params.provider);
   if (!provider) {
     return false;
@@ -268,7 +353,14 @@ function canAddProvider(params: { cfg: OpenClawConfig; provider: string }): bool
   if (resolveConfiguredProvider(params.cfg, provider)) {
     return true;
   }
-  return !!MODEL_ADD_ADAPTERS[provider]?.bootstrapProviderConfig?.(params.cfg);
+  const adapter = MODEL_ADD_ADAPTERS[provider];
+  if (!adapter?.bootstrapProviderConfig) {
+    return false;
+  }
+  if (adapter.bootstrapMode === "discovered" && !params.allowDiscoveredBootstrap) {
+    return false;
+  }
+  return !!adapter.bootstrapProviderConfig(params.cfg);
 }

 export function listAddableProviders(params: {
@@ -278,7 +370,14 @@ export function listAddableProviders(params: {
   const providers = new Set<string>();
   for (const provider of params.discoveredProviders ?? []) {
     const normalized = normalizeProviderId(provider);
-    if (normalized && canAddProvider({ cfg: params.cfg, provider: normalized })) {
+    if (
+      normalized &&
+      canAddProvider({
+        cfg: params.cfg,
+        provider: normalized,
+        allowDiscoveredBootstrap: true,
+      })
+    ) {
       providers.add(normalized);
     }
   }
@@ -288,8 +387,10 @@
       providers.add(normalized);
     }
   }
-  for (const provider of Object.keys(MODEL_ADD_ADAPTERS)) {
-    providers.add(provider);
+  for (const [provider, adapter] of Object.entries(MODEL_ADD_ADAPTERS)) {
+    if (adapter.bootstrapMode !== "discovered") {
+      providers.add(provider);
+    }
   }
   return [...providers].toSorted();
 }
@@ -298,14 +399,17 @@ export function validateAddProvider(params: {
   cfg: OpenClawConfig;
   provider: string;
   discoveredProviders?: readonly string[];
-}): { ok: true; provider: string } | { ok: false; providers: string[] } {
+}): ValidateAddProviderResult {
   const provider = normalizeProviderId(params.provider);
   const providers = listAddableProviders({
     cfg: params.cfg,
     discoveredProviders: params.discoveredProviders,
   });
   if (!provider || !providers.includes(provider)) {
-    return { ok: false, providers };
+    const knownProvider = (params.discoveredProviders ?? [])
+      .map((discoveredProvider) => normalizeProviderId(discoveredProvider))
+      .find((discoveredProvider) => discoveredProvider === provider);
+    return { ok: false, providers, ...(knownProvider ? { knownProvider } : {}) };
   }
   return { ok: true, provider };
 }
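A usage sketch of the widened result type, mirroring this commit's tests (the cfg and provider names here are illustrative): a provider that was discovered but is not addable now reports itself via knownProvider, which lets the chat handler suggest browsing instead of printing "Unknown provider":

// Discovered but not addable: the caller learns the provider is known.
validateAddProvider({ cfg, provider: "openai", discoveredProviders: ["openai"] });
// => { ok: false, providers: ["lmstudio", "ollama"], knownProvider: "openai" }

// Neither addable nor discovered: knownProvider is omitted entirely.
validateAddProvider({ cfg, provider: "acme", discoveredProviders: [] });
// => { ok: false, providers: ["lmstudio", "ollama"] }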
@@ -219,6 +219,91 @@ describe("normalizeCompatibilityConfigValues", () => {
     ]);
   });

+  it("marks legacy untagged /models add OpenAI Codex metadata rows for doctor repair", () => {
+    const res = normalizeCompatibilityConfigValues({
+      models: {
+        providers: {
+          "openai-codex": {
+            baseUrl: "https://chatgpt.com/backend-api",
+            api: "openai-codex-responses",
+            models: [
+              {
+                id: "gpt-5.5",
+                name: "gpt-5.5",
+                api: "openai-codex-responses",
+                reasoning: true,
+                input: ["text", "image"],
+                cost: { input: 5, output: 30, cacheRead: 0.5, cacheWrite: 0 },
+                contextWindow: 400_000,
+                contextTokens: 272_000,
+                maxTokens: 128_000,
+              },
+            ],
+          },
+        },
+      },
+    } as unknown as OpenClawConfig);
+
+    expect(res.config.models?.providers?.["openai-codex"]?.models?.[0]).toMatchObject({
+      id: "gpt-5.5",
+      metadataSource: "models-add",
+    });
+    expect(res.changes).toContain(
+      "Marked models.providers.openai-codex.models.gpt-5.5 as /models add metadata so official OpenAI Codex metadata can override it.",
+    );
+  });
+
+  it("does not mark untagged manual OpenAI Codex metadata overrides", () => {
+    const res = normalizeCompatibilityConfigValues({
+      models: {
+        providers: {
+          "openai-codex": {
+            baseUrl: "https://chatgpt.com/backend-api",
+            api: "openai-codex-responses",
+            models: [
+              {
+                id: "gpt-5.5",
+                name: "gpt-5.5",
+                api: "openai-codex-responses",
+                reasoning: true,
+                input: ["text", "image"],
+                cost: { input: 9, output: 99, cacheRead: 0.9, cacheWrite: 0 },
+                contextWindow: 555_555,
+                contextTokens: 111_111,
+                maxTokens: 22_222,
+              },
+            ],
+          },
+        },
+      },
+    } as unknown as OpenClawConfig);
+
+    expect(res.config).toEqual({
+      models: {
+        providers: {
+          "openai-codex": {
+            baseUrl: "https://chatgpt.com/backend-api",
+            api: "openai-codex-responses",
+            models: [
+              {
+                id: "gpt-5.5",
+                name: "gpt-5.5",
+                api: "openai-codex-responses",
+                reasoning: true,
+                input: ["text", "image"],
+                cost: { input: 9, output: 99, cacheRead: 0.9, cacheWrite: 0 },
+                contextWindow: 555_555,
+                contextTokens: 111_111,
+                maxTokens: 22_222,
+              },
+            ],
+          },
+        },
+      },
+    });
+    expect(res.changes).toEqual([]);
+  });
+
   it("prefers legacy nano-banana env.GEMINI_API_KEY over skill apiKey during migration", () => {
     const res = normalizeCompatibilityConfigValues({
       skills: {
@@ -2,6 +2,7 @@ import type { OpenClawConfig } from "../../../config/types.openclaw.js";
 import { runPluginSetupConfigMigrations } from "../../../plugins/setup-registry.js";
 import { applyChannelDoctorCompatibilityMigrations } from "./channel-legacy-config-migrate.js";
 import { normalizeBaseCompatibilityConfigValues } from "./legacy-config-compatibility-base.js";
+import { normalizeLegacyOpenAICodexModelsAddMetadata } from "./legacy-config-core-normalizers.js";

 export function normalizeCompatibilityConfigValues(cfg: OpenClawConfig): {
   config: OpenClawConfig;
@@ -23,6 +24,7 @@ export function normalizeCompatibilityConfigValues(cfg: OpenClawConfig): {
     next = channelMigrations.next;
     changes.push(...channelMigrations.changes);
   }
+  next = normalizeLegacyOpenAICodexModelsAddMetadata(next, changes);

   return { config: next, changes };
 }
@@ -1,3 +1,4 @@
+import { isLegacyModelsAddCodexMetadataModel } from "../../../agents/openai-codex-models-add-legacy.js";
 import { normalizeProviderId } from "../../../agents/provider-id.js";
 import { resolveSingleAccountKeysToMove } from "../../../channels/plugins/setup-promotion-helpers.js";
 import { resolveNormalizedProviderModelMaxTokens } from "../../../config/defaults.js";
@@ -170,6 +171,70 @@ type ModelProviderEntry = Partial<
   NonNullable<NonNullable<OpenClawConfig["models"]>["providers"]>[string]
 >;
 type ModelsConfigPatch = Partial<NonNullable<OpenClawConfig["models"]>>;
+type ModelDefinitionEntry = NonNullable<ModelProviderEntry["models"]>[number];
+
+export function normalizeLegacyOpenAICodexModelsAddMetadata(
+  cfg: OpenClawConfig,
+  changes: string[],
+): OpenClawConfig {
+  const rawModels = cfg.models;
+  if (!isRecord(rawModels) || !isRecord(rawModels.providers)) {
+    return cfg;
+  }
+
+  let providersChanged = false;
+  const nextProviders = { ...rawModels.providers };
+  for (const [providerId, rawProvider] of Object.entries(rawModels.providers)) {
+    if (normalizeProviderId(providerId) !== "openai-codex" || !isRecord(rawProvider)) {
+      continue;
+    }
+    const rawProviderModels = rawProvider.models;
+    if (!Array.isArray(rawProviderModels)) {
+      continue;
+    }
+    let providerChanged = false;
+    const nextModels: typeof rawProviderModels = [];
+    for (const model of rawProviderModels) {
+      if (
+        isRecord(model) &&
+        !("metadataSource" in model) &&
+        isLegacyModelsAddCodexMetadataModel({
+          provider: providerId,
+          model: model as Partial<ModelDefinitionEntry>,
+        })
+      ) {
+        providerChanged = true;
+        changes.push(
+          `Marked models.providers.${providerId}.models.${model.id} as /models add metadata so official OpenAI Codex metadata can override it.`,
+        );
+        nextModels.push(Object.assign({}, model, { metadataSource: "models-add" }));
+      } else {
+        nextModels.push(model);
+      }
+    }
+
+    if (!providerChanged) {
+      continue;
+    }
+    nextProviders[providerId] = {
+      ...rawProvider,
+      models: nextModels,
+    } as (typeof nextProviders)[string];
+    providersChanged = true;
+  }
+
+  if (!providersChanged) {
+    return cfg;
+  }
+
+  return {
+    ...cfg,
+    models: {
+      ...rawModels,
+      providers: nextProviders as NonNullable<OpenClawConfig["models"]>["providers"],
+    },
+  };
+}
+
 export function normalizeLegacyNanoBananaSkill(
   cfg: OpenClawConfig,
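To make the doctor migration concrete, a before/after sketch using the fingerprint values from this commit's tests; only an exact match of the legacy /models add defaults is tagged, and everything else passes through untouched:

// Before: an untagged row that the old /models add flow wrote verbatim.
const before = {
  id: "gpt-5.5",
  name: "gpt-5.5",
  api: "openai-codex-responses",
  reasoning: true,
  input: ["text", "image"],
  cost: { input: 5, output: 30, cacheRead: 0.5, cacheWrite: 0 },
  contextWindow: 400_000,
  contextTokens: 272_000,
  maxTokens: 128_000,
};

// After normalizeLegacyOpenAICodexModelsAddMetadata: the same row, now tagged,
// plus a human-readable entry appended to the changes list.
const after = { ...before, metadataSource: "models-add" };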
@@ -2948,6 +2948,10 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
             },
             additionalProperties: false,
           },
+          metadataSource: {
+            type: "string",
+            const: "models-add",
+          },
         },
         required: ["id", "name"],
         additionalProperties: false,
@@ -85,6 +85,7 @@ export type ModelDefinitionConfig = {
   maxTokens: number;
   headers?: Record<string, string>;
   compat?: ModelCompatConfig;
+  metadataSource?: "models-add";
 };

 export type ModelProviderConfig = {
@@ -339,6 +339,7 @@ export const ModelDefinitionSchema = z
     maxTokens: z.number().positive().optional(),
     headers: z.record(z.string(), z.string()).optional(),
     compat: ModelCompatSchema,
+    metadataSource: z.literal("models-add").optional(),
   })
   .strict();
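Putting the schema changes together, a provider entry that validates against the updated ModelDefinitionSchema could look like this (a sketch assembled from this commit's fixtures, not an excerpt from the repo):

const openaiCodexProvider = {
  baseUrl: "https://chatgpt.com/backend-api",
  api: "openai-codex-responses",
  models: [
    {
      id: "gpt-5.5",
      name: "gpt-5.5",
      api: "openai-codex-responses",
      reasoning: true,
      input: ["text", "image"],
      cost: { input: 5, output: 30, cacheRead: 0, cacheWrite: 0 },
      contextWindow: 1_000_000,
      contextTokens: 272_000,
      maxTokens: 128_000,
      metadataSource: "models-add", // the only value z.literal("models-add") accepts
    },
  ],
};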