fix: deprecate models add command (#71175)

This commit is contained in:
Tak Hoffman
2026-04-24 12:20:59 -05:00
committed by GitHub
parent e35e6e1d15
commit 59e2825274
28 changed files with 192 additions and 1875 deletions

View File

@@ -848,7 +848,7 @@ export function buildBuiltinChatCommands(): ChatCommandDefinition[] {
defineChatCommand({
key: "models",
nativeName: "models",
description: "List model providers/models or add a model.",
description: "List model providers/models.",
textAlias: "/models",
tier: "standard",
argsParsing: "none",

View File

@@ -17,26 +17,8 @@ const modelAuthLabelMocks = vi.hoisted(() => ({
resolveModelAuthLabel: vi.fn<(params: unknown) => string | undefined>(() => undefined),
}));
const modelsAddMocks = vi.hoisted(() => ({
addModelToConfig: vi.fn(),
listAddableProviders: vi.fn<(params: unknown) => string[]>(),
validateAddProvider:
vi.fn<
(
params: unknown,
) =>
| { ok: true; provider: string }
| { ok: false; providers: string[]; knownProvider?: string }
>(),
}));
const configWriteAuthMocks = vi.hoisted(() => ({
resolveConfigWriteDeniedText: vi.fn<(params: { target: string }) => string | null>(() => null),
}));
const configWriteTargetMocks = vi.hoisted(() => ({
resolveConfigWriteTargetFromPath: vi.fn((path: string[]) => path.join(".")),
}));
const MODELS_ADD_DEPRECATED_TEXT =
"⚠️ /models add is deprecated. Use /models to browse providers and /model to switch models.";
vi.mock("../../agents/model-catalog.js", () => ({
loadModelCatalog: modelCatalogMocks.loadModelCatalog,
@@ -46,24 +28,6 @@ vi.mock("../../agents/model-auth-label.js", () => ({
resolveModelAuthLabel: modelAuthLabelMocks.resolveModelAuthLabel,
}));
vi.mock("../../channels/plugins/config-writes.js", () => ({
resolveConfigWriteTargetFromPath: configWriteTargetMocks.resolveConfigWriteTargetFromPath,
}));
vi.mock("./config-write-authorization.js", () => ({
resolveConfigWriteDeniedText: configWriteAuthMocks.resolveConfigWriteDeniedText,
}));
vi.mock("./models-add.js", async () => {
const actual = await vi.importActual<typeof import("./models-add.js")>("./models-add.js");
return {
...actual,
addModelToConfig: modelsAddMocks.addModelToConfig,
listAddableProviders: modelsAddMocks.listAddableProviders,
validateAddProvider: modelsAddMocks.validateAddProvider,
};
});
const telegramModelsTestPlugin: ChannelPlugin = {
...createChannelTestPluginBase({
id: "telegram",
@@ -80,19 +44,6 @@ const telegramModelsTestPlugin: ChannelPlugin = {
},
}),
commands: {
buildModelsMenuChannelData: ({ providers }) => ({
telegram: {
buttons: [
[{ text: "Add model", callback_data: "/models add" }],
...providers.map((provider) => [
{
text: provider.id,
callback_data: `models:${provider.id}`,
},
]),
],
},
}),
buildModelsProviderChannelData: ({ providers }) => ({
telegram: {
buttons: providers.map((provider) => [
@@ -103,14 +54,23 @@ const telegramModelsTestPlugin: ChannelPlugin = {
]),
},
}),
buildModelsAddProviderChannelData: ({ providers }) => ({
telegram: {
buttons: providers.map((provider) => [
{
text: provider.id,
callback_data: `/models add ${provider.id}`,
},
]),
},
};
const menuOnlyModelsTestPlugin: ChannelPlugin = {
...createChannelTestPluginBase({
id: "menuonly",
label: "Menu Only",
capabilities: {
chatTypes: ["direct"],
nativeCommands: true,
},
}),
commands: {
buildModelsMenuChannelData: ({ providers }) => ({
menuonly: {
providerIds: providers.map((provider) => provider.id),
labels: providers.map((provider) => `${provider.id}:${provider.count}`),
},
}),
},
@@ -133,32 +93,6 @@ beforeEach(() => {
]);
modelAuthLabelMocks.resolveModelAuthLabel.mockReset();
modelAuthLabelMocks.resolveModelAuthLabel.mockReturnValue(undefined);
modelsAddMocks.addModelToConfig.mockReset();
modelsAddMocks.addModelToConfig.mockResolvedValue({
ok: true,
result: {
provider: "ollama",
modelId: "glm-5.1:cloud",
existed: false,
allowlistAdded: false,
warnings: [],
},
});
modelsAddMocks.listAddableProviders.mockReset();
modelsAddMocks.listAddableProviders.mockReturnValue([
"anthropic",
"lmstudio",
"ollama",
"openai",
]);
modelsAddMocks.validateAddProvider.mockReset();
modelsAddMocks.validateAddProvider.mockImplementation((params: unknown) => ({
ok: true,
provider: (params as { provider: string }).provider,
}));
configWriteAuthMocks.resolveConfigWriteDeniedText.mockReset();
configWriteAuthMocks.resolveConfigWriteDeniedText.mockReturnValue(null);
configWriteTargetMocks.resolveConfigWriteTargetFromPath.mockClear();
setActivePluginRegistry(
createTestRegistry([
...textSurfaceModelsTestPlugins,
@@ -167,6 +101,11 @@ beforeEach(() => {
plugin: telegramModelsTestPlugin,
source: "test",
},
{
pluginId: "menuonly",
plugin: menuOnlyModelsTestPlugin,
source: "test",
},
]),
);
});
@@ -228,10 +167,10 @@ describe("handleModelsCommand", () => {
expect(result?.reply?.text).toContain("- openai (2)");
expect(result?.reply?.text).toContain("Use: /models <provider>");
expect(result?.reply?.text).toContain("Switch: /model <provider/model>");
expect(result?.reply?.text).toContain("Add: /models add");
expect(result?.reply?.text).not.toContain("Add: /models add");
});
it("shows the add-model action in the telegram provider picker by default", async () => {
it("keeps the telegram provider picker browse-only", async () => {
const params = buildParams("/models");
params.ctx.Surface = "telegram";
params.command.channel = "telegram";
@@ -243,7 +182,6 @@ describe("handleModelsCommand", () => {
expect(result?.reply?.channelData).toEqual({
telegram: {
buttons: [
[{ text: "Add model", callback_data: "/models add" }],
[{ text: "anthropic", callback_data: "models:anthropic" }],
[{ text: "google", callback_data: "models:google" }],
[{ text: "openai", callback_data: "models:openai" }],
@@ -252,27 +190,19 @@ describe("handleModelsCommand", () => {
});
});
it("keeps the telegram provider picker browse-only when modelsWrite is disabled", async () => {
const params = buildParams("/models", {
commands: {
text: true,
modelsWrite: false,
},
});
params.ctx.Surface = "telegram";
params.command.channel = "telegram";
params.command.surface = "telegram";
it("keeps plugin menu hook compatibility for provider pickers", async () => {
const params = buildParams("/models");
params.ctx.Surface = "menuonly";
params.command.channel = "menuonly";
params.command.surface = "menuonly";
const result = await handleModelsCommand(params, true);
expect(result?.reply?.text).toBe("Select a provider:");
expect(result?.reply?.channelData).toEqual({
telegram: {
buttons: [
[{ text: "anthropic", callback_data: "models:anthropic" }],
[{ text: "google", callback_data: "models:google" }],
[{ text: "openai", callback_data: "models:openai" }],
],
menuonly: {
providerIds: ["anthropic", "google", "openai"],
labels: ["anthropic:2", "google:1", "openai:2"],
},
});
});
@@ -352,110 +282,30 @@ describe("handleModelsCommand", () => {
expect(result?.reply?.text).toContain("Models (anthropic · 🔑 target-auth) — showing 1-2 of 2");
});
it("guides /models add when no provider is given", async () => {
it("returns a deprecation message for /models add when no provider is given", async () => {
const result = await handleModelsCommand(buildParams("/models add"), true);
expect(result?.reply?.text).toContain(
"Add a model: choose a provider, then send one of these example commands.",
);
expect(result?.reply?.text).toContain(
"These examples use models that already exist for those providers.",
);
expect(result?.reply?.text).toContain("```text");
expect(result?.reply?.text).toContain("/models add ollama glm-5.1:cloud");
expect(result?.reply?.text).toContain("/models add lmstudio qwen/qwen3.5-9b");
expect(result?.reply?.text).toContain("/models add <provider> <modelId>");
expect(result?.reply?.text).toContain("Generic form:");
expect(result?.reply?.text).toContain("/models add <provider> <modelId>");
expect(result?.reply?.text).toContain("- anthropic");
expect(result?.reply?.text).toContain("- lmstudio");
expect(result?.reply?.text).toContain("- ollama");
expect(result?.reply?.text).toContain("- openai");
expect(result).toEqual({
shouldContinue: false,
reply: { text: MODELS_ADD_DEPRECATED_TEXT },
});
});
it("guides /models add <provider> when the model id is missing", async () => {
it("returns a deprecation message for /models add <provider>", async () => {
const result = await handleModelsCommand(buildParams("/models add ollama"), true);
expect(result?.reply?.text).toContain("Add a model to ollama:");
expect(result?.reply?.text).toContain("```text\n/models add ollama <modelId>\n```");
expect(result?.reply?.text).toContain("```text\n/models ollama\n```");
expect(result).toEqual({
shouldContinue: false,
reply: { text: MODELS_ADD_DEPRECATED_TEXT },
});
});
it("explains when a selectable provider does not support /models add", async () => {
modelsAddMocks.validateAddProvider.mockReturnValueOnce({
ok: false,
providers: ["lmstudio", "ollama"],
knownProvider: "openai",
});
it("returns a deprecation message for /models add <provider> <modelId>", async () => {
const result = await handleModelsCommand(buildParams("/models add openai gpt-5.5"), true);
expect(result?.reply?.text).toContain(
"openai is available for model selection, but /models add cannot create models for this provider from chat.",
);
expect(result?.reply?.text).toContain("/models openai");
expect(result?.reply?.text).toContain("/model openai/<modelId>");
expect(result?.reply?.text).toContain("openclaw configure");
expect(result?.reply?.text).not.toContain("Unknown provider");
});
it("adds a model and points users back to browse or switch", async () => {
const result = await handleModelsCommand(buildParams("/models add ollama glm-5.1:cloud"), true);
expect(modelsAddMocks.addModelToConfig).toHaveBeenCalledWith(
expect.objectContaining({
provider: "ollama",
modelId: "glm-5.1:cloud",
}),
);
expect(result?.reply?.text).toContain("✅ Added model: ollama/glm-5.1:cloud.");
expect(result?.reply?.text).toContain("Browse:");
expect(result?.reply?.text).toContain("/models ollama");
expect(result?.reply?.text).toContain("Switch now:");
expect(result?.reply?.text).toContain("/model ollama/glm-5.1:cloud");
expect(result?.reply?.text).not.toContain("/models repair");
expect(result?.reply?.text).not.toContain("/models ollama/glm-5.1:cloud");
});
it("checks all config-write targets touched by /models add", async () => {
const result = await handleModelsCommand(buildParams("/models add ollama glm-5.1:cloud"), true);
expect(result?.shouldContinue).toBe(false);
expect(configWriteTargetMocks.resolveConfigWriteTargetFromPath).toHaveBeenCalledTimes(3);
expect(configWriteTargetMocks.resolveConfigWriteTargetFromPath.mock.calls).toEqual([
[["models", "providers", "ollama"]],
[["models", "providers", "ollama", "models"]],
[["agents", "defaults", "models"]],
]);
});
it("returns config-write denial text for add-time provider bootstrap", async () => {
configWriteAuthMocks.resolveConfigWriteDeniedText.mockReturnValueOnce("denied");
const result = await handleModelsCommand(buildParams("/models add ollama glm-5.1:cloud"), true);
expect(result).toEqual({
shouldContinue: false,
reply: { text: "denied" },
reply: { text: MODELS_ADD_DEPRECATED_TEXT },
});
expect(modelsAddMocks.addModelToConfig).not.toHaveBeenCalled();
});
it("rejects /models add when modelsWrite is disabled", async () => {
const result = await handleModelsCommand(
buildParams("/models add ollama glm-5.1:cloud", {
commands: { text: true, modelsWrite: false },
}),
true,
);
expect(result).toEqual({
shouldContinue: false,
reply: {
text: "⚠️ /models add is disabled. Set commands.modelsWrite=true to enable model registration.",
},
});
expect(modelsAddMocks.addModelToConfig).not.toHaveBeenCalled();
expect(configWriteTargetMocks.resolveConfigWriteTargetFromPath).not.toHaveBeenCalled();
});
});

View File

@@ -9,10 +9,7 @@ import {
resolveDefaultModelForAgent,
resolveModelRefFromString,
} from "../../agents/model-selection.js";
import { resolveConfigWriteTargetFromPath } from "../../channels/plugins/config-writes.js";
import { getChannelPlugin } from "../../channels/plugins/index.js";
import { normalizeChannelId } from "../../channels/registry.js";
import { isModelsWriteEnabled } from "../../config/commands.flags.js";
import type { SessionEntry } from "../../config/sessions.js";
import type { OpenClawConfig } from "../../config/types.openclaw.js";
import {
@@ -20,18 +17,13 @@ import {
normalizeOptionalString,
} from "../../shared/string-coerce.js";
import type { ReplyPayload } from "../types.js";
import { resolveChannelAccountId } from "./channel-context.js";
import {
rejectNonOwnerCommand,
rejectUnauthorizedCommand,
requireGatewayClientScopeForInternalChannel,
} from "./command-gates.js";
import { rejectUnauthorizedCommand } from "./command-gates.js";
import type { CommandHandler } from "./commands-types.js";
import { resolveConfigWriteDeniedText } from "./config-write-authorization.js";
import { addModelToConfig, listAddableProviders, validateAddProvider } from "./models-add.js";
const PAGE_SIZE_DEFAULT = 20;
const PAGE_SIZE_MAX = 100;
const MODELS_ADD_DEPRECATED_TEXT =
"⚠️ /models add is deprecated. Use /models to browse providers and /model to switch models.";
type ModelsCommandSessionEntry = Partial<
Pick<SessionEntry, "authProfileOverride" | "modelProvider" | "model">
@@ -271,7 +263,6 @@ export function formatModelsAvailableHeader(params: {
function buildModelsMenuText(params: {
providers: string[];
byProvider: ReadonlyMap<string, ReadonlySet<string>>;
includeAddAction?: boolean;
}): string {
return [
"Providers:",
@@ -284,44 +275,9 @@ function buildModelsMenuText(params: {
"",
"Use: /models <provider>",
"Switch: /model <provider/model>",
...(params.includeAddAction ? ["Add: /models add"] : []),
].join("\n");
}
function formatCopyableCommand(command: string): string {
return ["```text", command, "```"].join("\n");
}
function buildAddExamples(addableProviders: readonly string[]): string[] {
const examples: string[] = [];
if (addableProviders.includes("ollama")) {
examples.push("/models add ollama glm-5.1:cloud");
}
if (addableProviders.includes("lmstudio")) {
examples.push("/models add lmstudio qwen/qwen3.5-9b");
}
if (addableProviders.includes("codex")) {
examples.push("/models add codex gpt-5.4-mini");
}
if (addableProviders.includes("openai-codex")) {
examples.push("/models add openai-codex gpt-5.4");
}
if (examples.length === 0) {
examples.push("/models add <provider> <modelId>");
}
return examples.slice(0, 3);
}
function resolveWriteProvider(params: {
cfg: OpenClawConfig;
parsed: ParsedModelsCommand;
}): string | undefined {
if (params.parsed.action !== "add") {
return undefined;
}
return params.parsed.provider ? normalizeProviderId(params.parsed.provider) : undefined;
}
function buildProviderInfos(params: {
providers: string[];
byProvider: ReadonlyMap<string, ReadonlySet<string>>;
@@ -355,15 +311,12 @@ export async function resolveModelsCommandReply(params: {
);
const commandPlugin = params.surface ? getChannelPlugin(params.surface) : null;
const providerInfos = buildProviderInfos({ providers, byProvider });
const modelsWriteEnabled = isModelsWriteEnabled(params.cfg);
if (parsed.action === "providers") {
const channelData =
(modelsWriteEnabled
? commandPlugin?.commands?.buildModelsMenuChannelData?.({
providers: providerInfos,
})
: null) ??
commandPlugin?.commands?.buildModelsMenuChannelData?.({
providers: providerInfos,
}) ??
commandPlugin?.commands?.buildModelsProviderChannelData?.({
providers: providerInfos,
});
@@ -374,123 +327,12 @@ export async function resolveModelsCommandReply(params: {
};
}
return {
text: buildModelsMenuText({ providers, byProvider, includeAddAction: modelsWriteEnabled }),
text: buildModelsMenuText({ providers, byProvider }),
};
}
if (parsed.action === "add") {
if (!modelsWriteEnabled) {
return {
text: "⚠️ /models add is disabled. Set commands.modelsWrite=true to enable model registration.",
};
}
const addableProviders = listAddableProviders({
cfg: params.cfg,
discoveredProviders: providers,
});
if (!parsed.provider) {
const channelData = commandPlugin?.commands?.buildModelsAddProviderChannelData?.({
providers: addableProviders.map((id) => ({ id })),
});
return {
text: [
"Add a model: choose a provider, then send one of these example commands.",
"",
"These examples use models that already exist for those providers.",
"",
...buildAddExamples(addableProviders).flatMap((example) => [
formatCopyableCommand(example),
"",
]),
"Generic form:",
formatCopyableCommand("/models add <provider> <modelId>"),
"",
"Providers:",
...addableProviders.map((provider) => `- ${provider}`),
].join("\n"),
...(channelData ? { channelData } : {}),
};
}
const validatedProvider = validateAddProvider({
cfg: params.cfg,
provider: parsed.provider,
discoveredProviders: providers,
});
if (!validatedProvider.ok) {
if (validatedProvider.knownProvider) {
return {
text: [
`${validatedProvider.knownProvider} is available for model selection, but /models add cannot create models for this provider from chat.`,
"",
"Browse:",
`/models ${validatedProvider.knownProvider}`,
"",
"Switch:",
`/model ${validatedProvider.knownProvider}/<modelId>`,
"",
"To configure providers or auth, run:",
"openclaw configure",
].join("\n"),
};
}
return {
text: [
`Unknown provider: ${parsed.provider}`,
"",
"Available providers:",
...validatedProvider.providers.map((provider) => `- ${provider}`),
"",
"Use:",
"/models add <provider> <modelId>",
].join("\n"),
};
}
if (!parsed.modelId) {
return {
text: [
`Add a model to ${validatedProvider.provider}:`,
"",
"Use:",
formatCopyableCommand(`/models add ${validatedProvider.provider} <modelId>`),
"",
"Browse current models:",
formatCopyableCommand(`/models ${validatedProvider.provider}`),
].join("\n"),
};
}
const added = await addModelToConfig({
cfg: params.cfg,
provider: validatedProvider.provider,
modelId: parsed.modelId,
});
if (!added.ok) {
return {
text: `⚠️ ${added.error}`,
};
}
const modelRef = `${added.result.provider}/${added.result.modelId}`;
const warnings =
added.result.warnings.length > 0
? ["", ...added.result.warnings.map((warning) => `- ${warning}`)]
: [];
const allowlistNote = added.result.allowlistAdded ? " and added to the allowlist" : "";
return {
text: [
added.result.existed
? `✅ Model already exists: ${modelRef}${allowlistNote}.`
: `✅ Added model: ${modelRef}${allowlistNote}.`,
"Browse:",
`/models ${added.result.provider}`,
"",
"Switch now:",
`/model ${modelRef}`,
...warnings,
].join("\n"),
};
return { text: MODELS_ADD_DEPRECATED_TEXT };
}
const { provider, page, pageSize, all } = parsed;
@@ -506,7 +348,7 @@ export async function resolveModelsCommandReply(params: {
};
}
return {
text: buildModelsMenuText({ providers, byProvider, includeAddAction: modelsWriteEnabled }),
text: buildModelsMenuText({ providers, byProvider }),
};
}
@@ -623,59 +465,7 @@ export const handleModelsCommand: CommandHandler = async (params, allowTextComma
}
if (parsed.action === "add") {
if (!isModelsWriteEnabled(params.cfg)) {
return {
shouldContinue: false,
reply: {
text: "⚠️ /models add is disabled. Set commands.modelsWrite=true to enable model registration.",
},
};
}
const commandLabel = "/models add";
const nonOwner = rejectNonOwnerCommand(params, commandLabel);
if (nonOwner) {
return nonOwner;
}
const missingAdminScope = requireGatewayClientScopeForInternalChannel(params, {
label: commandLabel,
allowedScopes: ["operator.admin"],
missingText: "❌ /models add requires operator.admin for gateway clients.",
});
if (missingAdminScope) {
return missingAdminScope;
}
const writeProvider = resolveWriteProvider({
cfg: params.cfg,
parsed,
});
if (writeProvider) {
const channelId = params.command.channelId ?? normalizeChannelId(params.command.channel);
const accountId = resolveChannelAccountId({
cfg: params.cfg,
ctx: params.ctx,
command: params.command,
});
for (const path of [
["models", "providers", writeProvider],
["models", "providers", writeProvider, "models"],
["agents", "defaults", "models"],
]) {
const deniedText = resolveConfigWriteDeniedText({
cfg: params.cfg,
channel: params.command.channel,
channelId,
accountId,
gatewayClientScopes: params.ctx.GatewayClientScopes,
target: resolveConfigWriteTargetFromPath(path),
});
if (deniedText) {
return {
shouldContinue: false,
reply: { text: deniedText },
};
}
}
}
return { shouldContinue: false, reply: { text: MODELS_ADD_DEPRECATED_TEXT } };
}
const modelsAgentId = params.sessionKey

View File

@@ -1,696 +0,0 @@
import { beforeEach, describe, expect, it, vi } from "vitest";
import type { OpenClawConfig } from "../../config/types.openclaw.js";
import { addModelToConfig, listAddableProviders, validateAddProvider } from "./models-add.js";
const configMocks = vi.hoisted(() => ({
ConfigMutationConflictError: class ConfigMutationConflictError extends Error {
readonly currentHash: string | null;
constructor(message: string, params: { currentHash: string | null }) {
super(message);
this.name = "ConfigMutationConflictError";
this.currentHash = params.currentHash;
}
},
readConfigFileSnapshot: vi.fn(),
replaceConfigFile: vi.fn(),
validateConfigObjectWithPlugins: vi.fn(),
}));
const facadeRuntimeMocks = vi.hoisted(() => ({
loadBundledPluginPublicSurfaceModuleSync: vi.fn(),
}));
const ollamaMocks = vi.hoisted(() => ({
buildOllamaModelDefinition: vi.fn(
(modelId: string, contextWindow?: number, capabilities?: string[]) => ({
id: modelId,
name: modelId,
reasoning: /think|reason/i.test(modelId),
input: capabilities?.includes("vision") ? ["text", "image"] : ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: contextWindow ?? 32768,
maxTokens: 8192,
}),
),
queryOllamaModelShowInfo: vi.fn(),
}));
const lmstudioRuntimeMocks = vi.hoisted(() => ({
LMSTUDIO_DEFAULT_API_KEY_ENV_VAR: "LMSTUDIO_API_KEY",
LMSTUDIO_DEFAULT_INFERENCE_BASE_URL: "http://127.0.0.1:1234/v1",
fetchLmstudioModels: vi.fn(),
mapLmstudioWireEntry: vi.fn(
(entry: {
key: string;
displayName?: string;
display_name?: string;
max_context_length?: number;
capabilities?: { reasoning?: { allowed_options?: string[] } };
}) => ({
id: entry.key,
displayName: entry.displayName ?? entry.display_name ?? entry.key,
reasoning: (entry.capabilities?.reasoning?.allowed_options?.length ?? 0) > 0,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: entry.max_context_length ?? 32768,
maxTokens: 8192,
}),
),
resolveLmstudioInferenceBase: vi.fn((baseUrl?: string) => baseUrl ?? "http://127.0.0.1:1234/v1"),
resolveLmstudioRequestContext: vi.fn(),
}));
vi.mock("../../config/config.js", () => ({
ConfigMutationConflictError: configMocks.ConfigMutationConflictError,
readConfigFileSnapshot: configMocks.readConfigFileSnapshot,
replaceConfigFile: configMocks.replaceConfigFile,
validateConfigObjectWithPlugins: configMocks.validateConfigObjectWithPlugins,
}));
vi.mock("../../plugin-sdk/facade-runtime.js", async () => {
const actual = await vi.importActual<typeof import("../../plugin-sdk/facade-runtime.js")>(
"../../plugin-sdk/facade-runtime.js",
);
return {
...actual,
loadBundledPluginPublicSurfaceModuleSync:
facadeRuntimeMocks.loadBundledPluginPublicSurfaceModuleSync,
};
});
vi.mock("../../plugin-sdk/lmstudio-runtime.js", () => {
return {
LMSTUDIO_DEFAULT_API_KEY_ENV_VAR: lmstudioRuntimeMocks.LMSTUDIO_DEFAULT_API_KEY_ENV_VAR,
LMSTUDIO_DEFAULT_INFERENCE_BASE_URL: lmstudioRuntimeMocks.LMSTUDIO_DEFAULT_INFERENCE_BASE_URL,
fetchLmstudioModels: lmstudioRuntimeMocks.fetchLmstudioModels,
mapLmstudioWireEntry: lmstudioRuntimeMocks.mapLmstudioWireEntry,
resolveLmstudioInferenceBase: lmstudioRuntimeMocks.resolveLmstudioInferenceBase,
resolveLmstudioRequestContext: lmstudioRuntimeMocks.resolveLmstudioRequestContext,
};
});
describe("models-add", () => {
beforeEach(() => {
configMocks.readConfigFileSnapshot.mockReset();
configMocks.replaceConfigFile.mockReset();
configMocks.validateConfigObjectWithPlugins.mockReset();
facadeRuntimeMocks.loadBundledPluginPublicSurfaceModuleSync.mockReset();
facadeRuntimeMocks.loadBundledPluginPublicSurfaceModuleSync.mockImplementation((params) => {
if (
params &&
typeof params === "object" &&
"dirName" in params &&
params.dirName === "ollama" &&
"artifactBasename" in params &&
params.artifactBasename === "api.js"
) {
return {
buildOllamaModelDefinition: ollamaMocks.buildOllamaModelDefinition,
queryOllamaModelShowInfo: ollamaMocks.queryOllamaModelShowInfo,
};
}
if (
params &&
typeof params === "object" &&
"dirName" in params &&
params.dirName === "openai" &&
"artifactBasename" in params &&
params.artifactBasename === "api.js"
) {
return {
buildOpenAICodexProvider: () => ({
baseUrl: "https://chatgpt.com/backend-api",
api: "openai-codex-responses",
models: [],
}),
buildOpenAICodexProviderPlugin: () => ({
resolveDynamicModel: ({ modelId }: { modelId: string }) => {
const common = {
id: modelId,
name: modelId,
api: "openai-codex-responses",
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api/codex",
reasoning: true,
input: ["text", "image"],
contextTokens: 272_000,
maxTokens: 128_000,
} as const;
switch (modelId) {
case "gpt-5.4":
return {
...common,
contextWindow: 1_050_000,
cost: { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 },
};
case "gpt-5.5":
return {
...common,
contextWindow: 1_000_000,
cost: { input: 5, output: 30, cacheRead: 0, cacheWrite: 0 },
};
case "gpt-5.5-pro":
return {
...common,
contextWindow: 1_000_000,
cost: { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 },
};
default:
return undefined;
}
},
}),
};
}
throw new Error(`Unexpected facade load: ${JSON.stringify(params)}`);
});
ollamaMocks.buildOllamaModelDefinition.mockClear();
ollamaMocks.queryOllamaModelShowInfo.mockReset();
ollamaMocks.queryOllamaModelShowInfo.mockResolvedValue({});
lmstudioRuntimeMocks.fetchLmstudioModels.mockReset();
lmstudioRuntimeMocks.mapLmstudioWireEntry.mockClear();
lmstudioRuntimeMocks.resolveLmstudioInferenceBase.mockClear();
lmstudioRuntimeMocks.resolveLmstudioRequestContext.mockReset();
});
it("lists addable providers only when the write path can actually add them", () => {
const cfg = {
models: {
providers: {
lmstudio: { baseUrl: "http://localhost:1234/v1", api: "openai-completions", models: [] },
},
},
} as OpenClawConfig;
expect(
listAddableProviders({
cfg,
discoveredProviders: ["openai", "openai-codex", "ollama"],
}),
).toEqual(["lmstudio", "ollama", "openai-codex"]);
});
it("validates add providers against addable providers", () => {
const cfg = {} as OpenClawConfig;
expect(validateAddProvider({ cfg, provider: "ollama", discoveredProviders: [] })).toEqual({
ok: true,
provider: "ollama",
});
expect(validateAddProvider({ cfg, provider: "missing", discoveredProviders: [] })).toEqual({
ok: false,
providers: ["lmstudio", "ollama"],
});
});
it("only bootstraps openai-codex when the provider is discovered", () => {
const cfg = {} as OpenClawConfig;
expect(validateAddProvider({ cfg, provider: "openai-codex", discoveredProviders: [] })).toEqual(
{
ok: false,
providers: ["lmstudio", "ollama"],
},
);
expect(
validateAddProvider({
cfg,
provider: "openai-codex",
discoveredProviders: ["openai-codex"],
}),
).toEqual({
ok: true,
provider: "openai-codex",
});
});
it("rejects discovered providers that are not configured for custom models", () => {
const cfg = {} as OpenClawConfig;
expect(
validateAddProvider({
cfg,
provider: "openai",
discoveredProviders: ["openai"],
}),
).toEqual({
ok: false,
providers: ["lmstudio", "ollama"],
knownProvider: "openai",
});
});
it("adds an ollama model and extends the allowlist when needed", async () => {
const cfg = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
models: {
"anthropic/claude-opus-4-5": {},
},
},
},
models: {
providers: {
ollama: {
baseUrl: "http://127.0.0.1:11434",
api: "ollama",
models: [],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
ollamaMocks.queryOllamaModelShowInfo.mockResolvedValue({
contextWindow: 202752,
capabilities: ["thinking", "tools"],
});
configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
ok: true,
config,
}));
const result = await addModelToConfig({
cfg,
provider: "ollama",
modelId: "glm-5.1:cloud",
});
expect(result.ok).toBe(true);
if (!result.ok) {
return;
}
expect(result.result.existed).toBe(false);
expect(result.result.allowlistAdded).toBe(true);
expect(configMocks.replaceConfigFile).toHaveBeenCalledTimes(1);
const written = configMocks.replaceConfigFile.mock.calls[0]?.[0]?.nextConfig as OpenClawConfig;
expect(written.models?.providers?.ollama?.models).toEqual([
expect.objectContaining({
id: "glm-5.1:cloud",
reasoning: false,
contextWindow: 202752,
}),
]);
expect(written.agents?.defaults?.models?.["ollama/glm-5.1:cloud"]).toEqual({});
});
it("reuses an existing configured provider key when the stored key is non-canonical", async () => {
const cfg = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
models: {
"anthropic/claude-opus-4-5": {},
},
},
},
models: {
providers: {
Ollama: {
baseUrl: "http://127.0.0.1:11434",
api: "ollama",
models: [],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
ollamaMocks.queryOllamaModelShowInfo.mockResolvedValue({
contextWindow: 202752,
capabilities: ["thinking"],
});
configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
ok: true,
config,
}));
const result = await addModelToConfig({
cfg,
provider: "ollama",
modelId: "glm-5.1:cloud",
});
expect(result.ok).toBe(true);
if (!result.ok) {
return;
}
const written = configMocks.replaceConfigFile.mock.calls[0]?.[0]?.nextConfig as OpenClawConfig;
expect(written.models?.providers?.Ollama?.models).toEqual([
expect.objectContaining({
id: "glm-5.1:cloud",
}),
]);
expect(written.models?.providers?.ollama).toBeUndefined();
});
it("treats duplicate provider/model entries as idempotent", async () => {
const cfg = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
},
},
models: {
providers: {
ollama: {
baseUrl: "http://127.0.0.1:11434",
api: "ollama",
models: [
{
id: "glm-5.1:cloud",
name: "glm-5.1:cloud",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 202752,
maxTokens: 8192,
},
],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
const result = await addModelToConfig({
cfg,
provider: "ollama",
modelId: "glm-5.1:cloud",
});
expect(result).toEqual({
ok: true,
result: {
provider: "ollama",
modelId: "glm-5.1:cloud",
existed: true,
allowlistAdded: false,
warnings: ["Model metadata could not be auto-detected; saved with default capabilities."],
},
});
expect(configMocks.replaceConfigFile).not.toHaveBeenCalled();
});
it("bootstraps lmstudio provider config when missing", async () => {
  // No lmstudio provider configured yet: the adapter must bootstrap one.
  const emptyConfig = {
    agents: { defaults: { model: { primary: "anthropic/claude-opus-4-5" } } },
    models: { providers: {} },
  } as OpenClawConfig;
  configMocks.readConfigFileSnapshot.mockResolvedValue({
    valid: true,
    parsed: emptyConfig,
  });
  lmstudioRuntimeMocks.resolveLmstudioRequestContext.mockResolvedValue({
    apiKey: undefined,
    headers: undefined,
  });
  // The local LM Studio endpoint reports exactly one chat model.
  lmstudioRuntimeMocks.fetchLmstudioModels.mockResolvedValue({
    reachable: true,
    status: 200,
    models: [
      {
        type: "llm",
        key: "qwen/qwen3.5-9b",
        display_name: "Qwen 3.5 9B",
        max_context_length: 131072,
        capabilities: { reasoning: { allowed_options: ["off", "on"] } },
      },
    ],
  });
  configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
    ok: true,
    config,
  }));
  const outcome = await addModelToConfig({
    cfg: emptyConfig,
    provider: "lmstudio",
    modelId: "qwen/qwen3.5-9b",
  });
  expect(outcome.ok).toBe(true);
  // The persisted config gains a fully-populated lmstudio provider entry.
  const persisted = configMocks.replaceConfigFile.mock.calls[0]?.[0]?.nextConfig as OpenClawConfig;
  expect(persisted.models?.providers?.lmstudio?.baseUrl).toBe("http://127.0.0.1:1234/v1");
  expect(persisted.models?.providers?.lmstudio?.api).toBe("openai-completions");
  expect(persisted.models?.providers?.lmstudio?.models).toEqual([
    expect.objectContaining({
      id: "qwen/qwen3.5-9b",
      name: "Qwen 3.5 9B",
    }),
  ]);
});
it.each([
  [
    "gpt-5.4",
    {
      contextWindow: 1_050_000,
      cost: { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 },
    },
  ],
  [
    "gpt-5.5",
    {
      contextWindow: 1_000_000,
      cost: { input: 5, output: 30, cacheRead: 0, cacheWrite: 0 },
    },
  ],
  [
    "gpt-5.5-pro",
    {
      contextWindow: 1_000_000,
      cost: { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 },
    },
  ],
])(
  "bootstraps openai-codex metadata for %s from the provider plugin",
  async (modelId, expected) => {
    // An allowlist already exists, so the new ref must be appended to it.
    const seedConfig = {
      agents: {
        defaults: {
          model: { primary: "openai-codex/gpt-5.4" },
          models: {
            "openai-codex/gpt-5.3": {},
          },
        },
      },
      models: { providers: {} },
    } as OpenClawConfig;
    configMocks.readConfigFileSnapshot.mockResolvedValue({
      valid: true,
      parsed: seedConfig,
    });
    configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
      ok: true,
      config,
    }));
    const outcome = await addModelToConfig({
      cfg: seedConfig,
      provider: "openai-codex",
      modelId,
    });
    expect(outcome.ok).toBe(true);
    if (!outcome.ok) {
      return;
    }
    expect(outcome.result.allowlistAdded).toBe(true);
    expect(outcome.result.warnings).toEqual([
      "OpenAI Codex model metadata was saved from provider defaults; provider availability still depends on your Codex account.",
    ]);
    // Provider entry plus per-model metadata must come from the plugin table.
    const persisted = configMocks.replaceConfigFile.mock.calls[0]?.[0]
      ?.nextConfig as OpenClawConfig;
    expect(persisted.models?.providers?.["openai-codex"]).toMatchObject({
      baseUrl: "https://chatgpt.com/backend-api",
      api: "openai-codex-responses",
      models: [
        expect.objectContaining({
          id: modelId,
          api: "openai-codex-responses",
          baseUrl: "https://chatgpt.com/backend-api/codex",
          reasoning: true,
          input: ["text", "image"],
          contextWindow: expected.contextWindow,
          contextTokens: 272_000,
          maxTokens: 128_000,
          cost: expected.cost,
          metadataSource: "models-add",
        }),
      ],
    });
    expect(persisted.agents?.defaults?.models?.[`openai-codex/${modelId}`]).toEqual({});
  },
);
it("returns a generic validation error when config validation fails without issue details", async () => {
  const ollamaOnlyConfig = {
    models: {
      providers: {
        ollama: {
          baseUrl: "http://127.0.0.1:11434",
          api: "ollama",
          models: [],
        },
      },
    },
  } as OpenClawConfig;
  configMocks.readConfigFileSnapshot.mockResolvedValue({
    valid: true,
    parsed: ollamaOnlyConfig,
  });
  ollamaMocks.queryOllamaModelShowInfo.mockResolvedValue({
    contextWindow: 202752,
    capabilities: ["thinking"],
  });
  // Validation fails but supplies no issues to quote in the error message.
  configMocks.validateConfigObjectWithPlugins.mockReturnValue({
    ok: false,
    issues: [],
  });
  const outcome = await addModelToConfig({
    cfg: ollamaOnlyConfig,
    provider: "ollama",
    modelId: "glm-5.1:cloud",
  });
  // The error falls back to a generic detail instead of an issue path/message.
  expect(outcome).toEqual({
    ok: false,
    error: "Config invalid after /models add (unknown validation error).",
  });
});
it("skips lmstudio metadata detection for non-loopback base urls before resolving auth", async () => {
  // lmstudio points at a remote host, so detection must be skipped entirely.
  const remoteConfig = {
    models: {
      providers: {
        lmstudio: {
          baseUrl: "https://example.com/v1",
          api: "openai-completions",
          models: [],
        },
      },
    },
  } as OpenClawConfig;
  configMocks.readConfigFileSnapshot.mockResolvedValue({
    valid: true,
    parsed: remoteConfig,
  });
  configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
    ok: true,
    config,
  }));
  const outcome = await addModelToConfig({
    cfg: remoteConfig,
    provider: "lmstudio",
    modelId: "qwen/qwen3.5-9b",
  });
  expect(outcome.ok).toBe(true);
  if (!outcome.ok) {
    return;
  }
  // Neither auth resolution nor the models fetch may run for remote hosts.
  expect(lmstudioRuntimeMocks.resolveLmstudioRequestContext).not.toHaveBeenCalled();
  expect(lmstudioRuntimeMocks.fetchLmstudioModels).not.toHaveBeenCalled();
  expect(outcome.result.warnings).toContain(
    "LM Studio metadata detection is limited to local baseUrl values; using defaults.",
  );
});
it("does not leak raw lmstudio detection errors in user-facing warnings", async () => {
  const localConfig = {
    models: {
      providers: {
        lmstudio: {
          baseUrl: "http://localhost:1234/v1",
          api: "openai-completions",
          models: [],
        },
      },
    },
  } as OpenClawConfig;
  configMocks.readConfigFileSnapshot.mockResolvedValue({
    valid: true,
    parsed: localConfig,
  });
  lmstudioRuntimeMocks.resolveLmstudioRequestContext.mockResolvedValue({
    apiKey: "secret-token",
    headers: { Authorization: "Bearer secret-token" },
  });
  // The fetch fails with an error that embeds errno and host details.
  lmstudioRuntimeMocks.fetchLmstudioModels.mockRejectedValue(
    new Error("connect ECONNREFUSED http://127.0.0.1:1234/v1/api/v1/models"),
  );
  configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
    ok: true,
    config,
  }));
  const outcome = await addModelToConfig({
    cfg: localConfig,
    provider: "lmstudio",
    modelId: "qwen/qwen3.5-9b",
  });
  expect(outcome.ok).toBe(true);
  if (!outcome.ok) {
    return;
  }
  // Warnings carry only the generic message, never host or errno details.
  expect(outcome.result.warnings).toContain(
    "LM Studio metadata detection failed; using defaults.",
  );
  const joinedWarnings = outcome.result.warnings.join(" ");
  expect(joinedWarnings).not.toContain("ECONNREFUSED");
  expect(joinedWarnings).not.toContain("127.0.0.1");
});
it("returns a retryable error when the config changes before replace", async () => {
  const staleConfig = {
    models: {
      providers: {
        ollama: {
          baseUrl: "http://127.0.0.1:11434",
          api: "ollama",
          models: [],
        },
      },
    },
  } as OpenClawConfig;
  configMocks.readConfigFileSnapshot.mockResolvedValue({
    valid: true,
    parsed: staleConfig,
    hash: "base-hash",
  });
  ollamaMocks.queryOllamaModelShowInfo.mockResolvedValue({
    contextWindow: 202752,
    capabilities: ["thinking"],
  });
  configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
    ok: true,
    config,
  }));
  // Simulate another writer bumping the config hash mid-command.
  configMocks.replaceConfigFile.mockRejectedValue(
    new configMocks.ConfigMutationConflictError("config changed since last load", {
      currentHash: "new-hash",
    }),
  );
  const outcome = await addModelToConfig({
    cfg: staleConfig,
    provider: "ollama",
    modelId: "glm-5.1:cloud",
  });
  expect(outcome).toEqual({
    ok: false,
    error: "Config changed while /models add was running. Retry the command.",
  });
});
});

View File

@@ -1,675 +0,0 @@
import {
buildConfiguredAllowlistKeys,
normalizeProviderId,
resolveDefaultModelForAgent,
resolveModelRefFromString,
} from "../../agents/model-selection.js";
import {
SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
SELF_HOSTED_DEFAULT_COST,
SELF_HOSTED_DEFAULT_MAX_TOKENS,
} from "../../agents/self-hosted-provider-defaults.js";
import {
ConfigMutationConflictError,
readConfigFileSnapshot,
replaceConfigFile,
validateConfigObjectWithPlugins,
} from "../../config/config.js";
import type { ModelDefinitionConfig, ModelProviderConfig } from "../../config/types.models.js";
import type { OpenClawConfig } from "../../config/types.openclaw.js";
import { formatErrorMessage } from "../../infra/errors.js";
import { normalizeHostname } from "../../infra/net/hostname.js";
import { createSubsystemLogger } from "../../logging/subsystem.js";
import { buildRemoteBaseUrlPolicy } from "../../memory-host-sdk/host/remote-http.js";
import {
createLazyFacadeValue,
loadBundledPluginPublicSurfaceModuleSync,
} from "../../plugin-sdk/facade-runtime.js";
import {
fetchLmstudioModels,
LMSTUDIO_DEFAULT_API_KEY_ENV_VAR,
LMSTUDIO_DEFAULT_INFERENCE_BASE_URL,
mapLmstudioWireEntry,
resolveLmstudioInferenceBase,
resolveLmstudioRequestContext,
} from "../../plugin-sdk/lmstudio-runtime.js";
import type { ProviderRuntimeModel } from "../../plugins/provider-runtime-model.types.js";
import { isLoopbackIpAddress } from "../../shared/net/ip.js";
import {
normalizeLowercaseStringOrEmpty,
normalizeOptionalString,
} from "../../shared/string-coerce.js";
/**
 * Provider-specific hooks used by "/models add" to bootstrap provider config
 * entries and auto-detect metadata for a requested model id.
 */
export type ModelAddAdapter = {
providerId: string;
/** "discovered" adapters only bootstrap when the provider was discovered at runtime. */
bootstrapMode?: "always" | "discovered";
/** Produces a provider config template, or null when bootstrapping is not possible. */
bootstrapProviderConfig?: (cfg: OpenClawConfig) => ModelProviderConfig | null;
/** Attempts to detect model metadata; `found: false` falls back to default capabilities. */
detect?: (params: {
cfg: OpenClawConfig;
providerConfig: ModelProviderConfig;
modelId: string;
}) => Promise<{
found: boolean;
model?: ModelDefinitionConfig;
warnings?: string[];
}>;
};
/** Success payload reported back by addModelToConfig. */
type AddModelOutcome = {
provider: string;
modelId: string;
// True when the model was already present in the provider's models list.
existed: boolean;
// True when an agents.defaults.models allowlist entry was created.
allowlistAdded: boolean;
warnings: string[];
};
/** Outcome of validating a provider argument for "/models add". */
export type ValidateAddProviderResult =
| { ok: true; provider: string }
| { ok: false; providers: string[]; knownProvider?: string };
/** Subset of ollama "show" data used to build a model definition. */
type OllamaModelShowInfo = {
contextWindow?: number;
capabilities?: string[];
};
/** Public surface of the bundled ollama plugin, loaded lazily via a facade. */
type OllamaApiFacade = {
buildOllamaModelDefinition: (
modelId: string,
contextWindow?: number,
capabilities?: string[],
) => ModelDefinitionConfig;
queryOllamaModelShowInfo: (apiBase: string, modelName: string) => Promise<OllamaModelShowInfo>;
};
/** Public surface of the bundled openai plugin, loaded lazily via a facade. */
type OpenAIApiFacade = {
buildOpenAICodexProvider: () => ModelProviderConfig;
buildOpenAICodexProviderPlugin: () => {
resolveDynamicModel?: (ctx: {
provider: string;
modelId: string;
modelRegistry: { find: () => null };
}) => ProviderRuntimeModel | null | undefined;
};
};
// Subsystem logger for "/models add" diagnostics.
const log = createSubsystemLogger("models-add");
// Default local ollama daemon endpoint used when bootstrapping the provider.
const OLLAMA_DEFAULT_BASE_URL = "http://127.0.0.1:11434";
// Loads the bundled ollama plugin's public surface module (synchronous).
function loadOllamaApiFacade(): OllamaApiFacade {
return loadBundledPluginPublicSurfaceModuleSync<OllamaApiFacade>({
dirName: "ollama",
artifactBasename: "api.js",
});
}
// Loads the bundled openai plugin's public surface module (synchronous).
function loadOpenAIApiFacade(): OpenAIApiFacade {
return loadBundledPluginPublicSurfaceModuleSync<OpenAIApiFacade>({
dirName: "openai",
artifactBasename: "api.js",
});
}
// Lazy facade bindings: resolved through createLazyFacadeValue so the plugin
// modules are not loaded at import time of this file.
const buildOllamaModelDefinition: OllamaApiFacade["buildOllamaModelDefinition"] =
createLazyFacadeValue(loadOllamaApiFacade, "buildOllamaModelDefinition");
const queryOllamaModelShowInfo: OllamaApiFacade["queryOllamaModelShowInfo"] = createLazyFacadeValue(
loadOllamaApiFacade,
"queryOllamaModelShowInfo",
);
const buildOpenAICodexProvider: OpenAIApiFacade["buildOpenAICodexProvider"] = createLazyFacadeValue(
loadOpenAIApiFacade,
"buildOpenAICodexProvider",
);
const buildOpenAICodexProviderPlugin: OpenAIApiFacade["buildOpenAICodexProviderPlugin"] =
createLazyFacadeValue(loadOpenAIApiFacade, "buildOpenAICodexProviderPlugin");
/**
 * Redacts a URL for logging: strips credentials, query string, and fragment.
 * Returns undefined for blank input and "[invalid_url]" for unparseable input.
 */
function sanitizeUrlForLogs(raw: string | undefined): string | undefined {
  const candidate = normalizeOptionalString(raw);
  if (!candidate) {
    return undefined;
  }
  let parsed: URL;
  try {
    parsed = new URL(candidate);
  } catch {
    // Never log the raw string: it may itself contain secrets.
    return "[invalid_url]";
  }
  parsed.username = "";
  parsed.password = "";
  parsed.search = "";
  parsed.hash = "";
  return parsed.toString();
}
/**
 * Fallback model definition used when metadata detection is unavailable or
 * fails: text-only input, no reasoning, self-hosted default cost and limits.
 */
function buildDefaultModelDefinition(modelId: string): ModelDefinitionConfig {
return {
id: modelId,
name: modelId,
reasoning: false,
input: ["text"],
cost: SELF_HOSTED_DEFAULT_COST,
contextWindow: SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
maxTokens: SELF_HOSTED_DEFAULT_MAX_TOKENS,
};
}
/**
 * Builds a model definition for an OpenAI Codex model. Prefers dynamic
 * metadata from the bundled provider plugin; falls back to zero-cost,
 * self-hosted default limits when the plugin does not know the model.
 */
function buildOpenAICodexModelDefinition(modelId: string): ModelDefinitionConfig {
const dynamicModel = buildOpenAICodexProviderPlugin().resolveDynamicModel?.({
provider: "openai-codex",
modelId,
// The plugin only needs a registry interface; an always-miss stub suffices.
modelRegistry: { find: () => null },
});
if (dynamicModel) {
return {
id: dynamicModel.id,
name: dynamicModel.name,
api: "openai-codex-responses",
baseUrl: dynamicModel.baseUrl,
reasoning: dynamicModel.reasoning,
input: [...dynamicModel.input],
cost: dynamicModel.cost,
contextWindow: dynamicModel.contextWindow,
// Optional fields are only written when the plugin supplies them.
...(dynamicModel.contextTokens ? { contextTokens: dynamicModel.contextTokens } : {}),
maxTokens: dynamicModel.maxTokens,
...(dynamicModel.headers ? { headers: dynamicModel.headers } : {}),
...(dynamicModel.compat ? { compat: dynamicModel.compat } : {}),
metadataSource: "models-add",
};
}
return {
id: modelId,
name: modelId,
api: "openai-codex-responses",
reasoning: true,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
maxTokens: SELF_HOSTED_DEFAULT_MAX_TOKENS,
metadataSource: "models-add",
};
}
function resolveConfiguredProvider(
cfg: OpenClawConfig,
providerId: string,
): { providerKey: string; providerConfig: ModelProviderConfig } | undefined {
const normalizedProviderId = normalizeProviderId(providerId);
if (!normalizedProviderId) {
return undefined;
}
const providers = cfg.models?.providers;
if (!providers) {
return undefined;
}
for (const [configuredProviderId, configuredProvider] of Object.entries(providers)) {
if (normalizeProviderId(configuredProviderId) === normalizedProviderId) {
return {
providerKey: configuredProviderId,
providerConfig: configuredProvider,
};
}
}
return undefined;
}
/**
 * Default LM Studio provider entry: local OpenAI-compatible endpoint with
 * api-key auth. Note apiKey is set to LMSTUDIO_DEFAULT_API_KEY_ENV_VAR —
 * presumably an env-var reference resolved elsewhere; confirm against the
 * lmstudio runtime module.
 */
function buildDefaultLmstudioProviderConfig(): ModelProviderConfig {
return {
baseUrl: resolveLmstudioInferenceBase(LMSTUDIO_DEFAULT_INFERENCE_BASE_URL),
api: "openai-completions",
auth: "api-key",
apiKey: LMSTUDIO_DEFAULT_API_KEY_ENV_VAR,
models: [],
};
}
/**
 * True when the base URL is an http(s) endpoint on localhost or a loopback
 * IP. Used to restrict LM Studio metadata probing to local endpoints.
 */
function isLocalLmstudioBaseUrl(baseUrl: string | undefined): boolean {
  const value = normalizeOptionalString(baseUrl);
  if (!value) {
    return false;
  }
  let parsed: URL;
  try {
    parsed = new URL(value);
  } catch {
    return false;
  }
  if (parsed.protocol !== "http:" && parsed.protocol !== "https:") {
    return false;
  }
  const host = normalizeHostname(parsed.hostname);
  if (host === "localhost" || host === "localhost.localdomain") {
    return true;
  }
  return isLoopbackIpAddress(host);
}
// Adapter registry keyed by normalized provider id. Only these providers get
// bespoke bootstrap/metadata-detection behavior; all other providers fall
// back to default model definitions.
const MODEL_ADD_ADAPTERS: Record<string, ModelAddAdapter> = {
"openai-codex": {
providerId: "openai-codex",
// Only bootstrap when the provider was discovered at runtime.
bootstrapMode: "discovered",
bootstrapProviderConfig: () => ({
...buildOpenAICodexProvider(),
models: [],
}),
// Metadata comes from the bundled provider plugin — no network probe.
detect: async ({ modelId }) => ({
found: true,
model: buildOpenAICodexModelDefinition(modelId),
warnings: [
"OpenAI Codex model metadata was saved from provider defaults; provider availability still depends on your Codex account.",
],
}),
},
ollama: {
providerId: "ollama",
bootstrapProviderConfig: () => ({
baseUrl: OLLAMA_DEFAULT_BASE_URL,
api: "ollama",
apiKey: "ollama-local",
models: [],
}),
// Queries the ollama daemon for context window / capabilities; detection
// counts as successful when either piece of data comes back.
detect: async ({ providerConfig, modelId }) => {
const info = (await queryOllamaModelShowInfo(providerConfig.baseUrl, modelId)) ?? {};
return {
found: typeof info.contextWindow === "number" || (info.capabilities?.length ?? 0) > 0,
model: buildOllamaModelDefinition(modelId, info.contextWindow, info.capabilities),
};
},
},
lmstudio: {
providerId: "lmstudio",
bootstrapProviderConfig: () => buildDefaultLmstudioProviderConfig(),
detect: async ({ cfg, providerConfig, modelId }) => {
// Refuse to probe non-local endpoints; remote hosts get defaults.
if (!isLocalLmstudioBaseUrl(providerConfig.baseUrl)) {
return {
found: false,
warnings: [
"LM Studio metadata detection is limited to local baseUrl values; using defaults.",
],
};
}
try {
// Resolve auth against a config with this provider entry substituted in,
// so a bootstrapped (not-yet-saved) provider config works too.
const { apiKey, headers } = await resolveLmstudioRequestContext({
config: {
...cfg,
models: {
...cfg.models,
providers: {
...cfg.models?.providers,
lmstudio: providerConfig,
},
},
},
env: process.env,
providerHeaders: providerConfig.headers,
});
const fetched = await fetchLmstudioModels({
baseUrl: providerConfig.baseUrl,
apiKey,
headers,
ssrfPolicy: buildRemoteBaseUrlPolicy(providerConfig.baseUrl),
});
const match = fetched.models.find(
(entry) => normalizeOptionalString(entry.key) === modelId,
);
const base = match ? mapLmstudioWireEntry(match) : null;
if (!base) {
return { found: false };
}
return {
found: true,
model: {
id: base.id,
name: base.displayName,
reasoning: base.reasoning,
input: base.input,
cost: base.cost,
contextWindow: base.contextWindow,
contextTokens: base.contextTokens,
maxTokens: base.maxTokens,
},
};
} catch (error) {
// Log details (with sanitized URL) but keep the user-facing warning
// generic so connection errors cannot leak hosts/ports/secrets.
log.warn("lmstudio model metadata detection failed; using defaults", {
baseUrl: sanitizeUrlForLogs(providerConfig.baseUrl),
modelId,
error: formatErrorMessage(error),
});
return {
found: false,
warnings: ["LM Studio metadata detection failed; using defaults."],
};
}
},
},
};
/**
 * True when a model can be added for the provider: either the provider is
 * already configured, or an adapter can bootstrap a config for it (subject
 * to the "discovered"-only bootstrap gate).
 */
function canAddProvider(params: {
  cfg: OpenClawConfig;
  provider: string;
  allowDiscoveredBootstrap?: boolean;
}): boolean {
  const normalized = normalizeProviderId(params.provider);
  if (!normalized) {
    return false;
  }
  // Already-configured providers can always receive new models.
  if (resolveConfiguredProvider(params.cfg, normalized)) {
    return true;
  }
  const adapter = MODEL_ADD_ADAPTERS[normalized];
  const bootstrap = adapter?.bootstrapProviderConfig;
  if (!bootstrap) {
    return false;
  }
  if (adapter.bootstrapMode === "discovered" && !params.allowDiscoveredBootstrap) {
    return false;
  }
  return Boolean(bootstrap(params.cfg));
}
/**
 * Lists every provider id that "/models add" can target, sorted: discovered
 * providers that pass canAddProvider, all configured providers, and adapters
 * that bootstrap unconditionally.
 */
export function listAddableProviders(params: {
  cfg: OpenClawConfig;
  discoveredProviders?: readonly string[];
}): string[] {
  const addable = new Set<string>();
  // Discovered providers count when configured or bootstrappable.
  for (const raw of params.discoveredProviders ?? []) {
    const id = normalizeProviderId(raw);
    if (!id) {
      continue;
    }
    if (canAddProvider({ cfg: params.cfg, provider: id, allowDiscoveredBootstrap: true })) {
      addable.add(id);
    }
  }
  // Every explicitly configured provider is addable.
  for (const raw of Object.keys(params.cfg.models?.providers ?? {})) {
    const id = normalizeProviderId(raw);
    if (id) {
      addable.add(id);
    }
  }
  // Adapters without the "discovered" gate are always offered.
  for (const [id, adapter] of Object.entries(MODEL_ADD_ADAPTERS)) {
    if (adapter.bootstrapMode !== "discovered") {
      addable.add(id);
    }
  }
  return [...addable].toSorted();
}
/**
 * Validates a provider argument against the addable-provider list. On
 * failure, returns the full list plus `knownProvider` when the id was
 * discovered at runtime but is still not addable.
 */
export function validateAddProvider(params: {
  cfg: OpenClawConfig;
  provider: string;
  discoveredProviders?: readonly string[];
}): ValidateAddProviderResult {
  const normalized = normalizeProviderId(params.provider);
  const providers = listAddableProviders({
    cfg: params.cfg,
    discoveredProviders: params.discoveredProviders,
  });
  if (normalized && providers.includes(normalized)) {
    return { ok: true, provider: normalized };
  }
  const knownProvider = (params.discoveredProviders ?? [])
    .map((raw) => normalizeProviderId(raw))
    .find((candidate) => candidate === normalized);
  return { ok: false, providers, ...(knownProvider ? { knownProvider } : {}) };
}
function ensureProviderConfig(params: { cfg: OpenClawConfig; provider: string }):
| {
ok: true;
providerKey: string;
providerConfig: ModelProviderConfig;
bootstrapped: boolean;
}
| { ok: false } {
const configuredProvider = resolveConfiguredProvider(params.cfg, params.provider);
if (configuredProvider) {
return {
ok: true,
providerKey: configuredProvider.providerKey,
providerConfig: configuredProvider.providerConfig,
bootstrapped: false,
};
}
const bootstrapped = MODEL_ADD_ADAPTERS[params.provider]?.bootstrapProviderConfig?.(params.cfg);
if (!bootstrapped) {
return { ok: false };
}
return {
ok: true,
providerKey: params.provider,
providerConfig: bootstrapped,
bootstrapped: true,
};
}
/**
 * Detects a model definition via the provider's adapter, falling back to
 * the default definition (with a generic warning appended) when there is no
 * adapter or detection does not find the model.
 */
async function detectModelDefinition(params: {
  cfg: OpenClawConfig;
  provider: string;
  providerConfig: ModelProviderConfig;
  modelId: string;
}): Promise<{ model: ModelDefinitionConfig; warnings: string[] }> {
  const fallbackWarning =
    "Model metadata could not be auto-detected; saved with default capabilities.";
  const adapter = MODEL_ADD_ADAPTERS[params.provider];
  if (!adapter?.detect) {
    return {
      model: buildDefaultModelDefinition(params.modelId),
      warnings: [fallbackWarning],
    };
  }
  const detected = await adapter.detect(params);
  if (detected.found && detected.model) {
    return { model: detected.model, warnings: detected.warnings ?? [] };
  }
  // Keep any adapter-specific warnings, then append the generic fallback note.
  return {
    model: buildDefaultModelDefinition(params.modelId),
    warnings: [...(detected.warnings ?? []), fallbackWarning],
  };
}
/**
 * Public detection entry point: reports whether detection is supported for
 * the provider at all, and if so whether metadata for the model was found.
 */
export async function detectProviderModelDefinition(params: {
  cfg: OpenClawConfig;
  provider: string;
  modelId: string;
}): Promise<{
  supported: boolean;
  found: boolean;
  model?: ModelDefinitionConfig;
  warnings: string[];
}> {
  const provider = normalizeProviderId(params.provider);
  const modelId = normalizeOptionalString(params.modelId) ?? "";
  // Detection is only "supported" for providers that ship a detect adapter.
  const adapter = provider ? MODEL_ADD_ADAPTERS[provider] : undefined;
  if (!provider || !modelId || !adapter?.detect) {
    return { supported: false, found: false, warnings: [] };
  }
  const providerResolution = ensureProviderConfig({ cfg: params.cfg, provider });
  // Supported provider without a usable config: nothing to probe against.
  if (!providerResolution.ok) {
    return { supported: true, found: false, warnings: [] };
  }
  const detected = await adapter.detect({
    cfg: params.cfg,
    providerConfig: providerResolution.providerConfig,
    modelId,
  });
  return {
    supported: true,
    found: detected.found && !!detected.model,
    model: detected.model,
    warnings: detected.warnings ?? [],
  };
}
/**
 * Returns a deep-cloned config with the model appended to the provider's
 * models list (matched case-insensitively by id). `existed` is true when the
 * model was already present, in which case the list is left unchanged.
 */
function upsertModelEntry(params: {
  cfg: OpenClawConfig;
  provider: string;
  providerKey: string;
  providerConfig: ModelProviderConfig;
  model: ModelDefinitionConfig;
}): { nextConfig: OpenClawConfig; existed: boolean } {
  const nextConfig = structuredClone(params.cfg);
  nextConfig.models ??= {};
  nextConfig.models.providers ??= {};
  // Start from the already-configured entry when present, otherwise from the
  // (possibly bootstrapped) template; always copy the models array.
  const source = nextConfig.models.providers[params.providerKey] ?? params.providerConfig;
  const providerEntry = {
    ...source,
    models: Array.isArray(source.models) ? [...source.models] : [],
  };
  const wantedId = normalizeLowercaseStringOrEmpty(params.model.id);
  const existed = providerEntry.models.some(
    (entry) => normalizeLowercaseStringOrEmpty(entry?.id) === wantedId,
  );
  if (!existed) {
    providerEntry.models.push(params.model);
  }
  nextConfig.models.providers[params.providerKey] = providerEntry;
  return { nextConfig, existed };
}
/**
 * If the config restricts agents to an explicit model allowlist
 * (agents.defaults.models), adds the new provider/model ref so the freshly
 * added model is actually usable. No-op (input returned unchanged) when no
 * allowlist exists, the ref cannot be resolved, or it is already listed.
 */
function maybeAddAllowlistEntry(params: {
  cfg: OpenClawConfig;
  provider: string;
  modelId: string;
}): { nextConfig: OpenClawConfig; added: boolean } {
  // Hoisted: the original computed this twice for the same config.
  const defaultProvider = resolveDefaultModelForAgent({ cfg: params.cfg }).provider;
  const allowlistKeys = buildConfiguredAllowlistKeys({
    cfg: params.cfg,
    defaultProvider,
  });
  // No allowlist configured — nothing to extend.
  if (!allowlistKeys || allowlistKeys.size === 0) {
    return { nextConfig: params.cfg, added: false };
  }
  const rawRef = `${params.provider}/${params.modelId}`;
  const resolved = resolveModelRefFromString({
    raw: rawRef,
    defaultProvider,
  });
  if (!resolved) {
    return { nextConfig: params.cfg, added: false };
  }
  const normalizedKey = `${resolved.ref.provider}/${resolved.ref.model}`.toLowerCase();
  if (allowlistKeys.has(normalizedKey)) {
    return { nextConfig: params.cfg, added: false };
  }
  // Clone only when a change is actually required; never mutate the input.
  const nextConfig = structuredClone(params.cfg);
  nextConfig.agents ??= {};
  nextConfig.agents.defaults ??= {};
  nextConfig.agents.defaults.models ??= {};
  nextConfig.agents.defaults.models[rawRef] = {};
  return { nextConfig, added: true };
}
/**
 * Adds a model to the config file for the given provider: resolves (or
 * bootstraps) the provider entry, detects model metadata, upserts the model,
 * optionally extends the agent model allowlist, validates the result, and
 * writes the file back with optimistic concurrency via the snapshot hash.
 */
export async function addModelToConfig(params: {
cfg: OpenClawConfig;
provider: string;
modelId: string;
}): Promise<{ ok: true; result: AddModelOutcome } | { ok: false; error: string }> {
const provider = normalizeProviderId(params.provider);
const modelId = normalizeOptionalString(params.modelId) ?? "";
if (!provider || !modelId) {
return { ok: false, error: "Provider and model id are required." };
}
// Work from the on-disk snapshot (not params.cfg) so the write is based on
// the latest persisted state; the hash guards against concurrent edits.
const snapshot = await readConfigFileSnapshot();
if (!snapshot.valid || !snapshot.parsed || typeof snapshot.parsed !== "object") {
return { ok: false, error: "Config file is invalid; fix it before using /models add." };
}
const currentConfig = structuredClone(snapshot.parsed as OpenClawConfig);
const providerResolution = ensureProviderConfig({
cfg: currentConfig,
provider,
});
if (!providerResolution.ok) {
return {
ok: false,
error: `Provider "${provider}" is not configured for custom models yet. Configure the provider first, then retry /models add.`,
};
}
const detected = await detectModelDefinition({
cfg: currentConfig,
provider,
providerConfig: providerResolution.providerConfig,
modelId,
});
const upserted = upsertModelEntry({
cfg: currentConfig,
provider,
providerKey: providerResolution.providerKey,
providerConfig: providerResolution.providerConfig,
model: detected.model,
});
const allowlisted = maybeAddAllowlistEntry({
cfg: upserted.nextConfig,
provider,
modelId,
});
// Skip validation and the file write entirely when nothing changed.
const changed = !upserted.existed || allowlisted.added || providerResolution.bootstrapped;
if (!changed) {
return {
ok: true,
result: {
provider,
modelId,
existed: true,
allowlistAdded: false,
warnings: detected.warnings,
},
};
}
const validated = validateConfigObjectWithPlugins(allowlisted.nextConfig);
if (!validated.ok) {
// Quote the first issue when available, otherwise a generic detail.
const issue = validated.issues[0];
const detail = issue ? `${issue.path}: ${issue.message}` : "unknown validation error";
return {
ok: false,
error: `Config invalid after /models add (${detail}).`,
};
}
try {
await replaceConfigFile({
nextConfig: validated.config,
...(snapshot.hash !== undefined ? { baseHash: snapshot.hash } : {}),
});
} catch (error) {
// Optimistic-concurrency conflict: ask the user to retry rather than clobber.
if (error instanceof ConfigMutationConflictError) {
return {
ok: false,
error: "Config changed while /models add was running. Retry the command.",
};
}
throw error;
}
return {
ok: true,
result: {
provider,
modelId,
existed: upserted.existed,
allowlistAdded: allowlisted.added,
warnings: detected.warnings,
},
};
}

View File

@@ -1861,7 +1861,7 @@ describe("buildCommandsMessage", () => {
expect(text).toContain("/skill - Run a skill by name.");
expect(text).toContain("/think (/thinking, /t) - Set thinking level.");
expect(text).toContain("/compact - Compact the session context.");
expect(text).toContain("/models - List model providers/models or add a model.");
expect(text).toContain("/models - List model providers/models.");
expect(text).not.toContain("/config");
expect(text).not.toContain("/debug");
});