fix: deprecate models add command (#71175)

This commit is contained in:
Tak Hoffman
2026-04-24 12:20:59 -05:00
committed by GitHub
parent e35e6e1d15
commit 59e2825274
28 changed files with 192 additions and 1875 deletions

View File

@@ -32,6 +32,7 @@ Docs: https://docs.openclaw.ai
- Models/CLI: speed up `openclaw models list --all --provider <id>` for bundled providers with safe static catalogs while keeping live and third-party providers on registry discovery. (#70632) Thanks @shakkernerd.
- Models/CLI: avoid broad registry enumeration for default `openclaw models list`, reducing default listing latency while preserving configured-row output. (#70883) Thanks @shakkernerd.
- Models/CLI: split `openclaw models list` row-source orchestration and registry loading into narrower helpers without changing list output behavior. (#70867) Thanks @shakkernerd.
- Models/commands: deprecate `/models add` so chat attempts now return a deprecation message instead of writing model configuration, and remove the add action from `/models` provider menus.
- Codex harness/context-engine: run context-engine bootstrap, assembly, post-turn maintenance, and engine-owned compaction in Codex app-server sessions while keeping native Codex thread state and compaction auditable. (#70809) Thanks @jalehman.
- Plugins/Google Meet: add a bundled participant plugin with personal Google auth, explicit meeting URL joins, Chrome and Twilio transports, and realtime voice support. (#70765) Thanks @steipete.
- Plugins/Google Meet: default Chrome realtime sessions to OpenAI plus SoX `rec`/`play` audio bridge commands, so the usual setup only needs the plugin enabled and `OPENAI_API_KEY`.

View File

@@ -1,4 +1,4 @@
de02a4b0ec521fda7d951d2dc0c742fc2fa310647ffd56a666346e5ddc6b5a59 config-baseline.json
bf00f7910d8f0d8e12592e8a1c6bd0397f8e62fef2c11eb0cbd3b3a3e2a78ffe config-baseline.core.json
2894c80bc234b33f14a0ddb09e6f42368da4ca23e0e7faceb13aba52239c1a76 config-baseline.json
c8ff25fcdd2389d5fd88f8ba188d77c21f58b56765b555eecf3b37437f743d50 config-baseline.core.json
22d7cd6d8279146b2d79c9531a55b80b52a2c99c81338c508104729154fdd02d config-baseline.channel.json
b79dc28c1b6002dc59bd77bde47c1855a9ece72b9fdf94c0baf0c5320b2be12c config-baseline.plugin.json
5ce9062d0ab7f9447f149fc0770571068b4c4c89e0fb80ae6ef7b3f2a146c8b3 config-baseline.plugin.json

View File

@@ -305,7 +305,7 @@ By default, components are single use. Set `components.reusable=true` to allow b
To restrict who can click a button, set `allowedUsers` on that button (Discord user IDs, tags, or `*`). When configured, unmatched users receive an ephemeral denial.
The `/model` and `/models` slash commands open an interactive model picker with provider and model dropdowns plus a Submit step. Unless `commands.modelsWrite=false`, `/models add` also supports adding a new provider/model entry from chat, and newly added models show up without restarting the gateway. The picker reply is ephemeral and only the invoking user can use it.
The `/model` and `/models` slash commands open an interactive model picker with provider and model dropdowns plus a Submit step. `/models add` is deprecated and now returns a deprecation message instead of registering models from chat. The picker reply is ephemeral and only the invoking user can use it.
File attachments:

View File

@@ -301,8 +301,8 @@ Surface different features that extend the above defaults.
},
{
"command": "/models",
"description": "List providers/models or add a model",
"usage_hint": "[provider] [page] [limit=<n>|size=<n>|all] | add <provider> <modelId>"
"description": "List providers/models",
"usage_hint": "[provider] [page] [limit=<n>|size=<n>|all]"
},
{
"command": "/help",

View File

@@ -130,9 +130,7 @@ Notes:
- `/model` (and `/model list`) is a compact, numbered picker (model family + available providers).
- On Discord, `/model` and `/models` open an interactive picker with provider and model dropdowns plus a Submit step.
- `/models add` is available by default and can be disabled with `commands.modelsWrite=false`.
- When enabled, `/models add <provider> <modelId>` is the fastest path; bare `/models add` starts a provider-first guided flow where supported.
- After `/models add`, the new model becomes available in `/models` and `/model` without restarting the gateway.
- `/models add` is deprecated and now returns a deprecation message instead of registering models from chat.
- `/model <#>` selects from that picker.
- `/model` persists the new session selection immediately.
- If the agent is idle, the next run uses the new model right away.
@@ -151,14 +149,6 @@ Notes:
Full command behavior/config: [Slash commands](/tools/slash-commands).
Examples:
```text
/models add
/models add ollama glm-5.1:cloud
/models add lmstudio qwen/qwen3.5-9b
```
## CLI commands
```bash

View File

@@ -3138,8 +3138,8 @@ describe("createTelegramBot", () => {
}
)?.reply_markup?.inline_keyboard?.[0]?.[0],
).toEqual({
text: "Add model",
callback_data: "/models add",
text: "openai (1)",
callback_data: "mdl_list_openai_1",
});
});
@@ -3527,8 +3527,8 @@ describe("createTelegramBot", () => {
}
)?.reply_markup?.inline_keyboard?.[0]?.[0],
).toEqual({
text: "Add model",
callback_data: "/models add",
text: "openai (1)",
callback_data: "mdl_list_openai_1",
});
});

View File

@@ -10,10 +10,7 @@ import {
export { buildCommandsPaginationKeyboard };
export function buildTelegramModelsMenuButtons(params: { providers: ProviderInfo[] }) {
return [
[{ text: "Add model", callback_data: "/models add" }],
...buildProviderKeyboard(params.providers),
];
return buildProviderKeyboard(params.providers);
}
export function buildTelegramModelsMenuChannelData(params: {

View File

@@ -39,7 +39,6 @@ describe("createTelegramPluginBase config duplicate token guard", () => {
expect(channelData).toEqual({
telegram: {
buttons: [
[{ text: "Add model", callback_data: "/models add" }],
[
{ text: "anthropic (2)", callback_data: "mdl_list_anthropic_1" },
{ text: "openai (3)", callback_data: "mdl_list_openai_1" },

View File

@@ -0,0 +1,38 @@
import { describe, expect, it } from "vitest";
import { isLegacyModelsAddCodexMetadataModel } from "./openai-codex-models-add-legacy.js";
/**
 * Builds a model definition carrying the legacy `/models add` codex metadata
 * fingerprint (old pro pricing, 400k context window) for the tests below.
 */
function buildLegacyModel(id: string) {
  // Legacy pricing/window values that the fingerprint check keys off.
  const legacyCost = { input: 5, output: 30, cacheRead: 0.5, cacheWrite: 0 };
  return {
    id,
    api: "openai-codex-responses",
    reasoning: true,
    input: ["text", "image"],
    cost: legacyCost,
    contextWindow: 400_000,
    contextTokens: 272_000,
    maxTokens: 128_000,
  };
}
describe("isLegacyModelsAddCodexMetadataModel", () => {
  // All fingerprint checks are scoped to the openai-codex provider.
  const provider = "openai-codex";

  it("matches the legacy gpt-5.5-pro models-add fingerprint", () => {
    const model = buildLegacyModel("gpt-5.5-pro");
    expect(isLegacyModelsAddCodexMetadataModel({ provider, model })).toBe(true);
  });

  it("does not match current pro pricing as legacy models-add metadata", () => {
    // Same model id, but with the current (non-legacy) pricing applied on top.
    const model = {
      ...buildLegacyModel("gpt-5.5-pro"),
      cost: { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 },
    };
    expect(isLegacyModelsAddCodexMetadataModel({ provider, model })).toBe(false);
  });
});

View File

@@ -848,7 +848,7 @@ export function buildBuiltinChatCommands(): ChatCommandDefinition[] {
defineChatCommand({
key: "models",
nativeName: "models",
description: "List model providers/models or add a model.",
description: "List model providers/models.",
textAlias: "/models",
tier: "standard",
argsParsing: "none",

View File

@@ -17,26 +17,8 @@ const modelAuthLabelMocks = vi.hoisted(() => ({
resolveModelAuthLabel: vi.fn<(params: unknown) => string | undefined>(() => undefined),
}));
const modelsAddMocks = vi.hoisted(() => ({
addModelToConfig: vi.fn(),
listAddableProviders: vi.fn<(params: unknown) => string[]>(),
validateAddProvider:
vi.fn<
(
params: unknown,
) =>
| { ok: true; provider: string }
| { ok: false; providers: string[]; knownProvider?: string }
>(),
}));
const configWriteAuthMocks = vi.hoisted(() => ({
resolveConfigWriteDeniedText: vi.fn<(params: { target: string }) => string | null>(() => null),
}));
const configWriteTargetMocks = vi.hoisted(() => ({
resolveConfigWriteTargetFromPath: vi.fn((path: string[]) => path.join(".")),
}));
const MODELS_ADD_DEPRECATED_TEXT =
"⚠️ /models add is deprecated. Use /models to browse providers and /model to switch models.";
vi.mock("../../agents/model-catalog.js", () => ({
loadModelCatalog: modelCatalogMocks.loadModelCatalog,
@@ -46,24 +28,6 @@ vi.mock("../../agents/model-auth-label.js", () => ({
resolveModelAuthLabel: modelAuthLabelMocks.resolveModelAuthLabel,
}));
vi.mock("../../channels/plugins/config-writes.js", () => ({
resolveConfigWriteTargetFromPath: configWriteTargetMocks.resolveConfigWriteTargetFromPath,
}));
vi.mock("./config-write-authorization.js", () => ({
resolveConfigWriteDeniedText: configWriteAuthMocks.resolveConfigWriteDeniedText,
}));
vi.mock("./models-add.js", async () => {
const actual = await vi.importActual<typeof import("./models-add.js")>("./models-add.js");
return {
...actual,
addModelToConfig: modelsAddMocks.addModelToConfig,
listAddableProviders: modelsAddMocks.listAddableProviders,
validateAddProvider: modelsAddMocks.validateAddProvider,
};
});
const telegramModelsTestPlugin: ChannelPlugin = {
...createChannelTestPluginBase({
id: "telegram",
@@ -80,19 +44,6 @@ const telegramModelsTestPlugin: ChannelPlugin = {
},
}),
commands: {
buildModelsMenuChannelData: ({ providers }) => ({
telegram: {
buttons: [
[{ text: "Add model", callback_data: "/models add" }],
...providers.map((provider) => [
{
text: provider.id,
callback_data: `models:${provider.id}`,
},
]),
],
},
}),
buildModelsProviderChannelData: ({ providers }) => ({
telegram: {
buttons: providers.map((provider) => [
@@ -103,14 +54,23 @@ const telegramModelsTestPlugin: ChannelPlugin = {
]),
},
}),
buildModelsAddProviderChannelData: ({ providers }) => ({
telegram: {
buttons: providers.map((provider) => [
{
text: provider.id,
callback_data: `/models add ${provider.id}`,
},
]),
},
};
const menuOnlyModelsTestPlugin: ChannelPlugin = {
...createChannelTestPluginBase({
id: "menuonly",
label: "Menu Only",
capabilities: {
chatTypes: ["direct"],
nativeCommands: true,
},
}),
commands: {
buildModelsMenuChannelData: ({ providers }) => ({
menuonly: {
providerIds: providers.map((provider) => provider.id),
labels: providers.map((provider) => `${provider.id}:${provider.count}`),
},
}),
},
@@ -133,32 +93,6 @@ beforeEach(() => {
]);
modelAuthLabelMocks.resolveModelAuthLabel.mockReset();
modelAuthLabelMocks.resolveModelAuthLabel.mockReturnValue(undefined);
modelsAddMocks.addModelToConfig.mockReset();
modelsAddMocks.addModelToConfig.mockResolvedValue({
ok: true,
result: {
provider: "ollama",
modelId: "glm-5.1:cloud",
existed: false,
allowlistAdded: false,
warnings: [],
},
});
modelsAddMocks.listAddableProviders.mockReset();
modelsAddMocks.listAddableProviders.mockReturnValue([
"anthropic",
"lmstudio",
"ollama",
"openai",
]);
modelsAddMocks.validateAddProvider.mockReset();
modelsAddMocks.validateAddProvider.mockImplementation((params: unknown) => ({
ok: true,
provider: (params as { provider: string }).provider,
}));
configWriteAuthMocks.resolveConfigWriteDeniedText.mockReset();
configWriteAuthMocks.resolveConfigWriteDeniedText.mockReturnValue(null);
configWriteTargetMocks.resolveConfigWriteTargetFromPath.mockClear();
setActivePluginRegistry(
createTestRegistry([
...textSurfaceModelsTestPlugins,
@@ -167,6 +101,11 @@ beforeEach(() => {
plugin: telegramModelsTestPlugin,
source: "test",
},
{
pluginId: "menuonly",
plugin: menuOnlyModelsTestPlugin,
source: "test",
},
]),
);
});
@@ -228,10 +167,10 @@ describe("handleModelsCommand", () => {
expect(result?.reply?.text).toContain("- openai (2)");
expect(result?.reply?.text).toContain("Use: /models <provider>");
expect(result?.reply?.text).toContain("Switch: /model <provider/model>");
expect(result?.reply?.text).toContain("Add: /models add");
expect(result?.reply?.text).not.toContain("Add: /models add");
});
it("shows the add-model action in the telegram provider picker by default", async () => {
it("keeps the telegram provider picker browse-only", async () => {
const params = buildParams("/models");
params.ctx.Surface = "telegram";
params.command.channel = "telegram";
@@ -243,7 +182,6 @@ describe("handleModelsCommand", () => {
expect(result?.reply?.channelData).toEqual({
telegram: {
buttons: [
[{ text: "Add model", callback_data: "/models add" }],
[{ text: "anthropic", callback_data: "models:anthropic" }],
[{ text: "google", callback_data: "models:google" }],
[{ text: "openai", callback_data: "models:openai" }],
@@ -252,27 +190,19 @@ describe("handleModelsCommand", () => {
});
});
it("keeps the telegram provider picker browse-only when modelsWrite is disabled", async () => {
const params = buildParams("/models", {
commands: {
text: true,
modelsWrite: false,
},
});
params.ctx.Surface = "telegram";
params.command.channel = "telegram";
params.command.surface = "telegram";
it("keeps plugin menu hook compatibility for provider pickers", async () => {
const params = buildParams("/models");
params.ctx.Surface = "menuonly";
params.command.channel = "menuonly";
params.command.surface = "menuonly";
const result = await handleModelsCommand(params, true);
expect(result?.reply?.text).toBe("Select a provider:");
expect(result?.reply?.channelData).toEqual({
telegram: {
buttons: [
[{ text: "anthropic", callback_data: "models:anthropic" }],
[{ text: "google", callback_data: "models:google" }],
[{ text: "openai", callback_data: "models:openai" }],
],
menuonly: {
providerIds: ["anthropic", "google", "openai"],
labels: ["anthropic:2", "google:1", "openai:2"],
},
});
});
@@ -352,110 +282,30 @@ describe("handleModelsCommand", () => {
expect(result?.reply?.text).toContain("Models (anthropic · 🔑 target-auth) — showing 1-2 of 2");
});
it("guides /models add when no provider is given", async () => {
it("returns a deprecation message for /models add when no provider is given", async () => {
const result = await handleModelsCommand(buildParams("/models add"), true);
expect(result?.reply?.text).toContain(
"Add a model: choose a provider, then send one of these example commands.",
);
expect(result?.reply?.text).toContain(
"These examples use models that already exist for those providers.",
);
expect(result?.reply?.text).toContain("```text");
expect(result?.reply?.text).toContain("/models add ollama glm-5.1:cloud");
expect(result?.reply?.text).toContain("/models add lmstudio qwen/qwen3.5-9b");
expect(result?.reply?.text).toContain("/models add <provider> <modelId>");
expect(result?.reply?.text).toContain("Generic form:");
expect(result?.reply?.text).toContain("/models add <provider> <modelId>");
expect(result?.reply?.text).toContain("- anthropic");
expect(result?.reply?.text).toContain("- lmstudio");
expect(result?.reply?.text).toContain("- ollama");
expect(result?.reply?.text).toContain("- openai");
expect(result).toEqual({
shouldContinue: false,
reply: { text: MODELS_ADD_DEPRECATED_TEXT },
});
});
it("guides /models add <provider> when the model id is missing", async () => {
it("returns a deprecation message for /models add <provider>", async () => {
const result = await handleModelsCommand(buildParams("/models add ollama"), true);
expect(result?.reply?.text).toContain("Add a model to ollama:");
expect(result?.reply?.text).toContain("```text\n/models add ollama <modelId>\n```");
expect(result?.reply?.text).toContain("```text\n/models ollama\n```");
expect(result).toEqual({
shouldContinue: false,
reply: { text: MODELS_ADD_DEPRECATED_TEXT },
});
});
it("explains when a selectable provider does not support /models add", async () => {
modelsAddMocks.validateAddProvider.mockReturnValueOnce({
ok: false,
providers: ["lmstudio", "ollama"],
knownProvider: "openai",
});
it("returns a deprecation message for /models add <provider> <modelId>", async () => {
const result = await handleModelsCommand(buildParams("/models add openai gpt-5.5"), true);
expect(result?.reply?.text).toContain(
"openai is available for model selection, but /models add cannot create models for this provider from chat.",
);
expect(result?.reply?.text).toContain("/models openai");
expect(result?.reply?.text).toContain("/model openai/<modelId>");
expect(result?.reply?.text).toContain("openclaw configure");
expect(result?.reply?.text).not.toContain("Unknown provider");
});
it("adds a model and points users back to browse or switch", async () => {
const result = await handleModelsCommand(buildParams("/models add ollama glm-5.1:cloud"), true);
expect(modelsAddMocks.addModelToConfig).toHaveBeenCalledWith(
expect.objectContaining({
provider: "ollama",
modelId: "glm-5.1:cloud",
}),
);
expect(result?.reply?.text).toContain("✅ Added model: ollama/glm-5.1:cloud.");
expect(result?.reply?.text).toContain("Browse:");
expect(result?.reply?.text).toContain("/models ollama");
expect(result?.reply?.text).toContain("Switch now:");
expect(result?.reply?.text).toContain("/model ollama/glm-5.1:cloud");
expect(result?.reply?.text).not.toContain("/models repair");
expect(result?.reply?.text).not.toContain("/models ollama/glm-5.1:cloud");
});
it("checks all config-write targets touched by /models add", async () => {
const result = await handleModelsCommand(buildParams("/models add ollama glm-5.1:cloud"), true);
expect(result?.shouldContinue).toBe(false);
expect(configWriteTargetMocks.resolveConfigWriteTargetFromPath).toHaveBeenCalledTimes(3);
expect(configWriteTargetMocks.resolveConfigWriteTargetFromPath.mock.calls).toEqual([
[["models", "providers", "ollama"]],
[["models", "providers", "ollama", "models"]],
[["agents", "defaults", "models"]],
]);
});
it("returns config-write denial text for add-time provider bootstrap", async () => {
configWriteAuthMocks.resolveConfigWriteDeniedText.mockReturnValueOnce("denied");
const result = await handleModelsCommand(buildParams("/models add ollama glm-5.1:cloud"), true);
expect(result).toEqual({
shouldContinue: false,
reply: { text: "denied" },
reply: { text: MODELS_ADD_DEPRECATED_TEXT },
});
expect(modelsAddMocks.addModelToConfig).not.toHaveBeenCalled();
});
it("rejects /models add when modelsWrite is disabled", async () => {
const result = await handleModelsCommand(
buildParams("/models add ollama glm-5.1:cloud", {
commands: { text: true, modelsWrite: false },
}),
true,
);
expect(result).toEqual({
shouldContinue: false,
reply: {
text: "⚠️ /models add is disabled. Set commands.modelsWrite=true to enable model registration.",
},
});
expect(modelsAddMocks.addModelToConfig).not.toHaveBeenCalled();
expect(configWriteTargetMocks.resolveConfigWriteTargetFromPath).not.toHaveBeenCalled();
});
});

View File

@@ -9,10 +9,7 @@ import {
resolveDefaultModelForAgent,
resolveModelRefFromString,
} from "../../agents/model-selection.js";
import { resolveConfigWriteTargetFromPath } from "../../channels/plugins/config-writes.js";
import { getChannelPlugin } from "../../channels/plugins/index.js";
import { normalizeChannelId } from "../../channels/registry.js";
import { isModelsWriteEnabled } from "../../config/commands.flags.js";
import type { SessionEntry } from "../../config/sessions.js";
import type { OpenClawConfig } from "../../config/types.openclaw.js";
import {
@@ -20,18 +17,13 @@ import {
normalizeOptionalString,
} from "../../shared/string-coerce.js";
import type { ReplyPayload } from "../types.js";
import { resolveChannelAccountId } from "./channel-context.js";
import {
rejectNonOwnerCommand,
rejectUnauthorizedCommand,
requireGatewayClientScopeForInternalChannel,
} from "./command-gates.js";
import { rejectUnauthorizedCommand } from "./command-gates.js";
import type { CommandHandler } from "./commands-types.js";
import { resolveConfigWriteDeniedText } from "./config-write-authorization.js";
import { addModelToConfig, listAddableProviders, validateAddProvider } from "./models-add.js";
const PAGE_SIZE_DEFAULT = 20;
const PAGE_SIZE_MAX = 100;
const MODELS_ADD_DEPRECATED_TEXT =
"⚠️ /models add is deprecated. Use /models to browse providers and /model to switch models.";
type ModelsCommandSessionEntry = Partial<
Pick<SessionEntry, "authProfileOverride" | "modelProvider" | "model">
@@ -271,7 +263,6 @@ export function formatModelsAvailableHeader(params: {
function buildModelsMenuText(params: {
providers: string[];
byProvider: ReadonlyMap<string, ReadonlySet<string>>;
includeAddAction?: boolean;
}): string {
return [
"Providers:",
@@ -284,44 +275,9 @@ function buildModelsMenuText(params: {
"",
"Use: /models <provider>",
"Switch: /model <provider/model>",
...(params.includeAddAction ? ["Add: /models add"] : []),
].join("\n");
}
function formatCopyableCommand(command: string): string {
return ["```text", command, "```"].join("\n");
}
function buildAddExamples(addableProviders: readonly string[]): string[] {
const examples: string[] = [];
if (addableProviders.includes("ollama")) {
examples.push("/models add ollama glm-5.1:cloud");
}
if (addableProviders.includes("lmstudio")) {
examples.push("/models add lmstudio qwen/qwen3.5-9b");
}
if (addableProviders.includes("codex")) {
examples.push("/models add codex gpt-5.4-mini");
}
if (addableProviders.includes("openai-codex")) {
examples.push("/models add openai-codex gpt-5.4");
}
if (examples.length === 0) {
examples.push("/models add <provider> <modelId>");
}
return examples.slice(0, 3);
}
function resolveWriteProvider(params: {
cfg: OpenClawConfig;
parsed: ParsedModelsCommand;
}): string | undefined {
if (params.parsed.action !== "add") {
return undefined;
}
return params.parsed.provider ? normalizeProviderId(params.parsed.provider) : undefined;
}
function buildProviderInfos(params: {
providers: string[];
byProvider: ReadonlyMap<string, ReadonlySet<string>>;
@@ -355,15 +311,12 @@ export async function resolveModelsCommandReply(params: {
);
const commandPlugin = params.surface ? getChannelPlugin(params.surface) : null;
const providerInfos = buildProviderInfos({ providers, byProvider });
const modelsWriteEnabled = isModelsWriteEnabled(params.cfg);
if (parsed.action === "providers") {
const channelData =
(modelsWriteEnabled
? commandPlugin?.commands?.buildModelsMenuChannelData?.({
providers: providerInfos,
})
: null) ??
commandPlugin?.commands?.buildModelsMenuChannelData?.({
providers: providerInfos,
}) ??
commandPlugin?.commands?.buildModelsProviderChannelData?.({
providers: providerInfos,
});
@@ -374,123 +327,12 @@ export async function resolveModelsCommandReply(params: {
};
}
return {
text: buildModelsMenuText({ providers, byProvider, includeAddAction: modelsWriteEnabled }),
text: buildModelsMenuText({ providers, byProvider }),
};
}
if (parsed.action === "add") {
if (!modelsWriteEnabled) {
return {
text: "⚠️ /models add is disabled. Set commands.modelsWrite=true to enable model registration.",
};
}
const addableProviders = listAddableProviders({
cfg: params.cfg,
discoveredProviders: providers,
});
if (!parsed.provider) {
const channelData = commandPlugin?.commands?.buildModelsAddProviderChannelData?.({
providers: addableProviders.map((id) => ({ id })),
});
return {
text: [
"Add a model: choose a provider, then send one of these example commands.",
"",
"These examples use models that already exist for those providers.",
"",
...buildAddExamples(addableProviders).flatMap((example) => [
formatCopyableCommand(example),
"",
]),
"Generic form:",
formatCopyableCommand("/models add <provider> <modelId>"),
"",
"Providers:",
...addableProviders.map((provider) => `- ${provider}`),
].join("\n"),
...(channelData ? { channelData } : {}),
};
}
const validatedProvider = validateAddProvider({
cfg: params.cfg,
provider: parsed.provider,
discoveredProviders: providers,
});
if (!validatedProvider.ok) {
if (validatedProvider.knownProvider) {
return {
text: [
`${validatedProvider.knownProvider} is available for model selection, but /models add cannot create models for this provider from chat.`,
"",
"Browse:",
`/models ${validatedProvider.knownProvider}`,
"",
"Switch:",
`/model ${validatedProvider.knownProvider}/<modelId>`,
"",
"To configure providers or auth, run:",
"openclaw configure",
].join("\n"),
};
}
return {
text: [
`Unknown provider: ${parsed.provider}`,
"",
"Available providers:",
...validatedProvider.providers.map((provider) => `- ${provider}`),
"",
"Use:",
"/models add <provider> <modelId>",
].join("\n"),
};
}
if (!parsed.modelId) {
return {
text: [
`Add a model to ${validatedProvider.provider}:`,
"",
"Use:",
formatCopyableCommand(`/models add ${validatedProvider.provider} <modelId>`),
"",
"Browse current models:",
formatCopyableCommand(`/models ${validatedProvider.provider}`),
].join("\n"),
};
}
const added = await addModelToConfig({
cfg: params.cfg,
provider: validatedProvider.provider,
modelId: parsed.modelId,
});
if (!added.ok) {
return {
text: `⚠️ ${added.error}`,
};
}
const modelRef = `${added.result.provider}/${added.result.modelId}`;
const warnings =
added.result.warnings.length > 0
? ["", ...added.result.warnings.map((warning) => `- ${warning}`)]
: [];
const allowlistNote = added.result.allowlistAdded ? " and added to the allowlist" : "";
return {
text: [
added.result.existed
? `✅ Model already exists: ${modelRef}${allowlistNote}.`
: `✅ Added model: ${modelRef}${allowlistNote}.`,
"Browse:",
`/models ${added.result.provider}`,
"",
"Switch now:",
`/model ${modelRef}`,
...warnings,
].join("\n"),
};
return { text: MODELS_ADD_DEPRECATED_TEXT };
}
const { provider, page, pageSize, all } = parsed;
@@ -506,7 +348,7 @@ export async function resolveModelsCommandReply(params: {
};
}
return {
text: buildModelsMenuText({ providers, byProvider, includeAddAction: modelsWriteEnabled }),
text: buildModelsMenuText({ providers, byProvider }),
};
}
@@ -623,59 +465,7 @@ export const handleModelsCommand: CommandHandler = async (params, allowTextComma
}
if (parsed.action === "add") {
if (!isModelsWriteEnabled(params.cfg)) {
return {
shouldContinue: false,
reply: {
text: "⚠️ /models add is disabled. Set commands.modelsWrite=true to enable model registration.",
},
};
}
const commandLabel = "/models add";
const nonOwner = rejectNonOwnerCommand(params, commandLabel);
if (nonOwner) {
return nonOwner;
}
const missingAdminScope = requireGatewayClientScopeForInternalChannel(params, {
label: commandLabel,
allowedScopes: ["operator.admin"],
missingText: "❌ /models add requires operator.admin for gateway clients.",
});
if (missingAdminScope) {
return missingAdminScope;
}
const writeProvider = resolveWriteProvider({
cfg: params.cfg,
parsed,
});
if (writeProvider) {
const channelId = params.command.channelId ?? normalizeChannelId(params.command.channel);
const accountId = resolveChannelAccountId({
cfg: params.cfg,
ctx: params.ctx,
command: params.command,
});
for (const path of [
["models", "providers", writeProvider],
["models", "providers", writeProvider, "models"],
["agents", "defaults", "models"],
]) {
const deniedText = resolveConfigWriteDeniedText({
cfg: params.cfg,
channel: params.command.channel,
channelId,
accountId,
gatewayClientScopes: params.ctx.GatewayClientScopes,
target: resolveConfigWriteTargetFromPath(path),
});
if (deniedText) {
return {
shouldContinue: false,
reply: { text: deniedText },
};
}
}
}
return { shouldContinue: false, reply: { text: MODELS_ADD_DEPRECATED_TEXT } };
}
const modelsAgentId = params.sessionKey

View File

@@ -1,696 +0,0 @@
import { beforeEach, describe, expect, it, vi } from "vitest";
import type { OpenClawConfig } from "../../config/types.openclaw.js";
import { addModelToConfig, listAddableProviders, validateAddProvider } from "./models-add.js";
const configMocks = vi.hoisted(() => ({
ConfigMutationConflictError: class ConfigMutationConflictError extends Error {
readonly currentHash: string | null;
constructor(message: string, params: { currentHash: string | null }) {
super(message);
this.name = "ConfigMutationConflictError";
this.currentHash = params.currentHash;
}
},
readConfigFileSnapshot: vi.fn(),
replaceConfigFile: vi.fn(),
validateConfigObjectWithPlugins: vi.fn(),
}));
const facadeRuntimeMocks = vi.hoisted(() => ({
loadBundledPluginPublicSurfaceModuleSync: vi.fn(),
}));
const ollamaMocks = vi.hoisted(() => ({
buildOllamaModelDefinition: vi.fn(
(modelId: string, contextWindow?: number, capabilities?: string[]) => ({
id: modelId,
name: modelId,
reasoning: /think|reason/i.test(modelId),
input: capabilities?.includes("vision") ? ["text", "image"] : ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: contextWindow ?? 32768,
maxTokens: 8192,
}),
),
queryOllamaModelShowInfo: vi.fn(),
}));
const lmstudioRuntimeMocks = vi.hoisted(() => ({
LMSTUDIO_DEFAULT_API_KEY_ENV_VAR: "LMSTUDIO_API_KEY",
LMSTUDIO_DEFAULT_INFERENCE_BASE_URL: "http://127.0.0.1:1234/v1",
fetchLmstudioModels: vi.fn(),
mapLmstudioWireEntry: vi.fn(
(entry: {
key: string;
displayName?: string;
display_name?: string;
max_context_length?: number;
capabilities?: { reasoning?: { allowed_options?: string[] } };
}) => ({
id: entry.key,
displayName: entry.displayName ?? entry.display_name ?? entry.key,
reasoning: (entry.capabilities?.reasoning?.allowed_options?.length ?? 0) > 0,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: entry.max_context_length ?? 32768,
maxTokens: 8192,
}),
),
resolveLmstudioInferenceBase: vi.fn((baseUrl?: string) => baseUrl ?? "http://127.0.0.1:1234/v1"),
resolveLmstudioRequestContext: vi.fn(),
}));
vi.mock("../../config/config.js", () => ({
ConfigMutationConflictError: configMocks.ConfigMutationConflictError,
readConfigFileSnapshot: configMocks.readConfigFileSnapshot,
replaceConfigFile: configMocks.replaceConfigFile,
validateConfigObjectWithPlugins: configMocks.validateConfigObjectWithPlugins,
}));
vi.mock("../../plugin-sdk/facade-runtime.js", async () => {
const actual = await vi.importActual<typeof import("../../plugin-sdk/facade-runtime.js")>(
"../../plugin-sdk/facade-runtime.js",
);
return {
...actual,
loadBundledPluginPublicSurfaceModuleSync:
facadeRuntimeMocks.loadBundledPluginPublicSurfaceModuleSync,
};
});
vi.mock("../../plugin-sdk/lmstudio-runtime.js", () => {
return {
LMSTUDIO_DEFAULT_API_KEY_ENV_VAR: lmstudioRuntimeMocks.LMSTUDIO_DEFAULT_API_KEY_ENV_VAR,
LMSTUDIO_DEFAULT_INFERENCE_BASE_URL: lmstudioRuntimeMocks.LMSTUDIO_DEFAULT_INFERENCE_BASE_URL,
fetchLmstudioModels: lmstudioRuntimeMocks.fetchLmstudioModels,
mapLmstudioWireEntry: lmstudioRuntimeMocks.mapLmstudioWireEntry,
resolveLmstudioInferenceBase: lmstudioRuntimeMocks.resolveLmstudioInferenceBase,
resolveLmstudioRequestContext: lmstudioRuntimeMocks.resolveLmstudioRequestContext,
};
});
describe("models-add", () => {
beforeEach(() => {
configMocks.readConfigFileSnapshot.mockReset();
configMocks.replaceConfigFile.mockReset();
configMocks.validateConfigObjectWithPlugins.mockReset();
facadeRuntimeMocks.loadBundledPluginPublicSurfaceModuleSync.mockReset();
facadeRuntimeMocks.loadBundledPluginPublicSurfaceModuleSync.mockImplementation((params) => {
if (
params &&
typeof params === "object" &&
"dirName" in params &&
params.dirName === "ollama" &&
"artifactBasename" in params &&
params.artifactBasename === "api.js"
) {
return {
buildOllamaModelDefinition: ollamaMocks.buildOllamaModelDefinition,
queryOllamaModelShowInfo: ollamaMocks.queryOllamaModelShowInfo,
};
}
if (
params &&
typeof params === "object" &&
"dirName" in params &&
params.dirName === "openai" &&
"artifactBasename" in params &&
params.artifactBasename === "api.js"
) {
return {
buildOpenAICodexProvider: () => ({
baseUrl: "https://chatgpt.com/backend-api",
api: "openai-codex-responses",
models: [],
}),
buildOpenAICodexProviderPlugin: () => ({
resolveDynamicModel: ({ modelId }: { modelId: string }) => {
const common = {
id: modelId,
name: modelId,
api: "openai-codex-responses",
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api/codex",
reasoning: true,
input: ["text", "image"],
contextTokens: 272_000,
maxTokens: 128_000,
} as const;
switch (modelId) {
case "gpt-5.4":
return {
...common,
contextWindow: 1_050_000,
cost: { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 },
};
case "gpt-5.5":
return {
...common,
contextWindow: 1_000_000,
cost: { input: 5, output: 30, cacheRead: 0, cacheWrite: 0 },
};
case "gpt-5.5-pro":
return {
...common,
contextWindow: 1_000_000,
cost: { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 },
};
default:
return undefined;
}
},
}),
};
}
throw new Error(`Unexpected facade load: ${JSON.stringify(params)}`);
});
ollamaMocks.buildOllamaModelDefinition.mockClear();
ollamaMocks.queryOllamaModelShowInfo.mockReset();
ollamaMocks.queryOllamaModelShowInfo.mockResolvedValue({});
lmstudioRuntimeMocks.fetchLmstudioModels.mockReset();
lmstudioRuntimeMocks.mapLmstudioWireEntry.mockClear();
lmstudioRuntimeMocks.resolveLmstudioInferenceBase.mockClear();
lmstudioRuntimeMocks.resolveLmstudioRequestContext.mockReset();
});
it("lists addable providers only when the write path can actually add them", () => {
const cfg = {
models: {
providers: {
lmstudio: { baseUrl: "http://localhost:1234/v1", api: "openai-completions", models: [] },
},
},
} as OpenClawConfig;
expect(
listAddableProviders({
cfg,
discoveredProviders: ["openai", "openai-codex", "ollama"],
}),
).toEqual(["lmstudio", "ollama", "openai-codex"]);
});
it("validates add providers against addable providers", () => {
const cfg = {} as OpenClawConfig;
expect(validateAddProvider({ cfg, provider: "ollama", discoveredProviders: [] })).toEqual({
ok: true,
provider: "ollama",
});
expect(validateAddProvider({ cfg, provider: "missing", discoveredProviders: [] })).toEqual({
ok: false,
providers: ["lmstudio", "ollama"],
});
});
it("only bootstraps openai-codex when the provider is discovered", () => {
const cfg = {} as OpenClawConfig;
expect(validateAddProvider({ cfg, provider: "openai-codex", discoveredProviders: [] })).toEqual(
{
ok: false,
providers: ["lmstudio", "ollama"],
},
);
expect(
validateAddProvider({
cfg,
provider: "openai-codex",
discoveredProviders: ["openai-codex"],
}),
).toEqual({
ok: true,
provider: "openai-codex",
});
});
it("rejects discovered providers that are not configured for custom models", () => {
const cfg = {} as OpenClawConfig;
expect(
validateAddProvider({
cfg,
provider: "openai",
discoveredProviders: ["openai"],
}),
).toEqual({
ok: false,
providers: ["lmstudio", "ollama"],
knownProvider: "openai",
});
});
it("adds an ollama model and extends the allowlist when needed", async () => {
const cfg = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
models: {
"anthropic/claude-opus-4-5": {},
},
},
},
models: {
providers: {
ollama: {
baseUrl: "http://127.0.0.1:11434",
api: "ollama",
models: [],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
ollamaMocks.queryOllamaModelShowInfo.mockResolvedValue({
contextWindow: 202752,
capabilities: ["thinking", "tools"],
});
configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
ok: true,
config,
}));
const result = await addModelToConfig({
cfg,
provider: "ollama",
modelId: "glm-5.1:cloud",
});
expect(result.ok).toBe(true);
if (!result.ok) {
return;
}
expect(result.result.existed).toBe(false);
expect(result.result.allowlistAdded).toBe(true);
expect(configMocks.replaceConfigFile).toHaveBeenCalledTimes(1);
const written = configMocks.replaceConfigFile.mock.calls[0]?.[0]?.nextConfig as OpenClawConfig;
expect(written.models?.providers?.ollama?.models).toEqual([
expect.objectContaining({
id: "glm-5.1:cloud",
reasoning: false,
contextWindow: 202752,
}),
]);
expect(written.agents?.defaults?.models?.["ollama/glm-5.1:cloud"]).toEqual({});
});
it("reuses an existing configured provider key when the stored key is non-canonical", async () => {
const cfg = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
models: {
"anthropic/claude-opus-4-5": {},
},
},
},
models: {
providers: {
Ollama: {
baseUrl: "http://127.0.0.1:11434",
api: "ollama",
models: [],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
ollamaMocks.queryOllamaModelShowInfo.mockResolvedValue({
contextWindow: 202752,
capabilities: ["thinking"],
});
configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
ok: true,
config,
}));
const result = await addModelToConfig({
cfg,
provider: "ollama",
modelId: "glm-5.1:cloud",
});
expect(result.ok).toBe(true);
if (!result.ok) {
return;
}
const written = configMocks.replaceConfigFile.mock.calls[0]?.[0]?.nextConfig as OpenClawConfig;
expect(written.models?.providers?.Ollama?.models).toEqual([
expect.objectContaining({
id: "glm-5.1:cloud",
}),
]);
expect(written.models?.providers?.ollama).toBeUndefined();
});
it("treats duplicate provider/model entries as idempotent", async () => {
const cfg = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
},
},
models: {
providers: {
ollama: {
baseUrl: "http://127.0.0.1:11434",
api: "ollama",
models: [
{
id: "glm-5.1:cloud",
name: "glm-5.1:cloud",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 202752,
maxTokens: 8192,
},
],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
const result = await addModelToConfig({
cfg,
provider: "ollama",
modelId: "glm-5.1:cloud",
});
expect(result).toEqual({
ok: true,
result: {
provider: "ollama",
modelId: "glm-5.1:cloud",
existed: true,
allowlistAdded: false,
warnings: ["Model metadata could not be auto-detected; saved with default capabilities."],
},
});
expect(configMocks.replaceConfigFile).not.toHaveBeenCalled();
});
it("bootstraps lmstudio provider config when missing", async () => {
const cfg = {
agents: { defaults: { model: { primary: "anthropic/claude-opus-4-5" } } },
models: { providers: {} },
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
lmstudioRuntimeMocks.resolveLmstudioRequestContext.mockResolvedValue({
apiKey: undefined,
headers: undefined,
});
lmstudioRuntimeMocks.fetchLmstudioModels.mockResolvedValue({
reachable: true,
status: 200,
models: [
{
type: "llm",
key: "qwen/qwen3.5-9b",
display_name: "Qwen 3.5 9B",
max_context_length: 131072,
capabilities: { reasoning: { allowed_options: ["off", "on"] } },
},
],
});
configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
ok: true,
config,
}));
const result = await addModelToConfig({
cfg,
provider: "lmstudio",
modelId: "qwen/qwen3.5-9b",
});
expect(result.ok).toBe(true);
const written = configMocks.replaceConfigFile.mock.calls[0]?.[0]?.nextConfig as OpenClawConfig;
expect(written.models?.providers?.lmstudio?.baseUrl).toBe("http://127.0.0.1:1234/v1");
expect(written.models?.providers?.lmstudio?.api).toBe("openai-completions");
expect(written.models?.providers?.lmstudio?.models).toEqual([
expect.objectContaining({
id: "qwen/qwen3.5-9b",
name: "Qwen 3.5 9B",
}),
]);
});
it.each([
[
"gpt-5.4",
{
contextWindow: 1_050_000,
cost: { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 },
},
],
[
"gpt-5.5",
{
contextWindow: 1_000_000,
cost: { input: 5, output: 30, cacheRead: 0, cacheWrite: 0 },
},
],
[
"gpt-5.5-pro",
{
contextWindow: 1_000_000,
cost: { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 },
},
],
])(
"bootstraps openai-codex metadata for %s from the provider plugin",
async (modelId, expected) => {
const cfg = {
agents: {
defaults: {
model: { primary: "openai-codex/gpt-5.4" },
models: {
"openai-codex/gpt-5.3": {},
},
},
},
models: { providers: {} },
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
ok: true,
config,
}));
const result = await addModelToConfig({
cfg,
provider: "openai-codex",
modelId,
});
expect(result.ok).toBe(true);
if (!result.ok) {
return;
}
expect(result.result.allowlistAdded).toBe(true);
expect(result.result.warnings).toEqual([
"OpenAI Codex model metadata was saved from provider defaults; provider availability still depends on your Codex account.",
]);
const written = configMocks.replaceConfigFile.mock.calls[0]?.[0]
?.nextConfig as OpenClawConfig;
expect(written.models?.providers?.["openai-codex"]).toMatchObject({
baseUrl: "https://chatgpt.com/backend-api",
api: "openai-codex-responses",
models: [
expect.objectContaining({
id: modelId,
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api/codex",
reasoning: true,
input: ["text", "image"],
contextWindow: expected.contextWindow,
contextTokens: 272_000,
maxTokens: 128_000,
cost: expected.cost,
metadataSource: "models-add",
}),
],
});
expect(written.agents?.defaults?.models?.[`openai-codex/${modelId}`]).toEqual({});
},
);
it("returns a generic validation error when config validation fails without issue details", async () => {
const cfg = {
models: {
providers: {
ollama: {
baseUrl: "http://127.0.0.1:11434",
api: "ollama",
models: [],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
ollamaMocks.queryOllamaModelShowInfo.mockResolvedValue({
contextWindow: 202752,
capabilities: ["thinking"],
});
configMocks.validateConfigObjectWithPlugins.mockReturnValue({
ok: false,
issues: [],
});
const result = await addModelToConfig({
cfg,
provider: "ollama",
modelId: "glm-5.1:cloud",
});
expect(result).toEqual({
ok: false,
error: "Config invalid after /models add (unknown validation error).",
});
});
it("skips lmstudio metadata detection for non-loopback base urls before resolving auth", async () => {
const cfg = {
models: {
providers: {
lmstudio: {
baseUrl: "https://example.com/v1",
api: "openai-completions",
models: [],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
ok: true,
config,
}));
const result = await addModelToConfig({
cfg,
provider: "lmstudio",
modelId: "qwen/qwen3.5-9b",
});
expect(result.ok).toBe(true);
if (!result.ok) {
return;
}
expect(lmstudioRuntimeMocks.resolveLmstudioRequestContext).not.toHaveBeenCalled();
expect(lmstudioRuntimeMocks.fetchLmstudioModels).not.toHaveBeenCalled();
expect(result.result.warnings).toContain(
"LM Studio metadata detection is limited to local baseUrl values; using defaults.",
);
});
it("does not leak raw lmstudio detection errors in user-facing warnings", async () => {
const cfg = {
models: {
providers: {
lmstudio: {
baseUrl: "http://localhost:1234/v1",
api: "openai-completions",
models: [],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
lmstudioRuntimeMocks.resolveLmstudioRequestContext.mockResolvedValue({
apiKey: "secret-token",
headers: { Authorization: "Bearer secret-token" },
});
lmstudioRuntimeMocks.fetchLmstudioModels.mockRejectedValue(
new Error("connect ECONNREFUSED http://127.0.0.1:1234/v1/api/v1/models"),
);
configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
ok: true,
config,
}));
const result = await addModelToConfig({
cfg,
provider: "lmstudio",
modelId: "qwen/qwen3.5-9b",
});
expect(result.ok).toBe(true);
if (!result.ok) {
return;
}
expect(result.result.warnings).toContain(
"LM Studio metadata detection failed; using defaults.",
);
expect(result.result.warnings.join(" ")).not.toContain("ECONNREFUSED");
expect(result.result.warnings.join(" ")).not.toContain("127.0.0.1");
});
it("returns a retryable error when the config changes before replace", async () => {
const cfg = {
models: {
providers: {
ollama: {
baseUrl: "http://127.0.0.1:11434",
api: "ollama",
models: [],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
hash: "base-hash",
});
ollamaMocks.queryOllamaModelShowInfo.mockResolvedValue({
contextWindow: 202752,
capabilities: ["thinking"],
});
configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
ok: true,
config,
}));
configMocks.replaceConfigFile.mockRejectedValue(
new configMocks.ConfigMutationConflictError("config changed since last load", {
currentHash: "new-hash",
}),
);
const result = await addModelToConfig({
cfg,
provider: "ollama",
modelId: "glm-5.1:cloud",
});
expect(result).toEqual({
ok: false,
error: "Config changed while /models add was running. Retry the command.",
});
});
});

View File

@@ -1,675 +0,0 @@
import {
buildConfiguredAllowlistKeys,
normalizeProviderId,
resolveDefaultModelForAgent,
resolveModelRefFromString,
} from "../../agents/model-selection.js";
import {
SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
SELF_HOSTED_DEFAULT_COST,
SELF_HOSTED_DEFAULT_MAX_TOKENS,
} from "../../agents/self-hosted-provider-defaults.js";
import {
ConfigMutationConflictError,
readConfigFileSnapshot,
replaceConfigFile,
validateConfigObjectWithPlugins,
} from "../../config/config.js";
import type { ModelDefinitionConfig, ModelProviderConfig } from "../../config/types.models.js";
import type { OpenClawConfig } from "../../config/types.openclaw.js";
import { formatErrorMessage } from "../../infra/errors.js";
import { normalizeHostname } from "../../infra/net/hostname.js";
import { createSubsystemLogger } from "../../logging/subsystem.js";
import { buildRemoteBaseUrlPolicy } from "../../memory-host-sdk/host/remote-http.js";
import {
createLazyFacadeValue,
loadBundledPluginPublicSurfaceModuleSync,
} from "../../plugin-sdk/facade-runtime.js";
import {
fetchLmstudioModels,
LMSTUDIO_DEFAULT_API_KEY_ENV_VAR,
LMSTUDIO_DEFAULT_INFERENCE_BASE_URL,
mapLmstudioWireEntry,
resolveLmstudioInferenceBase,
resolveLmstudioRequestContext,
} from "../../plugin-sdk/lmstudio-runtime.js";
import type { ProviderRuntimeModel } from "../../plugins/provider-runtime-model.types.js";
import { isLoopbackIpAddress } from "../../shared/net/ip.js";
import {
normalizeLowercaseStringOrEmpty,
normalizeOptionalString,
} from "../../shared/string-coerce.js";
/**
 * Per-provider hooks used by the /models add write path.
 *
 * - bootstrapMode "discovered" limits bootstrapping to providers the
 *   discovery pass reported; "always" (the default) allows unconditional
 *   bootstrapping.
 * - bootstrapProviderConfig builds a provider config when none is stored.
 * - detect probes the provider for model metadata; found=false falls back to
 *   default capabilities with a warning.
 */
export type ModelAddAdapter = {
providerId: string;
bootstrapMode?: "always" | "discovered";
bootstrapProviderConfig?: (cfg: OpenClawConfig) => ModelProviderConfig | null;
detect?: (params: {
cfg: OpenClawConfig;
providerConfig: ModelProviderConfig;
modelId: string;
}) => Promise<{
found: boolean;
model?: ModelDefinitionConfig;
warnings?: string[];
}>;
};
/** Successful /models add result surfaced to the caller. */
type AddModelOutcome = {
provider: string;
modelId: string;
existed: boolean;
allowlistAdded: boolean;
warnings: string[];
};
/** knownProvider is set when the id was discovered but is not addable. */
export type ValidateAddProviderResult =
| { ok: true; provider: string }
| { ok: false; providers: string[]; knownProvider?: string };
/** Subset of `ollama show` output consumed for metadata detection. */
type OllamaModelShowInfo = {
contextWindow?: number;
capabilities?: string[];
};
/** Public surface of the bundled ollama plugin's api.js artifact. */
type OllamaApiFacade = {
buildOllamaModelDefinition: (
modelId: string,
contextWindow?: number,
capabilities?: string[],
) => ModelDefinitionConfig;
queryOllamaModelShowInfo: (apiBase: string, modelName: string) => Promise<OllamaModelShowInfo>;
};
/** Public surface of the bundled openai plugin's api.js artifact. */
type OpenAIApiFacade = {
buildOpenAICodexProvider: () => ModelProviderConfig;
buildOpenAICodexProviderPlugin: () => {
resolveDynamicModel?: (ctx: {
provider: string;
modelId: string;
modelRegistry: { find: () => null };
}) => ProviderRuntimeModel | null | undefined;
};
};
const log = createSubsystemLogger("models-add");
// Default local Ollama endpoint used when bootstrapping a provider config.
const OLLAMA_DEFAULT_BASE_URL = "http://127.0.0.1:11434";
// Loaders for the bundled plugin artifacts; wrapped in createLazyFacadeValue
// below so the modules are only loaded on first use.
function loadOllamaApiFacade(): OllamaApiFacade {
return loadBundledPluginPublicSurfaceModuleSync<OllamaApiFacade>({
dirName: "ollama",
artifactBasename: "api.js",
});
}
function loadOpenAIApiFacade(): OpenAIApiFacade {
return loadBundledPluginPublicSurfaceModuleSync<OpenAIApiFacade>({
dirName: "openai",
artifactBasename: "api.js",
});
}
// Lazily-resolved facade functions; calling one triggers the plugin load.
const buildOllamaModelDefinition: OllamaApiFacade["buildOllamaModelDefinition"] =
createLazyFacadeValue(loadOllamaApiFacade, "buildOllamaModelDefinition");
const queryOllamaModelShowInfo: OllamaApiFacade["queryOllamaModelShowInfo"] = createLazyFacadeValue(
loadOllamaApiFacade,
"queryOllamaModelShowInfo",
);
const buildOpenAICodexProvider: OpenAIApiFacade["buildOpenAICodexProvider"] = createLazyFacadeValue(
loadOpenAIApiFacade,
"buildOpenAICodexProvider",
);
const buildOpenAICodexProviderPlugin: OpenAIApiFacade["buildOpenAICodexProviderPlugin"] =
createLazyFacadeValue(loadOpenAIApiFacade, "buildOpenAICodexProviderPlugin");
/**
 * Strip credentials, query string, and fragment from a URL before logging.
 *
 * Returns undefined for blank input and the literal "[invalid_url]" when the
 * value cannot be parsed, so log lines never carry raw secrets.
 */
function sanitizeUrlForLogs(raw: string | undefined): string | undefined {
  const candidate = normalizeOptionalString(raw);
  if (!candidate) {
    return undefined;
  }
  let parsed: URL;
  try {
    parsed = new URL(candidate);
  } catch {
    return "[invalid_url]";
  }
  parsed.username = "";
  parsed.password = "";
  parsed.search = "";
  parsed.hash = "";
  return parsed.toString();
}
/**
 * Fallback model definition used when metadata detection finds nothing:
 * text-only, non-reasoning, with the self-hosted default limits and cost.
 */
function buildDefaultModelDefinition(modelId: string): ModelDefinitionConfig {
  const fallback: ModelDefinitionConfig = {
    id: modelId,
    name: modelId,
    reasoning: false,
    input: ["text"],
    cost: SELF_HOSTED_DEFAULT_COST,
    contextWindow: SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
    maxTokens: SELF_HOSTED_DEFAULT_MAX_TOKENS,
  };
  return fallback;
}
/**
 * Build a model definition for an OpenAI Codex model id.
 *
 * Prefers metadata from the provider plugin's resolveDynamicModel hook and
 * falls back to conservative defaults when the plugin does not know the id.
 * Rows are tagged metadataSource: "models-add" so official metadata can
 * override them later.
 */
function buildOpenAICodexModelDefinition(modelId: string): ModelDefinitionConfig {
  const resolved = buildOpenAICodexProviderPlugin().resolveDynamicModel?.({
    provider: "openai-codex",
    modelId,
    modelRegistry: { find: () => null },
  });
  if (!resolved) {
    // Unknown to the plugin: save conservative reasoning-capable defaults.
    return {
      id: modelId,
      name: modelId,
      api: "openai-codex-responses",
      reasoning: true,
      input: ["text", "image"],
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
      contextWindow: SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
      maxTokens: SELF_HOSTED_DEFAULT_MAX_TOKENS,
      metadataSource: "models-add",
    };
  }
  const definition: ModelDefinitionConfig = {
    id: resolved.id,
    name: resolved.name,
    api: "openai-codex-responses",
    baseUrl: resolved.baseUrl,
    reasoning: resolved.reasoning,
    input: [...resolved.input],
    cost: resolved.cost,
    contextWindow: resolved.contextWindow,
    ...(resolved.contextTokens ? { contextTokens: resolved.contextTokens } : {}),
    maxTokens: resolved.maxTokens,
    ...(resolved.headers ? { headers: resolved.headers } : {}),
    ...(resolved.compat ? { compat: resolved.compat } : {}),
    metadataSource: "models-add",
  };
  return definition;
}
function resolveConfiguredProvider(
cfg: OpenClawConfig,
providerId: string,
): { providerKey: string; providerConfig: ModelProviderConfig } | undefined {
const normalizedProviderId = normalizeProviderId(providerId);
if (!normalizedProviderId) {
return undefined;
}
const providers = cfg.models?.providers;
if (!providers) {
return undefined;
}
for (const [configuredProviderId, configuredProvider] of Object.entries(providers)) {
if (normalizeProviderId(configuredProviderId) === normalizedProviderId) {
return {
providerKey: configuredProviderId,
providerConfig: configuredProvider,
};
}
}
return undefined;
}
/** Default LM Studio provider config pointing at the local inference server. */
function buildDefaultLmstudioProviderConfig(): ModelProviderConfig {
  const defaults: ModelProviderConfig = {
    baseUrl: resolveLmstudioInferenceBase(LMSTUDIO_DEFAULT_INFERENCE_BASE_URL),
    api: "openai-completions",
    auth: "api-key",
    apiKey: LMSTUDIO_DEFAULT_API_KEY_ENV_VAR,
    models: [],
  };
  return defaults;
}
/**
 * True when the LM Studio baseUrl is an http(s) URL whose host is localhost
 * or a loopback IP. Metadata detection is only attempted against local
 * servers so credentials are never sent off-box.
 */
function isLocalLmstudioBaseUrl(baseUrl: string | undefined): boolean {
  const candidate = normalizeOptionalString(baseUrl);
  if (!candidate) {
    return false;
  }
  let parsed: URL;
  try {
    parsed = new URL(candidate);
  } catch {
    return false;
  }
  if (parsed.protocol !== "http:" && parsed.protocol !== "https:") {
    return false;
  }
  const host = normalizeHostname(parsed.hostname);
  if (host === "localhost" || host === "localhost.localdomain") {
    return true;
  }
  return isLoopbackIpAddress(host);
}
/**
 * Provider adapters for /models add, keyed by canonical provider id.
 * Each entry can bootstrap a provider config and/or detect model metadata;
 * providers absent here fall back to default capabilities.
 */
const MODEL_ADD_ADAPTERS: Record<string, ModelAddAdapter> = {
// Only addable when discovery reported it (bootstrapMode "discovered");
// metadata comes from the provider plugin, not a network probe.
"openai-codex": {
providerId: "openai-codex",
bootstrapMode: "discovered",
bootstrapProviderConfig: () => ({
...buildOpenAICodexProvider(),
models: [],
}),
detect: async ({ modelId }) => ({
found: true,
model: buildOpenAICodexModelDefinition(modelId),
warnings: [
"OpenAI Codex model metadata was saved from provider defaults; provider availability still depends on your Codex account.",
],
}),
},
// Probes the local Ollama daemon via `show` for context window/capabilities.
ollama: {
providerId: "ollama",
bootstrapProviderConfig: () => ({
baseUrl: OLLAMA_DEFAULT_BASE_URL,
api: "ollama",
apiKey: "ollama-local",
models: [],
}),
detect: async ({ providerConfig, modelId }) => {
const info = (await queryOllamaModelShowInfo(providerConfig.baseUrl, modelId)) ?? {};
return {
// found only when the daemon reported usable metadata.
found: typeof info.contextWindow === "number" || (info.capabilities?.length ?? 0) > 0,
model: buildOllamaModelDefinition(modelId, info.contextWindow, info.capabilities),
};
},
},
// Queries the LM Studio /models API, but only against local base URLs; the
// locality check runs BEFORE auth resolution so credentials never leave the
// machine, and fetch errors are sanitized to a fixed warning.
lmstudio: {
providerId: "lmstudio",
bootstrapProviderConfig: () => buildDefaultLmstudioProviderConfig(),
detect: async ({ cfg, providerConfig, modelId }) => {
if (!isLocalLmstudioBaseUrl(providerConfig.baseUrl)) {
return {
found: false,
warnings: [
"LM Studio metadata detection is limited to local baseUrl values; using defaults.",
],
};
}
try {
// Resolve auth against a config view that includes the (possibly
// bootstrapped) provider entry.
const { apiKey, headers } = await resolveLmstudioRequestContext({
config: {
...cfg,
models: {
...cfg.models,
providers: {
...cfg.models?.providers,
lmstudio: providerConfig,
},
},
},
env: process.env,
providerHeaders: providerConfig.headers,
});
const fetched = await fetchLmstudioModels({
baseUrl: providerConfig.baseUrl,
apiKey,
headers,
ssrfPolicy: buildRemoteBaseUrlPolicy(providerConfig.baseUrl),
});
const match = fetched.models.find(
(entry) => normalizeOptionalString(entry.key) === modelId,
);
const base = match ? mapLmstudioWireEntry(match) : null;
if (!base) {
return { found: false };
}
return {
found: true,
model: {
id: base.id,
name: base.displayName,
reasoning: base.reasoning,
input: base.input,
cost: base.cost,
contextWindow: base.contextWindow,
contextTokens: base.contextTokens,
maxTokens: base.maxTokens,
},
};
} catch (error) {
// Log the raw error (with sanitized URL); never surface it to users.
log.warn("lmstudio model metadata detection failed; using defaults", {
baseUrl: sanitizeUrlForLogs(providerConfig.baseUrl),
modelId,
error: formatErrorMessage(error),
});
return {
found: false,
warnings: ["LM Studio metadata detection failed; using defaults."],
};
}
},
},
};
/**
 * A provider can take /models add writes when it is already configured, or
 * when an adapter can bootstrap its config. Adapters with bootstrapMode
 * "discovered" additionally require allowDiscoveredBootstrap.
 */
function canAddProvider(params: {
  cfg: OpenClawConfig;
  provider: string;
  allowDiscoveredBootstrap?: boolean;
}): boolean {
  const normalized = normalizeProviderId(params.provider);
  if (!normalized) {
    return false;
  }
  if (resolveConfiguredProvider(params.cfg, normalized)) {
    return true;
  }
  const adapter = MODEL_ADD_ADAPTERS[normalized];
  const bootstrap = adapter?.bootstrapProviderConfig;
  if (!bootstrap) {
    return false;
  }
  const requiresDiscovery = adapter.bootstrapMode === "discovered";
  if (requiresDiscovery && !params.allowDiscoveredBootstrap) {
    return false;
  }
  return Boolean(bootstrap(params.cfg));
}
/**
 * Collect normalized provider ids that /models add can actually write to:
 * discovered providers the write path can bootstrap, every configured
 * provider, and adapters that bootstrap unconditionally. Sorted for stable
 * CLI output.
 */
export function listAddableProviders(params: {
  cfg: OpenClawConfig;
  discoveredProviders?: readonly string[];
}): string[] {
  const addable = new Set<string>();
  for (const raw of params.discoveredProviders ?? []) {
    const id = normalizeProviderId(raw);
    if (!id) {
      continue;
    }
    const ok = canAddProvider({
      cfg: params.cfg,
      provider: id,
      allowDiscoveredBootstrap: true,
    });
    if (ok) {
      addable.add(id);
    }
  }
  for (const storedKey of Object.keys(params.cfg.models?.providers ?? {})) {
    const id = normalizeProviderId(storedKey);
    if (id) {
      addable.add(id);
    }
  }
  for (const [id, adapter] of Object.entries(MODEL_ADD_ADAPTERS)) {
    if (adapter.bootstrapMode !== "discovered") {
      addable.add(id);
    }
  }
  return [...addable].toSorted();
}
/**
 * Validate a /models add provider argument against the addable set.
 * On failure the result lists the addable providers; knownProvider is set
 * when the id was discovered but is not addable (e.g. a bundled provider
 * without custom-model support).
 */
export function validateAddProvider(params: {
  cfg: OpenClawConfig;
  provider: string;
  discoveredProviders?: readonly string[];
}): ValidateAddProviderResult {
  const requested = normalizeProviderId(params.provider);
  const providers = listAddableProviders({
    cfg: params.cfg,
    discoveredProviders: params.discoveredProviders,
  });
  if (requested && providers.includes(requested)) {
    return { ok: true, provider: requested };
  }
  const knownProvider = (params.discoveredProviders ?? [])
    .map((candidate) => normalizeProviderId(candidate))
    .find((candidate) => candidate === requested);
  if (knownProvider) {
    return { ok: false, providers, knownProvider };
  }
  return { ok: false, providers };
}
function ensureProviderConfig(params: { cfg: OpenClawConfig; provider: string }):
| {
ok: true;
providerKey: string;
providerConfig: ModelProviderConfig;
bootstrapped: boolean;
}
| { ok: false } {
const configuredProvider = resolveConfiguredProvider(params.cfg, params.provider);
if (configuredProvider) {
return {
ok: true,
providerKey: configuredProvider.providerKey,
providerConfig: configuredProvider.providerConfig,
bootstrapped: false,
};
}
const bootstrapped = MODEL_ADD_ADAPTERS[params.provider]?.bootstrapProviderConfig?.(params.cfg);
if (!bootstrapped) {
return { ok: false };
}
return {
ok: true,
providerKey: params.provider,
providerConfig: bootstrapped,
bootstrapped: true,
};
}
/**
 * Detect model metadata via the provider adapter, falling back to default
 * capabilities (plus a warning) when no adapter exists or detection misses.
 * Adapter warnings are preserved ahead of the fallback warning.
 */
async function detectModelDefinition(params: {
  cfg: OpenClawConfig;
  provider: string;
  providerConfig: ModelProviderConfig;
  modelId: string;
}): Promise<{ model: ModelDefinitionConfig; warnings: string[] }> {
  const fallbackWarning =
    "Model metadata could not be auto-detected; saved with default capabilities.";
  const adapter = MODEL_ADD_ADAPTERS[params.provider];
  if (!adapter?.detect) {
    return {
      model: buildDefaultModelDefinition(params.modelId),
      warnings: [fallbackWarning],
    };
  }
  const detection = await adapter.detect(params);
  const detectionWarnings = detection.warnings ?? [];
  if (detection.found && detection.model) {
    return { model: detection.model, warnings: detectionWarnings };
  }
  return {
    model: buildDefaultModelDefinition(params.modelId),
    warnings: [...detectionWarnings, fallbackWarning],
  };
}
/**
 * Probe adapter-backed metadata detection for a provider/model pair without
 * writing any config. supported=false when the inputs are invalid or the
 * provider has no detect hook; found=true only when detection produced a
 * complete model definition.
 */
export async function detectProviderModelDefinition(params: {
  cfg: OpenClawConfig;
  provider: string;
  modelId: string;
}): Promise<{
  supported: boolean;
  found: boolean;
  model?: ModelDefinitionConfig;
  warnings: string[];
}> {
  const providerId = normalizeProviderId(params.provider);
  const modelId = normalizeOptionalString(params.modelId) ?? "";
  if (!providerId || !modelId) {
    return { supported: false, found: false, warnings: [] };
  }
  const detect = MODEL_ADD_ADAPTERS[providerId]?.detect;
  if (!detect) {
    return { supported: false, found: false, warnings: [] };
  }
  const resolution = ensureProviderConfig({ cfg: params.cfg, provider: providerId });
  if (!resolution.ok) {
    // Detection is supported but the provider cannot be resolved/bootstrapped.
    return { supported: true, found: false, warnings: [] };
  }
  const detection = await detect({
    cfg: params.cfg,
    providerConfig: resolution.providerConfig,
    modelId,
  });
  return {
    supported: true,
    found: Boolean(detection.found && detection.model),
    model: detection.model,
    warnings: detection.warnings ?? [],
  };
}
/**
 * Clone the config and append the model under the stored provider key unless
 * an entry with the same (case-insensitive) id already exists. Existing rows
 * are left untouched so manual edits win over re-adds.
 */
function upsertModelEntry(params: {
  cfg: OpenClawConfig;
  provider: string;
  providerKey: string;
  providerConfig: ModelProviderConfig;
  model: ModelDefinitionConfig;
}): { nextConfig: OpenClawConfig; existed: boolean } {
  const nextConfig = structuredClone(params.cfg);
  nextConfig.models ??= {};
  nextConfig.models.providers ??= {};
  // Prefer the entry already stored under this key; fall back to the
  // (possibly bootstrapped) provider config handed in.
  const source = nextConfig.models.providers[params.providerKey] ?? params.providerConfig;
  const providerEntry = {
    ...source,
    models: Array.isArray(source.models) ? [...source.models] : [],
  };
  const wantedId = normalizeLowercaseStringOrEmpty(params.model.id);
  const existed = providerEntry.models.some(
    (entry) => normalizeLowercaseStringOrEmpty(entry?.id) === wantedId,
  );
  if (!existed) {
    providerEntry.models.push(params.model);
  }
  nextConfig.models.providers[params.providerKey] = providerEntry;
  return { nextConfig, existed };
}
/**
 * When the config pins an explicit model allowlist (agents.defaults.models),
 * add the new provider/model entry so it is immediately selectable.
 *
 * No-op (original config returned, added=false) when no allowlist is
 * configured, the ref cannot be resolved, or the model already resolves to
 * an allowed key.
 */
function maybeAddAllowlistEntry(params: {
  cfg: OpenClawConfig;
  provider: string;
  modelId: string;
}): { nextConfig: OpenClawConfig; added: boolean } {
  // Hoisted: previously resolveDefaultModelForAgent was computed twice with
  // the same input (once for the allowlist keys, once for ref resolution).
  const defaultProvider = resolveDefaultModelForAgent({ cfg: params.cfg }).provider;
  const allowlistKeys = buildConfiguredAllowlistKeys({
    cfg: params.cfg,
    defaultProvider,
  });
  if (!allowlistKeys || allowlistKeys.size === 0) {
    return { nextConfig: params.cfg, added: false };
  }
  const rawRef = `${params.provider}/${params.modelId}`;
  const resolved = resolveModelRefFromString({
    raw: rawRef,
    defaultProvider,
  });
  if (!resolved) {
    return { nextConfig: params.cfg, added: false };
  }
  const normalizedKey = `${resolved.ref.provider}/${resolved.ref.model}`.toLowerCase();
  if (allowlistKeys.has(normalizedKey)) {
    return { nextConfig: params.cfg, added: false };
  }
  const nextConfig = structuredClone(params.cfg);
  nextConfig.agents ??= {};
  nextConfig.agents.defaults ??= {};
  nextConfig.agents.defaults.models ??= {};
  // rawRef is identical to the previously rebuilt `${provider}/${modelId}`.
  nextConfig.agents.defaults.models[rawRef] = {};
  return { nextConfig, added: true };
}
/**
 * End-to-end /models add write path: re-read the config file, resolve or
 * bootstrap the provider, detect model metadata, upsert the model entry,
 * extend the allowlist if one is configured, validate, and write the file
 * with optimistic-concurrency protection via the snapshot hash.
 *
 * Returns { ok: false, error } for invalid input, an invalid config file,
 * an unaddable provider, validation failure, or a concurrent config change.
 */
export async function addModelToConfig(params: {
cfg: OpenClawConfig;
provider: string;
modelId: string;
}): Promise<{ ok: true; result: AddModelOutcome } | { ok: false; error: string }> {
const provider = normalizeProviderId(params.provider);
const modelId = normalizeOptionalString(params.modelId) ?? "";
if (!provider || !modelId) {
return { ok: false, error: "Provider and model id are required." };
}
// Re-read the file rather than trusting params.cfg so the write is based on
// the on-disk state (and its hash, used for conflict detection below).
const snapshot = await readConfigFileSnapshot();
if (!snapshot.valid || !snapshot.parsed || typeof snapshot.parsed !== "object") {
return { ok: false, error: "Config file is invalid; fix it before using /models add." };
}
const currentConfig = structuredClone(snapshot.parsed as OpenClawConfig);
const providerResolution = ensureProviderConfig({
cfg: currentConfig,
provider,
});
if (!providerResolution.ok) {
return {
ok: false,
error: `Provider "${provider}" is not configured for custom models yet. Configure the provider first, then retry /models add.`,
};
}
const detected = await detectModelDefinition({
cfg: currentConfig,
provider,
providerConfig: providerResolution.providerConfig,
modelId,
});
const upserted = upsertModelEntry({
cfg: currentConfig,
provider,
providerKey: providerResolution.providerKey,
providerConfig: providerResolution.providerConfig,
model: detected.model,
});
const allowlisted = maybeAddAllowlistEntry({
cfg: upserted.nextConfig,
provider,
modelId,
});
// Only write when something actually changed (new model, new allowlist
// entry, or a freshly bootstrapped provider entry).
const changed = !upserted.existed || allowlisted.added || providerResolution.bootstrapped;
if (!changed) {
return {
ok: true,
result: {
provider,
modelId,
existed: true,
allowlistAdded: false,
warnings: detected.warnings,
},
};
}
const validated = validateConfigObjectWithPlugins(allowlisted.nextConfig);
if (!validated.ok) {
const issue = validated.issues[0];
const detail = issue ? `${issue.path}: ${issue.message}` : "unknown validation error";
return {
ok: false,
error: `Config invalid after /models add (${detail}).`,
};
}
try {
// baseHash makes the replace fail if the file changed since the snapshot.
await replaceConfigFile({
nextConfig: validated.config,
...(snapshot.hash !== undefined ? { baseHash: snapshot.hash } : {}),
});
} catch (error) {
if (error instanceof ConfigMutationConflictError) {
return {
ok: false,
error: "Config changed while /models add was running. Retry the command.",
};
}
throw error;
}
return {
ok: true,
result: {
provider,
modelId,
existed: upserted.existed,
allowlistAdded: allowlisted.added,
warnings: detected.warnings,
},
};
}

View File

@@ -1861,7 +1861,7 @@ describe("buildCommandsMessage", () => {
expect(text).toContain("/skill - Run a skill by name.");
expect(text).toContain("/think (/thinking, /t) - Set thinking level.");
expect(text).toContain("/compact - Compact the session context.");
expect(text).toContain("/models - List model providers/models or add a model.");
expect(text).toContain("/models - List model providers/models.");
expect(text).not.toContain("/config");
expect(text).not.toContain("/debug");
});

View File

@@ -219,6 +219,20 @@ describe("normalizeCompatibilityConfigValues", () => {
]);
});
it("removes deprecated commands.modelsWrite from legacy configs", () => {
const res = normalizeCompatibilityConfigValues({
commands: {
text: true,
modelsWrite: false,
},
} as unknown as OpenClawConfig);
expect(res.config.commands).toEqual({ text: true });
expect(res.changes).toContain(
"Removed deprecated commands.modelsWrite (/models add is deprecated).",
);
});
it("marks legacy untagged /models add OpenAI Codex metadata rows for doctor repair", () => {
const res = normalizeCompatibilityConfigValues({
models: {

View File

@@ -2,7 +2,10 @@ import type { OpenClawConfig } from "../../../config/types.openclaw.js";
import { runPluginSetupConfigMigrations } from "../../../plugins/setup-registry.js";
import { applyChannelDoctorCompatibilityMigrations } from "./channel-legacy-config-migrate.js";
import { normalizeBaseCompatibilityConfigValues } from "./legacy-config-compatibility-base.js";
import { normalizeLegacyOpenAICodexModelsAddMetadata } from "./legacy-config-core-normalizers.js";
import {
normalizeLegacyCommandsConfig,
normalizeLegacyOpenAICodexModelsAddMetadata,
} from "./legacy-config-core-normalizers.js";
export function normalizeCompatibilityConfigValues(cfg: OpenClawConfig): {
config: OpenClawConfig;
@@ -24,6 +27,7 @@ export function normalizeCompatibilityConfigValues(cfg: OpenClawConfig): {
next = channelMigrations.next;
changes.push(...channelMigrations.changes);
}
next = normalizeLegacyCommandsConfig(next, changes);
next = normalizeLegacyOpenAICodexModelsAddMetadata(next, changes);
return { config: next, changes };

View File

@@ -9,9 +9,29 @@ import {
normalizeOptionalLowercaseString,
normalizeOptionalString,
} from "../../../shared/string-coerce.js";
import { sanitizeForLog } from "../../../terminal/ansi.js";
import { isRecord } from "./legacy-config-record-shared.js";
export { normalizeLegacyTalkConfig } from "./legacy-talk-config-normalizer.js";
/**
 * Drops the retired `commands.modelsWrite` flag from legacy configs.
 *
 * `/models add` is deprecated, so the flag that used to gate it no longer has
 * any effect; removing it keeps old config files validating cleanly.
 *
 * @param cfg - Parsed config that may still carry the legacy key.
 * @param changes - Mutable log; a note is appended whenever a removal happens.
 * @returns The original `cfg` when nothing needed migrating, otherwise a
 *   shallow copy whose `commands` section no longer contains `modelsWrite`.
 */
export function normalizeLegacyCommandsConfig(
  cfg: OpenClawConfig,
  changes: string[],
): OpenClawConfig {
  const commandsSection = cfg.commands;
  // Nothing to do unless the commands section exists and carries the legacy key.
  if (!isRecord(commandsSection) || !("modelsWrite" in commandsSection)) {
    return cfg;
  }
  // Rest destructuring copies every own enumerable property except the
  // deprecated one, leaving the input object untouched.
  const { modelsWrite: _dropped, ...retainedCommands } = commandsSection;
  changes.push("Removed deprecated commands.modelsWrite (/models add is deprecated).");
  return {
    ...cfg,
    commands: retainedCommands as OpenClawConfig["commands"],
  };
}
export function normalizeLegacyBrowserConfig(
cfg: OpenClawConfig,
changes: string[],
@@ -204,8 +224,10 @@ export function normalizeLegacyOpenAICodexModelsAddMetadata(
})
) {
providerChanged = true;
const safeProviderId = sanitizeForLog(providerId);
const safeModelId = sanitizeForLog(String(model.id));
changes.push(
`Marked models.providers.${providerId}.models.${model.id} as /models add metadata so official OpenAI Codex metadata can override it.`,
`Marked models.providers.${safeProviderId}.models.${safeModelId} as /models add metadata so official OpenAI Codex metadata can override it.`,
);
nextModels.push(Object.assign({}, model, { metadataSource: "models-add" }));
} else {

View File

@@ -23,10 +23,6 @@ export function isCommandFlagEnabled(
return getOwnCommandFlagValue(config, key) === true;
}
/** True unless the caller's own config explicitly sets `commands.modelsWrite: false`. */
export function isModelsWriteEnabled(config?: { commands?: unknown }): boolean {
  const ownFlag = getOwnCommandFlagValue(config, "modelsWrite");
  // Default-enabled: only an explicit own `false` turns the feature off.
  return ownFlag !== false;
}
/** True unless the caller's own config explicitly sets `commands.restart: false`. */
export function isRestartEnabled(config?: { commands?: unknown }): boolean {
  const ownFlag = getOwnCommandFlagValue(config, "restart");
  // Default-enabled: only an explicit own `false` turns the feature off.
  return ownFlag !== false;
}

View File

@@ -3,12 +3,12 @@ import { setActivePluginRegistry } from "../plugins/runtime.js";
import { createChannelTestPluginBase, createTestRegistry } from "../test-utils/channel-plugins.js";
import {
isCommandFlagEnabled,
isModelsWriteEnabled,
isRestartEnabled,
isNativeCommandsExplicitlyDisabled,
resolveNativeCommandsEnabled,
resolveNativeSkillsEnabled,
} from "./commands.js";
import { validateConfigObjectWithPlugins } from "./validation.js";
beforeEach(() => {
setActivePluginRegistry(
@@ -201,24 +201,6 @@ describe("isRestartEnabled", () => {
});
});
describe("isModelsWriteEnabled", () => {
  it("defaults to enabled unless explicitly false", () => {
    // Absent config, empty config, and an empty commands section all keep the default.
    const defaulted: Array<{ commands?: unknown } | undefined> = [undefined, {}, { commands: {} }];
    for (const cfg of defaulted) {
      expect(isModelsWriteEnabled(cfg)).toBe(true);
    }
    expect(isModelsWriteEnabled({ commands: { modelsWrite: true } })).toBe(true);
    expect(isModelsWriteEnabled({ commands: { modelsWrite: false } })).toBe(false);
  });
  it("ignores inherited modelsWrite flags", () => {
    // A prototype-inherited flag is not an "own" setting and must not disable writes.
    const inheritedCommands = Object.create({ modelsWrite: false }) as Record<string, unknown>;
    expect(
      isModelsWriteEnabled({
        commands: inheritedCommands,
      }),
    ).toBe(true);
  });
});
describe("isCommandFlagEnabled", () => {
it("requires own boolean true", () => {
expect(isCommandFlagEnabled({ commands: { bash: true } }, "bash")).toBe(true);
@@ -233,3 +215,17 @@ describe("isCommandFlagEnabled", () => {
).toBe(false);
});
});
describe("deprecated commands compatibility", () => {
  it("ignores legacy modelsWrite during validation", () => {
    // Validation must succeed and silently drop the retired flag rather than error.
    const validated = validateConfigObjectWithPlugins({
      commands: { text: true, modelsWrite: false },
    });
    expect(validated.ok).toBe(true);
    if (validated.ok) {
      const commands = validated.config.commands;
      expect(commands).toMatchObject({ text: true });
      expect(Object.hasOwn(commands ?? {}, "modelsWrite")).toBe(false);
    }
  });
});

View File

@@ -1,12 +1,7 @@
import { getChannelPlugin, normalizeChannelId } from "../channels/plugins/index.js";
import type { ChannelId } from "../channels/plugins/types.public.js";
import type { NativeCommandsSetting } from "./types.js";
export {
isCommandFlagEnabled,
isModelsWriteEnabled,
isRestartEnabled,
type CommandFlagKey,
} from "./commands.flags.js";
export { isCommandFlagEnabled, isRestartEnabled, type CommandFlagKey } from "./commands.flags.js";
function resolveAutoDefault(
providerId: ChannelId | undefined,

View File

@@ -18868,7 +18868,6 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
default: {
native: "auto",
nativeSkills: "auto",
modelsWrite: true,
restart: true,
ownerDisplay: "raw",
},
@@ -18910,13 +18909,6 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
description:
"Enables text-command parsing in chat input in addition to native command surfaces where available. Keep this enabled for compatibility across channels that do not support native command registration.",
},
modelsWrite: {
default: true,
type: "boolean",
title: "Allow /models writes",
description:
"Allow model-management write commands such as `/models add` to register provider/model entries directly into config and make them available without restarting the gateway (default: true).",
},
bash: {
type: "boolean",
title: "Allow Bash Chat Command",
@@ -19018,7 +19010,7 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
"Defines elevated command allow rules by channel and sender for owner-level command surfaces. Use narrow provider-specific identities so privileged commands are not exposed to broad chat audiences.",
},
},
required: ["native", "nativeSkills", "modelsWrite", "restart", "ownerDisplay"],
required: ["native", "nativeSkills", "restart", "ownerDisplay"],
additionalProperties: false,
title: "Commands",
description:
@@ -26136,11 +26128,6 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
help: "Enables text-command parsing in chat input in addition to native command surfaces where available. Keep this enabled for compatibility across channels that do not support native command registration.",
tags: ["advanced"],
},
"commands.modelsWrite": {
label: "Allow /models writes",
help: "Allow model-management write commands such as `/models add` to register provider/model entries directly into config and make them available without restarting the gateway (default: true).",
tags: ["advanced"],
},
"commands.bash": {
label: "Allow Bash Chat Command",
help: "Allow bash chat command (`!`; `/bash` alias) to run host shell commands (default: false; requires tools.elevated).",

View File

@@ -1258,8 +1258,6 @@ export const FIELD_HELP: Record<string, string> = {
"Registers native skill commands so users can invoke skills directly from provider command menus where supported. Keep aligned with your skill policy so exposed commands match what operators expect.",
"commands.text":
"Enables text-command parsing in chat input in addition to native command surfaces where available. Keep this enabled for compatibility across channels that do not support native command registration.",
"commands.modelsWrite":
"Allow model-management write commands such as `/models add` to register provider/model entries directly into config and make them available without restarting the gateway (default: true).",
"commands.bash":
"Allow bash chat command (`!`; `/bash` alias) to run host shell commands (default: false; requires tools.elevated).",
"commands.bashForegroundMs":

View File

@@ -596,7 +596,6 @@ export const FIELD_LABELS: Record<string, string> = {
"commands.native": "Native Commands",
"commands.nativeSkills": "Native Skill Commands",
"commands.text": "Text Commands",
"commands.modelsWrite": "Allow /models writes",
"commands.bash": "Allow Bash Chat Command",
"commands.bashForegroundMs": "Bash Foreground Window (ms)",
"commands.config": "Allow /config",

View File

@@ -142,8 +142,6 @@ export type CommandsConfig = {
nativeSkills?: NativeCommandsSetting;
/** Enable text command parsing (default: true). */
text?: boolean;
/** Allow model-management write commands like `/models add` (default: true). */
modelsWrite?: boolean;
/** Allow bash chat command (`!`; `/bash` alias) (default: false). */
bash?: boolean;
/** How long bash waits before backgrounding (default: 2000; 0 backgrounds immediately). */

View File

@@ -51,6 +51,18 @@ type AllowedValuesCollection = {
};
type JsonSchemaLike = Record<string, unknown>;
/**
 * Pre-validation scrub: removes the deprecated `commands.modelsWrite` key so
 * legacy configs still pass strict schema validation.
 *
 * @param raw - Untrusted config object as supplied by the caller.
 * @returns `raw` unchanged when the key is absent (or the shape is not a
 *   record); otherwise a shallow copy with the key dropped — the input object
 *   is never mutated.
 */
function stripDeprecatedValidationKeys(raw: unknown): unknown {
  if (!isRecord(raw)) {
    return raw;
  }
  const commandsSection = raw.commands;
  if (!isRecord(commandsSection) || !Object.hasOwn(commandsSection, "modelsWrite")) {
    return raw;
  }
  // Rest destructuring keeps every own property except the deprecated flag.
  const { modelsWrite: _ignored, ...keptCommands } = commandsSection;
  return {
    ...raw,
    commands: keptCommands,
  };
}
const CUSTOM_EXPECTED_ONE_OF_RE = /expected one of ((?:"[^"]+"(?:\|"?[^"]+"?)*)+)/i;
const SECRETREF_POLICY_DOC_URL = "https://docs.openclaw.ai/reference/secretref-credential-surface";
const bundledChannelSchemaById = new Map<string, unknown>(
@@ -576,17 +588,23 @@ export function validateConfigObjectRaw(
touchedPaths?: ReadonlyArray<ReadonlyArray<string>>;
},
): { ok: true; config: OpenClawConfig } | { ok: false; issues: ConfigValidationIssue[] } {
const policyIssues = collectUnsupportedSecretRefPolicyIssues(raw);
const normalizedRaw = stripDeprecatedValidationKeys(raw);
const policyIssues = collectUnsupportedSecretRefPolicyIssues(normalizedRaw);
const doctorPluginIds = opts?.touchedPaths
? collectRelevantDoctorPluginIdsForTouchedPaths({
raw,
raw: normalizedRaw,
touchedPaths: opts.touchedPaths,
})
: collectRelevantDoctorPluginIds(raw);
: collectRelevantDoctorPluginIds(normalizedRaw);
const extraLegacyRules = listPluginDoctorLegacyConfigRules({
pluginIds: doctorPluginIds,
});
const legacyIssues = findLegacyConfigIssues(raw, raw, extraLegacyRules, opts?.touchedPaths);
const legacyIssues = findLegacyConfigIssues(
normalizedRaw,
normalizedRaw,
extraLegacyRules,
opts?.touchedPaths,
);
if (legacyIssues.length > 0) {
return {
ok: false,
@@ -596,7 +614,7 @@ export function validateConfigObjectRaw(
})),
};
}
const validated = OpenClawSchema.safeParse(raw);
const validated = OpenClawSchema.safeParse(normalizedRaw);
if (!validated.success) {
const schemaIssues = validated.error.issues.map((issue) => mapZodIssueToConfigIssue(issue));
return {

View File

@@ -208,7 +208,6 @@ export const CommandsSchema = z
native: NativeCommandsSettingSchema.optional().default("auto"),
nativeSkills: NativeCommandsSettingSchema.optional().default("auto"),
text: z.boolean().optional(),
modelsWrite: z.boolean().optional().default(true),
bash: z.boolean().optional(),
bashForegroundMs: z.number().int().min(0).max(30_000).optional(),
config: z.boolean().optional(),
@@ -229,7 +228,6 @@ export const CommandsSchema = z
({
native: "auto",
nativeSkills: "auto",
modelsWrite: true,
restart: true,
ownerDisplay: "raw",
}) as const,

View File

@@ -38,9 +38,7 @@ describe("plugin-sdk/command-auth", () => {
expect(buildHelpMessage(cfg)).toContain("/commands for full list");
expect(buildCommandsMessage(cfg)).toContain("More: /tools for available capabilities");
expect(buildCommandsMessage(cfg)).toContain(
"/models - List model providers/models or add a model.",
);
expect(buildCommandsMessage(cfg)).toContain("/models - List model providers/models.");
expect(buildCommandsMessagePaginated(cfg)).toMatchObject({
currentPage: 1,
totalPages: expect.any(Number),