feat: Add /models add hot-reload model registration (#70211)

* feat(models): add chat model registration with hot reload

* docs(changelog): add models entry for pr 70211

* fix(models): harden add flow follow-ups

* fix models add review follow-ups

* harden models add config writes

* tighten plugin boundary invariant

* move models add adapters behind sdk facades

* avoid ollama-specific core facade
This commit is contained in:
Tak Hoffman
2026-04-22 12:00:30 -05:00
committed by GitHub
parent 0623079e98
commit f328c21046
28 changed files with 2033 additions and 373 deletions

View File

@@ -6,6 +6,7 @@ Docs: https://docs.openclaw.ai
### Changes
- Models/commands: add `/models add <provider> <modelId>` so you can register a model from chat and use it without restarting the gateway; keep `/models` as a simple provider browser while adding clearer add guidance and copy-friendly command examples. (#70211) Thanks @Takhoffman.
- Pi/models: update the bundled pi packages to `0.68.1` and let the OpenCode Go catalog come from pi instead of plugin-maintained model aliases, adding the refreshed `opencode-go/kimi-k2.6`, Qwen, GLM, MiMo, and MiniMax entries.
- CLI/doctor plugins: lazy-load doctor plugin paths and prefer installed plugin `dist/*` runtime entries over source-adjacent JavaScript fallbacks, reducing the measured `doctor --non-interactive` runtime by about 74% while keeping cold doctor startup on built plugin artifacts. (#69840) Thanks @gumadeiras.
- WhatsApp/groups+direct: forward per-group and per-direct `systemPrompt` config into inbound context `GroupSystemPrompt` so configured per-chat behavioral instructions are injected on every turn. Supports `"*"` wildcard fallback and account-scoped overrides under `channels.whatsapp.accounts.<id>.{groups,direct}`; account maps fully replace root maps (no deep merge), matching the existing `requireMention` pattern. Closes #7011. (#59553) Thanks @Bluetegu.

View File

@@ -307,7 +307,7 @@ By default, components are single use. Set `components.reusable=true` to allow b
To restrict who can click a button, set `allowedUsers` on that button (Discord user IDs, tags, or `*`). When configured, unmatched users receive an ephemeral denial.
The `/model` and `/models` slash commands open an interactive model picker with provider and model dropdowns plus a Submit step. The picker reply is ephemeral and only the invoking user can use it.
The `/model` and `/models` slash commands open an interactive model picker with provider and model dropdowns plus a Submit step. `/models add` also supports adding a new provider/model entry from chat, and newly added models show up without restarting the gateway. The picker reply is ephemeral and only the invoking user can use it.
File attachments:

View File

@@ -361,8 +361,8 @@ Surface different features that extend the above defaults.
},
{
"command": "/models",
"description": "List providers or models for a provider",
"usage_hint": "[provider] [page] [limit=<n>|size=<n>|all]"
"description": "List providers/models or add a model",
"usage_hint": "[provider] [page] [limit=<n>|size=<n>|all] | add <provider> <modelId>"
},
{
"command": "/help",

View File

@@ -114,6 +114,9 @@ Notes:
- `/model` (and `/model list`) is a compact, numbered picker (model family + available providers).
- On Discord, `/model` and `/models` open an interactive picker with provider and model dropdowns plus a Submit step.
- `/models add` lets you add a provider/model entry from chat without editing config manually.
- `/models add <provider> <modelId>` is the fastest path; bare `/models add` starts a provider-first guided flow where supported.
- After `/models add`, the new model becomes available in `/models` and `/model` without restarting the gateway.
- `/model <#>` selects from that picker.
- `/model` persists the new session selection immediately.
- If the agent is idle, the next run uses the new model right away.
@@ -132,6 +135,14 @@ Notes:
Full command behavior/config: [Slash commands](/tools/slash-commands).
Examples:
```text
/models add
/models add ollama glm-5.1:cloud
/models add lmstudio qwen/qwen3.5-9b
```
## CLI commands
```bash

View File

@@ -68,7 +68,7 @@ import {
withResolvedTelegramForumFlag,
} from "./bot/helpers.js";
import type { TelegramContext, TelegramGetChat } from "./bot/types.js";
import { buildCommandsPaginationKeyboard } from "./command-ui.js";
import { buildCommandsPaginationKeyboard, buildTelegramModelsMenuButtons } from "./command-ui.js";
import {
resolveTelegramConversationBaseSessionKey,
resolveTelegramConversationRoute,
@@ -1509,7 +1509,7 @@ export const registerTelegramHandlers = ({
id: p,
count: byProvider.get(p)?.size ?? 0,
}));
const buttons = buildProviderKeyboard(providerInfos);
const buttons = buildTelegramModelsMenuButtons({ providers: providerInfos });
try {
await editMessageWithButtons("Select a provider:", buttons);
} catch (err) {
@@ -1527,7 +1527,7 @@ export const registerTelegramHandlers = ({
id: p,
count: byProvider.get(p)?.size ?? 0,
}));
const buttons = buildProviderKeyboard(providerInfos);
const buttons = buildTelegramModelsMenuButtons({ providers: providerInfos });
try {
await editMessageWithButtons(
`Unknown provider: ${provider}\n\nSelect a provider:`,
@@ -1580,7 +1580,7 @@ export const registerTelegramHandlers = ({
id: p,
count: byProvider.get(p)?.size ?? 0,
}));
const buttons = buildProviderKeyboard(providerInfos);
const buttons = buildTelegramModelsMenuButtons({ providers: providerInfos });
try {
await editMessageWithButtons(
`Could not resolve model "${selection.model}".\n\nSelect a provider:`,

View File

@@ -3274,6 +3274,16 @@ describe("createTelegramBot", () => {
expect(buildModelsProviderDataMock).toHaveBeenCalledTimes(2);
expect(editMessageTextSpy).toHaveBeenCalledTimes(1);
expect(editMessageTextSpy.mock.calls[0]?.[2]).toContain("Select a provider:");
expect(
(
editMessageTextSpy.mock.calls[0]?.[3] as {
reply_markup?: { inline_keyboard?: unknown[][] };
}
)?.reply_markup?.inline_keyboard?.[0]?.[0],
).toEqual({
text: "Add model",
callback_data: "/models add",
});
});
it("retries command pagination callbacks after a bubbled edit failure", async () => {
@@ -3654,6 +3664,16 @@ describe("createTelegramBot", () => {
expect(editMessageTextSpy).toHaveBeenCalledTimes(2);
expect(editMessageTextSpy.mock.calls.at(-1)?.[2]).toContain("Select a provider:");
expect(
(
editMessageTextSpy.mock.calls.at(-1)?.[3] as {
reply_markup?: { inline_keyboard?: unknown[][] };
}
)?.reply_markup?.inline_keyboard?.[0]?.[0],
).toEqual({
text: "Add model",
callback_data: "/models add",
});
});
it("retries model selection callbacks after a bubbled session-store failure", async () => {

View File

@@ -9,6 +9,7 @@ import {
toPluginMessageContext,
toPluginMessageSentEvent,
} from "openclaw/plugin-sdk/hook-runtime";
import type { ReplyPayloadDelivery } from "openclaw/plugin-sdk/interactive-runtime";
import { buildOutboundMediaLoadOptions } from "openclaw/plugin-sdk/media-runtime";
import { isGifMedia, kindFromMime } from "openclaw/plugin-sdk/media-runtime";
import {
@@ -487,7 +488,7 @@ async function deliverMediaReply(params: {
}
async function maybePinFirstDeliveredMessage(params: {
pin: NonNullable<ReplyPayload["delivery"]>["pin"] | undefined;
pin: ReplyPayloadDelivery["pin"];
bot: Bot;
chatId: string;
runtime: RuntimeEnv;

View File

@@ -9,6 +9,26 @@ import {
export { buildCommandsPaginationKeyboard };
export function buildTelegramModelsMenuButtons(params: { providers: ProviderInfo[] }) {
  // Top-level /models menu layout: a fixed "Add model" action row first,
  // then one row per provider (rows built by the shared provider keyboard).
  const addModelRow = [{ text: "Add model", callback_data: "/models add" }];
  const providerRows = buildProviderKeyboard(params.providers);
  return [addModelRow, ...providerRows];
}
export function buildTelegramModelsMenuChannelData(params: {
  providers: ProviderInfo[];
}): ReplyPayload["channelData"] | null {
  // With no providers there is nothing to render; return null so the caller
  // can fall back to a plain-text reply instead of an empty keyboard.
  if (!params.providers.length) {
    return null;
  }
  const buttons = buildTelegramModelsMenuButtons(params);
  return { telegram: { buttons } };
}
export function buildTelegramCommandsListChannelData(params: {
currentPage: number;
totalPages: number;
@@ -41,6 +61,25 @@ export function buildTelegramModelsProviderChannelData(params: {
};
}
export function buildTelegramModelsAddProviderChannelData(params: {
  providers: Array<{ id: string }>;
}): ReplyPayload["channelData"] | null {
  // Guided "add model" flow: render one button per provider, each of which
  // dispatches the provider-scoped `/models add <provider>` command.
  const { providers } = params;
  if (!providers.length) {
    // No addable providers — signal "no channel data" rather than an empty keyboard.
    return null;
  }
  const rows = providers.map(({ id }) => [
    { text: id, callback_data: `/models add ${id}` },
  ]);
  return { telegram: { buttons: rows } };
}
export function buildTelegramModelsListChannelData(params: {
provider: string;
models: readonly string[];

View File

@@ -28,6 +28,42 @@ function resolveAccount(cfg: OpenClawConfig, accountId: string): ResolvedTelegra
}
describe("createTelegramPluginBase config duplicate token guard", () => {
it("wires the top-level models menu adapter into the production plugin", () => {
const channelData = telegramPluginBase.commands?.buildModelsMenuChannelData?.({
providers: [
{ id: "anthropic", count: 2 },
{ id: "openai", count: 3 },
],
});
expect(channelData).toEqual({
telegram: {
buttons: [
[{ text: "Add model", callback_data: "/models add" }],
[
{ text: "anthropic (2)", callback_data: "mdl_list_anthropic_1" },
{ text: "openai (3)", callback_data: "mdl_list_openai_1" },
],
],
},
});
});
it("wires the guided add-provider adapter into the production plugin", () => {
const channelData = telegramPluginBase.commands?.buildModelsAddProviderChannelData?.({
providers: [{ id: "ollama" }, { id: "lmstudio" }],
});
expect(channelData).toEqual({
telegram: {
buttons: [
[{ text: "ollama", callback_data: "/models add ollama" }],
[{ text: "lmstudio", callback_data: "/models add lmstudio" }],
],
},
});
});
it("marks secondary account as not configured when token is shared", async () => {
const cfg = createCfg();
const alertsAccount = resolveAccount(cfg, "alerts");

View File

@@ -19,7 +19,9 @@ import {
import {
buildTelegramCommandsListChannelData,
buildTelegramModelBrowseChannelData,
buildTelegramModelsAddProviderChannelData,
buildTelegramModelsListChannelData,
buildTelegramModelsMenuChannelData,
buildTelegramModelsProviderChannelData,
} from "./command-ui.js";
import { TelegramChannelConfigSchema } from "./config-schema.js";
@@ -148,7 +150,9 @@ export function createTelegramPluginBase(params: {
nativeCommandsAutoEnabled: true,
nativeSkillsAutoEnabled: true,
buildCommandsListChannelData: buildTelegramCommandsListChannelData,
buildModelsMenuChannelData: buildTelegramModelsMenuChannelData,
buildModelsProviderChannelData: buildTelegramModelsProviderChannelData,
buildModelsAddProviderChannelData: buildTelegramModelsAddProviderChannelData,
buildModelsListChannelData: buildTelegramModelsListChannelData,
buildModelBrowseChannelData: buildTelegramModelBrowseChannelData,
},

View File

@@ -3,6 +3,8 @@
import { createExtensionImportBoundaryChecker } from "./lib/extension-import-boundary-checker.mjs";
import { runAsScript } from "./lib/ts-guard-utils.mjs";
const ALLOWED_EXTENSION_PUBLIC_SURFACE_RE = /^extensions\/[^/]+\/(?:api|runtime-api)\.js$/;
const checker = createExtensionImportBoundaryChecker({
roots: ["src"],
boundaryLabel: "src",
@@ -10,6 +12,9 @@ const checker = createExtensionImportBoundaryChecker({
cleanMessage: "No src import boundary violations found.",
inventoryTitle: "Src extension import boundary inventory:",
skipSourcesWithoutBundledPluginPrefix: true,
allowResolvedPath(resolvedPath) {
return ALLOWED_EXTENSION_PUBLIC_SURFACE_RE.test(resolvedPath);
},
shouldSkipFile(relativeFile) {
return (
relativeFile.endsWith(".test.ts") ||

View File

@@ -38,7 +38,7 @@ function classifyResolvedExtensionReason(kind, boundaryLabel) {
return `${verb} bundled plugin file from ${boundaryLabel} boundary`;
}
function scanImportBoundaryViolations(sourceFile, filePath, boundaryLabel) {
function scanImportBoundaryViolations(sourceFile, filePath, boundaryLabel, allowResolvedPath) {
const entries = [];
const relativeFile = normalizeRepoPath(repoRoot, filePath);
@@ -47,6 +47,9 @@ function scanImportBoundaryViolations(sourceFile, filePath, boundaryLabel) {
if (!resolvedPath?.startsWith(BUNDLED_PLUGIN_PATH_PREFIX)) {
return;
}
if (allowResolvedPath?.(resolvedPath, { kind, specifier, file: relativeFile })) {
return;
}
entries.push({
file: relativeFile,
line: toLine(sourceFile, specifierNode),
@@ -74,7 +77,12 @@ export function createExtensionImportBoundaryChecker(params) {
files,
compareEntries,
collectEntries(sourceFile, filePath) {
return scanImportBoundaryViolations(sourceFile, filePath, params.boundaryLabel);
return scanImportBoundaryViolations(
sourceFile,
filePath,
params.boundaryLabel,
params.allowResolvedPath,
);
},
shouldParseSource: params.skipSourcesWithoutBundledPluginPrefix
? (source) => source.includes(BUNDLED_PLUGIN_PATH_PREFIX)

View File

@@ -51,12 +51,16 @@ function loadModelSuppression() {
return modelSuppressionPromise;
}
export function resetModelCatalogCacheForTest() {
// Reset the module-level model-catalog state so the next load starts fresh:
// clears the cached catalog promise, re-arms one-shot error logging, and
// restores the default pi SDK dynamic-import function (undoing any loader
// injected via the test-only escape hatch below).
export function resetModelCatalogCache() {
  modelCatalogPromise = null;
  hasLoggedModelCatalogError = false;
  importPiSdk = defaultImportPiSdk;
}

// Backward-compatible alias kept for existing test callers; delegates to the
// general-purpose reset above.
export function resetModelCatalogCacheForTest() {
  resetModelCatalogCache();
}
// Test-only escape hatch: allow mocking the dynamic import to simulate transient failures.
export function __setModelCatalogImportForTest(loader?: () => Promise<PiSdkModule>) {
importPiSdk = loader ?? defaultImportPiSdk;

View File

@@ -212,8 +212,7 @@ export function consumePendingToolMediaIntoReply(
...payload,
mediaUrls: mergedMediaUrls.length ? mergedMediaUrls : undefined,
audioAsVoice: payload.audioAsVoice || state.pendingToolAudioAsVoice || undefined,
trustedLocalMedia:
payload.trustedLocalMedia || state.pendingToolTrustedLocalMedia || undefined,
trustedLocalMedia: payload.trustedLocalMedia || state.pendingToolTrustedLocalMedia || undefined,
};
clearPendingToolMedia(state);
return mergedPayload;

View File

@@ -831,7 +831,7 @@ export function buildBuiltinChatCommands(): ChatCommandDefinition[] {
defineChatCommand({
key: "models",
nativeName: "models",
description: "List model providers or provider models.",
description: "List model providers/models or add a model.",
textAlias: "/models",
tier: "standard",
argsParsing: "none",

View File

@@ -1,6 +1,6 @@
import { beforeEach, describe, expect, it, vi } from "vitest";
import type { ChannelPlugin } from "../../channels/plugins/types.js";
import type { OpenClawConfig } from "../../config/config.js";
import type { OpenClawConfig } from "../../config/types.openclaw.js";
import { setActivePluginRegistry } from "../../plugins/runtime.js";
import {
createChannelTestPluginBase,
@@ -17,6 +17,23 @@ const modelAuthLabelMocks = vi.hoisted(() => ({
resolveModelAuthLabel: vi.fn<(params: unknown) => string | undefined>(() => undefined),
}));
const modelsAddMocks = vi.hoisted(() => ({
addModelToConfig: vi.fn(),
listAddableProviders: vi.fn<(params: unknown) => string[]>(),
validateAddProvider:
vi.fn<
(params: unknown) => { ok: true; provider: string } | { ok: false; providers: string[] }
>(),
}));
const configWriteAuthMocks = vi.hoisted(() => ({
resolveConfigWriteDeniedText: vi.fn<(params: { target: string }) => string | null>(() => null),
}));
const configWriteTargetMocks = vi.hoisted(() => ({
resolveConfigWriteTargetFromPath: vi.fn((path: string[]) => path.join(".")),
}));
vi.mock("../../agents/model-catalog.js", () => ({
loadModelCatalog: modelCatalogMocks.loadModelCatalog,
}));
@@ -25,6 +42,24 @@ vi.mock("../../agents/model-auth-label.js", () => ({
resolveModelAuthLabel: modelAuthLabelMocks.resolveModelAuthLabel,
}));
vi.mock("../../channels/plugins/config-writes.js", () => ({
resolveConfigWriteTargetFromPath: configWriteTargetMocks.resolveConfigWriteTargetFromPath,
}));
vi.mock("./config-write-authorization.js", () => ({
resolveConfigWriteDeniedText: configWriteAuthMocks.resolveConfigWriteDeniedText,
}));
vi.mock("./models-add.js", async () => {
const actual = await vi.importActual<typeof import("./models-add.js")>("./models-add.js");
return {
...actual,
addModelToConfig: modelsAddMocks.addModelToConfig,
listAddableProviders: modelsAddMocks.listAddableProviders,
validateAddProvider: modelsAddMocks.validateAddProvider,
};
});
const telegramModelsTestPlugin: ChannelPlugin = {
...createChannelTestPluginBase({
id: "telegram",
@@ -41,6 +76,19 @@ const telegramModelsTestPlugin: ChannelPlugin = {
},
}),
commands: {
buildModelsMenuChannelData: ({ providers }) => ({
telegram: {
buttons: [
[{ text: "Add model", callback_data: "/models add" }],
...providers.map((provider) => [
{
text: provider.id,
callback_data: `models:${provider.id}`,
},
]),
],
},
}),
buildModelsProviderChannelData: ({ providers }) => ({
telegram: {
buttons: providers.map((provider) => [
@@ -51,6 +99,16 @@ const telegramModelsTestPlugin: ChannelPlugin = {
]),
},
}),
buildModelsAddProviderChannelData: ({ providers }) => ({
telegram: {
buttons: providers.map((provider) => [
{
text: provider.id,
callback_data: `/models add ${provider.id}`,
},
]),
},
}),
},
};
@@ -71,6 +129,32 @@ beforeEach(() => {
]);
modelAuthLabelMocks.resolveModelAuthLabel.mockReset();
modelAuthLabelMocks.resolveModelAuthLabel.mockReturnValue(undefined);
modelsAddMocks.addModelToConfig.mockReset();
modelsAddMocks.addModelToConfig.mockResolvedValue({
ok: true,
result: {
provider: "ollama",
modelId: "glm-5.1:cloud",
existed: false,
allowlistAdded: false,
warnings: [],
},
});
modelsAddMocks.listAddableProviders.mockReset();
modelsAddMocks.listAddableProviders.mockReturnValue([
"anthropic",
"lmstudio",
"ollama",
"openai",
]);
modelsAddMocks.validateAddProvider.mockReset();
modelsAddMocks.validateAddProvider.mockImplementation((params: unknown) => ({
ok: true,
provider: (params as { provider: string }).provider,
}));
configWriteAuthMocks.resolveConfigWriteDeniedText.mockReset();
configWriteAuthMocks.resolveConfigWriteDeniedText.mockReturnValue(null);
configWriteTargetMocks.resolveConfigWriteTargetFromPath.mockClear();
setActivePluginRegistry(
createTestRegistry([
...textSurfaceModelsTestPlugins,
@@ -83,312 +167,192 @@ beforeEach(() => {
);
});
function buildModelsParams(
commandBody: string,
cfg: OpenClawConfig,
surface: string,
options?: {
authorized?: boolean;
agentId?: string;
sessionKey?: string;
},
function buildParams(
commandBodyNormalized: string,
cfgOverrides: Partial<OpenClawConfig> = {},
): HandleCommandsParams {
const params = {
cfg,
return {
cfg: {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
},
},
commands: {
text: true,
},
...cfgOverrides,
} as OpenClawConfig,
ctx: {
Provider: surface,
Surface: surface,
CommandSource: "text",
Surface: "discord",
},
command: {
commandBodyNormalized: commandBody,
commandBodyNormalized,
isAuthorizedSender: true,
senderId: "owner",
senderIsOwner: true,
senderId: "user-1",
channel: "discord",
channelId: "channel-1",
surface: "discord",
ownerList: [],
from: "user-1",
to: "bot",
},
sessionKey: "agent:main:main",
sessionKey: "agent:main:discord:direct:user-1",
workspaceDir: "/tmp",
provider: "anthropic",
model: "claude-opus-4-5",
contextTokens: 0,
defaultGroupActivation: () => "mention",
resolvedVerboseLevel: "off",
resolvedReasoningLevel: "off",
resolveDefaultThinkingLevel: async () => undefined,
isGroup: false,
directives: {},
elevated: { enabled: true, allowed: true, failures: [] },
} as unknown as HandleCommandsParams;
if (options?.authorized === false) {
params.command.isAuthorizedSender = false;
params.command.senderId = "unauthorized";
}
if (options?.agentId) {
params.agentId = options.agentId;
}
if (options?.sessionKey) {
params.sessionKey = options.sessionKey;
}
return params;
}
describe("handleModelsCommand", () => {
const cfg = {
commands: { text: true },
agents: { defaults: { model: { primary: "anthropic/claude-opus-4-5" } } },
} as OpenClawConfig;
it("shows a simple providers menu on text surfaces", async () => {
const result = await handleModelsCommand(buildParams("/models"), true);
it.each(["discord", "whatsapp"])("lists providers on %s text surfaces", async (surface) => {
const result = await handleModelsCommand(buildModelsParams("/models", cfg, surface), true);
expect(result?.shouldContinue).toBe(false);
expect(result?.reply?.text).toContain("Providers:");
expect(result?.reply?.text).toContain("anthropic");
expect(result?.reply?.text).toContain("- anthropic (2)");
expect(result?.reply?.text).toContain("- google (1)");
expect(result?.reply?.text).toContain("- openai (2)");
expect(result?.reply?.text).toContain("Use: /models <provider>");
expect(result?.reply?.text).toContain("Switch: /model <provider/model>");
expect(result?.reply?.text).toContain("Add: /models add");
});
it("rejects unauthorized /models commands", async () => {
const result = await handleModelsCommand(
buildModelsParams("/models", cfg, "discord", { authorized: false }),
true,
);
expect(result).toEqual({ shouldContinue: false });
});
it("adds an add-model action to the telegram provider picker", async () => {
const params = buildParams("/models");
params.ctx.Surface = "telegram";
params.command.channel = "telegram";
params.command.surface = "telegram";
const result = await handleModelsCommand(params, true);
it("lists providers on telegram with buttons", async () => {
const result = await handleModelsCommand(buildModelsParams("/models", cfg, "telegram"), true);
expect(result?.shouldContinue).toBe(false);
expect(result?.reply?.text).toBe("Select a provider:");
const buttons = (result?.reply?.channelData as { telegram?: { buttons?: unknown[][] } })
?.telegram?.buttons;
expect(buttons).toBeDefined();
expect(buttons?.length).toBeGreaterThan(0);
});
it("handles provider pagination all mode and unknown providers", async () => {
const cases = [
{
name: "lists provider models with pagination hints",
command: "/models anthropic",
includes: [
"Models (anthropic",
"page 1/",
"anthropic/claude-opus-4-5",
"Switch: /model <provider/model>",
"All: /models anthropic all",
expect(result?.reply?.channelData).toEqual({
telegram: {
buttons: [
[{ text: "Add model", callback_data: "/models add" }],
[{ text: "anthropic", callback_data: "models:anthropic" }],
[{ text: "google", callback_data: "models:google" }],
[{ text: "openai", callback_data: "models:openai" }],
],
excludes: [],
},
{
name: "ignores page argument when all flag is present",
command: "/models anthropic 3 all",
includes: ["Models (anthropic", "page 1/1", "anthropic/claude-opus-4-5"],
excludes: ["Page out of range"],
},
{
name: "errors on out-of-range pages",
command: "/models anthropic 4",
includes: ["Page out of range", "valid: 1-"],
excludes: [],
},
{
name: "handles unknown providers",
command: "/models not-a-provider",
includes: ["Unknown provider", "Available providers"],
excludes: [],
},
] as const;
for (const testCase of cases) {
const result = await handleModelsCommand(
buildModelsParams(testCase.command, cfg, "discord"),
true,
);
expect(result?.shouldContinue, testCase.name).toBe(false);
for (const expected of testCase.includes) {
expect(result?.reply?.text, `${testCase.name}: ${expected}`).toContain(expected);
}
for (const blocked of testCase.excludes) {
expect(result?.reply?.text, `${testCase.name}: !${blocked}`).not.toContain(blocked);
}
}
});
it("lists configured models outside the curated catalog", async () => {
const customCfg = {
commands: { text: true },
agents: {
defaults: {
model: {
primary: "localai/ultra-chat",
fallbacks: ["anthropic/claude-opus-4-5"],
},
imageModel: "visionpro/studio-v1",
},
},
} as unknown as OpenClawConfig;
const providerList = await handleModelsCommand(
buildModelsParams("/models", customCfg, "discord"),
true,
);
expect(providerList?.reply?.text).toContain("localai");
expect(providerList?.reply?.text).toContain("visionpro");
const result = await handleModelsCommand(
buildModelsParams("/models localai", customCfg, "discord"),
true,
);
expect(result?.shouldContinue).toBe(false);
expect(result?.reply?.text).toContain("Models (localai");
expect(result?.reply?.text).toContain("localai/ultra-chat");
expect(result?.reply?.text).not.toContain("Unknown provider");
});
it("uses the active agent context for model list replies", async () => {
const multiAgentCfg = {
commands: { text: true },
agents: {
defaults: { model: { primary: "anthropic/claude-opus-4-5" } },
list: [{ id: "support", model: "localai/ultra-chat" }],
},
} as unknown as OpenClawConfig;
const result = await handleModelsCommand(
buildModelsParams("/models", multiAgentCfg, "discord", {
agentId: "support",
sessionKey: "agent:support:main",
}),
true,
);
expect(result?.shouldContinue).toBe(false);
expect(result?.reply?.text).toContain("Providers:");
expect(result?.reply?.text).toContain("localai");
});
it("prefers the target session entry for model auth labeling", async () => {
modelAuthLabelMocks.resolveModelAuthLabel.mockReturnValue("target-auth");
const params = buildModelsParams("/models anthropic", cfg, "discord", {
agentId: "main",
sessionKey: "agent:support:main",
});
});
it("lists models for /models <provider>", async () => {
const result = await handleModelsCommand(buildParams("/models openai"), true);
expect(result?.reply?.text).toContain("Models (openai) — showing 1-2 of 2 (page 1/1)");
expect(result?.reply?.text).toContain("- openai/gpt-4.1");
expect(result?.reply?.text).toContain("- openai/gpt-4.1-mini");
expect(result?.reply?.text).toContain("Switch: /model <provider/model>");
});
it("keeps /models list <provider> as an alias", async () => {
const result = await handleModelsCommand(buildParams("/models list anthropic"), true);
expect(result?.reply?.text).toContain("Models (anthropic) — showing 1-2 of 2 (page 1/1)");
expect(result?.reply?.text).toContain("- anthropic/claude-opus-4-5");
});
it("keeps the auth label on text-surface provider listings", async () => {
modelAuthLabelMocks.resolveModelAuthLabel.mockReturnValue("target-auth");
const params = buildParams("/models anthropic");
params.sessionEntry = {
sessionId: "wrapper-session",
updatedAt: Date.now(),
providerOverride: "wrapper-provider",
modelOverride: "wrapper-model",
authProfileOverride: "wrapper-auth",
};
params.sessionStore = {
"agent:support:main": {
"agent:main:discord:direct:user-1": {
sessionId: "target-session",
updatedAt: Date.now(),
providerOverride: "target-provider",
modelOverride: "target-model",
authProfileOverride: "target-auth",
},
};
const result = await handleModelsCommand(params, true);
expect(result?.shouldContinue).toBe(false);
expect(modelAuthLabelMocks.resolveModelAuthLabel).toHaveBeenCalledWith(
expect(result?.reply?.text).toContain("Models (anthropic · 🔑 target-auth) — showing 1-2 of 2");
});
it("guides /models add when no provider is given", async () => {
const result = await handleModelsCommand(buildParams("/models add"), true);
expect(result?.reply?.text).toContain(
"Add a model: choose a provider, then send one of these example commands.",
);
expect(result?.reply?.text).toContain(
"These examples use models that already exist for those providers.",
);
expect(result?.reply?.text).toContain("```text");
expect(result?.reply?.text).toContain("/models add ollama glm-5.1:cloud");
expect(result?.reply?.text).toContain("/models add lmstudio qwen/qwen3.5-9b");
expect(result?.reply?.text).toContain("/models add <provider> <modelId>");
expect(result?.reply?.text).toContain("Generic form:");
expect(result?.reply?.text).toContain("/models add <provider> <modelId>");
expect(result?.reply?.text).toContain("- anthropic");
expect(result?.reply?.text).toContain("- lmstudio");
expect(result?.reply?.text).toContain("- ollama");
expect(result?.reply?.text).toContain("- openai");
});
it("guides /models add <provider> when the model id is missing", async () => {
const result = await handleModelsCommand(buildParams("/models add ollama"), true);
expect(result?.reply?.text).toContain("Add a model to ollama:");
expect(result?.reply?.text).toContain("```text\n/models add ollama <modelId>\n```");
expect(result?.reply?.text).toContain("```text\n/models ollama\n```");
});
it("adds a model and points users back to browse or switch", async () => {
const result = await handleModelsCommand(buildParams("/models add ollama glm-5.1:cloud"), true);
expect(modelsAddMocks.addModelToConfig).toHaveBeenCalledWith(
expect.objectContaining({
sessionEntry: expect.objectContaining({
providerOverride: "target-provider",
modelOverride: "target-model",
}),
provider: "ollama",
modelId: "glm-5.1:cloud",
}),
);
expect(result?.reply?.text).toContain("target-auth");
expect(result?.reply?.text).toContain("✅ Added model: ollama/glm-5.1:cloud.");
expect(result?.reply?.text).toContain("Browse:");
expect(result?.reply?.text).toContain("/models ollama");
expect(result?.reply?.text).toContain("Switch now:");
expect(result?.reply?.text).toContain("/model ollama/glm-5.1:cloud");
expect(result?.reply?.text).not.toContain("/models repair");
expect(result?.reply?.text).not.toContain("/models ollama/glm-5.1:cloud");
});
it("honors model allowlists and config-only providers", async () => {
const allowlistedCfg = {
commands: { text: true },
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
models: {
"anthropic/claude-opus-4-5": {},
"openai/gpt-4.1-mini": {},
},
},
},
} as unknown as OpenClawConfig;
it("checks all config-write targets touched by /models add", async () => {
const result = await handleModelsCommand(buildParams("/models add ollama glm-5.1:cloud"), true);
const providerList = await handleModelsCommand(
buildModelsParams("/models", allowlistedCfg, "discord"),
true,
);
expect(providerList?.reply?.text).toContain("- anthropic");
expect(providerList?.reply?.text).toContain("- openai");
expect(providerList?.reply?.text).not.toContain("- google");
modelCatalogMocks.loadModelCatalog.mockResolvedValueOnce([
{ provider: "anthropic", id: "claude-opus-4-5", name: "Claude Opus" },
{ provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 Mini" },
expect(result?.shouldContinue).toBe(false);
expect(configWriteTargetMocks.resolveConfigWriteTargetFromPath).toHaveBeenCalledTimes(3);
expect(configWriteTargetMocks.resolveConfigWriteTargetFromPath.mock.calls).toEqual([
[["models", "providers", "ollama"]],
[["models", "providers", "ollama", "models"]],
[["agents", "defaults", "models"]],
]);
const minimaxCfg = {
commands: { text: true },
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
models: {
"anthropic/claude-opus-4-5": {},
"openai/gpt-4.1-mini": {},
"minimax/MiniMax-M2.7": { alias: "minimax" },
},
},
},
models: {
mode: "merge",
providers: {
minimax: {
baseUrl: "https://api.minimax.io/anthropic",
api: "anthropic-messages",
models: [
{ id: "MiniMax-M2.7", name: "MiniMax M2.7" },
{ id: "MiniMax-M2.7-highspeed", name: "MiniMax M2.7 Highspeed" },
],
},
},
},
} as unknown as OpenClawConfig;
const result = await handleModelsCommand(
buildModelsParams("/models minimax", minimaxCfg, "discord"),
true,
);
expect(result?.reply?.text).toContain("Models (minimax");
expect(result?.reply?.text).toContain("minimax/MiniMax-M2.7");
});
it("threads the routed agent through /models replies", async () => {
const scopedCfg = {
commands: { text: true },
agents: {
defaults: { model: { primary: "anthropic/claude-opus-4-5" } },
list: [{ id: "support", model: "localai/ultra-chat" }],
},
} as OpenClawConfig;
it("returns config-write denial text for add-time provider bootstrap", async () => {
configWriteAuthMocks.resolveConfigWriteDeniedText.mockReturnValueOnce("denied");
const result = await handleModelsCommand(
buildModelsParams("/models", scopedCfg, "discord", {
agentId: "support",
sessionKey: "agent:support:main",
}),
true,
);
const result = await handleModelsCommand(buildParams("/models add ollama glm-5.1:cloud"), true);
expect(result?.reply?.text).toContain("localai");
});
it("uses the canonical target session agent when wrapper agentId differs", async () => {
const scopedCfg = {
commands: { text: true },
agents: {
defaults: { model: { primary: "anthropic/claude-opus-4-5" } },
list: [{ id: "support", model: "localai/ultra-chat" }],
},
} as OpenClawConfig;
const result = await handleModelsCommand(
buildModelsParams("/models", scopedCfg, "discord", {
agentId: "main",
sessionKey: "agent:support:main",
}),
true,
);
expect(result?.reply?.text).toContain("localai");
expect(result).toEqual({
shouldContinue: false,
reply: { text: "denied" },
});
expect(modelsAddMocks.addModelToConfig).not.toHaveBeenCalled();
});
});

View File

@@ -8,7 +8,9 @@ import {
resolveDefaultModelForAgent,
resolveModelRefFromString,
} from "../../agents/model-selection.js";
import { resolveConfigWriteTargetFromPath } from "../../channels/plugins/config-writes.js";
import { getChannelPlugin } from "../../channels/plugins/index.js";
import { normalizeChannelId } from "../../channels/registry.js";
import type { SessionEntry } from "../../config/sessions.js";
import type { OpenClawConfig } from "../../config/types.openclaw.js";
import {
@@ -16,11 +18,19 @@ import {
normalizeOptionalString,
} from "../../shared/string-coerce.js";
import type { ReplyPayload } from "../types.js";
import { rejectUnauthorizedCommand } from "./command-gates.js";
import { resolveChannelAccountId } from "./channel-context.js";
import {
rejectNonOwnerCommand,
rejectUnauthorizedCommand,
requireGatewayClientScopeForInternalChannel,
} from "./command-gates.js";
import type { CommandHandler } from "./commands-types.js";
import { resolveConfigWriteDeniedText } from "./config-write-authorization.js";
import { addModelToConfig, listAddableProviders, validateAddProvider } from "./models-add.js";
const PAGE_SIZE_DEFAULT = 20;
const PAGE_SIZE_MAX = 100;
type ModelsCommandSessionEntry = Partial<
Pick<SessionEntry, "authProfileOverride" | "modelProvider" | "model">
>;
@@ -29,14 +39,24 @@ export type ModelsProviderData = {
byProvider: Map<string, Set<string>>;
providers: string[];
resolvedDefault: { provider: string; model: string };
/** Map from provider/model to human-readable display name (when different from model ID). */
modelNames: Map<string, string>;
};
/** Parsed form of the `/models` command body: provider menu, paged list, or add flow. */
type ParsedModelsCommand =
  | { action: "providers" }
  | {
      action: "list";
      provider?: string;
      page: number;
      pageSize: number;
      all: boolean;
    }
  | {
      action: "add";
      provider?: string;
      modelId?: string;
    };
/**
 * Build provider/model data from config and catalog.
 * Exported for reuse by callback handlers.
 */
export async function buildModelsProviderData(
cfg: OpenClawConfig,
agentId?: string,
@@ -110,20 +130,15 @@ export async function buildModelsProviderData(
add(entry.provider, entry.id);
}
// Include config-only allowlist keys that aren't in the curated catalog.
for (const raw of Object.keys(cfg.agents?.defaults?.models ?? {})) {
addRawModelRef(raw);
}
// Ensure configured defaults/fallbacks/image models show up even when the
// curated catalog doesn't know about them (custom providers, dev builds, etc.).
add(resolvedDefault.provider, resolvedDefault.model);
addModelConfigEntries();
const providers = [...byProvider.keys()].toSorted();
// Build a provider-scoped model display-name map so surfaces can show
// human-readable names without colliding across providers that share IDs.
const modelNames = new Map<string, string>();
for (const entry of catalog) {
if (entry.name && entry.name !== entry.id) {
@@ -138,18 +153,7 @@ function formatProviderLine(params: { provider: string; count: number }): string
return `- ${params.provider} (${params.count})`;
}
function parseModelsArgs(raw: string): {
provider?: string;
page: number;
pageSize: number;
all: boolean;
} {
const trimmed = raw.trim();
if (!trimmed) {
return { page: 1, pageSize: PAGE_SIZE_DEFAULT, all: false };
}
const tokens = trimmed.split(/\s+/g).filter(Boolean);
function parseListArgs(tokens: string[]): Extract<ParsedModelsCommand, { action: "list" }> {
const provider = normalizeOptionalString(tokens[0]);
let page = 1;
@@ -188,6 +192,7 @@ function parseModelsArgs(raw: string): {
}
return {
action: "list",
provider: provider ? normalizeProviderId(provider) : undefined,
page,
pageSize,
@@ -195,6 +200,30 @@ function parseModelsArgs(raw: string): {
};
}
/**
 * Parse the argument text after `/models` into a structured command.
 * Empty input means "show providers"; explicit subcommands are `providers`,
 * `list`, and `add`; any other leading token is treated as a list shorthand
 * (e.g. `/models ollama 2`).
 */
function parseModelsArgs(raw: string): ParsedModelsCommand {
  const trimmed = raw.trim();
  if (!trimmed) {
    return { action: "providers" };
  }
  const tokens = trimmed.split(/\s+/g).filter(Boolean);
  const subcommand = normalizeLowercaseStringOrEmpty(tokens[0]);
  if (subcommand === "providers") {
    return { action: "providers" };
  }
  if (subcommand === "list") {
    return parseListArgs(tokens.slice(1));
  }
  if (subcommand === "add") {
    // Model ids may contain spaces; everything after the provider token is the id.
    return {
      action: "add",
      provider: normalizeOptionalString(tokens[1]),
      modelId: normalizeOptionalString(tokens.slice(2).join(" ")),
    };
  }
  // Bare `/models <provider> [page]` keeps working as a list shorthand.
  return parseListArgs(tokens);
}
function resolveProviderLabel(params: {
provider: string;
cfg: OpenClawConfig;
@@ -229,6 +258,69 @@ export function formatModelsAvailableHeader(params: {
return `Models (${providerLabel}) — ${params.total} available`;
}
function buildModelsMenuText(params: {
providers: string[];
byProvider: ReadonlyMap<string, ReadonlySet<string>>;
}): string {
return [
"Providers:",
...params.providers.map((provider) =>
formatProviderLine({
provider,
count: params.byProvider.get(provider)?.size ?? 0,
}),
),
"",
"Use: /models <provider>",
"Switch: /model <provider/model>",
"Add: /models add",
].join("\n");
}
/** Wrap a command in a fenced ```text block so chat clients render it copy-friendly. */
function formatCopyableCommand(command: string): string {
  return "```text\n" + command + "\n```";
}
/**
 * Build up to three example `/models add` commands, in a fixed preference
 * order, restricted to providers that are actually addable in this
 * deployment. Falls back to the generic form when none match.
 */
function buildAddExamples(addableProviders: readonly string[]): string[] {
  // Preference order is fixed regardless of the input order.
  const candidates: ReadonlyArray<readonly [string, string]> = [
    ["ollama", "/models add ollama glm-5.1:cloud"],
    ["lmstudio", "/models add lmstudio qwen/qwen3.5-9b"],
    ["codex", "/models add codex gpt-5.4-mini"],
    ["openai-codex", "/models add openai-codex gpt-5.4"],
  ];
  const examples = candidates
    .filter(([provider]) => addableProviders.includes(provider))
    .map(([, example]) => example);
  if (examples.length === 0) {
    return ["/models add <provider> <modelId>"];
  }
  return examples.slice(0, 3);
}
/**
 * Resolve the canonical provider id whose config paths need write
 * authorization. Only `add` commands perform config writes; every other
 * action resolves to undefined.
 */
function resolveWriteProvider(params: {
  cfg: OpenClawConfig;
  parsed: ParsedModelsCommand;
}): string | undefined {
  const { parsed } = params;
  if (parsed.action !== "add" || !parsed.provider) {
    return undefined;
  }
  return normalizeProviderId(parsed.provider);
}
/** Convert the provider list into `{ id, count }` entries for channel plugin menus. */
function buildProviderInfos(params: {
  providers: string[];
  byProvider: ReadonlyMap<string, ReadonlySet<string>>;
}): Array<{ id: string; count: number }> {
  const infos: Array<{ id: string; count: number }> = [];
  for (const id of params.providers) {
    const models = params.byProvider.get(id);
    infos.push({ id, count: models ? models.size : 0 });
  }
  return infos;
}
export async function resolveModelsCommandReply(params: {
cfg: OpenClawConfig;
commandBodyNormalized: string;
@@ -244,20 +336,131 @@ export async function resolveModelsCommandReply(params: {
}
const argText = body.replace(/^\/models\b/i, "").trim();
const { provider, page, pageSize, all } = parseModelsArgs(argText);
const parsed = parseModelsArgs(argText);
const { byProvider, providers, modelNames } = await buildModelsProviderData(
params.cfg,
params.agentId,
);
const commandPlugin = params.surface ? getChannelPlugin(params.surface) : null;
const providerInfos = buildProviderInfos({ providers, byProvider });
if (parsed.action === "providers") {
const channelData =
commandPlugin?.commands?.buildModelsMenuChannelData?.({
providers: providerInfos,
}) ??
commandPlugin?.commands?.buildModelsProviderChannelData?.({
providers: providerInfos,
});
if (channelData) {
return {
text: "Select a provider:",
channelData,
};
}
return {
text: buildModelsMenuText({ providers, byProvider }),
};
}
if (parsed.action === "add") {
const addableProviders = listAddableProviders({
cfg: params.cfg,
discoveredProviders: providers,
});
if (!parsed.provider) {
const channelData = commandPlugin?.commands?.buildModelsAddProviderChannelData?.({
providers: addableProviders.map((id) => ({ id })),
});
return {
text: [
"Add a model: choose a provider, then send one of these example commands.",
"",
"These examples use models that already exist for those providers.",
"",
...buildAddExamples(addableProviders).flatMap((example) => [
formatCopyableCommand(example),
"",
]),
"Generic form:",
formatCopyableCommand("/models add <provider> <modelId>"),
"",
"Providers:",
...addableProviders.map((provider) => `- ${provider}`),
].join("\n"),
...(channelData ? { channelData } : {}),
};
}
const validatedProvider = validateAddProvider({
cfg: params.cfg,
provider: parsed.provider,
discoveredProviders: providers,
});
if (!validatedProvider.ok) {
return {
text: [
`Unknown provider: ${parsed.provider}`,
"",
"Available providers:",
...validatedProvider.providers.map((provider) => `- ${provider}`),
"",
"Use:",
"/models add <provider> <modelId>",
].join("\n"),
};
}
if (!parsed.modelId) {
return {
text: [
`Add a model to ${validatedProvider.provider}:`,
"",
"Use:",
formatCopyableCommand(`/models add ${validatedProvider.provider} <modelId>`),
"",
"Browse current models:",
formatCopyableCommand(`/models ${validatedProvider.provider}`),
].join("\n"),
};
}
const added = await addModelToConfig({
cfg: params.cfg,
provider: validatedProvider.provider,
modelId: parsed.modelId,
});
if (!added.ok) {
return {
text: `⚠️ ${added.error}`,
};
}
const modelRef = `${added.result.provider}/${added.result.modelId}`;
const warnings =
added.result.warnings.length > 0
? ["", ...added.result.warnings.map((warning) => `- ${warning}`)]
: [];
const allowlistNote = added.result.allowlistAdded ? " and added to the allowlist" : "";
return {
text: [
added.result.existed
? `✅ Model already exists: ${modelRef}${allowlistNote}.`
: `✅ Added model: ${modelRef}${allowlistNote}.`,
"Browse:",
`/models ${added.result.provider}`,
"",
"Switch now:",
`/model ${modelRef}`,
...warnings,
].join("\n"),
};
}
const { provider, page, pageSize, all } = parsed;
// Provider list (no provider specified)
if (!provider) {
const providerInfos = providers.map((p) => ({
id: p,
count: byProvider.get(p)?.size ?? 0,
}));
const channelData = commandPlugin?.commands?.buildModelsProviderChannelData?.({
providers: providerInfos,
});
@@ -267,48 +470,42 @@ export async function resolveModelsCommandReply(params: {
channelData,
};
}
const lines: string[] = [
"Providers:",
...providers.map((p) =>
formatProviderLine({ provider: p, count: byProvider.get(p)?.size ?? 0 }),
),
"",
"Use: /models <provider>",
"Switch: /model <provider/model>",
];
return { text: lines.join("\n") };
return {
text: buildModelsMenuText({ providers, byProvider }),
};
}
if (!byProvider.has(provider)) {
const lines: string[] = [
`Unknown provider: ${provider}`,
"",
"Available providers:",
...providers.map((p) => `- ${p}`),
"",
"Use: /models <provider>",
];
return { text: lines.join("\n") };
return {
text: [
`Unknown provider: ${provider}`,
"",
"Available providers:",
...providers.map((entry) => `- ${entry}`),
"",
"Use: /models <provider>",
].join("\n"),
};
}
const models = [...(byProvider.get(provider) ?? new Set<string>())].toSorted();
const total = models.length;
const providerLabel = resolveProviderLabel({
provider,
cfg: params.cfg,
agentDir: params.agentDir,
sessionEntry: params.sessionEntry,
});
if (total === 0) {
const lines: string[] = [
`Models (${providerLabel}) — none`,
"",
"Browse: /models",
"Switch: /model <provider/model>",
];
return { text: lines.join("\n") };
const emptyProviderLabel = resolveProviderLabel({
provider,
cfg: params.cfg,
agentDir: params.agentDir,
sessionEntry: params.sessionEntry,
});
return {
text: [
`Models (${emptyProviderLabel}) — none`,
"",
"Browse: /models",
"Switch: /model <provider/model>",
].join("\n"),
};
}
const interactivePageSize = 8;
@@ -324,15 +521,14 @@ export async function resolveModelsCommandReply(params: {
modelNames,
});
if (interactiveChannelData) {
const text = formatModelsAvailableHeader({
provider,
total,
cfg: params.cfg,
agentDir: params.agentDir,
sessionEntry: params.sessionEntry,
});
return {
text,
text: formatModelsAvailableHeader({
provider,
total,
cfg: params.cfg,
agentDir: params.agentDir,
sessionEntry: params.sessionEntry,
}),
channelData: interactiveChannelData,
};
}
@@ -342,36 +538,39 @@ export async function resolveModelsCommandReply(params: {
const safePage = all ? 1 : Math.max(1, Math.min(page, pageCount));
if (!all && page !== safePage) {
const lines: string[] = [
`Page out of range: ${page} (valid: 1-${pageCount})`,
"",
`Try: /models ${provider} ${safePage}`,
`All: /models ${provider} all`,
];
return { text: lines.join("\n") };
return {
text: [
`Page out of range: ${page} (valid: 1-${pageCount})`,
"",
`Try: /models list ${provider} ${safePage}`,
`All: /models list ${provider} all`,
].join("\n"),
};
}
const startIndex = (safePage - 1) * effectivePageSize;
const endIndexExclusive = Math.min(total, startIndex + effectivePageSize);
const pageModels = models.slice(startIndex, endIndexExclusive);
const header = `Models (${providerLabel}) — showing ${startIndex + 1}-${endIndexExclusive} of ${total} (page ${safePage}/${pageCount})`;
const lines: string[] = [header];
const providerLabel = resolveProviderLabel({
provider,
cfg: params.cfg,
agentDir: params.agentDir,
sessionEntry: params.sessionEntry,
});
const lines = [
`Models (${providerLabel}) — showing ${startIndex + 1}-${endIndexExclusive} of ${total} (page ${safePage}/${pageCount})`,
];
for (const id of pageModels) {
lines.push(`- ${provider}/${id}`);
}
lines.push("", "Switch: /model <provider/model>");
if (!all && safePage < pageCount) {
lines.push(`More: /models ${provider} ${safePage + 1}`);
lines.push(`More: /models list ${provider} ${safePage + 1}`);
}
if (!all) {
lines.push(`All: /models ${provider} all`);
lines.push(`All: /models list ${provider} all`);
}
const payload: ReplyPayload = { text: lines.join("\n") };
return payload;
return { text: lines.join("\n") };
}
export const handleModelsCommand: CommandHandler = async (params, allowTextCommands) => {
@@ -382,11 +581,60 @@ export const handleModelsCommand: CommandHandler = async (params, allowTextComma
if (!commandBodyNormalized.startsWith("/models")) {
return null;
}
const parsed = parseModelsArgs(commandBodyNormalized.replace(/^\/models\b/i, "").trim());
const unauthorized = rejectUnauthorizedCommand(params, "/models");
if (unauthorized) {
return unauthorized;
}
if (parsed.action === "add") {
const commandLabel = "/models add";
const nonOwner = rejectNonOwnerCommand(params, commandLabel);
if (nonOwner) {
return nonOwner;
}
const missingAdminScope = requireGatewayClientScopeForInternalChannel(params, {
label: commandLabel,
allowedScopes: ["operator.admin"],
missingText: "❌ /models add requires operator.admin for gateway clients.",
});
if (missingAdminScope) {
return missingAdminScope;
}
const writeProvider = resolveWriteProvider({
cfg: params.cfg,
parsed,
});
if (writeProvider) {
const channelId = params.command.channelId ?? normalizeChannelId(params.command.channel);
const accountId = resolveChannelAccountId({
cfg: params.cfg,
ctx: params.ctx,
command: params.command,
});
for (const path of [
["models", "providers", writeProvider],
["models", "providers", writeProvider, "models"],
["agents", "defaults", "models"],
]) {
const deniedText = resolveConfigWriteDeniedText({
cfg: params.cfg,
channel: params.command.channel,
channelId,
accountId,
gatewayClientScopes: params.ctx.GatewayClientScopes,
target: resolveConfigWriteTargetFromPath(path),
});
if (deniedText) {
return {
shouldContinue: false,
reply: { text: deniedText },
};
}
}
}
}
const modelsAgentId = params.sessionKey
? resolveSessionAgentId({
sessionKey: params.sessionKey,

View File

@@ -0,0 +1,538 @@
import { beforeEach, describe, expect, it, vi } from "vitest";
import type { OpenClawConfig } from "../../config/types.openclaw.js";
import { addModelToConfig, listAddableProviders, validateAddProvider } from "./models-add.js";
const configMocks = vi.hoisted(() => ({
ConfigMutationConflictError: class ConfigMutationConflictError extends Error {
readonly currentHash: string | null;
constructor(message: string, params: { currentHash: string | null }) {
super(message);
this.name = "ConfigMutationConflictError";
this.currentHash = params.currentHash;
}
},
readConfigFileSnapshot: vi.fn(),
replaceConfigFile: vi.fn(),
validateConfigObjectWithPlugins: vi.fn(),
}));
const facadeRuntimeMocks = vi.hoisted(() => ({
loadBundledPluginPublicSurfaceModuleSync: vi.fn(),
}));
const ollamaMocks = vi.hoisted(() => ({
buildOllamaModelDefinition: vi.fn(
(modelId: string, contextWindow?: number, capabilities?: string[]) => ({
id: modelId,
name: modelId,
reasoning: /think|reason/i.test(modelId),
input: capabilities?.includes("vision") ? ["text", "image"] : ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: contextWindow ?? 32768,
maxTokens: 8192,
}),
),
queryOllamaModelShowInfo: vi.fn(),
}));
const lmstudioRuntimeMocks = vi.hoisted(() => ({
LMSTUDIO_DEFAULT_API_KEY_ENV_VAR: "LMSTUDIO_API_KEY",
LMSTUDIO_DEFAULT_INFERENCE_BASE_URL: "http://127.0.0.1:1234/v1",
fetchLmstudioModels: vi.fn(),
mapLmstudioWireEntry: vi.fn(
(entry: {
key: string;
displayName?: string;
display_name?: string;
max_context_length?: number;
capabilities?: { reasoning?: { allowed_options?: string[] } };
}) => ({
id: entry.key,
displayName: entry.displayName ?? entry.display_name ?? entry.key,
reasoning: (entry.capabilities?.reasoning?.allowed_options?.length ?? 0) > 0,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: entry.max_context_length ?? 32768,
maxTokens: 8192,
}),
),
resolveLmstudioInferenceBase: vi.fn((baseUrl?: string) => baseUrl ?? "http://127.0.0.1:1234/v1"),
resolveLmstudioRequestContext: vi.fn(),
}));
vi.mock("../../config/config.js", () => ({
ConfigMutationConflictError: configMocks.ConfigMutationConflictError,
readConfigFileSnapshot: configMocks.readConfigFileSnapshot,
replaceConfigFile: configMocks.replaceConfigFile,
validateConfigObjectWithPlugins: configMocks.validateConfigObjectWithPlugins,
}));
vi.mock("../../plugin-sdk/facade-runtime.js", async () => {
const actual = await vi.importActual<typeof import("../../plugin-sdk/facade-runtime.js")>(
"../../plugin-sdk/facade-runtime.js",
);
return {
...actual,
loadBundledPluginPublicSurfaceModuleSync:
facadeRuntimeMocks.loadBundledPluginPublicSurfaceModuleSync,
};
});
vi.mock("../../plugin-sdk/lmstudio-runtime.js", () => {
return {
LMSTUDIO_DEFAULT_API_KEY_ENV_VAR: lmstudioRuntimeMocks.LMSTUDIO_DEFAULT_API_KEY_ENV_VAR,
LMSTUDIO_DEFAULT_INFERENCE_BASE_URL: lmstudioRuntimeMocks.LMSTUDIO_DEFAULT_INFERENCE_BASE_URL,
fetchLmstudioModels: lmstudioRuntimeMocks.fetchLmstudioModels,
mapLmstudioWireEntry: lmstudioRuntimeMocks.mapLmstudioWireEntry,
resolveLmstudioInferenceBase: lmstudioRuntimeMocks.resolveLmstudioInferenceBase,
resolveLmstudioRequestContext: lmstudioRuntimeMocks.resolveLmstudioRequestContext,
};
});
describe("models-add", () => {
beforeEach(() => {
configMocks.readConfigFileSnapshot.mockReset();
configMocks.replaceConfigFile.mockReset();
configMocks.validateConfigObjectWithPlugins.mockReset();
facadeRuntimeMocks.loadBundledPluginPublicSurfaceModuleSync.mockReset();
facadeRuntimeMocks.loadBundledPluginPublicSurfaceModuleSync.mockImplementation((params) => {
if (
params &&
typeof params === "object" &&
"dirName" in params &&
params.dirName === "ollama" &&
"artifactBasename" in params &&
params.artifactBasename === "api.js"
) {
return {
buildOllamaModelDefinition: ollamaMocks.buildOllamaModelDefinition,
queryOllamaModelShowInfo: ollamaMocks.queryOllamaModelShowInfo,
};
}
throw new Error(`Unexpected facade load: ${JSON.stringify(params)}`);
});
ollamaMocks.buildOllamaModelDefinition.mockClear();
ollamaMocks.queryOllamaModelShowInfo.mockReset();
ollamaMocks.queryOllamaModelShowInfo.mockResolvedValue({});
lmstudioRuntimeMocks.fetchLmstudioModels.mockReset();
lmstudioRuntimeMocks.mapLmstudioWireEntry.mockClear();
lmstudioRuntimeMocks.resolveLmstudioInferenceBase.mockClear();
lmstudioRuntimeMocks.resolveLmstudioRequestContext.mockReset();
});
it("lists addable providers only when the write path can actually add them", () => {
const cfg = {
models: {
providers: {
lmstudio: { baseUrl: "http://localhost:1234/v1", api: "openai-completions", models: [] },
},
},
} as OpenClawConfig;
expect(
listAddableProviders({
cfg,
discoveredProviders: ["openai", "ollama"],
}),
).toEqual(["lmstudio", "ollama"]);
});
it("validates add providers against addable providers", () => {
const cfg = {} as OpenClawConfig;
expect(validateAddProvider({ cfg, provider: "ollama", discoveredProviders: [] })).toEqual({
ok: true,
provider: "ollama",
});
expect(validateAddProvider({ cfg, provider: "missing", discoveredProviders: [] })).toEqual({
ok: false,
providers: ["lmstudio", "ollama"],
});
});
it("rejects discovered providers that are not configured for custom models", () => {
const cfg = {} as OpenClawConfig;
expect(
validateAddProvider({
cfg,
provider: "openai",
discoveredProviders: ["openai"],
}),
).toEqual({
ok: false,
providers: ["lmstudio", "ollama"],
});
});
it("adds an ollama model and extends the allowlist when needed", async () => {
const cfg = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
models: {
"anthropic/claude-opus-4-5": {},
},
},
},
models: {
providers: {
ollama: {
baseUrl: "http://127.0.0.1:11434",
api: "ollama",
models: [],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
ollamaMocks.queryOllamaModelShowInfo.mockResolvedValue({
contextWindow: 202752,
capabilities: ["thinking", "tools"],
});
configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
ok: true,
config,
}));
const result = await addModelToConfig({
cfg,
provider: "ollama",
modelId: "glm-5.1:cloud",
});
expect(result.ok).toBe(true);
if (!result.ok) {
return;
}
expect(result.result.existed).toBe(false);
expect(result.result.allowlistAdded).toBe(true);
expect(configMocks.replaceConfigFile).toHaveBeenCalledTimes(1);
const written = configMocks.replaceConfigFile.mock.calls[0]?.[0]?.nextConfig as OpenClawConfig;
expect(written.models?.providers?.ollama?.models).toEqual([
expect.objectContaining({
id: "glm-5.1:cloud",
reasoning: false,
contextWindow: 202752,
}),
]);
expect(written.agents?.defaults?.models?.["ollama/glm-5.1:cloud"]).toEqual({});
});
it("reuses an existing configured provider key when the stored key is non-canonical", async () => {
const cfg = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
models: {
"anthropic/claude-opus-4-5": {},
},
},
},
models: {
providers: {
Ollama: {
baseUrl: "http://127.0.0.1:11434",
api: "ollama",
models: [],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
ollamaMocks.queryOllamaModelShowInfo.mockResolvedValue({
contextWindow: 202752,
capabilities: ["thinking"],
});
configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
ok: true,
config,
}));
const result = await addModelToConfig({
cfg,
provider: "ollama",
modelId: "glm-5.1:cloud",
});
expect(result.ok).toBe(true);
if (!result.ok) {
return;
}
const written = configMocks.replaceConfigFile.mock.calls[0]?.[0]?.nextConfig as OpenClawConfig;
expect(written.models?.providers?.Ollama?.models).toEqual([
expect.objectContaining({
id: "glm-5.1:cloud",
}),
]);
expect(written.models?.providers?.ollama).toBeUndefined();
});
it("treats duplicate provider/model entries as idempotent", async () => {
const cfg = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-5" },
},
},
models: {
providers: {
ollama: {
baseUrl: "http://127.0.0.1:11434",
api: "ollama",
models: [
{
id: "glm-5.1:cloud",
name: "glm-5.1:cloud",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 202752,
maxTokens: 8192,
},
],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
const result = await addModelToConfig({
cfg,
provider: "ollama",
modelId: "glm-5.1:cloud",
});
expect(result).toEqual({
ok: true,
result: {
provider: "ollama",
modelId: "glm-5.1:cloud",
existed: true,
allowlistAdded: false,
warnings: ["Model metadata could not be auto-detected; saved with default capabilities."],
},
});
expect(configMocks.replaceConfigFile).not.toHaveBeenCalled();
});
it("bootstraps lmstudio provider config when missing", async () => {
const cfg = {
agents: { defaults: { model: { primary: "anthropic/claude-opus-4-5" } } },
models: { providers: {} },
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
lmstudioRuntimeMocks.resolveLmstudioRequestContext.mockResolvedValue({
apiKey: undefined,
headers: undefined,
});
lmstudioRuntimeMocks.fetchLmstudioModels.mockResolvedValue({
reachable: true,
status: 200,
models: [
{
type: "llm",
key: "qwen/qwen3.5-9b",
display_name: "Qwen 3.5 9B",
max_context_length: 131072,
capabilities: { reasoning: { allowed_options: ["off", "on"] } },
},
],
});
configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
ok: true,
config,
}));
const result = await addModelToConfig({
cfg,
provider: "lmstudio",
modelId: "qwen/qwen3.5-9b",
});
expect(result.ok).toBe(true);
const written = configMocks.replaceConfigFile.mock.calls[0]?.[0]?.nextConfig as OpenClawConfig;
expect(written.models?.providers?.lmstudio?.baseUrl).toBe("http://127.0.0.1:1234/v1");
expect(written.models?.providers?.lmstudio?.api).toBe("openai-completions");
expect(written.models?.providers?.lmstudio?.models).toEqual([
expect.objectContaining({
id: "qwen/qwen3.5-9b",
name: "Qwen 3.5 9B",
}),
]);
});
it("returns a generic validation error when config validation fails without issue details", async () => {
const cfg = {
models: {
providers: {
ollama: {
baseUrl: "http://127.0.0.1:11434",
api: "ollama",
models: [],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
ollamaMocks.queryOllamaModelShowInfo.mockResolvedValue({
contextWindow: 202752,
capabilities: ["thinking"],
});
configMocks.validateConfigObjectWithPlugins.mockReturnValue({
ok: false,
issues: [],
});
const result = await addModelToConfig({
cfg,
provider: "ollama",
modelId: "glm-5.1:cloud",
});
expect(result).toEqual({
ok: false,
error: "Config invalid after /models add (unknown validation error).",
});
});
it("skips lmstudio metadata detection for non-loopback base urls before resolving auth", async () => {
const cfg = {
models: {
providers: {
lmstudio: {
baseUrl: "https://example.com/v1",
api: "openai-completions",
models: [],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
ok: true,
config,
}));
const result = await addModelToConfig({
cfg,
provider: "lmstudio",
modelId: "qwen/qwen3.5-9b",
});
expect(result.ok).toBe(true);
if (!result.ok) {
return;
}
expect(lmstudioRuntimeMocks.resolveLmstudioRequestContext).not.toHaveBeenCalled();
expect(lmstudioRuntimeMocks.fetchLmstudioModels).not.toHaveBeenCalled();
expect(result.result.warnings).toContain(
"LM Studio metadata detection is limited to local baseUrl values; using defaults.",
);
});
it("does not leak raw lmstudio detection errors in user-facing warnings", async () => {
const cfg = {
models: {
providers: {
lmstudio: {
baseUrl: "http://localhost:1234/v1",
api: "openai-completions",
models: [],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
});
lmstudioRuntimeMocks.resolveLmstudioRequestContext.mockResolvedValue({
apiKey: "secret-token",
headers: { Authorization: "Bearer secret-token" },
});
lmstudioRuntimeMocks.fetchLmstudioModels.mockRejectedValue(
new Error("connect ECONNREFUSED http://127.0.0.1:1234/v1/api/v1/models"),
);
configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
ok: true,
config,
}));
const result = await addModelToConfig({
cfg,
provider: "lmstudio",
modelId: "qwen/qwen3.5-9b",
});
expect(result.ok).toBe(true);
if (!result.ok) {
return;
}
expect(result.result.warnings).toContain(
"LM Studio metadata detection failed; using defaults.",
);
expect(result.result.warnings.join(" ")).not.toContain("ECONNREFUSED");
expect(result.result.warnings.join(" ")).not.toContain("127.0.0.1");
});
it("returns a retryable error when the config changes before replace", async () => {
const cfg = {
models: {
providers: {
ollama: {
baseUrl: "http://127.0.0.1:11434",
api: "ollama",
models: [],
},
},
},
} as OpenClawConfig;
configMocks.readConfigFileSnapshot.mockResolvedValue({
valid: true,
parsed: cfg,
hash: "base-hash",
});
ollamaMocks.queryOllamaModelShowInfo.mockResolvedValue({
contextWindow: 202752,
capabilities: ["thinking"],
});
configMocks.validateConfigObjectWithPlugins.mockImplementation((config: OpenClawConfig) => ({
ok: true,
config,
}));
configMocks.replaceConfigFile.mockRejectedValue(
new configMocks.ConfigMutationConflictError("config changed since last load", {
currentHash: "new-hash",
}),
);
const result = await addModelToConfig({
cfg,
provider: "ollama",
modelId: "glm-5.1:cloud",
});
expect(result).toEqual({
ok: false,
error: "Config changed while /models add was running. Retry the command.",
});
});
});

View File

@@ -0,0 +1,571 @@
import {
buildConfiguredAllowlistKeys,
normalizeProviderId,
resolveDefaultModelForAgent,
resolveModelRefFromString,
} from "../../agents/model-selection.js";
import {
SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
SELF_HOSTED_DEFAULT_COST,
SELF_HOSTED_DEFAULT_MAX_TOKENS,
} from "../../agents/self-hosted-provider-defaults.js";
import {
ConfigMutationConflictError,
readConfigFileSnapshot,
replaceConfigFile,
validateConfigObjectWithPlugins,
} from "../../config/config.js";
import type { ModelDefinitionConfig, ModelProviderConfig } from "../../config/types.models.js";
import type { OpenClawConfig } from "../../config/types.openclaw.js";
import { formatErrorMessage } from "../../infra/errors.js";
import { normalizeHostname } from "../../infra/net/hostname.js";
import { createSubsystemLogger } from "../../logging/subsystem.js";
import { buildRemoteBaseUrlPolicy } from "../../memory-host-sdk/host/remote-http.js";
import {
createLazyFacadeValue,
loadBundledPluginPublicSurfaceModuleSync,
} from "../../plugin-sdk/facade-runtime.js";
import {
fetchLmstudioModels,
LMSTUDIO_DEFAULT_API_KEY_ENV_VAR,
LMSTUDIO_DEFAULT_INFERENCE_BASE_URL,
mapLmstudioWireEntry,
resolveLmstudioInferenceBase,
resolveLmstudioRequestContext,
} from "../../plugin-sdk/lmstudio-runtime.js";
import { isLoopbackIpAddress } from "../../shared/net/ip.js";
import {
normalizeLowercaseStringOrEmpty,
normalizeOptionalString,
} from "../../shared/string-coerce.js";
/**
 * Per-provider adapter used by the `/models add` flow.
 * `bootstrapProviderConfig` supplies a default provider entry when the config
 * has none; `detect` probes the provider for model metadata before the config
 * write. Both hooks are optional.
 */
export type ModelAddAdapter = {
  providerId: string;
  bootstrapProviderConfig?: (cfg: OpenClawConfig) => ModelProviderConfig | null;
  detect?: (params: {
    cfg: OpenClawConfig;
    providerConfig: ModelProviderConfig;
    modelId: string;
  }) => Promise<{
    found: boolean;
    model?: ModelDefinitionConfig;
    warnings?: string[];
  }>;
};
/**
 * Result payload for a successful add. `existed` marks an idempotent no-op
 * (the model was already configured); `allowlistAdded` marks an extension of
 * the agent model allowlist; `warnings` carries user-facing caveats.
 */
type AddModelOutcome = {
  provider: string;
  modelId: string;
  existed: boolean;
  allowlistAdded: boolean;
  warnings: string[];
};
/** Subset of Ollama model-show info consumed when building a model definition. */
type OllamaModelShowInfo = {
  contextWindow?: number;
  capabilities?: string[];
};
/** Shape of the bundled Ollama plugin's public API surface, loaded at runtime. */
type OllamaApiFacade = {
  buildOllamaModelDefinition: (
    modelId: string,
    contextWindow?: number,
    capabilities?: string[],
  ) => ModelDefinitionConfig;
  queryOllamaModelShowInfo: (apiBase: string, modelName: string) => Promise<OllamaModelShowInfo>;
};
const log = createSubsystemLogger("models-add");
// Default local Ollama endpoint used when bootstrapping a provider config.
const OLLAMA_DEFAULT_BASE_URL = "http://127.0.0.1:11434";
/**
 * Load the bundled Ollama plugin's public API surface.
 * Kept behind a function so the facade module is only resolved when first used.
 */
function loadOllamaApiFacade(): OllamaApiFacade {
  const loadParams = { dirName: "ollama", artifactBasename: "api.js" };
  return loadBundledPluginPublicSurfaceModuleSync<OllamaApiFacade>(loadParams);
}
// Lazy bindings over the facade: each resolves the Ollama module on first
// call, so importing this file never eagerly loads the plugin bundle.
const buildOllamaModelDefinition: OllamaApiFacade["buildOllamaModelDefinition"] =
  createLazyFacadeValue(loadOllamaApiFacade, "buildOllamaModelDefinition");
const queryOllamaModelShowInfo: OllamaApiFacade["queryOllamaModelShowInfo"] = createLazyFacadeValue(
  loadOllamaApiFacade,
  "queryOllamaModelShowInfo",
);
/**
 * Strip credentials, query string, and fragment from a URL before logging.
 * Returns undefined for blank input and the literal "[invalid_url]" when
 * parsing fails, so raw unparseable input is never written to logs.
 */
function sanitizeUrlForLogs(raw: string | undefined): string | undefined {
  const candidate = normalizeOptionalString(raw);
  if (!candidate) {
    return undefined;
  }
  let url: URL;
  try {
    url = new URL(candidate);
  } catch {
    return "[invalid_url]";
  }
  url.username = "";
  url.password = "";
  url.search = "";
  url.hash = "";
  return url.toString();
}
/**
 * Fallback model definition used when provider metadata detection is
 * unavailable or fails: text-only input, no reasoning, and self-hosted
 * default cost/context/max-token values.
 */
function buildDefaultModelDefinition(modelId: string): ModelDefinitionConfig {
  const definition: ModelDefinitionConfig = {
    id: modelId,
    name: modelId,
    reasoning: false,
    input: ["text"],
    cost: SELF_HOSTED_DEFAULT_COST,
    contextWindow: SELF_HOSTED_DEFAULT_CONTEXT_WINDOW,
    maxTokens: SELF_HOSTED_DEFAULT_MAX_TOKENS,
  };
  return definition;
}
function resolveConfiguredProvider(
cfg: OpenClawConfig,
providerId: string,
): { providerKey: string; providerConfig: ModelProviderConfig } | undefined {
const normalizedProviderId = normalizeProviderId(providerId);
if (!normalizedProviderId) {
return undefined;
}
const providers = cfg.models?.providers;
if (!providers) {
return undefined;
}
for (const [configuredProviderId, configuredProvider] of Object.entries(providers)) {
if (normalizeProviderId(configuredProviderId) === normalizedProviderId) {
return {
providerKey: configuredProviderId,
providerConfig: configuredProvider,
};
}
}
return undefined;
}
/**
 * Default LM Studio provider config used when /models add bootstraps the
 * provider: local inference base URL, OpenAI-compatible completions API,
 * and the standard env-var-backed API key placeholder.
 */
function buildDefaultLmstudioProviderConfig(): ModelProviderConfig {
  const config: ModelProviderConfig = {
    baseUrl: resolveLmstudioInferenceBase(LMSTUDIO_DEFAULT_INFERENCE_BASE_URL),
    api: "openai-completions",
    auth: "api-key",
    apiKey: LMSTUDIO_DEFAULT_API_KEY_ENV_VAR,
    models: [],
  };
  return config;
}
/**
 * True only when the LM Studio base URL targets the local machine over
 * http(s): `localhost`, `localhost.localdomain`, or a loopback IP address.
 * Metadata detection is restricted to such local endpoints.
 */
function isLocalLmstudioBaseUrl(baseUrl: string | undefined): boolean {
  const candidate = normalizeOptionalString(baseUrl);
  if (!candidate) {
    return false;
  }
  let parsed: URL;
  try {
    parsed = new URL(candidate);
  } catch {
    return false;
  }
  if (parsed.protocol !== "http:" && parsed.protocol !== "https:") {
    return false;
  }
  const host = normalizeHostname(parsed.hostname);
  if (host === "localhost" || host === "localhost.localdomain") {
    return true;
  }
  return isLoopbackIpAddress(host);
}
/**
 * Built-in /models add adapters, keyed by normalized provider id. Each one
 * can bootstrap a default provider config and/or probe the provider for
 * model metadata. Providers without an adapter can still be targeted when
 * they are already configured (see ensureProviderConfig).
 */
const MODEL_ADD_ADAPTERS: Record<string, ModelAddAdapter> = {
  ollama: {
    providerId: "ollama",
    // Minimal local Ollama config; the apiKey is a placeholder value since
    // a local Ollama instance does not require authentication.
    bootstrapProviderConfig: () => ({
      baseUrl: OLLAMA_DEFAULT_BASE_URL,
      api: "ollama",
      apiKey: "ollama-local",
      models: [],
    }),
    detect: async ({ providerConfig, modelId }) => {
      // Queries show-info metadata through the bundled plugin facade.
      const info = (await queryOllamaModelShowInfo(providerConfig.baseUrl, modelId)) ?? {};
      return {
        // "Found" when the probe returned any usable metadata at all.
        found: typeof info.contextWindow === "number" || (info.capabilities?.length ?? 0) > 0,
        model: buildOllamaModelDefinition(modelId, info.contextWindow, info.capabilities),
      };
    },
  },
  lmstudio: {
    providerId: "lmstudio",
    bootstrapProviderConfig: () => buildDefaultLmstudioProviderConfig(),
    detect: async ({ cfg, providerConfig, modelId }) => {
      // Only probe local endpoints; remote URLs fall back to defaults.
      if (!isLocalLmstudioBaseUrl(providerConfig.baseUrl)) {
        return {
          found: false,
          warnings: [
            "LM Studio metadata detection is limited to local baseUrl values; using defaults.",
          ],
        };
      }
      try {
        // Resolve auth/headers against a config view in which this
        // provider config is installed under the "lmstudio" key.
        const { apiKey, headers } = await resolveLmstudioRequestContext({
          config: {
            ...cfg,
            models: {
              ...cfg.models,
              providers: {
                ...cfg.models?.providers,
                lmstudio: providerConfig,
              },
            },
          },
          env: process.env,
          providerHeaders: providerConfig.headers,
        });
        const fetched = await fetchLmstudioModels({
          baseUrl: providerConfig.baseUrl,
          apiKey,
          headers,
          // SSRF guard scoped to this provider's own base URL.
          ssrfPolicy: buildRemoteBaseUrlPolicy(providerConfig.baseUrl),
        });
        // NOTE(review): assumes wire `key` values match the user-supplied
        // model id exactly (case-sensitive) — confirm against the API.
        const match = fetched.models.find(
          (entry) => normalizeOptionalString(entry.key) === modelId,
        );
        const base = match ? mapLmstudioWireEntry(match) : null;
        if (!base) {
          return { found: false };
        }
        return {
          found: true,
          model: {
            id: base.id,
            name: base.displayName,
            reasoning: base.reasoning,
            input: base.input,
            cost: base.cost,
            contextWindow: base.contextWindow,
            contextTokens: base.contextTokens,
            maxTokens: base.maxTokens,
          },
        };
      } catch (error) {
        // Detection is best-effort: log with a sanitized URL, then fall
        // back to defaults instead of failing the whole add flow.
        log.warn("lmstudio model metadata detection failed; using defaults", {
          baseUrl: sanitizeUrlForLogs(providerConfig.baseUrl),
          modelId,
          error: formatErrorMessage(error),
        });
        return {
          found: false,
          warnings: ["LM Studio metadata detection failed; using defaults."],
        };
      }
    },
  },
};
/**
 * A provider can receive /models add entries when it is already configured,
 * or when a built-in adapter can bootstrap a default provider config.
 */
function canAddProvider(params: { cfg: OpenClawConfig; provider: string }): boolean {
  const normalized = normalizeProviderId(params.provider);
  if (!normalized) {
    return false;
  }
  if (resolveConfiguredProvider(params.cfg, normalized)) {
    return true;
  }
  const adapter = MODEL_ADD_ADAPTERS[normalized];
  return Boolean(adapter?.bootstrapProviderConfig?.(params.cfg));
}
/**
 * Lists every provider id that /models add can target: discovered providers
 * (filtered to addable ones), providers already present in the config, and
 * providers with built-in adapters. The result is deduped and sorted.
 */
export function listAddableProviders(params: {
  cfg: OpenClawConfig;
  discoveredProviders?: readonly string[];
}): string[] {
  const collected = new Set<string>();
  for (const raw of params.discoveredProviders ?? []) {
    const id = normalizeProviderId(raw);
    if (!id) {
      continue;
    }
    if (canAddProvider({ cfg: params.cfg, provider: id })) {
      collected.add(id);
    }
  }
  for (const raw of Object.keys(params.cfg.models?.providers ?? {})) {
    const id = normalizeProviderId(raw);
    if (id) {
      collected.add(id);
    }
  }
  for (const adapterId of Object.keys(MODEL_ADD_ADAPTERS)) {
    collected.add(adapterId);
  }
  return [...collected].toSorted();
}
/**
 * Validates a /models add provider argument against the addable-provider
 * list. On failure the full list is returned so callers can suggest valid
 * choices to the user.
 */
export function validateAddProvider(params: {
  cfg: OpenClawConfig;
  provider: string;
  discoveredProviders?: readonly string[];
}): { ok: true; provider: string } | { ok: false; providers: string[] } {
  const candidates = listAddableProviders({
    cfg: params.cfg,
    discoveredProviders: params.discoveredProviders,
  });
  const provider = normalizeProviderId(params.provider);
  if (provider && candidates.includes(provider)) {
    return { ok: true, provider };
  }
  return { ok: false, providers: candidates };
}
function ensureProviderConfig(params: { cfg: OpenClawConfig; provider: string }):
| {
ok: true;
providerKey: string;
providerConfig: ModelProviderConfig;
bootstrapped: boolean;
}
| { ok: false } {
const configuredProvider = resolveConfiguredProvider(params.cfg, params.provider);
if (configuredProvider) {
return {
ok: true,
providerKey: configuredProvider.providerKey,
providerConfig: configuredProvider.providerConfig,
bootstrapped: false,
};
}
const bootstrapped = MODEL_ADD_ADAPTERS[params.provider]?.bootstrapProviderConfig?.(params.cfg);
if (!bootstrapped) {
return { ok: false };
}
return {
ok: true,
providerKey: params.provider,
providerConfig: bootstrapped,
bootstrapped: true,
};
}
/**
 * Detects a model definition via the provider's adapter, falling back to
 * the default definition (plus a warning) when no adapter exists or the
 * probe does not find the model.
 */
async function detectModelDefinition(params: {
  cfg: OpenClawConfig;
  provider: string;
  providerConfig: ModelProviderConfig;
  modelId: string;
}): Promise<{ model: ModelDefinitionConfig; warnings: string[] }> {
  const fallbackWarning =
    "Model metadata could not be auto-detected; saved with default capabilities.";
  const adapter = MODEL_ADD_ADAPTERS[params.provider];
  if (!adapter?.detect) {
    return {
      model: buildDefaultModelDefinition(params.modelId),
      warnings: [fallbackWarning],
    };
  }
  const detection = await adapter.detect(params);
  const detectionWarnings = detection.warnings ?? [];
  if (detection.found && detection.model) {
    return { model: detection.model, warnings: detectionWarnings };
  }
  return {
    model: buildDefaultModelDefinition(params.modelId),
    warnings: [...detectionWarnings, fallbackWarning],
  };
}
/**
 * Public detection entry point for command handlers: reports whether the
 * provider supports auto-detection (`supported`), whether the model was
 * found, and any adapter warnings. Never mutates or writes config.
 */
export async function detectProviderModelDefinition(params: {
  cfg: OpenClawConfig;
  provider: string;
  modelId: string;
}): Promise<{
  supported: boolean;
  found: boolean;
  model?: ModelDefinitionConfig;
  warnings: string[];
}> {
  const provider = normalizeProviderId(params.provider);
  const modelId = normalizeOptionalString(params.modelId) ?? "";
  const adapter = provider ? MODEL_ADD_ADAPTERS[provider] : undefined;
  // Detection is unsupported without a valid provider, model id, and adapter.
  if (!provider || !modelId || !adapter?.detect) {
    return { supported: false, found: false, warnings: [] };
  }
  const resolution = ensureProviderConfig({ cfg: params.cfg, provider });
  if (!resolution.ok) {
    return { supported: true, found: false, warnings: [] };
  }
  const detection = await adapter.detect({
    cfg: params.cfg,
    providerConfig: resolution.providerConfig,
    modelId,
  });
  return {
    supported: true,
    found: detection.found && !!detection.model,
    model: detection.model,
    warnings: detection.warnings ?? [],
  };
}
/**
 * Returns a deep-cloned config with the model appended to the provider's
 * `models` list. Entries are matched case-insensitively by id; an existing
 * entry is left untouched so user-customized definitions are preserved.
 */
function upsertModelEntry(params: {
  cfg: OpenClawConfig;
  provider: string;
  providerKey: string;
  providerConfig: ModelProviderConfig;
  model: ModelDefinitionConfig;
}): { nextConfig: OpenClawConfig; existed: boolean } {
  const nextConfig = structuredClone(params.cfg);
  nextConfig.models ??= {};
  nextConfig.models.providers ??= {};
  // Prefer the provider entry already in the cloned config; fall back to
  // the resolved (possibly bootstrapped) provider config.
  const current = nextConfig.models.providers[params.providerKey] ?? params.providerConfig;
  const models = Array.isArray(current.models) ? [...current.models] : [];
  const providerConfig = { ...current, models };
  const targetKey = normalizeLowercaseStringOrEmpty(params.model.id);
  const existed = models.some(
    (entry) => normalizeLowercaseStringOrEmpty(entry?.id) === targetKey,
  );
  if (!existed) {
    models.push(params.model);
  }
  nextConfig.models.providers[params.providerKey] = providerConfig;
  return { nextConfig, existed };
}
/**
 * When the config restricts agents to an explicit model allowlist
 * (`agents.defaults.models`), adds the new `<provider>/<modelId>` entry so
 * the freshly registered model is actually usable. Configs without an
 * allowlist (or with the entry already present) are returned unchanged.
 */
function maybeAddAllowlistEntry(params: {
  cfg: OpenClawConfig;
  provider: string;
  modelId: string;
}): { nextConfig: OpenClawConfig; added: boolean } {
  // Hoisted: both the allowlist-key builder and the ref resolver need the
  // same default provider; the previous version computed it twice.
  // NOTE(review): assumes resolveDefaultModelForAgent is deterministic for
  // a given cfg — confirm it has no per-call side effects.
  const defaultProvider = resolveDefaultModelForAgent({ cfg: params.cfg }).provider;
  const allowlistKeys = buildConfiguredAllowlistKeys({
    cfg: params.cfg,
    defaultProvider,
  });
  // No configured allowlist means every model is already allowed.
  if (!allowlistKeys || allowlistKeys.size === 0) {
    return { nextConfig: params.cfg, added: false };
  }
  const rawRef = `${params.provider}/${params.modelId}`;
  const resolved = resolveModelRefFromString({
    raw: rawRef,
    defaultProvider,
  });
  if (!resolved) {
    return { nextConfig: params.cfg, added: false };
  }
  // Allowlist keys are stored lowercase; compare using the normalized form.
  const normalizedKey = `${resolved.ref.provider}/${resolved.ref.model}`.toLowerCase();
  if (allowlistKeys.has(normalizedKey)) {
    return { nextConfig: params.cfg, added: false };
  }
  const nextConfig = structuredClone(params.cfg);
  nextConfig.agents ??= {};
  nextConfig.agents.defaults ??= {};
  nextConfig.agents.defaults.models ??= {};
  // Write the user-supplied casing; the lookup above is case-insensitive.
  nextConfig.agents.defaults.models[`${params.provider}/${params.modelId}`] = {};
  return { nextConfig, added: true };
}
/**
 * Registers a model under `models.providers.<provider>.models` and persists
 * the result to the config file, returning detection warnings alongside the
 * outcome.
 *
 * Flow: normalize inputs → re-read the on-disk config snapshot → resolve or
 * bootstrap the provider → detect (or default) the model definition →
 * upsert the model → extend an explicit agent allowlist if one exists →
 * validate → write with optimistic concurrency (baseHash).
 *
 * NOTE(review): `params.cfg` is never read here — the flow re-reads the
 * on-disk snapshot so concurrent edits are not clobbered; confirm callers
 * do not expect their in-memory cfg to be the write base.
 */
export async function addModelToConfig(params: {
  cfg: OpenClawConfig;
  provider: string;
  modelId: string;
}): Promise<{ ok: true; result: AddModelOutcome } | { ok: false; error: string }> {
  const provider = normalizeProviderId(params.provider);
  const modelId = normalizeOptionalString(params.modelId) ?? "";
  if (!provider || !modelId) {
    return { ok: false, error: "Provider and model id are required." };
  }
  // Always mutate from the current file snapshot, not the caller's config.
  const snapshot = await readConfigFileSnapshot();
  if (!snapshot.valid || !snapshot.parsed || typeof snapshot.parsed !== "object") {
    return { ok: false, error: "Config file is invalid; fix it before using /models add." };
  }
  const currentConfig = structuredClone(snapshot.parsed as OpenClawConfig);
  const providerResolution = ensureProviderConfig({
    cfg: currentConfig,
    provider,
  });
  if (!providerResolution.ok) {
    return {
      ok: false,
      error: `Provider "${provider}" is not configured for custom models yet. Configure the provider first, then retry /models add.`,
    };
  }
  // Best-effort metadata detection; falls back to default capabilities.
  const detected = await detectModelDefinition({
    cfg: currentConfig,
    provider,
    providerConfig: providerResolution.providerConfig,
    modelId,
  });
  const upserted = upsertModelEntry({
    cfg: currentConfig,
    provider,
    providerKey: providerResolution.providerKey,
    providerConfig: providerResolution.providerConfig,
    model: detected.model,
  });
  // If the config restricts agents to an allowlist, include the new model.
  const allowlisted = maybeAddAllowlistEntry({
    cfg: upserted.nextConfig,
    provider,
    modelId,
  });
  // Skip the write entirely when nothing would change on disk.
  const changed = !upserted.existed || allowlisted.added || providerResolution.bootstrapped;
  if (!changed) {
    return {
      ok: true,
      result: {
        provider,
        modelId,
        existed: true,
        allowlistAdded: false,
        warnings: detected.warnings,
      },
    };
  }
  // Validate the mutated config (including plugin schemas) before writing.
  const validated = validateConfigObjectWithPlugins(allowlisted.nextConfig);
  if (!validated.ok) {
    const issue = validated.issues[0];
    const detail = issue ? `${issue.path}: ${issue.message}` : "unknown validation error";
    return {
      ok: false,
      error: `Config invalid after /models add (${detail}).`,
    };
  }
  try {
    // Optimistic concurrency: baseHash makes the write fail if the file
    // changed after the snapshot was read.
    await replaceConfigFile({
      nextConfig: validated.config,
      ...(snapshot.hash !== undefined ? { baseHash: snapshot.hash } : {}),
    });
  } catch (error) {
    if (error instanceof ConfigMutationConflictError) {
      return {
        ok: false,
        error: "Config changed while /models add was running. Retry the command.",
      };
    }
    // Unexpected write failures propagate to the caller.
    throw error;
  }
  return {
    ok: true,
    result: {
      provider,
      modelId,
      existed: upserted.existed,
      allowlistAdded: allowlisted.added,
      warnings: detected.warnings,
    },
  };
}

View File

@@ -1684,6 +1684,7 @@ describe("buildCommandsMessage", () => {
expect(text).toContain("/skill - Run a skill by name.");
expect(text).toContain("/think (/thinking, /t) - Set thinking level.");
expect(text).toContain("/compact - Compact the session context.");
expect(text).toContain("/models - List model providers/models or add a model.");
expect(text).not.toContain("/config");
expect(text).not.toContain("/debug");
});

View File

@@ -464,9 +464,15 @@ export type ChannelCommandAdapter = {
totalPages: number;
agentId?: string;
}) => ReplyPayload["channelData"] | null;
buildModelsMenuChannelData?: (params: {
providers: Array<{ id: string; count: number }>;
}) => ReplyPayload["channelData"] | null;
buildModelsProviderChannelData?: (params: {
providers: Array<{ id: string; count: number }>;
}) => ReplyPayload["channelData"] | null;
buildModelsAddProviderChannelData?: (params: {
providers: Array<{ id: string }>;
}) => ReplyPayload["channelData"] | null;
buildModelsListChannelData?: (params: {
provider: string;
models: readonly string[];

View File

@@ -113,8 +113,7 @@ function mimeTypeForPath(filePath: string): string {
function estimateBase64DecodedBytes(base64: string): number {
const sanitized = base64.replace(/\s+/g, "");
const padding =
sanitized.endsWith("==") ? 2 : sanitized.endsWith("=") ? 1 : 0;
const padding = sanitized.endsWith("==") ? 2 : sanitized.endsWith("=") ? 1 : 0;
return Math.floor((sanitized.length * 3) / 4) - padding;
}
@@ -232,7 +231,9 @@ export async function buildWebchatAssistantMessageFromReplyPayloads(
payloadHasImage = true;
}
const needsSyntheticText =
payloadMediaBlocks.length > 0 && (!text || replyDirectivePrefix) && transcriptTextParts.length === 0;
payloadMediaBlocks.length > 0 &&
(!text || replyDirectivePrefix) &&
transcriptTextParts.length === 0;
const syntheticText = needsSyntheticText
? payloadHasAudio && payloadHasImage
? "Media reply"

View File

@@ -248,7 +248,9 @@ function buildTranscriptReplyText(payloads: ReplyPayload[]): string {
}
function hasSensitiveMediaPayload(payloads: ReplyPayload[]): boolean {
return payloads.some((payload) => payload.sensitiveMedia === true && isMediaBearingPayload(payload));
return payloads.some(
(payload) => payload.sensitiveMedia === true && isMediaBearingPayload(payload),
);
}
function resolveChatSendOriginatingRoute(params: {

View File

@@ -1,3 +1,4 @@
import { resetModelCatalogCache } from "../agents/model-catalog.js";
import { getActiveEmbeddedRunCount } from "../agents/pi-embedded-runner/runs.js";
import { getTotalPendingReplies } from "../auto-reply/reply/dispatcher-registry.js";
import type { CliDeps } from "../cli/deps.types.js";
@@ -102,6 +103,20 @@ export function createGatewayReloadHandlers(params: GatewayReloadHandlerParams)
const state = params.getState();
const nextState = { ...state };
if (
plan.changedPaths.some(
(path) =>
path === "models" ||
path.startsWith("models.") ||
path === "agents.defaults.model" ||
path.startsWith("agents.defaults.model.") ||
path === "agents.defaults.models" ||
path.startsWith("agents.defaults.models."),
)
) {
resetModelCatalogCache();
}
if (plan.reloadHooks) {
try {
nextState.hooksConfig = resolveHooksConfig(nextConfig);

View File

@@ -2,7 +2,13 @@ import fs from "node:fs/promises";
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { WebSocket } from "ws";
import {
__setModelCatalogImportForTest,
resetModelCatalogCacheForTest,
} from "../agents/model-catalog.js";
import { buildModelsProviderData } from "../auto-reply/reply/commands-models.js";
import { resolveMainSessionKeyFromConfig } from "../config/sessions.js";
import type { OpenClawConfig } from "../config/types.openclaw.js";
import { drainSystemEvents } from "../infra/system-events.js";
import { withEnvAsync } from "../test-utils/env.js";
import {
@@ -48,6 +54,7 @@ const hoisted = vi.hoisted(() => {
const startGmailWatcher = vi.fn(async () => ({ started: true }));
const stopGmailWatcher = vi.fn(async () => {});
const resetModelCatalogCache = vi.fn();
const providerManager = {
getRuntimeSnapshot: vi.fn(() => ({
@@ -148,6 +155,7 @@ const hoisted = vi.hoisted(() => {
activeTaskCount,
startGmailWatcher,
stopGmailWatcher,
resetModelCatalogCache,
providerManager,
createChannelManager,
startGatewayConfigReloader,
@@ -157,6 +165,8 @@ const hoisted = vi.hoisted(() => {
};
});
type PiDiscoveryRuntimeModule = typeof import("../agents/pi-model-discovery-runtime.js");
vi.mock("../cron/service.js", () => ({
CronService: hoisted.CronService,
}));
@@ -170,6 +180,19 @@ vi.mock("../hooks/gmail-watcher.js", () => ({
stopGmailWatcher: hoisted.stopGmailWatcher,
}));
vi.mock("../agents/model-catalog.js", async () => {
const actual = await vi.importActual<typeof import("../agents/model-catalog.js")>(
"../agents/model-catalog.js",
);
return {
...actual,
resetModelCatalogCache: vi.fn(() => {
actual.resetModelCatalogCache();
hoisted.resetModelCatalogCache();
}),
};
});
vi.mock("../agents/pi-embedded-runner/runs.js", async () => {
const actual = await vi.importActual<typeof import("../agents/pi-embedded-runner/runs.js")>(
"../agents/pi-embedded-runner/runs.js",
@@ -254,6 +277,7 @@ describe("gateway hot reload", () => {
hoisted.totalPendingReplies.value = 0;
hoisted.totalQueueSize.value = 0;
hoisted.activeTaskCount.value = 0;
hoisted.resetModelCatalogCache.mockReset();
});
afterEach(() => {
@@ -528,6 +552,131 @@ describe("gateway hot reload", () => {
});
});
it("clears the model catalog cache on model-related hot reloads", async () => {
await withGatewayServer(async () => {
const onHotReload = hoisted.getOnHotReload();
expect(onHotReload).toBeTypeOf("function");
await onHotReload?.(
{
changedPaths: ["models.providers.ollama.models"],
restartGateway: false,
restartReasons: [],
hotReasons: ["models.providers.ollama.models"],
reloadHooks: false,
restartGmailWatcher: false,
restartCron: false,
restartHeartbeat: false,
restartChannels: new Set(),
noopPaths: [],
},
{
models: {
providers: {
ollama: {
models: [{ id: "glm-5.1:cloud" }],
},
},
},
},
);
expect(hoisted.resetModelCatalogCache).toHaveBeenCalledTimes(1);
});
});
it("makes newly available catalog models visible in-process after hot reload", async () => {
type TestRegistryEntry = { provider: string; id: string; name: string };
let registryEntries: TestRegistryEntry[] = [
{ provider: "ollama", id: "existing", name: "Existing" },
];
__setModelCatalogImportForTest(
async () =>
({
discoverAuthStorage: () => ({}),
ModelRegistry: class {
getAll() {
return registryEntries;
}
},
}) as unknown as PiDiscoveryRuntimeModule,
);
resetModelCatalogCacheForTest();
try {
await withGatewayServer(async () => {
const onHotReload = hoisted.getOnHotReload();
expect(onHotReload).toBeTypeOf("function");
const baseConfig: OpenClawConfig = {
agents: {
defaults: {
model: {
primary: "ollama/existing",
},
},
},
models: {
providers: {
ollama: {
baseUrl: "http://127.0.0.1:11434",
api: "ollama",
apiKey: "ollama-local",
models: [],
},
},
},
};
const before = await buildModelsProviderData(baseConfig);
expect([...(before.byProvider.get("ollama") ?? new Set()).values()]).toEqual(["existing"]);
registryEntries = [
...registryEntries,
{ provider: "ollama", id: "glm-5.1:cloud", name: "GLM 5.1 Cloud" },
];
const nextConfig = structuredClone(baseConfig);
await onHotReload?.(
{
changedPaths: ["models.providers.ollama.models"],
restartGateway: false,
restartReasons: [],
hotReasons: ["models.providers.ollama.models"],
reloadHooks: false,
restartGmailWatcher: false,
restartCron: false,
restartHeartbeat: false,
restartChannels: new Set(),
noopPaths: [],
},
nextConfig,
);
__setModelCatalogImportForTest(
async () =>
({
discoverAuthStorage: () => ({}),
ModelRegistry: class {
getAll() {
return registryEntries;
}
},
}) as unknown as PiDiscoveryRuntimeModule,
);
const after = await buildModelsProviderData(nextConfig);
expect([...(after.byProvider.get("ollama") ?? new Set()).values()]).toEqual([
"existing",
"glm-5.1:cloud",
]);
expect(hoisted.resetModelCatalogCache).toHaveBeenCalledTimes(1);
});
} finally {
__setModelCatalogImportForTest();
resetModelCatalogCacheForTest();
}
});
it("serves secrets.reload immediately after startup without race failures", async () => {
await writeEnvRefConfig();
process.env.OPENAI_API_KEY = "sk-startup"; // pragma: allowlist secret

View File

@@ -38,6 +38,9 @@ describe("plugin-sdk/command-auth", () => {
expect(buildHelpMessage(cfg)).toContain("/commands for full list");
expect(buildCommandsMessage(cfg)).toContain("More: /tools for available capabilities");
expect(buildCommandsMessage(cfg)).toContain(
"/models - List model providers/models or add a model.",
);
expect(buildCommandsMessagePaginated(cfg)).toMatchObject({
currentPage: 1,
totalPages: expect.any(Number),

View File

@@ -48,6 +48,13 @@ export type LmstudioModelBase = {
maxTokens: number;
};
export type FetchLmstudioModelsResult = {
reachable: boolean;
status?: number;
models: LmstudioModelWire[];
error?: unknown;
};
type FacadeModule = {
LMSTUDIO_DEFAULT_BASE_URL: string;
LMSTUDIO_DEFAULT_INFERENCE_BASE_URL: string;
@@ -68,7 +75,10 @@ type FacadeModule = {
baseUrl?: string;
apiKey?: string;
headers?: Record<string, string>;
}) => Promise<unknown>;
ssrfPolicy?: unknown;
timeoutMs?: number;
fetchImpl?: typeof fetch;
}) => Promise<FetchLmstudioModelsResult>;
mapLmstudioWireEntry: (entry: LmstudioModelWire) => LmstudioModelBase | null;
discoverLmstudioModels: (params?: {
config?: OpenClawConfig;
@@ -93,6 +103,16 @@ type FacadeModule = {
headers?: unknown;
path?: string;
}) => Promise<Record<string, string> | undefined>;
resolveLmstudioRequestContext: (params: {
config?: OpenClawConfig;
env?: NodeJS.ProcessEnv;
headers?: unknown;
providerHeaders?: unknown;
path?: string;
}) => Promise<{
apiKey?: string;
headers?: Record<string, string>;
}>;
resolveLmstudioRuntimeApiKey: (params: {
config?: OpenClawConfig;
agentDir?: string;
@@ -151,5 +171,7 @@ export const resolveLmstudioConfiguredApiKey: FacadeModule["resolveLmstudioConfi
createLazyFacadeRuntimeValue(loadFacadeModule, "resolveLmstudioConfiguredApiKey");
export const resolveLmstudioProviderHeaders: FacadeModule["resolveLmstudioProviderHeaders"] =
createLazyFacadeRuntimeValue(loadFacadeModule, "resolveLmstudioProviderHeaders");
export const resolveLmstudioRequestContext: FacadeModule["resolveLmstudioRequestContext"] =
createLazyFacadeRuntimeValue(loadFacadeModule, "resolveLmstudioRequestContext");
export const resolveLmstudioRuntimeApiKey: FacadeModule["resolveLmstudioRuntimeApiKey"] =
createLazyFacadeRuntimeValue(loadFacadeModule, "resolveLmstudioRuntimeApiKey");

View File

@@ -59,6 +59,21 @@ function readRepoSource(file: string): string {
return source;
}
function isAllowedBundledExtensionImport(specifier: string): boolean {
return /(?:^|\/)extensions\/[^/]+\/(?:api|runtime-api)\.js$/u.test(specifier);
}
function collectBundledExtensionImports(source: string): string[] {
const matches = [
...source.matchAll(/from\s+["']([^"']*extensions\/[^"']+)["']/gu),
...source.matchAll(/vi\.(?:mock|doMock)\(\s*["']([^"']*extensions\/[^"']+)["']/gu),
...source.matchAll(/importActual(?:<[^>]*>)?\(\s*["']([^"']*extensions\/[^"']+)["']/gu),
];
return matches
.map((match) => match[1])
.filter((specifier): specifier is string => typeof specifier === "string");
}
describe("plugin contract boundary invariants", () => {
it("keeps bundled-capability-metadata confined to contract/test inventory", () => {
const files = listTsFiles("src");
@@ -86,11 +101,8 @@ describe("plugin contract boundary invariants", () => {
it("keeps core tests off bundled extension deep imports", () => {
const files = listTsFiles("src", { testOnly: true });
const offenders = files.filter((file) => {
const source = readRepoSource(file);
return (
/from\s+["'][^"']*extensions\/.+(?:api|runtime-api|test-api)\.js["']/u.test(source) ||
/vi\.(?:mock|doMock)\(\s*["'][^"']*extensions\/.+["']/u.test(source) ||
/importActual<[^>]*>\(\s*["'][^"']*extensions\/.+["']/u.test(source)
return collectBundledExtensionImports(readRepoSource(file)).some(
(specifier) => !isAllowedBundledExtensionImport(specifier),
);
});
expect(offenders).toEqual([]);