diff --git a/CHANGELOG.md b/CHANGELOG.md index a3f3c9f38d7..40b86523d2d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,7 @@ Docs: https://docs.openclaw.ai - CLI/model probes: add repeatable image `--file` inputs to `infer model run` for local and gateway multimodal model smokes, so vision models such as Ollama Qwen VL and Gemini can be tested through the raw model-probe surface. Fixes #63700. Thanks @cedricjanssens. - CLI/model probes: request trusted operator scope for `infer model run --gateway --model <model>` so Gateway raw model smokes can use one-off provider/model overrides instead of being rejected before provider auth resolution. Fixes #73759. Thanks @chrislro. - CLI/image describe: pass `--prompt` and `--timeout-ms` through `infer image describe` and `describe-many`, so custom vision instructions and slow local model budgets reach media-understanding providers such as Ollama, OpenAI, Google, and OpenRouter. Refs #63700. Thanks @cedricjanssens. +- Model selection: include the rejected provider/model ref and allowlist recovery hint when a stored session override is cleared, so local model selections such as Gemma GGUF variants do not fall back to the default with a generic message. Refs #71069. Thanks @CyberRaccoonTeam. - WhatsApp/Web: pass explicit Baileys socket timings into every WhatsApp Web socket and expose `web.whatsapp.*` keepalive, connect, and query timeout settings so unstable networks can avoid repeated 408 disconnect and opening-handshake timeout loops. Fixes #56365. (#73580) Thanks @velvet-shark. - Channels/Telegram: persist native command metadata on target sessions so topic, helper, and ACP-bound slash commands keep their session metadata attached to the routed conversation. (#57548) Thanks @GaosCode. - Channels/native commands: keep validated native slash command replies visible in group chats while preserving explicit owner allowlists for command authorization. (#73672) Thanks @obviyus. 
diff --git a/docs/concepts/models.md b/docs/concepts/models.md index 89820a6a915..0a3d7b41b31 100644 --- a/docs/concepts/models.md +++ b/docs/concepts/models.md @@ -131,6 +131,12 @@ This happens **before** a normal reply is generated, so the message can feel lik +For local/GGUF models, store the full provider-prefixed ref in the allowlist, +for example `ollama/gemma4:26b`, `lmstudio/Gemma4-26b-a4-it-gguf`, or the +exact provider/model shown by `openclaw models list --provider <provider>`. +Bare local filenames or display names are not enough when the allowlist is +active. + Example allowlist config: ```json5 diff --git a/src/auto-reply/reply/get-reply-directives-apply.test.ts b/src/auto-reply/reply/get-reply-directives-apply.test.ts new file mode 100644 index 00000000000..abf6875fde0 --- /dev/null +++ b/src/auto-reply/reply/get-reply-directives-apply.test.ts @@ -0,0 +1,23 @@ +import { describe, expect, it } from "vitest"; +import { formatModelOverrideResetEvent } from "./get-reply-directives-apply.js"; + +describe("formatModelOverrideResetEvent", () => { + it("names the rejected model override and allowlist recovery path", () => { + expect( + formatModelOverrideResetEvent({ + rejectedRef: "ollama/Gemma4-26b-a4-it-gguf", + initialModelLabel: "github-copilot/gpt-4o", + }), + ).toBe( + "Model override ollama/Gemma4-26b-a4-it-gguf is not allowed for this agent; reverted to github-copilot/gpt-4o. 
Add ollama/Gemma4-26b-a4-it-gguf to agents.defaults.models or pick an allowed model with /model list.", + ); + }); + + it("keeps the legacy generic message when the rejected ref is unknown", () => { + expect( + formatModelOverrideResetEvent({ + initialModelLabel: "github-copilot/gpt-4o", + }), + ).toBe("Model override not allowed for this agent; reverted to github-copilot/gpt-4o."); + }); +}); diff --git a/src/auto-reply/reply/get-reply-directives-apply.ts b/src/auto-reply/reply/get-reply-directives-apply.ts index c6b9db6feed..4bb9000623b 100644 --- a/src/auto-reply/reply/get-reply-directives-apply.ts +++ b/src/auto-reply/reply/get-reply-directives-apply.ts @@ -65,6 +65,16 @@ function hasOnlyModelDirective(directives: InlineDirectives): boolean { ); } +export function formatModelOverrideResetEvent(params: { + rejectedRef?: string; + initialModelLabel: string; +}): string { + if (params.rejectedRef) { + return `Model override ${params.rejectedRef} is not allowed for this agent; reverted to ${params.initialModelLabel}. 
Add ${params.rejectedRef} to agents.defaults.models or pick an allowed model with /model list.`; + } + return `Model override not allowed for this agent; reverted to ${params.initialModelLabel}.`; +} + export type ApplyDirectiveResult = | { kind: "reply"; reply: ReplyPayload | ReplyPayload[] | undefined } | { @@ -179,7 +189,10 @@ export async function applyInlineDirectiveOverrides(params: { if (modelState.resetModelOverride) { enqueueSystemEvent( - `Model override not allowed for this agent; reverted to ${initialModelLabel}.`, + formatModelOverrideResetEvent({ + rejectedRef: modelState.resetModelOverrideRef, + initialModelLabel, + }), { sessionKey, contextKey: `model:reset:${initialModelLabel}`, diff --git a/src/auto-reply/reply/model-selection.test.ts b/src/auto-reply/reply/model-selection.test.ts index 2a5f184a644..080348b5ef9 100644 --- a/src/auto-reply/reply/model-selection.test.ts +++ b/src/auto-reply/reply/model-selection.test.ts @@ -557,6 +557,7 @@ describe("createModelSelectionState respects session model override", () => { }); expect(state.resetModelOverride).toBe(true); + expect(state.resetModelOverrideRef).toBe("openai/gpt-4o-mini"); expect(sessionStore[sessionKey]?.modelOverride).toBeUndefined(); expect(sessionStore[sessionKey]?.providerOverride).toBeUndefined(); }); diff --git a/src/auto-reply/reply/model-selection.ts b/src/auto-reply/reply/model-selection.ts index d6d02570552..5ed9097a301 100644 --- a/src/auto-reply/reply/model-selection.ts +++ b/src/auto-reply/reply/model-selection.ts @@ -31,6 +31,7 @@ type ModelSelectionState = { allowedModelKeys: Set; allowedModelCatalog: ModelCatalog; resetModelOverride: boolean; + resetModelOverrideRef?: string; resolveThinkingCatalog: () => Promise; resolveDefaultThinkingLevel: () => Promise; /** Default reasoning level from model capability: "on" if model has reasoning, else "off". 
*/ @@ -49,6 +50,7 @@ export function createFastTestModelSelectionState(params: { allowedModelKeys: new Set(), allowedModelCatalog: [], resetModelOverride: false, + resetModelOverrideRef: undefined, resolveThinkingCatalog: async () => [], resolveDefaultThinkingLevel: async () => params.agentCfg?.thinkingDefault as ThinkLevel, resolveDefaultReasoningLevel: async () => "off", @@ -129,6 +131,7 @@ export async function createModelSelectionState(params: { let allowedModelCatalog: ModelCatalog = configuredModelCatalog; let modelCatalog: ModelCatalog | null = null; let resetModelOverride = false; + let resetModelOverrideRef: string | undefined; const agentEntry = params.agentId ? resolveAgentConfig(cfg, params.agentId) : undefined; const directStoredOverride = resolvePersistedOverrideModelRef({ defaultProvider, @@ -192,6 +195,9 @@ export async function createModelSelectionState(params: { } } resetModelOverride = updated; + if (updated) { + resetModelOverrideRef = key; + } } } @@ -309,6 +315,7 @@ export async function createModelSelectionState(params: { allowedModelKeys, allowedModelCatalog, resetModelOverride, + resetModelOverrideRef, resolveThinkingCatalog, resolveDefaultThinkingLevel, resolveDefaultReasoningLevel,