fix(model): explain rejected session overrides

This commit is contained in:
Peter Steinberger
2026-04-28 23:33:15 +01:00
parent baeba45be9
commit f4a9d34f98
6 changed files with 52 additions and 1 deletions

View File

@@ -55,6 +55,7 @@ Docs: https://docs.openclaw.ai
- CLI/model probes: add repeatable image `--file` inputs to `infer model run` for local and gateway multimodal model smokes, so vision models such as Ollama Qwen VL and Gemini can be tested through the raw model-probe surface. Fixes #63700. Thanks @cedricjanssens.
- CLI/model probes: request trusted operator scope for `infer model run --gateway --model <provider/model>` so Gateway raw model smokes can use one-off provider/model overrides instead of being rejected before provider auth resolution. Fixes #73759. Thanks @chrislro.
- CLI/image describe: pass `--prompt` and `--timeout-ms` through `infer image describe` and `describe-many`, so custom vision instructions and slow local model budgets reach media-understanding providers such as Ollama, OpenAI, Google, and OpenRouter. Refs #63700. Thanks @cedricjanssens.
- Model selection: include the rejected provider/model ref and allowlist recovery hint when a stored session override is cleared, so local model selections such as Gemma GGUF variants do not fall back to the default with a generic message. Refs #71069. Thanks @CyberRaccoonTeam.
- WhatsApp/Web: pass explicit Baileys socket timings into every WhatsApp Web socket and expose `web.whatsapp.*` keepalive, connect, and query timeout settings so unstable networks can avoid repeated 408 disconnects and opening-handshake timeout loops. Fixes #56365. (#73580) Thanks @velvet-shark.
- Channels/Telegram: persist native command metadata on target sessions so topic, helper, and ACP-bound slash commands keep their session metadata attached to the routed conversation. (#57548) Thanks @GaosCode.
- Channels/native commands: keep validated native slash command replies visible in group chats while preserving explicit owner allowlists for command authorization. (#73672) Thanks @obviyus.

View File

@@ -131,6 +131,12 @@ This happens **before** a normal reply is generated, so the message can feel lik
</Warning>
For local/GGUF models, store the full provider-prefixed ref in the allowlist,
for example `ollama/gemma4:26b`, `lmstudio/Gemma4-26b-a4-it-gguf`, or the
exact provider/model shown by `openclaw models list --provider <provider>`.
Bare local filenames or display names are not enough when the allowlist is
active.
Example allowlist config:
```json5

View File

@@ -0,0 +1,23 @@
import { describe, expect, it } from "vitest";
import { formatModelOverrideResetEvent } from "./get-reply-directives-apply.js";
// Unit tests for formatModelOverrideResetEvent: the system-event text emitted
// when a stored session model override is rejected by the agent allowlist.
describe("formatModelOverrideResetEvent", () => {
// With a known rejected ref, the message should name the exact provider/model
// and point at both recovery paths (agents.defaults.models or /model list).
it("names the rejected model override and allowlist recovery path", () => {
expect(
formatModelOverrideResetEvent({
rejectedRef: "ollama/Gemma4-26b-a4-it-gguf",
initialModelLabel: "github-copilot/gpt-4o",
}),
).toBe(
"Model override ollama/Gemma4-26b-a4-it-gguf is not allowed for this agent; reverted to github-copilot/gpt-4o. Add ollama/Gemma4-26b-a4-it-gguf to agents.defaults.models or pick an allowed model with /model list.",
);
});
// Backward compatibility: callers that do not pass rejectedRef must still get
// the pre-existing generic wording, unchanged.
it("keeps the legacy generic message when the rejected ref is unknown", () => {
expect(
formatModelOverrideResetEvent({
initialModelLabel: "github-copilot/gpt-4o",
}),
).toBe("Model override not allowed for this agent; reverted to github-copilot/gpt-4o.");
});
});

View File

@@ -65,6 +65,16 @@ function hasOnlyModelDirective(directives: InlineDirectives): boolean {
);
}
/**
 * Builds the system-event message shown when a stored session model override
 * is rejected by the agent's model allowlist and the session reverts to its
 * initial model.
 *
 * @param params.rejectedRef - Provider-prefixed ref that was rejected (e.g.
 *   `ollama/gemma4:26b`); when omitted, the legacy generic wording is used.
 * @param params.initialModelLabel - Label of the model the session reverted to.
 * @returns User-facing message, including allowlist recovery hints when the
 *   rejected ref is known.
 */
export function formatModelOverrideResetEvent(params: {
  rejectedRef?: string;
  initialModelLabel: string;
}): string {
  const { rejectedRef, initialModelLabel } = params;
  // Shared tail so both variants stay in sync on the reverted-to wording.
  const reverted = `reverted to ${initialModelLabel}`;
  if (!rejectedRef) {
    return `Model override not allowed for this agent; ${reverted}.`;
  }
  return `Model override ${rejectedRef} is not allowed for this agent; ${reverted}. Add ${rejectedRef} to agents.defaults.models or pick an allowed model with /model list.`;
}
export type ApplyDirectiveResult =
| { kind: "reply"; reply: ReplyPayload | ReplyPayload[] | undefined }
| {
@@ -179,7 +189,10 @@ export async function applyInlineDirectiveOverrides(params: {
if (modelState.resetModelOverride) {
enqueueSystemEvent(
`Model override not allowed for this agent; reverted to ${initialModelLabel}.`,
formatModelOverrideResetEvent({
rejectedRef: modelState.resetModelOverrideRef,
initialModelLabel,
}),
{
sessionKey,
contextKey: `model:reset:${initialModelLabel}`,

View File

@@ -557,6 +557,7 @@ describe("createModelSelectionState respects session model override", () => {
});
expect(state.resetModelOverride).toBe(true);
expect(state.resetModelOverrideRef).toBe("openai/gpt-4o-mini");
expect(sessionStore[sessionKey]?.modelOverride).toBeUndefined();
expect(sessionStore[sessionKey]?.providerOverride).toBeUndefined();
});

View File

@@ -31,6 +31,7 @@ type ModelSelectionState = {
allowedModelKeys: Set<string>;
allowedModelCatalog: ModelCatalog;
resetModelOverride: boolean;
resetModelOverrideRef?: string;
resolveThinkingCatalog: () => Promise<ModelCatalog | undefined>;
resolveDefaultThinkingLevel: () => Promise<ThinkLevel>;
/** Default reasoning level from model capability: "on" if model has reasoning, else "off". */
@@ -49,6 +50,7 @@ export function createFastTestModelSelectionState(params: {
allowedModelKeys: new Set<string>(),
allowedModelCatalog: [],
resetModelOverride: false,
resetModelOverrideRef: undefined,
resolveThinkingCatalog: async () => [],
resolveDefaultThinkingLevel: async () => params.agentCfg?.thinkingDefault as ThinkLevel,
resolveDefaultReasoningLevel: async () => "off",
@@ -129,6 +131,7 @@ export async function createModelSelectionState(params: {
let allowedModelCatalog: ModelCatalog = configuredModelCatalog;
let modelCatalog: ModelCatalog | null = null;
let resetModelOverride = false;
let resetModelOverrideRef: string | undefined;
const agentEntry = params.agentId ? resolveAgentConfig(cfg, params.agentId) : undefined;
const directStoredOverride = resolvePersistedOverrideModelRef({
defaultProvider,
@@ -192,6 +195,9 @@ export async function createModelSelectionState(params: {
}
}
resetModelOverride = updated;
if (updated) {
resetModelOverrideRef = key;
}
}
}
@@ -309,6 +315,7 @@ export async function createModelSelectionState(params: {
allowedModelKeys,
allowedModelCatalog,
resetModelOverride,
resetModelOverrideRef,
resolveThinkingCatalog,
resolveDefaultThinkingLevel,
resolveDefaultReasoningLevel,