fix(llm-task): normalize provider-prefixed model overrides

Author: Peter Steinberger
Date: 2026-04-27 06:01:45 +01:00
parent 05ebfa4146
commit 2dcc4605d4
3 changed files with 24 additions and 1 deletion

@@ -55,6 +55,7 @@ Docs: https://docs.openclaw.ai
 - Providers/Ollama: preserve explicit configured model input modalities when merging discovered provider metadata so custom vision models keep image support instead of silently dropping attachments. Fixes #39690; carries forward #39785. Thanks @Skrblik and @Mriris.
 - Providers/Ollama: estimate native Ollama transcript usage when `/api/chat` omits prompt/eval counters while preserving exact zero counters, keeping local model runs visible in usage surfaces. Carries forward #39112. Thanks @TylonHH.
 - Providers/PDF/Ollama: add bounded network timeouts for Ollama model pulls and native Anthropic/Gemini PDF analysis requests so unresponsive provider endpoints no longer hang sessions indefinitely. Fixes #54142; supersedes #54144 and #54145. Thanks @jinduwang1001-max and @arkyu2077.
+- LLM Task/Ollama: accept model overrides that already include the selected provider prefix, avoiding doubled ids such as `ollama/ollama/llama3.2:latest`, and live-verify local Ollama JSON tasks return parsed output. Fixes #50052. Thanks @ralphy-maplebots and @Hollychou924.
 - Memory/doctor: treat Ollama memory embeddings as key-optional so `openclaw doctor` no longer warns about a missing API key when the gateway reports embeddings are ready. Fixes #46584. Thanks @fengly78.
 - Agents/Ollama: apply provider-owned replay turn normalization to native Ollama chat so Cloud models no longer reject non-alternating replay history in agent/Gateway runs. Fixes #71697. Thanks @ismael-81.
 - Control UI/Ollama: show the resolved configured thinking default in chat and session thinking dropdowns so inherited `adaptive`/per-model thinking config no longer appears as `Default (off)` or a generic inherit value. Fixes #72407. Thanks @NotecAG.
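
To make the LLM Task/Ollama entry above concrete, here is a minimal, self-contained sketch of the normalization it describes. The helper body mirrors the source hunk further down; the model ids are illustrative only.

```ts
// Mirrors stripDuplicateProviderPrefix from the source hunk below: drops a
// redundant "<provider>/" prefix from a model override so the id is not
// prefixed a second time when the model key is built.
function stripDuplicateProviderPrefix(
  provider: string | undefined,
  model: string | undefined,
): string | undefined {
  const p = provider?.trim();
  const m = model?.trim();
  if (!p || !m) {
    return m || undefined;
  }
  const prefix = `${p}/`;
  return m.startsWith(prefix) ? m.slice(prefix.length) : m;
}

// Before the fix, a prefixed override was prefixed again, producing doubled
// ids like "ollama/ollama/llama3.2:latest".
console.log(stripDuplicateProviderPrefix("ollama", "ollama/llama3.2:latest")); // "llama3.2:latest"
console.log(stripDuplicateProviderPrefix("ollama", "llama3.2:latest"));        // "llama3.2:latest" (unchanged)
console.log(stripDuplicateProviderPrefix(undefined, " claude-4-sonnet "));     // "claude-4-sonnet"
```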

@@ -155,6 +155,17 @@ describe("llm-task tool (json-only)", () => {
     expect(call.model).toBe("claude-4-sonnet");
   });
+  it("accepts model overrides that already include the selected provider prefix", async () => {
+    mockEmbeddedRunJson({ ok: true });
+    const call = await executeEmbeddedRun({
+      prompt: "x",
+      provider: "anthropic",
+      model: "anthropic/claude-4-sonnet",
+    });
+    expect(call.provider).toBe("anthropic");
+    expect(call.model).toBe("claude-4-sonnet");
+  });
   it("passes thinking override to embedded runner", async () => {
     mockEmbeddedRunJson({ ok: true });
     const call = await executeEmbeddedRun({ prompt: "x", thinking: "high" });
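
Worth noting, though not exercised by the test above: the helper strips only the prefix that matches the selected provider, and only one layer of it. A couple of illustrative calls against the helper sketched after the changelog (behavior inferred from the implementation, not a separately documented guarantee):

```ts
// A slash belonging to a different provider namespace is left intact.
stripDuplicateProviderPrefix("openai", "ollama/llama3.2:latest");
// => "ollama/llama3.2:latest"

// Only one redundant layer is removed per call.
stripDuplicateProviderPrefix("ollama", "ollama/ollama/llama3.2:latest");
// => "ollama/llama3.2:latest"
```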

@@ -38,6 +38,16 @@ function toModelKey(provider?: string, model?: string): string | undefined {
   return `${p}/${m}`;
 }
+function stripDuplicateProviderPrefix(provider: string | undefined, model: string | undefined) {
+  const p = provider?.trim();
+  const m = model?.trim();
+  if (!p || !m) {
+    return m || undefined;
+  }
+  const prefix = `${p}/`;
+  return m.startsWith(prefix) ? m.slice(prefix.length) : m;
+}
 type PluginCfg = {
   defaultProvider?: string;
   defaultModel?: string;
@@ -109,11 +119,12 @@ export function createLlmTaskTool(api: OpenClawPluginApi) {
     primaryProvider ||
     undefined;
-  const model =
+  const rawModel =
     (typeof params.model === "string" && params.model.trim()) ||
     (typeof pluginCfg.defaultModel === "string" && pluginCfg.defaultModel.trim()) ||
     primaryModel ||
     undefined;
+  const model = stripDuplicateProviderPrefix(provider, rawModel);
   const authProfileId =
     (typeof params.authProfileId === "string" && params.authProfileId.trim()) ||
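
Putting the two hunks together: resolution picks the raw override (request params, then plugin config, then the primary model) and normalizes it before the model key is built. A simplified end-to-end sketch reusing the helper from the first sketch, with provider resolution collapsed to a single argument and toModelKey's guard reconstructed from its `string | undefined` return type (assumptions, not verbatim source):

```ts
// Guard reconstructed from the signature; only the return statement appears
// verbatim in the hunk above.
function toModelKey(provider?: string, model?: string): string | undefined {
  const p = provider?.trim();
  const m = model?.trim();
  if (!p || !m) return undefined;
  return `${p}/${m}`;
}

// Simplified resolution order from the second source hunk: params override,
// then plugin default, then the primary model, then nothing.
function resolveModel(
  params: { model?: string },
  pluginCfg: { defaultModel?: string },
  provider: string | undefined,
  primaryModel: string | undefined,
): string | undefined {
  const rawModel =
    (typeof params.model === "string" && params.model.trim()) ||
    (typeof pluginCfg.defaultModel === "string" && pluginCfg.defaultModel.trim()) ||
    primaryModel ||
    undefined;
  return stripDuplicateProviderPrefix(provider, rawModel || undefined);
}

// Previously: toModelKey("ollama", "ollama/llama3.2:latest")
//   => "ollama/ollama/llama3.2:latest" (doubled id)
const model = resolveModel({ model: "ollama/llama3.2:latest" }, {}, "ollama", undefined);
console.log(toModelKey("ollama", model)); // "ollama/llama3.2:latest"
```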