From 7a9efc13899886983c3183b968485132fe2bd153 Mon Sep 17 00:00:00 2001
From: yfge
Date: Tue, 5 May 2026 09:14:49 +0800
Subject: [PATCH] fix: expose ollama thinking profile before activation

Fixes openclaw/openclaw#77612
---
 CHANGELOG.md                                  |  1 +
 extensions/ollama/index.ts                    |  9 ++-------
 extensions/ollama/provider-policy-api.test.ts | 13 ++++++++++++-
 extensions/ollama/provider-policy-api.ts      | 19 +++++++++++++++++++
 4 files changed, 34 insertions(+), 8 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 14e0a2533b5..b349407ef6f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -68,6 +68,7 @@ Docs: https://docs.openclaw.ai
 
 ### Fixes
 
+- Ollama/thinking: expose the lightweight Ollama provider thinking profile through the public provider-policy artifact too, so reasoning-capable Ollama models such as `ollama/deepseek-v4-pro:cloud` keep `/think max` available even before the full plugin runtime activates. Fixes #77612. Thanks @rriggs.
 - CLI/sessions: prune old unreferenced transcript, compaction checkpoint, and trajectory artifacts during normal `sessions cleanup`, so gateway restart or crash orphans do not accumulate indefinitely outside `sessions.json`. Fixes #77608. Thanks @slideshow-dingo.
 - Video generation: wait up to 20 minutes for slow fal/MiniMax queue-backed jobs, stop forwarding unsupported Google Veo generated-audio options, and normalize MiniMax `720P` requests to its supported `768P` resolution with the usual override warning/details instead of failing fallback.
 - Update/restart: probe managed Gateway restarts with the service environment and add a Docker product lane that exercises candidate-owned `openclaw update --yes --json` restarts, so SecretRef-backed local gateway auth cannot regress behind mocked restart checks. Thanks @vincentkoc.
diff --git a/extensions/ollama/index.ts b/extensions/ollama/index.ts
index 1621f364388..ad034f8dbfe 100644
--- a/extensions/ollama/index.ts
+++ b/extensions/ollama/index.ts
@@ -27,6 +27,7 @@ import {
   promptAndConfigureOllama,
   queryOllamaModelShowInfo,
 } from "./api.js";
+import { resolveThinkingProfile as resolveOllamaThinkingProfile } from "./provider-policy-api.js";
 import {
   OLLAMA_DEFAULT_API_KEY,
   OLLAMA_PROVIDER_ID,
@@ -249,13 +250,7 @@ export default definePluginEntry({
   contributeResolvedModelCompat: ({ model }) =>
     usesOllamaOpenAICompatTransport(model) ? { supportsUsageInStreaming: true } : undefined,
   resolveReasoningOutputMode: () => "native",
-  resolveThinkingProfile: ({ reasoning }) => ({
-    levels:
-      reasoning === true
-        ? [{ id: "off" }, { id: "low" }, { id: "medium" }, { id: "high" }, { id: "max" }]
-        : [{ id: "off" }],
-    defaultLevel: "off",
-  }),
+  resolveThinkingProfile: resolveOllamaThinkingProfile,
   wrapStreamFn: createConfiguredOllamaCompatStreamWrapper,
   createEmbeddingProvider: async ({ config, model, provider: embeddingProvider, remote }) => {
     const { provider, client } = await createOllamaEmbeddingProvider({
diff --git a/extensions/ollama/provider-policy-api.test.ts b/extensions/ollama/provider-policy-api.test.ts
index 3d11e1bc85a..126a06dccbd 100644
--- a/extensions/ollama/provider-policy-api.test.ts
+++ b/extensions/ollama/provider-policy-api.test.ts
@@ -1,6 +1,6 @@
 import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-model-types";
 import { describe, expect, it } from "vitest";
-import { normalizeConfig } from "./provider-policy-api.js";
+import { normalizeConfig, resolveThinkingProfile } from "./provider-policy-api.js";
 import { OLLAMA_DEFAULT_BASE_URL } from "./src/defaults.js";
 
 function createModel(id: string, name: string): ModelDefinitionConfig {
@@ -58,4 +58,15 @@ describe("ollama provider policy public artifact", () => {
       }),
     ).toEqual({});
   });
+
+  it("exposes max thinking for reasoning-capable models without full plugin activation", () => {
+    expect(resolveThinkingProfile({ reasoning: true })).toEqual({
+      levels: [{ id: "off" }, { id: "low" }, { id: "medium" }, { id: "high" }, { id: "max" }],
+      defaultLevel: "off",
+    });
+    expect(resolveThinkingProfile({ reasoning: false })).toEqual({
+      levels: [{ id: "off" }],
+      defaultLevel: "off",
+    });
+  });
 });
diff --git a/extensions/ollama/provider-policy-api.ts b/extensions/ollama/provider-policy-api.ts
index 433e296f18f..2ef81fb02b0 100644
--- a/extensions/ollama/provider-policy-api.ts
+++ b/extensions/ollama/provider-policy-api.ts
@@ -1,8 +1,19 @@
+import type { ProviderThinkingProfile } from "openclaw/plugin-sdk/plugin-entry";
 import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-model-types";
 import { OLLAMA_DEFAULT_BASE_URL } from "./src/defaults.js";
 
 type OllamaProviderConfigDraft = Partial<ModelProviderConfig>;
 
+const OLLAMA_REASONING_THINKING_PROFILE = {
+  levels: [{ id: "off" }, { id: "low" }, { id: "medium" }, { id: "high" }, { id: "max" }],
+  defaultLevel: "off",
+} satisfies ProviderThinkingProfile;
+
+const OLLAMA_NON_REASONING_THINKING_PROFILE = {
+  levels: [{ id: "off" }],
+  defaultLevel: "off",
+} satisfies ProviderThinkingProfile;
+
 /**
  * Provider policy surface for Ollama: normalize provider configs used by
  * core defaults/normalizers. This runs during config defaults application and
@@ -38,3 +49,11 @@ export function normalizeConfig({
 
   return next;
 }
+
+export function resolveThinkingProfile({
+  reasoning,
+}: {
+  reasoning?: boolean;
+}): ProviderThinkingProfile {
+  return reasoning ? OLLAMA_REASONING_THINKING_PROFILE : OLLAMA_NON_REASONING_THINKING_PROFILE;
+}