diff --git a/CHANGELOG.md b/CHANGELOG.md index 7224cb4336c..929ef959839 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ Docs: https://docs.openclaw.ai - Logging: write validated diagnostic trace context as top-level `traceId`, `spanId`, `parentSpanId`, and `traceFlags` fields in file-log JSONL records so traced requests and model calls are easier to correlate in log processors. Refs #40353. Thanks @liangruochong44-ui. - Logging/sessions: apply configured redaction patterns to persisted session transcript text and accept escaped character classes in safe custom redaction regexes, so transcript JSONL no longer keeps matching sensitive text in the clear. Fixes #42982. Thanks @panpan0000. +- Providers/Ollama: honor `/api/show` capabilities when registering local models so non-tool Ollama models no longer receive the agent tool surface, and keep native Ollama thinking opt-in instead of enabling it by default. Fixes #64710 and duplicate #65343. Thanks @yuan-b, @netherby, @xilopaint, and @Diyforfun2026. - Auto-reply: poison inbound message dedupe after replay-unsafe provider/runtime failures so retries stay safe before visible progress but cannot duplicate messages after block output, tool side effects, or session progress. Fixes #69303; keeps #58549 and #64606 as duplicate validation. Thanks @martingarramon, @NikolaFC, and @zeroth-blip. - Agents/model fallback: jump directly to a known later live-session model redirect instead of walking unrelated fallback candidates, while preserving the already-landed live-session/fallback loop guard. Fixes #57471; related loop family already closed via #58496. Thanks @yuxiaoyang2007-prog. - Gateway/Bonjour: keep @homebridge/ciao cancellation handlers registered across advertiser restarts so late probing cancellations cannot crash Linux and other mDNS-churned gateways. Thanks @codex. 
diff --git a/extensions/ollama/index.test.ts b/extensions/ollama/index.test.ts index 9253ff70b21..dacf16969b7 100644 --- a/extensions/ollama/index.test.ts +++ b/extensions/ollama/index.test.ts @@ -528,6 +528,32 @@ describe("ollama plugin", () => { expect((payloadSeen?.options as Record<string, unknown> | undefined)?.think).toBeUndefined(); }); + it("keeps native Ollama thinking off by default while exposing an opt-in toggle", () => { + const provider = registerProvider(); + + expect( + provider.resolveThinkingProfile?.({ + provider: "ollama", + modelId: "llama3.2:latest", + reasoning: false, + }), + ).toEqual({ + levels: [{ id: "off" }], + defaultLevel: "off", + }); + + expect( + provider.resolveThinkingProfile?.({ + provider: "ollama", + modelId: "gemma4:31b", + reasoning: true, + }), + ).toEqual({ + levels: [{ id: "off" }, { id: "low", label: "on" }], + defaultLevel: "off", + }); + }); + it("wraps native Ollama payloads with top-level think=true when thinking is enabled", () => { const { baseStreamFn, payloadSeen } = captureWrappedOllamaPayload("low"); expect(baseStreamFn).toHaveBeenCalledTimes(1); diff --git a/extensions/ollama/index.ts b/extensions/ollama/index.ts index 7d12d4bebf5..c4fad4bfdd7 100644 --- a/extensions/ollama/index.ts +++ b/extensions/ollama/index.ts @@ -166,6 +166,10 @@ export default definePluginEntry({ contributeResolvedModelCompat: ({ model }) => usesOllamaOpenAICompatTransport(model) ? { supportsUsageInStreaming: true } : undefined, resolveReasoningOutputMode: () => "native", + resolveThinkingProfile: ({ reasoning }) => ({ + levels: reasoning === true ? 
[{ id: "off" }, { id: "low", label: "on" }] : [{ id: "off" }], + defaultLevel: "off", + }), wrapStreamFn: createConfiguredOllamaCompatStreamWrapper, createEmbeddingProvider: async ({ config, model, remote }) => { const { provider, client } = await createOllamaEmbeddingProvider({ diff --git a/extensions/ollama/src/provider-models.test.ts b/extensions/ollama/src/provider-models.test.ts index ed6ce868a01..76f85fbf34f 100644 --- a/extensions/ollama/src/provider-models.test.ts +++ b/extensions/ollama/src/provider-models.test.ts @@ -203,13 +203,26 @@ describe("ollama provider models", () => { "vision", "completion", "tools", + "thinking", ]); expect(visionModel.input).toEqual(["text", "image"]); + expect(visionModel.reasoning).toBe(true); + expect(visionModel.compat?.supportsTools).toBe(true); const textModel = buildOllamaModelDefinition("glm-5.1:cloud", 202752, ["completion", "tools"]); expect(textModel.input).toEqual(["text"]); + expect(textModel.reasoning).toBe(false); + expect(textModel.compat?.supportsTools).toBe(true); const noCapabilities = buildOllamaModelDefinition("unknown-model", 65536); expect(noCapabilities.input).toEqual(["text"]); + expect(noCapabilities.compat).toBeUndefined(); + }); + + it("disables tool support when Ollama capabilities omit tools", () => { + const model = buildOllamaModelDefinition("embeddinggemma:latest", 2048, ["embedding"]); + + expect(model.reasoning).toBe(false); + expect(model.compat?.supportsTools).toBe(false); }); }); diff --git a/extensions/ollama/src/provider-models.ts b/extensions/ollama/src/provider-models.ts index 82ccd88fde5..f3c891fae94 100644 --- a/extensions/ollama/src/provider-models.ts +++ b/extensions/ollama/src/provider-models.ts @@ -218,14 +218,25 @@ export function buildOllamaModelDefinition( ): ModelDefinitionConfig { const hasVision = capabilities?.includes("vision") ?? false; const input: ("text" | "image")[] = hasVision ? ["text", "image"] : ["text"]; + const reasoning = + capabilities === undefined + ? 
isReasoningModelHeuristic(modelId) + : capabilities.includes("thinking"); + const compat = + capabilities === undefined + ? undefined + : { + supportsTools: capabilities.includes("tools"), + }; return { id: modelId, name: modelId, - reasoning: isReasoningModelHeuristic(modelId), + reasoning, input, cost: OLLAMA_DEFAULT_COST, contextWindow: contextWindow ?? OLLAMA_DEFAULT_CONTEXT_WINDOW, maxTokens: OLLAMA_DEFAULT_MAX_TOKENS, + ...(compat ? { compat } : {}), }; }