fix: expose ollama thinking profile before activation

Fixes openclaw/openclaw#77612
This commit is contained in:
yfge
2026-05-05 09:14:49 +08:00
committed by Ayaan Zaidi
parent b8f9137d31
commit 7a9efc1389
4 changed files with 34 additions and 8 deletions

View File

@@ -68,6 +68,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Ollama/thinking: expose the lightweight Ollama provider thinking profile through the public provider-policy artifact too, so reasoning-capable Ollama models such as `ollama/deepseek-v4-pro:cloud` keep `/think max` available even before the full plugin runtime activates. Fixes #77612. Thanks @rriggs.
- CLI/sessions: prune old unreferenced transcript, compaction checkpoint, and trajectory artifacts during normal `sessions cleanup`, so gateway restart or crash orphans do not accumulate indefinitely outside `sessions.json`. Fixes #77608. Thanks @slideshow-dingo.
- Video generation: wait up to 20 minutes for slow fal/MiniMax queue-backed jobs, stop forwarding unsupported Google Veo generated-audio options, and normalize MiniMax `720P` requests to its supported `768P` resolution with the usual override warning/details instead of failing fallback.
- Update/restart: probe managed Gateway restarts with the service environment and add a Docker product lane that exercises candidate-owned `openclaw update --yes --json` restarts, so SecretRef-backed local gateway auth cannot regress behind mocked restart checks. Thanks @vincentkoc.

View File

@@ -27,6 +27,7 @@ import {
promptAndConfigureOllama,
queryOllamaModelShowInfo,
} from "./api.js";
import { resolveThinkingProfile as resolveOllamaThinkingProfile } from "./provider-policy-api.js";
import {
OLLAMA_DEFAULT_API_KEY,
OLLAMA_PROVIDER_ID,
@@ -249,13 +250,7 @@ export default definePluginEntry({
contributeResolvedModelCompat: ({ model }) =>
usesOllamaOpenAICompatTransport(model) ? { supportsUsageInStreaming: true } : undefined,
resolveReasoningOutputMode: () => "native",
resolveThinkingProfile: ({ reasoning }) => ({
levels:
reasoning === true
? [{ id: "off" }, { id: "low" }, { id: "medium" }, { id: "high" }, { id: "max" }]
: [{ id: "off" }],
defaultLevel: "off",
}),
resolveThinkingProfile: resolveOllamaThinkingProfile,
wrapStreamFn: createConfiguredOllamaCompatStreamWrapper,
createEmbeddingProvider: async ({ config, model, provider: embeddingProvider, remote }) => {
const { provider, client } = await createOllamaEmbeddingProvider({

View File

@@ -1,6 +1,6 @@
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-model-types";
import { describe, expect, it } from "vitest";
import { normalizeConfig } from "./provider-policy-api.js";
import { normalizeConfig, resolveThinkingProfile } from "./provider-policy-api.js";
import { OLLAMA_DEFAULT_BASE_URL } from "./src/defaults.js";
function createModel(id: string, name: string): ModelDefinitionConfig {
@@ -58,4 +58,15 @@ describe("ollama provider policy public artifact", () => {
}),
).toEqual({});
});
// Regression test for #77612: the public provider-policy artifact must expose
// the same thinking profile the full plugin runtime would, so `/think max`
// stays available for reasoning-capable Ollama models before plugin activation.
it("exposes max thinking for reasoning-capable models without full plugin activation", () => {
// reasoning-capable model: full ladder of levels, defaulting to "off"
expect(resolveThinkingProfile({ reasoning: true })).toEqual({
levels: [{ id: "off" }, { id: "low" }, { id: "medium" }, { id: "high" }, { id: "max" }],
defaultLevel: "off",
});
// non-reasoning model: only "off" is offered
expect(resolveThinkingProfile({ reasoning: false })).toEqual({
levels: [{ id: "off" }],
defaultLevel: "off",
});
});
});

View File

@@ -1,8 +1,19 @@
import type { ProviderThinkingProfile } from "openclaw/plugin-sdk/plugin-entry";
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-model-types";
import { OLLAMA_DEFAULT_BASE_URL } from "./src/defaults.js";
type OllamaProviderConfigDraft = Partial<ModelProviderConfig>;
// Thinking profile for reasoning-capable Ollama models: the full ladder of
// thinking levels (off → max), defaulting to "off". `satisfies` keeps the
// literal level ids while still validating against ProviderThinkingProfile.
const OLLAMA_REASONING_THINKING_PROFILE = {
levels: [{ id: "off" }, { id: "low" }, { id: "medium" }, { id: "high" }, { id: "max" }],
defaultLevel: "off",
} satisfies ProviderThinkingProfile;
// Thinking profile for models without reasoning support: only "off" is offered.
const OLLAMA_NON_REASONING_THINKING_PROFILE = {
levels: [{ id: "off" }],
defaultLevel: "off",
} satisfies ProviderThinkingProfile;
/**
* Provider policy surface for Ollama: normalize provider configs used by
* core defaults/normalizers. This runs during config defaults application and
@@ -38,3 +49,11 @@ export function normalizeConfig({
return next;
}
/**
 * Resolve the thinking profile to advertise for an Ollama model.
 *
 * Lives in the public provider-policy artifact (not the plugin runtime) so
 * reasoning-capable models expose their full thinking-level ladder even
 * before the full plugin activates.
 *
 * @param reasoning - whether the model is reasoning-capable; omitted or
 *   `false` yields the "off"-only profile.
 * @returns the shared reasoning profile when `reasoning` is set, otherwise
 *   the non-reasoning ("off"-only) profile.
 */
export function resolveThinkingProfile({
  reasoning,
}: {
  reasoning?: boolean;
}): ProviderThinkingProfile {
  // `reasoning` is boolean | undefined, so a strict check against `true`
  // is equivalent to the truthiness test.
  if (reasoning === true) {
    return OLLAMA_REASONING_THINKING_PROFILE;
  }
  return OLLAMA_NON_REASONING_THINKING_PROFILE;
}