Mirror of https://github.com/openclaw/openclaw.git (synced 2026-05-06 08:00:42 +00:00)
fix(ollama): honor native model capabilities
@@ -8,6 +8,7 @@ Docs: https://docs.openclaw.ai

- Logging: write validated diagnostic trace context as top-level `traceId`, `spanId`, `parentSpanId`, and `traceFlags` fields in file-log JSONL records so traced requests and model calls are easier to correlate in log processors (see the sample record below this list). Refs #40353. Thanks @liangruochong44-ui.
- Logging/sessions: apply configured redaction patterns to persisted session transcript text and accept escaped character classes in safe custom redaction regexes, so transcript JSONL no longer keeps matching sensitive text in the clear. Fixes #42982. Thanks @panpan0000.
- Providers/Ollama: honor `/api/show` capabilities when registering local models so non-tool Ollama models no longer receive the agent tool surface, and keep native Ollama thinking opt-in instead of enabling it by default. Fixes #64710 and duplicate #65343. Thanks @yuan-b, @netherby, @xilopaint, and @Diyforfun2026.
- Auto-reply: poison inbound message dedupe after replay-unsafe provider/runtime failures so retries stay safe before visible progress but cannot duplicate messages after block output, tool side effects, or session progress. Fixes #69303; keeps #58549 and #64606 as duplicate validation. Thanks @martingarramon, @NikolaFC, and @zeroth-blip.
- Agents/model fallback: jump directly to a known later live-session model redirect instead of walking unrelated fallback candidates, while preserving the already-landed live-session/fallback loop guard. Fixes #57471; related loop family already closed via #58496. Thanks @yuxiaoyang2007-prog.
- Gateway/Bonjour: keep @homebridge/ciao cancellation handlers registered across advertiser restarts so late probing cancellations cannot crash Linux and other mDNS-churned gateways. Thanks @codex.
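For the trace-context logging entry, a single file-log JSONL record might look like the line below. The trace fields follow the W3C trace-context formats (32 hex chars for `traceId`, 16 for `spanId` and `parentSpanId`, two for `traceFlags`); the surrounding `ts`, `level`, and `msg` fields are illustrative assumptions, not the exact openclaw record shape.

{"ts":"2026-05-06T08:00:42.000Z","level":"info","msg":"model call completed","traceId":"4bf92f3577b34da6a3ce929d0e0e4736","spanId":"00f067aa0ba902b7","parentSpanId":"0af7651916cd43dd","traceFlags":"01"}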
@@ -528,6 +528,32 @@ describe("ollama plugin", () => {
    expect((payloadSeen?.options as Record<string, unknown> | undefined)?.think).toBeUndefined();
  });

  it("keeps native Ollama thinking off by default while exposing an opt-in toggle", () => {
    const provider = registerProvider();

    expect(
      provider.resolveThinkingProfile?.({
        provider: "ollama",
        modelId: "llama3.2:latest",
        reasoning: false,
      }),
    ).toEqual({
      levels: [{ id: "off" }],
      defaultLevel: "off",
    });

    expect(
      provider.resolveThinkingProfile?.({
        provider: "ollama",
        modelId: "gemma4:31b",
        reasoning: true,
      }),
    ).toEqual({
      levels: [{ id: "off" }, { id: "low", label: "on" }],
      defaultLevel: "off",
    });
  });

  it("wraps native Ollama payloads with top-level think=true when thinking is enabled", () => {
    const { baseStreamFn, payloadSeen } = captureWrappedOllamaPayload("low");
    expect(baseStreamFn).toHaveBeenCalledTimes(1);
@@ -166,6 +166,10 @@ export default definePluginEntry({
  contributeResolvedModelCompat: ({ model }) =>
    usesOllamaOpenAICompatTransport(model) ? { supportsUsageInStreaming: true } : undefined,
  resolveReasoningOutputMode: () => "native",
  resolveThinkingProfile: ({ reasoning }) => ({
    levels: reasoning === true ? [{ id: "off" }, { id: "low", label: "on" }] : [{ id: "off" }],
    defaultLevel: "off",
  }),
  wrapStreamFn: createConfiguredOllamaCompatStreamWrapper,
  createEmbeddingProvider: async ({ config, model, remote }) => {
    const { provider, client } = await createOllamaEmbeddingProvider({
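The wrapper registered as `wrapStreamFn` is not part of this diff, but the test "wraps native Ollama payloads with top-level think=true when thinking is enabled" pins down the behavior it must configure. A minimal sketch of that wrapping step follows; the function and parameter names are hypothetical, not the actual `createConfiguredOllamaCompatStreamWrapper` internals.

// Hypothetical sketch of the payload wrapping the tests assert: the native
// Ollama `think` flag goes at the top level of the chat payload (not inside
// `options`), and only when a non-"off" thinking level was selected.
type OllamaChatPayload = Record<string, unknown>;

function applyThinkingLevel(
  payload: OllamaChatPayload,
  level: "off" | "low",
): OllamaChatPayload {
  if (level === "off") return payload; // thinking stays opt-in by default
  return { ...payload, think: true };
}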
@@ -203,13 +203,26 @@ describe("ollama provider models", () => {
      "vision",
      "completion",
      "tools",
      "thinking",
    ]);
    expect(visionModel.input).toEqual(["text", "image"]);
    expect(visionModel.reasoning).toBe(true);
    expect(visionModel.compat?.supportsTools).toBe(true);

    const textModel = buildOllamaModelDefinition("glm-5.1:cloud", 202752, ["completion", "tools"]);
    expect(textModel.input).toEqual(["text"]);
    expect(textModel.reasoning).toBe(false);
    expect(textModel.compat?.supportsTools).toBe(true);

    const noCapabilities = buildOllamaModelDefinition("unknown-model", 65536);
    expect(noCapabilities.input).toEqual(["text"]);
    expect(noCapabilities.compat).toBeUndefined();
  });

  it("disables tool support when Ollama capabilities omit tools", () => {
    const model = buildOllamaModelDefinition("embeddinggemma:latest", 2048, ["embedding"]);

    expect(model.reasoning).toBe(false);
    expect(model.compat?.supportsTools).toBe(false);
  });
});
@@ -218,14 +218,25 @@ export function buildOllamaModelDefinition(
): ModelDefinitionConfig {
  const hasVision = capabilities?.includes("vision") ?? false;
  const input: ("text" | "image")[] = hasVision ? ["text", "image"] : ["text"];
  const reasoning =
    capabilities === undefined
      ? isReasoningModelHeuristic(modelId)
      : capabilities.includes("thinking");
  const compat =
    capabilities === undefined
      ? undefined
      : {
          supportsTools: capabilities.includes("tools"),
        };
  return {
    id: modelId,
    name: modelId,
    reasoning,
    input,
    cost: OLLAMA_DEFAULT_COST,
    contextWindow: contextWindow ?? OLLAMA_DEFAULT_CONTEXT_WINDOW,
    maxTokens: OLLAMA_DEFAULT_MAX_TOKENS,
    ...(compat ? { compat } : {}),
  };
}
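Read alongside the changelog entry, `buildOllamaModelDefinition` slots into a discovery flow roughly like the sketch below. `/api/tags` and `/api/show` are Ollama's documented endpoints, and the `capabilities` array in the `/api/show` response is what the new code honors; the helper name, the absent error handling, and the omitted context-window lookup are illustrative assumptions, not the actual openclaw registration code.

// Illustrative sketch only: list local models, ask Ollama what each can do,
// and build a definition per model from the reported capabilities.
async function discoverOllamaModels(baseUrl = "http://localhost:11434") {
  const tags = (await (await fetch(`${baseUrl}/api/tags`)).json()) as {
    models: { name: string }[];
  };

  const definitions: ModelDefinitionConfig[] = [];
  for (const { name } of tags.models) {
    const res = await fetch(`${baseUrl}/api/show`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ model: name }),
    });
    const show = (await res.json()) as { capabilities?: string[] };

    // Older Ollama servers omit `capabilities`; the builder then falls back to
    // the name heuristic for `reasoning` and leaves `compat` undefined.
    definitions.push(buildOllamaModelDefinition(name, undefined, show.capabilities));
  }
  return definitions;
}

The notable design choice is the three-way split this enables: a capability reported means the feature is on, a capability list that omits it means the feature is explicitly off, and no capability list at all falls back to the name heuristic with `compat` left undefined so downstream defaults apply.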