Mirror of https://github.com/openclaw/openclaw.git, synced 2026-05-06 16:01:01 +00:00.
fix(ollama): honor native model capabilities
This commit is contained in:
@@ -528,6 +528,32 @@ describe("ollama plugin", () => {
|
||||
expect((payloadSeen?.options as Record<string, unknown> | undefined)?.think).toBeUndefined();
|
||||
});
|
||||
|
||||
it("keeps native Ollama thinking off by default while exposing an opt-in toggle", () => {
|
||||
const provider = registerProvider();
|
||||
|
||||
expect(
|
||||
provider.resolveThinkingProfile?.({
|
||||
provider: "ollama",
|
||||
modelId: "llama3.2:latest",
|
||||
reasoning: false,
|
||||
}),
|
||||
).toEqual({
|
||||
levels: [{ id: "off" }],
|
||||
defaultLevel: "off",
|
||||
});
|
||||
|
||||
expect(
|
||||
provider.resolveThinkingProfile?.({
|
||||
provider: "ollama",
|
||||
modelId: "gemma4:31b",
|
||||
reasoning: true,
|
||||
}),
|
||||
).toEqual({
|
||||
levels: [{ id: "off" }, { id: "low", label: "on" }],
|
||||
defaultLevel: "off",
|
||||
});
|
||||
});
|
||||
|
||||
it("wraps native Ollama payloads with top-level think=true when thinking is enabled", () => {
|
||||
const { baseStreamFn, payloadSeen } = captureWrappedOllamaPayload("low");
|
||||
expect(baseStreamFn).toHaveBeenCalledTimes(1);
|
||||
|
||||
@@ -166,6 +166,10 @@ export default definePluginEntry({
|
||||
contributeResolvedModelCompat: ({ model }) =>
|
||||
usesOllamaOpenAICompatTransport(model) ? { supportsUsageInStreaming: true } : undefined,
|
||||
resolveReasoningOutputMode: () => "native",
|
||||
resolveThinkingProfile: ({ reasoning }) => ({
|
||||
levels: reasoning === true ? [{ id: "off" }, { id: "low", label: "on" }] : [{ id: "off" }],
|
||||
defaultLevel: "off",
|
||||
}),
|
||||
wrapStreamFn: createConfiguredOllamaCompatStreamWrapper,
|
||||
createEmbeddingProvider: async ({ config, model, remote }) => {
|
||||
const { provider, client } = await createOllamaEmbeddingProvider({
|
||||
|
||||
@@ -203,13 +203,26 @@ describe("ollama provider models", () => {
|
||||
"vision",
|
||||
"completion",
|
||||
"tools",
|
||||
"thinking",
|
||||
]);
|
||||
expect(visionModel.input).toEqual(["text", "image"]);
|
||||
expect(visionModel.reasoning).toBe(true);
|
||||
expect(visionModel.compat?.supportsTools).toBe(true);
|
||||
|
||||
const textModel = buildOllamaModelDefinition("glm-5.1:cloud", 202752, ["completion", "tools"]);
|
||||
expect(textModel.input).toEqual(["text"]);
|
||||
expect(textModel.reasoning).toBe(false);
|
||||
expect(textModel.compat?.supportsTools).toBe(true);
|
||||
|
||||
const noCapabilities = buildOllamaModelDefinition("unknown-model", 65536);
|
||||
expect(noCapabilities.input).toEqual(["text"]);
|
||||
expect(noCapabilities.compat).toBeUndefined();
|
||||
});
|
||||
|
||||
it("disables tool support when Ollama capabilities omit tools", () => {
|
||||
const model = buildOllamaModelDefinition("embeddinggemma:latest", 2048, ["embedding"]);
|
||||
|
||||
expect(model.reasoning).toBe(false);
|
||||
expect(model.compat?.supportsTools).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -218,14 +218,25 @@ export function buildOllamaModelDefinition(
|
||||
): ModelDefinitionConfig {
|
||||
const hasVision = capabilities?.includes("vision") ?? false;
|
||||
const input: ("text" | "image")[] = hasVision ? ["text", "image"] : ["text"];
|
||||
const reasoning =
|
||||
capabilities === undefined
|
||||
? isReasoningModelHeuristic(modelId)
|
||||
: capabilities.includes("thinking");
|
||||
const compat =
|
||||
capabilities === undefined
|
||||
? undefined
|
||||
: {
|
||||
supportsTools: capabilities.includes("tools"),
|
||||
};
|
||||
return {
|
||||
id: modelId,
|
||||
name: modelId,
|
||||
reasoning: isReasoningModelHeuristic(modelId),
|
||||
reasoning,
|
||||
input,
|
||||
cost: OLLAMA_DEFAULT_COST,
|
||||
contextWindow: contextWindow ?? OLLAMA_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: OLLAMA_DEFAULT_MAX_TOKENS,
|
||||
...(compat ? { compat } : {}),
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user