fix(ollama): normalize prefixed tool calls

This commit is contained in:
Peter Steinberger
2026-04-29 19:27:49 +01:00
parent f5aebe42e1
commit 89f871679e
3 changed files with 106 additions and 3 deletions

View File

@@ -23,6 +23,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Ollama: normalize provider-prefixed tool-call names at the native stream boundary so Kimi/Ollama calls such as `functions.exec` dispatch as `exec` instead of failing to match any configured tool. Fixes #74487. Thanks @afurm and @carreipeia.
- Security/audit: resolve configured model aliases before model-tier and small-parameter checks, so alias-based GPT-5/Codex configs no longer report false weak-model warnings. Fixes #74455. Thanks @blaspat.
- Models/UI: hide unauthenticated providers from the default Web chat, `/models`, and model setup pickers while keeping explicit full-catalog browse paths through `view: "all"`, `/models <provider> all`, and `models list --all`. Fixes #74423. Thanks @guarismo and @SymbolStar.
- Slack/prompts: rely on Slack `interactiveReplies` guidance instead of generic `inlineButtons` config hints so enabled Slack button directives are not contradicted. Fixes #46647. Thanks @jeremykoerber.

View File

@@ -560,6 +560,42 @@ describe("convertToOllamaMessages", () => {
]);
});
it("normalizes provider-prefixed tool-call names before Ollama replay", () => {
const messages = [
{
role: "assistant",
content: [
{ type: "toolCall", id: "call_1", name: "functions.exec", arguments: { command: "pwd" } },
{ type: "tool_use", id: "call_2", name: "tools/read", input: { path: "README.md" } },
],
},
];
const result = convertToOllamaMessages(messages);
expect(result[0].tool_calls).toEqual([
{ function: { name: "exec", arguments: { command: "pwd" } } },
{ function: { name: "read", arguments: { path: "README.md" } } },
]);
});
it("keeps non-prefixed Ollama replay tool names intact", () => {
const messages = [
{
role: "assistant",
content: [
{ type: "toolCall", id: "call_1", name: "functionshell", arguments: {} },
{ type: "toolCall", id: "call_2", name: "tooling", arguments: {} },
{ type: "toolCall", id: "call_3", name: "tools", arguments: {} },
],
},
];
const result = convertToOllamaMessages(messages);
expect(result[0].tool_calls).toEqual([
{ function: { name: "functionshell", arguments: {} } },
{ function: { name: "tooling", arguments: {} } },
{ function: { name: "tools", arguments: {} } },
]);
});
it("deserializes string arguments back to objects for Ollama (round-trip fix)", () => {
// When tool calls round-trip through OpenAI-format storage, arguments
// are serialized as a JSON string. Ollama expects an object.
@@ -764,6 +800,54 @@ describe("buildAssistantMessage", () => {
expect(toolCall.id).toMatch(/^ollama_call_[0-9a-f-]{36}$/);
});
it("normalizes provider-prefixed tool-call names in Ollama responses", () => {
const response = {
model: "qwen3:32b",
created_at: "2026-01-01T00:00:00Z",
message: {
role: "assistant" as const,
content: "",
tool_calls: [
{ function: { name: "functions.exec", arguments: { command: "pwd" } } },
{ function: { name: "tools/read", arguments: { path: "README.md" } } },
],
},
done: true,
};
const result = buildAssistantMessage(response, modelInfo);
expect(result.content).toEqual([
expect.objectContaining({ type: "toolCall", name: "exec", arguments: { command: "pwd" } }),
expect.objectContaining({
type: "toolCall",
name: "read",
arguments: { path: "README.md" },
}),
]);
});
it("keeps non-prefixed Ollama response tool names intact", () => {
const response = {
model: "qwen3:32b",
created_at: "2026-01-01T00:00:00Z",
message: {
role: "assistant" as const,
content: "",
tool_calls: [
{ function: { name: "functionshell", arguments: {} } },
{ function: { name: "tooling", arguments: {} } },
{ function: { name: "tools", arguments: {} } },
],
},
done: true,
};
const result = buildAssistantMessage(response, modelInfo);
expect(result.content).toEqual([
expect.objectContaining({ type: "toolCall", name: "functionshell", arguments: {} }),
expect.objectContaining({ type: "toolCall", name: "tooling", arguments: {} }),
expect.objectContaining({ type: "toolCall", name: "tools", arguments: {} }),
]);
});
it("parses stringified tool call arguments from Ollama responses", () => {
const response = {
model: "qwen3:32b",

View File

@@ -766,14 +766,32 @@ function extractToolCalls(content: unknown): OllamaToolCall[] {
// Collect assistant tool invocations from the message content parts, accepting
// both the internal shape ("toolCall" with `arguments`) and the Anthropic-style
// shape ("tool_use" with `input`). Names are normalized at this boundary so
// provider-prefixed forms such as "functions.exec" dispatch as "exec".
// NOTE(review): the diff render above kept both the removed (unnormalized) and
// added push statements; only the normalized pushes belong in the code, since
// the duplicates would emit each call twice.
// NOTE(review): `parts` is derived from `content` above this hunk — not visible here.
const result: OllamaToolCall[] = [];
for (const part of parts) {
  if (part.type === "toolCall") {
    result.push({
      function: {
        name: normalizeOllamaToolCallName(part.name),
        arguments: ensureArgsObject(part.arguments),
      },
    });
  } else if (part.type === "tool_use") {
    result.push({
      function: {
        name: normalizeOllamaToolCallName(part.name),
        arguments: ensureArgsObject(part.input),
      },
    });
  }
}
return result;
}
/**
 * Strip a leading provider prefix ("functions.", "function_", "tools/", …)
 * from a tool-call name so dispatch matches the configured tool registry.
 * Matching is case-insensitive; a blank or whitespace-only input comes back
 * trimmed and otherwise unchanged.
 */
function normalizeOllamaToolCallName(rawName: string): string {
  const name = rawName.trim();
  if (name.length === 0) {
    return name;
  }
  const withoutPrefix = name.replace(/^(?:functions?|tools?)[./_-]+/iu, "");
  return withoutPrefix.trim();
}
export function convertToOllamaMessages(
messages: Array<{ role: string; content: unknown }>,
system?: string,
@@ -866,7 +884,7 @@ export function buildAssistantMessage(
content.push({
type: "toolCall",
id: `ollama_call_${randomUUID()}`,
name: toolCall.function.name,
name: normalizeOllamaToolCallName(toolCall.function.name),
arguments: normalizeOllamaToolCallArguments(toolCall.function.arguments),
});
}