From 5569d6d9d3abfae0729a0d64eb1ddf98ec2fa724 Mon Sep 17 00:00:00 2001
From: Peter Steinberger
Date: Sat, 25 Apr 2026 03:59:55 +0100
Subject: [PATCH] fix: accept singular tool_call finish reason

---
 CHANGELOG.md                               |  1 +
 src/agents/openai-transport-stream.test.ts | 87 ++++++++++++++++++++++
 src/agents/openai-transport-stream.ts      |  1 +
 3 files changed, 89 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 41d3ffa20cf..930b6853bb7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -76,6 +76,7 @@ Docs: https://docs.openclaw.ai
 - Providers/OpenAI: separate API-key and Codex sign-in onboarding groups, and avoid replaying stale OpenAI Responses reasoning blocks after a model route switch.
 - Providers/OpenAI-compatible: forward `prompt_cache_key` on Completions requests only for providers that opt in with `compat.supportsPromptCacheKey`, keeping default proxy payloads unchanged. Fixes #69272.
 - Providers/OpenAI-compatible: skip null or non-object streaming chunks from custom providers instead of failing the turn after partial output. Fixes #51112.
+- Providers/OpenAI-compatible: treat singular MLX-style `finish_reason: "tool_call"` as tool use instead of a provider error. Fixes #61499.
 - Providers/ElevenLabs: omit the MP3-only `Accept` header for PCM telephony synthesis, so Voice Call requests for `pcm_22050` no longer receive MP3 audio. Fixes #67340. Thanks @marcchabot.
 - Plugins/Voice Call: reap stale pre-answer calls by default, honor configured TTS timeouts for Twilio media-stream playback, and fail empty telephony audio instead of completing as silence. Fixes #42071; supersedes #60957. Thanks @Ryce and @sliekens.
 - Plugins/Voice Call: terminate expired restored call sessions with the provider and restart restored max-duration timers with only the remaining duration, preventing stale outbound retry loops after Gateway restarts. Fixes #48739. Thanks @mira-solari.
diff --git a/src/agents/openai-transport-stream.test.ts b/src/agents/openai-transport-stream.test.ts
index 4a081adf8b5..2b067fc9800 100644
--- a/src/agents/openai-transport-stream.test.ts
+++ b/src/agents/openai-transport-stream.test.ts
@@ -2611,6 +2611,93 @@ describe("openai transport stream", () => {
     ]);
   });
 
+  it("treats singular tool_call finish_reason as tool use", async () => {
+    const model = {
+      id: "minimax-m2.5-8bit",
+      name: "MiniMax M2.5 8bit",
+      api: "openai-completions",
+      provider: "mlx-lm",
+      baseUrl: "http://localhost:1234/v1",
+      reasoning: false,
+      input: ["text"],
+      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+      contextWindow: 128000,
+      maxTokens: 8192,
+    } satisfies Model<"openai-completions">;
+
+    const output = {
+      role: "assistant" as const,
+      content: [],
+      api: model.api,
+      provider: model.provider,
+      model: model.id,
+      usage: {
+        input: 0,
+        output: 0,
+        cacheRead: 0,
+        cacheWrite: 0,
+        totalTokens: 0,
+        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
+      },
+      stopReason: "stop",
+      timestamp: Date.now(),
+    };
+
+    const stream: { push(event: unknown): void } = { push() {} };
+
+    const mockChunks = [
+      {
+        id: "chatcmpl-mlx",
+        object: "chat.completion.chunk" as const,
+        created: 1775425651,
+        model: model.id,
+        choices: [
+          {
+            index: 0,
+            delta: {
+              tool_calls: [
+                {
+                  id: "call_1",
+                  type: "function" as const,
+                  function: { name: "lookup", arguments: "{}" },
+                },
+              ],
+            },
+            logprobs: null,
+            finish_reason: null,
+          },
+        ],
+      },
+      {
+        id: "chatcmpl-mlx",
+        object: "chat.completion.chunk" as const,
+        created: 1775425651,
+        model: model.id,
+        choices: [
+          {
+            index: 0,
+            delta: {},
+            logprobs: null,
+            finish_reason: "tool_call",
+          },
+        ],
+      },
+    ] as const;
+
+    async function* mockStream() {
+      for (const chunk of mockChunks) {
+        yield chunk as never;
+      }
+    }
+
+    await __testing.processOpenAICompletionsStream(mockStream(), output, model, stream);
+
+    expect(output.stopReason).toBe("toolUse");
+    expect(output.content).toContainEqual(
+      expect.objectContaining({ type: "toolCall", id: "call_1", name: "lookup" }),
+    );
+  });
+
   it("keeps streamed tool call arguments intact when reasoning_details repeats", async () => {
     const model = {
       id: "openrouter/qwen/qwen3-235b-a22b",
diff --git a/src/agents/openai-transport-stream.ts b/src/agents/openai-transport-stream.ts
index 084e0196ab1..6492679476c 100644
--- a/src/agents/openai-transport-stream.ts
+++ b/src/agents/openai-transport-stream.ts
@@ -1812,6 +1812,7 @@ function mapStopReason(reason: string | null) {
     case "length":
       return { stopReason: "length" };
     case "function_call":
+    case "tool_call":
     case "tool_calls":
       return { stopReason: "toolUse" };
     case "content_filter":
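
Note: the functional change is the single new case label above. For anyone reading the diff without the source file, a minimal self-contained sketch of what mapStopReason does after this patch; the StopReason union, the content_filter mapping, and the default fallback are assumptions for illustration, not copied from openai-transport-stream.ts:

type StopReason = "stop" | "length" | "toolUse" | "contentFilter";

// Sketch of the widened mapping. Only the cases visible in the hunk above
// are taken from the patch; the rest is an assumed shape.
function mapStopReason(reason: string | null): { stopReason: StopReason } {
  switch (reason) {
    case "length":
      return { stopReason: "length" };
    case "function_call":
    case "tool_call": // singular spelling emitted by MLX-style servers (the fix)
    case "tool_calls":
      return { stopReason: "toolUse" };
    case "content_filter":
      return { stopReason: "contentFilter" }; // assumed mapping
    default:
      return { stopReason: "stop" }; // assumed fallback for null/unknown reasons
  }
}

Folding the singular spelling into the existing toolUse branch keeps MLX-style OpenAI-compatible servers on the normal tool-call path instead of surfacing a provider error, per the CHANGELOG entry.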