fix: accept singular tool_call finish reason

This commit is contained in:
Peter Steinberger
2026-04-25 03:59:55 +01:00
parent 8a14328c69
commit 5569d6d9d3
3 changed files with 89 additions and 0 deletions

View File

@@ -76,6 +76,7 @@ Docs: https://docs.openclaw.ai
- Providers/OpenAI: separate API-key and Codex sign-in onboarding groups, and avoid replaying stale OpenAI Responses reasoning blocks after a model route switch.
- Providers/OpenAI-compatible: forward `prompt_cache_key` on Completions requests only for providers that opt in with `compat.supportsPromptCacheKey`, keeping default proxy payloads unchanged. Fixes #69272.
- Providers/OpenAI-compatible: skip null or non-object streaming chunks from custom providers instead of failing the turn after partial output. Fixes #51112.
- Providers/OpenAI-compatible: treat singular MLX-style `finish_reason: "tool_call"` as tool use instead of a provider error. Fixes #61499.
- Providers/ElevenLabs: omit the MP3-only `Accept` header for PCM telephony synthesis, so Voice Call requests for `pcm_22050` no longer receive MP3 audio. Fixes #67340. Thanks @marcchabot.
- Plugins/Voice Call: reap stale pre-answer calls by default, honor configured TTS timeouts for Twilio media-stream playback, and fail empty telephony audio instead of completing as silence. Fixes #42071; supersedes #60957. Thanks @Ryce and @sliekens.
- Plugins/Voice Call: terminate expired restored call sessions with the provider and restart restored max-duration timers with only the remaining duration, preventing stale outbound retry loops after Gateway restarts. Fixes #48739. Thanks @mira-solari.

View File

@@ -2611,6 +2611,93 @@ describe("openai transport stream", () => {
]);
});
it("treats singular tool_call finish_reason as tool use", async () => {
  // MLX-style servers (e.g. mlx-lm) emit `finish_reason: "tool_call"` —
  // singular — instead of OpenAI's `"tool_calls"`. The stream processor
  // must still classify the turn as tool use rather than a provider error.
  const model = {
    id: "minimax-m2.5-8bit",
    name: "MiniMax M2.5 8bit",
    api: "openai-completions",
    provider: "mlx-lm",
    baseUrl: "http://localhost:1234/v1",
    reasoning: false,
    input: ["text"],
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
    contextWindow: 128000,
    maxTokens: 8192,
  } satisfies Model<"openai-completions">;
  // Accumulator the stream processor mutates in place; starts with the
  // default "stop" reason so the test proves it gets overwritten.
  const output = {
    role: "assistant" as const,
    content: [],
    api: model.api,
    provider: model.provider,
    model: model.id,
    usage: {
      input: 0,
      output: 0,
      cacheRead: 0,
      cacheWrite: 0,
      totalTokens: 0,
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
    },
    stopReason: "stop",
    timestamp: Date.now(),
  };
  // Events pushed downstream are irrelevant here — swallow them.
  const sink: { push(event: unknown): void } = { push() {} };
  // Builds a single-choice completion chunk with the given delta and
  // finish reason; all other envelope fields are constant for this test.
  const chunk = (delta: object, finishReason: string | null) => ({
    id: "chatcmpl-mlx",
    object: "chat.completion.chunk" as const,
    created: 1775425651,
    model: model.id,
    choices: [
      {
        index: 0,
        delta,
        logprobs: null,
        finish_reason: finishReason,
      },
    ],
  });
  async function* chunkSource() {
    // First chunk carries the tool call itself…
    yield chunk(
      {
        tool_calls: [
          {
            id: "call_1",
            type: "function" as const,
            function: { name: "lookup", arguments: "{}" },
          },
        ],
      },
      null,
    ) as never;
    // …second chunk terminates the turn with the singular finish reason.
    yield chunk({}, "tool_call") as never;
  }
  await __testing.processOpenAICompletionsStream(chunkSource(), output, model, sink);
  expect(output.stopReason).toBe("toolUse");
  expect(output.content).toContainEqual(
    expect.objectContaining({ type: "toolCall", id: "call_1", name: "lookup" }),
  );
});
it("keeps streamed tool call arguments intact when reasoning_details repeats", async () => {
const model = {
id: "openrouter/qwen/qwen3-235b-a22b",

View File

@@ -1812,6 +1812,7 @@ function mapStopReason(reason: string | null) {
case "length":
return { stopReason: "length" };
case "function_call":
case "tool_call":
case "tool_calls":
return { stopReason: "toolUse" };
case "content_filter":