fix: skip invalid completions stream chunks

Peter Steinberger
2026-04-25 03:55:12 +01:00
parent 5381625f45
commit 9fbfedf12a
3 changed files with 63 additions and 1 deletion


@@ -74,6 +74,7 @@ Docs: https://docs.openclaw.ai
 - Config/doctor: reject legacy `secretref-env:<ENV_VAR>` marker strings on SecretRef credential paths and migrate valid markers to structured env SecretRefs with `openclaw doctor --fix`. Fixes #51794. Thanks @halointellicore.
 - Providers/OpenAI: separate API-key and Codex sign-in onboarding groups, and avoid replaying stale OpenAI Responses reasoning blocks after a model route switch.
 - Providers/OpenAI-compatible: forward `prompt_cache_key` on Completions requests only for providers that opt in with `compat.supportsPromptCacheKey`, keeping default proxy payloads unchanged (see the config sketch after this list). Fixes #69272.
+- Providers/OpenAI-compatible: skip null or non-object streaming chunks from custom providers instead of failing the turn after partial output. Fixes #51112.
 - Providers/ElevenLabs: omit the MP3-only `Accept` header for PCM telephony synthesis, so Voice Call requests for `pcm_22050` no longer receive MP3 audio. Fixes #67340. Thanks @marcchabot.
 - Plugins/Voice Call: reap stale pre-answer calls by default, honor configured TTS timeouts for Twilio media-stream playback, and fail empty telephony audio instead of completing as silence. Fixes #42071; supersedes #60957. Thanks @Ryce and @sliekens.
 - Plugins/Voice Call: terminate expired restored call sessions with the provider and restart restored max-duration timers with only the remaining duration, preventing stale outbound retry loops after Gateway restarts. Fixes #48739. Thanks @mira-solari.
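As a sketch of the `compat.supportsPromptCacheKey` opt-in described above: only that flag is taken from the changelog entry; every other field name below is an illustrative assumption, not the actual config schema.

// Hypothetical OpenAI-compatible provider entry. Only the
// compat.supportsPromptCacheKey flag comes from the changelog;
// the surrounding shape is assumed for illustration.
const customProvider = {
  baseUrl: "https://my-proxy.example.com/v1",
  api: "openai-completions",
  compat: {
    // Opt in: forward prompt_cache_key on Completions requests.
    // Providers without this flag keep the default proxy payload.
    supportsPromptCacheKey: true,
  },
};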


@@ -466,6 +466,63 @@ describe("openai transport stream", () => {
     });
   });
+  it("skips null and non-object OpenAI-compatible stream chunks", async () => {
+    const model = {
+      id: "glm-5",
+      name: "GLM-5",
+      api: "openai-completions",
+      provider: "vllm",
+      baseUrl: "http://localhost:8000/v1",
+      reasoning: false,
+      input: ["text"],
+      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+      contextWindow: 128000,
+      maxTokens: 4096,
+    } satisfies Model<"openai-completions">;
+    const output = {
+      role: "assistant" as const,
+      content: [],
+      api: model.api,
+      provider: model.provider,
+      model: model.id,
+      usage: {
+        input: 0,
+        output: 0,
+        cacheRead: 0,
+        cacheWrite: 0,
+        totalTokens: 0,
+        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
+      },
+      stopReason: "stop",
+      timestamp: Date.now(),
+    };
+    const stream: { push(event: unknown): void } = { push() {} };
+    async function* mockStream() {
+      yield null as never;
+      yield "not-a-chunk" as never;
+      yield {
+        id: "chatcmpl-vllm",
+        object: "chat.completion.chunk" as const,
+        created: 1775425651,
+        model: "glm-5",
+        choices: [
+          {
+            index: 0,
+            delta: { role: "assistant" as const, content: "ok" },
+            logprobs: null,
+            finish_reason: "stop" as const,
+          },
+        ],
+      };
+    }
+    await __testing.processOpenAICompletionsStream(mockStream(), output, model, stream);
+    expect(output.content).toContainEqual({ type: "text", text: "ok" });
+    expect(output.stopReason).toBe("stop");
+  });
   it("keeps OpenRouter thinking format for declared OpenRouter providers on custom proxy URLs", async () => {
     const streamFn = buildTransportAwareSimpleStreamFn(
       attachModelProviderRequestTransport(


@@ -1292,7 +1292,11 @@ async function processOpenAICompletionsStream(
     flushPendingPostToolCallDeltas();
     appendTextDeltaInternal(text);
   };
-  for await (const chunk of responseStream) {
+  for await (const rawChunk of responseStream as AsyncIterable<unknown>) {
+    if (!rawChunk || typeof rawChunk !== "object") {
+      continue;
+    }
+    const chunk = rawChunk as ChatCompletionChunk;
     output.responseId ||= chunk.id;
     if (chunk.usage) {
       output.usage = parseTransportChunkUsage(chunk.usage, model);
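The guard added above reduces to a small reusable pattern. As a standalone sketch (an illustrative helper, not an actual export of this module), the same filtering could be written as:

// Illustrative only: yields just the non-null, object-shaped chunks from a
// provider stream, mirroring the guard in the loop above.
async function* onlyObjectChunks<T extends object>(
  source: AsyncIterable<unknown>,
): AsyncGenerator<T, void, undefined> {
  for await (const raw of source) {
    if (!raw || typeof raw !== "object") {
      continue; // skip null, strings, numbers, and other malformed chunks
    }
    yield raw as T;
  }
}

Dropping malformed chunks with continue, rather than throwing, is what lets a turn keep the partial output it has already streamed instead of failing after it.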