fix(gateway): flush initial openai chat stream chunk

Peter Steinberger
2026-05-05 11:03:35 +01:00
parent b31774749c
commit d520bc4cb6
3 changed files with 83 additions and 0 deletions


@@ -69,6 +69,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Gateway/OpenAI-compatible: send the assistant-role SSE chunk as soon as the streaming chat-completion response headers are accepted, so cold agent setup cannot leave `/v1/chat/completions` clients with a bodyless 200 response until their idle timeout fires (a sketch of that first chunk follows this list).
- TUI/sessions: bound the session picker to recent rows and use exact lookup-style refreshes for the active session, so stale stores no longer make the TUI hydrate weeks-old transcripts before becoming responsive. Thanks @vincentkoc.
- Doctor/gateway: report recent supervisor restart handoffs in `openclaw doctor --deep`, using the installed service environment when available so service-managed clean exits are visible in guided diagnostics. Thanks @shakkernerd.
- Gateway/status: show recent supervisor restart handoffs in `openclaw gateway status --deep`, including JSON details, so clean service-managed restarts are reported as restart handoffs instead of opaque stopped-service diagnostics. Thanks @shakkernerd.
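
For reference, the assistant-role chunk this fix flushes follows the standard OpenAI streaming shape; a minimal sketch with illustrative field values (not taken from the openclaw source):

```ts
// Role-only preamble chunk an OpenAI-compatible stream sends first;
// the id and model values here are illustrative.
const initialChunk = {
  id: "chatcmpl-123",
  object: "chat.completion.chunk",
  created: Math.floor(Date.now() / 1000),
  model: "openclaw",
  choices: [{ index: 0, delta: { role: "assistant" }, finish_reason: null }],
};
// SSE framing: a `data:` line terminated by a blank line.
const frame = `data: ${JSON.stringify(initialChunk)}\n\n`;
```

Writing this frame as soon as the response headers go out is what guarantees clients an immediate, non-empty body.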


@@ -1028,6 +1028,85 @@ describe("OpenAI-compatible HTTP API (e2e)", () => {
}
});
it(
"sends an initial SSE chunk before a streaming agent run settles",
{ timeout: 15_000 },
async () => {
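// The mocked agent run below stays pending until its abort signal fires, so
// any SSE bytes the client receives within its 2s timeout must come from the
// eagerly flushed assistant-role chunk, not from agent output.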
const port = enabledPort;
let serverAbortSignal: AbortSignal | undefined;
agentCommand.mockClear();
agentCommand.mockImplementationOnce(
(opts: unknown) =>
new Promise<undefined>((resolve) => {
const signal = (opts as { abortSignal?: AbortSignal } | undefined)?.abortSignal;
serverAbortSignal = signal;
if (signal?.aborted) {
resolve(undefined);
return;
}
signal?.addEventListener("abort", () => resolve(undefined), { once: true });
}),
);
let settled = false;
const firstChunk = new Promise<string>((resolve, reject) => {
const clientReq = http.request(
{
hostname: "127.0.0.1",
port,
path: "/v1/chat/completions",
method: "POST",
headers: {
"content-type": "application/json",
authorization: "Bearer secret",
},
},
(res) => {
expect(res.statusCode).toBe(200);
expect(res.headers["content-type"] ?? "").toContain("text/event-stream");
res.setEncoding("utf8");
res.once("data", (chunk) => {
settled = true;
resolve(String(chunk));
clientReq.destroy();
});
},
);
clientReq.on("error", (err) => {
if (!settled) {
reject(err);
}
});
clientReq.setTimeout(2_000, () => {
if (!settled) {
settled = true;
const timeoutError = new Error("timed out waiting for first SSE chunk");
clientReq.destroy(timeoutError);
// Reject explicitly: the error listener above skips errors once settled,
// so destroy() alone would leave firstChunk pending until the test times out.
reject(timeoutError);
}
});
clientReq.end(
JSON.stringify({
stream: true,
model: "openclaw",
messages: [{ role: "user", content: "hi" }],
}),
);
});
await expect(firstChunk).resolves.toContain('"role":"assistant"');
await vi.waitFor(() => {
expect(agentCommand).toHaveBeenCalledTimes(1);
});
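// Destroying the client request should abort the in-flight agent run; wait
// for that abort so the mocked run resolves and nothing leaks past the test.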
await vi.waitFor(
() => {
expect(serverAbortSignal?.aborted).toBe(true);
},
{ timeout: 5_000, interval: 50 },
);
},
);
it("includes usage in final stream chunk when stream_options.include_usage=true", async () => {
const port = enabledPort;
agentCommand.mockClear();


@@ -732,6 +732,9 @@ export async function handleOpenAiHttpRequest(
unsubscribe();
});
wroteRole = true;
writeAssistantRoleChunk(res, { runId, model });
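// The run is kicked off below without being awaited inline; the role chunk
// written above keeps the client's stream alive while it settles.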
void (async () => {
try {
const result = await agentCommandFromIngress(commandInput, defaultRuntime, deps);
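
The body of `writeAssistantRoleChunk` is not part of this hunk. A minimal sketch of such a helper, assuming the chunk shape sketched in the changelog section above (the real implementation may differ):

```ts
import type { ServerResponse } from "node:http";

// Hypothetical sketch: frame a role-only chunk as an SSE event and write it
// before the agent run is awaited. Under chunked transfer encoding, res.write
// hands the bytes to the socket immediately, so no explicit flush is needed.
function writeAssistantRoleChunk(
  res: ServerResponse,
  opts: { runId: string; model: string },
): void {
  const chunk = {
    id: opts.runId,
    object: "chat.completion.chunk",
    created: Math.floor(Date.now() / 1000),
    model: opts.model,
    choices: [{ index: 0, delta: { role: "assistant" }, finish_reason: null }],
  };
  res.write(`data: ${JSON.stringify(chunk)}\n\n`);
}
```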