fix(ui): clear webchat pending state only for completed active run (#73368)

This commit is contained in:
Vincent Koc
2026-04-28 01:47:00 -07:00
committed by GitHub
parent 3ed3248d7b
commit 02908db62b
5 changed files with 95 additions and 3 deletions

View File

@@ -24,6 +24,7 @@ Docs: https://docs.openclaw.ai
- CLI/plugins: keep bundled plugin installs out of `plugins.load.paths` while preserving install records, so install/inspect/doctor loops no longer warn about the current bundled plugin directory. Thanks @vincentkoc.
- Control UI/WebChat: keep large attachment payloads out of Lit state and optimistic chat messages, using object URL previews plus send-time payload serialization so PDF/image uploads no longer trigger `RangeError: Maximum call stack size exceeded`. Fixes #73360; refs #54378 and #63432. Thanks @hejunhui-73, @Ansub, and @christianhernandez3-afk.
- Agents/Anthropic: cancel stalled Anthropic Messages SSE body reads when abort signals fire, so active-memory timeouts release transport resources instead of leaving hidden recall runs parked on `reader.read()`. Refs #72965 and #73120. Thanks @wdeveloper16.
- Control UI/WebChat: keep pending run and typing state attached to the active client run, so unowned inject/announce/side-result finals no longer unlock unrelated active runs while completed owned runs still clear promptly. Fixes #57795; carries forward the narrow diagnosis from #57887. Thanks @haoyu-haoyu.
- Agents/models: keep per-agent primary models strict when `fallbacks` is omitted, so probe-only custom providers are not tried as hidden fallback candidates unless the agent explicitly opts in. Fixes #73332. Thanks @haumanto.
- Gateway/models: add `models.pricing.enabled` so offline or restricted-network installs can skip startup OpenRouter and LiteLLM pricing-catalog fetches while keeping explicit model costs working. Fixes #53639. Thanks @callebtc, @palewire, and @rjdjohnston.
- Onboarding: pin interactive and non-interactive health checks to the just-configured setup token/password so stale `OPENCLAW_GATEWAY_TOKEN` or `OPENCLAW_GATEWAY_PASSWORD` values do not produce false gateway-token-mismatch failures after setup. Fixes #72203. Thanks @galiniliev.

View File

@@ -977,6 +977,47 @@ describe("connectGateway", () => {
expect(loadChatHistoryMock).toHaveBeenCalledWith(host);
});
// Regression guard: unowned gateway events (payloads without a runId) must not
// complete the active client run or flush a deferred history reload.
it("keeps deferred session.message reload pending across unowned terminal events", () => {
const { host, client } = connectHostGateway();
// Simulate an in-flight client-owned run with partially streamed output.
host.chatRunId = "main-run-unowned";
host.chatStream = "still streaming";
loadChatHistoryMock.mockClear();
// Unowned session.message arrives mid-run: the history reload should be
// deferred rather than executed immediately.
client.emitEvent({
event: "session.message",
payload: {
sessionKey: "main",
},
});
// Unowned chat final (no runId): must neither unlock the active run nor
// trigger the deferred reload.
client.emitEvent({
event: "chat",
payload: {
sessionKey: "main",
state: "final",
},
});
expect(loadChatHistoryMock).not.toHaveBeenCalled();
expect(host.chatRunId).toBe("main-run-unowned");
expect(host.chatStream).toBe("still streaming");
// Owned final (runId matches the active run) completes the run promptly.
client.emitEvent({
event: "chat",
payload: {
runId: "main-run-unowned",
sessionKey: "main",
state: "final",
message: {
role: "assistant",
content: [{ type: "text", text: "Done" }],
},
},
});
expect(host.chatRunId).toBeNull();
// NOTE(review): the deferred reload stays suppressed even after the owned
// final — presumably the final's own message makes a reload redundant;
// confirm against the gateway event handler.
expect(loadChatHistoryMock).not.toHaveBeenCalled();
});
it("clears tracked BTW terminal runs after reconnect hello", () => {
const host = createHost();

View File

@@ -585,7 +585,7 @@ function isEventForDifferentActiveRun(
payload: ChatEventPayload | undefined,
activeRunId: string | null,
): boolean {
return Boolean(activeRunId && payload?.runId && payload.runId !== activeRunId);
return Boolean(activeRunId && payload && payload.runId !== activeRunId);
}
function handleChatGatewayEvent(host: GatewayHost, payload: ChatEventPayload | undefined) {

View File

@@ -201,6 +201,55 @@ describe("handleChatEvent", () => {
expect(state.chatMessages).toEqual([]);
});
// An unowned final (no runId) with no message must leave the active run
// untouched: run id, streamed text, stream start time, and message list are
// all preserved. The handler still reports "final" for this payload shape.
it("keeps active stream for unowned final payloads", () => {
const state = createActiveStreamingState();
// Payload deliberately omits runId → treated as unowned.
const payload: ChatEventPayload = {
sessionKey: "main",
state: "final",
};
expect(handleChatEvent(state, payload)).toBe("final");
// Active run state is untouched by the unowned final.
expect(state.chatRunId).toBe("run-user");
expect(state.chatStream).toBe("Working...");
expect(state.chatStreamStartedAt).toBe(123);
expect(state.chatMessages).toEqual([]);
});
// An unowned final that carries an assistant message (e.g. an injected or
// announced side-result) is appended to chatMessages, but the active run's
// id, stream text, and start time remain untouched.
it("keeps active stream while appending unowned assistant finals", () => {
const state = createActiveStreamingState();
// No runId → unowned; message payload should be surfaced, not swallowed.
const payload: ChatEventPayload = {
sessionKey: "main",
state: "final",
message: {
role: "assistant",
content: [{ type: "text", text: "Injected note" }],
},
};
expect(handleChatEvent(state, payload)).toBe(null);
// Run state survives the unowned final...
expect(state.chatRunId).toBe("run-user");
expect(state.chatStream).toBe("Working...");
expect(state.chatStreamStartedAt).toBe(123);
// ...while the unowned assistant message is appended for display.
expect(state.chatMessages).toEqual([payload.message]);
});
// Unowned aborted/error terminals (no runId) must be ignored entirely:
// they neither clear the active run nor append any message.
it.each(["aborted", "error"] as const)(
"keeps active stream for unowned %s payloads",
(terminalState) => {
const state = createActiveStreamingState();
// No runId → unowned terminal event.
const payload: ChatEventPayload = {
sessionKey: "main",
state: terminalState,
};
expect(handleChatEvent(state, payload)).toBe(null);
// Active run state is fully preserved.
expect(state.chatRunId).toBe("run-user");
expect(state.chatStream).toBe("Working...");
expect(state.chatStreamStartedAt).toBe(123);
expect(state.chatMessages).toEqual([]);
},
);
it("persists streamed text when final event carries no message", () => {
const existingMessage = {
role: "user",

View File

@@ -364,7 +364,7 @@ export type ChatState = {
};
export type ChatEventPayload = {
runId: string;
runId?: string;
sessionKey: string;
state: "delta" | "final" | "aborted" | "error";
message?: unknown;
@@ -718,9 +718,10 @@ export function handleChatEvent(state: ChatState, payload?: ChatEventPayload) {
return null;
}
// Terminal events for the active client run carry runId; missing-runId events are unowned.
// Final from another run (e.g. sub-agent announce): refresh history to show new message.
// See https://github.com/openclaw/openclaw/issues/1909
if (payload.runId && state.chatRunId && payload.runId !== state.chatRunId) {
if (state.chatRunId && payload.runId !== state.chatRunId) {
if (payload.state === "final") {
const finalMessage = normalizeFinalAssistantMessage(payload.message);
if (finalMessage && !isAssistantSilentReply(finalMessage)) {