mirror of
https://github.com/openclaw/openclaw.git
synced 2026-03-22 23:41:07 +00:00
fix(control-ui): prevent chat tab freeze when loading long history
- Request at most 25 messages from chat.history (was 200) to reduce payload size and JSON parse cost in the browser.
- Cap rendered chat history at 25 messages to avoid a main-thread freeze from rendering many markdown messages (fixes 'tab unresponsive').
- Defer applying messages with requestAnimationFrame so the UI can paint 'Loading chat...' before the heavy render.
- Cap sessions.list to 40 when loading the chat tab to avoid a large session-dropdown response.

Helps address #10622 (Webchat UI freezes when loading sessions with many messages). The gateway already caps payload size (#18505); this change adds client-side limits so the Control UI stays responsive with long sessions.

Made-with: Cursor
This commit is contained in:
@@ -356,8 +356,8 @@ export async function refreshChat(host: ChatHost, opts?: { scheduleScroll?: bool
|
||||
await Promise.all([
|
||||
loadChatHistory(host as unknown as OpenClawApp),
|
||||
loadSessions(host as unknown as OpenClawApp, {
|
||||
activeMinutes: 0,
|
||||
limit: 0,
|
||||
activeMinutes: CHAT_SESSIONS_ACTIVE_MINUTES,
|
||||
limit: 40,
|
||||
includeGlobal: true,
|
||||
includeUnknown: true,
|
||||
}),
|
||||
|
||||
@@ -71,21 +71,27 @@ export async function loadChatHistory(state: ChatState) {
|
||||
state.chatLoading = true;
|
||||
state.lastError = null;
|
||||
try {
|
||||
// Request a small batch to avoid huge payloads and main-thread freeze when
|
||||
// parsing JSON and rendering many markdown messages (browser "tab unresponsive").
|
||||
const CHAT_HISTORY_REQUEST_LIMIT = 25;
|
||||
const res = await state.client.request<{ messages?: Array<unknown>; thinkingLevel?: string }>(
|
||||
"chat.history",
|
||||
{
|
||||
sessionKey: state.sessionKey,
|
||||
limit: 200,
|
||||
limit: CHAT_HISTORY_REQUEST_LIMIT,
|
||||
},
|
||||
);
|
||||
const messages = Array.isArray(res.messages) ? res.messages : [];
|
||||
state.chatMessages = messages.filter((message) => !isAssistantSilentReply(message));
|
||||
const filtered = messages.filter((message) => !isAssistantSilentReply(message));
|
||||
state.chatThinkingLevel = res.thinkingLevel ?? null;
|
||||
// Clear all streaming state — history includes tool results and text
|
||||
// inline, so keeping streaming artifacts would cause duplicates.
|
||||
maybeResetToolStream(state);
|
||||
state.chatStream = null;
|
||||
state.chatStreamStartedAt = null;
|
||||
// Defer applying messages so the UI can paint "Loading chat..." before the heavy render.
|
||||
state.chatLoading = false;
|
||||
requestAnimationFrame(() => {
|
||||
state.chatMessages = filtered;
|
||||
});
|
||||
} catch (err) {
|
||||
state.lastError = String(err);
|
||||
} finally {
|
||||
|
||||
@@ -1328,7 +1328,8 @@ export function renderChat(props: ChatProps) {
|
||||
`;
|
||||
}
|
||||
|
||||
const CHAT_HISTORY_RENDER_LIMIT = 200;
|
||||
// Cap rendered history to avoid main-thread freeze (markdown + DOM for each message).
|
||||
const CHAT_HISTORY_RENDER_LIMIT = 25;
|
||||
|
||||
function groupMessages(items: ChatItem[]): Array<ChatItem | MessageGroup> {
|
||||
const result: Array<ChatItem | MessageGroup> = [];
|
||||
|
||||
Reference in New Issue
Block a user