Fix context usage display and active-run reload interruptions

Fixes context usage display regressions and prevents active runs from being interrupted by channel reloads. Adds persisted tool-result detail bounds so large tool metadata stays out of model/session payloads.
This commit is contained in:
Poo-Squirry
2026-04-26 03:07:52 +09:00
committed by GitHub
parent c3bfd328ad
commit fd3840cb00
11 changed files with 1014 additions and 43 deletions

View File

@@ -106,6 +106,40 @@ function renderAssistantMessage(
renderGroupedMessage(container, message, "assistant", opts);
}
/**
 * Renders a list of assistant messages into `container` as one grouped
 * assistant message block.
 *
 * The group timestamp comes from the first message when it carries a numeric
 * `timestamp` field; otherwise the current time is used. Caller-supplied
 * `opts` override the defaults passed to `renderMessageGroup`.
 */
function renderAssistantMessages(
  container: HTMLElement,
  messages: unknown[],
  opts: Partial<RenderMessageGroupOptions> = {},
) {
  // Pull a numeric timestamp off the first message, if one is present.
  const first = messages[0];
  let groupTimestamp = Date.now();
  if (first !== null && typeof first === "object") {
    const candidate = (first as { timestamp?: unknown }).timestamp;
    if (typeof candidate === "number") {
      groupTimestamp = candidate;
    }
  }

  // Wrap each raw message with a stable per-index key for rendering.
  const keyedMessages = messages.map((message, index) => ({
    key: `assistant-message-${index}`,
    message,
  }));

  const group: MessageGroup = {
    kind: "group",
    key: "assistant-group",
    role: "assistant",
    messages: keyedMessages,
    timestamp: groupTimestamp,
    isStreaming: false,
  };

  // Defaults first so that anything in `opts` wins.
  const renderOptions = {
    showReasoning: true,
    showToolCalls: true,
    assistantName: "OpenClaw",
    assistantAvatar: null,
    ...opts,
  };

  render(renderMessageGroup(group, renderOptions), container);
}
function renderGroupedMessage(
container: HTMLElement,
message: unknown,
@@ -318,6 +352,32 @@ describe("grouped chat rendering", () => {
expect(outputHeavy.querySelector(".msg-meta__ctx")?.textContent).toBe("10% ctx");
});
it("uses the largest single assistant call for grouped context usage", () => {
  const host = document.createElement("div");
  const turns = [
    {
      role: "assistant",
      content: "Checking",
      usage: { input: 105_944, output: 100 },
      timestamp: 1000,
    },
    {
      role: "assistant",
      content: "Done",
      usage: { input: 108_577, output: 100 },
      timestamp: 1001,
    },
  ];

  renderAssistantMessages(host, turns, { contextWindow: 258_400 });

  // Context % reflects the single largest call (108,577 / 258,400 ≈ 42%),
  // while the displayed input total sums both calls (≈ 214.5k tokens).
  expect(host.querySelector(".msg-meta__ctx")?.textContent).toBe("42% ctx");
  expect(host.textContent).toContain("↑214.5k");
});
it("renders full dates with message and streaming timestamps", () => {
const container = document.createElement("div");
const timestamp = Date.UTC(2026, 3, 24, 18, 30);

View File

@@ -432,6 +432,7 @@ function extractGroupMeta(group: MessageGroup, contextWindow: number | null): Gr
let cost = 0;
let model: string | null = null;
let hasUsage = false;
let maxPromptTokens = 0;
for (const { message } of group.messages) {
const m = message as Record<string, unknown>;
@@ -441,10 +442,15 @@ function extractGroupMeta(group: MessageGroup, contextWindow: number | null): Gr
const usage = m.usage as Record<string, number> | undefined;
if (usage) {
hasUsage = true;
input += usage.input ?? usage.inputTokens ?? 0;
output += usage.output ?? usage.outputTokens ?? 0;
cacheRead += usage.cacheRead ?? usage.cache_read_input_tokens ?? 0;
cacheWrite += usage.cacheWrite ?? usage.cache_creation_input_tokens ?? 0;
const callInput = usage.input ?? usage.inputTokens ?? 0;
const callOutput = usage.output ?? usage.outputTokens ?? 0;
const callCacheRead = usage.cacheRead ?? usage.cache_read_input_tokens ?? 0;
const callCacheWrite = usage.cacheWrite ?? usage.cache_creation_input_tokens ?? 0;
input += callInput;
output += callOutput;
cacheRead += callCacheRead;
cacheWrite += callCacheWrite;
maxPromptTokens = Math.max(maxPromptTokens, callInput + callCacheRead + callCacheWrite);
}
const c = m.cost as Record<string, number> | undefined;
if (c?.total) {
@@ -459,10 +465,9 @@ function extractGroupMeta(group: MessageGroup, contextWindow: number | null): Gr
return null;
}
const promptTokens = input + cacheRead + cacheWrite;
const contextPercent =
contextWindow && promptTokens > 0
? Math.min(Math.round((promptTokens / contextWindow) * 100), 100)
contextWindow && maxPromptTokens > 0
? Math.min(Math.round((maxPromptTokens / contextWindow) * 100), 100)
: null;
return { input, output, cacheRead, cacheWrite, cost, model, contextPercent };