fix(ui): use current context usage in Control UI

This commit is contained in:
Vincent Koc
2026-04-25 00:54:33 -07:00
committed by GitHub
parent 73d72204a0
commit ebb08dc70e
19 changed files with 194 additions and 47 deletions

View File

@@ -14,6 +14,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- OpenAI/Codex image generation: canonicalize legacy `openai-codex.baseUrl` values such as `https://chatgpt.com/backend-api` to the Codex Responses backend before calling `gpt-image-2`, matching the chat transport. Fixes #71460.
- Control UI: make `/usage` use the fresh context snapshot for context percentage, and include cache-write tokens in the Usage overview cache-hit denominator. Fixes #47885. Thanks @imwyvern and @Ante042.
- Telegram/webhook: acknowledge validated webhook updates before running bot middleware, keeping slow agent turns from tripping Telegram delivery retries while preserving per-chat processing lanes. Fixes #71392.
- MCP: retire one-shot embedded bundled MCP runtimes at run end, skip bundle-MCP startup when a runtime tool allowlist cannot reach bundle-MCP tools, and add `mcp.sessionIdleTtlMs` idle eviction for leaked session runtimes. Fixes #71106, #71110, #70389, and #70808.
- MCP/config reload: hot-apply `mcp.*` changes by disposing cached session MCP runtimes, and dispose bundled MCP runtimes during gateway shutdown so removed `mcp.servers` entries reap child processes promptly. Fixes #60656.

View File

@@ -1,10 +1,10 @@
{
"fallbackKeys": [],
"generatedAt": "2026-04-24T20:26:59.540Z",
"generatedAt": "2026-04-25T07:32:16.675Z",
"locale": "de",
"model": "gpt-5.5",
"model": "gpt-5.4",
"provider": "openai",
"sourceHash": "33cba33627744c6bb03182f53b02a9a2640272ef0fa3d039ce52723a96f5099e",
"sourceHash": "2af900ae253948aab69216e38e0fce2dfde89801d178dee0ebb8dd28df2e11ef",
"totalKeys": 734,
"translatedKeys": 734,
"workflow": 1

View File

@@ -1,10 +1,10 @@
{
"fallbackKeys": [],
"generatedAt": "2026-04-24T20:27:55.136Z",
"generatedAt": "2026-04-25T07:32:18.681Z",
"locale": "es",
"model": "gpt-5.5",
"model": "gpt-5.4",
"provider": "openai",
"sourceHash": "33cba33627744c6bb03182f53b02a9a2640272ef0fa3d039ce52723a96f5099e",
"sourceHash": "2af900ae253948aab69216e38e0fce2dfde89801d178dee0ebb8dd28df2e11ef",
"totalKeys": 734,
"translatedKeys": 734,
"workflow": 1

View File

@@ -1,10 +1,10 @@
{
"fallbackKeys": [],
"generatedAt": "2026-04-24T20:29:33.727Z",
"generatedAt": "2026-04-25T07:32:27.034Z",
"locale": "fr",
"model": "gpt-5.5",
"model": "gpt-5.4",
"provider": "openai",
"sourceHash": "33cba33627744c6bb03182f53b02a9a2640272ef0fa3d039ce52723a96f5099e",
"sourceHash": "2af900ae253948aab69216e38e0fce2dfde89801d178dee0ebb8dd28df2e11ef",
"totalKeys": 734,
"translatedKeys": 734,
"workflow": 1

View File

@@ -1,10 +1,10 @@
{
"fallbackKeys": [],
"generatedAt": "2026-04-24T20:30:08.920Z",
"generatedAt": "2026-04-25T07:32:34.367Z",
"locale": "id",
"model": "gpt-5.5",
"model": "gpt-5.4",
"provider": "openai",
"sourceHash": "33cba33627744c6bb03182f53b02a9a2640272ef0fa3d039ce52723a96f5099e",
"sourceHash": "2af900ae253948aab69216e38e0fce2dfde89801d178dee0ebb8dd28df2e11ef",
"totalKeys": 734,
"translatedKeys": 734,
"workflow": 1

View File

@@ -1,10 +1,10 @@
{
"fallbackKeys": [],
"generatedAt": "2026-04-24T20:29:06.508Z",
"generatedAt": "2026-04-25T07:32:20.713Z",
"locale": "ja-JP",
"model": "gpt-5.5",
"model": "gpt-5.4",
"provider": "openai",
"sourceHash": "33cba33627744c6bb03182f53b02a9a2640272ef0fa3d039ce52723a96f5099e",
"sourceHash": "2af900ae253948aab69216e38e0fce2dfde89801d178dee0ebb8dd28df2e11ef",
"totalKeys": 734,
"translatedKeys": 734,
"workflow": 1

View File

@@ -1,10 +1,10 @@
{
"fallbackKeys": [],
"generatedAt": "2026-04-24T20:29:26.571Z",
"generatedAt": "2026-04-25T07:32:24.032Z",
"locale": "ko",
"model": "gpt-5.5",
"model": "gpt-5.4",
"provider": "openai",
"sourceHash": "33cba33627744c6bb03182f53b02a9a2640272ef0fa3d039ce52723a96f5099e",
"sourceHash": "2af900ae253948aab69216e38e0fce2dfde89801d178dee0ebb8dd28df2e11ef",
"totalKeys": 734,
"translatedKeys": 734,
"workflow": 1

View File

@@ -1,10 +1,10 @@
{
"fallbackKeys": [],
"generatedAt": "2026-04-24T20:30:13.404Z",
"generatedAt": "2026-04-25T07:32:38.432Z",
"locale": "pl",
"model": "gpt-5.5",
"model": "gpt-5.4",
"provider": "openai",
"sourceHash": "33cba33627744c6bb03182f53b02a9a2640272ef0fa3d039ce52723a96f5099e",
"sourceHash": "2af900ae253948aab69216e38e0fce2dfde89801d178dee0ebb8dd28df2e11ef",
"totalKeys": 734,
"translatedKeys": 734,
"workflow": 1

View File

@@ -1,10 +1,10 @@
{
"fallbackKeys": [],
"generatedAt": "2026-04-24T20:27:01.151Z",
"generatedAt": "2026-04-25T07:32:14.907Z",
"locale": "pt-BR",
"model": "gpt-5.5",
"model": "gpt-5.4",
"provider": "openai",
"sourceHash": "33cba33627744c6bb03182f53b02a9a2640272ef0fa3d039ce52723a96f5099e",
"sourceHash": "2af900ae253948aab69216e38e0fce2dfde89801d178dee0ebb8dd28df2e11ef",
"totalKeys": 734,
"translatedKeys": 734,
"workflow": 1

View File

@@ -1,10 +1,10 @@
{
"fallbackKeys": [],
"generatedAt": "2026-04-24T20:30:41.102Z",
"generatedAt": "2026-04-25T07:32:40.554Z",
"locale": "th",
"model": "gpt-5.5",
"model": "gpt-5.4",
"provider": "openai",
"sourceHash": "33cba33627744c6bb03182f53b02a9a2640272ef0fa3d039ce52723a96f5099e",
"sourceHash": "2af900ae253948aab69216e38e0fce2dfde89801d178dee0ebb8dd28df2e11ef",
"totalKeys": 734,
"translatedKeys": 734,
"workflow": 1

View File

@@ -1,10 +1,10 @@
{
"fallbackKeys": [],
"generatedAt": "2026-04-24T20:29:55.552Z",
"generatedAt": "2026-04-25T07:32:30.128Z",
"locale": "tr",
"model": "gpt-5.5",
"model": "gpt-5.4",
"provider": "openai",
"sourceHash": "33cba33627744c6bb03182f53b02a9a2640272ef0fa3d039ce52723a96f5099e",
"sourceHash": "2af900ae253948aab69216e38e0fce2dfde89801d178dee0ebb8dd28df2e11ef",
"totalKeys": 734,
"translatedKeys": 734,
"workflow": 1

View File

@@ -1,10 +1,10 @@
{
"fallbackKeys": [],
"generatedAt": "2026-04-24T20:30:10.003Z",
"generatedAt": "2026-04-25T07:32:32.725Z",
"locale": "uk",
"model": "gpt-5.5",
"model": "gpt-5.4",
"provider": "openai",
"sourceHash": "33cba33627744c6bb03182f53b02a9a2640272ef0fa3d039ce52723a96f5099e",
"sourceHash": "2af900ae253948aab69216e38e0fce2dfde89801d178dee0ebb8dd28df2e11ef",
"totalKeys": 734,
"translatedKeys": 734,
"workflow": 1

View File

@@ -1,10 +1,10 @@
{
"fallbackKeys": [],
"generatedAt": "2026-04-24T20:26:33.969Z",
"generatedAt": "2026-04-25T07:32:11.950Z",
"locale": "zh-CN",
"model": "gpt-5.5",
"model": "gpt-5.4",
"provider": "openai",
"sourceHash": "33cba33627744c6bb03182f53b02a9a2640272ef0fa3d039ce52723a96f5099e",
"sourceHash": "2af900ae253948aab69216e38e0fce2dfde89801d178dee0ebb8dd28df2e11ef",
"totalKeys": 734,
"translatedKeys": 734,
"workflow": 1

View File

@@ -1,10 +1,10 @@
{
"fallbackKeys": [],
"generatedAt": "2026-04-24T20:27:07.043Z",
"generatedAt": "2026-04-25T07:32:13.730Z",
"locale": "zh-TW",
"model": "gpt-5.5",
"model": "gpt-5.4",
"provider": "openai",
"sourceHash": "33cba33627744c6bb03182f53b02a9a2640272ef0fa3d039ce52723a96f5099e",
"sourceHash": "2af900ae253948aab69216e38e0fce2dfde89801d178dee0ebb8dd28df2e11ef",
"totalKeys": 734,
"translatedKeys": 734,
"workflow": 1

View File

@@ -535,7 +535,8 @@ export const en: TranslationMap = {
errorHint: "Error rate = errors / total messages. Lower is better.",
avgSession: "avg session",
cacheHitRate: "Cache Hit Rate",
cacheHint: "Cache hit rate = cache read / (input + cache read). Higher is better.",
cacheHint:
"Cache hit rate = cache read / (input + cache read + cache write). Higher is better.",
cached: "cached",
prompt: "prompt",
calls: "calls",

View File

@@ -488,7 +488,70 @@ describe("executeSlashCommand directives", () => {
);
expect(result.content).toBe(
"**Session Usage**\nInput: **1.2k** tokens\nOutput: **300** tokens\nTotal: **1.5k** tokens\nContext: **30%** of 4k\nModel: `gpt-4.1-mini`",
"**Session Usage**\nInput: **1.2k** tokens\nOutput: **300** tokens\nTotal: **1.5k** tokens\nContext: **38%** of 4k\nModel: `gpt-4.1-mini`",
);
expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {});
});
it("keeps /usage context hidden when the context snapshot is stale", async () => {
const request = vi.fn(async (method: string, _payload?: unknown) => {
if (method === "sessions.list") {
return {
sessions: [
row("agent:main:main", {
model: "gpt-4.1-mini",
inputTokens: 1200,
outputTokens: 300,
totalTokens: 1500,
totalTokensFresh: false,
contextTokens: 4000,
}),
],
};
}
throw new Error(`unexpected method: ${method}`);
});
const result = await executeSlashCommand(
{ request } as unknown as GatewayBrowserClient,
"agent:main:main",
"usage",
"",
);
expect(result.content).toBe(
"**Session Usage**\nInput: **1.2k** tokens\nOutput: **300** tokens\nTotal: **~1.5k** tokens\nModel: `gpt-4.1-mini`",
);
expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {});
});
it("uses the context snapshot for /usage while preserving cumulative total display", async () => {
const request = vi.fn(async (method: string, _payload?: unknown) => {
if (method === "sessions.list") {
return {
sessions: [
row("agent:main:main", {
model: "gpt-4.1-mini",
inputTokens: 1200,
outputTokens: 300,
totalTokens: 1250,
contextTokens: 4000,
}),
],
};
}
throw new Error(`unexpected method: ${method}`);
});
const result = await executeSlashCommand(
{ request } as unknown as GatewayBrowserClient,
"agent:main:main",
"usage",
"",
);
expect(result.content).toBe(
"**Session Usage**\nInput: **1.2k** tokens\nOutput: **300** tokens\nTotal: **1.5k** tokens\nContext: **31%** of 4k\nModel: `gpt-4.1-mini`",
);
expect(request).toHaveBeenNthCalledWith(1, "sessions.list", {});
});

View File

@@ -377,17 +377,30 @@ async function executeUsage(
if (!session) {
return { content: "No active session." };
}
const input = session.inputTokens ?? 0;
const output = session.outputTokens ?? 0;
const total = session.totalTokens ?? input + output;
const hasInputTokens = Number.isFinite(session.inputTokens);
const hasOutputTokens = Number.isFinite(session.outputTokens);
const input = hasInputTokens ? (session.inputTokens ?? 0) : 0;
const output = hasOutputTokens ? (session.outputTokens ?? 0) : 0;
const cumulativeTotal = hasInputTokens || hasOutputTokens ? input + output : null;
const contextSnapshotTotal = Number.isFinite(session.totalTokens)
? (session.totalTokens ?? null)
: cumulativeTotal;
const totalTokensFresh = session.totalTokensFresh !== false;
const ctx = session.contextTokens ?? 0;
const pct = ctx > 0 ? Math.round((input / ctx) * 100) : null;
const pct =
contextSnapshotTotal !== null && totalTokensFresh && ctx > 0
? Math.round((contextSnapshotTotal / ctx) * 100)
: null;
const totalDisplay =
cumulativeTotal === null
? "n/a"
: `${totalTokensFresh ? "" : "~"}${fmtTokens(cumulativeTotal)}`;
const lines = [
"**Session Usage**",
`Input: **${fmtTokens(input)}** tokens`,
`Output: **${fmtTokens(output)}** tokens`,
`Total: **${fmtTokens(total)}** tokens`,
`Total: **${totalDisplay}** tokens`,
];
if (pct !== null) {
lines.push(`Context: **${pct}%** of ${fmtTokens(ctx)}`);

View File

@@ -0,0 +1,69 @@
/* @vitest-environment jsdom */
// Regression test for the Usage-overview cache-hit-rate calculation:
// cache-WRITE tokens must be included in the denominator alongside
// input and cache-read tokens (hit rate = cacheRead / (input + cacheRead + cacheWrite)).
import { render } from "lit";
import { describe, expect, it } from "vitest";
import { renderUsageInsights } from "./usage-render-overview.ts";
import type { UsageAggregates, UsageTotals } from "./usageTypes.ts";
// Fixture chosen so the denominator is a round 1000:
//   input (100) + cacheRead (300) + cacheWrite (600) = 1000
//   => hit rate = 300 / 1000 = 30.0%
// totalTokens (1040) is the sum of the four token buckets above plus output (40).
// All cost fields are zeroed — cost display is not under test here.
const totals: UsageTotals = {
input: 100,
output: 40,
cacheRead: 300,
cacheWrite: 600,
totalTokens: 1040,
totalCost: 0,
inputCost: 0,
outputCost: 0,
cacheReadCost: 0,
cacheWriteCost: 0,
missingCostEntries: 0,
};
// Minimal aggregate shape: only the fields renderUsageInsights reads are
// populated; the `as unknown as` cast papers over omitted optional members.
// NOTE(review): if UsageAggregates gains required fields this cast will hide
// the mismatch — consider a factory helper instead.
const aggregates = {
messages: {
total: 4,
user: 2,
assistant: 2,
toolCalls: 0,
toolResults: 0,
errors: 0,
},
tools: {
totalCalls: 0,
uniqueTools: 0,
tools: [],
},
byModel: [],
byProvider: [],
byAgent: [],
byChannel: [],
daily: [],
} as unknown as UsageAggregates;
describe("renderUsageInsights", () => {
it("includes cache writes in cache-hit-rate denominator", () => {
// Render into a detached element (jsdom) and assert on text content —
// we only care about the computed labels, not the markup structure.
const container = document.createElement("div");
render(
renderUsageInsights(
totals,
aggregates,
{
durationSumMs: 0,
durationCount: 0,
avgDurationMs: 0,
errorRate: 0,
},
false,
[],
1,
1,
),
container,
);
// 300 / (100 + 300 + 600) = 30.0% — would read 75.0% if cacheWrite
// were omitted from the denominator (the pre-fix behavior).
expect(container.textContent).toContain("30.0%");
expect(container.textContent).toContain("300 cached");
// "1.0K prompt" — presumably input + cacheRead + cacheWrite = 1000
// formatted as 1.0K; exact composition depends on the render helper —
// TODO confirm against usage-render-overview.ts.
expect(container.textContent).toContain("1.0K prompt");
});
});

View File

@@ -497,7 +497,7 @@ function renderUsageInsights(
? Math.round(totals.totalTokens / aggregates.messages.total)
: 0;
const avgCost = aggregates.messages.total ? totals.totalCost / aggregates.messages.total : 0;
const cacheBase = totals.input + totals.cacheRead;
const cacheBase = totals.input + totals.cacheRead + totals.cacheWrite;
const cacheHitRate = cacheBase > 0 ? totals.cacheRead / cacheBase : 0;
const cacheHitLabel =
cacheBase > 0 ? `${(cacheHitRate * 100).toFixed(1)}%` : t("usage.common.emptyValue");