fix: align session status transcript fallback

This commit is contained in:
Peter Steinberger
2026-04-06 19:49:05 +01:00
parent f9c721d5bf
commit b6e0a24d50
2 changed files with 89 additions and 55 deletions

View File

@@ -1082,6 +1082,69 @@ describe("buildStatusMessage", () => {
);
});
// Regression test: when the last transcript entry is an all-zero "delivery mirror"
// message, the status builder must fall back to the most recent *real* usage entry
// (the anthropic message here), matching sessions.list behavior.
it("uses the same transcript usage fallback as sessions.list when a delivery mirror is last", async () => {
await withTempHome(
async (dir) => {
const sessionId = "sess-cache-delivery-mirror";
// Session transcript lives under <home>/.openclaw/agents/main/sessions/<id>.jsonl
const logPath = path.join(
dir,
".openclaw",
"agents",
"main",
"sessions",
`${sessionId}.jsonl`,
);
fs.mkdirSync(path.dirname(logPath), { recursive: true });
// Write a minimal JSONL transcript: session header, one real usage entry,
// then a trailing zero-usage mirror entry that should NOT win the fallback.
fs.writeFileSync(
logPath,
[
JSON.stringify({ type: "session", version: 1, id: sessionId }),
JSON.stringify({
type: "message",
message: {
role: "assistant",
provider: "anthropic",
model: "claude-opus-4-6",
// Real usage: 1000 cached tokens out of 1003 total -> 100% cache hit.
usage: {
input: 1,
output: 2,
cacheRead: 1000,
cacheWrite: 0,
totalTokens: 1003,
},
},
}),
JSON.stringify({
type: "message",
message: {
role: "assistant",
provider: "openclaw",
model: "delivery-mirror",
// All-zero usage: must be skipped by the fallback logic.
usage: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
totalTokens: 0,
},
},
}),
].join("\n"),
"utf-8",
);
const text = buildTranscriptStatusText({
sessionId,
sessionKey: "agent:main:main",
});
// Expect the status to reflect the anthropic entry's cache figures,
// not the zeroed mirror entry.
expect(normalizeTestText(text)).toContain("Cache: 100% hit · 1.0k cached, 0 new");
expect(normalizeTestText(text)).toContain("Context: 1.0k/32k");
},
// NOTE(review): withTempHome presumably creates/cleans a temp HOME dir
// named with this prefix — confirm against the helper's definition.
{ prefix: "openclaw-status-" },
);
});
it("preserves existing nonzero cache usage over transcript fallback values", async () => {
await withTempHome(
async (dir) => {
@@ -1446,7 +1509,7 @@ describe("buildCommandsMessagePaginated", () => {
commands: { config: false, debug: false },
} as unknown as OpenClawConfig,
undefined,
{ surface: "telegram", page: 1 },
{ surface: "telegram", page: 1, forcePaginatedList: true },
);
expect(result.text).toContain(" Commands (1/");
expect(result.text).toContain("Session");
@@ -1466,7 +1529,7 @@ describe("buildCommandsMessagePaginated", () => {
commands: { config: false, debug: false },
} as unknown as OpenClawConfig,
undefined,
{ surface: "telegram", page: 1 },
{ surface: "telegram", page: 1, forcePaginatedList: true },
);
const pages = Array.from({ length: firstPage.totalPages }, (_, index) =>
buildPaginatedCommands(
@@ -1474,7 +1537,7 @@ describe("buildCommandsMessagePaginated", () => {
commands: { config: false, debug: false },
} as unknown as OpenClawConfig,
undefined,
{ surface: "telegram", page: index + 1 },
{ surface: "telegram", page: index + 1, forcePaginatedList: true },
),
);
const pluginPage = pages.find((page) => page.text.includes("/plugin_cmd (demo-plugin)"));

View File

@@ -14,7 +14,6 @@ import type { SkillCommandSpec } from "../agents/skills.js";
import { describeToolForVerbose } from "../agents/tool-description-summary.js";
import { normalizeToolName } from "../agents/tool-policy-shared.js";
import type { EffectiveToolInventoryResult } from "../agents/tools-effective-inventory.js";
import { derivePromptTokens, normalizeUsage, type UsageLike } from "../agents/usage.js";
import { resolveChannelModelOverride } from "../channels/model-overrides.js";
import { getChannelPlugin } from "../channels/plugins/index.js";
import { isCommandFlagEnabled } from "../config/commands.js";
@@ -26,6 +25,7 @@ import {
type SessionEntry,
type SessionScope,
} from "../config/sessions.js";
import { readLatestSessionUsageFromTranscript } from "../gateway/session-utils.fs.js";
import { formatTimeAgo } from "../infra/format-time/format-relative.ts";
import { resolveCommitHash } from "../infra/git-commit.js";
import type { MediaUnderstandingDecision } from "../media-understanding/types.js";
@@ -269,65 +269,36 @@ const readUsageFromSessionLog = (
}
try {
// Read the tail only; we only need the most recent usage entries.
const TAIL_BYTES = 8192;
const stat = fs.statSync(logPath);
const offset = Math.max(0, stat.size - TAIL_BYTES);
const buf = Buffer.alloc(Math.min(TAIL_BYTES, stat.size));
const fd = fs.openSync(logPath, "r");
try {
fs.readSync(fd, buf, 0, buf.length, offset);
} finally {
fs.closeSync(fd);
}
const tail = buf.toString("utf-8");
const lines = (offset > 0 ? tail.slice(tail.indexOf("\n") + 1) : tail).split(/\n+/);
let input = 0;
let output = 0;
let promptTokens = 0;
let model: string | undefined;
let lastUsage: ReturnType<typeof normalizeUsage> | undefined;
for (const line of lines) {
if (!line.trim()) {
continue;
}
try {
const parsed = JSON.parse(line) as {
message?: {
usage?: UsageLike;
model?: string;
};
usage?: UsageLike;
model?: string;
};
const usageRaw = parsed.message?.usage ?? parsed.usage;
const usage = normalizeUsage(usageRaw);
if (usage) {
lastUsage = usage;
}
model = parsed.message?.model ?? parsed.model ?? model;
} catch {
// ignore bad lines (including a truncated first tail line)
}
}
if (!lastUsage) {
const snapshot = readLatestSessionUsageFromTranscript(
sessionId,
storePath,
sessionEntry?.sessionFile,
agentId ?? (sessionKey ? resolveAgentIdFromSessionKey(sessionKey) : undefined),
);
if (!snapshot) {
return undefined;
}
input = lastUsage.input ?? 0;
output = lastUsage.output ?? 0;
promptTokens = derivePromptTokens(lastUsage) ?? lastUsage.total ?? input + output;
const total = lastUsage.total ?? promptTokens + output;
const input = snapshot.inputTokens ?? 0;
const output = snapshot.outputTokens ?? 0;
const cacheRead = snapshot.cacheRead ?? 0;
const cacheWrite = snapshot.cacheWrite ?? 0;
const promptTokens = snapshot.totalTokens ?? input + cacheRead + cacheWrite;
const total = promptTokens + output;
if (promptTokens === 0 && total === 0) {
return undefined;
}
const model = snapshot.modelProvider
? snapshot.model
? `${snapshot.modelProvider}/${snapshot.model}`
: snapshot.modelProvider
: snapshot.model;
return {
input,
output,
cacheRead: lastUsage.cacheRead ?? 0,
cacheWrite: lastUsage.cacheWrite ?? 0,
cacheRead,
cacheWrite,
promptTokens,
total,
model,