fix(webchat): keep runtime context out of visible transcripts

Keep WebChat runtime context available to the model while persisting only the transcript-facing user prompt across gateway, CLI, queued follow-up, and embedded Pi paths.

Adds regression coverage for history sanitization, CLI transcript persistence, media-only auto-reply prompts, and embedded Pi prompt rewrite against a real SessionManager file.

Co-authored-by: 91wan <91wan@users.noreply.github.com>
This commit is contained in:
Peter Steinberger
2026-04-24 22:17:03 +01:00
committed by GitHub
parent b20208fa4c
commit 6e985a421d
23 changed files with 341 additions and 9 deletions

View File

@@ -10,6 +10,7 @@ Docs: https://docs.openclaw.ai
### Changes
- WebChat/sessions: keep runtime-only prompt context out of visible transcript history and scrub legacy wrappers from session history surfaces. Thanks @91wan.
- Gradium: add a bundled text-to-speech provider with voice-note and telephony output support. (#64958) Thanks @LaurentMazare.
- Plugins/setup: honor explicit `setup.requiresRuntime: false` as a descriptor-only setup contract while keeping omitted values on the legacy setup-api fallback path. Thanks @vincentkoc.
- Plugins/setup: report descriptor/runtime drift when setup-api registrations disagree with `setup.providers` or `setup.cliBackends`, without rejecting legacy setup plugins. Thanks @vincentkoc.

View File

@@ -16,6 +16,7 @@ file is backed up alongside the session file.
Scope includes:
- Runtime-only prompt context staying out of user-visible transcript turns
- Tool call id sanitization
- Tool call input validation
- Tool result pairing repair
@@ -30,6 +31,20 @@ If you need transcript storage details, see:
---
## Global rule: runtime context is not user transcript
Runtime/system context can be added to the model prompt for a turn, but it is
not end-user-authored content. OpenClaw keeps a separate transcript-facing
prompt body for Gateway replies, queued followups, ACP, CLI, and embedded Pi
runs. Stored visible user turns use that transcript body instead of the
runtime-enriched prompt.
For legacy sessions that already persisted runtime wrappers, Gateway history
surfaces apply a display projection before returning messages to WebChat,
TUI, REST, or SSE clients.
---
## Where this runs
All transcript hygiene is centralized in the embedded runner:

View File

@@ -24,7 +24,8 @@ Status: the macOS/iOS SwiftUI chat UI talks directly to the Gateway WebSocket.
- The UI connects to the Gateway WebSocket and uses `chat.history`, `chat.send`, and `chat.inject`.
- `chat.history` is bounded for stability: Gateway may truncate long text fields, omit heavy metadata, and replace oversized entries with `[chat.history omitted: message too large]`.
- `chat.history` is also display-normalized: inline delivery directive tags
- `chat.history` is also display-normalized: runtime-only OpenClaw context,
inbound envelope wrappers, inline delivery directive tags
such as `[[reply_to_*]]` and `[[audio_as_voice]]`, plain-text tool-call XML
payloads (including `<tool_call>...</tool_call>`,
`<function_call>...</function_call>`, `<tool_calls>...</tool_calls>`,

View File

@@ -248,6 +248,7 @@ async function prepareAgentCommandExecution(
throw new Error("Message (--message) is required");
}
const body = prependInternalEventContext(message, opts.internalEvents);
const transcriptBody = opts.transcriptMessage ?? message;
if (!opts.to && !opts.sessionId && !opts.sessionKey && !opts.agentId) {
throw new Error("Pass --to <E.164>, --session-id, or --agent to choose a session");
}
@@ -368,6 +369,7 @@ async function prepareAgentCommandExecution(
return {
body,
transcriptBody,
cfg,
normalizedSpawned,
agentCfg,
@@ -402,6 +404,7 @@ async function agentCommandInternal(
const prepared = await prepareAgentCommandExecution(opts, runtime);
const {
body,
transcriptBody,
cfg,
normalizedSpawned,
agentCfg,
@@ -523,6 +526,7 @@ async function agentCommandInternal(
const { resolveAcpSessionCwd } = await loadAcpSessionIdentifiersRuntime();
sessionEntry = await attemptExecutionRuntime.persistAcpTurnTranscript({
body,
transcriptBody,
finalText: finalTextRaw,
sessionId,
sessionKey,
@@ -1068,6 +1072,7 @@ async function agentCommandInternal(
try {
sessionEntry = await attemptExecutionRuntime.persistCliTurnTranscript({
body,
transcriptBody,
result,
sessionId,
sessionKey: sessionKey ?? sessionId,

View File

@@ -19,6 +19,7 @@ export type RunCliAgentParams = {
workspaceDir: string;
config?: OpenClawConfig;
prompt: string;
transcriptPrompt?: string;
provider: string;
model?: string;
thinkLevel?: ThinkLevel;

View File

@@ -386,6 +386,41 @@ describe("CLI attempt execution", () => {
});
});
it("persists the transcript body instead of runtime-only CLI prompt context", async () => {
const sessionKey = "agent:main:subagent:cli-transcript-clean";
const sessionEntry: SessionEntry = {
sessionId: "session-cli-transcript-clean",
updatedAt: Date.now(),
};
const sessionStore: Record<string, SessionEntry> = { [sessionKey]: sessionEntry };
await fs.writeFile(storePath, JSON.stringify(sessionStore, null, 2), "utf-8");
const updatedEntry = await persistCliTurnTranscript({
body: [
"<<<BEGIN_OPENCLAW_INTERNAL_CONTEXT>>>",
"secret runtime context",
"<<<END_OPENCLAW_INTERNAL_CONTEXT>>>",
"",
"visible ask",
].join("\n"),
transcriptBody: "visible ask",
result: makeCliResult("hello from cli"),
sessionId: sessionEntry.sessionId,
sessionKey,
sessionEntry,
sessionStore,
storePath,
sessionAgentId: "main",
sessionCwd: tmpDir,
});
const messages = await readSessionMessages(updatedEntry?.sessionFile ?? "");
expect(messages[0]).toMatchObject({
role: "user",
content: "visible ask",
});
});
it("forwards user trigger and channel context to CLI runs", async () => {
const sessionKey = "agent:main:direct:claude-channel-context";
const sessionEntry: SessionEntry = {

View File

@@ -64,6 +64,7 @@ type TranscriptUsage = {
type PersistTextTurnTranscriptParams = {
body: string;
transcriptBody?: string;
finalText: string;
sessionId: string;
sessionKey: string;
@@ -97,7 +98,7 @@ function resolveTranscriptUsage(usage: PersistTextTurnTranscriptParams["assistan
async function persistTextTurnTranscript(
params: PersistTextTurnTranscriptParams,
): Promise<SessionEntry | undefined> {
const promptText = params.body;
const promptText = params.transcriptBody ?? params.body;
const replyText = params.finalText;
if (!promptText && !replyText) {
return params.sessionEntry;
@@ -169,6 +170,7 @@ function isClaudeCliProvider(provider: string): boolean {
export async function persistAcpTurnTranscript(params: {
body: string;
transcriptBody?: string;
finalText: string;
sessionId: string;
sessionKey: string;
@@ -191,6 +193,7 @@ export async function persistAcpTurnTranscript(params: {
export async function persistCliTurnTranscript(params: {
body: string;
transcriptBody?: string;
result: EmbeddedPiRunResult;
sessionId: string;
sessionKey: string;
@@ -207,6 +210,7 @@ export async function persistCliTurnTranscript(params: {
return await persistTextTurnTranscript({
body: params.body,
transcriptBody: params.transcriptBody,
finalText: replyText,
sessionId: params.sessionId,
sessionKey: params.sessionKey,

View File

@@ -27,6 +27,8 @@ export type AgentRunContext = {
export type AgentCommandOpts = {
message: string;
/** User-visible transcript body; defaults to message and excludes runtime-only context. */
transcriptMessage?: string;
/** Optional image attachments for multimodal messages. */
images?: ImageContent[];
/** Original inline/offloaded attachment order for inbound images. */

View File

@@ -866,6 +866,7 @@ export async function runEmbeddedPiAgent(
contextTokenBudget: ctxInfo.tokens,
skillsSnapshot: params.skillsSnapshot,
prompt,
transcriptPrompt: params.transcriptPrompt,
images: params.images,
imageOrder: params.imageOrder,
clientTools: params.clientTools,

View File

@@ -288,6 +288,7 @@ import {
PREEMPTIVE_OVERFLOW_ERROR_TEXT,
shouldPreemptivelyCompactBeforePrompt,
} from "./preemptive-compaction.js";
import { rewriteSubmittedPromptTranscript } from "./transcript-prompt-rewrite.js";
import type { EmbeddedRunAttemptParams, EmbeddedRunAttemptResult } from "./types.js";
export {
@@ -2424,6 +2425,13 @@ export async function runEmbeddedAttempt(
} else {
await abortable(activeSession.prompt(effectivePrompt));
}
rewriteSubmittedPromptTranscript({
sessionManager,
sessionFile: params.sessionFile,
previousLeafId: transcriptLeafId,
submittedPrompt: effectivePrompt,
transcriptPrompt: params.transcriptPrompt,
});
}
} catch (err) {
yieldAborted =

View File

@@ -78,6 +78,8 @@ export type RunEmbeddedPiAgentParams = {
config?: OpenClawConfig;
skillsSnapshot?: SkillSnapshot;
prompt: string;
/** User-visible prompt body to persist instead of runtime-enriched prompt text. */
transcriptPrompt?: string;
images?: ImageContent[];
imageOrder?: PromptImageOrderEntry[];
/** Optional client-provided tools (OpenResponses hosted tools). */

View File

@@ -0,0 +1,100 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { SessionManager } from "@mariozechner/pi-coding-agent";
import { afterEach, describe, expect, it, vi } from "vitest";
import { onSessionTranscriptUpdate } from "../../../sessions/transcript-events.js";
import { rewriteSubmittedPromptTranscript } from "./transcript-prompt-rewrite.js";
type AppendMessage = Parameters<SessionManager["appendMessage"]>[0];
// Remembers the most recently created temp dir so the afterEach hook can remove it.
let tmpDir: string | undefined;

/** Creates a fresh per-test temp directory and records it for later cleanup. */
async function createTmpDir(): Promise<string> {
  const dir = await fs.mkdtemp(path.join(os.tmpdir(), "transcript-prompt-rewrite-"));
  tmpDir = dir;
  return dir;
}
// Remove the per-test temp directory after every test; cleanup failures
// (e.g. the directory was never created or is already gone) are ignored.
afterEach(async () => {
  if (tmpDir) {
    await fs.rm(tmpDir, { recursive: true, force: true }).catch(() => {});
    tmpDir = undefined;
  }
});
/**
 * Collects the textual content of every user-role message on the session's
 * current branch, in order.
 *
 * - string content is returned as-is;
 * - array content is flattened by concatenating each text block's `text`
 *   (non-text blocks contribute nothing);
 * - any other content shape yields an empty string.
 */
function getUserTextMessages(sessionManager: SessionManager): string[] {
  const collected: string[] = [];
  for (const entry of sessionManager.getBranch()) {
    if (entry.type !== "message" || entry.message.role !== "user") {
      continue;
    }
    const content = (entry.message as { content?: unknown }).content;
    if (typeof content === "string") {
      collected.push(content);
    } else if (Array.isArray(content)) {
      const joined = content
        .map((block) => {
          const text =
            block && typeof block === "object" ? (block as { text?: unknown }).text : undefined;
          return typeof text === "string" ? text : "";
        })
        .join("");
      collected.push(joined);
    } else {
      collected.push("");
    }
  }
  return collected;
}
describe("rewriteSubmittedPromptTranscript", () => {
  // End-to-end check against a real SessionManager-backed session file:
  // two user turns carry the same runtime-enriched prompt text, but only
  // the turn appended after `previousLeafId` may be rewritten to the
  // transcript-facing prompt body.
  it("rewrites only the submitted embedded Pi prompt in a real session file", async () => {
    const sessionDir = await createTmpDir();
    const sessionManager = SessionManager.create(sessionDir, sessionDir);
    // Runtime-enriched prompt as submitted to the model (includes internal context).
    const submittedPrompt =
      "visible ask\n\n<<<BEGIN_OPENCLAW_INTERNAL_CONTEXT>>>\nsecret runtime context\n<<<END_OPENCLAW_INTERNAL_CONTEXT>>>";
    // User-visible body that should be persisted instead.
    const transcriptPrompt = "visible ask";
    // Earlier turn with identical prompt text — must NOT be rewritten.
    sessionManager.appendMessage({
      role: "user",
      content: submittedPrompt,
      timestamp: 1,
    });
    const previousLeafId = sessionManager.appendMessage({
      role: "assistant",
      content: [{ type: "text", text: "old answer" }],
      timestamp: 2,
    } as AppendMessage);
    // The turn actually submitted in this run (appended after previousLeafId).
    sessionManager.appendMessage({
      role: "user",
      content: submittedPrompt,
      timestamp: 3,
    });
    const sessionFile = sessionManager.getSessionFile();
    expect(sessionFile).toBeTruthy();
    // Subscribe so we can assert a transcript-update event is emitted.
    const listener = vi.fn();
    const cleanup = onSessionTranscriptUpdate(listener);
    try {
      rewriteSubmittedPromptTranscript({
        sessionManager,
        sessionFile: sessionFile!,
        previousLeafId,
        submittedPrompt,
        transcriptPrompt,
      });
    } finally {
      cleanup();
    }
    expect(listener).toHaveBeenCalledWith({ sessionFile });
    // Reopen from disk: the legacy first turn keeps the submitted prompt,
    // while the rewritten turn now stores only the transcript body.
    const reopenedSession = SessionManager.open(sessionFile!);
    expect(getUserTextMessages(reopenedSession)).toEqual([submittedPrompt, transcriptPrompt]);
  });
});

View File

@@ -0,0 +1,94 @@
import type { AgentMessage } from "@mariozechner/pi-agent-core";
import { SessionManager } from "@mariozechner/pi-coding-agent";
import { emitSessionTranscriptUpdate } from "../../../sessions/transcript-events.js";
import { rewriteTranscriptEntriesInSessionManager } from "../transcript-rewrite.js";
type SessionManagerLike = ReturnType<typeof SessionManager.open>;
/**
 * Pulls the plain prompt text out of an agent message.
 *
 * - string content is returned directly;
 * - array content yields the concatenation of every text block's `text`;
 * - returns undefined when content is neither a string nor an array, or
 *   when the array contains no text blocks.
 */
function extractPromptTextFromMessage(message: AgentMessage): string | undefined {
  const content = (message as { content?: unknown }).content;
  if (typeof content === "string") {
    return content;
  }
  if (!Array.isArray(content)) {
    return undefined;
  }
  const pieces: string[] = [];
  for (const block of content) {
    if (block && typeof block === "object") {
      const text = (block as { text?: unknown }).text;
      if (typeof text === "string") {
        pieces.push(text);
      }
    }
  }
  return pieces.length > 0 ? pieces.join("") : undefined;
}
/**
 * Returns a copy of `message` whose textual prompt content is replaced by
 * `text`.
 *
 * - string (or missing / non-array) content becomes the plain string `text`;
 * - array content keeps non-text blocks (e.g. images) in place, writes
 *   `text` into the first text block, and drops any further text blocks so
 *   the message's joined text equals exactly `text`.
 *
 * Dropping the trailing text blocks matters: the matching side
 * (extractPromptTextFromMessage) joins ALL text blocks, so a submitted
 * prompt may span several blocks. Keeping blocks after the first would
 * leave fragments of the runtime-enriched prompt in the persisted
 * transcript — exactly what this rewrite is meant to remove.
 */
function replacePromptTextInMessage(message: AgentMessage, text: string): AgentMessage {
  const content = (message as { content?: unknown }).content;
  const entry = message as unknown as Record<string, unknown>;
  if (!Array.isArray(content)) {
    // String or unknown content shape: replace wholesale with the plain text.
    return { ...entry, content: text } as AgentMessage;
  }
  let replaced = false;
  const nextContent: unknown[] = [];
  for (const block of content) {
    const isTextBlock =
      block !== null &&
      typeof block === "object" &&
      typeof (block as { text?: unknown }).text === "string";
    if (!isTextBlock) {
      nextContent.push(block);
      continue;
    }
    if (replaced) {
      // Skip trailing text blocks — see the doc comment above.
      continue;
    }
    replaced = true;
    nextContent.push({ ...(block as Record<string, unknown>), text });
  }
  return {
    ...entry,
    // No text block found at all: fall back to plain string content.
    content: replaced ? nextContent : text,
  } as AgentMessage;
}
/**
 * After a turn has been submitted to the embedded Pi session, replace the
 * persisted user prompt (the runtime-enriched text actually sent to the
 * model) with the user-visible transcript prompt.
 *
 * Only entries appended after `previousLeafId` are considered, and only the
 * first user message whose extracted text equals `submittedPrompt` is
 * rewritten, so identical earlier turns are left untouched. Emits a
 * transcript-update event when the session file actually changed. No-op
 * when no transcript prompt was provided or it equals the submitted prompt.
 */
export function rewriteSubmittedPromptTranscript(params: {
  sessionManager: SessionManagerLike;
  sessionFile: string;
  previousLeafId: string | null;
  submittedPrompt: string;
  transcriptPrompt?: string;
}): void {
  const { transcriptPrompt, submittedPrompt } = params;
  if (transcriptPrompt === undefined || transcriptPrompt === submittedPrompt) {
    return;
  }
  // Never persist an empty user turn; fall back to a placeholder label.
  const replacementText = transcriptPrompt.trim() || "[OpenClaw runtime event]";
  const branch = params.sessionManager.getBranch();
  // Start scanning just past the previous leaf; when the leaf is not found
  // (or none was given) scan the whole branch.
  let startIndex = 0;
  if (params.previousLeafId) {
    const leafIndex = branch.findIndex((entry) => entry.id === params.previousLeafId);
    startIndex = leafIndex >= 0 ? leafIndex + 1 : 0;
  }
  let target: (typeof branch)[number] | undefined;
  for (const entry of branch.slice(startIndex)) {
    if (entry.type !== "message" || entry.message.role !== "user") {
      continue;
    }
    if (extractPromptTextFromMessage(entry.message as AgentMessage) === submittedPrompt) {
      target = entry;
      break;
    }
  }
  if (!target || target.type !== "message") {
    return;
  }
  const result = rewriteTranscriptEntriesInSessionManager({
    sessionManager: params.sessionManager,
    replacements: [
      {
        entryId: target.id,
        message: replacePromptTextInMessage(target.message, replacementText),
      },
    ],
  });
  if (result.changed) {
    emitSessionTranscriptUpdate(params.sessionFile);
  }
}

View File

@@ -569,6 +569,7 @@ function isReplyOperationRestartAbort(replyOperation?: ReplyOperation): boolean
export async function runAgentTurnWithFallback(params: {
commandBody: string;
transcriptCommandBody?: string;
followupRun: FollowupRun;
sessionCtx: TemplateContext;
replyThreading?: TemplateContext["ReplyThreading"];
@@ -965,6 +966,7 @@ export async function runAgentTurnWithFallback(params: {
workspaceDir: params.followupRun.run.workspaceDir,
config: runtimeConfig,
prompt: params.commandBody,
transcriptPrompt: params.transcriptCommandBody,
provider,
model,
thinkLevel: params.followupRun.run.thinkLevel,
@@ -1087,6 +1089,7 @@ export async function runAgentTurnWithFallback(params: {
...runBaseParams,
sandboxSessionKey: params.runtimePolicySessionKey,
prompt: params.commandBody,
transcriptPrompt: params.transcriptCommandBody,
extraSystemPrompt: params.followupRun.run.extraSystemPrompt,
toolResultFormat: (() => {
const channel = resolveMessageChannel(

View File

@@ -861,6 +861,7 @@ function refreshSessionEntryFromStore(params: {
export async function runReplyAgent(params: {
commandBody: string;
transcriptCommandBody?: string;
followupRun: FollowupRun;
queueKey: string;
resolvedQueue: QueueSettings;
@@ -897,6 +898,7 @@ export async function runReplyAgent(params: {
}): Promise<ReplyPayload | ReplyPayload[] | undefined> {
const {
commandBody,
transcriptCommandBody,
followupRun,
queueKey,
resolvedQueue,
@@ -1198,6 +1200,7 @@ export async function runReplyAgent(params: {
const runStartedAt = Date.now();
const runOutcome = await runAgentTurnWithFallback({
commandBody,
transcriptCommandBody,
followupRun,
sessionCtx,
replyThreading: replyThreadingOverride ?? sessionCtx.ReplyThreading,

View File

@@ -310,6 +310,7 @@ export function createFollowupRunner(params: {
config: runtimeConfig,
skillsSnapshot: run.skillsSnapshot,
prompt: queued.prompt,
transcriptPrompt: queued.transcriptPrompt,
extraSystemPrompt: run.extraSystemPrompt,
ownerNumbers: run.ownerNumbers,
enforceFinalTag: run.enforceFinalTag,

View File

@@ -835,8 +835,10 @@ describe("runPreparedReply media-only handling", () => {
const call = vi.mocked(runReplyAgent).mock.calls.at(-1)?.[0];
expect(call?.commandBody).toContain("System: [t] Initial event.");
expect(call?.commandBody).not.toContain("System: [t] Post-compaction context.");
expect(call?.transcriptCommandBody).not.toContain("System: [t] Initial event.");
expect(call?.followupRun.prompt).toContain("System: [t] Initial event.");
expect(call?.followupRun.prompt).not.toContain("System: [t] Post-compaction context.");
expect(call?.followupRun.transcriptPrompt).not.toContain("System: [t] Initial event.");
});
it("uses inbound origin channel for run messageProvider", async () => {
await runPreparedReply(

View File

@@ -475,6 +475,7 @@ export async function runPreparedReply(
const effectiveBaseBody = hasUserBody
? baseBodyForPrompt
: [inboundUserContext, "[User sent media without caption]"].filter(Boolean).join("\n\n");
const transcriptBodyBase = hasUserBody ? baseBodyFinal : "[User sent media without caption]";
let prefixedBodyBase = await applySessionHints({
baseBody: effectiveBaseBody,
abortedLastRun,
@@ -510,6 +511,7 @@ export async function runPreparedReply(
const rebuildPromptBodies = async (): Promise<{
prefixedCommandBody: string;
queuedBody: string;
transcriptCommandBody: string;
}> => {
if (!useFastReplyRuntime) {
const eventsBlock = await drainFormattedSystemEvents({
@@ -530,6 +532,7 @@ export async function runPreparedReply(
sessionCtx,
effectiveBaseBody,
prefixedBody: prefixedBodyCore,
transcriptBody: transcriptBodyBase,
threadContextNote,
systemEventBlocks: drainedSystemEventBlocks,
});
@@ -558,7 +561,7 @@ export async function runPreparedReply(
sessionEntry = skillResult.sessionEntry ?? sessionEntry;
currentSystemSent = skillResult.systemSent;
const skillsSnapshot = skillResult.skillsSnapshot;
let { prefixedCommandBody, queuedBody } = await rebuildPromptBodies();
let { prefixedCommandBody, queuedBody, transcriptCommandBody } = await rebuildPromptBodies();
if (!resolvedThinkLevel) {
resolvedThinkLevel = await modelState.resolveDefaultThinkingLevel();
}
@@ -715,7 +718,7 @@ export async function runPreparedReply(
isNewSession,
});
preparedSessionState = resolvePreparedSessionState();
({ prefixedCommandBody, queuedBody } = await rebuildPromptBodies());
({ prefixedCommandBody, queuedBody, transcriptCommandBody } = await rebuildPromptBodies());
},
resolveBusyState: resolveQueueBusyState,
});
@@ -728,6 +731,7 @@ export async function runPreparedReply(
const authProfileIdSource = preparedSessionState.sessionEntry?.authProfileOverrideSource;
const followupRun = {
prompt: queuedBody,
transcriptPrompt: transcriptCommandBody,
messageId: sessionCtx.MessageSidFull ?? sessionCtx.MessageSid,
summaryLine: baseBodyTrimmedRaw,
enqueuedAt: Date.now(),
@@ -825,6 +829,7 @@ export async function runPreparedReply(
return runReplyAgent({
commandBody: prefixedCommandBody,
transcriptCommandBody,
followupRun,
queueKey,
resolvedQueue,

View File

@@ -10,6 +10,7 @@ export function buildReplyPromptBodies(params: {
sessionCtx: TemplateContext;
effectiveBaseBody: string;
prefixedBody: string;
transcriptBody?: string;
threadContextNote?: string;
systemEventBlocks?: string[];
}): {
@@ -17,6 +18,7 @@ export function buildReplyPromptBodies(params: {
mediaReplyHint?: string;
prefixedCommandBody: string;
queuedBody: string;
transcriptCommandBody: string;
} {
const combinedEventsBlock = (params.systemEventBlocks ?? []).filter(Boolean).join("\n");
const prependEvents = (body: string) =>
@@ -38,10 +40,15 @@ export function buildReplyPromptBodies(params: {
const prefixedCommandBody = mediaNote
? [mediaNote, mediaReplyHint, prefixedBody].filter(Boolean).join("\n").trim()
: prefixedBody;
const transcriptBody = params.transcriptBody ?? params.effectiveBaseBody;
const transcriptCommandBody = mediaNote
? [mediaNote, transcriptBody].filter(Boolean).join("\n").trim()
: transcriptBody;
return {
mediaNote,
mediaReplyHint,
prefixedCommandBody,
queuedBody,
transcriptCommandBody,
};
}

View File

@@ -22,6 +22,8 @@ export type QueueDedupeMode = "message-id" | "prompt" | "none";
export type FollowupRun = {
prompt: string;
/** User-visible prompt body persisted to transcript; excludes runtime-only prompt context. */
transcriptPrompt?: string;
/** Provider message ID, when available (for deduplication). */
messageId?: string;
summaryLine?: string;

View File

@@ -1,3 +1,4 @@
import { stripInternalRuntimeContext } from "../agents/internal-runtime-context.js";
import {
extractInboundSenderLabel,
stripInboundMetadata,
@@ -48,7 +49,8 @@ function stripEnvelopeFromContentWithRole(
if (entry.type !== "text" || typeof entry.text !== "string") {
return item;
}
const inboundStripped = stripInboundMetadata(entry.text);
const runtimeStripped = stripInternalRuntimeContext(entry.text);
const inboundStripped = stripInboundMetadata(runtimeStripped);
const stripped = stripUserEnvelope
? stripMessageIdHints(stripEnvelope(inboundStripped))
: inboundStripped;
@@ -81,7 +83,8 @@ export function stripEnvelopeFromMessage(message: unknown): unknown {
}
if (typeof entry.content === "string") {
const inboundStripped = stripInboundMetadata(entry.content);
const runtimeStripped = stripInternalRuntimeContext(entry.content);
const inboundStripped = stripInboundMetadata(runtimeStripped);
const stripped = stripUserEnvelope
? stripMessageIdHints(stripEnvelope(inboundStripped))
: inboundStripped;
@@ -96,7 +99,8 @@ export function stripEnvelopeFromMessage(message: unknown): unknown {
changed = true;
}
} else if (typeof entry.text === "string") {
const inboundStripped = stripInboundMetadata(entry.text);
const runtimeStripped = stripInternalRuntimeContext(entry.text);
const inboundStripped = stripInboundMetadata(runtimeStripped);
const stripped = stripUserEnvelope
? stripMessageIdHints(stripEnvelope(inboundStripped))
: inboundStripped;

View File

@@ -75,4 +75,36 @@ describe("SessionHistorySseState", () => {
expect(snapshot.history.messages[0]?.__openclaw?.seq).toBe(2);
expect(snapshot.rawTranscriptSeq).toBe(2);
});
test("strips legacy internal envelopes before exposing history", () => {
const snapshot = buildSessionHistorySnapshot({
rawMessages: [
{
role: "user",
content: [
{
type: "text",
text: [
"<<<BEGIN_OPENCLAW_INTERNAL_CONTEXT>>>",
"secret runtime context",
"<<<END_OPENCLAW_INTERNAL_CONTEXT>>>",
"",
"visible ask",
].join("\n"),
},
],
__openclaw: { seq: 1 },
},
],
});
expect(snapshot.history.messages).toHaveLength(1);
expect(
(
snapshot.history.messages[0] as {
content?: Array<{ text?: string }>;
}
).content?.[0]?.text,
).toBe("visible ask");
});
});

View File

@@ -1,3 +1,4 @@
import { stripEnvelopeFromMessages } from "./chat-sanitize.js";
import {
DEFAULT_CHAT_HISTORY_TEXT_MAX_CHARS,
sanitizeChatHistoryMessages,
@@ -102,7 +103,7 @@ export function buildSessionHistorySnapshot(params: {
const history = paginateSessionMessages(
toSessionHistoryMessages(
sanitizeChatHistoryMessages(
params.rawMessages,
stripEnvelopeFromMessages(params.rawMessages),
params.maxChars ?? DEFAULT_CHAT_HISTORY_TEXT_MAX_CHARS,
),
),
@@ -178,7 +179,10 @@ export class SessionHistorySseState {
...(typeof update.messageId === "string" ? { id: update.messageId } : {}),
seq: this.rawTranscriptSeq,
});
const sanitized = sanitizeChatHistoryMessages([nextMessage], this.maxChars);
const sanitized = sanitizeChatHistoryMessages(
stripEnvelopeFromMessages([nextMessage]),
this.maxChars,
);
if (sanitized.length === 0) {
return null;
}