fix(webchat): suppress NO_REPLY token in chat transcript rendering (#32183)

* fix(types): resolve pre-existing TS errors in agent-components and pairing-store

- agent-components.ts: normalizeDiscordAllowList returns {allowAll, ids, names},
  not an array — use ids.values().next().value instead of [0] indexing
- pairing-store.ts: add non-null assertions for stat after cache-miss guard
  (resolveAllowFromReadCacheOrMissing returns early when stat is null)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(webchat): suppress NO_REPLY token in chat transcript rendering

Filter assistant NO_REPLY-only entries from chat.history responses at
the gateway API boundary and add client-side defense-in-depth guards in
the UI chat controller so internal silent tokens never render as visible
chat bubbles.

Two-layer fix:
1. Gateway: extractAssistantTextForSilentCheck + isSilentReplyText
   filter in sanitizeChatHistoryMessages (entry.text takes precedence
   over entry.content to avoid dropping messages with real text)
2. UI: isAssistantSilentReply + isSilentReplyStream guards on all 5
   message insertion points in handleChatEvent and loadChatHistory

Fixes #32015

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix(webchat): align isAssistantSilentReply text/content precedence with gateway

* webchat: tighten NO_REPLY transcript and delta filtering

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Co-authored-by: Tak Hoffman <781889+Takhoffman@users.noreply.github.com>
This commit is contained in:
ademczuk
2026-03-02 23:39:08 +01:00
committed by GitHub
parent 48155729fc
commit 0743463b88
7 changed files with 492 additions and 19 deletions

View File

@@ -50,6 +50,8 @@ Docs: https://docs.openclaw.ai
- Config/raw redaction safety: preserve non-sensitive literals during raw redaction round-trips, scope SecretRef redaction to secret IDs (not structural fields like `source`/`provider`), and fall back to structured raw redaction when text replacement cannot restore the original config shape. (#32174) Thanks @bmendonca3.
- Models/Codex usage labels: infer weekly secondary usage windows from reset cadence when API window seconds are ambiguously reported as 24h, so `openclaw models status` no longer mislabels weekly limits as daily. (#31938) Thanks @bmendonca3.
- Config/backups hardening: enforce owner-only (`0600`) permissions on rotated config backups and clean orphan `.bak.*` files outside the managed backup ring, reducing credential leakage risk from stale or permissive backup artifacts. (#31718) Thanks @YUJIE2002.
- WhatsApp/inbound self-message context: propagate inbound `fromMe` through the web inbox pipeline and annotate direct self messages as `(self)` in envelopes so agents can distinguish owner-authored turns from contact turns. (#32167) Thanks @scoootscooob.
- Webchat/silent token leak: filter assistant `NO_REPLY`-only transcript entries from `chat.history` responses and add client-side defense-in-depth guards in the chat controller so internal silent tokens never render as visible chat bubbles. (#32015) Consolidates overlap from #32183, #32082, #32045, #32052, #32172, and #32112. Thanks @ademczuk, @liuxiaopai-ai, @ningding97, @bmendonca3, and @x4v13r1120.
- Exec approvals/allowlist matching: escape regex metacharacters in path-pattern literals (while preserving glob wildcards), preventing crashes on allowlisted executables like `/usr/bin/g++` and correctly matching mixed wildcard/literal token paths. (#32162) Thanks @stakeswky.
- Agents/tool-result guard: always clear pending tool-call state on interruptions even when synthetic tool results are disabled, preventing orphaned tool-use transcripts that cause follow-up provider request failures. (#32120) Thanks @jnMetaCode.
- Hooks/after_tool_call: include embedded session context (`sessionKey`, `agentId`) and fire the hook exactly once per tool execution by removing duplicate adapter-path dispatch in embedded runs. (#32201) Thanks @jbeno, @scoootscooob, @vincentkoc.

View File

@@ -871,7 +871,7 @@ async function dispatchDiscordComponentEvent(params: {
normalizeEntry: (entry) => {
const normalized = normalizeDiscordAllowList([entry], ["discord:", "user:", "pk:"]);
const candidate = normalized?.ids.values().next().value;
return candidate && /^\d+$/.test(candidate) ? candidate : undefined;
return typeof candidate === "string" && /^\d+$/.test(candidate) ? candidate : undefined;
},
})
: null;

View File

@@ -7,6 +7,7 @@ import { resolveAgentTimeoutMs } from "../../agents/timeout.js";
import { dispatchInboundMessage } from "../../auto-reply/dispatch.js";
import { createReplyDispatcher } from "../../auto-reply/reply/reply-dispatcher.js";
import type { MsgContext } from "../../auto-reply/templating.js";
import { isSilentReplyText, SILENT_REPLY_TOKEN } from "../../auto-reply/tokens.js";
import { createReplyPrefixOptions } from "../../channels/reply-prefix.js";
import { resolveSessionFilePath } from "../../config/sessions.js";
import { jsonUtf8Bytes } from "../../infra/json-utf8-bytes.js";
@@ -186,16 +187,61 @@ function sanitizeChatHistoryMessage(message: unknown): { message: unknown; chang
return { message: changed ? entry : message, changed };
}
/**
 * Pull the visible text out of an assistant history entry so callers can test
 * it against the silent-reply token.
 *
 * Returns `undefined` for non-assistant entries and for entries with no
 * extractable text. A string `entry.text` wins over `entry.content`, so an
 * entry carrying real text alongside a stale `content: "NO_REPLY"` is not
 * dropped. Array content only yields text when every block is a text block;
 * mixed content (e.g. text + image) yields `undefined` and is preserved.
 */
function extractAssistantTextForSilentCheck(message: unknown): string | undefined {
  if (typeof message !== "object" || message === null) {
    return undefined;
  }
  const entry = message as Record<string, unknown>;
  if (entry.role !== "assistant") {
    return undefined;
  }
  const { text, content } = entry;
  // Plain string fields win before any content-block walking.
  if (typeof text === "string") {
    return text;
  }
  if (typeof content === "string") {
    return content;
  }
  if (!Array.isArray(content) || content.length === 0) {
    return undefined;
  }
  const parts: string[] = [];
  for (const block of content) {
    const typed =
      block && typeof block === "object" ? (block as { type?: unknown; text?: unknown }) : null;
    // Any non-text block means the message is not purely text: bail out.
    if (!typed || typed.type !== "text" || typeof typed.text !== "string") {
      return undefined;
    }
    parts.push(typed.text);
  }
  return parts.length > 0 ? parts.join("\n") : undefined;
}
function sanitizeChatHistoryMessages(messages: unknown[]): unknown[] {
if (messages.length === 0) {
return messages;
}
let changed = false;
const next = messages.map((message) => {
const next: unknown[] = [];
for (const message of messages) {
const res = sanitizeChatHistoryMessage(message);
changed ||= res.changed;
return res.message;
});
// Drop assistant messages whose entire visible text is the silent reply token.
const text = extractAssistantTextForSilentCheck(res.message);
if (text !== undefined && isSilentReplyText(text, SILENT_REPLY_TOKEN)) {
changed = true;
continue;
}
next.push(res.message);
}
return changed ? next : messages;
}

View File

@@ -304,6 +304,77 @@ describe("gateway server chat", () => {
}
});
// Verifies the gateway drops assistant entries whose only visible text is the
// NO_REPLY silent token, while keeping real replies and user-authored NO_REPLY text.
test("chat.history hides assistant NO_REPLY-only entries", async () => {
  const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-gw-"));
  try {
    testState.sessionStorePath = path.join(dir, "sessions.json");
    await writeSessionStore({
      entries: {
        main: {
          sessionId: "sess-main",
          updatedAt: Date.now(),
        },
      },
    });
    // Fixture transcript: silent assistant entries, real replies, and a user
    // message that happens to contain the token text.
    const messages = [
      {
        role: "user",
        content: [{ type: "text", text: "hello" }],
        timestamp: 1,
      },
      {
        role: "assistant",
        content: [{ type: "text", text: "NO_REPLY" }],
        timestamp: 2,
      },
      {
        role: "assistant",
        content: [{ type: "text", text: "real reply" }],
        timestamp: 3,
      },
      {
        role: "assistant",
        text: "real text field reply",
        content: "NO_REPLY",
        timestamp: 4,
      },
      {
        role: "user",
        content: [{ type: "text", text: "NO_REPLY" }],
        timestamp: 5,
      },
    ];
    // Session transcripts are stored one JSON object per line (JSONL).
    const lines = messages.map((message) => JSON.stringify({ message }));
    await fs.writeFile(path.join(dir, "sess-main.jsonl"), lines.join("\n"), "utf-8");
    const res = await rpcReq<{ messages?: unknown[] }>(ws, "chat.history", {
      sessionKey: "main",
    });
    expect(res.ok).toBe(true);
    const historyMessages = res.payload?.messages ?? [];
    // Project each returned message to its visible text (text field first,
    // then first text content block), dropping entries with no text at all.
    const textValues = historyMessages
      .map((message) => {
        if (message && typeof message === "object") {
          const entry = message as { text?: unknown };
          if (typeof entry.text === "string") {
            return entry.text;
          }
        }
        return extractFirstTextBlock(message);
      })
      .filter((value): value is string => typeof value === "string");
    // The NO_REPLY assistant message (content block) should be dropped.
    // The assistant with text="real text field reply" + content="NO_REPLY" stays
    // because entry.text takes precedence over entry.content for the silent check.
    // The user message with NO_REPLY text is preserved (only assistant filtered).
    expect(textValues).toEqual(["hello", "real reply", "real text field reply", "NO_REPLY"]);
  } finally {
    testState.sessionStorePath = undefined;
    await fs.rm(dir, { recursive: true, force: true });
  }
});
test("routes chat.send slash commands without agent runs", async () => {
const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-gw-"));
try {
@@ -342,6 +413,94 @@ describe("gateway server chat", () => {
}
});
// Extends the basic filtering test: an assistant entry with mixed content
// (NO_REPLY text block + image block) must be preserved, not dropped.
test("chat.history hides assistant NO_REPLY-only entries and keeps mixed-content assistant entries", async () => {
  const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-gw-"));
  try {
    testState.sessionStorePath = path.join(dir, "sessions.json");
    await writeSessionStore({
      entries: {
        main: {
          sessionId: "sess-main",
          updatedAt: Date.now(),
        },
      },
    });
    const messages = [
      {
        role: "user",
        content: [{ type: "text", text: "hello" }],
        timestamp: 1,
      },
      {
        role: "assistant",
        content: [{ type: "text", text: "NO_REPLY" }],
        timestamp: 2,
      },
      {
        role: "assistant",
        content: [{ type: "text", text: "real reply" }],
        timestamp: 3,
      },
      {
        role: "assistant",
        text: "real text field reply",
        content: "NO_REPLY",
        timestamp: 4,
      },
      {
        role: "user",
        content: [{ type: "text", text: "NO_REPLY" }],
        timestamp: 5,
      },
      {
        // Mixed content: silent text alongside an image — must survive filtering.
        role: "assistant",
        content: [
          { type: "text", text: "NO_REPLY" },
          { type: "image", source: { type: "base64", media_type: "image/png", data: "abc" } },
        ],
        timestamp: 6,
      },
    ];
    const lines = messages.map((message) => JSON.stringify({ message }));
    await fs.writeFile(path.join(dir, "sess-main.jsonl"), lines.join("\n"), "utf-8");
    const res = await rpcReq<{ messages?: unknown[] }>(ws, "chat.history", {
      sessionKey: "main",
    });
    expect(res.ok).toBe(true);
    const historyMessages = res.payload?.messages ?? [];
    // Project each message to "role:text" for a compact order-sensitive assertion.
    const roleAndText = historyMessages
      .map((message) => {
        const role =
          message &&
          typeof message === "object" &&
          typeof (message as { role?: unknown }).role === "string"
            ? (message as { role: string }).role
            : "unknown";
        const text =
          message &&
          typeof message === "object" &&
          typeof (message as { text?: unknown }).text === "string"
            ? (message as { text: string }).text
            : (extractFirstTextBlock(message) ?? "");
        return `${role}:${text}`;
      })
      .filter((entry) => entry !== "unknown:");
    expect(roleAndText).toEqual([
      "user:hello",
      "assistant:real reply",
      "assistant:real text field reply",
      "user:NO_REPLY",
      "assistant:NO_REPLY",
    ]);
  } finally {
    testState.sessionStorePath = undefined;
    await fs.rm(dir, { recursive: true, force: true });
  }
});
test("agent events include sessionKey and agent.wait covers lifecycle flows", async () => {
const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-gw-"));
testState.sessionStorePath = path.join(dir, "sessions.json");

View File

@@ -374,10 +374,11 @@ async function readAllowFromStateForPathWithExists(
allowFrom: [],
});
const entries = normalizeAllowFromList(channel, value);
// stat is guaranteed non-null here: resolveAllowFromReadCacheOrMissing returns early when stat is null.
setAllowFromReadCache(filePath, {
exists,
mtimeMs: stat.mtimeMs,
size: stat.size,
mtimeMs: stat!.mtimeMs,
size: stat!.size,
entries,
});
return { entries, exists };
@@ -419,13 +420,14 @@ function readAllowFromStateForPathSyncWithExists(
}
return { entries: [], exists: false };
}
// stat is guaranteed non-null here: resolveAllowFromReadCacheOrMissing returns early when stat is null.
try {
const parsed = JSON.parse(raw) as AllowFromStore;
const entries = normalizeAllowFromList(channel, parsed);
setAllowFromReadCache(filePath, {
exists: true,
mtimeMs: stat.mtimeMs,
size: stat.size,
mtimeMs: stat!.mtimeMs,
size: stat!.size,
entries,
});
return { entries, exists: true };
@@ -433,8 +435,8 @@ function readAllowFromStateForPathSyncWithExists(
// Keep parity with async reads: malformed JSON still means the file exists.
setAllowFromReadCache(filePath, {
exists: true,
mtimeMs: stat.mtimeMs,
size: stat.size,
mtimeMs: stat!.mtimeMs,
size: stat!.size,
entries: [],
});
return { entries: [], exists: true };

View File

@@ -1,5 +1,5 @@
import { describe, expect, it } from "vitest";
import { handleChatEvent, type ChatEventPayload, type ChatState } from "./chat.ts";
import { describe, expect, it, vi } from "vitest";
import { handleChatEvent, loadChatHistory, type ChatEventPayload, type ChatState } from "./chat.ts";
function createState(overrides: Partial<ChatState> = {}): ChatState {
return {
@@ -53,6 +53,23 @@ describe("handleChatEvent", () => {
expect(state.chatStream).toBe("Hello");
});
// Delta frames whose text is only the NO_REPLY token must not overwrite the live stream.
it("ignores NO_REPLY delta updates", () => {
  const state = createState({
    sessionKey: "main",
    chatRunId: "run-1",
    chatStream: "Hello",
  });
  const payload: ChatEventPayload = {
    runId: "run-1",
    sessionKey: "main",
    state: "delta",
    message: { role: "assistant", content: [{ type: "text", text: "NO_REPLY" }] },
  };
  expect(handleChatEvent(state, payload)).toBe("delta");
  // Stream text is left untouched by the silent delta.
  expect(state.chatStream).toBe("Hello");
});
it("appends final payload from another run without clearing active stream", () => {
const state = createState({
sessionKey: "main",
@@ -77,6 +94,30 @@ describe("handleChatEvent", () => {
expect(state.chatMessages[0]).toEqual(payload.message);
});
// A silent final from a different run must be dropped without disturbing the active stream.
it("drops NO_REPLY final payload from another run without clearing active stream", () => {
  const state = createState({
    sessionKey: "main",
    chatRunId: "run-user",
    chatStream: "Working...",
    chatStreamStartedAt: 123,
  });
  const payload: ChatEventPayload = {
    runId: "run-announce",
    sessionKey: "main",
    state: "final",
    message: {
      role: "assistant",
      content: [{ type: "text", text: "NO_REPLY" }],
    },
  };
  expect(handleChatEvent(state, payload)).toBe("final");
  // Active run state is preserved; the silent message never lands in the transcript.
  expect(state.chatRunId).toBe("run-user");
  expect(state.chatStream).toBe("Working...");
  expect(state.chatStreamStartedAt).toBe(123);
  expect(state.chatMessages).toEqual([]);
});
it("returns final for another run when payload has no message", () => {
const state = createState({
sessionKey: "main",
@@ -325,4 +366,203 @@ describe("handleChatEvent", () => {
expect(state.chatStreamStartedAt).toBe(null);
expect(state.chatMessages).toEqual([existingMessage]);
});
// NOTE(review): this test partially overlaps the earlier "drops NO_REPLY final
// payload from another run without clearing active stream" case — consider deduplicating.
it("drops NO_REPLY final payload from another run", () => {
  const state = createState({
    sessionKey: "main",
    chatRunId: "run-user",
    chatStream: "Working...",
    chatStreamStartedAt: 123,
  });
  const payload: ChatEventPayload = {
    runId: "run-announce",
    sessionKey: "main",
    state: "final",
    message: {
      role: "assistant",
      content: [{ type: "text", text: "NO_REPLY" }],
    },
  };
  expect(handleChatEvent(state, payload)).toBe("final");
  expect(state.chatMessages).toEqual([]);
  expect(state.chatRunId).toBe("run-user");
  expect(state.chatStream).toBe("Working...");
});
// A silent final for the ACTIVE run clears run/stream state but adds no bubble.
it("drops NO_REPLY final payload from own run", () => {
  const state = createState({
    sessionKey: "main",
    chatRunId: "run-1",
    chatStream: "NO_REPLY",
    chatStreamStartedAt: 100,
  });
  const payload: ChatEventPayload = {
    runId: "run-1",
    sessionKey: "main",
    state: "final",
    message: {
      role: "assistant",
      content: [{ type: "text", text: "NO_REPLY" }],
    },
  };
  expect(handleChatEvent(state, payload)).toBe("final");
  expect(state.chatMessages).toEqual([]);
  expect(state.chatRunId).toBe(null);
  expect(state.chatStream).toBe(null);
});
// When final arrives with no message, buffered NO_REPLY stream text must not be persisted.
it("does not persist NO_REPLY stream text on final without message", () => {
  const state = createState({
    sessionKey: "main",
    chatRunId: "run-1",
    chatStream: "NO_REPLY",
    chatStreamStartedAt: 100,
  });
  const payload: ChatEventPayload = {
    runId: "run-1",
    sessionKey: "main",
    state: "final",
  };
  expect(handleChatEvent(state, payload)).toBe("final");
  expect(state.chatMessages).toEqual([]);
});
// Abort path: neither a malformed message nor silent stream text may become a bubble.
it("does not persist NO_REPLY stream text on abort", () => {
  const state = createState({
    sessionKey: "main",
    chatRunId: "run-1",
    chatStream: "NO_REPLY",
    chatStreamStartedAt: 100,
  });
  const payload = {
    runId: "run-1",
    sessionKey: "main",
    state: "aborted",
    message: "not-an-assistant-message",
  } as unknown as ChatEventPayload;
  expect(handleChatEvent(state, payload)).toBe("aborted");
  expect(state.chatMessages).toEqual([]);
});
it("keeps user messages containing NO_REPLY text", () => {
  const state = createState({
    sessionKey: "main",
    chatRunId: "run-user",
    chatStream: "Working...",
    chatStreamStartedAt: 123,
  });
  const payload: ChatEventPayload = {
    runId: "run-announce",
    sessionKey: "main",
    state: "final",
    message: {
      role: "user",
      content: [{ type: "text", text: "NO_REPLY" }],
    },
  };
  // User messages with NO_REPLY text should NOT be filtered — only assistant messages.
  // normalizeFinalAssistantMessage returns null for user role, so this falls through.
  expect(handleChatEvent(state, payload)).toBe("final");
});
it("keeps assistant message when text field has real reply but content is NO_REPLY", () => {
  const state = createState({
    sessionKey: "main",
    chatRunId: "run-1",
    chatStream: "",
    chatStreamStartedAt: 100,
  });
  const payload: ChatEventPayload = {
    runId: "run-1",
    sessionKey: "main",
    state: "final",
    message: {
      role: "assistant",
      text: "real reply",
      content: "NO_REPLY",
    },
  };
  // entry.text takes precedence — "real reply" is NOT silent, so the message is kept.
  expect(handleChatEvent(state, payload)).toBe("final");
  expect(state.chatMessages).toHaveLength(1);
});
});
describe("loadChatHistory", () => {
  // History filtering: assistant NO_REPLY-only entries must never reach state.chatMessages.
  it("filters NO_REPLY assistant messages from history", async () => {
    const messages = [
      { role: "user", content: [{ type: "text", text: "Hello" }] },
      { role: "assistant", content: [{ type: "text", text: "NO_REPLY" }] },
      { role: "assistant", content: [{ type: "text", text: "Real answer" }] },
      // Whitespace-padded token on the text field should still count as silent.
      { role: "assistant", text: " NO_REPLY " },
    ];
    const mockClient = {
      request: vi.fn().mockResolvedValue({ messages, thinkingLevel: "low" }),
    };
    const state = createState({
      client: mockClient as unknown as ChatState["client"],
      connected: true,
    });
    await loadChatHistory(state);
    expect(state.chatMessages).toHaveLength(2);
    expect(state.chatMessages[0]).toEqual(messages[0]);
    expect(state.chatMessages[1]).toEqual(messages[2]);
    expect(state.chatThinkingLevel).toBe("low");
    expect(state.chatLoading).toBe(false);
  });
  it("keeps assistant message when text field has real content but content is NO_REPLY", async () => {
    const messages = [{ role: "assistant", text: "real reply", content: "NO_REPLY" }];
    const mockClient = {
      request: vi.fn().mockResolvedValue({ messages }),
    };
    const state = createState({
      client: mockClient as unknown as ChatState["client"],
      connected: true,
    });
    await loadChatHistory(state);
    // text takes precedence — "real reply" is NOT silent, so message is kept.
    expect(state.chatMessages).toHaveLength(1);
  });
});
// NOTE(review): second describe("loadChatHistory") block — duplicates the suite
// name above; consider merging the two suites. Behavior is unchanged either way.
describe("loadChatHistory", () => {
  it("filters assistant NO_REPLY messages and keeps user NO_REPLY messages", async () => {
    const request = vi.fn().mockResolvedValue({
      messages: [
        { role: "assistant", content: [{ type: "text", text: "NO_REPLY" }] },
        { role: "assistant", content: [{ type: "text", text: "visible answer" }] },
        { role: "user", content: [{ type: "text", text: "NO_REPLY" }] },
      ],
      thinkingLevel: "low",
    });
    const state = createState({
      connected: true,
      client: { request } as unknown as ChatState["client"],
    });
    await loadChatHistory(state);
    // The controller requests a bounded window of history for the configured session.
    expect(request).toHaveBeenCalledWith("chat.history", {
      sessionKey: "main",
      limit: 200,
    });
    expect(state.chatMessages).toEqual([
      { role: "assistant", content: [{ type: "text", text: "visible answer" }] },
      { role: "user", content: [{ type: "text", text: "NO_REPLY" }] },
    ]);
    expect(state.chatThinkingLevel).toBe("low");
    expect(state.chatLoading).toBe(false);
    expect(state.lastError).toBeNull();
  });
});

View File

@@ -3,6 +3,29 @@ import type { GatewayBrowserClient } from "../gateway.ts";
import type { ChatAttachment } from "../ui-types.ts";
import { generateUUID } from "../uuid.ts";
/** Matches text whose only visible content is the NO_REPLY silent token (whitespace allowed). */
const SILENT_REPLY_PATTERN = /^\s*NO_REPLY\s*$/;

/** True when a streamed text chunk consists solely of the silent NO_REPLY token. */
function isSilentReplyStream(text: string): boolean {
  return SILENT_REPLY_PATTERN.test(text);
}

/**
 * Client-side defense-in-depth: detect assistant messages whose visible text
 * is purely NO_REPLY. A string `entry.text` takes precedence over extracted
 * content, mirroring the gateway's extractAssistantTextForSilentCheck.
 */
function isAssistantSilentReply(message: unknown): boolean {
  if (typeof message !== "object" || message === null) {
    return false;
  }
  const entry = message as Record<string, unknown>;
  const role = typeof entry.role === "string" ? entry.role.toLowerCase() : "";
  if (role !== "assistant") {
    return false;
  }
  // Prefer the plain text field; fall back to content-block extraction.
  const visible = typeof entry.text === "string" ? entry.text : extractText(message);
  return typeof visible === "string" && isSilentReplyStream(visible);
}
export type ChatState = {
client: GatewayBrowserClient | null;
connected: boolean;
@@ -41,7 +64,8 @@ export async function loadChatHistory(state: ChatState) {
limit: 200,
},
);
state.chatMessages = Array.isArray(res.messages) ? res.messages : [];
const messages = Array.isArray(res.messages) ? res.messages : [];
state.chatMessages = messages.filter((message) => !isAssistantSilentReply(message));
state.chatThinkingLevel = res.thinkingLevel ?? null;
} catch (err) {
state.lastError = String(err);
@@ -230,7 +254,7 @@ export function handleChatEvent(state: ChatState, payload?: ChatEventPayload) {
if (payload.runId && state.chatRunId && payload.runId !== state.chatRunId) {
if (payload.state === "final") {
const finalMessage = normalizeFinalAssistantMessage(payload.message);
if (finalMessage) {
if (finalMessage && !isAssistantSilentReply(finalMessage)) {
state.chatMessages = [...state.chatMessages, finalMessage];
return null;
}
@@ -241,7 +265,7 @@ export function handleChatEvent(state: ChatState, payload?: ChatEventPayload) {
if (payload.state === "delta") {
const next = extractText(payload.message);
if (typeof next === "string") {
if (typeof next === "string" && !isSilentReplyStream(next)) {
const current = state.chatStream ?? "";
if (!current || next.length >= current.length) {
state.chatStream = next;
@@ -249,9 +273,9 @@ export function handleChatEvent(state: ChatState, payload?: ChatEventPayload) {
}
} else if (payload.state === "final") {
const finalMessage = normalizeFinalAssistantMessage(payload.message);
if (finalMessage) {
if (finalMessage && !isAssistantSilentReply(finalMessage)) {
state.chatMessages = [...state.chatMessages, finalMessage];
} else if (state.chatStream?.trim()) {
} else if (state.chatStream?.trim() && !isSilentReplyStream(state.chatStream)) {
state.chatMessages = [
...state.chatMessages,
{
@@ -266,11 +290,11 @@ export function handleChatEvent(state: ChatState, payload?: ChatEventPayload) {
state.chatStreamStartedAt = null;
} else if (payload.state === "aborted") {
const normalizedMessage = normalizeAbortedAssistantMessage(payload.message);
if (normalizedMessage) {
if (normalizedMessage && !isAssistantSilentReply(normalizedMessage)) {
state.chatMessages = [...state.chatMessages, normalizedMessage];
} else {
const streamedText = state.chatStream ?? "";
if (streamedText.trim()) {
if (streamedText.trim() && !isSilentReplyStream(streamedText)) {
state.chatMessages = [
...state.chatMessages,
{