fix(agents): guard context pruning against malformed thinking blocks (#35146)

Merged via squash.

Prepared head SHA: a196a565b1
Co-authored-by: Sid-Qin <201593046+Sid-Qin@users.noreply.github.com>
Co-authored-by: shakkernerd <165377636+shakkernerd@users.noreply.github.com>
Reviewed-by: @shakkernerd
This commit is contained in:
Sid
2026-03-05 13:52:24 +08:00
committed by GitHub
parent 8891e1e48d
commit 463fd4735e
3 changed files with 118 additions and 2 deletions

View File

@@ -18,6 +18,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Agents/context pruning: guard assistant thinking/text char estimation against malformed blocks (missing `thinking`/`text` strings or null entries) so pruning no longer crashes with malformed provider content. (openclaw#35146) thanks @Sid-Qin.
- Agents/schema cleaning: detect Venice + Grok model IDs as xAI-proxied targets so unsupported JSON Schema keywords are stripped before requests, preventing Venice/Grok `Invalid arguments` failures. (openclaw#35355) thanks @Sid-Qin.
- Skills/native command deduplication: centralize skill command dedupe by canonical `skillName` in `listSkillCommandsForAgents` so duplicate suffixed variants (for example `_2`) are no longer surfaced across interfaces outside Discord. (#27521) thanks @shivama205.
- Agents/xAI tool-call argument decoding: decode HTML-entity encoded xAI/Grok tool-call argument values (`&amp;`, `&quot;`, `&lt;`, `&gt;`, numeric entities) before tool execution so commands with shell operators and quotes no longer fail with parse errors. (#35276) thanks @Sid-Qin.

View File

@@ -0,0 +1,112 @@
import type { AgentMessage } from "@mariozechner/pi-agent-core";
import type { ExtensionContext } from "@mariozechner/pi-coding-agent";
import { describe, expect, it } from "vitest";
import { pruneContextMessages } from "./pruner.js";
import { DEFAULT_CONTEXT_PRUNING_SETTINGS } from "./settings.js";
// Narrow the AgentMessage union to its assistant variant, and to one entry of
// that variant's content array (the thinking/text/toolCall block union).
type AssistantMessage = Extract<AgentMessage, { role: "assistant" }>;
type AssistantContentBlock = AssistantMessage["content"][number];
// Minimal ExtensionContext stand-in exposing only model.contextWindow.
// NOTE(review): assumes the pruner reads nothing else off the context and that
// a 1M-token window keeps size-based pruning from triggering — confirm against pruner.ts.
const CONTEXT_WINDOW_1M = {
model: { contextWindow: 1_000_000 },
} as unknown as ExtensionContext;
/** Builds a minimal user-role message carrying `text` as its content. */
function makeUser(text: string): AgentMessage {
  const now = Date.now();
  return { role: "user", content: text, timestamp: now };
}
/**
 * Builds an assistant message wrapping the given content blocks with fixed
 * provider metadata and a tiny, all-zero-cost token usage record.
 */
function makeAssistant(content: AssistantMessage["content"]): AgentMessage {
  // Zeroed cost breakdown shared by every fabricated assistant message.
  const zeroCost = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 };
  return {
    role: "assistant",
    content,
    api: "openai-responses",
    provider: "openai",
    model: "test-model",
    usage: {
      input: 1,
      output: 1,
      cacheRead: 0,
      cacheWrite: 0,
      totalTokens: 2,
      cost: zeroCost,
    },
    stopReason: "stop",
    timestamp: Date.now(),
  };
}
describe("pruneContextMessages", () => {
it("does not crash on assistant message with malformed thinking block (missing thinking string)", () => {
const messages: AgentMessage[] = [
makeUser("hello"),
makeAssistant([
{ type: "thinking" } as unknown as AssistantContentBlock,
{ type: "text", text: "ok" },
]),
];
expect(() =>
pruneContextMessages({
messages,
settings: DEFAULT_CONTEXT_PRUNING_SETTINGS,
ctx: CONTEXT_WINDOW_1M,
}),
).not.toThrow();
});
it("does not crash on assistant message with null content entries", () => {
const messages: AgentMessage[] = [
makeUser("hello"),
makeAssistant([null as unknown as AssistantContentBlock, { type: "text", text: "world" }]),
];
expect(() =>
pruneContextMessages({
messages,
settings: DEFAULT_CONTEXT_PRUNING_SETTINGS,
ctx: CONTEXT_WINDOW_1M,
}),
).not.toThrow();
});
it("does not crash on assistant message with malformed text block (missing text string)", () => {
const messages: AgentMessage[] = [
makeUser("hello"),
makeAssistant([
{ type: "text" } as unknown as AssistantContentBlock,
{ type: "thinking", thinking: "still fine" },
]),
];
expect(() =>
pruneContextMessages({
messages,
settings: DEFAULT_CONTEXT_PRUNING_SETTINGS,
ctx: CONTEXT_WINDOW_1M,
}),
).not.toThrow();
});
it("handles well-formed thinking blocks correctly", () => {
const messages: AgentMessage[] = [
makeUser("hello"),
makeAssistant([
{ type: "thinking", thinking: "let me think" },
{ type: "text", text: "here is the answer" },
]),
];
const result = pruneContextMessages({
messages,
settings: DEFAULT_CONTEXT_PRUNING_SETTINGS,
ctx: CONTEXT_WINDOW_1M,
});
expect(result).toHaveLength(2);
});
});

View File

@@ -121,10 +121,13 @@ function estimateMessageChars(message: AgentMessage): number {
if (message.role === "assistant") {
let chars = 0;
for (const b of message.content) {
if (b.type === "text") {
if (!b || typeof b !== "object") {
continue;
}
if (b.type === "text" && typeof b.text === "string") {
chars += b.text.length;
}
if (b.type === "thinking") {
if (b.type === "thinking" && typeof b.thinking === "string") {
chars += b.thinking.length;
}
if (b.type === "toolCall") {