fix: keep runtime prompt context out of system prompt (#77521)

This commit is contained in:
Josh Lehman
2026-05-04 16:54:16 -07:00
committed by GitHub
parent cb9824d6b4
commit 6dae3c273d
5 changed files with 94 additions and 33 deletions

View File

@@ -61,6 +61,7 @@ Docs: https://docs.openclaw.ai
- Codex plugin: mirror the experimental upstream app-server protocol and format generated TypeScript before drift checks, keeping OpenClaw's `experimentalApi` bridge compatible with latest Codex while preserving formatter gates.
- Telegram/media: derive no-caption inbound media placeholders from saved MIME metadata instead of the Telegram `photo` shape, so non-image and mixed attachments no longer reach the model as `<media:image>`. Fixes #69793. Thanks @aspalagin.
- Agents/cache: keep per-turn runtime context out of ordinary chat system prompts while still delivering hidden current-turn context, restoring prompt-cache reuse on chat continuations. Fixes #77431. Thanks @Udjin79.
- Gateway/startup: include resolved thinking and fast-mode defaults in the `agent model` startup log line, defaulting unset startup thinking to `medium` without mixing in reasoning visibility.
- Gateway/watch: suppress sync-I/O trace output during `pnpm gateway:watch --benchmark` unless explicitly requested, so CPU profiling no longer floods the terminal with stack traces.
- Gateway/watch: when benchmark sync-I/O tracing is explicitly enabled, tee trace blocks to the benchmark output log and filter them from the terminal pane while keeping normal Gateway logs visible.

View File

@@ -237,3 +237,23 @@ export function stripRuntimeContextCustomMessages<T>(messages: T[]): T[] {
}
return messages.filter((message) => !isOpenClawRuntimeContextCustomMessage(message));
}
/** True when `message` is a non-null object whose `role` property is exactly "user". */
function isUserMessage(message: unknown): boolean {
  if (typeof message !== "object" || message === null) {
    return false;
  }
  return (message as { role?: unknown }).role === "user";
}
/** Removes stale runtime-context custom messages while preserving current-turn context. */
export function stripHistoricalRuntimeContextCustomMessages<T>(messages: T[]): T[] {
if (!messages.some(isOpenClawRuntimeContextCustomMessage)) {
return messages;
}
const lastUserIndex = messages.findLastIndex(isUserMessage);
if (lastUserIndex === -1) {
return messages.filter((message) => !isOpenClawRuntimeContextCustomMessage(message));
}
return messages.filter(
(message, index) => !isOpenClawRuntimeContextCustomMessage(message) || index > lastUserIndex,
);
}

View File

@@ -157,6 +157,7 @@ describe("runEmbeddedAttempt context engine sessionKey forwarding", () => {
sessionPrompt: async (session, prompt) => {
seen.prompt = prompt;
seen.messages = [...session.messages];
seen.systemPrompt = session.agent.state.systemPrompt;
session.messages = [
...session.messages,
{ role: "assistant", content: "done", timestamp: 2 },
@@ -181,6 +182,8 @@ describe("runEmbeddedAttempt context engine sessionKey forwarding", () => {
"OpenClaw runtime context for the immediately preceding user message.",
);
expect(JSON.stringify(seen.messages)).not.toContain("not user-authored");
expect(seen.systemPrompt).not.toContain("secret runtime context");
expect(seen.systemPrompt).not.toContain("OPENCLAW_INTERNAL_CONTEXT");
const trajectoryEvents = (
await fs.readFile(path.join(tempPaths[0] ?? "", "session.trajectory.jsonl"), "utf8")
)
@@ -207,6 +210,49 @@ describe("runEmbeddedAttempt context engine sessionKey forwarding", () => {
}
});
it("keeps before_prompt_build prependContext out of system prompt on transcriptPrompt runs", async () => {
  // Hook runner reports only `before_prompt_build` as registered and returns
  // dynamic context that must NOT leak into the cached system prompt.
  const runBeforePromptBuild = vi.fn(async () => ({ prependContext: "dynamic hook context" }));
  hoisted.getGlobalHookRunnerMock.mockReturnValue({
    hasHooks: vi.fn((name: string) => name === "before_prompt_build"),
    runBeforePromptBuild,
    runBeforeAgentStart: vi.fn(),
  });
  // Captures what the model-facing session actually receives during the run.
  const seen: { prompt?: string; messages?: unknown[]; systemPrompt?: string } = {};
  const result = await createContextEngineAttemptRunner({
    contextEngine: createContextEngineBootstrapAndAssemble(),
    sessionKey,
    tempPaths,
    attemptOverrides: {
      prompt: "visible ask",
      // transcriptPrompt matching prompt exercises the transcript-prompt code path.
      transcriptPrompt: "visible ask",
    },
    sessionPrompt: async (session, prompt) => {
      // Snapshot prompt, messages, and system prompt at the moment of submission.
      seen.prompt = prompt;
      seen.messages = [...session.messages];
      seen.systemPrompt = session.agent.state.systemPrompt;
      session.messages = [
        ...session.messages,
        { role: "assistant", content: "done", timestamp: 2 },
      ];
    },
  });
  // The user-visible prompt stays clean of hook-injected context.
  expect(seen.prompt).toBe("visible ask");
  expect(result.finalPromptText).toBe("visible ask");
  // Core assertion of the fix: hook context must not be baked into the system
  // prompt (that would defeat prompt-cache reuse across turns).
  expect(seen.systemPrompt).not.toContain("dynamic hook context");
  // Instead, the hook context is delivered as a hidden runtime-context custom
  // message in the transcript for the current turn.
  expect(seen.messages).toEqual(
    expect.arrayContaining([
      expect.objectContaining({
        role: "custom",
        customType: "openclaw.runtime-context",
        display: false,
        content: "dynamic hook context",
      }),
    ]),
  );
});
it("keeps bootstrap truncation warnings out of WebChat runtime context", async () => {
const seen: { prompt?: string; messages?: unknown[] } = {};
hoisted.resolveBootstrapContextForRunMock.mockResolvedValueOnce({

View File

@@ -134,8 +134,15 @@ describe("normalizeMessagesForLlmBoundary", () => {
expect(input[0]).toHaveProperty("details");
});
it("keeps runtime-context transcript entries out of the LLM boundary", () => {
it("keeps historical runtime-context transcript entries out of the LLM boundary", () => {
const input = [
{
role: "custom",
customType: "openclaw.runtime-context",
content: "old secret runtime context",
display: false,
timestamp: 0,
},
{
role: "user",
content: [{ type: "text", text: "visible ask" }],
@@ -161,9 +168,12 @@ describe("normalizeMessagesForLlmBoundary", () => {
input as Parameters<typeof normalizeMessagesForLlmBoundary>[0],
) as Array<Record<string, unknown>>;
expect(output).toHaveLength(2);
expect(output).toHaveLength(3);
expect(output).not.toEqual(
expect.arrayContaining([expect.objectContaining({ customType: "openclaw.runtime-context" })]),
expect.arrayContaining([expect.objectContaining({ content: "old secret runtime context" })]),
);
expect(output).toEqual(
expect.arrayContaining([expect.objectContaining({ content: "secret runtime context" })]),
);
expect(output).toEqual(
expect.arrayContaining([expect.objectContaining({ customType: "other-extension-context" })]),

View File

@@ -80,7 +80,7 @@ import { resolveOpenClawReferencePaths } from "../../docs-path.js";
import { isTimeoutError } from "../../failover-error.js";
import { resolveHeartbeatPromptForSystemPrompt } from "../../heartbeat-system-prompt.js";
import { resolveImageSanitizationLimits } from "../../image-sanitization.js";
import { stripRuntimeContextCustomMessages } from "../../internal-runtime-context.js";
import { stripHistoricalRuntimeContextCustomMessages } from "../../internal-runtime-context.js";
import { buildModelAliasLines } from "../../model-alias-lines.js";
import { resolveModelAuthMode } from "../../model-auth.js";
import { resolveDefaultModelForAgent } from "../../model-selection.js";
@@ -330,7 +330,6 @@ import {
} from "./preemptive-compaction.js";
import {
buildCurrentTurnPromptContextSuffix,
buildRuntimeContextSystemContext,
queueRuntimeContextForNextTurn,
resolveRuntimeContextPromptParts,
} from "./runtime-context-prompt.js";
@@ -545,7 +544,7 @@ export function shouldBuildCoreCodingToolsForAllowlist(toolsAllow?: string[]): b
export function normalizeMessagesForLlmBoundary(messages: AgentMessage[]): AgentMessage[] {
const normalized = stripToolResultDetails(normalizeAssistantReplayContent(messages));
return stripRuntimeContextCustomMessages(normalized);
return stripHistoricalRuntimeContextCustomMessages(normalized);
}
function isMidTurnPrecheckAssistantError(message: AgentMessage | undefined): boolean {
@@ -3065,34 +3064,19 @@ export async function runEmbeddedAttempt(
await abortable(activeSession.prompt(promptForModel));
} else {
const runtimeContext = promptSubmission.runtimeContext?.trim();
const runtimeSystemPrompt = runtimeContext
? composeSystemPromptWithHookContext({
baseSystemPrompt: systemPromptText,
appendSystemContext: buildRuntimeContextSystemContext(runtimeContext),
})
: undefined;
if (runtimeSystemPrompt) {
applySystemPromptOverrideToSession(activeSession, runtimeSystemPrompt);
}
try {
await queueRuntimeContextForNextTurn({
session: activeSession,
runtimeContext,
});
await queueRuntimeContextForNextTurn({
session: activeSession,
runtimeContext,
});
// Only pass images option if there are actually images to pass
// This avoids potential issues with models that don't expect the images parameter
if (imageResult.images.length > 0) {
await abortable(
activeSession.prompt(promptForModel, { images: imageResult.images }),
);
} else {
await abortable(activeSession.prompt(promptForModel));
}
} finally {
if (runtimeSystemPrompt) {
applySystemPromptOverrideToSession(activeSession, systemPromptText);
}
// Only pass images option if there are actually images to pass
// This avoids potential issues with models that don't expect the images parameter
if (imageResult.images.length > 0) {
await abortable(
activeSession.prompt(promptForModel, { images: imageResult.images }),
);
} else {
await abortable(activeSession.prompt(promptForModel));
}
}
}