diff --git a/CHANGELOG.md b/CHANGELOG.md index 52b4db947a9..694b4ece1bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -92,6 +92,7 @@ Docs: https://docs.openclaw.ai - TUI: skip the generic CLI respawn wrapper for interactive launches, exit cleanly on terminal loss, and refuse to restore heartbeat sessions as the remembered chat session, preventing stale heartbeat history and orphaned `openclaw-tui` processes on first boot. Thanks @vincentkoc. - Doctor/sessions: move heartbeat-poisoned default main session store entries to recovery keys and clear stale TUI restore pointers, so `doctor --fix` can repair instances already stuck on `agent:main:main` heartbeat history. Thanks @vincentkoc. +- Agents/context engines: keep hidden OpenClaw runtime-context custom messages out of context-engine assemble, afterTurn, and ingest hooks so transcript reconstruction plugins only see conversation messages. Thanks @vincentkoc. - Gateway/shutdown: cancel delayed post-ready maintenance during close and suppress maintenance/cron startup after quick restarts, preventing orphaned background timers. Thanks @vincentkoc. - Agents/generated media: treat attachment-style message tool actions as completed chat sends, preventing duplicate fallback media posts when generated files were already uploaded. - Control UI/sessions: show each session's agent runtime in the Sessions table and allow filtering by runtime labels, matching the Agents panel runtime wording. Thanks @vincentkoc. 
diff --git a/src/agents/harness/context-engine-lifecycle.test.ts b/src/agents/harness/context-engine-lifecycle.test.ts new file mode 100644 index 00000000000..54dc90ff5f1 --- /dev/null +++ b/src/agents/harness/context-engine-lifecycle.test.ts @@ -0,0 +1,152 @@ +import type { AgentMessage } from "@mariozechner/pi-agent-core"; +import { describe, expect, it, vi } from "vitest"; +import type { ContextEngine } from "../../context-engine/types.js"; +import { OPENCLAW_RUNTIME_CONTEXT_CUSTOM_TYPE } from "../internal-runtime-context.js"; +import { + assembleHarnessContextEngine, + finalizeHarnessContextEngineTurn, +} from "./context-engine-lifecycle.js"; + +function textMessage(role: "user" | "assistant", text: string, timestamp: number): AgentMessage { + return { + role, + content: [{ type: "text", text }], + timestamp, + } as AgentMessage; +} + +function runtimeContextMessage(content: string, timestamp: number): AgentMessage { + return { + role: "custom", + customType: OPENCLAW_RUNTIME_CONTEXT_CUSTOM_TYPE, + content, + display: false, + details: { source: "openclaw-runtime-context" }, + timestamp, + } as AgentMessage; +} + +function createContextEngine(overrides: Partial<ContextEngine> = {}): ContextEngine { + return { + info: { id: "test", name: "Test context engine" }, + ingest: vi.fn(async () => ({ ingested: true })), + assemble: vi.fn(async (params) => ({ + messages: params.messages, + estimatedTokens: 0, + })), + compact: vi.fn(async () => ({ ok: true, compacted: false })), + ...overrides, + }; +} + +const sessionParams = { + sessionIdUsed: "session-1", + sessionId: "session-1", + sessionKey: "agent:main", + sessionFile: "sessions/main.jsonl", +}; + +describe("harness context engine lifecycle", () => { + it("keeps hidden runtime-context custom messages out of assemble hooks", async () => { + const visibleUser = textMessage("user", "visible ask", 1); + const hiddenRuntimeContext = runtimeContextMessage("hidden runtime context", 2); + const visibleAssistant = 
textMessage("assistant", "visible answer", 3); + const assemble = vi.fn(async (params: Parameters<ContextEngine["assemble"]>[0]) => ({ + messages: params.messages, + estimatedTokens: 0, + })); + + await assembleHarnessContextEngine({ + contextEngine: createContextEngine({ assemble }), + sessionId: sessionParams.sessionId, + sessionKey: sessionParams.sessionKey, + messages: [visibleUser, hiddenRuntimeContext, visibleAssistant], + modelId: "gpt-test", + }); + + expect(assemble).toHaveBeenCalledWith( + expect.objectContaining({ + messages: [visibleUser, visibleAssistant], + }), + ); + }); + + it("keeps hidden runtime-context custom messages out of afterTurn hooks", async () => { + const beforePromptUser = textMessage("user", "old ask", 1); + const beforePromptRuntimeContext = runtimeContextMessage("old hidden context", 2); + const beforePromptAssistant = textMessage("assistant", "old answer", 3); + const turnUser = textMessage("user", "new ask", 4); + const turnRuntimeContext = runtimeContextMessage("new hidden context", 5); + const turnAssistant = textMessage("assistant", "new answer", 6); + const afterTurn = vi.fn(async () => {}); + + await finalizeHarnessContextEngineTurn({ + contextEngine: createContextEngine({ afterTurn }), + promptError: false, + aborted: false, + yieldAborted: false, + sessionIdUsed: sessionParams.sessionIdUsed, + sessionKey: sessionParams.sessionKey, + sessionFile: sessionParams.sessionFile, + messagesSnapshot: [ + beforePromptUser, + beforePromptRuntimeContext, + beforePromptAssistant, + turnUser, + turnRuntimeContext, + turnAssistant, + ], + prePromptMessageCount: 3, + tokenBudget: 2048, + runtimeContext: {}, + runMaintenance: async () => undefined, + warn: () => {}, + }); + + expect(afterTurn).toHaveBeenCalledWith( + expect.objectContaining({ + messages: [beforePromptUser, beforePromptAssistant, turnUser, turnAssistant], + prePromptMessageCount: 2, + }), + ); + }); + + it("keeps hidden runtime-context custom messages out of ingestBatch fallbacks", async () => { + 
const beforePromptUser = textMessage("user", "old ask", 1); + const beforePromptRuntimeContext = runtimeContextMessage("old hidden context", 2); + const beforePromptAssistant = textMessage("assistant", "old answer", 3); + const turnUser = textMessage("user", "new ask", 4); + const turnRuntimeContext = runtimeContextMessage("new hidden context", 5); + const turnAssistant = textMessage("assistant", "new answer", 6); + const ingestBatch = vi.fn(async () => ({ ingestedCount: 2 })); + + await finalizeHarnessContextEngineTurn({ + contextEngine: createContextEngine({ ingestBatch }), + promptError: false, + aborted: false, + yieldAborted: false, + sessionIdUsed: sessionParams.sessionIdUsed, + sessionKey: sessionParams.sessionKey, + sessionFile: sessionParams.sessionFile, + messagesSnapshot: [ + beforePromptUser, + beforePromptRuntimeContext, + beforePromptAssistant, + turnUser, + turnRuntimeContext, + turnAssistant, + ], + prePromptMessageCount: 3, + tokenBudget: 2048, + runtimeContext: {}, + runMaintenance: async () => undefined, + warn: () => {}, + }); + + expect(ingestBatch).toHaveBeenCalledWith( + expect.objectContaining({ + messages: [turnUser, turnAssistant], + }), + ); + }); +}); diff --git a/src/agents/harness/context-engine-lifecycle.ts b/src/agents/harness/context-engine-lifecycle.ts index e0a6c1e650c..01e4db51a6e 100644 --- a/src/agents/harness/context-engine-lifecycle.ts +++ b/src/agents/harness/context-engine-lifecycle.ts @@ -1,6 +1,7 @@ import type { AgentMessage } from "@mariozechner/pi-agent-core"; import type { MemoryCitationsMode } from "../../config/types.memory.js"; import type { ContextEngine, ContextEngineRuntimeContext } from "../../context-engine/types.js"; +import { stripRuntimeContextCustomMessages } from "../internal-runtime-context.js"; import { runContextEngineMaintenance } from "../pi-embedded-runner/context-engine-maintenance.js"; import { buildAfterTurnRuntimeContext, @@ -71,10 +72,11 @@ export async function 
assembleHarnessContextEngine(params: { if (!params.contextEngine) { return undefined; } + const messages = stripRuntimeContextCustomMessages(params.messages); return await params.contextEngine.assemble({ sessionId: params.sessionId, sessionKey: params.sessionKey, - messages: params.messages, + messages, tokenBudget: params.tokenBudget, ...(params.availableTools ? { availableTools: params.availableTools } : {}), ...(params.citationsMode ? { citationsMode: params.citationsMode } : {}), @@ -107,6 +109,10 @@ export async function finalizeHarnessContextEngineTurn(params: { return { postTurnFinalizationSucceeded: true }; } + const conversationSnapshot = buildContextEngineConversationSnapshot({ + messagesSnapshot: params.messagesSnapshot, + prePromptMessageCount: params.prePromptMessageCount, + }); let postTurnFinalizationSucceeded = true; if (typeof params.contextEngine.afterTurn === "function") { @@ -115,8 +121,8 @@ export async function finalizeHarnessContextEngineTurn(params: { sessionId: params.sessionIdUsed, sessionKey: params.sessionKey, sessionFile: params.sessionFile, - messages: params.messagesSnapshot, - prePromptMessageCount: params.prePromptMessageCount, + messages: conversationSnapshot.messages, + prePromptMessageCount: conversationSnapshot.prePromptMessageCount, tokenBudget: params.tokenBudget, runtimeContext: params.runtimeContext, }); @@ -125,7 +131,9 @@ export async function finalizeHarnessContextEngineTurn(params: { params.warn(`context engine afterTurn failed: ${String(afterTurnErr)}`); } } else { - const newMessages = params.messagesSnapshot.slice(params.prePromptMessageCount); + const newMessages = conversationSnapshot.messages.slice( + conversationSnapshot.prePromptMessageCount, + ); if (newMessages.length > 0) { if (typeof params.contextEngine.ingestBatch === "function") { try { @@ -176,6 +184,22 @@ export async function finalizeHarnessContextEngineTurn(params: { return { postTurnFinalizationSucceeded }; } +function 
buildContextEngineConversationSnapshot(params: { + messagesSnapshot: AgentMessage[]; + prePromptMessageCount: number; +}): { messages: AgentMessage[]; prePromptMessageCount: number } { + const prePromptMessages = stripRuntimeContextCustomMessages( + params.messagesSnapshot.slice(0, params.prePromptMessageCount), + ); + const turnMessages = stripRuntimeContextCustomMessages( + params.messagesSnapshot.slice(params.prePromptMessageCount), + ); + return { + messages: [...prePromptMessages, ...turnMessages], + prePromptMessageCount: prePromptMessages.length, + }; +} + /** * Build runtime context passed into harness context-engine hooks. */