diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1306b2393ef..323a8646ddd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,7 @@ Docs: https://docs.openclaw.ai
 ### Fixes
 
 - Active Memory: allow `allowedChatTypes` to include explicit portal/webchat sessions and classify `agent:...:explicit:...` session keys before opaque session ids can shadow the chat type. Fixes #65775. (#66285) Thanks @Lidang-Jiang.
+- Active Memory: allow the hidden recall sub-agent to use both `memory_recall` and the legacy `memory_search`/`memory_get` memory tool contract, so bundled `memory-lancedb` recall works without breaking the default `memory-core` path. Fixes #73502. (#73584) Thanks @Takhoffman.
 - fix(device-pairing): validate callerScopes against resolved token scopes on repair [AI]. (#72925) Thanks @pgondhi987.
 - Active Memory docs: document the `cacheTtlMs` 1000-120000 ms range and 15000 ms default so setup snippets do not lead users past the schema limit. Fixes #65708. (#65737) Thanks @WuKongAI-CMU.
 - fix(agents): canonicalize provider aliases in byProvider tool policy lookup [AI]. (#72917) Thanks @pgondhi987.
diff --git a/docs/concepts/active-memory.md b/docs/concepts/active-memory.md
index 4e971b735e8..8a00d046aa0 100644
--- a/docs/concepts/active-memory.md
+++ b/docs/concepts/active-memory.md
@@ -80,7 +80,7 @@ because it follows your existing provider, auth, and model preferences.
 If you want Active Memory to feel faster, use a dedicated inference model
 instead of borrowing the main chat model. Recall quality matters, but
 latency matters more than for the main answer path, and Active Memory's tool surface
-is narrow (it only calls `memory_search` and `memory_get`).
+is narrow (it only calls available memory recall tools).
 
 Good fast-model options:
 
@@ -332,8 +332,9 @@ flowchart LR
   I --> M["Main Reply"]
 ```
 
-The blocking memory sub-agent can use only:
+The blocking memory sub-agent can use only the available memory recall tools:
 
+- `memory_recall`
 - `memory_search`
 - `memory_get`
 
@@ -644,9 +645,10 @@ If active memory is too slow:
 
 ## Common issues
 
-Active Memory rides on the normal `memory_search` pipeline under
-`agents.defaults.memorySearch`, so most recall surprises are embedding-provider
-problems, not Active Memory bugs.
+Active Memory rides on the configured memory plugin's recall pipeline, so most
+recall surprises are embedding-provider problems, not Active Memory bugs. The
+default `memory-core` path uses `memory_search`; `memory-lancedb` uses
+`memory_recall`.
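Context for the code changes below: the default `memory-core` path returns `memory_search` hits as a `results` array, while `memory-lancedb`'s `memory_recall` returns plain `text` (or a `content` array of text parts), which is why the recall sub-agent and the QA mock server later in this diff handle both shapes. A minimal sketch of collapsing either tool result into one snippet string; `normalizeMemoryToolOutput` is a hypothetical helper used for illustration only and is not part of this change:

```ts
// Hypothetical helper (not part of this diff): normalize either memory tool
// result shape into a single text snippet for the recall sub-agent.
type MemoryToolOutput = {
  text?: unknown;
  content?: unknown;
  results?: unknown;
};

export function normalizeMemoryToolOutput(raw: string): string | undefined {
  let parsed: MemoryToolOutput;
  try {
    parsed = (JSON.parse(raw) ?? {}) as MemoryToolOutput;
  } catch {
    // Not JSON: treat the raw output as the snippet itself.
    return raw || undefined;
  }
  // memory_recall (memory-lancedb): { text: "..." } or { content: [{ text: "..." }] }
  if (typeof parsed.text === "string") {
    return parsed.text;
  }
  if (Array.isArray(parsed.content)) {
    const joined = parsed.content
      .map((item) =>
        typeof item === "object" && item !== null && "text" in item &&
        typeof (item as { text?: unknown }).text === "string"
          ? (item as { text: string }).text
          : "",
      )
      .filter(Boolean)
      .join("\n");
    return joined || undefined;
  }
  // memory_search (memory-core): { results: [{ path, startLine, endLine }, ...] }
  if (Array.isArray(parsed.results)) {
    return JSON.stringify(parsed.results);
  }
  return undefined;
}
```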
diff --git a/extensions/active-memory/index.test.ts b/extensions/active-memory/index.test.ts
index e0232838b7c..34407692530 100644
--- a/extensions/active-memory/index.test.ts
+++ b/extensions/active-memory/index.test.ts
@@ -1015,9 +1015,14 @@ describe("active-memory plugin", () => {
     expect(runParams?.prompt).toContain(
       "You receive conversation context, including the user's latest message.",
     );
-    expect(runParams?.prompt).toContain("Use only memory_search and memory_get.");
+    expect(runParams?.prompt).toContain("Use only the available memory tools.");
+    expect(runParams?.prompt).toContain("Prefer memory_recall when available.");
     expect(runParams?.prompt).toContain(
-      "When searching for preference or habit recall, use a permissive memory_search threshold before deciding that no useful memory exists.",
+      "If memory_recall is unavailable, use memory_search and memory_get.",
+    );
+    expect(runParams?.toolsAllow).toEqual(["memory_recall", "memory_search", "memory_get"]);
+    expect(runParams?.prompt).toContain(
+      "When searching for preference or habit recall, use a permissive recall limit or memory_search threshold before deciding that no useful memory exists.",
     );
     expect(runParams?.prompt).toContain(
       "If the user is directly asking about favorites, preferences, habits, routines, or personal facts, treat that as a strong recall signal.",
diff --git a/extensions/active-memory/index.ts b/extensions/active-memory/index.ts
index 8070e0ea4dc..95f12856290 100644
--- a/extensions/active-memory/index.ts
+++ b/extensions/active-memory/index.ts
@@ -848,8 +848,10 @@ function buildRecallPrompt(params: {
     "Another model is preparing the final user-facing answer.",
     "Your job is to search memory and return only the most relevant memory context for that model.",
     "You receive conversation context, including the user's latest message.",
-    "Use only memory_search and memory_get.",
-    "When searching for preference or habit recall, use a permissive memory_search threshold before deciding that no useful memory exists.",
+    "Use only the available memory tools.",
+    "Prefer memory_recall when available.",
+    "If memory_recall is unavailable, use memory_search and memory_get.",
+    "When searching for preference or habit recall, use a permissive recall limit or memory_search threshold before deciding that no useful memory exists.",
     "Do not answer the user directly.",
     `Prompt style: ${params.config.promptStyle}.`,
     ...buildPromptStyleLines(params.config.promptStyle),
@@ -1448,14 +1450,18 @@ function extractActiveMemorySearchDebugFromSessionRecord(
   const record = asRecord(value);
   const nestedMessage = asRecord(record?.message);
   const topLevelMessage =
-    record?.role === "toolResult" || record?.toolName === "memory_search" ? record : undefined;
+    record?.role === "toolResult" ||
+    record?.toolName === "memory_search" ||
+    record?.toolName === "memory_recall"
+      ? record
+      : undefined;
   const message = nestedMessage ?? topLevelMessage;
   if (!message) {
     return undefined;
   }
   const role = normalizeOptionalString(message.role);
   const toolName = normalizeOptionalString(message.toolName);
-  if (role !== "toolResult" || toolName !== "memory_search") {
+  if (role !== "toolResult" || (toolName !== "memory_search" && toolName !== "memory_recall")) {
     return undefined;
   }
   const details = asRecord(message.details);
@@ -2072,7 +2078,7 @@ async function runRecallSubagent(params: {
     timeoutMs: params.config.timeoutMs,
     runId: subagentSessionId,
     trigger: "manual",
-    toolsAllow: ["memory_search", "memory_get"],
+    toolsAllow: ["memory_recall", "memory_search", "memory_get"],
     disableMessageTool: true,
     bootstrapContextMode: "lightweight",
     verboseLevel: "off",
diff --git a/extensions/qa-lab/src/providers/mock-openai/server.test.ts b/extensions/qa-lab/src/providers/mock-openai/server.test.ts
index 3e10546b3eb..740ca768027 100644
--- a/extensions/qa-lab/src/providers/mock-openai/server.test.ts
+++ b/extensions/qa-lab/src/providers/mock-openai/server.test.ts
@@ -1195,7 +1195,9 @@ describe("qa mock openai server", () => {
               type: "input_text",
               text: [
                 "You are a memory search agent.",
-                "Use only memory_search and memory_get.",
+                "Use only the available memory tools.",
+                "Prefer memory_recall when available.",
+                "If memory_recall is unavailable, use memory_search and memory_get.",
                 "",
                 "Conversation context:",
                 "Latest user message:",
@@ -1208,9 +1210,9 @@
       }),
     });
     expect(activeMemorySearch.status).toBe(200);
-    expect(await activeMemorySearch.text()).toContain('"name":"memory_search"');
+    expect(await activeMemorySearch.text()).toContain('"name":"memory_recall"');
 
-    const activeMemoryGet = await fetch(`${server.baseUrl}/v1/responses`, {
+    const activeMemoryStreamSummary = await fetch(`${server.baseUrl}/v1/responses`, {
       method: "POST",
       headers: {
         "content-type": "application/json",
@@ -1225,7 +1227,9 @@
               type: "input_text",
               text: [
                 "You are a memory search agent.",
-                "Use only memory_search and memory_get.",
+                "Use only the available memory tools.",
+                "Prefer memory_recall when available.",
+                "If memory_recall is unavailable, use memory_search and memory_get.",
                 "",
                 "Conversation context:",
                 "Latest user message:",
@@ -1237,20 +1241,14 @@
             {
               type: "function_call_output",
               output: JSON.stringify({
-                results: [
-                  {
-                    path: "MEMORY.md",
-                    startLine: 1,
-                    endLine: 1,
-                  },
-                ],
+                text: "Stable QA movie night snack preference: lemon pepper wings with blue cheese.",
               }),
             },
           ],
        }),
      });
-    expect(activeMemoryGet.status).toBe(200);
-    expect(await activeMemoryGet.text()).toContain('"name":"memory_get"');
+    expect(activeMemoryStreamSummary.status).toBe(200);
+    expect(await activeMemoryStreamSummary.text()).toContain("lemon pepper wings with blue cheese");
 
     const activeMemorySummary = await fetch(`${server.baseUrl}/v1/responses`, {
       method: "POST",
@@ -1267,7 +1265,9 @@
               type: "input_text",
               text: [
                 "You are a memory search agent.",
-                "Use only memory_search and memory_get.",
+                "Use only the available memory tools.",
+                "Prefer memory_recall when available.",
+                "If memory_recall is unavailable, use memory_search and memory_get.",
                 "",
                 "Conversation context:",
                 "Latest user message:",
diff --git a/extensions/qa-lab/src/providers/mock-openai/server.ts b/extensions/qa-lab/src/providers/mock-openai/server.ts
index 22adf2515c2..5d92c69853c 100644
--- a/extensions/qa-lab/src/providers/mock-openai/server.ts
+++ b/extensions/qa-lab/src/providers/mock-openai/server.ts
@@ -1447,37 +1447,34 @@ async function buildResponsesPayload(
     /silent snack recall check/i.test(allInputText)
   ) {
     if (!toolOutput) {
-      return buildToolCallEventsWithArgs("memory_search", {
+      return buildToolCallEventsWithArgs("memory_recall", {
         query: "QA movie night snack lemon pepper wings blue cheese",
-        maxResults: 3,
+        limit: 3,
       });
     }
-    const results = Array.isArray(toolJson?.results)
-      ? (toolJson.results as Array<Record<string, unknown>>)
-      : [];
-    const first = results[0];
-    if (
-      typeof first?.path === "string" &&
-      (typeof first.startLine === "number" || typeof first.endLine === "number")
-    ) {
-      const from =
-        typeof first.startLine === "number"
-          ? Math.max(1, first.startLine)
-          : typeof first.endLine === "number"
-            ? Math.max(1, first.endLine)
-            : 1;
-      return buildToolCallEventsWithArgs("memory_get", {
-        path: first.path,
-        from,
-        lines: 4,
-      });
-    }
-    const memorySnippet =
+    const memoryText =
       typeof toolJson?.text === "string"
         ? toolJson.text
-        : Array.isArray(toolJson?.results)
-          ? JSON.stringify(toolJson.results)
-          : toolOutput;
+        : Array.isArray(toolJson?.content)
+          ? toolJson.content
+              .map((item) =>
+                typeof item === "object" && item && "text" in item && typeof item.text === "string"
+                  ? item.text
+                  : "",
+              )
+              .filter(Boolean)
+              .join("\n")
+          : undefined;
+    if (memoryText) {
+      const snackPreference = extractSnackPreference(memoryText);
+      if (snackPreference) {
+        return buildAssistantEvents(`User usually wants ${snackPreference} for QA movie night.`);
+      }
+      return buildAssistantEvents("NONE");
+    }
+    const memorySnippet = Array.isArray(toolJson?.results)
+      ? JSON.stringify(toolJson.results)
+      : toolOutput;
     const snackPreference = extractSnackPreference(memorySnippet);
     if (snackPreference) {
       return buildAssistantEvents(`User usually wants ${snackPreference} for QA movie night.`);