fix(memory): restore ollama embedding adapter (#66269)

* fix(memory): restore ollama embedding adapter

* Update CHANGELOG.md
This commit is contained in:
Vincent Koc
2026-04-14 09:02:31 +01:00
committed by GitHub
parent 6a5ff83b24
commit 37f449d7e1
3 changed files with 47 additions and 1 deletion

View File

@@ -46,6 +46,7 @@ Docs: https://docs.openclaw.ai
- Cron/scheduler: stop inventing short retries when cron next-run calculation returns no valid future slot, and keep a maintenance wake armed so enabled unscheduled jobs recover without entering a refire loop. (#66019, #66083) Thanks @mbelinky.
- Cron/scheduler: preserve the active error-backoff floor when maintenance repair recomputes a missing cron next-run, so recurring errored jobs do not resume early after a transient next-run resolution failure. (#66019, #66083, #66113) Thanks @mbelinky.
- Outbound/delivery-queue: persist the originating outbound `session` context on queued delivery entries and replay it during recovery, so write-ahead-queued sends keep their original outbound media policy context after restart instead of evaluating against a missing session. (#66025) Thanks @eleqtrizit.
- Memory/Ollama: restore the built-in `ollama` embedding adapter in memory-core so explicit `memorySearch.provider: "ollama"` works again, and include endpoint-aware cache keys so different Ollama hosts do not reuse each other's embeddings. (#63429, #66078, #66163) Thanks @nnish16 and @vincentkoc.
- Auto-reply/queue: split collect-mode followup drains into contiguous groups by per-message authorization context (sender id, owner status, exec/bash-elevated overrides), so queued items from different senders or exec configs no longer execute under the last queued run's owner-only and exec-approval context. (#66024) Thanks @eleqtrizit.
- Dreaming/memory-core: require a live queued Dreaming cron event before the heartbeat hook runs the sweep, so managed Dreaming no longer replays on later heartbeats after the scheduled run was already consumed. (#66139) Thanks @mbelinky.
- Control UI/Dreaming: stop Imported Insights and Memory Palace from calling optional `memory-wiki` gateway methods when the plugin is off, and refresh config before wiki reloads so the Dreaming tab stops showing misleading unknown-method failures. (#66140) Thanks @mbelinky.

View File

@@ -6,12 +6,16 @@ import { resolveSessionTranscriptsDirForAgent } from "openclaw/plugin-sdk/memory
import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
import {
clearMemoryEmbeddingProviders as clearRegistry,
listMemoryEmbeddingProviders as listRegisteredAdapters,
registerMemoryEmbeddingProvider as registerAdapter,
} from "../../../../src/plugins/memory-embedding-providers.js";
import "./test-runtime-mocks.js";
import type { MemoryIndexManager } from "./index.js";
import { getMemorySearchManager, closeAllMemorySearchManagers } from "./index.js";
import { registerBuiltInMemoryEmbeddingProviders } from "./provider-adapters.js";
import {
DEFAULT_OLLAMA_EMBEDDING_MODEL,
registerBuiltInMemoryEmbeddingProviders,
} from "./provider-adapters.js";
let embedBatchCalls = 0;
let embedBatchInputCalls = 0;
@@ -108,6 +112,18 @@ vi.mock("./embeddings.js", () => {
});
describe("memory index", () => {
// Regression test: the built-in "ollama" adapter must be present in the
// embedding-provider registry after registerBuiltInMemoryEmbeddingProviders
// runs, and must advertise the expected default embedding model.
it("registers the builtin ollama embedding provider", () => {
// Look the adapter up by its registry id rather than by position, so the
// test is insensitive to registration order of the other built-in adapters.
const adapter = listRegisteredAdapters().find((entry) => entry.id === "ollama");
expect(adapter).toBeDefined();
expect(adapter).toEqual(
expect.objectContaining({
id: "ollama",
defaultModel: DEFAULT_OLLAMA_EMBEDDING_MODEL,
}),
);
});
let fixtureRoot = "";
let workspaceDir = "";
let memoryDir = "";

View File

@@ -4,6 +4,7 @@ import {
DEFAULT_LMSTUDIO_EMBEDDING_MODEL,
DEFAULT_LOCAL_MODEL,
DEFAULT_MISTRAL_EMBEDDING_MODEL,
DEFAULT_OLLAMA_EMBEDDING_MODEL,
DEFAULT_OPENAI_EMBEDDING_MODEL,
DEFAULT_VOYAGE_EMBEDDING_MODEL,
OPENAI_BATCH_ENDPOINT,
@@ -12,6 +13,7 @@ import {
createLmstudioEmbeddingProvider,
createLocalEmbeddingProvider,
createMistralEmbeddingProvider,
createOllamaEmbeddingProvider,
createOpenAiEmbeddingProvider,
createVoyageEmbeddingProvider,
hasNonTextEmbeddingParts,
@@ -290,6 +292,31 @@ const mistralAdapter: MemoryEmbeddingProviderAdapter = {
},
};
/**
 * Built-in registry entry for the Ollama embedding provider.
 *
 * Creates the underlying provider with `fallback: "none"` so an explicit
 * `memorySearch.provider: "ollama"` configuration either works against the
 * configured Ollama host or fails — it never silently falls back elsewhere.
 * The runtime cache key includes the client's `baseUrl` and `model`, so
 * embeddings cached for one Ollama host/model are never reused for another.
 */
const ollamaAdapter: MemoryEmbeddingProviderAdapter = {
  id: "ollama",
  defaultModel: DEFAULT_OLLAMA_EMBEDDING_MODEL,
  transport: "remote",
  create: async (options) => {
    const created = await createOllamaEmbeddingProvider({
      ...options,
      provider: "ollama",
      fallback: "none",
    });
    // Endpoint-aware cache key: baseUrl + model disambiguate hosts/models;
    // headers are sanitized so credentials (authorization) never end up in
    // cache-key material. NOTE(review): assumes sanitizeHeaders redacts or
    // strips the listed header names — confirm against its definition.
    const cacheKeyData = {
      provider: "ollama",
      baseUrl: created.client.baseUrl,
      model: created.client.model,
      headers: sanitizeHeaders(created.client.headers, ["authorization"]),
    };
    return {
      provider: created.provider,
      runtime: {
        id: "ollama",
        cacheKeyData,
      },
    };
  },
};
const lmstudioAdapter: MemoryEmbeddingProviderAdapter = {
id: "lmstudio",
defaultModel: DEFAULT_LMSTUDIO_EMBEDDING_MODEL,
@@ -347,6 +374,7 @@ export const builtinMemoryEmbeddingProviderAdapters = [
geminiAdapter,
voyageAdapter,
mistralAdapter,
ollamaAdapter,
lmstudioAdapter,
] as const;
@@ -409,6 +437,7 @@ export {
DEFAULT_LMSTUDIO_EMBEDDING_MODEL,
DEFAULT_LOCAL_MODEL,
DEFAULT_MISTRAL_EMBEDDING_MODEL,
DEFAULT_OLLAMA_EMBEDDING_MODEL,
DEFAULT_OPENAI_EMBEDDING_MODEL,
DEFAULT_VOYAGE_EMBEDDING_MODEL,
canAutoSelectLocal,