mirror of https://github.com/openclaw/openclaw.git
synced 2026-05-06 12:30:44 +00:00
fix(memory): let lancedb use provider embedding auth
@@ -26,6 +26,7 @@ Docs: https://docs.openclaw.ai

 ### Fixes

+- Memory/LanceDB: let embedding config use provider-backed auth profiles, environment credentials, or provider config without a separate plugin `embedding.apiKey`, so OAuth-capable embedding providers can power auto-recall/capture. Fixes #68950. Thanks @malshaalan-ai.
 - Plugins/hooks: time out never-settling `agent_end` observation hooks after 30 seconds and log the plugin failure, so hung embedding endpoints no longer leave memory capture silently pending forever. Fixes #65544. Thanks @ghoc0099.
 - Gateway/config: serve runtime config schemas from the current plugin metadata snapshot and generated bundled channel schema metadata instead of rebuilding plugin channel config modules on every `config.get`/`config.schema`, preventing idle plugin-discovery CPU churn after upgrades. Fixes #73088. Thanks @sleitor and @geovansb.
 - Memory/LanceDB: call OpenAI-compatible embedding endpoints through the raw SDK transport without sending `encoding_format`, then normalize float-array or base64 responses so providers such as ZhiPu and DashScope no longer fail recall with wrong vector dimensions or rejected parameters. Fixes #63655. Thanks @kinthaiofficial.
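The `agent_end` timeout entry above boils down to racing the hook against a timer. A minimal sketch, assuming a hypothetical `withTimeout` helper rather than OpenClaw's actual hook runner:

```ts
// Sketch: time out a never-settling observation hook and log the failure.
// `withTimeout` and `runAgentEndHook` are illustrative names, not OpenClaw APIs.
function withTimeout<T>(promise: Promise<T>, ms: number, label: string): Promise<T> {
  let timer!: ReturnType<typeof setTimeout>;
  const timeout = new Promise<never>((_, reject) => {
    timer = setTimeout(() => reject(new Error(`${label} timed out after ${ms}ms`)), ms);
  });
  // Whichever settles first wins; always clear the timer afterwards.
  return Promise.race([promise, timeout]).finally(() => clearTimeout(timer));
}

async function runAgentEndHook(hook: () => Promise<void>, log: (msg: string) => void) {
  try {
    await withTimeout(hook(), 30_000, "agent_end hook");
  } catch (err) {
    // A hung embedding endpoint now surfaces as a logged plugin failure
    // instead of leaving memory capture pending forever.
    log(`plugin hook failed: ${(err as Error).message}`);
  }
}
```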
@@ -35,7 +35,7 @@ slot with `plugins.slots.memory = "memory-lancedb"`. Companion plugins such as
         enabled: true,
         config: {
           embedding: {
-            apiKey: "${OPENAI_API_KEY}",
+            provider: "openai",
             model: "text-embedding-3-small",
           },
           autoRecall: true,
@@ -59,12 +59,12 @@ Then verify the plugin is loaded:
 openclaw plugins list
 ```

-## Ollama embeddings
+## Provider-backed embeddings

-`memory-lancedb` calls embeddings through an OpenAI-compatible embeddings API.
-For Ollama embeddings, use the Ollama `/v1` compatibility endpoint here. This
-is only for embeddings; the Ollama chat/model provider uses the native Ollama
-API URL documented in [Ollama](/providers/ollama).
+`memory-lancedb` can use the same memory embedding provider adapters as
+`memory-core`. Set `embedding.provider` and omit `embedding.apiKey` to use the
+provider's configured auth profile, environment variable, or
+`models.providers.<provider>.apiKey`.

 ```json5
 {
@@ -77,8 +77,66 @@ API URL documented in [Ollama](/providers/ollama).
         enabled: true,
         config: {
           embedding: {
-            apiKey: "ollama",
-            baseUrl: "http://127.0.0.1:11434/v1",
+            provider: "openai",
             model: "text-embedding-3-small",
           },
           autoRecall: true,
+        },
+      },
+    },
+  },
+}
+```
+
+This path works with provider auth profiles that expose embedding credentials.
+For example, GitHub Copilot can be used when the Copilot profile/plan supports
+embeddings:
+
+```json5
+{
+  plugins: {
+    slots: {
+      memory: "memory-lancedb",
+    },
+    entries: {
+      "memory-lancedb": {
+        enabled: true,
+        config: {
+          embedding: {
+            provider: "github-copilot",
+            model: "text-embedding-3-small",
+          },
+        },
+      },
+    },
+  },
+}
+```
+
+OpenAI Codex / ChatGPT OAuth (`openai-codex`) is not an OpenAI Platform
+embeddings credential. For OpenAI embeddings, use an OpenAI API key auth profile,
+`OPENAI_API_KEY`, or `models.providers.openai.apiKey`. OAuth-only users can use
+another embedding-capable provider such as GitHub Copilot or Ollama.
+
+## Ollama embeddings
+
+For Ollama embeddings, prefer the bundled Ollama embedding provider. It uses the
+native Ollama `/api/embed` endpoint and follows the same auth/base URL rules as
+the Ollama provider documented in [Ollama](/providers/ollama).
+
+```json5
+{
+  plugins: {
+    slots: {
+      memory: "memory-lancedb",
+    },
+    entries: {
+      "memory-lancedb": {
+        enabled: true,
+        config: {
+          embedding: {
+            provider: "ollama",
+            baseUrl: "http://127.0.0.1:11434",
+            model: "mxbai-embed-large",
+            dimensions: 1024,
+          },
@@ -106,6 +164,11 @@ parameter, while others ignore it and always return `number[]` vectors.
 `memory-lancedb` therefore omits `encoding_format` on embedding requests and
 accepts either float-array responses or base64-encoded float32 responses.

+If you have a raw OpenAI-compatible embeddings endpoint that does not have a
+bundled provider adapter, omit `embedding.provider` (or leave it as `openai`) and
+set `embedding.apiKey` plus `embedding.baseUrl`. This preserves the direct
+OpenAI-compatible client path.
+
 Set `embedding.dimensions` for providers whose model dimensions are not built
 in. For example, ZhiPu `embedding-3` uses `2048` dimensions:
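The float-array/base64 normalization this hunk describes can be sketched as follows; the `toFloatVector` helper and the payload shape are illustrative assumptions, not the plugin's exact decoder:

```ts
import { Buffer } from "node:buffer";

// Sketch: normalize an OpenAI-compatible embeddings payload whose `embedding`
// field may be a float array or a base64-encoded float32 buffer.
function toFloatVector(embedding: unknown): number[] {
  if (Array.isArray(embedding) && embedding.every((v) => typeof v === "number")) {
    return embedding as number[];
  }
  if (typeof embedding === "string") {
    const bytes = Buffer.from(embedding, "base64");
    // Copy into a fresh Uint8Array so the Float32Array view is 4-byte aligned.
    const copy = new Uint8Array(bytes);
    return Array.from(new Float32Array(copy.buffer, 0, copy.byteLength / 4));
  }
  throw new Error("unsupported embedding payload");
}
```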
@@ -37,6 +37,41 @@ describe("memory-lancedb config", () => {
     });
   });

+  it("accepts provider-backed embedding config without a plugin apiKey", () => {
+    const manifestResult = validateJsonSchemaValue({
+      schema: manifest.configSchema,
+      cacheKey: "memory-lancedb.manifest.provider-auth",
+      value: {
+        embedding: {
+          provider: "openai",
+          model: "text-embedding-3-small",
+        },
+      },
+    });
+
+    const parsed = memoryConfigSchema.parse({
+      embedding: {
+        provider: "openai",
+        model: "text-embedding-3-small",
+      },
+    });
+
+    expect(manifestResult.ok).toBe(true);
+    expect(parsed.embedding.apiKey).toBeUndefined();
+    expect(parsed.embedding.provider).toBe("openai");
+  });
+
+  it("rejects empty embedding providers", () => {
+    expect(() => {
+      memoryConfigSchema.parse({
+        embedding: {
+          provider: "",
+          model: "text-embedding-3-small",
+        },
+      });
+    }).toThrow("embedding.provider must not be empty");
+  });
+
   it("still rejects unrelated unknown top-level config keys", () => {
     expect(() => {
       memoryConfigSchema.parse({
@@ -4,9 +4,9 @@ import { join } from "node:path";

 export type MemoryConfig = {
   embedding: {
-    provider: "openai";
+    provider: string;
     model: string;
-    apiKey: string;
+    apiKey?: string;
     baseUrl?: string;
     dimensions?: number;
   };
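Under the widened type (`provider: string`, optional `apiKey`), both a provider-backed entry and a direct OpenAI-compatible entry type-check. A sketch, with the type inlined for self-containment rather than imported from the plugin's config module:

```ts
// Illustrative copy of the loosened embedding config shape.
type EmbeddingConfig = {
  provider: string;
  model: string;
  apiKey?: string;
  baseUrl?: string;
  dimensions?: number;
};

// Provider-backed: no apiKey, auth comes from the provider's profile/env.
const providerBacked: EmbeddingConfig = {
  provider: "github-copilot",
  model: "text-embedding-3-small",
};

// Direct OpenAI-compatible endpoint: explicit key and base URL.
const directCompatible: EmbeddingConfig = {
  provider: "openai",
  model: "text-embedding-3-small",
  apiKey: "sk-example", // placeholder; real keys come from env or auth profiles
  baseUrl: "https://api.openai.com/v1",
};
```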
@@ -115,12 +115,20 @@ export const memoryConfigSchema = {
     );

     const embedding = cfg.embedding as Record<string, unknown> | undefined;
-    if (!embedding || typeof embedding.apiKey !== "string") {
-      throw new Error("embedding.apiKey is required");
+    if (!embedding || typeof embedding !== "object" || Array.isArray(embedding)) {
+      throw new Error("embedding config required");
     }
-    assertAllowedKeys(embedding, ["apiKey", "model", "baseUrl", "dimensions"], "embedding config");
+    assertAllowedKeys(
+      embedding,
+      ["provider", "apiKey", "model", "baseUrl", "dimensions"],
+      "embedding config",
+    );

     const model = resolveEmbeddingModel(embedding);
+    const provider = typeof embedding.provider === "string" ? embedding.provider.trim() : "openai";
+    if (!provider) {
+      throw new Error("embedding.provider must not be empty");
+    }

     const captureMaxChars =
       typeof cfg.captureMaxChars === "number" ? Math.floor(cfg.captureMaxChars) : undefined;
@@ -164,9 +172,9 @@ export const memoryConfigSchema = {

     return {
       embedding: {
-        provider: "openai",
+        provider,
         model,
-        apiKey: resolveEnvVars(embedding.apiKey),
+        apiKey: typeof embedding.apiKey === "string" ? resolveEnvVars(embedding.apiKey) : undefined,
         baseUrl:
           typeof embedding.baseUrl === "string" ? resolveEnvVars(embedding.baseUrl) : undefined,
         dimensions: typeof embedding.dimensions === "number" ? embedding.dimensions : undefined,
@@ -181,16 +189,21 @@ export const memoryConfigSchema = {
     };
   },
   uiHints: {
+    "embedding.provider": {
+      label: "Embedding Provider",
+      placeholder: "openai",
+      help: "Memory embedding provider adapter to use (for example openai, github-copilot, ollama)",
+    },
     "embedding.apiKey": {
       label: "OpenAI API Key",
       sensitive: true,
       placeholder: "sk-proj-...",
-      help: "API key for OpenAI embeddings (or use ${OPENAI_API_KEY})",
+      help: "Optional API key override for OpenAI-compatible embeddings; omit to use configured provider auth",
     },
     "embedding.baseUrl": {
       label: "Base URL",
       placeholder: "https://api.openai.com/v1",
-      help: "Base URL for compatible providers (e.g. http://localhost:11434/v1)",
+      help: "Optional provider or OpenAI-compatible embedding endpoint base URL",
       advanced: true,
     },
     "embedding.dimensions": {
@@ -24,8 +24,10 @@ import { installTmpDirHarness } from "./test-helpers.js";
 const OPENAI_API_KEY = process.env.OPENAI_API_KEY ?? "test-key";
 type MemoryPluginTestConfig = {
   embedding?: {
+    provider?: string;
     apiKey?: string;
     model?: string;
     baseUrl?: string;
+    dimensions?: number;
   };
   dbPath?: string;
@@ -144,13 +146,17 @@ describe("memory plugin e2e", () => {
     delete process.env.TEST_MEMORY_API_KEY;
   });

-  test("config schema rejects missing apiKey", async () => {
-    expect(() => {
-      memoryPlugin.configSchema?.parse?.({
-        embedding: {},
-        dbPath: getDbPath(),
-      });
-    }).toThrow("embedding.apiKey is required");
+  test("config schema accepts provider-backed embeddings without apiKey", async () => {
+    const config = memoryPlugin.configSchema?.parse?.({
+      embedding: {
+        provider: "openai",
+      },
+      dbPath: getDbPath(),
+    }) as MemoryPluginTestConfig | undefined;
+
+    expect(config?.embedding?.provider).toBe("openai");
+    expect(config?.embedding?.apiKey).toBeUndefined();
+    expect(config?.embedding?.model).toBe("text-embedding-3-small");
   });

   test("config schema validates captureMaxChars range", async () => {
@@ -232,6 +238,121 @@ describe("memory plugin e2e", () => {
     expect(on).not.toHaveBeenCalledWith("before_agent_start", expect.any(Function));
   });

+  test("uses provider adapter auth when embedding apiKey is omitted", async () => {
+    const embedQuery = vi.fn(async () => [0.1, 0.2, 0.3]);
+    const createProvider = vi.fn(async (options: Record<string, unknown>) => ({
+      provider: {
+        id: "openai",
+        model: options.model,
+        embedQuery,
+        embedBatch: vi.fn(async () => [[0.1, 0.2, 0.3]]),
+      },
+    }));
+    const getMemoryEmbeddingProvider = vi.fn(() => ({
+      id: "openai",
+      create: createProvider,
+    }));
+    const toArray = vi.fn(async () => []);
+    const limit = vi.fn(() => ({ toArray }));
+    const vectorSearch = vi.fn(() => ({ limit }));
+    const loadLanceDbModule = vi.fn(async () => ({
+      connect: vi.fn(async () => ({
+        tableNames: vi.fn(async () => ["memories"]),
+        openTable: vi.fn(async () => ({
+          vectorSearch,
+          countRows: vi.fn(async () => 0),
+          add: vi.fn(async () => undefined),
+          delete: vi.fn(async () => undefined),
+        })),
+      })),
+    }));
+
+    vi.resetModules();
+    vi.doMock("openclaw/plugin-sdk/memory-core-host-engine-embeddings", () => ({
+      getMemoryEmbeddingProvider,
+    }));
+    vi.doMock("openai", () => ({
+      default: function UnexpectedOpenAI() {
+        throw new Error("direct OpenAI client should not be constructed");
+      },
+    }));
+    vi.doMock("./lancedb-runtime.js", () => ({
+      loadLanceDbModule,
+    }));
+
+    try {
+      const { default: dynamicMemoryPlugin } = await import("./index.js");
+      const cfg = {
+        models: {
+          providers: {
+            openai: {
+              apiKey: "profile-backed-key",
+            },
+          },
+        },
+      };
+      const registerTool = vi.fn();
+      const mockApi = {
+        id: "memory-lancedb",
+        name: "Memory (LanceDB)",
+        source: "test",
+        config: cfg,
+        pluginConfig: {
+          embedding: {
+            provider: "openai",
+            model: "text-embedding-3-small",
+          },
+          dbPath: getDbPath(),
+        },
+        runtime: {
+          config: {
+            current: () => cfg,
+          },
+          agent: {
+            resolveAgentDir: vi.fn(() => "/tmp/openclaw-agent"),
+          },
+        },
+        logger: {
+          info: vi.fn(),
+          warn: vi.fn(),
+          error: vi.fn(),
+          debug: vi.fn(),
+        },
+        registerTool,
+        registerCli: vi.fn(),
+        registerService: vi.fn(),
+        on: vi.fn(),
+        resolvePath: (filePath: string) => filePath,
+      };
+
+      dynamicMemoryPlugin.register(mockApi as any);
+      const recallTool = registerTool.mock.calls
+        .map(([tool]) => tool)
+        .find((tool) => tool.name === "memory_recall");
+      expect(recallTool).toBeTruthy();
+
+      await recallTool.execute("call-1", { query: "project memory" });
+
+      expect(getMemoryEmbeddingProvider).toHaveBeenCalledWith("openai", cfg);
+      expect(createProvider).toHaveBeenCalledWith(
+        expect.objectContaining({
+          config: cfg,
+          agentDir: "/tmp/openclaw-agent",
+          provider: "openai",
+          fallback: "none",
+          model: "text-embedding-3-small",
+        }),
+      );
+      expect(createProvider.mock.calls[0][0]).not.toHaveProperty("remote");
+      expect(embedQuery).toHaveBeenCalledWith("project memory");
+    } finally {
+      vi.doUnmock("openclaw/plugin-sdk/memory-core-host-engine-embeddings");
+      vi.doUnmock("openai");
+      vi.doUnmock("./lancedb-runtime.js");
+      vi.resetModules();
+    }
+  });
+
   test("keeps before_prompt_build registered but inert when auto-recall is disabled", async () => {
     const on = vi.fn();
     const mockApi = {
@@ -10,7 +10,12 @@ import { Buffer } from "node:buffer";
 import { randomUUID } from "node:crypto";
 import type * as LanceDB from "@lancedb/lancedb";
 import OpenAI from "openai";
+import { resolveDefaultAgentId } from "openclaw/plugin-sdk/config-runtime";
+import type { OpenClawConfig } from "openclaw/plugin-sdk/config-types";
+import {
+  getMemoryEmbeddingProvider,
+  type MemoryEmbeddingProvider,
+} from "openclaw/plugin-sdk/memory-core-host-engine-embeddings";
 import { resolveLivePluginConfigObject } from "openclaw/plugin-sdk/plugin-config-runtime";
 import { ensureGlobalUndiciEnvProxyDispatcher } from "openclaw/plugin-sdk/runtime-env";
 import {
@@ -23,6 +28,7 @@ import {
   DEFAULT_CAPTURE_MAX_CHARS,
   DEFAULT_RECALL_MAX_CHARS,
   MEMORY_CATEGORIES,
+  type MemoryConfig,
   type MemoryCategory,
   memoryConfigSchema,
   vectorDimsForModel,
@@ -252,10 +258,14 @@ class MemoryDB {
 }

 // ============================================================================
-// OpenAI Embeddings
+// Embeddings
 // ============================================================================

-class Embeddings {
+type Embeddings = {
+  embed(text: string): Promise<number[]>;
+};
+
+class OpenAiCompatibleEmbeddings implements Embeddings {
   private client: OpenAI;

   constructor(
@@ -287,6 +297,70 @@ class Embeddings {
   }
 }

+class ProviderAdapterEmbeddings implements Embeddings {
+  private providerPromise: Promise<MemoryEmbeddingProvider> | undefined;
+
+  constructor(
+    private api: OpenClawPluginApi,
+    private embedding: MemoryConfig["embedding"],
+  ) {}
+
+  private getProvider(): Promise<MemoryEmbeddingProvider> {
+    // Auth profiles and local providers can be repaired while the Gateway stays up.
+    // Cache successful setup, but retry after failed provider discovery/auth.
+    this.providerPromise ??= this.createProvider().catch((err) => {
+      this.providerPromise = undefined;
+      throw err;
+    });
+    return this.providerPromise;
+  }
+
+  private async createProvider(): Promise<MemoryEmbeddingProvider> {
+    const cfg = (this.api.runtime.config?.current?.() ?? this.api.config) as OpenClawConfig;
+    const providerId = this.embedding.provider;
+    const adapter = getMemoryEmbeddingProvider(providerId, cfg);
+    if (!adapter) {
+      throw new Error(`Unknown memory embedding provider: ${providerId}`);
+    }
+    const defaultAgentId = resolveDefaultAgentId(cfg);
+    const agentDir = this.api.runtime.agent.resolveAgentDir(cfg, defaultAgentId);
+    const remote =
+      this.embedding.apiKey || this.embedding.baseUrl
+        ? {
+            ...(this.embedding.apiKey ? { apiKey: this.embedding.apiKey } : {}),
+            ...(this.embedding.baseUrl ? { baseUrl: this.embedding.baseUrl } : {}),
+          }
+        : undefined;
+    const result = await adapter.create({
+      config: cfg,
+      agentDir,
+      provider: providerId,
+      fallback: "none",
+      model: this.embedding.model,
+      ...(remote ? { remote } : {}),
+      ...(typeof this.embedding.dimensions === "number"
+        ? { outputDimensionality: this.embedding.dimensions }
+        : {}),
+    });
+    if (!result.provider) {
+      throw new Error(`Memory embedding provider ${providerId} is unavailable.`);
+    }
+    return result.provider;
+  }
+
+  async embed(text: string): Promise<number[]> {
+    return await (await this.getProvider()).embedQuery(text);
+  }
+}
+
+function createEmbeddings(api: OpenClawPluginApi, cfg: MemoryConfig): Embeddings {
+  const { provider, model, dimensions, apiKey, baseUrl } = cfg.embedding;
+  if (provider === "openai" && apiKey) {
+    return new OpenAiCompatibleEmbeddings(apiKey, model, baseUrl, dimensions);
+  }
+  return new ProviderAdapterEmbeddings(api, cfg.embedding);
+}
+
 type EmbeddingCreateResponse = {
   data?: Array<{
     embedding?: unknown;
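`getProvider()` above uses a reusable pattern: memoize the async factory's promise so concurrent callers share one setup, but clear the cache on rejection so a later call retries once auth is repaired. A standalone sketch of that pattern (names are illustrative, not OpenClaw APIs):

```ts
// Cache a successful async setup; forget a failed one so the next call retries.
function memoizeAsyncWithRetry<T>(create: () => Promise<T>): () => Promise<T> {
  let cached: Promise<T> | undefined;
  return () => {
    cached ??= create().catch((err) => {
      cached = undefined; // drop the rejected promise; next call retries
      throw err;
    });
    return cached;
  };
}

// Usage: repair an auth profile while the process stays up, then call again.
const getClient = memoizeAsyncWithRetry(async () => ({ ready: true }));
```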
@@ -432,12 +506,12 @@ export default definePluginEntry({
     const cfg = memoryConfigSchema.parse(api.pluginConfig);
     const dbPath = cfg.dbPath!;
     const resolvedDbPath = dbPath.includes("://") ? dbPath : api.resolvePath(dbPath);
-    const { model, dimensions, apiKey, baseUrl } = cfg.embedding;
+    const { model, dimensions } = cfg.embedding;
     const disabledHookCfg = { ...cfg, autoCapture: false, autoRecall: false };

     const vectorDim = dimensions ?? vectorDimsForModel(model);
     const db = new MemoryDB(resolvedDbPath, vectorDim, cfg.storageOptions);
-    const embeddings = new Embeddings(apiKey, model, baseUrl, dimensions);
+    const embeddings = createEmbeddings(api, cfg);
     const autoCaptureCursors = new Map<string, AutoCaptureCursor>();
     const resolveCurrentHookConfig = () => {
       const runtimePluginConfig = resolveLivePluginConfigObject(
@@ -452,6 +526,7 @@ export default definePluginEntry({
       }
       return memoryConfigSchema.parse({
         embedding: {
+          provider: cfg.embedding.provider,
           apiKey: cfg.embedding.apiKey,
           model: cfg.embedding.model,
           ...(cfg.embedding.baseUrl ? { baseUrl: cfg.embedding.baseUrl } : {}),
@@ -465,6 +540,7 @@ export default definePluginEntry({
        autoCapture: cfg.autoCapture,
        autoRecall: cfg.autoRecall,
        captureMaxChars: cfg.captureMaxChars,
        recallMaxChars: cfg.recallMaxChars,
+       ...(cfg.storageOptions ? { storageOptions: cfg.storageOptions } : {}),
        ...asRecord(runtimePluginConfig),
      });
@@ -3,10 +3,15 @@
   "kind": "memory",
   "uiHints": {
     "embedding.apiKey": {
-      "label": "OpenAI API Key",
+      "label": "Embedding API Key",
       "sensitive": true,
       "placeholder": "sk-proj-...",
-      "help": "API key for OpenAI embeddings (or use ${OPENAI_API_KEY})"
+      "help": "Optional API key override for OpenAI-compatible embeddings; omit to use configured provider auth"
     },
+    "embedding.provider": {
+      "label": "Embedding Provider",
+      "placeholder": "openai",
+      "help": "Memory embedding provider adapter to use (for example openai, github-copilot, ollama)"
+    },
     "embedding.model": {
       "label": "Embedding Model",
@@ -16,7 +21,7 @@
     "embedding.baseUrl": {
       "label": "Base URL",
       "placeholder": "https://api.openai.com/v1",
-      "help": "Base URL for compatible providers (e.g. http://localhost:11434/v1)",
+      "help": "Optional provider or OpenAI-compatible embedding endpoint base URL",
       "advanced": true
     },
     "embedding.dimensions": {
@@ -71,6 +76,9 @@
         "apiKey": {
           "type": "string"
         },
+        "provider": {
+          "type": "string"
+        },
         "model": {
           "type": "string"
         },
@@ -80,8 +88,7 @@
         "dimensions": {
           "type": "number"
         }
-      },
-      "required": ["apiKey"]
+      }
     },
     "dbPath": {
       "type": "string"