import type { Llama, LlamaEmbeddingContext, LlamaModel } from "node-llama-cpp";
import fsSync from "node:fs";
import type { OpenClawConfig } from "../config/config.js";
import { formatErrorMessage } from "../infra/errors.js";
import { resolveUserPath } from "../utils.js";
import { createGeminiEmbeddingProvider, type GeminiEmbeddingClient } from "./embeddings-gemini.js";
import { createOpenAiEmbeddingProvider, type OpenAiEmbeddingClient } from "./embeddings-openai.js";
import { createVoyageEmbeddingProvider, type VoyageEmbeddingClient } from "./embeddings-voyage.js";
import { importNodeLlamaCpp } from "./node-llama.js";

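/**
 * Replaces non-finite components (NaN, ±Infinity) with 0, then L2-normalizes
 * the vector; near-zero vectors are returned as-is to avoid dividing by ~0.
 */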
function sanitizeAndNormalizeEmbedding(vec: number[]): number[] {
  const sanitized = vec.map((value) => (Number.isFinite(value) ? value : 0));
  const magnitude = Math.sqrt(sanitized.reduce((sum, value) => sum + value * value, 0));
  if (magnitude < 1e-10) {
    return sanitized;
  }
  return sanitized.map((value) => value / magnitude);
}

export type { GeminiEmbeddingClient } from "./embeddings-gemini.js";
export type { OpenAiEmbeddingClient } from "./embeddings-openai.js";
export type { VoyageEmbeddingClient } from "./embeddings-voyage.js";

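/**
 * Common interface implemented by the local and remote embedding backends:
 * a provider id, the model it serves, and single/batch embedding calls.
 */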
export type EmbeddingProvider = {
  id: string;
  model: string;
  maxInputTokens?: number;
  embedQuery: (text: string) => Promise<number[]>;
  embedBatch: (texts: string[]) => Promise<number[][]>;
};

export type EmbeddingProviderId = "openai" | "local" | "gemini" | "voyage";
export type EmbeddingProviderRequest = EmbeddingProviderId | "auto";
export type EmbeddingProviderFallback = EmbeddingProviderId | "none";

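// Remote providers tried in this order when provider = "auto" does not
// resolve to a local model file.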
const REMOTE_EMBEDDING_PROVIDER_IDS = ["openai", "gemini", "voyage"] as const;

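/**
 * Resolution outcome. When a fallback provider was used, fallbackFrom and
 * fallbackReason record which provider failed and why; the optional client
 * fields expose the underlying remote client when one was constructed.
 */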
export type EmbeddingProviderResult = {
  provider: EmbeddingProvider;
  requestedProvider: EmbeddingProviderRequest;
  fallbackFrom?: EmbeddingProviderId;
  fallbackReason?: string;
  openAi?: OpenAiEmbeddingClient;
  gemini?: GeminiEmbeddingClient;
  voyage?: VoyageEmbeddingClient;
};

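/**
 * Inputs for provider resolution. `provider` may be a concrete id or "auto",
 * and `fallback` names a provider to try when the requested one fails
 * ("none" disables fallback). `local.modelPath` accepts a filesystem path,
 * an hf: URI, or an http(s) URL.
 */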
export type EmbeddingProviderOptions = {
  config: OpenClawConfig;
  agentDir?: string;
  provider: EmbeddingProviderRequest;
  remote?: {
    baseUrl?: string;
    apiKey?: string;
    headers?: Record<string, string>;
  };
  model: string;
  fallback: EmbeddingProviderFallback;
  local?: {
    modelPath?: string;
    modelCacheDir?: string;
  };
};

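// Default local model (EmbeddingGemma 300M, Q8_0 GGUF), fetched on first use
// via node-llama-cpp's resolveModelFile().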
export const DEFAULT_LOCAL_MODEL =
  "hf:ggml-org/embeddinggemma-300m-qat-q8_0-GGUF/embeddinggemma-300m-qat-Q8_0.gguf";

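/**
 * "auto" only selects the local provider when modelPath already points at a
 * file on disk; hf:/http(s) sources are skipped so auto mode never triggers
 * a model download.
 */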
function canAutoSelectLocal(options: EmbeddingProviderOptions): boolean {
  const modelPath = options.local?.modelPath?.trim();
  if (!modelPath) {
    return false;
  }
  if (/^(hf:|https?:)/i.test(modelPath)) {
    return false;
  }
  const resolved = resolveUserPath(modelPath);
  try {
    return fsSync.statSync(resolved).isFile();
  } catch {
    return false;
  }
}

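// Message-text match for the "No API key found for provider ..." error that
// remote providers throw when no key is configured; keep the string in sync.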
function isMissingApiKeyError(err: unknown): boolean {
  const message = formatErrorMessage(err);
  return message.includes("No API key found for provider");
}

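/**
 * Builds the in-process provider backed by node-llama-cpp. The llama runtime,
 * model, and embedding context are created lazily on the first embed call
 * and reused for the provider's lifetime.
 */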
async function createLocalEmbeddingProvider(
  options: EmbeddingProviderOptions,
): Promise<EmbeddingProvider> {
  const modelPath = options.local?.modelPath?.trim() || DEFAULT_LOCAL_MODEL;
  const modelCacheDir = options.local?.modelCacheDir?.trim();

  // Lazy-load node-llama-cpp to keep startup light unless local is enabled.
  const { getLlama, resolveModelFile, LlamaLogLevel } = await importNodeLlamaCpp();

  let llama: Llama | null = null;
  let embeddingModel: LlamaModel | null = null;
  let embeddingContext: LlamaEmbeddingContext | null = null;

  const ensureContext = async () => {
    if (!llama) {
      llama = await getLlama({ logLevel: LlamaLogLevel.error });
    }
    if (!embeddingModel) {
      const resolved = await resolveModelFile(modelPath, modelCacheDir || undefined);
      embeddingModel = await llama.loadModel({ modelPath: resolved });
    }
    if (!embeddingContext) {
      embeddingContext = await embeddingModel.createEmbeddingContext();
    }
    return embeddingContext;
  };

  return {
    id: "local",
    model: modelPath,
    embedQuery: async (text) => {
      const ctx = await ensureContext();
      const embedding = await ctx.getEmbeddingFor(text);
      return sanitizeAndNormalizeEmbedding(Array.from(embedding.vector));
    },
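    // Each text gets its own getEmbeddingFor() call; Promise.all overlaps the
    // async work but this is not model-level batching.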
    embedBatch: async (texts) => {
      const ctx = await ensureContext();
      const embeddings = await Promise.all(
        texts.map(async (text) => {
          const embedding = await ctx.getEmbeddingFor(text);
          return sanitizeAndNormalizeEmbedding(Array.from(embedding.vector));
        }),
      );
      return embeddings;
    },
  };
}

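/**
 * Resolves an embedding provider.
 *
 * - provider = "auto": prefer a local model file if one exists, then try the
 *   remote providers in order, skipping any whose API key is missing.
 * - explicit provider: try it, then fall back to `fallback` unless that is
 *   "none" or the same provider; both errors are surfaced if fallback fails.
 *
 * Minimal usage sketch (model name and config values are illustrative):
 *
 *   const { provider } = await createEmbeddingProvider({
 *     config,
 *     provider: "auto",
 *     model: "text-embedding-3-small",
 *     fallback: "local",
 *   });
 *   const vector = await provider.embedQuery("hello world");
 */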
export async function createEmbeddingProvider(
  options: EmbeddingProviderOptions,
): Promise<EmbeddingProviderResult> {
  const requestedProvider = options.provider;
  const fallback = options.fallback;

  const createProvider = async (id: EmbeddingProviderId) => {
    if (id === "local") {
      const provider = await createLocalEmbeddingProvider(options);
      return { provider };
    }
    if (id === "gemini") {
      const { provider, client } = await createGeminiEmbeddingProvider(options);
      return { provider, gemini: client };
    }
    if (id === "voyage") {
      const { provider, client } = await createVoyageEmbeddingProvider(options);
      return { provider, voyage: client };
    }
    const { provider, client } = await createOpenAiEmbeddingProvider(options);
    return { provider, openAi: client };
  };

  const formatPrimaryError = (err: unknown, provider: EmbeddingProviderId) =>
    provider === "local" ? formatLocalSetupError(err) : formatErrorMessage(err);

  if (requestedProvider === "auto") {
    const missingKeyErrors: string[] = [];
    let localError: string | null = null;

    if (canAutoSelectLocal(options)) {
      try {
        const local = await createProvider("local");
        return { ...local, requestedProvider };
      } catch (err) {
        localError = formatLocalSetupError(err);
      }
    }

    for (const provider of REMOTE_EMBEDDING_PROVIDER_IDS) {
      try {
        const result = await createProvider(provider);
        return { ...result, requestedProvider };
      } catch (err) {
        const message = formatPrimaryError(err, provider);
        if (isMissingApiKeyError(err)) {
          missingKeyErrors.push(message);
          continue;
        }
        throw new Error(message, { cause: err });
      }
    }

    const details = [...missingKeyErrors, localError].filter(Boolean) as string[];
    if (details.length > 0) {
      throw new Error(details.join("\n\n"));
    }
    throw new Error("No embeddings provider available.");
  }

  try {
    const primary = await createProvider(requestedProvider);
    return { ...primary, requestedProvider };
  } catch (primaryErr) {
    const reason = formatPrimaryError(primaryErr, requestedProvider);
    if (fallback && fallback !== "none" && fallback !== requestedProvider) {
      try {
        const fallbackResult = await createProvider(fallback);
        return {
          ...fallbackResult,
          requestedProvider,
          fallbackFrom: requestedProvider,
          fallbackReason: reason,
        };
      } catch (fallbackErr) {
        // oxlint-disable-next-line preserve-caught-error
        throw new Error(
          `${reason}\n\nFallback to ${fallback} failed: ${formatErrorMessage(fallbackErr)}`,
          { cause: fallbackErr },
        );
      }
    }
    throw new Error(reason, { cause: primaryErr });
  }
}

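/**
 * True when the error is Node's ERR_MODULE_NOT_FOUND for the optional
 * node-llama-cpp dependency (not installed or its build failed).
 */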
function isNodeLlamaCppMissing(err: unknown): boolean {
  if (!(err instanceof Error)) {
    return false;
  }
  const code = (err as Error & { code?: unknown }).code;
  if (code === "ERR_MODULE_NOT_FOUND") {
    return err.message.includes("node-llama-cpp");
  }
  return false;
}

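/**
 * Builds the multi-line, user-facing message explaining why local embeddings
 * are unavailable and how to fix it (Node version, reinstall, pnpm build
 * approval) or switch to a remote provider.
 */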
function formatLocalSetupError(err: unknown): string {
  const detail = formatErrorMessage(err);
  const missing = isNodeLlamaCppMissing(err);
  return [
    "Local embeddings unavailable.",
    missing
      ? "Reason: optional dependency node-llama-cpp is missing (or failed to install)."
      : detail
        ? `Reason: ${detail}`
        : undefined,
    missing && detail ? `Detail: ${detail}` : null,
    "To enable local embeddings:",
    "1) Use Node 22 LTS (recommended for installs/updates)",
    missing
      ? "2) Reinstall OpenClaw (this should install node-llama-cpp): npm i -g openclaw@latest"
      : null,
    "3) If you use pnpm: pnpm approve-builds (select node-llama-cpp), then pnpm rebuild node-llama-cpp",
    ...REMOTE_EMBEDDING_PROVIDER_IDS.map(
      (provider) => `Or set agents.defaults.memorySearch.provider = "${provider}" (remote).`,
    ),
  ]
    .filter(Boolean)
    .join("\n");
}