Mirror of https://github.com/openclaw/openclaw.git (synced 2026-05-06 19:31:00 +00:00)
feat: LM Studio Integration (#53248)
* Feat: LM Studio Integration
* Format
* Support usage in streaming: true; fix token count
* Add custom window check
* Drop max tokens fallback
* Tweak docs; update generated
* Avoid error if stale header does not resolve
* Fix test
* Fix test
* Fix rebase issues; trim code
* Fix tests; drop keyless; fixes
* Fix linter issues in tests
* Update generated artifacts
* Do not have fatal header resolution for discovery
* Do the same for API key as well
* fix: honor lmstudio preload runtime auth
* fix: clear stale lmstudio header auth
* fix: lazy-load lmstudio runtime facade
* fix: preserve lmstudio shared synthetic auth
* fix: clear stale lmstudio header auth in discovery
* fix: prefer lmstudio header auth for discovery
* fix: honor lmstudio header auth in warmup paths
* fix: clear stale lmstudio profile auth
* fix: ignore lmstudio env auth on header migration
* fix: use local lmstudio setup seam
* fix: resolve lmstudio rebase fallout

---------

Co-authored-by: Frank Yang <frank.ekn@gmail.com>
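For orientation, a minimal sketch of what selecting the new LM Studio embedding fallback looks like in a memory-search settings object. The field names and values mirror createSettings() and the new test in the diff below; the type name and the standalone shape are illustrative assumptions, not the project's actual config surface.

// Hypothetical settings sketch: enable LM Studio as the embedding fallback.
// Field names follow createSettings() in the test diff; everything else is
// an assumption for illustration only.
type MemorySearchSettingsSketch = {
  provider: "openai" | "mistral";
  fallback?: "none" | "mistral" | "ollama" | "lmstudio";
  model?: string;
};

const settings: MemorySearchSettingsSketch = {
  provider: "openai",
  fallback: "lmstudio", // fall back to a local LM Studio server
  // model omitted: the lmstudio adapter supplies
  // DEFAULT_LMSTUDIO_EMBEDDING_MODEL ("text-embedding-nomic-embed-text-v1.5")
};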
@@ -17,6 +17,7 @@ import { canAutoSelectLocal } from "./provider-adapters.js";
 export {
   DEFAULT_GEMINI_EMBEDDING_MODEL,
+  DEFAULT_LMSTUDIO_EMBEDDING_MODEL,
   DEFAULT_LOCAL_MODEL,
   DEFAULT_MISTRAL_EMBEDDING_MODEL,
   DEFAULT_OLLAMA_EMBEDDING_MODEL,
@@ -11,10 +11,15 @@ import {
 } from "./manager-provider-state.js";

 const DEFAULT_OLLAMA_EMBEDDING_MODEL = "nomic-embed-text";
+const DEFAULT_LMSTUDIO_EMBEDDING_MODEL = "text-embedding-nomic-embed-text-v1.5";

 vi.mock("./embeddings.js", () => ({
   resolveEmbeddingProviderFallbackModel: (providerId: string, fallbackSourceModel: string) =>
-    providerId === "ollama" ? DEFAULT_OLLAMA_EMBEDDING_MODEL : fallbackSourceModel,
+    providerId === "ollama"
+      ? DEFAULT_OLLAMA_EMBEDDING_MODEL
+      : providerId === "lmstudio"
+        ? DEFAULT_LMSTUDIO_EMBEDDING_MODEL
+        : fallbackSourceModel,
 }));

 type EmbeddingProvider = {
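The mock above spells out the resolution order: ollama and lmstudio each get their own default embedding model, and any other provider keeps the fallback source model. A standalone sketch of that logic, assuming the real resolveEmbeddingProviderFallbackModel in embeddings.js behaves the way the mock does:

// Sketch of the fallback-model resolution the test mock encodes.
// Assumption: the real helper follows the same shape as the mock.
const DEFAULT_OLLAMA_EMBEDDING_MODEL = "nomic-embed-text";
const DEFAULT_LMSTUDIO_EMBEDDING_MODEL = "text-embedding-nomic-embed-text-v1.5";

function resolveFallbackModelSketch(providerId: string, fallbackSourceModel: string): string {
  if (providerId === "ollama") return DEFAULT_OLLAMA_EMBEDDING_MODEL;
  if (providerId === "lmstudio") return DEFAULT_LMSTUDIO_EMBEDDING_MODEL;
  return fallbackSourceModel;
}

// resolveFallbackModelSketch("lmstudio", "text-embedding-3-small")
//   -> "text-embedding-nomic-embed-text-v1.5"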
@@ -40,7 +45,7 @@ function createProvider(id: string): EmbeddingProvider {

 function createSettings(params: {
   provider: "openai" | "mistral";
-  fallback?: "none" | "mistral" | "ollama";
+  fallback?: "none" | "mistral" | "ollama" | "lmstudio";
 }): ResolvedMemorySearchConfig {
   return {
     provider: params.provider,
@@ -130,4 +135,16 @@ describe("memory manager mistral provider wiring", () => {
     expect(request.model).toBe("gemini-embedding-2-preview");
     expect(request.outputDimensionality).toBe(1536);
   });
+
+  it("uses default lmstudio model when activating lmstudio fallback", async () => {
+    const request = resolveMemoryFallbackProviderRequest({
+      cfg: {} as OpenClawConfig,
+      settings: createSettings({ provider: "openai", fallback: "lmstudio" }),
+      currentProviderId: "openai",
+    });
+
+    expect(request?.provider).toBe("lmstudio");
+    expect(request?.model).toBe(DEFAULT_LMSTUDIO_EMBEDDING_MODEL);
+    expect(request?.fallback).toBe("none");
+  });
 });
@@ -1,6 +1,7 @@
 import fsSync from "node:fs";
 import {
   DEFAULT_GEMINI_EMBEDDING_MODEL,
+  DEFAULT_LMSTUDIO_EMBEDDING_MODEL,
   DEFAULT_LOCAL_MODEL,
   DEFAULT_MISTRAL_EMBEDDING_MODEL,
   DEFAULT_OPENAI_EMBEDDING_MODEL,
@@ -8,6 +9,7 @@ import {
   OPENAI_BATCH_ENDPOINT,
   buildGeminiEmbeddingRequest,
   createGeminiEmbeddingProvider,
+  createLmstudioEmbeddingProvider,
   createLocalEmbeddingProvider,
   createMistralEmbeddingProvider,
   createOpenAiEmbeddingProvider,
@@ -288,6 +290,31 @@ const mistralAdapter: MemoryEmbeddingProviderAdapter = {
   },
 };

+const lmstudioAdapter: MemoryEmbeddingProviderAdapter = {
+  id: "lmstudio",
+  defaultModel: DEFAULT_LMSTUDIO_EMBEDDING_MODEL,
+  transport: "remote",
+  create: async (options) => {
+    const { provider, client } = await createLmstudioEmbeddingProvider({
+      ...options,
+      provider: "lmstudio",
+      fallback: "none",
+    });
+    return {
+      provider,
+      runtime: {
+        id: "lmstudio",
+        cacheKeyData: {
+          provider: "lmstudio",
+          baseUrl: client.baseUrl,
+          model: client.model,
+          headers: sanitizeHeaders(client.headers, ["authorization"]),
+        },
+      },
+    };
+  },
+};
+
 const localAdapter: MemoryEmbeddingProviderAdapter = {
   id: "local",
   defaultModel: DEFAULT_LOCAL_MODEL,
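sanitizeHeaders is called in cacheKeyData above, but its implementation is not part of this diff. A hedged sketch of what such a helper plausibly does, assuming its second argument lists header names to redact so credentials never become part of the embedding cache key:

// Hypothetical sketch of a sanitizeHeaders-style helper.
// Assumption: the second argument names headers whose values are redacted
// (case-insensitively) before the headers enter cacheKeyData.
function sanitizeHeadersSketch(
  headers: Record<string, string> | undefined,
  redactKeys: readonly string[],
): Record<string, string> {
  const redact = new Set(redactKeys.map((k) => k.toLowerCase()));
  const out: Record<string, string> = {};
  for (const [key, value] of Object.entries(headers ?? {})) {
    out[key] = redact.has(key.toLowerCase()) ? "<redacted>" : value;
  }
  return out;
}

// sanitizeHeadersSketch({ Authorization: "Bearer abc", Accept: "application/json" }, ["authorization"])
//   -> { Authorization: "<redacted>", Accept: "application/json" }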
@@ -320,6 +347,7 @@ export const builtinMemoryEmbeddingProviderAdapters = [
   geminiAdapter,
   voyageAdapter,
   mistralAdapter,
+  lmstudioAdapter,
 ] as const;

 const builtinMemoryEmbeddingProviderAdapterById = new Map(
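With lmstudioAdapter appended to builtinMemoryEmbeddingProviderAdapters, id-based lookup presumably goes through the Map constructed just below. A small sketch of that registration-and-lookup pattern; the type and the non-lmstudio entry are illustrative assumptions, only the lmstudio id and default model come from the diff:

// Sketch of adapter registration and id lookup, assuming the Map is keyed by adapter.id.
type MemoryAdapterSketch = {
  id: string;
  defaultModel: string;
  transport?: string;
};

const registered: readonly MemoryAdapterSketch[] = [
  { id: "local", defaultModel: "placeholder-local-model" }, // hypothetical entry
  { id: "lmstudio", defaultModel: "text-embedding-nomic-embed-text-v1.5", transport: "remote" },
];

const byId = new Map(registered.map((a) => [a.id, a] as const));
const lmstudioEntry = byId.get("lmstudio");
// lmstudioEntry?.defaultModel === "text-embedding-nomic-embed-text-v1.5"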
@@ -378,6 +406,7 @@ export function listBuiltinAutoSelectMemoryEmbeddingProviderDoctorMetadata(): Ar

 export {
   DEFAULT_GEMINI_EMBEDDING_MODEL,
+  DEFAULT_LMSTUDIO_EMBEDDING_MODEL,
   DEFAULT_LOCAL_MODEL,
   DEFAULT_MISTRAL_EMBEDDING_MODEL,
   DEFAULT_OPENAI_EMBEDDING_MODEL,