From f7081a3879f23abcc20f991bf42aa10c22a72901 Mon Sep 17 00:00:00 2001 From: Peter Steinberger Date: Mon, 27 Apr 2026 08:55:36 +0100 Subject: [PATCH] fix(lmstudio): trust configured local endpoints --- CHANGELOG.md | 1 + docs/providers/lmstudio.md | 22 ++++++++++++++++++ extensions/lmstudio/src/models.test.ts | 32 ++++++++++++++++++++++++++ extensions/lmstudio/src/models.ts | 25 +++++++++++++++++--- 4 files changed, 77 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 71dcbbb7b06..6e9114c6372 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ Docs: https://docs.openclaw.ai - CLI/update: keep the automatic post-update completion refresh on the core-command tree so it no longer stages bundled plugin runtime deps before the Gateway restart path, avoiding `.24` update hangs and 1006 disconnect cascades. Fixes #72665. Thanks @sakalaboator and @He-Pin. - Agents/Bedrock: stop heartbeat runs from persisting blank user transcript turns and repair existing blank user text messages before replay, preventing AWS Bedrock `ContentBlock` blank-text validation failures. Fixes #72640 and #72622. Thanks @goldzulu. - Agents/LM Studio: promote standalone bracketed local-model tool requests into registered tool calls and hide unsupported bracket blocks from visible replies, so MemPalace MCP lookups do not print raw `[tool]` JSON scaffolding in chat. Fixes #66178. Thanks @detroit357. +- LM Studio: trust configured LM Studio loopback, LAN, and tailnet endpoints for guarded model requests by default, preserving explicit private-network opt-outs. Refs #60994. Thanks @tnowakow. - Docker/setup: route Docker onboarding defaults for host-side LM Studio and Ollama through `host.docker.internal` and add the Linux host-gateway mapping to the bundled Compose file, so containerized gateways can reach local providers without using container loopback. Fixes #68684; supersedes #68702. Thanks @safrano9999 and @skolez. 
- Agents/LM Studio: strip prior-turn Gemma 4 reasoning from OpenAI-compatible replay while preserving active tool-call continuation reasoning. Fixes #68704. Thanks @chip-snomo and @Kailigithub. - LM Studio: allow interactive onboarding to leave the API key blank for unauthenticated local servers, using local synthetic auth while clearing stale LM Studio auth profiles. Fixes #66937. Thanks @olamedia. diff --git a/docs/providers/lmstudio.md b/docs/providers/lmstudio.md index 26f1aabb59c..b71c279d2f8 100644 --- a/docs/providers/lmstudio.md +++ b/docs/providers/lmstudio.md @@ -94,6 +94,7 @@ This writes `models.providers.lmstudio` and sets the default model to `lmstudio:default` auth profile. Interactive setup can prompt for an optional preferred load context length and applies it across the discovered LM Studio models it saves into config. +LM Studio plugin config trusts the configured LM Studio endpoint for model requests, including loopback, LAN, and tailnet hosts. You can opt out by setting `models.providers.lmstudio.request.allowPrivateNetwork: false`. ## Configuration @@ -169,6 +170,27 @@ If setup reports HTTP 401, verify your API key: LM Studio supports just-in-time (JIT) model loading, where models are loaded on first request. Make sure you have this enabled to avoid 'Model not loaded' errors. +### LAN or tailnet LM Studio host + +Use the LM Studio host's reachable address, keep `/v1`, and make sure LM Studio is bound beyond loopback on that machine: + +```json5 +{ + models: { + providers: { + lmstudio: { + baseUrl: "http://gpu-box.local:1234/v1", + apiKey: "lmstudio", + api: "openai-completions", + models: [{ id: "qwen/qwen3.5-9b" }], + }, + }, + }, +} +``` + +Unlike generic OpenAI-compatible providers, `lmstudio` automatically trusts its configured local/private endpoint for guarded model requests. If you use a custom provider id instead of `lmstudio`, set `models.providers.<provider-id>.request.allowPrivateNetwork: true` explicitly. 
+ ## Related - [Model selection](/concepts/model-providers) diff --git a/extensions/lmstudio/src/models.test.ts b/extensions/lmstudio/src/models.test.ts index f27620026aa..a4ace071cbe 100644 --- a/extensions/lmstudio/src/models.test.ts +++ b/extensions/lmstudio/src/models.test.ts @@ -6,6 +6,7 @@ import { afterEach, describe, expect, it, vi } from "vitest"; import { LMSTUDIO_DEFAULT_LOAD_CONTEXT_LENGTH } from "./defaults.js"; import { discoverLmstudioModels, ensureLmstudioModelLoaded } from "./models.fetch.js"; import { + normalizeLmstudioProviderConfig, resolveLmstudioInferenceBase, resolveLmstudioReasoningCapability, resolveLmstudioServerBase, @@ -89,6 +90,37 @@ describe("lmstudio-models", () => { expect(resolveLmstudioInferenceBase("localhost:1234/api/v1")).toBe("http://localhost:1234/v1"); }); + it("marks configured LM Studio endpoints as trusted private-network model targets", () => { + expect( + normalizeLmstudioProviderConfig({ + baseUrl: "http://192.168.1.10:1234", + models: [], + }), + ).toEqual({ + baseUrl: "http://192.168.1.10:1234/v1", + request: { allowPrivateNetwork: true }, + models: [], + }); + + expect( + normalizeLmstudioProviderConfig({ + baseUrl: "http://gpu-box.local:1234/v1", + request: { + allowPrivateNetwork: false, + headers: { "X-Proxy-Auth": "token" }, + }, + models: [], + }), + ).toEqual({ + baseUrl: "http://gpu-box.local:1234/v1", + request: { + allowPrivateNetwork: false, + headers: { "X-Proxy-Auth": "token" }, + }, + models: [], + }); + }); + it("resolves reasoning capability for supported and unsupported options", () => { expect(resolveLmstudioReasoningCapability({ capabilities: undefined })).toBe(false); expect( diff --git a/extensions/lmstudio/src/models.ts b/extensions/lmstudio/src/models.ts index 3c45b12b679..8873c63b424 100644 --- a/extensions/lmstudio/src/models.ts +++ b/extensions/lmstudio/src/models.ts @@ -175,9 +175,28 @@ export function normalizeLmstudioProviderConfig( return provider; } const normalizedBaseUrl = 
resolveLmstudioInferenceBase(configuredBaseUrl); - return normalizedBaseUrl === provider.baseUrl - ? provider - : { ...provider, baseUrl: normalizedBaseUrl }; + const request = + provider.request && typeof provider.request === "object" && !Array.isArray(provider.request) + ? provider.request + : undefined; + const requestWithPrivateNetworkDefault = + typeof request?.allowPrivateNetwork === "boolean" + ? request + : { + ...request, + allowPrivateNetwork: true, + }; + if ( + normalizedBaseUrl === provider.baseUrl && + requestWithPrivateNetworkDefault === provider.request + ) { + return provider; + } + return { + ...provider, + baseUrl: normalizedBaseUrl, + request: requestWithPrivateNetworkDefault, + }; } export function normalizeLmstudioConfiguredCatalogEntry(