fix(lmstudio): trust configured local endpoints

@@ -17,6 +17,7 @@ Docs: https://docs.openclaw.ai
- CLI/update: keep the automatic post-update completion refresh on the core-command tree so it no longer stages bundled plugin runtime deps before the Gateway restart path, avoiding `.24` update hangs and 1006 disconnect cascades. Fixes #72665. Thanks @sakalaboator and @He-Pin.
- Agents/Bedrock: stop heartbeat runs from persisting blank user transcript turns and repair existing blank user text messages before replay, preventing AWS Bedrock `ContentBlock` blank-text validation failures. Fixes #72640 and #72622. Thanks @goldzulu.
- Agents/LM Studio: promote standalone bracketed local-model tool requests into registered tool calls and hide unsupported bracket blocks from visible replies, so MemPalace MCP lookups do not print raw `[tool]` JSON scaffolding in chat. Fixes #66178. Thanks @detroit357.
- LM Studio: trust configured LM Studio loopback, LAN, and tailnet endpoints for guarded model requests by default, preserving explicit private-network opt-outs. Refs #60994. Thanks @tnowakow.
- Docker/setup: route Docker onboarding defaults for host-side LM Studio and Ollama through `host.docker.internal` and add the Linux host-gateway mapping to the bundled Compose file, so containerized gateways can reach local providers without using container loopback. Fixes #68684; supersedes #68702. Thanks @safrano9999 and @skolez.
- Agents/LM Studio: strip prior-turn Gemma 4 reasoning from OpenAI-compatible replay while preserving active tool-call continuation reasoning. Fixes #68704. Thanks @chip-snomo and @Kailigithub.
- LM Studio: allow interactive onboarding to leave the API key blank for unauthenticated local servers, using local synthetic auth while clearing stale LM Studio auth profiles. Fixes #66937. Thanks @olamedia.

@@ -94,6 +94,7 @@ This writes `models.providers.lmstudio` and sets the default model to
`lmstudio:default` auth profile.

Interactive setup can prompt for an optional preferred load context length, which it applies to each discovered LM Studio model that it saves into config.
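
As a sketch of the shape this produces (the `contextLength` key below is a hypothetical name for the saved per-model value, not confirmed by these docs; the config that setup actually writes is authoritative):

```json5
{
  models: {
    providers: {
      lmstudio: {
        models: [
          // "contextLength" is a hypothetical field name for the preferred
          // load context length applied to each discovered model.
          { id: "qwen/qwen3.5-9b", contextLength: 8192 },
        ],
      },
    },
  },
}
```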

LM Studio plugin config trusts the configured LM Studio endpoint for model requests, including loopback, LAN, and tailnet hosts. You can opt out by setting `models.providers.lmstudio.request.allowPrivateNetwork: false`.
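
For example, a minimal opt-out using the documented key:

```json5
{
  models: {
    providers: {
      lmstudio: {
        // Disable the default trust of this private/local endpoint
        // for guarded model requests.
        request: { allowPrivateNetwork: false },
      },
    },
  },
}
```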

## Configuration

@@ -169,6 +170,27 @@ If setup reports HTTP 401, verify your API key:

LM Studio supports just-in-time (JIT) model loading, where models are loaded on first request. Make sure this is enabled, or requests can fail with `Model not loaded` errors.

### LAN or tailnet LM Studio host

Use the LM Studio host's reachable address, keep the `/v1` path, and make sure LM Studio on that machine listens beyond loopback:

```json5
{
  models: {
    providers: {
      lmstudio: {
        baseUrl: "http://gpu-box.local:1234/v1",
        apiKey: "lmstudio",
        api: "openai-completions",
        models: [{ id: "qwen/qwen3.5-9b" }],
      },
    },
  },
}
```
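
A tailnet host works the same way; only the address changes. A sketch, assuming a hypothetical Tailscale MagicDNS hostname for the GPU machine:

```json5
{
  models: {
    providers: {
      lmstudio: {
        // Hypothetical tailnet hostname; substitute your machine's name.
        baseUrl: "http://gpu-box.tailnet-example.ts.net:1234/v1",
        apiKey: "lmstudio",
        api: "openai-completions",
        models: [{ id: "qwen/qwen3.5-9b" }],
      },
    },
  },
}
```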

Unlike generic OpenAI-compatible providers, `lmstudio` automatically trusts its configured local/private endpoint for guarded model requests. If you use a custom provider id instead of `lmstudio`, set `models.providers.<id>.request.allowPrivateNetwork: true` explicitly.
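
A sketch of that explicit opt-in, using a hypothetical custom provider id `my-lmstudio`:

```json5
{
  models: {
    providers: {
      // Custom provider ids do not inherit the automatic lmstudio trust
      // default, so allowPrivateNetwork must be set explicitly.
      "my-lmstudio": {
        baseUrl: "http://gpu-box.local:1234/v1",
        apiKey: "lmstudio",
        api: "openai-completions",
        request: { allowPrivateNetwork: true },
        models: [{ id: "qwen/qwen3.5-9b" }],
      },
    },
  },
}
```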

## Related

- [Model selection](/concepts/model-providers)

@@ -6,6 +6,7 @@ import { afterEach, describe, expect, it, vi } from "vitest";
 import { LMSTUDIO_DEFAULT_LOAD_CONTEXT_LENGTH } from "./defaults.js";
 import { discoverLmstudioModels, ensureLmstudioModelLoaded } from "./models.fetch.js";
 import {
+  normalizeLmstudioProviderConfig,
   resolveLmstudioInferenceBase,
   resolveLmstudioReasoningCapability,
   resolveLmstudioServerBase,

@@ -89,6 +90,37 @@ describe("lmstudio-models", () => {
     expect(resolveLmstudioInferenceBase("localhost:1234/api/v1")).toBe("http://localhost:1234/v1");
   });
 
+  it("marks configured LM Studio endpoints as trusted private-network model targets", () => {
+    expect(
+      normalizeLmstudioProviderConfig({
+        baseUrl: "http://192.168.1.10:1234",
+        models: [],
+      }),
+    ).toEqual({
+      baseUrl: "http://192.168.1.10:1234/v1",
+      request: { allowPrivateNetwork: true },
+      models: [],
+    });
+
+    expect(
+      normalizeLmstudioProviderConfig({
+        baseUrl: "http://gpu-box.local:1234/v1",
+        request: {
+          allowPrivateNetwork: false,
+          headers: { "X-Proxy-Auth": "token" },
+        },
+        models: [],
+      }),
+    ).toEqual({
+      baseUrl: "http://gpu-box.local:1234/v1",
+      request: {
+        allowPrivateNetwork: false,
+        headers: { "X-Proxy-Auth": "token" },
+      },
+      models: [],
+    });
+  });
+
   it("resolves reasoning capability for supported and unsupported options", () => {
     expect(resolveLmstudioReasoningCapability({ capabilities: undefined })).toBe(false);
     expect(

@@ -175,9 +175,28 @@ export function normalizeLmstudioProviderConfig(
     return provider;
   }
   const normalizedBaseUrl = resolveLmstudioInferenceBase(configuredBaseUrl);
-  return normalizedBaseUrl === provider.baseUrl
-    ? provider
-    : { ...provider, baseUrl: normalizedBaseUrl };
+  const request =
+    provider.request && typeof provider.request === "object" && !Array.isArray(provider.request)
+      ? provider.request
+      : undefined;
+  const requestWithPrivateNetworkDefault =
+    typeof request?.allowPrivateNetwork === "boolean"
+      ? request
+      : {
+          ...request,
+          allowPrivateNetwork: true,
+        };
+  if (
+    normalizedBaseUrl === provider.baseUrl &&
+    requestWithPrivateNetworkDefault === provider.request
+  ) {
+    return provider;
+  }
+  return {
+    ...provider,
+    baseUrl: normalizedBaseUrl,
+    request: requestWithPrivateNetworkDefault,
+  };
 }
 
 export function normalizeLmstudioConfiguredCatalogEntry(