fix(docker): route local provider setup to host gateway
@@ -16,6 +16,7 @@ Docs: https://docs.openclaw.ai
 - CLI/update: keep the automatic post-update completion refresh on the core-command tree so it no longer stages bundled plugin runtime deps before the Gateway restart path, avoiding `.24` update hangs and 1006 disconnect cascades. Fixes #72665. Thanks @sakalaboator and @He-Pin.
 - Agents/Bedrock: stop heartbeat runs from persisting blank user transcript turns and repair existing blank user text messages before replay, preventing AWS Bedrock `ContentBlock` blank-text validation failures. Fixes #72640 and #72622. Thanks @goldzulu.
 - Agents/LM Studio: promote standalone bracketed local-model tool requests into registered tool calls and hide unsupported bracket blocks from visible replies, so MemPalace MCP lookups do not print raw `[tool]` JSON scaffolding in chat. Fixes #66178. Thanks @detroit357.
+- Docker/setup: route Docker onboarding defaults for host-side LM Studio and Ollama through `host.docker.internal` and add the Linux host-gateway mapping to the bundled Compose file, so containerized gateways can reach local providers without using container loopback. Fixes #68684; supersedes #68702. Thanks @safrano9999 and @skolez.
 - Agents/LM Studio: strip prior-turn Gemma 4 reasoning from OpenAI-compatible replay while preserving active tool-call continuation reasoning. Fixes #68704. Thanks @chip-snomo and @Kailigithub.
 - LM Studio: allow interactive onboarding to leave the API key blank for unauthenticated local servers, using local synthetic auth while clearing stale LM Studio auth profiles. Fixes #66937. Thanks @olamedia.
 - Process/Windows: decode command stdout and stderr from raw bytes with console-codepage awareness, while preserving valid UTF-8 output and multibyte characters split across chunks. Fixes #50519. Thanks @iready, @kevinten10, @zhangyongjie1997, @knightplat-blip, @heiqishi666, and @slepybear.

@@ -34,6 +34,11 @@ services:
     # - /var/run/docker.sock:/var/run/docker.sock
     # group_add:
     # - "${DOCKER_GID:-999}"
+    # Let bundled local-model providers reach host-side LM Studio/Ollama via
+    # http://host.docker.internal:<port>. Docker Desktop usually provides this
+    # alias; the host-gateway mapping makes it work on Linux Docker Engine too.
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
     ports:
       - "${OPENCLAW_GATEWAY_PORT:-18789}:18789"
       - "${OPENCLAW_BRIDGE_PORT:-18790}:18790"

@@ -215,6 +215,33 @@ Use bind mode values in `gateway.bind` (`lan` / `loopback` / `custom` /
 `tailnet` / `auto`), not host aliases like `0.0.0.0` or `127.0.0.1`.
 </Note>
 
+### Host Local Providers
+
+When OpenClaw runs in Docker, `127.0.0.1` inside the container is the container
+itself, not your host machine. Use `host.docker.internal` for AI providers that
+run on the host:
+
+| Provider  | Host default URL         | Docker setup URL                    |
+| --------- | ------------------------ | ----------------------------------- |
+| LM Studio | `http://127.0.0.1:1234`  | `http://host.docker.internal:1234`  |
+| Ollama    | `http://127.0.0.1:11434` | `http://host.docker.internal:11434` |
+
+The bundled Docker setup uses the `host.docker.internal` URLs as the LM Studio
+and Ollama onboarding defaults, and `docker-compose.yml` maps
+`host.docker.internal` to Docker's host gateway for Linux Docker Engine. Docker
+Desktop already provides the same hostname on macOS and Windows.
+
+Host services must also listen on an address reachable from Docker:
+
+```bash
+lms server start --port 1234 --bind 0.0.0.0
+OLLAMA_HOST=0.0.0.0:11434 ollama serve
+```
+
+If you use your own Compose file or `docker run` command, add the same host
+mapping yourself, for example
+`--add-host=host.docker.internal:host-gateway`.
+
 ### Bonjour / mDNS
 
 Docker bridge networking usually does not forward Bonjour/mDNS multicast
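
If you launch the gateway with `docker run` instead of the bundled Compose file, the docs above say to add the same host mapping yourself. A minimal sketch of that invocation (the image name/tag and the published gateway port are assumptions borrowed from the bundled `docker-compose.yml`, not prescribed by this change):

```bash
# Hypothetical invocation: the image name/tag is assumed for illustration;
# the 18789 gateway port default comes from the bundled docker-compose.yml.
docker run --rm -it \
  --add-host=host.docker.internal:host-gateway \
  -p 18789:18789 \
  openclaw/openclaw:latest
```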
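To verify the mapping from inside the running container, one quick check, assuming `curl` is present in the image (Ollama's `/api/tags` endpoint appears in the setup tests below; LM Studio's `/v1/models` is an assumed OpenAI-compatible listing path, not taken from this commit):

```bash
# Run these inside the gateway container; curl availability is assumed.
# /api/tags is exercised by the Ollama setup tests; /v1/models is an assumed
# LM Studio OpenAI-compatible model-listing endpoint.
curl -sf http://host.docker.internal:1234/v1/models && echo "LM Studio reachable"
curl -sf http://host.docker.internal:11434/api/tags && echo "Ollama reachable"
```
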
@@ -1,6 +1,8 @@
 /** Shared LM Studio defaults used by setup, runtime discovery, and embeddings paths. */
 export const LMSTUDIO_DEFAULT_BASE_URL = "http://localhost:1234";
 export const LMSTUDIO_DEFAULT_INFERENCE_BASE_URL = `${LMSTUDIO_DEFAULT_BASE_URL}/v1`;
+export const LMSTUDIO_DOCKER_HOST_BASE_URL = "http://host.docker.internal:1234";
+export const LMSTUDIO_DOCKER_HOST_INFERENCE_BASE_URL = `${LMSTUDIO_DOCKER_HOST_BASE_URL}/v1`;
 export const LMSTUDIO_DEFAULT_EMBEDDING_MODEL = "text-embedding-nomic-embed-text-v1.5";
 export const LMSTUDIO_PROVIDER_LABEL = "LM Studio";
 export const LMSTUDIO_DEFAULT_API_KEY_ENV_VAR = "LM_API_TOKEN";

@@ -8,7 +8,7 @@ import {
   type ProviderCatalogContext,
 } from "openclaw/plugin-sdk/provider-setup";
 import type { WizardPrompter } from "openclaw/plugin-sdk/setup";
-import { beforeEach, describe, expect, it, vi } from "vitest";
+import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
 import {
   LMSTUDIO_DEFAULT_API_KEY_ENV_VAR,
   LMSTUDIO_LOCAL_API_KEY_PLACEHOLDER,

@@ -173,6 +173,10 @@ function createQueuedWizardPrompterHarness(textValues: string[]): {
 }
 
 describe("lmstudio setup", () => {
+  afterEach(() => {
+    vi.unstubAllEnvs();
+  });
+
   beforeEach(() => {
     fetchLmstudioModelsMock.mockReset();
     discoverLmstudioModelsMock.mockReset();

@@ -738,6 +742,36 @@ describe("lmstudio setup", () => {
     expect(result.configPatch?.models?.providers?.lmstudio).not.toHaveProperty("auth");
   });
 
+  it("interactive Docker setup defaults to the host LM Studio endpoint", async () => {
+    vi.stubEnv("OPENCLAW_DOCKER_SETUP", "1");
+    const { prompter, text } = createQueuedWizardPrompterHarness([
+      "http://host.docker.internal:1234",
+      "",
+      "",
+    ]);
+
+    const result = await promptAndConfigureLmstudioInteractive({
+      config: buildConfig(),
+      prompter,
+    });
+
+    expect(text).toHaveBeenNthCalledWith(
+      1,
+      expect.objectContaining({
+        initialValue: "http://host.docker.internal:1234",
+        placeholder: "http://host.docker.internal:1234",
+      }),
+    );
+    expect(fetchLmstudioModelsMock).toHaveBeenCalledWith({
+      baseUrl: "http://host.docker.internal:1234/v1",
+      apiKey: LMSTUDIO_LOCAL_API_KEY_PLACEHOLDER,
+      timeoutMs: 5000,
+    });
+    expect(result.configPatch?.models?.providers?.lmstudio).toMatchObject({
+      baseUrl: "http://host.docker.internal:1234/v1",
+    });
+  });
+
   it("interactive setup uses existing Authorization headers when the API key is blank", async () => {
     const config = {
       models: {

@@ -29,6 +29,8 @@ import {
   LMSTUDIO_LOCAL_API_KEY_PLACEHOLDER,
   LMSTUDIO_MODEL_PLACEHOLDER,
   LMSTUDIO_DEFAULT_BASE_URL,
+  LMSTUDIO_DOCKER_HOST_BASE_URL,
+  LMSTUDIO_DOCKER_HOST_INFERENCE_BASE_URL,
   LMSTUDIO_PROVIDER_LABEL,
   LMSTUDIO_DEFAULT_MODEL_ID,
   LMSTUDIO_PROVIDER_ID as PROVIDER_ID,

@@ -66,6 +68,22 @@ type LmstudioSetupDiscovery = {
   defaultModelId: string | undefined;
 };
 
+function isTruthyEnvValue(value: string | undefined): boolean {
+  return ["1", "true", "yes", "on"].includes(value?.trim().toLowerCase() ?? "");
+}
+
+function resolveLmstudioSetupDefaultBaseUrl(env: NodeJS.ProcessEnv = process.env): string {
+  return isTruthyEnvValue(env.OPENCLAW_DOCKER_SETUP)
+    ? LMSTUDIO_DOCKER_HOST_BASE_URL
+    : LMSTUDIO_DEFAULT_BASE_URL;
+}
+
+function resolveLmstudioSetupDefaultInferenceBaseUrl(env: NodeJS.ProcessEnv = process.env): string {
+  return isTruthyEnvValue(env.OPENCLAW_DOCKER_SETUP)
+    ? LMSTUDIO_DOCKER_HOST_INFERENCE_BASE_URL
+    : LMSTUDIO_DEFAULT_INFERENCE_BASE_URL;
+}
+
 function stripLmstudioStoredAuthConfig(cfg: OpenClawConfig): OpenClawConfig {
   const { profiles: _profiles, order: _order, ...restAuth } = cfg.auth ?? {};
   const nextProfiles = Object.fromEntries(

@@ -376,13 +394,14 @@ export async function promptAndConfigureLmstudioInteractive(params: {
     throw new Error("LM Studio interactive setup requires a text prompter.");
   }
   const note = params.prompter?.note ?? params.note;
+  const defaultBaseUrl = resolveLmstudioSetupDefaultBaseUrl();
   const baseUrlRaw = await promptText({
     message: `${LMSTUDIO_PROVIDER_LABEL} base URL`,
-    initialValue: LMSTUDIO_DEFAULT_BASE_URL,
-    placeholder: LMSTUDIO_DEFAULT_BASE_URL,
+    initialValue: defaultBaseUrl,
+    placeholder: defaultBaseUrl,
     validate: (value) => (value?.trim() ? undefined : "Required"),
   });
-  const baseUrl = resolveLmstudioInferenceBase(baseUrlRaw ?? "");
+  const baseUrl = resolveLmstudioInferenceBase(baseUrlRaw ?? defaultBaseUrl);
   let credentialInput: SecretInput | undefined;
   let credentialMode: SecretInputMode | undefined;
   const implicitRefMode = params.allowSecretRefPrompt === false && !params.secretInputMode;

@@ -548,7 +567,7 @@ export async function configureLmstudioNonInteractive(
 ): Promise<OpenClawConfig | null> {
   const customBaseUrl = normalizeOptionalSecretInput(ctx.opts.customBaseUrl);
   const baseUrl = resolveLmstudioInferenceBase(
-    customBaseUrl || LMSTUDIO_DEFAULT_INFERENCE_BASE_URL,
+    customBaseUrl || resolveLmstudioSetupDefaultInferenceBaseUrl(),
   );
   const normalizedCtx = customBaseUrl
     ? {

@@ -564,7 +583,7 @@ export async function configureLmstudioNonInteractive(
     ctx: configureCtx,
     providerId: PROVIDER_ID,
     providerLabel: LMSTUDIO_PROVIDER_LABEL,
-    defaultBaseUrl: LMSTUDIO_DEFAULT_INFERENCE_BASE_URL,
+    defaultBaseUrl: resolveLmstudioSetupDefaultInferenceBaseUrl(),
     defaultApiKeyEnvVar: LMSTUDIO_DEFAULT_API_KEY_ENV_VAR,
     modelPlaceholder: LMSTUDIO_MODEL_PLACEHOLDER,
   });

@@ -1,4 +1,5 @@
 export const OLLAMA_DEFAULT_BASE_URL = "http://127.0.0.1:11434";
+export const OLLAMA_DOCKER_HOST_BASE_URL = "http://host.docker.internal:11434";
 export const OLLAMA_CLOUD_BASE_URL = "https://ollama.com";
 
 export const OLLAMA_DEFAULT_CONTEXT_WINDOW = 128000;

@@ -10,6 +10,17 @@ import {
 } from "./setup.js";
 
 const upsertAuthProfileWithLock = vi.hoisted(() => vi.fn(async () => {}));
+const fetchWithSsrFGuardMock = vi.hoisted(() =>
+  vi.fn(async (params: { url: string; init?: RequestInit; signal?: AbortSignal }) => ({
+    response: await globalThis.fetch(params.url, {
+      ...params.init,
+      ...(params.signal ? { signal: params.signal } : {}),
+    }),
+    finalUrl: params.url,
+    release: async () => {},
+  })),
+);
+
 vi.mock("openclaw/plugin-sdk/provider-auth", async (importOriginal) => {
   const actual = await importOriginal<typeof import("openclaw/plugin-sdk/provider-auth")>();
   return {

@@ -18,6 +29,15 @@ vi.mock("openclaw/plugin-sdk/provider-auth", async (importOriginal) => {
   };
 });
 
+vi.mock("openclaw/plugin-sdk/ssrf-runtime", async (importOriginal) => {
+  const actual = await importOriginal<typeof import("openclaw/plugin-sdk/ssrf-runtime")>();
+  return {
+    ...actual,
+    fetchWithSsrFGuard: (...args: Parameters<typeof actual.fetchWithSsrFGuard>) =>
+      fetchWithSsrFGuardMock(...args),
+  };
+});
+
 function createOllamaFetchMock(params: {
   tags?: string[];
   show?: Record<string, number | undefined>;

@@ -93,7 +113,9 @@ function createRuntime() {
 describe("ollama setup", () => {
   afterEach(() => {
     vi.unstubAllGlobals();
+    vi.unstubAllEnvs();
     upsertAuthProfileWithLock.mockClear();
+    fetchWithSsrFGuardMock.mockClear();
     resetOllamaModelShowInfoCacheForTest();
   });

@@ -112,6 +134,34 @@ describe("ollama setup", () => {
     expect(modelIds?.[0]).toBe("gemma4");
   });
 
+  it("Docker setup defaults to the host Ollama endpoint", async () => {
+    vi.stubEnv("OPENCLAW_DOCKER_SETUP", "1");
+    const prompter = {
+      select: vi.fn().mockResolvedValueOnce("local-only"),
+      text: vi.fn().mockResolvedValueOnce("http://host.docker.internal:11434"),
+      note: vi.fn(async () => undefined),
+    } as unknown as WizardPrompter;
+
+    const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] });
+    vi.stubGlobal("fetch", fetchMock);
+
+    const result = await promptAndConfigureOllama({
+      cfg: {},
+      prompter,
+    });
+
+    expect(prompter.text).toHaveBeenCalledWith(
+      expect.objectContaining({
+        initialValue: "http://host.docker.internal:11434",
+        placeholder: "http://host.docker.internal:11434",
+      }),
+    );
+    expect(fetchMock.mock.calls[0]?.[0]).toBe("http://host.docker.internal:11434/api/tags");
+    expect(result.config.models?.providers?.ollama?.baseUrl).toBe(
+      "http://host.docker.internal:11434",
+    );
+  });
+
   it("puts suggested cloud model first in cloud mode", async () => {
     const prompter = createCloudPrompter();
     vi.stubGlobal("fetch", createOllamaFetchMock({ tags: [] }));

@@ -23,6 +23,7 @@ import {
 import {
   OLLAMA_CLOUD_BASE_URL,
   OLLAMA_DEFAULT_BASE_URL,
+  OLLAMA_DOCKER_HOST_BASE_URL,
   OLLAMA_DEFAULT_MODEL,
 } from "./defaults.js";
 import { readProviderBaseUrl } from "./provider-base-url.js";

@@ -56,6 +57,16 @@ type OllamaSetupResult = {
   credentialMode?: SecretInputMode;
 };
 
+function isTruthyEnvValue(value: string | undefined): boolean {
+  return ["1", "true", "yes", "on"].includes(value?.trim().toLowerCase() ?? "");
+}
+
+function resolveOllamaSetupDefaultBaseUrl(env: NodeJS.ProcessEnv = process.env): string {
+  return isTruthyEnvValue(env.OPENCLAW_DOCKER_SETUP)
+    ? OLLAMA_DOCKER_HOST_BASE_URL
+    : OLLAMA_DEFAULT_BASE_URL;
+}
+
 type OllamaInteractiveMode = "cloud-local" | "cloud-only" | "local-only";
 type HostBackedOllamaInteractiveMode = Exclude<OllamaInteractiveMode, "cloud-only">;

@@ -457,14 +468,18 @@ async function storeOllamaCredential(agentDir?: string): Promise<void> {
   });
 }
 
-async function promptForOllamaBaseUrl(prompter: WizardPrompter): Promise<string> {
+async function promptForOllamaBaseUrl(
+  prompter: WizardPrompter,
+  env: NodeJS.ProcessEnv = process.env,
+): Promise<string> {
+  const defaultBaseUrl = resolveOllamaSetupDefaultBaseUrl(env);
   const baseUrlRaw = await prompter.text({
     message: "Ollama base URL",
-    initialValue: OLLAMA_DEFAULT_BASE_URL,
-    placeholder: OLLAMA_DEFAULT_BASE_URL,
+    initialValue: defaultBaseUrl,
+    placeholder: defaultBaseUrl,
     validate: (value) => (value?.trim() ? undefined : "Required"),
   });
-  return resolveOllamaApiBase((baseUrlRaw ?? "").trim().replace(/\/+$/, ""));
+  return resolveOllamaApiBase((baseUrlRaw ?? defaultBaseUrl).trim().replace(/\/+$/, ""));
 }
 
 async function resolveHostBackedSuggestedModelNames(params: {

@@ -493,8 +508,9 @@ async function promptAndConfigureHostBackedOllama(params: {
   cfg: OpenClawConfig;
   mode: HostBackedOllamaInteractiveMode;
   prompter: WizardPrompter;
+  env?: NodeJS.ProcessEnv;
 }): Promise<OllamaSetupResult> {
-  const baseUrl = await promptForOllamaBaseUrl(params.prompter);
+  const baseUrl = await promptForOllamaBaseUrl(params.prompter, params.env);
   const { reachable, models } = await fetchOllamaModels(baseUrl);
 
   if (!reachable) {

@@ -586,6 +602,7 @@ export async function promptAndConfigureOllama(params: {
     cfg: params.cfg,
     mode,
     prompter: params.prompter,
+    env: params.env,
   });
 }

@@ -596,7 +613,7 @@ export async function configureOllamaNonInteractive(params: {
   agentDir?: string;
 }): Promise<OpenClawConfig> {
   const baseUrl = resolveOllamaApiBase(
-    (params.opts.customBaseUrl?.trim() || OLLAMA_DEFAULT_BASE_URL).replace(/\/+$/, ""),
+    (params.opts.customBaseUrl?.trim() || resolveOllamaSetupDefaultBaseUrl()).replace(/\/+$/, ""),
   );
   const { reachable, models } = await fetchOllamaModels(baseUrl);
   const explicitModel = normalizeOllamaModelName(params.opts.customModelId);

@@ -285,6 +285,7 @@ export OPENCLAW_HOME_VOLUME="$HOME_VOLUME_NAME"
 export OPENCLAW_ALLOW_INSECURE_PRIVATE_WS="${OPENCLAW_ALLOW_INSECURE_PRIVATE_WS:-}"
 export OPENCLAW_SANDBOX="$SANDBOX_ENABLED"
 export OPENCLAW_DOCKER_SOCKET="$DOCKER_SOCKET_PATH"
+export OPENCLAW_DOCKER_SETUP=1
 export OPENCLAW_TZ="$TIMEZONE"
 export OTEL_EXPORTER_OTLP_ENDPOINT="${OTEL_EXPORTER_OTLP_ENDPOINT:-}"
 export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT="${OTEL_EXPORTER_OTLP_TRACES_ENDPOINT:-}"