fix(ollama): prioritize provider baseUrl for embedded runner (#30964)

* fix(ollama): honor provider baseUrl in embedded runner

* Embedded Ollama: clarify provider baseUrl precedence comment

* Changelog: note embedded Ollama baseUrl precedence fix

* Telegram: apply required formatter update in accounts config merge

* Revert "Telegram: apply required formatter update in accounts config merge"

This reverts commit d372b26975.

* Update CHANGELOG.md

---------

Co-authored-by: User <user@example.com>
Co-authored-by: Vincent Koc <vincentkoc@ieee.org>
This commit is contained in:
不做了睡大觉
2026-03-02 10:38:42 +08:00
committed by GitHub
parent fd341d0d3f
commit e482da6682
3 changed files with 47 additions and 4 deletions

View File

@@ -27,6 +27,7 @@ Docs: https://docs.openclaw.ai
- Control UI/Debug log layout: render Debug Event Log payloads at full width to prevent payload JSON from being squeezed into a narrow side column. Landed from contributor PR #30978 by @stozo04. Thanks @stozo04.
- Cron editor viewport: make the sticky cron edit form independently scrollable with viewport-bounded height so lower fields/actions are reachable on shorter screens. Landed from contributor PR #31133 by @Sid-Qin. Thanks @Sid-Qin.
- Agents/Thinking fallback: when providers reject unsupported thinking levels without enumerating alternatives, retry with `think=off` to avoid hard failure during model/provider fallback chains. Landed from contributor PR #31002 by @yfge. Thanks @yfge.
- Ollama/Embedded runner base URL precedence: prioritize configured provider `baseUrl` over model defaults for embedded Ollama runs so Docker and remote-host setups avoid localhost fetch failures. Landed from contributor PR #30964 by @stakeswky. Thanks @stakeswky.
- Agents/Failover reason classification: avoid false rate-limit classification from incidental `tpm` substrings by matching TPM as a standalone token/phrase and keeping auth-context errors on the auth path. Landed from contributor PR #31007 by @HOYALIM. Thanks @HOYALIM.
- Slack/Announce target account routing: enable session-backed announce-target lookup for Slack so multi-account announces resolve the correct `accountId` instead of defaulting to bot-token context. Landed from contributor PR #31028 by @taw0002. Thanks @taw0002.
- Tools/Edit workspace boundary errors: preserve the real `Path escapes workspace root` failure path instead of surfacing a misleading access/file-not-found error when editing outside workspace roots. Landed from contributor PR #31015 by @haosenwang1018. Thanks @haosenwang1018.

View File

@@ -3,6 +3,7 @@ import type { OpenClawConfig } from "../../../config/config.js";
import {
isOllamaCompatProvider,
resolveAttemptFsWorkspaceOnly,
resolveOllamaBaseUrlForRun,
resolveOllamaCompatNumCtxEnabled,
resolvePromptBuildHookResult,
resolvePromptModeForSession,
@@ -285,6 +286,29 @@ describe("isOllamaCompatProvider", () => {
});
});
// Covers the embedded-runner base URL precedence chain (#30964):
// provider baseUrl > model baseUrl > native localhost default.
// The provider-wins ordering is the fix: Docker/remote hosts configure the
// provider entry, so it must not be shadowed by a model-level default.
describe("resolveOllamaBaseUrlForRun", () => {
// Provider-level config should win even when the model carries its own baseUrl.
it("prefers provider baseUrl over model baseUrl", () => {
expect(
resolveOllamaBaseUrlForRun({
modelBaseUrl: "http://model-host:11434",
providerBaseUrl: "http://provider-host:11434",
}),
).toBe("http://provider-host:11434");
});
// Without a provider override, the model-level URL is still honored.
it("falls back to model baseUrl when provider baseUrl is missing", () => {
expect(
resolveOllamaBaseUrlForRun({
modelBaseUrl: "http://model-host:11434",
}),
).toBe("http://model-host:11434");
});
// Neither configured: the native Ollama localhost endpoint is the last resort.
it("falls back to native default when neither baseUrl is configured", () => {
expect(resolveOllamaBaseUrlForRun({})).toBe("http://127.0.0.1:11434");
});
});
describe("wrapOllamaCompatNumCtx", () => {
it("injects num_ctx and preserves downstream onPayload hooks", () => {
let payloadSeen: Record<string, unknown> | undefined;

View File

@@ -258,6 +258,21 @@ function normalizeToolCallNameForDispatch(rawName: string, allowedToolNames?: Se
return caseInsensitiveMatch ?? trimmed;
}
/**
 * Resolve the base URL to use for an embedded Ollama run.
 *
 * Precedence (highest first): provider-level `baseUrl`, then model-level
 * `baseUrl`, then the native localhost default. Provider config wins so that
 * Docker / remote-host setups are not shadowed by model defaults (#30964).
 * Blank or whitespace-only values are treated as unset.
 *
 * @param params.modelBaseUrl    Optional base URL carried on the model entry.
 * @param params.providerBaseUrl Optional base URL from the provider config.
 * @returns The first non-blank candidate, trimmed; otherwise the native default.
 */
export function resolveOllamaBaseUrlForRun(params: {
  modelBaseUrl?: string;
  providerBaseUrl?: string;
}): string {
  // Ordered candidates: provider config outranks the model-level URL.
  const candidates = [params.providerBaseUrl, params.modelBaseUrl];
  for (const candidate of candidates) {
    const trimmed = candidate?.trim();
    if (trimmed) {
      return trimmed;
    }
  }
  return OLLAMA_NATIVE_BASE_URL;
}
function trimWhitespaceFromToolCallNamesInMessage(
message: unknown,
allowedToolNames?: Set<string>,
@@ -902,13 +917,16 @@ export async function runEmbeddedAttempt(
// Ollama native API: bypass SDK's streamSimple and use direct /api/chat calls
// for reliable streaming + tool calling support (#11828).
if (params.model.api === "ollama") {
// Use the resolved model baseUrl first so custom provider aliases work.
// Prioritize configured provider baseUrl so Docker/remote Ollama hosts work reliably.
const providerConfig = params.config?.models?.providers?.[params.model.provider];
const modelBaseUrl =
typeof params.model.baseUrl === "string" ? params.model.baseUrl.trim() : "";
typeof params.model.baseUrl === "string" ? params.model.baseUrl : undefined;
const providerBaseUrl =
typeof providerConfig?.baseUrl === "string" ? providerConfig.baseUrl.trim() : "";
const ollamaBaseUrl = modelBaseUrl || providerBaseUrl || OLLAMA_NATIVE_BASE_URL;
typeof providerConfig?.baseUrl === "string" ? providerConfig.baseUrl : undefined;
const ollamaBaseUrl = resolveOllamaBaseUrlForRun({
modelBaseUrl,
providerBaseUrl,
});
activeSession.agent.streamFn = createOllamaStreamFn(ollamaBaseUrl);
} else if (params.model.api === "openai-responses" && params.provider === "openai") {
const wsApiKey = await params.authStorage.getApiKey(params.provider);