fix(ollama): pass provider headers to Ollama stream function (#24285)

createOllamaStreamFn() only accepted baseUrl, ignoring custom headers
configured in models.providers.<provider>.headers. This caused 403
errors when Ollama endpoints are behind reverse proxies that require
auth headers (e.g. X-OLLAMA-KEY via HAProxy).

Add an optional defaultHeaders parameter to createOllamaStreamFn() and
merge those headers into every fetch request. Provider headers from config are
now passed through at the call site in the embedded runner.

Fixes #24285
This commit is contained in:
echoVic
2026-02-23 16:56:15 +08:00
committed by Shakker
parent 76bfd9b5e6
commit 7597fc556c
2 changed files with 6 additions and 2 deletions

View File

@@ -405,7 +405,10 @@ function resolveOllamaChatUrl(baseUrl: string): string {
return `${apiBase}/api/chat`;
}
export function createOllamaStreamFn(baseUrl: string): StreamFn {
export function createOllamaStreamFn(
baseUrl: string,
defaultHeaders?: Record<string, string>,
): StreamFn {
const chatUrl = resolveOllamaChatUrl(baseUrl);
return (model, context, options) => {
@@ -440,6 +443,7 @@ export function createOllamaStreamFn(baseUrl: string): StreamFn {
const headers: Record<string, string> = {
"Content-Type": "application/json",
...defaultHeaders,
...options?.headers,
};
if (options?.apiKey) {

View File

@@ -1022,7 +1022,7 @@ export async function runEmbeddedAttempt(
modelBaseUrl,
providerBaseUrl,
});
activeSession.agent.streamFn = createOllamaStreamFn(ollamaBaseUrl);
activeSession.agent.streamFn = createOllamaStreamFn(ollamaBaseUrl, params.model.headers);
} else if (params.model.api === "openai-responses" && params.provider === "openai") {
const wsApiKey = await params.authStorage.getApiKey(params.provider);
if (wsApiKey) {