Files
openclaw/extensions/ollama/index.test.ts
Yossi Eliaz 045d956111 fix(ollama): resolve per-provider baseUrl in createStreamFn
The createStreamFn callback hardcoded config.models.providers.ollama.baseUrl,
ignoring the actual provider ID from the context. When multiple Ollama providers
are configured on different ports (e.g. ollama on 11434, ollama2 on 11435), all
requests routed to the first provider's port.

Export resolveConfiguredOllamaProviderConfig from stream.ts and use it with the
ctx.provider parameter to dynamically look up the correct baseUrl per provider.

Closes #61678
2026-04-06 20:28:07 +09:00

330 lines
9.0 KiB
TypeScript

import { beforeEach, describe, expect, it, vi } from "vitest";
import { createTestPluginApi } from "../../test/helpers/plugins/plugin-api.js";
import plugin from "./index.js";
// Hoisted mock fns so the vi.mock factories below can reference them before
// this module's real imports are evaluated.
const promptAndConfigureOllamaMock = vi.hoisted(() =>
  vi.fn(async () => {
    // Build a fresh object per call so one test cannot leak mutations into another.
    const ollama = {
      baseUrl: "http://127.0.0.1:11434",
      api: "ollama",
      models: [],
    };
    return { config: { models: { providers: { ollama } } } };
  }),
);
const ensureOllamaModelPulledMock = vi.hoisted(() => vi.fn(async () => {}));
const buildOllamaProviderMock = vi.hoisted(() => vi.fn());
const createConfiguredOllamaStreamFnMock = vi.hoisted(() =>
  vi.fn((_params: { model: unknown; providerBaseUrl?: string }) => ({}) as never),
);
// Replace the plugin's api surface entirely with the hoisted mocks above.
vi.mock("./api.js", () => {
  return {
    promptAndConfigureOllama: promptAndConfigureOllamaMock,
    ensureOllamaModelPulled: ensureOllamaModelPulledMock,
    configureOllamaNonInteractive: vi.fn(),
    buildOllamaProvider: buildOllamaProviderMock,
  };
});
// Partially mock stream.js: keep all real exports, stub only the stream factory.
vi.mock("./src/stream.js", async (importOriginal) => {
  const actual = await importOriginal<typeof import("./src/stream.js")>();
  return { ...actual, createConfiguredOllamaStreamFn: createConfiguredOllamaStreamFnMock };
});
beforeEach(() => {
  // Drop call history between tests. buildOllamaProviderMock also sheds any
  // per-test implementation, hence mockReset instead of mockClear.
  const clearOnly = [
    promptAndConfigureOllamaMock,
    ensureOllamaModelPulledMock,
    createConfiguredOllamaStreamFnMock,
  ];
  for (const mock of clearOnly) mock.mockClear();
  buildOllamaProviderMock.mockReset();
});
// Registers the plugin with an empty plugin-level config and returns the
// provider definition it handed to registerProvider.
function registerProvider() {
return registerProviderWithPluginConfig({});
}
/**
 * Registers the plugin through a test plugin API with the given plugin config
 * and returns the single provider definition the plugin registered.
 */
function registerProviderWithPluginConfig(pluginConfig: Record<string, unknown>) {
  const captureProvider = vi.fn();
  plugin.register(
    createTestPluginApi({
      id: "ollama",
      name: "Ollama",
      source: "test",
      config: {},
      pluginConfig,
      runtime: {} as never,
      registerProvider: captureProvider,
    }),
  );
  // The plugin must register exactly one provider.
  expect(captureProvider).toHaveBeenCalledTimes(1);
  return captureProvider.mock.calls[0]?.[0];
}
describe("ollama plugin", () => {
it("does not preselect a default model during provider auth setup", async () => {
  const provider = registerProvider();
  const openUrl = vi.fn(async () => undefined);
  const result = await provider.auth[0].run({
    config: {},
    prompter: {} as never,
    isRemote: false,
    openUrl,
  });
  // The auth flow must delegate straight to the configure helper.
  expect(promptAndConfigureOllamaMock).toHaveBeenCalledWith({
    cfg: {},
    prompter: {},
    isRemote: false,
    openUrl: expect.any(Function),
  });
  const expectedOllamaConfig = {
    baseUrl: "http://127.0.0.1:11434",
    api: "ollama",
    models: [],
  };
  expect(result.configPatch).toEqual({
    models: { providers: { ollama: expectedOllamaConfig } },
  });
  // No default model should be chosen on the user's behalf.
  expect(result.defaultModel).toBeUndefined();
});
it("pulls the model the user actually selected", async () => {
  const provider = registerProvider();
  const prompter = {} as never;
  const config = {
    models: {
      providers: {
        ollama: { baseUrl: "http://127.0.0.1:11434", models: [] },
      },
    },
  };
  const selection = { config, model: "ollama/glm-4.7-flash", prompter };
  await provider.onModelSelected?.(selection);
  // The pull must target the selected model, not some default.
  expect(ensureOllamaModelPulledMock).toHaveBeenCalledWith(selection);
});
it("skips ambient discovery when plugin discovery is disabled", async () => {
  const provider = registerProviderWithPluginConfig({ discovery: { enabled: false } });
  const discoveryCtx = {
    config: {},
    env: {},
    resolveProviderApiKey: () => ({ apiKey: "", discoveryApiKey: "" }),
  } as never;
  const result = await provider.discovery.run(discoveryCtx);
  // Disabled discovery returns null and never probes for a provider.
  expect(result).toBeNull();
  expect(buildOllamaProviderMock).not.toHaveBeenCalled();
});
it("wraps OpenAI-compatible payloads with num_ctx for Ollama compat routes", () => {
  const provider = registerProvider();
  let payloadSeen: Record<string, unknown> | undefined;
  // Base stream fn captures the payload after the wrapper's onPayload hook runs.
  const baseStreamFn = vi.fn((_model, _context, options) => {
    const payload: Record<string, unknown> = { options: { temperature: 0.1 } };
    options?.onPayload?.(payload, _model);
    payloadSeen = payload;
    return {} as never;
  });
  const compatModel = {
    api: "openai-completions",
    provider: "ollama",
    id: "qwen3:32b",
    baseUrl: "http://127.0.0.1:11434/v1",
    contextWindow: 202_752,
  };
  const wrapped = provider.wrapStreamFn?.({
    config: {
      models: {
        providers: {
          ollama: {
            api: "openai-completions",
            baseUrl: "http://127.0.0.1:11434/v1",
            models: [],
          },
        },
      },
    },
    provider: "ollama",
    modelId: "qwen3:32b",
    model: compatModel,
    streamFn: baseStreamFn,
  });
  expect(typeof wrapped).toBe("function");
  void wrapped?.({} as never, {} as never, {});
  expect(baseStreamFn).toHaveBeenCalledTimes(1);
  // num_ctx must be injected from the model's contextWindow.
  const payloadOptions = payloadSeen?.options as Record<string, unknown> | undefined;
  expect(payloadOptions?.num_ctx).toBe(202_752);
});
it("owns replay policy for OpenAI-compatible Ollama routes only", () => {
  const provider = registerProvider();
  // All three probes share the same call shape; only modelApi/modelId vary.
  const policyFor = (modelApi: string, modelId: string) =>
    provider.buildReplayPolicy?.({ provider: "ollama", modelApi, modelId } as never);
  expect(policyFor("openai-completions", "qwen3:32b")).toMatchObject({
    sanitizeToolCallIds: true,
    toolCallIdMode: "strict",
    applyAssistantFirstOrderingFix: true,
    validateGeminiTurns: true,
    validateAnthropicTurns: true,
  });
  expect(policyFor("openai-responses", "qwen3:32b")).toMatchObject({
    sanitizeToolCallIds: true,
    toolCallIdMode: "strict",
    applyAssistantFirstOrderingFix: false,
    validateGeminiTurns: false,
    validateAnthropicTurns: false,
  });
  // Native Ollama routes get no replay policy from this plugin.
  expect(policyFor("ollama", "qwen3.5:9b")).toBeUndefined();
});
it("routes createStreamFn to the correct provider baseUrl for ollama2", () => {
  const provider = registerProvider();
  // Two Ollama providers on different ports; the second must not fall back
  // to the first provider's baseUrl.
  const providers = {
    ollama: { api: "ollama", baseUrl: "http://127.0.0.1:11434", models: [] },
    ollama2: { api: "ollama", baseUrl: "http://127.0.0.1:11435", models: [] },
  };
  const config = { models: { providers } };
  const model = { id: "llama3.2", provider: "ollama2", baseUrl: undefined };
  provider.createStreamFn?.({ config, model, provider: "ollama2" } as never);
  expect(createConfiguredOllamaStreamFnMock).toHaveBeenCalledWith(
    expect.objectContaining({ providerBaseUrl: "http://127.0.0.1:11435" }),
  );
});
it("uses ollama provider baseUrl when provider is ollama (backward compat)", () => {
  const provider = registerProvider();
  // Same two-provider setup; the default "ollama" id must keep resolving to
  // its own port.
  const providers = {
    ollama: { api: "ollama", baseUrl: "http://127.0.0.1:11434", models: [] },
    ollama2: { api: "ollama", baseUrl: "http://127.0.0.1:11435", models: [] },
  };
  const config = { models: { providers } };
  const model = { id: "llama3.2", provider: "ollama", baseUrl: undefined };
  provider.createStreamFn?.({ config, model, provider: "ollama" } as never);
  expect(createConfiguredOllamaStreamFnMock).toHaveBeenCalledWith(
    expect.objectContaining({ providerBaseUrl: "http://127.0.0.1:11434" }),
  );
});
it("wraps native Ollama payloads with top-level think=false when thinking is off", () => {
  const provider = registerProvider();
  let payloadSeen: Record<string, unknown> | undefined;
  // Base stream fn captures the payload after the wrapper's onPayload hook runs.
  const baseStreamFn = vi.fn((_model, _context, options) => {
    const payload: Record<string, unknown> = {
      messages: [],
      options: { num_ctx: 65536 },
      stream: true,
    };
    options?.onPayload?.(payload, _model);
    payloadSeen = payload;
    return {} as never;
  });
  const nativeModel = {
    api: "ollama",
    provider: "ollama",
    id: "qwen3.5:9b",
    baseUrl: "http://127.0.0.1:11434",
    contextWindow: 131_072,
  };
  const wrapped = provider.wrapStreamFn?.({
    config: {
      models: {
        providers: {
          ollama: { api: "ollama", baseUrl: "http://127.0.0.1:11434", models: [] },
        },
      },
    },
    provider: "ollama",
    modelId: "qwen3.5:9b",
    thinkingLevel: "off",
    model: nativeModel,
    streamFn: baseStreamFn,
  });
  expect(typeof wrapped).toBe("function");
  const invokedModel = { api: "ollama", provider: "ollama", id: "qwen3.5:9b" } as never;
  void wrapped?.(invokedModel, {} as never, {});
  expect(baseStreamFn).toHaveBeenCalledTimes(1);
  // Native routes take a top-level `think` flag, never one nested in options.
  expect(payloadSeen?.think).toBe(false);
  const payloadOptions = payloadSeen?.options as Record<string, unknown> | undefined;
  expect(payloadOptions?.think).toBeUndefined();
});
});