mirror of
https://github.com/openclaw/openclaw.git
synced 2026-03-21 23:11:01 +00:00
fix: preserve interactive Ollama model selection (#49249) (thanks @BruceMacD)
This commit is contained in:
@@ -46,6 +46,7 @@ Docs: https://docs.openclaw.ai
|
||||
|
||||
### Fixes
|
||||
|
||||
- CLI/Ollama onboarding: keep the interactive model picker for explicit `openclaw onboard --auth-choice ollama` runs so setup still selects a default model without reintroducing pre-picker auto-pulls. (#49249) Thanks @BruceMacD.
|
||||
- Plugins/bundler TDZ: fix `RESERVED_COMMANDS` temporal dead zone error that prevented device-pair, phone-control, and talk-voice plugins from registering when the bundler placed the commands module after call sites in the same output chunk. Thanks @BunsDev.
|
||||
- Plugins/imports: fix stale googlechat runtime-api import paths and signal SDK circular re-exports broken by recent plugin-sdk refactors. Thanks @BunsDev.
|
||||
- Google auth/Node 25: patch `gaxios` to use native fetch without injecting `globalThis.window`, while translating proxy and mTLS transport settings so Google Vertex and Google Chat auth keep working on Node 25. (#47914) Thanks @pdd-cli.
|
||||
|
||||
100 extensions/ollama/index.test.ts (new file)
@@ -0,0 +1,100 @@
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import { createTestPluginApi } from "../../test/helpers/extensions/plugin-api.js";
|
||||
import plugin from "./index.js";
|
||||
|
||||
const promptAndConfigureOllamaMock = vi.hoisted(() =>
|
||||
vi.fn(async () => ({
|
||||
config: {
|
||||
models: {
|
||||
providers: {
|
||||
ollama: {
|
||||
baseUrl: "http://127.0.0.1:11434",
|
||||
api: "ollama",
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})),
|
||||
);
|
||||
const ensureOllamaModelPulledMock = vi.hoisted(() => vi.fn(async () => {}));
|
||||
|
||||
vi.mock("openclaw/plugin-sdk/ollama-setup", () => ({
|
||||
promptAndConfigureOllama: promptAndConfigureOllamaMock,
|
||||
ensureOllamaModelPulled: ensureOllamaModelPulledMock,
|
||||
configureOllamaNonInteractive: vi.fn(),
|
||||
buildOllamaProvider: vi.fn(),
|
||||
}));
|
||||
|
||||
function registerProvider() {
|
||||
const registerProviderMock = vi.fn();
|
||||
|
||||
plugin.register(
|
||||
createTestPluginApi({
|
||||
id: "ollama",
|
||||
name: "Ollama",
|
||||
source: "test",
|
||||
config: {},
|
||||
runtime: {} as never,
|
||||
registerProvider: registerProviderMock,
|
||||
}),
|
||||
);
|
||||
|
||||
expect(registerProviderMock).toHaveBeenCalledTimes(1);
|
||||
return registerProviderMock.mock.calls[0]?.[0];
|
||||
}
|
||||
|
||||
describe("ollama plugin", () => {
|
||||
it("does not preselect a default model during provider auth setup", async () => {
|
||||
const provider = registerProvider();
|
||||
|
||||
const result = await provider.auth[0].run({
|
||||
config: {},
|
||||
prompter: {} as never,
|
||||
});
|
||||
|
||||
expect(promptAndConfigureOllamaMock).toHaveBeenCalledWith({
|
||||
cfg: {},
|
||||
prompter: {},
|
||||
});
|
||||
expect(result.configPatch).toEqual({
|
||||
models: {
|
||||
providers: {
|
||||
ollama: {
|
||||
baseUrl: "http://127.0.0.1:11434",
|
||||
api: "ollama",
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
expect(result.defaultModel).toBeUndefined();
|
||||
});
|
||||
|
||||
it("pulls the model the user actually selected", async () => {
|
||||
const provider = registerProvider();
|
||||
const config = {
|
||||
models: {
|
||||
providers: {
|
||||
ollama: {
|
||||
baseUrl: "http://127.0.0.1:11434",
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
const prompter = {} as never;
|
||||
|
||||
await provider.onModelSelected?.({
|
||||
config,
|
||||
model: "ollama/glm-4.7-flash",
|
||||
prompter,
|
||||
});
|
||||
|
||||
expect(ensureOllamaModelPulledMock).toHaveBeenCalledWith({
|
||||
config,
|
||||
model: "ollama/glm-4.7-flash",
|
||||
prompter,
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -410,6 +410,33 @@ describe("runSetupWizard", () => {
|
||||
}
|
||||
});
|
||||
|
||||
it("prompts for a model during explicit interactive Ollama setup", async () => {
|
||||
promptDefaultModel.mockClear();
|
||||
const prompter = buildWizardPrompter({});
|
||||
const runtime = createRuntime();
|
||||
|
||||
await runSetupWizard(
|
||||
{
|
||||
acceptRisk: true,
|
||||
flow: "quickstart",
|
||||
authChoice: "ollama",
|
||||
installDaemon: false,
|
||||
skipSkills: true,
|
||||
skipSearch: true,
|
||||
skipHealth: true,
|
||||
skipUi: true,
|
||||
},
|
||||
runtime,
|
||||
prompter,
|
||||
);
|
||||
|
||||
expect(promptDefaultModel).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
allowKeep: false,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("shows plugin compatibility notices for an existing valid config", async () => {
|
||||
buildPluginCompatibilityNotices.mockReturnValue([
|
||||
{
|
||||
|
||||
@@ -482,7 +482,9 @@ export async function runSetupWizard(
|
||||
}
|
||||
}
|
||||
|
||||
if (authChoiceFromPrompt && authChoice !== "custom-api-key") {
|
||||
const shouldPromptModelSelection =
|
||||
authChoice !== "custom-api-key" && (authChoiceFromPrompt || authChoice === "ollama");
|
||||
if (shouldPromptModelSelection) {
|
||||
const modelSelection = await promptDefaultModel({
|
||||
config: nextConfig,
|
||||
prompter,
|
||||
|
||||
Reference in New Issue
Block a user