onboard (ollama): populate cloud-only model list from ollama.com/api/tags (#68463)

Merged via squash.

Prepared head SHA: fb12af3d63
Co-authored-by: BruceMacD <5853428+BruceMacD@users.noreply.github.com>
Reviewed-by: @BruceMacD
Authored by Bruce MacDonald on 2026-04-21 08:51:54 -07:00; committed by GitHub.
parent 06b4e3885e
commit 1be94b7a37
3 changed files with 67 additions and 5 deletions


@@ -2,6 +2,12 @@
 Docs: https://docs.openclaw.ai
 ## Unreleased
 ### Changes
+- Ollama/onboard: populate the cloud-only model list from `ollama.com/api/tags` so `openclaw onboard` reflects the live cloud catalog instead of a static three-model seed; cap the discovered list at 500 and fall back to the previous hardcoded suggestions when ollama.com is unreachable or returns no models. (#68463) Thanks @BruceMacD.
 ## 2026.4.20
 ### Changes
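The onboarding flow in this commit leans on a `fetchOllamaModels` helper that is referenced but never shown in the hunks below. As a rough sketch of the discovery step it performs — assuming the `ollama.com/api/tags` response has the same `{ "models": [{ "name": "..." }] }` shape as a local Ollama server's `/api/tags`, and that unreachability is reported rather than thrown — it might look like this (the name, types, and error handling are illustrative, not the project's actual code):

```ts
// Hypothetical sketch only: the real fetchOllamaModels is defined outside
// the hunks in this commit. Assumes /api/tags returns { models: [{ name }] }.
type DiscoveredModel = { name: string };

async function fetchOllamaModelsSketch(
  baseUrl: string,
): Promise<{ reachable: boolean; models: DiscoveredModel[] }> {
  try {
    const res = await fetch(new URL("/api/tags", baseUrl));
    if (!res.ok) return { reachable: false, models: [] };
    const body = (await res.json()) as { models?: { name?: string }[] };
    const models = (body.models ?? [])
      .map((entry) => entry.name)
      .filter((name): name is string => typeof name === "string")
      .map((name) => ({ name }));
    return { reachable: true, models };
  } catch {
    // Network failure: callers fall back to the hardcoded suggestions.
    return { reachable: false, models: [] };
  }
}
```

Per the changelog entry above, an unreachable host and an empty `models` list both leave onboarding on the previous hardcoded three-model seed.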


@@ -114,6 +114,7 @@ describe("ollama setup", () => {
   it("puts suggested cloud model first in cloud mode", async () => {
     const prompter = createCloudPrompter();
+    vi.stubGlobal("fetch", createOllamaFetchMock({ tags: [] }));
     const result = await promptAndConfigureOllama({
       cfg: {},
       env: {},
@@ -130,6 +131,7 @@ describe("ollama setup", () => {
   it("uses generic token flags for cloud-only setup", async () => {
     const prompter = createCloudPrompter();
+    vi.stubGlobal("fetch", createOllamaFetchMock({ tags: [] }));
     const result = await promptAndConfigureOllama({
       cfg: {},
@@ -189,7 +191,7 @@ describe("ollama setup", () => {
   it("cloud mode does not hit local Ollama endpoints", async () => {
     const prompter = createCloudPrompter();
-    const fetchMock = vi.fn();
+    const fetchMock = createOllamaFetchMock({ tags: [] });
     vi.stubGlobal("fetch", fetchMock);
     await promptAndConfigureOllama({
@@ -199,7 +201,12 @@ describe("ollama setup", () => {
       allowSecretRefPrompt: false,
     });
-    expect(fetchMock).not.toHaveBeenCalled();
+    expect(fetchMock.mock.calls.some((call) => requestUrl(call[0]).includes("127.0.0.1"))).toBe(
+      false,
+    );
+    expect(fetchMock.mock.calls.some((call) => requestUrl(call[0]).includes("ollama.com"))).toBe(
+      true,
+    );
   });
   it("rejects the local marker during cloud-only setup", async () => {
@@ -250,6 +257,7 @@ describe("ollama setup", () => {
       }),
       note: vi.fn(async () => undefined),
     } as unknown as WizardPrompter;
+    vi.stubGlobal("fetch", createOllamaFetchMock({ tags: [] }));
    await promptAndConfigureOllama({
       cfg: {},
@@ -315,8 +323,9 @@ describe("ollama setup", () => {
     );
   });
-  it("cloud mode seeds the hosted cloud model list", async () => {
+  it("cloud mode falls back to the hardcoded cloud model list when /api/tags is empty", async () => {
     const prompter = createCloudPrompter();
+    vi.stubGlobal("fetch", createOllamaFetchMock({ tags: [] }));
     const result = await promptAndConfigureOllama({
       cfg: {},
       env: {},
@@ -333,6 +342,36 @@ describe("ollama setup", () => {
     ]);
   });
it("cloud mode populates models from ollama.com /api/tags when reachable", async () => {
const prompter = createCloudPrompter();
const fetchMock = createOllamaFetchMock({
tags: ["qwen3-coder:480b-cloud", "gpt-oss:120b-cloud"],
show: { "qwen3-coder:480b-cloud": 262144 },
});
vi.stubGlobal("fetch", fetchMock);
const result = await promptAndConfigureOllama({
cfg: {},
env: {},
prompter,
allowSecretRefPrompt: false,
});
const models = result.config.models?.providers?.ollama?.models;
const modelIds = models?.map((m) => m.id);
expect(modelIds).toEqual([
"kimi-k2.5:cloud",
"minimax-m2.7:cloud",
"glm-5.1:cloud",
"qwen3-coder:480b-cloud",
"gpt-oss:120b-cloud",
]);
expect(models?.find((m) => m.id === "qwen3-coder:480b-cloud")?.contextWindow).toBe(262144);
expect(
fetchMock.mock.calls.some((call) => requestUrl(call[0]) === "https://ollama.com/api/tags"),
).toBe(true);
});
it("uses /api/show context windows when building Ollama model configs", async () => {
const prompter = {
text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"),
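These tests lean on two helpers, `createOllamaFetchMock` and `requestUrl`, that live elsewhere in the suite and are not part of this diff. A plausible sketch, assuming the mock serves `/api/tags` from `tags` and answers `/api/show`-style lookups from `show` — the exact `/api/show` request and response shapes here are assumptions, not the suite's real implementation:

```ts
import { vi } from "vitest";

// Hypothetical sketch of the suite's requestUrl: normalize a fetch argument
// (string | URL | Request) to a plain URL string.
function requestUrl(input: RequestInfo | URL): string {
  if (typeof input === "string") return input;
  if (input instanceof URL) return input.toString();
  return input.url;
}

// Hypothetical sketch of createOllamaFetchMock.
function createOllamaFetchMock(opts: { tags: string[]; show?: Record<string, number> }) {
  return vi.fn(async (input: RequestInfo | URL, init?: RequestInit) => {
    const url = requestUrl(input);
    if (url.endsWith("/api/tags")) {
      // Mirror /api/tags: one entry per model name.
      return new Response(JSON.stringify({ models: opts.tags.map((name) => ({ name })) }));
    }
    if (url.endsWith("/api/show")) {
      // Assumed shape: hand back a context window keyed by the model name
      // found in the request body.
      const { model } = JSON.parse(String(init?.body ?? "{}")) as { model?: string };
      const context_length = model ? opts.show?.[model] : undefined;
      return new Response(JSON.stringify({ context_length }));
    }
    return new Response("{}", { status: 404 });
  });
}
```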


@@ -40,6 +40,7 @@ export { buildOllamaProvider };
 const OLLAMA_SUGGESTED_MODELS_LOCAL = [OLLAMA_DEFAULT_MODEL];
 const OLLAMA_SUGGESTED_MODELS_CLOUD = ["kimi-k2.5:cloud", "minimax-m2.7:cloud", "glm-5.1:cloud"];
 const OLLAMA_CONTEXT_ENRICH_LIMIT = 200;
+const OLLAMA_CLOUD_MAX_DISCOVERED_MODELS = 500;

 type OllamaSetupOptions = {
   customBaseUrl?: string;
@@ -499,14 +500,30 @@ export async function promptAndConfigureOllama(params: {
     secretInputMode: params.secretInputMode,
     allowSecretRefPrompt: params.allowSecretRefPrompt,
   });
+  const { reachable, models: rawDiscoveredModels } =
+    await fetchOllamaModels(OLLAMA_CLOUD_BASE_URL);
+  const discoveredModels = rawDiscoveredModels.slice(0, OLLAMA_CLOUD_MAX_DISCOVERED_MODELS);
+  const enrichedModels =
+    reachable && discoveredModels.length > 0
+      ? await enrichOllamaModelsWithContext(
+          OLLAMA_CLOUD_BASE_URL,
+          discoveredModels.slice(0, OLLAMA_CONTEXT_ENRICH_LIMIT),
+        )
+      : [];
+  const discoveredModelsByName = new Map(enrichedModels.map((model) => [model.name, model]));
+  const discoveredModelNames = discoveredModels.map((model) => model.name);
+  const modelNames =
+    discoveredModelNames.length > 0
+      ? mergeUniqueModelNames(OLLAMA_SUGGESTED_MODELS_CLOUD, discoveredModelNames)
+      : OLLAMA_SUGGESTED_MODELS_CLOUD;
   return {
     credential,
     credentialMode,
     config: applyOllamaProviderConfig(
       params.cfg,
       OLLAMA_CLOUD_BASE_URL,
-      OLLAMA_SUGGESTED_MODELS_CLOUD,
-      undefined,
+      modelNames,
+      discoveredModelsByName,
       credential,
     ),
   };
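`mergeUniqueModelNames` is also defined outside the lines shown here. Going by the test expectations above (the three suggested cloud models keep their place at the front, discovered names follow, duplicates collapse), a minimal sketch might be:

```ts
// Hypothetical sketch: the real mergeUniqueModelNames is not shown in this
// commit. Suggested names keep their order at the front; discovered names
// are appended; duplicates are dropped first-seen-wins.
function mergeUniqueModelNames(suggested: string[], discovered: string[]): string[] {
  const seen = new Set<string>();
  const merged: string[] = [];
  for (const name of [...suggested, ...discovered]) {
    if (!seen.has(name)) {
      seen.add(name);
      merged.push(name);
    }
  }
  return merged;
}

// Matches the test above:
// mergeUniqueModelNames(
//   ["kimi-k2.5:cloud", "minimax-m2.7:cloud", "glm-5.1:cloud"],
//   ["qwen3-coder:480b-cloud", "gpt-oss:120b-cloud"],
// )
// => ["kimi-k2.5:cloud", "minimax-m2.7:cloud", "glm-5.1:cloud",
//     "qwen3-coder:480b-cloud", "gpt-oss:120b-cloud"]
```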