Files
openclaw/src/agents/ollama-models.ts
Bruce MacDonald d6108a6f72 Onboard: add Ollama auth flow and improve model defaults
Add Ollama as an auth provider in onboarding with Cloud + Local mode
selection, browser-based sign-in via /api/me, smart model suggestions
per mode, and graceful fallback when the default model is unavailable.

- Extract shared ollama-models.ts
- Auto-pull missing models during onboarding
- Non-interactive mode support for CI/automation

Closes #8239
Closes #3494

Co-Authored-By: Jeffrey Morgan <jmorganca@gmail.com>
2026-03-11 14:52:55 +00:00

86 lines
2.5 KiB
TypeScript

import type { ModelDefinitionConfig } from "../config/types.models.js";
import { OLLAMA_NATIVE_BASE_URL } from "./ollama-stream.js";
// Re-exported so consumers of this module don't need to import
// ollama-stream directly for the native base URL.
export const OLLAMA_DEFAULT_BASE_URL = OLLAMA_NATIVE_BASE_URL;
// Fallback context window (tokens) used when no value is supplied.
export const OLLAMA_DEFAULT_CONTEXT_WINDOW = 128000;
// Default cap on tokens generated per response.
export const OLLAMA_DEFAULT_MAX_TOKENS = 8192;
// Locally served models incur no per-token charges, so every pricing
// field defaults to zero.
export const OLLAMA_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};
/**
 * One model entry as returned by Ollama's native `GET /api/tags`
 * endpoint. Only `name` is relied upon elsewhere in this file; the
 * remaining fields mirror the API response and are optional.
 */
export type OllamaTagModel = {
  name: string;
  modified_at?: string;
  size?: number;
  digest?: string;
  // NOTE(review): presumably set when the model is served by a remote
  // (cloud) host rather than locally — confirm against the Ollama API.
  remote_host?: string;
  details?: {
    family?: string;
    parameter_size?: string;
  };
};
/** Response body shape for `GET /api/tags`. */
export type OllamaTagsResponse = {
  models?: OllamaTagModel[];
};
/**
 * Resolve the native Ollama API root from a user-configured base URL.
 *
 * Configured URLs often carry a `/v1` suffix (the OpenAI-compatible
 * endpoint, e.g. `http://192.168.20.14:11434/v1`), while the native API
 * (`/api/tags`, ...) is served from the root. Trailing slashes and a
 * final `/v1` path segment are therefore removed.
 *
 * @param configuredBaseUrl - User-supplied base URL; when absent or
 *   empty, the module default is returned.
 * @returns The base URL with trailing slashes and any `/v1` suffix stripped.
 */
export function resolveOllamaApiBase(configuredBaseUrl?: string): string {
  if (!configuredBaseUrl) {
    return OLLAMA_DEFAULT_BASE_URL;
  }
  // Strip trailing slashes first so "…/v1/" is also recognized.
  const withoutTrailingSlashes = configuredBaseUrl.replace(/\/+$/, "");
  const withoutV1Suffix = withoutTrailingSlashes.replace(/\/v1$/i, "");
  return withoutV1Suffix;
}
/**
 * Heuristic: treat a model as reasoning-capable when its id contains
 * "r1", "reason" (which also covers "reasoning"/"reasoner"), or "think".
 *
 * @param modelId - Ollama model identifier, e.g. `deepseek-r1:7b`.
 * @returns `true` when the id matches a reasoning-model name pattern.
 */
export function isReasoningModelHeuristic(modelId: string): boolean {
  // "reason" subsumes "reasoning", so the former redundant alternative
  // was dropped; behavior is unchanged.
  return /r1|reason|think/i.test(modelId);
}
/**
 * Assemble a ModelDefinitionConfig for an Ollama model, filling in
 * zero-cost pricing and the module's default token limits.
 *
 * @param modelId - Ollama model identifier; used as both id and display name.
 * @param contextWindow - Optional context window; falls back to the
 *   module default when omitted.
 */
export function buildOllamaModelDefinition(
  modelId: string,
  contextWindow?: number,
): ModelDefinitionConfig {
  const reasoning = isReasoningModelHeuristic(modelId);
  const resolvedContextWindow = contextWindow ?? OLLAMA_DEFAULT_CONTEXT_WINDOW;
  return {
    id: modelId,
    name: modelId,
    reasoning,
    input: ["text"],
    cost: OLLAMA_DEFAULT_COST,
    contextWindow: resolvedContextWindow,
    maxTokens: OLLAMA_DEFAULT_MAX_TOKENS,
  };
}
/**
 * Query a running Ollama instance for its installed models via the
 * native `GET /api/tags` endpoint.
 *
 * @param baseUrl - Configured base URL; `/v1` suffixes are handled by
 *   resolveOllamaApiBase.
 * @returns `reachable: false` with an empty list when the request
 *   throws (network error, 5s timeout, or invalid JSON body);
 *   `reachable: true` with an empty list on a non-OK HTTP status.
 */
export async function fetchOllamaModels(
  baseUrl: string,
): Promise<{ reachable: boolean; models: OllamaTagModel[] }> {
  try {
    const apiBase = resolveOllamaApiBase(baseUrl);
    // Cap the request at 5 seconds so callers never hang on a dead host.
    const response = await fetch(`${apiBase}/api/tags`, {
      signal: AbortSignal.timeout(5000),
    });
    if (!response.ok) {
      // Server answered but refused: it is reachable, just not usable.
      return { reachable: true, models: [] };
    }
    const payload = (await response.json()) as OllamaTagsResponse;
    // Drop any entries the server returns without a name.
    const namedModels = (payload.models ?? []).filter((model) => model.name);
    return { reachable: true, models: namedModels };
  } catch {
    // Connection refused, DNS failure, abort/timeout, or JSON parse error.
    return { reachable: false, models: [] };
  }
}