refactor: move provider runtime into extensions

This commit is contained in:
Peter Steinberger
2026-03-27 02:34:09 +00:00
parent 53a3922e1c
commit 64bf80d4d5
60 changed files with 2830 additions and 1951 deletions

View File

@@ -1,5 +1,4 @@
import { normalizeProviderId } from "../agents/provider-id.js";
import { findCatalogTemplate } from "./provider-catalog.js";
import type {
ProviderAugmentModelCatalogContext,
ProviderBuiltInModelSuppressionContext,
@@ -10,6 +9,22 @@ const OPENAI_CODEX_PROVIDER_ID = "openai-codex";
// The Spark model id as exposed by direct OpenAI-style providers; it is
// suppressed on the providers listed below (presumably to steer users to the
// openai-codex provider instead — confirm against the suppression hook).
const OPENAI_DIRECT_SPARK_MODEL_ID = "gpt-5.3-codex-spark";
const SUPPRESSED_SPARK_PROVIDERS = new Set(["openai", "azure-openai-responses"]);
/**
 * Find the first catalog entry matching `providerId` and one of the given
 * `templateIds`, honoring the order of `templateIds` (earlier ids win).
 * Matching is case-insensitive; returns `undefined` when nothing matches.
 *
 * Short-circuits on the first hit instead of eagerly scanning the entries
 * list once per template id, and hoists the provider-id lowercasing out of
 * the per-entry comparison.
 */
function findCatalogTemplate(params: {
  entries: ReadonlyArray<{ provider: string; id: string }>;
  providerId: string;
  templateIds: readonly string[];
}) {
  const providerId = params.providerId.toLowerCase();
  for (const templateId of params.templateIds) {
    const targetId = templateId.toLowerCase();
    const match = params.entries.find(
      (entry) =>
        entry.provider.toLowerCase() === providerId && entry.id.toLowerCase() === targetId,
    );
    if (match) {
      return match;
    }
  }
  return undefined;
}
export function resolveBundledProviderBuiltInModelSuppression(
context: ProviderBuiltInModelSuppressionContext,
) {

View File

@@ -1,533 +1,8 @@
import { upsertAuthProfileWithLock } from "../agents/auth-profiles/upsert-with-lock.js";
import { OLLAMA_DEFAULT_BASE_URL } from "../agents/ollama-defaults.js";
import {
buildOllamaModelDefinition,
enrichOllamaModelsWithContext,
fetchOllamaModels,
resolveOllamaApiBase,
type OllamaModelWithContext,
} from "../agents/ollama-models.js";
import type { OpenClawConfig } from "../config/config.js";
import type { RuntimeEnv } from "../runtime.js";
import { WizardCancelledError, type WizardPrompter } from "../wizard/prompts.js";
import { applyAgentDefaultModelPrimary } from "./provider-onboarding-config.js";
import { isRemoteEnvironment, openUrl } from "./setup-browser.js";
import type { ProviderAuthOptionBag } from "./types.js";
export { OLLAMA_DEFAULT_BASE_URL } from "../agents/ollama-defaults.js";
// Default model suggested for fresh Ollama setups.
export const OLLAMA_DEFAULT_MODEL = "glm-4.7-flash";
// Suggested models surfaced first in the picker, per mode (local vs cloud).
const OLLAMA_SUGGESTED_MODELS_LOCAL = ["glm-4.7-flash"];
const OLLAMA_SUGGESTED_MODELS_CLOUD = ["kimi-k2.5:cloud", "minimax-m2.5:cloud", "glm-5:cloud"];
// "remote" = cloud + local models; "local" = local models only.
type OllamaMode = "remote" | "local";
type OllamaSetupOptions = ProviderAuthOptionBag & {
  // Overrides OLLAMA_DEFAULT_BASE_URL when provided.
  customBaseUrl?: string;
  // Explicit default model id; may carry an "ollama/" prefix.
  customModelId?: string;
};
/**
 * Normalize a user-supplied Ollama model reference: trim whitespace and drop
 * an optional leading "ollama/" provider prefix (case-insensitive).
 * Returns `undefined` for empty/blank input or a bare prefix.
 */
function normalizeOllamaModelName(value: string | undefined): string | undefined {
  const trimmed = value?.trim();
  if (!trimmed) {
    return undefined;
  }
  if (!trimmed.toLowerCase().startsWith("ollama/")) {
    return trimmed;
  }
  const bare = trimmed.slice("ollama/".length).trim();
  return bare.length > 0 ? bare : undefined;
}
/** True when the model name carries Ollama's ":cloud" suffix (case-insensitive). */
function isOllamaCloudModel(modelName: string | undefined): boolean {
  if (modelName === undefined) {
    return false;
  }
  return modelName.trim().toLowerCase().endsWith(":cloud");
}
/**
 * Compact a raw /api/pull status line for spinner display.
 * Layer-download statuses ("<verb> <digest>") collapse to "<verb> part";
 * digest verification hides the percentage (it has no meaningful progress).
 */
function formatOllamaPullStatus(status: string): { text: string; hidePercent: boolean } {
  const trimmed = status.trim();
  const partStatusMatch = trimmed.match(/^([a-z-]+)\s+(?:sha256:)?[a-f0-9]{8,}$/i);
  if (partStatusMatch) {
    return { hidePercent: false, text: `${partStatusMatch[1]} part` };
  }
  const isVerifyingDigest = /^verifying\b.*\bdigest\b/i.test(trimmed);
  if (isVerifyingDigest) {
    return { hidePercent: true, text: "verifying digest" };
  }
  return { hidePercent: false, text: trimmed };
}
type OllamaCloudAuthResult = {
  signedIn: boolean;
  signinUrl?: string;
};
/** Check whether the user is signed in to Ollama cloud by probing /api/me. */
async function checkOllamaCloudAuth(baseUrl: string): Promise<OllamaCloudAuthResult> {
  try {
    const response = await fetch(`${resolveOllamaApiBase(baseUrl)}/api/me`, {
      method: "POST",
      signal: AbortSignal.timeout(5000),
    });
    if (response.status === 401) {
      // A 401 response body carries { error, signin_url }.
      const body = (await response.json()) as { signin_url?: string };
      return { signedIn: false, signinUrl: body.signin_url };
    }
    return response.ok ? { signedIn: true } : { signedIn: false };
  } catch {
    // /api/me not supported or unreachable — fail closed so cloud mode
    // doesn't silently skip auth; the caller handles the fallback.
    return { signedIn: false };
  }
}
// One NDJSON line from Ollama's streaming /api/pull response.
type OllamaPullChunk = {
  status?: string;
  total?: number;
  completed?: number;
  error?: string;
};
// Failure categories: HTTP error status, missing stream body, an in-stream
// error chunk, or a thrown fetch/network error.
type OllamaPullFailureKind = "http" | "no-body" | "chunk-error" | "network";
type OllamaPullResult =
  | { ok: true }
  | {
      ok: false;
      kind: OllamaPullFailureKind;
      message: string;
    };
/**
 * Stream a model download via Ollama's /api/pull endpoint.
 *
 * Parses the NDJSON progress stream, aggregating per-status totals into an
 * overall percentage reported through `onStatus`. Never throws: every
 * failure path is folded into the returned OllamaPullResult.
 */
async function pullOllamaModelCore(params: {
  baseUrl: string;
  modelName: string;
  onStatus?: (status: string, percent: number | null) => void;
}): Promise<OllamaPullResult> {
  const { onStatus } = params;
  const baseUrl = resolveOllamaApiBase(params.baseUrl);
  // Accept "ollama/<name>" references too; fall back to the trimmed input.
  const modelName = normalizeOllamaModelName(params.modelName) ?? params.modelName.trim();
  try {
    const response = await fetch(`${baseUrl}/api/pull`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ name: modelName }),
    });
    if (!response.ok) {
      return {
        ok: false,
        kind: "http",
        message: `Failed to download ${modelName} (HTTP ${response.status})`,
      };
    }
    if (!response.body) {
      return {
        ok: false,
        kind: "no-body",
        message: `Failed to download ${modelName} (no response body)`,
      };
    }
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = "";
    // Per-status progress entries; summed to produce an overall percentage.
    const layers = new Map<string, { total: number; completed: number }>();
    // Parse one NDJSON line; returns a failure result only for error chunks.
    const parseLine = (line: string): OllamaPullResult => {
      const trimmed = line.trim();
      if (!trimmed) {
        return { ok: true };
      }
      try {
        const chunk = JSON.parse(trimmed) as OllamaPullChunk;
        if (chunk.error) {
          return {
            ok: false,
            kind: "chunk-error",
            message: `Download failed: ${chunk.error}`,
          };
        }
        if (!chunk.status) {
          return { ok: true };
        }
        if (chunk.total && chunk.completed !== undefined) {
          layers.set(chunk.status, { total: chunk.total, completed: chunk.completed });
          // Aggregate across everything seen so far for an overall percent.
          let totalSum = 0;
          let completedSum = 0;
          for (const layer of layers.values()) {
            totalSum += layer.total;
            completedSum += layer.completed;
          }
          const percent = totalSum > 0 ? Math.round((completedSum / totalSum) * 100) : null;
          onStatus?.(chunk.status, percent);
        } else {
          onStatus?.(chunk.status, null);
        }
      } catch {
        // Ignore malformed lines from streaming output.
      }
      return { ok: true };
    };
    for (;;) {
      const { done, value } = await reader.read();
      if (done) {
        break;
      }
      buffer += decoder.decode(value, { stream: true });
      // Complete lines are parsed; the trailing partial line stays buffered.
      const lines = buffer.split("\n");
      buffer = lines.pop() ?? "";
      for (const line of lines) {
        const parsed = parseLine(line);
        if (!parsed.ok) {
          return parsed;
        }
      }
    }
    // Flush any final line left in the buffer after the stream ends.
    const trailing = buffer.trim();
    if (trailing) {
      const parsed = parseLine(trailing);
      if (!parsed.ok) {
        return parsed;
      }
    }
    return { ok: true };
  } catch (err) {
    const reason = err instanceof Error ? err.message : String(err);
    return {
      ok: false,
      kind: "network",
      message: `Failed to download ${modelName}: ${reason}`,
    };
  }
}
/** Pull a model from Ollama, reporting streamed progress through the prompter spinner. */
async function pullOllamaModel(
  baseUrl: string,
  modelName: string,
  prompter: WizardPrompter,
): Promise<boolean> {
  const spinner = prompter.progress(`Downloading ${modelName}...`);
  const reportStatus = (status: string, percent: number | null) => {
    const display = formatOllamaPullStatus(status);
    const suffix = display.hidePercent ? "" : ` - ${percent ?? 0}%`;
    spinner.update(`Downloading ${modelName} - ${display.text}${suffix}`);
  };
  const result = await pullOllamaModelCore({ baseUrl, modelName, onStatus: reportStatus });
  if (result.ok) {
    spinner.stop(`Downloaded ${modelName}`);
    return true;
  }
  spinner.stop(result.message);
  return false;
}
/** Non-interactive pull: plain runtime log lines instead of a spinner. */
async function pullOllamaModelNonInteractive(
  baseUrl: string,
  modelName: string,
  runtime: RuntimeEnv,
): Promise<boolean> {
  runtime.log(`Downloading ${modelName}...`);
  const result = await pullOllamaModelCore({ baseUrl, modelName });
  if (result.ok) {
    runtime.log(`Downloaded ${modelName}`);
    return true;
  }
  runtime.error(result.message);
  return false;
}
/**
 * Build model definitions for the provider config, attaching a discovered
 * context window when the model was seen on the server.
 */
function buildOllamaModelsConfig(
  modelNames: string[],
  discoveredModelsByName?: Map<string, OllamaModelWithContext>,
) {
  const definitions = [];
  for (const name of modelNames) {
    const contextWindow = discoveredModelsByName?.get(name)?.contextWindow;
    definitions.push(buildOllamaModelDefinition(name, contextWindow));
  }
  return definitions;
}
/**
 * Merge an "ollama" provider entry into the config's models section,
 * preserving existing providers and defaulting the merge mode to "merge".
 */
function applyOllamaProviderConfig(
  cfg: OpenClawConfig,
  baseUrl: string,
  modelNames: string[],
  discoveredModelsByName?: Map<string, OllamaModelWithContext>,
): OpenClawConfig {
  const ollamaProvider = {
    baseUrl,
    api: "ollama",
    apiKey: "OLLAMA_API_KEY", // pragma: allowlist secret
    models: buildOllamaModelsConfig(modelNames, discoveredModelsByName),
  };
  const providers = {
    ...cfg.models?.providers,
    ollama: ollamaProvider,
  };
  return {
    ...cfg,
    models: {
      ...cfg.models,
      mode: cfg.models?.mode ?? "merge",
      providers,
    },
  };
}
/** Persist a synthetic local API-key credential profile for the ollama provider. */
async function storeOllamaCredential(agentDir?: string): Promise<void> {
  await upsertAuthProfileWithLock({
    agentDir,
    profileId: "ollama:default",
    credential: { type: "api_key", provider: "ollama", key: "ollama-local" },
  });
}
/**
 * Interactive: prompt for base URL, discover models, configure provider.
 * Model selection is handled by the standard model picker downstream.
 *
 * @throws WizardCancelledError when Ollama is unreachable or cloud sign-in
 *   is required but not completed/confirmed.
 */
export async function promptAndConfigureOllama(params: {
  cfg: OpenClawConfig;
  prompter: WizardPrompter;
}): Promise<{ config: OpenClawConfig }> {
  const { prompter } = params;
  // 1. Prompt base URL
  const baseUrlRaw = await prompter.text({
    message: "Ollama base URL",
    initialValue: OLLAMA_DEFAULT_BASE_URL,
    placeholder: OLLAMA_DEFAULT_BASE_URL,
    validate: (value) => (value?.trim() ? undefined : "Required"),
  });
  // Strip trailing slashes before resolving the API base.
  const configuredBaseUrl = String(baseUrlRaw ?? "")
    .trim()
    .replace(/\/+$/, "");
  const baseUrl = resolveOllamaApiBase(configuredBaseUrl);
  // 2. Check reachability
  const { reachable, models } = await fetchOllamaModels(baseUrl);
  if (!reachable) {
    await prompter.note(
      [
        `Ollama could not be reached at ${baseUrl}.`,
        "Download it at https://ollama.com/download",
        "",
        "Start Ollama and re-run setup.",
      ].join("\n"),
      "Ollama",
    );
    throw new WizardCancelledError("Ollama not reachable");
  }
  // Enrich at most 50 models with context-window metadata to bound latency.
  const enrichedModels = await enrichOllamaModelsWithContext(baseUrl, models.slice(0, 50));
  const discoveredModelsByName = new Map(enrichedModels.map((model) => [model.name, model]));
  const modelNames = models.map((m) => m.name);
  // 3. Mode selection
  const mode = (await prompter.select({
    message: "Ollama mode",
    options: [
      { value: "remote", label: "Cloud + Local", hint: "Ollama cloud models + local models" },
      { value: "local", label: "Local", hint: "Local models only" },
    ],
  })) as OllamaMode;
  // 4. Cloud auth — check /api/me upfront for remote (cloud+local) mode
  let cloudAuthVerified = false;
  if (mode === "remote") {
    const authResult = await checkOllamaCloudAuth(baseUrl);
    if (!authResult.signedIn) {
      if (authResult.signinUrl) {
        // Only auto-open a browser when not running in a remote environment.
        if (!isRemoteEnvironment()) {
          await openUrl(authResult.signinUrl);
        }
        await prompter.note(
          ["Sign in to Ollama Cloud:", authResult.signinUrl].join("\n"),
          "Ollama Cloud",
        );
        const confirmed = await prompter.confirm({
          message: "Have you signed in?",
        });
        if (!confirmed) {
          throw new WizardCancelledError("Ollama cloud sign-in cancelled");
        }
        // Re-check after user claims sign-in
        const recheck = await checkOllamaCloudAuth(baseUrl);
        if (!recheck.signedIn) {
          throw new WizardCancelledError("Ollama cloud sign-in required");
        }
        cloudAuthVerified = true;
      } else {
        // No signin URL available (older server, unreachable /api/me, or custom gateway).
        await prompter.note(
          [
            "Could not verify Ollama Cloud authentication.",
            "Cloud models may not work until you sign in at https://ollama.com.",
          ].join("\n"),
          "Ollama Cloud",
        );
        const continueAnyway = await prompter.confirm({
          message: "Continue without cloud auth?",
        });
        if (!continueAnyway) {
          throw new WizardCancelledError("Ollama cloud auth could not be verified");
        }
        // Cloud auth unverified — fall back to local defaults so the model
        // picker doesn't steer toward cloud models that may fail.
      }
    } else {
      cloudAuthVerified = true;
    }
  }
  // 5. Model ordering — suggested models first.
  // Use cloud defaults only when auth was actually verified; otherwise fall
  // back to local defaults so the user isn't steered toward cloud models
  // that may fail at runtime.
  const suggestedModels =
    mode === "local" || !cloudAuthVerified
      ? OLLAMA_SUGGESTED_MODELS_LOCAL
      : OLLAMA_SUGGESTED_MODELS_CLOUD;
  const orderedModelNames = [
    ...suggestedModels,
    ...modelNames.filter((name) => !suggestedModels.includes(name)),
  ];
  const config = applyOllamaProviderConfig(
    params.cfg,
    baseUrl,
    orderedModelNames,
    discoveredModelsByName,
  );
  return { config };
}
/**
 * Non-interactive: auto-discover models and configure provider.
 *
 * Calls `runtime.exit(1)` when Ollama is unreachable or no model is
 * available; in those paths the incoming config is returned unchanged.
 */
export async function configureOllamaNonInteractive(params: {
  nextConfig: OpenClawConfig;
  opts: OllamaSetupOptions;
  runtime: RuntimeEnv;
}): Promise<OpenClawConfig> {
  const { opts, runtime } = params;
  // Strip trailing slashes; fall back to the default local endpoint.
  const configuredBaseUrl = (opts.customBaseUrl?.trim() || OLLAMA_DEFAULT_BASE_URL).replace(
    /\/+$/,
    "",
  );
  const baseUrl = resolveOllamaApiBase(configuredBaseUrl);
  const { reachable, models } = await fetchOllamaModels(baseUrl);
  const explicitModel = normalizeOllamaModelName(opts.customModelId);
  if (!reachable) {
    runtime.error(
      [
        `Ollama could not be reached at ${baseUrl}.`,
        "Download it at https://ollama.com/download",
      ].join("\n"),
    );
    runtime.exit(1);
    return params.nextConfig;
  }
  await storeOllamaCredential();
  // Enrich at most 50 models with context-window metadata to bound latency.
  const enrichedModels = await enrichOllamaModelsWithContext(baseUrl, models.slice(0, 50));
  const discoveredModelsByName = new Map(enrichedModels.map((model) => [model.name, model]));
  const modelNames = models.map((m) => m.name);
  // Apply local suggested model ordering.
  const suggestedModels = OLLAMA_SUGGESTED_MODELS_LOCAL;
  const orderedModelNames = [
    ...suggestedModels,
    ...modelNames.filter((name) => !suggestedModels.includes(name)),
  ];
  const requestedDefaultModelId = explicitModel ?? suggestedModels[0];
  let pulledRequestedModel = false;
  const availableModelNames = new Set(modelNames);
  const requestedCloudModel = isOllamaCloudModel(requestedDefaultModelId);
  if (requestedCloudModel) {
    // Cloud models never appear in the local list; treat them as available.
    availableModelNames.add(requestedDefaultModelId);
  }
  // Pull if model not in discovered list and Ollama is reachable
  if (!requestedCloudModel && !modelNames.includes(requestedDefaultModelId)) {
    pulledRequestedModel = await pullOllamaModelNonInteractive(
      baseUrl,
      requestedDefaultModelId,
      runtime,
    );
    if (pulledRequestedModel) {
      availableModelNames.add(requestedDefaultModelId);
    }
  }
  let allModelNames = orderedModelNames;
  let defaultModelId = requestedDefaultModelId;
  // Ensure a freshly-pulled or cloud model is present in the final list.
  if (
    (pulledRequestedModel || requestedCloudModel) &&
    !allModelNames.includes(requestedDefaultModelId)
  ) {
    allModelNames = [...allModelNames, requestedDefaultModelId];
  }
  if (!availableModelNames.has(requestedDefaultModelId)) {
    if (availableModelNames.size > 0) {
      // Fall back to the first available model, preferring suggested order.
      const firstAvailableModel =
        allModelNames.find((name) => availableModelNames.has(name)) ??
        Array.from(availableModelNames)[0];
      defaultModelId = firstAvailableModel;
      runtime.log(
        `Ollama model ${requestedDefaultModelId} was not available; using ${defaultModelId} instead.`,
      );
    } else {
      runtime.error(
        [
          `No Ollama models are available at ${baseUrl}.`,
          "Pull a model first, then re-run setup.",
        ].join("\n"),
      );
      runtime.exit(1);
      return params.nextConfig;
    }
  }
  const config = applyOllamaProviderConfig(
    params.nextConfig,
    baseUrl,
    allModelNames,
    discoveredModelsByName,
  );
  const modelRef = `ollama/${defaultModelId}`;
  runtime.log(`Default Ollama model: ${defaultModelId}`);
  return applyAgentDefaultModelPrimary(config, modelRef);
}
/** Pull the configured default Ollama model if it isn't already available locally. */
export async function ensureOllamaModelPulled(params: {
  config: OpenClawConfig;
  model: string;
  prompter: WizardPrompter;
}): Promise<void> {
  const prefix = "ollama/";
  if (!params.model.startsWith(prefix)) {
    // Not an Ollama model reference; nothing to do.
    return;
  }
  const modelName = params.model.slice(prefix.length);
  if (isOllamaCloudModel(modelName)) {
    // Cloud-hosted models are never pulled locally.
    return;
  }
  const baseUrl = params.config.models?.providers?.ollama?.baseUrl ?? OLLAMA_DEFAULT_BASE_URL;
  const { models } = await fetchOllamaModels(baseUrl);
  const alreadyLocal = models.some((m) => m.name === modelName);
  if (alreadyLocal) {
    return;
  }
  if (!(await pullOllamaModel(baseUrl, modelName, params.prompter))) {
    throw new WizardCancelledError("Failed to download selected Ollama model");
  }
}
export {
OLLAMA_DEFAULT_BASE_URL,
OLLAMA_DEFAULT_MODEL,
buildOllamaProvider,
configureOllamaNonInteractive,
ensureOllamaModelPulled,
promptAndConfigureOllama,
} from "../plugin-sdk/provider-setup.js";

View File

@@ -23,13 +23,17 @@ const resolveOwningPluginIdsForProviderMock = vi.fn<ResolveOwningPluginIdsForPro
// Late-bound references to provider-runtime.js exports. Declared as `let`
// with `typeof import(...)` types and populated during test setup via a
// dynamic import (presumably after mocks are installed — see setup below),
// so the module under test picks up the mocked dependencies.
let augmentModelCatalogWithProviderPlugins: typeof import("./provider-runtime.js").augmentModelCatalogWithProviderPlugins;
let buildProviderAuthDoctorHintWithPlugin: typeof import("./provider-runtime.js").buildProviderAuthDoctorHintWithPlugin;
let buildProviderMissingAuthMessageWithPlugin: typeof import("./provider-runtime.js").buildProviderMissingAuthMessageWithPlugin;
let buildProviderUnknownModelHintWithPlugin: typeof import("./provider-runtime.js").buildProviderUnknownModelHintWithPlugin;
let formatProviderAuthProfileApiKeyWithPlugin: typeof import("./provider-runtime.js").formatProviderAuthProfileApiKeyWithPlugin;
let prepareProviderExtraParams: typeof import("./provider-runtime.js").prepareProviderExtraParams;
let resolveProviderStreamFn: typeof import("./provider-runtime.js").resolveProviderStreamFn;
let resolveProviderCacheTtlEligibility: typeof import("./provider-runtime.js").resolveProviderCacheTtlEligibility;
let resolveProviderBinaryThinking: typeof import("./provider-runtime.js").resolveProviderBinaryThinking;
let resolveProviderBuiltInModelSuppression: typeof import("./provider-runtime.js").resolveProviderBuiltInModelSuppression;
let createProviderEmbeddingProvider: typeof import("./provider-runtime.js").createProviderEmbeddingProvider;
let resolveProviderDefaultThinkingLevel: typeof import("./provider-runtime.js").resolveProviderDefaultThinkingLevel;
let resolveProviderModernModelRef: typeof import("./provider-runtime.js").resolveProviderModernModelRef;
let resolveProviderSyntheticAuthWithPlugin: typeof import("./provider-runtime.js").resolveProviderSyntheticAuthWithPlugin;
let resolveProviderUsageSnapshotWithPlugin: typeof import("./provider-runtime.js").resolveProviderUsageSnapshotWithPlugin;
let resolveProviderCapabilitiesWithPlugin: typeof import("./provider-runtime.js").resolveProviderCapabilitiesWithPlugin;
let resolveProviderUsageAuthWithPlugin: typeof import("./provider-runtime.js").resolveProviderUsageAuthWithPlugin;
@@ -72,13 +76,17 @@ describe("provider-runtime", () => {
augmentModelCatalogWithProviderPlugins,
buildProviderAuthDoctorHintWithPlugin,
buildProviderMissingAuthMessageWithPlugin,
buildProviderUnknownModelHintWithPlugin,
formatProviderAuthProfileApiKeyWithPlugin,
prepareProviderExtraParams,
resolveProviderStreamFn,
resolveProviderCacheTtlEligibility,
resolveProviderBinaryThinking,
resolveProviderBuiltInModelSuppression,
createProviderEmbeddingProvider,
resolveProviderDefaultThinkingLevel,
resolveProviderModernModelRef,
resolveProviderSyntheticAuthWithPlugin,
resolveProviderUsageSnapshotWithPlugin,
resolveProviderCapabilitiesWithPlugin,
resolveProviderUsageAuthWithPlugin,
@@ -152,6 +160,22 @@ describe("provider-runtime", () => {
return undefined;
});
const prepareDynamicModel = vi.fn(async () => undefined);
const createStreamFn = vi.fn(() => vi.fn());
const createEmbeddingProvider = vi.fn(async () => ({
id: "demo",
model: "demo-embed",
embedQuery: async () => [1, 0, 0],
embedBatch: async () => [[1, 0, 0]],
client: { token: "embed-token" },
}));
const resolveSyntheticAuth = vi.fn(() => ({
apiKey: "demo-local",
source: "models.providers.demo (synthetic local key)",
mode: "api-key" as const,
}));
const buildUnknownModelHint = vi.fn(
({ modelId }: { modelId: string }) => `Use demo setup for ${modelId}`,
);
const prepareRuntimeAuth = vi.fn(async () => ({
apiKey: "runtime-token",
baseUrl: "https://runtime.example.com/v1",
@@ -185,7 +209,13 @@ describe("provider-runtime", () => {
...extraParams,
transport: "auto",
}),
wrapStreamFn: ({ streamFn }) => streamFn,
createStreamFn,
wrapStreamFn: ({ streamFn, model }) => {
expect(model).toMatchObject(MODEL);
return streamFn;
},
createEmbeddingProvider,
resolveSyntheticAuth,
normalizeResolvedModel: ({ model }) => ({
...model,
api: "openai-codex-responses",
@@ -210,6 +240,7 @@ describe("provider-runtime", () => {
auth: [],
buildMissingAuthMessage: () =>
'No API key found for provider "openai". Use openai-codex/gpt-5.4.',
buildUnknownModelHint,
suppressBuiltInModel: ({ provider, modelId }) =>
provider === "azure-openai-responses" && modelId === "gpt-5.3-codex-spark"
? { suppress: true, errorMessage: "openai-codex/gpt-5.3-codex-spark" }
@@ -270,12 +301,39 @@ describe("provider-runtime", () => {
transport: "auto",
});
expect(
resolveProviderStreamFn({
provider: "demo",
context: {
provider: "demo",
modelId: MODEL.id,
model: MODEL,
},
}),
).toBeTypeOf("function");
await expect(
createProviderEmbeddingProvider({
provider: "demo",
context: {
config: {} as never,
provider: "demo",
model: "demo-embed",
},
}),
).resolves.toMatchObject({
id: "demo",
model: "demo-embed",
client: { token: "embed-token" },
});
expect(
wrapProviderStreamFn({
provider: "demo",
context: {
provider: "demo",
modelId: MODEL.id,
model: MODEL,
streamFn: vi.fn(),
},
}),
@@ -439,12 +497,44 @@ describe("provider-runtime", () => {
}),
).toBe(true);
expect(
resolveProviderSyntheticAuthWithPlugin({
provider: "demo",
context: {
provider: "demo",
providerConfig: {
api: "openai-completions",
baseUrl: "http://localhost:11434",
models: [],
},
},
}),
).toEqual({
apiKey: "demo-local",
source: "models.providers.demo (synthetic local key)",
mode: "api-key",
});
expect(
buildProviderUnknownModelHintWithPlugin({
provider: "openai",
env: process.env,
context: {
env: process.env,
provider: "openai",
modelId: "gpt-5.4",
},
}),
).toBe("Use demo setup for gpt-5.4");
expectCodexMissingAuthHint(buildProviderMissingAuthMessageWithPlugin);
expectCodexBuiltInSuppression(resolveProviderBuiltInModelSuppression);
await expectAugmentedCodexCatalog(augmentModelCatalogWithProviderPlugins);
expect(prepareDynamicModel).toHaveBeenCalledTimes(1);
expect(refreshOAuth).toHaveBeenCalledTimes(1);
expect(resolveSyntheticAuth).toHaveBeenCalledTimes(1);
expect(buildUnknownModelHint).toHaveBeenCalledTimes(1);
expect(prepareRuntimeAuth).toHaveBeenCalledTimes(1);
expect(resolveUsageAuth).toHaveBeenCalledTimes(1);
expect(fetchUsageSnapshot).toHaveBeenCalledTimes(1);

View File

@@ -15,8 +15,12 @@ import type {
ProviderAuthDoctorHintContext,
ProviderAugmentModelCatalogContext,
ProviderBuildMissingAuthMessageContext,
ProviderBuildUnknownModelHintContext,
ProviderBuiltInModelSuppressionContext,
ProviderCacheTtlEligibilityContext,
ProviderCreateEmbeddingProviderContext,
ProviderResolveSyntheticAuthContext,
ProviderCreateStreamFnContext,
ProviderDefaultThinkingPolicyContext,
ProviderFetchUsageSnapshotContext,
ProviderModernModelPolicyContext,
@@ -234,6 +238,16 @@ export function prepareProviderExtraParams(params: {
return resolveProviderRuntimePlugin(params)?.prepareExtraParams?.(params.context) ?? undefined;
}
/** Resolve a provider-plugin-supplied transport factory, if one is registered. */
export function resolveProviderStreamFn(params: {
  provider: string;
  config?: OpenClawConfig;
  workspaceDir?: string;
  env?: NodeJS.ProcessEnv;
  context: ProviderCreateStreamFnContext;
}) {
  const plugin = resolveProviderRuntimePlugin(params);
  return plugin?.createStreamFn?.(params.context) ?? undefined;
}
export function wrapProviderStreamFn(params: {
provider: string;
config?: OpenClawConfig;
@@ -244,6 +258,16 @@ export function wrapProviderStreamFn(params: {
return resolveProviderRuntimePlugin(params)?.wrapStreamFn?.(params.context) ?? undefined;
}
/** Create a provider-plugin-owned embedding provider, when the plugin supports it. */
export async function createProviderEmbeddingProvider(params: {
  provider: string;
  config?: OpenClawConfig;
  workspaceDir?: string;
  env?: NodeJS.ProcessEnv;
  context: ProviderCreateEmbeddingProviderContext;
}) {
  const plugin = resolveProviderRuntimePlugin(params);
  return await plugin?.createEmbeddingProvider?.(params.context);
}
export async function prepareProviderRuntimeAuth(params: {
provider: string;
config?: OpenClawConfig;
@@ -366,6 +390,26 @@ export function buildProviderMissingAuthMessageWithPlugin(params: {
);
}
/** Ask the owning provider plugin for an unknown-model hint suffix, if any. */
export function buildProviderUnknownModelHintWithPlugin(params: {
  provider: string;
  config?: OpenClawConfig;
  workspaceDir?: string;
  env?: NodeJS.ProcessEnv;
  context: ProviderBuildUnknownModelHintContext;
}) {
  const plugin = resolveProviderRuntimePlugin(params);
  return plugin?.buildUnknownModelHint?.(params.context) ?? undefined;
}
/** Ask the owning provider plugin for a synthetic auth marker, if it defines one. */
export function resolveProviderSyntheticAuthWithPlugin(params: {
  provider: string;
  config?: OpenClawConfig;
  workspaceDir?: string;
  env?: NodeJS.ProcessEnv;
  context: ProviderResolveSyntheticAuthContext;
}) {
  const plugin = resolveProviderRuntimePlugin(params);
  return plugin?.resolveSyntheticAuth?.(params.context) ?? undefined;
}
export function resolveProviderBuiltInModelSuppression(params: {
config?: OpenClawConfig;
workspaceDir?: string;

View File

@@ -21,7 +21,11 @@ import type {
ChannelStructuredComponents,
} from "../channels/plugins/types.js";
import type { OpenClawConfig } from "../config/config.js";
import type { CliBackendConfig, ModelProviderConfig } from "../config/types.js";
import type {
CliBackendConfig,
ModelProviderAuthMode,
ModelProviderConfig,
} from "../config/types.js";
import type { OperatorScope } from "../gateway/method-scopes.js";
import type { GatewayRequestHandler } from "../gateway/server-methods/types.js";
import type { InternalHookHandler } from "../hooks/internal-hooks.js";
@@ -478,6 +482,22 @@ export type ProviderPrepareExtraParamsContext = {
thinkingLevel?: ThinkLevel;
};
/**
* Provider-owned transport creation.
*
* Use this when the provider needs to replace pi-ai's default transport with a
* custom StreamFn (for example a native API transport that cannot be expressed
* as a wrapper around `streamSimple`).
*/
export type ProviderCreateStreamFnContext = {
config?: OpenClawConfig;
agentDir?: string;
workspaceDir?: string;
provider: string;
modelId: string;
model: ProviderRuntimeModel;
};
/**
* Provider-owned stream wrapper hook after OpenClaw applies its generic
* transport-independent wrappers.
@@ -486,9 +506,48 @@ export type ProviderPrepareExtraParamsContext = {
* through the normal `pi-ai` stream path.
*/
export type ProviderWrapStreamFnContext = ProviderPrepareExtraParamsContext & {
model?: ProviderRuntimeModel;
streamFn?: StreamFn;
};
/**
 * Generic embedding provider shape returned by provider plugins.
 *
 * Keep this aligned with the memory embedding contract without forcing the
 * plugin system to import memory internals directly.
 */
export type PluginEmbeddingProvider = {
  id: string;
  model: string;
  maxInputTokens?: number;
  embedQuery: (text: string) => Promise<number[]>;
  embedBatch: (texts: string[]) => Promise<number[][]>;
  // Optional batch entry point for non-string inputs; the input shape is
  // provider-defined — presumably mirrors the memory contract. TODO confirm.
  embedBatchInputs?: (inputs: unknown[]) => Promise<number[][]>;
  // Underlying client handle, if the provider exposes one (opaque to callers).
  client?: unknown;
};
/**
* Provider-owned embedding transport creation.
*
* Use this when a provider wants memory embeddings to live with the provider
* plugin instead of the core memory switchboard.
*/
export type ProviderCreateEmbeddingProviderContext = {
config: OpenClawConfig;
agentDir?: string;
workspaceDir?: string;
provider: string;
model: string;
remote?: {
baseUrl?: string;
apiKey?: unknown;
headers?: Record<string, string>;
};
providerApiKey?: string;
outputDimensionality?: number;
taskType?: string;
};
/**
* Provider-owned prompt-cache eligibility.
*
@@ -516,6 +575,22 @@ export type ProviderBuildMissingAuthMessageContext = {
listProfileIds: (providerId: string) => string[];
};
/**
* Provider-owned unknown-model hint override.
*
* Runs after catalog/runtime lookup misses for the requested provider. Return a
* hint suffix that OpenClaw should append to the generic `Unknown model`
* error.
*/
export type ProviderBuildUnknownModelHintContext = {
config?: OpenClawConfig;
agentDir?: string;
workspaceDir?: string;
env: NodeJS.ProcessEnv;
provider: string;
modelId: string;
};
/**
* Built-in model suppression hook.
*
@@ -632,6 +707,17 @@ export type ProviderPluginWizardSetup = {
initialSelections?: string[];
message?: string;
};
/**
* Optional default-model prompt policy for this auth/setup choice.
*
* Use this when selecting the auth choice should still force a model picker
* even if the choice was preseeded via CLI/configure, or when "keep current"
* would skip required provider-owned post-selection work.
*/
modelSelection?: {
promptWhenAuthChoiceProvided?: boolean;
allowKeepCurrent?: boolean;
};
};
/** Optional model-picker metadata shown in interactive provider selection flows. */
@@ -655,6 +741,18 @@ export type ProviderModelSelectedContext = {
workspaceDir?: string;
};
/** Context passed to a provider plugin's `resolveSyntheticAuth` hook. */
export type ProviderResolveSyntheticAuthContext = {
  config?: OpenClawConfig;
  provider: string;
  providerConfig?: ModelProviderConfig;
};
/**
 * Synthetic auth marker returned by `resolveSyntheticAuth`: a placeholder
 * API key (for configured local/self-hosted cases that need no real secret),
 * a human-readable source description, and the auth mode to report.
 */
export type ProviderSyntheticAuthResult = {
  apiKey: string;
  source: string;
  // Any configured auth mode except "aws-sdk".
  mode: Exclude<ModelProviderAuthMode, "aws-sdk">;
};
/** Text-inference provider capability registered by a plugin. */
export type ProviderPlugin = {
id: string;
@@ -731,6 +829,13 @@ export type ProviderPlugin = {
prepareExtraParams?: (
ctx: ProviderPrepareExtraParamsContext,
) => Record<string, unknown> | null | undefined;
/**
* Provider-owned transport factory.
*
* Use this when the provider needs a fully custom StreamFn instead of a
* wrapper around the normal `streamSimple` path.
*/
createStreamFn?: (ctx: ProviderCreateStreamFnContext) => StreamFn | null | undefined;
/**
* Provider-owned stream wrapper applied after generic OpenClaw wrappers.
*
@@ -739,6 +844,19 @@ export type ProviderPlugin = {
* transport implementation.
*/
wrapStreamFn?: (ctx: ProviderWrapStreamFnContext) => StreamFn | null | undefined;
/**
* Provider-owned embedding provider factory.
*
* Use this when memory embedding behavior belongs with the provider plugin
* rather than the core embedding switchboard.
*/
createEmbeddingProvider?: (
ctx: ProviderCreateEmbeddingProviderContext,
) =>
| Promise<PluginEmbeddingProvider | null | undefined>
| PluginEmbeddingProvider
| null
| undefined;
/**
* Runtime auth exchange hook.
*
@@ -791,6 +909,14 @@ export type ProviderPlugin = {
buildMissingAuthMessage?: (
ctx: ProviderBuildMissingAuthMessageContext,
) => string | null | undefined;
/**
* Provider-owned unknown-model hint override.
*
* Return a suffix when the provider wants a more specific recovery hint than
* OpenClaw's generic `Unknown model` error after catalog/runtime lookup
* fails.
*/
buildUnknownModelHint?: (ctx: ProviderBuildUnknownModelHintContext) => string | null | undefined;
/**
* Provider-owned built-in model suppression.
*
@@ -882,6 +1008,16 @@ export type ProviderPlugin = {
buildAuthDoctorHint?: (
ctx: ProviderAuthDoctorHintContext,
) => string | Promise<string | null | undefined> | null | undefined;
/**
* Provider-owned synthetic auth marker.
*
* Use this when the provider can operate without a real secret for certain
* configured local/self-hosted cases and wants auth resolution to treat that
* config as available.
*/
resolveSyntheticAuth?: (
ctx: ProviderResolveSyntheticAuthContext,
) => ProviderSyntheticAuthResult | null | undefined;
onModelSelected?: (ctx: ProviderModelSelectedContext) => Promise<void>;
};