refactor(models): extract list row builders

This commit is contained in:
Peter Steinberger
2026-03-09 00:19:23 +00:00
parent 141738f717
commit dfc18b7a2b
4 changed files with 454 additions and 357 deletions

7
docs/refactor/cleanup.md Normal file
View File

@@ -0,0 +1,7 @@
# Cleanup tracker
- [x] Extract `models list` row/supplement helpers.
- [x] Split `models list` forward-compat tests by concern.
- [ ] Extract provider transport normalization from `pi-embedded-runner/model.ts`.
- [ ] Split `ensureOpenClawModelsJson()` into planning + IO layers.
- [ ] Split provider discovery helpers out of `models-config.providers.ts`.

View File

@@ -1,7 +1,24 @@
import { describe, expect, it, vi } from "vitest";
import { beforeEach, describe, expect, it, vi } from "vitest";
// Baseline synthetic Codex model fixture; resetMocks() returns a copy of it from
// the mocked resolveModelWithRegistry, and tests spread it for per-case variants.
const OPENAI_CODEX_MODEL = {
  provider: "openai-codex",
  id: "gpt-5.4",
  name: "GPT-5.4",
  api: "openai-codex-responses",
  baseUrl: "https://chatgpt.com/backend-api",
  input: ["text"],
  contextWindow: 272000,
  maxTokens: 128000,
  cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
};
// Same shape as the baseline fixture, only the id/name differ — used by the
// registry/catalog supplementation tests for the already-discovered model.
const OPENAI_CODEX_53_MODEL = {
  ...OPENAI_CODEX_MODEL,
  id: "gpt-5.3-codex",
  name: "GPT-5.3 Codex",
};
const mocks = vi.hoisted(() => {
const printModelTable = vi.fn();
const sourceConfig = {
agents: { defaults: { model: { primary: "openai-codex/gpt-5.4" } } },
models: {
@@ -23,48 +40,62 @@ const mocks = vi.hoisted(() => {
},
};
return {
loadConfig: vi.fn().mockReturnValue({
agents: { defaults: { model: { primary: "openai-codex/gpt-5.4" } } },
models: { providers: {} },
}),
sourceConfig,
resolvedConfig,
loadModelsConfigWithSource: vi.fn().mockResolvedValue({
sourceConfig,
resolvedConfig,
diagnostics: [],
}),
ensureAuthProfileStore: vi.fn().mockReturnValue({ version: 1, profiles: {}, order: {} }),
loadModelRegistry: vi
.fn()
.mockResolvedValue({ models: [], availableKeys: new Set(), registry: {} }),
loadModelCatalog: vi.fn().mockResolvedValue([]),
resolveConfiguredEntries: vi.fn().mockReturnValue({
entries: [
{
key: "openai-codex/gpt-5.4",
ref: { provider: "openai-codex", model: "gpt-5.4" },
tags: new Set(["configured"]),
aliases: [],
},
],
}),
printModelTable,
listProfilesForProvider: vi.fn().mockReturnValue([]),
resolveModelWithRegistry: vi.fn().mockReturnValue({
provider: "openai-codex",
id: "gpt-5.4",
name: "GPT-5.4",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
input: ["text"],
contextWindow: 272000,
maxTokens: 128000,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
}),
loadConfig: vi.fn(),
loadModelsConfigWithSource: vi.fn(),
ensureAuthProfileStore: vi.fn(),
loadModelRegistry: vi.fn(),
loadModelCatalog: vi.fn(),
resolveConfiguredEntries: vi.fn(),
printModelTable: vi.fn(),
listProfilesForProvider: vi.fn(),
resolveModelWithRegistry: vi.fn(),
};
});
// Puts every hoisted mock back into its default happy-path state. Called from
// beforeEach so individual tests only override what they care about via *Once.
function resetMocks() {
  const defaultUserConfig = {
    agents: { defaults: { model: { primary: "openai-codex/gpt-5.4" } } },
    models: { providers: {} },
  };
  const defaultConfiguredEntry = {
    key: "openai-codex/gpt-5.4",
    ref: { provider: "openai-codex", model: "gpt-5.4" },
    tags: new Set(["configured"]),
    aliases: [],
  };

  mocks.loadConfig.mockReturnValue(defaultUserConfig);
  mocks.loadModelsConfigWithSource.mockResolvedValue({
    sourceConfig: mocks.sourceConfig,
    resolvedConfig: mocks.resolvedConfig,
    diagnostics: [],
  });
  mocks.ensureAuthProfileStore.mockReturnValue({ version: 1, profiles: {}, order: {} });
  // Empty registry with a callable getAll() so --all listing has a valid registry view.
  mocks.loadModelRegistry.mockResolvedValue({
    models: [],
    availableKeys: new Set(),
    registry: {
      getAll: () => [],
    },
  });
  mocks.loadModelCatalog.mockResolvedValue([]);
  mocks.resolveConfiguredEntries.mockReturnValue({ entries: [defaultConfiguredEntry] });
  mocks.printModelTable.mockReset();
  mocks.listProfilesForProvider.mockReturnValue([]);
  // Fresh copy each reset so a test mutating the resolved model cannot leak state.
  mocks.resolveModelWithRegistry.mockReturnValue({ ...OPENAI_CODEX_MODEL });
}
// Minimal runtime stub: spies capturing log/error output for assertions.
function createRuntime() {
  const log = vi.fn();
  const error = vi.fn();
  return { log, error };
}
// Rows passed to the most recent printModelTable call (empty array if none).
function lastPrintedRows<T>() {
  const calls = mocks.printModelTable.mock.calls;
  const latest = calls[calls.length - 1];
  return (latest?.[0] ?? []) as T[];
}
vi.mock("../../config/config.js", () => ({
loadConfig: mocks.loadConfig,
getRuntimeConfigSnapshot: vi.fn().mockReturnValue(null),
@@ -114,188 +145,174 @@ vi.mock("../../agents/pi-embedded-runner/model.js", async (importOriginal) => {
import { modelsListCommand } from "./list.list-command.js";
beforeEach(() => {
vi.clearAllMocks();
resetMocks();
});
describe("modelsListCommand forward-compat", () => {
it("does not mark configured codex model as missing when forward-compat can build a fallback", async () => {
const runtime = { log: vi.fn(), error: vi.fn() };
describe("configured rows", () => {
it("does not mark configured codex model as missing when forward-compat can build a fallback", async () => {
const runtime = createRuntime();
await modelsListCommand({ json: true }, runtime as never);
expect(mocks.printModelTable).toHaveBeenCalled();
const rows = mocks.printModelTable.mock.calls[0]?.[0] as Array<{
key: string;
tags: string[];
missing: boolean;
}>;
const codex = rows.find((r) => r.key === "openai-codex/gpt-5.4");
expect(codex).toBeTruthy();
expect(codex?.missing).toBe(false);
expect(codex?.tags).not.toContain("missing");
});
it("passes source config to model registry loading for persistence safety", async () => {
const runtime = { log: vi.fn(), error: vi.fn() };
await modelsListCommand({ json: true }, runtime as never);
expect(mocks.loadModelRegistry).toHaveBeenCalledWith(mocks.resolvedConfig, {
sourceConfig: mocks.sourceConfig,
});
});
it("keeps configured local openai gpt-5.4 entries visible in --local output", async () => {
mocks.resolveConfiguredEntries.mockReturnValueOnce({
entries: [
{
key: "openai/gpt-5.4",
ref: { provider: "openai", model: "gpt-5.4" },
tags: new Set(["configured"]),
aliases: [],
},
],
});
mocks.resolveModelWithRegistry.mockReturnValueOnce({
provider: "openai",
id: "gpt-5.4",
name: "GPT-5.4",
api: "openai-responses",
baseUrl: "http://localhost:4000/v1",
input: ["text", "image"],
contextWindow: 1_050_000,
maxTokens: 128_000,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
});
const runtime = { log: vi.fn(), error: vi.fn() };
await modelsListCommand({ json: true, local: true }, runtime as never);
expect(mocks.printModelTable).toHaveBeenCalled();
const rows = mocks.printModelTable.mock.calls.at(-1)?.[0] as Array<{ key: string }>;
expect(rows).toEqual([
expect.objectContaining({
key: "openai/gpt-5.4",
}),
]);
});
it("marks synthetic codex gpt-5.4 rows as available when provider auth exists", async () => {
mocks.loadModelRegistry.mockResolvedValueOnce({
models: [],
availableKeys: new Set(),
registry: {},
});
mocks.listProfilesForProvider.mockImplementation((_: unknown, provider: string) =>
provider === "openai-codex" ? ([{ id: "profile-1" }] as Array<Record<string, unknown>>) : [],
);
const runtime = { log: vi.fn(), error: vi.fn() };
try {
await modelsListCommand({ json: true }, runtime as never);
expect(mocks.printModelTable).toHaveBeenCalled();
const rows = mocks.printModelTable.mock.calls.at(-1)?.[0] as Array<{
const rows = lastPrintedRows<{
key: string;
available: boolean;
}>;
tags: string[];
missing: boolean;
}>();
expect(rows).toContainEqual(
const codex = rows.find((row) => row.key === "openai-codex/gpt-5.4");
expect(codex).toBeTruthy();
expect(codex?.missing).toBe(false);
expect(codex?.tags).not.toContain("missing");
});
it("passes source config to model registry loading for persistence safety", async () => {
const runtime = createRuntime();
await modelsListCommand({ json: true }, runtime as never);
expect(mocks.loadModelRegistry).toHaveBeenCalledWith(mocks.resolvedConfig, {
sourceConfig: mocks.sourceConfig,
});
});
it("keeps configured local openai gpt-5.4 entries visible in --local output", async () => {
mocks.resolveConfiguredEntries.mockReturnValueOnce({
entries: [
{
key: "openai/gpt-5.4",
ref: { provider: "openai", model: "gpt-5.4" },
tags: new Set(["configured"]),
aliases: [],
},
],
});
mocks.resolveModelWithRegistry.mockReturnValueOnce({
provider: "openai",
id: "gpt-5.4",
name: "GPT-5.4",
api: "openai-responses",
baseUrl: "http://localhost:4000/v1",
input: ["text", "image"],
contextWindow: 1_050_000,
maxTokens: 128_000,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
});
const runtime = createRuntime();
await modelsListCommand({ json: true, local: true }, runtime as never);
expect(mocks.printModelTable).toHaveBeenCalled();
expect(lastPrintedRows<{ key: string }>()).toEqual([
expect.objectContaining({
key: "openai/gpt-5.4",
}),
]);
});
});
describe("availability fallback", () => {
it("marks synthetic codex gpt-5.4 rows as available when provider auth exists", async () => {
mocks.listProfilesForProvider.mockImplementation((_: unknown, provider: string) =>
provider === "openai-codex"
? ([{ id: "profile-1" }] as Array<Record<string, unknown>>)
: [],
);
const runtime = createRuntime();
await modelsListCommand({ json: true }, runtime as never);
expect(mocks.printModelTable).toHaveBeenCalled();
expect(lastPrintedRows<{ key: string; available: boolean }>()).toContainEqual(
expect.objectContaining({
key: "openai-codex/gpt-5.4",
available: true,
}),
);
} finally {
mocks.listProfilesForProvider.mockReturnValue([]);
}
});
it("exits with an error when configured-mode listing has no model registry", async () => {
const previousExitCode = process.exitCode;
process.exitCode = undefined;
mocks.loadModelRegistry.mockResolvedValueOnce({
models: [],
availableKeys: new Set<string>(),
registry: undefined,
});
const runtime = createRuntime();
let observedExitCode: number | undefined;
try {
await modelsListCommand({ json: true }, runtime as never);
observedExitCode = process.exitCode;
} finally {
process.exitCode = previousExitCode;
}
expect(runtime.error).toHaveBeenCalledWith("Model registry unavailable.");
expect(observedExitCode).toBe(1);
expect(mocks.printModelTable).not.toHaveBeenCalled();
});
});
it("includes synthetic codex gpt-5.4 in --all output when catalog supports it", async () => {
mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] });
mocks.loadModelRegistry.mockResolvedValueOnce({
models: [
describe("--all catalog supplementation", () => {
it("includes synthetic codex gpt-5.4 in --all output when catalog supports it", async () => {
mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] });
mocks.loadModelRegistry.mockResolvedValueOnce({
models: [{ ...OPENAI_CODEX_53_MODEL }],
availableKeys: new Set(["openai-codex/gpt-5.3-codex"]),
registry: {
getAll: () => [{ ...OPENAI_CODEX_53_MODEL }],
},
});
mocks.loadModelCatalog.mockResolvedValueOnce([
{
provider: "openai-codex",
id: "gpt-5.3-codex",
name: "GPT-5.3 Codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
input: ["text"],
contextWindow: 272000,
maxTokens: 128000,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
},
],
availableKeys: new Set(["openai-codex/gpt-5.3-codex"]),
registry: {},
});
mocks.loadModelCatalog.mockResolvedValueOnce([
{
provider: "openai-codex",
id: "gpt-5.3-codex",
name: "GPT-5.3 Codex",
input: ["text"],
contextWindow: 272000,
},
{
provider: "openai-codex",
id: "gpt-5.4",
name: "GPT-5.4",
input: ["text"],
contextWindow: 272000,
},
]);
mocks.listProfilesForProvider.mockImplementation((_: unknown, provider: string) =>
provider === "openai-codex" ? ([{ id: "profile-1" }] as Array<Record<string, unknown>>) : [],
);
mocks.resolveModelWithRegistry.mockImplementation(
({ provider, modelId }: { provider: string; modelId: string }) => {
if (provider !== "openai-codex") {
{
provider: "openai-codex",
id: "gpt-5.4",
name: "GPT-5.4",
input: ["text"],
contextWindow: 272000,
},
]);
mocks.listProfilesForProvider.mockImplementation((_: unknown, provider: string) =>
provider === "openai-codex"
? ([{ id: "profile-1" }] as Array<Record<string, unknown>>)
: [],
);
mocks.resolveModelWithRegistry.mockImplementation(
({ provider, modelId }: { provider: string; modelId: string }) => {
if (provider !== "openai-codex") {
return undefined;
}
if (modelId === "gpt-5.3-codex") {
return { ...OPENAI_CODEX_53_MODEL };
}
if (modelId === "gpt-5.4") {
return { ...OPENAI_CODEX_MODEL };
}
return undefined;
}
if (modelId === "gpt-5.3-codex") {
return {
provider: "openai-codex",
id: "gpt-5.3-codex",
name: "GPT-5.3 Codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
input: ["text"],
contextWindow: 272000,
maxTokens: 128000,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
};
}
if (modelId === "gpt-5.4") {
return {
provider: "openai-codex",
id: "gpt-5.4",
name: "GPT-5.4",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
input: ["text"],
contextWindow: 272000,
maxTokens: 128000,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
};
}
return undefined;
},
);
const runtime = { log: vi.fn(), error: vi.fn() };
},
);
const runtime = createRuntime();
try {
await modelsListCommand(
{ all: true, provider: "openai-codex", json: true },
runtime as never,
);
expect(mocks.printModelTable).toHaveBeenCalled();
const rows = mocks.printModelTable.mock.calls.at(-1)?.[0] as Array<{
key: string;
available: boolean;
}>;
expect(rows).toEqual([
expect(lastPrintedRows<{ key: string; available: boolean }>()).toEqual([
expect.objectContaining({
key: "openai-codex/gpt-5.3-codex",
}),
@@ -304,66 +321,31 @@ describe("modelsListCommand forward-compat", () => {
available: true,
}),
]);
} finally {
mocks.listProfilesForProvider.mockReturnValue([]);
}
});
});
it("keeps discovered rows in --all output when catalog lookup is empty", async () => {
mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] });
mocks.loadModelRegistry.mockResolvedValueOnce({
models: [
{
provider: "openai-codex",
id: "gpt-5.3-codex",
name: "GPT-5.3 Codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
input: ["text"],
contextWindow: 272000,
maxTokens: 128000,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
it("keeps discovered rows in --all output when catalog lookup is empty", async () => {
mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] });
mocks.loadModelRegistry.mockResolvedValueOnce({
models: [{ ...OPENAI_CODEX_53_MODEL }],
availableKeys: new Set(["openai-codex/gpt-5.3-codex"]),
registry: {
getAll: () => [{ ...OPENAI_CODEX_53_MODEL }],
},
],
availableKeys: new Set(["openai-codex/gpt-5.3-codex"]),
registry: {},
});
mocks.loadModelCatalog.mockResolvedValueOnce([]);
const runtime = createRuntime();
await modelsListCommand(
{ all: true, provider: "openai-codex", json: true },
runtime as never,
);
expect(mocks.printModelTable).toHaveBeenCalled();
expect(lastPrintedRows<{ key: string }>()).toEqual([
expect.objectContaining({
key: "openai-codex/gpt-5.3-codex",
}),
]);
});
mocks.loadModelCatalog.mockResolvedValueOnce([]);
const runtime = { log: vi.fn(), error: vi.fn() };
await modelsListCommand({ all: true, provider: "openai-codex", json: true }, runtime as never);
expect(mocks.printModelTable).toHaveBeenCalled();
const rows = mocks.printModelTable.mock.calls.at(-1)?.[0] as Array<{ key: string }>;
expect(rows).toEqual([
expect.objectContaining({
key: "openai-codex/gpt-5.3-codex",
}),
]);
});
it("exits with an error when configured-mode listing has no model registry", async () => {
vi.clearAllMocks();
const previousExitCode = process.exitCode;
process.exitCode = undefined;
mocks.loadModelRegistry.mockResolvedValueOnce({
models: [],
availableKeys: new Set<string>(),
registry: undefined,
});
const runtime = { log: vi.fn(), error: vi.fn() };
let observedExitCode: number | undefined;
try {
await modelsListCommand({ json: true }, runtime as never);
observedExitCode = process.exitCode;
} finally {
process.exitCode = previousExitCode;
}
expect(runtime.error).toHaveBeenCalledWith("Model registry unavailable.");
expect(observedExitCode).toBe(1);
expect(mocks.printModelTable).not.toHaveBeenCalled();
});
});

View File

@@ -1,16 +1,18 @@
import type { Api, Model } from "@mariozechner/pi-ai";
import type { ModelRegistry } from "@mariozechner/pi-coding-agent";
import { loadModelCatalog } from "../../agents/model-catalog.js";
import { parseModelRef } from "../../agents/model-selection.js";
import { resolveModelWithRegistry } from "../../agents/pi-embedded-runner/model.js";
import type { RuntimeEnv } from "../../runtime.js";
import { resolveConfiguredEntries } from "./list.configured.js";
import { formatErrorWithStack } from "./list.errors.js";
import { loadModelRegistry, toModelRow } from "./list.registry.js";
import {
appendCatalogSupplementRows,
appendConfiguredRows,
appendDiscoveredRows,
loadListModelRegistry,
} from "./list.rows.js";
import { printModelTable } from "./list.table.js";
import type { ModelRow } from "./list.types.js";
import { loadModelsConfigWithSource } from "./load-config.js";
import { DEFAULT_PROVIDER, ensureFlagCompatibility, isLocalBaseUrl, modelKey } from "./shared.js";
import { DEFAULT_PROVIDER, ensureFlagCompatibility } from "./shared.js";
export async function modelsListCommand(
opts: {
@@ -39,17 +41,17 @@ export async function modelsListCommand(
return parsed?.provider ?? raw.toLowerCase();
})();
let models: Model<Api>[] = [];
let modelRegistry: ModelRegistry | undefined;
let discoveredKeys = new Set<string>();
let availableKeys: Set<string> | undefined;
let availabilityErrorMessage: string | undefined;
try {
// Keep command behavior explicit: sync models.json from the source config
// before building the read-only model registry view.
await ensureOpenClawModelsJson(sourceConfig ?? cfg);
const loaded = await loadModelRegistry(cfg, { sourceConfig });
const loaded = await loadListModelRegistry(cfg, { sourceConfig });
modelRegistry = loaded.registry;
models = loaded.models;
discoveredKeys = loaded.discoveredKeys;
availableKeys = loaded.availableKeys;
availabilityErrorMessage = loaded.availabilityErrorMessage;
} catch (err) {
@@ -62,83 +64,36 @@ export async function modelsListCommand(
`Model availability lookup failed; falling back to auth heuristics for discovered models: ${availabilityErrorMessage}`,
);
}
const discoveredKeys = new Set(models.map((model) => modelKey(model.provider, model.id)));
const { entries } = resolveConfiguredEntries(cfg);
const configuredByKey = new Map(entries.map((entry) => [entry.key, entry]));
const rows: ModelRow[] = [];
const rowContext = {
cfg,
authStore,
availableKeys,
configuredByKey,
discoveredKeys,
filter: {
provider: providerFilter,
local: opts.local,
},
};
if (opts.all) {
const seenKeys = new Set<string>();
const sorted = [...models].toSorted((a, b) => {
const p = a.provider.localeCompare(b.provider);
if (p !== 0) {
return p;
}
return a.id.localeCompare(b.id);
const seenKeys = appendDiscoveredRows({
rows,
models: modelRegistry?.getAll() ?? [],
context: rowContext,
});
for (const model of sorted) {
if (providerFilter && model.provider.toLowerCase() !== providerFilter) {
continue;
}
if (opts.local && !isLocalBaseUrl(model.baseUrl)) {
continue;
}
const key = modelKey(model.provider, model.id);
const configured = configuredByKey.get(key);
rows.push(
toModelRow({
model,
key,
tags: configured ? Array.from(configured.tags) : [],
aliases: configured?.aliases ?? [],
availableKeys,
cfg,
authStore,
}),
);
seenKeys.add(key);
}
if (modelRegistry) {
const catalog = await loadModelCatalog({ config: cfg });
for (const entry of catalog) {
if (providerFilter && entry.provider.toLowerCase() !== providerFilter) {
continue;
}
const key = modelKey(entry.provider, entry.id);
if (seenKeys.has(key)) {
continue;
}
const model = resolveModelWithRegistry({
provider: entry.provider,
modelId: entry.id,
modelRegistry,
cfg,
});
if (!model) {
continue;
}
if (opts.local && !isLocalBaseUrl(model.baseUrl)) {
continue;
}
const configured = configuredByKey.get(key);
rows.push(
toModelRow({
model,
key,
tags: configured ? Array.from(configured.tags) : [],
aliases: configured?.aliases ?? [],
availableKeys,
cfg,
authStore,
allowProviderAvailabilityFallback: !discoveredKeys.has(key),
}),
);
seenKeys.add(key);
}
await appendCatalogSupplementRows({
rows,
modelRegistry,
context: rowContext,
seenKeys,
});
}
} else {
const registry = modelRegistry;
@@ -147,37 +102,12 @@ export async function modelsListCommand(
process.exitCode = 1;
return;
}
for (const entry of entries) {
if (providerFilter && entry.ref.provider.toLowerCase() !== providerFilter) {
continue;
}
const model = resolveModelWithRegistry({
provider: entry.ref.provider,
modelId: entry.ref.model,
modelRegistry: registry,
cfg,
});
if (opts.local && model && !isLocalBaseUrl(model.baseUrl)) {
continue;
}
if (opts.local && !model) {
continue;
}
rows.push(
toModelRow({
model,
key: entry.key,
tags: Array.from(entry.tags),
aliases: entry.aliases,
availableKeys,
cfg,
authStore,
allowProviderAvailabilityFallback: model
? !discoveredKeys.has(modelKey(model.provider, model.id))
: false,
}),
);
}
appendConfiguredRows({
rows,
entries,
modelRegistry: registry,
context: rowContext,
});
}
if (rows.length === 0) {

View File

@@ -0,0 +1,178 @@
import type { Api, Model } from "@mariozechner/pi-ai";
import type { ModelRegistry } from "@mariozechner/pi-coding-agent";
import type { AuthProfileStore } from "../../agents/auth-profiles.js";
import { loadModelCatalog } from "../../agents/model-catalog.js";
import { resolveModelWithRegistry } from "../../agents/pi-embedded-runner/model.js";
import type { OpenClawConfig } from "../../config/config.js";
import { loadModelRegistry, toModelRow } from "./list.registry.js";
import type { ConfiguredEntry, ModelRow } from "./list.types.js";
import { isLocalBaseUrl, modelKey } from "./shared.js";
// Configured entries indexed by their "provider/model" row key.
type ConfiguredByKey = Map<string, ConfiguredEntry>;
// Row filters derived from CLI flags; `provider` is presumably already
// lowercased by the caller (matchesRowFilter compares lowercase against it).
type RowFilter = {
  provider?: string;
  local?: boolean;
};
// Shared inputs every row builder needs: config, auth store, availability and
// discovery key sets, configured-entry lookup, and the active row filter.
type RowBuilderContext = {
  cfg: OpenClawConfig;
  authStore: AuthProfileStore;
  availableKeys?: Set<string>;
  configuredByKey: ConfiguredByKey;
  discoveredKeys: Set<string>;
  filter: RowFilter;
};
// True when the model passes both the --provider and --local filters.
function matchesRowFilter(filter: RowFilter, model: { provider: string; baseUrl?: string }) {
  const providerMismatch =
    filter.provider !== undefined &&
    filter.provider !== "" &&
    model.provider.toLowerCase() !== filter.provider;
  if (providerMismatch) {
    return false;
  }
  // Only consult the base URL when the local filter is active.
  return !filter.local || isLocalBaseUrl(model.baseUrl ?? "");
}
// Assembles one table row, pulling tags/aliases from the configured-entry
// lookup when the key is configured (discovered-only rows get empty lists).
function buildRow(params: {
  model: Model<Api>;
  key: string;
  context: RowBuilderContext;
  allowProviderAvailabilityFallback?: boolean;
}): ModelRow {
  const { model, key, context, allowProviderAvailabilityFallback } = params;
  const configured = context.configuredByKey.get(key);
  return toModelRow({
    model,
    key,
    tags: configured ? [...configured.tags] : [],
    aliases: configured?.aliases ?? [],
    availableKeys: context.availableKeys,
    cfg: context.cfg,
    authStore: context.authStore,
    allowProviderAvailabilityFallback: allowProviderAvailabilityFallback ?? false,
  });
}
// Loads the registry and augments the result with the set of discovered
// "provider/model" keys so callers can tell discovered models from synthetics.
export async function loadListModelRegistry(
  cfg: OpenClawConfig,
  opts?: { sourceConfig?: OpenClawConfig },
) {
  const loaded = await loadModelRegistry(cfg, opts);
  const discoveredKeys = new Set<string>();
  for (const model of loaded.models) {
    discoveredKeys.add(modelKey(model.provider, model.id));
  }
  return { ...loaded, discoveredKeys };
}
// Appends a row for every discovered model that passes the filter, sorted by
// provider then id. Returns the keys appended so catalog supplementation can
// skip duplicates.
export function appendDiscoveredRows(params: {
  rows: ModelRow[];
  models: Model<Api>[];
  context: RowBuilderContext;
}): Set<string> {
  const { rows, models, context } = params;
  const seenKeys = new Set<string>();
  // Stable provider-then-id ordering; toSorted leaves the input untouched.
  const ordered = [...models].toSorted(
    (a, b) => a.provider.localeCompare(b.provider) || a.id.localeCompare(b.id),
  );
  for (const model of ordered) {
    if (!matchesRowFilter(context.filter, model)) {
      continue;
    }
    const key = modelKey(model.provider, model.id);
    rows.push(buildRow({ model, key, context }));
    seenKeys.add(key);
  }
  return seenKeys;
}
// Supplements the row list with catalog entries the registry did not discover.
// Each candidate is resolved through the registry; unresolvable or filtered-out
// models are skipped. Mutates both `rows` and `seenKeys`.
export async function appendCatalogSupplementRows(params: {
  rows: ModelRow[];
  modelRegistry: ModelRegistry;
  context: RowBuilderContext;
  seenKeys: Set<string>;
}): Promise<void> {
  const { rows, modelRegistry, context, seenKeys } = params;
  const providerFilter = context.filter.provider;
  const catalog = await loadModelCatalog({ config: context.cfg });
  for (const entry of catalog) {
    // Cheap provider check first so we avoid resolving filtered-out entries.
    if (providerFilter && entry.provider.toLowerCase() !== providerFilter) {
      continue;
    }
    const key = modelKey(entry.provider, entry.id);
    if (seenKeys.has(key)) {
      continue;
    }
    const model = resolveModelWithRegistry({
      provider: entry.provider,
      modelId: entry.id,
      modelRegistry,
      cfg: context.cfg,
    });
    if (!model || !matchesRowFilter(context.filter, model)) {
      continue;
    }
    rows.push(
      buildRow({
        model,
        key,
        context,
        // Synthetic (non-discovered) rows may fall back to provider-level
        // availability signals.
        allowProviderAvailabilityFallback: !context.discoveredKeys.has(key),
      }),
    );
    seenKeys.add(key);
  }
}
// Appends one row per configured entry, resolving each through the registry.
// Unlike buildRow(), tags/aliases come straight from the entry itself. With
// --local active, both unresolvable models and non-local base URLs are dropped.
export function appendConfiguredRows(params: {
  rows: ModelRow[];
  entries: ConfiguredEntry[];
  modelRegistry: ModelRegistry;
  context: RowBuilderContext;
}) {
  const { rows, entries, modelRegistry, context } = params;
  const { filter } = context;
  for (const entry of entries) {
    if (filter.provider && entry.ref.provider.toLowerCase() !== filter.provider) {
      continue;
    }
    const model = resolveModelWithRegistry({
      provider: entry.ref.provider,
      modelId: entry.ref.model,
      modelRegistry,
      cfg: context.cfg,
    });
    // --local keeps only entries that resolved to a local base URL.
    if (filter.local && (!model || !isLocalBaseUrl(model.baseUrl ?? ""))) {
      continue;
    }
    const allowFallback = model
      ? !context.discoveredKeys.has(modelKey(model.provider, model.id))
      : false;
    rows.push(
      toModelRow({
        model,
        key: entry.key,
        tags: [...entry.tags],
        aliases: entry.aliases,
        availableKeys: context.availableKeys,
        cfg: context.cfg,
        authStore: context.authStore,
        allowProviderAvailabilityFallback: allowFallback,
      }),
    );
  }
}