fix(models): keep --all aligned with synthetic catalog rows

Doruk Ardahan
2026-03-08 21:34:52 +03:00
committed by Peter Steinberger
parent a3dc4b5a57
commit 02d749ae36
2 changed files with 174 additions and 0 deletions
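
In short, the --all listing now walks the model catalog after the registry-discovered models and appends any entry whose provider/id key has not been emitted yet, so catalog-only rows such as openai-codex/gpt-5.4 still appear. A minimal standalone sketch of that dedupe-by-key merge, using simplified placeholder types rather than the project's real row and catalog-entry shapes:

// Simplified, illustrative types; the real ModelRow and catalog entry types live in the repo.
interface CatalogEntry {
  provider: string;
  id: string;
  name: string;
}

interface ModelRow {
  key: string;
  name: string;
}

const modelKey = (provider: string, id: string) => `${provider}/${id}`;

// Registry-discovered models are emitted first; catalog entries are appended
// only when their provider/id key has not been seen yet.
function mergeCatalogRows(
  discovered: CatalogEntry[],
  catalog: CatalogEntry[],
  providerFilter?: string,
): ModelRow[] {
  const rows: ModelRow[] = [];
  const seenKeys = new Set<string>();
  for (const model of discovered) {
    const key = modelKey(model.provider, model.id);
    rows.push({ key, name: model.name });
    seenKeys.add(key);
  }
  for (const entry of catalog) {
    if (providerFilter && entry.provider.toLowerCase() !== providerFilter) {
      continue;
    }
    const key = modelKey(entry.provider, entry.id);
    if (seenKeys.has(key)) {
      continue;
    }
    rows.push({ key, name: entry.name });
    seenKeys.add(key);
  }
  return rows;
}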


@@ -38,6 +38,7 @@ const mocks = vi.hoisted(() => {
loadModelRegistry: vi
.fn()
.mockResolvedValue({ models: [], availableKeys: new Set(), registry: {} }),
loadModelCatalog: vi.fn().mockResolvedValue([]),
resolveConfiguredEntries: vi.fn().mockReturnValue({
entries: [
{
@@ -77,6 +78,10 @@ vi.mock("../../agents/auth-profiles.js", async (importOriginal) => {
};
});
vi.mock("../../agents/model-catalog.js", () => ({
loadModelCatalog: mocks.loadModelCatalog,
}));
vi.mock("./list.registry.js", async (importOriginal) => {
const actual = await importOriginal<typeof import("./list.registry.js")>();
return {
@@ -198,6 +203,133 @@ describe("modelsListCommand forward-compat", () => {
);
});
it("includes synthetic codex gpt-5.4 in --all output when catalog supports it", async () => {
mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] });
mocks.loadModelRegistry.mockResolvedValueOnce({
models: [
{
provider: "openai-codex",
id: "gpt-5.3-codex",
name: "GPT-5.3 Codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
input: ["text"],
contextWindow: 272000,
maxTokens: 128000,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
},
],
availableKeys: new Set(["openai-codex/gpt-5.3-codex"]),
registry: {},
});
mocks.loadModelCatalog.mockResolvedValueOnce([
{
provider: "openai-codex",
id: "gpt-5.3-codex",
name: "GPT-5.3 Codex",
input: ["text"],
contextWindow: 272000,
},
{
provider: "openai-codex",
id: "gpt-5.4",
name: "GPT-5.4",
input: ["text"],
contextWindow: 272000,
},
]);
mocks.listProfilesForProvider.mockImplementationOnce((_: unknown, provider: string) =>
provider === "openai-codex" ? ([{ id: "profile-1" }] as Array<Record<string, unknown>>) : [],
);
mocks.resolveModelWithRegistry.mockImplementation(
({ provider, modelId }: { provider: string; modelId: string }) => {
if (provider !== "openai-codex") {
return undefined;
}
if (modelId === "gpt-5.3-codex") {
return {
provider: "openai-codex",
id: "gpt-5.3-codex",
name: "GPT-5.3 Codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
input: ["text"],
contextWindow: 272000,
maxTokens: 128000,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
};
}
if (modelId === "gpt-5.4") {
return {
provider: "openai-codex",
id: "gpt-5.4",
name: "GPT-5.4",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
input: ["text"],
contextWindow: 272000,
maxTokens: 128000,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
};
}
return undefined;
},
);
const runtime = { log: vi.fn(), error: vi.fn() };
await modelsListCommand({ all: true, provider: "openai-codex", json: true }, runtime as never);
expect(mocks.printModelTable).toHaveBeenCalled();
const rows = mocks.printModelTable.mock.calls.at(-1)?.[0] as Array<{
key: string;
available: boolean;
}>;
expect(rows).toEqual([
expect.objectContaining({
key: "openai-codex/gpt-5.3-codex",
}),
expect.objectContaining({
key: "openai-codex/gpt-5.4",
available: true,
}),
]);
});
it("keeps discovered rows in --all output when catalog lookup is empty", async () => {
mocks.resolveConfiguredEntries.mockReturnValueOnce({ entries: [] });
mocks.loadModelRegistry.mockResolvedValueOnce({
models: [
{
provider: "openai-codex",
id: "gpt-5.3-codex",
name: "GPT-5.3 Codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
input: ["text"],
contextWindow: 272000,
maxTokens: 128000,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
},
],
availableKeys: new Set(["openai-codex/gpt-5.3-codex"]),
registry: {},
});
mocks.loadModelCatalog.mockResolvedValueOnce([]);
const runtime = { log: vi.fn(), error: vi.fn() };
await modelsListCommand({ all: true, provider: "openai-codex", json: true }, runtime as never);
expect(mocks.printModelTable).toHaveBeenCalled();
const rows = mocks.printModelTable.mock.calls.at(-1)?.[0] as Array<{ key: string }>;
expect(rows).toEqual([
expect.objectContaining({
key: "openai-codex/gpt-5.3-codex",
}),
]);
});
it("exits with an error when configured-mode listing has no model registry", async () => {
vi.clearAllMocks();
const previousExitCode = process.exitCode;


@@ -1,5 +1,6 @@
import type { Api, Model } from "@mariozechner/pi-ai";
import type { ModelRegistry } from "@mariozechner/pi-coding-agent";
import { loadModelCatalog } from "../../agents/model-catalog.js";
import { parseModelRef } from "../../agents/model-selection.js";
import { resolveModelWithRegistry } from "../../agents/pi-embedded-runner/model.js";
import type { RuntimeEnv } from "../../runtime.js";
@@ -69,6 +70,7 @@ export async function modelsListCommand(
const rows: ModelRow[] = [];
if (opts.all) {
const seenKeys = new Set<string>();
const sorted = [...models].toSorted((a, b) => {
const p = a.provider.localeCompare(b.provider);
if (p !== 0) {
@@ -97,6 +99,46 @@ export async function modelsListCommand(
authStore,
}),
);
seenKeys.add(key);
}
if (modelRegistry) {
const catalog = await loadModelCatalog({ config: cfg });
for (const entry of catalog) {
if (providerFilter && entry.provider.toLowerCase() !== providerFilter) {
continue;
}
const key = modelKey(entry.provider, entry.id);
if (seenKeys.has(key)) {
continue;
}
const model = resolveModelWithRegistry({
provider: entry.provider,
modelId: entry.id,
modelRegistry,
cfg,
});
if (!model) {
continue;
}
if (opts.local && !isLocalBaseUrl(model.baseUrl)) {
continue;
}
const configured = configuredByKey.get(key);
rows.push(
toModelRow({
model,
key,
tags: configured ? Array.from(configured.tags) : [],
aliases: configured?.aliases ?? [],
availableKeys,
cfg,
authStore,
allowProviderAvailabilityFallback: !discoveredKeys.has(key),
}),
);
seenKeys.add(key);
}
}
} else {
const registry = modelRegistry;
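
For context, catalog-only rows are resolved through resolveModelWithRegistry and, because their key is not in discoveredKeys, are allowed to fall back to provider-level availability, which lines up with the test above expecting openai-codex/gpt-5.4 to report available: true. A usage sketch mirroring how the new tests drive the command (runtime stub simplified to a logger, not the real RuntimeEnv):

// List every model for one provider, including catalog-only entries, as JSON.
const runtime = { log: console.log, error: console.error };
await modelsListCommand(
  { all: true, provider: "openai-codex", json: true },
  runtime as never,
);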