From bc0f54bd04469d79095283c3b7ab55a3118fc3ae Mon Sep 17 00:00:00 2001 From: Peter Steinberger Date: Fri, 24 Apr 2026 19:40:23 +0100 Subject: [PATCH] fix(models): separate Codex harness from model choices (#71193) * fix: separate Codex harness from model choices * docs: note Codex harness model choice fix --- CHANGELOG.md | 1 + docs/gateway/config-agents.md | 2 +- docs/plugins/codex-harness.md | 14 +- src/agents/model-picker-visibility.ts | 15 ++ src/auto-reply/reply/commands-models.test.ts | 22 ++ src/auto-reply/reply/commands-models.ts | 4 + .../doctor-legacy-config.migrations.test.ts | 71 +++++++ .../legacy-config-compatibility-base.ts | 2 + .../shared/legacy-config-core-normalizers.ts | 188 ++++++++++++++++++ src/commands/model-picker.test.ts | 55 +++++ src/flows/model-picker.ts | 41 ++-- 11 files changed, 396 insertions(+), 19 deletions(-) create mode 100644 src/agents/model-picker-visibility.ts diff --git a/CHANGELOG.md b/CHANGELOG.md index 42a951406de..f24bdb803b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ Docs: https://docs.openclaw.ai ### Fixes +- Codex harness/models: keep legacy `codex/*` harness shorthand out of model picker and `/models` choice surfaces while migrating primary legacy refs to canonical `openai/*` plus explicit Codex harness config. (#71193) Thanks @vincentkoc. - Plugins/runtime deps: respect explicit plugin and channel disablement when repairing bundled runtime dependencies, so doctor and health checks no longer install deps for disabled configured channels. - Diagnostics: harden tool and model diagnostic events against hostile errors, blocking listeners, and unsafe stability reason fields. Thanks @vincentkoc. - Plugins/onboarding: record local plugin install source metadata without duplicating raw absolute local paths in persisted `plugins.installs`, while preserving linked load-path cleanup. (#70970) Thanks @vincentkoc. diff --git a/docs/gateway/config-agents.md b/docs/gateway/config-agents.md index 6204bb38383..cd1d73393c7 100644 --- a/docs/gateway/config-agents.md +++ b/docs/gateway/config-agents.md @@ -369,7 +369,7 @@ Time format in system prompt. Default: `auto` (OS preference). - For direct OpenAI Responses models, server-side compaction is enabled automatically. Use `params.responsesServerCompaction: false` to stop injecting `context_management`, or `params.responsesCompactThreshold` to override the threshold. See [OpenAI server-side compaction](/providers/openai#server-side-compaction-responses-api). - `params`: global default provider parameters applied to all models. Set at `agents.defaults.params` (e.g. `{ cacheRetention: "long" }`). - `params` merge precedence (config): `agents.defaults.params` (global base) is overridden by `agents.defaults.models["provider/model"].params` (per-model), then `agents.list[].params` (matching agent id) overrides by key. See [Prompt Caching](/reference/prompt-caching) for details. -- `embeddedHarness`: default low-level embedded agent runtime policy. Use `runtime: "auto"` to let registered plugin harnesses claim supported models, `runtime: "pi"` to force the built-in PI harness, or a registered harness id such as `runtime: "codex"`. Set `fallback: "none"` to disable automatic PI fallback. +- `embeddedHarness`: default low-level embedded agent runtime policy. Use `runtime: "auto"` to let registered plugin harnesses claim supported models, `runtime: "pi"` to force the built-in PI harness, or a registered harness id such as `runtime: "codex"`. Set `fallback: "none"` to disable automatic PI fallback. 
New Codex harness configs should keep model refs canonical as `openai/*` and select the harness here rather than using legacy `codex/*` model refs. - Config writers that mutate these fields (for example `/models set`, `/models set-image`, and fallback add/remove commands) save canonical object form and preserve existing fallback lists when possible. - `maxConcurrent`: max parallel agent runs across sessions (each session still serialized). Default: 4. diff --git a/docs/plugins/codex-harness.md b/docs/plugins/codex-harness.md index e520b3f8e28..b7612865448 100644 --- a/docs/plugins/codex-harness.md +++ b/docs/plugins/codex-harness.md @@ -3,7 +3,7 @@ summary: "Run OpenClaw embedded agent turns through the bundled Codex app-server title: "Codex harness" read_when: - You want to use the bundled Codex app-server harness - - You need Codex model refs and config examples + - You need Codex harness config examples - You want to disable PI fallback for Codex-only deployments --- @@ -35,7 +35,8 @@ The harness is off by default. New configs should keep OpenAI model refs canonical as `openai/gpt-*` and explicitly force `embeddedHarness.runtime: "codex"` or `OPENCLAW_AGENT_RUNTIME=codex` when they want native app-server execution. Legacy `codex/*` model refs still auto-select -the harness for compatibility. +the harness for compatibility, but they are not shown as normal model/provider +choices. ## Pick the right model prefix @@ -54,10 +55,11 @@ GPT-5.5 is currently subscription/OAuth-only in OpenClaw. Use app-server harness. Direct API-key access for `openai/gpt-5.5` is supported once OpenAI enables GPT-5.5 on the public API. -Legacy `codex/gpt-*` refs remain accepted as compatibility aliases. New PI -Codex OAuth configs should use `openai-codex/gpt-*`; new native app-server -harness configs should use `openai/gpt-*` plus `embeddedHarness.runtime: -"codex"`. +Legacy `codex/gpt-*` refs remain accepted as compatibility aliases. Doctor +compatibility migration rewrites legacy primary `codex/*` refs to `openai/*` +and records the Codex harness policy separately. New PI Codex OAuth configs +should use `openai-codex/gpt-*`; new native app-server harness configs should +use `openai/gpt-*` plus `embeddedHarness.runtime: "codex"`. `agents.defaults.imageModel` follows the same prefix split. 
Use `openai-codex/gpt-*` when image understanding should run through the OpenAI diff --git a/src/agents/model-picker-visibility.ts b/src/agents/model-picker-visibility.ts new file mode 100644 index 00000000000..d0e77cfba13 --- /dev/null +++ b/src/agents/model-picker-visibility.ts @@ -0,0 +1,15 @@ +import { normalizeProviderId } from "./provider-id.js"; + +const HIDDEN_MODEL_PICKER_PROVIDERS = new Set(["codex"]); + +export function isModelPickerVisibleProvider(provider: string): boolean { + return !HIDDEN_MODEL_PICKER_PROVIDERS.has(normalizeProviderId(provider)); +} + +export function isModelPickerVisibleModelRef(ref: string): boolean { + const separatorIndex = ref.indexOf("/"); + if (separatorIndex <= 0) { + return true; + } + return isModelPickerVisibleProvider(ref.slice(0, separatorIndex)); +} diff --git a/src/auto-reply/reply/commands-models.test.ts b/src/auto-reply/reply/commands-models.test.ts index 3d870e3f93c..b9b9a9fea02 100644 --- a/src/auto-reply/reply/commands-models.test.ts +++ b/src/auto-reply/reply/commands-models.test.ts @@ -207,6 +207,28 @@ describe("handleModelsCommand", () => { }); }); + it("hides the virtual Codex harness provider from /models menus", async () => { + modelCatalogMocks.loadModelCatalog.mockResolvedValue([ + { provider: "codex", id: "gpt-5.5", name: "GPT-5.5" }, + { provider: "openai", id: "gpt-5.5", name: "GPT-5.5" }, + ]); + const cfg = { + agents: { + defaults: { + models: { + "codex/gpt-5.5": { alias: "legacy-codex" }, + "openai/gpt-5.5": { alias: "gpt" }, + }, + }, + }, + } satisfies Partial; + + const result = await handleModelsCommand(buildParams("/models", cfg), true); + + expect(result?.reply?.text).toContain("- openai (1)"); + expect(result?.reply?.text).not.toContain("- codex"); + }); + it("lists models for /models ", async () => { const result = await handleModelsCommand(buildParams("/models openai"), true); diff --git a/src/auto-reply/reply/commands-models.ts b/src/auto-reply/reply/commands-models.ts index da7097433d4..25384753174 100644 --- a/src/auto-reply/reply/commands-models.ts +++ b/src/auto-reply/reply/commands-models.ts @@ -1,6 +1,7 @@ import { resolveAgentDir, resolveSessionAgentId } from "../../agents/agent-scope.js"; import { resolveModelAuthLabel } from "../../agents/model-auth-label.js"; import { loadModelCatalog } from "../../agents/model-catalog.js"; +import { isModelPickerVisibleProvider } from "../../agents/model-picker-visibility.js"; import { buildAllowedModelSet, buildModelAliasIndex, @@ -77,6 +78,9 @@ export async function buildModelsProviderData( const byProvider = new Map>(); const add = (p: string, m: string) => { const key = normalizeProviderId(p); + if (!isModelPickerVisibleProvider(key)) { + return; + } const set = byProvider.get(key) ?? 
new Set(); set.add(m); byProvider.set(key, set); diff --git a/src/commands/doctor-legacy-config.migrations.test.ts b/src/commands/doctor-legacy-config.migrations.test.ts index 94ef1dfb44f..02dbd8610b2 100644 --- a/src/commands/doctor-legacy-config.migrations.test.ts +++ b/src/commands/doctor-legacy-config.migrations.test.ts @@ -318,6 +318,77 @@ describe("normalizeCompatibilityConfigValues", () => { expect(res.changes).toEqual([]); }); + it("migrates legacy Codex primary refs to OpenAI refs plus explicit Codex harness", () => { + const res = normalizeCompatibilityConfigValues({ + agents: { + defaults: { + embeddedHarness: { runtime: "auto", fallback: "pi" }, + model: { + primary: "codex/gpt-5.5", + fallbacks: ["anthropic/claude-sonnet-4-6", "codex/gpt-5.4-mini"], + }, + models: { + "codex/gpt-5.5": { alias: "legacy-codex" }, + "openai/gpt-5.5": { alias: "gpt", params: { temperature: 0.2 } }, + "codex/gpt-5.4-mini": {}, + }, + }, + list: [ + { + id: "reviewer", + model: "codex/gpt-5.4-mini", + }, + ], + }, + } as unknown as OpenClawConfig); + + expect(res.config.agents?.defaults?.model).toEqual({ + primary: "openai/gpt-5.5", + fallbacks: ["anthropic/claude-sonnet-4-6", "openai/gpt-5.4-mini"], + }); + expect(res.config.agents?.defaults?.embeddedHarness).toEqual({ + runtime: "codex", + fallback: "pi", + }); + expect(res.config.agents?.defaults?.models).toEqual({ + "openai/gpt-5.5": { alias: "gpt", params: { temperature: 0.2 } }, + "openai/gpt-5.4-mini": {}, + }); + expect(res.config.agents?.list?.[0]).toMatchObject({ + id: "reviewer", + embeddedHarness: { runtime: "codex" }, + model: "openai/gpt-5.4-mini", + }); + expect(res.changes).toEqual( + expect.arrayContaining([ + "Moved agents.defaults.model legacy codex/* primary refs to openai/* with Codex harness.", + "Moved agents.defaults.models legacy codex/* keys to openai/*.", + "Moved agents.list.reviewer.model legacy codex/* primary refs to openai/* with Codex harness.", + ]), + ); + }); + + it("does not force Codex harness for legacy fallback-only refs", () => { + const input = { + agents: { + defaults: { + model: { + primary: "openai/gpt-5.5", + fallbacks: ["codex/gpt-5.4-mini"], + }, + models: { + "codex/gpt-5.4-mini": { alias: "legacy-codex" }, + }, + }, + }, + } as unknown as OpenClawConfig; + + const res = normalizeCompatibilityConfigValues(input); + + expect(res.config).toEqual(input); + expect(res.changes).toEqual([]); + }); + it("prefers legacy nano-banana env.GEMINI_API_KEY over skill apiKey during migration", () => { const res = normalizeCompatibilityConfigValues({ skills: { diff --git a/src/commands/doctor/shared/legacy-config-compatibility-base.ts b/src/commands/doctor/shared/legacy-config-compatibility-base.ts index 37a7b56d8a4..b69332eb479 100644 --- a/src/commands/doctor/shared/legacy-config-compatibility-base.ts +++ b/src/commands/doctor/shared/legacy-config-compatibility-base.ts @@ -1,6 +1,7 @@ import type { OpenClawConfig } from "../../../config/types.openclaw.js"; import { normalizeLegacyBrowserConfig, + normalizeLegacyCodexHarnessModelRefs, normalizeLegacyCrossContextMessageConfig, normalizeLegacyMediaProviderOptions, normalizeLegacyMistralModelMaxTokens, @@ -36,6 +37,7 @@ export function normalizeBaseCompatibilityConfigValues( next = normalizeLegacyNanoBananaSkill(next, changes); next = normalizeLegacyTalkConfig(next, changes); + next = normalizeLegacyCodexHarnessModelRefs(next, changes); next = normalizeLegacyCrossContextMessageConfig(next, changes); next = normalizeLegacyMediaProviderOptions(next, changes); return 
normalizeLegacyMistralModelMaxTokens(next, changes); diff --git a/src/commands/doctor/shared/legacy-config-core-normalizers.ts b/src/commands/doctor/shared/legacy-config-core-normalizers.ts index a8c703d09d0..121ea0c46ec 100644 --- a/src/commands/doctor/shared/legacy-config-core-normalizers.ts +++ b/src/commands/doctor/shared/legacy-config-core-normalizers.ts @@ -192,6 +192,194 @@ type ModelProviderEntry = Partial< >; type ModelsConfigPatch = Partial>; type ModelDefinitionEntry = NonNullable[number]; +type AgentEmbeddedHarnessPatch = NonNullable< + NonNullable["defaults"]>["embeddedHarness"] +>; + +const LEGACY_CODEX_PROVIDER_ID = "codex"; +const OPENAI_PROVIDER_ID = "openai"; +const CODEX_HARNESS_RUNTIME = "codex"; + +function migrateLegacyCodexModelRef(raw: string): string | null { + const trimmed = raw.trim(); + const separatorIndex = trimmed.indexOf("/"); + if (separatorIndex <= 0 || separatorIndex === trimmed.length - 1) { + return null; + } + if (normalizeProviderId(trimmed.slice(0, separatorIndex)) !== LEGACY_CODEX_PROVIDER_ID) { + return null; + } + return `${OPENAI_PROVIDER_ID}/${trimmed.slice(separatorIndex + 1)}`; +} + +function normalizeLegacyCodexAgentModelConfig(raw: unknown): { + value?: unknown; + changed: boolean; + codexPrimarySelected: boolean; +} { + if (typeof raw === "string") { + const migrated = migrateLegacyCodexModelRef(raw); + return migrated + ? { value: migrated, changed: true, codexPrimarySelected: true } + : { value: raw, changed: false, codexPrimarySelected: false }; + } + if (!isRecord(raw)) { + return { value: raw, changed: false, codexPrimarySelected: false }; + } + + const migratedPrimary = + typeof raw.primary === "string" ? migrateLegacyCodexModelRef(raw.primary) : null; + if (!migratedPrimary) { + return { value: raw, changed: false, codexPrimarySelected: false }; + } + + const next: Record = { ...raw, primary: migratedPrimary }; + if (Array.isArray(raw.fallbacks)) { + next.fallbacks = raw.fallbacks.map((fallback) => { + if (typeof fallback !== "string") { + return fallback; + } + return migrateLegacyCodexModelRef(fallback) ?? fallback; + }); + } + return { value: next, changed: true, codexPrimarySelected: true }; +} + +function mergeModelEntry(legacyEntry: unknown, currentEntry: unknown): unknown { + if (!isRecord(legacyEntry) || !isRecord(currentEntry)) { + return currentEntry ?? 
legacyEntry; + } + return { ...legacyEntry, ...currentEntry }; +} + +function normalizeLegacyCodexAllowlistModels( + rawModels: unknown, + migrateCodexKeys: boolean, +): { + value?: unknown; + changed: boolean; +} { + if (!migrateCodexKeys || !isRecord(rawModels)) { + return { value: rawModels, changed: false }; + } + + let changed = false; + const next: Record = {}; + const legacyEntries: Array<[string, unknown]> = []; + for (const [rawKey, entry] of Object.entries(rawModels)) { + const migratedKey = migrateLegacyCodexModelRef(rawKey); + if (migratedKey) { + changed = true; + legacyEntries.push([migratedKey, entry]); + continue; + } + next[rawKey] = mergeModelEntry(entry, next[rawKey]); + } + for (const [migratedKey, entry] of legacyEntries) { + next[migratedKey] = mergeModelEntry(entry, next[migratedKey]); + } + return { value: next, changed }; +} + +function ensureCodexEmbeddedHarness(raw: unknown): { + value: AgentEmbeddedHarnessPatch; + changed: boolean; +} { + if (!isRecord(raw)) { + return { value: { runtime: CODEX_HARNESS_RUNTIME }, changed: true }; + } + const runtime = normalizeOptionalLowercaseString(raw.runtime); + if (runtime === CODEX_HARNESS_RUNTIME) { + return { value: raw as AgentEmbeddedHarnessPatch, changed: false }; + } + return { + value: { ...raw, runtime: CODEX_HARNESS_RUNTIME } as AgentEmbeddedHarnessPatch, + changed: true, + }; +} + +function normalizeLegacyCodexAgentContainer( + raw: Record, + path: string, + changes: string[], +): { value: Record; changed: boolean } { + let changed = false; + const next: Record = { ...raw }; + + const model = normalizeLegacyCodexAgentModelConfig(raw.model); + if (model.changed) { + next.model = model.value; + changed = true; + changes.push(`Moved ${path}.model legacy codex/* primary refs to openai/* with Codex harness.`); + } + + const models = normalizeLegacyCodexAllowlistModels(raw.models, model.codexPrimarySelected); + if (models.changed) { + next.models = models.value; + changed = true; + changes.push(`Moved ${path}.models legacy codex/* keys to openai/*.`); + } + + if (model.codexPrimarySelected) { + const harness = ensureCodexEmbeddedHarness(raw.embeddedHarness); + if (harness.changed) { + next.embeddedHarness = harness.value; + changed = true; + } + } + + return { value: next, changed }; +} + +export function normalizeLegacyCodexHarnessModelRefs( + cfg: OpenClawConfig, + changes: string[], +): OpenClawConfig { + const rawAgents = cfg.agents; + if (!isRecord(rawAgents)) { + return cfg; + } + + let changed = false; + const nextAgents: Record = { ...rawAgents }; + if (isRecord(rawAgents.defaults)) { + const defaults = normalizeLegacyCodexAgentContainer( + rawAgents.defaults, + "agents.defaults", + changes, + ); + if (defaults.changed) { + nextAgents.defaults = defaults.value; + changed = true; + } + } + + if (Array.isArray(rawAgents.list)) { + const nextList = rawAgents.list.map((entry, index) => { + if (!isRecord(entry)) { + return entry; + } + const agentId = normalizeOptionalString(entry.id) ?? 
String(index); + const agent = normalizeLegacyCodexAgentContainer(entry, `agents.list.${agentId}`, changes); + if (!agent.changed) { + return entry; + } + changed = true; + return agent.value; + }); + if (changed) { + nextAgents.list = nextList; + } + } + + if (!changed) { + return cfg; + } + return { + ...cfg, + agents: nextAgents as OpenClawConfig["agents"], + }; +} export function normalizeLegacyOpenAICodexModelsAddMetadata( cfg: OpenClawConfig, diff --git a/src/commands/model-picker.test.ts b/src/commands/model-picker.test.ts index 9ac565452cf..aa91d4b8c8a 100644 --- a/src/commands/model-picker.test.ts +++ b/src/commands/model-picker.test.ts @@ -152,6 +152,32 @@ describe("promptDefaultModel", () => { ); }); + it("hides the virtual Codex harness provider from default model choices", async () => { + loadModelCatalog.mockResolvedValue([ + { provider: "codex", id: "gpt-5.5", name: "GPT-5.5" }, + { provider: "openai", id: "gpt-5.5", name: "GPT-5.5" }, + { provider: "openai-codex", id: "gpt-5.5", name: "GPT-5.5" }, + ]); + + const select = vi.fn(async (params) => params.initialValue as never); + const prompter = makePrompter({ select }); + + await promptDefaultModel({ + config: { agents: { defaults: {} } } as OpenClawConfig, + prompter, + allowKeep: false, + includeManual: false, + ignoreAllowlist: true, + }); + + const optionValues = (select.mock.calls[0]?.[0]?.options ?? []).map( + (option: { value: string }) => option.value, + ); + expect(optionValues).toContain("openai/gpt-5.5"); + expect(optionValues).toContain("openai-codex/gpt-5.5"); + expect(optionValues).not.toContain("codex/gpt-5.5"); + }); + it("treats byteplus plan models as preferred-provider matches", async () => { loadModelCatalog.mockResolvedValue([ { @@ -402,6 +428,35 @@ describe("promptModelAllowlist", () => { }); }); +describe("Codex harness model picker visibility", () => { + it("hides virtual Codex harness refs from allowlist choices and configured supplements", async () => { + loadModelCatalog.mockResolvedValue([ + { provider: "codex", id: "gpt-5.5", name: "GPT-5.5" }, + { provider: "openai", id: "gpt-5.5", name: "GPT-5.5" }, + ]); + + const multiselect = createSelectAllMultiselect(); + const prompter = makePrompter({ multiselect }); + const config = { + agents: { + defaults: { + models: { + "codex/gpt-5.5": { alias: "legacy-codex" }, + "openai/gpt-5.5": { alias: "gpt" }, + }, + }, + }, + } as OpenClawConfig; + + await promptModelAllowlist({ config, prompter }); + + const call = multiselect.mock.calls[0]?.[0]; + const optionValues = (call?.options ?? 
[]).map((option: { value: string }) => option.value); + expect(optionValues).toEqual(["openai/gpt-5.5"]); + expect(call?.initialValues).toEqual(["openai/gpt-5.5"]); + }); +}); + describe("router model filtering", () => { it("filters internal router models in both default and allowlist prompts", async () => { loadModelCatalog.mockResolvedValue(OPENROUTER_CATALOG); diff --git a/src/flows/model-picker.ts b/src/flows/model-picker.ts index b75367d6168..af6ea1590a8 100644 --- a/src/flows/model-picker.ts +++ b/src/flows/model-picker.ts @@ -2,6 +2,10 @@ import { ensureAuthProfileStore, listProfilesForProvider } from "../agents/auth- import { DEFAULT_MODEL, DEFAULT_PROVIDER } from "../agents/defaults.js"; import { hasUsableCustomProviderApiKey, resolveEnvApiKey } from "../agents/model-auth.js"; import { loadModelCatalog } from "../agents/model-catalog.js"; +import { + isModelPickerVisibleModelRef, + isModelPickerVisibleProvider, +} from "../agents/model-picker-visibility.js"; import { buildAllowedModelSet, buildModelAliasIndex, @@ -142,7 +146,11 @@ function addModelSelectOption(params: { hasAuth: (provider: string) => boolean; }) { const key = modelKey(params.entry.provider, params.entry.id); - if (params.seen.has(key) || HIDDEN_ROUTER_MODELS.has(key)) { + if ( + params.seen.has(key) || + HIDDEN_ROUTER_MODELS.has(key) || + !isModelPickerVisibleProvider(params.entry.provider) + ) { return; } const hints: string[] = []; @@ -263,15 +271,13 @@ async function maybeFilterModelsByProvider(params: { workspaceDir?: string; env?: NodeJS.ProcessEnv; }): Promise { - const providerIds = Array.from(new Set(params.models.map((entry) => entry.provider))).toSorted( - (a, b) => a.localeCompare(b), + let next = params.models.filter((entry) => isModelPickerVisibleProvider(entry.provider)); + const providerIds = Array.from(new Set(next.map((entry) => entry.provider))).toSorted((a, b) => + a.localeCompare(b), ); const hasPreferredProvider = !!params.preferredProvider; const shouldPromptProvider = - !hasPreferredProvider && - providerIds.length > 1 && - params.models.length > PROVIDER_FILTER_THRESHOLD; - let next = params.models; + !hasPreferredProvider && providerIds.length > 1 && next.length > PROVIDER_FILTER_THRESHOLD; const matchesPreferredProvider = params.preferredProvider ? createPreferredProviderMatcher({ preferredProvider: params.preferredProvider, @@ -473,6 +479,13 @@ export async function promptDefaultModel( workspaceDir: params.workspaceDir, env: params.env, }); + if (filteredModels.length === 0) { + return promptManualModel({ + prompter: params.prompter, + allowBlank: allowKeep, + initialValue: configuredRaw || resolvedKey || undefined, + }); + } const matchesPreferredProvider = preferredProvider ? createPreferredProviderMatcher({ preferredProvider, @@ -609,7 +622,7 @@ export async function promptModelAllowlist(params: { ]); const initialKeys = allowedKeySet ? initialSeeds.filter((key) => allowedKeySet.has(key)) - : initialSeeds; + : initialSeeds.filter(isModelPickerVisibleModelRef); const allowlistProgress = params.prompter.progress("Loading available models"); let catalog: Awaited>; @@ -650,9 +663,11 @@ export async function promptModelAllowlist(params: { const options: WizardSelectOption[] = []; const seen = new Set(); - const allowedCatalog = allowedKeySet - ? catalog.filter((entry) => allowedKeySet.has(modelKey(entry.provider, entry.id))) - : catalog; + const allowedCatalog = ( + allowedKeySet + ? 
catalog.filter((entry) => allowedKeySet.has(modelKey(entry.provider, entry.id))) + : catalog + ).filter((entry) => isModelPickerVisibleProvider(entry.provider)); const filteredCatalog = preferredProvider && allowedCatalog.some((entry) => matchesPreferredProvider?.(entry.provider)) ? allowedCatalog.filter((entry) => matchesPreferredProvider?.(entry.provider)) @@ -668,7 +683,9 @@ export async function promptModelAllowlist(params: { addModelSelectOption({ entry, options, seen, aliasIndex, hasAuth }); } - const supplementalKeys = allowedKeySet ? allowedKeys : existingKeys; + const supplementalKeys = (allowedKeySet ? allowedKeys : existingKeys).filter( + isModelPickerVisibleModelRef, + ); for (const key of supplementalKeys) { if (seen.has(key)) { continue;
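
Example (illustration, not part of the diff): the doctor compatibility migration described above, shown as a before/after config sketch. The input and output shapes mirror the expectations in `doctor-legacy-config.migrations.test.ts`; the standalone constant names below and the trailing `console.log` are illustrative only, not exports or behavior added by this patch.

```ts
// Sketch only: shapes taken from doctor-legacy-config.migrations.test.ts.
// Constant names here are illustrative; they are not part of the patch.
const legacyConfig = {
  agents: {
    defaults: {
      embeddedHarness: { runtime: "auto", fallback: "pi" },
      model: {
        primary: "codex/gpt-5.5", // legacy harness shorthand used as a model ref
        fallbacks: ["anthropic/claude-sonnet-4-6", "codex/gpt-5.4-mini"],
      },
      models: {
        "codex/gpt-5.5": { alias: "legacy-codex" },
        "openai/gpt-5.5": { alias: "gpt", params: { temperature: 0.2 } },
        "codex/gpt-5.4-mini": {},
      },
    },
    list: [{ id: "reviewer", model: "codex/gpt-5.4-mini" }],
  },
};

// Expected result of normalizeCompatibilityConfigValues(legacyConfig):
// canonical openai/* refs plus an explicit Codex harness selection, applied
// both to agents.defaults and to agents with a legacy codex/* primary ref.
const migratedConfig = {
  agents: {
    defaults: {
      embeddedHarness: { runtime: "codex", fallback: "pi" },
      model: {
        primary: "openai/gpt-5.5",
        fallbacks: ["anthropic/claude-sonnet-4-6", "openai/gpt-5.4-mini"],
      },
      models: {
        // The existing openai/* entry wins over the migrated legacy codex/* entry.
        "openai/gpt-5.5": { alias: "gpt", params: { temperature: 0.2 } },
        "openai/gpt-5.4-mini": {},
      },
    },
    list: [
      {
        id: "reviewer",
        embeddedHarness: { runtime: "codex" },
        model: "openai/gpt-5.4-mini",
      },
    ],
  },
};

console.log({ legacyConfig, migratedConfig });
```

Fallback-only legacy refs (a `codex/*` entry that appears only in `fallbacks`, with an `openai/*` primary) are intentionally left untouched, so the harness is never forced on a config that did not select Codex as its primary model.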