feat(plugins): merge openai vendor seams into one plugin

This commit is contained in:
Peter Steinberger
2026-03-15 17:50:16 -07:00
parent bc5054ce68
commit b54e37c71f
26 changed files with 833 additions and 548 deletions

View File

@@ -2,22 +2,31 @@ import { describe, expect, it } from "vitest";
import type { ProviderPlugin } from "../../src/plugins/types.js";
import openAIPlugin from "./index.js";
function registerProvider(): ProviderPlugin {
let provider: ProviderPlugin | undefined;
function registerProviders(): ProviderPlugin[] {
const providers: ProviderPlugin[] = [];
openAIPlugin.register({
registerProvider(nextProvider: ProviderPlugin) {
provider = nextProvider;
providers.push(nextProvider);
},
} as never);
return providers;
}
function requireProvider(id: string): ProviderPlugin {
const provider = registerProviders().find((entry) => entry.id === id);
if (!provider) {
throw new Error("provider registration missing");
throw new Error(`provider registration missing for ${id}`);
}
return provider;
}
describe("openai plugin", () => {
it("registers openai and openai-codex providers from one extension", () => {
expect(registerProviders().map((provider) => provider.id)).toEqual(["openai", "openai-codex"]);
});
it("owns openai gpt-5.4 forward-compat resolution", () => {
const provider = registerProvider();
const provider = requireProvider("openai");
const model = provider.resolveDynamicModel?.({
provider: "openai",
modelId: "gpt-5.4-pro",
@@ -51,7 +60,7 @@ describe("openai plugin", () => {
});
it("owns direct openai transport normalization", () => {
const provider = registerProvider();
const provider = requireProvider("openai");
expect(
provider.normalizeResolvedModel?.({
provider: "openai",
@@ -73,4 +82,24 @@ describe("openai plugin", () => {
api: "openai-responses",
});
});
it("owns codex-only missing-auth hints and Spark suppression", () => {
const provider = requireProvider("openai");
expect(
provider.buildMissingAuthMessage?.({
env: {} as NodeJS.ProcessEnv,
provider: "openai",
listProfileIds: (providerId) => (providerId === "openai-codex" ? ["p1"] : []),
}),
).toContain("openai-codex/gpt-5.4");
expect(
provider.suppressBuiltInModel?.({
env: {} as NodeJS.ProcessEnv,
provider: "azure-openai-responses",
modelId: "gpt-5.3-codex-spark",
}),
).toMatchObject({
suppress: true,
});
});
});

View File

@@ -1,136 +1,15 @@
import {
emptyPluginConfigSchema,
type OpenClawPluginApi,
type ProviderResolveDynamicModelContext,
type ProviderRuntimeModel,
} from "openclaw/plugin-sdk/core";
import { DEFAULT_CONTEXT_TOKENS } from "../../src/agents/defaults.js";
import { normalizeModelCompat } from "../../src/agents/model-compat.js";
import { normalizeProviderId } from "../../src/agents/model-selection.js";
const PROVIDER_ID = "openai";
const OPENAI_BASE_URL = "https://api.openai.com/v1";
const OPENAI_GPT_54_MODEL_ID = "gpt-5.4";
const OPENAI_GPT_54_PRO_MODEL_ID = "gpt-5.4-pro";
const OPENAI_GPT_54_CONTEXT_TOKENS = 1_050_000;
const OPENAI_GPT_54_MAX_TOKENS = 128_000;
const OPENAI_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.2"] as const;
const OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS = ["gpt-5.2-pro", "gpt-5.2"] as const;
function isOpenAIApiBaseUrl(baseUrl?: string): boolean {
const trimmed = baseUrl?.trim();
if (!trimmed) {
return false;
}
return /^https?:\/\/api\.openai\.com(?:\/v1)?\/?$/i.test(trimmed);
}
function normalizeOpenAITransport(model: ProviderRuntimeModel): ProviderRuntimeModel {
const useResponsesTransport =
model.api === "openai-completions" && (!model.baseUrl || isOpenAIApiBaseUrl(model.baseUrl));
if (!useResponsesTransport) {
return model;
}
return {
...model,
api: "openai-responses",
};
}
function cloneFirstTemplateModel(params: {
modelId: string;
templateIds: readonly string[];
ctx: ProviderResolveDynamicModelContext;
patch?: Partial<ProviderRuntimeModel>;
}): ProviderRuntimeModel | undefined {
const trimmedModelId = params.modelId.trim();
for (const templateId of [...new Set(params.templateIds)].filter(Boolean)) {
const template = params.ctx.modelRegistry.find(
PROVIDER_ID,
templateId,
) as ProviderRuntimeModel | null;
if (!template) {
continue;
}
return normalizeModelCompat({
...template,
id: trimmedModelId,
name: trimmedModelId,
...params.patch,
} as ProviderRuntimeModel);
}
return undefined;
}
function resolveOpenAIGpt54ForwardCompatModel(
ctx: ProviderResolveDynamicModelContext,
): ProviderRuntimeModel | undefined {
const trimmedModelId = ctx.modelId.trim();
const lower = trimmedModelId.toLowerCase();
let templateIds: readonly string[];
if (lower === OPENAI_GPT_54_MODEL_ID) {
templateIds = OPENAI_GPT_54_TEMPLATE_MODEL_IDS;
} else if (lower === OPENAI_GPT_54_PRO_MODEL_ID) {
templateIds = OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS;
} else {
return undefined;
}
return (
cloneFirstTemplateModel({
modelId: trimmedModelId,
templateIds,
ctx,
patch: {
api: "openai-responses",
provider: PROVIDER_ID,
baseUrl: OPENAI_BASE_URL,
reasoning: true,
input: ["text", "image"],
contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
maxTokens: OPENAI_GPT_54_MAX_TOKENS,
},
}) ??
normalizeModelCompat({
id: trimmedModelId,
name: trimmedModelId,
api: "openai-responses",
provider: PROVIDER_ID,
baseUrl: OPENAI_BASE_URL,
reasoning: true,
input: ["text", "image"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
maxTokens: OPENAI_GPT_54_MAX_TOKENS,
} as ProviderRuntimeModel)
);
}
import { emptyPluginConfigSchema, type OpenClawPluginApi } from "openclaw/plugin-sdk/core";
import { buildOpenAICodexProviderPlugin } from "./openai-codex-provider.js";
import { buildOpenAIProvider } from "./openai-provider.js";
const openAIPlugin = {
id: PROVIDER_ID,
id: "openai",
name: "OpenAI Provider",
description: "Bundled OpenAI provider plugin",
description: "Bundled OpenAI provider plugins",
configSchema: emptyPluginConfigSchema(),
register(api: OpenClawPluginApi) {
api.registerProvider({
id: PROVIDER_ID,
label: "OpenAI",
docsPath: "/providers/models",
envVars: ["OPENAI_API_KEY"],
auth: [],
resolveDynamicModel: (ctx) => resolveOpenAIGpt54ForwardCompatModel(ctx),
normalizeResolvedModel: (ctx) => {
if (normalizeProviderId(ctx.provider) !== PROVIDER_ID) {
return undefined;
}
return normalizeOpenAITransport(ctx.model);
},
capabilities: {
providerFamily: "openai",
},
});
api.registerProvider(buildOpenAIProvider());
api.registerProvider(buildOpenAICodexProviderPlugin());
},
};

View File

@@ -0,0 +1,181 @@
import type {
ProviderResolveDynamicModelContext,
ProviderRuntimeModel,
} from "openclaw/plugin-sdk/core";
import { listProfilesForProvider } from "../../src/agents/auth-profiles/profiles.js";
import { ensureAuthProfileStore } from "../../src/agents/auth-profiles/store.js";
import { DEFAULT_CONTEXT_TOKENS } from "../../src/agents/defaults.js";
import { normalizeModelCompat } from "../../src/agents/model-compat.js";
import { normalizeProviderId } from "../../src/agents/model-selection.js";
import { buildOpenAICodexProvider } from "../../src/agents/models-config.providers.static.js";
import { fetchCodexUsage } from "../../src/infra/provider-usage.fetch.js";
import type { ProviderPlugin } from "../../src/plugins/types.js";
import { cloneFirstTemplateModel, findCatalogTemplate, isOpenAIApiBaseUrl } from "./shared.js";
const PROVIDER_ID = "openai-codex";
const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api";
const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
const OPENAI_CODEX_GPT_54_CONTEXT_TOKENS = 1_050_000;
const OPENAI_CODEX_GPT_54_MAX_TOKENS = 128_000;
const OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const;
const OPENAI_CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex";
const OPENAI_CODEX_GPT_53_SPARK_MODEL_ID = "gpt-5.3-codex-spark";
const OPENAI_CODEX_GPT_53_SPARK_CONTEXT_TOKENS = 128_000;
const OPENAI_CODEX_GPT_53_SPARK_MAX_TOKENS = 128_000;
const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
/** True when `baseUrl` targets the ChatGPT Codex backend (`chatgpt.com/backend-api`). */
function isOpenAICodexBaseUrl(baseUrl?: string): boolean {
  const CODEX_HOST_PATTERN = /^https?:\/\/chatgpt\.com\/backend-api\/?$/i;
  const candidate = (baseUrl ?? "").trim();
  return candidate.length > 0 && CODEX_HOST_PATTERN.test(candidate);
}
/**
 * Routes OpenAI-hosted responses traffic through the Codex backend.
 *
 * When the model has no custom base URL (i.e. it points at the public OpenAI
 * API or at the Codex backend already), an `openai-responses` API is rewritten
 * to `openai-codex-responses`, and an OpenAI/empty base URL is replaced with
 * the Codex backend URL. Models on other hosts pass through unchanged.
 */
function normalizeCodexTransport(model: ProviderRuntimeModel): ProviderRuntimeModel {
  const hasCodexCompatibleHost =
    !model.baseUrl || isOpenAIApiBaseUrl(model.baseUrl) || isOpenAICodexBaseUrl(model.baseUrl);
  const nextApi =
    hasCodexCompatibleHost && model.api === "openai-responses"
      ? "openai-codex-responses"
      : model.api;
  const needsCodexBaseUrl =
    nextApi === "openai-codex-responses" && (!model.baseUrl || isOpenAIApiBaseUrl(model.baseUrl));
  const nextBaseUrl = needsCodexBaseUrl ? OPENAI_CODEX_BASE_URL : model.baseUrl;
  // Avoid allocating a new object when nothing actually changed.
  if (nextApi === model.api && nextBaseUrl === model.baseUrl) {
    return model;
  }
  return { ...model, api: nextApi, baseUrl: nextBaseUrl };
}
/**
 * Resolves forward-compat Codex model ids (gpt-5.4, gpt-5.3-codex,
 * gpt-5.3-codex-spark) by cloning the newest matching template from the model
 * registry, or synthesizing a zero-cost Codex-responses model when no template
 * is registered.
 *
 * Returns undefined for any other model id so resolution falls through to the
 * regular lookup path.
 */
function resolveCodexForwardCompatModel(
  ctx: ProviderResolveDynamicModelContext,
): ProviderRuntimeModel | undefined {
  const trimmedModelId = ctx.modelId.trim();
  const lower = trimmedModelId.toLowerCase();
  let templateIds: readonly string[];
  let patch: Partial<ProviderRuntimeModel> | undefined;
  if (lower === OPENAI_CODEX_GPT_54_MODEL_ID) {
    templateIds = OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS;
    patch = {
      contextWindow: OPENAI_CODEX_GPT_54_CONTEXT_TOKENS,
      maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
    };
  } else if (lower === OPENAI_CODEX_GPT_53_SPARK_MODEL_ID) {
    templateIds = [OPENAI_CODEX_GPT_53_MODEL_ID, ...OPENAI_CODEX_TEMPLATE_MODEL_IDS];
    patch = {
      api: "openai-codex-responses",
      provider: PROVIDER_ID,
      baseUrl: OPENAI_CODEX_BASE_URL,
      reasoning: true,
      input: ["text"],
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
      contextWindow: OPENAI_CODEX_GPT_53_SPARK_CONTEXT_TOKENS,
      maxTokens: OPENAI_CODEX_GPT_53_SPARK_MAX_TOKENS,
    };
  } else if (lower === OPENAI_CODEX_GPT_53_MODEL_ID) {
    templateIds = OPENAI_CODEX_TEMPLATE_MODEL_IDS;
  } else {
    return undefined;
  }
  return (
    cloneFirstTemplateModel({
      providerId: PROVIDER_ID,
      modelId: trimmedModelId,
      templateIds,
      ctx,
      patch,
    }) ??
    normalizeModelCompat({
      id: trimmedModelId,
      name: trimmedModelId,
      api: "openai-codex-responses",
      provider: PROVIDER_ID,
      baseUrl: OPENAI_CODEX_BASE_URL,
      reasoning: true,
      input: ["text", "image"],
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
      contextWindow: DEFAULT_CONTEXT_TOKENS,
      maxTokens: DEFAULT_CONTEXT_TOKENS,
      // Fix: spread the branch-specific patch over the fallback defaults so the
      // Spark model keeps its text-only input (and tuned token limits) even when
      // no template model exists. Previously the fallback only honored the
      // patch's contextWindow/maxTokens and always advertised image input.
      ...patch,
    } as ProviderRuntimeModel)
  );
}
/**
 * Builds the bundled "openai-codex" provider plugin.
 *
 * Covers Codex-specific behavior: profile-gated catalog exposure, forward-compat
 * model resolution, transport defaulting, Codex transport normalization, OAuth
 * usage snapshots, and catalog augmentation with forward-compat model ids.
 */
export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
  return {
    id: PROVIDER_ID,
    label: "OpenAI Codex",
    docsPath: "/providers/models",
    auth: [],
    catalog: {
      order: "profile",
      // Only surface the Codex catalog when at least one Codex auth profile
      // exists; the store is opened without keychain prompts so this probe
      // never blocks on user interaction.
      run: async (ctx) => {
        const authStore = ensureAuthProfileStore(ctx.agentDir, {
          allowKeychainPrompt: false,
        });
        if (listProfilesForProvider(authStore, PROVIDER_ID).length === 0) {
          return null;
        }
        return {
          provider: buildOpenAICodexProvider(),
        };
      },
    },
    resolveDynamicModel: (ctx) => resolveCodexForwardCompatModel(ctx),
    capabilities: {
      providerFamily: "openai",
    },
    // Default the transport extra-param to "auto" unless the caller already
    // chose one of the recognized values ("auto" | "sse" | "websocket").
    prepareExtraParams: (ctx) => {
      const transport = ctx.extraParams?.transport;
      if (transport === "auto" || transport === "sse" || transport === "websocket") {
        return ctx.extraParams;
      }
      return {
        ...ctx.extraParams,
        transport: "auto",
      };
    },
    // Only normalize models that belong to this provider; returning undefined
    // lets other plugins handle their own providers.
    normalizeResolvedModel: (ctx) => {
      if (normalizeProviderId(ctx.provider) !== PROVIDER_ID) {
        return undefined;
      }
      return normalizeCodexTransport(ctx.model);
    },
    // Usage reporting authenticates via the Codex OAuth token.
    resolveUsageAuth: async (ctx) => await ctx.resolveOAuthToken(),
    fetchUsageSnapshot: async (ctx) =>
      await fetchCodexUsage(ctx.token, ctx.accountId, ctx.timeoutMs, ctx.fetchFn),
    // Adds forward-compat catalog entries (gpt-5.4 and the Spark model) cloned
    // from the newest available template entry; entries whose templates are
    // absent are dropped.
    augmentModelCatalog: (ctx) => {
      const gpt54Template = findCatalogTemplate({
        entries: ctx.entries,
        providerId: PROVIDER_ID,
        templateIds: OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS,
      });
      const sparkTemplate = findCatalogTemplate({
        entries: ctx.entries,
        providerId: PROVIDER_ID,
        templateIds: [OPENAI_CODEX_GPT_53_MODEL_ID, ...OPENAI_CODEX_TEMPLATE_MODEL_IDS],
      });
      return [
        gpt54Template
          ? {
              ...gpt54Template,
              id: OPENAI_CODEX_GPT_54_MODEL_ID,
              name: OPENAI_CODEX_GPT_54_MODEL_ID,
            }
          : undefined,
        sparkTemplate
          ? {
              ...sparkTemplate,
              id: OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,
              name: OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,
            }
          : undefined,
      ].filter((entry): entry is NonNullable<typeof entry> => entry !== undefined);
    },
  };
}

View File

@@ -0,0 +1,103 @@
import { describe, expect, it } from "vitest";
import type { ProviderPlugin } from "../../src/plugins/types.js";
import {
createProviderUsageFetch,
makeResponse,
} from "../../src/test-utils/provider-usage-fetch.js";
import openAIPlugin from "./index.js";
// Runs the plugin's register() against a stub plugin API and captures the
// "openai-codex" provider registration; throws if it never arrives.
function registerCodexProvider(): ProviderPlugin {
  let codexProvider: ProviderPlugin | undefined;
  openAIPlugin.register({
    registerProvider(nextProvider: ProviderPlugin) {
      if (nextProvider.id === "openai-codex") {
        codexProvider = nextProvider;
      }
    },
  } as never);
  if (codexProvider === undefined) {
    throw new Error("provider registration missing");
  }
  return codexProvider;
}
describe("openai codex provider", () => {
  it("owns forward-compat codex models", () => {
    const provider = registerCodexProvider();
    // The stub registry only knows gpt-5.2-codex, so gpt-5.4 must be cloned
    // from that template with the gpt-5.4 token limits applied.
    const model = provider.resolveDynamicModel?.({
      provider: "openai-codex",
      modelId: "gpt-5.4",
      modelRegistry: {
        find: (_provider: string, id: string) =>
          id === "gpt-5.2-codex"
            ? {
                id,
                name: id,
                api: "openai-codex-responses",
                provider: "openai-codex",
                baseUrl: "https://chatgpt.com/backend-api",
                reasoning: true,
                input: ["text"],
                cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
                contextWindow: 200_000,
                maxTokens: 8_192,
              }
            : null,
      } as never,
    });
    expect(model).toMatchObject({
      id: "gpt-5.4",
      provider: "openai-codex",
      api: "openai-codex-responses",
      contextWindow: 1_050_000,
      maxTokens: 128_000,
    });
  });
  it("owns codex transport defaults", () => {
    const provider = registerCodexProvider();
    // Extra params without a recognized transport get transport: "auto" added;
    // existing params are preserved.
    expect(
      provider.prepareExtraParams?.({
        provider: "openai-codex",
        modelId: "gpt-5.4",
        extraParams: { temperature: 0.2 },
      }),
    ).toEqual({
      temperature: 0.2,
      transport: "auto",
    });
  });
  it("owns usage snapshot fetching", async () => {
    const provider = registerCodexProvider();
    // Stub the Codex usage endpoint; any other URL 404s.
    const mockFetch = createProviderUsageFetch(async (url) => {
      if (url.includes("chatgpt.com/backend-api/wham/usage")) {
        return makeResponse(200, {
          rate_limit: {
            primary_window: { used_percent: 12, limit_window_seconds: 10800, reset_at: 1_705_000 },
          },
          plan_type: "Plus",
        });
      }
      return makeResponse(404, "not found");
    });
    // Expected snapshot: 10800s window rendered as "3h", and reset_at scaled
    // from 1_705_000 to 1_705_000_000 — presumably a seconds→milliseconds
    // conversion in the fetcher; confirm against fetchCodexUsage.
    await expect(
      provider.fetchUsageSnapshot?.({
        config: {} as never,
        env: {} as NodeJS.ProcessEnv,
        provider: "openai-codex",
        token: "codex-token",
        accountId: "acc-1",
        timeoutMs: 5_000,
        fetchFn: mockFetch as unknown as typeof fetch,
      }),
    ).resolves.toEqual({
      provider: "openai-codex",
      displayName: "Codex",
      windows: [{ label: "3h", usedPercent: 12, resetAt: 1_705_000_000 }],
      plan: "Plus",
    });
  });
});

View File

@@ -0,0 +1,143 @@
import {
type ProviderResolveDynamicModelContext,
type ProviderRuntimeModel,
} from "openclaw/plugin-sdk/core";
import { normalizeModelCompat } from "../../src/agents/model-compat.js";
import { normalizeProviderId } from "../../src/agents/model-selection.js";
import type { ProviderPlugin } from "../../src/plugins/types.js";
import { cloneFirstTemplateModel, findCatalogTemplate, isOpenAIApiBaseUrl } from "./shared.js";
const PROVIDER_ID = "openai";
const OPENAI_GPT_54_MODEL_ID = "gpt-5.4";
const OPENAI_GPT_54_PRO_MODEL_ID = "gpt-5.4-pro";
const OPENAI_GPT_54_CONTEXT_TOKENS = 1_050_000;
const OPENAI_GPT_54_MAX_TOKENS = 128_000;
const OPENAI_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.2"] as const;
const OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS = ["gpt-5.2-pro", "gpt-5.2"] as const;
const OPENAI_DIRECT_SPARK_MODEL_ID = "gpt-5.3-codex-spark";
const SUPPRESSED_SPARK_PROVIDERS = new Set(["openai", "azure-openai-responses"]);
/**
 * Upgrades direct OpenAI traffic from the completions API to the responses API.
 * Applies only when the model targets the default OpenAI host (or has no base
 * URL at all); models pointed at custom hosts pass through unchanged.
 */
function normalizeOpenAITransport(model: ProviderRuntimeModel): ProviderRuntimeModel {
  const targetsOpenAIHost = !model.baseUrl || isOpenAIApiBaseUrl(model.baseUrl);
  if (model.api !== "openai-completions" || !targetsOpenAIHost) {
    return model;
  }
  return { ...model, api: "openai-responses" };
}
/**
 * Forward-compat resolution for gpt-5.4 / gpt-5.4-pro on the direct OpenAI
 * provider: clones the newest known template model under the requested id, or
 * synthesizes a zero-cost responses-API model when no template is registered.
 * Returns undefined for unrelated model ids.
 */
function resolveOpenAIGpt54ForwardCompatModel(
  ctx: ProviderResolveDynamicModelContext,
): ProviderRuntimeModel | undefined {
  const trimmedModelId = ctx.modelId.trim();
  let templateIds: readonly string[];
  switch (trimmedModelId.toLowerCase()) {
    case OPENAI_GPT_54_MODEL_ID:
      templateIds = OPENAI_GPT_54_TEMPLATE_MODEL_IDS;
      break;
    case OPENAI_GPT_54_PRO_MODEL_ID:
      templateIds = OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS;
      break;
    default:
      return undefined;
  }
  const fromTemplate = cloneFirstTemplateModel({
    providerId: PROVIDER_ID,
    modelId: trimmedModelId,
    templateIds,
    ctx,
    patch: {
      api: "openai-responses",
      provider: PROVIDER_ID,
      baseUrl: "https://api.openai.com/v1",
      reasoning: true,
      input: ["text", "image"],
      contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
      maxTokens: OPENAI_GPT_54_MAX_TOKENS,
    },
  });
  if (fromTemplate !== undefined) {
    return fromTemplate;
  }
  // No template registered: synthesize a standalone model with zeroed cost data.
  return normalizeModelCompat({
    id: trimmedModelId,
    name: trimmedModelId,
    api: "openai-responses",
    provider: PROVIDER_ID,
    baseUrl: "https://api.openai.com/v1",
    reasoning: true,
    input: ["text", "image"],
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
    contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
    maxTokens: OPENAI_GPT_54_MAX_TOKENS,
  } as ProviderRuntimeModel);
}
/**
 * Builds the bundled direct "openai" provider plugin.
 *
 * Covers API-key-based access: forward-compat gpt-5.4 resolution, transport
 * normalization, a Codex-aware missing-auth hint, suppression of the
 * Codex-only Spark model on direct providers, and catalog augmentation.
 */
export function buildOpenAIProvider(): ProviderPlugin {
  return {
    id: PROVIDER_ID,
    label: "OpenAI",
    docsPath: "/providers/models",
    envVars: ["OPENAI_API_KEY"],
    auth: [],
    resolveDynamicModel: (ctx) => resolveOpenAIGpt54ForwardCompatModel(ctx),
    // Only normalize models that belong to this provider; returning undefined
    // lets other plugins handle their own providers.
    normalizeResolvedModel: (ctx) => {
      if (normalizeProviderId(ctx.provider) !== PROVIDER_ID) {
        return undefined;
      }
      return normalizeOpenAITransport(ctx.model);
    },
    capabilities: {
      providerFamily: "openai",
    },
    // When no API key is configured but Codex OAuth profiles exist, point the
    // user at the openai-codex provider instead of a generic auth error.
    buildMissingAuthMessage: (ctx) => {
      if (ctx.provider !== PROVIDER_ID || ctx.listProfileIds("openai-codex").length === 0) {
        return undefined;
      }
      return 'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai-codex/gpt-5.4 (OAuth) or set OPENAI_API_KEY to use openai/gpt-5.4.';
    },
    // The Spark model is Codex-OAuth-only: hide it from direct OpenAI and
    // Azure responses providers and explain where it is available.
    suppressBuiltInModel: (ctx) => {
      if (
        !SUPPRESSED_SPARK_PROVIDERS.has(normalizeProviderId(ctx.provider)) ||
        ctx.modelId.toLowerCase() !== OPENAI_DIRECT_SPARK_MODEL_ID
      ) {
        return undefined;
      }
      return {
        suppress: true,
        errorMessage: `Unknown model: ${ctx.provider}/${OPENAI_DIRECT_SPARK_MODEL_ID}. ${OPENAI_DIRECT_SPARK_MODEL_ID} is only supported via openai-codex OAuth. Use openai-codex/${OPENAI_DIRECT_SPARK_MODEL_ID}.`,
      };
    },
    // Adds forward-compat catalog entries (gpt-5.4 and gpt-5.4-pro) cloned from
    // the newest available template entry; entries without templates are dropped.
    augmentModelCatalog: (ctx) => {
      const openAiGpt54Template = findCatalogTemplate({
        entries: ctx.entries,
        providerId: PROVIDER_ID,
        templateIds: OPENAI_GPT_54_TEMPLATE_MODEL_IDS,
      });
      const openAiGpt54ProTemplate = findCatalogTemplate({
        entries: ctx.entries,
        providerId: PROVIDER_ID,
        templateIds: OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS,
      });
      return [
        openAiGpt54Template
          ? {
              ...openAiGpt54Template,
              id: OPENAI_GPT_54_MODEL_ID,
              name: OPENAI_GPT_54_MODEL_ID,
            }
          : undefined,
        openAiGpt54ProTemplate
          ? {
              ...openAiGpt54ProTemplate,
              id: OPENAI_GPT_54_PRO_MODEL_ID,
              name: OPENAI_GPT_54_PRO_MODEL_ID,
            }
          : undefined,
      ].filter((entry): entry is NonNullable<typeof entry> => entry !== undefined);
    },
  };
}

View File

@@ -1,6 +1,6 @@
{
"id": "openai",
"providers": ["openai"],
"providers": ["openai", "openai-codex"],
"configSchema": {
"type": "object",
"additionalProperties": false,

View File

@@ -2,7 +2,7 @@
"name": "@openclaw/openai-provider",
"version": "2026.3.14",
"private": true,
"description": "OpenClaw OpenAI provider plugin",
"description": "OpenClaw OpenAI provider plugins",
"type": "module",
"openclaw": {
"extensions": [

View File

@@ -0,0 +1,57 @@
import { normalizeModelCompat } from "../../src/agents/model-compat.js";
import type {
ProviderResolveDynamicModelContext,
ProviderRuntimeModel,
} from "../../src/plugins/types.js";
export const OPENAI_API_BASE_URL = "https://api.openai.com/v1";
/** True when `baseUrl` points at the public OpenAI API host (with or without `/v1`). */
export function isOpenAIApiBaseUrl(baseUrl?: string): boolean {
  const OPENAI_HOST_PATTERN = /^https?:\/\/api\.openai\.com(?:\/v1)?\/?$/i;
  const candidate = (baseUrl ?? "").trim();
  return candidate.length > 0 && OPENAI_HOST_PATTERN.test(candidate);
}
/**
 * Clones the first registry template found for `providerId`, renaming it to the
 * trimmed `modelId` and overlaying `patch`. Template ids are tried in the
 * caller-specified priority order (de-duplicated, empty ids skipped).
 * Returns undefined when no template exists.
 */
export function cloneFirstTemplateModel(params: {
  providerId: string;
  modelId: string;
  templateIds: readonly string[];
  ctx: ProviderResolveDynamicModelContext;
  patch?: Partial<ProviderRuntimeModel>;
}): ProviderRuntimeModel | undefined {
  const { providerId, ctx, patch } = params;
  const targetId = params.modelId.trim();
  const candidateIds = Array.from(new Set(params.templateIds)).filter((id) => Boolean(id));
  for (const templateId of candidateIds) {
    const template = ctx.modelRegistry.find(providerId, templateId) as ProviderRuntimeModel | null;
    if (!template) {
      continue;
    }
    return normalizeModelCompat({
      ...template,
      id: targetId,
      name: targetId,
      ...patch,
    } as ProviderRuntimeModel);
  }
  return undefined;
}
/**
 * Returns the first catalog entry that matches `providerId` and one of
 * `templateIds` — template ids are tried in priority order and both provider
 * and id are compared case-insensitively. Undefined when nothing matches.
 */
export function findCatalogTemplate(params: {
  entries: ReadonlyArray<{ provider: string; id: string }>;
  providerId: string;
  templateIds: readonly string[];
}) {
  const wantedProvider = params.providerId.toLowerCase();
  for (const templateId of params.templateIds) {
    const wantedId = templateId.toLowerCase();
    const match = params.entries.find(
      (entry) =>
        entry.provider.toLowerCase() === wantedProvider &&
        entry.id.toLowerCase() === wantedId,
    );
    if (match !== undefined) {
      return match;
    }
  }
  return undefined;
}