import {
  type ProviderResolveDynamicModelContext,
  type ProviderRuntimeModel,
} from "openclaw/plugin-sdk/plugin-entry";
import { createProviderApiKeyAuthMethod } from "openclaw/plugin-sdk/provider-auth-api-key";
import {
  DEFAULT_CONTEXT_TOKENS,
  normalizeModelCompat,
  normalizeProviderId,
  type ProviderPlugin,
} from "openclaw/plugin-sdk/provider-model-shared";
import { buildProviderStreamFamilyHooks } from "openclaw/plugin-sdk/provider-stream-family";
import { applyOpenAIConfig, OPENAI_DEFAULT_MODEL } from "./default-models.js";
import { buildOpenAIReplayPolicy } from "./replay-policy.js";
import {
  buildOpenAISyntheticCatalogEntry,
  cloneFirstTemplateModel,
  findCatalogTemplate,
  isOpenAIApiBaseUrl,
  matchesExactOrPrefix,
} from "./shared.js";
import {
  resolveOpenAITransportTurnState,
  resolveOpenAIWebSocketSessionPolicy,
} from "./transport-policy.js";

const PROVIDER_ID = "openai";

// Forward-compat GPT-5.4 family: model ids, context windows, and per-million-token pricing.
const OPENAI_GPT_54_MODEL_ID = "gpt-5.4";
const OPENAI_GPT_54_PRO_MODEL_ID = "gpt-5.4-pro";
const OPENAI_GPT_54_MINI_MODEL_ID = "gpt-5.4-mini";
const OPENAI_GPT_54_NANO_MODEL_ID = "gpt-5.4-nano";

const OPENAI_GPT_54_CONTEXT_TOKENS = 1_050_000;
const OPENAI_GPT_54_PRO_CONTEXT_TOKENS = 1_050_000;
const OPENAI_GPT_54_MINI_CONTEXT_TOKENS = 400_000;
const OPENAI_GPT_54_NANO_CONTEXT_TOKENS = 400_000;
const OPENAI_GPT_54_MAX_TOKENS = 128_000;

const OPENAI_GPT_54_COST = { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 } as const;
const OPENAI_GPT_54_PRO_COST = { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 } as const;
const OPENAI_GPT_54_MINI_COST = {
  input: 0.75,
  output: 4.5,
  cacheRead: 0.075,
  cacheWrite: 0,
} as const;
const OPENAI_GPT_54_NANO_COST = {
  input: 0.2,
  output: 1.25,
  cacheRead: 0.02,
  cacheWrite: 0,
} as const;

// Existing catalog entries to clone when a GPT-5.4 id is requested before it ships in the catalog.
const OPENAI_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.2"] as const;
const OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS = ["gpt-5.2-pro", "gpt-5.2"] as const;
const OPENAI_GPT_54_MINI_TEMPLATE_MODEL_IDS = ["gpt-5-mini"] as const;
const OPENAI_GPT_54_NANO_TEMPLATE_MODEL_IDS = ["gpt-5-nano", "gpt-5-mini"] as const;

const OPENAI_XHIGH_MODEL_IDS = [
  "gpt-5.4",
  "gpt-5.4-pro",
  "gpt-5.4-mini",
  "gpt-5.4-nano",
  "gpt-5.2",
] as const;
const OPENAI_MODERN_MODEL_IDS = [
  "gpt-5.4",
  "gpt-5.4-pro",
  "gpt-5.4-mini",
  "gpt-5.4-nano",
  "gpt-5.2",
] as const;

const OPENAI_DIRECT_SPARK_MODEL_ID = "gpt-5.3-codex-spark";
const SUPPRESSED_SPARK_PROVIDERS = new Set(["openai", "azure-openai-responses"]);

const OPENAI_RESPONSES_STREAM_HOOKS = buildProviderStreamFamilyHooks("openai-responses-defaults");

// The Responses transport is used when a model targets the OpenAI-owned provider
// (or an OpenAI API base URL) but is still configured for the Completions API.
function shouldUseOpenAIResponsesTransport(params: {
  provider: string;
  api?: string | null;
  baseUrl?: string;
}): boolean {
  if (params.api !== "openai-completions") {
    return false;
  }
  const isOwnerProvider = normalizeProviderId(params.provider) === PROVIDER_ID;
  if (isOwnerProvider) {
    return !params.baseUrl || isOpenAIApiBaseUrl(params.baseUrl);
  }
  return typeof params.baseUrl === "string" && isOpenAIApiBaseUrl(params.baseUrl);
}

function normalizeOpenAITransport(model: ProviderRuntimeModel): ProviderRuntimeModel {
  const useResponsesTransport = shouldUseOpenAIResponsesTransport({
    provider: model.provider,
    api: model.api,
    baseUrl: model.baseUrl,
  });
  if (!useResponsesTransport) {
    return model;
  }
  return {
    ...model,
    api: "openai-responses",
  };
}

// Resolves gpt-5.4* ids before they exist in the static catalog: clone the closest
// template model and patch it, or fall back to a synthesized compat entry.
function resolveOpenAIGpt54ForwardCompatModel(
  ctx: ProviderResolveDynamicModelContext,
): ProviderRuntimeModel | undefined {
  const trimmedModelId = ctx.modelId.trim();
  const lower = trimmedModelId.toLowerCase();
  let templateIds: readonly string[];
  let patch: Partial<ProviderRuntimeModel>;
  if (lower === OPENAI_GPT_54_MODEL_ID) {
    templateIds = OPENAI_GPT_54_TEMPLATE_MODEL_IDS;
    patch = {
      api: "openai-responses",
      provider: PROVIDER_ID,
      baseUrl: "https://api.openai.com/v1",
      reasoning: true,
      input: ["text", "image"],
      cost: OPENAI_GPT_54_COST,
      contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
      maxTokens: OPENAI_GPT_54_MAX_TOKENS,
    };
  } else if (lower === OPENAI_GPT_54_PRO_MODEL_ID) {
    templateIds = OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS;
    patch = {
      api: "openai-responses",
      provider: PROVIDER_ID,
      baseUrl: "https://api.openai.com/v1",
      reasoning: true,
      input: ["text", "image"],
      cost: OPENAI_GPT_54_PRO_COST,
      contextWindow: OPENAI_GPT_54_PRO_CONTEXT_TOKENS,
      maxTokens: OPENAI_GPT_54_MAX_TOKENS,
    };
  } else if (lower === OPENAI_GPT_54_MINI_MODEL_ID) {
    templateIds = OPENAI_GPT_54_MINI_TEMPLATE_MODEL_IDS;
    patch = {
      api: "openai-responses",
      provider: PROVIDER_ID,
      baseUrl: "https://api.openai.com/v1",
      reasoning: true,
      input: ["text", "image"],
      cost: OPENAI_GPT_54_MINI_COST,
      contextWindow: OPENAI_GPT_54_MINI_CONTEXT_TOKENS,
      maxTokens: OPENAI_GPT_54_MAX_TOKENS,
    };
  } else if (lower === OPENAI_GPT_54_NANO_MODEL_ID) {
    templateIds = OPENAI_GPT_54_NANO_TEMPLATE_MODEL_IDS;
    patch = {
      api: "openai-responses",
      provider: PROVIDER_ID,
      baseUrl: "https://api.openai.com/v1",
      reasoning: true,
      input: ["text", "image"],
      cost: OPENAI_GPT_54_NANO_COST,
      contextWindow: OPENAI_GPT_54_NANO_CONTEXT_TOKENS,
      maxTokens: OPENAI_GPT_54_MAX_TOKENS,
    };
  } else {
    return undefined;
  }
  return (
    cloneFirstTemplateModel({
      providerId: PROVIDER_ID,
      modelId: trimmedModelId,
      templateIds,
      ctx,
      patch,
    }) ??
    normalizeModelCompat({
      id: trimmedModelId,
      name: trimmedModelId,
      ...patch,
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
      contextWindow: patch.contextWindow ?? DEFAULT_CONTEXT_TOKENS,
      maxTokens: patch.maxTokens ?? DEFAULT_CONTEXT_TOKENS,
    } as ProviderRuntimeModel)
  );
}

export function buildOpenAIProvider(): ProviderPlugin {
  return {
    id: PROVIDER_ID,
    label: "OpenAI",
    hookAliases: ["azure-openai", "azure-openai-responses"],
    docsPath: "/providers/models",
    envVars: ["OPENAI_API_KEY"],
    auth: [
      createProviderApiKeyAuthMethod({
        providerId: PROVIDER_ID,
        methodId: "api-key",
        label: "OpenAI API key",
        hint: "Direct OpenAI API key",
        optionKey: "openaiApiKey",
        flagName: "--openai-api-key",
        envVar: "OPENAI_API_KEY",
        promptMessage: "Enter OpenAI API key",
        defaultModel: OPENAI_DEFAULT_MODEL,
        expectedProviders: ["openai"],
        applyConfig: (cfg) => applyOpenAIConfig(cfg),
        wizard: {
          choiceId: "openai-api-key",
          choiceLabel: "OpenAI API key",
          groupId: "openai",
          groupLabel: "OpenAI",
          groupHint: "Codex OAuth + API key",
        },
      }),
    ],
    resolveDynamicModel: (ctx) => resolveOpenAIGpt54ForwardCompatModel(ctx),
    normalizeResolvedModel: (ctx) => {
      if (normalizeProviderId(ctx.provider) !== PROVIDER_ID) {
        return undefined;
      }
      return normalizeOpenAITransport(ctx.model);
    },
    normalizeTransport: ({ provider, api, baseUrl }) =>
      shouldUseOpenAIResponsesTransport({ provider, api, baseUrl })
        ? { api: "openai-responses", baseUrl }
        : undefined,
    buildReplayPolicy: buildOpenAIReplayPolicy,
    // Default to automatic transport selection and WebSocket warmup unless the
    // caller has already set both explicitly.
    prepareExtraParams: (ctx) => {
      const transport = ctx.extraParams?.transport;
      const hasSupportedTransport =
        transport === "auto" || transport === "sse" || transport === "websocket";
      const hasExplicitWarmup = typeof ctx.extraParams?.openaiWsWarmup === "boolean";
      if (hasSupportedTransport && hasExplicitWarmup) {
        return ctx.extraParams;
      }
      return {
        ...ctx.extraParams,
        ...(hasSupportedTransport ? {} : { transport: "auto" }),
        ...(hasExplicitWarmup ? {} : { openaiWsWarmup: true }),
      };
    },
    ...OPENAI_RESPONSES_STREAM_HOOKS,
    matchesContextOverflowError: ({ errorMessage }) =>
      /content_filter.*(?:prompt|input).*(?:too long|exceed)/i.test(errorMessage),
    resolveTransportTurnState: (ctx) => resolveOpenAITransportTurnState(ctx),
    resolveWebSocketSessionPolicy: (ctx) => resolveOpenAIWebSocketSessionPolicy(ctx),
    resolveReasoningOutputMode: () => "native",
    supportsXHighThinking: ({ modelId }) => matchesExactOrPrefix(modelId, OPENAI_XHIGH_MODEL_IDS),
    isModernModelRef: ({ modelId }) => matchesExactOrPrefix(modelId, OPENAI_MODERN_MODEL_IDS),
    buildMissingAuthMessage: (ctx) => {
      if (ctx.provider !== PROVIDER_ID || ctx.listProfileIds("openai-codex").length === 0) {
        return undefined;
      }
      return 'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai-codex/gpt-5.4 (OAuth) or set OPENAI_API_KEY to use openai/gpt-5.4.';
    },
    // The spark model is only reachable through openai-codex OAuth, so hide it
    // from the direct OpenAI and Azure Responses providers with a pointer to the
    // supported route.
    suppressBuiltInModel: (ctx) => {
      if (
        !SUPPRESSED_SPARK_PROVIDERS.has(normalizeProviderId(ctx.provider)) ||
        ctx.modelId.toLowerCase() !== OPENAI_DIRECT_SPARK_MODEL_ID
      ) {
        return undefined;
      }
      return {
        suppress: true,
        errorMessage: `Unknown model: ${ctx.provider}/${OPENAI_DIRECT_SPARK_MODEL_ID}. ${OPENAI_DIRECT_SPARK_MODEL_ID} is only supported via openai-codex OAuth. Use openai-codex/${OPENAI_DIRECT_SPARK_MODEL_ID}.`,
      };
    },
    // Advertise the synthetic GPT-5.4 entries in the catalog, cloned from the
    // nearest existing template model for each tier.
    augmentModelCatalog: (ctx) => {
      const openAiGpt54Template = findCatalogTemplate({
        entries: ctx.entries,
        providerId: PROVIDER_ID,
        templateIds: OPENAI_GPT_54_TEMPLATE_MODEL_IDS,
      });
      const openAiGpt54ProTemplate = findCatalogTemplate({
        entries: ctx.entries,
        providerId: PROVIDER_ID,
        templateIds: OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS,
      });
      const openAiGpt54MiniTemplate = findCatalogTemplate({
        entries: ctx.entries,
        providerId: PROVIDER_ID,
        templateIds: OPENAI_GPT_54_MINI_TEMPLATE_MODEL_IDS,
      });
      const openAiGpt54NanoTemplate = findCatalogTemplate({
        entries: ctx.entries,
        providerId: PROVIDER_ID,
        templateIds: OPENAI_GPT_54_NANO_TEMPLATE_MODEL_IDS,
      });
      return [
        buildOpenAISyntheticCatalogEntry(openAiGpt54Template, {
          id: OPENAI_GPT_54_MODEL_ID,
          reasoning: true,
          input: ["text", "image"],
          contextWindow: OPENAI_GPT_54_CONTEXT_TOKENS,
        }),
        buildOpenAISyntheticCatalogEntry(openAiGpt54ProTemplate, {
          id: OPENAI_GPT_54_PRO_MODEL_ID,
          reasoning: true,
          input: ["text", "image"],
          contextWindow: OPENAI_GPT_54_PRO_CONTEXT_TOKENS,
        }),
        buildOpenAISyntheticCatalogEntry(openAiGpt54MiniTemplate, {
          id: OPENAI_GPT_54_MINI_MODEL_ID,
          reasoning: true,
          input: ["text", "image"],
          contextWindow: OPENAI_GPT_54_MINI_CONTEXT_TOKENS,
        }),
        buildOpenAISyntheticCatalogEntry(openAiGpt54NanoTemplate, {
          id: OPENAI_GPT_54_NANO_MODEL_ID,
          reasoning: true,
          input: ["text", "image"],
          contextWindow: OPENAI_GPT_54_NANO_CONTEXT_TOKENS,
        }),
      ].filter((entry): entry is NonNullable<typeof entry> => entry !== undefined);
    },
  };
}
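/*
 * Usage sketch (illustrative only, not part of the module): how a host might
 * exercise the plugin object returned by buildOpenAIProvider(). Only fields
 * defined above are referenced; the exact hook-context shapes come from the
 * openclaw plugin SDK and may require more fields than sketched here, so the
 * expected results are assumptions based on this file's logic.
 *
 *   const provider = buildOpenAIProvider();
 *   provider.id;    // "openai"
 *   provider.label; // "OpenAI"
 *
 *   // Transport normalization: a Completions-API model pointed at the OpenAI
 *   // base URL is expected to be rewritten to the Responses API.
 *   provider.normalizeTransport?.({
 *     provider: "openai",
 *     api: "openai-completions",
 *     baseUrl: "https://api.openai.com/v1",
 *   }); // -> { api: "openai-responses", baseUrl: "https://api.openai.com/v1" } (assumed)
 */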