From 1707493be417b0cf9a457e091aba9bf9a4fa8a51 Mon Sep 17 00:00:00 2001
From: Vincent Koc
Date: Thu, 2 Apr 2026 14:28:25 +0900
Subject: [PATCH] refactor(providers): add internal request config seam (#59454)
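
Transport resolution (api, baseUrl), header merging, and authHeader
intent were previously hand-rolled at three call sites in
pi-embedded-runner/model.ts. Route all of them through a single
resolveProviderRequestConfig() seam instead. Headers merge in
discovered -> provider -> model order (later sets win), and auth,
proxy, and tls come back as typed slots that later request-policy
work can populate without reshaping callers again.

Sketch of the seam, mirroring the new unit tests (the header values
are illustrative only):

    import { resolveProviderRequestConfig } from "./provider-request-config.js";

    const requestConfig = resolveProviderRequestConfig({
      provider: "custom-openai",
      api: "openai-responses",
      baseUrl: "https://proxy.example.com/v1",
      providerHeaders: { "X-Shared": "provider" },
      modelHeaders: { "X-Shared": "model" },
      capability: "llm",
      transport: "stream",
    });
    // Later header sets win, so the model-level value survives:
    //   requestConfig.headers => { "X-Shared": "model" }
    // No authHeader flag was passed, so auth stays on the provider default:
    //   requestConfig.auth => { mode: "provider-default", injectAuthorizationHeader: false }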
---
 src/agents/pi-embedded-runner/model.ts     | 72 +++++++++++-------
 src/agents/provider-request-config.test.ts | 65 ++++++++++++++++
 src/agents/provider-request-config.ts      | 88 ++++++++++++++++++++++
 3 files changed, 197 insertions(+), 28 deletions(-)
 create mode 100644 src/agents/provider-request-config.test.ts
 create mode 100644 src/agents/provider-request-config.ts

diff --git a/src/agents/pi-embedded-runner/model.ts b/src/agents/pi-embedded-runner/model.ts
index a6c3ada20db..d0a10e37afa 100644
--- a/src/agents/pi-embedded-runner/model.ts
+++ b/src/agents/pi-embedded-runner/model.ts
@@ -22,6 +22,7 @@ import {
   shouldSuppressBuiltInModel,
 } from "../model-suppression.js";
 import { discoverAuthStorage, discoverModels } from "../pi-model-discovery.js";
+import { resolveProviderRequestConfig } from "../provider-request-config.js";
 import { normalizeResolvedProviderModel } from "./model.provider-normalization.js";
 
 type InlineModelEntry = Omit & {
@@ -328,26 +329,30 @@ function applyConfiguredProviderOverrides(params: {
     cfg: params.cfg,
     runtimeHooks: params.runtimeHooks,
   });
-  return {
-    ...discoveredModel,
+  const requestConfig = resolveProviderRequestConfig({
+    provider: params.provider,
     api:
       resolvedTransport.api ??
       normalizeResolvedTransportApi(discoveredModel.api) ??
       "openai-responses",
     baseUrl: resolvedTransport.baseUrl ?? discoveredModel.baseUrl,
+    discoveredHeaders,
+    providerHeaders,
+    modelHeaders: configuredHeaders,
+    authHeader: providerConfig.authHeader,
+    capability: "llm",
+    transport: "stream",
+  });
+  return {
+    ...discoveredModel,
+    api: requestConfig.api ?? "openai-responses",
+    baseUrl: requestConfig.baseUrl ?? discoveredModel.baseUrl,
     reasoning: configuredModel?.reasoning ?? discoveredModel.reasoning,
     input: normalizedInput,
     cost: configuredModel?.cost ?? discoveredModel.cost,
     contextWindow: configuredModel?.contextWindow ?? discoveredModel.contextWindow,
     maxTokens: configuredModel?.maxTokens ?? discoveredModel.maxTokens,
-    headers:
-      discoveredHeaders || providerHeaders || configuredHeaders
-        ? {
-            ...discoveredHeaders,
-            ...providerHeaders,
-            ...configuredHeaders,
-          }
-        : undefined,
+    headers: requestConfig.headers,
     compat: configuredModel?.compat ?? discoveredModel.compat,
   };
 }
@@ -369,23 +374,25 @@ export function buildInlineProviderModels(
         api: model.api ?? entry?.api,
         baseUrl: entry?.baseUrl,
       });
+      const modelHeaders = sanitizeModelHeaders((model as InlineModelEntry).headers, {
+        stripSecretRefMarkers: true,
+      });
+      const requestConfig = resolveProviderRequestConfig({
+        provider: trimmed,
+        api: transport.api ?? model.api,
+        baseUrl: transport.baseUrl,
+        providerHeaders,
+        modelHeaders,
+        authHeader: entry?.authHeader,
+        capability: "llm",
+        transport: "stream",
+      });
       return {
         ...model,
         provider: trimmed,
-        baseUrl: transport.baseUrl,
-        api: transport.api ?? model.api,
-        headers: (() => {
-          const modelHeaders = sanitizeModelHeaders((model as InlineModelEntry).headers, {
-            stripSecretRefMarkers: true,
-          });
-          if (!providerHeaders && !modelHeaders) {
-            return undefined;
-          }
-          return {
-            ...providerHeaders,
-            ...modelHeaders,
-          };
-        })(),
+        baseUrl: requestConfig.baseUrl,
+        api: requestConfig.api ?? model.api,
+        headers: requestConfig.headers,
       };
     });
   });
@@ -534,6 +541,16 @@ function resolveConfiguredFallbackModel(params: {
     cfg,
     runtimeHooks,
   });
+  const requestConfig = resolveProviderRequestConfig({
+    provider,
+    api: fallbackTransport.api ?? "openai-responses",
+    baseUrl: fallbackTransport.baseUrl,
+    providerHeaders,
+    modelHeaders,
+    authHeader: providerConfig?.authHeader,
+    capability: "llm",
+    transport: "stream",
+  });
   return normalizeResolvedModel({
     provider,
     cfg,
@@ -541,9 +558,9 @@ function resolveConfiguredFallbackModel(params: {
     model: {
       id: modelId,
       name: modelId,
-      api: fallbackTransport.api ?? "openai-responses",
+      api: requestConfig.api ?? "openai-responses",
       provider,
-      baseUrl: fallbackTransport.baseUrl,
+      baseUrl: requestConfig.baseUrl,
       reasoning: configuredModel?.reasoning ?? false,
       input: ["text"],
       cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
@@ -555,8 +572,7 @@ function resolveConfiguredFallbackModel(params: {
         configuredModel?.maxTokens ??
         providerConfig?.models?.[0]?.maxTokens ??
         DEFAULT_CONTEXT_TOKENS,
-      headers:
-        providerHeaders || modelHeaders ? { ...providerHeaders, ...modelHeaders } : undefined,
+      headers: requestConfig.headers,
     } as Model,
     runtimeHooks,
   });
diff --git a/src/agents/provider-request-config.test.ts b/src/agents/provider-request-config.test.ts
new file mode 100644
index 00000000000..358d9fca1b8
--- /dev/null
+++ b/src/agents/provider-request-config.test.ts
@@ -0,0 +1,65 @@
+import { describe, expect, it } from "vitest";
+import { resolveProviderRequestConfig } from "./provider-request-config.js";
+
+describe("provider request config", () => {
+  it("merges discovered, provider, and model headers in precedence order", () => {
+    const resolved = resolveProviderRequestConfig({
+      provider: "custom-openai",
+      api: "openai-responses",
+      baseUrl: "https://proxy.example.com/v1",
+      discoveredHeaders: {
+        "X-Discovered": "1",
+        "X-Shared": "discovered",
+      },
+      providerHeaders: {
+        "X-Provider": "2",
+        "X-Shared": "provider",
+      },
+      modelHeaders: {
+        "X-Model": "3",
+        "X-Shared": "model",
+      },
+      capability: "llm",
+      transport: "stream",
+    });
+
+    expect(resolved.headers).toEqual({
+      "X-Discovered": "1",
+      "X-Provider": "2",
+      "X-Model": "3",
+      "X-Shared": "model",
+    });
+  });
+
+  it("surfaces authHeader intent without mutating headers yet", () => {
+    const resolved = resolveProviderRequestConfig({
+      provider: "google",
+      api: "google-generative-ai",
+      baseUrl: "https://generativelanguage.googleapis.com/v1beta",
+      authHeader: true,
+      capability: "llm",
+      transport: "stream",
+    });
+
+    expect(resolved.auth).toEqual({
+      mode: "authorization-bearer",
+      injectAuthorizationHeader: true,
+    });
+    expect(resolved.headers).toBeUndefined();
+  });
+
+  it("keeps future proxy and tls slots stable for current callers", () => {
+    const resolved = resolveProviderRequestConfig({
+      provider: "openrouter",
+      api: "openai-responses",
+      baseUrl: "https://openrouter.ai/api/v1",
+      capability: "llm",
+      transport: "stream",
+    });
+
+    expect(resolved.proxy).toEqual({ configured: false });
+    expect(resolved.tls).toEqual({ configured: false });
+    expect(resolved.policy.endpointClass).toBe("openrouter");
+    expect(resolved.policy.attributionProvider).toBe("openrouter");
+  });
+});
diff --git a/src/agents/provider-request-config.ts b/src/agents/provider-request-config.ts
new file mode 100644
index 00000000000..22a22077105
--- /dev/null
+++ b/src/agents/provider-request-config.ts
@@ -0,0 +1,88 @@
+import type { Api } from "@mariozechner/pi-ai";
+import type { ModelDefinitionConfig } from "../config/types.js";
+import type {
+  ProviderRequestCapability,
+  ProviderRequestPolicyResolution,
+  ProviderRequestTransport,
+} from "./provider-attribution.js";
+import { resolveProviderRequestPolicy } from "./provider-attribution.js";
+
+type RequestApi = Api | ModelDefinitionConfig["api"];
+
+export type ResolvedProviderRequestAuthConfig = {
+  mode: "provider-default" | "authorization-bearer";
+  injectAuthorizationHeader: boolean;
+};
+
+export type ResolvedProviderRequestProxyConfig = {
+  configured: false;
+};
+
+export type ResolvedProviderRequestTlsConfig = {
+  configured: false;
+};
+
+export type ResolvedProviderRequestConfig = {
+  api?: RequestApi;
+  baseUrl?: string;
+  headers?: Record<string, string>;
+  auth: ResolvedProviderRequestAuthConfig;
+  proxy: ResolvedProviderRequestProxyConfig;
+  tls: ResolvedProviderRequestTlsConfig;
+  policy: ProviderRequestPolicyResolution;
+};
+
+export function mergeProviderRequestHeaders(
+  ...headerSets: Array<Record<string, string> | undefined>
+): Record<string, string> | undefined {
+  let merged: Record<string, string> | undefined;
+  for (const headers of headerSets) {
+    if (!headers) {
+      continue;
+    }
+    merged = {
+      ...merged,
+      ...headers,
+    };
+  }
+  return merged && Object.keys(merged).length > 0 ? merged : undefined;
+}
+
+export function resolveProviderRequestConfig(params: {
+  provider: string;
+  api?: RequestApi;
+  baseUrl?: string;
+  capability?: ProviderRequestCapability;
+  transport?: ProviderRequestTransport;
+  discoveredHeaders?: Record<string, string>;
+  providerHeaders?: Record<string, string>;
+  modelHeaders?: Record<string, string>;
+  authHeader?: boolean;
+}): ResolvedProviderRequestConfig {
+  const policy = resolveProviderRequestPolicy({
+    provider: params.provider,
+    api: params.api,
+    baseUrl: params.baseUrl,
+    capability: params.capability ?? "llm",
+    transport: params.transport ?? "http",
+  });
+
+  return {
+    api: params.api,
+    baseUrl: params.baseUrl,
+    headers: mergeProviderRequestHeaders(
+      params.discoveredHeaders,
+      params.providerHeaders,
+      params.modelHeaders,
+    ),
+    auth: {
+      mode: params.authHeader ? "authorization-bearer" : "provider-default",
+      injectAuthorizationHeader: params.authHeader === true,
+    },
+    // These slots are intentionally internal-first. Future provider request
+    // policy work can populate them without reshaping existing callers again.
+    proxy: { configured: false },
+    tls: { configured: false },
+    policy,
+  };
+}