refactor(providers): add internal request config seam (#59454)

This commit is contained in:
Vincent Koc
2026-04-02 14:28:25 +09:00
committed by GitHub
parent f69570f820
commit 1707493be4
3 changed files with 197 additions and 28 deletions

View File

@@ -22,6 +22,7 @@ import {
shouldSuppressBuiltInModel,
} from "../model-suppression.js";
import { discoverAuthStorage, discoverModels } from "../pi-model-discovery.js";
import { resolveProviderRequestConfig } from "../provider-request-config.js";
import { normalizeResolvedProviderModel } from "./model.provider-normalization.js";
type InlineModelEntry = Omit<ModelDefinitionConfig, "api"> & {
@@ -328,26 +329,30 @@ function applyConfiguredProviderOverrides(params: {
cfg: params.cfg,
runtimeHooks: params.runtimeHooks,
});
return {
...discoveredModel,
const requestConfig = resolveProviderRequestConfig({
provider: params.provider,
api:
resolvedTransport.api ??
normalizeResolvedTransportApi(discoveredModel.api) ??
"openai-responses",
baseUrl: resolvedTransport.baseUrl ?? discoveredModel.baseUrl,
discoveredHeaders,
providerHeaders,
modelHeaders: configuredHeaders,
authHeader: providerConfig.authHeader,
capability: "llm",
transport: "stream",
});
return {
...discoveredModel,
api: requestConfig.api ?? "openai-responses",
baseUrl: requestConfig.baseUrl ?? discoveredModel.baseUrl,
reasoning: configuredModel?.reasoning ?? discoveredModel.reasoning,
input: normalizedInput,
cost: configuredModel?.cost ?? discoveredModel.cost,
contextWindow: configuredModel?.contextWindow ?? discoveredModel.contextWindow,
maxTokens: configuredModel?.maxTokens ?? discoveredModel.maxTokens,
headers:
discoveredHeaders || providerHeaders || configuredHeaders
? {
...discoveredHeaders,
...providerHeaders,
...configuredHeaders,
}
: undefined,
headers: requestConfig.headers,
compat: configuredModel?.compat ?? discoveredModel.compat,
};
}
@@ -369,23 +374,25 @@ export function buildInlineProviderModels(
api: model.api ?? entry?.api,
baseUrl: entry?.baseUrl,
});
const modelHeaders = sanitizeModelHeaders((model as InlineModelEntry).headers, {
stripSecretRefMarkers: true,
});
const requestConfig = resolveProviderRequestConfig({
provider: trimmed,
api: transport.api ?? model.api,
baseUrl: transport.baseUrl,
providerHeaders,
modelHeaders,
authHeader: entry?.authHeader,
capability: "llm",
transport: "stream",
});
return {
...model,
provider: trimmed,
baseUrl: transport.baseUrl,
api: transport.api ?? model.api,
headers: (() => {
const modelHeaders = sanitizeModelHeaders((model as InlineModelEntry).headers, {
stripSecretRefMarkers: true,
});
if (!providerHeaders && !modelHeaders) {
return undefined;
}
return {
...providerHeaders,
...modelHeaders,
};
})(),
baseUrl: requestConfig.baseUrl,
api: requestConfig.api ?? model.api,
headers: requestConfig.headers,
};
});
});
@@ -534,6 +541,16 @@ function resolveConfiguredFallbackModel(params: {
cfg,
runtimeHooks,
});
const requestConfig = resolveProviderRequestConfig({
provider,
api: fallbackTransport.api ?? "openai-responses",
baseUrl: fallbackTransport.baseUrl,
providerHeaders,
modelHeaders,
authHeader: providerConfig?.authHeader,
capability: "llm",
transport: "stream",
});
return normalizeResolvedModel({
provider,
cfg,
@@ -541,9 +558,9 @@ function resolveConfiguredFallbackModel(params: {
model: {
id: modelId,
name: modelId,
api: fallbackTransport.api ?? "openai-responses",
api: requestConfig.api ?? "openai-responses",
provider,
baseUrl: fallbackTransport.baseUrl,
baseUrl: requestConfig.baseUrl,
reasoning: configuredModel?.reasoning ?? false,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
@@ -555,8 +572,7 @@ function resolveConfiguredFallbackModel(params: {
configuredModel?.maxTokens ??
providerConfig?.models?.[0]?.maxTokens ??
DEFAULT_CONTEXT_TOKENS,
headers:
providerHeaders || modelHeaders ? { ...providerHeaders, ...modelHeaders } : undefined,
headers: requestConfig.headers,
} as Model<Api>,
runtimeHooks,
});

View File

@@ -0,0 +1,65 @@
import { describe, expect, it } from "vitest";
import { resolveProviderRequestConfig } from "./provider-request-config.js";
// Unit coverage for the internal provider-request-config seam.
describe("provider request config", () => {
  it("merges discovered, provider, and model headers in precedence order", () => {
    const config = resolveProviderRequestConfig({
      provider: "custom-openai",
      api: "openai-responses",
      baseUrl: "https://proxy.example.com/v1",
      discoveredHeaders: { "X-Discovered": "1", "X-Shared": "discovered" },
      providerHeaders: { "X-Provider": "2", "X-Shared": "provider" },
      modelHeaders: { "X-Model": "3", "X-Shared": "model" },
      capability: "llm",
      transport: "stream",
    });
    // Model headers win over provider headers, which win over discovered ones.
    expect(config.headers).toEqual({
      "X-Discovered": "1",
      "X-Provider": "2",
      "X-Model": "3",
      "X-Shared": "model",
    });
  });

  it("surfaces authHeader intent without mutating headers yet", () => {
    const config = resolveProviderRequestConfig({
      provider: "google",
      api: "google-generative-ai",
      baseUrl: "https://generativelanguage.googleapis.com/v1beta",
      authHeader: true,
      capability: "llm",
      transport: "stream",
    });
    expect(config.auth).toEqual({
      mode: "authorization-bearer",
      injectAuthorizationHeader: true,
    });
    // No header sets were supplied, so nothing is materialized.
    expect(config.headers).toBeUndefined();
  });

  it("keeps future proxy and tls slots stable for current callers", () => {
    const config = resolveProviderRequestConfig({
      provider: "openrouter",
      api: "openai-responses",
      baseUrl: "https://openrouter.ai/api/v1",
      capability: "llm",
      transport: "stream",
    });
    // Placeholder slots stay unconfigured until policy work populates them.
    expect(config.proxy).toEqual({ configured: false });
    expect(config.tls).toEqual({ configured: false });
    expect(config.policy.endpointClass).toBe("openrouter");
    expect(config.policy.attributionProvider).toBe("openrouter");
  });
});

View File

@@ -0,0 +1,88 @@
import type { Api } from "@mariozechner/pi-ai";
import type { ModelDefinitionConfig } from "../config/types.js";
import type {
ProviderRequestCapability,
ProviderRequestPolicyResolution,
ProviderRequestTransport,
} from "./provider-attribution.js";
import { resolveProviderRequestPolicy } from "./provider-attribution.js";
// Union of API identifiers a request may target: either the pi-ai runtime
// `Api` type or whatever the model definition config is allowed to declare.
type RequestApi = Api | ModelDefinitionConfig["api"];

/** Resolved authentication intent for a provider request. */
export type ResolvedProviderRequestAuthConfig = {
  // "authorization-bearer" when the caller explicitly asked for an
  // Authorization header (authHeader: true); otherwise the provider default.
  mode: "provider-default" | "authorization-bearer";
  // True only when an Authorization header should be injected by the caller.
  injectAuthorizationHeader: boolean;
};

// Proxy slot — intentionally a placeholder today (always unconfigured);
// future provider request policy work can widen this without reshaping callers.
export type ResolvedProviderRequestProxyConfig = {
  configured: false;
};

// TLS slot — same placeholder pattern as the proxy slot above.
export type ResolvedProviderRequestTlsConfig = {
  configured: false;
};

/**
 * Full request configuration produced by `resolveProviderRequestConfig`:
 * transport target (api/baseUrl), merged headers, auth intent, the
 * placeholder proxy/tls slots, and the resolved request policy.
 */
export type ResolvedProviderRequestConfig = {
  api?: RequestApi;
  baseUrl?: string;
  headers?: Record<string, string>;
  auth: ResolvedProviderRequestAuthConfig;
  proxy: ResolvedProviderRequestProxyConfig;
  tls: ResolvedProviderRequestTlsConfig;
  policy: ProviderRequestPolicyResolution;
};
/**
 * Merges any number of optional header maps into one, with later sets
 * overriding earlier ones on key collisions. Returns `undefined` when no
 * set was provided or the merged result has no keys, so callers can leave
 * the `headers` field unset instead of carrying an empty object.
 */
export function mergeProviderRequestHeaders(
  ...headerSets: Array<Record<string, string> | undefined>
): Record<string, string> | undefined {
  const present = headerSets.filter(
    (headers): headers is Record<string, string> => headers !== undefined,
  );
  if (present.length === 0) {
    return undefined;
  }
  const merged: Record<string, string> = Object.assign({}, ...present);
  return Object.keys(merged).length > 0 ? merged : undefined;
}
export function resolveProviderRequestConfig(params: {
provider: string;
api?: RequestApi;
baseUrl?: string;
capability?: ProviderRequestCapability;
transport?: ProviderRequestTransport;
discoveredHeaders?: Record<string, string>;
providerHeaders?: Record<string, string>;
modelHeaders?: Record<string, string>;
authHeader?: boolean;
}): ResolvedProviderRequestConfig {
const policy = resolveProviderRequestPolicy({
provider: params.provider,
api: params.api,
baseUrl: params.baseUrl,
capability: params.capability ?? "llm",
transport: params.transport ?? "http",
});
return {
api: params.api,
baseUrl: params.baseUrl,
headers: mergeProviderRequestHeaders(
params.discoveredHeaders,
params.providerHeaders,
params.modelHeaders,
),
auth: {
mode: params.authHeader ? "authorization-bearer" : "provider-default",
injectAuthorizationHeader: params.authHeader === true,
},
// These slots are intentionally internal-first. Future provider request
// policy work can populate them without reshaping existing callers again.
proxy: { configured: false },
tls: { configured: false },
policy,
};
}