mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-06 16:50:43 +00:00
refactor(agents): clarify prompt cache compatibility gates
This commit is contained in:
@@ -573,6 +573,23 @@ export function resolveProviderRequestCapabilities(
     compatibilityFamily = "moonshot";
   }
 
+  const isResponsesApi = api !== undefined && OPENAI_RESPONSES_APIS.has(api);
+  const promptCacheKeySupport = input.compat?.supportsPromptCacheKey;
+  // Default strip behavior (proxy-like endpoints with responses APIs) is
+  // preserved as a safety net for providers that reject prompt_cache_key,
+  // see #48155 (Volcano Engine DeepSeek). Operators running their payload
+  // through an OpenAI-compatible proxy known to forward the field
+  // (CLIProxy, LiteLLM, etc.) can opt out via compat.supportsPromptCacheKey
+  // to recover prompt caching; providers known to reject the field can
+  // force the strip with compat.supportsPromptCacheKey = false even on
+  // native endpoints.
+  const shouldStripResponsesPromptCache =
+    promptCacheKeySupport === true
+      ? false
+      : promptCacheKeySupport === false
+        ? isResponsesApi
+        : isResponsesApi && policy.usesExplicitProxyLikeEndpoint;
+
   return {
     ...policy,
     isKnownNativeEndpoint,
@@ -599,29 +616,14 @@ export function resolveProviderRequestCapabilities(
       (endpointClass === "default" || endpointClass === "anthropic-public"),
     // This is intentionally the gate for emitting `store: false` on Responses
     // transports, not just a statement about vendor support in the abstract.
-    supportsResponsesStoreField:
-      input.compat?.supportsStore !== false && api !== undefined && OPENAI_RESPONSES_APIS.has(api),
+    supportsResponsesStoreField: input.compat?.supportsStore !== false && isResponsesApi,
     allowsResponsesStore:
       input.compat?.supportsStore !== false &&
       provider !== undefined &&
-      api !== undefined &&
-      OPENAI_RESPONSES_APIS.has(api) &&
+      isResponsesApi &&
       OPENAI_RESPONSES_PROVIDERS.has(provider) &&
       policy.usesKnownNativeOpenAIEndpoint,
-    // Default strip behavior (proxy-like endpoints with responses APIs) is
-    // preserved as a safety net for providers that reject prompt_cache_key —
-    // see #48155 (Volcano Engine DeepSeek). Operators running their payload
-    // through an OpenAI-compatible proxy known to forward the field
-    // (CLIProxy, LiteLLM, etc.) can opt out via compat.supportsPromptCacheKey
-    // to recover prompt caching; providers known to reject the field can
-    // force the strip with compat.supportsPromptCacheKey = false even on
-    // native endpoints.
-    shouldStripResponsesPromptCache:
-      input.compat?.supportsPromptCacheKey === true
-        ? false
-        : input.compat?.supportsPromptCacheKey === false
-          ? api !== undefined && OPENAI_RESPONSES_APIS.has(api)
-          : api !== undefined && OPENAI_RESPONSES_APIS.has(api) && policy.usesExplicitProxyLikeEndpoint,
+    shouldStripResponsesPromptCache,
     // Native endpoint class is the real signal here. Users can point a generic
     // provider key at Moonshot or DashScope and still need streaming usage.
     supportsNativeStreamingUsageCompat:
Reference in New Issue
Block a user