Mirror of https://github.com/openclaw/openclaw.git (synced 2026-05-06 09:10:45 +00:00)
fix: unify responses api capability detection (#67918)
@@ -7,6 +7,7 @@ Docs: https://docs.openclaw.ai
 
 ### Fixes
 
 - Onboarding/non-interactive: preserve existing gateway auth tokens during re-onboard so active local gateway clients are not disconnected by an implicit token rotation. (#67821) Thanks @BKF-Gitty.
+- OpenAI Codex/Responses: unify native Responses API capability detection so Codex OAuth requests emit the required `store: false` field on the native Responses path. (#67918) Thanks @obviyus.
 
 ## 2026.4.15
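
For context, a minimal sketch of what the changelog entry means on the wire; everything except the `store` field is illustrative, and the builder below is hypothetical, not code from this diff:

```ts
// Hypothetical request builder for the native Codex Responses path.
// Per this fix, the Codex OAuth backend (https://chatgpt.com/backend-api/codex)
// requires an explicit `store: false` on the request body.
type ResponsesPayload = {
  model: string;
  input: string;
  store?: boolean;
};

function buildCodexResponsesPayload(model: string, input: string): ResponsesPayload {
  return {
    model,
    input,
    // Before #67918, capability detection could miss "openai-codex-responses"
    // and leave this field out entirely.
    store: false,
  };
}
```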
@@ -106,4 +106,21 @@ describe("openai responses payload policy", () => {
     expect(payload).not.toHaveProperty("reasoning");
   });
 
+  it("emits store false for native OpenAI Codex responses disable mode", () => {
+    expect(
+      resolveOpenAIResponsesPayloadPolicy(
+        {
+          api: "openai-codex-responses",
+          provider: "openai-codex",
+          baseUrl: "https://chatgpt.com/backend-api/codex",
+        },
+        { storeMode: "disable" },
+      ),
+    ).toMatchObject({
+      explicitStore: false,
+      allowsServiceTier: true,
+      shouldStripStore: false,
+    });
+  });
 });
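
A hedged sketch of how a caller might act on the policy fields asserted above; `applyStorePolicy` and the payload type are hypothetical, and only the field names (`explicitStore`, `shouldStripStore`) come from the diff:

```ts
type OutgoingPayload = { store?: boolean; [key: string]: unknown };

// Hypothetical consumer: translate the resolved policy into payload edits.
function applyStorePolicy(
  payload: OutgoingPayload,
  policy: { explicitStore?: boolean; shouldStripStore: boolean },
): OutgoingPayload {
  if (policy.shouldStripStore) {
    // Endpoint rejects the field entirely; drop it.
    delete payload.store;
  } else if (policy.explicitStore !== undefined) {
    // e.g. explicitStore === false for Codex OAuth in "disable" store mode.
    payload.store = policy.explicitStore;
  }
  return payload;
}
```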
@@ -1,4 +1,5 @@
 import { readStringValue } from "../shared/string-coerce.js";
+import { isOpenAIResponsesApi } from "./provider-attribution.js";
 import { resolveProviderRequestPolicyConfig } from "./provider-request-config.js";
 
 type OpenAIResponsesPayloadModel = {
@@ -26,12 +27,6 @@ export type OpenAIResponsesPayloadPolicy = {
   useServerCompaction: boolean;
 };
 
-const OPENAI_RESPONSES_APIS = new Set([
-  "openai-responses",
-  "azure-openai-responses",
-  "openai-codex-responses",
-]);
-
 function parsePositiveInteger(value: unknown): number | undefined {
   if (typeof value === "number" && Number.isFinite(value) && value > 0) {
     return Math.floor(value);
@@ -112,7 +107,7 @@ export function resolveOpenAIResponsesPayloadPolicy(
       : capabilities.allowsResponsesStore
         ? true
        : undefined;
-  const isResponsesApi = typeof model.api === "string" && OPENAI_RESPONSES_APIS.has(model.api);
+  const isResponsesApi = isOpenAIResponsesApi(readStringValue(model.api));
 
   return {
     allowsServiceTier: capabilities.allowsOpenAIServiceTier,
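
`readStringValue` is assumed, from its name and its use at the call site above, to coerce an unknown value to `string | undefined`, which is what lets the inline `typeof` guard be dropped; a minimal sketch of that assumed contract:

```ts
// Assumed contract of readStringValue from ../shared/string-coerce.js:
// pass strings through, map everything else to undefined.
function readStringValueSketch(value: unknown): string | undefined {
  return typeof value === "string" ? value : undefined;
}
```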
@@ -957,6 +957,28 @@ describe("provider attribution", () => {
         supportsNativeStreamingUsageCompat: false,
       },
     },
+    {
+      name: "native OpenAI Codex responses",
+      input: {
+        provider: "openai-codex",
+        api: "openai-codex-responses",
+        baseUrl: "https://chatgpt.com/backend-api/codex",
+        capability: "llm" as const,
+        transport: "stream" as const,
+      },
+      expected: {
+        knownProviderFamily: "openai-family",
+        endpointClass: "openai-codex",
+        isKnownNativeEndpoint: true,
+        allowsOpenAIServiceTier: true,
+        supportsOpenAIReasoningCompatPayload: true,
+        allowsResponsesStore: false,
+        supportsResponsesStoreField: true,
+        shouldStripResponsesPromptCache: false,
+        allowsAnthropicServiceTier: false,
+        supportsNativeStreamingUsageCompat: false,
+      },
+    },
   ];
 
   for (const testCase of cases) {
@@ -124,7 +124,11 @@ const MODELSTUDIO_NATIVE_BASE_URLS = new Set([
   "https://dashscope.aliyuncs.com/compatible-mode/v1",
   "https://dashscope-intl.aliyuncs.com/compatible-mode/v1",
 ]);
-const OPENAI_RESPONSES_APIS = new Set(["openai-responses", "azure-openai-responses"]);
+const OPENAI_RESPONSES_APIS = new Set([
+  "openai-responses",
+  "azure-openai-responses",
+  "openai-codex-responses",
+]);
 const OPENAI_RESPONSES_PROVIDERS = new Set(["openai", "azure-openai", "azure-openai-responses"]);
 const MOONSHOT_COMPAT_PROVIDERS = new Set(["moonshot", "kimi"]);
 
@@ -317,6 +321,11 @@ function resolveKnownProviderFamily(provider: string | undefined): string {
   }
 }
 
+export function isOpenAIResponsesApi(api: string | null | undefined): boolean {
+  const normalizedApi = normalizeOptionalLowercaseString(api);
+  return normalizedApi !== undefined && OPENAI_RESPONSES_APIS.has(normalizedApi);
+}
+
 export function resolveProviderAttributionIdentity(
   env: RuntimeVersionEnv = process.env as RuntimeVersionEnv,
 ): ProviderAttributionIdentity {
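
A quick usage sketch of the new helper; the case-insensitive result assumes `normalizeOptionalLowercaseString` lowercases its input, as the name suggests:

```ts
isOpenAIResponsesApi("openai-codex-responses"); // true — the case this diff adds
isOpenAIResponsesApi("OpenAI-Responses");       // true, assuming lowercase normalization
isOpenAIResponsesApi("some-other-api");         // false — not in OPENAI_RESPONSES_APIS
isOpenAIResponsesApi(undefined);                // false — null-safe by signature
```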
@@ -573,7 +582,7 @@ export function resolveProviderRequestCapabilities(
     compatibilityFamily = "moonshot";
   }
 
-  const isResponsesApi = api !== undefined && OPENAI_RESPONSES_APIS.has(api);
+  const isResponsesApi = isOpenAIResponsesApi(api);
   const promptCacheKeySupport = input.compat?.supportsPromptCacheKey;
   // Default strip behavior (proxy-like endpoints with responses APIs) is
   // preserved as a safety net for providers that reject prompt_cache_key,