diff --git a/CHANGELOG.md b/CHANGELOG.md index ce57f29e3a7..dcce92716c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,7 @@ Docs: https://docs.openclaw.ai ### Fixes +- Agents/OpenAI: default direct OpenAI Responses models to the SSE transport instead of WebSocket auto-selection, preventing pi runtime chat turns from hanging on servers where the WebSocket path stalls while the OpenAI HTTP stream works. Thanks @vincentkoc. - CLI/update: disable and skip plugins that fail package-update plugin sync, so a broken npm/ClawHub/git/marketplace plugin cannot turn a successful OpenClaw package update into a failed update result. Thanks @vincentkoc. - CLI/update: use an absolute POSIX npm script shell during package-manager updates, so restricted PATH environments can still run dependency lifecycle scripts while updating from `--tag main`. Fixes #77530. Thanks @PeterTremonti. - Diagnostics: grant the internal diagnostics event bus to official installed diagnostics exporter plugins, so npm-installed `@openclaw/diagnostics-prometheus` can emit metrics without broadening the capability to arbitrary global plugins. Fixes #76628. Thanks @RayWoo. 
diff --git a/extensions/openai/openai-provider.test.ts b/extensions/openai/openai-provider.test.ts index 3cb2872f5f0..24b96524566 100644 --- a/extensions/openai/openai-provider.test.ts +++ b/extensions/openai/openai-provider.test.ts @@ -508,9 +508,9 @@ describe("buildOpenAIProvider", () => { }); expect(extraParams).toMatchObject({ - transport: "auto", - openaiWsWarmup: true, + transport: "sse", }); + expect(extraParams?.openaiWsWarmup).toBeUndefined(); expect(result.payload.store).toBe(true); expect(result.payload.context_management).toEqual([ { type: "compaction", compact_threshold: 140_000 }, diff --git a/extensions/openai/openai-provider.ts b/extensions/openai/openai-provider.ts index 969b04ed5dc..76503bb293d 100644 --- a/extensions/openai/openai-provider.ts +++ b/extensions/openai/openai-provider.ts @@ -227,7 +227,7 @@ export function buildOpenAIProvider(): ProviderPlugin { shouldUseOpenAIResponsesTransport({ provider, api, baseUrl }) ? { api: "openai-responses", baseUrl } : undefined, - ...buildOpenAIResponsesProviderHooks({ openaiWsWarmup: true }), + ...buildOpenAIResponsesProviderHooks({ transport: "sse" }), matchesContextOverflowError: ({ errorMessage }) => /content_filter.*(?:prompt|input).*(?:too long|exceed)/i.test(errorMessage), resolveReasoningOutputMode: () => "native", diff --git a/extensions/openai/shared.ts b/extensions/openai/shared.ts index c2b43cbe9be..143737f3d9d 100644 --- a/extensions/openai/shared.ts +++ b/extensions/openai/shared.ts @@ -50,10 +50,11 @@ function hasSupportedOpenAIResponsesTransport( function defaultOpenAIResponsesExtraParams( extraParams: Record<string, unknown> | undefined, - options?: { openaiWsWarmup?: boolean }, + options?: { openaiWsWarmup?: boolean; transport?: "auto" | "sse" | "websocket" }, ): Record<string, unknown> | undefined { const hasSupportedTransport = hasSupportedOpenAIResponsesTransport(extraParams?.transport); const hasExplicitWarmup = typeof extraParams?.openaiWsWarmup === "boolean"; const defaultTransport = options?.transport ?? 
"auto"; const shouldDefaultWarmup = options?.openaiWsWarmup === true; if (hasSupportedTransport && (!shouldDefaultWarmup || hasExplicitWarmup)) { return extraParams; @@ -61,7 +62,7 @@ function defaultOpenAIResponsesExtraParams( return { ...extraParams, - ...(hasSupportedTransport ? {} : { transport: "auto" }), + ...(hasSupportedTransport ? {} : { transport: defaultTransport }), ...(shouldDefaultWarmup && !hasExplicitWarmup ? { openaiWsWarmup: true } : {}), }; } @@ -93,6 +94,7 @@ const wrapOpenAIResponsesProviderStreamFn: NonNullable< export function buildOpenAIResponsesProviderHooks(options?: { openaiWsWarmup?: boolean; + transport?: "auto" | "sse" | "websocket"; }): OpenAIResponsesProviderHooks { return { buildReplayPolicy: buildOpenAIReplayPolicy,