diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a6cf6ebbf3..63a4fbbd160 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Docs: https://docs.openclaw.ai ### Changes - Highlight: External Secrets Management introduces a full `openclaw secrets` workflow (`audit`, `configure`, `apply`, `reload`) with runtime snapshot activation, strict `secrets apply` target-path validation, safer migration scrubbing, ref-only auth-profile support, and dedicated docs. (#26155) Thanks @joshavant. +- Codex/WebSocket transport: make `openai-codex` WebSocket-first by default (`transport: "auto"` with SSE fallback), keep explicit per-model/runtime transport overrides, and add regression coverage + docs for transport selection. - Agents/Routing CLI: add `openclaw agents bindings`, `openclaw agents bind`, and `openclaw agents unbind` for account-scoped route management, including channel-only to account-scoped binding upgrades, role-aware binding identity handling, plugin-resolved binding account IDs, and optional account-binding prompts in `openclaw channels add`. (#27195) thanks @gumadeiras. - ACP/Thread-bound agents: make ACP agents first-class runtimes for thread sessions with `acp` spawn/send dispatch integration, acpx backend bridging, lifecycle controls, startup reconciliation, runtime cleanup, and coalesced thread replies. (#23580) thanks @osolmaz. - Onboarding/Plugins: let channel plugins own interactive onboarding flows with optional `configureInteractive` and `configureWhenConfigured` hooks while preserving the generic fallback path. (#27191) thanks @gumadeiras. diff --git a/docs/concepts/model-providers.md b/docs/concepts/model-providers.md index 6210f592482..94675b639a0 100644 --- a/docs/concepts/model-providers.md +++ b/docs/concepts/model-providers.md @@ -70,6 +70,8 @@ OpenClaw ships with the pi‑ai catalog. 
These providers require **no** - Auth: OAuth (ChatGPT) - Example model: `openai-codex/gpt-5.3-codex` - CLI: `openclaw onboard --auth-choice openai-codex` or `openclaw models auth login --provider openai-codex` +- Default transport is `auto` (WebSocket-first, SSE fallback) +- Override per model via `agents.defaults.models["openai-codex/<model>"].params.transport` (`"sse"`, `"websocket"`, or `"auto"`) ```json5 { diff --git a/docs/providers/openai.md b/docs/providers/openai.md index 54e3d29e454..1a47081a9a6 100644 --- a/docs/providers/openai.md +++ b/docs/providers/openai.md @@ -56,6 +56,33 @@ openclaw models auth login --provider openai-codex } ``` +### Codex transport default + +OpenClaw uses `pi-ai` for model streaming. For `openai-codex/*` models you can set +`agents.defaults.models.<model>.params.transport` to select transport: + +- Default is `"auto"` (WebSocket-first, then SSE fallback). +- `"sse"`: force SSE +- `"websocket"`: force WebSocket +- `"auto"`: try WebSocket, then fall back to SSE + +```json5 +{ + agents: { + defaults: { + model: { primary: "openai-codex/gpt-5.3-codex" }, + models: { + "openai-codex/gpt-5.3-codex": { + params: { + transport: "auto", + }, + }, + }, + }, + }, +} +``` + ## Notes - Model refs always use `provider/model` (see [/concepts/models](/concepts/models)). 
diff --git a/src/agents/pi-embedded-runner-extraparams.test.ts b/src/agents/pi-embedded-runner-extraparams.test.ts index 404d4439da4..3b717d3ab96 100644 --- a/src/agents/pi-embedded-runner-extraparams.test.ts +++ b/src/agents/pi-embedded-runner-extraparams.test.ts @@ -490,6 +490,160 @@ describe("applyExtraParamsToAgent", () => { }); }); + it("passes configured websocket transport through stream options", () => { + const { calls, agent } = createOptionsCaptureAgent(); + const cfg = { + agents: { + defaults: { + models: { + "openai-codex/gpt-5.3-codex": { + params: { + transport: "websocket", + }, + }, + }, + }, + }, + }; + + applyExtraParamsToAgent(agent, cfg, "openai-codex", "gpt-5.3-codex"); + + const model = { + api: "openai-codex-responses", + provider: "openai-codex", + id: "gpt-5.3-codex", + } as Model<"openai-codex-responses">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, {}); + + expect(calls).toHaveLength(1); + expect(calls[0]?.transport).toBe("websocket"); + }); + + it("defaults Codex transport to auto (WebSocket-first)", () => { + const { calls, agent } = createOptionsCaptureAgent(); + + applyExtraParamsToAgent(agent, undefined, "openai-codex", "gpt-5.3-codex"); + + const model = { + api: "openai-codex-responses", + provider: "openai-codex", + id: "gpt-5.3-codex", + } as Model<"openai-codex-responses">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, {}); + + expect(calls).toHaveLength(1); + expect(calls[0]?.transport).toBe("auto"); + }); + + it("does not set transport defaults for non-Codex providers", () => { + const { calls, agent } = createOptionsCaptureAgent(); + + applyExtraParamsToAgent(agent, undefined, "openai", "gpt-5"); + + const model = { + api: "openai-responses", + provider: "openai", + id: "gpt-5", + } as Model<"openai-responses">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, {}); + + expect(calls).toHaveLength(1); + 
expect(calls[0]?.transport).toBeUndefined(); + }); + + it("allows forcing Codex transport to SSE", () => { + const { calls, agent } = createOptionsCaptureAgent(); + const cfg = { + agents: { + defaults: { + models: { + "openai-codex/gpt-5.3-codex": { + params: { + transport: "sse", + }, + }, + }, + }, + }, + }; + + applyExtraParamsToAgent(agent, cfg, "openai-codex", "gpt-5.3-codex"); + + const model = { + api: "openai-codex-responses", + provider: "openai-codex", + id: "gpt-5.3-codex", + } as Model<"openai-codex-responses">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, {}); + + expect(calls).toHaveLength(1); + expect(calls[0]?.transport).toBe("sse"); + }); + + it("lets runtime options override configured transport", () => { + const { calls, agent } = createOptionsCaptureAgent(); + const cfg = { + agents: { + defaults: { + models: { + "openai-codex/gpt-5.3-codex": { + params: { + transport: "websocket", + }, + }, + }, + }, + }, + }; + + applyExtraParamsToAgent(agent, cfg, "openai-codex", "gpt-5.3-codex"); + + const model = { + api: "openai-codex-responses", + provider: "openai-codex", + id: "gpt-5.3-codex", + } as Model<"openai-codex-responses">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, { transport: "sse" }); + + expect(calls).toHaveLength(1); + expect(calls[0]?.transport).toBe("sse"); + }); + + it("falls back to Codex default transport when configured value is invalid", () => { + const { calls, agent } = createOptionsCaptureAgent(); + const cfg = { + agents: { + defaults: { + models: { + "openai-codex/gpt-5.3-codex": { + params: { + transport: "udp", + }, + }, + }, + }, + }, + }; + + applyExtraParamsToAgent(agent, cfg, "openai-codex", "gpt-5.3-codex"); + + const model = { + api: "openai-codex-responses", + provider: "openai-codex", + id: "gpt-5.3-codex", + } as Model<"openai-codex-responses">; + const context: Context = { messages: [] }; + void agent.streamFn?.(model, context, {}); 
+ + expect(calls).toHaveLength(1); + expect(calls[0]?.transport).toBe("auto"); + }); + it("disables prompt caching for non-Anthropic Bedrock models", () => { const { calls, agent } = createOptionsCaptureAgent(); diff --git a/src/agents/pi-embedded-runner/extra-params.ts b/src/agents/pi-embedded-runner/extra-params.ts index dc1db5f7642..70662760235 100644 --- a/src/agents/pi-embedded-runner/extra-params.ts +++ b/src/agents/pi-embedded-runner/extra-params.ts @@ -117,6 +117,13 @@ function createStreamFnWithExtraParams( if (typeof extraParams.maxTokens === "number") { streamParams.maxTokens = extraParams.maxTokens; } + const transport = extraParams.transport; + if (transport === "sse" || transport === "websocket" || transport === "auto") { + streamParams.transport = transport; + } else if (transport != null) { + const transportSummary = typeof transport === "string" ? transport : typeof transport; + log.warn(`ignoring invalid transport param: ${transportSummary}`); + } const cacheRetention = resolveCacheRetention(extraParams, provider); if (cacheRetention) { streamParams.cacheRetention = cacheRetention; @@ -234,6 +241,15 @@ function createOpenAIResponsesStoreWrapper(baseStreamFn: StreamFn | undefined): }; } +function createCodexDefaultTransportWrapper(baseStreamFn: StreamFn | undefined): StreamFn { + const underlying = baseStreamFn ?? streamSimple; + return (model, context, options) => + underlying(model, context, { + ...options, + transport: options?.transport ?? "auto", + }); +} + function isAnthropic1MModel(modelId: string): boolean { const normalized = modelId.trim().toLowerCase(); return ANTHROPIC_1M_MODEL_PREFIXES.some((prefix) => normalized.startsWith(prefix)); @@ -652,6 +668,10 @@ export function applyExtraParamsToAgent( modelId, agentId, }); + if (provider === "openai-codex") { + // Default Codex to WebSocket-first when nothing else specifies transport. 
+ agent.streamFn = createCodexDefaultTransportWrapper(agent.streamFn); + } const override = extraParamsOverride && Object.keys(extraParamsOverride).length > 0 ? Object.fromEntries(