mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-06 13:10:43 +00:00
fix(agents): keep mocked OpenAI Responses on HTTP (#69815)
* fix(agents): keep mocked OpenAI Responses on HTTP * docs(changelog): add entry for mocked Responses fix
This commit is contained in:
@@ -17,6 +17,7 @@ Docs: https://docs.openclaw.ai
|
||||
|
||||
### Fixes
|
||||
|
||||
- OpenAI/Responses: keep embedded OpenAI Responses runs on HTTP when `models.providers.openai.baseUrl` points at a local mock or other non-public endpoint, so mocked/custom endpoints no longer drift onto the hardcoded public websocket transport. (#69815) Thanks @vincentkoc.
|
||||
- Channels/config: require resolved runtime config on channel send/action/client helpers and block runtime helper `loadConfig()` calls, so SecretRefs are resolved at startup/boundaries instead of being re-read during sends.
|
||||
- CLI/channels: preserve bundled setup promotion metadata when a loaded partial channel plugin omits it, so adding a non-default account still moves legacy single-account fields such as Telegram `streaming` into `accounts.default`.
|
||||
- Telegram: keep the sent-message ownership cache isolated per configured session store, so own-message reaction filtering remains correct with custom `session.store` paths.
|
||||
|
||||
@@ -853,6 +853,7 @@ export async function compactEmbeddedPiSessionDirect(
|
||||
const shouldUseWebSocketTransport = shouldUseOpenAIWebSocketTransport({
|
||||
provider,
|
||||
modelApi: effectiveModel.api,
|
||||
modelBaseUrl: effectiveModel.baseUrl,
|
||||
});
|
||||
const wsApiKey = shouldUseWebSocketTransport
|
||||
? await resolveEmbeddedAgentApiKey({
|
||||
|
||||
@@ -2,13 +2,52 @@ import { describe, expect, it } from "vitest";
|
||||
import { shouldUseOpenAIWebSocketTransport } from "./attempt.thread-helpers.js";
|
||||
|
||||
describe("openai websocket transport selection", () => {
|
||||
it("accepts the direct OpenAI responses transport pair", () => {
|
||||
it("accepts direct OpenAI Responses endpoints", () => {
|
||||
expect(
|
||||
shouldUseOpenAIWebSocketTransport({
|
||||
provider: "openai",
|
||||
modelApi: "openai-responses",
|
||||
modelBaseUrl: undefined,
|
||||
}),
|
||||
).toBe(true);
|
||||
expect(
|
||||
shouldUseOpenAIWebSocketTransport({
|
||||
provider: "openai",
|
||||
modelApi: "openai-responses",
|
||||
modelBaseUrl: "https://api.openai.com/v1",
|
||||
}),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it("rejects non-public baseUrls even when the provider/api pair matches", () => {
|
||||
expect(
|
||||
shouldUseOpenAIWebSocketTransport({
|
||||
provider: "openai",
|
||||
modelApi: "openai-responses",
|
||||
modelBaseUrl: "http://127.0.0.1:4100/v1",
|
||||
}),
|
||||
).toBe(false);
|
||||
expect(
|
||||
shouldUseOpenAIWebSocketTransport({
|
||||
provider: "openai",
|
||||
modelApi: "openai-responses",
|
||||
modelBaseUrl: "https://example.com/v1",
|
||||
}),
|
||||
).toBe(false);
|
||||
expect(
|
||||
shouldUseOpenAIWebSocketTransport({
|
||||
provider: "openai",
|
||||
modelApi: "openai-responses",
|
||||
modelBaseUrl: "https://chatgpt.com/backend-api",
|
||||
}),
|
||||
).toBe(false);
|
||||
expect(
|
||||
shouldUseOpenAIWebSocketTransport({
|
||||
provider: "openai",
|
||||
modelApi: "openai-responses",
|
||||
modelBaseUrl: "https://example.openai.azure.com/openai/v1",
|
||||
}),
|
||||
).toBe(false);
|
||||
});
|
||||
|
||||
it("rejects mismatched OpenAI websocket transport pairs", () => {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import type { OpenClawConfig } from "../../../config/types.openclaw.js";
|
||||
import { joinPresentTextSegments } from "../../../shared/text/join-segments.js";
|
||||
import { normalizeStructuredPromptSection } from "../../prompt-cache-stability.js";
|
||||
import { resolveProviderEndpoint } from "../../provider-attribution.js";
|
||||
|
||||
export const ATTEMPT_CACHE_TTL_CUSTOM_TYPE = "openclaw.cache-ttl";
|
||||
|
||||
@@ -40,11 +41,18 @@ export function resolveAttemptSpawnWorkspaceDir(params: {
|
||||
export function shouldUseOpenAIWebSocketTransport(params: {
|
||||
provider: string;
|
||||
modelApi?: string | null;
|
||||
modelBaseUrl?: string | null;
|
||||
}): boolean {
|
||||
if (params.modelApi !== "openai-responses" || params.provider !== "openai") {
|
||||
return false;
|
||||
}
|
||||
|
||||
// openai-codex normalizes to the ChatGPT backend HTTP path, not the public
|
||||
// OpenAI Responses websocket endpoint. Keep it on HTTP until a provider-
|
||||
// specific websocket target exists and is verified end-to-end.
|
||||
return params.modelApi === "openai-responses" && params.provider === "openai";
|
||||
// OpenAI Responses websocket endpoint. Local mocks, proxies, and custom
|
||||
// baseUrls must stay on HTTP because the websocket runtime targets the
|
||||
// native api.openai.com endpoint directly.
|
||||
const endpointClass = resolveProviderEndpoint(params.modelBaseUrl).endpointClass;
|
||||
return endpointClass === "default" || endpointClass === "openai-public";
|
||||
}
|
||||
|
||||
export function shouldAppendAttemptCacheTtl(params: {
|
||||
|
||||
@@ -1232,6 +1232,7 @@ export async function runEmbeddedAttempt(
|
||||
const shouldUseWebSocketTransport = shouldUseOpenAIWebSocketTransport({
|
||||
provider: params.provider,
|
||||
modelApi: params.model.api,
|
||||
modelBaseUrl: params.model.baseUrl,
|
||||
});
|
||||
const wsApiKey = shouldUseWebSocketTransport
|
||||
? await resolveEmbeddedAgentApiKey({
|
||||
|
||||
Reference in New Issue
Block a user