fix: honor env proxy for provider guarded fetch

This commit is contained in:
Peter Steinberger
2026-05-03 21:48:18 +01:00
parent e387764014
commit ca620fbd4f
3 changed files with 90 additions and 4 deletions

View File

@@ -34,6 +34,7 @@ Docs: https://docs.openclaw.ai
- Telegram: add `channels.telegram.mediaGroupFlushMs` at the top level and per account so operators can tune album buffering instead of being stuck with the hard-coded 500ms media-group flush window. Fixes #76149. Thanks @vincentkoc.
- Config/messages: coerce boolean `messages.visibleReplies` and `messages.groupChat.visibleReplies` values to the documented enum modes so an intuitive toggle no longer invalidates config and drops channel startup. Fixes #75390. Thanks @scottgl9.
- Agents/network: allow trusted web-search providers and configured model-provider hosts to work behind Surge/Clash/sing-box fake-IP DNS by accepting RFC 2544 and IPv6 ULA synthetic answers only for the request's scoped hostname, without broad private-network access. Refs #76530 and #76549. Thanks @zqchris.
- Providers: honor env-proxy settings for guarded provider model fetches when no explicit dispatcher policy is configured, preserving explicit transport overrides. Fixes #70453. (#72480) Thanks @mjamiv.
- Feishu: accept and honor `channels.feishu.blockStreaming` at the top level and per account, while keeping the legacy default off so Feishu cards no longer reject documented config or silently drop block replies. Fixes #75555. Thanks @vincentkoc.
- Google Chat: normalize custom Google auth transport headers before google-auth/gaxios interceptors run, restoring webhook token verification when certificate retrieval expects Fetch `Headers`. Fixes #76742. Thanks @donbowman.
- Doctor/plugins: reset stale `plugins.slots.memory` and `plugins.slots.contextEngine` references during `doctor --fix`, so cleanup of missing plugin config does not leave unrecoverable slot owners behind. Fixes #76550 and #76551. Thanks @vincentkoc.

View File

@@ -3,24 +3,40 @@ import { Stream } from "openai/streaming";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
// Mock fns created via vi.hoisted() so the vi.mock() factories below can
// reference them: Vitest hoists vi.mock calls above module imports, so a
// plain module-level const would not yet be initialized when the factory runs.
const {
buildProviderRequestDispatcherPolicyMock,
fetchWithSsrFGuardMock,
mergeModelProviderRequestOverridesMock,
resolveProviderRequestPolicyConfigMock,
shouldUseEnvHttpProxyForUrlMock,
withTrustedEnvProxyGuardedFetchModeMock,
} = vi.hoisted(() => ({
// Default: no explicit dispatcher policy configured (undefined), so the
// env-proxy path is eligible; individual tests override per call.
buildProviderRequestDispatcherPolicyMock: vi.fn<
(_request?: unknown) => { mode: "direct" } | undefined
>(() => undefined),
// Return value is configured in beforeEach (resolved response + release fn).
fetchWithSsrFGuardMock: vi.fn(),
// Shallow-merge: overrides win over the current transport settings.
mergeModelProviderRequestOverridesMock: vi.fn((current, overrides) => ({
...current,
...overrides,
})),
resolveProviderRequestPolicyConfigMock: vi.fn(() => ({ allowPrivateNetwork: false })),
// Default false: env proxy does not apply to the URL unless a test opts in.
shouldUseEnvHttpProxyForUrlMock: vi.fn(() => false),
// Echoes the options back tagged with mode "trusted_env_proxy" —
// presumably mirroring the real helper's tagging; verify against fetch-guard.
withTrustedEnvProxyGuardedFetchModeMock: vi.fn((params: Record<string, unknown>) => ({
...params,
mode: "trusted_env_proxy",
})),
}));
vi.mock("../infra/net/fetch-guard.js", () => ({
fetchWithSsrFGuard: fetchWithSsrFGuardMock,
withTrustedEnvProxyGuardedFetchMode: withTrustedEnvProxyGuardedFetchModeMock,
}));
vi.mock("../infra/net/proxy-env.js", () => ({
shouldUseEnvHttpProxyForUrl: shouldUseEnvHttpProxyForUrlMock,
}));
vi.mock("./provider-request-config.js", () => ({
buildProviderRequestDispatcherPolicy: vi.fn(() => ({ mode: "direct" })),
buildProviderRequestDispatcherPolicy: buildProviderRequestDispatcherPolicyMock,
getModelProviderRequestTransport: vi.fn(() => undefined),
mergeModelProviderRequestOverrides: mergeModelProviderRequestOverridesMock,
resolveProviderRequestPolicyConfig: resolveProviderRequestPolicyConfigMock,
@@ -33,10 +49,13 @@ describe("buildGuardedModelFetch", () => {
finalUrl: "https://api.openai.com/v1/responses",
release: vi.fn(async () => undefined),
});
buildProviderRequestDispatcherPolicyMock.mockClear().mockReturnValue(undefined);
mergeModelProviderRequestOverridesMock.mockClear();
resolveProviderRequestPolicyConfigMock
.mockClear()
.mockReturnValue({ allowPrivateNetwork: false });
shouldUseEnvHttpProxyForUrlMock.mockClear().mockReturnValue(false);
withTrustedEnvProxyGuardedFetchModeMock.mockClear();
delete process.env.OPENCLAW_DEBUG_PROXY_ENABLED;
delete process.env.OPENCLAW_DEBUG_PROXY_URL;
delete process.env.OPENCLAW_SDK_RETRY_MAX_WAIT_SECONDS;
@@ -137,6 +156,63 @@ describe("buildGuardedModelFetch", () => {
});
});
// New behavior under test: when buildProviderRequestDispatcherPolicy yields
// undefined (no explicit dispatcher policy) AND the env proxy applies to the
// request URL, the guarded-fetch options are routed through
// withTrustedEnvProxyGuardedFetchMode before fetchWithSsrFGuard is called.
it("uses trusted env-proxy mode for provider calls when no explicit dispatcher policy is configured", async () => {
// Opt this URL into the env proxy for this test only (default mock is false).
shouldUseEnvHttpProxyForUrlMock.mockReturnValueOnce(true);
// Dynamic import so the module picks up the hoisted mocks above.
const { buildGuardedModelFetch } = await import("./provider-transport-fetch.js");
const model = {
id: "gpt-5.4",
provider: "openai",
api: "openai-responses",
baseUrl: "https://api.openai.com/v1",
} as unknown as Model<"openai-responses">;
const fetcher = buildGuardedModelFetch(model);
await fetcher("https://api.openai.com/v1/responses", { method: "POST" });
// The proxy check must be keyed on the final request URL, not the base URL.
expect(shouldUseEnvHttpProxyForUrlMock).toHaveBeenCalledWith(
"https://api.openai.com/v1/responses",
);
expect(withTrustedEnvProxyGuardedFetchModeMock).toHaveBeenCalledWith(
expect.objectContaining({
url: "https://api.openai.com/v1/responses",
// No explicit policy: env-proxy wrapping is only valid in this case.
dispatcherPolicy: undefined,
// Fake-IP DNS allowances (RFC 2544 / IPv6 ULA ranges) scoped to the
// provider hostname only — NOTE(review): matches the changelog entry
// about Surge/Clash/sing-box fake-IP support; confirm against policy impl.
policy: {
allowRfc2544BenchmarkRange: true,
allowIpv6UniqueLocalRange: true,
hostnameAllowlist: ["api.openai.com"],
},
}),
);
// The wrapped options (mode tag added by the mock) reach the guard seam.
expect(fetchWithSsrFGuardMock).toHaveBeenCalledWith(
expect.objectContaining({
url: "https://api.openai.com/v1/responses",
mode: "trusted_env_proxy",
}),
);
});
// Regression guard for the "preserving explicit transport overrides" half of
// the fix: an explicitly configured dispatcher policy must win over the env
// proxy, so the trusted-env-proxy wrapper is never invoked even when the env
// proxy would otherwise match the URL.
it("keeps explicit provider dispatcher policies in strict guarded-fetch mode", async () => {
// Env proxy matches the URL... 
shouldUseEnvHttpProxyForUrlMock.mockReturnValueOnce(true);
// ...but an explicit dispatcher policy is configured for this request.
buildProviderRequestDispatcherPolicyMock.mockReturnValueOnce({ mode: "direct" });
const { buildGuardedModelFetch } = await import("./provider-transport-fetch.js");
const model = {
id: "gpt-5.4",
provider: "openai",
api: "openai-responses",
baseUrl: "https://api.openai.com/v1",
} as unknown as Model<"openai-responses">;
const fetcher = buildGuardedModelFetch(model);
await fetcher("https://api.openai.com/v1/responses", { method: "POST" });
// Explicit policy short-circuits env-proxy handling entirely.
expect(withTrustedEnvProxyGuardedFetchModeMock).not.toHaveBeenCalled();
// The explicit policy is threaded through to the guard unchanged.
expect(fetchWithSsrFGuardMock).toHaveBeenCalledWith(
expect.objectContaining({
dispatcherPolicy: { mode: "direct" },
}),
);
});
it("threads explicit transport timeouts into the shared guarded fetch seam", async () => {
const { buildGuardedModelFetch } = await import("./provider-transport-fetch.js");
const model = {

View File

@@ -1,5 +1,9 @@
import type { Api, Model } from "@mariozechner/pi-ai";
import { fetchWithSsrFGuard } from "../infra/net/fetch-guard.js";
import {
fetchWithSsrFGuard,
withTrustedEnvProxyGuardedFetchMode,
} from "../infra/net/fetch-guard.js";
import { shouldUseEnvHttpProxyForUrl } from "../infra/net/proxy-env.js";
import {
ssrfPolicyFromHttpBaseUrlFakeIpHostnameAllowlist,
type SsrFPolicy,
@@ -340,7 +344,7 @@ export function buildGuardedModelFetch(model: Model<Api>, timeoutMs?: number): t
signal: request.signal,
...(request.body ? ({ duplex: "half" } as const) : {}),
} satisfies RequestInit & { duplex?: "half" });
const result = await fetchWithSsrFGuard({
const guardedFetchOptions = {
url,
init: requestInit ?? init,
capture: {
@@ -356,7 +360,12 @@ export function buildGuardedModelFetch(model: Model<Api>, timeoutMs?: number): t
// replays unsafe request bodies across cross-origin redirects.
allowCrossOriginUnsafeRedirectReplay: false,
...(policy ? { policy } : {}),
});
};
const result = await fetchWithSsrFGuard(
!dispatcherPolicy && shouldUseEnvHttpProxyForUrl(url)
? withTrustedEnvProxyGuardedFetchMode(guardedFetchOptions)
: guardedFetchOptions,
);
let response = result.response;
if (shouldBypassLongSdkRetry(response)) {
const headers = new Headers(response.headers);