fix: preserve OpenAI Codex OAuth transport (#75111)

Preserve the existing wrapped OpenAI Codex stream so PI OAuth bearer injection reaches ChatGPT/Codex Responses, and scope native Codex payload sanitization to the ChatGPT backend.

Thanks @keshavbotagent.
Authored by keshavbotagent on 2026-04-30 22:30:12 +05:30; committed by GitHub
parent adc20fed0d
commit 388019f5b6
10 changed files with 292 additions and 23 deletions
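
In short, the wrapped upstream stream may be the only place the PI OAuth bearer reaches the request options, so the attribution wrapper must not discard it by building a fresh native Codex transport. A minimal sketch of the guard, with hypothetical names (the real createOpenAIAttributionHeadersWrapper, in the last file below, also treats the default streamSimple as replaceable):

type SketchStreamFn = (
  model: unknown,
  context: unknown,
  options?: { apiKey?: string; headers?: Record<string, string> },
) => unknown;

// Hypothetical distillation of the fix: substitute a freshly created native
// Codex transport only when no wrapped stream exists; otherwise keep the
// wrapped stream so its OAuth bearer injection still runs.
function pickCodexStreamFn(
  baseStreamFn: SketchStreamFn | undefined,
  codexNativeTransport: SketchStreamFn,
): SketchStreamFn {
  return baseStreamFn === undefined ? codexNativeTransport : baseStreamFn;
}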


@@ -11,6 +11,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Providers/OpenAI Codex: preserve existing wrapped Codex streams during OpenAI attribution so PI OAuth bearer injection reaches ChatGPT/Codex Responses, and strip payload fields the native ChatGPT Codex backend does not support while leaving custom Codex-compatible endpoints untouched. (#75111) Thanks @keshavbotagent.
- Agents/tool-result guard: use the resolved runtime context token budget for non-context-engine tool-result overflow checks, so long tool-heavy sessions no longer compact early when `contextTokens` is larger than native `contextWindow`. Fixes #74917. Thanks @kAIborg24.
- Gateway/systemd: exit with sysexits 78 for supervised lock and `EADDRINUSE` conflicts so `RestartPreventExitStatus=78` stops `Restart=always` restart loops instead of repeatedly reloading plugins against an occupied port (see the sketch after this list). Fixes #75115. Thanks @yhyatt.
- Agents/runtime: skip blank visible user prompts at the embedded-runner boundary before provider submission while still allowing internal runtime-only turns and media-only prompts, so Telegram/group sessions no longer leak raw empty-input provider errors when replay history exists. Fixes #74137. Thanks @yelog, @Gracker, and @nhaener.
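
For the systemd entry above, a hypothetical sketch of the exit-code contract (sysexits EX_CONFIG is 78; the port and messages are placeholders, not the gateway's actual code):

import net from "node:net";

const EX_CONFIG = 78; // sysexits.h EX_CONFIG, matching RestartPreventExitStatus=78

const server = net.createServer();
server.once("error", (err: NodeJS.ErrnoException) => {
  if (err.code === "EADDRINUSE") {
    // A port conflict is a configuration problem; exiting 78 lets a unit with
    // Restart=always plus RestartPreventExitStatus=78 stop the restart loop
    // instead of retrying against the occupied port.
    console.error("listen port already in use; exiting with sysexits 78");
    process.exit(EX_CONFIG);
  }
  throw err;
});
server.listen(18789); // placeholder port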


@@ -24,6 +24,27 @@ describe("resolveCodexAuthIdentity", () => {
});
});
it("extracts account and plan metadata from the JWT auth claim", () => {
const identity = resolveCodexAuthIdentity({
accessToken: createJwt({
"https://api.openai.com/profile": {
email: "jwt-user@example.com",
},
"https://api.openai.com/auth": {
chatgpt_account_id: "acct-123",
chatgpt_plan_type: "prolite",
},
}),
});
expect(identity).toEqual({
accountId: "acct-123",
chatgptPlanType: "prolite",
email: "jwt-user@example.com",
profileName: "jwt-user@example.com",
});
});
it("falls back to credential email before synthetic ids", () => {
const identity = resolveCodexAuthIdentity({
accessToken: createJwt({}),


@@ -10,6 +10,7 @@ type CodexJwtPayload = {
"https://api.openai.com/auth"?: {
chatgpt_account_id?: unknown;
chatgpt_account_user_id?: unknown;
chatgpt_plan_type?: unknown;
chatgpt_user_id?: unknown;
user_id?: unknown;
};
@@ -67,23 +68,33 @@ export function resolveCodexAccessTokenExpiry(accessToken: string): number | und
}
export function resolveCodexAuthIdentity(params: { accessToken: string; email?: string | null }): {
accountId?: string;
chatgptPlanType?: string;
email?: string;
profileName?: string;
} {
const payload = decodeCodexJwtPayload(params.accessToken);
const auth = payload?.["https://api.openai.com/auth"];
const accountId = trimNonEmptyString(auth?.chatgpt_account_id);
const chatgptPlanType = trimNonEmptyString(auth?.chatgpt_plan_type);
const email =
trimNonEmptyString(payload?.["https://api.openai.com/profile"]?.email) ??
trimNonEmptyString(params.email);
const metadata = {
...(accountId ? { accountId } : {}),
...(chatgptPlanType ? { chatgptPlanType } : {}),
};
if (email) {
return { email, profileName: email };
return { ...metadata, email, profileName: email };
}
const stableSubject = resolveCodexStableSubject(payload);
if (!stableSubject) {
return {};
return metadata;
}
return {
...metadata,
profileName: `id-${Buffer.from(stableSubject).toString("base64url")}`,
};
}


@@ -225,13 +225,13 @@ describe("openai codex provider", () => {
access:
"eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJodHRwczovL2FwaS5vcGVuYWkuY29tL2F1dGgiOnsiY2hhdGdwdF9hY2NvdW50X2lkIjoiYWNjdC1kZXZpY2UtMTIzIn19.signature",
refresh: "device-refresh-token",
accountId: "acct-device-123",
},
},
],
defaultModel: "openai-codex/gpt-5.5",
});
expect(result?.profiles[0]?.credential).not.toHaveProperty("idToken");
expect(result?.profiles[0]?.credential).not.toHaveProperty("accountId");
});
it("does not log the device pairing code in remote mode", async () => {


@@ -304,17 +304,33 @@ function withDefaultCodexContextMetadata(params: {
};
}
function buildCodexCredentialExtra(identity: {
accountId?: string;
chatgptPlanType?: string;
}): Record<string, unknown> | undefined {
const extra = {
...(identity.accountId ? { accountId: identity.accountId } : {}),
...(identity.chatgptPlanType ? { chatgptPlanType: identity.chatgptPlanType } : {}),
};
return Object.keys(extra).length > 0 ? extra : undefined;
}
async function refreshOpenAICodexOAuthCredential(cred: OAuthCredential) {
try {
const { refreshOpenAICodexToken } = await import("./openai-codex-provider.runtime.js");
const refreshed = await refreshOpenAICodexToken(cred.refresh);
const identity = resolveCodexAuthIdentity({
accessToken: refreshed.access,
email: cred.email,
});
return {
...cred,
...refreshed,
type: "oauth" as const,
provider: PROVIDER_ID,
email: cred.email,
email: identity.email ?? cred.email,
displayName: cred.displayName,
...buildCodexCredentialExtra(identity),
};
} catch (error) {
const message = formatErrorMessage(error);
@@ -359,6 +375,7 @@ async function runOpenAICodexOAuth(ctx: ProviderAuthContext) {
expires: creds.expires,
email: identity.email,
profileName: identity.profileName,
credentialExtra: buildCodexCredentialExtra(identity),
});
}
@@ -409,6 +426,7 @@ async function runOpenAICodexDeviceCode(ctx: ProviderAuthContext) {
expires: creds.expires,
email: identity.email,
profileName: identity.profileName,
credentialExtra: buildCodexCredentialExtra(identity),
});
} catch (error) {
spin.stop("OpenAI device code failed");


@@ -12,6 +12,7 @@ export type OAuthCredentials = {
enterpriseUrl?: string;
projectId?: string;
accountId?: string;
chatgptPlanType?: string;
idToken?: string;
};


@@ -980,7 +980,7 @@ describe("openai transport stream", () => {
expect(params.input?.[0]).toMatchObject({ role: "developer" });
});
it("uses top-level instructions for Codex responses without dropping parity fields", () => {
it("uses top-level instructions for Codex responses and strips unsupported ChatGPT params", () => {
const params = buildOpenAIResponsesParams(
{
id: "gpt-5.4",
@@ -1020,15 +1020,122 @@ describe("openai transport stream", () => {
false,
);
expect(params.prompt_cache_key).toBe("session-123");
expect(params.prompt_cache_retention).toBeUndefined();
expect(params.store).toBe(false);
expect(params).not.toHaveProperty("metadata");
expect(params).not.toHaveProperty("max_output_tokens");
expect(params).not.toHaveProperty("prompt_cache_retention");
expect(params).not.toHaveProperty("service_tier");
expect(params).not.toHaveProperty("temperature");
});
it("sanitizes Codex responses params after payload hooks mutate them", () => {
const payload = {
model: "gpt-5.4",
input: [],
stream: true,
max_output_tokens: 1024,
metadata: { openclaw_session_id: "session-123" },
prompt_cache_key: "session-123",
prompt_cache_retention: "24h",
service_tier: "auto",
temperature: 0.2,
};
const sanitized = __testing.sanitizeOpenAICodexResponsesParams(
{
id: "gpt-5.4",
name: "GPT-5.4",
api: "openai-codex-responses",
provider: "openai-codex",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"openai-codex-responses">,
payload,
);
expect(sanitized.prompt_cache_key).toBe("session-123");
expect(sanitized).not.toHaveProperty("metadata");
expect(sanitized).not.toHaveProperty("max_output_tokens");
expect(sanitized).not.toHaveProperty("prompt_cache_retention");
expect(sanitized).not.toHaveProperty("service_tier");
expect(sanitized).not.toHaveProperty("temperature");
});
it("preserves custom Codex-compatible responses params", () => {
const params = buildOpenAIResponsesParams(
{
id: "gpt-5.4",
name: "GPT-5.4",
api: "openai-codex-responses",
provider: "openai-codex",
baseUrl: "https://proxy.example.com/v1",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"openai-codex-responses">,
{
systemPrompt: `Stable prefix${SYSTEM_PROMPT_CACHE_BOUNDARY}Dynamic suffix`,
messages: [{ role: "user", content: "Hello", timestamp: 1 }],
tools: [],
} as never,
{
cacheRetention: "long",
maxTokens: 1024,
sessionId: "session-123",
temperature: 0.2,
},
{
openclaw_session_id: "session-123",
openclaw_turn_id: "turn-123",
},
) as Record<string, unknown>;
expect(params.instructions).toBe("Stable prefix\nDynamic suffix");
expect(params.prompt_cache_key).toBe("session-123");
expect(params.metadata).toEqual({
openclaw_session_id: "session-123",
openclaw_turn_id: "turn-123",
});
expect(params.store).toBe(false);
expect(params.max_output_tokens).toBe(1024);
expect(params.temperature).toBe(0.2);
expect(params.service_tier).toBe("auto");
});
it("preserves custom Codex-compatible responses params after payload hooks mutate them", () => {
const payload = {
model: "gpt-5.4",
input: [],
stream: true,
max_output_tokens: 1024,
metadata: { openclaw_session_id: "session-123" },
prompt_cache_key: "session-123",
prompt_cache_retention: "24h",
service_tier: "auto",
temperature: 0.2,
};
const sanitized = __testing.sanitizeOpenAICodexResponsesParams(
{
id: "gpt-5.4",
name: "GPT-5.4",
api: "openai-codex-responses",
provider: "openai-codex",
baseUrl: "https://proxy.example.com/v1",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"openai-codex-responses">,
payload,
);
expect(sanitized).toEqual(payload);
});
it("adds minimal user input for Codex responses when only the system prompt is present", () => {


@@ -766,7 +766,13 @@ export function createOpenAIResponsesTransportStreamFn(): StreamFn {
if (nextParams !== undefined) {
params = nextParams as typeof params;
}
params = mergeTransportMetadata(params, turnState?.metadata);
if (!isOpenAICodexResponsesModel(model)) {
params = mergeTransportMetadata(params, turnState?.metadata);
}
params = sanitizeOpenAICodexResponsesParams(
model,
params as Record<string, unknown>,
) as typeof params;
const responseStream = (await client.responses.create(
params as never,
buildOpenAISdkRequestOptions(model, options?.signal),
@@ -870,6 +876,56 @@ function isOpenAICodexResponsesModel(model: Model<Api>): boolean {
return model.provider === "openai-codex" && model.api === "openai-codex-responses";
}
function isNativeOpenAICodexResponsesBaseUrl(baseUrl?: string): boolean {
const trimmed = typeof baseUrl === "string" ? baseUrl.trim() : "";
if (!trimmed) {
return false;
}
try {
const url = new URL(trimmed);
if (url.protocol !== "http:" && url.protocol !== "https:") {
return false;
}
if (url.hostname.toLowerCase() !== "chatgpt.com") {
return false;
}
const pathname = url.pathname.replace(/\/+$/u, "").toLowerCase();
return [
"/backend-api",
"/backend-api/v1",
"/backend-api/codex",
"/backend-api/codex/v1",
].includes(pathname);
} catch {
return false;
}
}
function usesNativeOpenAICodexResponsesBackend(model: Model<Api>): boolean {
return isOpenAICodexResponsesModel(model) && isNativeOpenAICodexResponsesBaseUrl(model.baseUrl);
}
const OPENAI_CODEX_RESPONSES_UNSUPPORTED_PARAMS = [
"max_output_tokens",
"metadata",
"prompt_cache_retention",
"service_tier",
"temperature",
] as const;
function sanitizeOpenAICodexResponsesParams<T extends Record<string, unknown>>(
model: Model<Api>,
params: T,
): T {
if (!usesNativeOpenAICodexResponsesBackend(model)) {
return params;
}
for (const key of OPENAI_CODEX_RESPONSES_UNSUPPORTED_PARAMS) {
delete params[key];
}
return params;
}
function buildOpenAICodexResponsesInstructions(context: Context): string | undefined {
if (!context.systemPrompt) {
return undefined;
@@ -977,7 +1033,10 @@ export function buildOpenAIResponsesParams(
}
}
applyOpenAIResponsesPayloadPolicy(params as Record<string, unknown>, payloadPolicy);
return params;
return sanitizeOpenAICodexResponsesParams(
model,
params as Record<string, unknown>,
) as typeof params;
}
export function createAzureOpenAIResponsesTransportStreamFn(): StreamFn {
@@ -1029,7 +1088,13 @@ export function createAzureOpenAIResponsesTransportStreamFn(): StreamFn {
if (nextParams !== undefined) {
params = nextParams as typeof params;
}
params = mergeTransportMetadata(params, turnState?.metadata);
if (!isOpenAICodexResponsesModel(model)) {
params = mergeTransportMetadata(params, turnState?.metadata);
}
params = sanitizeOpenAICodexResponsesParams(
model,
params as Record<string, unknown>,
) as typeof params;
const responseStream = (await client.responses.create(
params as never,
buildOpenAISdkRequestOptions(model, options?.signal),
@@ -1901,6 +1966,7 @@ export const __testing = {
createAzureOpenAIClient,
createOpenAICompletionsClient,
createOpenAIResponsesClient,
sanitizeOpenAICodexResponsesParams,
buildOpenAICompletionsClientConfig,
processOpenAICompletionsStream,
};


@@ -211,20 +211,15 @@ describe("createOpenAIThinkingLevelWrapper", () => {
});
describe("createOpenAIAttributionHeadersWrapper", () => {
it("routes native Codex traffic through the OpenClaw transport instead of pi upstream", () => {
let upstreamCalls = 0;
it("routes native Codex traffic through the OpenClaw transport when no wrapped stream exists", () => {
let codexCalls = 0;
let capturedHeaders: Record<string, string> | undefined;
const upstream: StreamFn = () => {
upstreamCalls += 1;
return createAssistantMessageEventStream();
};
const codexTransport: StreamFn = (_model, _context, options) => {
codexCalls += 1;
capturedHeaders = options?.headers;
return createAssistantMessageEventStream();
};
const wrapped = createOpenAIAttributionHeadersWrapper(upstream, {
const wrapped = createOpenAIAttributionHeadersWrapper(undefined, {
codexNativeTransportStreamFn: codexTransport,
});
@@ -242,11 +237,58 @@ describe("createOpenAIAttributionHeadersWrapper", () => {
},
);
expect(upstreamCalls).toBe(0);
expect(codexCalls).toBe(1);
expect(capturedHeaders).toMatchObject({
originator: "openclaw",
"User-Agent": expect.stringMatching(/^openclaw\//),
});
});
it("keeps existing wrapped Codex streams so runtime OAuth injection is preserved", () => {
let upstreamCalls = 0;
let codexCalls = 0;
let capturedOptions:
| {
apiKey?: string;
headers?: Record<string, string>;
}
| undefined;
const upstream: StreamFn = (_model, _context, options) => {
upstreamCalls += 1;
capturedOptions = options;
return createAssistantMessageEventStream();
};
const codexTransport: StreamFn = () => {
codexCalls += 1;
return createAssistantMessageEventStream();
};
const wrapped = createOpenAIAttributionHeadersWrapper(upstream, {
codexNativeTransportStreamFn: codexTransport,
});
void wrapped(
{
...codexModel,
baseUrl: "https://chatgpt.com/backend-api",
} as Model<"openai-codex-responses">,
{ messages: [] },
{
apiKey: "oauth-bearer-token",
headers: {
originator: "pi",
"User-Agent": "pi",
},
},
);
expect(upstreamCalls).toBe(1);
expect(codexCalls).toBe(0);
expect(capturedOptions).toMatchObject({
apiKey: "oauth-bearer-token",
headers: {
originator: "openclaw",
"User-Agent": expect.stringMatching(/^openclaw\//),
},
});
});
});


@@ -498,10 +498,12 @@ export function createOpenAIAttributionHeadersWrapper(
if (!attributionProvider) {
return underlying(model, context, options);
}
const streamFn =
attributionProvider === "openai-codex"
? (opts?.codexNativeTransportStreamFn ?? createOpenAIResponsesTransportStreamFn())
: underlying;
const shouldCreateCodexTransport =
attributionProvider === "openai-codex" &&
(baseStreamFn === undefined || baseStreamFn === streamSimple);
const streamFn = shouldCreateCodexTransport
? (opts?.codexNativeTransportStreamFn ?? createOpenAIResponsesTransportStreamFn())
: underlying;
return streamFn(model, context, {
...options,
headers: resolveProviderRequestPolicyConfig({