fix(openai-codex): match codex replay identity

Author: Peter Steinberger
Date: 2026-05-05 00:25:49 +01:00
parent 27e467ad23
commit 15d3fd83bb
6 changed files with 25 additions and 23 deletions

@@ -1020,7 +1020,7 @@ describe("openai transport stream", () => {
expect(params.max_output_tokens).toBe(65_536);
});
it("uses top-level instructions for Codex responses and strips unsupported ChatGPT params", () => {
it("uses top-level instructions for Codex responses and preserves prompt cache identity", () => {
const params = buildOpenAIResponsesParams(
{
id: "gpt-5.4",
@@ -1059,7 +1059,7 @@ describe("openai transport stream", () => {
expect(params.input?.some((item) => item.role === "system" || item.role === "developer")).toBe(
false,
);
expect(params).not.toHaveProperty("prompt_cache_key");
expect(params.prompt_cache_key).toBe("session-123");
expect(params.store).toBe(false);
expect(params).not.toHaveProperty("metadata");
expect(params).not.toHaveProperty("max_output_tokens");
@@ -1068,7 +1068,7 @@ describe("openai transport stream", () => {
expect(params).not.toHaveProperty("temperature");
});
it("sanitizes Codex responses params after payload hooks mutate them", () => {
it("sanitizes Codex responses params after payload hooks mutate them without stripping cache identity", () => {
const payload = {
model: "gpt-5.4",
input: [],
@@ -1097,7 +1097,7 @@ describe("openai transport stream", () => {
payload,
);
expect(sanitized).not.toHaveProperty("prompt_cache_key");
expect(sanitized.prompt_cache_key).toBe("session-123");
expect(sanitized).not.toHaveProperty("metadata");
expect(sanitized).not.toHaveProperty("max_output_tokens");
expect(sanitized).not.toHaveProperty("prompt_cache_retention");
@@ -1257,10 +1257,16 @@ describe("openai transport stream", () => {
id?: string;
call_id?: string;
phase?: string;
+encrypted_content?: string;
}>;
};
expect(params.input?.some((item) => item.type === "reasoning")).toBe(false);
const reasoningItem = params.input?.find((item) => item.type === "reasoning");
expect(reasoningItem).toMatchObject({
type: "reasoning",
encrypted_content: "ciphertext",
});
expect(reasoningItem?.id).toBeUndefined();
const assistantMessage = params.input?.find(
(item) => item.type === "message" && item.role === "assistant",
);
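
The replay test flips from asserting that reasoning items are dropped to asserting that the encrypted reasoning item is sent back, with its stored id removed. A hedged sketch of the item shape being asserted (the summary field is an assumption based on the SDK's ResponseReasoningItem type; it does not appear in the diff):

// Illustrative reasoning item as replayed to the native Codex backend.
// encrypted_content lets the server restore the reasoning state; the persisted
// id is stripped for this backend, matching the toBeUndefined assertion above.
const replayedReasoning = {
  type: "reasoning" as const,
  encrypted_content: "ciphertext",
  summary: [], // assumed: required by ResponseReasoningItem in the SDK types
};
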

@@ -20,6 +20,7 @@ import type {
ResponseInputItem,
ResponseInputMessageContentList,
ResponseOutputMessage,
+ResponseReasoningItem,
} from "openai/resources/responses/responses.js";
import type { ModelCompatConfig } from "../config/types.models.js";
import { createSubsystemLogger } from "../logging/subsystem.js";
@@ -60,6 +61,7 @@ const OPENAI_CODEX_RESPONSES_EMPTY_INPUT_TEXT = " ";
const log = createSubsystemLogger("openai-transport");
type ReplayableResponseOutputMessage = Omit<ResponseOutputMessage, "id"> & { id?: string };
+type ReplayableResponseReasoningItem = Omit<ResponseReasoningItem, "id"> & { id?: string };
type BaseStreamOptions = {
temperature?: number;
@@ -299,7 +301,13 @@ function convertResponsesMessages(
for (const block of msg.content) {
if (block.type === "thinking") {
if (shouldReplayReasoningItems && block.thinkingSignature) {
-output.push(JSON.parse(block.thinkingSignature));
+const reasoningItem = JSON.parse(
+block.thinkingSignature,
+) as ReplayableResponseReasoningItem;
+if (!shouldReplayResponsesItemIds) {
+delete reasoningItem.id;
+}
+output.push(reasoningItem as ResponseInputItem);
}
} else if (block.type === "text") {
const textSignature = parseTextSignature(block.textSignature);
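
Restated as a standalone helper (a hedged sketch, not code from this repo), the replay rule in this hunk is: always re-emit the reasoning item serialized into thinkingSignature, and keep its id only when the target backend accepts replayed item ids.

import type { ResponseInputItem } from "openai/resources/responses/responses.js";

// Hedged sketch of the branch above: thinkingSignature holds the serialized
// reasoning item; drop its id unless replayed item ids are allowed.
function toReplayableReasoningItem(
  thinkingSignature: string,
  replayItemIds: boolean,
): ResponseInputItem {
  const item = JSON.parse(thinkingSignature) as { id?: string } & Record<string, unknown>;
  if (!replayItemIds) {
    delete item.id;
  }
  return item as unknown as ResponseInputItem;
}
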
@@ -927,7 +935,6 @@ function usesNativeOpenAICodexResponsesBackend(model: Model<Api>): boolean {
const OPENAI_CODEX_RESPONSES_UNSUPPORTED_PARAMS = [
"max_output_tokens",
"metadata",
"prompt_cache_key",
"prompt_cache_retention",
"service_tier",
"temperature",
@@ -987,7 +994,7 @@ export function buildOpenAIResponsesParams(
{
includeSystemPrompt: !isCodexResponses,
supportsDeveloperRole,
-replayReasoningItems: !isNativeCodexResponses,
+replayReasoningItems: true,
replayResponsesItemIds: !isNativeCodexResponses,
},
);
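
Taken together with the test changes, the option flip means reasoning items are replayed on every responses backend, while the native Codex backend additionally drops the stored item ids and keeps its prompt_cache_key, so the replayed request presents the same identity Codex saw originally. A compact restatement of the two flags as they now read in the diff:

// Inferred directly from the diff: how the replay flags differ by backend.
const replayOptions = (isNativeCodexResponses: boolean) => ({
  replayReasoningItems: true,                      // always replay reasoning items
  replayResponsesItemIds: !isNativeCodexResponses, // native Codex drops stored item ids
});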