fix(agents): preserve openai replay ids and timeout hooks

This commit is contained in:
Vincent Koc
2026-04-12 10:35:22 +01:00
parent d2edb559b9
commit a09e228e3e
4 changed files with 24 additions and 14 deletions

View File

@@ -123,7 +123,7 @@ export function expectOpenAIResponsesStrictSanitizeCall(
"session:history",
expect.objectContaining({
sanitizeMode: "images-only",
sanitizeToolCallIds: true,
sanitizeToolCallIds: false,
toolCallIdMode: "strict",
}),
);

View File

@@ -28,12 +28,12 @@ import {
sanitizeToolUseResultPairing,
stripToolResultDetails,
} from "../session-transcript-repair.js";
import { sanitizeToolCallIdsForCloudCodeAssist } from "../tool-call-id.js";
import type { TranscriptPolicy } from "../transcript-policy.js";
import {
resolveTranscriptPolicy,
shouldAllowProviderOwnedThinkingReplay,
} from "../transcript-policy.js";
import { sanitizeToolCallIdsForCloudCodeAssist } from "../tool-call-id.js";
import {
makeZeroUsageSnapshot,
normalizeUsage,
@@ -408,12 +408,17 @@ export async function sanitizeSessionHistory(params: {
modelApi: params.modelApi,
policy,
});
const isOpenAIResponsesApi =
params.modelApi === "openai-responses" ||
params.modelApi === "openai-codex-responses" ||
params.modelApi === "azure-openai-responses";
const sanitizedImages = await sanitizeSessionMessagesImages(
withInterSessionMarkers,
"session:history",
{
sanitizeMode: policy.sanitizeMode,
sanitizeToolCallIds: policy.sanitizeToolCallIds && !allowProviderOwnedThinkingReplay,
sanitizeToolCallIds:
policy.sanitizeToolCallIds && !allowProviderOwnedThinkingReplay && !isOpenAIResponsesApi,
toolCallIdMode: policy.toolCallIdMode,
preserveNativeAnthropicToolUseIds: policy.preserveNativeAnthropicToolUseIds,
preserveSignatures: policy.preserveSignatures,
@@ -429,7 +434,7 @@ export async function sanitizeSessionHistory(params: {
allowProviderOwnedThinkingReplay,
});
const sanitizedToolIds =
policy.sanitizeToolCallIds && policy.toolCallIdMode
policy.sanitizeToolCallIds && policy.toolCallIdMode && !isOpenAIResponsesApi
? sanitizeToolCallIdsForCloudCodeAssist(sanitizedToolCalls, policy.toolCallIdMode, {
preserveNativeAnthropicToolUseIds: policy.preserveNativeAnthropicToolUseIds,
preserveReplaySafeThinkingToolCallIds: allowProviderOwnedThinkingReplay,
@@ -446,10 +451,6 @@ export async function sanitizeSessionHistory(params: {
stripStaleAssistantUsageBeforeLatestCompaction(sanitizedToolResults),
);
const isOpenAIResponsesApi =
params.modelApi === "openai-responses" ||
params.modelApi === "openai-codex-responses" ||
params.modelApi === "azure-openai-responses";
const hasSnapshot = Boolean(params.provider || params.modelApi || params.modelId);
const priorSnapshot = hasSnapshot ? readLastModelSnapshot(params.sessionManager) : null;
const modelChanged = priorSnapshot

View File

@@ -484,6 +484,10 @@ export async function loadRunOverflowCompactionHarness(): Promise<{
buildEmbeddedRunPayloads: vi.fn(() => []),
}));
vi.doMock("./compaction-hooks.js", () => ({
runPostCompactionSideEffects: mockedRunPostCompactionSideEffects,
}));
vi.doMock("./compact.js", () => ({
runPostCompactionSideEffects: mockedRunPostCompactionSideEffects,
}));

View File

@@ -1150,7 +1150,16 @@ export async function runEmbeddedAttempt(
// historical messages at attempt start, but the agent loop's internal tool call →
// tool result cycles bypass that path. Wrap streamFn so every outbound request
// sees sanitized tool call IDs.
if (transcriptPolicy.sanitizeToolCallIds && transcriptPolicy.toolCallIdMode) {
const isOpenAIResponsesApi =
params.model.api === "openai-responses" ||
params.model.api === "azure-openai-responses" ||
params.model.api === "openai-codex-responses";
if (
transcriptPolicy.sanitizeToolCallIds &&
transcriptPolicy.toolCallIdMode &&
!isOpenAIResponsesApi
) {
const inner = activeSession.agent.streamFn;
const mode = transcriptPolicy.toolCallIdMode;
activeSession.agent.streamFn = (model, context, options) => {
@@ -1183,11 +1192,7 @@ export async function runEmbeddedAttempt(
};
}
if (
params.model.api === "openai-responses" ||
params.model.api === "azure-openai-responses" ||
params.model.api === "openai-codex-responses"
) {
if (isOpenAIResponsesApi) {
const inner = activeSession.agent.streamFn;
activeSession.agent.streamFn = (model, context, options) => {
const ctx = context as unknown as { messages?: unknown };