fix(agents): suppress mid-turn continuation prompts

This commit is contained in:
Vincent Koc
2026-05-03 22:22:44 -07:00
parent 5a6cedc14a
commit 1d935cce51
3 changed files with 11 additions and 4 deletions

View File

@@ -50,6 +50,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Agents/Pi: suppress persistence for synthetic mid-turn overflow continuation prompts, so transcript-retry recovery does not write the "continue from transcript" prompt as a new user turn. Thanks @vincentkoc.
- Exec approvals: detect split-string command-carrier risks from `env -S` even when `-S`/`-s` is combined with other env short options (e.g. `env -iS...`), so approval explanations do not miss payloads hidden in the split string. Thanks @vincentkoc.
- Voice Call: mark realtime calls completed when the realtime provider closes normally, so Twilio/OpenAI/Google realtime stop events do not leave active call records behind. Thanks @vincentkoc.
- Exec approvals: treat POSIX `exec` as a command carrier for inline eval, shell-wrapper, and eval/source detection, so approval explanations and command-risk checks do not miss payloads hidden behind `exec`. Thanks @vincentkoc.

View File

@@ -359,6 +359,7 @@ describe("overflow compaction in run loop", () => {
2,
expect.objectContaining({
prompt: expect.stringContaining("Continue from the current transcript"),
suppressNextUserMessagePersistence: true,
}),
);
expect(mockedRunEmbeddedAttempt).not.toHaveBeenNthCalledWith(
@@ -433,6 +434,7 @@ describe("overflow compaction in run loop", () => {
2,
expect.objectContaining({
prompt: expect.stringContaining("Continue from the current transcript"),
suppressNextUserMessagePersistence: true,
}),
);
expect(mockedRunEmbeddedAttempt).not.toHaveBeenNthCalledWith(

View File

@@ -817,6 +817,10 @@ export async function runEmbeddedPiAgent(
}
params.onUserMessagePersisted?.(message);
};
const continueFromCurrentTranscript = () => {
nextAttemptPromptOverride = MID_TURN_PRECHECK_CONTINUATION_PROMPT;
suppressNextUserMessagePersistence = true;
};
const maybeEscalateRateLimitProfileFallback = (params: {
failoverProvider: string;
failoverModel: string;
@@ -1327,7 +1331,7 @@ export async function runEmbeddedPiAgent(
(retryingFromTranscript ? "retrying from current transcript" : "retrying prompt"),
);
if (retryingFromTranscript) {
nextAttemptPromptOverride = MID_TURN_PRECHECK_CONTINUATION_PROMPT;
continueFromCurrentTranscript();
}
continue;
}
@@ -1512,7 +1516,7 @@ export async function runEmbeddedPiAgent(
`context overflow persisted after in-attempt compaction (attempt ${overflowCompactionAttempts}/${MAX_OVERFLOW_COMPACTION_ATTEMPTS}); retrying prompt without additional compaction for ${provider}/${modelId}`,
);
if (preflightRecovery?.source === "mid-turn") {
nextAttemptPromptOverride = MID_TURN_PRECHECK_CONTINUATION_PROMPT;
continueFromCurrentTranscript();
}
continue;
}
@@ -1645,7 +1649,7 @@ export async function runEmbeddedPiAgent(
autoCompactionCount += 1;
log.info(`auto-compaction succeeded for ${provider}/${modelId}; retrying prompt`);
if (preflightRecovery?.source === "mid-turn") {
nextAttemptPromptOverride = MID_TURN_PRECHECK_CONTINUATION_PROMPT;
continueFromCurrentTranscript();
} else if (
params.currentMessageId !== undefined &&
params.currentMessageId === lastPersistedCurrentMessageId
@@ -1696,7 +1700,7 @@ export async function runEmbeddedPiAgent(
`[context-overflow-recovery] Truncated ${truncResult.truncatedCount} tool result(s); retrying prompt`,
);
if (preflightRecovery?.source === "mid-turn") {
nextAttemptPromptOverride = MID_TURN_PRECHECK_CONTINUATION_PROMPT;
continueFromCurrentTranscript();
}
continue;
}