Files
openclaw/src/agents/pi-embedded-runner/run.ts
Forgely3D 4fa11632b4 fix: escalate to model fallback after rate-limit profile rotation cap (#58707)
* fix: escalate to model fallback after rate-limit profile rotation cap

Per-model rate limits (e.g. Anthropic Sonnet-only quotas) are not
relieved by rotating auth profiles — if all profiles share the same
model quota, cycling between them loops forever without falling back
to the next model in the configured fallbacks chain.

Apply the same rotation-cap pattern introduced for overloaded_error
(#58348) to rate_limit errors:

- Add `rateLimitedProfileRotations` to auth.cooldowns config (default: 1)
- After N profile rotations on a rate_limit error, throw FailoverError
  to trigger cross-provider model fallback
- Add `resolveRateLimitProfileRotationLimit` helper following the same
  pattern as `resolveOverloadProfileRotationLimit`

Fixes #58572

* fix: cap prompt-side rate-limit failover (#58707) (thanks @Forgely3D)

* fix: restore latest-main gates for #58707

---------

Co-authored-by: Ember (Forgely3D) <ember@forgely.co>
Co-authored-by: Peter Steinberger <steipete@gmail.com>
2026-04-01 17:54:10 +09:00

1484 lines
65 KiB
TypeScript

import { randomBytes } from "node:crypto";
import fs from "node:fs/promises";
import type { ThinkLevel } from "../../auto-reply/thinking.js";
import {
ensureContextEnginesInitialized,
resolveContextEngine,
} from "../../context-engine/index.js";
import { sleepWithAbort } from "../../infra/backoff.js";
import { getGlobalHookRunner } from "../../plugins/hook-runner-global.js";
import { enqueueCommandInLane } from "../../process/command-queue.js";
import { sanitizeForLog } from "../../terminal/ansi.js";
import { isMarkdownCapableMessageChannel } from "../../utils/message-channel.js";
import { resolveOpenClawAgentDir } from "../agent-paths.js";
import { hasConfiguredModelFallbacks } from "../agent-scope.js";
import {
type AuthProfileFailureReason,
markAuthProfileFailure,
markAuthProfileGood,
markAuthProfileUsed,
} from "../auth-profiles.js";
import { DEFAULT_MODEL, DEFAULT_PROVIDER } from "../defaults.js";
import {
coerceToFailoverError,
describeFailoverError,
FailoverError,
resolveFailoverStatus,
} from "../failover-error.js";
import {
hasDifferentLiveSessionModelSelection,
LiveSessionModelSwitchError,
consumeLiveSessionModelSwitch,
} from "../live-model-switch.js";
import {
applyLocalNoAuthHeaderOverride,
ensureAuthProfileStore,
type ResolvedProviderAuth,
resolveAuthProfileOrder,
} from "../model-auth.js";
import { normalizeProviderId } from "../model-selection.js";
import { ensureOpenClawModelsJson } from "../models-config.js";
import { disposeSessionMcpRuntime } from "../pi-bundle-mcp-tools.js";
import {
classifyFailoverReason,
extractObservedOverflowTokenCount,
type FailoverReason,
formatAssistantErrorText,
formatBillingErrorMessage,
isAuthAssistantError,
isBillingAssistantError,
isCompactionFailureError,
isFailoverAssistantError,
isFailoverErrorMessage,
isLikelyContextOverflowError,
isRateLimitAssistantError,
isTimeoutErrorMessage,
parseImageDimensionError,
parseImageSizeError,
pickFallbackThinkingLevel,
} from "../pi-embedded-helpers.js";
import { ensureRuntimePluginsLoaded } from "../runtime-plugins.js";
import { isLikelyMutatingToolName } from "../tool-mutation.js";
import { derivePromptTokens, normalizeUsage, type UsageLike } from "../usage.js";
import { redactRunIdentifier, resolveRunWorkspaceDir } from "../workspace-run.js";
import { runPostCompactionSideEffects } from "./compact.js";
import { buildEmbeddedCompactionRuntimeContext } from "./compaction-runtime-context.js";
import { runContextEngineMaintenance } from "./context-engine-maintenance.js";
import { resolveGlobalLane, resolveSessionLane } from "./lanes.js";
import { log } from "./logger.js";
import { resolveModelAsync } from "./model.js";
import { runEmbeddedAttempt } from "./run/attempt.js";
import { createEmbeddedRunAuthController } from "./run/auth-controller.js";
import { createFailoverDecisionLogger } from "./run/failover-observation.js";
import {
buildErrorAgentMeta,
buildUsageAgentMetaFields,
createCompactionDiagId,
resolveActiveErrorContext,
resolveMaxRunRetryIterations,
resolveOverloadFailoverBackoffMs,
resolveOverloadProfileRotationLimit,
resolveRateLimitProfileRotationLimit,
type RuntimeAuthState,
scrubAnthropicRefusalMagic,
} from "./run/helpers.js";
import type { RunEmbeddedPiAgentParams } from "./run/params.js";
import { buildEmbeddedRunPayloads } from "./run/payloads.js";
import { resolveEffectiveRuntimeModel, resolveHookModelSelection } from "./run/setup.js";
import {
sessionLikelyHasOversizedToolResults,
truncateOversizedToolResultsInSession,
} from "./tool-result-truncation.js";
import type { EmbeddedPiAgentMeta, EmbeddedPiRunResult } from "./types.js";
import { createUsageAccumulator, mergeUsageIntoAccumulator } from "./usage-accumulator.js";
import { describeUnknownError } from "./utils.js";
type ApiKeyInfo = ResolvedProviderAuth;
export async function runEmbeddedPiAgent(
params: RunEmbeddedPiAgentParams,
): Promise<EmbeddedPiRunResult> {
const sessionLane = resolveSessionLane(params.sessionKey?.trim() || params.sessionId);
const globalLane = resolveGlobalLane(params.lane);
const enqueueGlobal =
params.enqueue ?? ((task, opts) => enqueueCommandInLane(globalLane, task, opts));
const enqueueSession =
params.enqueue ?? ((task, opts) => enqueueCommandInLane(sessionLane, task, opts));
const channelHint = params.messageChannel ?? params.messageProvider;
const resolvedToolResultFormat =
params.toolResultFormat ??
(channelHint
? isMarkdownCapableMessageChannel(channelHint)
? "markdown"
: "plain"
: "markdown");
const isProbeSession = params.sessionId?.startsWith("probe-") ?? false;
return enqueueSession(() =>
enqueueGlobal(async () => {
const started = Date.now();
const workspaceResolution = resolveRunWorkspaceDir({
workspaceDir: params.workspaceDir,
sessionKey: params.sessionKey,
agentId: params.agentId,
config: params.config,
});
const resolvedWorkspace = workspaceResolution.workspaceDir;
const redactedSessionId = redactRunIdentifier(params.sessionId);
const redactedSessionKey = redactRunIdentifier(params.sessionKey);
const redactedWorkspace = redactRunIdentifier(resolvedWorkspace);
if (workspaceResolution.usedFallback) {
log.warn(
`[workspace-fallback] caller=runEmbeddedPiAgent reason=${workspaceResolution.fallbackReason} run=${params.runId} session=${redactedSessionId} sessionKey=${redactedSessionKey} agent=${workspaceResolution.agentId} workspace=${redactedWorkspace}`,
);
}
ensureRuntimePluginsLoaded({
config: params.config,
workspaceDir: resolvedWorkspace,
allowGatewaySubagentBinding: params.allowGatewaySubagentBinding,
});
let provider = (params.provider ?? DEFAULT_PROVIDER).trim() || DEFAULT_PROVIDER;
let modelId = (params.model ?? DEFAULT_MODEL).trim() || DEFAULT_MODEL;
const agentDir = params.agentDir ?? resolveOpenClawAgentDir();
const fallbackConfigured = hasConfiguredModelFallbacks({
cfg: params.config,
agentId: params.agentId,
sessionKey: params.sessionKey,
});
await ensureOpenClawModelsJson(params.config, agentDir);
const hookRunner = getGlobalHookRunner();
const hookCtx = {
runId: params.runId,
agentId: workspaceResolution.agentId,
sessionKey: params.sessionKey,
sessionId: params.sessionId,
workspaceDir: resolvedWorkspace,
messageProvider: params.messageProvider ?? undefined,
trigger: params.trigger,
channelId: params.messageChannel ?? params.messageProvider ?? undefined,
};
const hookSelection = await resolveHookModelSelection({
prompt: params.prompt,
provider,
modelId,
hookRunner,
hookContext: hookCtx,
});
provider = hookSelection.provider;
modelId = hookSelection.modelId;
const legacyBeforeAgentStartResult = hookSelection.legacyBeforeAgentStartResult;
const { model, error, authStorage, modelRegistry } = await resolveModelAsync(
provider,
modelId,
agentDir,
params.config,
);
if (!model) {
throw new FailoverError(error ?? `Unknown model: ${provider}/${modelId}`, {
reason: "model_not_found",
provider,
model: modelId,
});
}
let runtimeModel = model;
const resolvedRuntimeModel = resolveEffectiveRuntimeModel({
cfg: params.config,
provider,
modelId,
runtimeModel,
});
const ctxInfo = resolvedRuntimeModel.ctxInfo;
let effectiveModel = resolvedRuntimeModel.effectiveModel;
const authStore = ensureAuthProfileStore(agentDir, {
allowKeychainPrompt: false,
});
const preferredProfileId = params.authProfileId?.trim();
let lockedProfileId = params.authProfileIdSource === "user" ? preferredProfileId : undefined;
if (lockedProfileId) {
const lockedProfile = authStore.profiles[lockedProfileId];
if (
!lockedProfile ||
normalizeProviderId(lockedProfile.provider) !== normalizeProviderId(provider)
) {
lockedProfileId = undefined;
}
}
const profileOrder = resolveAuthProfileOrder({
cfg: params.config,
store: authStore,
provider,
preferredProfile: preferredProfileId,
});
if (lockedProfileId && !profileOrder.includes(lockedProfileId)) {
throw new Error(`Auth profile "${lockedProfileId}" is not configured for ${provider}.`);
}
const profileCandidates = lockedProfileId
? [lockedProfileId]
: profileOrder.length > 0
? profileOrder
: [undefined];
let profileIndex = 0;
const initialThinkLevel = params.thinkLevel ?? "off";
let thinkLevel = initialThinkLevel;
const attemptedThinking = new Set<ThinkLevel>();
let apiKeyInfo: ApiKeyInfo | null = null;
let lastProfileId: string | undefined;
let runtimeAuthState: RuntimeAuthState | null = null;
let runtimeAuthRefreshCancelled = false;
// Snapshot the model/auth selection currently driving this run so it can be
// compared against a live model-switch request for the same session.
const resolveCurrentLiveSelection = () => {
  const currentSelection = {
    provider,
    model: modelId,
    authProfileId: preferredProfileId,
    authProfileIdSource: params.authProfileIdSource,
  };
  return currentSelection;
};
const {
advanceAuthProfile,
initializeAuthProfile,
maybeRefreshRuntimeAuthForAuthError,
stopRuntimeAuthRefreshTimer,
} = createEmbeddedRunAuthController({
config: params.config,
agentDir,
workspaceDir: resolvedWorkspace,
authStore,
authStorage,
profileCandidates,
lockedProfileId,
initialThinkLevel,
attemptedThinking,
fallbackConfigured,
allowTransientCooldownProbe: params.allowTransientCooldownProbe === true,
getProvider: () => provider,
getModelId: () => modelId,
getRuntimeModel: () => runtimeModel,
setRuntimeModel: (next) => {
runtimeModel = next;
},
getEffectiveModel: () => effectiveModel,
setEffectiveModel: (next) => {
effectiveModel = next;
},
getApiKeyInfo: () => apiKeyInfo,
setApiKeyInfo: (next) => {
apiKeyInfo = next;
},
getLastProfileId: () => lastProfileId,
setLastProfileId: (next) => {
lastProfileId = next;
},
getRuntimeAuthState: () => runtimeAuthState,
setRuntimeAuthState: (next) => {
runtimeAuthState = next;
},
getRuntimeAuthRefreshCancelled: () => runtimeAuthRefreshCancelled,
setRuntimeAuthRefreshCancelled: (next) => {
runtimeAuthRefreshCancelled = next;
},
getProfileIndex: () => profileIndex,
setProfileIndex: (next) => {
profileIndex = next;
},
setThinkLevel: (next) => {
thinkLevel = next;
},
log,
});
await initializeAuthProfile();
const MAX_TIMEOUT_COMPACTION_ATTEMPTS = 2;
const MAX_OVERFLOW_COMPACTION_ATTEMPTS = 3;
const MAX_RUN_LOOP_ITERATIONS = resolveMaxRunRetryIterations(profileCandidates.length);
let overflowCompactionAttempts = 0;
let toolResultTruncationAttempted = false;
let bootstrapPromptWarningSignaturesSeen =
params.bootstrapPromptWarningSignaturesSeen ??
(params.bootstrapPromptWarningSignature ? [params.bootstrapPromptWarningSignature] : []);
const usageAccumulator = createUsageAccumulator();
let lastRunPromptUsage: ReturnType<typeof normalizeUsage> | undefined;
let autoCompactionCount = 0;
let runLoopIterations = 0;
let overloadProfileRotations = 0;
let rateLimitProfileRotations = 0;
let timeoutCompactionAttempts = 0;
const overloadFailoverBackoffMs = resolveOverloadFailoverBackoffMs(params.config);
const overloadProfileRotationLimit = resolveOverloadProfileRotationLimit(params.config);
const rateLimitProfileRotationLimit = resolveRateLimitProfileRotationLimit(params.config);
// Count a rate-limit-triggered auth-profile rotation and, once the configured
// rotation cap is exceeded while model fallbacks are configured, escalate to
// cross-provider model fallback by throwing a FailoverError. Rotating
// profiles cannot relieve a per-model quota shared by every profile, so
// without this cap the run would cycle profiles indefinitely.
//
// The parameter is named `escalation` (not `params`) to avoid shadowing the
// outer RunEmbeddedPiAgentParams `params` binding — `provider`/`modelId` in
// the log line below intentionally refer to the outer closure state, while
// the thrown error targets the caller-supplied failover provider/model.
const maybeEscalateRateLimitProfileFallback = (escalation: {
  failoverProvider: string;
  failoverModel: string;
  logFallbackDecision: (decision: "fallback_model", extra?: { status?: number }) => void;
}) => {
  rateLimitProfileRotations += 1;
  // Keep rotating profiles while under the cap, or when there is no
  // fallback model to escalate to anyway.
  if (rateLimitProfileRotations <= rateLimitProfileRotationLimit || !fallbackConfigured) {
    return;
  }
  const status = resolveFailoverStatus("rate_limit");
  log.warn(
    `rate-limit profile rotation cap reached for ${sanitizeForLog(provider)}/${sanitizeForLog(modelId)} after ${rateLimitProfileRotations} rotations; escalating to model fallback`,
  );
  escalation.logFallbackDecision("fallback_model", { status });
  throw new FailoverError(
    "The AI service is temporarily rate-limited. Please try again in a moment.",
    {
      reason: "rate_limit",
      provider: escalation.failoverProvider,
      model: escalation.failoverModel,
      profileId: lastProfileId,
      status,
    },
  );
};
// Persist an auth-profile failure so later profile ordering can deprioritize
// it. No-op when there is no profile, no reason, or the reason is "timeout"
// (timeouts are transport failures, not auth-health signals — see
// resolveAuthProfileFailureReason).
// NOTE(review): the `config` and `agentDir` fields of `failure` are accepted
// by the type but never read — the outer `params.config` and `agentDir` are
// used instead; confirm whether the fields can be dropped from the type.
const maybeMarkAuthProfileFailure = async (failure: {
  profileId?: string;
  reason?: AuthProfileFailureReason | null;
  config?: RunEmbeddedPiAgentParams["config"];
  agentDir?: RunEmbeddedPiAgentParams["agentDir"];
  modelId?: string;
}) => {
  const { profileId, reason } = failure;
  if (!profileId || !reason || reason === "timeout") {
    return;
  }
  await markAuthProfileFailure({
    store: authStore,
    profileId,
    reason,
    cfg: params.config,
    agentDir,
    runId: params.runId,
    modelId: failure.modelId,
  });
};
// Map a failover classification onto the auth-profile failure reason that
// should be persisted, filtering out signals that say nothing about auth
// health. Timeouts are transport/model-path failures, not auth health
// signals, so they must not persist auth-profile failure state.
const resolveAuthProfileFailureReason = (
  failoverReason: FailoverReason | null,
): AuthProfileFailureReason | null =>
  failoverReason && failoverReason !== "timeout" ? failoverReason : null;
// Optional delay before failing over on an overloaded-provider error, giving
// the provider a chance to recover before the next fallback is consumed.
// No-op for every other failure reason, or when the configured backoff is
// disabled (<= 0).
const maybeBackoffBeforeOverloadFailover = async (reason: FailoverReason | null) => {
  if (reason !== "overloaded" || overloadFailoverBackoffMs <= 0) {
    return;
  }
  log.warn(
    `overload backoff before failover for ${provider}/${modelId}: delayMs=${overloadFailoverBackoffMs}`,
  );
  try {
    await sleepWithAbort(overloadFailoverBackoffMs, params.abortSignal);
  } catch (err) {
    // If the caller aborted during the backoff sleep, surface a conventional
    // AbortError (preserving the original error as `cause`) so upstream
    // abort handling recognizes it; rethrow anything else unchanged.
    if (params.abortSignal?.aborted) {
      const abortErr = new Error("Operation aborted", { cause: err });
      abortErr.name = "AbortError";
      throw abortErr;
    }
    throw err;
  }
};
// Resolve the context engine once and reuse across retries to avoid
// repeated initialization/connection overhead per attempt.
ensureContextEnginesInitialized();
const contextEngine = await resolveContextEngine(params.config);
try {
// When the engine owns compaction, compactEmbeddedPiSessionDirect is
// bypassed. Fire lifecycle hooks here so recovery paths still notify
// subscribers like memory extensions and usage trackers.
// When the context engine owns compaction, the direct compaction path (and
// its hook invocations) is bypassed; fire before_compaction here so
// subscribers are still notified during recovery-triggered compactions.
// messageCount is -1 because no message count is computed on this path.
const runOwnsCompactionBeforeHook = async (reason: string) => {
  if (
    contextEngine.info.ownsCompaction !== true ||
    !hookRunner?.hasHooks("before_compaction")
  ) {
    return;
  }
  try {
    await hookRunner.runBeforeCompaction(
      { messageCount: -1, sessionFile: params.sessionFile },
      hookCtx,
    );
  } catch (hookErr) {
    // Hooks are best-effort: a failing subscriber must not abort recovery.
    log.warn(`before_compaction hook failed during ${reason}: ${String(hookErr)}`);
  }
};
// Engine-owned-compaction counterpart for after_compaction: only fires when
// the engine actually compacted (ok && compacted) and a subscriber exists.
// messageCount/compactedCount are -1 because no counts are computed on this
// path; tokenCount is taken from the engine's tokensAfter result.
const runOwnsCompactionAfterHook = async (
  reason: string,
  compactResult: Awaited<ReturnType<typeof contextEngine.compact>>,
) => {
  if (
    contextEngine.info.ownsCompaction !== true ||
    !compactResult.ok ||
    !compactResult.compacted ||
    !hookRunner?.hasHooks("after_compaction")
  ) {
    return;
  }
  try {
    await hookRunner.runAfterCompaction(
      {
        messageCount: -1,
        compactedCount: -1,
        tokenCount: compactResult.result?.tokensAfter,
        sessionFile: params.sessionFile,
      },
      hookCtx,
    );
  } catch (hookErr) {
    // Hooks are best-effort: a failing subscriber must not abort recovery.
    log.warn(`after_compaction hook failed during ${reason}: ${String(hookErr)}`);
  }
};
let authRetryPending = false;
// Hoisted so the retry-limit error path can use the most recent API total.
let lastTurnTotal: number | undefined;
while (true) {
if (runLoopIterations >= MAX_RUN_LOOP_ITERATIONS) {
const message =
`Exceeded retry limit after ${runLoopIterations} attempts ` +
`(max=${MAX_RUN_LOOP_ITERATIONS}).`;
log.error(
`[run-retry-limit] sessionKey=${params.sessionKey ?? params.sessionId} ` +
`provider=${provider}/${modelId} attempts=${runLoopIterations} ` +
`maxAttempts=${MAX_RUN_LOOP_ITERATIONS}`,
);
return {
payloads: [
{
text:
"Request failed after repeated internal retries. " +
"Please try again, or use /new to start a fresh session.",
isError: true,
},
],
meta: {
durationMs: Date.now() - started,
agentMeta: buildErrorAgentMeta({
sessionId: params.sessionId,
provider,
model: model.id,
usageAccumulator,
lastRunPromptUsage,
lastTurnTotal,
}),
error: { kind: "retry_limit", message },
},
};
}
runLoopIterations += 1;
const runtimeAuthRetry = authRetryPending;
authRetryPending = false;
attemptedThinking.add(thinkLevel);
await fs.mkdir(resolvedWorkspace, { recursive: true });
const prompt =
provider === "anthropic" ? scrubAnthropicRefusalMagic(params.prompt) : params.prompt;
const attempt = await runEmbeddedAttempt({
sessionId: params.sessionId,
sessionKey: params.sessionKey,
trigger: params.trigger,
memoryFlushWritePath: params.memoryFlushWritePath,
messageChannel: params.messageChannel,
messageProvider: params.messageProvider,
agentAccountId: params.agentAccountId,
messageTo: params.messageTo,
messageThreadId: params.messageThreadId,
groupId: params.groupId,
groupChannel: params.groupChannel,
groupSpace: params.groupSpace,
spawnedBy: params.spawnedBy,
senderId: params.senderId,
senderName: params.senderName,
senderUsername: params.senderUsername,
senderE164: params.senderE164,
senderIsOwner: params.senderIsOwner,
currentChannelId: params.currentChannelId,
currentThreadTs: params.currentThreadTs,
currentMessageId: params.currentMessageId,
replyToMode: params.replyToMode,
hasRepliedRef: params.hasRepliedRef,
sessionFile: params.sessionFile,
workspaceDir: resolvedWorkspace,
agentDir,
config: params.config,
allowGatewaySubagentBinding: params.allowGatewaySubagentBinding,
contextEngine,
contextTokenBudget: ctxInfo.tokens,
skillsSnapshot: params.skillsSnapshot,
prompt,
images: params.images,
imageOrder: params.imageOrder,
clientTools: params.clientTools,
disableTools: params.disableTools,
provider,
modelId,
model: applyLocalNoAuthHeaderOverride(effectiveModel, apiKeyInfo),
authProfileId: lastProfileId,
authProfileIdSource: lockedProfileId ? "user" : "auto",
authStorage,
modelRegistry,
agentId: workspaceResolution.agentId,
legacyBeforeAgentStartResult,
thinkLevel,
fastMode: params.fastMode,
verboseLevel: params.verboseLevel,
reasoningLevel: params.reasoningLevel,
toolResultFormat: resolvedToolResultFormat,
execOverrides: params.execOverrides,
bashElevated: params.bashElevated,
timeoutMs: params.timeoutMs,
runId: params.runId,
abortSignal: params.abortSignal,
shouldEmitToolResult: params.shouldEmitToolResult,
shouldEmitToolOutput: params.shouldEmitToolOutput,
onPartialReply: params.onPartialReply,
onAssistantMessageStart: params.onAssistantMessageStart,
onBlockReply: params.onBlockReply,
onBlockReplyFlush: params.onBlockReplyFlush,
blockReplyBreak: params.blockReplyBreak,
blockReplyChunking: params.blockReplyChunking,
onReasoningStream: params.onReasoningStream,
onReasoningEnd: params.onReasoningEnd,
onToolResult: params.onToolResult,
onAgentEvent: params.onAgentEvent,
extraSystemPrompt: params.extraSystemPrompt,
inputProvenance: params.inputProvenance,
streamParams: params.streamParams,
ownerNumbers: params.ownerNumbers,
enforceFinalTag: params.enforceFinalTag,
silentExpected: params.silentExpected,
bootstrapPromptWarningSignaturesSeen,
bootstrapPromptWarningSignature:
bootstrapPromptWarningSignaturesSeen[bootstrapPromptWarningSignaturesSeen.length - 1],
});
const {
aborted,
promptError,
timedOut,
timedOutDuringCompaction,
sessionIdUsed,
lastAssistant,
} = attempt;
bootstrapPromptWarningSignaturesSeen =
attempt.bootstrapPromptWarningSignaturesSeen ??
(attempt.bootstrapPromptWarningSignature
? Array.from(
new Set([
...bootstrapPromptWarningSignaturesSeen,
attempt.bootstrapPromptWarningSignature,
]),
)
: bootstrapPromptWarningSignaturesSeen);
const lastAssistantUsage = normalizeUsage(lastAssistant?.usage as UsageLike);
const attemptUsage = attempt.attemptUsage ?? lastAssistantUsage;
mergeUsageIntoAccumulator(usageAccumulator, attemptUsage);
// Keep prompt size from the latest model call so session totalTokens
// reflects current context usage, not accumulated tool-loop usage.
lastRunPromptUsage = lastAssistantUsage ?? attemptUsage;
lastTurnTotal = lastAssistantUsage?.total ?? attemptUsage?.total;
const attemptCompactionCount = Math.max(0, attempt.compactionCount ?? 0);
autoCompactionCount += attemptCompactionCount;
const activeErrorContext = resolveActiveErrorContext({
lastAssistant,
provider,
model: modelId,
});
const formattedAssistantErrorText = lastAssistant
? formatAssistantErrorText(lastAssistant, {
cfg: params.config,
sessionKey: params.sessionKey ?? params.sessionId,
provider: activeErrorContext.provider,
model: activeErrorContext.model,
})
: undefined;
const assistantErrorText =
lastAssistant?.stopReason === "error"
? lastAssistant.errorMessage?.trim() || formattedAssistantErrorText
: undefined;
const canRestartForLiveSwitch =
!attempt.didSendViaMessagingTool &&
!attempt.didSendDeterministicApprovalPrompt &&
!attempt.lastToolError &&
attempt.toolMetas.length === 0 &&
attempt.assistantTexts.length === 0;
const requestedSelection = consumeLiveSessionModelSwitch(params.sessionId);
if (
requestedSelection &&
canRestartForLiveSwitch &&
hasDifferentLiveSessionModelSelection(resolveCurrentLiveSelection(), requestedSelection)
) {
log.info(
`live session model switch requested during active attempt for ${params.sessionId}: ${provider}/${modelId} -> ${requestedSelection.provider}/${requestedSelection.model}`,
);
throw new LiveSessionModelSwitchError(requestedSelection);
}
// ── Timeout-triggered compaction ──────────────────────────────────
// When the LLM times out with high context usage, compact before
// retrying to break the death spiral of repeated timeouts.
if (timedOut && !timedOutDuringCompaction) {
// Only consider prompt-side tokens here. API totals include output
// tokens, which can make a long generation look like high context
// pressure even when the prompt itself was small.
const lastTurnPromptTokens = derivePromptTokens(lastRunPromptUsage);
const tokenUsedRatio =
lastTurnPromptTokens != null && ctxInfo.tokens > 0
? lastTurnPromptTokens / ctxInfo.tokens
: 0;
if (timeoutCompactionAttempts >= MAX_TIMEOUT_COMPACTION_ATTEMPTS) {
log.warn(
`[timeout-compaction] already attempted timeout compaction ${timeoutCompactionAttempts} time(s); falling through to failover rotation`,
);
} else if (tokenUsedRatio > 0.65) {
const timeoutDiagId = createCompactionDiagId();
timeoutCompactionAttempts++;
log.warn(
`[timeout-compaction] LLM timed out with high prompt token usage (${Math.round(tokenUsedRatio * 100)}%); ` +
`attempting compaction before retry (attempt ${timeoutCompactionAttempts}/${MAX_TIMEOUT_COMPACTION_ATTEMPTS}) diagId=${timeoutDiagId}`,
);
let timeoutCompactResult: Awaited<ReturnType<typeof contextEngine.compact>>;
await runOwnsCompactionBeforeHook("timeout recovery");
try {
const timeoutCompactionRuntimeContext = {
...buildEmbeddedCompactionRuntimeContext({
sessionKey: params.sessionKey,
messageChannel: params.messageChannel,
messageProvider: params.messageProvider,
agentAccountId: params.agentAccountId,
currentChannelId: params.currentChannelId,
currentThreadTs: params.currentThreadTs,
currentMessageId: params.currentMessageId,
authProfileId: lastProfileId,
workspaceDir: resolvedWorkspace,
agentDir,
config: params.config,
skillsSnapshot: params.skillsSnapshot,
senderIsOwner: params.senderIsOwner,
senderId: params.senderId,
provider,
modelId,
thinkLevel,
reasoningLevel: params.reasoningLevel,
bashElevated: params.bashElevated,
extraSystemPrompt: params.extraSystemPrompt,
ownerNumbers: params.ownerNumbers,
}),
runId: params.runId,
trigger: "timeout_recovery",
diagId: timeoutDiagId,
attempt: timeoutCompactionAttempts,
maxAttempts: MAX_TIMEOUT_COMPACTION_ATTEMPTS,
};
timeoutCompactResult = await contextEngine.compact({
sessionId: params.sessionId,
sessionKey: params.sessionKey,
sessionFile: params.sessionFile,
tokenBudget: ctxInfo.tokens,
force: true,
compactionTarget: "budget",
runtimeContext: timeoutCompactionRuntimeContext,
});
} catch (compactErr) {
log.warn(
`[timeout-compaction] contextEngine.compact() threw during timeout recovery for ${provider}/${modelId}: ${String(compactErr)}`,
);
timeoutCompactResult = {
ok: false,
compacted: false,
reason: String(compactErr),
};
}
await runOwnsCompactionAfterHook("timeout recovery", timeoutCompactResult);
if (timeoutCompactResult.compacted) {
autoCompactionCount += 1;
if (contextEngine.info.ownsCompaction === true) {
await runPostCompactionSideEffects({
config: params.config,
sessionKey: params.sessionKey,
sessionFile: params.sessionFile,
});
}
log.info(
`[timeout-compaction] compaction succeeded for ${provider}/${modelId}; retrying prompt`,
);
continue;
} else {
log.warn(
`[timeout-compaction] compaction did not reduce context for ${provider}/${modelId}; falling through to normal handling`,
);
}
}
}
const contextOverflowError = !aborted
? (() => {
if (promptError) {
const errorText = describeUnknownError(promptError);
if (isLikelyContextOverflowError(errorText)) {
return { text: errorText, source: "promptError" as const };
}
// Prompt submission failed with a non-overflow error. Do not
// inspect prior assistant errors from history for this attempt.
return null;
}
if (assistantErrorText && isLikelyContextOverflowError(assistantErrorText)) {
return {
text: assistantErrorText,
source: "assistantError" as const,
};
}
return null;
})()
: null;
if (contextOverflowError) {
const overflowDiagId = createCompactionDiagId();
const errorText = contextOverflowError.text;
const msgCount = attempt.messagesSnapshot?.length ?? 0;
const observedOverflowTokens = extractObservedOverflowTokenCount(errorText);
log.warn(
`[context-overflow-diag] sessionKey=${params.sessionKey ?? params.sessionId} ` +
`provider=${provider}/${modelId} source=${contextOverflowError.source} ` +
`messages=${msgCount} sessionFile=${params.sessionFile} ` +
`diagId=${overflowDiagId} compactionAttempts=${overflowCompactionAttempts} ` +
`observedTokens=${observedOverflowTokens ?? "unknown"} ` +
`error=${errorText.slice(0, 200)}`,
);
const isCompactionFailure = isCompactionFailureError(errorText);
const hadAttemptLevelCompaction = attemptCompactionCount > 0;
// If this attempt already compacted (SDK auto-compaction), avoid immediately
// running another explicit compaction for the same overflow trigger.
if (
!isCompactionFailure &&
hadAttemptLevelCompaction &&
overflowCompactionAttempts < MAX_OVERFLOW_COMPACTION_ATTEMPTS
) {
overflowCompactionAttempts++;
log.warn(
`context overflow persisted after in-attempt compaction (attempt ${overflowCompactionAttempts}/${MAX_OVERFLOW_COMPACTION_ATTEMPTS}); retrying prompt without additional compaction for ${provider}/${modelId}`,
);
continue;
}
// Attempt explicit overflow compaction only when this attempt did not
// already auto-compact.
if (
!isCompactionFailure &&
!hadAttemptLevelCompaction &&
overflowCompactionAttempts < MAX_OVERFLOW_COMPACTION_ATTEMPTS
) {
if (log.isEnabled("debug")) {
log.debug(
`[compaction-diag] decision diagId=${overflowDiagId} branch=compact ` +
`isCompactionFailure=${isCompactionFailure} hasOversizedToolResults=unknown ` +
`attempt=${overflowCompactionAttempts + 1} maxAttempts=${MAX_OVERFLOW_COMPACTION_ATTEMPTS}`,
);
}
overflowCompactionAttempts++;
log.warn(
`context overflow detected (attempt ${overflowCompactionAttempts}/${MAX_OVERFLOW_COMPACTION_ATTEMPTS}); attempting auto-compaction for ${provider}/${modelId}`,
);
let compactResult: Awaited<ReturnType<typeof contextEngine.compact>>;
await runOwnsCompactionBeforeHook("overflow recovery");
try {
const overflowCompactionRuntimeContext = {
...buildEmbeddedCompactionRuntimeContext({
sessionKey: params.sessionKey,
messageChannel: params.messageChannel,
messageProvider: params.messageProvider,
agentAccountId: params.agentAccountId,
currentChannelId: params.currentChannelId,
currentThreadTs: params.currentThreadTs,
currentMessageId: params.currentMessageId,
authProfileId: lastProfileId,
workspaceDir: resolvedWorkspace,
agentDir,
config: params.config,
skillsSnapshot: params.skillsSnapshot,
senderIsOwner: params.senderIsOwner,
senderId: params.senderId,
provider,
modelId,
thinkLevel,
reasoningLevel: params.reasoningLevel,
bashElevated: params.bashElevated,
extraSystemPrompt: params.extraSystemPrompt,
ownerNumbers: params.ownerNumbers,
}),
runId: params.runId,
trigger: "overflow",
...(observedOverflowTokens !== undefined
? { currentTokenCount: observedOverflowTokens }
: {}),
diagId: overflowDiagId,
attempt: overflowCompactionAttempts,
maxAttempts: MAX_OVERFLOW_COMPACTION_ATTEMPTS,
};
compactResult = await contextEngine.compact({
sessionId: params.sessionId,
sessionKey: params.sessionKey,
sessionFile: params.sessionFile,
tokenBudget: ctxInfo.tokens,
...(observedOverflowTokens !== undefined
? { currentTokenCount: observedOverflowTokens }
: {}),
force: true,
compactionTarget: "budget",
runtimeContext: overflowCompactionRuntimeContext,
});
if (compactResult.ok && compactResult.compacted) {
await runContextEngineMaintenance({
contextEngine,
sessionId: params.sessionId,
sessionKey: params.sessionKey,
sessionFile: params.sessionFile,
reason: "compaction",
runtimeContext: overflowCompactionRuntimeContext,
});
}
} catch (compactErr) {
log.warn(
`contextEngine.compact() threw during overflow recovery for ${provider}/${modelId}: ${String(compactErr)}`,
);
compactResult = {
ok: false,
compacted: false,
reason: String(compactErr),
};
}
await runOwnsCompactionAfterHook("overflow recovery", compactResult);
if (compactResult.compacted) {
autoCompactionCount += 1;
log.info(`auto-compaction succeeded for ${provider}/${modelId}; retrying prompt`);
continue;
}
log.warn(
`auto-compaction failed for ${provider}/${modelId}: ${compactResult.reason ?? "nothing to compact"}`,
);
}
// Fallback: try truncating oversized tool results in the session.
// This handles the case where a single tool result exceeds the
// context window and compaction cannot reduce it further.
if (!toolResultTruncationAttempted) {
const contextWindowTokens = ctxInfo.tokens;
const hasOversized = attempt.messagesSnapshot
? sessionLikelyHasOversizedToolResults({
messages: attempt.messagesSnapshot,
contextWindowTokens,
})
: false;
if (hasOversized) {
if (log.isEnabled("debug")) {
log.debug(
`[compaction-diag] decision diagId=${overflowDiagId} branch=truncate_tool_results ` +
`isCompactionFailure=${isCompactionFailure} hasOversizedToolResults=${hasOversized} ` +
`attempt=${overflowCompactionAttempts} maxAttempts=${MAX_OVERFLOW_COMPACTION_ATTEMPTS}`,
);
}
toolResultTruncationAttempted = true;
log.warn(
`[context-overflow-recovery] Attempting tool result truncation for ${provider}/${modelId} ` +
`(contextWindow=${contextWindowTokens} tokens)`,
);
const truncResult = await truncateOversizedToolResultsInSession({
sessionFile: params.sessionFile,
contextWindowTokens,
sessionId: params.sessionId,
sessionKey: params.sessionKey,
});
if (truncResult.truncated) {
log.info(
`[context-overflow-recovery] Truncated ${truncResult.truncatedCount} tool result(s); retrying prompt`,
);
// Do NOT reset overflowCompactionAttempts here — the global cap must remain
// enforced across all iterations to prevent unbounded compaction cycles (OC-65).
continue;
}
log.warn(
`[context-overflow-recovery] Tool result truncation did not help: ${truncResult.reason ?? "unknown"}`,
);
} else if (log.isEnabled("debug")) {
log.debug(
`[compaction-diag] decision diagId=${overflowDiagId} branch=give_up ` +
`isCompactionFailure=${isCompactionFailure} hasOversizedToolResults=${hasOversized} ` +
`attempt=${overflowCompactionAttempts} maxAttempts=${MAX_OVERFLOW_COMPACTION_ATTEMPTS}`,
);
}
}
if (
(isCompactionFailure ||
overflowCompactionAttempts >= MAX_OVERFLOW_COMPACTION_ATTEMPTS ||
toolResultTruncationAttempted) &&
log.isEnabled("debug")
) {
log.debug(
`[compaction-diag] decision diagId=${overflowDiagId} branch=give_up ` +
`isCompactionFailure=${isCompactionFailure} hasOversizedToolResults=unknown ` +
`attempt=${overflowCompactionAttempts} maxAttempts=${MAX_OVERFLOW_COMPACTION_ATTEMPTS}`,
);
}
  // All overflow-recovery options are exhausted: surface a user-facing error
  // payload. The error kind distinguishes "compaction itself failed" from a
  // plain overflow so downstream consumers can react differently.
  const kind = isCompactionFailure ? "compaction_failure" : "context_overflow";
  return {
    payloads: [
      {
        text:
          "Context overflow: prompt too large for the model. " +
          "Try /reset (or /new) to start a fresh session, or use a larger-context model.",
        isError: true,
      },
    ],
    meta: {
      durationMs: Date.now() - started,
      agentMeta: buildErrorAgentMeta({
        sessionId: sessionIdUsed,
        provider,
        model: model.id,
        usageAccumulator,
        lastRunPromptUsage,
        lastAssistant,
        lastTurnTotal,
      }),
      systemPromptReport: attempt.systemPromptReport,
      error: { kind, message: errorText },
    },
  };
}
// ---- Prompt-side failure handling: the prompt() call itself threw. ----
// Order matters: auth refresh → user-facing terminal errors (role ordering,
// image size) → profile rotation → thinking fallback → model fallback.
if (promptError && !aborted) {
  // Normalize wrapped errors (e.g. abort-wrapped RESOURCE_EXHAUSTED) into
  // FailoverError so rate-limit classification works even for nested shapes.
  const normalizedPromptFailover = coerceToFailoverError(promptError, {
    provider: activeErrorContext.provider,
    model: activeErrorContext.model,
    profileId: lastProfileId,
  });
  const promptErrorDetails = normalizedPromptFailover
    ? describeFailoverError(normalizedPromptFailover)
    : describeFailoverError(promptError);
  const errorText = promptErrorDetails.message || describeUnknownError(promptError);
  // Auth-looking errors: refresh runtime credentials once and retry the turn.
  if (await maybeRefreshRuntimeAuthForAuthError(errorText, runtimeAuthRetry)) {
    authRetryPending = true;
    continue;
  }
  // Handle role ordering errors with a user-friendly message
  if (/incorrect role information|roles must alternate/i.test(errorText)) {
    return {
      payloads: [
        {
          text:
            "Message ordering conflict - please try again. " +
            "If this persists, use /new to start a fresh session.",
          isError: true,
        },
      ],
      meta: {
        durationMs: Date.now() - started,
        agentMeta: buildErrorAgentMeta({
          sessionId: sessionIdUsed,
          provider,
          model: model.id,
          usageAccumulator,
          lastRunPromptUsage,
          lastAssistant,
          lastTurnTotal,
        }),
        systemPromptReport: attempt.systemPromptReport,
        error: { kind: "role_ordering", message: errorText },
      },
    };
  }
  // Handle image size errors with a user-friendly message (no retry needed)
  const imageSizeError = parseImageSizeError(errorText);
  if (imageSizeError) {
    // Only include the limit hint when the provider reported a finite MB cap.
    const maxMb = imageSizeError.maxMb;
    const maxMbLabel =
      typeof maxMb === "number" && Number.isFinite(maxMb) ? `${maxMb}` : null;
    const maxBytesHint = maxMbLabel ? ` (max ${maxMbLabel}MB)` : "";
    return {
      payloads: [
        {
          text:
            `Image too large for the model${maxBytesHint}. ` +
            "Please compress or resize the image and try again.",
          isError: true,
        },
      ],
      meta: {
        durationMs: Date.now() - started,
        agentMeta: buildErrorAgentMeta({
          sessionId: sessionIdUsed,
          provider,
          model: model.id,
          usageAccumulator,
          lastRunPromptUsage,
          lastAssistant,
          lastTurnTotal,
        }),
        systemPromptReport: attempt.systemPromptReport,
        error: { kind: "image_size", message: errorText },
      },
    };
  }
  // Prefer the structured reason from the normalized failover error; fall back
  // to message-text classification.
  const promptFailoverReason =
    promptErrorDetails.reason ?? classifyFailoverReason(errorText);
  const promptProfileFailureReason =
    resolveAuthProfileFailureReason(promptFailoverReason);
  // Record the failure against the active auth profile (cooldown bookkeeping).
  await maybeMarkAuthProfileFailure({
    profileId: lastProfileId,
    reason: promptProfileFailureReason,
    modelId,
  });
  const promptFailoverFailure =
    promptFailoverReason !== null || isFailoverErrorMessage(errorText);
  // Capture the failing profile before auth-profile rotation mutates `lastProfileId`.
  const failedPromptProfileId = lastProfileId;
  const logPromptFailoverDecision = createFailoverDecisionLogger({
    stage: "prompt",
    runId: params.runId,
    rawError: errorText,
    failoverReason: promptFailoverReason,
    profileFailureReason: promptProfileFailureReason,
    provider,
    model: modelId,
    profileId: failedPromptProfileId,
    fallbackConfigured,
    aborted,
  });
  // Rate limits may be per-model quotas shared across profiles; the helper
  // escalates to cross-provider model fallback once the configured profile
  // rotation cap is exceeded (see #58572).
  if (promptFailoverReason === "rate_limit") {
    maybeEscalateRateLimitProfileFallback({
      failoverProvider: provider,
      failoverModel: modelId,
      logFallbackDecision: logPromptFailoverDecision,
    });
  }
  // Rotate to the next auth profile, except on timeouts (a timeout is
  // model/network-specific, so another account would not help).
  if (
    promptFailoverFailure &&
    promptFailoverReason !== "timeout" &&
    (await advanceAuthProfile())
  ) {
    logPromptFailoverDecision("rotate_profile");
    await maybeBackoffBeforeOverloadFailover(promptFailoverReason);
    continue;
  }
  // Some providers reject specific thinking levels; retry with a supported one.
  const fallbackThinking = pickFallbackThinkingLevel({
    message: errorText,
    attempted: attemptedThinking,
  });
  if (fallbackThinking) {
    log.warn(
      `unsupported thinking level for ${provider}/${modelId}; retrying with ${fallbackThinking}`,
    );
    thinkLevel = fallbackThinking;
    continue;
  }
  // Throw FailoverError for prompt-side failover reasons when fallbacks
  // are configured so outer model fallback can continue on overload,
  // rate-limit, auth, or billing failures.
  if (fallbackConfigured && promptFailoverFailure) {
    const status = resolveFailoverStatus(promptFailoverReason ?? "unknown");
    logPromptFailoverDecision("fallback_model", { status });
    await maybeBackoffBeforeOverloadFailover(promptFailoverReason);
    throw (
      normalizedPromptFailover ??
      new FailoverError(errorText, {
        reason: promptFailoverReason ?? "unknown",
        provider,
        model: modelId,
        profileId: lastProfileId,
        status: resolveFailoverStatus(promptFailoverReason ?? "unknown"),
      })
    );
  }
  // No fallback available: log the terminal decision and rethrow as-is.
  if (promptFailoverFailure || promptFailoverReason) {
    logPromptFailoverDecision("surface_error");
  }
  throw promptError;
}
// ---- Assistant-side failure handling: prompt() resolved, but the captured
// assistant message carries an error. Classify it and decide whether to
// retry, rotate profiles, or fall back. ----
const fallbackThinking = pickFallbackThinkingLevel({
  message: lastAssistant?.errorMessage,
  attempted: attemptedThinking,
});
if (fallbackThinking && !aborted) {
  log.warn(
    `unsupported thinking level for ${provider}/${modelId}; retrying with ${fallbackThinking}`,
  );
  thinkLevel = fallbackThinking;
  continue;
}
// Classify the assistant error along several orthogonal axes.
const authFailure = isAuthAssistantError(lastAssistant);
const rateLimitFailure = isRateLimitAssistantError(lastAssistant);
const billingFailure = isBillingAssistantError(lastAssistant);
const failoverFailure = isFailoverAssistantError(lastAssistant);
const assistantFailoverReason = classifyFailoverReason(lastAssistant?.errorMessage ?? "");
const assistantProfileFailureReason =
  resolveAuthProfileFailureReason(assistantFailoverReason);
const cloudCodeAssistFormatError = attempt.cloudCodeAssistFormatError;
const imageDimensionError = parseImageDimensionError(lastAssistant?.errorMessage ?? "");
// Capture the failing profile before auth-profile rotation mutates `lastProfileId`.
const failedAssistantProfileId = lastProfileId;
const logAssistantFailoverDecision = createFailoverDecisionLogger({
  stage: "assistant",
  runId: params.runId,
  rawError: lastAssistant?.errorMessage?.trim(),
  failoverReason: assistantFailoverReason,
  profileFailureReason: assistantProfileFailureReason,
  provider: activeErrorContext.provider,
  model: activeErrorContext.model,
  profileId: failedAssistantProfileId,
  fallbackConfigured,
  timedOut,
  aborted,
});
// Auth failures: refresh runtime credentials once and retry the turn.
if (
  authFailure &&
  (await maybeRefreshRuntimeAuthForAuthError(
    lastAssistant?.errorMessage ?? "",
    runtimeAuthRetry,
  ))
) {
  authRetryPending = true;
  continue;
}
// Diagnostic only: log which message/content part exceeded the provider's
// image dimension limit (no retry — the payload would fail again).
if (imageDimensionError && lastProfileId) {
  const details = [
    imageDimensionError.messageIndex !== undefined
      ? `message=${imageDimensionError.messageIndex}`
      : null,
    imageDimensionError.contentIndex !== undefined
      ? `content=${imageDimensionError.contentIndex}`
      : null,
    imageDimensionError.maxDimensionPx !== undefined
      ? `limit=${imageDimensionError.maxDimensionPx}px`
      : null,
  ]
    .filter(Boolean)
    .join(" ");
  log.warn(
    `Profile ${lastProfileId} rejected image payload${details ? ` (${details})` : ""}.`,
  );
}
// Rotate on timeout to try another account/model path in this turn,
// but exclude post-prompt compaction timeouts (model succeeded; no profile issue).
const shouldRotate =
  (!aborted && failoverFailure) || (timedOut && !timedOutDuringCompaction);
// ---- Profile rotation and model-fallback escalation. Exit paths: `continue`
// (retry with the next profile), `throw FailoverError` (outer model
// fallback), or fall through to surface the error. ----
if (shouldRotate) {
  if (lastProfileId) {
    const reason = timedOut ? "timeout" : assistantProfileFailureReason;
    // Skip cooldown for timeouts: a timeout is model/network-specific,
    // not an auth issue. Marking the profile would poison fallback models
    // on the same provider (e.g. gpt-5.3 timeout blocks gpt-5.2).
    await maybeMarkAuthProfileFailure({
      profileId: lastProfileId,
      reason,
      modelId,
    });
    if (timedOut && !isProbeSession) {
      log.warn(`Profile ${lastProfileId} timed out. Trying next account...`);
    }
    if (cloudCodeAssistFormatError) {
      log.warn(
        `Profile ${lastProfileId} hit Cloud Code Assist format error. Tool calls will be sanitized on retry.`,
      );
    }
  }
  // For overloaded errors, check the configured rotation cap *before*
  // calling advanceAuthProfile() to avoid a wasted auth-profile setup
  // cycle. advanceAuthProfile() runs applyApiKeyInfo() which
  // initializes the next profile — costly work that is pointless when
  // we already know we will escalate to cross-provider fallback.
  // See: https://github.com/openclaw/openclaw/issues/58348
  if (assistantFailoverReason === "overloaded") {
    overloadProfileRotations += 1;
    if (overloadProfileRotations > overloadProfileRotationLimit && fallbackConfigured) {
      const status = resolveFailoverStatus("overloaded");
      log.warn(
        `overload profile rotation cap reached for ${sanitizeForLog(provider)}/${sanitizeForLog(modelId)} after ${overloadProfileRotations} rotations; escalating to model fallback`,
      );
      logAssistantFailoverDecision("fallback_model", { status });
      throw new FailoverError(
        "The AI service is temporarily overloaded. Please try again in a moment.",
        {
          reason: "overloaded",
          provider: activeErrorContext.provider,
          model: activeErrorContext.model,
          profileId: lastProfileId,
          status,
        },
      );
    }
  }
  // For rate-limit errors, apply the same rotation cap so that
  // per-model quota exhaustion (e.g. Anthropic Sonnet-only limits)
  // escalates to cross-provider model fallback instead of spinning
  // forever across profiles that share the same model quota.
  // See: https://github.com/openclaw/openclaw/issues/58572
  if (assistantFailoverReason === "rate_limit") {
    maybeEscalateRateLimitProfileFallback({
      failoverProvider: activeErrorContext.provider,
      failoverModel: activeErrorContext.model,
      logFallbackDecision: logAssistantFailoverDecision,
    });
  }
  const rotated = await advanceAuthProfile();
  if (rotated) {
    logAssistantFailoverDecision("rotate_profile");
    await maybeBackoffBeforeOverloadFailover(assistantFailoverReason);
    continue;
  }
  // No profile left to rotate to: escalate to model fallback if configured.
  if (fallbackConfigured) {
    await maybeBackoffBeforeOverloadFailover(assistantFailoverReason);
    // Prefer formatted error message (user-friendly) over raw errorMessage
    const message =
      (lastAssistant
        ? formatAssistantErrorText(lastAssistant, {
            cfg: params.config,
            sessionKey: params.sessionKey ?? params.sessionId,
            provider: activeErrorContext.provider,
            model: activeErrorContext.model,
          })
        : undefined) ||
      lastAssistant?.errorMessage?.trim() ||
      (timedOut
        ? "LLM request timed out."
        : rateLimitFailure
          ? "LLM request rate limited."
          : billingFailure
            ? formatBillingErrorMessage(
                activeErrorContext.provider,
                activeErrorContext.model,
              )
            : authFailure
              ? "LLM request unauthorized."
              : "LLM request failed.");
    const status =
      resolveFailoverStatus(assistantFailoverReason ?? "unknown") ??
      (isTimeoutErrorMessage(message) ? 408 : undefined);
    logAssistantFailoverDecision("fallback_model", { status });
    throw new FailoverError(message, {
      reason: assistantFailoverReason ?? "unknown",
      provider: activeErrorContext.provider,
      model: activeErrorContext.model,
      profileId: lastProfileId,
      status,
    });
  }
  // Nowhere left to go: the error will be surfaced via the payloads below.
  logAssistantFailoverDecision("surface_error");
}
// ---- Build the run's usage/meta summary and the outgoing payloads. ----
const usageMeta = buildUsageAgentMetaFields({
  usageAccumulator,
  lastAssistantUsage: lastAssistant?.usage as UsageLike | undefined,
  lastRunPromptUsage,
  lastTurnTotal,
});
// Prefer the provider/model the assistant actually answered with (fallbacks
// may have switched them mid-run) over the originally requested pair.
const agentMeta: EmbeddedPiAgentMeta = {
  sessionId: sessionIdUsed,
  provider: lastAssistant?.provider ?? provider,
  model: lastAssistant?.model ?? model.id,
  usage: usageMeta.usage,
  lastCallUsage: usageMeta.lastCallUsage,
  promptTokens: usageMeta.promptTokens,
  compactionCount: autoCompactionCount > 0 ? autoCompactionCount : undefined,
};
const payloads = buildEmbeddedRunPayloads({
  assistantTexts: attempt.assistantTexts,
  toolMetas: attempt.toolMetas,
  lastAssistant: attempt.lastAssistant,
  lastToolError: attempt.lastToolError,
  config: params.config,
  sessionKey: params.sessionKey ?? params.sessionId,
  provider: activeErrorContext.provider,
  model: activeErrorContext.model,
  verboseLevel: params.verboseLevel,
  reasoningLevel: params.reasoningLevel,
  toolResultFormat: resolvedToolResultFormat,
  suppressToolErrorWarnings: params.suppressToolErrorWarnings,
  inlineToolResultsAllowed: false,
  didSendViaMessagingTool: attempt.didSendViaMessagingTool,
  didSendDeterministicApprovalPrompt: attempt.didSendDeterministicApprovalPrompt,
});
// Timeout aborts can leave the run without any assistant payloads.
// Emit an explicit timeout error instead of silently completing, so
// callers do not lose the turn as an orphaned user message.
if (timedOut && !timedOutDuringCompaction && payloads.length === 0) {
  return {
    payloads: [
      {
        text:
          "Request timed out before a response was generated. " +
          "Please try again, or increase `agents.defaults.timeoutSeconds` in your config.",
        isError: true,
      },
    ],
    meta: {
      durationMs: Date.now() - started,
      agentMeta,
      aborted,
      systemPromptReport: attempt.systemPromptReport,
    },
    didSendViaMessagingTool: attempt.didSendViaMessagingTool,
    didSendDeterministicApprovalPrompt: attempt.didSendDeterministicApprovalPrompt,
    messagingToolSentTexts: attempt.messagingToolSentTexts,
    messagingToolSentMediaUrls: attempt.messagingToolSentMediaUrls,
    messagingToolSentTargets: attempt.messagingToolSentTargets,
    successfulCronAdds: attempt.successfulCronAdds,
  };
}
// Detect incomplete turns where prompt() resolved prematurely due to
// pi-agent-core's auto-retry timing issue: when a mid-turn 429/overload
// triggers an internal retry, waitForRetry() resolves on the next
// assistant message *before* tool execution completes in the retried
// loop (see #8643). The captured lastAssistant has a non-terminal
// stopReason (e.g. "toolUse") with no text content, producing empty
// payloads. Surface an error instead of silently dropping the reply.
//
// Exclusions:
// - didSendDeterministicApprovalPrompt: approval-prompt turns
//   intentionally produce empty payloads with stopReason=toolUse
// - lastToolError: suppressed/recoverable tool failures also produce
//   empty payloads with stopReason=toolUse; those are handled by
//   buildEmbeddedRunPayloads' own warning policy
if (
  payloads.length === 0 &&
  !aborted &&
  !timedOut &&
  !attempt.clientToolCall &&
  !attempt.yieldDetected &&
  !attempt.didSendDeterministicApprovalPrompt &&
  !attempt.lastToolError
) {
  const incompleteStopReason = lastAssistant?.stopReason;
  // Only trigger for non-terminal stop reasons (toolUse, etc.) to
  // avoid false positives when the model legitimately produces no text.
  // StopReason union: "aborted" | "error" | "length" | "toolUse"
  // "toolUse" is the key signal that prompt() resolved mid-turn.
  if (incompleteStopReason === "toolUse" || incompleteStopReason === "error") {
    log.warn(
      `incomplete turn detected: runId=${params.runId} sessionId=${params.sessionId} ` +
        `stopReason=${incompleteStopReason} payloads=0 — surfacing error to user`,
    );
    // Mark the failing profile for cooldown so multi-profile setups
    // rotate away from the exhausted credential on the next turn.
    if (lastProfileId) {
      const failoverReason = classifyFailoverReason(lastAssistant?.errorMessage ?? "");
      await maybeMarkAuthProfileFailure({
        profileId: lastProfileId,
        reason: resolveAuthProfileFailureReason(failoverReason),
      });
    }
    // Warn about potential side-effects when mutating tools executed
    // before the turn was interrupted, so users don't blindly retry.
    const hadMutatingTools = attempt.toolMetas.some((t) =>
      isLikelyMutatingToolName(t.toolName),
    );
    const errorText = hadMutatingTools
      ? "⚠️ Agent couldn't generate a response. Note: some tool actions may have already been executed — please verify before retrying."
      : "⚠️ Agent couldn't generate a response. Please try again.";
    return {
      payloads: [
        {
          text: errorText,
          isError: true,
        },
      ],
      meta: {
        durationMs: Date.now() - started,
        agentMeta,
        aborted,
        systemPromptReport: attempt.systemPromptReport,
      },
      didSendViaMessagingTool: attempt.didSendViaMessagingTool,
      didSendDeterministicApprovalPrompt: attempt.didSendDeterministicApprovalPrompt,
      messagingToolSentTexts: attempt.messagingToolSentTexts,
      messagingToolSentMediaUrls: attempt.messagingToolSentMediaUrls,
      messagingToolSentTargets: attempt.messagingToolSentTargets,
      successfulCronAdds: attempt.successfulCronAdds,
    };
  }
}
      // ---- Happy path: finalize the run and return the assembled result. ----
      log.debug(
        `embedded run done: runId=${params.runId} sessionId=${params.sessionId} durationMs=${Date.now() - started} aborted=${aborted}`,
      );
      // The turn succeeded with this profile: clear failure state and record
      // last-use so rotation/cooldown bookkeeping stays accurate.
      if (lastProfileId) {
        await markAuthProfileGood({
          store: authStore,
          provider,
          profileId: lastProfileId,
          agentDir: params.agentDir,
        });
        await markAuthProfileUsed({
          store: authStore,
          profileId: lastProfileId,
          agentDir: params.agentDir,
        });
      }
      return {
        // Empty payload lists are normalized to undefined for callers.
        payloads: payloads.length ? payloads : undefined,
        meta: {
          durationMs: Date.now() - started,
          agentMeta,
          aborted,
          systemPromptReport: attempt.systemPromptReport,
          // Handle client tool calls (OpenResponses hosted tools)
          // Propagate the LLM stop reason so callers (lifecycle events,
          // ACP bridge) can distinguish end_turn from max_tokens.
          stopReason: attempt.clientToolCall
            ? "tool_calls"
            : attempt.yieldDetected
              ? "end_turn"
              : (lastAssistant?.stopReason as string | undefined),
          pendingToolCalls: attempt.clientToolCall
            ? [
                {
                  // Short random id for the pending tool call (9 hex chars).
                  id: randomBytes(5).toString("hex").slice(0, 9),
                  name: attempt.clientToolCall.name,
                  arguments: JSON.stringify(attempt.clientToolCall.params),
                },
              ]
            : undefined,
        },
        didSendViaMessagingTool: attempt.didSendViaMessagingTool,
        didSendDeterministicApprovalPrompt: attempt.didSendDeterministicApprovalPrompt,
        messagingToolSentTexts: attempt.messagingToolSentTexts,
        messagingToolSentMediaUrls: attempt.messagingToolSentMediaUrls,
        messagingToolSentTargets: attempt.messagingToolSentTargets,
        successfulCronAdds: attempt.successfulCronAdds,
      };
    }
  } finally {
    // Always-run cleanup, regardless of success, failover throw, or abort.
    await contextEngine.dispose?.();
    stopRuntimeAuthRefreshTimer();
    if (params.cleanupBundleMcpOnRunEnd === true) {
      // Best-effort: MCP teardown failures are logged, never rethrown.
      await disposeSessionMcpRuntime(params.sessionId).catch((error) => {
        log.warn(
          `bundle-mcp cleanup failed after run for ${params.sessionId}: ${describeUnknownError(error)}`,
        );
      });
    }
  }
}),
);
}