fix(logging): include model and provider in overload/error log

When an embedded agent run ends with an error (e.g. overloaded_error),
the warn log now includes the model and provider that triggered the error.

Before:
  embedded run agent end: runId=xxx isError=true error=The AI service is temporarily overloaded...

After:
  embedded run agent end: runId=xxx isError=true model=claude-sonnet-4-6 provider=anthropic error=The AI service is temporarily overloaded...

This makes it easy to correlate which model/provider is failing
without needing to cross-reference token-usage.json.
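The new fields reuse the same sanitize-and-fallback pattern already applied to runId. A minimal sketch of that pattern, using a simplified stand-in for sanitizeForConsole (the real helper lives elsewhere in this repo and may differ):

  // Stand-in for the repo's sanitizeForConsole; assumption, not the real implementation.
  function sanitizeForConsole(value: string | undefined): string | undefined {
    if (value === undefined) return undefined;
    // Collapse newlines/tabs so the value stays on a single console line.
    return value.replace(/[\r\n\t]+/g, " ").trim() || undefined;
  }

  interface AssistantInfo {
    model?: string;
    provider?: string;
  }

  function describeAssistant(lastAssistant: AssistantInfo): string {
    // Fall back to "unknown" so the console message never prints "undefined".
    const safeModel = sanitizeForConsole(lastAssistant.model) ?? "unknown";
    const safeProvider = sanitizeForConsole(lastAssistant.provider) ?? "unknown";
    return `model=${safeModel} provider=${safeProvider}`;
  }

  // Example: describeAssistant({ model: "claude-sonnet-4-6" })
  // -> "model=claude-sonnet-4-6 provider=unknown"

The "unknown" fallback keeps the console line well-formed even when the assistant metadata is missing for a given run.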
Author: jiarung
Date:   2026-03-09 15:16:43 +00:00
parent 6d0547dc2e
commit 0ed550d09a


@@ -48,6 +48,8 @@ export function handleAgentEnd(ctx: EmbeddedPiSubscribeContext) {
   const safeErrorText =
     buildTextObservationFields(errorText).textPreview ?? "LLM request failed.";
   const safeRunId = sanitizeForConsole(ctx.params.runId) ?? "-";
+  const safeModel = sanitizeForConsole(lastAssistant.model) ?? "unknown";
+  const safeProvider = sanitizeForConsole(lastAssistant.provider) ?? "unknown";
   ctx.log.warn("embedded run agent end", {
     event: "embedded_run_agent_end",
     tags: ["error_handling", "lifecycle", "agent_end", "assistant_error"],
@@ -55,10 +57,10 @@ export function handleAgentEnd(ctx: EmbeddedPiSubscribeContext) {
     isError: true,
     error: safeErrorText,
     failoverReason,
-    provider: lastAssistant.provider,
     model: lastAssistant.model,
+    provider: lastAssistant.provider,
     ...observedError,
-    consoleMessage: `embedded run agent end: runId=${safeRunId} isError=true error=${safeErrorText}`,
+    consoleMessage: `embedded run agent end: runId=${safeRunId} isError=true model=${safeModel} provider=${safeProvider} error=${safeErrorText}`,
   });
   emitAgentEvent({
     runId: ctx.params.runId,