Mirror of https://github.com/openclaw/openclaw.git
Synced 2026-03-15 03:50:40 +00:00
fix(logging): include model and provider in overload/error log
When an embedded agent run ends with an error (e.g. `overloaded_error`), the warn log now includes the model and provider that triggered the error.

Before: `embedded run agent end: runId=xxx isError=true error=The AI service is temporarily overloaded...`

After: `embedded run agent end: runId=xxx isError=true model=claude-sonnet-4-6 provider=anthropic error=The AI service is temporarily overloaded...`

This makes it easy to correlate which model/provider is failing without needing to cross-reference token-usage.json.
This commit is contained in:
@@ -48,6 +48,8 @@ export function handleAgentEnd(ctx: EmbeddedPiSubscribeContext) {
|
||||
const safeErrorText =
|
||||
buildTextObservationFields(errorText).textPreview ?? "LLM request failed.";
|
||||
const safeRunId = sanitizeForConsole(ctx.params.runId) ?? "-";
|
||||
const safeModel = sanitizeForConsole(lastAssistant.model) ?? "unknown";
|
||||
const safeProvider = sanitizeForConsole(lastAssistant.provider) ?? "unknown";
|
||||
ctx.log.warn("embedded run agent end", {
|
||||
event: "embedded_run_agent_end",
|
||||
tags: ["error_handling", "lifecycle", "agent_end", "assistant_error"],
|
||||
@@ -55,10 +57,10 @@ export function handleAgentEnd(ctx: EmbeddedPiSubscribeContext) {
|
||||
isError: true,
|
||||
error: safeErrorText,
|
||||
failoverReason,
|
||||
provider: lastAssistant.provider,
|
||||
model: lastAssistant.model,
|
||||
provider: lastAssistant.provider,
|
||||
...observedError,
|
||||
consoleMessage: `embedded run agent end: runId=${safeRunId} isError=true error=${safeErrorText}`,
|
||||
consoleMessage: `embedded run agent end: runId=${safeRunId} isError=true model=${safeModel} provider=${safeProvider} error=${safeErrorText}`,
|
||||
});
|
||||
emitAgentEvent({
|
||||
runId: ctx.params.runId,
|
||||
|
||||
Reference in New Issue
Block a user