fix(agents): keep PI telemetry on model provider
Keep PI embedded-run usage metadata and traces attributed to the resolved model provider instead of the PI harness label.
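
In plain terms: PI's embedded runner can stamp the assistant message with its own harness label (provider `pi`), and the old `sessionLastAssistant?.provider ?? provider` fallback let that label leak into session usage and traces. A minimal before/after sketch in TypeScript, using the openrouter values from this commit's tests (the `resolveReportedModelRef` helper is the one added in the diff below):

// Assistant message as stamped by the PI harness, plus the ref the run actually resolved.
const assistant = { provider: "pi", model: "pi" };
const resolved = { provider: "openrouter", model: "openai/gpt-5.4" };

// Old fallback: the harness label wins, so usage is attributed to "pi".
const before = assistant?.provider ?? resolved.provider; // "pi"

// New behavior: resolveReportedModelRef treats "pi" as a harness label rather than
// a real provider and falls back to the resolved model ref.
const after = resolveReportedModelRef({
  provider: resolved.provider,
  model: resolved.model,
  assistant,
}); // { provider: "openrouter", model: "openai/gpt-5.4" }
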
@@ -21,6 +21,7 @@ Docs: https://docs.openclaw.ai
 - Plugins/runtime-deps: memoize packaged bundled runtime dist-mirror preparation after the first successful pass while keeping source-checkout mirrors refreshable, so constrained Docker/VPS installs avoid repeated root scans before chat turns. Refs #73428, #73421, #73532, and #73477. Thanks @Dimaoggg, @oromeis, @oadiazp, @jmfraga, @bstanbury, @antoniusfelix, and @jkobject.
 - Channels/Discord: treat bare numeric outbound targets that match the effective Discord DM allowlist as user DMs while preserving account-specific legacy `dm.allowFrom` precedence over inherited root `allowFrom`. (#74303) Thanks @Squirbie.
 - Control UI: make the chat sidebar split divider focusable, keyboard-resizable, ARIA-described, and pointer-event based so sidebar resizing works without a mouse. Thanks @BunsDev.
+- Agents/usage: keep PI embedded-run telemetry attributed to the resolved model provider instead of the PI harness label, so OpenRouter and other provider-backed turns report the right provider in session usage and traces. Thanks @vincentkoc.
 - Agents/auth: keep OAuth auth profiles inherited from the main agent read-through instead of copying refresh tokens into secondary agents, and refresh Codex app-server tokens against the owning store so multi-agent swarms avoid reused refresh-token failures. Fixes #74055. Thanks @ClarityInvest.
 - Channels/Telegram: honor `ALL_PROXY` / `all_proxy` and service-level `OPENCLAW_PROXY_URL` when constructing the HTTP/1-only Telegram Bot API transport, so Windows and service installs that rely on those proxy settings no longer fall back to direct egress. Fixes #74014; refs #74086. Thanks @SymbolStar.
 - Channels/Telegram: continue polling when `deleteWebhook` hits a transient network failure but `getWebhookInfo` confirms no webhook is configured, so startup does not retry cleanup forever after the webhook was already removed. Refs #74086; carries forward #47384. Thanks @clovericbot.

@@ -104,6 +104,7 @@ import {
   resolveFinalAssistantRawText,
   resolveFinalAssistantVisibleText,
   resolveMaxRunRetryIterations,
+  resolveReportedModelRef,
   resolveOverloadFailoverBackoffMs,
   resolveOverloadProfileRotationLimit,
   resolveRateLimitProfileRotationLimit,

@@ -1878,11 +1879,16 @@ export async function runEmbeddedPiAgent(
     lastRunPromptUsage,
     lastTurnTotal,
   });
+  const reportedModelRef = resolveReportedModelRef({
+    provider,
+    model: model.id,
+    assistant: sessionLastAssistant,
+  });
   const agentMeta: EmbeddedPiAgentMeta = {
     sessionId: sessionIdUsed,
     sessionFile: sessionFileUsed,
-    provider: sessionLastAssistant?.provider ?? provider,
-    model: sessionLastAssistant?.model ?? model.id,
+    provider: reportedModelRef.provider,
+    model: reportedModelRef.model,
     contextTokens: ctxInfo.tokens,
     agentHarnessId: attempt.agentHarnessId,
     usage: usageMeta.usage,

@@ -2403,8 +2409,8 @@ export async function runEmbeddedPiAgent(
           ]
         : undefined,
       executionTrace: {
-        winnerProvider: sessionLastAssistant?.provider ?? provider,
-        winnerModel: sessionLastAssistant?.model ?? model.id,
+        winnerProvider: reportedModelRef.provider,
+        winnerModel: reportedModelRef.model,
         attempts:
           traceAttempts.length > 0 ||
           sessionLastAssistant?.provider ||

@@ -2412,8 +2418,8 @@ export async function runEmbeddedPiAgent(
             ? [
                 ...traceAttempts,
                 {
-                  provider: sessionLastAssistant?.provider ?? provider,
-                  model: sessionLastAssistant?.model ?? model.id,
+                  provider: reportedModelRef.provider,
+                  model: reportedModelRef.model,
                   result: "success",
                   stage: "assistant",
                 },

@@ -22,4 +22,17 @@ describe("resolveActiveErrorContext", () => {
 
     expect(result).toEqual({ provider: "openai", model: "gpt-5.4-codex" });
   });
+
+  it("ignores the embedded PI harness provider when the model provider is known", () => {
+    const result = resolveActiveErrorContext({
+      provider: "openrouter",
+      model: "openai/gpt-5.4",
+      assistant: {
+        provider: "pi",
+        model: "pi",
+      },
+    });
+
+    expect(result).toEqual({ provider: "openrouter", model: "openai/gpt-5.4" });
+  });
 });

@@ -85,11 +85,38 @@ export function resolveActiveErrorContext(params: {
 }): {
   provider: string;
   model: string;
 } {
+  return resolveReportedModelRef(params);
+}
+
+function isEmbeddedHarnessProvider(provider: string): boolean {
+  return provider.trim().toLowerCase() === "pi";
+}
+
+export function resolveReportedModelRef(params: {
+  provider: string;
+  model: string;
+  assistant?: { provider?: string; model?: string } | null;
+}): {
+  provider: string;
+  model: string;
+} {
   const assistantProvider = params.assistant?.provider?.trim();
   const assistantModel = params.assistant?.model?.trim();
+  if (!assistantProvider) {
+    return {
+      provider: params.provider,
+      model: assistantModel || params.model,
+    };
+  }
+  if (isEmbeddedHarnessProvider(assistantProvider)) {
+    return {
+      provider: params.provider,
+      model: params.model,
+    };
+  }
   return {
-    provider: assistantProvider || params.provider,
+    provider: assistantProvider,
     model: assistantModel || params.model,
   };
 }
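
For reference, the three branches above resolve as follows — a worked sketch, with input values adapted from this commit's tests:

// 1) No provider recorded on the assistant message: keep the resolved ref
//    (the assistant's model would still win if present).
resolveReportedModelRef({ provider: "openrouter", model: "openai/gpt-5.4", assistant: null });
// -> { provider: "openrouter", model: "openai/gpt-5.4" }

// 2) Assistant stamped with the embedded harness label: ignore both fields.
resolveReportedModelRef({
  provider: "openrouter",
  model: "openai/gpt-5.4",
  assistant: { provider: "pi", model: "pi" },
});
// -> { provider: "openrouter", model: "openai/gpt-5.4" }

// 3) Assistant names a real provider: report what actually answered.
resolveReportedModelRef({
  provider: "openrouter",
  model: "openai/gpt-5.4",
  assistant: { provider: "openai", model: "gpt-5.4-codex" },
});
// -> { provider: "openai", model: "gpt-5.4-codex" }
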
@@ -1,9 +1,10 @@
 import type { AssistantMessage } from "@mariozechner/pi-ai";
-import { beforeAll, beforeEach, describe, expect, it } from "vitest";
+import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
 import { makeAttemptResult } from "./run.overflow-compaction.fixture.js";
 import {
   loadRunOverflowCompactionHarness,
   mockedEnsureRuntimePluginsLoaded,
+  mockedResolveModelAsync,
   mockedRunEmbeddedAttempt,
 } from "./run.overflow-compaction.harness.js";
 import type { EmbeddedRunAttemptResult } from "./run/types.js";

@@ -191,4 +192,48 @@ describe("runEmbeddedPiAgent usage reporting", () => {
     // If the bug exists, it will likely be 350
     expect(usage?.total).toBe(200);
   });
+
+  it("reports the resolved model provider when PI marks the assistant message as pi", async () => {
+    mockedResolveModelAsync.mockResolvedValueOnce({
+      model: {
+        id: "openai/gpt-5.4",
+        provider: "openrouter",
+        contextWindow: 200000,
+        api: "openai-completions",
+      },
+      error: null,
+      authStorage: {
+        setRuntimeApiKey: vi.fn(),
+      },
+      modelRegistry: {},
+    });
+    mockedRunEmbeddedAttempt.mockResolvedValueOnce(
+      makeAttemptResult({
+        assistantTexts: ["Response 1"],
+        lastAssistant: makeAssistantMessage({
+          provider: "pi",
+          model: "pi",
+          usage: { input: 100, output: 50, total: 150 } as unknown as AssistantMessage["usage"],
+        }),
+        attemptUsage: { input: 100, output: 50, total: 150 },
+      }),
+    );
+
+    const result = await runEmbeddedPiAgent({
+      sessionId: "test-session",
+      sessionKey: "test-key",
+      sessionFile: "/tmp/session.json",
+      workspaceDir: "/tmp/workspace",
+      prompt: "hello",
+      provider: "openrouter",
+      model: "openai/gpt-5.4",
+      timeoutMs: 30000,
+      runId: "run-provider-attribution",
+    });
+
+    expect(result.meta.agentMeta?.provider).toBe("openrouter");
+    expect(result.meta.agentMeta?.model).toBe("openai/gpt-5.4");
+    expect(result.meta.executionTrace?.winnerProvider).toBe("openrouter");
+    expect(result.meta.executionTrace?.winnerModel).toBe("openai/gpt-5.4");
+  });
 });