fix(status): show runtime in CLI sessions (#77776)

* fix(status): show agent runtime in cli status

* fix(status): preserve configured runtime labels
This commit is contained in:
Vincent Koc
2026-05-05 16:50:22 -07:00
committed by GitHub
parent 180e295dc6
commit 46c99cff0b
11 changed files with 167 additions and 49 deletions

View File

@@ -109,6 +109,7 @@ Docs: https://docs.openclaw.ai
- WhatsApp responsiveness: stop only verified stale local TUI clients when they degrade the Gateway event loop and delay replies. Thanks @vincentkoc.
- Hooks/session-memory: add collision suffixes to fallback memory filenames so repeated `/new` or `/reset` captures in the same minute do not overwrite the earlier session archive. Thanks @vincentkoc.
- Agents/config: remove the ambiguous legacy `main` agent dir helper from runtime paths; model, auth, gateway, bundled plugin, and test helpers now resolve default/session agent dirs through `agents.list`/agent-scope helpers while plugin SDK keeps a deprecated compatibility export.
- CLI/status: show the selected agent runtime/harness in `openclaw status` session rows so terminal status matches the `/status` runtime line. Thanks @vincentkoc.
- Video generation: wait up to 20 minutes for slow fal/MiniMax queue-backed jobs, stop forwarding unsupported Google Veo generated-audio options, and normalize MiniMax `720P` requests to its supported `768P` resolution with the usual override warning/details instead of failing fallback.
- Video generation: accept provider-specific aspect-ratio and resolution hints at the tool boundary, normalize `720P` to MiniMax's supported `768P`, and stop sending Google `generateAudio` on Gemini video requests so provider fallback can recover from model-specific parameter differences. Thanks @vincentkoc.
- OpenAI/Google Meet: fail realtime voice connection attempts when the socket closes before `session.updated`, avoiding stuck Meet joins waiting on a bridge that never became ready. Thanks @vincentkoc.

View File

@@ -118,6 +118,7 @@ export async function buildStatusCommandReportData(
{ key: "Kind", header: "Kind", minWidth: 6 },
{ key: "Age", header: "Age", minWidth: 9 },
{ key: "Model", header: "Model", minWidth: 14 },
{ key: "Runtime", header: "Runtime", minWidth: 14 },
{ key: "Tokens", header: "Tokens", minWidth: 16 },
...(params.opts.verbose ? [{ key: "Cache", header: "Cache", minWidth: 16, flex: true }] : []),
] satisfies TableColumn[];

View File

@@ -63,6 +63,7 @@ describe("status.command-sections", () => {
updatedAt: 1,
age: 5_000,
model: "gpt-5.4",
runtime: "OpenAI Codex",
totalTokens: null,
totalTokensFresh: false,
remainingTokens: null,
@@ -76,6 +77,7 @@ describe("status.command-sections", () => {
updatedAt: 2,
age: 7_000,
model: "gpt-5.5",
runtime: "OpenClaw Pi Default",
totalTokens: null,
totalTokensFresh: false,
remainingTokens: null,
@@ -98,6 +100,7 @@ describe("status.command-sections", () => {
Kind: "direct",
Age: "5000ms",
Model: "gpt-5.4",
Runtime: "OpenAI Codex",
Tokens: "12k",
Cache: "cache ok",
},
@@ -106,6 +109,7 @@ describe("status.command-sections", () => {
Kind: "cron",
Age: "7000ms",
Model: "gpt-5.5",
Runtime: "OpenClaw Pi Default",
Tokens: "12k",
Cache: "cache ok",
},
@@ -127,6 +131,7 @@ describe("status.command-sections", () => {
Kind: "",
Age: "",
Model: "",
Runtime: "",
Tokens: "",
Cache: "",
},

View File

@@ -326,6 +326,7 @@ export function buildStatusSessionsRows(params: {
Kind: "",
Age: "",
Model: "",
Runtime: "",
Tokens: "",
...(params.verbose ? { Cache: "" } : {}),
},
@@ -336,6 +337,7 @@ export function buildStatusSessionsRows(params: {
Kind: sess.kind,
Age: sess.updatedAt && sess.age != null ? params.formatTimeAgo(sess.age) : "no activity",
Model: sess.model ?? "unknown",
Runtime: sess.runtime ?? "unknown",
Tokens: params.formatTokensCompact(sess),
...(params.verbose
? { Cache: params.formatPromptCacheCompact(sess) || params.muted("—") }

View File

@@ -50,6 +50,73 @@ describe("statusSummaryRuntime.classifySessionKey", () => {
});
});
// Covers the runtime-label precedence used by `openclaw status` session rows:
// persisted session metadata > explicitly configured agent runtime > default.
describe("statusSummaryRuntime.resolveSessionRuntimeLabel", () => {
it("uses the shared /status runtime labels for persisted harness metadata", () => {
// A persisted `agentRuntimeOverride` on the session entry wins even with an
// empty config, and maps through the shared label table ("codex" -> "OpenAI Codex").
expect(
statusSummaryRuntime.resolveSessionRuntimeLabel({
cfg: {} as never,
entry: {
sessionId: "session-1",
updatedAt: 0,
agentRuntimeOverride: "codex",
},
provider: "openai",
model: "gpt-5.5",
sessionKey: "agent:main:main",
}),
).toBe("OpenAI Codex");
});
it("preserves configured default CLI runtimes when sessions lack persisted harness metadata", () => {
// No per-session metadata: the configured `agents.defaults.agentRuntime`
// ("claude-cli") should still be surfaced rather than a provider fallback.
expect(
statusSummaryRuntime.resolveSessionRuntimeLabel({
cfg: {
agents: {
defaults: {
agentRuntime: { id: "claude-cli" },
},
},
} as never,
entry: {
sessionId: "session-1",
updatedAt: 0,
},
provider: "anthropic",
model: "claude-sonnet-4-6",
sessionKey: "agent:main:main",
}),
).toBe("Claude CLI");
});
it("preserves configured agent runtimes before harness selection", () => {
// A per-agent runtime ("codex" on agent "research") overrides both the
// defaults entry ("pi") and any harness-selection fallback.
expect(
statusSummaryRuntime.resolveSessionRuntimeLabel({
cfg: {
agents: {
defaults: {
agentRuntime: { id: "pi" },
},
},
list: [
{
id: "research",
agentRuntime: { id: "codex" },
},
],
},
} as never,
entry: {
sessionId: "session-1",
updatedAt: 0,
},
provider: "openai",
model: "gpt-5.5",
agentId: "research",
sessionKey: "agent:research:main",
}),
).toBe("OpenAI Codex");
});
});
describe("statusSummaryRuntime.resolveSessionModelRef", () => {
const cfg = {
agents: {

View File

@@ -1,5 +1,7 @@
import { resolveAgentRuntimeMetadata } from "../agents/agent-runtime-metadata.js";
import { resolveConfiguredProviderFallback } from "../agents/configured-provider-fallback.js";
import { DEFAULT_CONTEXT_TOKENS, DEFAULT_MODEL, DEFAULT_PROVIDER } from "../agents/defaults.js";
import { selectAgentHarness } from "../agents/harness/selection.js";
import { parseModelRef, resolvePersistedSelectedModelRef } from "../agents/model-selection.js";
import { normalizeProviderId } from "../agents/provider-id.js";
import { resolveAgentModelPrimaryValue } from "../config/model-input.js";
@@ -11,6 +13,7 @@ import {
normalizeOptionalString,
normalizeOptionalLowercaseString,
} from "../shared/string-coerce.js";
import { resolveAgentRuntimeLabel } from "../status/agent-runtime-label.js";
function resolveStatusModelRefFromRaw(params: {
cfg: OpenClawConfig;
@@ -167,6 +170,53 @@ function resolveSessionModelRef(
);
}
function resolveSessionRuntimeLabel(params: {
cfg: OpenClawConfig;
entry?: SessionEntry;
provider: string;
model: string;
agentId?: string;
sessionKey: string;
}): string {
const agentRuntime = resolveAgentRuntimeMetadata(params.cfg, params.agentId ?? "");
const explicitRuntime =
normalizeOptionalLowercaseString(params.entry?.agentRuntimeOverride) ??
normalizeOptionalLowercaseString(params.entry?.agentHarnessId) ??
(agentRuntime.source === "implicit"
? undefined
: normalizeOptionalLowercaseString(agentRuntime.id));
if (explicitRuntime && explicitRuntime !== "auto" && explicitRuntime !== "default") {
return resolveAgentRuntimeLabel({
config: params.cfg,
sessionEntry: params.entry,
resolvedHarness: explicitRuntime,
fallbackProvider: params.provider,
});
}
let resolvedHarness: string | undefined;
try {
const selected = selectAgentHarness({
provider: params.provider,
modelId: params.model,
config: params.cfg,
agentId: params.agentId,
sessionKey: params.sessionKey,
agentHarnessId: params.entry?.agentHarnessId,
});
const id = normalizeOptionalLowercaseString(selected.id);
resolvedHarness = id && id !== "pi" ? id : undefined;
} catch {
resolvedHarness = undefined;
}
return resolveAgentRuntimeLabel({
config: params.cfg,
sessionEntry: params.entry,
resolvedHarness,
fallbackProvider: params.provider,
});
}
function resolveContextTokensForModel(params: {
cfg?: OpenClawConfig;
provider?: string;
@@ -196,5 +246,6 @@ export const statusSummaryRuntime = {
resolveContextTokensForModel,
classifySessionKey,
resolveSessionModelRef,
resolveSessionRuntimeLabel,
resolveConfiguredStatusModelRef,
};

View File

@@ -3,6 +3,7 @@ import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
const statusSummaryMocks = vi.hoisted(() => ({
hasConfiguredChannelsForReadOnlyScope: vi.fn(() => true),
buildChannelSummary: vi.fn(async () => ["ok"]),
readSessionStoreReadOnly: vi.fn(() => ({})),
}));
vi.mock("../plugins/channel-plugin-ids.js", () => ({
@@ -20,6 +21,7 @@ vi.mock("./status.summary.runtime.js", () => ({
provider: "openai",
model: "gpt-5.5",
})),
resolveSessionRuntimeLabel: vi.fn(() => "OpenClaw Pi Default"),
resolveContextTokensForModel: vi.fn(() => 200_000),
},
}));
@@ -38,6 +40,14 @@ vi.mock("../config/config.js", () => ({
getRuntimeConfig: vi.fn(() => ({})),
}));
vi.mock("../config/sessions/paths.js", () => ({
resolveStorePath: vi.fn(() => "/tmp/sessions.json"),
}));
vi.mock("../config/sessions/store-read.js", () => ({
readSessionStoreReadOnly: statusSummaryMocks.readSessionStoreReadOnly,
}));
vi.mock("../gateway/agent-list.js", () => ({
listGatewayAgentsBasic: vi.fn(() => ({
defaultId: "main",
@@ -132,6 +142,7 @@ describe("getStatusSummary", () => {
vi.clearAllMocks();
statusSummaryMocks.hasConfiguredChannelsForReadOnlyScope.mockReturnValue(true);
statusSummaryMocks.buildChannelSummary.mockResolvedValue(["ok"]);
statusSummaryMocks.readSessionStoreReadOnly.mockReturnValue({});
});
it("includes runtimeVersion in the status payload", async () => {
@@ -175,4 +186,18 @@ describe("getStatusSummary", () => {
expect.objectContaining({ allowAsyncLoad: false }),
);
});
it("includes the selected agent runtime on recent sessions", async () => {
vi.mocked(statusSummaryRuntime.resolveSessionRuntimeLabel).mockReturnValue("OpenAI Codex");
statusSummaryMocks.readSessionStoreReadOnly.mockReturnValue({
"agent:main:main": {
sessionId: "session-1",
updatedAt: Date.now(),
},
});
const summary = await getStatusSummary();
expect(summary.sessions.recent[0]?.runtime).toBe("OpenAI Codex");
});
});

View File

@@ -111,6 +111,7 @@ export async function getStatusSummary(
classifySessionKey,
resolveConfiguredStatusModelRef,
resolveContextTokensForModel,
resolveSessionRuntimeLabel,
resolveSessionModelRef,
} = await loadStatusSummaryRuntimeModule();
const cfg = options.config ?? getRuntimeConfig();
@@ -191,6 +192,8 @@ export async function getStatusSummary(
.map(([key, entry]) => {
const updatedAt = entry?.updatedAt ?? null;
const age = updatedAt ? now - updatedAt : null;
const parsedAgentId = parseAgentSessionKey(key)?.agentId;
const agentId = opts.agentIdOverride ?? parsedAgentId;
const resolvedModel = resolveSessionModelRef(cfg, entry, opts.agentIdOverride);
const model = resolvedModel.model ?? configModel ?? null;
const contextTokens =
@@ -211,8 +214,14 @@ export async function getStatusSummary(
contextTokens && contextTokens > 0 && total !== undefined
? Math.min(999, Math.round((total / contextTokens) * 100))
: null;
const parsedAgentId = parseAgentSessionKey(key)?.agentId;
const agentId = opts.agentIdOverride ?? parsedAgentId;
const runtime = resolveSessionRuntimeLabel({
cfg,
entry,
provider: resolvedModel.provider,
model: model ?? "",
agentId,
sessionKey: key,
});
return {
agentId,
@@ -238,6 +247,7 @@ export async function getStatusSummary(
remainingTokens: remaining,
percentUsed: pct,
model,
runtime,
contextTokens,
flags: buildFlags(entry),
} satisfies SessionStatus;

View File

@@ -116,6 +116,7 @@ const baseStatusSummary = {
updatedAt: 1,
age: 5_000,
model: "gpt-5.5",
runtime: "OpenClaw Pi Default",
totalTokens: 12_000,
totalTokensFresh: true,
remainingTokens: 4_000,

View File

@@ -26,6 +26,7 @@ export type SessionStatus = {
remainingTokens: number | null;
percentUsed: number | null;
model: string | null;
runtime?: string | null;
contextTokens: number | null;
flags: string[];
};

View File

@@ -4,7 +4,6 @@ import { DEFAULT_CONTEXT_TOKENS, DEFAULT_MODEL, DEFAULT_PROVIDER } from "../agen
import { resolveModelAuthMode } from "../agents/model-auth.js";
import {
buildModelAliasIndex,
isCliProvider,
resolveConfiguredModelRef,
resolveModelRefFromString,
} from "../agents/model-selection.js";
@@ -47,7 +46,6 @@ import {
normalizeOptionalLowercaseString,
normalizeOptionalString,
} from "../shared/string-coerce.js";
import { sanitizeTerminalText } from "../terminal/safe-text.js";
import { resolveStatusTtsSnapshot } from "../tts/status-config.js";
import {
estimateUsageCost,
@@ -56,6 +54,7 @@ import {
resolveModelCostConfig,
} from "../utils/usage-format.js";
import { VERSION } from "../version.js";
import { resolveAgentRuntimeLabel } from "./agent-runtime-label.js";
import { resolveActiveFallbackState } from "./fallback-notice-state.js";
import { formatFastModeLabel } from "./status-labels.js";
@@ -199,51 +198,6 @@ function resolveExecutionLabel(
return `${runtime}/${sandboxMode}`;
}
// Display labels for known runtime/harness ids; keys are lowercase ids as
// produced by normalizeOptionalLowercaseString. "pi" doubles as the default
// label when no runtime can be resolved (see resolveAgentRuntimeLabel).
const AGENT_RUNTIME_LABELS: Readonly<Record<string, string>> = {
pi: "OpenClaw Pi Default",
codex: "OpenAI Codex",
"codex-cli": "OpenAI Codex",
"claude-cli": "Claude CLI",
"google-gemini-cli": "Gemini CLI",
};
// Maps session/runtime metadata to a human-readable runtime label.
// Precedence: ACP agent info > explicit runtime/harness id > CLI-provider
// heuristic > the "pi" default. All session-sourced strings are passed
// through sanitizeTerminalText before being rendered.
function resolveAgentRuntimeLabel(
args: Pick<StatusArgs, "config" | "sessionEntry" | "resolvedHarness"> & {
fallbackProvider?: string;
},
): string {
const acpAgentRaw = normalizeOptionalString(args.sessionEntry?.acp?.agent);
const acpAgent = acpAgentRaw ? sanitizeTerminalText(acpAgentRaw) : undefined;
if (acpAgent) {
// ACP sessions label as "<agent> (acp)" or "<agent> (acp/<backend>)".
const backendRaw = normalizeOptionalString(args.sessionEntry?.acp?.backend);
const backend = backendRaw ? sanitizeTerminalText(backendRaw) : undefined;
return backend ? `${acpAgent} (acp/${backend})` : `${acpAgent} (acp)`;
}
// An already-resolved harness wins over per-session overrides.
const runtimeRaw =
normalizeOptionalString(args.resolvedHarness) ??
normalizeOptionalString(args.sessionEntry?.agentRuntimeOverride) ??
normalizeOptionalString(args.sessionEntry?.agentHarnessId);
const runtime = normalizeOptionalLowercaseString(runtimeRaw);
if (runtime && runtime !== "auto" && runtime !== "default") {
// Known ids get their friendly label; unknown ids are shown sanitized as-is.
return AGENT_RUNTIME_LABELS[runtime] ?? sanitizeTerminalText(runtimeRaw ?? runtime);
}
// No concrete runtime: infer from the session's provider when it is a
// CLI-backed provider (e.g. "codex-cli" -> "OpenAI Codex").
const providerRaw =
normalizeOptionalString(args.sessionEntry?.modelProvider) ??
normalizeOptionalString(args.sessionEntry?.providerOverride) ??
normalizeOptionalString(args.fallbackProvider);
const provider = providerRaw ? sanitizeTerminalText(providerRaw) : undefined;
if (provider && isCliProvider(provider, args.config)) {
return (
AGENT_RUNTIME_LABELS[normalizeOptionalLowercaseString(providerRaw) ?? ""] ??
`${provider} (cli)`
);
}
// Fall back to the default Pi runtime label.
return AGENT_RUNTIME_LABELS.pi;
}
const formatTokens = (total: number | null | undefined, contextTokens: number | null) => {
const ctx = contextTokens ?? null;
if (total == null) {