feat(agents): add tool progress detail modes

This commit is contained in:
Peter Steinberger
2026-05-04 01:35:20 +01:00
parent 0fa70f5a47
commit 5d09b4b92c
29 changed files with 217 additions and 30 deletions

View File

@@ -11,6 +11,7 @@ Docs: https://docs.openclaw.ai
### Changes
- Channels/streaming: add unified `streaming.mode: "progress"` drafts with auto single-word status labels and shared progress configuration across Discord, Telegram, Matrix, Slack, and Microsoft Teams.
- Agents/verbose: use compact explain-mode tool summaries for `/verbose` and progress drafts by default; set `agents.defaults.toolProgressDetail: "raw"` (or a per-agent override) when debugging to restore raw command/detail output.
- Agents/commands: add `/steer <message>` for queue-independent steering of the active current-session run without starting a new turn when the session is idle. (#76934)
- Tools/BTW: add `/side` as a text and native slash-command alias for `/btw` side questions.
- Doctor/config: `doctor --fix` now commits safe legacy migrations even when unrelated validation issues (e.g. a missing plugin) prevent full validation from passing, so `agents.defaults.llm` and other known-legacy keys are always cleaned up by `doctor --fix` regardless of other config problems. Fixes #76798. (#76800) Thanks @hclsys.

View File

@@ -1,4 +1,4 @@
34e7f2742624de44bfd1df7743e65ff33a04b0f6fe251bc417a6b33f85529772 config-baseline.json
5b5ebd95939d75496597d9858a375e27544812d0f79dc3b4bf87c794ada2ba08 config-baseline.core.json
3e7cbffbe3849b5201716f359dde9089d61d618c1a4206255c20887a855d85a9 config-baseline.json
31ec333df9f8b92c7656ac7107cecd5860dd02e08f7e18c7c674dc47a8811baa config-baseline.core.json
655d1309b70505e73198df20c5088784290b33098efd42027d3c09beeb3704a7 config-baseline.channel.json
055fae0d0067a751dc10125af7421da45633f73519c94c982d02b0c4eb2bdf67 config-baseline.plugin.json

View File

@@ -18,9 +18,9 @@ into the final answer when the channel can do that safely.
```text
Shelling...
- reading recent channel context
- checking matching issues
- preparing reply
📖 Read: from docs/concepts/progress-drafts.md
🔎 Web Search: for "discord edit message"
🛠️ Exec: run tests
```
Use progress drafts when you want one tidy status message during tool-heavy work
@@ -60,6 +60,9 @@ The label appears after the agent starts meaningful work and either remains busy
for five seconds or emits a second work event. Plain text-only replies do not
show a progress draft. Progress lines are added only when the agent emits useful
work updates, for example `🛠️ Exec`, `🔎 Web Search`, or `✍️ Write: to /tmp/file`.
By default they use the same compact explain mode as `/verbose`; set
`agents.defaults.toolProgressDetail: "raw"` when debugging to also append the
raw commands/details.
The final answer replaces the draft when possible; otherwise
OpenClaw sends the final answer normally and cleans up or stops updating the
draft according to the channel's transport.
@@ -173,6 +176,30 @@ Progress lines are enabled by default in progress mode. They come from real run
events: tool starts, item updates, task plans, approvals, command output, patch
summaries, and similar agent activity.
OpenClaw uses the same formatter for progress drafts and `/verbose`:
```json5
{
agents: {
defaults: {
toolProgressDetail: "explain", // explain | raw
},
},
}
```
`"explain"` is the default and keeps drafts stable with concise labels like
`🛠️ Exec: check JS syntax for /tmp/app.js`. `"raw"` appends the underlying
command/detail when available, which is useful while debugging but noisier in
chat.
For example, the same command appears differently depending on the detail mode:
| Mode | Progress line |
| --------- | -------------------------------------------------------------------- |
| `explain` | `🛠️ Exec: check JS syntax for /tmp/app.js` |
| `raw` | `🛠️ Exec: check JS syntax for /tmp/app.js, node --check /tmp/app.js` |
Limit how many lines stay visible:
```json5

View File

@@ -343,6 +343,7 @@ Time format in system prompt. Default: `auto` (OS preference).
pdfMaxPages: 20,
thinkingDefault: "low",
verboseDefault: "off",
toolProgressDetail: "explain",
reasoningDefault: "off",
elevatedDefault: "on",
timeoutSeconds: 600,
@@ -383,6 +384,7 @@ Time format in system prompt. Default: `auto` (OS preference).
- `pdfMaxBytesMb`: default PDF size limit for the `pdf` tool when `maxBytesMb` is not passed at call time.
- `pdfMaxPages`: default maximum pages considered by extraction fallback mode in the `pdf` tool.
- `verboseDefault`: default verbose level for agents. Values: `"off"`, `"on"`, `"full"`. Default: `"off"`.
- `toolProgressDetail`: detail mode for `/verbose` tool summaries and progress-draft tool lines. Values: `"explain"` (default, compact human labels) or `"raw"` (append raw command/detail when available). Per-agent `agents.list[].toolProgressDetail` overrides this default.
- `reasoningDefault`: default reasoning visibility for agents. Values: `"off"`, `"on"`, `"stream"`. Per-agent `agents.list[].reasoningDefault` overrides this default. Configured reasoning defaults are only applied for owners, authorized senders, or operator-admin gateway contexts when no per-message or session reasoning override is set.
- `elevatedDefault`: default elevated-output level for agents. Values: `"off"`, `"on"`, `"ask"`, `"full"`. Default: `"on"`.
- `model.primary`: format `provider/model` (e.g. `openai/gpt-5.5` for API-key access or `openai-codex/gpt-5.5` for Codex OAuth). If you omit the provider, OpenClaw tries an alias first, then a unique configured-provider match for that exact model id, and only then falls back to the configured default provider (deprecated compatibility behavior, so prefer explicit `provider/model`). If that provider no longer exposes the configured default model, OpenClaw falls back to the first configured provider/model instead of surfacing a stale removed-provider default.

View File

@@ -249,6 +249,7 @@ Save to `~/.openclaw/openclaw.json` and you can DM the bot from that number.
skills: ["github", "weather"], // inherited by agents that omit list[].skills
thinkingDefault: "low",
verboseDefault: "off",
toolProgressDetail: "explain",
reasoningDefault: "off",
elevatedDefault: "on",
blockStreamingDefault: "off",

View File

@@ -80,9 +80,12 @@ title: "Thinking levels"
- `/verbose off` stores an explicit session override; clear it via the Sessions UI by choosing `inherit`.
- Inline directive affects only that message; session/global defaults apply otherwise.
- Send `/verbose` (or `/verbose:`) with no argument to see the current verbose level.
- When verbose is on, agents that emit structured tool results (Pi, other JSON agents) send each tool call back as its own metadata-only message, prefixed with `<emoji> <tool-name>: <arg>` when available (path/command). These tool summaries are sent as soon as each tool starts (separate bubbles), not as streaming deltas.
- When verbose is on, agents that emit structured tool results (Pi, other JSON agents) send each tool call back as its own metadata-only message, prefixed with `<emoji> <tool-name>: <arg>` when available. These tool summaries are sent as soon as each tool starts (separate bubbles), not as streaming deltas.
- Tool failure summaries remain visible in normal mode, but raw error detail suffixes are hidden unless verbose is `on` or `full`.
- When verbose is `full`, tool outputs are also forwarded after completion (separate bubble, truncated to a safe length). If you toggle `/verbose on|full|off` while a run is in-flight, subsequent tool bubbles honor the new setting.
- `agents.defaults.toolProgressDetail` controls the shape of `/verbose` tool summaries and progress-draft tool lines. Use `"explain"` (default) for compact human labels such as `🛠️ Exec: checking JS syntax`; use `"raw"` when you also want the raw command/detail appended for debugging. Per-agent `agents.list[].toolProgressDetail` overrides the default.
- `explain`: `🛠️ Exec: check JS syntax for /tmp/app.js`
- `raw`: `🛠️ Exec: check JS syntax for /tmp/app.js, node --check /tmp/app.js`
## Plugin trace directives (/trace)

View File

@@ -579,6 +579,38 @@ describe("CodexAppServerEventProjector", () => {
);
expect(onToolResult).toHaveBeenCalledTimes(1);
expect(onToolResult).toHaveBeenCalledWith({
text: "🛠️ Bash: `run tests (in /workspace)`",
});
});
it("can emit raw verbose tool summaries through onToolResult", async () => {
const onToolResult = vi.fn();
const projector = await createProjector({
...(await createParams()),
verboseLevel: "on",
toolProgressDetail: "raw",
onToolResult,
});
await projector.handleNotification(
forCurrentTurn("item/started", {
item: {
type: "commandExecution",
id: "cmd-1",
command: "pnpm test extensions/codex",
cwd: "/workspace",
processId: null,
source: "agent",
status: "inProgress",
commandActions: [],
aggregatedOutput: null,
exitCode: null,
durationMs: null,
},
}),
);
expect(onToolResult).toHaveBeenCalledWith({
text: "🛠️ Bash: `` run tests (in /workspace), `pnpm test extensions/codex` ``",
});
@@ -589,6 +621,7 @@ describe("CodexAppServerEventProjector", () => {
const projector = await createProjector({
...(await createParams()),
verboseLevel: "on",
toolProgressDetail: "raw",
onToolResult,
});

View File

@@ -16,6 +16,7 @@ import {
type EmbeddedRunAttemptResult,
type HeartbeatToolResponse,
type MessagingToolSend,
type ToolProgressDetailMode,
} from "openclaw/plugin-sdk/agent-harness-runtime";
import { readCodexTurn } from "./protocol-validators.js";
import {
@@ -614,6 +615,7 @@ export class CodexAppServerEventProjector {
if (!kind) {
return;
}
const meta = itemMeta(item, this.toolProgressDetailMode());
this.emitAgentEvent({
stream: "item",
data: {
@@ -623,7 +625,7 @@ export class CodexAppServerEventProjector {
title: itemTitle(item),
status: params.phase === "start" ? "running" : itemStatus(item),
...(itemName(item) ? { name: itemName(item) } : {}),
...(itemMeta(item) ? { meta: itemMeta(item) } : {}),
...(meta ? { meta } : {}),
},
});
}
@@ -641,7 +643,7 @@ export class CodexAppServerEventProjector {
return;
}
this.toolResultSummaryItemIds.add(itemId);
const meta = itemMeta(item);
const meta = itemMeta(item, this.toolProgressDetailMode());
this.emitToolResultMessage({
itemId,
text: formatToolSummary(toolName, meta),
@@ -666,7 +668,7 @@ export class CodexAppServerEventProjector {
}
this.emitToolResultMessage({
itemId,
text: formatToolOutput(toolName, itemMeta(item), output),
text: formatToolOutput(toolName, itemMeta(item, this.toolProgressDetailMode()), output),
finalOutput: true,
});
}
@@ -700,6 +702,10 @@ export class CodexAppServerEventProjector {
: this.params.verboseLevel === "full";
}
/**
 * Resolve the effective tool-progress detail mode for this run.
 * Anything other than an explicit "raw" falls back to "explain",
 * so unset/unknown values get the compact default.
 */
private toolProgressDetailMode(): ToolProgressDetailMode {
  const configured = this.params.toolProgressDetail;
  if (configured === "raw") {
    return "raw";
  }
  return "explain";
}
private recordToolMeta(item: CodexThreadItem | undefined): void {
if (!item) {
return;
@@ -708,9 +714,10 @@ export class CodexAppServerEventProjector {
if (!toolName) {
return;
}
const meta = itemMeta(item, this.toolProgressDetailMode());
this.toolMetas.set(item.id, {
toolName,
...(itemMeta(item) ? { meta: itemMeta(item) } : {}),
...(meta ? { meta } : {}),
});
}
@@ -1047,19 +1054,26 @@ function itemName(item: CodexThreadItem): string | undefined {
return undefined;
}
function itemMeta(item: CodexThreadItem): string | undefined {
function itemMeta(
item: CodexThreadItem,
detailMode: ToolProgressDetailMode = "explain",
): string | undefined {
if (item.type === "commandExecution" && typeof item.command === "string") {
return inferToolMetaFromArgs("exec", {
command: item.command,
cwd: typeof item.cwd === "string" ? item.cwd : undefined,
});
return inferToolMetaFromArgs(
"exec",
{
command: item.command,
cwd: typeof item.cwd === "string" ? item.cwd : undefined,
},
{ detailMode },
);
}
if (item.type === "webSearch" && typeof item.query === "string") {
return item.query;
}
const toolName = itemName(item);
if ((item.type === "dynamicToolCall" || item.type === "mcpToolCall") && toolName) {
return inferToolMetaFromArgs(toolName, item.arguments);
return inferToolMetaFromArgs(toolName, item.arguments, { detailMode });
}
return undefined;
}

View File

@@ -1128,6 +1128,7 @@ export async function runEmbeddedPiAgent(
verboseLevel: params.verboseLevel,
reasoningLevel: params.reasoningLevel,
toolResultFormat: resolvedToolResultFormat,
toolProgressDetail: params.toolProgressDetail,
execOverrides: params.execOverrides,
bashElevated: params.bashElevated,
timeoutMs: params.timeoutMs,

View File

@@ -14,6 +14,7 @@ import type { AgentInternalEvent } from "../../internal-events.js";
import type { BlockReplyPayload } from "../../pi-embedded-payloads.js";
import type {
BlockReplyChunking,
ToolProgressDetailMode,
ToolResultFormat,
} from "../../pi-embedded-subscribe.shared-types.js";
import type { SkillSnapshot } from "../../skills.js";
@@ -129,6 +130,7 @@ export type RunEmbeddedPiAgentParams = {
verboseLevel?: VerboseLevel;
reasoningLevel?: ReasoningLevel;
toolResultFormat?: ToolResultFormat;
toolProgressDetail?: ToolProgressDetailMode;
/** If true, suppress tool error warning payloads for this run (including mutating tools). */
suppressToolErrorWarnings?: boolean;
/** Bootstrap context mode for workspace file injection. */

View File

@@ -617,7 +617,13 @@ export function handleToolExecutionStart(
}
}
const meta = extendExecMeta(toolName, args, inferToolMetaFromArgs(toolName, args));
const meta = extendExecMeta(
toolName,
args,
inferToolMetaFromArgs(toolName, args, {
detailMode: ctx.params.toolProgressDetail ?? "explain",
}),
);
ctx.state.toolMetaById.set(toolCallId, buildToolCallSummary(toolName, args, meta));
ctx.log.debug(
`embedded run tool start: runId=${ctx.params.runId} tool=${toolName} toolCallId=${toolCallId}`,

View File

@@ -186,6 +186,7 @@ type ToolHandlerParams = Pick<
| "sessionId"
| "agentId"
| "toolResultFormat"
| "toolProgressDetail"
>;
type ToolHandlerState = Pick<

View File

@@ -1,5 +1,6 @@
import type { BlockReplyChunking } from "./pi-embedded-block-chunker.js";
export type ToolResultFormat = "markdown" | "plain";
export type ToolProgressDetailMode = "explain" | "raw";
export type { BlockReplyChunking };

View File

@@ -6,8 +6,16 @@ import type { HookRunner } from "../plugins/hooks.js";
import type { AgentInternalEvent } from "./internal-events.js";
import type { BlockReplyPayload } from "./pi-embedded-payloads.js";
import type { EmbeddedRunReplayState } from "./pi-embedded-runner/replay-state.js";
import type { BlockReplyChunking, ToolResultFormat } from "./pi-embedded-subscribe.shared-types.js";
export type { BlockReplyChunking, ToolResultFormat } from "./pi-embedded-subscribe.shared-types.js";
import type {
BlockReplyChunking,
ToolProgressDetailMode,
ToolResultFormat,
} from "./pi-embedded-subscribe.shared-types.js";
export type {
BlockReplyChunking,
ToolProgressDetailMode,
ToolResultFormat,
} from "./pi-embedded-subscribe.shared-types.js";
export type SubscribeEmbeddedPiSessionParams = {
session: AgentSession;
@@ -18,6 +26,7 @@ export type SubscribeEmbeddedPiSessionParams = {
reasoningMode?: ReasoningLevel;
thinkingLevel?: ThinkLevel;
toolResultFormat?: ToolResultFormat;
toolProgressDetail?: ToolProgressDetailMode;
shouldEmitToolResult?: () => boolean;
shouldEmitToolOutput?: () => boolean;
onToolResult?: (payload: ReplyPayload) => void | Promise<void>;

View File

@@ -353,7 +353,11 @@ export function extractThinkingFromTaggedStream(text: string): string {
return text.slice(start).trim();
}
export function inferToolMetaFromArgs(toolName: string, args: unknown): string | undefined {
const display = resolveToolDisplay({ name: toolName, args });
export function inferToolMetaFromArgs(
toolName: string,
args: unknown,
options?: { detailMode?: "explain" | "raw" },
): string | undefined {
const display = resolveToolDisplay({ name: toolName, args, detailMode: options?.detailMode });
return formatToolDetail(display);
}

View File

@@ -2,7 +2,7 @@ import {
normalizeLowercaseStringOrEmpty,
normalizeOptionalString,
} from "../shared/string-coerce.js";
import { resolveExecDetail } from "./tool-display-exec.js";
import { resolveExecDetail, type ToolDetailMode } from "./tool-display-exec.js";
import { asRecord } from "./tool-display-record.js";
type ToolDisplayActionSpec = {
@@ -71,6 +71,7 @@ export function resolveToolVerbAndDetailForArgs(params: {
spec?: ToolDisplaySpec;
fallbackDetailKeys?: string[];
detailMode: "first" | "summary";
toolDetailMode?: ToolDetailMode;
detailCoerce?: CoerceDisplayValueOptions;
detailMaxEntries?: number;
detailFormatKey?: (raw: string) => string;
@@ -83,6 +84,7 @@ export function resolveToolVerbAndDetailForArgs(params: {
spec: params.spec,
fallbackDetailKeys: params.fallbackDetailKeys,
detailMode: params.detailMode,
toolDetailMode: params.toolDetailMode,
detailCoerce: params.detailCoerce,
detailMaxEntries: params.detailMaxEntries,
detailFormatKey: params.detailFormatKey,
@@ -378,6 +380,7 @@ function resolveToolVerbAndDetail(params: {
spec?: ToolDisplaySpec;
fallbackDetailKeys?: string[];
detailMode: "first" | "summary";
toolDetailMode?: ToolDetailMode;
detailCoerce?: CoerceDisplayValueOptions;
detailMaxEntries?: number;
detailFormatKey?: (raw: string) => string;
@@ -393,7 +396,7 @@ function resolveToolVerbAndDetail(params: {
let detail: string | undefined;
if (params.toolKey === "exec") {
detail = resolveExecDetail(params.args);
detail = resolveExecDetail(params.args, { detailMode: params.toolDetailMode });
}
if (!detail && params.toolKey === "read") {
detail = resolveReadDetail(params.args);

View File

@@ -385,7 +385,12 @@ function compactRawCommand(raw: string, maxLength = 120): string {
return `${oneLine.slice(0, Math.max(0, maxLength - 1))}`;
}
export function resolveExecDetail(args: unknown): string | undefined {
export type ToolDetailMode = "explain" | "raw";
export function resolveExecDetail(
args: unknown,
options?: { detailMode?: ToolDetailMode },
): string | undefined {
const record = asRecord(args);
if (!record) {
return undefined;
@@ -414,7 +419,12 @@ export function resolveExecDetail(args: unknown): string | undefined {
}
const displaySummary = cwd ? `${summary} (in ${cwd})` : summary;
if (compact && compact !== displaySummary && compact !== summary) {
if (
options?.detailMode !== "explain" &&
compact &&
compact !== displaySummary &&
compact !== summary
) {
return `${displaySummary} · \`${compact}\``;
}

View File

@@ -115,6 +115,18 @@ describe("tool display details", () => {
expect(detail).toBe("install dependencies (in ~/my-project), `cd ~/my-project && npm install`");
});
it("omits raw command details in explain mode", () => {
const detail = formatToolDetail(
resolveToolDisplay({
name: "exec",
args: { command: "cd ~/my-project && npm install" },
detailMode: "explain",
}),
);
expect(detail).toBe("install dependencies (in ~/my-project)");
});
it("moves cd path to context suffix with multiple stages and raw command", () => {
const detail = formatToolDetail(
resolveToolDisplay({

View File

@@ -9,6 +9,7 @@ import {
resolveToolVerbAndDetailForArgs,
} from "./tool-display-common.js";
import { TOOL_DISPLAY_CONFIG } from "./tool-display-config.js";
import type { ToolDetailMode } from "./tool-display-exec.js";
type ToolDisplay = {
name: string;
@@ -45,6 +46,7 @@ export function resolveToolDisplay(params: {
name?: string;
args?: unknown;
meta?: string;
detailMode?: ToolDetailMode;
}): ToolDisplay {
const name = normalizeToolName(params.name);
const key = normalizeLowercaseStringOrEmpty(name);
@@ -59,6 +61,7 @@ export function resolveToolDisplay(params: {
spec,
fallbackDetailKeys: FALLBACK.detailKeys,
detailMode: "summary",
toolDetailMode: params.detailMode,
detailMaxEntries: MAX_DETAIL_ENTRIES,
detailFormatKey: (raw) => formatDetailKey(raw, DETAIL_LABEL_OVERRIDES),
});

View File

@@ -899,6 +899,7 @@ export async function runAgentTurnWithFallback(params: {
activeSessionStore?: Record<string, SessionEntry>;
storePath?: string;
resolvedVerboseLevel: VerboseLevel;
toolProgressDetail?: "explain" | "raw";
replyMediaContext?: ReplyMediaContext;
}): Promise<AgentRunLoopResult> {
const TRANSIENT_HTTP_RETRY_DELAY_MS = 2_500;
@@ -1465,6 +1466,7 @@ export async function runAgentTurnWithFallback(params: {
}
return isMarkdownCapableMessageChannel(channel) ? "markdown" : "plain";
})(),
toolProgressDetail: params.toolProgressDetail,
suppressToolErrorWarnings: params.opts?.suppressToolErrorWarnings,
disableTools: params.opts?.disableTools,
enableHeartbeatTool: params.opts?.enableHeartbeatTool,

View File

@@ -906,6 +906,7 @@ export async function runReplyAgent(params: {
defaultModel: string;
agentCfgContextTokens?: number;
resolvedVerboseLevel: VerboseLevel;
toolProgressDetail?: "explain" | "raw";
isNewSession: boolean;
blockStreamingEnabled: boolean;
blockReplyChunking?: {
@@ -943,6 +944,7 @@ export async function runReplyAgent(params: {
defaultModel,
agentCfgContextTokens,
resolvedVerboseLevel,
toolProgressDetail,
isNewSession,
blockStreamingEnabled,
blockReplyChunking,
@@ -1263,6 +1265,7 @@ export async function runReplyAgent(params: {
activeSessionStore,
storePath,
resolvedVerboseLevel,
toolProgressDetail,
replyMediaContext,
});

View File

@@ -103,6 +103,10 @@ function normalizePromptRouteChannel(raw?: string | null): string | undefined {
return normalized && normalized !== "none" ? normalized : undefined;
}
/**
 * Coerce an arbitrary config value to a valid tool-progress detail mode.
 * Returns the value when it is exactly "explain" or "raw"; any other
 * input (wrong type, typo, undefined) yields undefined so callers can
 * fall through to the next config layer.
 */
function normalizeToolProgressDetail(value: unknown): "explain" | "raw" | undefined {
  if (value === "explain" || value === "raw") {
    return value;
  }
  return undefined;
}
function resolvePersistedPromptProvider(entry?: SessionEntry): string | undefined {
return (
normalizePromptRouteChannel(entry?.origin?.provider) ??
@@ -1067,6 +1071,9 @@ export async function runPreparedReply(
defaultModel,
agentCfgContextTokens: agentCfg?.contextTokens,
resolvedVerboseLevel: resolvedVerboseLevel ?? "off",
toolProgressDetail:
normalizeToolProgressDetail(agentCfg?.toolProgressDetail) ??
normalizeToolProgressDetail(cfg.agents?.defaults?.toolProgressDetail),
isNewSession,
blockStreamingEnabled,
blockReplyChunking,

View File

@@ -5336,6 +5336,18 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
},
],
},
toolProgressDetail: {
anyOf: [
{
type: "string",
const: "explain",
},
{
type: "string",
const: "raw",
},
],
},
reasoningDefault: {
anyOf: [
{
@@ -6339,6 +6351,14 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
description:
"Optional per-agent default thinking level. Overrides agents.defaults.thinkingDefault for this agent when no per-message or session override is set.",
},
verboseDefault: {
type: "string",
enum: ["off", "on", "full"],
},
toolProgressDetail: {
type: "string",
enum: ["explain", "raw"],
},
reasoningDefault: {
type: "string",
enum: ["on", "off", "stream"],

View File

@@ -316,6 +316,12 @@ export type AgentDefaultsConfig = {
thinkingDefault?: "off" | "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive" | "max";
/** Default verbose level when no /verbose directive is present. */
verboseDefault?: "off" | "on" | "full";
/**
* Detail mode for user-visible tool progress in /verbose and editable progress drafts.
* - explain: compact human summary (default)
* - raw: include raw command/detail when available
*/
toolProgressDetail?: "explain" | "raw";
/** Default reasoning level when no /reasoning directive is present. */
reasoningDefault?: "off" | "on" | "stream";
/** Default elevated level when no /elevated directive is present. */

View File

@@ -90,6 +90,8 @@ export type AgentConfig = {
thinkingDefault?: "off" | "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive" | "max";
/** Optional per-agent default verbosity level. */
verboseDefault?: "off" | "on" | "full";
/** Optional per-agent tool progress detail mode. */
toolProgressDetail?: AgentDefaultsConfig["toolProgressDetail"];
/** Optional per-agent default reasoning visibility. */
reasoningDefault?: "on" | "off" | "stream";
/** Optional per-agent default for fast mode. */

View File

@@ -239,6 +239,7 @@ export const AgentDefaultsSchema = z
])
.optional(),
verboseDefault: z.union([z.literal("off"), z.literal("on"), z.literal("full")]).optional(),
toolProgressDetail: z.union([z.literal("explain"), z.literal("raw")]).optional(),
reasoningDefault: z.union([z.literal("off"), z.literal("on"), z.literal("stream")]).optional(),
elevatedDefault: z
.union([z.literal("off"), z.literal("on"), z.literal("ask"), z.literal("full")])

View File

@@ -839,6 +839,8 @@ export const AgentEntrySchema = z
thinkingDefault: z
.enum(["off", "minimal", "low", "medium", "high", "xhigh", "adaptive", "max"])
.optional(),
verboseDefault: z.enum(["off", "on", "full"]).optional(),
toolProgressDetail: z.enum(["explain", "raw"]).optional(),
reasoningDefault: z.enum(["on", "off", "stream"]).optional(),
fastModeDefault: z.boolean().optional(),
skills: z.array(z.string()).optional(),

View File

@@ -154,8 +154,14 @@ export {
/**
* Derive the same compact user-facing tool detail that Pi uses for progress logs.
*/
export function inferToolMetaFromArgs(toolName: string, args: unknown): string | undefined {
const display = resolveToolDisplay({ name: toolName, args });
export type ToolProgressDetailMode = "explain" | "raw";
export function inferToolMetaFromArgs(
toolName: string,
args: unknown,
options?: { detailMode?: ToolProgressDetailMode },
): string | undefined {
const display = resolveToolDisplay({ name: toolName, args, detailMode: options?.detailMode });
return formatToolDetail(display);
}

View File

@@ -128,6 +128,7 @@ export function isChannelProgressDraftWorkToolName(name: string | null | undefin
type ChannelProgressLineOptions = {
markdown?: boolean;
detailMode?: "explain" | "raw";
};
const EMOJI_PREFIX_RE = /^\p{Extended_Pictographic}/u;
@@ -188,11 +189,15 @@ function compactStrings(values: readonly (string | undefined | null)[]): string[
return values.map((value) => value?.replace(/\s+/g, " ").trim()).filter(Boolean) as string[];
}
function inferToolMeta(name: string | undefined, args: Record<string, unknown> | undefined) {
function inferToolMeta(
name: string | undefined,
args: Record<string, unknown> | undefined,
detailMode: "explain" | "raw" = "explain",
) {
if (!name || !args) {
return undefined;
}
return formatToolDetail(resolveToolDisplay({ name, args }));
return formatToolDetail(resolveToolDisplay({ name, args, detailMode }));
}
function formatNamedProgressLine(
@@ -240,7 +245,7 @@ export function formatChannelProgressDraftLine(
return formatNamedProgressLine(
input.name,
[
inferToolMeta(input.name, input.args),
inferToolMeta(input.name, input.args, options?.detailMode),
input.phase && !input.name ? input.phase : undefined,
],
options,