fix: improve gpt execution flow and visibility

This commit is contained in:
Peter Steinberger
2026-04-05 10:31:36 +01:00
parent 219afbc2cc
commit e468da1040
16 changed files with 399 additions and 21 deletions

View File

@@ -117,6 +117,7 @@ Docs: https://docs.openclaw.ai
- Agents/exec: restore `host=node` routing for node-pinned and `host=auto` sessions, while still blocking sandboxed `auto` sessions from jumping to gateway. (#60788) Thanks @openperf.
- Agents/compaction: keep assistant tool calls and displaced tool results in the same compaction chunk so strict summarization providers stop rejecting orphaned tool pairs. (#58849) Thanks @openperf.
- Outbound/sanitizer: strip leaked `<tool_call>`, `<function_calls>`, and model special tokens from shared user-visible assistant text, including truncated tool-call streams, so internal scaffolding no longer bleeds into replies across surfaces. (#60619) Thanks @oliviareid-svg.
- Providers/OpenAI: make GPT-5 and Codex runs act sooner with lower-verbosity defaults, visible progress during tool work, and a one-shot retry when a turn only narrates the plan instead of taking action.
- Telegram: restore DM voice-note preflight transcription so direct-message audio stops arriving as raw `<media:audio>` placeholders. (#61008) Thanks @manueltarouca.
- Control UI/avatar: honor `ui.assistant.avatar` when serving `/avatar/:agentId` so Appearance UI avatar paths stop falling back to initials placeholders. (#60778) Thanks @hannasdev.
- Control UI/Overview: prevent gateway access token/password visibility toggle buttons from overlapping their inputs at narrow widths. (#56924) Thanks @bbddbb1.

View File

@@ -297,6 +297,8 @@ Convention:
- OpenClaw strips/suppresses this in the delivery layer.
- Exact silent-token suppression is case-insensitive, so `NO_REPLY` and
`no_reply` both count when the whole payload is just the silent token.
- This is for true background/no-delivery turns only; it is not a shortcut for
ordinary actionable user requests.
As of `2026.1.10`, OpenClaw also suppresses **draft/typing streaming** when a
partial chunk begins with `NO_REPLY`, so silent operations don't leak partial

View File

@@ -268,6 +268,15 @@ describe("openai plugin", () => {
expect(nonOpenAIResult).toBeUndefined();
});
// Pins the act-now phrasing in the OpenAI prompt overlay so a regression in
// the overlay text is caught here instead of in live runs.
it("includes stronger execution guidance in the OpenAI prompt overlay", () => {
  expect(OPENAI_FRIENDLY_PROMPT_OVERLAY).toContain(
    "If the user asks you to do the work, start in the same turn instead of restating the plan.",
  );
  expect(OPENAI_FRIENDLY_PROMPT_OVERLAY).toContain(
    "Commentary-only turns are incomplete when the next action is clear.",
  );
});
it("supports opting out of the prompt overlay via plugin config", async () => {
const { on } = await registerOpenAIPluginWithHook({
pluginConfig: { personalityOverlay: "off" },

View File

@@ -5,6 +5,10 @@ export const OPENAI_FRIENDLY_PROMPT_OVERLAY = `## Interaction Style
Be warm, collaborative, and quietly supportive.
Communicate like a capable teammate sitting next to the user.
Keep progress updates clear and concrete.
If the user asks you to do the work, start in the same turn instead of restating the plan.
Commentary-only turns are incomplete when the next action is clear.
Prefer the first real tool step over more narration.
If work will take more than a moment, send a brief progress update while acting.
Explain decisions without ego.
When the user is wrong or a plan is risky, say so kindly and directly.
Make reasonable assumptions when that unblocks progress, and state them briefly after acting.

View File

@@ -250,6 +250,20 @@ describe("resolveExtraParams", () => {
expect(result).toBeUndefined();
});
// With no config at all (cfg: undefined), GPT-5 models on the "openai"
// provider should still receive the built-in runtime defaults.
it("applies default runtime params for OpenAI GPT-5 models", () => {
  const result = resolveExtraParams({
    cfg: undefined,
    provider: "openai",
    modelId: "gpt-5.4",
  });
  expect(result).toEqual({
    parallel_tool_calls: true,
    text_verbosity: "low",
    openaiWsWarmup: true,
  });
});
it("returns params for exact provider/model key", () => {
const result = resolveExtraParams({
cfg: {
@@ -413,6 +427,8 @@ describe("resolveExtraParams", () => {
});
expect(result).toEqual({
openaiWsWarmup: true,
parallel_tool_calls: true,
text_verbosity: "low",
});
});
@@ -760,8 +776,10 @@ describe("applyExtraParamsToAgent", () => {
expect(payloads).toHaveLength(1);
expect(payloads[0]).toEqual({
context_management: [{ type: "compaction", compact_threshold: 80000 }],
parallel_tool_calls: true,
reasoning: { effort: "none", summary: "auto" },
store: true,
text: { verbosity: "low" },
});
});
@@ -1620,6 +1638,22 @@ describe("applyExtraParamsToAgent", () => {
expect(calls[0]?.openaiWsWarmup).toBe(true);
});
// End-to-end payload check: the GPT-5 defaults must surface on the actual
// OpenAI Responses request body (snake_case flag plus the nested text object).
it("injects GPT-5 default parallel tool calls and low verbosity for OpenAI Responses payloads", () => {
  const payload = runResponsesPayloadMutationCase({
    applyProvider: "openai",
    applyModelId: "gpt-5.4",
    model: {
      api: "openai-responses",
      provider: "openai",
      id: "gpt-5.4",
    } as Model<"openai-responses">,
    payload: {},
  });
  expect(payload.parallel_tool_calls).toBe(true);
  expect(payload.text).toEqual({ verbosity: "low" });
});
it("injects native Codex web_search for direct openai-codex Responses models", () => {
const payload = runResponsesPayloadMutationCase({
applyProvider: "openai-codex",
@@ -2570,7 +2604,7 @@ describe("applyExtraParamsToAgent", () => {
},
});
expect(payload).not.toHaveProperty("reasoning");
expect(payload).not.toHaveProperty("text");
expect(payload.text).toEqual({ verbosity: "low" });
expect(payload.service_tier).toBe("priority");
});
@@ -2987,7 +3021,7 @@ describe("applyExtraParamsToAgent", () => {
},
});
expect(payload).not.toHaveProperty("reasoning");
expect(payload).not.toHaveProperty("text");
expect(payload.text).toEqual({ verbosity: "low" });
expect(payload.service_tier).toBe("priority");
});

View File

@@ -346,6 +346,59 @@ describe("runEmbeddedPiAgent", () => {
expect(disposeSessionMcpRuntimeMock).toHaveBeenCalledWith("session:test");
});
// Regression test for the planning-only retry guard: a GPT turn that only
// narrates its plan (no tool activity) is retried exactly once with an
// appended act-now steer, and the second attempt's output is what gets
// delivered.
it("retries a planning-only GPT turn once with an act-now steer", async () => {
  const sessionFile = nextSessionFile();
  const cfg = createEmbeddedPiRunnerOpenAiConfig(["gpt-5.4"]);
  const sessionKey = nextSessionKey();
  runEmbeddedAttemptMock
    // First attempt: unmodified prompt, planning-only narration, no tools.
    .mockImplementationOnce(async (params: unknown) => {
      expect((params as { prompt?: string }).prompt).toBe("ship it");
      return makeEmbeddedRunnerAttempt({
        assistantTexts: ["I'll inspect the files, make the change, and run the checks."],
        lastAssistant: buildEmbeddedRunnerAssistant({
          model: "gpt-5.4",
          content: [
            {
              type: "text",
              text: "I'll inspect the files, make the change, and run the checks.",
            },
          ],
        }),
      });
    })
    // Retry attempt: the runner must have appended the act-now steer.
    .mockImplementationOnce(async (params: unknown) => {
      expect((params as { prompt?: string }).prompt).toContain(
        "Do not restate the plan. Act now",
      );
      return makeEmbeddedRunnerAttempt({
        assistantTexts: ["done"],
        lastAssistant: buildEmbeddedRunnerAssistant({
          model: "gpt-5.4",
          content: [{ type: "text", text: "done" }],
        }),
      });
    });
  const result = await runEmbeddedPiAgent({
    sessionId: "session:test",
    sessionKey,
    sessionFile,
    workspaceDir,
    config: cfg,
    prompt: "ship it",
    provider: "openai",
    model: "gpt-5.4",
    timeoutMs: 5_000,
    agentDir,
    runId: nextRunId("planning-only-retry"),
    enqueue: immediateEnqueue,
  });
  // Exactly two attempts (one retry), and the retried turn's text wins.
  expect(runEmbeddedAttemptMock).toHaveBeenCalledTimes(2);
  expect(result.payloads?.[0]).toMatchObject({ text: "done" });
});
it("handles prompt error paths without dropping user state", async () => {
const sessionFile = nextSessionFile();
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]);

View File

@@ -67,10 +67,6 @@ export function resolveExtraParams(params: {
? params.cfg.agents.list.find((agent) => agent.id === params.agentId)?.params
: undefined;
if (!defaultParams && !globalParams && !agentParams) {
return undefined;
}
const merged = Object.assign({}, defaultParams, globalParams, agentParams);
const resolvedParallelToolCalls = resolveAliasedParamValue(
[defaultParams, globalParams, agentParams],
@@ -102,7 +98,9 @@ export function resolveExtraParams(params: {
delete merged.cached_content;
}
return merged;
applyDefaultOpenAIGptRuntimeParams(params, merged);
return Object.keys(merged).length > 0 ? merged : undefined;
}
type CacheRetentionStreamOptions = Partial<SimpleStreamOptions> & {
@@ -186,6 +184,37 @@ function sanitizeExtraParamsRecord(
);
}
/**
 * Decide whether the OpenAI GPT-5 runtime defaults should be injected.
 *
 * Applies only to the first-party OpenAI providers ("openai" and
 * "openai-codex") and only to the GPT-5 model family: "gpt-5" itself or any
 * "gpt-5." / "gpt-5-" variant, matched case-insensitively. Lookalike ids
 * such as "gpt-52" do not qualify.
 */
function shouldApplyDefaultOpenAIGptRuntimeParams(params: {
  provider: string;
  modelId: string;
}): boolean {
  const openAIProviders = ["openai", "openai-codex"];
  if (!openAIProviders.includes(params.provider)) {
    return false;
  }
  return /^gpt-5(?:[.-]|$)/i.test(params.modelId);
}
/**
 * Mutate `merged` in place, filling in the GPT-5 runtime defaults
 * (parallel tool calls on, low text verbosity, WS warmup on) for eligible
 * OpenAI models. A default is only written when neither its snake_case key
 * nor its camelCase alias was set explicitly, so user config always wins.
 */
function applyDefaultOpenAIGptRuntimeParams(
  params: { provider: string; modelId: string },
  merged: Record<string, unknown>,
): void {
  if (!shouldApplyDefaultOpenAIGptRuntimeParams(params)) {
    return;
  }
  // [key written, alias keys that also count as "explicitly set", default value]
  const defaults: Array<[string, string[], unknown]> = [
    ["parallel_tool_calls", ["parallelToolCalls"], true],
    ["text_verbosity", ["textVerbosity"], "low"],
    ["openaiWsWarmup", [], true],
  ];
  for (const [key, aliases, value] of defaults) {
    const alreadySet = [key, ...aliases].some((candidate) =>
      Object.hasOwn(merged, candidate),
    );
    if (!alreadySet) {
      merged[key] = value;
    }
  }
}
export function resolveAgentTransportOverride(params: {
settingsManager: Pick<SettingsManager, "getGlobalSettings" | "getProjectSettings">;
effectiveExtraParams: Record<string, unknown> | undefined;

View File

@@ -42,12 +42,12 @@ function makeGoogleModel(id = "gemini-3.1-pro-preview") {
api: "google-generative-ai",
provider: "google",
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
reasoning: false,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 128000,
maxTokens: 8192,
headers: { "X-Provider": "google" },
reasoning: true,
} satisfies Model<"google-generative-ai">;
}
@@ -236,10 +236,10 @@ describe("google prompt cache", () => {
const systemPromptDigest = crypto.createHash("sha256").update("Follow policy.").digest("hex");
const sessionManager = makeSessionManager([
{
type: "custom",
id: "entry-1",
parentId: null,
timestamp: new Date(now - 5_000).toISOString(),
type: "custom",
customType: "openclaw.google-prompt-cache",
data: {
status: "ready",

View File

@@ -68,6 +68,10 @@ type GooglePromptCacheDeps = {
now?: () => number;
};
// Stream-level options consumed by the Google prompt-cache plumbing.
type GooglePromptCacheStreamOptions = {
  // Identifier of a cached-content entry to attach to the request —
  // presumably a Google cachedContents resource name; verify against callers.
  cachedContent?: string;
};
/**
 * Translate a cache-retention tier into the TTL string sent to Google:
 * "long" retention keeps the cache for an hour, everything else for five
 * minutes.
 */
function resolveGooglePromptCacheTtl(cacheRetention: CacheRetention): string {
  if (cacheRetention === "long") {
    return "3600s";
  }
  return "300s";
}

View File

@@ -8,6 +8,7 @@ import {
overflowBaseRunParams,
resetRunOverflowCompactionHarnessMocks,
} from "./run.overflow-compaction.harness.js";
import { resolvePlanningOnlyRetryInstruction } from "./run/incomplete-turn.js";
import type { EmbeddedRunAttemptResult } from "./run/types.js";
let runEmbeddedPiAgent: typeof import("./run.js").runEmbeddedPiAgent;
@@ -48,4 +49,33 @@ describe("runEmbeddedPiAgent incomplete-turn safety", () => {
expect(result.payloads?.[0]?.isError).toBe(true);
expect(result.payloads?.[0]?.text).toContain("verify before retrying");
});
// A pure plan narration from a GPT-5 model with no tool activity should
// produce the act-now retry instruction.
it("detects replay-safe planning-only GPT turns", () => {
  const retryInstruction = resolvePlanningOnlyRetryInstruction({
    provider: "openai",
    modelId: "gpt-5.4",
    aborted: false,
    timedOut: false,
    attempt: makeAttemptResult({
      assistantTexts: ["I'll inspect the code, make the change, and run the checks."],
    }),
  });
  expect(retryInstruction).toContain("Do not restate the plan");
});

// Once any tool ran, retrying could duplicate side effects, so the detector
// must return null even for plan-like narration.
it("does not retry planning-only detection after tool activity", () => {
  const retryInstruction = resolvePlanningOnlyRetryInstruction({
    provider: "openai",
    modelId: "gpt-5.4",
    aborted: false,
    timedOut: false,
    attempt: makeAttemptResult({
      assistantTexts: ["I'll inspect the code, make the change, and run the checks."],
      toolMetas: [{ toolName: "bash", meta: "ls" }],
    }),
  });
  expect(retryInstruction).toBeNull();
});
});

View File

@@ -80,7 +80,10 @@ import {
type RuntimeAuthState,
scrubAnthropicRefusalMagic,
} from "./run/helpers.js";
import { resolveIncompleteTurnPayloadText } from "./run/incomplete-turn.js";
import {
resolveIncompleteTurnPayloadText,
resolvePlanningOnlyRetryInstruction,
} from "./run/incomplete-turn.js";
import type { RunEmbeddedPiAgentParams } from "./run/params.js";
import { buildEmbeddedRunPayloads } from "./run/payloads.js";
import { handleRetryLimitExhaustion } from "./run/retry-limit.js";
@@ -303,7 +306,9 @@ export async function runEmbeddedPiAgent(
let autoCompactionCount = 0;
let runLoopIterations = 0;
let overloadProfileRotations = 0;
let planningOnlyRetryAttempts = 0;
let lastRetryFailoverReason: FailoverReason | null = null;
let planningOnlyRetryInstruction: string | null = null;
let rateLimitProfileRotations = 0;
let timeoutCompactionAttempts = 0;
const overloadFailoverBackoffMs = resolveOverloadFailoverBackoffMs(params.config);
@@ -474,8 +479,11 @@ export async function runEmbeddedPiAgent(
attemptedThinking.add(thinkLevel);
await fs.mkdir(resolvedWorkspace, { recursive: true });
const prompt =
const basePrompt =
provider === "anthropic" ? scrubAnthropicRefusalMagic(params.prompt) : params.prompt;
const prompt = planningOnlyRetryInstruction
? `${basePrompt}\n\n${planningOnlyRetryInstruction}`
: basePrompt;
let resolvedStreamApiKey: string | undefined;
if (!runtimeAuthState && apiKeyInfo) {
resolvedStreamApiKey = (apiKeyInfo as ApiKeyInfo).apiKey;
@@ -1337,6 +1345,26 @@ export async function runEmbeddedPiAgent(
timedOut,
attempt,
});
const nextPlanningOnlyRetryInstruction = resolvePlanningOnlyRetryInstruction({
provider,
modelId,
aborted,
timedOut,
attempt,
});
if (
!incompleteTurnText &&
nextPlanningOnlyRetryInstruction &&
planningOnlyRetryAttempts < 1
) {
planningOnlyRetryAttempts += 1;
planningOnlyRetryInstruction = nextPlanningOnlyRetryInstruction;
log.warn(
`planning-only turn detected: runId=${params.runId} sessionId=${params.sessionId} ` +
`provider=${provider}/${modelId} — retrying once with act-now steer`,
);
continue;
}
if (incompleteTurnText) {
const incompleteStopReason = attempt.lastAssistant?.stopReason;
log.warn(

View File

@@ -16,6 +16,27 @@ type IncompleteTurnAttempt = Pick<
| "replayMetadata"
>;
// Narrow view of an attempt result limited to the fields the planning-only
// detector inspects: assistant text plus every signal of tool/delivery
// activity and the replay metadata.
type PlanningOnlyAttempt = Pick<
  EmbeddedRunAttemptResult,
  | "assistantTexts"
  | "clientToolCall"
  | "yieldDetected"
  | "didSendDeterministicApprovalPrompt"
  | "didSendViaMessagingTool"
  | "lastToolError"
  | "lastAssistant"
  | "replayMetadata"
  | "toolMetas"
>;

// Phrases signaling the assistant is promising future action
// ("I'll …", "let me …", "going to …") rather than doing it.
const PLANNING_ONLY_PROMISE_RE =
  /\b(?:i(?:'ll| will)|let me|going to|first[, ]+i(?:'ll| will)|next[, ]+i(?:'ll| will)|i can do that)\b/i;

// Phrases indicating work was actually performed or a blocker was reported;
// their presence disqualifies the turn from a retry.
const PLANNING_ONLY_COMPLETION_RE =
  /\b(?:done|finished|implemented|updated|fixed|changed|ran|verified|found|here(?:'s| is) what|blocked by|the blocker is)\b/i;

// Steer appended to the original prompt for the single planning-only retry.
export const PLANNING_ONLY_RETRY_INSTRUCTION =
  "The previous assistant turn only described the plan. Do not restate the plan. Act now: take the first concrete tool action you can. If a real blocker prevents action, reply with the exact blocker in one sentence.";
export function buildAttemptReplayMetadata(
params: ReplayMetadataAttempt,
): EmbeddedRunAttemptResult["replayMetadata"] {
@@ -55,3 +76,56 @@ export function resolveIncompleteTurnPayloadText(params: {
? "⚠️ Agent couldn't generate a response. Note: some tool actions may have already been executed — please verify before retrying."
: "⚠️ Agent couldn't generate a response. Please try again.";
}
/**
 * Gate the planning-only retry steer to OpenAI GPT-5 family models:
 * provider must be "openai" or "openai-codex" and the model id must be
 * "gpt-5" or a "gpt-5." / "gpt-5-" variant (case-insensitive). Missing
 * provider or model id disqualifies the turn.
 */
function shouldApplyPlanningOnlyRetryGuard(params: {
  provider?: string;
  modelId?: string;
}): boolean {
  const isOpenAIProvider =
    params.provider === "openai" || params.provider === "openai-codex";
  return isOpenAIProvider && /^gpt-5(?:[.-]|$)/i.test(params.modelId ?? "");
}
/**
 * Decide whether the previous attempt was a "planning-only" turn that should
 * be retried once with an act-now steer.
 *
 * Returns PLANNING_ONLY_RETRY_INSTRUCTION when the turn merely promised
 * future work, or null when a retry is unsafe or unwarranted: wrong
 * provider/model family, an interrupted run, any sign of tool activity or
 * message delivery, a non-"stop" stop reason, or assistant text that does
 * not read like a bare plan (empty, longer than 700 chars, contains code
 * fences, lacks a promise phrase, or already reports completion/a blocker).
 */
export function resolvePlanningOnlyRetryInstruction(params: {
  provider?: string;
  modelId?: string;
  aborted: boolean;
  timedOut: boolean;
  attempt: PlanningOnlyAttempt;
}): string | null {
  const { attempt } = params;
  const guardApplies = shouldApplyPlanningOnlyRetryGuard({
    provider: params.provider,
    modelId: params.modelId,
  });
  if (!guardApplies || params.aborted || params.timedOut) {
    return null;
  }
  // Any evidence of tool use, delivery, or potential side effects makes a
  // retry risky (it could duplicate work), so bail out.
  const showedActivity =
    Boolean(attempt.clientToolCall) ||
    Boolean(attempt.yieldDetected) ||
    Boolean(attempt.didSendDeterministicApprovalPrompt) ||
    Boolean(attempt.didSendViaMessagingTool) ||
    Boolean(attempt.lastToolError) ||
    attempt.toolMetas.length > 0 ||
    Boolean(attempt.replayMetadata.hadPotentialSideEffects);
  if (showedActivity) {
    return null;
  }
  // Only a clean "stop" (or absent) stop reason qualifies; truncated or
  // otherwise abnormal endings are handled by other recovery paths.
  const stopReason = attempt.lastAssistant?.stopReason;
  if (stopReason && stopReason !== "stop") {
    return null;
  }
  const narration = attempt.assistantTexts.join("\n\n").trim();
  // Heuristic shape check: short prose without code fences that promises
  // action and does not report any completed work.
  const readsAsPlanOnly =
    narration.length > 0 &&
    narration.length <= 700 &&
    !narration.includes("```") &&
    PLANNING_ONLY_PROMISE_RE.test(narration) &&
    !PLANNING_ONLY_COMPLETION_RE.test(narration);
  return readsAsPlanOnly ? PLANNING_ONLY_RETRY_INSTRUCTION : null;
}

View File

@@ -169,6 +169,33 @@ describe("buildAgentSystemPrompt", () => {
);
});
// The default (non-minimal) system prompt must carry the Execution Bias
// section verbatim.
it("adds stronger execution-bias guidance for actionable turns", () => {
  const prompt = buildAgentSystemPrompt({
    workspaceDir: "/tmp/openclaw",
  });
  expect(prompt).toContain("## Execution Bias");
  expect(prompt).toContain(
    "If the user asks you to do the work, start doing it in the same turn.",
  );
  expect(prompt).toContain(
    "Commentary-only turns are incomplete when tools are available and the next action is clear.",
  );
});

// The silent-reply token must be documented as a no-delivery-only escape
// hatch, never a way to skip requested work.
it("narrows silent reply guidance to true no-delivery cases", () => {
  const prompt = buildAgentSystemPrompt({
    workspaceDir: "/tmp/openclaw",
  });
  expect(prompt).toContain(
    `Use ${SILENT_REPLY_TOKEN} ONLY when no user-visible reply is required.`,
  );
  expect(prompt).toContain(
    "Never use it to avoid doing requested work or to end an actionable turn early.",
  );
});
it("keeps manual /approve instructions for non-native approval channels", () => {
const prompt = buildAgentSystemPrompt({
workspaceDir: "/tmp/openclaw",

View File

@@ -255,6 +255,20 @@ function buildDocsSection(params: { docsPath?: string; isMinimal: boolean; readT
];
}
/**
 * Build the "## Execution Bias" section of the agent system prompt.
 * Minimal prompts skip the section entirely; otherwise returns the heading,
 * four guidance lines, and a trailing blank line for section spacing.
 */
function buildExecutionBiasSection(params: { isMinimal: boolean }) {
  if (params.isMinimal) {
    return [];
  }
  const guidance = [
    "If the user asks you to do the work, start doing it in the same turn.",
    "Use a real tool call or concrete action first when the task is actionable; do not stop at a plan or promise-to-act reply.",
    "Commentary-only turns are incomplete when tools are available and the next action is clear.",
    "If the work will take multiple steps or a while to finish, send one short progress update before or while acting.",
  ];
  return ["## Execution Bias", ...guidance, ""];
}
function buildExecApprovalPromptGuidance(params: {
runtimeChannel?: string;
inlineButtonsEnabled?: boolean;
@@ -303,6 +317,7 @@ export function buildAgentSystemPrompt(params: {
os?: string;
arch?: string;
node?: string;
provider?: string;
model?: string;
defaultModel?: string;
shell?: string;
@@ -589,6 +604,9 @@ export function buildAgentSystemPrompt(params: {
"Treat allow-once as single-command only: if another elevated command needs approval, request a fresh /approve and do not claim prior approval covered it.",
"When approvals are required, preserve and show the full command/script exactly as provided (including chained operators like &&, ||, |, ;, or multiline shells) so the user can approve what will actually run.",
"",
...buildExecutionBiasSection({
isMinimal,
}),
...safetySection,
"## OpenClaw CLI Quick Reference",
"OpenClaw is controlled via subcommands. Do not invent commands.",
@@ -743,10 +761,12 @@ export function buildAgentSystemPrompt(params: {
if (!isMinimal) {
lines.push(
"## Silent Replies",
`When you have nothing to say, respond with ONLY: ${SILENT_REPLY_TOKEN}`,
`Use ${SILENT_REPLY_TOKEN} ONLY when no user-visible reply is required.`,
"",
"⚠️ Rules:",
"- It must be your ENTIRE message — nothing else",
"- Valid cases: silent housekeeping, deliberate no-op ambient wakeups, or after a messaging tool already delivered the user-visible reply.",
"- Never use it to avoid doing requested work or to end an actionable turn early.",
"- It must be your ENTIRE message - nothing else",
`- Never append it to an actual response (never include "${SILENT_REPLY_TOKEN}" in real replies)`,
"- Never wrap it in markdown or code blocks",
"",

View File

@@ -1,4 +1,4 @@
import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
import { beforeAll, beforeEach, describe, expect, it, vi, type Mock } from "vitest";
import type { OpenClawConfig } from "../../config/config.js";
import type { SessionBindingRecord } from "../../infra/outbound/session-binding-service.js";
import type {
@@ -858,7 +858,7 @@ describe("dispatchReplyFromConfig", () => {
expect(dispatcher.sendFinalReply).toHaveBeenCalledTimes(1);
});
it("suppresses native tool summaries but still forwards tool media", async () => {
it("delivers native tool summaries and tool media", async () => {
setNoAbort();
const cfg = emptyConfig;
const dispatcher = createDispatcher();
@@ -883,13 +883,52 @@ describe("dispatchReplyFromConfig", () => {
await dispatchReplyFromConfig({ ctx, cfg, dispatcher, replyResolver });
expect(dispatcher.sendToolResult).toHaveBeenCalledTimes(1);
const sent = firstToolResultPayload(dispatcher);
expect(dispatcher.sendToolResult).toHaveBeenCalledTimes(2);
expect(dispatcher.sendToolResult).toHaveBeenNthCalledWith(
1,
expect.objectContaining({ text: "🔧 tools/sessions_send" }),
);
const sent = (dispatcher.sendToolResult as Mock).mock.calls[1]?.[0] as ReplyPayload | undefined;
expect(sent?.mediaUrl).toBe("https://example.com/tts-native.opus");
expect(sent?.text).toBeUndefined();
expect(dispatcher.sendFinalReply).toHaveBeenCalledTimes(1);
});
// Direct sessions should get at most two "Working: <tool>" progress lines:
// only "start" phases count ("read"'s "update" phase is ignored), names are
// deduped, and the third distinct tool ("exec") is dropped by the cap. The
// final reply still goes out normally.
it("emits concise tool-start progress updates for direct sessions", () => {
  setNoAbort();
  const cfg = emptyConfig;
  const dispatcher = createDispatcher();
  const ctx = buildTestCtx({
    Provider: "telegram",
    ChatType: "direct",
  });
  const replyResolver = async (
    _ctx: MsgContext,
    opts?: GetReplyOptions,
    _cfg?: OpenClawConfig,
  ) => {
    await opts?.onToolStart?.({ name: "read", phase: "start" });
    await opts?.onToolStart?.({ name: "read", phase: "update" });
    await opts?.onToolStart?.({ name: "grep", phase: "start" });
    await opts?.onToolStart?.({ name: "exec", phase: "start" });
    return { text: "done" } satisfies ReplyPayload;
  };
  await dispatchReplyFromConfig({ ctx, cfg, dispatcher, replyResolver });
  expect(dispatcher.sendToolResult).toHaveBeenNthCalledWith(
    1,
    expect.objectContaining({ text: "Working: read" }),
  );
  expect(dispatcher.sendToolResult).toHaveBeenNthCalledWith(
    2,
    expect.objectContaining({ text: "Working: grep" }),
  );
  expect(dispatcher.sendToolResult).toHaveBeenCalledTimes(2);
  expect(dispatcher.sendFinalReply).toHaveBeenCalledWith({ text: "done" });
});
it("delivers deterministic exec approval tool payloads for native commands", async () => {
setNoAbort();
const cfg = emptyConfig;

View File

@@ -597,10 +597,12 @@ export async function dispatchReplyFromConfig(params: {
}
}
// Forum topics are threaded conversations within a group — verbose tool
// summaries should be delivered into the topic thread, same as DMs.
const shouldSendToolSummaries =
(ctx.ChatType !== "group" || ctx.IsForum === true) && ctx.CommandSource !== "native";
// Forum topics are threaded conversations within a group — tool visibility
// should be delivered into the topic thread, same as DMs.
const shouldSendToolSummaries = ctx.ChatType !== "group" || ctx.IsForum === true;
const shouldSendToolStartStatuses = ctx.ChatType !== "group" || ctx.IsForum === true;
const toolStartStatusesSent = new Set<string>();
let toolStartStatusCount = 0;
const acpDispatch = await dispatchAcpRuntime.tryDispatchAcpReply({
ctx,
cfg,
@@ -699,6 +701,28 @@ export async function dispatchReplyFromConfig(params: {
};
return run();
},
onToolStart: ({ name, phase }) => {
if (!shouldSendToolStartStatuses || phase !== "start") {
return;
}
const normalizedName = typeof name === "string" ? name.trim() : "";
if (
!normalizedName ||
toolStartStatusCount >= 2 ||
toolStartStatusesSent.has(normalizedName)
) {
return;
}
toolStartStatusesSent.add(normalizedName);
toolStartStatusCount += 1;
const payload: ReplyPayload = {
text: `Working: ${normalizedName}`,
};
if (shouldRouteToOriginating) {
return sendPayloadAsync(payload, undefined, false);
}
dispatcher.sendToolResult(payload);
},
onBlockReply: (payload: ReplyPayload, context?: BlockReplyContext) => {
const run = async () => {
// Suppress reasoning payloads — channels using this generic dispatch