fix(agents): treat empty group replies as silent

This commit is contained in:
Peter Steinberger
2026-04-26 05:16:55 +01:00
parent c953e98c59
commit f1eef47839
14 changed files with 349 additions and 33 deletions

View File

@@ -69,6 +69,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Agents/groups: treat clean empty assistant stops as silent `NO_REPLY` only for always-on groups where silent replies are allowed, while keeping direct and mention-gated sessions on the incomplete-turn retry path. Thanks @MagnaAI.
- Providers/Z.AI: map OpenClaw thinking controls to Z.AI's `thinking` payload and add opt-in preserved thinking replay via `params.preserveThinking`, so GLM 5.x can keep prior `reasoning_content` when requested. Fixes #58680. Thanks @xuanmingguo.
- Channels/status: keep read-only channel lists on manifest and package metadata by default, loading setup runtime only for explicit fallback callers. Thanks @shakkernerd.
- Plugins/onboarding: defer onboarding install-record index writes until the guarded config commit so setup failures cannot leave the plugin index ahead of `openclaw.json`. Thanks @shakkernerd.

View File

@@ -272,6 +272,7 @@ Notes:
- Surfaces that provide explicit mentions still pass; patterns are a fallback.
- Per-agent override: `agents.list[].groupChat.mentionPatterns` (useful when multiple agents share a group).
- Mention gating is only enforced when mention detection is possible (native mentions or `mentionPatterns` are configured).
- Always-on groups where silent replies are allowed treat a clean empty model reply as silent, equivalent to `NO_REPLY`. Mention-gated groups and direct chats still treat empty replies as a failed agent turn.
- Discord defaults live in `channels.discord.guilds."*"` (overridable per guild/channel).
- Group history context is wrapped uniformly across channels and is **pending-only** (messages skipped due to mention gating); use `messages.groupChat.historyLimit` for the global default and `channels.<channel>.historyLimit` (or `channels.<channel>.accounts.*.historyLimit`) for overrides. Set `0` to disable.

View File

@@ -30,6 +30,7 @@ import {
STRICT_AGENTIC_BLOCKED_TEXT,
resolveReplayInvalidFlag,
resolveRunLivenessState,
shouldTreatEmptyAssistantReplyAsSilent,
} from "./run/incomplete-turn.js";
import type { EmbeddedRunAttemptResult } from "./run/types.js";
@@ -1151,6 +1152,138 @@ describe("runEmbeddedPiAgent incomplete-turn safety", () => {
expect(DEFAULT_EMPTY_RESPONSE_RETRY_LIMIT).toBe(1);
});
it("treats clean empty assistant turns as silent only when the caller allows it", () => {
const attempt = makeAttemptResult({
assistantTexts: [],
lastAssistant: {
role: "assistant",
stopReason: "stop",
provider: "openai-codex",
model: "gpt-5.5",
content: [{ type: "text", text: "" }],
} as unknown as EmbeddedRunAttemptResult["lastAssistant"],
});
expect(
shouldTreatEmptyAssistantReplyAsSilent({
allowEmptyAssistantReplyAsSilent: true,
payloadCount: 0,
aborted: false,
timedOut: false,
attempt,
}),
).toBe(true);
expect(
shouldTreatEmptyAssistantReplyAsSilent({
allowEmptyAssistantReplyAsSilent: false,
payloadCount: 0,
aborted: false,
timedOut: false,
attempt,
}),
).toBe(false);
});
it("does not treat error or side-effect empty turns as silent", () => {
  // Flags shared by both classifications: silence allowed, clean run shape.
  const baseFlags = {
    allowEmptyAssistantReplyAsSilent: true,
    payloadCount: 0,
    aborted: false,
    timedOut: false,
  };
  // An empty turn that stopped with an error must stay on the failure path.
  const errorAttempt = makeAttemptResult({
    assistantTexts: [],
    lastAssistant: {
      role: "assistant",
      stopReason: "error",
      provider: "openai-codex",
      model: "gpt-5.5",
      content: [],
    } as unknown as EmbeddedRunAttemptResult["lastAssistant"],
  });
  expect(
    shouldTreatEmptyAssistantReplyAsSilent({ ...baseFlags, attempt: errorAttempt }),
  ).toBe(false);
  // A clean stop that already delivered text via the messaging tool is a real
  // reply, not silence.
  const sideEffectAttempt = makeAttemptResult({
    assistantTexts: [],
    didSendViaMessagingTool: true,
    messagingToolSentTexts: ["sent already"],
    lastAssistant: {
      role: "assistant",
      stopReason: "stop",
      provider: "openai-codex",
      model: "gpt-5.5",
      content: [{ type: "text", text: "" }],
    } as unknown as EmbeddedRunAttemptResult["lastAssistant"],
  });
  expect(
    shouldTreatEmptyAssistantReplyAsSilent({ ...baseFlags, attempt: sideEffectAttempt }),
  ).toBe(false);
});
// End-to-end: with the silence flag set, a clean empty assistant stop is
// surfaced as a NO_REPLY payload on the first attempt instead of entering the
// empty-response retry loop.
it("returns NO_REPLY without retrying clean empty assistant turns when silence is allowed", async () => {
  // No failover classification: the attempt counts as a normal completion.
  mockedClassifyFailoverReason.mockReturnValue(null);
  mockedRunEmbeddedAttempt.mockResolvedValue(
    makeAttemptResult({
      assistantTexts: [],
      lastAssistant: {
        role: "assistant",
        stopReason: "stop",
        provider: "openai-codex",
        model: "gpt-5.5",
        content: [{ type: "text", text: "" }],
      } as unknown as EmbeddedRunAttemptResult["lastAssistant"],
    }),
  );
  const result = await runEmbeddedPiAgent({
    ...overflowBaseRunParams,
    allowEmptyAssistantReplyAsSilent: true,
    provider: "openai-codex",
    model: "gpt-5.5",
    runId: "run-empty-assistant-silent",
  });
  // Exactly one attempt: the silent path must not trigger a retry.
  expect(mockedRunEmbeddedAttempt).toHaveBeenCalledTimes(1);
  expect(result.payloads).toEqual([{ text: "NO_REPLY" }]);
  // Terminal meta marks the reply as intentional silence, not a failure.
  expect(result.meta.terminalReplyKind).toBe("silent-empty");
  expect(result.meta.livenessState).toBe("working");
});
// Without allowEmptyAssistantReplyAsSilent, the same clean empty stop goes
// through the empty-response retry path and ultimately surfaces an error
// payload to the caller.
it("keeps retrying and surfacing clean empty assistant turns without the silence flag", async () => {
  mockedClassifyFailoverReason.mockReturnValue(null);
  mockedRunEmbeddedAttempt.mockResolvedValue(
    makeAttemptResult({
      assistantTexts: [],
      lastAssistant: {
        role: "assistant",
        stopReason: "stop",
        provider: "openai",
        model: "gpt-5.4",
        content: [{ type: "text", text: "" }],
      } as unknown as EmbeddedRunAttemptResult["lastAssistant"],
    }),
  );
  const result = await runEmbeddedPiAgent({
    ...overflowBaseRunParams,
    provider: "openai",
    model: "gpt-5.4",
    runId: "run-empty-assistant-error",
  });
  // One retry (DEFAULT_EMPTY_RESPONSE_RETRY_LIMIT is 1) => two attempts total.
  expect(mockedRunEmbeddedAttempt).toHaveBeenCalledTimes(2);
  expect(result.payloads?.[0]?.isError).toBe(true);
  expect(result.payloads?.[0]?.text).toContain("couldn't generate a response");
});
it("detects generic empty Gemini turns without visible text", () => {
const retryInstruction = resolveEmptyResponseRetryInstruction({
provider: "google-vertex",

View File

@@ -121,6 +121,7 @@ import {
STRICT_AGENTIC_BLOCKED_TEXT,
resolveReplayInvalidFlag,
resolveRunLivenessState,
shouldTreatEmptyAssistantReplyAsSilent,
} from "./run/incomplete-turn.js";
import type { RunEmbeddedPiAgentParams } from "./run/params.js";
import { buildEmbeddedRunPayloads } from "./run/payloads.js";
@@ -1866,32 +1867,45 @@ export async function runEmbeddedPiAgent(
}
const payloadCount = payloadsWithToolMedia?.length ?? 0;
const nextPlanningOnlyRetryInstruction = resolvePlanningOnlyRetryInstruction({
provider,
modelId,
executionContract,
prompt: params.prompt,
aborted,
timedOut,
attempt,
});
const nextReasoningOnlyRetryInstruction = resolveReasoningOnlyRetryInstruction({
provider: activeErrorContext.provider,
modelId: activeErrorContext.model,
executionContract,
aborted,
timedOut,
attempt,
});
const nextEmptyResponseRetryInstruction = resolveEmptyResponseRetryInstruction({
provider: activeErrorContext.provider,
modelId: activeErrorContext.model,
executionContract,
const emptyAssistantReplyIsSilent = shouldTreatEmptyAssistantReplyAsSilent({
allowEmptyAssistantReplyAsSilent: params.allowEmptyAssistantReplyAsSilent,
payloadCount,
aborted,
timedOut,
attempt,
});
const nextPlanningOnlyRetryInstruction = emptyAssistantReplyIsSilent
? null
: resolvePlanningOnlyRetryInstruction({
provider,
modelId,
executionContract,
prompt: params.prompt,
aborted,
timedOut,
attempt,
});
const nextReasoningOnlyRetryInstruction = emptyAssistantReplyIsSilent
? null
: resolveReasoningOnlyRetryInstruction({
provider: activeErrorContext.provider,
modelId: activeErrorContext.model,
executionContract,
aborted,
timedOut,
attempt,
});
const nextEmptyResponseRetryInstruction = emptyAssistantReplyIsSilent
? null
: resolveEmptyResponseRetryInstruction({
provider: activeErrorContext.provider,
modelId: activeErrorContext.model,
executionContract,
payloadCount,
aborted,
timedOut,
attempt,
});
if (
nextPlanningOnlyRetryInstruction &&
planningOnlyRetryAttempts < maxPlanningOnlyRetryAttempts
@@ -1963,12 +1977,14 @@ export async function runEmbeddedPiAgent(
);
continue;
}
const incompleteTurnText = resolveIncompleteTurnPayloadText({
payloadCount,
aborted,
timedOut,
attempt,
});
const incompleteTurnText = emptyAssistantReplyIsSilent
? null
: resolveIncompleteTurnPayloadText({
payloadCount,
aborted,
timedOut,
attempt,
});
if (reasoningOnlyRetriesExhausted && !finalAssistantVisibleText) {
log.warn(
`reasoning-only retries exhausted: runId=${params.runId} sessionId=${params.sessionId} ` +
@@ -2213,12 +2229,15 @@ export async function runEmbeddedPiAgent(
: attempt.yieldDetected
? "end_turn"
: (sessionLastAssistant?.stopReason as string | undefined);
const terminalPayloads = emptyAssistantReplyIsSilent
? [{ text: SILENT_REPLY_TOKEN }]
: payloadsWithToolMedia;
attempt.setTerminalLifecycleMeta?.({
replayInvalid,
livenessState,
});
return {
payloads: payloadsWithToolMedia?.length ? payloadsWithToolMedia : undefined,
payloads: terminalPayloads?.length ? terminalPayloads : undefined,
...(attempt.diagnosticTrace
? { diagnosticTrace: freezeDiagnosticTraceContext(attempt.diagnosticTrace) }
: {}),
@@ -2233,6 +2252,9 @@ export async function runEmbeddedPiAgent(
replayInvalid,
livenessState,
agentHarnessResultClassification: attempt.agentHarnessResultClassification,
...(emptyAssistantReplyIsSilent
? { terminalReplyKind: "silent-empty" as const }
: {}),
// Handle client tool calls (OpenResponses hosted tools)
// Propagate the LLM stop reason so callers (lifecycle events,
// ACP bridge) can distinguish end_turn from max_tokens.

View File

@@ -345,6 +345,25 @@ function shouldSkipPlanningOnlyRetry(params: {
);
}
/**
 * Decide whether a clean empty assistant turn should be reported as an
 * intentional silent reply (equivalent to NO_REPLY) instead of an incomplete
 * turn that warrants a retry.
 */
export function shouldTreatEmptyAssistantReplyAsSilent(params: {
  allowEmptyAssistantReplyAsSilent?: boolean;
  payloadCount: number;
  aborted: boolean;
  timedOut: boolean;
  attempt: IncompleteTurnAttempt;
}): boolean {
  // Silence is strictly opt-in by the caller's prompt policy.
  if (!params.allowEmptyAssistantReplyAsSilent) {
    return false;
  }
  // Reuse the planning-only skip heuristics (aborted/timed-out/etc. turns are
  // never reinterpreted as silence).
  if (shouldSkipPlanningOnlyRetry(params)) {
    return false;
  }
  // A turn that already committed user-visible tool output is a real reply.
  if (hasCommittedUserVisibleToolDelivery(params.attempt)) {
    return false;
  }
  // Finally, the turn must actually qualify as an empty assistant response.
  return isEmptyResponseAssistantTurn({
    payloadCount: params.payloadCount,
    attempt: params.attempt,
  });
}
export function resolveReasoningOnlyRetryInstruction(params: {
provider?: string;
modelId?: string;

View File

@@ -142,6 +142,12 @@ export type RunEmbeddedPiAgentParams = {
ownerNumbers?: string[];
enforceFinalTag?: boolean;
silentExpected?: boolean;
/**
* Treat a clean empty assistant stop as an intentional silent reply.
* Only set when the caller's prompt policy already allows an exact NO_REPLY
* final answer for silence.
*/
allowEmptyAssistantReplyAsSilent?: boolean;
authProfileFailurePolicy?: AuthProfileFailurePolicy;
/**
* Allow a single run attempt even when all auth profiles are in cooldown,

View File

@@ -113,6 +113,7 @@ export type EmbeddedPiRunMeta = {
replayInvalid?: boolean;
livenessState?: EmbeddedRunLivenessState;
agentHarnessResultClassification?: "empty" | "reasoning-only" | "planning-only";
terminalReplyKind?: "silent-empty";
error?: {
kind:
| "context_overflow"

View File

@@ -217,6 +217,7 @@ export function buildEmbeddedRunBaseParams(params: {
senderIsOwner: params.run.senderIsOwner,
enforceFinalTag: resolveEnforceFinalTag(params.run, params.provider, params.model),
silentExpected: params.run.silentExpected,
allowEmptyAssistantReplyAsSilent: params.run.allowEmptyAssistantReplyAsSilent,
provider: params.provider,
model: params.model,
...params.authProfile,

View File

@@ -314,6 +314,7 @@ export function createFollowupRunner(params: {
extraSystemPrompt: run.extraSystemPrompt,
ownerNumbers: run.ownerNumbers,
enforceFinalTag: run.enforceFinalTag,
allowEmptyAssistantReplyAsSilent: run.allowEmptyAssistantReplyAsSilent,
provider,
model,
...authProfile,

View File

@@ -77,6 +77,24 @@ vi.mock("./groups.js", () => ({
buildDirectChatContext: vi.fn().mockReturnValue(""),
buildGroupIntro: vi.fn().mockReturnValue(""),
buildGroupChatContext: vi.fn().mockReturnValue(""),
resolveGroupSilentReplyBehavior: vi.fn(
(params: {
sessionEntry?: SessionEntry;
defaultActivation: "always" | "mention";
silentReplyPolicy?: "allow" | "disallow";
silentReplyRewrite?: boolean;
}) => {
const activation = params.sessionEntry?.groupActivation ?? params.defaultActivation;
const canUseSilentReply =
params.silentReplyPolicy !== "disallow" || params.silentReplyRewrite === true;
return {
activation,
canUseSilentReply,
allowEmptyAssistantReplyAsSilent:
activation === "always" && params.silentReplyPolicy === "allow",
};
},
),
}));
vi.mock("./inbound-meta.js", () => ({
@@ -266,6 +284,51 @@ describe("runPreparedReply media-only handling", () => {
);
});
// Wiring check: runPreparedReply forwards allowEmptyAssistantReplyAsSilent to
// the followup run based on the (mocked) group silent-reply behavior.
it("propagates empty-assistant silence only for always-on group runs", async () => {
  await runPreparedReply(baseParams());
  let call = vi.mocked(runReplyAgent).mock.calls.at(-1)?.[0];
  // Default fixture resolves to an always-on group with silence allowed.
  expect(call?.followupRun.run.allowEmptyAssistantReplyAsSilent).toBe(true);
  await runPreparedReply(
    baseParams({
      defaultActivation: "mention",
    }),
  );
  call = vi.mocked(runReplyAgent).mock.calls.at(-1)?.[0];
  // Mention-gated groups keep the failed-turn path for empty replies.
  expect(call?.followupRun.run.allowEmptyAssistantReplyAsSilent).toBe(false);
});
// Direct (non-group) chats never opt into empty-assistant silence, even when
// the inbound body is empty and only media/thread history is present.
it("does not propagate empty-assistant silence for direct runs", async () => {
  await runPreparedReply(
    baseParams({
      ctx: {
        Body: "",
        RawBody: "",
        CommandBody: "",
        ThreadHistoryBody: "Earlier direct message",
        OriginatingChannel: "slack",
        OriginatingTo: "D123",
        ChatType: "direct",
      },
      sessionCtx: {
        Body: "",
        BodyStripped: "",
        ThreadHistoryBody: "Earlier direct message",
        MediaPath: "/tmp/input.png",
        Provider: "slack",
        ChatType: "direct",
        OriginatingChannel: "slack",
        OriginatingTo: "D123",
      },
    }),
  );
  const call = vi.mocked(runReplyAgent).mock.calls.at(-1)?.[0];
  expect(call?.followupRun.run.allowEmptyAssistantReplyAsSilent).toBe(false);
});
it("allows media-only prompts and preserves thread context in queued followups", async () => {
const result = await runPreparedReply(baseParams());
expect(result).toEqual({ text: "ok" });

View File

@@ -45,7 +45,12 @@ import type { buildCommandContext } from "./commands.js";
import type { InlineDirectives } from "./directive-handling.js";
import { shouldUseReplyFastTestRuntime } from "./get-reply-fast-path.js";
import { resolvePreparedReplyQueueState } from "./get-reply-run-queue.js";
import { buildDirectChatContext, buildGroupChatContext, buildGroupIntro } from "./groups.js";
import {
buildDirectChatContext,
buildGroupChatContext,
buildGroupIntro,
resolveGroupSilentReplyBehavior,
} from "./groups.js";
import { hasInboundMedia } from "./inbound-media.js";
import { buildInboundMetaSystemPrompt, buildInboundUserContextPrefix } from "./inbound-meta.js";
import type { createModelSelectionState } from "./model-selection.js";
@@ -346,6 +351,14 @@ export async function runPreparedReply(
silentReplyRewrite: silentReplySettings.rewrite,
})
: "";
const allowEmptyAssistantReplyAsSilent =
isGroupChat &&
resolveGroupSilentReplyBehavior({
sessionEntry,
defaultActivation,
silentReplyPolicy: silentReplySettings.policy,
silentReplyRewrite: silentReplySettings.rewrite,
}).allowEmptyAssistantReplyAsSilent;
const groupSystemPrompt = normalizeOptionalString(sessionCtx.GroupSystemPrompt) ?? "";
const inboundMetaPrompt = buildInboundMetaSystemPrompt(
isNewSession ? sessionCtx : { ...sessionCtx, ThreadStarterBody: undefined },
@@ -818,6 +831,7 @@ export async function runPreparedReply(
extraSystemPrompt: extraSystemPromptParts.join("\n\n") || undefined,
extraSystemPromptStatic: extraSystemPromptStaticParts.join("\n\n"),
skipProviderRuntimeHints: useFastReplyRuntime,
allowEmptyAssistantReplyAsSilent,
...(!useFastReplyRuntime &&
isReasoningTagProvider(provider, {
config: cfg,

View File

@@ -124,6 +124,40 @@ describe("group runtime loading", () => {
expect(rewritten).not.toContain("Be extremely selective");
});
it("marks empty assistant replies silent only for always-on groups with silence allowed", async () => {
  const groups = await import("./groups.js");
  // Small accessor so each case reads as input -> expected flag.
  const silentFor = (args: Parameters<typeof groups.resolveGroupSilentReplyBehavior>[0]) =>
    groups.resolveGroupSilentReplyBehavior(args).allowEmptyAssistantReplyAsSilent;
  // Always-on + policy "allow" is the only combination that opts in.
  expect(silentFor({ defaultActivation: "always", silentReplyPolicy: "allow" })).toBe(true);
  // Mention-gated default keeps the failed-turn path.
  expect(silentFor({ defaultActivation: "mention", silentReplyPolicy: "allow" })).toBe(false);
  // A per-session mention override beats the always-on default.
  expect(
    silentFor({
      sessionEntry: { groupActivation: "mention" } as never,
      defaultActivation: "always",
      silentReplyPolicy: "allow",
    }),
  ).toBe(false);
  // Disallow-with-rewrite does not count as an explicit "allow" policy.
  expect(
    silentFor({
      defaultActivation: "always",
      silentReplyPolicy: "disallow",
      silentReplyRewrite: true,
    }),
  ).toBe(false);
});
it("loads the group runtime only when requireMention resolution needs it", async () => {
const groupsRuntimeLoads = vi.fn();
vi.doMock("./groups.runtime.js", () => {

View File

@@ -252,6 +252,28 @@ export function buildDirectChatContext(params: {
return lines.join(" ");
}
/**
 * Resolve how a group session should treat silent replies: the effective
 * activation mode, whether a silent reply is permitted at all, and whether a
 * clean empty assistant reply may be interpreted as intentional silence.
 */
export function resolveGroupSilentReplyBehavior(params: {
  sessionEntry?: SessionEntry;
  defaultActivation: "always" | "mention";
  silentReplyPolicy?: SilentReplyPolicy;
  silentReplyRewrite?: boolean;
}): {
  activation: "always" | "mention";
  canUseSilentReply: boolean;
  allowEmptyAssistantReplyAsSilent: boolean;
} {
  // A per-session activation override takes precedence over the default.
  const sessionActivation = normalizeGroupActivation(params.sessionEntry?.groupActivation);
  const activation = sessionActivation ?? params.defaultActivation;
  // Silent replies are blocked only by an explicit "disallow" policy without
  // rewrite mode; rewrite keeps them usable even under "disallow".
  const silentReplyBlocked =
    params.silentReplyPolicy === "disallow" && params.silentReplyRewrite !== true;
  return {
    activation,
    canUseSilentReply: !silentReplyBlocked,
    // Only always-on groups with an explicit "allow" policy may treat a clean
    // empty model reply as silence.
    allowEmptyAssistantReplyAsSilent:
      params.silentReplyPolicy === "allow" && activation === "always",
  };
}
export function buildGroupIntro(params: {
cfg: OpenClawConfig;
sessionCtx: TemplateContext;
@@ -261,10 +283,7 @@ export function buildGroupIntro(params: {
silentReplyPolicy?: SilentReplyPolicy;
silentReplyRewrite?: boolean;
}): string {
const activation =
normalizeGroupActivation(params.sessionEntry?.groupActivation) ?? params.defaultActivation;
const canUseSilentReply =
params.silentReplyPolicy !== "disallow" || params.silentReplyRewrite === true;
const { activation, canUseSilentReply } = resolveGroupSilentReplyBehavior(params);
const activationLine =
activation === "always"
? "Activation: always-on (you receive every group message)."

View File

@@ -91,6 +91,7 @@ export type FollowupRun = {
enforceFinalTag?: boolean;
skipProviderRuntimeHints?: boolean;
silentExpected?: boolean;
allowEmptyAssistantReplyAsSilent?: boolean;
};
};