fix(auto-reply): honor direct silent empty replies

* fix(auto-reply): allow direct silent empty replies

* fix(auto-reply): guard direct silent empty replies
This commit is contained in:
Vyctor Huggo Przozwski da Silva
2026-04-29 16:02:38 -03:00
committed by GitHub
parent eb7d89f4b9
commit 97e2f5b332
4 changed files with 106 additions and 22 deletions

View File

@@ -23,6 +23,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Auto-reply: honor explicit `silentReply.direct: "allow"` for clean empty or reasoning-only direct-chat turns while keeping the default direct-chat empty-response guard conservative. Fixes #74409. Thanks @jesuskannolis.
- Ollama: normalize provider-prefixed tool-call names at the native stream boundary so Kimi/Ollama calls such as `functions.exec` dispatch as `exec` instead of missing configured tools. Fixes #74487. Thanks @afurm and @carreipeia.
- Security/audit: resolve configured model aliases before model-tier and small-parameter checks, so alias-based GPT-5/Codex configs no longer report false weak-model warnings. Fixes #74455. Thanks @blaspat.
- CLI/agent: isolate Gateway-timeout embedded fallback runs under explicit `gateway-fallback-*` sessions so accepted Gateway runs cannot race transcript locks or replace the routed conversation session. Fixes #62981. Thanks @HemantSudarshan.

View File

@@ -329,7 +329,7 @@ Replying to a bot message counts as an implicit mention when the channel support
- Per-agent override: `agents.list[].groupChat.mentionPatterns` (useful when multiple agents share a group).
- Mention gating is only enforced when mention detection is possible (native mentions or `mentionPatterns` are configured).
- Group chat prompt context carries the resolved silent-reply instruction every turn; workspace files should not duplicate `NO_REPLY` mechanics.
- Groups where silent replies are allowed treat clean empty or reasoning-only model turns as silent, equivalent to `NO_REPLY`. Direct chats still treat empty replies as a failed agent turn.
- Groups where silent replies are allowed treat clean empty or reasoning-only model turns as silent, equivalent to `NO_REPLY`. Direct chats do the same only when direct silent replies are explicitly allowed; otherwise empty replies remain failed agent turns.
- Discord defaults live in `channels.discord.guilds."*"` (overridable per guild/channel).
- Group history context is wrapped uniformly across channels and is **pending-only** (messages skipped due to mention gating); use `messages.groupChat.historyLimit` for the global default and `channels.<channel>.historyLimit` (or `channels.<channel>.accounts.*.historyLimit`) for overrides. Set `0` to disable.

View File

@@ -302,7 +302,7 @@ describe("runPreparedReply media-only handling", () => {
expect(call?.followupRun.run.allowEmptyAssistantReplyAsSilent).toBe(true);
});
it("does not propagate empty-assistant silence for direct runs", async () => {
it("keeps empty-assistant silence disabled for direct runs by default", async () => {
await runPreparedReply(
baseParams({
ctx: {
@@ -331,6 +331,85 @@ describe("runPreparedReply media-only handling", () => {
expect(call?.followupRun.run.allowEmptyAssistantReplyAsSilent).toBe(false);
});
it.each(["direct", "dm"] as const)(
  "propagates empty-assistant silence for %s runs with explicit direct silent replies",
  async (chatType) => {
    // Config that explicitly opts direct chats into silent replies.
    const allowDirectSilentCfg = {
      session: {},
      channels: {},
      agents: {
        defaults: {
          silentReply: {
            direct: "allow",
          },
        },
      },
    };
    await runPreparedReply(
      baseParams({
        ctx: {
          Body: "",
          RawBody: "",
          CommandBody: "",
          ThreadHistoryBody: "Earlier direct message",
          OriginatingChannel: "slack",
          OriginatingTo: "D123",
          ChatType: chatType,
        },
        sessionCtx: {
          Body: "",
          BodyStripped: "",
          ThreadHistoryBody: "Earlier direct message",
          MediaPath: "/tmp/input.png",
          Provider: "slack",
          ChatType: chatType,
          OriginatingChannel: "slack",
          OriginatingTo: "D123",
        },
        cfg: allowDirectSilentCfg,
      }),
    );
    // With direct silent replies allowed, the run must propagate the flag.
    const lastCall = vi.mocked(runReplyAgent).mock.calls.at(-1)?.[0];
    expect(lastCall?.followupRun.run.allowEmptyAssistantReplyAsSilent).toBe(true);
  },
);
it("does not borrow target-session silence for native commands sent from direct chats", async () => {
  // Native command issued from a direct chat but routed to a group target session;
  // the routing triple is shared between ctx and sessionCtx.
  const nativeRouting = {
    CommandSource: "native",
    SessionKey: "agent:main:telegram:direct:source",
    CommandTargetSessionKey: "agent:main:telegram:group:target",
  } as const;
  await runPreparedReply(
    baseParams({
      sessionKey: "agent:main:telegram:group:target",
      ctx: {
        Body: "",
        RawBody: "",
        CommandBody: "",
        ThreadHistoryBody: "Earlier direct message",
        OriginatingChannel: "telegram",
        OriginatingTo: "D123",
        ChatType: "direct",
        ...nativeRouting,
      },
      sessionCtx: {
        Body: "",
        BodyStripped: "",
        ThreadHistoryBody: "Earlier direct message",
        MediaPath: "/tmp/input.png",
        Provider: "telegram",
        ChatType: "direct",
        OriginatingChannel: "telegram",
        OriginatingTo: "D123",
        ...nativeRouting,
      },
    }),
  );
  // The direct-chat origin must keep empty-assistant silence disabled even
  // though the command targets a group session.
  const lastCall = vi.mocked(runReplyAgent).mock.calls.at(-1)?.[0];
  expect(lastCall?.followupRun.run.allowEmptyAssistantReplyAsSilent).toBe(false);
});
it("allows media-only prompts and preserves thread context in queued followups", async () => {
const result = await runPreparedReply(baseParams());
expect(result).toEqual({ text: "ok" });

View File

@@ -400,14 +400,15 @@ export async function runPreparedReply(
ctx,
isHeartbeat,
});
const silentReplyConversationType = resolvePromptSilentReplyConversationType({
ctx: promptSessionCtx,
inboundSessionKey: ctx.SessionKey,
});
const silentReplySettings = resolveSilentReplySettings({
cfg,
sessionKey: runtimePolicySessionKey,
surface: promptSessionCtx.Surface ?? promptSessionCtx.Provider,
conversationType: resolvePromptSilentReplyConversationType({
ctx: promptSessionCtx,
inboundSessionKey: ctx.SessionKey,
}),
conversationType: silentReplyConversationType,
});
const useFastReplyRuntime = shouldUseReplyFastTestRuntime({
cfg,
@@ -425,6 +426,7 @@ export async function runPreparedReply(
const isFirstTurnInSession = isNewSession || !currentSystemSent;
const isGroupChat =
promptSessionCtx.ChatType === "group" || promptSessionCtx.ChatType === "channel";
const isDirectChat = promptSessionCtx.ChatType === "direct" || promptSessionCtx.ChatType === "dm";
const wasMentioned = ctx.WasMentioned === true;
const { typingPolicy, suppressTyping } = resolveRunTypingPolicy({
requestedPolicy: opts?.typingPolicy,
@@ -444,15 +446,14 @@ export async function runPreparedReply(
const shouldInjectGroupIntro = Boolean(
isGroupChat && (isFirstTurnInSession || sessionEntry?.groupActivationNeedsSystemIntro),
);
const directChatContext =
promptSessionCtx.ChatType === "direct" || promptSessionCtx.ChatType === "dm"
? buildDirectChatContext({
sessionCtx: promptSessionCtx,
silentReplyPolicy: silentReplySettings.policy,
silentReplyRewrite: silentReplySettings.rewrite,
silentToken: SILENT_REPLY_TOKEN,
})
: "";
const directChatContext = isDirectChat
? buildDirectChatContext({
sessionCtx: promptSessionCtx,
silentReplyPolicy: silentReplySettings.policy,
silentReplyRewrite: silentReplySettings.rewrite,
silentToken: SILENT_REPLY_TOKEN,
})
: "";
// Always include persistent group chat context (provider + reply guidance).
const groupChatContext = isGroupChat
? buildGroupChatContext({
@@ -476,13 +477,16 @@ export async function runPreparedReply(
})
: "";
const allowEmptyAssistantReplyAsSilent =
isGroupChat &&
resolveGroupSilentReplyBehavior({
sessionEntry,
defaultActivation,
silentReplyPolicy: silentReplySettings.policy,
silentReplyRewrite: silentReplySettings.rewrite,
}).allowEmptyAssistantReplyAsSilent;
(isDirectChat &&
silentReplyConversationType === "direct" &&
silentReplySettings.policy === "allow") ||
(isGroupChat &&
resolveGroupSilentReplyBehavior({
sessionEntry,
defaultActivation,
silentReplyPolicy: silentReplySettings.policy,
silentReplyRewrite: silentReplySettings.rewrite,
}).allowEmptyAssistantReplyAsSilent);
const groupSystemPrompt = normalizeOptionalString(promptSessionCtx.GroupSystemPrompt) ?? "";
const inboundMetaPrompt = buildInboundMetaSystemPrompt(
isNewSession ? sessionCtx : { ...sessionCtx, ThreadStarterBody: undefined },