fix(channels): honor reasoning defaults in previews (#71817) (thanks @anagnorisis2peripeteia)

This commit is contained in:
Ayaan Zaidi
2026-05-08 18:09:15 +05:30
parent 5c589673ec
commit 30e079dd89
9 changed files with 153 additions and 10 deletions

View File

@@ -6,6 +6,7 @@ Docs: https://docs.openclaw.ai
### Changes
- Telegram/Feishu: honor configured per-agent and global `reasoningDefault` values when deciding whether channel reasoning previews should stream or stay hidden, addressing the preview-default part of #73182. Thanks @anagnorisis2peripeteia.
- Docker: run the runtime image under `tini` so long-lived containers reap orphaned child processes and forward signals correctly. (#77885) Thanks @VintageAyu.
- Google/Gemini: normalize retired `google/gemini-3-pro-preview` and `google-gemini-cli/gemini-3-pro-preview` selections to `google/gemini-3.1-pro-preview` before they are written to model config.
- Amazon Bedrock: support `serviceTier` parameter for Bedrock models, configurable via `agents.defaults.params.serviceTier` or per-model in `agents.defaults.models`. Valid values: `default`, `flex`, `priority`, `reserved`. (#64512) Thanks @mobilinkd.

View File

@@ -106,7 +106,7 @@ title: "Thinking levels"
- `stream` (Telegram only): streams reasoning into the Telegram draft bubble while the reply is generating, then sends the final answer without reasoning.
- Alias: `/reason`.
- Send `/reasoning` (or `/reasoning:`) with no argument to see the current reasoning level.
- Resolution order: inline directive, then session override, then per-agent default (`agents.list[].reasoningDefault`), then fallback (`off`).
- Resolution order: inline directive, then session override, then per-agent default (`agents.list[].reasoningDefault`), then global default (`agents.defaults.reasoningDefault`), then fallback (`off`).
Malformed local-model reasoning tags are handled conservatively. Closed `<think>...</think>` blocks stay hidden on normal replies, and unclosed reasoning after already visible text is also hidden. If a reply is fully wrapped in a single unclosed opening tag and would otherwise deliver as empty text, OpenClaw removes the malformed opening tag and delivers the remaining text.

View File

@@ -0,0 +1,21 @@
import type { ClawdbotConfig } from "./bot-runtime-api.js";
/** Reasoning levels a Feishu channel can default to. */
type ReasoningDefault = "on" | "stream" | "off";

// Agent id substituted when the caller supplies none.
const DEFAULT_AGENT_ID = "main";

/**
 * Canonicalize an agent id: trim, lowercase, and fall back to the
 * default agent id when the input is empty, null, or undefined.
 */
function normalizeAgentId(value: string | undefined | null): string {
  const cleaned = (value ?? "").trim().toLowerCase();
  return cleaned.length > 0 ? cleaned : DEFAULT_AGENT_ID;
}

/**
 * Resolve the configured reasoning default for a Feishu agent.
 * Precedence: per-agent entry in `agents.list`, then the global
 * `agents.defaults.reasoningDefault`, then the hard fallback "off".
 */
export function resolveFeishuConfigReasoningDefault(
  cfg: ClawdbotConfig,
  agentId: string,
): ReasoningDefault {
  const wanted = normalizeAgentId(agentId);
  // First list entry whose normalized id matches wins (same as Array.find).
  let perAgent: ReasoningDefault | undefined;
  for (const entry of cfg.agents?.list ?? []) {
    if (normalizeAgentId(entry?.id) === wanted) {
      perAgent = entry?.reasoningDefault;
      break;
    }
  }
  return perAgent ?? cfg.agents?.defaults?.reasoningDefault ?? "off";
}

View File

@@ -1357,6 +1357,8 @@ export async function handleFeishuMessage(params: {
},
};
const allowReasoningPreview = resolveFeishuReasoningPreviewEnabled({
cfg,
agentId,
storePath: agentStorePath,
sessionKey: agentSessionKey,
});
@@ -1532,6 +1534,8 @@ export async function handleFeishuMessage(params: {
agentId: route.agentId,
});
const allowReasoningPreview = resolveFeishuReasoningPreviewEnabled({
cfg,
agentId: route.agentId,
storePath,
sessionKey: route.sessionKey,
});

View File

@@ -1,4 +1,5 @@
import { afterAll, beforeEach, describe, expect, it, vi } from "vitest";
import type { ClawdbotConfig } from "./bot-runtime-api.js";
import { resolveFeishuReasoningPreviewEnabled } from "./reasoning-preview.js";
const { loadSessionStoreMock } = vi.hoisted(() => ({
@@ -20,6 +21,8 @@ afterAll(() => {
});
describe("resolveFeishuReasoningPreviewEnabled", () => {
const emptyCfg: ClawdbotConfig = {};
beforeEach(() => {
vi.clearAllMocks();
});
@@ -32,12 +35,16 @@ describe("resolveFeishuReasoningPreviewEnabled", () => {
expect(
resolveFeishuReasoningPreviewEnabled({
cfg: emptyCfg,
agentId: "main",
storePath: "/tmp/feishu-sessions.json",
sessionKey: "agent:main:feishu:dm:ou_sender_1",
}),
).toBe(true);
expect(
resolveFeishuReasoningPreviewEnabled({
cfg: emptyCfg,
agentId: "main",
storePath: "/tmp/feishu-sessions.json",
sessionKey: "agent:main:feishu:dm:ou_sender_2",
}),
@@ -51,14 +58,56 @@ describe("resolveFeishuReasoningPreviewEnabled", () => {
expect(
resolveFeishuReasoningPreviewEnabled({
cfg: emptyCfg,
agentId: "main",
storePath: "/tmp/feishu-sessions.json",
sessionKey: "agent:main:feishu:dm:ou_sender_1",
}),
).toBe(false);
expect(
resolveFeishuReasoningPreviewEnabled({
cfg: emptyCfg,
agentId: "main",
storePath: "/tmp/feishu-sessions.json",
}),
).toBe(false);
});
it("falls back to configured stream defaults", () => {
// Session store: sender_1 has no explicit reasoningLevel (defaults apply);
// sender_2 carries an explicit "off" override that must win over config.
loadSessionStoreMock.mockReturnValue({
"agent:main:feishu:dm:ou_sender_1": {},
"agent:main:feishu:dm:ou_sender_2": { reasoningLevel: "off" },
});
// Global default is "stream"; the "Ops" agent overrides it with "off".
const cfg: ClawdbotConfig = {
agents: {
defaults: { reasoningDefault: "stream" },
list: [{ id: "Ops", reasoningDefault: "off" }],
},
};
// No session override -> global "stream" default enables the preview.
expect(
resolveFeishuReasoningPreviewEnabled({
cfg,
agentId: "main",
storePath: "/tmp/feishu-sessions.json",
sessionKey: "agent:main:feishu:dm:ou_sender_1",
}),
).toBe(true);
// No session key at all -> per-agent "off" default ("ops" matches "Ops",
// presumably via case-insensitive id normalization) disables the preview.
expect(
resolveFeishuReasoningPreviewEnabled({
cfg,
agentId: "ops",
storePath: "/tmp/feishu-sessions.json",
}),
).toBe(false);
// Explicit session-level "off" wins over the global "stream" default.
expect(
resolveFeishuReasoningPreviewEnabled({
cfg,
agentId: "main",
storePath: "/tmp/feishu-sessions.json",
sessionKey: "agent:main:feishu:dm:ou_sender_2",
}),
).toBe(false);
});
});

View File

@@ -1,20 +1,28 @@
import { resolveFeishuConfigReasoningDefault } from "./agent-config.js";
import { loadSessionStore, resolveSessionStoreEntry } from "./bot-runtime-api.js";
import type { ClawdbotConfig } from "./bot-runtime-api.js";
/**
 * Decide whether the Feishu reasoning preview should stream for a session.
 *
 * Precedence: an explicit per-session `reasoningLevel` override wins; when
 * the session has no valid override (or no session key is given), the
 * configured default (per-agent, then global, then "off") decides. Only the
 * "stream" level enables the preview.
 *
 * NOTE: the rendered diff showed stale pre-change lines (`return false;`
 * and the old single-expression return) interleaved with the new logic,
 * leaving the config-default fallback unreachable; this is the coherent
 * post-change version.
 */
export function resolveFeishuReasoningPreviewEnabled(params: {
  cfg: ClawdbotConfig;
  agentId: string;
  storePath: string;
  sessionKey?: string;
}): boolean {
  const configDefault = resolveFeishuConfigReasoningDefault(params.cfg, params.agentId);
  if (!params.sessionKey) {
    // No session to consult: configuration alone decides.
    return configDefault === "stream";
  }
  try {
    const store = loadSessionStore(params.storePath, { skipCache: true });
    const level = resolveSessionStoreEntry({ store, sessionKey: params.sessionKey })
      .existing?.reasoningLevel;
    // Only a recognized level counts as an explicit session override.
    if (level === "on" || level === "stream" || level === "off") {
      return level === "stream";
    }
  } catch {
    // Unreadable session store: stay conservative and suppress the preview.
    return false;
  }
  // No (valid) override in the session: fall back to the configured default.
  return configDefault === "stream";
}

View File

@@ -0,0 +1,21 @@
import type { OpenClawConfig } from "openclaw/plugin-sdk/config-types";
/** Reasoning levels a Telegram channel can default to. */
type ReasoningDefault = "on" | "stream" | "off";

// Fallback agent id when the caller provides none.
const DEFAULT_AGENT_ID = "main";

/**
 * Trim and lowercase an agent id; empty or missing ids map to the
 * default agent id.
 */
function normalizeAgentId(value: string | undefined | null): string {
  const id = value?.trim().toLowerCase() ?? "";
  return id === "" ? DEFAULT_AGENT_ID : id;
}

/**
 * Look up the configured reasoning default for a Telegram agent.
 * Precedence: matching per-agent entry in `agents.list`, then
 * `agents.defaults.reasoningDefault`, then the hard fallback "off".
 */
export function resolveTelegramConfigReasoningDefault(
  cfg: OpenClawConfig,
  agentId: string,
): ReasoningDefault {
  const target = normalizeAgentId(agentId);
  const perAgent = cfg.agents?.list?.find(
    (candidate) => normalizeAgentId(candidate?.id) === target,
  )?.reasoningDefault;
  return perAgent ?? cfg.agents?.defaults?.reasoningDefault ?? "off";
}

View File

@@ -409,6 +409,16 @@ describe("dispatchTelegramMessage draft streaming", () => {
});
}
// Builds a dispatch context whose session-store entry ("s1") carries no
// explicit reasoningLevel, so the effective level must come from the
// configured per-agent/global defaults. Routes to agent "ops" — the config
// in the companion test lists it as "Ops", presumably exercising
// case-insensitive agent-id matching (TODO confirm against agent-config).
function createReasoningDefaultContext(): TelegramMessageContext {
loadSessionStore.mockReturnValue({
s1: {},
});
return createContext({
ctxPayload: { SessionKey: "s1" } as unknown as TelegramMessageContext["ctxPayload"],
route: { agentId: "ops" } as unknown as TelegramMessageContext["route"],
});
}
it("streams drafts in private threads and forwards thread id", async () => {
const draftStream = createDraftStream();
createTelegramDraftStream.mockReturnValue(draftStream);
@@ -1149,6 +1159,33 @@ describe("dispatchTelegramMessage draft streaming", () => {
expect(deliverReplies).not.toHaveBeenCalled();
});
it("streams reasoning from configured defaults", () => {
// Separate draft streams for the final answer (2001) and the reasoning
// preview (3001).
const { answerDraftStream, reasoningDraftStream } = setupDraftStreams({
answerMessageId: 2001,
reasoningMessageId: 3001,
});
// Simulate the model emitting a <think> block followed by the final answer.
dispatchReplyWithBufferedBlockDispatcher.mockImplementation(
async ({ dispatcherOptions, replyOptions }) => {
await replyOptions?.onReasoningStream?.({ text: "<think>Thinking</think>" });
await dispatcherOptions.deliver({ text: "Answer" }, { kind: "final" });
return { queuedFinal: true };
},
);
// Session has no override (see createReasoningDefaultContext); the "Ops"
// per-agent default of "stream" must beat the global "off" default.
await dispatchWithContext({
context: createReasoningDefaultContext(),
cfg: {
agents: {
defaults: { reasoningDefault: "off" },
list: [{ id: "Ops", reasoningDefault: "stream" }],
},
},
});
// Reasoning streams into its own draft; the answer streams separately.
expect(reasoningDraftStream.update).toHaveBeenCalledWith("Reasoning:\n_Thinking_");
expect(answerDraftStream.update).toHaveBeenCalledWith("Answer");
});
it("suppresses reasoning-only finals without raw text fallback", async () => {
setupDraftStreams({ answerMessageId: 2001, reasoningMessageId: 3001 });
dispatchReplyWithBufferedBlockDispatcher.mockImplementation(async ({ dispatcherOptions }) => {

View File

@@ -42,6 +42,7 @@ import {
logVerbose,
sleepWithAbort,
} from "openclaw/plugin-sdk/runtime-env";
import { resolveTelegramConfigReasoningDefault } from "./agent-config.js";
import type { TelegramBotDeps } from "./bot-deps.js";
import type { TelegramMessageContext } from "./bot-message-context.js";
import {
@@ -214,8 +215,9 @@ function resolveTelegramReasoningLevel(params: {
telegramDeps: TelegramBotDeps;
}): TelegramReasoningLevel {
const { cfg, sessionKey, agentId, telegramDeps } = params;
const configDefault = resolveTelegramConfigReasoningDefault(cfg, agentId);
if (!sessionKey) {
return "off";
return configDefault;
}
try {
const storePath = telegramDeps.resolveStorePath(cfg.session?.store, { agentId });
@@ -224,13 +226,13 @@ function resolveTelegramReasoningLevel(params: {
});
const entry = resolveSessionStoreEntry({ store, sessionKey }).existing;
const level = entry?.reasoningLevel;
if (level === "on" || level === "stream") {
if (level === "on" || level === "stream" || level === "off") {
return level;
}
} catch {
// Fall through to default.
return "off";
}
return "off";
return configDefault;
}
const MAX_PROGRESS_MARKDOWN_TEXT_CHARS = 300;