diff --git a/CHANGELOG.md b/CHANGELOG.md
index cd1bbd3815f..2cce549543a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@ Docs: https://docs.openclaw.ai
### Changes
+- Telegram/Feishu: honor configured per-agent and global `reasoningDefault` values when deciding whether channel reasoning previews should stream or stay hidden, addressing the preview-default part of #73182. Thanks @anagnorisis2peripeteia.
- Docker: run the runtime image under `tini` so long-lived containers reap orphaned child processes and forward signals correctly. (#77885) Thanks @VintageAyu.
- Google/Gemini: normalize retired `google/gemini-3-pro-preview` and `google-gemini-cli/gemini-3-pro-preview` selections to `google/gemini-3.1-pro-preview` before they are written to model config.
- Amazon Bedrock: support `serviceTier` parameter for Bedrock models, configurable via `agents.defaults.params.serviceTier` or per-model in `agents.defaults.models`. Valid values: `default`, `flex`, `priority`, `reserved`. (#64512) Thanks @mobilinkd.
diff --git a/docs/tools/thinking.md b/docs/tools/thinking.md
index 1ad5e01eac3..0968bf25ea4 100644
--- a/docs/tools/thinking.md
+++ b/docs/tools/thinking.md
@@ -106,7 +106,7 @@ title: "Thinking levels"
- `stream` (Telegram only): streams reasoning into the Telegram draft bubble while the reply is generating, then sends the final answer without reasoning.
- Alias: `/reason`.
- Send `/reasoning` (or `/reasoning:`) with no argument to see the current reasoning level.
-- Resolution order: inline directive, then session override, then per-agent default (`agents.list[].reasoningDefault`), then fallback (`off`).
+- Resolution order: inline directive, then session override, then per-agent default (`agents.list[].reasoningDefault`), then global default (`agents.defaults.reasoningDefault`), then fallback (`off`).
Malformed local-model reasoning tags are handled conservatively. Closed `<think>...</think>` blocks stay hidden on normal replies, and unclosed reasoning after already visible text is also hidden. If a reply is fully wrapped in a single unclosed opening tag and would otherwise deliver as empty text, OpenClaw removes the malformed opening tag and delivers the remaining text.
diff --git a/extensions/feishu/src/agent-config.ts b/extensions/feishu/src/agent-config.ts
new file mode 100644
index 00000000000..ca5ab8ea810
--- /dev/null
+++ b/extensions/feishu/src/agent-config.ts
@@ -0,0 +1,21 @@
+import type { ClawdbotConfig } from "./bot-runtime-api.js";
+
+type ReasoningDefault = "on" | "stream" | "off";
+
+const DEFAULT_AGENT_ID = "main";
+
+function normalizeAgentId(value: string | undefined | null): string {
+ const normalized = (value ?? "").trim().toLowerCase();
+ return normalized || DEFAULT_AGENT_ID;
+}
+
+export function resolveFeishuConfigReasoningDefault(
+ cfg: ClawdbotConfig,
+ agentId: string,
+): ReasoningDefault {
+ const id = normalizeAgentId(agentId);
+ const agentDefault = cfg.agents?.list?.find(
+ (entry) => normalizeAgentId(entry?.id) === id,
+ )?.reasoningDefault;
+ return agentDefault ?? cfg.agents?.defaults?.reasoningDefault ?? "off";
+}
diff --git a/extensions/feishu/src/bot.ts b/extensions/feishu/src/bot.ts
index 4f7376cb55f..52920116c48 100644
--- a/extensions/feishu/src/bot.ts
+++ b/extensions/feishu/src/bot.ts
@@ -1357,6 +1357,8 @@ export async function handleFeishuMessage(params: {
},
};
const allowReasoningPreview = resolveFeishuReasoningPreviewEnabled({
+ cfg,
+ agentId,
storePath: agentStorePath,
sessionKey: agentSessionKey,
});
@@ -1532,6 +1534,8 @@ export async function handleFeishuMessage(params: {
agentId: route.agentId,
});
const allowReasoningPreview = resolveFeishuReasoningPreviewEnabled({
+ cfg,
+ agentId: route.agentId,
storePath,
sessionKey: route.sessionKey,
});
diff --git a/extensions/feishu/src/reasoning-preview.test.ts b/extensions/feishu/src/reasoning-preview.test.ts
index c6bf99c9b2a..49f6b8e798c 100644
--- a/extensions/feishu/src/reasoning-preview.test.ts
+++ b/extensions/feishu/src/reasoning-preview.test.ts
@@ -1,4 +1,5 @@
import { afterAll, beforeEach, describe, expect, it, vi } from "vitest";
+import type { ClawdbotConfig } from "./bot-runtime-api.js";
import { resolveFeishuReasoningPreviewEnabled } from "./reasoning-preview.js";
const { loadSessionStoreMock } = vi.hoisted(() => ({
@@ -20,6 +21,8 @@ afterAll(() => {
});
describe("resolveFeishuReasoningPreviewEnabled", () => {
+ const emptyCfg: ClawdbotConfig = {};
+
beforeEach(() => {
vi.clearAllMocks();
});
@@ -32,12 +35,16 @@ describe("resolveFeishuReasoningPreviewEnabled", () => {
expect(
resolveFeishuReasoningPreviewEnabled({
+ cfg: emptyCfg,
+ agentId: "main",
storePath: "/tmp/feishu-sessions.json",
sessionKey: "agent:main:feishu:dm:ou_sender_1",
}),
).toBe(true);
expect(
resolveFeishuReasoningPreviewEnabled({
+ cfg: emptyCfg,
+ agentId: "main",
storePath: "/tmp/feishu-sessions.json",
sessionKey: "agent:main:feishu:dm:ou_sender_2",
}),
@@ -51,14 +58,56 @@ describe("resolveFeishuReasoningPreviewEnabled", () => {
expect(
resolveFeishuReasoningPreviewEnabled({
+ cfg: emptyCfg,
+ agentId: "main",
storePath: "/tmp/feishu-sessions.json",
sessionKey: "agent:main:feishu:dm:ou_sender_1",
}),
).toBe(false);
expect(
resolveFeishuReasoningPreviewEnabled({
+ cfg: emptyCfg,
+ agentId: "main",
storePath: "/tmp/feishu-sessions.json",
}),
).toBe(false);
});
+
+ it("falls back to configured stream defaults", () => {
+ loadSessionStoreMock.mockReturnValue({
+ "agent:main:feishu:dm:ou_sender_1": {},
+ "agent:main:feishu:dm:ou_sender_2": { reasoningLevel: "off" },
+ });
+
+ const cfg: ClawdbotConfig = {
+ agents: {
+ defaults: { reasoningDefault: "stream" },
+ list: [{ id: "Ops", reasoningDefault: "off" }],
+ },
+ };
+
+ expect(
+ resolveFeishuReasoningPreviewEnabled({
+ cfg,
+ agentId: "main",
+ storePath: "/tmp/feishu-sessions.json",
+ sessionKey: "agent:main:feishu:dm:ou_sender_1",
+ }),
+ ).toBe(true);
+ expect(
+ resolveFeishuReasoningPreviewEnabled({
+ cfg,
+ agentId: "ops",
+ storePath: "/tmp/feishu-sessions.json",
+ }),
+ ).toBe(false);
+ expect(
+ resolveFeishuReasoningPreviewEnabled({
+ cfg,
+ agentId: "main",
+ storePath: "/tmp/feishu-sessions.json",
+ sessionKey: "agent:main:feishu:dm:ou_sender_2",
+ }),
+ ).toBe(false);
+ });
});
diff --git a/extensions/feishu/src/reasoning-preview.ts b/extensions/feishu/src/reasoning-preview.ts
index 4f752b840a4..93ecccc4591 100644
--- a/extensions/feishu/src/reasoning-preview.ts
+++ b/extensions/feishu/src/reasoning-preview.ts
@@ -1,20 +1,28 @@
+import { resolveFeishuConfigReasoningDefault } from "./agent-config.js";
import { loadSessionStore, resolveSessionStoreEntry } from "./bot-runtime-api.js";
+import type { ClawdbotConfig } from "./bot-runtime-api.js";
export function resolveFeishuReasoningPreviewEnabled(params: {
+ cfg: ClawdbotConfig;
+ agentId: string;
storePath: string;
sessionKey?: string;
}): boolean {
+ const configDefault = resolveFeishuConfigReasoningDefault(params.cfg, params.agentId);
+
if (!params.sessionKey) {
- return false;
+ return configDefault === "stream";
}
try {
const store = loadSessionStore(params.storePath, { skipCache: true });
- return (
- resolveSessionStoreEntry({ store, sessionKey: params.sessionKey }).existing
- ?.reasoningLevel === "stream"
- );
+ const level = resolveSessionStoreEntry({ store, sessionKey: params.sessionKey }).existing
+ ?.reasoningLevel;
+ if (level === "on" || level === "stream" || level === "off") {
+ return level === "stream";
+ }
} catch {
return false;
}
+ return configDefault === "stream";
}
diff --git a/extensions/telegram/src/agent-config.ts b/extensions/telegram/src/agent-config.ts
new file mode 100644
index 00000000000..74cb9da2a89
--- /dev/null
+++ b/extensions/telegram/src/agent-config.ts
@@ -0,0 +1,21 @@
+import type { OpenClawConfig } from "openclaw/plugin-sdk/config-types";
+
+type ReasoningDefault = "on" | "stream" | "off";
+
+const DEFAULT_AGENT_ID = "main";
+
+function normalizeAgentId(value: string | undefined | null): string {
+ const normalized = (value ?? "").trim().toLowerCase();
+ return normalized || DEFAULT_AGENT_ID;
+}
+
+export function resolveTelegramConfigReasoningDefault(
+ cfg: OpenClawConfig,
+ agentId: string,
+): ReasoningDefault {
+ const id = normalizeAgentId(agentId);
+ const agentDefault = cfg.agents?.list?.find(
+ (entry) => normalizeAgentId(entry?.id) === id,
+ )?.reasoningDefault;
+ return agentDefault ?? cfg.agents?.defaults?.reasoningDefault ?? "off";
+}
diff --git a/extensions/telegram/src/bot-message-dispatch.test.ts b/extensions/telegram/src/bot-message-dispatch.test.ts
index 5f1e22d75af..f824b407ca3 100644
--- a/extensions/telegram/src/bot-message-dispatch.test.ts
+++ b/extensions/telegram/src/bot-message-dispatch.test.ts
@@ -409,6 +409,16 @@ describe("dispatchTelegramMessage draft streaming", () => {
});
}
+ function createReasoningDefaultContext(): TelegramMessageContext {
+ loadSessionStore.mockReturnValue({
+ s1: {},
+ });
+ return createContext({
+ ctxPayload: { SessionKey: "s1" } as unknown as TelegramMessageContext["ctxPayload"],
+ route: { agentId: "ops" } as unknown as TelegramMessageContext["route"],
+ });
+ }
+
it("streams drafts in private threads and forwards thread id", async () => {
const draftStream = createDraftStream();
createTelegramDraftStream.mockReturnValue(draftStream);
@@ -1149,6 +1159,33 @@ describe("dispatchTelegramMessage draft streaming", () => {
expect(deliverReplies).not.toHaveBeenCalled();
});
+ it("streams reasoning from configured defaults", async () => {
+ const { answerDraftStream, reasoningDraftStream } = setupDraftStreams({
+ answerMessageId: 2001,
+ reasoningMessageId: 3001,
+ });
+ dispatchReplyWithBufferedBlockDispatcher.mockImplementation(
+ async ({ dispatcherOptions, replyOptions }) => {
+ await replyOptions?.onReasoningStream?.({ text: "Thinking" });
+ await dispatcherOptions.deliver({ text: "Answer" }, { kind: "final" });
+ return { queuedFinal: true };
+ },
+ );
+
+ await dispatchWithContext({
+ context: createReasoningDefaultContext(),
+ cfg: {
+ agents: {
+ defaults: { reasoningDefault: "off" },
+ list: [{ id: "Ops", reasoningDefault: "stream" }],
+ },
+ },
+ });
+
+ expect(reasoningDraftStream.update).toHaveBeenCalledWith("Reasoning:\n_Thinking_");
+ expect(answerDraftStream.update).toHaveBeenCalledWith("Answer");
+ });
+
it("suppresses reasoning-only finals without raw text fallback", async () => {
setupDraftStreams({ answerMessageId: 2001, reasoningMessageId: 3001 });
dispatchReplyWithBufferedBlockDispatcher.mockImplementation(async ({ dispatcherOptions }) => {
diff --git a/extensions/telegram/src/bot-message-dispatch.ts b/extensions/telegram/src/bot-message-dispatch.ts
index a61bf47adf0..5cb0388ca5c 100644
--- a/extensions/telegram/src/bot-message-dispatch.ts
+++ b/extensions/telegram/src/bot-message-dispatch.ts
@@ -42,6 +42,7 @@ import {
logVerbose,
sleepWithAbort,
} from "openclaw/plugin-sdk/runtime-env";
+import { resolveTelegramConfigReasoningDefault } from "./agent-config.js";
import type { TelegramBotDeps } from "./bot-deps.js";
import type { TelegramMessageContext } from "./bot-message-context.js";
import {
@@ -214,8 +215,9 @@ function resolveTelegramReasoningLevel(params: {
telegramDeps: TelegramBotDeps;
}): TelegramReasoningLevel {
const { cfg, sessionKey, agentId, telegramDeps } = params;
+ const configDefault = resolveTelegramConfigReasoningDefault(cfg, agentId);
if (!sessionKey) {
- return "off";
+ return configDefault;
}
try {
const storePath = telegramDeps.resolveStorePath(cfg.session?.store, { agentId });
@@ -224,13 +226,13 @@ function resolveTelegramReasoningLevel(params: {
});
const entry = resolveSessionStoreEntry({ store, sessionKey }).existing;
const level = entry?.reasoningLevel;
- if (level === "on" || level === "stream") {
+ if (level === "on" || level === "stream" || level === "off") {
return level;
}
} catch {
- // Fall through to default.
+ return "off";
}
- return "off";
+ return configDefault;
}
const MAX_PROGRESS_MARKDOWN_TEXT_CHARS = 300;