diff --git a/CHANGELOG.md b/CHANGELOG.md index 005bf63c018..4becf532790 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -136,6 +136,7 @@ Docs: https://docs.openclaw.ai - Heartbeat: skip wake delivery when the target session lane is already busy so the pending event is retried instead of getting drained too early. (#40526) Thanks @lucky7323. - Plugin SDK/context engines: export the missing context-engine result and subagent lifecycle types from `openclaw/plugin-sdk` so context engine plugins can type `ContextEngine` implementations without local workarounds. (#61251) Thanks @DaevMithran. - Agents/errors: surface an explicit disk-full message when local session or transcript writes fail with `ENOSPC`/`disk full`, so those runs stop degrading into opaque `NO_REPLY`-style failures. Thanks @vincentkoc. +- Telegram/reasoning: only create a Telegram reasoning preview lane when the session is explicitly `reasoning:stream`, so hidden reasoning traces from streamed replies stop surfacing as chat previews on normal sessions. Thanks @vincentkoc. 
## 2026.4.2 diff --git a/extensions/telegram/src/bot-message-dispatch.test.ts b/extensions/telegram/src/bot-message-dispatch.test.ts index 70035d74198..0b7ae188f19 100644 --- a/extensions/telegram/src/bot-message-dispatch.test.ts +++ b/extensions/telegram/src/bot-message-dispatch.test.ts @@ -539,6 +539,25 @@ describe("dispatchTelegramMessage draft streaming", () => { expect(loadSessionStore).toHaveBeenCalledWith("/tmp/sessions.json", { skipCache: true }); }); + it("does not expose reasoning preview callbacks unless session reasoning is stream", async () => { + let seenReasoningCallback: unknown; + const answerDraftStream = createDraftStream(999); + createTelegramDraftStream.mockImplementationOnce(() => answerDraftStream); + dispatchReplyWithBufferedBlockDispatcher.mockImplementation(async ({ replyOptions }) => { + seenReasoningCallback = replyOptions?.onReasoningStream; + await replyOptions?.onPartialReply?.({ + text: "internal chain of thoughtVisible answer", + }); + return { queuedFinal: false }; + }); + + await dispatchWithContext({ context: createContext(), streamMode: "partial" }); + + expect(seenReasoningCallback).toBeUndefined(); + expect(createTelegramDraftStream).toHaveBeenCalledTimes(1); + expect(answerDraftStream.update).toHaveBeenCalledWith("Visible answer"); + }); + it("does not overwrite finalized preview when additional final payloads are sent", async () => { const draftStream = createDraftStream(999); createTelegramDraftStream.mockReturnValue(draftStream); diff --git a/extensions/telegram/src/bot-message-dispatch.ts b/extensions/telegram/src/bot-message-dispatch.ts index 93ab6f0c2ae..ed5d3653fe1 100644 --- a/extensions/telegram/src/bot-message-dispatch.ts +++ b/extensions/telegram/src/bot-message-dispatch.ts @@ -213,7 +213,7 @@ export const dispatchTelegramMessage = async ({ const previewStreamingEnabled = streamMode !== "off"; const canStreamAnswerDraft = previewStreamingEnabled && !accountBlockStreamingEnabled && !forceBlockStreamingForReasoning; 
- const canStreamReasoningDraft = canStreamAnswerDraft || streamReasoningDraft; + const canStreamReasoningDraft = streamReasoningDraft; const draftReplyToMessageId = replyToMode !== "off" && typeof msg.message_id === "number" ? msg.message_id : undefined; const draftMinInitialChars = DRAFT_MIN_INITIAL_CHARS;