fix(telegram): keep partial stream previews in one message

This commit is contained in:
Ayaan Zaidi
2026-02-17 12:04:22 +05:30
parent 5649e403df
commit 1a1d0088ad
2 changed files with 47 additions and 9 deletions

View File

@@ -343,6 +343,25 @@ describe("dispatchTelegramMessage draft streaming", () => {
expect(draftStream.forceNewMessage).toHaveBeenCalled();
});
it("does not force new message in partial mode when assistant message restarts", async () => {
  const stream = createDraftStream(999);
  createTelegramDraftStream.mockReturnValue(stream);
  // Simulate a tool-call boundary: partial text, assistant restart, more
  // partial text, then the final delivery.
  dispatchReplyWithBufferedBlockDispatcher.mockImplementation(async (args) => {
    const { dispatcherOptions, replyOptions } = args;
    await replyOptions?.onPartialReply?.({ text: "First response" });
    await replyOptions?.onAssistantMessageStart?.();
    await replyOptions?.onPartialReply?.({ text: "After tool call" });
    await dispatcherOptions.deliver({ text: "After tool call" }, { kind: "final" });
    return { queuedFinal: true };
  });
  deliverReplies.mockResolvedValue({ delivered: true });
  await dispatchWithContext({ context: createContext(), streamMode: "partial" });
  // Partial mode must keep editing the same preview message.
  expect(stream.forceNewMessage).not.toHaveBeenCalled();
});
it("does not force new message on first assistant message start", async () => {
const draftStream = createDraftStream(999);
createTelegramDraftStream.mockReturnValue(draftStream);
@@ -390,6 +409,25 @@ describe("dispatchTelegramMessage draft streaming", () => {
expect(draftStream.forceNewMessage).toHaveBeenCalled();
});
it("does not force new message in partial mode when reasoning ends", async () => {
  const stream = createDraftStream(999);
  createTelegramDraftStream.mockReturnValue(stream);
  // Simulate a thinking block: partial text, reasoning end, answer text,
  // then the final delivery.
  dispatchReplyWithBufferedBlockDispatcher.mockImplementation(async (args) => {
    const { dispatcherOptions, replyOptions } = args;
    await replyOptions?.onPartialReply?.({ text: "Let me check" });
    await replyOptions?.onReasoningEnd?.();
    await replyOptions?.onPartialReply?.({ text: "Here's the answer" });
    await dispatcherOptions.deliver({ text: "Here's the answer" }, { kind: "final" });
    return { queuedFinal: true };
  });
  deliverReplies.mockResolvedValue({ delivered: true });
  await dispatchWithContext({ context: createContext(), streamMode: "partial" });
  // A reasoning boundary must not split the preview in partial mode.
  expect(stream.forceNewMessage).not.toHaveBeenCalled();
});
it("does not force new message on reasoning end without previous output", async () => {
const draftStream = createDraftStream(999);
createTelegramDraftStream.mockReturnValue(draftStream);

View File

@@ -113,6 +113,7 @@ export const dispatchTelegramMessage = async ({
draftStream && streamMode === "block"
? resolveTelegramDraftStreamingChunking(cfg, route.accountId)
: undefined;
const shouldSplitPreviewMessages = streamMode === "block";
const draftChunker = draftChunking ? new EmbeddedBlockChunker(draftChunking) : undefined;
const mediaLocalRoots = getAgentScopedMediaLocalRoots(cfg, route.agentId);
let lastPartialText = "";
@@ -424,13 +425,12 @@ export const dispatchTelegramMessage = async ({
onPartialReply: draftStream ? (payload) => updateDraftFromPartial(payload.text) : undefined,
onAssistantMessageStart: draftStream
? () => {
// When a new assistant message starts (e.g., after tool call),
// force a new Telegram message if we have previous content.
// Only force once per response to avoid excessive splitting.
// Only split preview bubbles in block mode. In partial mode, keep
// editing one preview message to avoid flooding the chat.
logVerbose(
`telegram: onAssistantMessageStart called, hasStreamedMessage=${hasStreamedMessage}`,
);
if (hasStreamedMessage) {
if (shouldSplitPreviewMessages && hasStreamedMessage) {
logVerbose(`telegram: calling forceNewMessage()`);
draftStream.forceNewMessage();
}
@@ -441,13 +441,13 @@ export const dispatchTelegramMessage = async ({
: undefined,
onReasoningEnd: draftStream
? () => {
// When a thinking block ends, force a new Telegram message for the next text output.
if (hasStreamedMessage) {
// Same policy as assistant-message boundaries: split only in block mode.
if (shouldSplitPreviewMessages && hasStreamedMessage) {
draftStream.forceNewMessage();
lastPartialText = "";
draftText = "";
draftChunker?.reset();
}
lastPartialText = "";
draftText = "";
draftChunker?.reset();
}
: undefined,
onModelSelected,