fix(telegram): reuse preview for long text finals (#77658)

* fix(telegram): reuse preview for long text finals

* test(qa): cover long telegram finals

* fix(qa): satisfy extension lint

* fix(qa): keep telegram long final fixture to two chunks

* test(telegram): cover three chunk finals

* fix(telegram): force long final preview boundary
This commit is contained in:
Vincent Koc
2026-05-04 21:19:44 -07:00
committed by GitHub
parent 3290cba1a6
commit e03fe1e289
12 changed files with 671 additions and 6 deletions

View File

@@ -73,6 +73,7 @@ Docs: https://docs.openclaw.ai
- Agents/tools: honor narrow runtime tool allowlists when constructing embedded-runner tool families and bundled MCP/LSP runtimes, so cron/subagent runs that request tools such as `update_plan`, `browser`, `x_search`, channel login tools, or `group:plugins` no longer start with missing tools or unrelated bootstrap work. (#77519, #77532)
- Codex plugin: mirror the experimental upstream app-server protocol and format generated TypeScript before drift checks, keeping OpenClaw's `experimentalApi` bridge compatible with latest Codex while preserving formatter gates.
- Telegram/media: derive no-caption inbound media placeholders from saved MIME metadata instead of the Telegram `photo` shape, so non-image and mixed attachments no longer reach the model as `<media:image>`. Fixes #69793. Thanks @aspalagin.
- Telegram/streaming: reuse the active preview as the first chunk for long text finals, so multi-chunk replies no longer create a transient extra bubble that appears and then disappears. Thanks @vincentkoc.
- Agents/cache: keep per-turn runtime context out of ordinary chat system prompts while still delivering hidden current-turn context, restoring prompt-cache reuse on chat continuations. Fixes #77431. Thanks @Udjin79.
- Gateway/startup: include resolved thinking and fast-mode defaults in the `agent model` startup log line, defaulting unset startup thinking to `medium` without mixing in reasoning visibility.
- Gateway/update: resolve local gateway probe auth from the installed config during post-update restart verification, so token/device-authenticated VPS gateways are not misreported as unhealthy port conflicts after a package swap. Thanks @vincentkoc.

View File

@@ -344,6 +344,7 @@ curl "https://api.telegram.org/bot<bot_token>/getUpdates"
For text-only replies:
- short DM/group/topic previews: OpenClaw keeps the same preview message and performs a final edit in place, unless a visible non-preview message was sent after the preview appeared
- long text finals that split into multiple Telegram messages reuse the existing preview as the first final chunk when possible, then send only the remaining chunks
- previews followed by visible non-preview output: OpenClaw sends the completed reply as a fresh final message and cleans up the older preview, so the final answer appears after intermediate output
- previews older than about one minute: OpenClaw sends the completed reply as a fresh final message and then cleans up the preview, so Telegram's visible timestamp reflects completion time instead of the preview creation time

View File

@@ -232,6 +232,8 @@ Scenarios (`extensions/qa-lab/src/live-transports/telegram/telegram-live.runtime
- `telegram-tools-compact-command`
- `telegram-whoami-command`
- `telegram-context-command`
- `telegram-long-final-reuses-preview`
- `telegram-long-final-three-chunks`
Output artifacts:

View File

@@ -2236,6 +2236,8 @@ export function createDiagnosticsOtelService(): OpenClawPluginService {
return;
case "session.long_running":
case "session.stalled":
case "session.recovery.completed":
case "session.recovery.requested":
return;
case "session.stuck":
recordSessionStuck(evt);

View File

@@ -333,6 +333,8 @@ describe("telegram live qa runtime", () => {
"telegram-context-command",
"telegram-current-session-status-tool",
"telegram-mentioned-message-reply",
"telegram-long-final-reuses-preview",
"telegram-long-final-three-chunks",
"telegram-mention-gating",
]);
expect(scenarios.map((scenario) => scenario.id)).toEqual([
@@ -343,6 +345,8 @@ describe("telegram live qa runtime", () => {
"telegram-context-command",
"telegram-current-session-status-tool",
"telegram-mentioned-message-reply",
"telegram-long-final-reuses-preview",
"telegram-long-final-three-chunks",
"telegram-mention-gating",
]);
expect(
@@ -355,6 +359,25 @@ describe("telegram live qa runtime", () => {
.find((scenario) => scenario.id === "telegram-mentioned-message-reply")
?.buildRun("sut_bot").replyToLatestSutMessage,
).toBe(true);
expect(
scenarios
.find((scenario) => scenario.id === "telegram-long-final-reuses-preview")
?.buildRun("sut_bot"),
).toMatchObject({
expectedJoinedSutTextIncludes: ["TELEGRAM-LONG-FINAL-BEGIN", "TELEGRAM-LONG-FINAL-END"],
expectedSutMessageCount: 2,
});
expect(
scenarios
.find((scenario) => scenario.id === "telegram-long-final-three-chunks")
?.buildRun("sut_bot"),
).toMatchObject({
expectedJoinedSutTextIncludes: [
"TELEGRAM-LONG-FINAL-3CHUNK-BEGIN",
"TELEGRAM-LONG-FINAL-3CHUNK-END",
],
expectedSutMessageCount: 3,
});
});
it("keeps bot-to-bot plain mentions out of the default Telegram live set", () => {
@@ -382,6 +405,160 @@ describe("telegram live qa runtime", () => {
).toEqual(["allowlist-block", "top-level-reply-shape", "restart-resume"]);
});
it("asserts long Telegram final replies reuse the streamed preview message", () => {
expect(() =>
__testing.assertTelegramScenarioMessageSet({
expectedJoinedSutTextIncludes: ["TELEGRAM-LONG-FINAL-BEGIN", "TELEGRAM-LONG-FINAL-END"],
expectedSutMessageCount: 2,
groupId: "-100123",
scenarioId: "telegram-long-final-reuses-preview",
sutBotId: 99,
observedMessages: [
{
updateId: 1,
messageId: 10,
chatId: -100123,
senderId: 99,
senderIsBot: true,
scenarioId: "telegram-long-final-reuses-preview",
scenarioTitle: "Telegram long final reuses the preview message",
matchedScenario: true,
text: "TELEGRAM-LONG-FINAL-BEGIN part one ",
timestamp: 1_700_000_000_000,
inlineButtons: [],
mediaKinds: [],
},
{
updateId: 2,
messageId: 11,
chatId: -100123,
senderId: 99,
senderIsBot: true,
scenarioId: "telegram-long-final-reuses-preview",
scenarioTitle: "Telegram long final reuses the preview message",
matchedScenario: true,
text: "part two TELEGRAM-LONG-FINAL-END",
timestamp: 1_700_000_001_000,
inlineButtons: [],
mediaKinds: [],
},
],
}),
).not.toThrow();
expect(() =>
__testing.assertTelegramScenarioMessageSet({
expectedSutMessageCount: 2,
groupId: "-100123",
scenarioId: "telegram-long-final-reuses-preview",
sutBotId: 99,
observedMessages: [
{
updateId: 1,
messageId: 10,
chatId: -100123,
senderId: 99,
senderIsBot: true,
scenarioId: "telegram-long-final-reuses-preview",
scenarioTitle: "Telegram long final reuses the preview message",
matchedScenario: true,
text: "preview",
timestamp: 1_700_000_000_000,
inlineButtons: [],
mediaKinds: [],
},
{
updateId: 2,
messageId: 11,
chatId: -100123,
senderId: 99,
senderIsBot: true,
scenarioId: "telegram-long-final-reuses-preview",
scenarioTitle: "Telegram long final reuses the preview message",
matchedScenario: true,
text: "final chunk one",
timestamp: 1_700_000_001_000,
inlineButtons: [],
mediaKinds: [],
},
{
updateId: 3,
messageId: 12,
chatId: -100123,
senderId: 99,
senderIsBot: true,
scenarioId: "telegram-long-final-reuses-preview",
scenarioTitle: "Telegram long final reuses the preview message",
matchedScenario: true,
text: "final chunk two",
timestamp: 1_700_000_002_000,
inlineButtons: [],
mediaKinds: [],
},
],
}),
).toThrow("expected 2 SUT message(s), observed 3");
});
it("accepts legitimate three-chunk Telegram final replies", () => {
expect(() =>
__testing.assertTelegramScenarioMessageSet({
expectedJoinedSutTextIncludes: [
"TELEGRAM-LONG-FINAL-3CHUNK-BEGIN",
"TELEGRAM-LONG-FINAL-3CHUNK-END",
],
expectedSutMessageCount: 3,
groupId: "-100123",
scenarioId: "telegram-long-final-three-chunks",
sutBotId: 99,
observedMessages: [
{
updateId: 1,
messageId: 10,
chatId: -100123,
senderId: 99,
senderIsBot: true,
scenarioId: "telegram-long-final-three-chunks",
scenarioTitle: "Telegram three-chunk final keeps only final chunks",
matchedScenario: true,
text: "TELEGRAM-LONG-FINAL-3CHUNK-BEGIN part one ",
timestamp: 1_700_000_000_000,
inlineButtons: [],
mediaKinds: [],
},
{
updateId: 2,
messageId: 11,
chatId: -100123,
senderId: 99,
senderIsBot: true,
scenarioId: "telegram-long-final-three-chunks",
scenarioTitle: "Telegram three-chunk final keeps only final chunks",
matchedScenario: true,
text: "part two ",
timestamp: 1_700_000_001_000,
inlineButtons: [],
mediaKinds: [],
},
{
updateId: 3,
messageId: 12,
chatId: -100123,
senderId: 99,
senderIsBot: true,
scenarioId: "telegram-long-final-three-chunks",
scenarioTitle: "Telegram three-chunk final keeps only final chunks",
matchedScenario: true,
text: "part three TELEGRAM-LONG-FINAL-3CHUNK-END",
timestamp: 1_700_000_002_000,
inlineButtons: [],
mediaKinds: [],
},
],
}),
).not.toThrow();
});
it("matches scenario replies by thread or exact marker", () => {
expect(
__testing.matchesTelegramScenarioReply({

View File

@@ -48,6 +48,8 @@ type TelegramQaScenarioId =
| "telegram-whoami-command"
| "telegram-context-command"
| "telegram-current-session-status-tool"
| "telegram-long-final-three-chunks"
| "telegram-long-final-reuses-preview"
| "telegram-mentioned-message-reply"
| "telegram-mention-gating";
@@ -56,8 +58,11 @@ type TelegramQaScenarioRun = {
expectReply: boolean;
input: string;
expectedTextIncludes?: string[];
expectedJoinedSutTextIncludes?: string[];
expectedSutMessageCount?: number;
matchText?: string;
replyToLatestSutMessage?: boolean;
settleMs?: number;
};
type TelegramQaScenarioDefinition = LiveTransportScenarioDefinition<TelegramQaScenarioId> & {
@@ -295,6 +300,39 @@ const TELEGRAM_QA_SCENARIOS: TelegramQaScenarioDefinition[] = [
replyToLatestSutMessage: true,
}),
},
{
id: "telegram-long-final-reuses-preview",
title: "Telegram long final reuses the preview message",
defaultEnabled: false,
timeoutMs: 60_000,
buildRun: (sutUsername) => ({
allowAnySutReply: true,
expectReply: true,
input: `@${sutUsername} Telegram long final QA check. Use the scripted long final response.`,
expectedTextIncludes: ["TELEGRAM-LONG-FINAL-BEGIN"],
expectedJoinedSutTextIncludes: ["TELEGRAM-LONG-FINAL-BEGIN", "TELEGRAM-LONG-FINAL-END"],
expectedSutMessageCount: 2,
settleMs: 4_000,
}),
},
{
id: "telegram-long-final-three-chunks",
title: "Telegram three-chunk final keeps only final chunks",
defaultEnabled: false,
timeoutMs: 60_000,
buildRun: (sutUsername) => ({
allowAnySutReply: true,
expectReply: true,
input: `@${sutUsername} Telegram long final three chunk QA check. Use the scripted three chunk final response.`,
expectedTextIncludes: ["TELEGRAM-LONG-FINAL-3CHUNK-BEGIN"],
expectedJoinedSutTextIncludes: [
"TELEGRAM-LONG-FINAL-3CHUNK-BEGIN",
"TELEGRAM-LONG-FINAL-3CHUNK-END",
],
expectedSutMessageCount: 3,
settleMs: 4_000,
}),
},
{
id: "telegram-mention-gating",
standardId: "mention-gating",
@@ -744,6 +782,102 @@ async function waitForObservedMessage(params: {
throw new Error(timeoutMessage);
}
/**
 * Polls Telegram `getUpdates` for the remainder of a scenario's settle
 * window, appending every normalized message to the shared observation sink
 * tagged with the active scenario. Returns the next getUpdates offset so the
 * caller can resume polling without re-consuming acknowledged updates.
 */
async function collectObservedMessages(params: {
  token: string;
  initialOffset: number;
  settleMs: number;
  // Decides whether an observed message matches the scenario's reply shape.
  predicate: (message: TelegramObservedMessage) => boolean;
  // Shared sink; messages are appended in arrival order.
  observedMessages: TelegramObservedMessage[];
  observationScenarioId: string;
  observationScenarioTitle: string;
}) {
  const startedAt = Date.now();
  let offset = params.initialOffset;
  while (Date.now() - startedAt < params.settleMs) {
    // Clamp the long-poll timeout to 1-2s so the loop re-checks the settle
    // window frequently instead of blocking for its full remainder.
    const remainingMs = Math.max(1, params.settleMs - (Date.now() - startedAt));
    const timeoutSeconds = Math.max(1, Math.min(2, Math.ceil(remainingMs / 1000)));
    let updates: TelegramUpdate[];
    try {
      updates = await callTelegramApi<TelegramUpdate[]>(
        params.token,
        "getUpdates",
        {
          offset,
          timeout: timeoutSeconds,
          allowed_updates: ["message", "edited_message"],
        },
        // HTTP deadline comfortably exceeds Telegram's long-poll timeout.
        timeoutSeconds * 1000 + 5_000,
      );
    } catch (error) {
      if (!isRecoverableTelegramQaPollError(error)) {
        throw error;
      }
      // Transient poll failure: back off within the remaining window, retry.
      await waitForTelegramPollRetryDelay(params.settleMs - (Date.now() - startedAt));
      continue;
    }
    if (updates.length === 0) {
      continue;
    }
    // Advance past everything received so the next poll acknowledges it.
    offset = (updates.at(-1)?.update_id ?? offset) + 1;
    for (const update of updates) {
      const normalized = normalizeTelegramObservedMessage(update);
      if (!normalized) {
        continue;
      }
      params.observedMessages.push({
        ...normalized,
        scenarioId: params.observationScenarioId,
        scenarioTitle: params.observationScenarioTitle,
        matchedScenario: params.predicate(normalized),
      });
    }
  }
  return offset;
}
/**
 * Validates the set of SUT messages observed for one scenario: optionally
 * pins the exact message count and checks that required marker strings all
 * appear in the concatenated message text. No-op when neither expectation
 * is configured. Throws a descriptive Error on any mismatch.
 */
function assertTelegramScenarioMessageSet(params: {
  expectedJoinedSutTextIncludes?: string[];
  expectedSutMessageCount?: number;
  groupId: string;
  observedMessages: TelegramObservedMessage[];
  scenarioId: string;
  sutBotId: number;
}) {
  const expectedIncludes = params.expectedJoinedSutTextIncludes ?? [];
  if (params.expectedSutMessageCount === undefined && expectedIncludes.length === 0) {
    return;
  }
  const chatId = Number(params.groupId);
  // Dedupe by message id (edits re-deliver the same id); last observation wins.
  const deduped = new Map<number, TelegramObservedMessage>(
    params.observedMessages
      .filter(
        (message) =>
          message.scenarioId === params.scenarioId &&
          message.chatId === chatId &&
          message.senderId === params.sutBotId,
      )
      .map((message): [number, TelegramObservedMessage] => [message.messageId, message]),
  );
  const messages = [...deduped.values()].toSorted((a, b) => a.messageId - b.messageId);
  if (
    params.expectedSutMessageCount !== undefined &&
    messages.length !== params.expectedSutMessageCount
  ) {
    const observedIds = messages.map((message) => message.messageId).join(", ");
    throw new Error(
      `expected ${params.expectedSutMessageCount} SUT message(s), observed ${messages.length}: ${observedIds}`,
    );
  }
  // Chunks are concatenated without separators, matching how a long final is
  // split; each expected marker must survive somewhere in the joined text.
  const joinedText = messages.map((message) => message.text).join("");
  for (const expected of expectedIncludes) {
    if (!joinedText.includes(expected)) {
      throw new Error(`joined SUT reply text missing expected text: ${expected}`);
    }
  }
}
async function waitForTelegramChannelRunning(
gateway: Awaited<ReturnType<typeof startQaGatewayChild>>,
accountId: string,
@@ -1374,6 +1508,25 @@ export async function runTelegramQaLive(params: {
}),
});
driverOffset = matched.nextOffset;
if (scenarioRun.settleMs !== undefined) {
driverOffset = await collectObservedMessages({
token: runtimeEnv.driverToken,
initialOffset: driverOffset,
settleMs: scenarioRun.settleMs,
observedMessages,
observationScenarioId: scenario.id,
observationScenarioTitle: scenario.title,
predicate: (message) =>
matchesTelegramScenarioReply({
allowAnySutReply: scenarioRun.allowAnySutReply,
groupId: runtimeEnv.groupId,
matchText: scenarioRun.matchText,
message,
sentMessageId: sent.message_id,
sutBotId: sutIdentity.id,
}),
});
}
if (!scenarioRun.expectReply) {
throw new Error(`unexpected reply message ${matched.message.messageId} matched`);
}
@@ -1381,14 +1534,26 @@ export async function runTelegramQaLive(params: {
expectedTextIncludes: scenarioRun.expectedTextIncludes,
message: matched.message,
});
assertTelegramScenarioMessageSet({
expectedJoinedSutTextIncludes: scenarioRun.expectedJoinedSutTextIncludes,
expectedSutMessageCount: scenarioRun.expectedSutMessageCount,
groupId: runtimeEnv.groupId,
observedMessages,
scenarioId: scenario.id,
sutBotId: sutIdentity.id,
});
const rttMs = matched.observedAtMs - requestStartedAtMs;
const suffix =
scenarioRun.expectedSutMessageCount === undefined
? ""
: `; observed ${scenarioRun.expectedSutMessageCount} SUT message(s)`;
const result = {
id: scenario.id,
title: scenario.title,
status: "pass",
details: redactPublicMetadata
? `reply matched in ${rttMs}ms`
: `reply message ${matched.message.messageId} matched in ${rttMs}ms`,
? `reply matched in ${rttMs}ms${suffix}`
: `reply message ${matched.message.messageId} matched in ${rttMs}ms${suffix}`,
rttMs,
requestStartedAt,
responseObservedAt: new Date(matched.observedAtMs).toISOString(),
@@ -1565,6 +1730,7 @@ export const __testing = {
buildObservedMessagesArtifact,
canaryFailureMessage,
callTelegramApi,
assertTelegramScenarioMessageSet,
isRecoverableTelegramQaPollError,
assertTelegramScenarioReply,
classifyCanaryReply,

View File

@@ -221,6 +221,48 @@ describe("qa mock openai server", () => {
expect(partialBody).toContain('"type":"response.output_text.delta"');
expect(partialBody).toContain("QA_PARTIAL_OK");
const telegramLongResponse = await fetch(`${server.baseUrl}/v1/responses`, {
method: "POST",
headers: {
"content-type": "application/json",
},
body: JSON.stringify({
stream: true,
input: [
makeUserInput("Telegram long final QA check. Use the scripted long final response."),
],
}),
});
expect(telegramLongResponse.status).toBe(200);
const telegramLongBody = await telegramLongResponse.text();
expect(telegramLongBody).toContain('"type":"response.output_text.delta"');
expect(telegramLongBody).toContain('"phase":"final_answer"');
expect(telegramLongBody).toContain("TELEGRAM-LONG-FINAL-BEGIN");
expect(telegramLongBody).toContain("TELEGRAM-LONG-FINAL-END");
expect(telegramLongBody.length).toBeGreaterThan(4_500);
const telegramThreeChunkLongResponse = await fetch(`${server.baseUrl}/v1/responses`, {
method: "POST",
headers: {
"content-type": "application/json",
},
body: JSON.stringify({
stream: true,
input: [
makeUserInput(
"Telegram long final three chunk QA check. Use the scripted three chunk final response.",
),
],
}),
});
expect(telegramThreeChunkLongResponse.status).toBe(200);
const telegramThreeChunkLongBody = await telegramThreeChunkLongResponse.text();
expect(telegramThreeChunkLongBody).toContain('"type":"response.output_text.delta"');
expect(telegramThreeChunkLongBody).toContain('"phase":"final_answer"');
expect(telegramThreeChunkLongBody).toContain("TELEGRAM-LONG-FINAL-3CHUNK-BEGIN");
expect(telegramThreeChunkLongBody).toContain("TELEGRAM-LONG-FINAL-3CHUNK-END");
expect(telegramThreeChunkLongBody.length).toBeGreaterThan(8_000);
const blockResponse = await fetch(`${server.baseUrl}/v1/responses`, {
method: "POST",
headers: {

View File

@@ -153,6 +153,8 @@ const QA_GROUP_VISIBLE_REPLY_TOOL_PROMPT_RE = /qa group visible reply tool check
const QA_GROUP_MESSAGE_UNAVAILABLE_FALLBACK_PROMPT_RE =
/qa group message unavailable fallback check/i;
const QA_TELEGRAM_CURRENT_SESSION_STATUS_PROMPT_RE = /telegram current session_status qa check/i;
const QA_TELEGRAM_LONG_FINAL_THREE_CHUNK_PROMPT_RE = /telegram long final three chunk qa check/i;
const QA_TELEGRAM_LONG_FINAL_PROMPT_RE = /telegram long final qa check/i;
const QA_SUBAGENT_DIRECT_FALLBACK_PROMPT_RE = /subagent direct fallback qa check/i;
const QA_SUBAGENT_DIRECT_FALLBACK_WORKER_RE = /subagent direct fallback worker/i;
const QA_SUBAGENT_DIRECT_FALLBACK_MARKER = "QA-SUBAGENT-DIRECT-FALLBACK-OK";
@@ -1034,6 +1036,23 @@ function splitMockStreamingText(text: string, parts = 3) {
return chunks.length > 1 ? chunks : [text.slice(0, 1), text.slice(1)];
}
/**
 * Builds a deterministic multi-kilobyte "final answer" fixture: a start
 * marker line, `segmentCount` numbered filler lines, and an end marker line,
 * newline-separated. Used to force the Telegram transport to split the final
 * reply into multiple messages.
 */
function buildTelegramLongFinalText({
  endMarker = "TELEGRAM-LONG-FINAL-END",
  segmentCount = 54,
  startMarker = "TELEGRAM-LONG-FINAL-BEGIN",
}: {
  endMarker?: string;
  segmentCount?: number;
  startMarker?: string;
} = {}) {
  const filler = "x".repeat(54);
  const lines: string[] = [startMarker];
  for (let segment = 1; segment <= segmentCount; segment += 1) {
    // Zero-padded label keeps every segment line the same width.
    const label = String(segment).padStart(3, "0");
    lines.push(`telegram-long-final-segment-${label} ${filler}`);
  }
  lines.push(endMarker);
  return lines.join("\n");
}
function buildAssistantOutputItem(spec: MockAssistantMessageSpec) {
return {
type: "message",
@@ -1310,6 +1329,32 @@ async function buildResponsesPayload(
}
return buildAssistantEvents("");
}
if (QA_TELEGRAM_LONG_FINAL_THREE_CHUNK_PROMPT_RE.test(allInputText)) {
const text = buildTelegramLongFinalText({
endMarker: "TELEGRAM-LONG-FINAL-3CHUNK-END",
segmentCount: 96,
startMarker: "TELEGRAM-LONG-FINAL-3CHUNK-BEGIN",
});
return buildAssistantEvents([
{
id: "msg_mock_telegram_long_final_three_chunk",
phase: "final_answer",
streamDeltas: splitMockStreamingText(text),
text,
},
]);
}
if (QA_TELEGRAM_LONG_FINAL_PROMPT_RE.test(allInputText)) {
const text = buildTelegramLongFinalText();
return buildAssistantEvents([
{
id: "msg_mock_telegram_long_final",
phase: "final_answer",
streamDeltas: splitMockStreamingText(text),
text,
},
]);
}
if (QA_STREAMING_PROMPT_RE.test(allInputText) && exactReplyDirective) {
return buildAssistantEvents([
{

View File

@@ -373,6 +373,7 @@ describe("dispatchTelegramMessage draft streaming", () => {
telegramDeps?: TelegramBotDeps;
bot?: Bot;
replyToMode?: Parameters<typeof dispatchTelegramMessage>[0]["replyToMode"];
textLimit?: number;
}) {
const bot = params.bot ?? createBot();
await dispatchTelegramMessage({
@@ -382,7 +383,7 @@ describe("dispatchTelegramMessage draft streaming", () => {
runtime: createRuntime(),
replyToMode: params.replyToMode ?? "first",
streamMode: params.streamMode ?? "partial",
textLimit: 4096,
textLimit: params.textLimit ?? 4096,
telegramCfg: params.telegramCfg ?? {},
telegramDeps: params.telegramDeps ?? telegramDepsForTest,
opts: { token: "token" },
@@ -1576,6 +1577,89 @@ describe("dispatchTelegramMessage draft streaming", () => {
);
});
it("uses the active preview as the first chunk for long text finals", async () => {
const answerDraftStream = createSequencedDraftStream(1001);
const reasoningDraftStream = createDraftStream();
createTelegramDraftStream
.mockImplementationOnce(() => answerDraftStream)
.mockImplementationOnce(() => reasoningDraftStream);
const finalText = `${"A".repeat(70)}${"B".repeat(70)}`;
dispatchReplyWithBufferedBlockDispatcher.mockImplementation(
async ({ dispatcherOptions, replyOptions }) => {
await replyOptions?.onPartialReply?.({ text: "Working preview" });
await dispatcherOptions.deliver({ text: finalText, replyToId: "456" }, { kind: "final" });
return { queuedFinal: true };
},
);
deliverReplies.mockResolvedValue({ delivered: true });
editMessageTelegram.mockResolvedValue({ ok: true, chatId: "123", messageId: "1001" });
await dispatchWithContext({
context: createContext(),
streamMode: "partial",
textLimit: 80,
});
const editedText = editMessageTelegram.mock.calls[0]?.[2] as string;
const followUpText =
(deliverReplies.mock.calls[0]?.[0] as { replies?: Array<{ text?: string }> })?.replies?.[0]
?.text ?? "";
expect(editMessageTelegram).toHaveBeenCalledTimes(1);
expect(editedText.length).toBeLessThanOrEqual(80);
expect(followUpText.length).toBeGreaterThan(0);
expect(`${editedText}${followUpText}`).toBe(finalText);
expect(deliverReplies).toHaveBeenCalledTimes(1);
expect(deliverReplies).toHaveBeenCalledWith(
expect.objectContaining({
replies: [expect.not.objectContaining({ replyToId: expect.any(String) })],
}),
);
expect(answerDraftStream.clear).not.toHaveBeenCalled();
});
it("uses the active preview as the first chunk for three-chunk long text finals", async () => {
const answerDraftStream = createSequencedDraftStream(1001);
const reasoningDraftStream = createDraftStream();
createTelegramDraftStream
.mockImplementationOnce(() => answerDraftStream)
.mockImplementationOnce(() => reasoningDraftStream);
const finalText = `${"A".repeat(70)}${"B".repeat(70)}${"C".repeat(70)}`;
dispatchReplyWithBufferedBlockDispatcher.mockImplementation(
async ({ dispatcherOptions, replyOptions }) => {
await replyOptions?.onPartialReply?.({ text: "Working preview" });
await dispatcherOptions.deliver({ text: finalText, replyToId: "456" }, { kind: "final" });
return { queuedFinal: true };
},
);
deliverReplies.mockResolvedValue({ delivered: true });
editMessageTelegram.mockResolvedValue({ ok: true, chatId: "123", messageId: "1001" });
await dispatchWithContext({
context: createContext(),
streamMode: "partial",
textLimit: 80,
});
const editedText = editMessageTelegram.mock.calls[0]?.[2] as string;
const followUpReplies =
(deliverReplies.mock.calls[0]?.[0] as { replies?: Array<{ text?: string }> })?.replies ?? [];
const followUpText = followUpReplies.map((reply) => reply.text ?? "").join("");
expect(editMessageTelegram).toHaveBeenCalledTimes(1);
expect(editedText.length).toBeLessThanOrEqual(80);
expect(followUpReplies).toHaveLength(1);
expect(followUpText.length).toBeGreaterThan(80);
expect(`${editedText}${followUpText}`).toBe(finalText);
expect(deliverReplies).toHaveBeenCalledTimes(1);
expect(deliverReplies).toHaveBeenCalledWith(
expect.objectContaining({
replies: [expect.not.objectContaining({ replyToId: expect.any(String) })],
}),
);
expect(answerDraftStream.clear).not.toHaveBeenCalled();
});
it("does not force new message on first assistant message start", async () => {
const draftStream = createDraftStream(999);
createTelegramDraftStream.mockReturnValue(draftStream);

View File

@@ -28,6 +28,7 @@ import {
createOutboundPayloadPlan,
projectOutboundPayloadPlanForDelivery,
} from "openclaw/plugin-sdk/outbound-runtime";
import { chunkMarkdownTextWithMode } from "openclaw/plugin-sdk/reply-chunking";
import { clearHistoryEntriesIfEnabled } from "openclaw/plugin-sdk/reply-history";
import { resolveSendableOutboundReplyParts } from "openclaw/plugin-sdk/reply-payload";
import type { ReplyPayload } from "openclaw/plugin-sdk/reply-payload";
@@ -75,7 +76,7 @@ import {
shouldSuppressTelegramError,
} from "./error-policy.js";
import { shouldSuppressLocalTelegramExecApprovalPrompt } from "./exec-approvals.js";
import { renderTelegramHtmlText } from "./format.js";
import { markdownToTelegramChunks, renderTelegramHtmlText } from "./format.js";
import {
type ArchivedPreview,
createLaneDeliveryStateTracker,
@@ -784,6 +785,27 @@ export const dispatchTelegramMessage = async ({
}
return { ...payload, text };
};
// Builds a follow-up chunk payload: applies the text, then strips every
// reply-target field so extra chunks after the reused preview do not
// re-quote the triggering message.
const applyTextToFollowUpPayload = (payload: ReplyPayload, text: string): ReplyPayload => {
  const {
    replyToId: _droppedReplyToId,
    replyToCurrent: _droppedReplyToCurrent,
    replyToTag: _droppedReplyToTag,
    ...strippedPayload
  } = applyTextToPayload(payload, text);
  return strippedPayload;
};
// Splits a final reply into Telegram-sized text chunks through the same
// markdown chunking pipeline the preview stream uses, so the first chunk's
// boundary lines up with what a preview edit can hold.
const splitFinalTextForPreview = (text: string): string[] => {
  const markdownChunks =
    chunkMode === "newline" ? chunkMarkdownTextWithMode(text, draftMaxChars, chunkMode) : [text];
  const pieces: string[] = [];
  for (const chunk of markdownChunks) {
    for (const telegramChunk of markdownToTelegramChunks(chunk, draftMaxChars, { tableMode })) {
      pieces.push(telegramChunk.text);
    }
  }
  return pieces;
};
const applyQuoteReplyTarget = (payload: ReplyPayload): ReplyPayload => {
if (
!implicitQuoteReplyTargetId ||
@@ -836,6 +858,8 @@ export const dispatchTelegramMessage = async ({
retainPreviewOnCleanupByLane,
draftMaxChars,
applyTextToPayload,
applyTextToFollowUpPayload,
splitFinalTextForPreview,
sendPayload,
flushDraftLane,
stopDraftLane: async (lane) => {

View File

@@ -81,6 +81,8 @@ type CreateLaneTextDelivererParams = {
retainPreviewOnCleanupByLane: Record<LaneName, boolean>;
draftMaxChars: number;
applyTextToPayload: (payload: ReplyPayload, text: string) => ReplyPayload;
applyTextToFollowUpPayload?: (payload: ReplyPayload, text: string) => ReplyPayload;
splitFinalTextForPreview?: (text: string) => readonly string[];
sendPayload: (payload: ReplyPayload) => Promise<boolean>;
flushDraftLane: (lane: DraftLaneState) => Promise<void>;
stopDraftLane: (lane: DraftLaneState) => Promise<void>;
@@ -117,7 +119,7 @@ type TryUpdatePreviewParams = {
previewButtons?: TelegramInlineButtons;
stopBeforeEdit?: boolean;
updateLaneSnapshot?: boolean;
skipRegressive: "always" | "existingOnly";
skipRegressive: RegressiveSkipMode;
context: "final" | "update";
previewMessageId?: number;
previewTextSnapshot?: string;
@@ -134,7 +136,7 @@ type ConsumeArchivedAnswerPreviewParams = {
};
type PreviewUpdateContext = "final" | "update";
type RegressiveSkipMode = "always" | "existingOnly";
// "never" disables the regressive-edit guard entirely, so a final edit may
// intentionally shrink the visible preview text (long-final first-chunk
// finalization); "always"/"existingOnly" keep the guard in the given scope.
type RegressiveSkipMode = "always" | "existingOnly" | "never";
type ResolvePreviewTargetParams = {
lane: DraftLaneState;
@@ -169,6 +171,9 @@ function shouldSkipRegressivePreviewUpdate(args: {
if (currentPreviewText === undefined) {
return false;
}
if (args.skipRegressive === "never") {
return false;
}
return (
currentPreviewText.startsWith(args.text) &&
args.text.length < currentPreviewText.length &&
@@ -184,6 +189,26 @@ function isLongLivedPreview(visibleSinceMs: number | undefined, nowMs: number):
);
}
/**
 * Compacts a chunk list for preview finalization: empty chunks are dropped,
 * whitespace-only chunks are folded as a prefix onto the next visible chunk,
 * and trailing whitespace is appended to the last visible chunk. Whitespace
 * that never meets a visible chunk is discarded entirely.
 */
function compactPreviewFinalChunks(chunks: readonly string[]): string[] {
  const compacted: string[] = [];
  let carriedWhitespace = "";
  for (const chunk of chunks) {
    if (chunk.length === 0) {
      continue;
    }
    if (chunk.trim() === "") {
      carriedWhitespace = `${carriedWhitespace}${chunk}`;
    } else {
      compacted.push(`${carriedWhitespace}${chunk}`);
      carriedWhitespace = "";
    }
  }
  if (carriedWhitespace !== "" && compacted.length > 0) {
    compacted[compacted.length - 1] = `${compacted[compacted.length - 1]}${carriedWhitespace}`;
  }
  return compacted;
}
function resolvePreviewTarget(params: ResolvePreviewTargetParams): PreviewTargetResolution {
const lanePreviewMessageId = params.lane.stream?.messageId();
const previewMessageId =
@@ -227,6 +252,10 @@ export function createLaneTextDeliverer(params: CreateLaneTextDelivererParams) {
// A fresh final message (instead of editing the preview in place) is needed
// when the lane renders message previews and the current preview is either
// long-lived (stale timestamp) or already followed by visible output.
const shouldUseFreshFinalForPreview = (lane: DraftLaneState, visibleSinceMs?: number) => {
  if (!isMessagePreviewLane(lane)) {
    return false;
  }
  return isLongLivedPreview(visibleSinceMs, readNow()) || wasVisiblyOverwrittenSince(visibleSinceMs);
};
// Prefers the follow-up-specific text applier (which strips reply targets)
// and falls back to the plain applier when the host did not provide one.
const buildFollowUpPayload = (payload: ReplyPayload, text: string) => {
  const applyText = params.applyTextToFollowUpPayload ?? params.applyTextToPayload;
  return applyText(payload, text);
};
const clearActivePreviewAfterFreshFinal = async (lane: DraftLaneState, laneName: LaneName) => {
try {
await lane.stream?.clear();
@@ -330,6 +359,56 @@ export function createLaneTextDeliverer(params: CreateLaneTextDelivererParams) {
return "fallback";
}
};
// Attempts to finalize a long text reply by reusing the live preview bubble
// as the first final chunk, then sending only the remaining chunks as new
// messages — avoiding the transient extra bubble a full fresh send creates.
// Returns undefined to signal "fall back to the standard send path".
const tryDeliverLongFinalThroughPreview = async (args: {
  lane: DraftLaneState;
  laneName: LaneName;
  text: string;
  payload: ReplyPayload;
  previewButtons?: TelegramInlineButtons;
}): Promise<LaneDeliveryResult | undefined> => {
  // Only applies while a transient preview stream is active and the final
  // carries no inline buttons (buttons require the standard send path).
  if (
    !args.lane.stream ||
    args.previewButtons !== undefined ||
    params.activePreviewLifecycleByLane[args.laneName] !== "transient"
  ) {
    return undefined;
  }
  const chunks = compactPreviewFinalChunks(params.splitFinalTextForPreview?.(args.text) ?? []);
  const [firstChunk, ...remainingChunks] = chunks;
  // Need at least two chunks, and the first must fit a single preview edit.
  if (!firstChunk || remainingChunks.length === 0 || firstChunk.length > params.draftMaxChars) {
    return undefined;
  }
  // Flush pending draft edits so the preview message id is settled.
  await params.flushDraftLane(args.lane);
  const previewMessageId = args.lane.stream.messageId();
  if (typeof previewMessageId !== "number") {
    return undefined;
  }
  // Force the preview down to exactly the first chunk. skipRegressive
  // "never": shrinking the visible text here is intentional, not a
  // streaming regression to be guarded against.
  const finalized = await tryUpdatePreviewForLane({
    lane: args.lane,
    laneName: args.laneName,
    text: firstChunk,
    stopBeforeEdit: true,
    updateLaneSnapshot: true,
    skipRegressive: "never",
    context: "final",
  });
  if (finalized === "fallback") {
    return undefined;
  }
  if (finalized === "retained") {
    markActivePreviewComplete(args.laneName);
    return result("preview-retained");
  }
  markActivePreviewComplete(args.laneName);
  // Ship the remainder in one follow-up payload, unless it is whitespace-only.
  const remainingText = remainingChunks.join("");
  if (remainingText.trim().length > 0) {
    await params.sendPayload(buildFollowUpPayload(args.payload, remainingText));
  }
  return result("preview-finalized", {
    content: args.text,
    messageId: previewMessageId,
  });
};
const tryUpdatePreviewForLane = async ({
lane,
@@ -596,6 +675,16 @@ export function createLaneTextDeliverer(params: CreateLaneTextDelivererParams) {
return result("preview-retained");
}
} else if (!hasMedia && !payload.isError && text.length > params.draftMaxChars) {
const longFinalResult = await tryDeliverLongFinalThroughPreview({
lane,
laneName,
text,
payload,
previewButtons,
});
if (longFinalResult) {
return longFinalResult;
}
params.log(
`telegram: preview final too long for edit (${text.length} > ${params.draftMaxChars}); falling back to standard send`,
);

View File

@@ -22,6 +22,7 @@ function createHarness(params?: {
answerHasStreamedMessage?: boolean;
answerLastPartialText?: string;
answerPreviewVisibleSinceMs?: number;
splitFinalTextForPreview?: (text: string) => readonly string[];
nowMs?: number;
}) {
const answer =
@@ -70,6 +71,7 @@ function createHarness(params?: {
retainPreviewOnCleanupByLane: { ...retainPreviewOnCleanupByLane },
draftMaxChars: params?.draftMaxChars ?? 4_096,
applyTextToPayload: (payload: ReplyPayload, text: string) => ({ ...payload, text }),
splitFinalTextForPreview: params?.splitFinalTextForPreview,
sendPayload,
flushDraftLane,
stopDraftLane,
@@ -383,6 +385,36 @@ describe("createLaneTextDeliverer", () => {
expect(harness.log).toHaveBeenCalledWith(expect.stringContaining("preview final too long"));
});
it("forces a long final preview back to the first chunk before sending the rest", async () => {
const firstChunk = "First chunk boundary.";
const remainingText = " Follow-up body after the boundary.";
const finalText = `${firstChunk}${remainingText}`;
const harness = createHarness({
answerMessageId: 999,
answerHasStreamedMessage: true,
answerLastPartialText: `${firstChunk} overlap already visible`,
draftMaxChars: 24,
splitFinalTextForPreview: () => [firstChunk, remainingText],
});
const result = await deliverFinalAnswer(harness, finalText);
expect(expectPreviewFinalized(result)).toEqual({
content: finalText,
messageId: 999,
});
expect(harness.editPreview).toHaveBeenCalledWith(
expect.objectContaining({
messageId: 999,
text: firstChunk,
}),
);
expect(harness.sendPayload).toHaveBeenCalledWith(
expect.objectContaining({ text: remainingText }),
);
expect(harness.lanes.answer.lastPartialText).toBe(firstChunk);
});
it("sends a fresh final when a message preview is long lived", async () => {
const visibleSinceMs = 10_000;
const harness = createHarness({