fix(feishu): honor block streaming config

This commit is contained in:
Vincent Koc
2026-05-03 11:39:05 -07:00
parent f74e901794
commit 03e35b1d83
7 changed files with 78 additions and 6 deletions

View File

@@ -29,6 +29,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Feishu: accept and honor `channels.feishu.blockStreaming` at the top level and per account, while keeping the legacy default off so Feishu cards no longer reject documented config or silently drop block replies. Fixes #75555. Thanks @vincentkoc.
- Google Chat: normalize custom Google auth transport headers before google-auth/gaxios interceptors run, restoring webhook token verification when certificate retrieval expects Fetch `Headers`. Fixes #76742. Thanks @donbowman.
- Doctor/plugins: reset stale `plugins.slots.memory` and `plugins.slots.contextEngine` references during `doctor --fix`, so cleanup of missing plugin config does not leave unrecoverable slot owners behind. Fixes #76550 and #76551. Thanks @vincentkoc.
- Docs/WhatsApp: merge the duplicate top-level `web` objects in the gateway channel config example so copy-pasted WhatsApp config keeps both `web.whatsapp` and reconnect settings. Fixes #76619. Thanks @WadydX.

View File

@@ -273,13 +273,13 @@ Feishu/Lark supports streaming replies via interactive cards. When enabled, the
channels: {
feishu: {
streaming: true, // enable streaming card output (default: true)
blockStreaming: true, // enable block-level streaming (default: true)
blockStreaming: true, // opt into completed-block streaming
},
},
}
```
Set `streaming: false` to send the complete reply in one message.
Set `streaming: false` to send the complete reply in one message. `blockStreaming` is off by default; enable it only when you want completed assistant blocks flushed before the final reply.
### Quota optimization
@@ -428,7 +428,7 @@ Full configuration: [Gateway configuration](/gateway/configuration)
| `channels.feishu.textChunkLimit` | Message chunk size | `2000` |
| `channels.feishu.mediaMaxMb` | Media size limit | `30` |
| `channels.feishu.streaming` | Streaming card output | `true` |
| `channels.feishu.blockStreaming` | Block-level streaming | `true` |
| `channels.feishu.blockStreaming` | Completed-block reply streaming | `false` |
| `channels.feishu.typingIndicator` | Send typing reactions | `true` |
| `channels.feishu.resolveSenderNames` | Resolve sender display names | `true` |

View File

@@ -126,6 +126,7 @@
"enum": ["auto", "raw", "card"]
},
"streaming": { "type": "boolean" },
"blockStreaming": { "type": "boolean" },
"replyInThread": {
"type": "string",
"enum": ["disabled", "enabled"]
@@ -162,6 +163,7 @@
"enum": ["auto", "raw", "card"]
},
"streaming": { "type": "boolean" },
"blockStreaming": { "type": "boolean" },
"replyInThread": {
"type": "string",
"enum": ["disabled", "enabled"]

View File

@@ -206,6 +206,20 @@ describe("FeishuConfigSchema optimization flags", () => {
expect(result.resolveSenderNames).toBe(true);
});
it("accepts top-level and account-level block streaming", () => {
  // A top-level blockStreaming flag and a per-account override must both
  // survive schema parsing untouched.
  const parsed = FeishuConfigSchema.parse({
    blockStreaming: true,
    accounts: {
      main: { blockStreaming: false },
    },
  });
  expect(parsed.blockStreaming).toBe(true);
  expect(parsed.accounts?.main?.blockStreaming).toBe(false);
});
it("accepts account-level optimization flags", () => {
const result = FeishuConfigSchema.parse({
accounts: {

View File

@@ -68,6 +68,7 @@ const RenderModeSchema = z.enum(["auto", "raw", "card"]).optional();
// Streaming card mode: when enabled, card replies use Feishu's Card Kit streaming API
// for incremental text display with a "Thinking..." placeholder
const StreamingModeSchema = z.boolean().optional();
// Opt-in flag for completed-block streaming; the dispatcher only enables it
// when this is explicitly true, so absence keeps the legacy default off.
const BlockStreamingSchema = z.boolean().optional();
const BlockStreamingCoalesceSchema = z
.object({
@@ -188,6 +189,7 @@ const FeishuSharedConfigShape = {
dms: z.record(z.string(), DmConfigSchema).optional(),
textChunkLimit: z.number().int().positive().optional(),
chunkMode: z.enum(["length", "newline"]).optional(),
blockStreaming: BlockStreamingSchema,
blockStreamingCoalesce: BlockStreamingCoalesceSchema,
mediaMaxMb: z.number().positive().optional(),
httpTimeoutMs: z.number().int().positive().max(300_000).optional(),

View File

@@ -286,7 +286,55 @@ describe("createFeishuReplyDispatcher streaming behavior", () => {
expect(sendMediaFeishuMock).not.toHaveBeenCalled();
});
it("sets disableBlockStreaming in replyOptions to prevent silent reply drops", async () => {
it("disables block streaming by default to prevent silent reply drops", async () => {
  // With no blockStreaming key configured, the dispatcher must opt out of
  // core block streaming (legacy default off).
  const dispatcher = createFeishuReplyDispatcher({
    cfg: {} as never,
    agentId: "agent",
    runtime: {} as never,
    chatId: "oc_chat",
  });
  expect(dispatcher.replyOptions).toHaveProperty("disableBlockStreaming", true);
});
it("enables core block streaming when Feishu blockStreaming is explicitly true", async () => {
  // Opting in at the account level should flip disableBlockStreaming off and
  // route completed blocks through the streaming card session.
  resolveFeishuAccountMock.mockReturnValue({
    accountId: "main",
    appId: "app_id",
    appSecret: "app_secret",
    domain: "feishu",
    config: { renderMode: "auto", streaming: true, blockStreaming: true },
  });
  const { result, options } = createDispatcherHarness();
  expect(result.replyOptions).toHaveProperty("disableBlockStreaming", false);
  await options.deliver({ text: "plain block" }, { kind: "block" });
  await options.onIdle?.();
  expect(streamingInstances).toHaveLength(1);
  const [session] = streamingInstances;
  expect(session.close).toHaveBeenCalledWith("plain block", {
    note: "Agent: agent",
  });
});
it("keeps core block streaming disabled when Feishu blockStreaming is explicitly false", async () => {
resolveFeishuAccountMock.mockReturnValue({
accountId: "main",
appId: "app_id",
appSecret: "app_secret",
domain: "feishu",
config: {
renderMode: "auto",
streaming: true,
blockStreaming: false,
},
});
const result = createFeishuReplyDispatcher({
cfg: {} as never,
agentId: "agent",

View File

@@ -222,6 +222,7 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP
const tableMode = core.channel.text.resolveMarkdownTableMode({ cfg, channel: "feishu" });
const renderMode = account.config?.renderMode ?? "auto";
const streamingEnabled = account.config?.streaming !== false && renderMode !== "raw";
const coreBlockStreamingEnabled = account.config?.blockStreaming === true;
const reasoningPreviewEnabled = streamingEnabled && params.allowReasoningPreview === true;
let streaming: FeishuStreamingSession | null = null;
@@ -530,7 +531,10 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP
}),
);
const useCard =
hasText && (renderMode === "card" || (renderMode === "auto" && shouldUseCard(text)));
hasText &&
(renderMode === "card" ||
(info?.kind === "block" && coreBlockStreamingEnabled && renderMode !== "raw") ||
(renderMode === "auto" && shouldUseCard(text)));
const skipTextForDuplicateFinal =
info?.kind === "final" && hasText && deliveredFinalTexts.has(text);
const skipTextForClosedStreamingFinal =
@@ -660,7 +664,8 @@ export function createFeishuReplyDispatcher(params: CreateFeishuReplyDispatcherP
replyOptions: {
...replyOptions,
onModelSelected: prefixContext.onModelSelected,
disableBlockStreaming: true,
disableBlockStreaming:
typeof account.config?.blockStreaming === "boolean" ? !account.config.blockStreaming : true,
onPartialReply: streamingEnabled
? (payload: ReplyPayload) => {
if (!payload.text) {