mirror of
https://github.com/openclaw/openclaw.git
synced 2026-04-26 08:31:55 +00:00
Replaces String(err) with the existing formatUnknownError() utility across the msteams extension to prevent "[object Object]" from appearing in error logs when non-Error objects are caught (e.g., Axios errors, Bot Framework SDK error objects). Fixes #53910. Thanks, @bradgroux.
259 lines
7.9 KiB
TypeScript
259 lines
7.9 KiB
TypeScript
import {
|
|
createChannelReplyPipeline,
|
|
logTypingFailure,
|
|
resolveChannelMediaMaxBytes,
|
|
type OpenClawConfig,
|
|
type MSTeamsReplyStyle,
|
|
type RuntimeEnv,
|
|
} from "../runtime-api.js";
|
|
import type { MSTeamsAccessTokenProvider } from "./attachments/types.js";
|
|
import type { StoredConversationReference } from "./conversation-store.js";
|
|
import {
|
|
classifyMSTeamsSendError,
|
|
formatMSTeamsSendErrorHint,
|
|
formatUnknownError,
|
|
} from "./errors.js";
|
|
import {
|
|
buildConversationReference,
|
|
type MSTeamsAdapter,
|
|
type MSTeamsRenderedMessage,
|
|
renderReplyPayloadsToMessages,
|
|
sendMSTeamsMessages,
|
|
} from "./messenger.js";
|
|
import type { MSTeamsMonitorLogger } from "./monitor-types.js";
|
|
import { createTeamsReplyStreamController } from "./reply-stream-controller.js";
|
|
import { withRevokedProxyFallback } from "./revoked-context.js";
|
|
import { getMSTeamsRuntime } from "./runtime.js";
|
|
import type { MSTeamsTurnContext } from "./sdk-types.js";
|
|
// Re-exported so callers can pick a human-readable status line without
// importing the stream controller module directly.
export { pickInformativeStatusText } from "./reply-stream-controller.js";
export function createMSTeamsReplyDispatcher(params: {
|
|
cfg: OpenClawConfig;
|
|
agentId: string;
|
|
accountId?: string;
|
|
runtime: RuntimeEnv;
|
|
log: MSTeamsMonitorLogger;
|
|
adapter: MSTeamsAdapter;
|
|
appId: string;
|
|
conversationRef: StoredConversationReference;
|
|
context: MSTeamsTurnContext;
|
|
replyStyle: MSTeamsReplyStyle;
|
|
textLimit: number;
|
|
onSentMessageIds?: (ids: string[]) => void;
|
|
tokenProvider?: MSTeamsAccessTokenProvider;
|
|
sharePointSiteId?: string;
|
|
}) {
|
|
const core = getMSTeamsRuntime();
|
|
const msteamsCfg = params.cfg.channels?.msteams;
|
|
const conversationType = params.conversationRef.conversation?.conversationType?.toLowerCase();
|
|
const isTypingSupported = conversationType === "personal" || conversationType === "groupchat";
|
|
|
|
const sendTypingIndicator = isTypingSupported
|
|
? async () => {
|
|
await withRevokedProxyFallback({
|
|
run: async () => {
|
|
await params.context.sendActivity({ type: "typing" });
|
|
},
|
|
onRevoked: async () => {
|
|
const baseRef = buildConversationReference(params.conversationRef);
|
|
await params.adapter.continueConversation(
|
|
params.appId,
|
|
{ ...baseRef, activityId: undefined },
|
|
async (ctx) => {
|
|
await ctx.sendActivity({ type: "typing" });
|
|
},
|
|
);
|
|
},
|
|
onRevokedLog: () => {
|
|
params.log.debug?.("turn context revoked, sending typing via proactive messaging");
|
|
},
|
|
});
|
|
}
|
|
: async () => {};
|
|
|
|
const { onModelSelected, typingCallbacks, ...replyPipeline } = createChannelReplyPipeline({
|
|
cfg: params.cfg,
|
|
agentId: params.agentId,
|
|
channel: "msteams",
|
|
accountId: params.accountId,
|
|
typing: {
|
|
start: sendTypingIndicator,
|
|
onStartError: (err) => {
|
|
logTypingFailure({
|
|
log: (message) => params.log.debug?.(message),
|
|
channel: "msteams",
|
|
action: "start",
|
|
error: err,
|
|
});
|
|
},
|
|
},
|
|
});
|
|
|
|
const chunkMode = core.channel.text.resolveChunkMode(params.cfg, "msteams");
|
|
const tableMode = core.channel.text.resolveMarkdownTableMode({
|
|
cfg: params.cfg,
|
|
channel: "msteams",
|
|
});
|
|
const mediaMaxBytes = resolveChannelMediaMaxBytes({
|
|
cfg: params.cfg,
|
|
resolveChannelLimitMb: ({ cfg }) => cfg.channels?.msteams?.mediaMaxMb,
|
|
});
|
|
const feedbackLoopEnabled = params.cfg.channels?.msteams?.feedbackEnabled !== false;
|
|
const streamController = createTeamsReplyStreamController({
|
|
conversationType,
|
|
context: params.context,
|
|
feedbackLoopEnabled,
|
|
log: params.log,
|
|
});
|
|
|
|
const blockStreamingEnabled =
|
|
typeof msteamsCfg?.blockStreaming === "boolean" ? msteamsCfg.blockStreaming : false;
|
|
|
|
const pendingMessages: MSTeamsRenderedMessage[] = [];
|
|
|
|
const sendMessages = async (messages: MSTeamsRenderedMessage[]): Promise<string[]> => {
|
|
return sendMSTeamsMessages({
|
|
replyStyle: params.replyStyle,
|
|
adapter: params.adapter,
|
|
appId: params.appId,
|
|
conversationRef: params.conversationRef,
|
|
context: params.context,
|
|
messages,
|
|
retry: {},
|
|
onRetry: (event) => {
|
|
params.log.debug?.("retrying send", {
|
|
replyStyle: params.replyStyle,
|
|
...event,
|
|
});
|
|
},
|
|
tokenProvider: params.tokenProvider,
|
|
sharePointSiteId: params.sharePointSiteId,
|
|
mediaMaxBytes,
|
|
feedbackLoopEnabled,
|
|
});
|
|
};
|
|
|
|
const flushPendingMessages = async () => {
|
|
if (pendingMessages.length === 0) {
|
|
return;
|
|
}
|
|
const toSend = pendingMessages.splice(0);
|
|
const total = toSend.length;
|
|
let ids: string[];
|
|
try {
|
|
ids = await sendMessages(toSend);
|
|
} catch {
|
|
ids = [];
|
|
let failed = 0;
|
|
for (const msg of toSend) {
|
|
try {
|
|
const msgIds = await sendMessages([msg]);
|
|
ids.push(...msgIds);
|
|
} catch {
|
|
failed += 1;
|
|
params.log.debug?.("individual message send failed, continuing with remaining blocks");
|
|
}
|
|
}
|
|
if (failed > 0) {
|
|
params.log.warn?.(`failed to deliver ${failed} of ${total} message blocks`, {
|
|
failed,
|
|
total,
|
|
});
|
|
}
|
|
}
|
|
if (ids.length > 0) {
|
|
params.onSentMessageIds?.(ids);
|
|
}
|
|
};
|
|
|
|
const {
|
|
dispatcher,
|
|
replyOptions,
|
|
markDispatchIdle: baseMarkDispatchIdle,
|
|
} = core.channel.reply.createReplyDispatcherWithTyping({
|
|
...replyPipeline,
|
|
humanDelay: core.channel.reply.resolveHumanDelayConfig(params.cfg, params.agentId),
|
|
onReplyStart: async () => {
|
|
await streamController.onReplyStart();
|
|
await typingCallbacks?.onReplyStart?.();
|
|
},
|
|
typingCallbacks,
|
|
deliver: async (payload) => {
|
|
const preparedPayload = streamController.preparePayload(payload);
|
|
if (!preparedPayload) {
|
|
return;
|
|
}
|
|
|
|
const messages = renderReplyPayloadsToMessages([preparedPayload], {
|
|
textChunkLimit: params.textLimit,
|
|
chunkText: true,
|
|
mediaMode: "split",
|
|
tableMode,
|
|
chunkMode,
|
|
});
|
|
pendingMessages.push(...messages);
|
|
|
|
// When block streaming is enabled, flush immediately so blocks are
|
|
// delivered progressively instead of batching until markDispatchIdle.
|
|
if (blockStreamingEnabled) {
|
|
await flushPendingMessages();
|
|
}
|
|
},
|
|
onError: (err, info) => {
|
|
const errMsg = formatUnknownError(err);
|
|
const classification = classifyMSTeamsSendError(err);
|
|
const hint = formatMSTeamsSendErrorHint(classification);
|
|
params.runtime.error?.(
|
|
`msteams ${info.kind} reply failed: ${errMsg}${hint ? ` (${hint})` : ""}`,
|
|
);
|
|
params.log.error("reply failed", {
|
|
kind: info.kind,
|
|
error: errMsg,
|
|
classification,
|
|
hint,
|
|
});
|
|
},
|
|
});
|
|
|
|
const markDispatchIdle = (): Promise<void> => {
|
|
return flushPendingMessages()
|
|
.catch((err) => {
|
|
const errMsg = formatUnknownError(err);
|
|
const classification = classifyMSTeamsSendError(err);
|
|
const hint = formatMSTeamsSendErrorHint(classification);
|
|
params.runtime.error?.(`msteams flush reply failed: ${errMsg}${hint ? ` (${hint})` : ""}`);
|
|
params.log.error("flush reply failed", {
|
|
error: errMsg,
|
|
classification,
|
|
hint,
|
|
});
|
|
})
|
|
.then(() => {
|
|
return streamController.finalize().catch((err) => {
|
|
params.log.debug?.("stream finalize failed", { error: formatUnknownError(err) });
|
|
});
|
|
})
|
|
.finally(() => {
|
|
baseMarkDispatchIdle();
|
|
});
|
|
};
|
|
|
|
return {
|
|
dispatcher,
|
|
replyOptions: {
|
|
...replyOptions,
|
|
...(streamController.hasStream()
|
|
? {
|
|
onPartialReply: (payload: { text?: string }) =>
|
|
streamController.onPartialReply(payload),
|
|
}
|
|
: {}),
|
|
disableBlockStreaming:
|
|
typeof msteamsCfg?.blockStreaming === "boolean" ? !msteamsCfg.blockStreaming : undefined,
|
|
onModelSelected,
|
|
},
|
|
markDispatchIdle,
|
|
};
|
|
}
|