mirror of
https://github.com/openclaw/openclaw.git
synced 2026-03-20 14:30:57 +00:00
Merge branch 'main' into fix/issue-35881-cron-perms
This commit is contained in:
74
CHANGELOG.md
74
CHANGELOG.md
@@ -17,29 +17,78 @@ Docs: https://docs.openclaw.ai
|
||||
- Slack/DM typing feedback: add `channels.slack.typingReaction` so Socket Mode DMs can show reaction-based processing status even when Slack native assistant typing is unavailable. (#19816) Thanks @dalefrieswthat.
|
||||
- Cron/job snapshot persistence: skip backup during normalization persistence in `ensureLoaded` so `jobs.json.bak` keeps the pre-edit snapshot for recovery, while preserving backup creation on explicit user-driven writes. (#35234) Thanks @0xsline.
|
||||
- TTS/OpenAI-compatible endpoints: add `messages.tts.openai.baseUrl` config support with config-over-env precedence, endpoint-aware directive validation, and OpenAI TTS request routing to the resolved base URL. (#34321) thanks @RealKai42.
|
||||
- Plugins/before_prompt_build system-context fields: add `prependSystemContext` and `appendSystemContext` so static plugin guidance can be placed in system prompt space for provider caching and lower repeated prompt token cost. (#35177) thanks @maweibin.
|
||||
- Gateway: add SecretRef support for gateway.auth.token with auth-mode guardrails. (#35094) Thanks @joshavant.
|
||||
- Plugins/hook policy: add `plugins.entries.<id>.hooks.allowPromptInjection`, validate unknown typed hook names at runtime, and preserve legacy `before_agent_start` model/provider overrides while stripping prompt-mutating fields when prompt injection is disabled. (#36567) thanks @gumadeiras.
|
||||
- Tools/Diffs guidance: restore a short system-prompt hint for enabled diffs while keeping the detailed instructions in the companion skill, so diffs usage guidance stays out of user-prompt space. (#36904) thanks @gumadeiras.
|
||||
- Telegram/ACP topic bindings: accept Telegram Mac Unicode dash option prefixes in `/acp spawn`, support Telegram topic thread binding (`--thread here|auto`), route bound-topic follow-ups to ACP sessions, add actionable Telegram approval buttons with prefixed approval-id resolution, and pin successful bind confirmations in-topic. (#36683) Thanks @huntharo.
|
||||
- Hooks/Compaction lifecycle: emit `session:compact:before` and `session:compact:after` internal events plus plugin compaction callbacks with session/count metadata, so automations can react to compaction runs consistently. (#16788) thanks @vincentkoc.
|
||||
- CLI: make read-only SecretRef status flows degrade safely (#37023) thanks @joshavant.
|
||||
|
||||
### Breaking
|
||||
|
||||
- **BREAKING:** Gateway auth now requires explicit `gateway.auth.mode` when both `gateway.auth.token` and `gateway.auth.password` are configured (including SecretRefs). Set `gateway.auth.mode` to `token` or `password` before upgrade to avoid startup/pairing/TUI failures. (#35094) Thanks @joshavant.
|
||||
|
||||
### Fixes
|
||||
|
||||
- Models/openai-completions streaming compatibility: force `compat.supportsUsageInStreaming=false` for non-native OpenAI-compatible endpoints during model normalization, preventing usage-only stream chunks from triggering `choices[0]` parser crashes in provider streams. (#8714) Thanks @nonanon1.
|
||||
- Tools/xAI native web-search collision guard: drop OpenClaw `web_search` from tool registration when routing to xAI/Grok model providers (including OpenRouter `x-ai/*`) to avoid duplicate tool-name request failures against provider-native `web_search`. (#14749) Thanks @realsamrat.
|
||||
- TUI/token copy-safety rendering: treat long credential-like mixed alphanumeric tokens (including quoted forms) as copy-sensitive in render sanitization so formatter hard-wrap guards no longer inject visible spaces into auth-style values before display. (#26710) Thanks @jasonthane.
|
||||
- WhatsApp/self-chat response prefix fallback: stop forcing `"[openclaw]"` as the implicit outbound response prefix when no identity name or response prefix is configured, so blank/default prefix settings no longer inject branding text unexpectedly in self-chat flows. (#27962) Thanks @ecanmor.
|
||||
- Memory/QMD search result decoding: accept `qmd search` hits that only include `file` URIs (for example `qmd://collection/path.md`) without `docid`, resolve them through managed collection roots, and keep multi-collection results keyed by file fallback so valid QMD hits no longer collapse to empty `memory_search` output. (#28181) Thanks @0x76696265.
|
||||
- Memory/QMD collection-name conflict recovery: when `qmd collection add` fails because another collection already occupies the same `path + pattern`, detect the conflicting collection from `collection list`, remove it, and retry add so agent-scoped managed collections are created deterministically instead of being silently skipped; also add warning-only fallback when qmd metadata is unavailable to avoid destructive guesses. (#25496) Thanks @Ramsbaby.
|
||||
- Slack/app_mention race dedupe: when `app_mention` dispatch wins while same-`ts` `message` prepare is still in-flight, suppress the later message dispatch so near-simultaneous Slack deliveries do not produce duplicate replies; keep single-retry behavior and add regression coverage for both dropped and successful message-prepare outcomes. (#37033) Thanks @Takhoffman.
|
||||
- Gateway/chat streaming tool-boundary text retention: merge assistant delta segments into per-run chat buffers so pre-tool text is preserved in live chat deltas/finals when providers emit post-tool assistant segments as non-prefix snapshots. (#36957) Thanks @Datyedyeguy.
|
||||
- TUI/model indicator freshness: prevent stale session snapshots from overwriting freshly patched model selection (and reset per-session freshness when switching session keys) so `/model` updates reflect immediately instead of lagging by one or more commands. (#21255) Thanks @kowza.
|
||||
- TUI/final-error rendering fallback: when a chat `final` event has no renderable assistant content but includes envelope `errorMessage`, render the formatted error text instead of collapsing to `"(no output)"`, preserving actionable failure context in-session. (#14687) Thanks @Mquarmoc.
|
||||
- OpenAI Codex OAuth/login hardening: fail OAuth completion early when the returned token is missing `api.responses.write`, and allow `openclaw models auth login --provider openai-codex` to use the built-in OAuth path even when no provider plugins are installed. (#36660) Thanks @driesvints.
|
||||
- OpenAI Codex OAuth/scope request parity: augment the OAuth authorize URL with required API scopes (`api.responses.write`, `model.request`, `api.model.read`) before browser handoff so OAuth tokens include runtime model/request permissions expected by OpenAI API calls. (#24720) Thanks @Skippy-Gunboat.
|
||||
- Onboarding/API key input hardening: strip non-Latin1 Unicode artifacts from normalized secret input (while preserving Latin-1 content and internal spaces) so malformed copied API keys cannot trigger HTTP header `ByteString` construction crashes; adds regression coverage for shared normalization and MiniMax auth header usage. (#24496) Thanks @fa6maalassaf.
|
||||
- Kimi Coding/Anthropic tools compatibility: normalize `anthropic-messages` tool payloads to OpenAI-style `tools[].function` + compatible `tool_choice` when targeting Kimi Coding endpoints, restoring tool-call workflows that regressed after v2026.3.2. (#37038) Thanks @mochimochimochi-hub.
|
||||
- Heartbeat/workspace-path guardrails: append explicit workspace `HEARTBEAT.md` path guidance (and `docs/heartbeat.md` avoidance) to heartbeat prompts so heartbeat runs target workspace checklists reliably across packaged install layouts. (#37037) Thanks @stofancy.
|
||||
- Subagents/kill-complete announce race: when a late `subagent-complete` lifecycle event arrives after an earlier kill marker, clear stale kill suppression/cleanup flags and re-run announce cleanup so finished runs no longer get silently swallowed. (#37024) Thanks @cmfinlan.
|
||||
- Agents/tool-result cleanup timeout hardening: on embedded runner teardown idle timeouts, clear pending tool-call state without persisting synthetic `missing tool result` entries, preventing timeout cleanups from poisoning follow-up turns; adds regression coverage for timeout clear-vs-flush behavior. (#37081) Thanks @Coyote-Den.
|
||||
- Agents/openai-completions stream timeout hardening: ensure runtime undici global dispatchers use extended streaming body/header timeouts (including env-proxy dispatcher mode) before embedded runs, reducing forced mid-stream `terminated` failures on long generations; adds regression coverage for dispatcher selection and idempotent reconfiguration. (#9708) Thanks @scottchguard.
|
||||
- Agents/fallback cooldown probe execution: thread explicit rate-limit cooldown probe intent from model fallback into embedded runner auth-profile selection so same-provider fallback attempts can actually run when all profiles are cooldowned for `rate_limit` (instead of failing pre-run as `No available auth profile`), while preserving default cooldown skip behavior and adding regression tests at both fallback and runner layers. (#13623) Thanks @asfura.
|
||||
- Cron/OpenAI Codex OAuth refresh hardening: when `openai-codex` token refresh fails specifically on account-id extraction, reuse the cached access token instead of failing the run immediately, with regression coverage to keep non-Codex and unrelated refresh failures unchanged. (#36604) Thanks @laulopezreal.
|
||||
- Gateway/remote WS break-glass hostname support: honor `OPENCLAW_ALLOW_INSECURE_PRIVATE_WS=1` for `ws://` hostname URLs (not only private IP literals) across onboarding validation and runtime gateway connection checks, while still rejecting public IP literals and non-unicast IPv6 endpoints. (#36930) Thanks @manju-rn.
|
||||
- Routing/binding lookup scalability: pre-index route bindings by channel/account and avoid full binding-list rescans on channel-account cache rollover, preventing multi-second `resolveAgentRoute` stalls in large binding configurations. (#36915) Thanks @songchenghao.
|
||||
- Browser/session cleanup: track browser tabs opened by session-scoped browser tool runs and close tracked tabs during `sessions.reset`/`sessions.delete` runtime cleanup, preventing orphaned tabs and unbounded browser memory growth after session teardown. (#36666) Thanks @Harnoor6693.
|
||||
- Slack/local file upload allowlist parity: propagate `mediaLocalRoots` through the Slack send action pipeline so workspace-rooted attachments pass `assertLocalMediaAllowed` checks while non-allowlisted paths remain blocked. (synthesis: #36656; overlap considered from #36516, #36496, #36493, #36484, #32648, #30888) Thanks @2233admin.
|
||||
- Agents/compaction safeguard pre-check: skip embedded compaction before entering the Pi SDK when a session has no real conversation messages, avoiding unnecessary LLM API calls on idle sessions. (#36451) thanks @Sid-Qin.
|
||||
- Config/schema cache key stability: build merged schema cache keys with incremental hashing to avoid large single-string serialization and prevent `RangeError: Invalid string length` on high-cardinality plugin/channel metadata. (#36603) Thanks @powermaster888.
|
||||
- iMessage/cron completion announces: strip leaked inline reply tags (for example `[[reply_to:6100]]`) from user-visible completion text so announcement deliveries do not expose threading metadata. (#24600) Thanks @vincentkoc.
|
||||
- Sessions/daily reset transcript archival: archive prior transcript files during stale-session scheduled/daily resets by capturing the previous session entry before rollover, preventing orphaned transcript files on disk. (#35493) Thanks @byungsker.
|
||||
- Feishu/group slash command detection: normalize group mention wrappers before command-authorization probing so mention-prefixed commands (for example `@Bot/model` and `@Bot /reset`) are recognized as gateway commands instead of being forwarded to the agent. (#35994) Thanks @liuxiaopai-ai.
|
||||
- Agents/context pruning: guard assistant thinking/text char estimation against malformed blocks (missing `thinking`/`text` strings or null entries) so pruning no longer crashes with malformed provider content. (openclaw#35146) thanks @Sid-Qin.
|
||||
- Agents/transcript policy: set `preserveSignatures` to Anthropic-only handling in `resolveTranscriptPolicy` so Anthropic thinking signatures are preserved while non-Anthropic providers remain unchanged. (#32813) thanks @Sid-Qin.
|
||||
- Agents/schema cleaning: detect Venice + Grok model IDs as xAI-proxied targets so unsupported JSON Schema keywords are stripped before requests, preventing Venice/Grok `Invalid arguments` failures. (openclaw#35355) thanks @Sid-Qin.
|
||||
- Skills/native command deduplication: centralize skill command dedupe by canonical `skillName` in `listSkillCommandsForAgents` so duplicate suffixed variants (for example `_2`) are no longer surfaced across interfaces outside Discord. (#27521) thanks @shivama205.
|
||||
- Agents/xAI tool-call argument decoding: decode HTML-entity encoded xAI/Grok tool-call argument values (`&`, `"`, `<`, `>`, numeric entities) before tool execution so commands with shell operators and quotes no longer fail with parse errors. (#35276) Thanks @Sid-Qin.
|
||||
- Agents/thinking-tag promotion hardening: guard `promoteThinkingTagsToBlocks` against malformed assistant content entries (`null`/`undefined`) before `block.type` reads so malformed provider payloads no longer crash session processing while preserving pass-through behavior. (#35143) thanks @Sid-Qin.
|
||||
- Gateway/Control UI version reporting: align runtime and browser client version metadata to avoid `dev` placeholders, wait for bootstrap version before first UI websocket connect, and only forward bootstrap `serverVersion` to same-origin gateway targets to prevent cross-target version leakage. (from #35230, #30928, #33928) Thanks @Sid-Qin, @joelnishanth, and @MoerAI.
|
||||
- Control UI/markdown parser crash fallback: catch `marked.parse()` failures and fall back to escaped plain-text `<pre>` rendering so malformed recursive markdown no longer crashes Control UI session rendering on load. (#36445) Thanks @BinHPdev.
|
||||
- Control UI/markdown fallback regression coverage: add explicit regression assertions for parser-error fallback behavior so malformed markdown no longer risks reintroducing hard-crash rendering paths in future markdown/parser upgrades. (#36445) Thanks @BinHPdev.
|
||||
- Web UI/config form: treat `additionalProperties: true` object schemas as editable map entries instead of unsupported fields so Accounts-style maps stay editable in form mode. (#35380, supersedes #32072) Thanks @stakeswky and @liuxiaopai-ai.
|
||||
- Feishu/streaming card delivery synthesis: unify snapshot and delta streaming merge semantics, apply overlap-aware final merge, suppress duplicate final text delivery (including text+media final packets), prefer topic-thread `message.reply` routing when a reply target exists, and tune card print cadence to avoid duplicate incremental rendering. (from #33245, #32896, #33840) Thanks @rexl2018, @kcinzgg, and @aerelune.
|
||||
- Feishu/group mention detection: carry startup-probed bot display names through monitor dispatch so `requireMention` checks compare against current bot identity instead of stale config names, fixing missed `@bot` handling in groups while preserving multi-bot false-positive guards. (#36317, #34271) Thanks @liuxiaopai-ai.
|
||||
- Security/dependency audit: patch transitive Hono vulnerabilities by pinning `hono` to `4.12.5` and `@hono/node-server` to `1.19.10` in production resolution paths. Thanks @shakkernerd.
|
||||
- Security/dependency audit: bump `tar` to `7.5.10` (from `7.5.9`) to address the high-severity hardlink path traversal advisory (`GHSA-qffp-2rhf-9h96`). Thanks @shakkernerd.
|
||||
- Cron/announce delivery robustness: bypass pending-descendant announce guards for cron completion sends, ensure named-agent announce routes have outbound session entries, and fall back to direct delivery only when an announce send was actually attempted and failed. (from #35185, #32443, #34987) Thanks @Sid-Qin, @scoootscooob, and @bmendonca3.
|
||||
- Cron/announce best-effort fallback: run direct outbound fallback after attempted announce failures even when delivery is configured as best-effort, so Telegram cron sends are not left as attempted-but-undelivered after `cron announce delivery failed` warnings.
|
||||
- Auto-reply/system events: restore runtime system events to the message timeline (`System:` lines), preserve think-hint parsing with prepended events, and carry events into deferred followup/collect/steer-backlog prompts to keep cache behavior stable without dropping queued metadata. (#34794) Thanks @anisoptera.
|
||||
- Security/audit account handling: avoid prototype-chain account IDs in audit validation by using own-property checks for `accounts`. (#34982) Thanks @HOYALIM.
|
||||
- Cron/restart catch-up semantics: replay interrupted recurring jobs and missed immediate cron slots on startup without replaying interrupted one-shot jobs, with guarded missed-slot probing to avoid malformed-schedule startup aborts and duplicate-trigger drift after restart. (from #34466, #34896, #34625, #33206) Thanks @dunamismax, @dsantoreis, @Octane0411, and @Sid-Qin.
|
||||
- Agents/session usage tracking: preserve accumulated usage metadata on embedded Pi runner error exits so failed turns still update session `totalTokens` from real usage instead of stale prior values. (#34275) thanks @RealKai42.
|
||||
- Slack/reaction thread context routing: carry Slack native DM channel IDs through inbound context and threading tool resolution so reaction targets resolve consistently for DM `To=user:*` sessions (including `toolContext.currentChannelId` fallback behavior). (from #34831; overlaps #34440, #34502, #34483, #32754) Thanks @dunamismax.
|
||||
- Subagents/announce completion scoping: scope nested direct-child completion aggregation to the current requester run window, harden frozen completion capture for deterministic descendant synthesis, and route completion announce delivery through parent-agent announce turns with provenance-aware internal events. (#35080) Thanks @tyler6204.
|
||||
- Nodes/system.run approval hardening: use explicit argv-mutation signaling when regenerating prepared `rawCommand`, and cover the `system.run.prepare -> system.run` handoff so direct PATH-based `nodes.run` commands no longer fail with `rawCommand does not match command`. (#33137) thanks @Sid-Qin.
|
||||
- Models/custom provider headers: propagate `models.providers.<name>.headers` across inline, fallback, and registry-found model resolution so header-authenticated proxies consistently receive configured request headers. (#27490) thanks @Sid-Qin.
|
||||
- Ollama/remote provider auth fallback: synthesize a local runtime auth key for explicitly configured `models.providers.ollama` entries that omit `apiKey`, so remote Ollama endpoints run without requiring manual dummy-key setup while preserving env/profile/config key precedence and missing-config failures. (#11283) Thanks @cpreecs.
|
||||
- Ollama/custom provider headers: forward resolved model headers into native Ollama stream requests so header-authenticated Ollama proxies receive configured request headers. (#24337) thanks @echoVic.
|
||||
- Daemon/systemd install robustness: treat `systemctl --user is-enabled` exit-code-4 `not-found` responses as not-enabled by combining stderr/stdout detail parsing, so Ubuntu fresh installs no longer fail with `systemctl is-enabled unavailable`. (#33634) Thanks @Yuandiaodiaodiao.
|
||||
- Slack/system-event session routing: resolve reaction/member/pin/interaction system-event session keys through channel/account bindings (with sender-aware DM routing) so inbound Slack events target the correct agent session in multi-account setups instead of defaulting to `agent:main`. (#34045) Thanks @paulomcg, @daht-mad and @vincentkoc.
|
||||
- Slack/native streaming markdown conversion: stop pre-normalizing text passed to Slack native `markdown_text` in streaming start/append/stop paths to prevent Markdown style corruption from double conversion. (#34931)
|
||||
- Gateway/HTTP tools invoke media compatibility: preserve raw media payload access for direct `/tools/invoke` clients by allowing media `nodes` invoke commands only in HTTP tool context, while keeping agent-context media invoke blocking to prevent base64 prompt bloat. (#34365) Thanks @obviyus.
|
||||
- Agents/Nodes media outputs: add dedicated `photos_latest` action handling, block media-returning `nodes invoke` commands, keep metadata-only `camera.list` invoke allowed, and normalize empty `photos_latest` results to a consistent response shape to prevent base64 context bloat. (#34332) Thanks @obviyus.
|
||||
- TUI/session-key canonicalization: normalize `openclaw tui --session` values to lowercase so uppercase session names no longer drop real-time streaming updates due to gateway/TUI key mismatches. (#33866, #34013) thanks @lynnzc.
|
||||
@@ -65,6 +114,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Security/audit denyCommands guidance: suggest likely exact node command IDs for unknown `gateway.nodes.denyCommands` entries so ineffective denylist entries are easier to correct. (#29713) thanks @liquidhorizon88-bot.
|
||||
- Docs/security hardening guidance: document Docker `DOCKER-USER` + UFW policy and add cross-linking from Docker install docs for VPS/public-host setups. (#27613) thanks @dorukardahan.
|
||||
- Docs/security threat-model links: replace relative `.md` links with Mintlify-compatible root-relative routes in security docs to prevent broken internal navigation. (#27698) thanks @clawdoo.
|
||||
- Plugins/Update integrity drift: avoid false integrity drift prompts when updating npm-installed plugins from unpinned specs, while keeping drift checks for exact pinned versions. (#37179) Thanks @vincentkoc.
|
||||
- iOS/Voice timing safety: guard system speech start/finish callbacks to the active utterance to avoid misattributed start events during rapid stop/restart cycles. (#33304) thanks @mbelinky; original implementation direction by @ngutman.
|
||||
- iOS/Talk incremental speech pacing: allow long punctuation-free assistant chunks to start speaking at safe whitespace boundaries so voice responses begin sooner instead of waiting for terminal punctuation. (#33305) thanks @mbelinky; original implementation by @ngutman.
|
||||
- iOS/Watch reply reliability: make watch session activation waiters robust under concurrent requests so status/send calls no longer hang intermittently, and align delegate callbacks with Swift 6 actor safety. (#33306) thanks @mbelinky; original implementation by @Rocuts.
|
||||
@@ -100,6 +150,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Telegram/multi-account default routing clarity: warn only for ambiguous (2+) account setups without an explicit default, add `openclaw doctor` warnings for missing/invalid multi-account defaults across channels, and document explicit-default guidance for channel routing and Telegram config. (#32544) thanks @Sid-Qin.
|
||||
- Telegram/plugin outbound hook parity: run `message_sending` + `message_sent` in Telegram reply delivery, include reply-path hook metadata (`mediaUrls`, `threadId`), and report `message_sent.success=false` when hooks blank text and no outbound message is delivered. (#32649) Thanks @KimGLee.
|
||||
- CLI/Coding-agent reliability: switch default `claude-cli` non-interactive args to `--permission-mode bypassPermissions`, auto-normalize legacy `--dangerously-skip-permissions` backend overrides to the modern permission-mode form, align coding-agent + live-test docs with the non-PTY Claude path, and emit session system-event heartbeat notices when CLI watchdog no-output timeouts terminate runs. (#28610, #31149, #34055). Thanks @niceysam, @cryptomaltese and @vincentkoc.
|
||||
- Gateway/OpenAI chat completions: parse active-turn `image_url` content parts (including parameterized data URIs and guarded URL sources), forward them as multimodal `images`, accept image-only user turns, enforce per-request image-part/byte budgets, default URL-based image fetches to disabled unless explicitly enabled by config, and redact image base64 data in cache-trace/provider payload diagnostics. (#17685) Thanks @vincentkoc.
|
||||
- ACP/ACPX session bootstrap: retry with `sessions new` when `sessions ensure` returns no session identifiers so ACP spawns avoid `NO_SESSION`/`ACP_TURN_FAILED` failures on affected agents. (#28786, #31338, #34055). Thanks @Sid-Qin and @vincentkoc.
|
||||
- ACP/sessions_spawn parent stream visibility: add `streamTo: "parent"` for `runtime: "acp"` to forward initial child-run progress/no-output/completion updates back into the requester session as system events (instead of direct child delivery), and emit a tail-able session-scoped relay log (`<sessionId>.acp-stream.jsonl`, returned as `streamLogPath` when available), improving orchestrator visibility for blocked or long-running harness turns. (#34310, #29909; reopened from #34055). Thanks @vincentkoc.
|
||||
- Agents/bootstrap truncation warning handling: unify bootstrap budget/truncation analysis across embedded + CLI runtime, `/context`, and `openclaw doctor`; add `agents.defaults.bootstrapPromptTruncationWarning` (`off|once|always`, default `once`) and persist warning-signature metadata so truncation warnings are consistent and deduped across turns. (#32769) Thanks @gumadeiras.
|
||||
@@ -110,6 +161,8 @@ Docs: https://docs.openclaw.ai
|
||||
- Agents/Compaction safeguard structure hardening: require exact fallback summary headings, sanitize untrusted compaction instruction text before prompt embedding, and keep structured sections when preserving all turns. (#25555) thanks @rodrigouroz.
|
||||
- Gateway/status self version reporting: make Gateway self version in `openclaw status` prefer runtime `VERSION` (while preserving explicit `OPENCLAW_VERSION` override), preventing stale post-upgrade app version output. (#32655) thanks @liuxiaopai-ai.
|
||||
- Memory/QMD index isolation: set `QMD_CONFIG_DIR` alongside `XDG_CONFIG_HOME` so QMD config state stays per-agent despite upstream XDG handling bugs, preventing cross-agent collection indexing and excess disk/CPU usage. (#27028) thanks @HenryLoenwind.
|
||||
- Memory/QMD collection safety: stop destructive collection rebinds when QMD `collection list` only reports names without path metadata, preventing `memory search` from dropping existing collections if re-add fails. (#36870) Thanks @Adnannnnnnna.
|
||||
- Memory/QMD duplicate-document recovery: detect `UNIQUE constraint failed: documents.collection, documents.path` update failures, rebuild managed collections once, and retry update so periodic QMD syncs recover instead of failing every run; includes regression coverage to avoid over-matching unrelated unique constraints. (#27649) Thanks @MiscMich.
|
||||
- Memory/local embedding initialization hardening: add regression coverage for transient initialization retry and mixed `embedQuery` + `embedBatch` concurrent startup to lock single-flight initialization behavior. (#15639) thanks @SubtleSpark.
|
||||
- CLI/Coding-agent reliability: switch default `claude-cli` non-interactive args to `--permission-mode bypassPermissions`, auto-normalize legacy `--dangerously-skip-permissions` backend overrides to the modern permission-mode form, align coding-agent + live-test docs with the non-PTY Claude path, and emit session system-event heartbeat notices when CLI watchdog no-output timeouts terminate runs. Related to #28261. Landed from contributor PRs #28610 and #31149. Thanks @niceysam, @cryptomaltese and @vincentkoc.
|
||||
- ACP/ACPX session bootstrap: retry with `sessions new` when `sessions ensure` returns no session identifiers so ACP spawns avoid `NO_SESSION`/`ACP_TURN_FAILED` failures on affected agents. Related to #28786. Landed from contributor PR #31338. Thanks @Sid-Qin and @vincentkoc.
|
||||
@@ -118,6 +171,21 @@ Docs: https://docs.openclaw.ai
|
||||
- LINE/context and routing synthesis: fix group/room peer routing and command-authorization context propagation, and keep processing later events in mixed-success webhook batches. (from #21955, #24475, #27035, #28286) Thanks @lailoo, @mcaxtr, @jervyclaw, @Glucksberg, and @Takhoffman.
|
||||
- LINE/status/config/webhook synthesis: fix status false positives from snapshot/config state and accept LINE webhook HEAD probes for compatibility. (from #10487, #25726, #27537, #27908, #31387) Thanks @BlueBirdBack, @stakeswky, @loiie45e, @puritysb, and @mcaxtr.
|
||||
- LINE cleanup/test follow-ups: fold cleanup/test learnings into the synthesis review path while keeping runtime changes focused on regression fixes. (from #17630, #17289) Thanks @Clawborn and @davidahmann.
|
||||
- Mattermost/interactive buttons: add interactive button send/callback support with directory-based channel/user target resolution, and harden callbacks via account-scoped HMAC verification plus sender-scoped DM routing. (#19957) thanks @tonydehnke.
|
||||
- Feishu/groupPolicy legacy alias compatibility: treat legacy `groupPolicy: "allowall"` as `open` in both schema parsing and runtime policy checks so intended open-group configs no longer silently drop group messages when `groupAllowFrom` is empty. (from #36358) Thanks @Sid-Qin.
|
||||
- Mattermost/plugin SDK import policy: replace remaining monolithic `openclaw/plugin-sdk` imports in Mattermost mention-gating paths/tests with scoped subpaths (`openclaw/plugin-sdk/compat` and `openclaw/plugin-sdk/mattermost`) so `pnpm check` passes `lint:plugins:no-monolithic-plugin-sdk-entry-imports` on baseline. (#36480) Thanks @Takhoffman.
|
||||
- Telegram/polls: add Telegram poll action support to channel action discovery and tool/CLI poll flows, with multi-account discoverability gated to accounts that can actually execute polls (`sendMessage` + `poll`). (#36547) thanks @gumadeiras.
|
||||
- Agents/failover cooldown classification: stop treating generic `cooling down` text as provider `rate_limit` so healthy models no longer show false global cooldown/rate-limit warnings while explicit `model_cooldown` markers still trigger failover. (#32972) thanks @stakeswky.
|
||||
- Agents/failover service-unavailable handling: stop treating bare proxy/CDN `service unavailable` errors as provider overload while keeping them retryable via the timeout/failover path, so transient outages no longer show false rate-limit warnings or block fallback. (#36646) thanks @jnMetaCode.
|
||||
- Plugins/HTTP route migration diagnostics: rewrite legacy `api.registerHttpHandler(...)` loader failures into actionable migration guidance so doctor/plugin diagnostics point operators to `api.registerHttpRoute(...)` or `registerPluginHttpRoute(...)`. (#36794) Thanks @vincentkoc.
|
||||
- Doctor/Heartbeat upgrade diagnostics: warn when heartbeat delivery is configured with an implicit `directPolicy` so upgrades pin direct/DM behavior explicitly instead of relying on the current default. (#36789) Thanks @vincentkoc.
|
||||
- Agents/current-time UTC anchor: append a machine-readable UTC suffix alongside local `Current time:` lines in shared cron-style prompt contexts so agents can compare UTC-stamped workspace timestamps without doing timezone math. (#32423) thanks @jriff.
|
||||
- TUI/webchat command-owner scope alignment: treat internal-channel gateway sessions with `operator.admin` as owner-authorized in command auth, restoring cron/gateway/connector tool access for affected TUI/webchat sessions while keeping external channels on identity-based owner checks. (from #35666, #35673, #35704) Thanks @Naylenv, @Octane0411, and @Sid-Qin.
|
||||
- Discord/inbound timeout isolation: separate inbound worker timeout tracking from listener timeout budgets so queued Discord replies are no longer dropped when listener watchdog windows expire mid-run. (#36602) Thanks @dutifulbob.
|
||||
- Memory/doctor SecretRef handling: treat SecretRef-backed memory-search API keys as configured, and fail embedding setup with explicit unresolved-secret errors instead of crashing. (#36835) Thanks @joshavant.
|
||||
- Memory/flush default prompt: ban timestamped variant filenames during default memory flush runs so durable notes stay in the canonical daily `memory/YYYY-MM-DD.md` file. (#34951) thanks @zerone0x.
|
||||
- Agents/reply delivery timing: flush embedded Pi block replies before waiting on compaction retries so already-generated assistant replies reach channels before compaction wait completes. (#35489) thanks @Sid-Qin.
|
||||
- Agents/gateway config guidance: stop exposing `config.schema` through the agent `gateway` tool, remove prompt/docs guidance that told agents to call it, and keep agents on `config.get` plus `config.patch`/`config.apply` for config changes. (#7382) thanks @kakuteki.
|
||||
|
||||
## 2026.3.2
|
||||
|
||||
@@ -143,6 +211,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Plugin runtime/system: expose `runtime.system.requestHeartbeatNow(...)` so extensions can wake targeted sessions immediately after enqueueing system events. (#19464) Thanks @AustinEral.
|
||||
- Plugin runtime/events: expose `runtime.events.onAgentEvent` and `runtime.events.onSessionTranscriptUpdate` for extension-side subscriptions, and isolate transcript-listener failures so one faulty listener cannot break the entire update fanout. (#16044) Thanks @scifantastic.
|
||||
- CLI/Banner taglines: add `cli.banner.taglineMode` (`random` | `default` | `off`) to control funny tagline behavior in startup output, with docs + FAQ guidance and regression tests for config override behavior.
|
||||
- Agents/compaction safeguard quality-audit rollout: keep summary quality audits disabled by default unless `agents.defaults.compaction.qualityGuard` is explicitly enabled, and add config plumbing for bounded retry control. (#25556) Thanks @rodrigouroz.
|
||||
|
||||
### Breaking
|
||||
|
||||
@@ -229,6 +298,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Synology Chat/reply delivery: resolve webhook usernames to Chat API `user_id` values for outbound chatbot replies, avoiding mismatches between webhook user IDs and `method=chatbot` recipient IDs in multi-account setups. (#23709) Thanks @druide67.
|
||||
- Slack/thread context payloads: only inject thread starter/history text on first thread turn for new sessions while preserving thread metadata, reducing repeated context-token bloat on long-lived thread sessions. (#32133) Thanks @sourman.
|
||||
- Slack/session routing: keep top-level channel messages in one shared session when `replyToMode=off`, while preserving thread-scoped keys for true thread replies and non-off modes. (#32193) Thanks @bmendonca3.
|
||||
- Slack/app_mention dedupe race handling: keep seen-message dedupe to prevent duplicate replies while allowing a one-time app_mention retry when the paired message event was dropped pre-dispatch, so requireMention channels do not lose mentions under Slack event reordering. (#34937) Thanks @littleben.
|
||||
- Voice-call/webhook routing: require exact webhook path matches (instead of prefix matches) so lookalike paths cannot reach provider verification/dispatch logic. (#31930) Thanks @afurm.
|
||||
- Zalo/Pairing auth tests: add webhook regression coverage asserting DM pairing-store reads/writes remain account-scoped, preventing cross-account authorization bleed in multi-account setups. (#26121) Thanks @bmendonca3.
|
||||
- Zalouser/Pairing auth tests: add account-scoped DM pairing-store regression coverage (`monitor.account-scope.test.ts`) to prevent cross-account allowlist bleed in multi-account setups. (#26672) Thanks @bmendonca3.
|
||||
@@ -334,6 +404,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Cron/store migration: normalize legacy cron jobs with string `schedule` and top-level `command`/`timeout` fields into canonical schedule/payload/session-target shape on load, preventing schedule-error loops on old persisted stores. (#31926) Thanks @bmendonca3.
|
||||
- Tests/Windows backup rotation: skip chmod-only backup permission assertions on Windows while retaining compose/rotation/prune coverage across platforms to avoid false CI failures from Windows non-POSIX mode semantics. (#32286) Thanks @jalehman.
|
||||
- Tests/Subagent announce: set `OPENCLAW_TEST_FAST=1` before importing `subagent-announce` format suites so module-level fast-mode constants are captured deterministically on Windows CI, preventing timeout flakes in nested completion announce coverage. (#31370) Thanks @zwffff.
|
||||
- Control UI/markdown recursion fallback: catch markdown parser failures and safely render escaped plain-text fallback instead of crashing the Control UI on pathological markdown history payloads. (#36445, fixes #36213) Thanks @BinHPdev.
|
||||
|
||||
## 2026.3.1
|
||||
|
||||
@@ -466,9 +537,11 @@ Docs: https://docs.openclaw.ai
|
||||
### Changes
|
||||
|
||||
- Docs/Contributing: require before/after screenshots for UI or visual PRs in the pre-PR checklist. (#32206) Thanks @hydro13.
|
||||
- Models/OpenAI forward compat: add support for `openai/gpt-5.4`, `openai/gpt-5.4-pro`, and `openai-codex/gpt-5.4`, including direct OpenAI Responses `serviceTier` passthrough safeguards for valid values. (#36590) Thanks @dorukardahan.
|
||||
|
||||
### Fixes
|
||||
|
||||
- Models/provider config precedence: prefer exact `models.providers.<name>` matches before normalized provider aliases in embedded model resolution, preventing alias/canonical key collisions from applying the wrong provider `api`, `baseUrl`, or headers. (#35934) Thanks @RealKai42.
|
||||
- Logging/Subsystem console timestamps: route subsystem console timestamp rendering through `formatConsoleTimestamp(...)` so `pretty` and timestamp-prefix output use local timezone formatting consistently instead of inline UTC `toISOString()` paths. (#25970) Thanks @openperf.
|
||||
- Feishu/Multi-account + reply reliability: add `channels.feishu.defaultAccount` outbound routing support with schema validation, prevent inbound preview text from leaking into prompt system events, keep quoted-message extraction text-first (post/interactive/file placeholders instead of raw JSON), route Feishu video sends as `msg_type: "file"`, and avoid websocket event blocking by using non-blocking event handling in monitor dispatch. Landed from contributor PRs #31209, #29610, #30432, #30331, and #29501. Thanks @stakeswky, @hclsys, @bmendonca3, @patrick-yingxi-pan, and @zwffff.
|
||||
- Feishu/Target routing + replies + dedupe: normalize provider-prefixed targets (`feishu:`/`lark:`), prefer configured `channels.feishu.defaultAccount` for tool execution, honor Feishu outbound `renderMode` in adapter text/caption sends, fall back to normal send when reply targets are withdrawn/deleted, and add synchronous in-memory dedupe guard for concurrent duplicate inbound events. Landed from contributor PRs #30428, #30438, #29958, #30444, and #29463. Thanks @bmendonca3 and @Yaxuan42.
|
||||
@@ -715,6 +788,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Channels/Multi-account config: when adding a non-default channel account to a single-account top-level channel setup, move existing account-scoped top-level single-account values into `channels.<channel>.accounts.default` before writing the new account so the original account keeps working without duplicated account values at channel root; `openclaw doctor --fix` now repairs previously mixed channel account shapes the same way. (#27334) Thanks @gumadeiras.
|
||||
- iOS/Talk mode: stop injecting the voice directive hint into iOS Talk prompts and remove the Voice Directive Hint setting, reducing model bias toward tool-style TTS directives and keeping relay responses text-first by default. (#27543) Thanks @ngutman.
|
||||
- CI/Windows: shard the Windows `checks-windows` test lane into two matrix jobs and honor explicit shard index overrides in `scripts/test-parallel.mjs` to reduce CI critical-path wall time. (#27234) Thanks @joshavant.
|
||||
- Mattermost/mention gating: honor `chatmode: "onmessage"` account override in inbound group/channel mention-gate resolution, while preserving explicit group `requireMention` config precedence and adding verbose drop diagnostics for skipped inbound posts. (#27160) Thanks @turian.
|
||||
|
||||
## 2026.2.25
|
||||
|
||||
|
||||
@@ -549,7 +549,7 @@ Thanks to all clawtributors:
|
||||
<a href="https://github.com/mattqdev"><img src="https://avatars.githubusercontent.com/u/115874885?v=4&s=48" width="48" height="48" alt="MattQ" title="MattQ"/></a> <a href="https://github.com/Milofax"><img src="https://avatars.githubusercontent.com/u/2537423?v=4&s=48" width="48" height="48" alt="Milofax" title="Milofax"/></a> <a href="https://github.com/stevebot-alive"><img src="https://avatars.githubusercontent.com/u/261149299?v=4&s=48" width="48" height="48" alt="Steve (OpenClaw)" title="Steve (OpenClaw)"/></a> <a href="https://github.com/ZetiMente"><img src="https://avatars.githubusercontent.com/u/76985631?v=4&s=48" width="48" height="48" alt="Matthew" title="Matthew"/></a> <a href="https://github.com/Cassius0924"><img src="https://avatars.githubusercontent.com/u/62874592?v=4&s=48" width="48" height="48" alt="Cassius0924" title="Cassius0924"/></a> <a href="https://github.com/0xbrak"><img src="https://avatars.githubusercontent.com/u/181251288?v=4&s=48" width="48" height="48" alt="0xbrak" title="0xbrak"/></a> <a href="https://github.com/8BlT"><img src="https://avatars.githubusercontent.com/u/162764392?v=4&s=48" width="48" height="48" alt="8BlT" title="8BlT"/></a> <a href="https://github.com/Abdul535"><img src="https://avatars.githubusercontent.com/u/54276938?v=4&s=48" width="48" height="48" alt="Abdul535" title="Abdul535"/></a> <a href="https://github.com/abhaymundhara"><img src="https://avatars.githubusercontent.com/u/62872231?v=4&s=48" width="48" height="48" alt="abhaymundhara" title="abhaymundhara"/></a> <a href="https://github.com/aduk059"><img src="https://avatars.githubusercontent.com/u/257603478?v=4&s=48" width="48" height="48" alt="aduk059" title="aduk059"/></a>
|
||||
<a href="https://github.com/afurm"><img src="https://avatars.githubusercontent.com/u/6375192?v=4&s=48" width="48" height="48" alt="afurm" title="afurm"/></a> <a href="https://github.com/aisling404"><img src="https://avatars.githubusercontent.com/u/211950534?v=4&s=48" width="48" height="48" alt="aisling404" title="aisling404"/></a> <a href="https://github.com/akari-musubi"><img src="https://avatars.githubusercontent.com/u/259925157?v=4&s=48" width="48" height="48" alt="akari-musubi" title="akari-musubi"/></a> <a href="https://github.com/albertlieyingadrian"><img src="https://avatars.githubusercontent.com/u/12984659?v=4&s=48" width="48" height="48" alt="albertlieyingadrian" title="albertlieyingadrian"/></a> <a href="https://github.com/Alex-Alaniz"><img src="https://avatars.githubusercontent.com/u/88956822?v=4&s=48" width="48" height="48" alt="Alex-Alaniz" title="Alex-Alaniz"/></a> <a href="https://github.com/ali-aljufairi"><img src="https://avatars.githubusercontent.com/u/85583841?v=4&s=48" width="48" height="48" alt="ali-aljufairi" title="ali-aljufairi"/></a> <a href="https://github.com/altaywtf"><img src="https://avatars.githubusercontent.com/u/9790196?v=4&s=48" width="48" height="48" alt="altaywtf" title="altaywtf"/></a> <a href="https://github.com/araa47"><img src="https://avatars.githubusercontent.com/u/22760261?v=4&s=48" width="48" height="48" alt="araa47" title="araa47"/></a> <a href="https://github.com/Asleep123"><img src="https://avatars.githubusercontent.com/u/122379135?v=4&s=48" width="48" height="48" alt="Asleep123" title="Asleep123"/></a> <a href="https://github.com/avacadobanana352"><img src="https://avatars.githubusercontent.com/u/263496834?v=4&s=48" width="48" height="48" alt="avacadobanana352" title="avacadobanana352"/></a>
|
||||
<a href="https://github.com/barronlroth"><img src="https://avatars.githubusercontent.com/u/5567884?v=4&s=48" width="48" height="48" alt="barronlroth" title="barronlroth"/></a> <a href="https://github.com/bennewton999"><img src="https://avatars.githubusercontent.com/u/458991?v=4&s=48" width="48" height="48" alt="bennewton999" title="bennewton999"/></a> <a href="https://github.com/bguidolim"><img src="https://avatars.githubusercontent.com/u/987360?v=4&s=48" width="48" height="48" alt="bguidolim" title="bguidolim"/></a> <a href="https://github.com/bigwest60"><img src="https://avatars.githubusercontent.com/u/12373979?v=4&s=48" width="48" height="48" alt="bigwest60" title="bigwest60"/></a> <a href="https://github.com/caelum0x"><img src="https://avatars.githubusercontent.com/u/130079063?v=4&s=48" width="48" height="48" alt="caelum0x" title="caelum0x"/></a> <a href="https://github.com/championswimmer"><img src="https://avatars.githubusercontent.com/u/1327050?v=4&s=48" width="48" height="48" alt="championswimmer" title="championswimmer"/></a> <a href="https://github.com/dutifulbob"><img src="https://avatars.githubusercontent.com/u/261991368?v=4&s=48" width="48" height="48" alt="dutifulbob" title="dutifulbob"/></a> <a href="https://github.com/eternauta1337"><img src="https://avatars.githubusercontent.com/u/550409?v=4&s=48" width="48" height="48" alt="eternauta1337" title="eternauta1337"/></a> <a href="https://github.com/foeken"><img src="https://avatars.githubusercontent.com/u/13864?v=4&s=48" width="48" height="48" alt="foeken" title="foeken"/></a> <a href="https://github.com/gittb"><img src="https://avatars.githubusercontent.com/u/8284364?v=4&s=48" width="48" height="48" alt="gittb" title="gittb"/></a>
|
||||
<a href="https://github.com/HeimdallStrategy"><img src="https://avatars.githubusercontent.com/u/223014405?v=4&s=48" width="48" height="48" alt="HeimdallStrategy" title="HeimdallStrategy"/></a> <a href="https://github.com/junsuwhy"><img src="https://avatars.githubusercontent.com/u/4645498?v=4&s=48" width="48" height="48" alt="junsuwhy" title="junsuwhy"/></a> <a href="https://github.com/knocte"><img src="https://avatars.githubusercontent.com/u/331303?v=4&s=48" width="48" height="48" alt="knocte" title="knocte"/></a> <a href="https://github.com/MackDing"><img src="https://avatars.githubusercontent.com/u/19878893?v=4&s=48" width="48" height="48" alt="MackDing" title="MackDing"/></a> <a href="https://github.com/nobrainer-tech"><img src="https://avatars.githubusercontent.com/u/445466?v=4&s=48" width="48" height="48" alt="nobrainer-tech" title="nobrainer-tech"/></a> <a href="https://github.com/Noctivoro"><img src="https://avatars.githubusercontent.com/u/183974570?v=4&s=48" width="48" height="48" alt="Noctivoro" title="Noctivoro"/></a> <a href="https://github.com/Raikan10"><img src="https://avatars.githubusercontent.com/u/20675476?v=4&s=48" width="48" height="48" alt="Raikan10" title="Raikan10"/></a> <a href="https://github.com/Swader"><img src="https://avatars.githubusercontent.com/u/1430603?v=4&s=48" width="48" height="48" alt="Swader" title="Swader"/></a> <a href="https://github.com/alexstyl"><img src="https://avatars.githubusercontent.com/u/1665273?v=4&s=48" width="48" height="48" alt="alexstyl" title="alexstyl"/></a> <a href="https://github.com/ethanpalm"><img src="https://avatars.githubusercontent.com/u/56270045?v=4&s=48" width="48" height="48" alt="Ethan Palm" title="Ethan Palm"/></a>
|
||||
<a href="https://github.com/HeimdallStrategy"><img src="https://avatars.githubusercontent.com/u/223014405?v=4&s=48" width="48" height="48" alt="HeimdallStrategy" title="HeimdallStrategy"/></a> <a href="https://github.com/junsuwhy"><img src="https://avatars.githubusercontent.com/u/4645498?v=4&s=48" width="48" height="48" alt="junsuwhy" title="junsuwhy"/></a> <a href="https://github.com/knocte"><img src="https://avatars.githubusercontent.com/u/331303?v=4&s=48" width="48" height="48" alt="knocte" title="knocte"/></a> <a href="https://github.com/MackDing"><img src="https://avatars.githubusercontent.com/u/19878893?v=4&s=48" width="48" height="48" alt="MackDing" title="MackDing"/></a> <a href="https://github.com/nobrainer-tech"><img src="https://avatars.githubusercontent.com/u/445466?v=4&s=48" width="48" height="48" alt="nobrainer-tech" title="nobrainer-tech"/></a> <a href="https://github.com/Noctivoro"><img src="https://avatars.githubusercontent.com/u/183974570?v=4&s=48" width="48" height="48" alt="Noctivoro" title="Noctivoro"/></a> <a href="https://github.com/Raikan10"><img src="https://avatars.githubusercontent.com/u/20675476?v=4&s=48" width="48" height="48" alt="Raikan10" title="Raikan10"/></a> <a href="https://github.com/Swader"><img src="https://avatars.githubusercontent.com/u/1430603?v=4&s=48" width="48" height="48" alt="Swader" title="Swader"/></a> <a href="https://github.com/algal"><img src="https://avatars.githubusercontent.com/u/264412?v=4&s=48" width="48" height="48" alt="Alexis Gallagher" title="Alexis Gallagher"/></a> <a href="https://github.com/alexstyl"><img src="https://avatars.githubusercontent.com/u/1665273?v=4&s=48" width="48" height="48" alt="alexstyl" title="alexstyl"/></a> <a href="https://github.com/ethanpalm"><img src="https://avatars.githubusercontent.com/u/56270045?v=4&s=48" width="48" height="48" alt="Ethan Palm" title="Ethan Palm"/></a>
|
||||
<a href="https://github.com/yingchunbai"><img src="https://avatars.githubusercontent.com/u/33477283?v=4&s=48" width="48" height="48" alt="yingchunbai" title="yingchunbai"/></a> <a href="https://github.com/joshrad-dev"><img src="https://avatars.githubusercontent.com/u/62785552?v=4&s=48" width="48" height="48" alt="joshrad-dev" title="joshrad-dev"/></a> <a href="https://github.com/danballance"><img src="https://avatars.githubusercontent.com/u/13839912?v=4&s=48" width="48" height="48" alt="Dan Ballance" title="Dan Ballance"/></a> <a href="https://github.com/GHesericsu"><img src="https://avatars.githubusercontent.com/u/60202455?v=4&s=48" width="48" height="48" alt="Eric Su" title="Eric Su"/></a> <a href="https://github.com/kimitaka"><img src="https://avatars.githubusercontent.com/u/167225?v=4&s=48" width="48" height="48" alt="Kimitaka Watanabe" title="Kimitaka Watanabe"/></a> <a href="https://github.com/itsjling"><img src="https://avatars.githubusercontent.com/u/2521993?v=4&s=48" width="48" height="48" alt="Justin Ling" title="Justin Ling"/></a> <a href="https://github.com/lutr0"><img src="https://avatars.githubusercontent.com/u/76906369?v=4&s=48" width="48" height="48" alt="lutr0" title="lutr0"/></a> <a href="https://github.com/RayBB"><img src="https://avatars.githubusercontent.com/u/921217?v=4&s=48" width="48" height="48" alt="Raymond Berger" title="Raymond Berger"/></a> <a href="https://github.com/atalovesyou"><img src="https://avatars.githubusercontent.com/u/3534502?v=4&s=48" width="48" height="48" alt="atalovesyou" title="atalovesyou"/></a> <a href="https://github.com/jayhickey"><img src="https://avatars.githubusercontent.com/u/1676460?v=4&s=48" width="48" height="48" alt="jayhickey" title="jayhickey"/></a>
|
||||
<a href="https://github.com/jonasjancarik"><img src="https://avatars.githubusercontent.com/u/2459191?v=4&s=48" width="48" height="48" alt="jonasjancarik" title="jonasjancarik"/></a> <a href="https://github.com/latitudeki5223"><img src="https://avatars.githubusercontent.com/u/119656367?v=4&s=48" width="48" height="48" alt="latitudeki5223" title="latitudeki5223"/></a> <a href="https://github.com/minghinmatthewlam"><img src="https://avatars.githubusercontent.com/u/14224566?v=4&s=48" width="48" height="48" alt="minghinmatthewlam" title="minghinmatthewlam"/></a> <a href="https://github.com/rafaelreis-r"><img src="https://avatars.githubusercontent.com/u/57492577?v=4&s=48" width="48" height="48" alt="rafaelreis-r" title="rafaelreis-r"/></a> <a href="https://github.com/ratulsarna"><img src="https://avatars.githubusercontent.com/u/105903728?v=4&s=48" width="48" height="48" alt="ratulsarna" title="ratulsarna"/></a> <a href="https://github.com/timkrase"><img src="https://avatars.githubusercontent.com/u/38947626?v=4&s=48" width="48" height="48" alt="timkrase" title="timkrase"/></a> <a href="https://github.com/efe-buken"><img src="https://avatars.githubusercontent.com/u/262546946?v=4&s=48" width="48" height="48" alt="efe-buken" title="efe-buken"/></a> <a href="https://github.com/manmal"><img src="https://avatars.githubusercontent.com/u/142797?v=4&s=48" width="48" height="48" alt="manmal" title="manmal"/></a> <a href="https://github.com/easternbloc"><img src="https://avatars.githubusercontent.com/u/92585?v=4&s=48" width="48" height="48" alt="easternbloc" title="easternbloc"/></a> <a href="https://github.com/ManuelHettich"><img src="https://avatars.githubusercontent.com/u/17690367?v=4&s=48" width="48" height="48" alt="manuelhettich" title="manuelhettich"/></a>
|
||||
<a href="https://github.com/sktbrd"><img src="https://avatars.githubusercontent.com/u/116202536?v=4&s=48" width="48" height="48" alt="sktbrd" title="sktbrd"/></a> <a href="https://github.com/larlyssa"><img src="https://avatars.githubusercontent.com/u/13128869?v=4&s=48" width="48" height="48" alt="larlyssa" title="larlyssa"/></a> <a href="https://github.com/Mind-Dragon"><img src="https://avatars.githubusercontent.com/u/262945885?v=4&s=48" width="48" height="48" alt="Mind-Dragon" title="Mind-Dragon"/></a> <a href="https://github.com/pcty-nextgen-service-account"><img src="https://avatars.githubusercontent.com/u/112553441?v=4&s=48" width="48" height="48" alt="pcty-nextgen-service-account" title="pcty-nextgen-service-account"/></a> <a href="https://github.com/tmchow"><img src="https://avatars.githubusercontent.com/u/517103?v=4&s=48" width="48" height="48" alt="tmchow" title="tmchow"/></a> <a href="https://github.com/uli-will-code"><img src="https://avatars.githubusercontent.com/u/49715419?v=4&s=48" width="48" height="48" alt="uli-will-code" title="uli-will-code"/></a> <a href="https://github.com/mgratch"><img src="https://avatars.githubusercontent.com/u/2238658?v=4&s=48" width="48" height="48" alt="Marc Gratch" title="Marc Gratch"/></a> <a href="https://github.com/JackyWay"><img src="https://avatars.githubusercontent.com/u/53031570?v=4&s=48" width="48" height="48" alt="JackyWay" title="JackyWay"/></a> <a href="https://github.com/aaronveklabs"><img src="https://avatars.githubusercontent.com/u/225997828?v=4&s=48" width="48" height="48" alt="aaronveklabs" title="aaronveklabs"/></a> <a href="https://github.com/CJWTRUST"><img src="https://avatars.githubusercontent.com/u/235565898?v=4&s=48" width="48" height="48" alt="CJWTRUST" title="CJWTRUST"/></a>
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
- Feishu reply routing now uses one canonical reply-target path across inbound and outbound flows: normal groups reply to the triggering message while topic-mode groups stay on topic roots, outbound sends preserve `replyToId`/`threadId`, withdrawn reply targets fall back to direct sends, and cron duplicate suppression normalizes Feishu/Lark target IDs consistently (#32980, #32958, #33572, #33526; #33789, #33575, #33515, #33161). Thanks @guoqunabc, @bmendonca3, @MunemHashmi, and @Jimmy-xuzimo.
|
||||
@@ -176,6 +176,7 @@ Common `agentTurn` fields:
|
||||
- `message`: required text prompt.
|
||||
- `model` / `thinking`: optional overrides (see below).
|
||||
- `timeoutSeconds`: optional timeout override.
|
||||
- `lightContext`: optional lightweight bootstrap mode for jobs that do not need workspace bootstrap file injection.
|
||||
|
||||
Delivery config:
|
||||
|
||||
@@ -235,6 +236,14 @@ Resolution priority:
|
||||
2. Hook-specific defaults (e.g., `hooks.gmail.model`)
|
||||
3. Agent config default
|
||||
|
||||
### Lightweight bootstrap context
|
||||
|
||||
Isolated jobs (`agentTurn`) can set `lightContext: true` to run with lightweight bootstrap context.
|
||||
|
||||
- Use this for scheduled chores that do not need workspace bootstrap file injection.
|
||||
- In practice, the embedded runtime runs with `bootstrapContextMode: "lightweight"`, which keeps cron bootstrap context empty on purpose.
|
||||
- CLI equivalents: `openclaw cron add --light-context ...` and `openclaw cron edit --light-context`.
|
||||
|
||||
### Delivery (channel + target)
|
||||
|
||||
Isolated jobs can deliver output to a channel via the top-level `delivery` config:
|
||||
@@ -298,7 +307,8 @@ Recurring, isolated job with delivery:
|
||||
"wakeMode": "next-heartbeat",
|
||||
"payload": {
|
||||
"kind": "agentTurn",
|
||||
"message": "Summarize overnight updates."
|
||||
"message": "Summarize overnight updates.",
|
||||
"lightContext": true
|
||||
},
|
||||
"delivery": {
|
||||
"mode": "announce",
|
||||
|
||||
@@ -243,6 +243,14 @@ Triggered when agent commands are issued:
|
||||
- **`command:reset`**: When `/reset` command is issued
|
||||
- **`command:stop`**: When `/stop` command is issued
|
||||
|
||||
### Session Events
|
||||
|
||||
- **`session:compact:before`**: Right before compaction summarizes history
|
||||
- **`session:compact:after`**: After compaction completes with summary metadata
|
||||
|
||||
Internal hook payloads emit these as `type: "session"` with `action: "compact:before"` / `action: "compact:after"`; listeners subscribe with the combined keys above.
|
||||
Specific handler registration uses the literal key format `${type}:${action}`. For these events, register `session:compact:before` and `session:compact:after`.
|
||||
|
||||
### Agent Events
|
||||
|
||||
- **`agent:bootstrap`**: Before workspace bootstrap files are injected (hooks may mutate `context.bootstrapFiles`)
|
||||
@@ -351,6 +359,13 @@ These hooks are not event-stream listeners; they let plugins synchronously adjus
|
||||
|
||||
- **`tool_result_persist`**: transform tool results before they are written to the session transcript. Must be synchronous; return the updated tool result payload or `undefined` to keep it as-is. See [Agent Loop](/concepts/agent-loop).
|
||||
|
||||
### Plugin Hook Events
|
||||
|
||||
Compaction lifecycle hooks exposed through the plugin hook runner:
|
||||
|
||||
- **`before_compaction`**: Runs before compaction with count/token metadata
|
||||
- **`after_compaction`**: Runs after compaction with compaction summary metadata
|
||||
|
||||
### Future Events
|
||||
|
||||
Planned event types:
|
||||
|
||||
@@ -10,6 +10,7 @@ title: "Polls"
|
||||
|
||||
## Supported channels
|
||||
|
||||
- Telegram
|
||||
- WhatsApp (web channel)
|
||||
- Discord
|
||||
- MS Teams (Adaptive Cards)
|
||||
@@ -17,6 +18,13 @@ title: "Polls"
|
||||
## CLI
|
||||
|
||||
```bash
|
||||
# Telegram
|
||||
openclaw message poll --channel telegram --target 123456789 \
|
||||
--poll-question "Ship it?" --poll-option "Yes" --poll-option "No"
|
||||
openclaw message poll --channel telegram --target -1001234567890:topic:42 \
|
||||
--poll-question "Pick a time" --poll-option "10am" --poll-option "2pm" \
|
||||
--poll-duration-seconds 300
|
||||
|
||||
# WhatsApp
|
||||
openclaw message poll --target +15555550123 \
|
||||
--poll-question "Lunch today?" --poll-option "Yes" --poll-option "No" --poll-option "Maybe"
|
||||
@@ -36,9 +44,11 @@ openclaw message poll --channel msteams --target conversation:19:abc@thread.tacv
|
||||
|
||||
Options:
|
||||
|
||||
- `--channel`: `whatsapp` (default), `discord`, or `msteams`
|
||||
- `--channel`: `whatsapp` (default), `telegram`, `discord`, or `msteams`
|
||||
- `--poll-multi`: allow selecting multiple options
|
||||
- `--poll-duration-hours`: Discord-only (defaults to 24 when omitted)
|
||||
- `--poll-duration-seconds`: Telegram-only (5-600 seconds)
|
||||
- `--poll-anonymous` / `--poll-public`: Telegram-only poll visibility
|
||||
|
||||
## Gateway RPC
|
||||
|
||||
@@ -51,11 +61,14 @@ Params:
|
||||
- `options` (string[], required)
|
||||
- `maxSelections` (number, optional)
|
||||
- `durationHours` (number, optional)
|
||||
- `durationSeconds` (number, optional, Telegram-only)
|
||||
- `isAnonymous` (boolean, optional, Telegram-only)
|
||||
- `channel` (string, optional, default: `whatsapp`)
|
||||
- `idempotencyKey` (string, required)
|
||||
|
||||
## Channel differences
|
||||
|
||||
- Telegram: 2-10 options. Supports forum topics via `threadId` or `:topic:` targets. Uses `durationSeconds` instead of `durationHours`, limited to 5-600 seconds. Supports anonymous and public polls.
|
||||
- WhatsApp: 2-12 options, `maxSelections` must be within option count, ignores `durationHours`.
|
||||
- Discord: 2-10 options, `durationHours` clamped to 1-768 hours (default 24). `maxSelections > 1` enables multi-select; Discord does not support a strict selection count.
|
||||
- MS Teams: Adaptive Card polls (OpenClaw-managed). No native poll API; `durationHours` is ignored.
|
||||
@@ -64,6 +77,10 @@ Params:
|
||||
|
||||
Use the `message` tool with `poll` action (`to`, `pollQuestion`, `pollOption`, optional `pollMulti`, `pollDurationHours`, `channel`).
|
||||
|
||||
For Telegram, the tool also accepts `pollDurationSeconds`, `pollAnonymous`, and `pollPublic`.
|
||||
|
||||
Use `action: "poll"` for poll creation. Poll fields passed with `action: "send"` are rejected.
|
||||
|
||||
Note: Discord has no "pick exactly N" mode; `pollMulti` maps to multi-select.
|
||||
Teams polls are rendered as Adaptive Cards and require the gateway to stay online
|
||||
to record votes in `~/.openclaw/msteams-polls.json`.
|
||||
|
||||
@@ -1102,12 +1102,19 @@ openclaw logs --follow
|
||||
|
||||
- `Listener DiscordMessageListener timed out after 30000ms for event MESSAGE_CREATE`
|
||||
- `Slow listener detected ...`
|
||||
- `discord inbound worker timed out after ...`
|
||||
|
||||
Canonical knob:
|
||||
Listener budget knob:
|
||||
|
||||
- single-account: `channels.discord.eventQueue.listenerTimeout`
|
||||
- multi-account: `channels.discord.accounts.<accountId>.eventQueue.listenerTimeout`
|
||||
|
||||
Worker run timeout knob:
|
||||
|
||||
- single-account: `channels.discord.inboundWorker.runTimeoutMs`
|
||||
- multi-account: `channels.discord.accounts.<accountId>.inboundWorker.runTimeoutMs`
|
||||
- default: `1800000` (30 minutes); set `0` to disable
|
||||
|
||||
Recommended baseline:
|
||||
|
||||
```json5
|
||||
@@ -1119,6 +1126,9 @@ openclaw logs --follow
|
||||
eventQueue: {
|
||||
listenerTimeout: 120000,
|
||||
},
|
||||
inboundWorker: {
|
||||
runTimeoutMs: 1800000,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1126,7 +1136,8 @@ openclaw logs --follow
|
||||
}
|
||||
```
|
||||
|
||||
Tune this first before adding alternate timeout controls elsewhere.
|
||||
Use `eventQueue.listenerTimeout` for slow listener setup and `inboundWorker.runTimeoutMs`
|
||||
only if you want a separate safety valve for queued agent turns.
|
||||
|
||||
</Accordion>
|
||||
|
||||
@@ -1177,7 +1188,8 @@ High-signal Discord fields:
|
||||
- startup/auth: `enabled`, `token`, `accounts.*`, `allowBots`
|
||||
- policy: `groupPolicy`, `dm.*`, `guilds.*`, `guilds.*.channels.*`
|
||||
- command: `commands.native`, `commands.useAccessGroups`, `configWrites`, `slashCommand.*`
|
||||
- event queue: `eventQueue.listenerTimeout` (canonical), `eventQueue.maxQueueSize`, `eventQueue.maxConcurrency`
|
||||
- event queue: `eventQueue.listenerTimeout` (listener budget), `eventQueue.maxQueueSize`, `eventQueue.maxConcurrency`
|
||||
- inbound worker: `inboundWorker.runTimeoutMs`
|
||||
- reply/history: `replyToMode`, `historyLimit`, `dmHistoryLimit`, `dms.*.historyLimit`
|
||||
- delivery: `textChunkLimit`, `chunkMode`, `maxLinesPerMessage`
|
||||
- streaming: `streaming` (legacy alias: `streamMode`), `draftChunk`, `blockStreaming`, `blockStreamingCoalesce`
|
||||
|
||||
@@ -175,6 +175,151 @@ Config:
|
||||
- `channels.mattermost.actions.reactions`: enable/disable reaction actions (default true).
|
||||
- Per-account override: `channels.mattermost.accounts.<id>.actions.reactions`.
|
||||
|
||||
## Interactive buttons (message tool)
|
||||
|
||||
Send messages with clickable buttons. When a user clicks a button, the agent receives the
|
||||
selection and can respond.
|
||||
|
||||
Enable buttons by adding `inlineButtons` to the channel capabilities:
|
||||
|
||||
```json5
|
||||
{
|
||||
channels: {
|
||||
mattermost: {
|
||||
capabilities: ["inlineButtons"],
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
Use `message action=send` with a `buttons` parameter. Buttons are a 2D array (rows of buttons):
|
||||
|
||||
```
|
||||
message action=send channel=mattermost target=channel:<channelId> buttons=[[{"text":"Yes","callback_data":"yes"},{"text":"No","callback_data":"no"}]]
|
||||
```
|
||||
|
||||
Button fields:
|
||||
|
||||
- `text` (required): display label.
|
||||
- `callback_data` (required): value sent back on click (used as the action ID).
|
||||
- `style` (optional): `"default"`, `"primary"`, or `"danger"`.
|
||||
|
||||
When a user clicks a button:
|
||||
|
||||
1. All buttons are replaced with a confirmation line (e.g., "✓ **Yes** selected by @user").
|
||||
2. The agent receives the selection as an inbound message and responds.
|
||||
|
||||
Notes:
|
||||
|
||||
- Button callbacks use HMAC-SHA256 verification (automatic, no config needed).
|
||||
- Mattermost strips callback data from its API responses (security feature), so all buttons
|
||||
are removed on click — partial removal is not possible.
|
||||
- Action IDs containing hyphens or underscores are sanitized automatically
|
||||
(Mattermost routing limitation).
|
||||
|
||||
Config:
|
||||
|
||||
- `channels.mattermost.capabilities`: array of capability strings. Add `"inlineButtons"` to
|
||||
enable the buttons tool description in the agent system prompt.
|
||||
|
||||
### Direct API integration (external scripts)
|
||||
|
||||
External scripts and webhooks can post buttons directly via the Mattermost REST API
|
||||
instead of going through the agent's `message` tool. Use `buildButtonAttachments()` from
|
||||
the extension when possible; if posting raw JSON, follow these rules:
|
||||
|
||||
**Payload structure:**
|
||||
|
||||
```json5
|
||||
{
|
||||
channel_id: "<channelId>",
|
||||
message: "Choose an option:",
|
||||
props: {
|
||||
attachments: [
|
||||
{
|
||||
actions: [
|
||||
{
|
||||
id: "mybutton01", // alphanumeric only — see below
|
||||
type: "button", // required, or clicks are silently ignored
|
||||
name: "Approve", // display label
|
||||
style: "primary", // optional: "default", "primary", "danger"
|
||||
integration: {
|
||||
url: "http://localhost:18789/mattermost/interactions/default",
|
||||
context: {
|
||||
action_id: "mybutton01", // must match button id (for name lookup)
|
||||
action: "approve",
|
||||
// ... any custom fields ...
|
||||
_token: "<hmac>", // see HMAC section below
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
**Critical rules:**
|
||||
|
||||
1. Attachments go in `props.attachments`, not top-level `attachments` (silently ignored).
|
||||
2. Every action needs `type: "button"` — without it, clicks are swallowed silently.
|
||||
3. Every action needs an `id` field — Mattermost ignores actions without IDs.
|
||||
4. Action `id` must be **alphanumeric only** (`[a-zA-Z0-9]`). Hyphens and underscores break
|
||||
Mattermost's server-side action routing (returns 404). Strip them before use.
|
||||
5. `context.action_id` must match the button's `id` so the confirmation message shows the
|
||||
button name (e.g., "Approve") instead of a raw ID.
|
||||
6. `context.action_id` is required — the interaction handler returns 400 without it.
|
||||
|
||||
**HMAC token generation:**
|
||||
|
||||
The gateway verifies button clicks with HMAC-SHA256. External scripts must generate tokens
|
||||
that match the gateway's verification logic:
|
||||
|
||||
1. Derive the secret from the bot token:
|
||||
`HMAC-SHA256(key="openclaw-mattermost-interactions", data=botToken)`
|
||||
2. Build the context object with all fields **except** `_token`.
|
||||
3. Serialize with **sorted keys** and **no spaces** (the gateway uses `JSON.stringify`
|
||||
with sorted keys, which produces compact output).
|
||||
4. Sign: `HMAC-SHA256(key=secret, data=serializedContext)`
|
||||
5. Add the resulting hex digest as `_token` in the context.
|
||||
|
||||
Python example:
|
||||
|
||||
```python
|
||||
import hmac, hashlib, json
|
||||
|
||||
secret = hmac.new(
|
||||
b"openclaw-mattermost-interactions",
|
||||
bot_token.encode(), hashlib.sha256
|
||||
).hexdigest()
|
||||
|
||||
ctx = {"action_id": "mybutton01", "action": "approve"}
|
||||
payload = json.dumps(ctx, sort_keys=True, separators=(",", ":"))
|
||||
token = hmac.new(secret.encode(), payload.encode(), hashlib.sha256).hexdigest()
|
||||
|
||||
context = {**ctx, "_token": token}
|
||||
```
|
||||
|
||||
Common HMAC pitfalls:
|
||||
|
||||
- Python's `json.dumps` adds spaces by default (`{"key": "val"}`). Use
|
||||
`separators=(",", ":")` to match JavaScript's compact output (`{"key":"val"}`).
|
||||
- Always sign **all** context fields (minus `_token`). The gateway strips `_token` then
|
||||
signs everything remaining. Signing a subset causes silent verification failure.
|
||||
- Use `sort_keys=True` — the gateway sorts keys before signing, and Mattermost may
|
||||
reorder context fields when storing the payload.
|
||||
- Derive the secret from the bot token (deterministic), not random bytes. The secret
|
||||
must be the same across the process that creates buttons and the gateway that verifies.
|
||||
|
||||
## Directory adapter
|
||||
|
||||
The Mattermost plugin includes a directory adapter that resolves channel and user names
|
||||
via the Mattermost API. This enables `#channel-name` and `@username` targets in
|
||||
`openclaw message send` and cron/webhook deliveries.
|
||||
|
||||
No configuration is needed — the adapter uses the bot token from the account config.
|
||||
|
||||
## Multi-account
|
||||
|
||||
Mattermost supports multiple accounts under `channels.mattermost.accounts`:
|
||||
@@ -197,3 +342,10 @@ Mattermost supports multiple accounts under `channels.mattermost.accounts`:
|
||||
- No replies in channels: ensure the bot is in the channel and mention it (oncall), use a trigger prefix (onchar), or set `chatmode: "onmessage"`.
|
||||
- Auth errors: check the bot token, base URL, and whether the account is enabled.
|
||||
- Multi-account issues: env vars only apply to the `default` account.
|
||||
- Buttons appear as white boxes: the agent may be sending malformed button data. Check that each button has both `text` and `callback_data` fields.
|
||||
- Buttons render but clicks do nothing: verify `AllowedUntrustedInternalConnections` in Mattermost server config includes `127.0.0.1 localhost`, and that `EnablePostActionIntegration` is `true` in ServiceSettings.
|
||||
- Buttons return 404 on click: the button `id` likely contains hyphens or underscores. Mattermost's action router breaks on non-alphanumeric IDs. Use `[a-zA-Z0-9]` only.
|
||||
- Gateway logs `invalid _token`: HMAC mismatch. Check that you sign all context fields (not a subset), use sorted keys, and use compact JSON (no spaces). See the HMAC section above.
|
||||
- Gateway logs `missing _token in context`: the `_token` field is not in the button's context. Ensure it is included when building the integration payload.
|
||||
- Confirmation shows raw ID instead of button name: `context.action_id` does not match the button's `id`. Set both to the same sanitized value.
|
||||
- Agent doesn't know about buttons: add `capabilities: ["inlineButtons"]` to the Mattermost channel config.
|
||||
|
||||
@@ -321,7 +321,21 @@ Resolution order:
|
||||
Notes:
|
||||
|
||||
- Slack expects shortcodes (for example `"eyes"`).
|
||||
- Use `""` to disable the reaction for a channel or account.
|
||||
- Use `""` to disable the reaction for the Slack account or globally.
|
||||
|
||||
## Typing reaction fallback
|
||||
|
||||
`typingReaction` adds a temporary reaction to the inbound Slack message while OpenClaw is processing a reply, then removes it when the run finishes. This is a useful fallback when Slack native assistant typing is unavailable, especially in DMs.
|
||||
|
||||
Resolution order:
|
||||
|
||||
- `channels.slack.accounts.<accountId>.typingReaction`
|
||||
- `channels.slack.typingReaction`
|
||||
|
||||
Notes:
|
||||
|
||||
- Slack expects shortcodes (for example `"hourglass_flowing_sand"`).
|
||||
- The reaction is best-effort and cleanup is attempted automatically after the reply or failure path completes.
|
||||
|
||||
## Manifest and scope checklist
|
||||
|
||||
|
||||
@@ -524,6 +524,13 @@ curl "https://api.telegram.org/bot<bot_token>/getUpdates"
|
||||
|
||||
This is currently scoped to forum topics in groups and supergroups.
|
||||
|
||||
**Thread-bound ACP spawn from chat**:
|
||||
|
||||
- `/acp spawn <agent> --thread here|auto` can bind the current Telegram topic to a new ACP session.
|
||||
- Follow-up topic messages route to the bound ACP session directly (no `/acp steer` required).
|
||||
- OpenClaw pins the spawn confirmation message in-topic after a successful bind.
|
||||
- Requires `channels.telegram.threadBindings.spawnAcpSessions=true`.
|
||||
|
||||
Template context includes:
|
||||
|
||||
- `MessageThreadId`
|
||||
@@ -732,6 +739,28 @@ openclaw message send --channel telegram --target 123456789 --message "hi"
|
||||
openclaw message send --channel telegram --target @name --message "hi"
|
||||
```
|
||||
|
||||
Telegram polls use `openclaw message poll` and support forum topics:
|
||||
|
||||
```bash
|
||||
openclaw message poll --channel telegram --target 123456789 \
|
||||
--poll-question "Ship it?" --poll-option "Yes" --poll-option "No"
|
||||
openclaw message poll --channel telegram --target -1001234567890:topic:42 \
|
||||
--poll-question "Pick a time" --poll-option "10am" --poll-option "2pm" \
|
||||
--poll-duration-seconds 300 --poll-public
|
||||
```
|
||||
|
||||
Telegram-only poll flags:
|
||||
|
||||
- `--poll-duration-seconds` (5–600 seconds)
|
||||
- `--poll-anonymous`
|
||||
- `--poll-public`
|
||||
- `--thread-id` for forum topics (or use a `:topic:` target)
|
||||
|
||||
Action gating:
|
||||
|
||||
- `channels.telegram.actions.sendMessage=false` disables outbound Telegram messages, including polls
|
||||
- `channels.telegram.actions.poll=false` disables Telegram poll creation while leaving regular sends enabled
|
||||
|
||||
</Accordion>
|
||||
</AccordionGroup>
|
||||
|
||||
@@ -813,6 +842,7 @@ Primary reference:
|
||||
- `channels.telegram.tokenFile`: read token from file path.
|
||||
- `channels.telegram.dmPolicy`: `pairing | allowlist | open | disabled` (default: pairing).
|
||||
- `channels.telegram.allowFrom`: DM allowlist (numeric Telegram user IDs). `allowlist` requires at least one sender ID. `open` requires `"*"`. `openclaw doctor --fix` can resolve legacy `@username` entries to IDs and can recover allowlist entries from pairing-store files in allowlist migration flows.
|
||||
- `channels.telegram.actions.poll`: enable or disable Telegram poll creation (default: enabled; still requires `sendMessage`).
|
||||
- `channels.telegram.defaultTo`: default Telegram target used by CLI `--deliver` when no explicit `--reply-to` is provided.
|
||||
- `channels.telegram.groupPolicy`: `open | allowlist | disabled` (default: allowlist).
|
||||
- `channels.telegram.groupAllowFrom`: group sender allowlist (numeric Telegram user IDs). `openclaw doctor --fix` can resolve legacy `@username` entries to IDs. Non-numeric entries are ignored at auth time. Group auth does not use DM pairing-store fallback (`2026.2.25+`).
|
||||
|
||||
@@ -67,6 +67,7 @@ openclaw channels logout --channel whatsapp
|
||||
- Run `openclaw status --deep` for a broad probe.
|
||||
- Use `openclaw doctor` for guided fixes.
|
||||
- `openclaw channels list` prints `Claude: HTTP 403 ... user:profile` → usage snapshot needs the `user:profile` scope. Use `--no-usage`, or provide a claude.ai session key (`CLAUDE_WEB_SESSION_KEY` / `CLAUDE_WEB_COOKIE`), or re-auth via Claude Code CLI.
|
||||
- `openclaw channels status` falls back to config-only summaries when the gateway is unreachable. If a supported channel credential is configured via SecretRef but unavailable in the current command path, it reports that account as configured with degraded notes instead of showing it as not configured.
|
||||
|
||||
## Capabilities probe
|
||||
|
||||
@@ -97,3 +98,4 @@ Notes:
|
||||
|
||||
- Use `--kind user|group|auto` to force the target type.
|
||||
- Resolution prefers active matches when multiple entries share the same name.
|
||||
- `channels resolve` is read-only. If a selected account is configured via SecretRef but that credential is unavailable in the current command path, the command returns degraded unresolved results with notes instead of aborting the entire run.
|
||||
|
||||
@@ -24,6 +24,9 @@ Notes:
|
||||
|
||||
- Choosing where the Gateway runs always updates `gateway.mode`. You can select "Continue" without other sections if that is all you need.
|
||||
- Channel-oriented services (Slack/Discord/Matrix/Microsoft Teams) prompt for channel/room allowlists during setup. You can enter names or IDs; the wizard resolves names to IDs when possible.
|
||||
- If you run the daemon install step, token auth requires a token, and `gateway.auth.token` is SecretRef-managed, configure validates the SecretRef but does not persist resolved plaintext token values into supervisor service environment metadata.
|
||||
- If token auth requires a token and the configured token SecretRef is unresolved, configure blocks daemon install with actionable remediation guidance.
|
||||
- If both `gateway.auth.token` and `gateway.auth.password` are configured and `gateway.auth.mode` is unset, configure blocks daemon install until mode is set explicitly.
|
||||
|
||||
## Examples
|
||||
|
||||
|
||||
@@ -42,8 +42,28 @@ Disable delivery for an isolated job:
|
||||
openclaw cron edit <job-id> --no-deliver
|
||||
```
|
||||
|
||||
Enable lightweight bootstrap context for an isolated job:
|
||||
|
||||
```bash
|
||||
openclaw cron edit <job-id> --light-context
|
||||
```
|
||||
|
||||
Announce to a specific channel:
|
||||
|
||||
```bash
|
||||
openclaw cron edit <job-id> --announce --channel slack --to "channel:C1234567890"
|
||||
```
|
||||
|
||||
Create an isolated job with lightweight bootstrap context:
|
||||
|
||||
```bash
|
||||
openclaw cron add \
|
||||
--name "Lightweight morning brief" \
|
||||
--cron "0 7 * * *" \
|
||||
--session isolated \
|
||||
--message "Summarize overnight updates." \
|
||||
--light-context \
|
||||
--no-deliver
|
||||
```
|
||||
|
||||
`--light-context` applies to isolated agent-turn jobs only. For cron runs, lightweight mode keeps bootstrap context empty instead of injecting the full workspace bootstrap set.
|
||||
|
||||
@@ -38,6 +38,13 @@ openclaw daemon uninstall
|
||||
- `install`: `--port`, `--runtime <node|bun>`, `--token`, `--force`, `--json`
|
||||
- lifecycle (`uninstall|start|stop|restart`): `--json`
|
||||
|
||||
Notes:
|
||||
|
||||
- `status` resolves configured auth SecretRefs for probe auth when possible.
|
||||
- When token auth requires a token and `gateway.auth.token` is SecretRef-managed, `install` validates that the SecretRef is resolvable but does not persist the resolved token into service environment metadata.
|
||||
- If token auth requires a token and the configured token SecretRef is unresolved, install fails closed.
|
||||
- If both `gateway.auth.token` and `gateway.auth.password` are configured and `gateway.auth.mode` is unset, install is blocked until mode is set explicitly.
|
||||
|
||||
## Prefer
|
||||
|
||||
Use [`openclaw gateway`](/cli/gateway) for current docs and examples.
|
||||
|
||||
@@ -14,3 +14,9 @@ Open the Control UI using your current auth.
|
||||
openclaw dashboard
|
||||
openclaw dashboard --no-open
|
||||
```
|
||||
|
||||
Notes:
|
||||
|
||||
- `dashboard` resolves configured `gateway.auth.token` SecretRefs when possible.
|
||||
- For SecretRef-managed tokens (resolved or unresolved), `dashboard` prints/copies/opens a non-tokenized URL to avoid exposing external secrets in terminal output, clipboard history, or browser-launch arguments.
|
||||
- If `gateway.auth.token` is SecretRef-managed but unresolved in this command path, the command prints a non-tokenized URL and explicit remediation guidance instead of embedding an invalid token placeholder.
|
||||
|
||||
@@ -105,6 +105,11 @@ Options:
|
||||
- `--no-probe`: skip the RPC probe (service-only view).
|
||||
- `--deep`: scan system-level services too.
|
||||
|
||||
Notes:
|
||||
|
||||
- `gateway status` resolves configured auth SecretRefs for probe auth when possible.
|
||||
- If a required auth SecretRef is unresolved in this command path, probe auth can fail; pass `--token`/`--password` explicitly or resolve the secret source first.
|
||||
|
||||
### `gateway probe`
|
||||
|
||||
`gateway probe` is the “debug everything” command. It always probes:
|
||||
@@ -162,6 +167,10 @@ openclaw gateway uninstall
|
||||
Notes:
|
||||
|
||||
- `gateway install` supports `--port`, `--runtime`, `--token`, `--force`, `--json`.
|
||||
- When token auth requires a token and `gateway.auth.token` is SecretRef-managed, `gateway install` validates that the SecretRef is resolvable but does not persist the resolved token into service environment metadata.
|
||||
- If token auth requires a token and the configured token SecretRef is unresolved, install fails closed instead of persisting fallback plaintext.
|
||||
- In inferred auth mode, shell-only `OPENCLAW_GATEWAY_PASSWORD`/`CLAWDBOT_GATEWAY_PASSWORD` does not relax install token requirements; use durable config (`gateway.auth.password` or config `env`) when installing a managed service.
|
||||
- If both `gateway.auth.token` and `gateway.auth.password` are configured and `gateway.auth.mode` is unset, install is blocked until mode is set explicitly.
|
||||
- Lifecycle commands accept `--json` for scripting.
|
||||
|
||||
## Discover gateways (Bonjour)
|
||||
|
||||
@@ -359,6 +359,7 @@ Options:
|
||||
- `--gateway-bind <loopback|lan|tailnet|auto|custom>`
|
||||
- `--gateway-auth <token|password>`
|
||||
- `--gateway-token <token>`
|
||||
- `--gateway-token-ref-env <name>` (non-interactive; store `gateway.auth.token` as an env SecretRef; requires that env var to be set; cannot be combined with `--gateway-token`)
|
||||
- `--gateway-password <password>`
|
||||
- `--remote-url <url>`
|
||||
- `--remote-token <token>`
|
||||
|
||||
@@ -61,6 +61,28 @@ Non-interactive `ref` mode contract:
|
||||
- Do not pass inline key flags (for example `--openai-api-key`) unless that env var is also set.
|
||||
- If an inline key flag is passed without the required env var, onboarding fails fast with guidance.
|
||||
|
||||
Gateway token options in non-interactive mode:
|
||||
|
||||
- `--gateway-auth token --gateway-token <token>` stores a plaintext token.
|
||||
- `--gateway-auth token --gateway-token-ref-env <name>` stores `gateway.auth.token` as an env SecretRef.
|
||||
- `--gateway-token` and `--gateway-token-ref-env` are mutually exclusive.
|
||||
- `--gateway-token-ref-env` requires a non-empty env var in the onboarding process environment.
|
||||
- With `--install-daemon`, when token auth requires a token, SecretRef-managed gateway tokens are validated but not persisted as resolved plaintext in supervisor service environment metadata.
|
||||
- With `--install-daemon`, if token mode requires a token and the configured token SecretRef is unresolved, onboarding fails closed with remediation guidance.
|
||||
- With `--install-daemon`, if both `gateway.auth.token` and `gateway.auth.password` are configured and `gateway.auth.mode` is unset, onboarding blocks install until mode is set explicitly.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
export OPENCLAW_GATEWAY_TOKEN="your-token"
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice skip \
|
||||
--gateway-auth token \
|
||||
--gateway-token-ref-env OPENCLAW_GATEWAY_TOKEN \
|
||||
--accept-risk
|
||||
```
|
||||
|
||||
Interactive onboarding behavior with reference mode:
|
||||
|
||||
- Choose **Use secret reference** when prompted.
|
||||
|
||||
@@ -35,7 +35,10 @@ openclaw qr --url wss://gateway.example/ws --token '<token>'
|
||||
|
||||
- `--token` and `--password` are mutually exclusive.
|
||||
- With `--remote`, if the effectively-active remote credentials are configured as SecretRefs and you do not pass `--token` or `--password`, the command resolves them from the active gateway snapshot. If the gateway is unavailable, the command fails fast.
|
||||
- Without `--remote`, local `gateway.auth.password` SecretRefs are resolved when password auth can win (explicit `gateway.auth.mode="password"` or inferred password mode with no winning token from auth/env), and no CLI auth override is passed.
|
||||
- Without `--remote`, local gateway auth SecretRefs are resolved when no CLI auth override is passed:
|
||||
- `gateway.auth.token` resolves when token auth can win (explicit `gateway.auth.mode="token"` or inferred mode where no password source wins).
|
||||
- `gateway.auth.password` resolves when password auth can win (explicit `gateway.auth.mode="password"` or inferred mode with no winning token from auth/env).
|
||||
- If both `gateway.auth.token` and `gateway.auth.password` are configured (including SecretRefs) and `gateway.auth.mode` is unset, setup-code resolution fails until mode is set explicitly.
|
||||
- Gateway version skew note: this command path requires a gateway that supports `secrets.resolve`; older gateways return an unknown-method error.
|
||||
- After scanning, approve device pairing with:
|
||||
- `openclaw devices list`
|
||||
|
||||
@@ -24,3 +24,5 @@ Notes:
|
||||
- Overview includes Gateway + node host service install/runtime status when available.
|
||||
- Overview includes update channel + git SHA (for source checkouts).
|
||||
- Update info surfaces in the Overview; if an update is available, status prints a hint to run `openclaw update` (see [Updating](/install/updating)).
|
||||
- Read-only status surfaces (`status`, `status --json`, `status --all`) resolve supported SecretRefs for their targeted config paths when possible.
|
||||
- If a supported channel SecretRef is configured but unavailable in the current command path, status stays read-only and reports degraded output instead of crashing. Human output shows warnings such as “configured token unavailable in this command path”, and JSON output includes `secretDiagnostics`.
|
||||
|
||||
@@ -14,6 +14,10 @@ Related:
|
||||
|
||||
- TUI guide: [TUI](/web/tui)
|
||||
|
||||
Notes:
|
||||
|
||||
- `tui` resolves configured gateway auth SecretRefs for token/password auth when possible (`env`/`file`/`exec` providers).
|
||||
|
||||
## Examples
|
||||
|
||||
```bash
|
||||
|
||||
@@ -82,7 +82,7 @@ See [Hooks](/automation/hooks) for setup and examples.
|
||||
These run inside the agent loop or gateway pipeline:
|
||||
|
||||
- **`before_model_resolve`**: runs pre-session (no `messages`) to deterministically override provider/model before model resolution.
|
||||
- **`before_prompt_build`**: runs after session load (with `messages`) to inject `prependContext`/`systemPrompt` before prompt submission.
|
||||
- **`before_prompt_build`**: runs after session load (with `messages`) to inject `prependContext`, `systemPrompt`, `prependSystemContext`, or `appendSystemContext` before prompt submission. Use `prependContext` for per-turn dynamic text and system-context fields for stable guidance that should sit in system prompt space.
|
||||
- **`before_agent_start`**: legacy compatibility hook that may run in either phase; prefer the explicit hooks above.
|
||||
- **`agent_end`**: inspect the final message list and run metadata after completion.
|
||||
- **`before_compaction` / `after_compaction`**: observe or annotate compaction cycles.
|
||||
|
||||
@@ -41,15 +41,16 @@ OpenClaw ships with the pi‑ai catalog. These providers require **no**
|
||||
- Provider: `openai`
|
||||
- Auth: `OPENAI_API_KEY`
|
||||
- Optional rotation: `OPENAI_API_KEYS`, `OPENAI_API_KEY_1`, `OPENAI_API_KEY_2`, plus `OPENCLAW_LIVE_OPENAI_KEY` (single override)
|
||||
- Example model: `openai/gpt-5.1-codex`
|
||||
- Example models: `openai/gpt-5.4`, `openai/gpt-5.4-pro`
|
||||
- CLI: `openclaw onboard --auth-choice openai-api-key`
|
||||
- Default transport is `auto` (WebSocket-first, SSE fallback)
|
||||
- Override per model via `agents.defaults.models["openai/<model>"].params.transport` (`"sse"`, `"websocket"`, or `"auto"`)
|
||||
- OpenAI Responses WebSocket warm-up defaults to enabled via `params.openaiWsWarmup` (`true`/`false`)
|
||||
- OpenAI priority processing can be enabled via `agents.defaults.models["openai/<model>"].params.serviceTier`
|
||||
|
||||
```json5
|
||||
{
|
||||
agents: { defaults: { model: { primary: "openai/gpt-5.1-codex" } } },
|
||||
agents: { defaults: { model: { primary: "openai/gpt-5.4" } } },
|
||||
}
|
||||
```
|
||||
|
||||
@@ -73,7 +74,7 @@ OpenClaw ships with the pi‑ai catalog. These providers require **no**
|
||||
|
||||
- Provider: `openai-codex`
|
||||
- Auth: OAuth (ChatGPT)
|
||||
- Example model: `openai-codex/gpt-5.3-codex`
|
||||
- Example model: `openai-codex/gpt-5.4`
|
||||
- CLI: `openclaw onboard --auth-choice openai-codex` or `openclaw models auth login --provider openai-codex`
|
||||
- Default transport is `auto` (WebSocket-first, SSE fallback)
|
||||
- Override per model via `agents.defaults.models["openai-codex/<model>"].params.transport` (`"sse"`, `"websocket"`, or `"auto"`)
|
||||
@@ -81,7 +82,7 @@ OpenClaw ships with the pi‑ai catalog. These providers require **no**
|
||||
|
||||
```json5
|
||||
{
|
||||
agents: { defaults: { model: { primary: "openai-codex/gpt-5.3-codex" } } },
|
||||
agents: { defaults: { model: { primary: "openai-codex/gpt-5.4" } } },
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
337
docs/experiments/plans/discord-async-inbound-worker.md
Normal file
337
docs/experiments/plans/discord-async-inbound-worker.md
Normal file
@@ -0,0 +1,337 @@
|
||||
---
|
||||
summary: "Status and next steps for decoupling Discord gateway listeners from long-running agent turns with a Discord-specific inbound worker"
|
||||
owner: "openclaw"
|
||||
status: "in_progress"
|
||||
last_updated: "2026-03-05"
|
||||
title: "Discord Async Inbound Worker Plan"
|
||||
---
|
||||
|
||||
# Discord Async Inbound Worker Plan
|
||||
|
||||
## Objective
|
||||
|
||||
Remove Discord listener timeout as a user-facing failure mode by making inbound Discord turns asynchronous:
|
||||
|
||||
1. Gateway listener accepts and normalizes inbound events quickly.
|
||||
2. A Discord run queue stores serialized jobs keyed by the same ordering boundary we use today.
|
||||
3. A worker executes the actual agent turn outside the Carbon listener lifetime.
|
||||
4. Replies are delivered back to the originating channel or thread after the run completes.
|
||||
|
||||
This is the long-term fix for queued Discord runs timing out at `channels.discord.eventQueue.listenerTimeout` while the agent run itself is still making progress.
|
||||
|
||||
## Current status
|
||||
|
||||
This plan is partially implemented.
|
||||
|
||||
Already done:
|
||||
|
||||
- Discord listener timeout and Discord run timeout are now separate settings.
|
||||
- Accepted inbound Discord turns are enqueued into the worker queue implemented in `src/discord/monitor/inbound-worker.ts`.
|
||||
- The worker now owns the long-running turn instead of the Carbon listener.
|
||||
- Existing per-route ordering is preserved by queue key.
|
||||
- Timeout regression coverage exists for the Discord worker path.
|
||||
|
||||
What this means in plain language:
|
||||
|
||||
- the production timeout bug is fixed
|
||||
- the long-running turn no longer dies just because the Discord listener budget expires
|
||||
- the worker architecture is not finished yet
|
||||
|
||||
What is still missing:
|
||||
|
||||
- `DiscordInboundJob` is still only partially normalized and still carries live runtime references
|
||||
- command semantics (`stop`, `new`, `reset`, future session controls) are not yet fully worker-native
|
||||
- worker observability and operator status are still minimal
|
||||
- there is still no restart durability
|
||||
|
||||
## Why this exists
|
||||
|
||||
Current behavior ties the full agent turn to the listener lifetime:
|
||||
|
||||
- `src/discord/monitor/listeners.ts` applies the timeout and abort boundary.
|
||||
- `src/discord/monitor/message-handler.ts` keeps the queued run inside that boundary.
|
||||
- `src/discord/monitor/message-handler.process.ts` performs media loading, routing, dispatch, typing, draft streaming, and final reply delivery inline.
|
||||
|
||||
That architecture has two bad properties:
|
||||
|
||||
- long but healthy turns can be aborted by the listener watchdog
|
||||
- users can see no reply even when the downstream runtime would have produced one
|
||||
|
||||
Raising the timeout helps but does not change the failure mode.
|
||||
|
||||
## Non-goals
|
||||
|
||||
- Do not redesign non-Discord channels in this pass.
|
||||
- Do not broaden this into a generic all-channel worker framework in the first implementation.
|
||||
- Do not extract a shared cross-channel inbound worker abstraction yet; only share low-level primitives when duplication is obvious.
|
||||
- Do not add durable crash recovery in the first pass unless needed to land safely.
|
||||
- Do not change route selection, binding semantics, or ACP policy in this plan.
|
||||
|
||||
## Current constraints
|
||||
|
||||
The current Discord processing path still depends on some live runtime objects that should not stay inside the long-term job payload:
|
||||
|
||||
- Carbon `Client`
|
||||
- raw Discord event shapes
|
||||
- in-memory guild history map
|
||||
- thread binding manager callbacks
|
||||
- live typing and draft stream state
|
||||
|
||||
We already moved execution onto a worker queue, but the normalization boundary is still incomplete. Right now the worker is "run later in the same process with some of the same live objects," not a fully data-only job boundary.
|
||||
|
||||
## Target architecture
|
||||
|
||||
### 1. Listener stage
|
||||
|
||||
`DiscordMessageListener` remains the ingress point, but its job becomes:
|
||||
|
||||
- run preflight and policy checks
|
||||
- normalize accepted input into a serializable `DiscordInboundJob`
|
||||
- enqueue the job into a per-session or per-channel async queue
|
||||
- return immediately to Carbon once the enqueue succeeds
|
||||
|
||||
The listener should no longer own the end-to-end LLM turn lifetime.
|
||||
|
||||
### 2. Normalized job payload
|
||||
|
||||
Introduce a serializable job descriptor that contains only the data needed to run the turn later.
|
||||
|
||||
Minimum shape:
|
||||
|
||||
- route identity
|
||||
- `agentId`
|
||||
- `sessionKey`
|
||||
- `accountId`
|
||||
- `channel`
|
||||
- delivery identity
|
||||
- destination channel id
|
||||
- reply target message id
|
||||
- thread id if present
|
||||
- sender identity
|
||||
- sender id, label, username, tag
|
||||
- channel context
|
||||
- guild id
|
||||
- channel name or slug
|
||||
- thread metadata
|
||||
- resolved system prompt override
|
||||
- normalized message body
|
||||
- base text
|
||||
- effective message text
|
||||
- attachment descriptors or resolved media references
|
||||
- gating decisions
|
||||
- mention requirement outcome
|
||||
- command authorization outcome
|
||||
- bound session or agent metadata if applicable
|
||||
|
||||
The job payload must not contain live Carbon objects or mutable closures.
|
||||
|
||||
Current implementation status:
|
||||
|
||||
- partially done
|
||||
- `src/discord/monitor/inbound-job.ts` exists and defines the worker handoff
|
||||
- the payload still contains live Discord runtime context and should be reduced further
|
||||
|
||||
### 3. Worker stage
|
||||
|
||||
Add a Discord-specific worker runner responsible for:
|
||||
|
||||
- reconstructing the turn context from `DiscordInboundJob`
|
||||
- loading media and any additional channel metadata needed for the run
|
||||
- dispatching the agent turn
|
||||
- delivering final reply payloads
|
||||
- updating status and diagnostics
|
||||
|
||||
Recommended location:
|
||||
|
||||
- `src/discord/monitor/inbound-worker.ts`
|
||||
- `src/discord/monitor/inbound-job.ts`
|
||||
|
||||
### 4. Ordering model
|
||||
|
||||
Ordering must remain equivalent to today for a given route boundary.
|
||||
|
||||
Recommended key:
|
||||
|
||||
- use the same queue key logic as `resolveDiscordRunQueueKey(...)`
|
||||
|
||||
This preserves existing behavior:
|
||||
|
||||
- one bound agent conversation does not interleave with itself
|
||||
- different Discord channels can still progress independently
|
||||
|
||||
### 5. Timeout model
|
||||
|
||||
After cutover, there are two separate timeout classes:
|
||||
|
||||
- listener timeout
|
||||
- covers only normalizing the inbound event and enqueueing the job
|
||||
- should be short
|
||||
- run timeout
|
||||
- optional, worker-owned, explicit, and user-visible
|
||||
- should not be inherited accidentally from Carbon listener settings
|
||||
|
||||
This removes the current accidental coupling between "Discord gateway listener stayed alive" and "agent run is healthy."
|
||||
|
||||
## Recommended implementation phases
|
||||
|
||||
### Phase 1: normalization boundary
|
||||
|
||||
- Status: partially implemented
|
||||
- Done:
|
||||
- extracted `buildDiscordInboundJob(...)`
|
||||
- added worker handoff tests
|
||||
- Remaining:
|
||||
- make `DiscordInboundJob` plain data only
|
||||
- move live runtime dependencies to worker-owned services instead of per-job payload
|
||||
- stop rebuilding process context by stitching live listener refs back into the job
|
||||
|
||||
### Phase 2: in-memory worker queue
|
||||
|
||||
- Status: implemented
|
||||
- Done:
|
||||
- added `DiscordInboundWorkerQueue` keyed by resolved run queue key
|
||||
- listener enqueues jobs instead of directly awaiting `processDiscordMessage(...)`
|
||||
- worker executes jobs in-process, in memory only
|
||||
|
||||
This is the first functional cutover.
|
||||
|
||||
### Phase 3: process split
|
||||
|
||||
- Status: not started
|
||||
- Move delivery, typing, and draft streaming ownership behind worker-facing adapters.
|
||||
- Replace direct use of live preflight context with worker context reconstruction.
|
||||
- Keep `processDiscordMessage(...)` temporarily as a facade if needed, then split it.
|
||||
|
||||
### Phase 4: command semantics
|
||||
|
||||
- Status: not started
|
||||
Make sure native Discord commands still behave correctly when work is queued:
|
||||
|
||||
- `stop`
|
||||
- `new`
|
||||
- `reset`
|
||||
- any future session-control commands
|
||||
|
||||
The worker queue must expose enough run state for commands to target the active or queued turn.
|
||||
|
||||
### Phase 5: observability and operator UX
|
||||
|
||||
- Status: not started
|
||||
- emit queue depth and active worker counts into monitor status
|
||||
- record enqueue time, start time, finish time, and timeout or cancellation reason
|
||||
- surface worker-owned timeout or delivery failures clearly in logs
|
||||
|
||||
### Phase 6: optional durability follow-up
|
||||
|
||||
- Status: not started
|
||||
Only after the in-memory version is stable:
|
||||
|
||||
- decide whether queued Discord jobs should survive gateway restart
|
||||
- if yes, persist job descriptors and delivery checkpoints
|
||||
- if no, document the explicit in-memory boundary
|
||||
|
||||
This should be a separate follow-up unless restart recovery is required to land.
|
||||
|
||||
## File impact
|
||||
|
||||
Current primary files:
|
||||
|
||||
- `src/discord/monitor/listeners.ts`
|
||||
- `src/discord/monitor/message-handler.ts`
|
||||
- `src/discord/monitor/message-handler.preflight.ts`
|
||||
- `src/discord/monitor/message-handler.process.ts`
|
||||
- `src/discord/monitor/status.ts`
|
||||
|
||||
Current worker files:
|
||||
|
||||
- `src/discord/monitor/inbound-job.ts`
|
||||
- `src/discord/monitor/inbound-worker.ts`
|
||||
- `src/discord/monitor/inbound-job.test.ts`
|
||||
- `src/discord/monitor/message-handler.queue.test.ts`
|
||||
|
||||
Likely next touch points:
|
||||
|
||||
- `src/auto-reply/dispatch.ts`
|
||||
- `src/discord/monitor/reply-delivery.ts`
|
||||
- `src/discord/monitor/thread-bindings.ts`
|
||||
- `src/discord/monitor/native-command.ts`
|
||||
|
||||
## Next step now
|
||||
|
||||
The next step is to make the worker boundary real instead of partial.
|
||||
|
||||
Do this next:
|
||||
|
||||
1. Move live runtime dependencies out of `DiscordInboundJob`
|
||||
2. Keep those dependencies on the Discord worker instance instead
|
||||
3. Reduce queued jobs to plain Discord-specific data:
|
||||
- route identity
|
||||
- delivery target
|
||||
- sender info
|
||||
- normalized message snapshot
|
||||
- gating and binding decisions
|
||||
4. Reconstruct worker execution context from that plain data inside the worker
|
||||
|
||||
In practice, that means:
|
||||
|
||||
- `client`
|
||||
- `threadBindings`
|
||||
- `guildHistories`
|
||||
- `discordRestFetch`
|
||||
- other mutable runtime-only handles
|
||||
|
||||
should stop living on each queued job and instead live on the worker itself or behind worker-owned adapters.
|
||||
|
||||
After that lands, the next follow-up should be command-state cleanup for `stop`, `new`, and `reset`.
|
||||
|
||||
## Testing plan
|
||||
|
||||
Keep the existing timeout repro coverage in:
|
||||
|
||||
- `src/discord/monitor/message-handler.queue.test.ts`
|
||||
|
||||
Add new tests for:
|
||||
|
||||
1. listener returns after enqueue without awaiting full turn
|
||||
2. per-route ordering is preserved
|
||||
3. different channels still run concurrently
|
||||
4. replies are delivered to the original message destination
|
||||
5. `stop` cancels the active worker-owned run
|
||||
6. worker failure produces visible diagnostics without blocking later jobs
|
||||
7. ACP-bound Discord channels still route correctly under worker execution
|
||||
|
||||
## Risks and mitigations
|
||||
|
||||
- Risk: command semantics drift from current synchronous behavior
|
||||
Mitigation: land command-state plumbing in the same cutover, not later
|
||||
|
||||
- Risk: reply delivery loses thread or reply-to context
|
||||
Mitigation: make delivery identity first-class in `DiscordInboundJob`
|
||||
|
||||
- Risk: duplicate sends during retries or queue restarts
|
||||
Mitigation: keep first pass in-memory only, or add explicit delivery idempotency before persistence
|
||||
|
||||
- Risk: `message-handler.process.ts` becomes harder to reason about during migration
|
||||
Mitigation: split into normalization, execution, and delivery helpers before or during worker cutover
|
||||
|
||||
## Acceptance criteria
|
||||
|
||||
The plan is complete when:
|
||||
|
||||
1. Discord listener timeout no longer aborts healthy long-running turns.
|
||||
2. Listener lifetime and agent-turn lifetime are separate concepts in code.
|
||||
3. Existing per-session ordering is preserved.
|
||||
4. ACP-bound Discord channels work through the same worker path.
|
||||
5. `stop` targets the worker-owned run instead of the old listener-owned call stack.
|
||||
6. Timeout and delivery failures become explicit worker outcomes, not silent listener drops.
|
||||
|
||||
## Remaining landing strategy
|
||||
|
||||
Finish this in follow-up PRs:
|
||||
|
||||
1. make `DiscordInboundJob` plain-data only and move live runtime refs onto the worker
|
||||
2. clean up command-state ownership for `stop`, `new`, and `reset`
|
||||
3. add worker observability and operator status
|
||||
4. decide whether durability is needed or explicitly document the in-memory boundary
|
||||
|
||||
This is still a bounded follow-up if kept Discord-only and if we continue to avoid a premature cross-channel worker abstraction.
|
||||
@@ -31,7 +31,7 @@ openclaw agent --message "hi" --model claude-cli/opus-4.6
|
||||
Codex CLI also works out of the box:
|
||||
|
||||
```bash
|
||||
openclaw agent --message "hi" --model codex-cli/gpt-5.3-codex
|
||||
openclaw agent --message "hi" --model codex-cli/gpt-5.4
|
||||
```
|
||||
|
||||
If your gateway runs under launchd/systemd and PATH is minimal, add just the
|
||||
|
||||
@@ -406,6 +406,7 @@ WhatsApp runs through the gateway's web channel (Baileys Web). It starts automat
|
||||
sessionPrefix: "slack:slash",
|
||||
ephemeral: true,
|
||||
},
|
||||
typingReaction: "hourglass_flowing_sand",
|
||||
textChunkLimit: 4000,
|
||||
chunkMode: "length",
|
||||
streaming: "partial", // off | partial | block | progress (preview mode)
|
||||
@@ -427,6 +428,8 @@ WhatsApp runs through the gateway's web channel (Baileys Web). It starts automat
|
||||
|
||||
**Thread session isolation:** `thread.historyScope` is per-thread (default) or shared across channel. `thread.inheritParent` copies parent channel transcript to new threads.
|
||||
|
||||
- `typingReaction` adds a temporary reaction to the inbound Slack message while a reply is running, then removes it on completion. Use a Slack emoji shortcode such as `"hourglass_flowing_sand"`.
|
||||
|
||||
| Action group | Default | Notes |
|
||||
| ------------ | ------- | ---------------------- |
|
||||
| reactions | enabled | React + list reactions |
|
||||
@@ -968,6 +971,7 @@ Periodic heartbeat runs.
|
||||
every: "30m", // 0m disables
|
||||
model: "openai/gpt-5.2-mini",
|
||||
includeReasoning: false,
|
||||
lightContext: false, // default: false; true keeps only HEARTBEAT.md from workspace bootstrap files
|
||||
session: "main",
|
||||
to: "+15555550123",
|
||||
directPolicy: "allow", // allow (default) | block
|
||||
@@ -984,6 +988,7 @@ Periodic heartbeat runs.
|
||||
- `every`: duration string (ms/s/m/h). Default: `30m`.
|
||||
- `suppressToolErrorWarnings`: when true, suppresses tool error warning payloads during heartbeat runs.
|
||||
- `directPolicy`: direct/DM delivery policy. `allow` (default) permits direct-target delivery. `block` suppresses direct-target delivery and emits `reason=dm-blocked`.
|
||||
- `lightContext`: when true, heartbeat runs use lightweight bootstrap context and keep only `HEARTBEAT.md` from workspace bootstrap files.
|
||||
- Per-agent: set `agents.list[].heartbeat`. When any agent defines `heartbeat`, **only those agents** run heartbeats.
|
||||
- Heartbeats run full agent turns — shorter intervals burn more tokens.
|
||||
|
||||
@@ -1618,6 +1623,7 @@ Batches rapid text-only messages from the same sender into a single agent turn.
|
||||
},
|
||||
openai: {
|
||||
apiKey: "openai_api_key",
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
model: "gpt-4o-mini-tts",
|
||||
voice: "alloy",
|
||||
},
|
||||
@@ -1630,6 +1636,8 @@ Batches rapid text-only messages from the same sender into a single agent turn.
|
||||
- `summaryModel` overrides `agents.defaults.model.primary` for auto-summary.
|
||||
- `modelOverrides` is enabled by default; `modelOverrides.allowProvider` defaults to `false` (opt-in).
|
||||
- API keys fall back to `ELEVENLABS_API_KEY`/`XI_API_KEY` and `OPENAI_API_KEY`.
|
||||
- `openai.baseUrl` overrides the OpenAI TTS endpoint. Resolution order is config, then `OPENAI_TTS_BASE_URL`, then `https://api.openai.com/v1`.
|
||||
- When `openai.baseUrl` points to a non-OpenAI endpoint, OpenClaw treats it as an OpenAI-compatible TTS server and relaxes model/voice validation.
|
||||
|
||||
---
|
||||
|
||||
@@ -2287,6 +2295,9 @@ See [Local Models](/gateway/local-models). TL;DR: run MiniMax M2.5 via LM Studio
|
||||
entries: {
|
||||
"voice-call": {
|
||||
enabled: true,
|
||||
hooks: {
|
||||
allowPromptInjection: false,
|
||||
},
|
||||
config: { provider: "twilio" },
|
||||
},
|
||||
},
|
||||
@@ -2299,6 +2310,7 @@ See [Local Models](/gateway/local-models). TL;DR: run MiniMax M2.5 via LM Studio
|
||||
- `allow`: optional allowlist (only listed plugins load). `deny` wins.
|
||||
- `plugins.entries.<id>.apiKey`: plugin-level API key convenience field (when supported by the plugin).
|
||||
- `plugins.entries.<id>.env`: plugin-scoped env var map.
|
||||
- `plugins.entries.<id>.hooks.allowPromptInjection`: when `false`, core blocks `before_prompt_build` and ignores prompt-mutating fields from legacy `before_agent_start`, while preserving legacy `modelOverride` and `providerOverride`.
|
||||
- `plugins.entries.<id>.config`: plugin-defined config object (validated by plugin schema).
|
||||
- `plugins.slots.memory`: pick the active memory plugin id, or `"none"` to disable memory plugins.
|
||||
- `plugins.installs`: CLI-managed install metadata used by `openclaw plugins update`.
|
||||
@@ -2431,6 +2443,7 @@ See [Plugins](/tools/plugin).
|
||||
- **Legacy bind aliases**: use bind mode values in `gateway.bind` (`auto`, `loopback`, `lan`, `tailnet`, `custom`), not host aliases (`0.0.0.0`, `127.0.0.1`, `localhost`, `::`, `::1`).
|
||||
- **Docker note**: the default `loopback` bind listens on `127.0.0.1` inside the container. With Docker bridge networking (`-p 18789:18789`), traffic arrives on `eth0`, so the gateway is unreachable. Use `--network host`, or set `bind: "lan"` (or `bind: "custom"` with `customBindHost: "0.0.0.0"`) to listen on all interfaces.
|
||||
- **Auth**: required by default. Non-loopback binds require a shared token/password. Onboarding wizard generates a token by default.
|
||||
- If both `gateway.auth.token` and `gateway.auth.password` are configured (including SecretRefs), set `gateway.auth.mode` explicitly to `token` or `password`. Startup and service install/repair flows fail when both are configured and mode is unset.
|
||||
- `gateway.auth.mode: "none"`: explicit no-auth mode. Use only for trusted local loopback setups; this is intentionally not offered by onboarding prompts.
|
||||
- `gateway.auth.mode: "trusted-proxy"`: delegate auth to an identity-aware reverse proxy and trust identity headers from `gateway.trustedProxies` (see [Trusted Proxy Auth](/gateway/trusted-proxy-auth)).
|
||||
- `gateway.auth.allowTailscale`: when `true`, Tailscale Serve identity headers can satisfy Control UI/WebSocket auth (verified via `tailscale whois`); HTTP API endpoints still require token/password auth. This tokenless flow assumes the gateway host is trusted. Defaults to `true` when `tailscale.mode = "serve"`.
|
||||
|
||||
@@ -77,7 +77,7 @@ cat ~/.openclaw/openclaw.json
|
||||
- Gateway runtime best-practice checks (Node vs Bun, version-manager paths).
|
||||
- Gateway port collision diagnostics (default `18789`).
|
||||
- Security warnings for open DM policies.
|
||||
- Gateway auth warnings when no `gateway.auth.token` is set (local mode; offers token generation).
|
||||
- Gateway auth checks for local token mode (offers token generation when no token source exists; does not overwrite token SecretRef configs).
|
||||
- systemd linger check on Linux.
|
||||
- Source install checks (pnpm workspace mismatch, missing UI assets, missing tsx binary).
|
||||
- Writes updated config + wizard metadata.
|
||||
@@ -238,9 +238,19 @@ workspace.
|
||||
|
||||
### 12) Gateway auth checks (local token)
|
||||
|
||||
Doctor warns when `gateway.auth` is missing on a local gateway and offers to
|
||||
generate a token. Use `openclaw doctor --generate-gateway-token` to force token
|
||||
creation in automation.
|
||||
Doctor checks local gateway token auth readiness.
|
||||
|
||||
- If token mode needs a token and no token source exists, doctor offers to generate one.
|
||||
- If `gateway.auth.token` is SecretRef-managed but unavailable, doctor warns and does not overwrite it with plaintext.
|
||||
- `openclaw doctor --generate-gateway-token` forces generation only when no token SecretRef is configured.
|
||||
|
||||
### 12b) Read-only SecretRef-aware repairs
|
||||
|
||||
Some repair flows need to inspect configured credentials without weakening runtime fail-fast behavior.
|
||||
|
||||
- `openclaw doctor --fix` now uses the same read-only SecretRef summary model as status-family commands for targeted config repairs.
|
||||
- Example: Telegram `allowFrom` / `groupAllowFrom` `@username` repair tries to use configured bot credentials when available.
|
||||
- If the Telegram bot token is configured via SecretRef but unavailable in the current command path, doctor reports that the credential is configured-but-unavailable and skips auto-resolution instead of crashing or misreporting the token as missing.
|
||||
|
||||
### 13) Gateway health check + restart
|
||||
|
||||
@@ -265,6 +275,9 @@ Notes:
|
||||
- `openclaw doctor --yes` accepts the default repair prompts.
|
||||
- `openclaw doctor --repair` applies recommended fixes without prompts.
|
||||
- `openclaw doctor --repair --force` overwrites custom supervisor configs.
|
||||
- If token auth requires a token and `gateway.auth.token` is SecretRef-managed, doctor service install/repair validates the SecretRef but does not persist resolved plaintext token values into supervisor service environment metadata.
|
||||
- If token auth requires a token and the configured token SecretRef is unresolved, doctor blocks the install/repair path with actionable guidance.
|
||||
- If both `gateway.auth.token` and `gateway.auth.password` are configured and `gateway.auth.mode` is unset, doctor blocks install/repair until mode is set explicitly.
|
||||
- You can always force a full rewrite via `openclaw gateway install --force`.
|
||||
|
||||
### 16) Gateway runtime + port diagnostics
|
||||
|
||||
@@ -21,7 +21,8 @@ Troubleshooting: [/automation/troubleshooting](/automation/troubleshooting)
|
||||
2. Create a tiny `HEARTBEAT.md` checklist in the agent workspace (optional but recommended).
|
||||
3. Decide where heartbeat messages should go (`target: "none"` is the default; set `target: "last"` to route to the last contact).
|
||||
4. Optional: enable heartbeat reasoning delivery for transparency.
|
||||
5. Optional: restrict heartbeats to active hours (local time).
|
||||
5. Optional: use lightweight bootstrap context if heartbeat runs only need `HEARTBEAT.md`.
|
||||
6. Optional: restrict heartbeats to active hours (local time).
|
||||
|
||||
Example config:
|
||||
|
||||
@@ -33,6 +34,7 @@ Example config:
|
||||
every: "30m",
|
||||
target: "last", // explicit delivery to last contact (default is "none")
|
||||
directPolicy: "allow", // default: allow direct/DM targets; set "block" to suppress
|
||||
lightContext: true, // optional: only inject HEARTBEAT.md from bootstrap files
|
||||
// activeHours: { start: "08:00", end: "24:00" },
|
||||
// includeReasoning: true, // optional: send separate `Reasoning:` message too
|
||||
},
|
||||
@@ -88,6 +90,7 @@ and logged; a message that is only `HEARTBEAT_OK` is dropped.
|
||||
every: "30m", // default: 30m (0m disables)
|
||||
model: "anthropic/claude-opus-4-6",
|
||||
includeReasoning: false, // default: false (deliver separate Reasoning: message when available)
|
||||
lightContext: false, // default: false; true keeps only HEARTBEAT.md from workspace bootstrap files
|
||||
target: "last", // default: none | options: last | none | <channel id> (core or plugin, e.g. "bluebubbles")
|
||||
to: "+15551234567", // optional channel-specific override
|
||||
accountId: "ops-bot", // optional multi-account channel id
|
||||
@@ -208,6 +211,7 @@ Use `accountId` to target a specific account on multi-account channels like Tele
|
||||
- `every`: heartbeat interval (duration string; default unit = minutes).
|
||||
- `model`: optional model override for heartbeat runs (`provider/model`).
|
||||
- `includeReasoning`: when enabled, also deliver the separate `Reasoning:` message when available (same shape as `/reasoning on`).
|
||||
- `lightContext`: when true, heartbeat runs use lightweight bootstrap context and keep only `HEARTBEAT.md` from workspace bootstrap files.
|
||||
- `session`: optional session key for heartbeat runs.
|
||||
- `main` (default): agent main session.
|
||||
- Explicit session key (copy from `openclaw sessions --json` or the [sessions CLI](/cli/sessions)).
|
||||
|
||||
@@ -46,11 +46,13 @@ Examples of inactive surfaces:
|
||||
In local mode without those remote surfaces:
|
||||
- `gateway.remote.token` is active when token auth can win and no env/auth token is configured.
|
||||
- `gateway.remote.password` is active only when password auth can win and no env/auth password is configured.
|
||||
- `gateway.auth.token` SecretRef is inactive for startup auth resolution when `OPENCLAW_GATEWAY_TOKEN` (or `CLAWDBOT_GATEWAY_TOKEN`) is set, because env token input wins for that runtime.
|
||||
|
||||
## Gateway auth surface diagnostics
|
||||
|
||||
When a SecretRef is configured on `gateway.auth.password`, `gateway.remote.token`, or
|
||||
`gateway.remote.password`, gateway startup/reload logs the surface state explicitly:
|
||||
When a SecretRef is configured on `gateway.auth.token`, `gateway.auth.password`,
|
||||
`gateway.remote.token`, or `gateway.remote.password`, gateway startup/reload logs the
|
||||
surface state explicitly:
|
||||
|
||||
- `active`: the SecretRef is part of the effective auth surface and must resolve.
|
||||
- `inactive`: the SecretRef is ignored for this runtime because another auth surface wins, or
|
||||
@@ -65,6 +67,7 @@ When onboarding runs in interactive mode and you choose SecretRef storage, OpenC
|
||||
|
||||
- Env refs: validates env var name and confirms a non-empty value is visible during onboarding.
|
||||
- Provider refs (`file` or `exec`): validates provider selection, resolves `id`, and checks resolved value type.
|
||||
- Quickstart reuse path: when `gateway.auth.token` is already a SecretRef, onboarding resolves it before probe/dashboard bootstrap (for `env`, `file`, and `exec` refs) using the same fail-fast gate.
|
||||
|
||||
If validation fails, onboarding shows the error and lets you retry.
|
||||
|
||||
@@ -336,10 +339,22 @@ Behavior:
|
||||
|
||||
## Command-path resolution
|
||||
|
||||
Credential-sensitive command paths that opt in (for example `openclaw memory` remote-memory paths and `openclaw qr --remote`) can resolve supported SecretRefs via gateway snapshot RPC.
|
||||
Command paths can opt into supported SecretRef resolution via gateway snapshot RPC.
|
||||
|
||||
There are two broad behaviors:
|
||||
|
||||
- Strict command paths (for example `openclaw memory` remote-memory paths and `openclaw qr --remote`) read from the active snapshot and fail fast when a required SecretRef is unavailable.
|
||||
- Read-only command paths (for example `openclaw status`, `openclaw status --all`, `openclaw channels status`, `openclaw channels resolve`, and read-only doctor/config repair flows) also prefer the active snapshot, but degrade instead of aborting when a targeted SecretRef is unavailable in that command path.
|
||||
|
||||
Read-only behavior:
|
||||
|
||||
- When the gateway is running, these commands read from the active snapshot first.
|
||||
- If gateway resolution is incomplete or the gateway is unavailable, they attempt targeted local fallback for the specific command surface.
|
||||
- If a targeted SecretRef is still unavailable, the command continues with degraded read-only output and explicit diagnostics such as “configured but unavailable in this command path”.
|
||||
- This degraded behavior is command-local only. It does not weaken runtime startup, reload, or send/auth paths.
|
||||
|
||||
Other notes:
|
||||
|
||||
- When gateway is running, those command paths read from the active snapshot.
|
||||
- If a configured SecretRef is required and gateway is unavailable, command resolution fails fast with actionable diagnostics.
|
||||
- Snapshot refresh after backend secret rotation is handled by `openclaw secrets reload`.
|
||||
- Gateway RPC method used by these command paths: `secrets.resolve`.
|
||||
|
||||
|
||||
@@ -767,7 +767,7 @@ Yes - via pi-ai's **Amazon Bedrock (Converse)** provider with **manual config**.
|
||||
|
||||
### How does Codex auth work
|
||||
|
||||
OpenClaw supports **OpenAI Code (Codex)** via OAuth (ChatGPT sign-in). The wizard can run the OAuth flow and will set the default model to `openai-codex/gpt-5.3-codex` when appropriate. See [Model providers](/concepts/model-providers) and [Wizard](/start/wizard).
|
||||
OpenClaw supports **OpenAI Code (Codex)** via OAuth (ChatGPT sign-in). The wizard can run the OAuth flow and will set the default model to `openai-codex/gpt-5.4` when appropriate. See [Model providers](/concepts/model-providers) and [Wizard](/start/wizard).
|
||||
|
||||
### Do you support OpenAI subscription auth Codex OAuth
|
||||
|
||||
@@ -2156,8 +2156,8 @@ Use `/model status` to confirm which auth profile is active.
|
||||
|
||||
Yes. Set one as default and switch as needed:
|
||||
|
||||
- **Quick switch (per session):** `/model gpt-5.2` for daily tasks, `/model gpt-5.3-codex` for coding.
|
||||
- **Default + switch:** set `agents.defaults.model.primary` to `openai/gpt-5.2`, then switch to `openai-codex/gpt-5.3-codex` when coding (or the other way around).
|
||||
- **Quick switch (per session):** `/model gpt-5.2` for daily tasks, `/model openai-codex/gpt-5.4` for coding with Codex OAuth.
|
||||
- **Default + switch:** set `agents.defaults.model.primary` to `openai/gpt-5.2`, then switch to `openai-codex/gpt-5.4` when coding (or the other way around).
|
||||
- **Sub-agents:** route coding tasks to sub-agents with a different default model.
|
||||
|
||||
See [Models](/concepts/models) and [Slash commands](/tools/slash-commands).
|
||||
|
||||
@@ -222,7 +222,7 @@ OPENCLAW_LIVE_SETUP_TOKEN=1 OPENCLAW_LIVE_SETUP_TOKEN_PROFILE=anthropic:setup-to
|
||||
- Args: `["-p","--output-format","json","--permission-mode","bypassPermissions"]`
|
||||
- Overrides (optional):
|
||||
- `OPENCLAW_LIVE_CLI_BACKEND_MODEL="claude-cli/claude-opus-4-6"`
|
||||
- `OPENCLAW_LIVE_CLI_BACKEND_MODEL="codex-cli/gpt-5.3-codex"`
|
||||
- `OPENCLAW_LIVE_CLI_BACKEND_MODEL="codex-cli/gpt-5.4"`
|
||||
- `OPENCLAW_LIVE_CLI_BACKEND_COMMAND="/full/path/to/claude"`
|
||||
- `OPENCLAW_LIVE_CLI_BACKEND_ARGS='["-p","--output-format","json","--permission-mode","bypassPermissions"]'`
|
||||
- `OPENCLAW_LIVE_CLI_BACKEND_CLEAR_ENV='["ANTHROPIC_API_KEY","ANTHROPIC_API_KEY_OLD"]'`
|
||||
@@ -275,7 +275,7 @@ There is no fixed “CI model list” (live is opt-in), but these are the **reco
|
||||
This is the “common models” run we expect to keep working:
|
||||
|
||||
- OpenAI (non-Codex): `openai/gpt-5.2` (optional: `openai/gpt-5.1`)
|
||||
- OpenAI Codex: `openai-codex/gpt-5.3-codex` (optional: `openai-codex/gpt-5.3-codex-codex`)
|
||||
- OpenAI Codex: `openai-codex/gpt-5.4`
|
||||
- Anthropic: `anthropic/claude-opus-4-6` (or `anthropic/claude-sonnet-4-5`)
|
||||
- Google (Gemini API): `google/gemini-3-pro-preview` and `google/gemini-3-flash-preview` (avoid older Gemini 2.x models)
|
||||
- Google (Antigravity): `google-antigravity/claude-opus-4-6-thinking` and `google-antigravity/gemini-3-flash`
|
||||
@@ -283,7 +283,7 @@ This is the “common models” run we expect to keep working:
|
||||
- MiniMax: `minimax/minimax-m2.5`
|
||||
|
||||
Run gateway smoke with tools + image:
|
||||
`OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.2,openai-codex/gpt-5.3-codex,anthropic/claude-opus-4-6,google/gemini-3-pro-preview,google/gemini-3-flash-preview,google-antigravity/claude-opus-4-6-thinking,google-antigravity/gemini-3-flash,zai/glm-4.7,minimax/minimax-m2.5" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts`
|
||||
`OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.2,openai-codex/gpt-5.4,anthropic/claude-opus-4-6,google/gemini-3-pro-preview,google/gemini-3-flash-preview,google-antigravity/claude-opus-4-6-thinking,google-antigravity/gemini-3-flash,zai/glm-4.7,minimax/minimax-m2.5" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts`
|
||||
|
||||
### Baseline: tool calling (Read + optional Exec)
|
||||
|
||||
|
||||
@@ -30,10 +30,13 @@ openclaw onboard --openai-api-key "$OPENAI_API_KEY"
|
||||
```json5
|
||||
{
|
||||
env: { OPENAI_API_KEY: "sk-..." },
|
||||
agents: { defaults: { model: { primary: "openai/gpt-5.2" } } },
|
||||
agents: { defaults: { model: { primary: "openai/gpt-5.4" } } },
|
||||
}
|
||||
```
|
||||
|
||||
OpenAI's current API model docs list `gpt-5.4` and `gpt-5.4-pro` for direct
|
||||
OpenAI API usage. OpenClaw forwards both through the `openai/*` Responses path.
|
||||
|
||||
## Option B: OpenAI Code (Codex) subscription
|
||||
|
||||
**Best for:** using ChatGPT/Codex subscription access instead of an API key.
|
||||
@@ -53,10 +56,13 @@ openclaw models auth login --provider openai-codex
|
||||
|
||||
```json5
|
||||
{
|
||||
agents: { defaults: { model: { primary: "openai-codex/gpt-5.3-codex" } } },
|
||||
agents: { defaults: { model: { primary: "openai-codex/gpt-5.4" } } },
|
||||
}
|
||||
```
|
||||
|
||||
OpenAI's current Codex docs list `gpt-5.4` as the current Codex model. OpenClaw
|
||||
maps that to `openai-codex/gpt-5.4` for ChatGPT/Codex OAuth usage.
|
||||
|
||||
### Transport default
|
||||
|
||||
OpenClaw uses `pi-ai` for model streaming. For both `openai/*` and
|
||||
@@ -81,9 +87,9 @@ Related OpenAI docs:
|
||||
{
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "openai-codex/gpt-5.3-codex" },
|
||||
model: { primary: "openai-codex/gpt-5.4" },
|
||||
models: {
|
||||
"openai-codex/gpt-5.3-codex": {
|
||||
"openai-codex/gpt-5.4": {
|
||||
params: {
|
||||
transport: "auto",
|
||||
},
|
||||
@@ -106,7 +112,7 @@ OpenAI docs describe warm-up as optional. OpenClaw enables it by default for
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"openai/gpt-5.2": {
|
||||
"openai/gpt-5.4": {
|
||||
params: {
|
||||
openaiWsWarmup: false,
|
||||
},
|
||||
@@ -124,7 +130,7 @@ OpenAI docs describe warm-up as optional. OpenClaw enables it by default for
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"openai/gpt-5.2": {
|
||||
"openai/gpt-5.4": {
|
||||
params: {
|
||||
openaiWsWarmup: true,
|
||||
},
|
||||
@@ -135,6 +141,30 @@ OpenAI docs describe warm-up as optional. OpenClaw enables it by default for
|
||||
}
|
||||
```
|
||||
|
||||
### OpenAI priority processing
|
||||
|
||||
OpenAI's API exposes priority processing via `service_tier=priority`. In
|
||||
OpenClaw, set `agents.defaults.models["openai/<model>"].params.serviceTier` to
|
||||
pass that field through on direct `openai/*` Responses requests.
|
||||
|
||||
```json5
|
||||
{
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"openai/gpt-5.4": {
|
||||
params: {
|
||||
serviceTier: "priority",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
Supported values are `auto`, `default`, `flex`, and `priority`.
|
||||
|
||||
### OpenAI Responses server-side compaction
|
||||
|
||||
For direct OpenAI Responses models (`openai/*` using `api: "openai-responses"` with
|
||||
@@ -157,7 +187,7 @@ Responses models (for example Azure OpenAI Responses):
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"azure-openai-responses/gpt-5.2": {
|
||||
"azure-openai-responses/gpt-5.4": {
|
||||
params: {
|
||||
responsesServerCompaction: true,
|
||||
},
|
||||
@@ -175,7 +205,7 @@ Responses models (for example Azure OpenAI Responses):
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"openai/gpt-5.2": {
|
||||
"openai/gpt-5.4": {
|
||||
params: {
|
||||
responsesServerCompaction: true,
|
||||
responsesCompactThreshold: 120000,
|
||||
@@ -194,7 +224,7 @@ Responses models (for example Azure OpenAI Responses):
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"openai/gpt-5.2": {
|
||||
"openai/gpt-5.4": {
|
||||
params: {
|
||||
responsesServerCompaction: false,
|
||||
},
|
||||
|
||||
@@ -36,6 +36,7 @@ Scope intent:
|
||||
- `tools.web.search.kimi.apiKey`
|
||||
- `tools.web.search.perplexity.apiKey`
|
||||
- `gateway.auth.password`
|
||||
- `gateway.auth.token`
|
||||
- `gateway.remote.token`
|
||||
- `gateway.remote.password`
|
||||
- `cron.webhookToken`
|
||||
@@ -107,7 +108,6 @@ Out-of-scope credentials include:
|
||||
|
||||
[//]: # "secretref-unsupported-list-start"
|
||||
|
||||
- `gateway.auth.token`
|
||||
- `commands.ownerDisplaySecret`
|
||||
- `channels.matrix.accessToken`
|
||||
- `channels.matrix.accounts.*.accessToken`
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
"commands.ownerDisplaySecret",
|
||||
"channels.matrix.accessToken",
|
||||
"channels.matrix.accounts.*.accessToken",
|
||||
"gateway.auth.token",
|
||||
"hooks.token",
|
||||
"hooks.gmail.pushToken",
|
||||
"hooks.mappings[].sessionKey",
|
||||
@@ -385,6 +384,13 @@
|
||||
"secretShape": "secret_input",
|
||||
"optIn": true
|
||||
},
|
||||
{
|
||||
"id": "gateway.auth.token",
|
||||
"configFile": "openclaw.json",
|
||||
"path": "gateway.auth.token",
|
||||
"secretShape": "secret_input",
|
||||
"optIn": true
|
||||
},
|
||||
{
|
||||
"id": "gateway.remote.password",
|
||||
"configFile": "openclaw.json",
|
||||
|
||||
@@ -71,6 +71,15 @@ For a high-level overview, see [Onboarding Wizard](/start/wizard).
|
||||
<Step title="Gateway">
|
||||
- Port, bind, auth mode, tailscale exposure.
|
||||
- Auth recommendation: keep **Token** even for loopback so local WS clients must authenticate.
|
||||
- In token mode, interactive onboarding offers:
|
||||
- **Generate/store plaintext token** (default)
|
||||
- **Use SecretRef** (opt-in)
|
||||
- Quickstart reuses existing `gateway.auth.token` SecretRefs across `env`, `file`, and `exec` providers for onboarding probe/dashboard bootstrap.
|
||||
- If that SecretRef is configured but cannot be resolved, onboarding fails early with a clear fix message instead of silently degrading runtime auth.
|
||||
- In password mode, interactive onboarding also supports plaintext or SecretRef storage.
|
||||
- Non-interactive token SecretRef path: `--gateway-token-ref-env <ENV_VAR>`.
|
||||
- Requires a non-empty env var in the onboarding process environment.
|
||||
- Cannot be combined with `--gateway-token`.
|
||||
- Disable auth only if you fully trust every local process.
|
||||
- Non‑loopback binds still require auth.
|
||||
</Step>
|
||||
@@ -92,6 +101,9 @@ For a high-level overview, see [Onboarding Wizard](/start/wizard).
|
||||
- Wizard attempts to enable lingering via `loginctl enable-linger <user>` so the Gateway stays up after logout.
|
||||
- May prompt for sudo (writes `/var/lib/systemd/linger`); it tries without sudo first.
|
||||
- **Runtime selection:** Node (recommended; required for WhatsApp/Telegram). Bun is **not recommended**.
|
||||
- If token auth requires a token and `gateway.auth.token` is SecretRef-managed, daemon install validates it but does not persist resolved plaintext token values into supervisor service environment metadata.
|
||||
- If token auth requires a token and the configured token SecretRef is unresolved, daemon install is blocked with actionable guidance.
|
||||
- If both `gateway.auth.token` and `gateway.auth.password` are configured and `gateway.auth.mode` is unset, daemon install is blocked until mode is set explicitly.
|
||||
</Step>
|
||||
<Step title="Health check">
|
||||
- Starts the Gateway (if needed) and runs `openclaw health`.
|
||||
@@ -130,6 +142,19 @@ openclaw onboard --non-interactive \
|
||||
|
||||
Add `--json` for a machine‑readable summary.
|
||||
|
||||
Gateway token SecretRef in non-interactive mode:
|
||||
|
||||
```bash
|
||||
export OPENCLAW_GATEWAY_TOKEN="your-token"
|
||||
openclaw onboard --non-interactive \
|
||||
--mode local \
|
||||
--auth-choice skip \
|
||||
--gateway-auth token \
|
||||
--gateway-token-ref-env OPENCLAW_GATEWAY_TOKEN
|
||||
```
|
||||
|
||||
`--gateway-token` and `--gateway-token-ref-env` are mutually exclusive.
|
||||
|
||||
<Note>
|
||||
`--json` does **not** imply non-interactive mode. Use `--non-interactive` (and `--workspace`) for scripts.
|
||||
</Note>
|
||||
|
||||
@@ -51,6 +51,13 @@ It does not install or modify anything on the remote host.
|
||||
<Step title="Gateway">
|
||||
- Prompts for port, bind, auth mode, and tailscale exposure.
|
||||
- Recommended: keep token auth enabled even for loopback so local WS clients must authenticate.
|
||||
- In token mode, interactive onboarding offers:
|
||||
- **Generate/store plaintext token** (default)
|
||||
- **Use SecretRef** (opt-in)
|
||||
- In password mode, interactive onboarding also supports plaintext or SecretRef storage.
|
||||
- Non-interactive token SecretRef path: `--gateway-token-ref-env <ENV_VAR>`.
|
||||
- Requires a non-empty env var in the onboarding process environment.
|
||||
- Cannot be combined with `--gateway-token`.
|
||||
- Disable auth only if you fully trust every local process.
|
||||
- Non-loopback binds still require auth.
|
||||
</Step>
|
||||
@@ -136,7 +143,7 @@ What you set:
|
||||
<Accordion title="OpenAI Code subscription (OAuth)">
|
||||
Browser flow; paste `code#state`.
|
||||
|
||||
Sets `agents.defaults.model` to `openai-codex/gpt-5.3-codex` when model is unset or `openai/*`.
|
||||
Sets `agents.defaults.model` to `openai-codex/gpt-5.4` when model is unset or `openai/*`.
|
||||
|
||||
</Accordion>
|
||||
<Accordion title="OpenAI API key">
|
||||
@@ -206,7 +213,7 @@ Credential and profile paths:
|
||||
- OAuth credentials: `~/.openclaw/credentials/oauth.json`
|
||||
- Auth profiles (API keys + OAuth): `~/.openclaw/agents/<agentId>/agent/auth-profiles.json`
|
||||
|
||||
API key storage mode:
|
||||
Credential storage mode:
|
||||
|
||||
- Default onboarding behavior persists API keys as plaintext values in auth profiles.
|
||||
- `--secret-input-mode ref` enables reference mode instead of plaintext key storage.
|
||||
@@ -222,6 +229,10 @@ API key storage mode:
|
||||
- Inline key flags (for example `--openai-api-key`) require that env var to be set; otherwise onboarding fails fast.
|
||||
- For custom providers, non-interactive `ref` mode stores `models.providers.<id>.apiKey` as `{ source: "env", provider: "default", id: "CUSTOM_API_KEY" }`.
|
||||
- In that custom-provider case, `--custom-api-key` requires `CUSTOM_API_KEY` to be set; otherwise onboarding fails fast.
|
||||
- Gateway auth credentials support plaintext and SecretRef choices in interactive onboarding:
|
||||
- Token mode: **Generate/store plaintext token** (default) or **Use SecretRef**.
|
||||
- Password mode: plaintext or SecretRef.
|
||||
- Non-interactive token SecretRef path: `--gateway-token-ref-env <ENV_VAR>`.
|
||||
- Existing plaintext setups continue to work unchanged.
|
||||
|
||||
<Note>
|
||||
|
||||
@@ -72,8 +72,13 @@ The wizard starts with **QuickStart** (defaults) vs **Advanced** (full control).
|
||||
In interactive runs, choosing secret reference mode lets you point at either an environment variable or a configured provider ref (`file` or `exec`), with a fast preflight validation before saving.
|
||||
2. **Workspace** — Location for agent files (default `~/.openclaw/workspace`). Seeds bootstrap files.
|
||||
3. **Gateway** — Port, bind address, auth mode, Tailscale exposure.
|
||||
In interactive token mode, choose default plaintext token storage or opt into SecretRef.
|
||||
Non-interactive token SecretRef path: `--gateway-token-ref-env <ENV_VAR>`.
|
||||
4. **Channels** — WhatsApp, Telegram, Discord, Google Chat, Mattermost, Signal, BlueBubbles, or iMessage.
|
||||
5. **Daemon** — Installs a LaunchAgent (macOS) or systemd user unit (Linux/WSL2).
|
||||
If token auth requires a token and `gateway.auth.token` is SecretRef-managed, daemon install validates it but does not persist the resolved token into supervisor service environment metadata.
|
||||
If token auth requires a token and the configured token SecretRef is unresolved, daemon install is blocked with actionable guidance.
|
||||
If both `gateway.auth.token` and `gateway.auth.password` are configured and `gateway.auth.mode` is unset, daemon install is blocked until mode is set explicitly.
|
||||
6. **Health check** — Starts the Gateway and verifies it's running.
|
||||
7. **Skills** — Installs recommended skills and optional dependencies.
|
||||
|
||||
|
||||
@@ -79,11 +79,14 @@ Required feature flags for thread-bound ACP:
|
||||
- `acp.dispatch.enabled` is on by default (set `false` to pause ACP dispatch)
|
||||
- Channel-adapter ACP thread-spawn flag enabled (adapter-specific)
|
||||
- Discord: `channels.discord.threadBindings.spawnAcpSessions=true`
|
||||
- Telegram: `channels.telegram.threadBindings.spawnAcpSessions=true`
|
||||
|
||||
### Thread supporting channels
|
||||
|
||||
- Any channel adapter that exposes session/thread binding capability.
|
||||
- Current built-in support: Discord.
|
||||
- Current built-in support:
|
||||
- Discord threads/channels
|
||||
- Telegram topics (forum topics in groups/supergroups and DM topics)
|
||||
- Plugin channels can add support through the same binding interface.
|
||||
|
||||
## Channel specific settings
|
||||
@@ -303,7 +306,9 @@ If no target resolves, OpenClaw returns a clear error (`Unable to resolve sessio
|
||||
Notes:
|
||||
|
||||
- On non-thread binding surfaces, default behavior is effectively `off`.
|
||||
- Thread-bound spawn requires channel policy support (for Discord: `channels.discord.threadBindings.spawnAcpSessions=true`).
|
||||
- Thread-bound spawn requires channel policy support:
|
||||
- Discord: `channels.discord.threadBindings.spawnAcpSessions=true`
|
||||
- Telegram: `channels.telegram.threadBindings.spawnAcpSessions=true`
|
||||
|
||||
## ACP controls
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ read_when:
|
||||
|
||||
# Diffs
|
||||
|
||||
`diffs` is an optional plugin tool and companion skill that turns change content into a read-only diff artifact for agents.
|
||||
`diffs` is an optional plugin tool with short built-in system guidance and a companion skill that turns change content into a read-only diff artifact for agents.
|
||||
|
||||
It accepts either:
|
||||
|
||||
@@ -23,6 +23,8 @@ It can return:
|
||||
- a rendered file path (PNG or PDF) for message delivery
|
||||
- both outputs in one call
|
||||
|
||||
When enabled, the plugin prepends concise usage guidance into system-prompt space and also exposes a detailed skill for cases where the agent needs fuller instructions.
|
||||
|
||||
## Quick start
|
||||
|
||||
1. Enable the plugin.
|
||||
@@ -44,6 +46,29 @@ It can return:
|
||||
}
|
||||
```
|
||||
|
||||
## Disable built-in system guidance
|
||||
|
||||
If you want to keep the `diffs` tool enabled but disable its built-in system-prompt guidance, set `plugins.entries.diffs.hooks.allowPromptInjection` to `false`:
|
||||
|
||||
```json5
|
||||
{
|
||||
plugins: {
|
||||
entries: {
|
||||
diffs: {
|
||||
enabled: true,
|
||||
hooks: {
|
||||
allowPromptInjection: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
This blocks the diffs plugin's `before_prompt_build` hook while keeping the plugin, tool, and companion skill available.
|
||||
|
||||
If you want to disable both the guidance and the tool, disable the plugin instead.
|
||||
|
||||
## Typical agent workflow
|
||||
|
||||
1. Agent calls `diffs`.
|
||||
|
||||
@@ -453,7 +453,7 @@ Restart or apply updates to the running Gateway process (in-place).
|
||||
Core actions:
|
||||
|
||||
- `restart` (authorizes + sends `SIGUSR1` for in-process restart; `openclaw gateway` restart in-place)
|
||||
- `config.get` / `config.schema`
|
||||
- `config.get`
|
||||
- `config.apply` (validate + write config + restart + wake)
|
||||
- `config.patch` (merge partial update + restart + wake)
|
||||
- `update.run` (run update + restart + wake)
|
||||
@@ -461,6 +461,7 @@ Core actions:
|
||||
Notes:
|
||||
|
||||
- Use `delayMs` (defaults to 2000) to avoid interrupting an in-flight reply.
|
||||
- `config.schema` remains available to internal Control UI flows and is not exposed through the agent `gateway` tool.
|
||||
- `restart` is enabled by default; set `commands.restart: false` to disable it.
|
||||
|
||||
### `sessions_list` / `sessions_history` / `sessions_send` / `sessions_spawn` / `session_status`
|
||||
|
||||
@@ -53,9 +53,9 @@ without writing custom OpenClaw code for each workflow.
|
||||
"enabled": true,
|
||||
"config": {
|
||||
"defaultProvider": "openai-codex",
|
||||
"defaultModel": "gpt-5.2",
|
||||
"defaultModel": "gpt-5.4",
|
||||
"defaultAuthProfileId": "main",
|
||||
"allowedModels": ["openai-codex/gpt-5.3-codex"],
|
||||
"allowedModels": ["openai-codex/gpt-5.4"],
|
||||
"maxTokens": 800,
|
||||
"timeoutMs": 30000
|
||||
}
|
||||
|
||||
@@ -62,7 +62,7 @@ Schema instead. See [Plugin manifest](/plugins/manifest).
|
||||
Plugins can register:
|
||||
|
||||
- Gateway RPC methods
|
||||
- Gateway HTTP handlers
|
||||
- Gateway HTTP routes
|
||||
- Agent tools
|
||||
- CLI commands
|
||||
- Background services
|
||||
@@ -106,6 +106,37 @@ Notes:
|
||||
- Uses core media-understanding audio configuration (`tools.media.audio`) and provider fallback order.
|
||||
- Returns `{ text: undefined }` when no transcription output is produced (for example skipped/unsupported input).
|
||||
|
||||
## Gateway HTTP routes
|
||||
|
||||
Plugins can expose HTTP endpoints with `api.registerHttpRoute(...)`.
|
||||
|
||||
```ts
|
||||
api.registerHttpRoute({
|
||||
path: "/acme/webhook",
|
||||
auth: "plugin",
|
||||
match: "exact",
|
||||
handler: async (_req, res) => {
|
||||
res.statusCode = 200;
|
||||
res.end("ok");
|
||||
return true;
|
||||
},
|
||||
});
|
||||
```
|
||||
|
||||
Route fields:
|
||||
|
||||
- `path`: route path under the gateway HTTP server.
|
||||
- `auth`: required. Use `"gateway"` to require normal gateway auth, or `"plugin"` for plugin-managed auth/webhook verification.
|
||||
- `match`: optional. `"exact"` (default) or `"prefix"`.
|
||||
- `replaceExisting`: optional. Allows the same plugin to replace its own existing route registration.
|
||||
- `handler`: return `true` when the route handled the request.
|
||||
|
||||
Notes:
|
||||
|
||||
- `api.registerHttpHandler(...)` is obsolete. Use `api.registerHttpRoute(...)`.
|
||||
- Plugin routes must declare `auth` explicitly.
|
||||
- Exact `path + match` conflicts are rejected unless `replaceExisting: true`, and one plugin cannot replace another plugin's route.
|
||||
|
||||
## Plugin SDK import paths
|
||||
|
||||
Use SDK subpaths instead of the monolithic `openclaw/plugin-sdk` import when
|
||||
@@ -147,6 +178,38 @@ Compatibility note:
|
||||
subpaths; use `core` for generic surfaces and `compat` only when broader
|
||||
shared helpers are required.
|
||||
|
||||
## Read-only channel inspection
|
||||
|
||||
If your plugin registers a channel, prefer implementing
|
||||
`plugin.config.inspectAccount(cfg, accountId)` alongside `resolveAccount(...)`.
|
||||
|
||||
Why:
|
||||
|
||||
- `resolveAccount(...)` is the runtime path. It is allowed to assume credentials
|
||||
are fully materialized and can fail fast when required secrets are missing.
|
||||
- Read-only command paths such as `openclaw status`, `openclaw status --all`,
|
||||
`openclaw channels status`, `openclaw channels resolve`, and doctor/config
|
||||
repair flows should not need to materialize runtime credentials just to
|
||||
describe configuration.
|
||||
|
||||
Recommended `inspectAccount(...)` behavior:
|
||||
|
||||
- Return descriptive account state only.
|
||||
- Preserve `enabled` and `configured`.
|
||||
- Include credential source/status fields when relevant, such as:
|
||||
- `tokenSource`, `tokenStatus`
|
||||
- `botTokenSource`, `botTokenStatus`
|
||||
- `appTokenSource`, `appTokenStatus`
|
||||
- `signingSecretSource`, `signingSecretStatus`
|
||||
- You do not need to return raw token values just to report read-only
|
||||
availability. Returning `tokenStatus: "available"` (and the matching source
|
||||
field) is enough for status-style commands.
|
||||
- Use `configured_unavailable` when a credential is configured via SecretRef but
|
||||
unavailable in the current command path.
|
||||
|
||||
This lets read-only commands report “configured but unavailable in this command
|
||||
path” instead of crashing or misreporting the account as not configured.
|
||||
|
||||
Performance note:
|
||||
|
||||
- Plugin discovery and manifest metadata use short in-process caches to reduce
|
||||
@@ -431,6 +494,59 @@ Notes:
|
||||
- Plugin-managed hooks show up in `openclaw hooks list` with `plugin:<id>`.
|
||||
- You cannot enable/disable plugin-managed hooks via `openclaw hooks`; enable/disable the plugin instead.
|
||||
|
||||
### Agent lifecycle hooks (`api.on`)
|
||||
|
||||
For typed runtime lifecycle hooks, use `api.on(...)`:
|
||||
|
||||
```ts
|
||||
export default function register(api) {
|
||||
api.on(
|
||||
"before_prompt_build",
|
||||
(event, ctx) => {
|
||||
return {
|
||||
prependSystemContext: "Follow company style guide.",
|
||||
};
|
||||
},
|
||||
{ priority: 10 },
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
Important hooks for prompt construction:
|
||||
|
||||
- `before_model_resolve`: runs before session load (`messages` are not available). Use this to deterministically override `modelOverride` or `providerOverride`.
|
||||
- `before_prompt_build`: runs after session load (`messages` are available). Use this to shape prompt input.
|
||||
- `before_agent_start`: legacy compatibility hook. Prefer the two explicit hooks above.
|
||||
|
||||
Core-enforced hook policy:
|
||||
|
||||
- Operators can disable prompt mutation hooks per plugin via `plugins.entries.<id>.hooks.allowPromptInjection: false`.
|
||||
- When disabled, OpenClaw blocks `before_prompt_build` and ignores prompt-mutating fields returned from legacy `before_agent_start` while preserving legacy `modelOverride` and `providerOverride`.
|
||||
|
||||
`before_prompt_build` result fields:
|
||||
|
||||
- `prependContext`: prepends text to the user prompt for this run. Best for turn-specific or dynamic content.
|
||||
- `systemPrompt`: full system prompt override.
|
||||
- `prependSystemContext`: prepends text to the current system prompt.
|
||||
- `appendSystemContext`: appends text to the current system prompt.
|
||||
|
||||
Prompt build order in embedded runtime:
|
||||
|
||||
1. Apply `prependContext` to the user prompt.
|
||||
2. Apply `systemPrompt` override when provided.
|
||||
3. Apply `prependSystemContext + current system prompt + appendSystemContext`.
|
||||
|
||||
Merge and precedence notes:
|
||||
|
||||
- Hook handlers run by priority (higher first).
|
||||
- For merged context fields, values are concatenated in execution order.
|
||||
- `before_prompt_build` values are applied before legacy `before_agent_start` fallback values.
|
||||
|
||||
Migration guidance:
|
||||
|
||||
- Move static guidance from `prependContext` to `prependSystemContext` (or `appendSystemContext`) so providers can cache stable system-prefix content.
|
||||
- Keep `prependContext` for per-turn dynamic context that should stay tied to the user message.
|
||||
|
||||
## Provider plugins (model auth)
|
||||
|
||||
Plugins can register **model provider auth** flows so users can run OAuth or
|
||||
|
||||
@@ -214,7 +214,11 @@ Sub-agents report back via an announce step:
|
||||
|
||||
- The announce step runs inside the sub-agent session (not the requester session).
|
||||
- If the sub-agent replies exactly `ANNOUNCE_SKIP`, nothing is posted.
|
||||
- Otherwise the announce reply is posted to the requester chat channel via a follow-up `agent` call (`deliver=true`).
|
||||
- Otherwise delivery depends on requester depth:
|
||||
- top-level requester sessions use a follow-up `agent` call with external delivery (`deliver=true`)
|
||||
- nested requester subagent sessions receive an internal follow-up injection (`deliver=false`) so the orchestrator can synthesize child results in-session
|
||||
- if a nested requester subagent session is gone, OpenClaw falls back to that session's requester when available
|
||||
- Child completion aggregation is scoped to the current requester run when building nested completion findings, preventing stale prior-run child outputs from leaking into the current announce.
|
||||
- Announce replies preserve thread/topic routing when available on channel adapters.
|
||||
- Announce context is normalized to a stable internal event block:
|
||||
- source (`subagent` or `cron`)
|
||||
|
||||
@@ -93,6 +93,7 @@ Full schema is in [Gateway configuration](/gateway/configuration).
|
||||
},
|
||||
openai: {
|
||||
apiKey: "openai_api_key",
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
model: "gpt-4o-mini-tts",
|
||||
voice: "alloy",
|
||||
},
|
||||
@@ -216,6 +217,9 @@ Then run:
|
||||
- `prefsPath`: override the local prefs JSON path (provider/limit/summary).
|
||||
- `apiKey` values fall back to env vars (`ELEVENLABS_API_KEY`/`XI_API_KEY`, `OPENAI_API_KEY`).
|
||||
- `elevenlabs.baseUrl`: override ElevenLabs API base URL.
|
||||
- `openai.baseUrl`: override the OpenAI TTS endpoint.
|
||||
- Resolution order: `messages.tts.openai.baseUrl` -> `OPENAI_TTS_BASE_URL` -> `https://api.openai.com/v1`
|
||||
- Non-default values are treated as OpenAI-compatible TTS endpoints, so custom model and voice names are accepted.
|
||||
- `elevenlabs.voiceSettings`:
|
||||
- `stability`, `similarityBoost`, `style`: `0..1`
|
||||
- `useSpeakerBoost`: `true|false`
|
||||
|
||||
@@ -60,6 +60,15 @@ you revoke it with `openclaw devices revoke --device <id> --role <role>`. See
|
||||
- Each browser profile generates a unique device ID, so switching browsers or
|
||||
clearing browser data will require re-pairing.
|
||||
|
||||
## Language support
|
||||
|
||||
The Control UI can localize itself on first load based on your browser locale, and you can override it later from the language picker in the Access card.
|
||||
|
||||
- Supported locales: `en`, `zh-CN`, `zh-TW`, `pt-BR`, `de`, `es`
|
||||
- Non-English translations are lazy-loaded in the browser.
|
||||
- The selected locale is saved in browser storage and reused on future visits.
|
||||
- Missing translation keys fall back to English.
|
||||
|
||||
## What it can do (today)
|
||||
|
||||
- Chat with the model via Gateway WS (`chat.history`, `chat.send`, `chat.abort`, `chat.inject`)
|
||||
|
||||
@@ -37,10 +37,15 @@ Prefer localhost, Tailscale Serve, or an SSH tunnel.
|
||||
|
||||
- **Localhost**: open `http://127.0.0.1:18789/`.
|
||||
- **Token source**: `gateway.auth.token` (or `OPENCLAW_GATEWAY_TOKEN`); the UI stores a copy in localStorage after you connect.
|
||||
- If `gateway.auth.token` is SecretRef-managed, `openclaw dashboard` prints/copies/opens a non-tokenized URL by design. This avoids exposing externally managed tokens in shell logs, clipboard history, or browser-launch arguments.
|
||||
- If `gateway.auth.token` is configured as a SecretRef and is unresolved in your current shell, `openclaw dashboard` still prints a non-tokenized URL plus actionable auth setup guidance.
|
||||
- **Not localhost**: use Tailscale Serve (tokenless for Control UI/WebSocket if `gateway.auth.allowTailscale: true`, assumes trusted gateway host; HTTP APIs still need token/password), tailnet bind with a token, or an SSH tunnel. See [Web surfaces](/web).
|
||||
|
||||
## If you see “unauthorized” / 1008
|
||||
|
||||
- Ensure the gateway is reachable (local: `openclaw status`; remote: SSH tunnel `ssh -N -L 18789:127.0.0.1:18789 user@host` then open `http://127.0.0.1:18789/`).
|
||||
- Retrieve the token from the gateway host: `openclaw config get gateway.auth.token` (or generate one: `openclaw doctor --generate-gateway-token`).
|
||||
- Retrieve or supply the token from the gateway host:
|
||||
- Plaintext config: `openclaw config get gateway.auth.token`
|
||||
- SecretRef-managed config: resolve the external secret provider or export `OPENCLAW_GATEWAY_TOKEN` in this shell, then rerun `openclaw dashboard`
|
||||
- No token configured: `openclaw doctor --generate-gateway-token`
|
||||
- In the dashboard settings, paste the token into the auth field, then connect.
|
||||
|
||||
@@ -223,6 +223,10 @@ if (command === "prompt") {
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
if (stdinText.includes("permission-denied")) {
|
||||
process.exit(5);
|
||||
}
|
||||
|
||||
if (stdinText.includes("split-spacing")) {
|
||||
emitUpdate(sessionFromOption, {
|
||||
sessionUpdate: "agent_message_chunk",
|
||||
|
||||
@@ -224,6 +224,42 @@ describe("AcpxRuntime", () => {
|
||||
});
|
||||
});
|
||||
|
||||
it("maps acpx permission-denied exits to actionable guidance", async () => {
|
||||
const runtime = sharedFixture?.runtime;
|
||||
expect(runtime).toBeDefined();
|
||||
if (!runtime) {
|
||||
throw new Error("shared runtime fixture missing");
|
||||
}
|
||||
const handle = await runtime.ensureSession({
|
||||
sessionKey: "agent:codex:acp:permission-denied",
|
||||
agent: "codex",
|
||||
mode: "persistent",
|
||||
});
|
||||
|
||||
const events = [];
|
||||
for await (const event of runtime.runTurn({
|
||||
handle,
|
||||
text: "permission-denied",
|
||||
mode: "prompt",
|
||||
requestId: "req-perm",
|
||||
})) {
|
||||
events.push(event);
|
||||
}
|
||||
|
||||
expect(events).toContainEqual(
|
||||
expect.objectContaining({
|
||||
type: "error",
|
||||
message: expect.stringContaining("Permission denied by ACP runtime (acpx)."),
|
||||
}),
|
||||
);
|
||||
expect(events).toContainEqual(
|
||||
expect.objectContaining({
|
||||
type: "error",
|
||||
message: expect.stringContaining("approve-reads, approve-all, deny-all"),
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("supports cancel and close using encoded runtime handle state", async () => {
|
||||
const { runtime, logPath, config } = await createMockRuntimeFixture();
|
||||
const handle = await runtime.ensureSession({
|
||||
|
||||
@@ -42,10 +42,30 @@ export const ACPX_BACKEND_ID = "acpx";
|
||||
|
||||
const ACPX_RUNTIME_HANDLE_PREFIX = "acpx:v1:";
|
||||
const DEFAULT_AGENT_FALLBACK = "codex";
|
||||
const ACPX_EXIT_CODE_PERMISSION_DENIED = 5;
|
||||
const ACPX_CAPABILITIES: AcpRuntimeCapabilities = {
|
||||
controls: ["session/set_mode", "session/set_config_option", "session/status"],
|
||||
};
|
||||
|
||||
function formatPermissionModeGuidance(): string {
|
||||
return "Configure plugins.entries.acpx.config.permissionMode to one of: approve-reads, approve-all, deny-all.";
|
||||
}
|
||||
|
||||
function formatAcpxExitMessage(params: {
|
||||
stderr: string;
|
||||
exitCode: number | null | undefined;
|
||||
}): string {
|
||||
const stderr = params.stderr.trim();
|
||||
if (params.exitCode === ACPX_EXIT_CODE_PERMISSION_DENIED) {
|
||||
return [
|
||||
stderr || "Permission denied by ACP runtime (acpx).",
|
||||
"ACPX blocked a write/exec permission request in a non-interactive session.",
|
||||
formatPermissionModeGuidance(),
|
||||
].join(" ");
|
||||
}
|
||||
return stderr || `acpx exited with code ${params.exitCode ?? "unknown"}`;
|
||||
}
|
||||
|
||||
export function encodeAcpxRuntimeHandleState(state: AcpxHandleState): string {
|
||||
const payload = Buffer.from(JSON.stringify(state), "utf8").toString("base64url");
|
||||
return `${ACPX_RUNTIME_HANDLE_PREFIX}${payload}`;
|
||||
@@ -333,7 +353,10 @@ export class AcpxRuntime implements AcpRuntime {
|
||||
if ((exit.code ?? 0) !== 0 && !sawError) {
|
||||
yield {
|
||||
type: "error",
|
||||
message: stderr.trim() || `acpx exited with code ${exit.code ?? "unknown"}`,
|
||||
message: formatAcpxExitMessage({
|
||||
stderr,
|
||||
exitCode: exit.code,
|
||||
}),
|
||||
};
|
||||
return;
|
||||
}
|
||||
@@ -639,7 +662,10 @@ export class AcpxRuntime implements AcpRuntime {
|
||||
if ((result.code ?? 0) !== 0) {
|
||||
throw new AcpRuntimeError(
|
||||
params.fallbackCode,
|
||||
result.stderr.trim() || `acpx exited with code ${result.code ?? "unknown"}`,
|
||||
formatAcpxExitMessage({
|
||||
stderr: result.stderr,
|
||||
exitCode: result.code,
|
||||
}),
|
||||
);
|
||||
}
|
||||
return events;
|
||||
|
||||
@@ -16,7 +16,7 @@ The tool can return:
|
||||
- `details.filePath`: a local rendered artifact path when file rendering is requested
|
||||
- `details.fileFormat`: the rendered file format (`png` or `pdf`)
|
||||
|
||||
When the plugin is enabled, it also ships a companion skill from `skills/` that guides when to use `diffs`. This guidance is delivered through normal skill loading, not unconditional prompt-hook injection on every turn.
|
||||
When the plugin is enabled, it also ships a companion skill from `skills/` and prepends stable tool-usage guidance into system-prompt space via `before_prompt_build`. The hook uses `prependSystemContext`, so the guidance stays out of user-prompt space while still being available every turn.
|
||||
|
||||
This means an agent can:
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ import { createMockServerResponse } from "../../src/test-utils/mock-http-respons
|
||||
import plugin from "./index.js";
|
||||
|
||||
describe("diffs plugin registration", () => {
|
||||
it("registers the tool and http route", () => {
|
||||
it("registers the tool, http route, and system-prompt guidance hook", async () => {
|
||||
const registerTool = vi.fn();
|
||||
const registerHttpRoute = vi.fn();
|
||||
const on = vi.fn();
|
||||
@@ -43,7 +43,14 @@ describe("diffs plugin registration", () => {
|
||||
auth: "plugin",
|
||||
match: "prefix",
|
||||
});
|
||||
expect(on).not.toHaveBeenCalled();
|
||||
expect(on).toHaveBeenCalledTimes(1);
|
||||
expect(on.mock.calls[0]?.[0]).toBe("before_prompt_build");
|
||||
const beforePromptBuild = on.mock.calls[0]?.[1];
|
||||
const result = await beforePromptBuild?.({}, {});
|
||||
expect(result).toMatchObject({
|
||||
prependSystemContext: expect.stringContaining("prefer the `diffs` tool"),
|
||||
});
|
||||
expect(result?.prependContext).toBeUndefined();
|
||||
});
|
||||
|
||||
it("applies plugin-config defaults through registered tool and viewer handler", async () => {
|
||||
|
||||
@@ -7,6 +7,7 @@ import {
|
||||
resolveDiffsPluginSecurity,
|
||||
} from "./src/config.js";
|
||||
import { createDiffsHttpHandler } from "./src/http.js";
|
||||
import { DIFFS_AGENT_GUIDANCE } from "./src/prompt-guidance.js";
|
||||
import { DiffArtifactStore } from "./src/store.js";
|
||||
import { createDiffsTool } from "./src/tool.js";
|
||||
|
||||
@@ -34,6 +35,9 @@ const plugin = {
|
||||
allowRemoteViewer: security.allowRemoteViewer,
|
||||
}),
|
||||
});
|
||||
api.on("before_prompt_build", async () => ({
|
||||
prependSystemContext: DIFFS_AGENT_GUIDANCE,
|
||||
}));
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
7
extensions/diffs/src/prompt-guidance.ts
Normal file
7
extensions/diffs/src/prompt-guidance.ts
Normal file
@@ -0,0 +1,7 @@
|
||||
export const DIFFS_AGENT_GUIDANCE = [
|
||||
"When you need to show edits as a real diff, prefer the `diffs` tool instead of writing a manual summary.",
|
||||
"It accepts either `before` + `after` text or a unified `patch`.",
|
||||
"`mode=view` returns `details.viewerUrl` for canvas use; `mode=file` returns `details.filePath`; `mode=both` returns both.",
|
||||
"If you need to send the rendered file, use the `message` tool with `path` or `filePath`.",
|
||||
"Include `path` when you know the filename, and omit presentation overrides unless needed.",
|
||||
].join("\n");
|
||||
@@ -10,6 +10,7 @@ import {
|
||||
DiscordConfigSchema,
|
||||
formatPairingApproveHint,
|
||||
getChatChannelMeta,
|
||||
inspectDiscordAccount,
|
||||
listDiscordAccountIds,
|
||||
listDiscordDirectoryGroupsFromConfig,
|
||||
listDiscordDirectoryPeersFromConfig,
|
||||
@@ -19,6 +20,8 @@ import {
|
||||
normalizeDiscordMessagingTarget,
|
||||
normalizeDiscordOutboundTarget,
|
||||
PAIRING_APPROVED_MESSAGE,
|
||||
projectCredentialSnapshotFields,
|
||||
resolveConfiguredFromCredentialStatuses,
|
||||
resolveDiscordAccount,
|
||||
resolveDefaultDiscordAccountId,
|
||||
resolveDiscordGroupRequireMention,
|
||||
@@ -80,6 +83,7 @@ export const discordPlugin: ChannelPlugin<ResolvedDiscordAccount> = {
|
||||
config: {
|
||||
listAccountIds: (cfg) => listDiscordAccountIds(cfg),
|
||||
resolveAccount: (cfg, accountId) => resolveDiscordAccount({ cfg, accountId }),
|
||||
inspectAccount: (cfg, accountId) => inspectDiscordAccount({ cfg, accountId }),
|
||||
defaultAccountId: (cfg) => resolveDefaultDiscordAccountId(cfg),
|
||||
setAccountEnabled: ({ cfg, accountId, enabled }) =>
|
||||
setAccountEnabledInConfigSection({
|
||||
@@ -390,7 +394,8 @@ export const discordPlugin: ChannelPlugin<ResolvedDiscordAccount> = {
|
||||
return { ...audit, unresolvedChannels };
|
||||
},
|
||||
buildAccountSnapshot: ({ account, runtime, probe, audit }) => {
|
||||
const configured = Boolean(account.token?.trim());
|
||||
const configured =
|
||||
resolveConfiguredFromCredentialStatuses(account) ?? Boolean(account.token?.trim());
|
||||
const app = runtime?.application ?? (probe as { application?: unknown })?.application;
|
||||
const bot = runtime?.bot ?? (probe as { bot?: unknown })?.bot;
|
||||
return {
|
||||
@@ -398,7 +403,7 @@ export const discordPlugin: ChannelPlugin<ResolvedDiscordAccount> = {
|
||||
name: account.name,
|
||||
enabled: account.enabled,
|
||||
configured,
|
||||
tokenSource: account.tokenSource,
|
||||
...projectCredentialSnapshotFields(account),
|
||||
running: runtime?.running ?? false,
|
||||
lastStartAt: runtime?.lastStartAt ?? null,
|
||||
lastStopAt: runtime?.lastStopAt ?? null,
|
||||
|
||||
@@ -76,6 +76,14 @@ describe("parseFeishuMessageEvent – mentionedBot", () => {
|
||||
expect(ctx.mentionedBot).toBe(true);
|
||||
});
|
||||
|
||||
it("returns mentionedBot=true when bot mention name differs from configured botName", () => {
|
||||
const event = makeEvent("group", [
|
||||
{ key: "@_user_1", name: "OpenClaw Bot (Alias)", id: { open_id: BOT_OPEN_ID } },
|
||||
]);
|
||||
const ctx = parseFeishuMessageEvent(event as any, BOT_OPEN_ID, "OpenClaw Bot");
|
||||
expect(ctx.mentionedBot).toBe(true);
|
||||
});
|
||||
|
||||
it("returns mentionedBot=false when only other users are mentioned", () => {
|
||||
const event = makeEvent("group", [
|
||||
{ key: "@_user_1", name: "Alice", id: { open_id: "ou_alice" } },
|
||||
|
||||
@@ -37,7 +37,7 @@ describe("normalizeMentions (via parseFeishuMessageEvent)", () => {
|
||||
expect(ctx.content).toBe("hello");
|
||||
});
|
||||
|
||||
it("normalizes bot mention to <at> tag in group (semantic content)", () => {
|
||||
it("strips bot mention in group so slash commands work (#35994)", () => {
|
||||
const ctx = parseFeishuMessageEvent(
|
||||
makeEvent(
|
||||
"@_bot_1 hello",
|
||||
@@ -46,7 +46,19 @@ describe("normalizeMentions (via parseFeishuMessageEvent)", () => {
|
||||
) as any,
|
||||
BOT_OPEN_ID,
|
||||
);
|
||||
expect(ctx.content).toBe('<at user_id="ou_bot">Bot</at> hello');
|
||||
expect(ctx.content).toBe("hello");
|
||||
});
|
||||
|
||||
it("strips bot mention in group preserving slash command prefix (#35994)", () => {
|
||||
const ctx = parseFeishuMessageEvent(
|
||||
makeEvent(
|
||||
"@_bot_1 /model",
|
||||
[{ key: "@_bot_1", name: "Bot", id: { open_id: "ou_bot" } }],
|
||||
"group",
|
||||
) as any,
|
||||
BOT_OPEN_ID,
|
||||
);
|
||||
expect(ctx.content).toBe("/model");
|
||||
});
|
||||
|
||||
it("strips bot mention but normalizes other mentions in p2p (mention-forward)", () => {
|
||||
|
||||
@@ -521,6 +521,42 @@ describe("handleFeishuMessage command authorization", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("normalizes group mention-prefixed slash commands before command-auth probing", async () => {
|
||||
mockShouldComputeCommandAuthorized.mockReturnValue(true);
|
||||
|
||||
const cfg: ClawdbotConfig = {
|
||||
channels: {
|
||||
feishu: {
|
||||
groups: {
|
||||
"oc-group": {
|
||||
requireMention: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} as ClawdbotConfig;
|
||||
|
||||
const event: FeishuMessageEvent = {
|
||||
sender: {
|
||||
sender_id: {
|
||||
open_id: "ou-attacker",
|
||||
},
|
||||
},
|
||||
message: {
|
||||
message_id: "msg-group-mention-command-probe",
|
||||
chat_id: "oc-group",
|
||||
chat_type: "group",
|
||||
message_type: "text",
|
||||
content: JSON.stringify({ text: "@_user_1/model" }),
|
||||
mentions: [{ key: "@_user_1", id: { open_id: "ou-bot" }, name: "Bot", tenant_key: "" }],
|
||||
},
|
||||
};
|
||||
|
||||
await dispatchMessage({ cfg, event });
|
||||
|
||||
expect(mockShouldComputeCommandAuthorized).toHaveBeenCalledWith("/model", cfg);
|
||||
});
|
||||
|
||||
it("falls back to top-level allowFrom for group command authorization", async () => {
|
||||
mockShouldComputeCommandAuthorized.mockReturnValue(true);
|
||||
mockResolveCommandAuthorizedFromAuthorizers.mockReturnValue(true);
|
||||
|
||||
@@ -450,24 +450,15 @@ function formatSubMessageContent(content: string, contentType: string): string {
|
||||
}
|
||||
}
|
||||
|
||||
function checkBotMentioned(
|
||||
event: FeishuMessageEvent,
|
||||
botOpenId?: string,
|
||||
botName?: string,
|
||||
): boolean {
|
||||
function checkBotMentioned(event: FeishuMessageEvent, botOpenId?: string): boolean {
|
||||
if (!botOpenId) return false;
|
||||
// Check for @all (@_all in Feishu) — treat as mentioning every bot
|
||||
const rawContent = event.message.content ?? "";
|
||||
if (rawContent.includes("@_all")) return true;
|
||||
const mentions = event.message.mentions ?? [];
|
||||
if (mentions.length > 0) {
|
||||
return mentions.some((m) => {
|
||||
if (m.id.open_id !== botOpenId) return false;
|
||||
// Guard against Feishu WS open_id remapping in multi-app groups:
|
||||
// if botName is known and mention name differs, this is a false positive.
|
||||
if (botName && m.name && m.name !== botName) return false;
|
||||
return true;
|
||||
});
|
||||
// Rely on Feishu mention IDs; display names can vary by alias/context.
|
||||
return mentions.some((m) => m.id.open_id === botOpenId);
|
||||
}
|
||||
// Post (rich text) messages may have empty message.mentions when they contain docs/paste
|
||||
if (event.message.message_type === "post") {
|
||||
@@ -503,6 +494,17 @@ function normalizeMentions(
|
||||
return result;
|
||||
}
|
||||
|
||||
function normalizeFeishuCommandProbeBody(text: string): string {
|
||||
if (!text) {
|
||||
return "";
|
||||
}
|
||||
return text
|
||||
.replace(/<at\b[^>]*>[^<]*<\/at>/giu, " ")
|
||||
.replace(/(^|\s)@[^/\s]+(?=\s|$|\/)/gu, "$1")
|
||||
.replace(/\s+/g, " ")
|
||||
.trim();
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse media keys from message content based on message type.
|
||||
*/
|
||||
@@ -768,19 +770,17 @@ export function buildBroadcastSessionKey(
|
||||
export function parseFeishuMessageEvent(
|
||||
event: FeishuMessageEvent,
|
||||
botOpenId?: string,
|
||||
botName?: string,
|
||||
_botName?: string,
|
||||
): FeishuMessageContext {
|
||||
const rawContent = parseMessageContent(event.message.content, event.message.message_type);
|
||||
const mentionedBot = checkBotMentioned(event, botOpenId, botName);
|
||||
const mentionedBot = checkBotMentioned(event, botOpenId);
|
||||
const hasAnyMention = (event.message.mentions?.length ?? 0) > 0;
|
||||
// In p2p, the bot mention is a pure addressing prefix with no semantic value;
|
||||
// strip it so slash commands like @Bot /help still have a leading /.
|
||||
// Strip the bot's own mention so slash commands like @Bot /help retain
|
||||
// the leading /. This applies in both p2p *and* group contexts — the
|
||||
// mentionedBot flag already captures whether the bot was addressed, so
|
||||
// keeping the mention tag in content only breaks command detection (#35994).
|
||||
// Non-bot mentions (e.g. mention-forward targets) are still normalized to <at> tags.
|
||||
const content = normalizeMentions(
|
||||
rawContent,
|
||||
event.message.mentions,
|
||||
event.message.chat_type === "p2p" ? botOpenId : undefined,
|
||||
);
|
||||
const content = normalizeMentions(rawContent, event.message.mentions, botOpenId);
|
||||
const senderOpenId = event.sender.sender_id.open_id?.trim();
|
||||
const senderUserId = event.sender.sender_id.user_id?.trim();
|
||||
const senderFallbackId = senderOpenId || senderUserId || "";
|
||||
@@ -1080,8 +1080,9 @@ export async function handleFeishuMessage(params: {
|
||||
channel: "feishu",
|
||||
accountId: account.accountId,
|
||||
});
|
||||
const commandProbeBody = isGroup ? normalizeFeishuCommandProbeBody(ctx.content) : ctx.content;
|
||||
const shouldComputeCommandAuthorized = core.channel.commands.shouldComputeCommandAuthorized(
|
||||
ctx.content,
|
||||
commandProbeBody,
|
||||
cfg,
|
||||
);
|
||||
const storeAllowFrom =
|
||||
|
||||
@@ -12,6 +12,17 @@ const httpsProxyAgentCtorMock = vi.hoisted(() =>
|
||||
}),
|
||||
);
|
||||
|
||||
const mockBaseHttpInstance = vi.hoisted(() => ({
|
||||
request: vi.fn().mockResolvedValue({}),
|
||||
get: vi.fn().mockResolvedValue({}),
|
||||
post: vi.fn().mockResolvedValue({}),
|
||||
put: vi.fn().mockResolvedValue({}),
|
||||
patch: vi.fn().mockResolvedValue({}),
|
||||
delete: vi.fn().mockResolvedValue({}),
|
||||
head: vi.fn().mockResolvedValue({}),
|
||||
options: vi.fn().mockResolvedValue({}),
|
||||
}));
|
||||
|
||||
vi.mock("@larksuiteoapi/node-sdk", () => ({
|
||||
AppType: { SelfBuild: "self" },
|
||||
Domain: { Feishu: "https://open.feishu.cn", Lark: "https://open.larksuite.com" },
|
||||
@@ -19,18 +30,28 @@ vi.mock("@larksuiteoapi/node-sdk", () => ({
|
||||
Client: vi.fn(),
|
||||
WSClient: wsClientCtorMock,
|
||||
EventDispatcher: vi.fn(),
|
||||
defaultHttpInstance: mockBaseHttpInstance,
|
||||
}));
|
||||
|
||||
vi.mock("https-proxy-agent", () => ({
|
||||
HttpsProxyAgent: httpsProxyAgentCtorMock,
|
||||
}));
|
||||
|
||||
import { createFeishuWSClient } from "./client.js";
|
||||
import { Client as LarkClient } from "@larksuiteoapi/node-sdk";
|
||||
import {
|
||||
createFeishuClient,
|
||||
createFeishuWSClient,
|
||||
clearClientCache,
|
||||
FEISHU_HTTP_TIMEOUT_MS,
|
||||
FEISHU_HTTP_TIMEOUT_MAX_MS,
|
||||
FEISHU_HTTP_TIMEOUT_ENV_VAR,
|
||||
} from "./client.js";
|
||||
|
||||
const proxyEnvKeys = ["https_proxy", "HTTPS_PROXY", "http_proxy", "HTTP_PROXY"] as const;
|
||||
type ProxyEnvKey = (typeof proxyEnvKeys)[number];
|
||||
|
||||
let priorProxyEnv: Partial<Record<ProxyEnvKey, string | undefined>> = {};
|
||||
let priorFeishuTimeoutEnv: string | undefined;
|
||||
|
||||
const baseAccount: ResolvedFeishuAccount = {
|
||||
accountId: "main",
|
||||
@@ -50,6 +71,8 @@ function firstWsClientOptions(): { agent?: unknown } {
|
||||
|
||||
beforeEach(() => {
|
||||
priorProxyEnv = {};
|
||||
priorFeishuTimeoutEnv = process.env[FEISHU_HTTP_TIMEOUT_ENV_VAR];
|
||||
delete process.env[FEISHU_HTTP_TIMEOUT_ENV_VAR];
|
||||
for (const key of proxyEnvKeys) {
|
||||
priorProxyEnv[key] = process.env[key];
|
||||
delete process.env[key];
|
||||
@@ -66,6 +89,179 @@ afterEach(() => {
|
||||
process.env[key] = value;
|
||||
}
|
||||
}
|
||||
if (priorFeishuTimeoutEnv === undefined) {
|
||||
delete process.env[FEISHU_HTTP_TIMEOUT_ENV_VAR];
|
||||
} else {
|
||||
process.env[FEISHU_HTTP_TIMEOUT_ENV_VAR] = priorFeishuTimeoutEnv;
|
||||
}
|
||||
});
|
||||
|
||||
describe("createFeishuClient HTTP timeout", () => {
|
||||
beforeEach(() => {
|
||||
clearClientCache();
|
||||
});
|
||||
|
||||
it("passes a custom httpInstance with default timeout to Lark.Client", () => {
|
||||
createFeishuClient({ appId: "app_1", appSecret: "secret_1", accountId: "timeout-test" });
|
||||
|
||||
const calls = (LarkClient as unknown as ReturnType<typeof vi.fn>).mock.calls;
|
||||
const lastCall = calls[calls.length - 1][0] as { httpInstance?: unknown };
|
||||
expect(lastCall.httpInstance).toBeDefined();
|
||||
});
|
||||
|
||||
it("injects default timeout into HTTP request options", async () => {
|
||||
createFeishuClient({ appId: "app_2", appSecret: "secret_2", accountId: "timeout-inject" });
|
||||
|
||||
const calls = (LarkClient as unknown as ReturnType<typeof vi.fn>).mock.calls;
|
||||
const lastCall = calls[calls.length - 1][0] as {
|
||||
httpInstance: { post: (...args: unknown[]) => Promise<unknown> };
|
||||
};
|
||||
const httpInstance = lastCall.httpInstance;
|
||||
|
||||
await httpInstance.post(
|
||||
"https://example.com/api",
|
||||
{ data: 1 },
|
||||
{ headers: { "X-Custom": "yes" } },
|
||||
);
|
||||
|
||||
expect(mockBaseHttpInstance.post).toHaveBeenCalledWith(
|
||||
"https://example.com/api",
|
||||
{ data: 1 },
|
||||
expect.objectContaining({ timeout: FEISHU_HTTP_TIMEOUT_MS, headers: { "X-Custom": "yes" } }),
|
||||
);
|
||||
});
|
||||
|
||||
it("allows explicit timeout override per-request", async () => {
|
||||
createFeishuClient({ appId: "app_3", appSecret: "secret_3", accountId: "timeout-override" });
|
||||
|
||||
const calls = (LarkClient as unknown as ReturnType<typeof vi.fn>).mock.calls;
|
||||
const lastCall = calls[calls.length - 1][0] as {
|
||||
httpInstance: { get: (...args: unknown[]) => Promise<unknown> };
|
||||
};
|
||||
const httpInstance = lastCall.httpInstance;
|
||||
|
||||
await httpInstance.get("https://example.com/api", { timeout: 5_000 });
|
||||
|
||||
expect(mockBaseHttpInstance.get).toHaveBeenCalledWith(
|
||||
"https://example.com/api",
|
||||
expect.objectContaining({ timeout: 5_000 }),
|
||||
);
|
||||
});
|
||||
|
||||
it("uses config-configured default timeout when provided", async () => {
|
||||
createFeishuClient({
|
||||
appId: "app_4",
|
||||
appSecret: "secret_4",
|
||||
accountId: "timeout-config",
|
||||
config: { httpTimeoutMs: 45_000 },
|
||||
});
|
||||
|
||||
const calls = (LarkClient as unknown as ReturnType<typeof vi.fn>).mock.calls;
|
||||
const lastCall = calls[calls.length - 1][0] as {
|
||||
httpInstance: { get: (...args: unknown[]) => Promise<unknown> };
|
||||
};
|
||||
const httpInstance = lastCall.httpInstance;
|
||||
|
||||
await httpInstance.get("https://example.com/api");
|
||||
|
||||
expect(mockBaseHttpInstance.get).toHaveBeenCalledWith(
|
||||
"https://example.com/api",
|
||||
expect.objectContaining({ timeout: 45_000 }),
|
||||
);
|
||||
});
|
||||
|
||||
it("falls back to default timeout when configured timeout is invalid", async () => {
|
||||
createFeishuClient({
|
||||
appId: "app_5",
|
||||
appSecret: "secret_5",
|
||||
accountId: "timeout-config-invalid",
|
||||
config: { httpTimeoutMs: -1 },
|
||||
});
|
||||
|
||||
const calls = (LarkClient as unknown as ReturnType<typeof vi.fn>).mock.calls;
|
||||
const lastCall = calls[calls.length - 1][0] as {
|
||||
httpInstance: { get: (...args: unknown[]) => Promise<unknown> };
|
||||
};
|
||||
const httpInstance = lastCall.httpInstance;
|
||||
|
||||
await httpInstance.get("https://example.com/api");
|
||||
|
||||
expect(mockBaseHttpInstance.get).toHaveBeenCalledWith(
|
||||
"https://example.com/api",
|
||||
expect.objectContaining({ timeout: FEISHU_HTTP_TIMEOUT_MS }),
|
||||
);
|
||||
});
|
||||
|
||||
it("uses env timeout override when provided", async () => {
|
||||
process.env[FEISHU_HTTP_TIMEOUT_ENV_VAR] = "60000";
|
||||
|
||||
createFeishuClient({
|
||||
appId: "app_8",
|
||||
appSecret: "secret_8",
|
||||
accountId: "timeout-env-override",
|
||||
config: { httpTimeoutMs: 45_000 },
|
||||
});
|
||||
|
||||
const calls = (LarkClient as unknown as ReturnType<typeof vi.fn>).mock.calls;
|
||||
const lastCall = calls[calls.length - 1][0] as {
|
||||
httpInstance: { get: (...args: unknown[]) => Promise<unknown> };
|
||||
};
|
||||
await lastCall.httpInstance.get("https://example.com/api");
|
||||
|
||||
expect(mockBaseHttpInstance.get).toHaveBeenCalledWith(
|
||||
"https://example.com/api",
|
||||
expect.objectContaining({ timeout: 60_000 }),
|
||||
);
|
||||
});
|
||||
|
||||
it("clamps env timeout override to max bound", async () => {
|
||||
process.env[FEISHU_HTTP_TIMEOUT_ENV_VAR] = String(FEISHU_HTTP_TIMEOUT_MAX_MS + 123_456);
|
||||
|
||||
createFeishuClient({
|
||||
appId: "app_9",
|
||||
appSecret: "secret_9",
|
||||
accountId: "timeout-env-clamp",
|
||||
});
|
||||
|
||||
const calls = (LarkClient as unknown as ReturnType<typeof vi.fn>).mock.calls;
|
||||
const lastCall = calls[calls.length - 1][0] as {
|
||||
httpInstance: { get: (...args: unknown[]) => Promise<unknown> };
|
||||
};
|
||||
await lastCall.httpInstance.get("https://example.com/api");
|
||||
|
||||
expect(mockBaseHttpInstance.get).toHaveBeenCalledWith(
|
||||
"https://example.com/api",
|
||||
expect.objectContaining({ timeout: FEISHU_HTTP_TIMEOUT_MAX_MS }),
|
||||
);
|
||||
});
|
||||
|
||||
it("recreates cached client when configured timeout changes", async () => {
|
||||
createFeishuClient({
|
||||
appId: "app_6",
|
||||
appSecret: "secret_6",
|
||||
accountId: "timeout-cache-change",
|
||||
config: { httpTimeoutMs: 30_000 },
|
||||
});
|
||||
createFeishuClient({
|
||||
appId: "app_6",
|
||||
appSecret: "secret_6",
|
||||
accountId: "timeout-cache-change",
|
||||
config: { httpTimeoutMs: 45_000 },
|
||||
});
|
||||
|
||||
const calls = (LarkClient as unknown as ReturnType<typeof vi.fn>).mock.calls;
|
||||
expect(calls.length).toBe(2);
|
||||
|
||||
const lastCall = calls[calls.length - 1][0] as {
|
||||
httpInstance: { get: (...args: unknown[]) => Promise<unknown> };
|
||||
};
|
||||
await lastCall.httpInstance.get("https://example.com/api");
|
||||
|
||||
expect(mockBaseHttpInstance.get).toHaveBeenCalledWith(
|
||||
"https://example.com/api",
|
||||
expect.objectContaining({ timeout: 45_000 }),
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("createFeishuWSClient proxy handling", () => {
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
import * as Lark from "@larksuiteoapi/node-sdk";
|
||||
import { HttpsProxyAgent } from "https-proxy-agent";
|
||||
import type { FeishuDomain, ResolvedFeishuAccount } from "./types.js";
|
||||
import type { FeishuConfig, FeishuDomain, ResolvedFeishuAccount } from "./types.js";
|
||||
|
||||
/** Default HTTP timeout for Feishu API requests (30 seconds). */
|
||||
export const FEISHU_HTTP_TIMEOUT_MS = 30_000;
|
||||
export const FEISHU_HTTP_TIMEOUT_MAX_MS = 300_000;
|
||||
export const FEISHU_HTTP_TIMEOUT_ENV_VAR = "OPENCLAW_FEISHU_HTTP_TIMEOUT_MS";
|
||||
|
||||
function getWsProxyAgent(): HttpsProxyAgent<string> | undefined {
|
||||
const proxyUrl =
|
||||
@@ -17,7 +22,7 @@ const clientCache = new Map<
|
||||
string,
|
||||
{
|
||||
client: Lark.Client;
|
||||
config: { appId: string; appSecret: string; domain?: FeishuDomain };
|
||||
config: { appId: string; appSecret: string; domain?: FeishuDomain; httpTimeoutMs: number };
|
||||
}
|
||||
>();
|
||||
|
||||
@@ -31,6 +36,30 @@ function resolveDomain(domain: FeishuDomain | undefined): Lark.Domain | string {
|
||||
return domain.replace(/\/+$/, ""); // Custom URL for private deployment
|
||||
}
|
||||
|
||||
/**
|
||||
* Create an HTTP instance that delegates to the Lark SDK's default instance
|
||||
* but injects a default request timeout to prevent indefinite hangs
|
||||
* (e.g. when the Feishu API is slow, causing per-chat queue deadlocks).
|
||||
*/
|
||||
function createTimeoutHttpInstance(defaultTimeoutMs: number): Lark.HttpInstance {
|
||||
const base: Lark.HttpInstance = Lark.defaultHttpInstance as unknown as Lark.HttpInstance;
|
||||
|
||||
function injectTimeout<D>(opts?: Lark.HttpRequestOptions<D>): Lark.HttpRequestOptions<D> {
|
||||
return { timeout: defaultTimeoutMs, ...opts } as Lark.HttpRequestOptions<D>;
|
||||
}
|
||||
|
||||
return {
|
||||
request: (opts) => base.request(injectTimeout(opts)),
|
||||
get: (url, opts) => base.get(url, injectTimeout(opts)),
|
||||
post: (url, data, opts) => base.post(url, data, injectTimeout(opts)),
|
||||
put: (url, data, opts) => base.put(url, data, injectTimeout(opts)),
|
||||
patch: (url, data, opts) => base.patch(url, data, injectTimeout(opts)),
|
||||
delete: (url, opts) => base.delete(url, injectTimeout(opts)),
|
||||
head: (url, opts) => base.head(url, injectTimeout(opts)),
|
||||
options: (url, opts) => base.options(url, injectTimeout(opts)),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Credentials needed to create a Feishu client.
|
||||
* Both FeishuConfig and ResolvedFeishuAccount satisfy this interface.
|
||||
@@ -40,14 +69,40 @@ export type FeishuClientCredentials = {
|
||||
appId?: string;
|
||||
appSecret?: string;
|
||||
domain?: FeishuDomain;
|
||||
httpTimeoutMs?: number;
|
||||
config?: Pick<FeishuConfig, "httpTimeoutMs">;
|
||||
};
|
||||
|
||||
function resolveConfiguredHttpTimeoutMs(creds: FeishuClientCredentials): number {
|
||||
const clampTimeout = (value: number): number => {
|
||||
const rounded = Math.floor(value);
|
||||
return Math.min(Math.max(rounded, 1), FEISHU_HTTP_TIMEOUT_MAX_MS);
|
||||
};
|
||||
|
||||
const envRaw = process.env[FEISHU_HTTP_TIMEOUT_ENV_VAR];
|
||||
if (envRaw) {
|
||||
const envValue = Number(envRaw);
|
||||
if (Number.isFinite(envValue) && envValue > 0) {
|
||||
return clampTimeout(envValue);
|
||||
}
|
||||
}
|
||||
|
||||
const fromConfig = creds.config?.httpTimeoutMs;
|
||||
const fromDirectField = creds.httpTimeoutMs;
|
||||
const timeout = fromDirectField ?? fromConfig;
|
||||
if (typeof timeout !== "number" || !Number.isFinite(timeout) || timeout <= 0) {
|
||||
return FEISHU_HTTP_TIMEOUT_MS;
|
||||
}
|
||||
return clampTimeout(timeout);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create or get a cached Feishu client for an account.
|
||||
* Accepts any object with appId, appSecret, and optional domain/accountId.
|
||||
*/
|
||||
export function createFeishuClient(creds: FeishuClientCredentials): Lark.Client {
|
||||
const { accountId = "default", appId, appSecret, domain } = creds;
|
||||
const defaultHttpTimeoutMs = resolveConfiguredHttpTimeoutMs(creds);
|
||||
|
||||
if (!appId || !appSecret) {
|
||||
throw new Error(`Feishu credentials not configured for account "${accountId}"`);
|
||||
@@ -59,23 +114,25 @@ export function createFeishuClient(creds: FeishuClientCredentials): Lark.Client
|
||||
cached &&
|
||||
cached.config.appId === appId &&
|
||||
cached.config.appSecret === appSecret &&
|
||||
cached.config.domain === domain
|
||||
cached.config.domain === domain &&
|
||||
cached.config.httpTimeoutMs === defaultHttpTimeoutMs
|
||||
) {
|
||||
return cached.client;
|
||||
}
|
||||
|
||||
// Create new client
|
||||
// Create new client with timeout-aware HTTP instance
|
||||
const client = new Lark.Client({
|
||||
appId,
|
||||
appSecret,
|
||||
appType: Lark.AppType.SelfBuild,
|
||||
domain: resolveDomain(domain),
|
||||
httpInstance: createTimeoutHttpInstance(defaultHttpTimeoutMs),
|
||||
});
|
||||
|
||||
// Cache it
|
||||
clientCache.set(accountId, {
|
||||
client,
|
||||
config: { appId, appSecret, domain },
|
||||
config: { appId, appSecret, domain, httpTimeoutMs: defaultHttpTimeoutMs },
|
||||
});
|
||||
|
||||
return client;
|
||||
|
||||
@@ -24,6 +24,14 @@ describe("FeishuConfigSchema webhook validation", () => {
|
||||
expect(result.accounts?.main?.requireMention).toBeUndefined();
|
||||
});
|
||||
|
||||
it("normalizes legacy groupPolicy allowall to open", () => {
|
||||
const result = FeishuConfigSchema.parse({
|
||||
groupPolicy: "allowall",
|
||||
});
|
||||
|
||||
expect(result.groupPolicy).toBe("open");
|
||||
});
|
||||
|
||||
it("rejects top-level webhook mode without verificationToken", () => {
|
||||
const result = FeishuConfigSchema.safeParse({
|
||||
connectionMode: "webhook",
|
||||
|
||||
@@ -4,7 +4,10 @@ export { z };
|
||||
import { buildSecretInputSchema, hasConfiguredSecretInput } from "./secret-input.js";
|
||||
|
||||
const DmPolicySchema = z.enum(["open", "pairing", "allowlist"]);
|
||||
const GroupPolicySchema = z.enum(["open", "allowlist", "disabled"]);
|
||||
const GroupPolicySchema = z.union([
|
||||
z.enum(["open", "allowlist", "disabled"]),
|
||||
z.literal("allowall").transform(() => "open" as const),
|
||||
]);
|
||||
const FeishuDomainSchema = z.union([
|
||||
z.enum(["feishu", "lark"]),
|
||||
z.string().url().startsWith("https://"),
|
||||
@@ -162,6 +165,7 @@ const FeishuSharedConfigShape = {
|
||||
chunkMode: z.enum(["length", "newline"]).optional(),
|
||||
blockStreamingCoalesce: BlockStreamingCoalesceSchema,
|
||||
mediaMaxMb: z.number().positive().optional(),
|
||||
httpTimeoutMs: z.number().int().positive().max(300_000).optional(),
|
||||
heartbeat: ChannelHeartbeatVisibilitySchema,
|
||||
renderMode: RenderModeSchema,
|
||||
streaming: StreamingModeSchema,
|
||||
|
||||
@@ -10,6 +10,7 @@ const resolveReceiveIdTypeMock = vi.hoisted(() => vi.fn());
|
||||
const loadWebMediaMock = vi.hoisted(() => vi.fn());
|
||||
|
||||
const fileCreateMock = vi.hoisted(() => vi.fn());
|
||||
const imageCreateMock = vi.hoisted(() => vi.fn());
|
||||
const imageGetMock = vi.hoisted(() => vi.fn());
|
||||
const messageCreateMock = vi.hoisted(() => vi.fn());
|
||||
const messageResourceGetMock = vi.hoisted(() => vi.fn());
|
||||
@@ -75,6 +76,7 @@ describe("sendMediaFeishu msg_type routing", () => {
|
||||
create: fileCreateMock,
|
||||
},
|
||||
image: {
|
||||
create: imageCreateMock,
|
||||
get: imageGetMock,
|
||||
},
|
||||
message: {
|
||||
@@ -91,6 +93,10 @@ describe("sendMediaFeishu msg_type routing", () => {
|
||||
code: 0,
|
||||
data: { file_key: "file_key_1" },
|
||||
});
|
||||
imageCreateMock.mockResolvedValue({
|
||||
code: 0,
|
||||
data: { image_key: "image_key_1" },
|
||||
});
|
||||
|
||||
messageCreateMock.mockResolvedValue({
|
||||
code: 0,
|
||||
@@ -176,6 +182,26 @@ describe("sendMediaFeishu msg_type routing", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("uses image upload timeout override for image media", async () => {
|
||||
await sendMediaFeishu({
|
||||
cfg: {} as any,
|
||||
to: "user:ou_target",
|
||||
mediaBuffer: Buffer.from("image"),
|
||||
fileName: "photo.png",
|
||||
});
|
||||
|
||||
expect(imageCreateMock).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
timeout: 120_000,
|
||||
}),
|
||||
);
|
||||
expect(messageCreateMock).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
data: expect.objectContaining({ msg_type: "image" }),
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("uses msg_type=media when replying with mp4", async () => {
|
||||
await sendMediaFeishu({
|
||||
cfg: {} as any,
|
||||
@@ -291,6 +317,12 @@ describe("sendMediaFeishu msg_type routing", () => {
|
||||
imageKey,
|
||||
});
|
||||
|
||||
expect(imageGetMock).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
path: { image_key: imageKey },
|
||||
timeout: 120_000,
|
||||
}),
|
||||
);
|
||||
expect(result.buffer).toEqual(Buffer.from("image-data"));
|
||||
expect(capturedPath).toBeDefined();
|
||||
expectPathIsolatedToTmpRoot(capturedPath as string, imageKey);
|
||||
@@ -476,10 +508,13 @@ describe("downloadMessageResourceFeishu", () => {
|
||||
type: "file",
|
||||
});
|
||||
|
||||
expect(messageResourceGetMock).toHaveBeenCalledWith({
|
||||
path: { message_id: "om_audio_msg", file_key: "file_key_audio" },
|
||||
params: { type: "file" },
|
||||
});
|
||||
expect(messageResourceGetMock).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
path: { message_id: "om_audio_msg", file_key: "file_key_audio" },
|
||||
params: { type: "file" },
|
||||
timeout: 120_000,
|
||||
}),
|
||||
);
|
||||
expect(result.buffer).toBeInstanceOf(Buffer);
|
||||
});
|
||||
|
||||
@@ -493,10 +528,13 @@ describe("downloadMessageResourceFeishu", () => {
|
||||
type: "image",
|
||||
});
|
||||
|
||||
expect(messageResourceGetMock).toHaveBeenCalledWith({
|
||||
path: { message_id: "om_img_msg", file_key: "img_key_1" },
|
||||
params: { type: "image" },
|
||||
});
|
||||
expect(messageResourceGetMock).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
path: { message_id: "om_img_msg", file_key: "img_key_1" },
|
||||
params: { type: "image" },
|
||||
timeout: 120_000,
|
||||
}),
|
||||
);
|
||||
expect(result.buffer).toBeInstanceOf(Buffer);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -9,6 +9,8 @@ import { getFeishuRuntime } from "./runtime.js";
|
||||
import { assertFeishuMessageApiSuccess, toFeishuSendResult } from "./send-result.js";
|
||||
import { resolveFeishuSendTarget } from "./send-target.js";
|
||||
|
||||
const FEISHU_MEDIA_HTTP_TIMEOUT_MS = 120_000;
|
||||
|
||||
export type DownloadImageResult = {
|
||||
buffer: Buffer;
|
||||
contentType?: string;
|
||||
@@ -97,7 +99,10 @@ export async function downloadImageFeishu(params: {
|
||||
throw new Error(`Feishu account "${account.accountId}" not configured`);
|
||||
}
|
||||
|
||||
const client = createFeishuClient(account);
|
||||
const client = createFeishuClient({
|
||||
...account,
|
||||
httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS,
|
||||
});
|
||||
|
||||
const response = await client.im.image.get({
|
||||
path: { image_key: normalizedImageKey },
|
||||
@@ -132,7 +137,10 @@ export async function downloadMessageResourceFeishu(params: {
|
||||
throw new Error(`Feishu account "${account.accountId}" not configured`);
|
||||
}
|
||||
|
||||
const client = createFeishuClient(account);
|
||||
const client = createFeishuClient({
|
||||
...account,
|
||||
httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS,
|
||||
});
|
||||
|
||||
const response = await client.im.messageResource.get({
|
||||
path: { message_id: messageId, file_key: normalizedFileKey },
|
||||
@@ -176,7 +184,10 @@ export async function uploadImageFeishu(params: {
|
||||
throw new Error(`Feishu account "${account.accountId}" not configured`);
|
||||
}
|
||||
|
||||
const client = createFeishuClient(account);
|
||||
const client = createFeishuClient({
|
||||
...account,
|
||||
httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS,
|
||||
});
|
||||
|
||||
// SDK accepts Buffer directly or fs.ReadStream for file paths
|
||||
// Using Readable.from(buffer) causes issues with form-data library
|
||||
@@ -243,7 +254,10 @@ export async function uploadFileFeishu(params: {
|
||||
throw new Error(`Feishu account "${account.accountId}" not configured`);
|
||||
}
|
||||
|
||||
const client = createFeishuClient(account);
|
||||
const client = createFeishuClient({
|
||||
...account,
|
||||
httpTimeoutMs: FEISHU_MEDIA_HTTP_TIMEOUT_MS,
|
||||
});
|
||||
|
||||
// SDK accepts Buffer directly or fs.ReadStream for file paths
|
||||
// Using Readable.from(buffer) causes issues with form-data library
|
||||
|
||||
@@ -19,8 +19,8 @@ import {
|
||||
warmupDedupFromDisk,
|
||||
} from "./dedup.js";
|
||||
import { isMentionForwardRequest } from "./mention.js";
|
||||
import { fetchBotOpenIdForMonitor } from "./monitor.startup.js";
|
||||
import { botOpenIds } from "./monitor.state.js";
|
||||
import { fetchBotIdentityForMonitor } from "./monitor.startup.js";
|
||||
import { botNames, botOpenIds } from "./monitor.state.js";
|
||||
import { monitorWebhook, monitorWebSocket } from "./monitor.transport.js";
|
||||
import { getFeishuRuntime } from "./runtime.js";
|
||||
import { getMessageFeishu } from "./send.js";
|
||||
@@ -247,6 +247,7 @@ function registerEventHandlers(
|
||||
cfg,
|
||||
event,
|
||||
botOpenId: botOpenIds.get(accountId),
|
||||
botName: botNames.get(accountId),
|
||||
runtime,
|
||||
chatHistories,
|
||||
accountId,
|
||||
@@ -260,7 +261,7 @@ function registerEventHandlers(
|
||||
};
|
||||
const resolveDebounceText = (event: FeishuMessageEvent): string => {
|
||||
const botOpenId = botOpenIds.get(accountId);
|
||||
const parsed = parseFeishuMessageEvent(event, botOpenId);
|
||||
const parsed = parseFeishuMessageEvent(event, botOpenId, botNames.get(accountId));
|
||||
return parsed.content.trim();
|
||||
};
|
||||
const recordSuppressedMessageIds = async (
|
||||
@@ -430,6 +431,7 @@ function registerEventHandlers(
|
||||
cfg,
|
||||
event: syntheticEvent,
|
||||
botOpenId: myBotId,
|
||||
botName: botNames.get(accountId),
|
||||
runtime,
|
||||
chatHistories,
|
||||
accountId,
|
||||
@@ -483,7 +485,9 @@ function registerEventHandlers(
|
||||
});
|
||||
}
|
||||
|
||||
export type BotOpenIdSource = { kind: "prefetched"; botOpenId?: string } | { kind: "fetch" };
|
||||
export type BotOpenIdSource =
|
||||
| { kind: "prefetched"; botOpenId?: string; botName?: string }
|
||||
| { kind: "fetch" };
|
||||
|
||||
export type MonitorSingleAccountParams = {
|
||||
cfg: ClawdbotConfig;
|
||||
@@ -499,11 +503,18 @@ export async function monitorSingleAccount(params: MonitorSingleAccountParams):
|
||||
const log = runtime?.log ?? console.log;
|
||||
|
||||
const botOpenIdSource = params.botOpenIdSource ?? { kind: "fetch" };
|
||||
const botOpenId =
|
||||
const botIdentity =
|
||||
botOpenIdSource.kind === "prefetched"
|
||||
? botOpenIdSource.botOpenId
|
||||
: await fetchBotOpenIdForMonitor(account, { runtime, abortSignal });
|
||||
? { botOpenId: botOpenIdSource.botOpenId, botName: botOpenIdSource.botName }
|
||||
: await fetchBotIdentityForMonitor(account, { runtime, abortSignal });
|
||||
const botOpenId = botIdentity.botOpenId;
|
||||
const botName = botIdentity.botName?.trim();
|
||||
botOpenIds.set(accountId, botOpenId ?? "");
|
||||
if (botName) {
|
||||
botNames.set(accountId, botName);
|
||||
} else {
|
||||
botNames.delete(accountId);
|
||||
}
|
||||
log(`feishu[${accountId}]: bot open_id resolved: ${botOpenId ?? "unknown"}`);
|
||||
|
||||
const connectionMode = account.config.connectionMode ?? "websocket";
|
||||
|
||||
@@ -109,7 +109,10 @@ function createTextEvent(params: {
|
||||
};
|
||||
}
|
||||
|
||||
async function setupDebounceMonitor(): Promise<(data: unknown) => Promise<void>> {
|
||||
async function setupDebounceMonitor(params?: {
|
||||
botOpenId?: string;
|
||||
botName?: string;
|
||||
}): Promise<(data: unknown) => Promise<void>> {
|
||||
const register = vi.fn((registered: Record<string, (data: unknown) => Promise<void>>) => {
|
||||
handlers = registered;
|
||||
});
|
||||
@@ -123,7 +126,11 @@ async function setupDebounceMonitor(): Promise<(data: unknown) => Promise<void>>
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
} as RuntimeEnv,
|
||||
botOpenIdSource: { kind: "prefetched", botOpenId: "ou_bot" },
|
||||
botOpenIdSource: {
|
||||
kind: "prefetched",
|
||||
botOpenId: params?.botOpenId ?? "ou_bot",
|
||||
botName: params?.botName,
|
||||
},
|
||||
});
|
||||
|
||||
const onMessage = handlers["im.message.receive_v1"];
|
||||
@@ -434,6 +441,37 @@ describe("Feishu inbound debounce regressions", () => {
|
||||
expect(mergedMentions.some((mention) => mention.id.open_id === "ou_user_a")).toBe(false);
|
||||
});
|
||||
|
||||
it("passes prefetched botName through to handleFeishuMessage", async () => {
|
||||
vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true);
|
||||
vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true);
|
||||
vi.spyOn(dedup, "hasRecordedMessage").mockReturnValue(false);
|
||||
vi.spyOn(dedup, "hasRecordedMessagePersistent").mockResolvedValue(false);
|
||||
const onMessage = await setupDebounceMonitor({ botName: "OpenClaw Bot" });
|
||||
|
||||
await onMessage(
|
||||
createTextEvent({
|
||||
messageId: "om_name_passthrough",
|
||||
text: "@bot hello",
|
||||
mentions: [
|
||||
{
|
||||
key: "@_user_1",
|
||||
id: { open_id: "ou_bot" },
|
||||
name: "OpenClaw Bot",
|
||||
},
|
||||
],
|
||||
}),
|
||||
);
|
||||
await Promise.resolve();
|
||||
await Promise.resolve();
|
||||
await vi.advanceTimersByTimeAsync(25);
|
||||
|
||||
expect(handleFeishuMessageMock).toHaveBeenCalledTimes(1);
|
||||
const firstParams = handleFeishuMessageMock.mock.calls[0]?.[0] as
|
||||
| { botName?: string }
|
||||
| undefined;
|
||||
expect(firstParams?.botName).toBe("OpenClaw Bot");
|
||||
});
|
||||
|
||||
it("does not synthesize mention-forward intent across separate messages", async () => {
|
||||
vi.spyOn(dedup, "tryRecordMessage").mockReturnValue(true);
|
||||
vi.spyOn(dedup, "tryRecordMessagePersistent").mockResolvedValue(true);
|
||||
|
||||
@@ -10,6 +10,11 @@ type FetchBotOpenIdOptions = {
|
||||
timeoutMs?: number;
|
||||
};
|
||||
|
||||
export type FeishuMonitorBotIdentity = {
|
||||
botOpenId?: string;
|
||||
botName?: string;
|
||||
};
|
||||
|
||||
function isTimeoutErrorMessage(message: string | undefined): boolean {
|
||||
return message?.toLowerCase().includes("timeout") || message?.toLowerCase().includes("timed out")
|
||||
? true
|
||||
@@ -20,12 +25,12 @@ function isAbortErrorMessage(message: string | undefined): boolean {
|
||||
return message?.toLowerCase().includes("aborted") ?? false;
|
||||
}
|
||||
|
||||
export async function fetchBotOpenIdForMonitor(
|
||||
export async function fetchBotIdentityForMonitor(
|
||||
account: ResolvedFeishuAccount,
|
||||
options: FetchBotOpenIdOptions = {},
|
||||
): Promise<string | undefined> {
|
||||
): Promise<FeishuMonitorBotIdentity> {
|
||||
if (options.abortSignal?.aborted) {
|
||||
return undefined;
|
||||
return {};
|
||||
}
|
||||
|
||||
const timeoutMs = options.timeoutMs ?? FEISHU_STARTUP_BOT_INFO_TIMEOUT_MS;
|
||||
@@ -34,11 +39,11 @@ export async function fetchBotOpenIdForMonitor(
|
||||
abortSignal: options.abortSignal,
|
||||
});
|
||||
if (result.ok) {
|
||||
return result.botOpenId;
|
||||
return { botOpenId: result.botOpenId, botName: result.botName };
|
||||
}
|
||||
|
||||
if (options.abortSignal?.aborted || isAbortErrorMessage(result.error)) {
|
||||
return undefined;
|
||||
return {};
|
||||
}
|
||||
|
||||
if (isTimeoutErrorMessage(result.error)) {
|
||||
@@ -47,5 +52,13 @@ export async function fetchBotOpenIdForMonitor(
|
||||
`feishu[${account.accountId}]: bot info probe timed out after ${timeoutMs}ms; continuing startup`,
|
||||
);
|
||||
}
|
||||
return undefined;
|
||||
return {};
|
||||
}
|
||||
|
||||
export async function fetchBotOpenIdForMonitor(
|
||||
account: ResolvedFeishuAccount,
|
||||
options: FetchBotOpenIdOptions = {},
|
||||
): Promise<string | undefined> {
|
||||
const identity = await fetchBotIdentityForMonitor(account, options);
|
||||
return identity.botOpenId;
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ import {
|
||||
export const wsClients = new Map<string, Lark.WSClient>();
|
||||
export const httpServers = new Map<string, http.Server>();
|
||||
export const botOpenIds = new Map<string, string>();
|
||||
export const botNames = new Map<string, string>();
|
||||
|
||||
export const FEISHU_WEBHOOK_MAX_BODY_BYTES = 1024 * 1024;
|
||||
export const FEISHU_WEBHOOK_BODY_TIMEOUT_MS = 30_000;
|
||||
@@ -140,6 +141,7 @@ export function stopFeishuMonitorState(accountId?: string): void {
|
||||
httpServers.delete(accountId);
|
||||
}
|
||||
botOpenIds.delete(accountId);
|
||||
botNames.delete(accountId);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -149,4 +151,5 @@ export function stopFeishuMonitorState(accountId?: string): void {
|
||||
}
|
||||
httpServers.clear();
|
||||
botOpenIds.clear();
|
||||
botNames.clear();
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import {
|
||||
} from "openclaw/plugin-sdk/feishu";
|
||||
import { createFeishuWSClient } from "./client.js";
|
||||
import {
|
||||
botNames,
|
||||
botOpenIds,
|
||||
FEISHU_WEBHOOK_BODY_TIMEOUT_MS,
|
||||
FEISHU_WEBHOOK_MAX_BODY_BYTES,
|
||||
@@ -42,6 +43,7 @@ export async function monitorWebSocket({
|
||||
const cleanup = () => {
|
||||
wsClients.delete(accountId);
|
||||
botOpenIds.delete(accountId);
|
||||
botNames.delete(accountId);
|
||||
};
|
||||
|
||||
const handleAbort = () => {
|
||||
@@ -134,6 +136,7 @@ export async function monitorWebhook({
|
||||
server.close();
|
||||
httpServers.delete(accountId);
|
||||
botOpenIds.delete(accountId);
|
||||
botNames.delete(accountId);
|
||||
};
|
||||
|
||||
const handleAbort = () => {
|
||||
|
||||
@@ -5,7 +5,7 @@ import {
|
||||
resolveReactionSyntheticEvent,
|
||||
type FeishuReactionCreatedEvent,
|
||||
} from "./monitor.account.js";
|
||||
import { fetchBotOpenIdForMonitor } from "./monitor.startup.js";
|
||||
import { fetchBotIdentityForMonitor } from "./monitor.startup.js";
|
||||
import {
|
||||
clearFeishuWebhookRateLimitStateForTest,
|
||||
getFeishuWebhookRateLimitStateSizeForTest,
|
||||
@@ -66,7 +66,7 @@ export async function monitorFeishuProvider(opts: MonitorFeishuOpts = {}): Promi
|
||||
}
|
||||
|
||||
// Probe sequentially so large multi-account startups do not burst Feishu's bot-info endpoint.
|
||||
const botOpenId = await fetchBotOpenIdForMonitor(account, {
|
||||
const { botOpenId, botName } = await fetchBotIdentityForMonitor(account, {
|
||||
runtime: opts.runtime,
|
||||
abortSignal: opts.abortSignal,
|
||||
});
|
||||
@@ -82,7 +82,7 @@ export async function monitorFeishuProvider(opts: MonitorFeishuOpts = {}): Promi
|
||||
account,
|
||||
runtime: opts.runtime,
|
||||
abortSignal: opts.abortSignal,
|
||||
botOpenIdSource: { kind: "prefetched", botOpenId },
|
||||
botOpenIdSource: { kind: "prefetched", botOpenId, botName },
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -110,5 +110,45 @@ describe("feishu policy", () => {
|
||||
}),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it("allows group when groupPolicy is 'open'", () => {
|
||||
expect(
|
||||
isFeishuGroupAllowed({
|
||||
groupPolicy: "open",
|
||||
allowFrom: [],
|
||||
senderId: "oc_group_999",
|
||||
}),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it("treats 'allowall' as equivalent to 'open'", () => {
|
||||
expect(
|
||||
isFeishuGroupAllowed({
|
||||
groupPolicy: "allowall",
|
||||
allowFrom: [],
|
||||
senderId: "oc_group_999",
|
||||
}),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it("rejects group when groupPolicy is 'disabled'", () => {
|
||||
expect(
|
||||
isFeishuGroupAllowed({
|
||||
groupPolicy: "disabled",
|
||||
allowFrom: ["oc_group_999"],
|
||||
senderId: "oc_group_999",
|
||||
}),
|
||||
).toBe(false);
|
||||
});
|
||||
|
||||
it("rejects group when groupPolicy is 'allowlist' and allowFrom is empty", () => {
|
||||
expect(
|
||||
isFeishuGroupAllowed({
|
||||
groupPolicy: "allowlist",
|
||||
allowFrom: [],
|
||||
senderId: "oc_group_999",
|
||||
}),
|
||||
).toBe(false);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -92,7 +92,7 @@ export function resolveFeishuGroupToolPolicy(
|
||||
}
|
||||
|
||||
export function isFeishuGroupAllowed(params: {
|
||||
groupPolicy: "open" | "allowlist" | "disabled";
|
||||
groupPolicy: "open" | "allowlist" | "disabled" | "allowall";
|
||||
allowFrom: Array<string | number>;
|
||||
senderId: string;
|
||||
senderIds?: Array<string | null | undefined>;
|
||||
@@ -102,7 +102,7 @@ export function isFeishuGroupAllowed(params: {
|
||||
if (groupPolicy === "disabled") {
|
||||
return false;
|
||||
}
|
||||
if (groupPolicy === "open") {
|
||||
if (groupPolicy === "open" || groupPolicy === "allowall") {
|
||||
return true;
|
||||
}
|
||||
return resolveFeishuAllowlistMatch(params).allowed;
|
||||
|
||||
@@ -25,11 +25,15 @@ async function loadRunEmbeddedPiAgent(): Promise<RunEmbeddedPiAgentFn> {
|
||||
}
|
||||
|
||||
// Bundled install (built)
|
||||
const mod = await import("../../../src/agents/pi-embedded-runner.js");
|
||||
if (typeof mod.runEmbeddedPiAgent !== "function") {
|
||||
// NOTE: there is no src/ tree in a packaged install. Prefer a stable internal entrypoint.
|
||||
const distExtensionApi = "../../../dist/extensionAPI.js";
|
||||
const mod = (await import(distExtensionApi)) as { runEmbeddedPiAgent?: unknown };
|
||||
// oxlint-disable-next-line typescript/no-explicit-any
|
||||
const fn = (mod as any).runEmbeddedPiAgent;
|
||||
if (typeof fn !== "function") {
|
||||
throw new Error("Internal error: runEmbeddedPiAgent not available");
|
||||
}
|
||||
return mod.runEmbeddedPiAgent as RunEmbeddedPiAgentFn;
|
||||
return fn as RunEmbeddedPiAgentFn;
|
||||
}
|
||||
|
||||
function stripCodeFences(s: string): string {
|
||||
|
||||
@@ -102,8 +102,9 @@ describe("mattermostPlugin", () => {
|
||||
|
||||
const actions = mattermostPlugin.actions?.listActions?.({ cfg }) ?? [];
|
||||
expect(actions).toContain("react");
|
||||
expect(actions).not.toContain("send");
|
||||
expect(actions).toContain("send");
|
||||
expect(mattermostPlugin.actions?.supportsAction?.({ action: "react" })).toBe(true);
|
||||
expect(mattermostPlugin.actions?.supportsAction?.({ action: "send" })).toBe(true);
|
||||
});
|
||||
|
||||
it("hides react when mattermost is not configured", () => {
|
||||
@@ -133,7 +134,7 @@ describe("mattermostPlugin", () => {
|
||||
|
||||
const actions = mattermostPlugin.actions?.listActions?.({ cfg }) ?? [];
|
||||
expect(actions).not.toContain("react");
|
||||
expect(actions).not.toContain("send");
|
||||
expect(actions).toContain("send");
|
||||
});
|
||||
|
||||
it("respects per-account actions.reactions in listActions", () => {
|
||||
|
||||
@@ -22,6 +22,15 @@ import {
|
||||
type ResolvedMattermostAccount,
|
||||
} from "./mattermost/accounts.js";
|
||||
import { normalizeMattermostBaseUrl } from "./mattermost/client.js";
|
||||
import {
|
||||
listMattermostDirectoryGroups,
|
||||
listMattermostDirectoryPeers,
|
||||
} from "./mattermost/directory.js";
|
||||
import {
|
||||
buildButtonAttachments,
|
||||
resolveInteractionCallbackUrl,
|
||||
setInteractionSecret,
|
||||
} from "./mattermost/interactions.js";
|
||||
import { monitorMattermostProvider } from "./mattermost/monitor.js";
|
||||
import { probeMattermost } from "./mattermost/probe.js";
|
||||
import { addMattermostReaction, removeMattermostReaction } from "./mattermost/reactions.js";
|
||||
@@ -32,62 +41,91 @@ import { getMattermostRuntime } from "./runtime.js";
|
||||
|
||||
const mattermostMessageActions: ChannelMessageActionAdapter = {
|
||||
listActions: ({ cfg }) => {
|
||||
const actionsConfig = cfg.channels?.mattermost?.actions as { reactions?: boolean } | undefined;
|
||||
const baseReactions = actionsConfig?.reactions;
|
||||
const hasReactionCapableAccount = listMattermostAccountIds(cfg)
|
||||
const enabledAccounts = listMattermostAccountIds(cfg)
|
||||
.map((accountId) => resolveMattermostAccount({ cfg, accountId }))
|
||||
.filter((account) => account.enabled)
|
||||
.filter((account) => Boolean(account.botToken?.trim() && account.baseUrl?.trim()))
|
||||
.some((account) => {
|
||||
const accountActions = account.config.actions as { reactions?: boolean } | undefined;
|
||||
return (accountActions?.reactions ?? baseReactions ?? true) !== false;
|
||||
});
|
||||
.filter((account) => Boolean(account.botToken?.trim() && account.baseUrl?.trim()));
|
||||
|
||||
if (!hasReactionCapableAccount) {
|
||||
return [];
|
||||
const actions: ChannelMessageActionName[] = [];
|
||||
|
||||
// Send (buttons) is available whenever there's at least one enabled account
|
||||
if (enabledAccounts.length > 0) {
|
||||
actions.push("send");
|
||||
}
|
||||
|
||||
return ["react"];
|
||||
// React requires per-account reactions config check
|
||||
const actionsConfig = cfg.channels?.mattermost?.actions as { reactions?: boolean } | undefined;
|
||||
const baseReactions = actionsConfig?.reactions;
|
||||
const hasReactionCapableAccount = enabledAccounts.some((account) => {
|
||||
const accountActions = account.config.actions as { reactions?: boolean } | undefined;
|
||||
return (accountActions?.reactions ?? baseReactions ?? true) !== false;
|
||||
});
|
||||
if (hasReactionCapableAccount) {
|
||||
actions.push("react");
|
||||
}
|
||||
|
||||
return actions;
|
||||
},
|
||||
supportsAction: ({ action }) => {
|
||||
return action === "react";
|
||||
return action === "send" || action === "react";
|
||||
},
|
||||
supportsButtons: ({ cfg }) => {
|
||||
const accounts = listMattermostAccountIds(cfg)
|
||||
.map((id) => resolveMattermostAccount({ cfg, accountId: id }))
|
||||
.filter((a) => a.enabled && a.botToken?.trim() && a.baseUrl?.trim());
|
||||
return accounts.length > 0;
|
||||
},
|
||||
handleAction: async ({ action, params, cfg, accountId }) => {
|
||||
if (action !== "react") {
|
||||
throw new Error(`Mattermost action ${action} not supported`);
|
||||
}
|
||||
// Check reactions gate: per-account config takes precedence over base config
|
||||
const mmBase = cfg?.channels?.mattermost as Record<string, unknown> | undefined;
|
||||
const accounts = mmBase?.accounts as Record<string, Record<string, unknown>> | undefined;
|
||||
const resolvedAccountId = accountId ?? resolveDefaultMattermostAccountId(cfg);
|
||||
const acctConfig = accounts?.[resolvedAccountId];
|
||||
const acctActions = acctConfig?.actions as { reactions?: boolean } | undefined;
|
||||
const baseActions = mmBase?.actions as { reactions?: boolean } | undefined;
|
||||
const reactionsEnabled = acctActions?.reactions ?? baseActions?.reactions ?? true;
|
||||
if (!reactionsEnabled) {
|
||||
throw new Error("Mattermost reactions are disabled in config");
|
||||
}
|
||||
if (action === "react") {
|
||||
// Check reactions gate: per-account config takes precedence over base config
|
||||
const mmBase = cfg?.channels?.mattermost as Record<string, unknown> | undefined;
|
||||
const accounts = mmBase?.accounts as Record<string, Record<string, unknown>> | undefined;
|
||||
const resolvedAccountId = accountId ?? resolveDefaultMattermostAccountId(cfg);
|
||||
const acctConfig = accounts?.[resolvedAccountId];
|
||||
const acctActions = acctConfig?.actions as { reactions?: boolean } | undefined;
|
||||
const baseActions = mmBase?.actions as { reactions?: boolean } | undefined;
|
||||
const reactionsEnabled = acctActions?.reactions ?? baseActions?.reactions ?? true;
|
||||
if (!reactionsEnabled) {
|
||||
throw new Error("Mattermost reactions are disabled in config");
|
||||
}
|
||||
|
||||
const postIdRaw =
|
||||
typeof (params as any)?.messageId === "string"
|
||||
? (params as any).messageId
|
||||
: typeof (params as any)?.postId === "string"
|
||||
? (params as any).postId
|
||||
: "";
|
||||
const postId = postIdRaw.trim();
|
||||
if (!postId) {
|
||||
throw new Error("Mattermost react requires messageId (post id)");
|
||||
}
|
||||
const postIdRaw =
|
||||
typeof (params as any)?.messageId === "string"
|
||||
? (params as any).messageId
|
||||
: typeof (params as any)?.postId === "string"
|
||||
? (params as any).postId
|
||||
: "";
|
||||
const postId = postIdRaw.trim();
|
||||
if (!postId) {
|
||||
throw new Error("Mattermost react requires messageId (post id)");
|
||||
}
|
||||
|
||||
const emojiRaw = typeof (params as any)?.emoji === "string" ? (params as any).emoji : "";
|
||||
const emojiName = emojiRaw.trim().replace(/^:+|:+$/g, "");
|
||||
if (!emojiName) {
|
||||
throw new Error("Mattermost react requires emoji");
|
||||
}
|
||||
const emojiRaw = typeof (params as any)?.emoji === "string" ? (params as any).emoji : "";
|
||||
const emojiName = emojiRaw.trim().replace(/^:+|:+$/g, "");
|
||||
if (!emojiName) {
|
||||
throw new Error("Mattermost react requires emoji");
|
||||
}
|
||||
|
||||
const remove = (params as any)?.remove === true;
|
||||
if (remove) {
|
||||
const result = await removeMattermostReaction({
|
||||
const remove = (params as any)?.remove === true;
|
||||
if (remove) {
|
||||
const result = await removeMattermostReaction({
|
||||
cfg,
|
||||
postId,
|
||||
emojiName,
|
||||
accountId: resolvedAccountId,
|
||||
});
|
||||
if (!result.ok) {
|
||||
throw new Error(result.error);
|
||||
}
|
||||
return {
|
||||
content: [
|
||||
{ type: "text" as const, text: `Removed reaction :${emojiName}: from ${postId}` },
|
||||
],
|
||||
details: {},
|
||||
};
|
||||
}
|
||||
|
||||
const result = await addMattermostReaction({
|
||||
cfg,
|
||||
postId,
|
||||
emojiName,
|
||||
@@ -96,26 +134,92 @@ const mattermostMessageActions: ChannelMessageActionAdapter = {
|
||||
if (!result.ok) {
|
||||
throw new Error(result.error);
|
||||
}
|
||||
|
||||
return {
|
||||
content: [
|
||||
{ type: "text" as const, text: `Removed reaction :${emojiName}: from ${postId}` },
|
||||
],
|
||||
content: [{ type: "text" as const, text: `Reacted with :${emojiName}: on ${postId}` }],
|
||||
details: {},
|
||||
};
|
||||
}
|
||||
|
||||
const result = await addMattermostReaction({
|
||||
cfg,
|
||||
postId,
|
||||
emojiName,
|
||||
accountId: resolvedAccountId,
|
||||
});
|
||||
if (!result.ok) {
|
||||
throw new Error(result.error);
|
||||
if (action !== "send") {
|
||||
throw new Error(`Unsupported Mattermost action: ${action}`);
|
||||
}
|
||||
|
||||
// Send action with optional interactive buttons
|
||||
const to =
|
||||
typeof params.to === "string"
|
||||
? params.to.trim()
|
||||
: typeof params.target === "string"
|
||||
? params.target.trim()
|
||||
: "";
|
||||
if (!to) {
|
||||
throw new Error("Mattermost send requires a target (to).");
|
||||
}
|
||||
|
||||
const message = typeof params.message === "string" ? params.message : "";
|
||||
const replyToId = typeof params.replyToId === "string" ? params.replyToId : undefined;
|
||||
const resolvedAccountId = accountId || undefined;
|
||||
|
||||
// Build props with button attachments if buttons are provided
|
||||
let props: Record<string, unknown> | undefined;
|
||||
if (params.buttons && Array.isArray(params.buttons)) {
|
||||
const account = resolveMattermostAccount({ cfg, accountId: resolvedAccountId });
|
||||
if (account.botToken) setInteractionSecret(account.accountId, account.botToken);
|
||||
const callbackUrl = resolveInteractionCallbackUrl(account.accountId, cfg);
|
||||
|
||||
// Flatten 2D array (rows of buttons) to 1D — core schema sends Array<Array<Button>>
|
||||
// but Mattermost doesn't have row layout, so we flatten all rows into a single list.
|
||||
// Also supports 1D arrays for backward compatibility.
|
||||
const rawButtons = (params.buttons as Array<unknown>).flatMap((item) =>
|
||||
Array.isArray(item) ? item : [item],
|
||||
) as Array<Record<string, unknown>>;
|
||||
|
||||
const buttons = rawButtons
|
||||
.map((btn) => ({
|
||||
id: String(btn.id ?? btn.callback_data ?? ""),
|
||||
name: String(btn.text ?? btn.name ?? btn.label ?? ""),
|
||||
style: (btn.style as "default" | "primary" | "danger") ?? "default",
|
||||
context:
|
||||
typeof btn.context === "object" && btn.context !== null
|
||||
? (btn.context as Record<string, unknown>)
|
||||
: undefined,
|
||||
}))
|
||||
.filter((btn) => btn.id && btn.name);
|
||||
|
||||
const attachmentText =
|
||||
typeof params.attachmentText === "string" ? params.attachmentText : undefined;
|
||||
props = {
|
||||
attachments: buildButtonAttachments({
|
||||
callbackUrl,
|
||||
accountId: account.accountId,
|
||||
buttons,
|
||||
text: attachmentText,
|
||||
}),
|
||||
};
|
||||
}
|
||||
|
||||
const mediaUrl =
|
||||
typeof params.media === "string" ? params.media.trim() || undefined : undefined;
|
||||
|
||||
const result = await sendMessageMattermost(to, message, {
|
||||
accountId: resolvedAccountId,
|
||||
replyToId,
|
||||
props,
|
||||
mediaUrl,
|
||||
});
|
||||
|
||||
return {
|
||||
content: [{ type: "text" as const, text: `Reacted with :${emojiName}: on ${postId}` }],
|
||||
content: [
|
||||
{
|
||||
type: "text" as const,
|
||||
text: JSON.stringify({
|
||||
ok: true,
|
||||
channel: "mattermost",
|
||||
messageId: result.messageId,
|
||||
channelId: result.channelId,
|
||||
}),
|
||||
},
|
||||
],
|
||||
details: {},
|
||||
};
|
||||
},
|
||||
@@ -249,6 +353,12 @@ export const mattermostPlugin: ChannelPlugin<ResolvedMattermostAccount> = {
|
||||
resolveRequireMention: resolveMattermostGroupRequireMention,
|
||||
},
|
||||
actions: mattermostMessageActions,
|
||||
directory: {
|
||||
listGroups: async (params) => listMattermostDirectoryGroups(params),
|
||||
listGroupsLive: async (params) => listMattermostDirectoryGroups(params),
|
||||
listPeers: async (params) => listMattermostDirectoryPeers(params),
|
||||
listPeersLive: async (params) => listMattermostDirectoryPeers(params),
|
||||
},
|
||||
messaging: {
|
||||
normalizeTarget: normalizeMattermostMessagingTarget,
|
||||
targetResolver: {
|
||||
|
||||
@@ -50,6 +50,11 @@ const MattermostAccountSchemaBase = z
|
||||
})
|
||||
.optional(),
|
||||
commands: MattermostSlashCommandsSchema,
|
||||
interactions: z
|
||||
.object({
|
||||
callbackBaseUrl: z.string().optional(),
|
||||
})
|
||||
.optional(),
|
||||
})
|
||||
.strict();
|
||||
|
||||
|
||||
46
extensions/mattermost/src/group-mentions.test.ts
Normal file
46
extensions/mattermost/src/group-mentions.test.ts
Normal file
@@ -0,0 +1,46 @@
|
||||
import type { OpenClawConfig } from "openclaw/plugin-sdk/mattermost";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { resolveMattermostGroupRequireMention } from "./group-mentions.js";
|
||||
|
||||
describe("resolveMattermostGroupRequireMention", () => {
|
||||
it("defaults to requiring mention when no override is configured", () => {
|
||||
const cfg: OpenClawConfig = {
|
||||
channels: {
|
||||
mattermost: {},
|
||||
},
|
||||
};
|
||||
|
||||
const requireMention = resolveMattermostGroupRequireMention({ cfg, accountId: "default" });
|
||||
expect(requireMention).toBe(true);
|
||||
});
|
||||
|
||||
it("respects chatmode-derived account override", () => {
|
||||
const cfg: OpenClawConfig = {
|
||||
channels: {
|
||||
mattermost: {
|
||||
chatmode: "onmessage",
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const requireMention = resolveMattermostGroupRequireMention({ cfg, accountId: "default" });
|
||||
expect(requireMention).toBe(false);
|
||||
});
|
||||
|
||||
it("prefers an explicit runtime override when provided", () => {
|
||||
const cfg: OpenClawConfig = {
|
||||
channels: {
|
||||
mattermost: {
|
||||
chatmode: "oncall",
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const requireMention = resolveMattermostGroupRequireMention({
|
||||
cfg,
|
||||
accountId: "default",
|
||||
requireMentionOverride: false,
|
||||
});
|
||||
expect(requireMention).toBe(false);
|
||||
});
|
||||
});
|
||||
@@ -1,15 +1,23 @@
|
||||
import { resolveChannelGroupRequireMention } from "openclaw/plugin-sdk/compat";
|
||||
import type { ChannelGroupContext } from "openclaw/plugin-sdk/mattermost";
|
||||
import { resolveMattermostAccount } from "./mattermost/accounts.js";
|
||||
|
||||
export function resolveMattermostGroupRequireMention(
|
||||
params: ChannelGroupContext,
|
||||
params: ChannelGroupContext & { requireMentionOverride?: boolean },
|
||||
): boolean | undefined {
|
||||
const account = resolveMattermostAccount({
|
||||
cfg: params.cfg,
|
||||
accountId: params.accountId,
|
||||
});
|
||||
if (typeof account.requireMention === "boolean") {
|
||||
return account.requireMention;
|
||||
}
|
||||
return true;
|
||||
const requireMentionOverride =
|
||||
typeof params.requireMentionOverride === "boolean"
|
||||
? params.requireMentionOverride
|
||||
: account.requireMention;
|
||||
return resolveChannelGroupRequireMention({
|
||||
cfg: params.cfg,
|
||||
channel: "mattermost",
|
||||
groupId: params.groupId,
|
||||
accountId: params.accountId,
|
||||
requireMentionOverride,
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,19 +1,298 @@
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import { createMattermostClient } from "./client.js";
|
||||
import {
|
||||
createMattermostClient,
|
||||
createMattermostPost,
|
||||
normalizeMattermostBaseUrl,
|
||||
updateMattermostPost,
|
||||
} from "./client.js";
|
||||
|
||||
describe("mattermost client", () => {
|
||||
it("request returns undefined on 204 responses", async () => {
|
||||
// ── Helper: mock fetch that captures requests ────────────────────────
|
||||
|
||||
function createMockFetch(response?: { status?: number; body?: unknown; contentType?: string }) {
|
||||
const status = response?.status ?? 200;
|
||||
const body = response?.body ?? {};
|
||||
const contentType = response?.contentType ?? "application/json";
|
||||
|
||||
const calls: Array<{ url: string; init?: RequestInit }> = [];
|
||||
|
||||
const mockFetch = vi.fn(async (url: string | URL | Request, init?: RequestInit) => {
|
||||
const urlStr = typeof url === "string" ? url : url.toString();
|
||||
calls.push({ url: urlStr, init });
|
||||
return new Response(JSON.stringify(body), {
|
||||
status,
|
||||
headers: { "content-type": contentType },
|
||||
});
|
||||
});
|
||||
|
||||
return { mockFetch: mockFetch as unknown as typeof fetch, calls };
|
||||
}
|
||||
|
||||
// ── normalizeMattermostBaseUrl ────────────────────────────────────────
// Verifies base-URL cleanup: trailing slashes and an accidental /api/v4
// suffix are stripped; empty/null/undefined input yields undefined.

describe("normalizeMattermostBaseUrl", () => {
  it("strips trailing slashes", () => {
    expect(normalizeMattermostBaseUrl("http://localhost:8065/")).toBe("http://localhost:8065");
  });

  it("strips /api/v4 suffix", () => {
    expect(normalizeMattermostBaseUrl("http://localhost:8065/api/v4")).toBe(
      "http://localhost:8065",
    );
  });

  it("returns undefined for empty input", () => {
    expect(normalizeMattermostBaseUrl("")).toBeUndefined();
    expect(normalizeMattermostBaseUrl(null)).toBeUndefined();
    expect(normalizeMattermostBaseUrl(undefined)).toBeUndefined();
  });

  it("preserves valid base URL", () => {
    expect(normalizeMattermostBaseUrl("http://mm.example.com")).toBe("http://mm.example.com");
  });
});
|
||||
|
||||
// ── createMattermostClient ───────────────────────────────────────────
// Covers client construction (URL normalization, required baseUrl) and the
// generic `request` helper: auth header, content-type defaulting, error
// surfacing on non-ok statuses, and the 204-no-body special case.

describe("createMattermostClient", () => {
  it("creates a client with normalized baseUrl", () => {
    const { mockFetch } = createMockFetch();
    const client = createMattermostClient({
      baseUrl: "http://localhost:8065/",
      botToken: "tok",
      fetchImpl: mockFetch,
    });
    expect(client.baseUrl).toBe("http://localhost:8065");
    expect(client.apiBaseUrl).toBe("http://localhost:8065/api/v4");
  });

  it("throws on empty baseUrl", () => {
    expect(() => createMattermostClient({ baseUrl: "", botToken: "tok" })).toThrow(
      "baseUrl is required",
    );
  });

  it("sends Authorization header with Bearer token", async () => {
    const { mockFetch, calls } = createMockFetch({ body: { id: "u1" } });
    const client = createMattermostClient({
      baseUrl: "http://localhost:8065",
      botToken: "my-secret-token",
      fetchImpl: mockFetch,
    });
    await client.request("/users/me");
    // Normalize whatever headers shape was passed to fetch before asserting.
    const headers = new Headers(calls[0].init?.headers);
    expect(headers.get("Authorization")).toBe("Bearer my-secret-token");
  });

  it("sets Content-Type for string bodies", async () => {
    const { mockFetch, calls } = createMockFetch({ body: { id: "p1" } });
    const client = createMattermostClient({
      baseUrl: "http://localhost:8065",
      botToken: "tok",
      fetchImpl: mockFetch,
    });
    await client.request("/posts", { method: "POST", body: JSON.stringify({ message: "hi" }) });
    const headers = new Headers(calls[0].init?.headers);
    expect(headers.get("Content-Type")).toBe("application/json");
  });

  it("throws on non-ok responses", async () => {
    const { mockFetch } = createMockFetch({
      status: 404,
      body: { message: "Not Found" },
    });
    const client = createMattermostClient({
      baseUrl: "http://localhost:8065",
      botToken: "tok",
      fetchImpl: mockFetch,
    });
    await expect(client.request("/missing")).rejects.toThrow("Mattermost API 404");
  });

  it("returns undefined on 204 responses", async () => {
    // 204 has no body, so request() must not attempt to parse JSON.
    const fetchImpl = vi.fn(async () => {
      return new Response(null, { status: 204 });
    });

    const client = createMattermostClient({
      baseUrl: "https://chat.example.com",
      botToken: "test-token",
      fetchImpl: fetchImpl as any,
    });

    const result = await client.request<unknown>("/anything", { method: "DELETE" });
    expect(result).toBeUndefined();
  });
});
|
||||
|
||||
// ── createMattermostPost ─────────────────────────────────────────────
// Verifies the camelCase→snake_case payload mapping (channelId→channel_id,
// rootId→root_id, fileIds→file_ids) and that `props` (used for interactive
// button attachments) is passed through verbatim or omitted entirely.

describe("createMattermostPost", () => {
  it("sends channel_id and message", async () => {
    const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
    const client = createMattermostClient({
      baseUrl: "http://localhost:8065",
      botToken: "tok",
      fetchImpl: mockFetch,
    });

    await createMattermostPost(client, {
      channelId: "ch123",
      message: "Hello world",
    });

    const body = JSON.parse(calls[0].init?.body as string);
    expect(body.channel_id).toBe("ch123");
    expect(body.message).toBe("Hello world");
  });

  it("includes rootId when provided", async () => {
    const { mockFetch, calls } = createMockFetch({ body: { id: "post2" } });
    const client = createMattermostClient({
      baseUrl: "http://localhost:8065",
      botToken: "tok",
      fetchImpl: mockFetch,
    });

    await createMattermostPost(client, {
      channelId: "ch123",
      message: "Reply",
      rootId: "root456",
    });

    const body = JSON.parse(calls[0].init?.body as string);
    expect(body.root_id).toBe("root456");
  });

  it("includes fileIds when provided", async () => {
    const { mockFetch, calls } = createMockFetch({ body: { id: "post3" } });
    const client = createMattermostClient({
      baseUrl: "http://localhost:8065",
      botToken: "tok",
      fetchImpl: mockFetch,
    });

    await createMattermostPost(client, {
      channelId: "ch123",
      message: "With file",
      fileIds: ["file1", "file2"],
    });

    const body = JSON.parse(calls[0].init?.body as string);
    expect(body.file_ids).toEqual(["file1", "file2"]);
  });

  it("includes props when provided (for interactive buttons)", async () => {
    const { mockFetch, calls } = createMockFetch({ body: { id: "post4" } });
    const client = createMattermostClient({
      baseUrl: "http://localhost:8065",
      botToken: "tok",
      fetchImpl: mockFetch,
    });

    // Shape mirrors Mattermost interactive-message attachments.
    const props = {
      attachments: [
        {
          text: "Choose:",
          actions: [{ id: "btn1", type: "button", name: "Click" }],
        },
      ],
    };

    await createMattermostPost(client, {
      channelId: "ch123",
      message: "Pick an option",
      props,
    });

    const body = JSON.parse(calls[0].init?.body as string);
    expect(body.props).toEqual(props);
    expect(body.props.attachments[0].actions[0].type).toBe("button");
  });

  it("omits props when not provided", async () => {
    const { mockFetch, calls } = createMockFetch({ body: { id: "post5" } });
    const client = createMattermostClient({
      baseUrl: "http://localhost:8065",
      botToken: "tok",
      fetchImpl: mockFetch,
    });

    await createMattermostPost(client, {
      channelId: "ch123",
      message: "No props",
    });

    const body = JSON.parse(calls[0].init?.body as string);
    expect(body.props).toBeUndefined();
  });
});
|
||||
|
||||
// ── updateMattermostPost ─────────────────────────────────────────────
// Verifies PUT /posts/{id} routing, that the post id is duplicated into the
// request body (Mattermost requires it), and that message/props are each
// optional and only sent when provided.

describe("updateMattermostPost", () => {
  it("sends PUT to /posts/{id}", async () => {
    const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
    const client = createMattermostClient({
      baseUrl: "http://localhost:8065",
      botToken: "tok",
      fetchImpl: mockFetch,
    });

    await updateMattermostPost(client, "post1", { message: "Updated" });

    expect(calls[0].url).toContain("/posts/post1");
    expect(calls[0].init?.method).toBe("PUT");
  });

  it("includes post id in the body", async () => {
    const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
    const client = createMattermostClient({
      baseUrl: "http://localhost:8065",
      botToken: "tok",
      fetchImpl: mockFetch,
    });

    await updateMattermostPost(client, "post1", { message: "Updated" });

    const body = JSON.parse(calls[0].init?.body as string);
    expect(body.id).toBe("post1");
    expect(body.message).toBe("Updated");
  });

  it("includes props for button completion updates", async () => {
    // Typical flow: after a button click, the post is edited to show which
    // option was selected.
    const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
    const client = createMattermostClient({
      baseUrl: "http://localhost:8065",
      botToken: "tok",
      fetchImpl: mockFetch,
    });

    await updateMattermostPost(client, "post1", {
      message: "Original message",
      props: {
        attachments: [{ text: "✓ **do_now** selected by @tony" }],
      },
    });

    const body = JSON.parse(calls[0].init?.body as string);
    expect(body.message).toBe("Original message");
    expect(body.props.attachments[0].text).toContain("✓");
    expect(body.props.attachments[0].text).toContain("do_now");
  });

  it("omits message when not provided", async () => {
    // Props-only update must not clobber the existing message.
    const { mockFetch, calls } = createMockFetch({ body: { id: "post1" } });
    const client = createMattermostClient({
      baseUrl: "http://localhost:8065",
      botToken: "tok",
      fetchImpl: mockFetch,
    });

    await updateMattermostPost(client, "post1", {
      props: { attachments: [] },
    });

    const body = JSON.parse(calls[0].init?.body as string);
    expect(body.id).toBe("post1");
    expect(body.message).toBeUndefined();
    expect(body.props).toEqual({ attachments: [] });
  });
});
|
||||
|
||||
@@ -138,6 +138,16 @@ export async function fetchMattermostChannel(
|
||||
return await client.request<MattermostChannel>(`/channels/${channelId}`);
|
||||
}
|
||||
|
||||
export async function fetchMattermostChannelByName(
|
||||
client: MattermostClient,
|
||||
teamId: string,
|
||||
channelName: string,
|
||||
): Promise<MattermostChannel> {
|
||||
return await client.request<MattermostChannel>(
|
||||
`/teams/${teamId}/channels/name/${encodeURIComponent(channelName)}`,
|
||||
);
|
||||
}
|
||||
|
||||
export async function sendMattermostTyping(
|
||||
client: MattermostClient,
|
||||
params: { channelId: string; parentId?: string },
|
||||
@@ -172,9 +182,10 @@ export async function createMattermostPost(
|
||||
message: string;
|
||||
rootId?: string;
|
||||
fileIds?: string[];
|
||||
props?: Record<string, unknown>;
|
||||
},
|
||||
): Promise<MattermostPost> {
|
||||
const payload: Record<string, string> = {
|
||||
const payload: Record<string, unknown> = {
|
||||
channel_id: params.channelId,
|
||||
message: params.message,
|
||||
};
|
||||
@@ -182,7 +193,10 @@ export async function createMattermostPost(
|
||||
payload.root_id = params.rootId;
|
||||
}
|
||||
if (params.fileIds?.length) {
|
||||
(payload as Record<string, unknown>).file_ids = params.fileIds;
|
||||
payload.file_ids = params.fileIds;
|
||||
}
|
||||
if (params.props) {
|
||||
payload.props = params.props;
|
||||
}
|
||||
return await client.request<MattermostPost>("/posts", {
|
||||
method: "POST",
|
||||
@@ -203,6 +217,27 @@ export async function fetchMattermostUserTeams(
|
||||
return await client.request<MattermostTeam[]>(`/users/${userId}/teams`);
|
||||
}
|
||||
|
||||
export async function updateMattermostPost(
|
||||
client: MattermostClient,
|
||||
postId: string,
|
||||
params: {
|
||||
message?: string;
|
||||
props?: Record<string, unknown>;
|
||||
},
|
||||
): Promise<MattermostPost> {
|
||||
const payload: Record<string, unknown> = { id: postId };
|
||||
if (params.message !== undefined) {
|
||||
payload.message = params.message;
|
||||
}
|
||||
if (params.props !== undefined) {
|
||||
payload.props = params.props;
|
||||
}
|
||||
return await client.request<MattermostPost>(`/posts/${postId}`, {
|
||||
method: "PUT",
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
}
|
||||
|
||||
export async function uploadMattermostFile(
|
||||
client: MattermostClient,
|
||||
params: {
|
||||
|
||||
172
extensions/mattermost/src/mattermost/directory.ts
Normal file
172
extensions/mattermost/src/mattermost/directory.ts
Normal file
@@ -0,0 +1,172 @@
|
||||
import type {
|
||||
ChannelDirectoryEntry,
|
||||
OpenClawConfig,
|
||||
RuntimeEnv,
|
||||
} from "openclaw/plugin-sdk/mattermost";
|
||||
import { listMattermostAccountIds, resolveMattermostAccount } from "./accounts.js";
|
||||
import {
|
||||
createMattermostClient,
|
||||
fetchMattermostMe,
|
||||
type MattermostChannel,
|
||||
type MattermostClient,
|
||||
type MattermostUser,
|
||||
} from "./client.js";
|
||||
|
||||
export type MattermostDirectoryParams = {
|
||||
cfg: OpenClawConfig;
|
||||
accountId?: string | null;
|
||||
query?: string | null;
|
||||
limit?: number | null;
|
||||
runtime: RuntimeEnv;
|
||||
};
|
||||
|
||||
/**
 * Build an API client for one configured account, or null when the account
 * is disabled or missing a bot token / base URL.
 */
function buildClient(params: {
  cfg: OpenClawConfig;
  accountId?: string | null;
}): MattermostClient | null {
  const account = resolveMattermostAccount({ cfg: params.cfg, accountId: params.accountId });
  // All three are required to make authenticated API calls.
  if (!account.enabled || !account.botToken || !account.baseUrl) {
    return null;
  }
  return createMattermostClient({ baseUrl: account.baseUrl, botToken: account.botToken });
}
|
||||
|
||||
/**
 * Build clients from ALL enabled accounts (deduplicated by token).
 *
 * We always scan every account because:
 * - Private channels are only visible to bots that are members
 * - The requesting agent's account may have an expired/invalid token
 *
 * This means a single healthy bot token is enough for directory discovery.
 */
function buildClients(params: MattermostDirectoryParams): MattermostClient[] {
  const accountIds = listMattermostAccountIds(params.cfg);
  // Dedupe by bot token: two account entries sharing a token would otherwise
  // produce duplicate clients (and duplicate API calls).
  // NOTE(review): assumes MattermostClient exposes `token` — confirm in client.ts.
  const seen = new Set<string>();
  const clients: MattermostClient[] = [];
  for (const id of accountIds) {
    const client = buildClient({ cfg: params.cfg, accountId: id });
    if (client && !seen.has(client.token)) {
      seen.add(client.token);
      clients.push(client);
    }
  }
  return clients;
}
|
||||
|
||||
/**
 * List channels (public + private) visible to any configured bot account.
 *
 * Results are deduplicated across accounts by channel id, optionally filtered
 * by a case-insensitive substring match against the channel name or display
 * name, and truncated to `params.limit` when positive. Failing accounts
 * (expired/revoked tokens) are skipped best-effort.
 *
 * NOTE: Uses per_page=200 which covers most instances. Mattermost does not
 * return a "has more" indicator, so very large instances (200+ channels per bot)
 * may see incomplete results. Pagination can be added if needed.
 */
export async function listMattermostDirectoryGroups(
  params: MattermostDirectoryParams,
): Promise<ChannelDirectoryEntry[]> {
  const clients = buildClients(params);
  if (!clients.length) {
    return [];
  }
  const q = params.query?.trim().toLowerCase() || "";
  const seenIds = new Set<string>();
  const entries: ChannelDirectoryEntry[] = [];

  for (const client of clients) {
    try {
      // The bot's own channel membership determines visibility.
      const me = await fetchMattermostMe(client);
      const channels = await client.request<MattermostChannel[]>(
        `/users/${me.id}/channels?per_page=200`,
      );
      for (const ch of channels) {
        // Only open ("O") and private ("P") channels count as groups;
        // DMs/group-DMs are excluded.
        if (ch.type !== "O" && ch.type !== "P") continue;
        if (seenIds.has(ch.id)) continue;
        if (q) {
          const name = (ch.name ?? "").toLowerCase();
          const display = (ch.display_name ?? "").toLowerCase();
          if (!name.includes(q) && !display.includes(q)) continue;
        }
        seenIds.add(ch.id);
        entries.push({
          kind: "group" as const,
          id: `channel:${ch.id}`,
          name: ch.name ?? undefined,
          handle: ch.display_name ?? undefined,
        });
      }
    } catch (err) {
      // Token may be expired/revoked — skip this account and try others
      console.debug?.(
        "[mattermost-directory] listGroups: skipping account:",
        (err as Error)?.message,
      );
      continue;
    }
  }
  return params.limit && params.limit > 0 ? entries.slice(0, params.limit) : entries;
}
|
||||
|
||||
/**
 * List team members as peer directory entries.
 *
 * Uses only the first available client since all bots in a team see the same
 * user list (unlike channels where membership varies). Uses the first team
 * returned — multi-team setups will only see members from that team.
 *
 * With a query, delegates matching to POST /users/search; without one, lists
 * team members and resolves them via POST /users/ids. The bot's own user is
 * excluded from results either way. Returns [] on any API failure.
 *
 * NOTE: per_page=200 for member listing; same pagination caveat as groups.
 */
export async function listMattermostDirectoryPeers(
  params: MattermostDirectoryParams,
): Promise<ChannelDirectoryEntry[]> {
  const clients = buildClients(params);
  if (!clients.length) {
    return [];
  }
  // All bots see the same user list, so one client suffices (unlike channels
  // where private channel membership varies per bot).
  const client = clients[0];
  try {
    const me = await fetchMattermostMe(client);
    const teams = await client.request<{ id: string }[]>("/users/me/teams");
    if (!teams.length) {
      return [];
    }
    // Uses first team — multi-team setups may need iteration in the future
    const teamId = teams[0].id;
    const q = params.query?.trim().toLowerCase() || "";

    let users: MattermostUser[];
    if (q) {
      // Server-side search scoped to the team.
      users = await client.request<MattermostUser[]>("/users/search", {
        method: "POST",
        body: JSON.stringify({ term: q, team_id: teamId }),
      });
    } else {
      const members = await client.request<{ user_id: string }[]>(
        `/teams/${teamId}/members?per_page=200`,
      );
      // Drop the bot itself before resolving user records.
      const userIds = members.map((m) => m.user_id).filter((id) => id !== me.id);
      if (!userIds.length) {
        return [];
      }
      users = await client.request<MattermostUser[]>("/users/ids", {
        method: "POST",
        body: JSON.stringify(userIds),
      });
    }

    // Filter again: the search path above does not exclude the bot.
    const entries = users
      .filter((u) => u.id !== me.id)
      .map((u) => ({
        kind: "user" as const,
        id: `user:${u.id}`,
        name: u.username ?? undefined,
        // Prefer full name, then nickname, as the human-readable handle.
        handle:
          [u.first_name, u.last_name].filter(Boolean).join(" ").trim() || u.nickname || undefined,
      }));
    return params.limit && params.limit > 0 ? entries.slice(0, params.limit) : entries;
  } catch (err) {
    // Best-effort: directory lookups must never crash the caller.
    console.debug?.("[mattermost-directory] listPeers failed:", (err as Error)?.message);
    return [];
  }
}
|
||||
335
extensions/mattermost/src/mattermost/interactions.test.ts
Normal file
335
extensions/mattermost/src/mattermost/interactions.test.ts
Normal file
@@ -0,0 +1,335 @@
|
||||
import { type IncomingMessage } from "node:http";
|
||||
import { describe, expect, it, beforeEach, afterEach } from "vitest";
|
||||
import {
|
||||
buildButtonAttachments,
|
||||
generateInteractionToken,
|
||||
getInteractionCallbackUrl,
|
||||
getInteractionSecret,
|
||||
isLocalhostRequest,
|
||||
resolveInteractionCallbackUrl,
|
||||
setInteractionCallbackUrl,
|
||||
setInteractionSecret,
|
||||
verifyInteractionToken,
|
||||
} from "./interactions.js";
|
||||
|
||||
// ── HMAC token management ────────────────────────────────────────────
|
||||
|
||||
// Secret derivation: the HMAC secret is deterministically derived from the
// bot token, so restarts with the same token keep old buttons verifiable.
describe("setInteractionSecret / getInteractionSecret", () => {
  beforeEach(() => {
    setInteractionSecret("test-bot-token");
  });

  it("derives a deterministic secret from the bot token", () => {
    setInteractionSecret("token-a");
    const secretA = getInteractionSecret();
    setInteractionSecret("token-a");
    const secretA2 = getInteractionSecret();
    expect(secretA).toBe(secretA2);
  });

  it("produces different secrets for different tokens", () => {
    setInteractionSecret("token-a");
    const secretA = getInteractionSecret();
    setInteractionSecret("token-b");
    const secretB = getInteractionSecret();
    expect(secretA).not.toBe(secretB);
  });

  it("returns a hex string", () => {
    expect(getInteractionSecret()).toMatch(/^[0-9a-f]+$/);
  });
});
|
||||
|
||||
// ── Token generation / verification ──────────────────────────────────
|
||||
|
||||
// HMAC token round-trip: tokens must be deterministic, tamper-evident,
// context-bound, key-order independent, and (optionally) scoped per account.
// NOTE(review): setInteractionSecret appears overloaded — one arg sets the
// default-account token, two args set (accountId, token); confirm against
// the implementation.
describe("generateInteractionToken / verifyInteractionToken", () => {
  beforeEach(() => {
    setInteractionSecret("test-bot-token");
  });

  it("generates a hex token", () => {
    const token = generateInteractionToken({ action_id: "click" });
    // 64 hex chars — consistent with an HMAC-SHA256 digest.
    expect(token).toMatch(/^[0-9a-f]{64}$/);
  });

  it("verifies a valid token", () => {
    const context = { action_id: "do_now", item_id: "123" };
    const token = generateInteractionToken(context);
    expect(verifyInteractionToken(context, token)).toBe(true);
  });

  it("rejects a tampered token", () => {
    const context = { action_id: "do_now" };
    const token = generateInteractionToken(context);
    // Flip the final hex digit to invalidate the MAC.
    const tampered = token.replace(/.$/, token.endsWith("0") ? "1" : "0");
    expect(verifyInteractionToken(context, tampered)).toBe(false);
  });

  it("rejects a token generated with different context", () => {
    const token = generateInteractionToken({ action_id: "a" });
    expect(verifyInteractionToken({ action_id: "b" }, token)).toBe(false);
  });

  it("rejects tokens with wrong length", () => {
    const context = { action_id: "test" };
    expect(verifyInteractionToken(context, "short")).toBe(false);
  });

  it("is deterministic for the same context", () => {
    const context = { action_id: "test", x: 1 };
    const t1 = generateInteractionToken(context);
    const t2 = generateInteractionToken(context);
    expect(t1).toBe(t2);
  });

  it("produces the same token regardless of key order", () => {
    const contextA = { action_id: "do_now", tweet_id: "123", action: "do" };
    const contextB = { action: "do", action_id: "do_now", tweet_id: "123" };
    const contextC = { tweet_id: "123", action: "do", action_id: "do_now" };
    const tokenA = generateInteractionToken(contextA);
    const tokenB = generateInteractionToken(contextB);
    const tokenC = generateInteractionToken(contextC);
    expect(tokenA).toBe(tokenB);
    expect(tokenB).toBe(tokenC);
  });

  it("verifies a token when Mattermost reorders context keys", () => {
    // Simulate: token generated with keys in one order, verified with keys in another
    // (Mattermost reorders context keys when storing/returning interactive message payloads)
    const originalContext = { action_id: "bm_do", tweet_id: "999", action: "do" };
    const token = generateInteractionToken(originalContext);

    // Mattermost returns keys in alphabetical order (or any arbitrary order)
    const reorderedContext = { action: "do", action_id: "bm_do", tweet_id: "999" };
    expect(verifyInteractionToken(reorderedContext, token)).toBe(true);
  });

  it("scopes tokens per account when account secrets differ", () => {
    setInteractionSecret("acct-a", "bot-token-a");
    setInteractionSecret("acct-b", "bot-token-b");
    const context = { action_id: "do_now", item_id: "123" };
    const tokenA = generateInteractionToken(context, "acct-a");

    // A token minted for acct-a must not verify against acct-b's secret.
    expect(verifyInteractionToken(context, tokenA, "acct-a")).toBe(true);
    expect(verifyInteractionToken(context, tokenA, "acct-b")).toBe(false);
  });
});
|
||||
|
||||
// ── Callback URL registry ────────────────────────────────────────────
// Simple per-account get/set registry for interaction callback URLs.

describe("callback URL registry", () => {
  it("stores and retrieves callback URLs", () => {
    setInteractionCallbackUrl("acct1", "http://localhost:18789/mattermost/interactions/acct1");
    expect(getInteractionCallbackUrl("acct1")).toBe(
      "http://localhost:18789/mattermost/interactions/acct1",
    );
  });

  it("returns undefined for unknown account", () => {
    expect(getInteractionCallbackUrl("nonexistent-account-id")).toBeUndefined();
  });
});
|
||||
|
||||
// Resolution order: cached registry entry first, then a URL computed from the
// gateway port config, then the default port 18789.
describe("resolveInteractionCallbackUrl", () => {
  afterEach(() => {
    // Clear any registry entry this suite may have set.
    setInteractionCallbackUrl("resolve-test", "");
  });

  it("prefers cached URL from registry", () => {
    setInteractionCallbackUrl("cached", "http://cached:1234/path");
    expect(resolveInteractionCallbackUrl("cached")).toBe("http://cached:1234/path");
  });

  it("falls back to computed URL from gateway port config", () => {
    const url = resolveInteractionCallbackUrl("default", { gateway: { port: 9999 } });
    expect(url).toBe("http://localhost:9999/mattermost/interactions/default");
  });

  it("uses default port 18789 when no config provided", () => {
    const url = resolveInteractionCallbackUrl("myaccount");
    expect(url).toBe("http://localhost:18789/mattermost/interactions/myaccount");
  });

  it("uses default port when gateway config has no port", () => {
    const url = resolveInteractionCallbackUrl("acct", { gateway: {} });
    expect(url).toBe("http://localhost:18789/mattermost/interactions/acct");
  });
});
|
||||
|
||||
// ── buildButtonAttachments ───────────────────────────────────────────
// Builds Mattermost interactive-message attachments: one attachment holding
// all buttons, each wired to the callback URL and carrying an HMAC `_token`
// over its context so callbacks can be authenticated.

describe("buildButtonAttachments", () => {
  beforeEach(() => {
    setInteractionSecret("test-bot-token");
  });

  it("returns an array with one attachment containing all buttons", () => {
    const result = buildButtonAttachments({
      callbackUrl: "http://localhost:18789/mattermost/interactions/default",
      buttons: [
        { id: "btn1", name: "Click Me" },
        { id: "btn2", name: "Skip", style: "danger" },
      ],
    });

    expect(result).toHaveLength(1);
    expect(result[0].actions).toHaveLength(2);
  });

  it("sets type to 'button' on every action", () => {
    const result = buildButtonAttachments({
      callbackUrl: "http://localhost:18789/cb",
      buttons: [{ id: "a", name: "A" }],
    });

    expect(result[0].actions![0].type).toBe("button");
  });

  it("includes HMAC _token in integration context", () => {
    const result = buildButtonAttachments({
      callbackUrl: "http://localhost:18789/cb",
      buttons: [{ id: "test", name: "Test" }],
    });

    const action = result[0].actions![0];
    expect(action.integration.context._token).toMatch(/^[0-9a-f]{64}$/);
  });

  it("includes sanitized action_id in integration context", () => {
    const result = buildButtonAttachments({
      callbackUrl: "http://localhost:18789/cb",
      buttons: [{ id: "my_action", name: "Do It" }],
    });

    const action = result[0].actions![0];
    // sanitizeActionId strips hyphens and underscores (Mattermost routing bug #25747)
    expect(action.integration.context.action_id).toBe("myaction");
    expect(action.id).toBe("myaction");
  });

  it("merges custom context into integration context", () => {
    const result = buildButtonAttachments({
      callbackUrl: "http://localhost:18789/cb",
      buttons: [{ id: "btn", name: "Go", context: { tweet_id: "123", batch: true } }],
    });

    // Caller context is merged alongside the generated action_id and _token.
    const ctx = result[0].actions![0].integration.context;
    expect(ctx.tweet_id).toBe("123");
    expect(ctx.batch).toBe(true);
    expect(ctx.action_id).toBe("btn");
    expect(ctx._token).toBeDefined();
  });

  it("passes callback URL to each button integration", () => {
    const url = "http://localhost:18789/mattermost/interactions/default";
    const result = buildButtonAttachments({
      callbackUrl: url,
      buttons: [
        { id: "a", name: "A" },
        { id: "b", name: "B" },
      ],
    });

    for (const action of result[0].actions!) {
      expect(action.integration.url).toBe(url);
    }
  });

  it("preserves button style", () => {
    const result = buildButtonAttachments({
      callbackUrl: "http://localhost/cb",
      buttons: [
        { id: "ok", name: "OK", style: "primary" },
        { id: "no", name: "No", style: "danger" },
      ],
    });

    expect(result[0].actions![0].style).toBe("primary");
    expect(result[0].actions![1].style).toBe("danger");
  });

  it("uses provided text for the attachment", () => {
    const result = buildButtonAttachments({
      callbackUrl: "http://localhost/cb",
      buttons: [{ id: "x", name: "X" }],
      text: "Choose an action:",
    });

    expect(result[0].text).toBe("Choose an action:");
  });

  it("defaults to empty string text when not provided", () => {
    const result = buildButtonAttachments({
      callbackUrl: "http://localhost/cb",
      buttons: [{ id: "x", name: "X" }],
    });

    expect(result[0].text).toBe("");
  });

  it("generates verifiable tokens", () => {
    const result = buildButtonAttachments({
      callbackUrl: "http://localhost/cb",
      buttons: [{ id: "verify_me", name: "V", context: { extra: "data" } }],
    });

    // The token must verify against the context minus the token itself.
    const ctx = result[0].actions![0].integration.context;
    const token = ctx._token as string;
    const { _token, ...contextWithoutToken } = ctx;
    expect(verifyInteractionToken(contextWithoutToken, token)).toBe(true);
  });

  it("generates tokens that verify even when Mattermost reorders context keys", () => {
    const result = buildButtonAttachments({
      callbackUrl: "http://localhost/cb",
      buttons: [{ id: "do_action", name: "Do", context: { tweet_id: "42", category: "ai" } }],
    });

    const ctx = result[0].actions![0].integration.context;
    const token = ctx._token as string;

    // Simulate Mattermost returning context with keys in a different order
    const reordered: Record<string, unknown> = {};
    const keys = Object.keys(ctx).filter((k) => k !== "_token");
    // Reverse the key order to simulate reordering
    for (const key of keys.reverse()) {
      reordered[key] = ctx[key];
    }
    expect(verifyInteractionToken(reordered, token)).toBe(true);
  });
});
|
||||
|
||||
// ── isLocalhostRequest ───────────────────────────────────────────────
|
||||
|
||||
describe("isLocalhostRequest", () => {
|
||||
function fakeReq(remoteAddress?: string): IncomingMessage {
|
||||
return {
|
||||
socket: { remoteAddress },
|
||||
} as unknown as IncomingMessage;
|
||||
}
|
||||
|
||||
it("accepts 127.0.0.1", () => {
|
||||
expect(isLocalhostRequest(fakeReq("127.0.0.1"))).toBe(true);
|
||||
});
|
||||
|
||||
it("accepts ::1", () => {
|
||||
expect(isLocalhostRequest(fakeReq("::1"))).toBe(true);
|
||||
});
|
||||
|
||||
it("accepts ::ffff:127.0.0.1", () => {
|
||||
expect(isLocalhostRequest(fakeReq("::ffff:127.0.0.1"))).toBe(true);
|
||||
});
|
||||
|
||||
it("rejects external addresses", () => {
|
||||
expect(isLocalhostRequest(fakeReq("10.0.0.1"))).toBe(false);
|
||||
expect(isLocalhostRequest(fakeReq("192.168.1.1"))).toBe(false);
|
||||
});
|
||||
|
||||
it("rejects when socket has no remote address", () => {
|
||||
expect(isLocalhostRequest(fakeReq(undefined))).toBe(false);
|
||||
});
|
||||
|
||||
it("rejects when socket is missing", () => {
|
||||
expect(isLocalhostRequest({} as IncomingMessage)).toBe(false);
|
||||
});
|
||||
});
|
||||
429
extensions/mattermost/src/mattermost/interactions.ts
Normal file
429
extensions/mattermost/src/mattermost/interactions.ts
Normal file
@@ -0,0 +1,429 @@
|
||||
import { createHmac, timingSafeEqual } from "node:crypto";
|
||||
import type { IncomingMessage, ServerResponse } from "node:http";
|
||||
import { getMattermostRuntime } from "../runtime.js";
|
||||
import { updateMattermostPost, type MattermostClient } from "./client.js";
|
||||
|
||||
// Hard limits for interaction callback request bodies: 64 KiB size cap and a
// 10 s read timeout, enforced by readInteractionBody below.
const INTERACTION_MAX_BODY_BYTES = 64 * 1024;
const INTERACTION_BODY_TIMEOUT_MS = 10_000;
|
||||
|
||||
/**
 * Mattermost interactive message callback payload.
 * Sent by Mattermost when a user clicks an action button.
 * See: https://developers.mattermost.com/integrate/plugins/interactive-messages/
 */
export type MattermostInteractionPayload = {
  // User who clicked the button.
  user_id: string;
  user_name?: string;
  // Channel and post hosting the interactive message.
  channel_id: string;
  team_id?: string;
  post_id: string;
  trigger_id?: string;
  type?: string;
  data_source?: string;
  // Integration context attached to the clicked button; carries action_id
  // and the HMAC _token added by buildButtonAttachments.
  context?: Record<string, unknown>;
};

/**
 * Response body Mattermost accepts from an interaction callback:
 * either an in-place post update and/or an ephemeral message to the clicker.
 */
export type MattermostInteractionResponse = {
  update?: {
    message: string;
    props?: Record<string, unknown>;
  };
  ephemeral_text?: string;
};
|
||||
|
||||
// ── Callback URL registry ──────────────────────────────────────────────

// Per-account registry of interaction callback URLs (accountId → URL),
// populated by the gateway monitor when the HTTP route is registered.
const callbackUrls = new Map<string, string>();

// Register the URL Mattermost should POST button clicks to for an account.
export function setInteractionCallbackUrl(accountId: string, url: string): void {
  callbackUrls.set(accountId, url);
}

// Look up a previously registered callback URL, if any.
export function getInteractionCallbackUrl(accountId: string): string | undefined {
  return callbackUrls.get(accountId);
}
|
||||
|
||||
/**
|
||||
* Resolve the interaction callback URL for an account.
|
||||
* Prefers the in-memory registered URL (set by the gateway monitor).
|
||||
* Falls back to computing it from the gateway port in config (for CLI callers).
|
||||
*/
|
||||
export function resolveInteractionCallbackUrl(
|
||||
accountId: string,
|
||||
cfg?: { gateway?: { port?: number } },
|
||||
): string {
|
||||
const cached = callbackUrls.get(accountId);
|
||||
if (cached) {
|
||||
return cached;
|
||||
}
|
||||
const port = typeof cfg?.gateway?.port === "number" ? cfg.gateway.port : 18789;
|
||||
return `http://localhost:${port}/mattermost/interactions/${accountId}`;
|
||||
}
|
||||
|
||||
// ── HMAC token management ──────────────────────────────────────────────
|
||||
// Secret is derived from the bot token so it's stable across CLI and gateway processes.
|
||||
|
||||
const interactionSecrets = new Map<string, string>();
|
||||
let defaultInteractionSecret: string | undefined;
|
||||
|
||||
function deriveInteractionSecret(botToken: string): string {
|
||||
return createHmac("sha256", "openclaw-mattermost-interactions").update(botToken).digest("hex");
|
||||
}
|
||||
|
||||
export function setInteractionSecret(accountIdOrBotToken: string, botToken?: string): void {
|
||||
if (typeof botToken === "string") {
|
||||
interactionSecrets.set(accountIdOrBotToken, deriveInteractionSecret(botToken));
|
||||
return;
|
||||
}
|
||||
// Backward-compatible fallback for call sites/tests that only pass botToken.
|
||||
defaultInteractionSecret = deriveInteractionSecret(accountIdOrBotToken);
|
||||
}
|
||||
|
||||
export function getInteractionSecret(accountId?: string): string {
|
||||
const scoped = accountId ? interactionSecrets.get(accountId) : undefined;
|
||||
if (scoped) {
|
||||
return scoped;
|
||||
}
|
||||
if (defaultInteractionSecret) {
|
||||
return defaultInteractionSecret;
|
||||
}
|
||||
// Fallback for single-account runtimes that only registered scoped secrets.
|
||||
if (interactionSecrets.size === 1) {
|
||||
const first = interactionSecrets.values().next().value;
|
||||
if (typeof first === "string") {
|
||||
return first;
|
||||
}
|
||||
}
|
||||
throw new Error(
|
||||
"Interaction secret not initialized — call setInteractionSecret(accountId, botToken) first",
|
||||
);
|
||||
}
|
||||
|
||||
export function generateInteractionToken(
|
||||
context: Record<string, unknown>,
|
||||
accountId?: string,
|
||||
): string {
|
||||
const secret = getInteractionSecret(accountId);
|
||||
// Sort keys for stable serialization — Mattermost may reorder context keys
|
||||
const payload = JSON.stringify(context, Object.keys(context).sort());
|
||||
return createHmac("sha256", secret).update(payload).digest("hex");
|
||||
}
|
||||
|
||||
export function verifyInteractionToken(
|
||||
context: Record<string, unknown>,
|
||||
token: string,
|
||||
accountId?: string,
|
||||
): boolean {
|
||||
const expected = generateInteractionToken(context, accountId);
|
||||
if (expected.length !== token.length) {
|
||||
return false;
|
||||
}
|
||||
return timingSafeEqual(Buffer.from(expected), Buffer.from(token));
|
||||
}
|
||||
|
||||
// ── Button builder helpers ─────────────────────────────────────────────
|
||||
|
||||
// A single interactive action rendered on a Mattermost attachment.
export type MattermostButton = {
  // Sanitized action ID (no hyphens/underscores — see sanitizeActionId).
  id: string;
  type: "button" | "select";
  // Display label shown to the user.
  name: string;
  style?: "default" | "primary" | "danger";
  integration: {
    // URL Mattermost POSTs to when the action is triggered.
    url: string;
    // Echoed back verbatim in the callback; carries action_id and _token.
    context: Record<string, unknown>;
  };
};

// A Mattermost message attachment; extra attachment fields are passed through.
export type MattermostAttachment = {
  text?: string;
  actions?: MattermostButton[];
  [key: string]: unknown;
};
|
||||
|
||||
/**
|
||||
* Build Mattermost `props.attachments` with interactive buttons.
|
||||
*
|
||||
* Each button includes an HMAC token in its integration context so the
|
||||
* callback handler can verify the request originated from a legitimate
|
||||
* button click (Mattermost's recommended security pattern).
|
||||
*/
|
||||
/**
|
||||
* Sanitize a button ID so Mattermost's action router can match it.
|
||||
* Mattermost uses the action ID in the URL path `/api/v4/posts/{id}/actions/{actionId}`
|
||||
* and IDs containing hyphens or underscores break the server-side routing.
|
||||
* See: https://github.com/mattermost/mattermost/issues/25747
|
||||
*/
|
||||
function sanitizeActionId(id: string): string {
|
||||
return id.replace(/[-_]/g, "");
|
||||
}
|
||||
|
||||
export function buildButtonAttachments(params: {
|
||||
callbackUrl: string;
|
||||
accountId?: string;
|
||||
buttons: Array<{
|
||||
id: string;
|
||||
name: string;
|
||||
style?: "default" | "primary" | "danger";
|
||||
context?: Record<string, unknown>;
|
||||
}>;
|
||||
text?: string;
|
||||
}): MattermostAttachment[] {
|
||||
const actions: MattermostButton[] = params.buttons.map((btn) => {
|
||||
const safeId = sanitizeActionId(btn.id);
|
||||
const context: Record<string, unknown> = {
|
||||
action_id: safeId,
|
||||
...btn.context,
|
||||
};
|
||||
const token = generateInteractionToken(context, params.accountId);
|
||||
return {
|
||||
id: safeId,
|
||||
type: "button" as const,
|
||||
name: btn.name,
|
||||
style: btn.style,
|
||||
integration: {
|
||||
url: params.callbackUrl,
|
||||
context: {
|
||||
...context,
|
||||
_token: token,
|
||||
},
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
return [
|
||||
{
|
||||
text: params.text ?? "",
|
||||
actions,
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
// ── Localhost validation ───────────────────────────────────────────────
|
||||
|
||||
const LOCALHOST_ADDRESSES = new Set(["127.0.0.1", "::1", "::ffff:127.0.0.1"]);
|
||||
|
||||
export function isLocalhostRequest(req: IncomingMessage): boolean {
|
||||
const addr = req.socket?.remoteAddress;
|
||||
if (!addr) {
|
||||
return false;
|
||||
}
|
||||
return LOCALHOST_ADDRESSES.has(addr);
|
||||
}
|
||||
|
||||
// ── Request body reader ────────────────────────────────────────────────
|
||||
|
||||
/**
 * Read and buffer the full request body.
 * Destroys the socket and rejects when the body exceeds
 * INTERACTION_MAX_BODY_BYTES or takes longer than
 * INTERACTION_BODY_TIMEOUT_MS to arrive.
 */
function readInteractionBody(req: IncomingMessage): Promise<string> {
  return new Promise((resolve, reject) => {
    const chunks: Buffer[] = [];
    let totalBytes = 0;

    // Guard against clients that open a request but never finish the body.
    const timer = setTimeout(() => {
      req.destroy();
      reject(new Error("Request body read timeout"));
    }, INTERACTION_BODY_TIMEOUT_MS);

    req.on("data", (chunk: Buffer) => {
      totalBytes += chunk.length;
      if (totalBytes > INTERACTION_MAX_BODY_BYTES) {
        // Oversized body: stop reading immediately. Later resolve/reject
        // calls after this rejection are no-ops by Promise semantics.
        req.destroy();
        clearTimeout(timer);
        reject(new Error("Request body too large"));
        return;
      }
      chunks.push(chunk);
    });

    req.on("end", () => {
      clearTimeout(timer);
      resolve(Buffer.concat(chunks).toString("utf8"));
    });

    req.on("error", (err) => {
      clearTimeout(timer);
      reject(err);
    });
  });
}
|
||||
|
||||
// ── HTTP handler ───────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Create the HTTP handler for Mattermost interactive-button callbacks.
 *
 * Request pipeline: POST-only → localhost-only peer check → size/time-bounded
 * body read → HMAC token verification of the button context → system-event
 * dispatch → post update (buttons replaced with a completion note) → 200
 * response → optional synthetic inbound message dispatch.
 *
 * @param params.resolveSessionKey  optional session-key resolver; falls back
 *   to a deterministic agent:main key per channel.
 * @param params.dispatchButtonClick optional hook that feeds the click back
 *   into the agent as a synthetic inbound message (runs after the response).
 */
export function createMattermostInteractionHandler(params: {
  client: MattermostClient;
  botUserId: string;
  accountId: string;
  callbackUrl: string;
  resolveSessionKey?: (channelId: string, userId: string) => Promise<string>;
  dispatchButtonClick?: (opts: {
    channelId: string;
    userId: string;
    userName: string;
    actionId: string;
    actionName: string;
    postId: string;
  }) => Promise<void>;
  log?: (message: string) => void;
}): (req: IncomingMessage, res: ServerResponse) => Promise<void> {
  const { client, accountId, log } = params;
  const core = getMattermostRuntime();

  return async (req: IncomingMessage, res: ServerResponse) => {
    // Only accept POST
    if (req.method !== "POST") {
      res.statusCode = 405;
      res.setHeader("Allow", "POST");
      res.setHeader("Content-Type", "application/json");
      res.end(JSON.stringify({ error: "Method Not Allowed" }));
      return;
    }

    // Verify request is from localhost
    if (!isLocalhostRequest(req)) {
      log?.(
        `mattermost interaction: rejected non-localhost request from ${req.socket?.remoteAddress}`,
      );
      res.statusCode = 403;
      res.setHeader("Content-Type", "application/json");
      res.end(JSON.stringify({ error: "Forbidden" }));
      return;
    }

    let payload: MattermostInteractionPayload;
    try {
      const raw = await readInteractionBody(req);
      payload = JSON.parse(raw) as MattermostInteractionPayload;
    } catch (err) {
      log?.(`mattermost interaction: failed to parse body: ${String(err)}`);
      res.statusCode = 400;
      res.setHeader("Content-Type", "application/json");
      res.end(JSON.stringify({ error: "Invalid request body" }));
      return;
    }

    const context = payload.context;
    if (!context) {
      res.statusCode = 400;
      res.setHeader("Content-Type", "application/json");
      res.end(JSON.stringify({ error: "Missing context" }));
      return;
    }

    // Verify HMAC token
    const token = context._token;
    if (typeof token !== "string") {
      log?.("mattermost interaction: missing _token in context");
      res.statusCode = 403;
      res.setHeader("Content-Type", "application/json");
      res.end(JSON.stringify({ error: "Missing token" }));
      return;
    }

    // Strip _token before verification (it wasn't in the original context)
    const { _token, ...contextWithoutToken } = context;
    if (!verifyInteractionToken(contextWithoutToken, token, accountId)) {
      log?.("mattermost interaction: invalid _token");
      res.statusCode = 403;
      res.setHeader("Content-Type", "application/json");
      res.end(JSON.stringify({ error: "Invalid token" }));
      return;
    }

    const actionId = context.action_id;
    if (typeof actionId !== "string") {
      res.statusCode = 400;
      res.setHeader("Content-Type", "application/json");
      res.end(JSON.stringify({ error: "Missing action_id in context" }));
      return;
    }

    log?.(
      `mattermost interaction: action=${actionId} user=${payload.user_name ?? payload.user_id} ` +
        `post=${payload.post_id} channel=${payload.channel_id}`,
    );

    // Dispatch as system event so the agent can handle it.
    // Wrapped in try/catch — the post update below must still run even if
    // system event dispatch fails (e.g. missing sessionKey or channel lookup).
    try {
      const eventLabel =
        `Mattermost button click: action="${actionId}" ` +
        `by ${payload.user_name ?? payload.user_id} ` +
        `in channel ${payload.channel_id}`;

      const sessionKey = params.resolveSessionKey
        ? await params.resolveSessionKey(payload.channel_id, payload.user_id)
        : `agent:main:mattermost:${accountId}:${payload.channel_id}`;

      core.system.enqueueSystemEvent(eventLabel, {
        sessionKey,
        contextKey: `mattermost:interaction:${payload.post_id}:${actionId}`,
      });
    } catch (err) {
      log?.(`mattermost interaction: system event dispatch failed: ${String(err)}`);
    }

    // Fetch the original post to preserve its message and find the clicked button name.
    const userName = payload.user_name ?? payload.user_id;
    let originalMessage = "";
    let clickedButtonName = actionId; // fallback to action ID if we can't find the name
    try {
      const originalPost = await client.request<{
        message?: string;
        props?: Record<string, unknown>;
      }>(`/posts/${payload.post_id}`);
      originalMessage = originalPost?.message ?? "";

      // Find the clicked button's display name from the original attachments
      const postAttachments = Array.isArray(originalPost?.props?.attachments)
        ? (originalPost.props.attachments as Array<{
            actions?: Array<{ id?: string; name?: string }>;
          }>)
        : [];
      for (const att of postAttachments) {
        const match = att.actions?.find((a) => a.id === actionId);
        if (match?.name) {
          clickedButtonName = match.name;
          break;
        }
      }
    } catch (err) {
      log?.(`mattermost interaction: failed to fetch post ${payload.post_id}: ${String(err)}`);
    }

    // Update the post via API to replace buttons with a completion indicator.
    // NOTE(review): this removes ALL original attachments, not just the
    // clicked button's — presumably intentional; confirm for multi-attachment posts.
    try {
      await updateMattermostPost(client, payload.post_id, {
        message: originalMessage,
        props: {
          attachments: [
            {
              text: `✓ **${clickedButtonName}** selected by @${userName}`,
            },
          ],
        },
      });
    } catch (err) {
      log?.(`mattermost interaction: failed to update post ${payload.post_id}: ${String(err)}`);
    }

    // Respond with empty JSON — the post update is handled above
    res.statusCode = 200;
    res.setHeader("Content-Type", "application/json");
    res.end("{}");

    // Dispatch a synthetic inbound message so the agent responds to the button click.
    // Runs after the response is flushed so Mattermost is not kept waiting.
    if (params.dispatchButtonClick) {
      try {
        await params.dispatchButtonClick({
          channelId: payload.channel_id,
          userId: payload.user_id,
          userName,
          actionId,
          actionName: clickedButtonName,
          postId: payload.post_id,
        });
      } catch (err) {
        log?.(`mattermost interaction: dispatchButtonClick failed: ${String(err)}`);
      }
    }
  };
}
|
||||
109
extensions/mattermost/src/mattermost/monitor.test.ts
Normal file
109
extensions/mattermost/src/mattermost/monitor.test.ts
Normal file
@@ -0,0 +1,109 @@
|
||||
import type { OpenClawConfig } from "openclaw/plugin-sdk/mattermost";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import { resolveMattermostAccount } from "./accounts.js";
|
||||
import {
|
||||
evaluateMattermostMentionGate,
|
||||
type MattermostMentionGateInput,
|
||||
type MattermostRequireMentionResolverInput,
|
||||
} from "./monitor.js";
|
||||
|
||||
function resolveRequireMentionForTest(params: MattermostRequireMentionResolverInput): boolean {
|
||||
const root = params.cfg.channels?.mattermost;
|
||||
const accountGroups = root?.accounts?.[params.accountId]?.groups;
|
||||
const groups = accountGroups ?? root?.groups;
|
||||
const groupConfig = params.groupId ? groups?.[params.groupId] : undefined;
|
||||
const defaultGroupConfig = groups?.["*"];
|
||||
const configMention =
|
||||
typeof groupConfig?.requireMention === "boolean"
|
||||
? groupConfig.requireMention
|
||||
: typeof defaultGroupConfig?.requireMention === "boolean"
|
||||
? defaultGroupConfig.requireMention
|
||||
: undefined;
|
||||
if (typeof configMention === "boolean") {
|
||||
return configMention;
|
||||
}
|
||||
if (typeof params.requireMentionOverride === "boolean") {
|
||||
return params.requireMentionOverride;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Drive evaluateMattermostMentionGate for a plain (unmentioned, non-command)
// channel message against the given config; returns the resolved account, the
// spied resolver, and the gate decision for assertions.
function evaluateMentionGateForMessage(params: { cfg: OpenClawConfig; threadRootId?: string }) {
  const account = resolveMattermostAccount({ cfg: params.cfg, accountId: "default" });
  // Spy so tests can assert which groupId/override the gate passed through.
  const resolver = vi.fn(resolveRequireMentionForTest);
  const input: MattermostMentionGateInput = {
    kind: "channel",
    cfg: params.cfg,
    accountId: account.accountId,
    channelId: "chan-1",
    threadRootId: params.threadRootId,
    requireMentionOverride: account.requireMention,
    resolveRequireMention: resolver,
    wasMentioned: false,
    isControlCommand: false,
    commandAuthorized: false,
    oncharEnabled: false,
    oncharTriggered: false,
    canDetectMention: true,
  };
  const decision = evaluateMattermostMentionGate(input);
  return { account, resolver, decision };
}
|
||||
|
||||
describe("mattermost mention gating", () => {
|
||||
it("accepts unmentioned root channel posts in onmessage mode", () => {
|
||||
const cfg: OpenClawConfig = {
|
||||
channels: {
|
||||
mattermost: {
|
||||
chatmode: "onmessage",
|
||||
groupPolicy: "open",
|
||||
},
|
||||
},
|
||||
};
|
||||
const { resolver, decision } = evaluateMentionGateForMessage({ cfg });
|
||||
expect(decision.dropReason).toBeNull();
|
||||
expect(decision.shouldRequireMention).toBe(false);
|
||||
expect(resolver).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
accountId: "default",
|
||||
groupId: "chan-1",
|
||||
requireMentionOverride: false,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("accepts unmentioned thread replies in onmessage mode", () => {
|
||||
const cfg: OpenClawConfig = {
|
||||
channels: {
|
||||
mattermost: {
|
||||
chatmode: "onmessage",
|
||||
groupPolicy: "open",
|
||||
},
|
||||
},
|
||||
};
|
||||
const { resolver, decision } = evaluateMentionGateForMessage({
|
||||
cfg,
|
||||
threadRootId: "thread-root-1",
|
||||
});
|
||||
expect(decision.dropReason).toBeNull();
|
||||
expect(decision.shouldRequireMention).toBe(false);
|
||||
const resolverCall = resolver.mock.calls.at(-1)?.[0];
|
||||
expect(resolverCall?.groupId).toBe("chan-1");
|
||||
expect(resolverCall?.groupId).not.toBe("thread-root-1");
|
||||
});
|
||||
|
||||
it("rejects unmentioned channel posts in oncall mode", () => {
|
||||
const cfg: OpenClawConfig = {
|
||||
channels: {
|
||||
mattermost: {
|
||||
chatmode: "oncall",
|
||||
groupPolicy: "open",
|
||||
},
|
||||
},
|
||||
};
|
||||
const { decision, account } = evaluateMentionGateForMessage({ cfg });
|
||||
expect(account.requireMention).toBe(true);
|
||||
expect(decision.shouldRequireMention).toBe(true);
|
||||
expect(decision.dropReason).toBe("missing-mention");
|
||||
});
|
||||
});
|
||||
@@ -18,6 +18,7 @@ import {
|
||||
DEFAULT_GROUP_HISTORY_LIMIT,
|
||||
recordPendingHistoryEntryIfEnabled,
|
||||
isDangerousNameMatchingEnabled,
|
||||
registerPluginHttpRoute,
|
||||
resolveControlCommandGate,
|
||||
readStoreAllowFromForDmPolicy,
|
||||
resolveDmGroupAccessWithLists,
|
||||
@@ -42,6 +43,11 @@ import {
|
||||
type MattermostPost,
|
||||
type MattermostUser,
|
||||
} from "./client.js";
|
||||
import {
|
||||
createMattermostInteractionHandler,
|
||||
setInteractionCallbackUrl,
|
||||
setInteractionSecret,
|
||||
} from "./interactions.js";
|
||||
import { isMattermostSenderAllowed, normalizeMattermostAllowList } from "./monitor-auth.js";
|
||||
import {
|
||||
createDedupeCache,
|
||||
@@ -156,6 +162,89 @@ function channelChatType(kind: ChatType): "direct" | "group" | "channel" {
|
||||
return "channel";
|
||||
}
|
||||
|
||||
// Arguments handed to the pluggable requireMention resolver.
export type MattermostRequireMentionResolverInput = {
  cfg: OpenClawConfig;
  channel: "mattermost";
  accountId: string;
  // Mention gating resolves group config by channel ID.
  groupId: string;
  requireMentionOverride?: boolean;
};

// Full input to evaluateMattermostMentionGate for one inbound message.
export type MattermostMentionGateInput = {
  kind: ChatType;
  cfg: OpenClawConfig;
  accountId: string;
  channelId: string;
  threadRootId?: string;
  requireMentionOverride?: boolean;
  resolveRequireMention: (params: MattermostRequireMentionResolverInput) => boolean;
  wasMentioned: boolean;
  isControlCommand: boolean;
  // Whether the sender is authorized to issue control commands.
  commandAuthorized: boolean;
  // onchar trigger-character gating state.
  oncharEnabled: boolean;
  oncharTriggered: boolean;
  // False when the runtime cannot reliably detect mentions (gate then passes).
  canDetectMention: boolean;
};

// Outcome of mention gating; dropReason null means the message proceeds.
type MattermostMentionGateDecision = {
  shouldRequireMention: boolean;
  shouldBypassMention: boolean;
  effectiveWasMentioned: boolean;
  dropReason: "onchar-not-triggered" | "missing-mention" | null;
};
|
||||
|
||||
export function evaluateMattermostMentionGate(
|
||||
params: MattermostMentionGateInput,
|
||||
): MattermostMentionGateDecision {
|
||||
const shouldRequireMention =
|
||||
params.kind !== "direct" &&
|
||||
params.resolveRequireMention({
|
||||
cfg: params.cfg,
|
||||
channel: "mattermost",
|
||||
accountId: params.accountId,
|
||||
groupId: params.channelId,
|
||||
requireMentionOverride: params.requireMentionOverride,
|
||||
});
|
||||
const shouldBypassMention =
|
||||
params.isControlCommand &&
|
||||
shouldRequireMention &&
|
||||
!params.wasMentioned &&
|
||||
params.commandAuthorized;
|
||||
const effectiveWasMentioned =
|
||||
params.wasMentioned || shouldBypassMention || params.oncharTriggered;
|
||||
if (
|
||||
params.oncharEnabled &&
|
||||
!params.oncharTriggered &&
|
||||
!params.wasMentioned &&
|
||||
!params.isControlCommand
|
||||
) {
|
||||
return {
|
||||
shouldRequireMention,
|
||||
shouldBypassMention,
|
||||
effectiveWasMentioned,
|
||||
dropReason: "onchar-not-triggered",
|
||||
};
|
||||
}
|
||||
if (
|
||||
params.kind !== "direct" &&
|
||||
shouldRequireMention &&
|
||||
params.canDetectMention &&
|
||||
!effectiveWasMentioned
|
||||
) {
|
||||
return {
|
||||
shouldRequireMention,
|
||||
shouldBypassMention,
|
||||
effectiveWasMentioned,
|
||||
dropReason: "missing-mention",
|
||||
};
|
||||
}
|
||||
return {
|
||||
shouldRequireMention,
|
||||
shouldBypassMention,
|
||||
effectiveWasMentioned,
|
||||
dropReason: null,
|
||||
};
|
||||
}
|
||||
type MattermostMediaInfo = {
|
||||
path: string;
|
||||
contentType?: string;
|
||||
@@ -235,12 +324,12 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {}
|
||||
// a different port.
|
||||
const envPortRaw = process.env.OPENCLAW_GATEWAY_PORT?.trim();
|
||||
const envPort = envPortRaw ? Number.parseInt(envPortRaw, 10) : NaN;
|
||||
const gatewayPort =
|
||||
const slashGatewayPort =
|
||||
Number.isFinite(envPort) && envPort > 0 ? envPort : (cfg.gateway?.port ?? 18789);
|
||||
|
||||
const callbackUrl = resolveCallbackUrl({
|
||||
const slashCallbackUrl = resolveCallbackUrl({
|
||||
config: slashConfig,
|
||||
gatewayPort,
|
||||
gatewayPort: slashGatewayPort,
|
||||
gatewayHost: cfg.gateway?.customBindHost ?? undefined,
|
||||
});
|
||||
|
||||
@@ -249,7 +338,7 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {}
|
||||
|
||||
try {
|
||||
const mmHost = new URL(baseUrl).hostname;
|
||||
const callbackHost = new URL(callbackUrl).hostname;
|
||||
const callbackHost = new URL(slashCallbackUrl).hostname;
|
||||
|
||||
// NOTE: We cannot infer network reachability from hostnames alone.
|
||||
// Mattermost might be accessed via a public domain while still running on the same
|
||||
@@ -257,7 +346,7 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {}
|
||||
// So treat loopback callback URLs as an advisory warning only.
|
||||
if (isLoopbackHost(callbackHost) && !isLoopbackHost(mmHost)) {
|
||||
runtime.error?.(
|
||||
`mattermost: slash commands callbackUrl resolved to ${callbackUrl} (loopback) while baseUrl is ${baseUrl}. This MAY be unreachable depending on your deployment. If native slash commands don't work, set channels.mattermost.commands.callbackUrl to a URL reachable from the Mattermost server (e.g. your public reverse proxy URL).`,
|
||||
`mattermost: slash commands callbackUrl resolved to ${slashCallbackUrl} (loopback) while baseUrl is ${baseUrl}. This MAY be unreachable depending on your deployment. If native slash commands don't work, set channels.mattermost.commands.callbackUrl to a URL reachable from the Mattermost server (e.g. your public reverse proxy URL).`,
|
||||
);
|
||||
}
|
||||
} catch {
|
||||
@@ -307,7 +396,7 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {}
|
||||
client,
|
||||
teamId: team.id,
|
||||
creatorUserId: botUserId,
|
||||
callbackUrl,
|
||||
callbackUrl: slashCallbackUrl,
|
||||
commands: dedupedCommands,
|
||||
log: (msg) => runtime.log?.(msg),
|
||||
});
|
||||
@@ -349,7 +438,7 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {}
|
||||
});
|
||||
|
||||
runtime.log?.(
|
||||
`mattermost: slash commands registered (${allRegistered.length} commands across ${teams.length} teams, callback=${callbackUrl})`,
|
||||
`mattermost: slash commands registered (${allRegistered.length} commands across ${teams.length} teams, callback=${slashCallbackUrl})`,
|
||||
);
|
||||
}
|
||||
} catch (err) {
|
||||
@@ -357,6 +446,182 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {}
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Interactive buttons registration ──────────────────────────────────────
|
||||
// Derive a stable HMAC secret from the bot token so CLI and gateway share it.
|
||||
setInteractionSecret(account.accountId, botToken);
|
||||
|
||||
// Register HTTP callback endpoint for interactive button clicks.
|
||||
// Mattermost POSTs to this URL when a user clicks a button action.
|
||||
const gatewayPort = typeof cfg.gateway?.port === "number" ? cfg.gateway.port : 18789;
|
||||
const interactionPath = `/mattermost/interactions/${account.accountId}`;
|
||||
const callbackUrl = `http://localhost:${gatewayPort}${interactionPath}`;
|
||||
setInteractionCallbackUrl(account.accountId, callbackUrl);
|
||||
const unregisterInteractions = registerPluginHttpRoute({
|
||||
path: interactionPath,
|
||||
fallbackPath: "/mattermost/interactions/default",
|
||||
auth: "plugin",
|
||||
handler: createMattermostInteractionHandler({
|
||||
client,
|
||||
botUserId,
|
||||
accountId: account.accountId,
|
||||
callbackUrl,
|
||||
resolveSessionKey: async (channelId: string, userId: string) => {
|
||||
const channelInfo = await resolveChannelInfo(channelId);
|
||||
const kind = mapMattermostChannelTypeToChatType(channelInfo?.type);
|
||||
const teamId = channelInfo?.team_id ?? undefined;
|
||||
const route = core.channel.routing.resolveAgentRoute({
|
||||
cfg,
|
||||
channel: "mattermost",
|
||||
accountId: account.accountId,
|
||||
teamId,
|
||||
peer: {
|
||||
kind,
|
||||
id: kind === "direct" ? userId : channelId,
|
||||
},
|
||||
});
|
||||
return route.sessionKey;
|
||||
},
|
||||
dispatchButtonClick: async (opts) => {
|
||||
const channelInfo = await resolveChannelInfo(opts.channelId);
|
||||
const kind = mapMattermostChannelTypeToChatType(channelInfo?.type);
|
||||
const chatType = channelChatType(kind);
|
||||
const teamId = channelInfo?.team_id ?? undefined;
|
||||
const channelName = channelInfo?.name ?? undefined;
|
||||
const channelDisplay = channelInfo?.display_name ?? channelName ?? opts.channelId;
|
||||
const route = core.channel.routing.resolveAgentRoute({
|
||||
cfg,
|
||||
channel: "mattermost",
|
||||
accountId: account.accountId,
|
||||
teamId,
|
||||
peer: {
|
||||
kind,
|
||||
id: kind === "direct" ? opts.userId : opts.channelId,
|
||||
},
|
||||
});
|
||||
const to = kind === "direct" ? `user:${opts.userId}` : `channel:${opts.channelId}`;
|
||||
const bodyText = `[Button click: user @${opts.userName} selected "${opts.actionName}"]`;
|
||||
const ctxPayload = core.channel.reply.finalizeInboundContext({
|
||||
Body: bodyText,
|
||||
BodyForAgent: bodyText,
|
||||
RawBody: bodyText,
|
||||
CommandBody: bodyText,
|
||||
From:
|
||||
kind === "direct"
|
||||
? `mattermost:${opts.userId}`
|
||||
: kind === "group"
|
||||
? `mattermost:group:${opts.channelId}`
|
||||
: `mattermost:channel:${opts.channelId}`,
|
||||
To: to,
|
||||
SessionKey: route.sessionKey,
|
||||
AccountId: route.accountId,
|
||||
ChatType: chatType,
|
||||
ConversationLabel: `mattermost:${opts.userName}`,
|
||||
GroupSubject: kind !== "direct" ? channelDisplay : undefined,
|
||||
GroupChannel: channelName ? `#${channelName}` : undefined,
|
||||
GroupSpace: teamId,
|
||||
SenderName: opts.userName,
|
||||
SenderId: opts.userId,
|
||||
Provider: "mattermost" as const,
|
||||
Surface: "mattermost" as const,
|
||||
MessageSid: `interaction:${opts.postId}:${opts.actionId}`,
|
||||
WasMentioned: true,
|
||||
CommandAuthorized: true,
|
||||
OriginatingChannel: "mattermost" as const,
|
||||
OriginatingTo: to,
|
||||
});
|
||||
|
||||
const textLimit = core.channel.text.resolveTextChunkLimit(
|
||||
cfg,
|
||||
"mattermost",
|
||||
account.accountId,
|
||||
{ fallbackLimit: account.textChunkLimit ?? 4000 },
|
||||
);
|
||||
const tableMode = core.channel.text.resolveMarkdownTableMode({
|
||||
cfg,
|
||||
channel: "mattermost",
|
||||
accountId: account.accountId,
|
||||
});
|
||||
const { onModelSelected, ...prefixOptions } = createReplyPrefixOptions({
|
||||
cfg,
|
||||
agentId: route.agentId,
|
||||
channel: "mattermost",
|
||||
accountId: account.accountId,
|
||||
});
|
||||
const typingCallbacks = createTypingCallbacks({
|
||||
start: () => sendTypingIndicator(opts.channelId),
|
||||
onStartError: (err) => {
|
||||
logTypingFailure({
|
||||
log: (message) => logger.debug?.(message),
|
||||
channel: "mattermost",
|
||||
target: opts.channelId,
|
||||
error: err,
|
||||
});
|
||||
},
|
||||
});
|
||||
const { dispatcher, replyOptions, markDispatchIdle } =
|
||||
core.channel.reply.createReplyDispatcherWithTyping({
|
||||
...prefixOptions,
|
||||
humanDelay: core.channel.reply.resolveHumanDelayConfig(cfg, route.agentId),
|
||||
deliver: async (payload: ReplyPayload) => {
|
||||
const mediaUrls = payload.mediaUrls ?? (payload.mediaUrl ? [payload.mediaUrl] : []);
|
||||
const text = core.channel.text.convertMarkdownTables(payload.text ?? "", tableMode);
|
||||
if (mediaUrls.length === 0) {
|
||||
const chunkMode = core.channel.text.resolveChunkMode(
|
||||
cfg,
|
||||
"mattermost",
|
||||
account.accountId,
|
||||
);
|
||||
const chunks = core.channel.text.chunkMarkdownTextWithMode(
|
||||
text,
|
||||
textLimit,
|
||||
chunkMode,
|
||||
);
|
||||
for (const chunk of chunks.length > 0 ? chunks : [text]) {
|
||||
if (!chunk) continue;
|
||||
await sendMessageMattermost(to, chunk, {
|
||||
accountId: account.accountId,
|
||||
});
|
||||
}
|
||||
} else {
|
||||
let first = true;
|
||||
for (const mediaUrl of mediaUrls) {
|
||||
const caption = first ? text : "";
|
||||
first = false;
|
||||
await sendMessageMattermost(to, caption, {
|
||||
accountId: account.accountId,
|
||||
mediaUrl,
|
||||
});
|
||||
}
|
||||
}
|
||||
runtime.log?.(`delivered button-click reply to ${to}`);
|
||||
},
|
||||
onError: (err, info) => {
|
||||
runtime.error?.(`mattermost button-click ${info.kind} reply failed: ${String(err)}`);
|
||||
},
|
||||
onReplyStart: typingCallbacks.onReplyStart,
|
||||
});
|
||||
|
||||
await core.channel.reply.dispatchReplyFromConfig({
|
||||
ctx: ctxPayload,
|
||||
cfg,
|
||||
dispatcher,
|
||||
replyOptions: {
|
||||
...replyOptions,
|
||||
disableBlockStreaming:
|
||||
typeof account.blockStreaming === "boolean" ? !account.blockStreaming : undefined,
|
||||
onModelSelected,
|
||||
},
|
||||
});
|
||||
markDispatchIdle();
|
||||
},
|
||||
log: (msg) => runtime.log?.(msg),
|
||||
}),
|
||||
pluginId: "mattermost",
|
||||
source: "mattermost-interactions",
|
||||
accountId: account.accountId,
|
||||
log: (msg: string) => runtime.log?.(msg),
|
||||
});
|
||||
|
||||
const channelCache = new Map<string, { value: MattermostChannel | null; expiresAt: number }>();
|
||||
const userCache = new Map<string, { value: MattermostUser | null; expiresAt: number }>();
|
||||
const logger = core.logging.getChildLogger({ module: "mattermost" });
|
||||
@@ -410,6 +675,10 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {}
|
||||
},
|
||||
filePathHint: fileId,
|
||||
maxBytes: mediaMaxBytes,
|
||||
// Allow fetching from the Mattermost server host (may be localhost or
|
||||
// a private IP). Without this, SSRF guards block media downloads.
|
||||
// Credit: #22594 (@webclerk)
|
||||
ssrfPolicy: { allowedHostnames: [new URL(client.baseUrl).hostname] },
|
||||
});
|
||||
const saved = await core.channel.media.saveMediaBuffer(
|
||||
fetched.buffer,
|
||||
@@ -485,28 +754,36 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {}
|
||||
) => {
|
||||
const channelId = post.channel_id ?? payload.data?.channel_id ?? payload.broadcast?.channel_id;
|
||||
if (!channelId) {
|
||||
logVerboseMessage("mattermost: drop post (missing channel id)");
|
||||
return;
|
||||
}
|
||||
|
||||
const allMessageIds = messageIds?.length ? messageIds : post.id ? [post.id] : [];
|
||||
if (allMessageIds.length === 0) {
|
||||
logVerboseMessage("mattermost: drop post (missing message id)");
|
||||
return;
|
||||
}
|
||||
const dedupeEntries = allMessageIds.map((id) =>
|
||||
recentInboundMessages.check(`${account.accountId}:${id}`),
|
||||
);
|
||||
if (dedupeEntries.length > 0 && dedupeEntries.every(Boolean)) {
|
||||
logVerboseMessage(
|
||||
`mattermost: drop post (dedupe account=${account.accountId} ids=${allMessageIds.length})`,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const senderId = post.user_id ?? payload.broadcast?.user_id;
|
||||
if (!senderId) {
|
||||
logVerboseMessage("mattermost: drop post (missing sender id)");
|
||||
return;
|
||||
}
|
||||
if (senderId === botUserId) {
|
||||
logVerboseMessage(`mattermost: drop post (self sender=${senderId})`);
|
||||
return;
|
||||
}
|
||||
if (isSystemPost(post)) {
|
||||
logVerboseMessage(`mattermost: drop post (system post type=${post.type ?? "unknown"})`);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -707,30 +984,38 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {}
|
||||
? stripOncharPrefix(rawText, oncharPrefixes)
|
||||
: { triggered: false, stripped: rawText };
|
||||
const oncharTriggered = oncharResult.triggered;
|
||||
|
||||
const shouldRequireMention =
|
||||
kind !== "direct" &&
|
||||
core.channel.groups.resolveRequireMention({
|
||||
cfg,
|
||||
channel: "mattermost",
|
||||
accountId: account.accountId,
|
||||
groupId: channelId,
|
||||
});
|
||||
const shouldBypassMention =
|
||||
isControlCommand && shouldRequireMention && !wasMentioned && commandAuthorized;
|
||||
const effectiveWasMentioned = wasMentioned || shouldBypassMention || oncharTriggered;
|
||||
const canDetectMention = Boolean(botUsername) || mentionRegexes.length > 0;
|
||||
const mentionDecision = evaluateMattermostMentionGate({
|
||||
kind,
|
||||
cfg,
|
||||
accountId: account.accountId,
|
||||
channelId,
|
||||
threadRootId,
|
||||
requireMentionOverride: account.requireMention,
|
||||
resolveRequireMention: core.channel.groups.resolveRequireMention,
|
||||
wasMentioned,
|
||||
isControlCommand,
|
||||
commandAuthorized,
|
||||
oncharEnabled,
|
||||
oncharTriggered,
|
||||
canDetectMention,
|
||||
});
|
||||
const { shouldRequireMention, shouldBypassMention } = mentionDecision;
|
||||
|
||||
if (oncharEnabled && !oncharTriggered && !wasMentioned && !isControlCommand) {
|
||||
if (mentionDecision.dropReason === "onchar-not-triggered") {
|
||||
logVerboseMessage(
|
||||
`mattermost: drop group message (onchar not triggered channel=${channelId} sender=${senderId})`,
|
||||
);
|
||||
recordPendingHistory();
|
||||
return;
|
||||
}
|
||||
|
||||
if (kind !== "direct" && shouldRequireMention && canDetectMention) {
|
||||
if (!effectiveWasMentioned) {
|
||||
recordPendingHistory();
|
||||
return;
|
||||
}
|
||||
if (mentionDecision.dropReason === "missing-mention") {
|
||||
logVerboseMessage(
|
||||
`mattermost: drop group message (missing mention channel=${channelId} sender=${senderId} requireMention=${shouldRequireMention} bypass=${shouldBypassMention} canDetectMention=${canDetectMention})`,
|
||||
);
|
||||
recordPendingHistory();
|
||||
return;
|
||||
}
|
||||
const mediaList = await resolveMattermostMedia(post.file_ids);
|
||||
const mediaPlaceholder = buildMattermostAttachmentPlaceholder(mediaList);
|
||||
@@ -738,6 +1023,9 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {}
|
||||
const baseText = [bodySource, mediaPlaceholder].filter(Boolean).join("\n").trim();
|
||||
const bodyText = normalizeMention(baseText, botUsername);
|
||||
if (!bodyText) {
|
||||
logVerboseMessage(
|
||||
`mattermost: drop group message (empty body after normalization channel=${channelId} sender=${senderId})`,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -841,7 +1129,7 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {}
|
||||
ReplyToId: threadRootId,
|
||||
MessageThreadId: threadRootId,
|
||||
Timestamp: typeof post.create_at === "number" ? post.create_at : undefined,
|
||||
WasMentioned: kind !== "direct" ? effectiveWasMentioned : undefined,
|
||||
WasMentioned: kind !== "direct" ? mentionDecision.effectiveWasMentioned : undefined,
|
||||
CommandAuthorized: commandAuthorized,
|
||||
OriginatingChannel: "mattermost" as const,
|
||||
OriginatingTo: to,
|
||||
@@ -1194,17 +1482,21 @@ export async function monitorMattermostProvider(opts: MonitorMattermostOpts = {}
|
||||
}
|
||||
}
|
||||
|
||||
await runWithReconnect(connectOnce, {
|
||||
abortSignal: opts.abortSignal,
|
||||
jitterRatio: 0.2,
|
||||
onError: (err) => {
|
||||
runtime.error?.(`mattermost connection failed: ${String(err)}`);
|
||||
opts.statusSink?.({ lastError: String(err), connected: false });
|
||||
},
|
||||
onReconnect: (delayMs) => {
|
||||
runtime.log?.(`mattermost reconnecting in ${Math.round(delayMs / 1000)}s`);
|
||||
},
|
||||
});
|
||||
try {
|
||||
await runWithReconnect(connectOnce, {
|
||||
abortSignal: opts.abortSignal,
|
||||
jitterRatio: 0.2,
|
||||
onError: (err) => {
|
||||
runtime.error?.(`mattermost connection failed: ${String(err)}`);
|
||||
opts.statusSink?.({ lastError: String(err), connected: false });
|
||||
},
|
||||
onReconnect: (delayMs) => {
|
||||
runtime.log?.(`mattermost reconnecting in ${Math.round(delayMs / 1000)}s`);
|
||||
},
|
||||
});
|
||||
} finally {
|
||||
unregisterInteractions?.();
|
||||
}
|
||||
|
||||
if (slashShutdownCleanup) {
|
||||
await slashShutdownCleanup;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { sendMessageMattermost } from "./send.js";
|
||||
import { parseMattermostTarget, sendMessageMattermost } from "./send.js";
|
||||
|
||||
const mockState = vi.hoisted(() => ({
|
||||
loadConfig: vi.fn(() => ({})),
|
||||
@@ -12,7 +12,9 @@ const mockState = vi.hoisted(() => ({
|
||||
createMattermostClient: vi.fn(),
|
||||
createMattermostDirectChannel: vi.fn(),
|
||||
createMattermostPost: vi.fn(),
|
||||
fetchMattermostChannelByName: vi.fn(),
|
||||
fetchMattermostMe: vi.fn(),
|
||||
fetchMattermostUserTeams: vi.fn(),
|
||||
fetchMattermostUserByUsername: vi.fn(),
|
||||
normalizeMattermostBaseUrl: vi.fn((input: string | undefined) => input?.trim() ?? ""),
|
||||
uploadMattermostFile: vi.fn(),
|
||||
@@ -30,7 +32,9 @@ vi.mock("./client.js", () => ({
|
||||
createMattermostClient: mockState.createMattermostClient,
|
||||
createMattermostDirectChannel: mockState.createMattermostDirectChannel,
|
||||
createMattermostPost: mockState.createMattermostPost,
|
||||
fetchMattermostChannelByName: mockState.fetchMattermostChannelByName,
|
||||
fetchMattermostMe: mockState.fetchMattermostMe,
|
||||
fetchMattermostUserTeams: mockState.fetchMattermostUserTeams,
|
||||
fetchMattermostUserByUsername: mockState.fetchMattermostUserByUsername,
|
||||
normalizeMattermostBaseUrl: mockState.normalizeMattermostBaseUrl,
|
||||
uploadMattermostFile: mockState.uploadMattermostFile,
|
||||
@@ -71,11 +75,16 @@ describe("sendMessageMattermost", () => {
|
||||
mockState.createMattermostClient.mockReset();
|
||||
mockState.createMattermostDirectChannel.mockReset();
|
||||
mockState.createMattermostPost.mockReset();
|
||||
mockState.fetchMattermostChannelByName.mockReset();
|
||||
mockState.fetchMattermostMe.mockReset();
|
||||
mockState.fetchMattermostUserTeams.mockReset();
|
||||
mockState.fetchMattermostUserByUsername.mockReset();
|
||||
mockState.uploadMattermostFile.mockReset();
|
||||
mockState.createMattermostClient.mockReturnValue({});
|
||||
mockState.createMattermostPost.mockResolvedValue({ id: "post-1" });
|
||||
mockState.fetchMattermostMe.mockResolvedValue({ id: "bot-user" });
|
||||
mockState.fetchMattermostUserTeams.mockResolvedValue([{ id: "team-1" }]);
|
||||
mockState.fetchMattermostChannelByName.mockResolvedValue({ id: "town-square" });
|
||||
mockState.uploadMattermostFile.mockResolvedValue({ id: "file-1" });
|
||||
});
|
||||
|
||||
@@ -148,3 +157,86 @@ describe("sendMessageMattermost", () => {
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("parseMattermostTarget", () => {
|
||||
it("parses channel: prefix with valid ID as channel id", () => {
|
||||
const target = parseMattermostTarget("channel:dthcxgoxhifn3pwh65cut3ud3w");
|
||||
expect(target).toEqual({ kind: "channel", id: "dthcxgoxhifn3pwh65cut3ud3w" });
|
||||
});
|
||||
|
||||
it("parses channel: prefix with non-ID as channel name", () => {
|
||||
const target = parseMattermostTarget("channel:abc123");
|
||||
expect(target).toEqual({ kind: "channel-name", name: "abc123" });
|
||||
});
|
||||
|
||||
it("parses user: prefix as user id", () => {
|
||||
const target = parseMattermostTarget("user:usr456");
|
||||
expect(target).toEqual({ kind: "user", id: "usr456" });
|
||||
});
|
||||
|
||||
it("parses mattermost: prefix as user id", () => {
|
||||
const target = parseMattermostTarget("mattermost:usr789");
|
||||
expect(target).toEqual({ kind: "user", id: "usr789" });
|
||||
});
|
||||
|
||||
it("parses @ prefix as username", () => {
|
||||
const target = parseMattermostTarget("@alice");
|
||||
expect(target).toEqual({ kind: "user", username: "alice" });
|
||||
});
|
||||
|
||||
it("parses # prefix as channel name", () => {
|
||||
const target = parseMattermostTarget("#off-topic");
|
||||
expect(target).toEqual({ kind: "channel-name", name: "off-topic" });
|
||||
});
|
||||
|
||||
it("parses # prefix with spaces", () => {
|
||||
const target = parseMattermostTarget(" #general ");
|
||||
expect(target).toEqual({ kind: "channel-name", name: "general" });
|
||||
});
|
||||
|
||||
it("treats 26-char alphanumeric bare string as channel id", () => {
|
||||
const target = parseMattermostTarget("dthcxgoxhifn3pwh65cut3ud3w");
|
||||
expect(target).toEqual({ kind: "channel", id: "dthcxgoxhifn3pwh65cut3ud3w" });
|
||||
});
|
||||
|
||||
it("treats non-ID bare string as channel name", () => {
|
||||
const target = parseMattermostTarget("off-topic");
|
||||
expect(target).toEqual({ kind: "channel-name", name: "off-topic" });
|
||||
});
|
||||
|
||||
it("treats channel: with non-ID value as channel name", () => {
|
||||
const target = parseMattermostTarget("channel:off-topic");
|
||||
expect(target).toEqual({ kind: "channel-name", name: "off-topic" });
|
||||
});
|
||||
|
||||
it("throws on empty string", () => {
|
||||
expect(() => parseMattermostTarget("")).toThrow("Recipient is required");
|
||||
});
|
||||
|
||||
it("throws on empty # prefix", () => {
|
||||
expect(() => parseMattermostTarget("#")).toThrow("Channel name is required");
|
||||
});
|
||||
|
||||
it("throws on empty @ prefix", () => {
|
||||
expect(() => parseMattermostTarget("@")).toThrow("Username is required");
|
||||
});
|
||||
|
||||
it("parses channel:#name as channel name", () => {
|
||||
const target = parseMattermostTarget("channel:#off-topic");
|
||||
expect(target).toEqual({ kind: "channel-name", name: "off-topic" });
|
||||
});
|
||||
|
||||
it("parses channel:#name with spaces", () => {
|
||||
const target = parseMattermostTarget(" channel: #general ");
|
||||
expect(target).toEqual({ kind: "channel-name", name: "general" });
|
||||
});
|
||||
|
||||
it("is case-insensitive for prefixes", () => {
|
||||
expect(parseMattermostTarget("CHANNEL:dthcxgoxhifn3pwh65cut3ud3w")).toEqual({
|
||||
kind: "channel",
|
||||
id: "dthcxgoxhifn3pwh65cut3ud3w",
|
||||
});
|
||||
expect(parseMattermostTarget("User:XYZ")).toEqual({ kind: "user", id: "XYZ" });
|
||||
expect(parseMattermostTarget("Mattermost:QRS")).toEqual({ kind: "user", id: "QRS" });
|
||||
});
|
||||
});
|
||||
|
||||
@@ -5,8 +5,10 @@ import {
|
||||
createMattermostClient,
|
||||
createMattermostDirectChannel,
|
||||
createMattermostPost,
|
||||
fetchMattermostChannelByName,
|
||||
fetchMattermostMe,
|
||||
fetchMattermostUserByUsername,
|
||||
fetchMattermostUserTeams,
|
||||
normalizeMattermostBaseUrl,
|
||||
uploadMattermostFile,
|
||||
type MattermostUser,
|
||||
@@ -20,6 +22,7 @@ export type MattermostSendOpts = {
|
||||
mediaUrl?: string;
|
||||
mediaLocalRoots?: readonly string[];
|
||||
replyToId?: string;
|
||||
props?: Record<string, unknown>;
|
||||
};
|
||||
|
||||
export type MattermostSendResult = {
|
||||
@@ -29,10 +32,12 @@ export type MattermostSendResult = {
|
||||
|
||||
type MattermostTarget =
|
||||
| { kind: "channel"; id: string }
|
||||
| { kind: "channel-name"; name: string }
|
||||
| { kind: "user"; id?: string; username?: string };
|
||||
|
||||
const botUserCache = new Map<string, MattermostUser>();
|
||||
const userByNameCache = new Map<string, MattermostUser>();
|
||||
const channelByNameCache = new Map<string, string>();
|
||||
|
||||
const getCore = () => getMattermostRuntime();
|
||||
|
||||
@@ -50,7 +55,12 @@ function isHttpUrl(value: string): boolean {
|
||||
return /^https?:\/\//i.test(value);
|
||||
}
|
||||
|
||||
function parseMattermostTarget(raw: string): MattermostTarget {
|
||||
/** Mattermost IDs are 26-character lowercase alphanumeric strings. */
|
||||
function isMattermostId(value: string): boolean {
|
||||
return /^[a-z0-9]{26}$/.test(value);
|
||||
}
|
||||
|
||||
export function parseMattermostTarget(raw: string): MattermostTarget {
|
||||
const trimmed = raw.trim();
|
||||
if (!trimmed) {
|
||||
throw new Error("Recipient is required for Mattermost sends");
|
||||
@@ -61,6 +71,16 @@ function parseMattermostTarget(raw: string): MattermostTarget {
|
||||
if (!id) {
|
||||
throw new Error("Channel id is required for Mattermost sends");
|
||||
}
|
||||
if (id.startsWith("#")) {
|
||||
const name = id.slice(1).trim();
|
||||
if (!name) {
|
||||
throw new Error("Channel name is required for Mattermost sends");
|
||||
}
|
||||
return { kind: "channel-name", name };
|
||||
}
|
||||
if (!isMattermostId(id)) {
|
||||
return { kind: "channel-name", name: id };
|
||||
}
|
||||
return { kind: "channel", id };
|
||||
}
|
||||
if (lower.startsWith("user:")) {
|
||||
@@ -84,6 +104,16 @@ function parseMattermostTarget(raw: string): MattermostTarget {
|
||||
}
|
||||
return { kind: "user", username };
|
||||
}
|
||||
if (trimmed.startsWith("#")) {
|
||||
const name = trimmed.slice(1).trim();
|
||||
if (!name) {
|
||||
throw new Error("Channel name is required for Mattermost sends");
|
||||
}
|
||||
return { kind: "channel-name", name };
|
||||
}
|
||||
if (!isMattermostId(trimmed)) {
|
||||
return { kind: "channel-name", name: trimmed };
|
||||
}
|
||||
return { kind: "channel", id: trimmed };
|
||||
}
|
||||
|
||||
@@ -116,6 +146,34 @@ async function resolveUserIdByUsername(params: {
|
||||
return user.id;
|
||||
}
|
||||
|
||||
async function resolveChannelIdByName(params: {
|
||||
baseUrl: string;
|
||||
token: string;
|
||||
name: string;
|
||||
}): Promise<string> {
|
||||
const { baseUrl, token, name } = params;
|
||||
const key = `${cacheKey(baseUrl, token)}::channel::${name.toLowerCase()}`;
|
||||
const cached = channelByNameCache.get(key);
|
||||
if (cached) {
|
||||
return cached;
|
||||
}
|
||||
const client = createMattermostClient({ baseUrl, botToken: token });
|
||||
const me = await fetchMattermostMe(client);
|
||||
const teams = await fetchMattermostUserTeams(client, me.id);
|
||||
for (const team of teams) {
|
||||
try {
|
||||
const channel = await fetchMattermostChannelByName(client, team.id, name);
|
||||
if (channel?.id) {
|
||||
channelByNameCache.set(key, channel.id);
|
||||
return channel.id;
|
||||
}
|
||||
} catch {
|
||||
// Channel not found in this team, try next
|
||||
}
|
||||
}
|
||||
throw new Error(`Mattermost channel "#${name}" not found in any team the bot belongs to`);
|
||||
}
|
||||
|
||||
async function resolveTargetChannelId(params: {
|
||||
target: MattermostTarget;
|
||||
baseUrl: string;
|
||||
@@ -124,6 +182,13 @@ async function resolveTargetChannelId(params: {
|
||||
if (params.target.kind === "channel") {
|
||||
return params.target.id;
|
||||
}
|
||||
if (params.target.kind === "channel-name") {
|
||||
return await resolveChannelIdByName({
|
||||
baseUrl: params.baseUrl,
|
||||
token: params.token,
|
||||
name: params.target.name,
|
||||
});
|
||||
}
|
||||
const userId = params.target.id
|
||||
? params.target.id
|
||||
: await resolveUserIdByUsername({
|
||||
@@ -221,6 +286,7 @@ export async function sendMessageMattermost(
|
||||
message,
|
||||
rootId: opts.replyToId,
|
||||
fileIds,
|
||||
props: opts.props,
|
||||
});
|
||||
|
||||
core.channel.activity.record({
|
||||
|
||||
96
extensions/mattermost/src/normalize.test.ts
Normal file
96
extensions/mattermost/src/normalize.test.ts
Normal file
@@ -0,0 +1,96 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { looksLikeMattermostTargetId, normalizeMattermostMessagingTarget } from "./normalize.js";
|
||||
|
||||
describe("normalizeMattermostMessagingTarget", () => {
|
||||
it("returns undefined for empty input", () => {
|
||||
expect(normalizeMattermostMessagingTarget("")).toBeUndefined();
|
||||
expect(normalizeMattermostMessagingTarget(" ")).toBeUndefined();
|
||||
});
|
||||
|
||||
it("normalizes channel: prefix", () => {
|
||||
expect(normalizeMattermostMessagingTarget("channel:abc123")).toBe("channel:abc123");
|
||||
expect(normalizeMattermostMessagingTarget("Channel:ABC")).toBe("channel:ABC");
|
||||
});
|
||||
|
||||
it("normalizes group: prefix to channel:", () => {
|
||||
expect(normalizeMattermostMessagingTarget("group:abc123")).toBe("channel:abc123");
|
||||
});
|
||||
|
||||
it("normalizes user: prefix", () => {
|
||||
expect(normalizeMattermostMessagingTarget("user:abc123")).toBe("user:abc123");
|
||||
});
|
||||
|
||||
it("normalizes mattermost: prefix to user:", () => {
|
||||
expect(normalizeMattermostMessagingTarget("mattermost:abc123")).toBe("user:abc123");
|
||||
});
|
||||
|
||||
it("keeps @username targets", () => {
|
||||
expect(normalizeMattermostMessagingTarget("@alice")).toBe("@alice");
|
||||
expect(normalizeMattermostMessagingTarget("@Alice")).toBe("@Alice");
|
||||
});
|
||||
|
||||
it("returns undefined for #channel (triggers directory lookup)", () => {
|
||||
expect(normalizeMattermostMessagingTarget("#bookmarks")).toBeUndefined();
|
||||
expect(normalizeMattermostMessagingTarget("#off-topic")).toBeUndefined();
|
||||
expect(normalizeMattermostMessagingTarget("# ")).toBeUndefined();
|
||||
});
|
||||
|
||||
it("returns undefined for bare names (triggers directory lookup)", () => {
|
||||
expect(normalizeMattermostMessagingTarget("bookmarks")).toBeUndefined();
|
||||
expect(normalizeMattermostMessagingTarget("off-topic")).toBeUndefined();
|
||||
});
|
||||
|
||||
it("returns undefined for empty prefixed values", () => {
|
||||
expect(normalizeMattermostMessagingTarget("channel:")).toBeUndefined();
|
||||
expect(normalizeMattermostMessagingTarget("user:")).toBeUndefined();
|
||||
expect(normalizeMattermostMessagingTarget("@")).toBeUndefined();
|
||||
expect(normalizeMattermostMessagingTarget("#")).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("looksLikeMattermostTargetId", () => {
|
||||
it("returns false for empty input", () => {
|
||||
expect(looksLikeMattermostTargetId("")).toBe(false);
|
||||
expect(looksLikeMattermostTargetId(" ")).toBe(false);
|
||||
});
|
||||
|
||||
it("recognizes prefixed targets", () => {
|
||||
expect(looksLikeMattermostTargetId("channel:abc")).toBe(true);
|
||||
expect(looksLikeMattermostTargetId("Channel:abc")).toBe(true);
|
||||
expect(looksLikeMattermostTargetId("user:abc")).toBe(true);
|
||||
expect(looksLikeMattermostTargetId("group:abc")).toBe(true);
|
||||
expect(looksLikeMattermostTargetId("mattermost:abc")).toBe(true);
|
||||
});
|
||||
|
||||
it("recognizes @username", () => {
|
||||
expect(looksLikeMattermostTargetId("@alice")).toBe(true);
|
||||
});
|
||||
|
||||
it("does NOT recognize #channel (should go to directory)", () => {
|
||||
expect(looksLikeMattermostTargetId("#bookmarks")).toBe(false);
|
||||
expect(looksLikeMattermostTargetId("#off-topic")).toBe(false);
|
||||
});
|
||||
|
||||
it("recognizes 26-char alphanumeric Mattermost IDs", () => {
|
||||
expect(looksLikeMattermostTargetId("abcdefghijklmnopqrstuvwxyz")).toBe(true);
|
||||
expect(looksLikeMattermostTargetId("12345678901234567890123456")).toBe(true);
|
||||
expect(looksLikeMattermostTargetId("AbCdEf1234567890abcdef1234")).toBe(true);
|
||||
});
|
||||
|
||||
it("recognizes DM channel format (26__26)", () => {
|
||||
expect(
|
||||
looksLikeMattermostTargetId("abcdefghijklmnopqrstuvwxyz__12345678901234567890123456"),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it("rejects short strings that are not Mattermost IDs", () => {
|
||||
expect(looksLikeMattermostTargetId("password")).toBe(false);
|
||||
expect(looksLikeMattermostTargetId("hi")).toBe(false);
|
||||
expect(looksLikeMattermostTargetId("bookmarks")).toBe(false);
|
||||
expect(looksLikeMattermostTargetId("off-topic")).toBe(false);
|
||||
});
|
||||
|
||||
it("rejects strings longer than 26 chars that are not DM format", () => {
|
||||
expect(looksLikeMattermostTargetId("abcdefghijklmnopqrstuvwxyz1")).toBe(false);
|
||||
});
|
||||
});
|
||||
@@ -25,13 +25,16 @@ export function normalizeMattermostMessagingTarget(raw: string): string | undefi
|
||||
return id ? `@${id}` : undefined;
|
||||
}
|
||||
if (trimmed.startsWith("#")) {
|
||||
const id = trimmed.slice(1).trim();
|
||||
return id ? `channel:${id}` : undefined;
|
||||
// Strip # prefix and fall through to directory lookup (same as bare names).
|
||||
// The core's resolveMessagingTarget will use the directory adapter to
|
||||
// resolve the channel name to its Mattermost ID.
|
||||
return undefined;
|
||||
}
|
||||
return `channel:${trimmed}`;
|
||||
// Bare name without prefix — return undefined to allow directory lookup
|
||||
return undefined;
|
||||
}
|
||||
|
||||
export function looksLikeMattermostTargetId(raw: string): boolean {
|
||||
export function looksLikeMattermostTargetId(raw: string, normalized?: string): boolean {
|
||||
const trimmed = raw.trim();
|
||||
if (!trimmed) {
|
||||
return false;
|
||||
@@ -39,8 +42,9 @@ export function looksLikeMattermostTargetId(raw: string): boolean {
|
||||
if (/^(user|channel|group|mattermost):/i.test(trimmed)) {
|
||||
return true;
|
||||
}
|
||||
if (/^[@#]/.test(trimmed)) {
|
||||
if (trimmed.startsWith("@")) {
|
||||
return true;
|
||||
}
|
||||
return /^[a-z0-9]{8,}$/i.test(trimmed);
|
||||
// Mattermost IDs: 26-char alnum, or DM channels like "abc123__xyz789" (53 chars)
|
||||
return /^[a-z0-9]{26}$/i.test(trimmed) || /^[a-z0-9]{26}__[a-z0-9]{26}$/i.test(trimmed);
|
||||
}
|
||||
|
||||
@@ -70,6 +70,10 @@ export type MattermostAccountConfig = {
|
||||
/** Explicit callback URL (e.g. behind reverse proxy). */
|
||||
callbackUrl?: string;
|
||||
};
|
||||
interactions?: {
|
||||
/** External base URL used for Mattermost interaction callbacks. */
|
||||
callbackBaseUrl?: string;
|
||||
};
|
||||
};
|
||||
|
||||
export type MattermostConfig = {
|
||||
|
||||
@@ -182,4 +182,53 @@ describe("slackPlugin config", () => {
|
||||
expect(configured).toBe(false);
|
||||
expect(snapshot?.configured).toBe(false);
|
||||
});
|
||||
|
||||
it("does not mark partial configured-unavailable token status as configured", async () => {
|
||||
const snapshot = await slackPlugin.status?.buildAccountSnapshot?.({
|
||||
account: {
|
||||
accountId: "default",
|
||||
name: "Default",
|
||||
enabled: true,
|
||||
configured: false,
|
||||
botTokenStatus: "configured_unavailable",
|
||||
appTokenStatus: "missing",
|
||||
botTokenSource: "config",
|
||||
appTokenSource: "none",
|
||||
config: {},
|
||||
} as never,
|
||||
cfg: {} as OpenClawConfig,
|
||||
runtime: undefined,
|
||||
});
|
||||
|
||||
expect(snapshot?.configured).toBe(false);
|
||||
expect(snapshot?.botTokenStatus).toBe("configured_unavailable");
|
||||
expect(snapshot?.appTokenStatus).toBe("missing");
|
||||
});
|
||||
|
||||
it("keeps HTTP mode signing-secret unavailable accounts configured in snapshots", async () => {
|
||||
const snapshot = await slackPlugin.status?.buildAccountSnapshot?.({
|
||||
account: {
|
||||
accountId: "default",
|
||||
name: "Default",
|
||||
enabled: true,
|
||||
configured: true,
|
||||
mode: "http",
|
||||
botTokenStatus: "available",
|
||||
signingSecretStatus: "configured_unavailable",
|
||||
botTokenSource: "config",
|
||||
signingSecretSource: "config",
|
||||
config: {
|
||||
mode: "http",
|
||||
botToken: "xoxb-http",
|
||||
signingSecret: { source: "env", provider: "default", id: "SLACK_SIGNING_SECRET" },
|
||||
},
|
||||
} as never,
|
||||
cfg: {} as OpenClawConfig,
|
||||
runtime: undefined,
|
||||
});
|
||||
|
||||
expect(snapshot?.configured).toBe(true);
|
||||
expect(snapshot?.botTokenStatus).toBe("available");
|
||||
expect(snapshot?.signingSecretStatus).toBe("configured_unavailable");
|
||||
});
|
||||
});
|
||||
|
||||
@@ -7,6 +7,7 @@ import {
|
||||
formatPairingApproveHint,
|
||||
getChatChannelMeta,
|
||||
handleSlackMessageAction,
|
||||
inspectSlackAccount,
|
||||
listSlackMessageActions,
|
||||
listSlackAccountIds,
|
||||
listSlackDirectoryGroupsFromConfig,
|
||||
@@ -16,6 +17,8 @@ import {
|
||||
normalizeAccountId,
|
||||
normalizeSlackMessagingTarget,
|
||||
PAIRING_APPROVED_MESSAGE,
|
||||
projectCredentialSnapshotFields,
|
||||
resolveConfiguredFromRequiredCredentialStatuses,
|
||||
resolveDefaultSlackAccountId,
|
||||
resolveSlackAccount,
|
||||
resolveSlackReplyToMode,
|
||||
@@ -131,6 +134,7 @@ export const slackPlugin: ChannelPlugin<ResolvedSlackAccount> = {
|
||||
config: {
|
||||
listAccountIds: (cfg) => listSlackAccountIds(cfg),
|
||||
resolveAccount: (cfg, accountId) => resolveSlackAccount({ cfg, accountId }),
|
||||
inspectAccount: (cfg, accountId) => inspectSlackAccount({ cfg, accountId }),
|
||||
defaultAccountId: (cfg) => resolveDefaultSlackAccountId(cfg),
|
||||
setAccountEnabled: ({ cfg, accountId, enabled }) =>
|
||||
setAccountEnabledInConfigSection({
|
||||
@@ -428,14 +432,23 @@ export const slackPlugin: ChannelPlugin<ResolvedSlackAccount> = {
|
||||
return await getSlackRuntime().channel.slack.probeSlack(token, timeoutMs);
|
||||
},
|
||||
buildAccountSnapshot: ({ account, runtime, probe }) => {
|
||||
const configured = isSlackAccountConfigured(account);
|
||||
const mode = account.config.mode ?? "socket";
|
||||
const configured =
|
||||
(mode === "http"
|
||||
? resolveConfiguredFromRequiredCredentialStatuses(account, [
|
||||
"botTokenStatus",
|
||||
"signingSecretStatus",
|
||||
])
|
||||
: resolveConfiguredFromRequiredCredentialStatuses(account, [
|
||||
"botTokenStatus",
|
||||
"appTokenStatus",
|
||||
])) ?? isSlackAccountConfigured(account);
|
||||
return {
|
||||
accountId: account.accountId,
|
||||
name: account.name,
|
||||
enabled: account.enabled,
|
||||
configured,
|
||||
botTokenSource: account.botTokenSource,
|
||||
appTokenSource: account.appTokenSource,
|
||||
...projectCredentialSnapshotFields(account),
|
||||
running: runtime?.running ?? false,
|
||||
lastStartAt: runtime?.lastStartAt ?? null,
|
||||
lastStopAt: runtime?.lastStopAt ?? null,
|
||||
|
||||
@@ -7,6 +7,7 @@ import {
|
||||
deleteAccountFromConfigSection,
|
||||
formatPairingApproveHint,
|
||||
getChatChannelMeta,
|
||||
inspectTelegramAccount,
|
||||
listTelegramAccountIds,
|
||||
listTelegramDirectoryGroupsFromConfig,
|
||||
listTelegramDirectoryPeersFromConfig,
|
||||
@@ -17,6 +18,8 @@ import {
|
||||
PAIRING_APPROVED_MESSAGE,
|
||||
parseTelegramReplyToMessageId,
|
||||
parseTelegramThreadId,
|
||||
projectCredentialSnapshotFields,
|
||||
resolveConfiguredFromCredentialStatuses,
|
||||
resolveDefaultTelegramAccountId,
|
||||
resolveAllowlistProviderRuntimeGroupPolicy,
|
||||
resolveDefaultGroupPolicy,
|
||||
@@ -43,7 +46,7 @@ function findTelegramTokenOwnerAccountId(params: {
|
||||
const normalizedAccountId = normalizeAccountId(params.accountId);
|
||||
const tokenOwners = new Map<string, string>();
|
||||
for (const id of listTelegramAccountIds(params.cfg)) {
|
||||
const account = resolveTelegramAccount({ cfg: params.cfg, accountId: id });
|
||||
const account = inspectTelegramAccount({ cfg: params.cfg, accountId: id });
|
||||
const token = (account.token ?? "").trim();
|
||||
if (!token) {
|
||||
continue;
|
||||
@@ -122,6 +125,7 @@ export const telegramPlugin: ChannelPlugin<ResolvedTelegramAccount, TelegramProb
|
||||
config: {
|
||||
listAccountIds: (cfg) => listTelegramAccountIds(cfg),
|
||||
resolveAccount: (cfg, accountId) => resolveTelegramAccount({ cfg, accountId }),
|
||||
inspectAccount: (cfg, accountId) => inspectTelegramAccount({ cfg, accountId }),
|
||||
defaultAccountId: (cfg) => resolveDefaultTelegramAccountId(cfg),
|
||||
setAccountEnabled: ({ cfg, accountId, enabled }) =>
|
||||
setAccountEnabledInConfigSection({
|
||||
@@ -416,6 +420,7 @@ export const telegramPlugin: ChannelPlugin<ResolvedTelegramAccount, TelegramProb
|
||||
return { ...audit, unresolvedGroups, hasWildcardUnmentionedGroups };
|
||||
},
|
||||
buildAccountSnapshot: ({ account, cfg, runtime, probe, audit }) => {
|
||||
const configuredFromStatus = resolveConfiguredFromCredentialStatuses(account);
|
||||
const ownerAccountId = findTelegramTokenOwnerAccountId({
|
||||
cfg,
|
||||
accountId: account.accountId,
|
||||
@@ -426,7 +431,8 @@ export const telegramPlugin: ChannelPlugin<ResolvedTelegramAccount, TelegramProb
|
||||
ownerAccountId,
|
||||
})
|
||||
: null;
|
||||
const configured = Boolean(account.token?.trim()) && !ownerAccountId;
|
||||
const configured =
|
||||
(configuredFromStatus ?? Boolean(account.token?.trim())) && !ownerAccountId;
|
||||
const groups =
|
||||
cfg.channels?.telegram?.accounts?.[account.accountId]?.groups ??
|
||||
cfg.channels?.telegram?.groups;
|
||||
@@ -440,7 +446,7 @@ export const telegramPlugin: ChannelPlugin<ResolvedTelegramAccount, TelegramProb
|
||||
name: account.name,
|
||||
enabled: account.enabled,
|
||||
configured,
|
||||
tokenSource: account.tokenSource,
|
||||
...projectCredentialSnapshotFields(account),
|
||||
running: runtime?.running ?? false,
|
||||
lastStartAt: runtime?.lastStartAt ?? null,
|
||||
lastStopAt: runtime?.lastStopAt ?? null,
|
||||
|
||||
11
pnpm-lock.yaml
generated
11
pnpm-lock.yaml
generated
@@ -553,8 +553,8 @@ importers:
|
||||
specifier: 3.0.0
|
||||
version: 3.0.0
|
||||
dompurify:
|
||||
specifier: ^3.3.1
|
||||
version: 3.3.1
|
||||
specifier: ^3.3.2
|
||||
version: 3.3.2
|
||||
lit:
|
||||
specifier: ^3.3.2
|
||||
version: 3.3.2
|
||||
@@ -3820,8 +3820,9 @@ packages:
|
||||
resolution: {integrity: sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==}
|
||||
engines: {node: '>= 4'}
|
||||
|
||||
dompurify@3.3.1:
|
||||
resolution: {integrity: sha512-qkdCKzLNtrgPFP1Vo+98FRzJnBRGe4ffyCea9IwHB1fyxPOeNTHpLKYGd4Uk9xvNoH0ZoOjwZxNptyMwqrId1Q==}
|
||||
dompurify@3.3.2:
|
||||
resolution: {integrity: sha512-6obghkliLdmKa56xdbLOpUZ43pAR6xFy1uOrxBaIDjT+yaRuuybLjGS9eVBoSR/UPU5fq3OXClEHLJNGvbxKpQ==}
|
||||
engines: {node: '>=20'}
|
||||
|
||||
domutils@3.2.2:
|
||||
resolution: {integrity: sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==}
|
||||
@@ -9885,7 +9886,7 @@ snapshots:
|
||||
dependencies:
|
||||
domelementtype: 2.3.0
|
||||
|
||||
dompurify@3.3.1:
|
||||
dompurify@3.3.2:
|
||||
optionalDependencies:
|
||||
'@types/trusted-types': 2.0.7
|
||||
|
||||
|
||||
@@ -50,9 +50,16 @@ API key
|
||||
- `GEMINI_API_KEY` env var
|
||||
- Or set `skills."nano-banana-pro".apiKey` / `skills."nano-banana-pro".env.GEMINI_API_KEY` in `~/.openclaw/openclaw.json`
|
||||
|
||||
Specific aspect ratio (optional)
|
||||
|
||||
```bash
|
||||
uv run {baseDir}/scripts/generate_image.py --prompt "portrait photo" --filename "output.png" --aspect-ratio 9:16
|
||||
```
|
||||
|
||||
Notes
|
||||
|
||||
- Resolutions: `1K` (default), `2K`, `4K`.
|
||||
- Aspect ratios: `1:1`, `2:3`, `3:2`, `3:4`, `4:3`, `4:5`, `5:4`, `9:16`, `16:9`, `21:9`. Without `--aspect-ratio` / `-a`, the model picks freely - use this flag for avatars, profile pics, or consistent batch generation.
|
||||
- Use timestamps in filenames: `yyyy-mm-dd-hh-mm-ss-name.png`.
|
||||
- The script prints a `MEDIA:` line for OpenClaw to auto-attach on supported chat providers.
|
||||
- Do not read the image back; report the saved path only.
|
||||
|
||||
@@ -21,6 +21,19 @@ import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
SUPPORTED_ASPECT_RATIOS = [
|
||||
"1:1",
|
||||
"2:3",
|
||||
"3:2",
|
||||
"3:4",
|
||||
"4:3",
|
||||
"4:5",
|
||||
"5:4",
|
||||
"9:16",
|
||||
"16:9",
|
||||
"21:9",
|
||||
]
|
||||
|
||||
|
||||
def get_api_key(provided_key: str | None) -> str | None:
|
||||
"""Get API key from argument first, then environment."""
|
||||
@@ -56,6 +69,12 @@ def main():
|
||||
default="1K",
|
||||
help="Output resolution: 1K (default), 2K, or 4K"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--aspect-ratio", "-a",
|
||||
choices=SUPPORTED_ASPECT_RATIOS,
|
||||
default=None,
|
||||
help=f"Output aspect ratio (default: model decides). Options: {', '.join(SUPPORTED_ASPECT_RATIOS)}"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--api-key", "-k",
|
||||
help="Gemini API key (overrides GEMINI_API_KEY env var)"
|
||||
@@ -127,14 +146,17 @@ def main():
|
||||
print(f"Generating image with resolution {output_resolution}...")
|
||||
|
||||
try:
|
||||
# Build image config with optional aspect ratio
|
||||
image_cfg_kwargs = {"image_size": output_resolution}
|
||||
if args.aspect_ratio:
|
||||
image_cfg_kwargs["aspect_ratio"] = args.aspect_ratio
|
||||
|
||||
response = client.models.generate_content(
|
||||
model="gemini-3-pro-image-preview",
|
||||
contents=contents,
|
||||
config=types.GenerateContentConfig(
|
||||
response_modalities=["TEXT", "IMAGE"],
|
||||
image_config=types.ImageConfig(
|
||||
image_size=output_resolution
|
||||
)
|
||||
image_config=types.ImageConfig(**image_cfg_kwargs)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
49
src/agents/anthropic-payload-log.test.ts
Normal file
49
src/agents/anthropic-payload-log.test.ts
Normal file
@@ -0,0 +1,49 @@
|
||||
import crypto from "node:crypto";
|
||||
import type { StreamFn } from "@mariozechner/pi-agent-core";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { createAnthropicPayloadLogger } from "./anthropic-payload-log.js";
|
||||
|
||||
describe("createAnthropicPayloadLogger", () => {
|
||||
it("redacts image base64 payload data before writing logs", async () => {
|
||||
const lines: string[] = [];
|
||||
const logger = createAnthropicPayloadLogger({
|
||||
env: { OPENCLAW_ANTHROPIC_PAYLOAD_LOG: "1" },
|
||||
writer: {
|
||||
filePath: "memory",
|
||||
write: (line) => lines.push(line),
|
||||
},
|
||||
});
|
||||
expect(logger).not.toBeNull();
|
||||
|
||||
const payload = {
|
||||
messages: [
|
||||
{
|
||||
role: "user",
|
||||
content: [
|
||||
{
|
||||
type: "image",
|
||||
source: { type: "base64", media_type: "image/png", data: "QUJDRA==" },
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
};
|
||||
const streamFn: StreamFn = ((_, __, options) => {
|
||||
options?.onPayload?.(payload);
|
||||
return {} as never;
|
||||
}) as StreamFn;
|
||||
|
||||
const wrapped = logger?.wrapStreamFn(streamFn);
|
||||
await wrapped?.({ api: "anthropic-messages" } as never, { messages: [] } as never, {});
|
||||
|
||||
const event = JSON.parse(lines[0]?.trim() ?? "{}") as Record<string, unknown>;
|
||||
const message = ((event.payload as { messages?: unknown[] } | undefined)?.messages ??
|
||||
[]) as Array<Record<string, unknown>>;
|
||||
const source = (((message[0]?.content as Array<Record<string, unknown>> | undefined) ?? [])[0]
|
||||
?.source ?? {}) as Record<string, unknown>;
|
||||
expect(source.data).toBe("<redacted>");
|
||||
expect(source.bytes).toBe(4);
|
||||
expect(source.sha256).toBe(crypto.createHash("sha256").update("QUJDRA==").digest("hex"));
|
||||
expect(event.payloadDigest).toBeDefined();
|
||||
});
|
||||
});
|
||||
@@ -7,6 +7,7 @@ import { createSubsystemLogger } from "../logging/subsystem.js";
|
||||
import { resolveUserPath } from "../utils.js";
|
||||
import { parseBooleanValue } from "../utils/boolean.js";
|
||||
import { safeJsonStringify } from "../utils/safe-json.js";
|
||||
import { redactImageDataForDiagnostics } from "./payload-redaction.js";
|
||||
import { getQueuedFileWriter, type QueuedFileWriter } from "./queued-file-writer.js";
|
||||
|
||||
type PayloadLogStage = "request" | "usage";
|
||||
@@ -103,6 +104,7 @@ export function createAnthropicPayloadLogger(params: {
|
||||
modelId?: string;
|
||||
modelApi?: string | null;
|
||||
workspaceDir?: string;
|
||||
writer?: PayloadLogWriter;
|
||||
}): AnthropicPayloadLogger | null {
|
||||
const env = params.env ?? process.env;
|
||||
const cfg = resolvePayloadLogConfig(env);
|
||||
@@ -110,7 +112,7 @@ export function createAnthropicPayloadLogger(params: {
|
||||
return null;
|
||||
}
|
||||
|
||||
const writer = getWriter(cfg.filePath);
|
||||
const writer = params.writer ?? getWriter(cfg.filePath);
|
||||
const base: Omit<PayloadLogEvent, "ts" | "stage"> = {
|
||||
runId: params.runId,
|
||||
sessionId: params.sessionId,
|
||||
@@ -135,12 +137,13 @@ export function createAnthropicPayloadLogger(params: {
|
||||
return streamFn(model, context, options);
|
||||
}
|
||||
const nextOnPayload = (payload: unknown) => {
|
||||
const redactedPayload = redactImageDataForDiagnostics(payload);
|
||||
record({
|
||||
...base,
|
||||
ts: new Date().toISOString(),
|
||||
stage: "request",
|
||||
payload,
|
||||
payloadDigest: digest(payload),
|
||||
payload: redactedPayload,
|
||||
payloadDigest: digest(redactedPayload),
|
||||
});
|
||||
options?.onPayload?.(payload);
|
||||
};
|
||||
|
||||
@@ -0,0 +1,141 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { captureEnv } from "../../test-utils/env.js";
|
||||
import { resolveApiKeyForProfile } from "./oauth.js";
|
||||
import {
|
||||
clearRuntimeAuthProfileStoreSnapshots,
|
||||
ensureAuthProfileStore,
|
||||
saveAuthProfileStore,
|
||||
} from "./store.js";
|
||||
import type { AuthProfileStore } from "./types.js";
|
||||
|
||||
const { getOAuthApiKeyMock } = vi.hoisted(() => ({
|
||||
getOAuthApiKeyMock: vi.fn(async () => {
|
||||
throw new Error("Failed to extract accountId from token");
|
||||
}),
|
||||
}));
|
||||
|
||||
vi.mock("@mariozechner/pi-ai", async () => {
|
||||
const actual = await vi.importActual<typeof import("@mariozechner/pi-ai")>("@mariozechner/pi-ai");
|
||||
return {
|
||||
...actual,
|
||||
getOAuthApiKey: getOAuthApiKeyMock,
|
||||
getOAuthProviders: () => [
|
||||
{ id: "openai-codex", envApiKey: "OPENAI_API_KEY", oauthTokenEnv: "OPENAI_OAUTH_TOKEN" },
|
||||
{ id: "anthropic", envApiKey: "ANTHROPIC_API_KEY", oauthTokenEnv: "ANTHROPIC_OAUTH_TOKEN" },
|
||||
],
|
||||
};
|
||||
});
|
||||
|
||||
function createExpiredOauthStore(params: {
|
||||
profileId: string;
|
||||
provider: string;
|
||||
access?: string;
|
||||
}): AuthProfileStore {
|
||||
return {
|
||||
version: 1,
|
||||
profiles: {
|
||||
[params.profileId]: {
|
||||
type: "oauth",
|
||||
provider: params.provider,
|
||||
access: params.access ?? "cached-access-token",
|
||||
refresh: "refresh-token",
|
||||
expires: Date.now() - 60_000,
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
describe("resolveApiKeyForProfile openai-codex refresh fallback", () => {
|
||||
const envSnapshot = captureEnv([
|
||||
"OPENCLAW_STATE_DIR",
|
||||
"OPENCLAW_AGENT_DIR",
|
||||
"PI_CODING_AGENT_DIR",
|
||||
]);
|
||||
let tempRoot = "";
|
||||
let agentDir = "";
|
||||
|
||||
beforeEach(async () => {
|
||||
getOAuthApiKeyMock.mockClear();
|
||||
clearRuntimeAuthProfileStoreSnapshots();
|
||||
tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-codex-refresh-fallback-"));
|
||||
agentDir = path.join(tempRoot, "agents", "main", "agent");
|
||||
await fs.mkdir(agentDir, { recursive: true });
|
||||
process.env.OPENCLAW_STATE_DIR = tempRoot;
|
||||
process.env.OPENCLAW_AGENT_DIR = agentDir;
|
||||
process.env.PI_CODING_AGENT_DIR = agentDir;
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
clearRuntimeAuthProfileStoreSnapshots();
|
||||
envSnapshot.restore();
|
||||
await fs.rm(tempRoot, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("falls back to cached access token when openai-codex refresh fails on accountId extraction", async () => {
|
||||
const profileId = "openai-codex:default";
|
||||
saveAuthProfileStore(
|
||||
createExpiredOauthStore({
|
||||
profileId,
|
||||
provider: "openai-codex",
|
||||
}),
|
||||
agentDir,
|
||||
);
|
||||
|
||||
const result = await resolveApiKeyForProfile({
|
||||
store: ensureAuthProfileStore(agentDir),
|
||||
profileId,
|
||||
agentDir,
|
||||
});
|
||||
|
||||
expect(result).toEqual({
|
||||
apiKey: "cached-access-token",
|
||||
provider: "openai-codex",
|
||||
email: undefined,
|
||||
});
|
||||
expect(getOAuthApiKeyMock).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
it("keeps throwing for non-codex providers on the same refresh error", async () => {
|
||||
const profileId = "anthropic:default";
|
||||
saveAuthProfileStore(
|
||||
createExpiredOauthStore({
|
||||
profileId,
|
||||
provider: "anthropic",
|
||||
}),
|
||||
agentDir,
|
||||
);
|
||||
|
||||
await expect(
|
||||
resolveApiKeyForProfile({
|
||||
store: ensureAuthProfileStore(agentDir),
|
||||
profileId,
|
||||
agentDir,
|
||||
}),
|
||||
).rejects.toThrow(/OAuth token refresh failed for anthropic/);
|
||||
});
|
||||
|
||||
it("does not use fallback for unrelated openai-codex refresh errors", async () => {
|
||||
const profileId = "openai-codex:default";
|
||||
saveAuthProfileStore(
|
||||
createExpiredOauthStore({
|
||||
profileId,
|
||||
provider: "openai-codex",
|
||||
}),
|
||||
agentDir,
|
||||
);
|
||||
getOAuthApiKeyMock.mockImplementationOnce(async () => {
|
||||
throw new Error("invalid_grant");
|
||||
});
|
||||
|
||||
await expect(
|
||||
resolveApiKeyForProfile({
|
||||
store: ensureAuthProfileStore(agentDir),
|
||||
profileId,
|
||||
agentDir,
|
||||
}),
|
||||
).rejects.toThrow(/OAuth token refresh failed for openai-codex/);
|
||||
});
|
||||
});
|
||||
@@ -10,6 +10,7 @@ import { withFileLock } from "../../infra/file-lock.js";
|
||||
import { refreshQwenPortalCredentials } from "../../providers/qwen-portal-oauth.js";
|
||||
import { resolveSecretRefString, type SecretRefResolveCache } from "../../secrets/resolve.js";
|
||||
import { refreshChutesTokens } from "../chutes-oauth.js";
|
||||
import { normalizeProviderId } from "../model-selection.js";
|
||||
import { AUTH_STORE_LOCK_OPTIONS, log } from "./constants.js";
|
||||
import { resolveTokenExpiryState } from "./credential-state.js";
|
||||
import { formatAuthDoctorHint } from "./doctor.js";
|
||||
@@ -87,6 +88,27 @@ function buildOAuthProfileResult(params: {
|
||||
});
|
||||
}
|
||||
|
||||
function extractErrorMessage(error: unknown): string {
|
||||
return error instanceof Error ? error.message : String(error);
|
||||
}
|
||||
|
||||
function shouldUseOpenaiCodexRefreshFallback(params: {
|
||||
provider: string;
|
||||
credentials: OAuthCredentials;
|
||||
error: unknown;
|
||||
}): boolean {
|
||||
if (normalizeProviderId(params.provider) !== "openai-codex") {
|
||||
return false;
|
||||
}
|
||||
const message = extractErrorMessage(params.error);
|
||||
if (!/extract\s+accountid\s+from\s+token/i.test(message)) {
|
||||
return false;
|
||||
}
|
||||
return (
|
||||
typeof params.credentials.access === "string" && params.credentials.access.trim().length > 0
|
||||
);
|
||||
}
|
||||
|
||||
type ResolveApiKeyForProfileParams = {
|
||||
cfg?: OpenClawConfig;
|
||||
store: AuthProfileStore;
|
||||
@@ -434,7 +456,25 @@ export async function resolveApiKeyForProfile(
|
||||
}
|
||||
}
|
||||
|
||||
const message = error instanceof Error ? error.message : String(error);
|
||||
if (
|
||||
shouldUseOpenaiCodexRefreshFallback({
|
||||
provider: cred.provider,
|
||||
credentials: cred,
|
||||
error,
|
||||
})
|
||||
) {
|
||||
log.warn("openai-codex oauth refresh failed; using cached access token fallback", {
|
||||
profileId,
|
||||
provider: cred.provider,
|
||||
});
|
||||
return buildApiKeyProfileResult({
|
||||
apiKey: cred.access,
|
||||
provider: cred.provider,
|
||||
email: cred.email,
|
||||
});
|
||||
}
|
||||
|
||||
const message = extractErrorMessage(error);
|
||||
const hint = formatAuthDoctorHint({
|
||||
cfg,
|
||||
store: refreshedStore,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user