Merge branch 'main' into feat/webview2-bridge-spa

This commit is contained in:
Val Alexander
2026-05-08 17:30:24 -05:00
committed by GitHub
1001 changed files with 19152 additions and 7310 deletions

View File

@@ -599,7 +599,7 @@ jobs:
published_upgrade_survivor_baselines: ${{ needs.resolve_target.outputs.run_release_soak == 'true' && 'last-stable-4 2026.4.23 2026.5.2 2026.4.15' || '' }}
published_upgrade_survivor_scenarios: ${{ needs.resolve_target.outputs.run_release_soak == 'true' && 'reported-issues' || '' }}
telegram_mode: mock-openai
telegram_scenarios: telegram-help-command,telegram-commands-command,telegram-tools-compact-command,telegram-whoami-command,telegram-context-command,telegram-current-session-status-tool,telegram-mention-gating
telegram_scenarios: telegram-help-command,telegram-commands-command,telegram-tools-compact-command,telegram-whoami-command,telegram-status-command,telegram-other-bot-command-gating,telegram-context-command,telegram-mentioned-message-reply,telegram-reply-chain-exact-marker,telegram-stream-final-single-message,telegram-long-final-reuses-preview,telegram-mention-gating
secrets:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}

View File

@@ -6,8 +6,12 @@ Docs: https://docs.openclaw.ai
### Changes
- Active Memory: support concrete `plugins.entries.active-memory.config.toolsAllow` recall tool names for custom memory plugins while keeping the built-in memory-core default on `memory_search`/`memory_get` and preserving `memory_recall` automatically for `plugins.slots.memory: "memory-lancedb"`.
- Telegram/Feishu: honor configured per-agent and global `reasoningDefault` values when deciding whether channel reasoning previews should stream or stay hidden, addressing the preview-default part of #73182. Thanks @anagnorisis2peripeteia.
- Docker: run the runtime image under `tini` so long-lived containers reap orphaned child processes and forward signals correctly. (#77885) Thanks @VintageAyu.
- Google/Gemini: normalize retired `google/gemini-3-pro-preview` and `google-gemini-cli/gemini-3-pro-preview` selections to `google/gemini-3.1-pro-preview` before they are written to model config.
- Google/Gemini: emit canonical `google/gemini-3.1-pro-preview` ids from configured provider catalog rows so model list and selection paths can test Gemini 3.1 instead of retired Gemini 3 Pro.
- Google/Gemini: normalize nested proxy-provider catalog ids like `google/gemini-3-pro-preview` to `google/gemini-3.1-pro-preview`, so Kilo-style configured catalogs test Gemini 3.1 instead of the retired Gemini 3 Pro id.
- Amazon Bedrock: support `serviceTier` parameter for Bedrock models, configurable via `agents.defaults.params.serviceTier` or per-model in `agents.defaults.models`. Valid values: `default`, `flex`, `priority`, `reserved`. (#64512) Thanks @mobilinkd.
- Control UI: read the Quick Settings exec policy badge from `tools.exec.security` instead of the non-schema `agents.defaults.exec.security` path, so configured `full`/`deny` values render accurately. Fixes #78311. Thanks @FriedBack.
- Control UI/usage: add transcript-backed historical lineage rollups for rotated logical sessions, with current-instance vs historical-lineage scope controls and long-range presets so usage history stays visible after restarts and updates. Fixes #50701. Thanks @dev-gideon-llc and @BunsDev.
@@ -20,10 +24,12 @@ Docs: https://docs.openclaw.ai
- Agents/compaction: keep contributor diagnostics to a bounded top-three selection without sorting the full history. Thanks @shakkernerd.
- Sessions/UI: avoid full-array sorting while selecting ACPX leases, Google Meet calendar events, and latest chat sessions. Thanks @shakkernerd.
- Telegram: preserve the channel-specific 10-option poll cap in the unified outbound adapter so over-limit polls are rejected before send. (#78762) Thanks @obviyus.
- Telegram/streaming: continue over-limit draft previews in a new message instead of stopping when rendered preview text crosses Telegram's message limit. (#74508) Thanks @anagnorisis2peripeteia.
- Slack: route handled top-level channel turns in implicit-conversation channels to thread-scoped sessions when Slack reply threading is enabled, keeping the root turn and later thread replies on one OpenClaw session. (#78522) Thanks @zeroth-blip.
- Telegram: re-probe the primary fetch transport after repeated sticky fallback success so transient IPv4 or pinned-IP fallback promotion can recover without a gateway restart. Fixes #77088. (#77157) Thanks @MkDev11.
- Runtime/install: raise the supported Node 22 floor to `22.16+` so native SQLite query handling can rely on the `node:sqlite` statement metadata API while continuing to recommend Node 24. (#78921)
- Discord/voice: make duplicate same-guild auto-join entries resolve to the last configured channel so moving an agent between voice channels does not keep joining the stale channel.
- Discord/voice: add realtime `/vc` modes so Discord voice channels can run as STT/TTS, a realtime talk buffer with the OpenClaw agent brain, or a bidi realtime session with `openclaw_agent_consult`.
- Discord/voice: include a bounded one-line STT transcript preview in verbose voice logs so live voice debugging shows what speakers said before the agent reply.
- Codex app-server: pin the managed Codex harness and Codex CLI smoke package to `@openai/codex@0.129.0`, defer OpenClaw integration dynamic tools behind Codex tool search by default, and accept current Codex service-tier values so legacy `fast` settings survive the stable harness upgrade as `priority`.
- Codex app-server: default implicit local stdio app-server permissions to guardian when Codex system requirements disallow the YOLO approval, reviewer, or sandbox value, including hostname-scoped remote sandbox entries, avoiding turn-start failures on managed hosts that permit only reviewed approval or narrower sandboxes.
@@ -50,6 +56,7 @@ Docs: https://docs.openclaw.ai
- Gateway/sessions: fast-path already-qualified model refs while building session-list rows so `openclaw sessions` and Control UI session lists avoid heavyweight model resolution on large stores. (#77902) Thanks @ragesaq.
- Contributor PRs: remind external contributors to redact private information like IP addresses, API keys, phone numbers, and non-public endpoints from real behavior proof. Thanks @pashpashpash.
- Codex/approvals: in Codex approval modes, stop installing the pre-guardian native `PermissionRequest` hook by default so Codex's reviewer can approve safe commands before OpenClaw surfaces an approval, remember `allow-always` decisions for identical Codex native `PermissionRequest` payloads within the active session window, and make plugin approval requests validate/render their actual allowed decisions so Telegram and other native approval UIs cannot offer stale actions. Thanks @shakkernerd.
- ACP bridge: relay Gateway exec approval prompts from active ACP turns to the ACP client's `session/request_permission` handler before resolving the Gateway approval. Thanks @amknight.
- Codex/plugins: enable migrated source-installed `openai-curated` Codex plugins in the same Codex harness thread with explicit `codexPlugins` config, cached app readiness, and fail-closed destructive-action policy. Thanks @kevinslin.
- Codex/plugins: enforce native plugin destructive-action policy with Codex app-level `destructive_enabled` config instead of OpenClaw-maintained per-tool deny lists, leave plugin app `open_world_enabled` on by default, and invalidate existing plugin app thread bindings so old generated app config is rebuilt. Thanks @kevinslin.
- PR triage: mark external pull requests with `proof: supplied` when Barnacle finds structured real behavior proof, keep stale negative proof labels in sync across CRLF-edited PR bodies, and let ClawSweeper own the stronger `proof: sufficient` judgement.
@@ -84,6 +91,7 @@ Docs: https://docs.openclaw.ai
- Slack/performance: reduce message preparation, stream recipient lookup, and thread-context allocation overhead on Slack reply hot paths. Thanks @vincentkoc.
- Channels/streaming: cap progress-draft tool lines by default so edited progress boxes avoid jumpy reflow from long wrapped lines.
- Control UI/chat: add an agent-first filter to the chat session picker, keep chat controls/composer responsive across phone/tablet/desktop widths, keep desktop chat controls on one row, avoid duplicate avatar refreshes during initial chat load, and hide that row while scrolling down the transcript. Thanks @BunsDev.
- Control UI/chat: strip untrusted sender metadata from live streams and transcript display, preserve canvas preview anchors, and stop operator UI clients from injecting their internal client id as sender identity. Fixes #78739. Thanks @tmimmanuel, @guguangxin-eng, @hclsys, and @BunsDev.
- Control UI/chat: collapse consecutive duplicate text messages into one bubble with a count so repeated text-only messages stay compact without hiding nearby context.
- Control UI/chat and Sessions: label inherited thinking defaults separately from explicit overrides while preserving provider-supplied option labels. Fixes #77581. Thanks @BunsDev and @Beandon13.
- Agents/runtime: add prepared runtime foundation contracts for carrying provider, model, tool, TTS, and outbound runtime facts through later reply-path migrations. Thanks @mcaxtr.
@@ -184,16 +192,25 @@ Docs: https://docs.openclaw.ai
### Fixes
- Dependencies: pin the transitive `fast-uri` production dependency to `3.1.2` so the production dependency audit no longer resolves the vulnerable `<=3.1.1` range. Thanks @shakkernerd.
- Cron/agents: recognize same-target `edit` → `write` recovery in `isSameToolMutationAction`, so a successful `write` to a path clears an earlier failed `edit` on the same path. Stops cron from reporting fatal failures when an agent self-heals across `edit` and `write`, while preserving same-tool fingerprint matching, blocking different-target writes, and excluding tools (including `apply_patch`) whose real call args do not produce a stable `path` fingerprint segment. Fixes #79024. Thanks @RenzoMXD.
- Gateway/Tailscale: add opt-in `gateway.tailscale.preserveFunnel` so when `tailscale.mode = "serve"` and an externally configured Tailscale Funnel route already covers the gateway port, OpenClaw skips re-applying `tailscale serve` on startup and skips the `resetOnExit` teardown for that run, keeping operator-managed Funnel exposure alive across gateway restarts. Fixes #57241. Thanks @RenzoMXD.
- Agents/compaction: keep the recent tail after manual `/compact` when Pi returns an empty or no-op compaction summary, preventing blank checkpoints from replacing the live context.
- Native commands: handle slash commands before workspace and agent-reply bootstrap so Telegram `/status` and other command-only native replies do not wait behind full agent turn setup.
- Plugins/Nix: allow externally configured plugin roots under `/nix/store` to load in `OPENCLAW_NIX_MODE=1` while keeping normal external plugin hardlink rejection unchanged. Thanks @joshp123.
- fix(discord): gate user allowlist name resolution [AI]. (#79002) Thanks @pgondhi987.
- fix(msteams): gate startup user allowlist resolution [AI]. (#79003) Thanks @pgondhi987.
- Infra/fetch-timeout: pass `operation` and `url` context to `buildTimeoutAbortSignal` from the music-generate reference fetch and the Matrix guarded redirect transport, so the `fetch timeout reached; aborting operation` warning carries actionable structured fields instead of a bare line. Fixes #79195. Thanks @pandadev66.
- Harden macOS shell wrapper allowlist parsing [AI]. (#78518) Thanks @pgondhi987.
- macOS/config: reject stale or destructive app fallback config writes before direct replacement and keep rejected payloads as private audit artifacts, so `gateway.mode`, metadata, and auth are not silently clobbered. Fixes #64973 and #74890. Thanks @BunsDev.
- Gateway/macOS: include Apple Silicon Homebrew bin and sbin directories in generated LaunchAgent service PATHs so `openclaw gateway restart` keeps Homebrew Node installs reachable. Fixes #79232. Thanks @BunsDev.
- Doctor/OpenAI: stop pinning migrated `openai-codex/*` routes to the Codex runtime so mixed-provider agents keep automatic PI routing for MiniMax, Anthropic, and other non-OpenAI model switches.
- Gateway/macOS: `openclaw gateway stop` now uses `launchctl bootout` by default instead of unconditionally calling `launchctl disable`, so KeepAlive auto-recovery still works after unexpected crashes; use the new `--disable` flag to opt into the persistent-disable behavior when a manual stop should survive reboots. Fixes #77934. Thanks @bmoran1022.
- Gateway/macOS: `repairLaunchAgentBootstrap` no longer kickstarts an already-running LaunchAgent, preventing unnecessary service restarts and session disconnects when repair runs against a healthy gateway. Fixes #77428. Thanks @ramitrkar-hash.
- Gateway/macOS: `openclaw gateway stop --disable` now persists the LaunchAgent disable bit even after a previous bootout left the service not loaded, keeping the explicit stay-down path reliable. (#78412) Thanks @wdeveloper16.
- CLI/status: keep lean `openclaw status --json` off manifest-backed channel discovery so configured-channel checks do not repeatedly rescan plugin metadata. Fixes #79129.
- Control UI/chat: hide retired and non-public Google Gemini model IDs from chat model catalogs and route the bare `gemini-3-pro` alias to Gemini 3.1 Pro Preview instead of the shut-down Gemini 3 Pro Preview. Thanks @BunsDev.
- CLI/infer: canonicalize case-only catalog model refs in `infer model run --model` so mixed-case provider/model strings resolve to the canonical catalog entry instead of failing with `Unknown model`. (#78940) Thanks @ai-hpc.
- CLI/install: refuse state-mutating OpenClaw CLI runs as root by default, keep an explicit `OPENCLAW_ALLOW_ROOT=1` escape hatch for intentional root/container use, and update DigitalOcean setup guidance to run OpenClaw as a non-root user. Fixes #67478. Thanks @Jerry-Xin and @natechicago.
- Auto-reply/media: resolve `scp` from `PATH` when staging sandbox media so nonstandard OpenSSH installs can copy remote attachments.
- Agents/PI: route PI-native OpenAI-compatible default streams through OpenClaw boundary-aware transports so local-compatible model runs keep API-key injection and transport policy.
@@ -224,6 +241,7 @@ Docs: https://docs.openclaw.ai
- Docs/Docker: document a local Compose override for Docker Desktop DNS failures in the shared-network `openclaw-cli` sidecar, keeping the default compose setup hardened while unblocking `openclaw plugins install` when users opt in. Fixes #79018. Thanks @Jason-Vaughan.
- Installer: when npm installs `openclaw` outside the parent shell PATH, print follow-up commands with the resolved binary path instead of telling users to run `openclaw` from a shell that will report `command not found`. Fixes #72382. Thanks @jbob762.
- Plugins/runtime: share MIME and JSON Schema helpers across bundled plugins while preserving canonical media MIME inference, browser URL wildcard semantics, migration home-path resolution, QA request-limit responses, and extensionless text file previews.
- Agents/memory flush: persist the pre-increment compaction counter after flush-triggered compaction so consecutive eligible compaction cycles run memoryFlush instead of alternating. Fixes #12590. Refs #12760, #26145, and #46513. Thanks @Kaspre, @lailoo, @drvoss, @Br1an67, and @dial481.
- Compute plugin callback authorization dynamically [AI]. (#78866) Thanks @pgondhi987.
- fix(active-memory): require admin scope for global toggles [AI]. (#78863) Thanks @pgondhi987.
- Honor owner enforcement for native commands [AI]. (#78864) Thanks @pgondhi987.
@@ -302,6 +320,7 @@ Docs: https://docs.openclaw.ai
- Memory Wiki: skip empty and whitespace-only source pages when refreshing generated Related blocks, preventing blank pages from being rewritten into Related-only stubs. Fixes #78121. Thanks @amknight.
- LINE: reject `dmPolicy: "open"` configs without wildcard `allowFrom` so webhook DMs fail validation instead of being acknowledged and silently blocked before inbound processing. Fixes #78316.
- Telegram/Codex: keep message-tool-only progress drafts visible and render native Codex tool progress once per tool instead of duplicating item/tool draft lines. Fixes #75641. (#77949) Thanks @keshavbotagent.
- Telegram: keep duplicate message-tool-only Codex turns from posting generic silent-reply fallback text, so private finals stay private after inbound dedupe. Thanks @rubencu.
- Telegram/sessions: gap-fill delivered embedded final replies into the session JSONL even when the runner trace is missing, so Telegram answers after tool calls do not vanish from the durable transcript. Fixes #77814. (#78426) Thanks @obviyus, @ChushulSuri, and @DougButdorf.
- Providers/xAI: stop sending OpenAI-style reasoning effort controls to native Grok Responses models, so `xai/grok-4.3` no longer fails live Docker/Gateway runs with `Invalid reasoning effort`.
- Providers/xAI: clamp the bundled xAI thinking profile to `off` so live Gateway runs cannot send unsupported reasoning levels to native Grok Responses models.
@@ -638,6 +657,10 @@ Docs: https://docs.openclaw.ai
- Gateway/nodes: preserve the live node registry session and invoke ownership when an older same-node WebSocket closes after reconnecting. (#78351) Thanks @samzong.
- Browser/downloads: route explicit and managed browser download output directories through `fs-safe` validation before staging final files, so symlinked output roots are rejected before writes. (#78780) Thanks @jesse-merhi.
- Agents/PI: skip the idle wait during aborted embedded-run cleanup, so stopped or timed-out runs clear pending tool state and release the session lock promptly. (#74919) Thanks @medns.
- Agents/current-time: split UTC into a separate `Reference UTC:` prompt line so local `Current time:` stays anchored to the user's timezone. (#42654) Thanks @chencheng-li.
- Agents/reasoning: keep embedded reasoning deltas raw for correct same-line streaming while preserving formatted Telegram, Feishu, Discord, and heartbeat delivery at the channel edge. (#78397) Thanks @medns.
- Agents/failover: rotate auth profiles before deferred cooldown marking on rate-limit failures, so file-lock contention cannot stall profile failover. Fixes #57281. (#57283) Thanks @jeremyknows.
- Gateway/sessions: when `session.dmScope: "main"` is configured, route a bare webchat `/new` against the agent's main session (`sessions.create` with `emitCommandHooks=true`) to an in-place reset instead of creating a parallel `dashboard:` child, matching `/new` behavior on Telegram/Discord. Fixes #77434. (#71170) Thanks @statxc.
## 2026.5.3-1
@@ -861,6 +884,7 @@ Docs: https://docs.openclaw.ai
- Agents/idle-timeout: add a cost-runaway breaker to the outer embedded-run retry loop that halts further attempts after 5 consecutive idle timeouts without completed model progress, so a wedged provider can no longer fan paid model calls out across the same run; completed text or tool-call progress resets the breaker, but partial tool-argument token dribbles do not. Fixes #76293. Thanks @ThePuma312.
- Heartbeats/Codex: align structured heartbeat prompts with actual `heartbeat_respond` tool availability, stop sending legacy `HEARTBEAT_OK` when the tool exists, and keep tool-disabled commitment check-ins on the legacy ack path. Thanks @pashpashpash and @vincentkoc.
- Agent runtimes: fail explicit plugin runtime selections honestly when the requested harness is unavailable instead of silently falling back to the embedded PI runtime. Thanks @pashpashpash.
- Telegram: log inbound gateway watch messages before dispatch so watch-mode diagnostics include incoming message summaries. Thanks @rubencu.
- Maintainer workflow: push prepared PR heads through GitHub's verified commit API by default and require an explicit override before git-protocol pushes can publish unsigned commits. Thanks @BunsDev.
- Feishu: resolve setup/status probes through the selected/default account so multi-account configs with account-scoped app credentials show as configured and probeable. Fixes #72930. Thanks @brokemac79.
- Gateway/responses: emit every client tool call from `/v1/responses` JSON and SSE responses when the agent invokes multiple client tools in a single turn, so multi-tool plans, graph orchestration calls, and similar batched flows no longer drop every call but the last. Fixes #52288. Thanks @CharZhou and @bonelli.

View File

@@ -8,6 +8,8 @@ import SwiftUI
@MainActor
@Observable
final class AppState {
private static let logger = Logger(subsystem: "ai.openclaw", category: "app-state")
private let isPreview: Bool
private var isInitializing = true
private var isApplyingRemoteTokenConfig = false
@@ -696,7 +698,10 @@ final class AppState {
remoteToken: self.remoteToken,
remoteTokenDirty: self.remoteTokenDirty))
guard synced.changed else { return }
OpenClawConfigFile.saveDict(synced.root)
guard OpenClawConfigFile.saveDict(synced.root) else {
Self.logger.warning("gateway config sync rejected to protect persisted gateway auth/mode")
return
}
}
func triggerVoiceEars(ttl: TimeInterval? = 5) {

View File

@@ -8,6 +8,7 @@ enum ConfigStore {
var saveLocal: (@MainActor @Sendable ([String: Any]) -> Void)?
var loadRemote: (@MainActor @Sendable () async -> [String: Any])?
var saveRemote: (@MainActor @Sendable ([String: Any]) async throws -> Void)?
var saveGateway: (@MainActor @Sendable ([String: Any]) async throws -> Void)?
}
private actor OverrideStore {
@@ -66,10 +67,19 @@ enum ConfigStore {
do {
try await self.saveToGateway(root)
} catch {
OpenClawConfigFile.saveDict(
guard self.shouldFallbackToLocalWrite(afterGatewaySaveError: error) else {
self.lastHash = nil
throw error
}
guard OpenClawConfigFile.saveDict(
root,
preserveExistingKeys: true,
allowGatewayAuthMutation: allowGatewayAuthMutation)
else {
throw NSError(domain: "ConfigStore", code: 2, userInfo: [
NSLocalizedDescriptionKey: "Local config write rejected to protect gateway auth/mode.",
])
}
}
}
}
@@ -89,8 +99,30 @@ enum ConfigStore {
}
}
/// Decides whether a failed gateway save may fall back to a direct local
/// config write. Returns `false` when the error text looks like a
/// validation, staleness (base-hash), or authorization rejection — those
/// must surface to the caller rather than be papered over locally.
private static func shouldFallbackToLocalWrite(afterGatewaySaveError error: Error) -> Bool {
    let bridged = error as NSError
    // Match case-insensitively against both the error domain and message.
    let haystack = "\(bridged.domain) \(bridged.localizedDescription)".lowercased()
    let blockedFragments = [
        "invalid_request",
        "invalid request",
        "invalid config",
        "config changed since last load",
        "base hash",
        "basehash",
        "unauthorized",
        "token mismatch",
        "auth",
    ]
    // Any blocked fragment in the error text disqualifies the fallback.
    for fragment in blockedFragments where haystack.contains(fragment) {
        return false
    }
    return true
}
@MainActor
private static func saveToGateway(_ root: [String: Any]) async throws {
let overrides = await self.overrideStore.overrides
if let saveGateway = overrides.saveGateway {
try await saveGateway(root)
return
}
if self.lastHash == nil {
_ = await self.loadFromGateway()
}

View File

@@ -779,7 +779,10 @@ struct DebugSettings: View {
session["store"] = trimmed.isEmpty ? SessionLoader.defaultStorePath : trimmed
root["session"] = session
OpenClawConfigFile.saveDict(root)
guard OpenClawConfigFile.saveDict(root) else {
self.sessionStoreSaveError = "Config write rejected to protect gateway auth/mode."
return
}
self.sessionStoreSaveError = nil
}

View File

@@ -52,14 +52,16 @@ enum OpenClawConfigFile {
}
}
@discardableResult
static func saveDict(
_ dict: [String: Any],
preserveExistingKeys: Bool = false,
allowGatewayAuthMutation: Bool = false)
-> Bool
{
self.withFileLock {
// Nix mode disables config writes in production, but tests rely on saving temp configs.
if ProcessInfo.processInfo.isNixMode, !ProcessInfo.processInfo.isRunningTests { return }
if ProcessInfo.processInfo.isNixMode, !ProcessInfo.processInfo.isRunningTests { return false }
let url = self.url()
let previousData = try? Data(contentsOf: url)
let previousRoot = previousData.flatMap { self.parseConfigData($0) }
@@ -81,12 +83,7 @@ enum OpenClawConfigFile {
do {
let data = try JSONSerialization.data(withJSONObject: output, options: [.prettyPrinted, .sortedKeys])
try FileManager().createDirectory(
at: url.deletingLastPathComponent(),
withIntermediateDirectories: true)
try data.write(to: url, options: [.atomic])
let nextBytes = data.count
let nextAttributes = try? FileManager().attributesOfItem(atPath: url.path)
let gatewayModeAfter = self.gatewayMode(output)
var suspicious = self.configWriteSuspiciousReasons(
existsBefore: previousData != nil,
@@ -98,6 +95,44 @@ enum OpenClawConfigFile {
if preservedGatewayAuth {
suspicious.append("gateway-auth-preserved")
}
let blocking = self.configWriteBlockingReasons(suspicious)
if !blocking.isEmpty {
let rejectedPath = self.persistRejectedConfigWrite(data: data, configURL: url)
self.logger.warning("config write rejected (\(blocking.joined(separator: ", "))) at \(url.path)")
self.appendConfigWriteAudit([
"result": "rejected",
"configPath": url.path,
"existsBefore": previousData != nil,
"previousBytes": previousBytes ?? NSNull(),
"nextBytes": nextBytes,
"previousDev": self.fileSystemNumber(previousAttributes?[.systemNumber]) ?? NSNull(),
"nextDev": NSNull(),
"previousIno": self.fileSystemNumber(previousAttributes?[.systemFileNumber]) ?? NSNull(),
"nextIno": NSNull(),
"previousMode": self.posixMode(previousAttributes?[.posixPermissions]) ?? NSNull(),
"nextMode": NSNull(),
"previousNlink": self.fileAttributeInt(previousAttributes?[.referenceCount]) ?? NSNull(),
"nextNlink": NSNull(),
"previousUid": self.fileAttributeInt(previousAttributes?[.ownerAccountID]) ?? NSNull(),
"nextUid": NSNull(),
"previousGid": self.fileAttributeInt(previousAttributes?[.groupOwnerAccountID]) ?? NSNull(),
"nextGid": NSNull(),
"hasMetaBefore": hadMetaBefore,
"hasMetaAfter": self.hasMeta(output),
"gatewayModeBefore": gatewayModeBefore ?? NSNull(),
"gatewayModeAfter": gatewayModeAfter ?? NSNull(),
"preservedGatewayAuth": preservedGatewayAuth,
"suspicious": suspicious,
"blocking": blocking,
"rejectedPath": rejectedPath ?? NSNull(),
])
return false
}
try FileManager().createDirectory(
at: url.deletingLastPathComponent(),
withIntermediateDirectories: true)
try data.write(to: url, options: [.atomic])
let nextAttributes = try? FileManager().attributesOfItem(atPath: url.path)
if !suspicious.isEmpty {
self.logger.warning("config write anomaly (\(suspicious.joined(separator: ", "))) at \(url.path)")
}
@@ -123,9 +158,11 @@ enum OpenClawConfigFile {
"hasMetaAfter": self.hasMeta(output),
"gatewayModeBefore": gatewayModeBefore ?? NSNull(),
"gatewayModeAfter": gatewayModeAfter ?? NSNull(),
"preservedGatewayAuth": preservedGatewayAuth,
"suspicious": suspicious,
])
self.observeConfigRead(data: data, root: output, configURL: url, valid: true)
return true
} catch {
self.logger.error("config save failed: \(error.localizedDescription)")
self.appendConfigWriteAudit([
@@ -138,9 +175,11 @@ enum OpenClawConfigFile {
"hasMetaAfter": self.hasMeta(output),
"gatewayModeBefore": gatewayModeBefore ?? NSNull(),
"gatewayModeAfter": self.gatewayMode(output) ?? NSNull(),
"preservedGatewayAuth": preservedGatewayAuth,
"suspicious": preservedGatewayAuth ? ["gateway-auth-preserved"] : [],
"error": error.localizedDescription,
])
return false
}
}
}
@@ -416,6 +455,12 @@ enum OpenClawConfigFile {
return reasons
}
/// Subset of suspicious-write reasons severe enough to reject the write
/// outright: any size-drop anomaly and removal of the gateway mode key.
private static func configWriteBlockingReasons(_ suspicious: [String]) -> [String] {
    var blocking: [String] = []
    for reason in suspicious {
        let isSizeDrop = reason.hasPrefix("size-drop:")
        if isSizeDrop || reason == "gateway-mode-removed" {
            blocking.append(reason)
        }
    }
    return blocking
}
private static func configAuditLogURL() -> URL {
self.stateDirURL()
.appendingPathComponent("logs", isDirectory: true)
@@ -594,6 +639,26 @@ enum OpenClawConfigFile {
}
}
/// Persists a rejected config payload next to the config file as a private
/// (0o600) audit artifact and returns its path, or nil if the write failed.
/// If an artifact with the same timestamp token already exists, its
/// permissions are tightened and the new payload is not written.
/// NOTE(review): same-token rejections therefore keep only the first
/// payload — confirm that is acceptable for audit purposes.
private static func persistRejectedConfigWrite(data: Data, configURL: URL) -> String? {
    let timestamp = ISO8601DateFormatter().string(from: Date())
    let artifactURL = configURL.deletingLastPathComponent()
        .appendingPathComponent("\(configURL.lastPathComponent).rejected.\(self.configTimestampToken(timestamp))")
    let fm = FileManager()
    let ownerOnly: NSNumber = 0o600
    guard !fm.fileExists(atPath: artifactURL.path) else {
        // Keep the existing artifact; just make sure it stays private.
        try? fm.setAttributes([.posixPermissions: ownerOnly], ofItemAtPath: artifactURL.path)
        return artifactURL.path
    }
    let created = fm.createFile(
        atPath: artifactURL.path,
        contents: data,
        attributes: [.posixPermissions: ownerOnly])
    return created ? artifactURL.path : nil
}
private static func observeConfigRead(data: Data, root: [String: Any]?, configURL: URL, valid: Bool) {
let observedAt = ISO8601DateFormatter().string(from: Date())
let current = self.configFingerprint(data: data, root: root, configURL: configURL, observedAt: observedAt)

View File

@@ -259,4 +259,37 @@ struct AppStateRemoteConfigTests {
remoteTokenDirty: true))
#expect((cleared["token"] as? String) == nil)
}
// Regression test: switching the connection draft from remote to local
// mode must flip gateway.mode while leaving the persisted gateway auth
// block (auth.mode + auth.token) untouched in the synced root.
@Test
func `synced gateway root preserves gateway auth across mode changes`() {
// Persisted state for a remote-mode gateway with token auth and a
// direct wss transport.
let initialRoot: [String: Any] = [
"gateway": [
"mode": "remote",
"auth": [
"mode": "token",
"token": "test-token", // pragma: allowlist secret
],
"remote": [
"transport": "direct",
"url": "wss://old-gateway.example",
],
],
]
// Draft switching to local mode with empty remote fields and a clean
// (non-dirty) token, so no token rewrite should occur.
let localRoot = AppState._testSyncedGatewayRoot(
currentRoot: initialRoot,
draft: .init(
connectionMode: .local,
remoteTransport: .ssh,
remoteTarget: "",
remoteIdentity: "",
remoteUrl: "",
remoteToken: "",
remoteTokenDirty: false))
let localGateway = localRoot["gateway"] as? [String: Any]
let auth = localGateway?["auth"] as? [String: Any]
// Mode changed, but auth mode and token survived the sync.
#expect(localGateway?["mode"] as? String == "local")
#expect(auth?["mode"] as? String == "token")
#expect(auth?["token"] as? String == "test-token") // pragma: allowlist secret
}
}

View File

@@ -1,3 +1,4 @@
import Foundation
import Testing
@testable import OpenClaw
@@ -65,4 +66,76 @@ struct ConfigStoreTests {
#expect(localHit)
#expect(!remoteHit)
}
// When the gateway rejects a save because the config changed since last
// load (stale base hash), ConfigStore.save must rethrow instead of
// falling back to a direct local write — the on-disk config stays
// byte-identical.
@Test func `local save does not fall back to direct write after stale gateway rejection`() async throws {
let stateDir = FileManager().temporaryDirectory
.appendingPathComponent("openclaw-state-\(UUID().uuidString)", isDirectory: true)
let configPath = stateDir.appendingPathComponent("openclaw.json")
defer { try? FileManager().removeItem(at: stateDir) }
try await TestIsolation.withEnvValues([
"OPENCLAW_STATE_DIR": stateDir.path,
"OPENCLAW_CONFIG_PATH": configPath.path,
]) {
// Seed a config on disk so we can prove it is left untouched.
OpenClawConfigFile.saveDict([
"gateway": [
"mode": "local",
"auth": [
"mode": "token",
"token": "test-token", // pragma: allowlist secret
],
],
])
let before = try String(contentsOf: configPath, encoding: .utf8)
// Stub the gateway save to fail with a staleness message, which
// shouldFallbackToLocalWrite treats as a blocked fallback.
await ConfigStore._testSetOverrides(.init(
isRemoteMode: { false },
saveGateway: { _ in
throw NSError(domain: "Gateway", code: 0, userInfo: [
NSLocalizedDescriptionKey: "config changed since last load; re-run config.get and retry",
])
}))
var didThrow = false
do {
try await ConfigStore.save(["browser": ["enabled": false]])
} catch {
didThrow = true
}
await ConfigStore._testClearOverrides()
// The error must propagate and the file must be unchanged.
#expect(didThrow)
let after = try String(contentsOf: configPath, encoding: .utf8)
#expect(after == before)
}
}
// When the gateway save fails with an availability-style error (not a
// rejection), ConfigStore.save may fall back to the protected direct
// local write: the payload lands on disk and meta is populated.
@Test func `local save can fall back to protected direct write when gateway is unavailable`() async throws {
let stateDir = FileManager().temporaryDirectory
.appendingPathComponent("openclaw-state-\(UUID().uuidString)", isDirectory: true)
let configPath = stateDir.appendingPathComponent("openclaw.json")
defer { try? FileManager().removeItem(at: stateDir) }
try await TestIsolation.withEnvValues([
"OPENCLAW_STATE_DIR": stateDir.path,
"OPENCLAW_CONFIG_PATH": configPath.path,
]) {
// "gateway not configured" contains no blocked fragment, so the
// fallback path is allowed to run.
await ConfigStore._testSetOverrides(.init(
isRemoteMode: { false },
saveGateway: { _ in
throw NSError(domain: "Gateway", code: 0, userInfo: [
NSLocalizedDescriptionKey: "gateway not configured",
])
}))
try await ConfigStore.save([
"gateway": ["mode": "local"],
"browser": ["enabled": false],
])
await ConfigStore._testClearOverrides()
// The fallback write persisted the new value and kept meta present.
let data = try Data(contentsOf: configPath)
let root = try JSONSerialization.jsonObject(with: data) as? [String: Any]
#expect(((root?["browser"] as? [String: Any])?["enabled"] as? Bool) == false)
#expect((root?["meta"] as? [String: Any]) != nil)
}
}
}

View File

@@ -336,4 +336,118 @@ struct OpenClawConfigFileTests {
}
}
}
@MainActor
@Test
func `save dict records preserved gateway auth in audit`() async throws {
    // Each run gets its own state directory so config and audit files are isolated.
    let stateDir = FileManager().temporaryDirectory
        .appendingPathComponent("openclaw-state-\(UUID().uuidString)", isDirectory: true)
    let configPath = stateDir.appendingPathComponent("openclaw.json")
    let auditPath = stateDir.appendingPathComponent("logs/config-audit.jsonl")
    defer { try? FileManager().removeItem(at: stateDir) }
    try await TestIsolation.withEnvValues([
        "OPENCLAW_STATE_DIR": stateDir.path,
        "OPENCLAW_CONFIG_PATH": configPath.path,
    ]) {
        // Seed a config whose gateway block carries token auth.
        OpenClawConfigFile.saveDict([
            "gateway": [
                "mode": "local",
                "auth": [
                    "mode": "token",
                    "token": "test-token", // pragma: allowlist secret
                ],
            ],
        ])
        // Save again without the auth block; the auth is expected to survive.
        let secondSaveSucceeded = OpenClawConfigFile.saveDict([
            "gateway": [
                "mode": "local",
            ],
            "browser": [
                "enabled": false,
            ],
        ])
        #expect(secondSaveSucceeded)
        // The persisted config must still contain the original token auth.
        let configData = try Data(contentsOf: configPath)
        let configRoot = try JSONSerialization.jsonObject(with: configData) as? [String: Any]
        let gatewaySection = configRoot?["gateway"] as? [String: Any]
        let authSection = gatewaySection?["auth"] as? [String: Any]
        #expect(gatewaySection?["mode"] as? String == "local")
        #expect(authSection?["mode"] as? String == "token")
        #expect(authSection?["token"] as? String == "test-token") // pragma: allowlist secret
        #expect((configRoot?["meta"] as? [String: Any]) != nil)
        // The newest audit line must flag that the gateway auth was preserved.
        let auditText = try String(contentsOf: auditPath, encoding: .utf8)
        let auditLines = auditText.split(whereSeparator: \.isNewline).map(String.init)
        let lastAuditLine = auditLines.last
        let auditEntry = try JSONSerialization.jsonObject(
            with: Data((lastAuditLine ?? "{}").utf8)) as? [String: Any]
        #expect(auditEntry?["result"] as? String == "success")
        #expect(auditEntry?["preservedGatewayAuth"] as? Bool == true)
        let suspiciousFlags = auditEntry?["suspicious"] as? [String] ?? []
        #expect(suspiciousFlags.contains("gateway-auth-preserved"))
    }
}
@MainActor
@Test
func `save dict rejects gateway mode removal and keeps previous config`() async throws {
    // Fresh per-run state directory; removed again on exit.
    let stateDir = FileManager().temporaryDirectory
        .appendingPathComponent("openclaw-state-\(UUID().uuidString)", isDirectory: true)
    let configPath = stateDir.appendingPathComponent("openclaw.json")
    let auditPath = stateDir.appendingPathComponent("logs/config-audit.jsonl")
    defer { try? FileManager().removeItem(at: stateDir) }
    try await TestIsolation.withEnvValues([
        "OPENCLAW_STATE_DIR": stateDir.path,
        "OPENCLAW_CONFIG_PATH": configPath.path,
    ]) {
        // Seed a config with a gateway mode, token auth, and a browser flag.
        OpenClawConfigFile.saveDict([
            "gateway": [
                "mode": "local",
                "auth": [
                    "mode": "token",
                    "token": "test-token", // pragma: allowlist secret
                ],
            ],
            "browser": [
                "enabled": true,
            ],
        ])
        let snapshotBefore = try String(contentsOf: configPath, encoding: .utf8)
        // A payload that drops the gateway block entirely should be rejected.
        let saveSucceeded = OpenClawConfigFile.saveDict([
            "browser": [
                "enabled": false,
            ],
        ])
        #expect(!saveSucceeded)
        // The on-disk config must be unchanged from the pre-save snapshot.
        let snapshotAfter = try String(contentsOf: configPath, encoding: .utf8)
        #expect(snapshotAfter == snapshotBefore)
        // Inspect the most recent audit line for the rejection record.
        let auditText = try String(contentsOf: auditPath, encoding: .utf8)
        let auditLines = auditText.split(whereSeparator: \.isNewline).map(String.init)
        guard let lastAuditLine = auditLines.last else {
            Issue.record("Missing rejected config audit line")
            return
        }
        let auditEntry = try JSONSerialization.jsonObject(with: Data(lastAuditLine.utf8)) as? [String: Any]
        #expect(auditEntry?["result"] as? String == "rejected")
        let suspiciousFlags = auditEntry?["suspicious"] as? [String] ?? []
        let blockingFlags = auditEntry?["blocking"] as? [String] ?? []
        #expect(suspiciousFlags.contains("gateway-mode-removed"))
        #expect(blockingFlags.contains("gateway-mode-removed"))
        // When the audit names a rejected-payload file, it must exist with 0600 perms.
        if let rejectedPath = auditEntry?["rejectedPath"] as? String {
            #expect(FileManager().fileExists(atPath: rejectedPath))
            let attributes = try FileManager().attributesOfItem(atPath: rejectedPath)
            let mode = attributes[.posixPermissions] as? NSNumber
            #expect(mode?.intValue == 0o600)
        } else {
            Issue.record("Missing rejected payload path")
        }
    }
}
}

View File

@@ -1172,6 +1172,7 @@ Auto-join example:
discord: {
voice: {
enabled: true,
mode: "stt-tts",
model: "openai/gpt-5.4-mini",
autoJoin: [
{
@@ -1199,8 +1200,10 @@ Auto-join example:
Notes:
- `voice.tts` overrides `messages.tts` for voice playback only.
- `voice.model` overrides the LLM used for Discord voice channel responses only. Leave it unset to inherit the routed agent model. Do not set this to `gpt-realtime-2`; Discord voice channels use STT plus TTS playback, not the OpenAI Realtime session transport.
- STT uses `tools.media.audio`; `voice.model` does not affect transcription.
- `voice.mode` controls the conversation path: `stt-tts` keeps the existing batch STT plus TTS flow, `talk-buffer` uses a realtime voice shell for turn timing/transcription/playback while the OpenClaw agent produces the answer, and `bidi` lets the realtime model converse directly while exposing `openclaw_agent_consult` for the OpenClaw brain.
- `voice.model` overrides the OpenClaw agent brain for Discord voice responses and realtime consults. Leave it unset to inherit the routed agent model. It is separate from `voice.realtime.model`.
- In `stt-tts` mode, STT uses `tools.media.audio`; `voice.model` does not affect transcription.
- In realtime modes, `voice.realtime.provider`, `voice.realtime.model`, and `voice.realtime.voice` configure the realtime audio session. For OpenAI Realtime 2 plus the Codex brain, use `voice.realtime.model: "gpt-realtime-2"` and `voice.model: "openai-codex/gpt-5.5"`.
- For an OpenAI voice on Discord playback, set `voice.tts.provider: "openai"` and choose a text-to-speech voice under `voice.tts.openai.voice` or `voice.tts.providers.openai.voice`. `cedar` is a good masculine-sounding choice on the current OpenAI TTS model.
- Per-channel Discord `systemPrompt` overrides apply to voice transcript turns for that voice channel.
- Voice transcript turns derive owner status from Discord `allowFrom` (or `dm.allowFrom`); non-owner speakers cannot access owner-only tools (for example `gateway` and `cron`).
@@ -1211,7 +1214,7 @@ Notes:
- `@discordjs/voice` defaults are `daveEncryption=true` and `decryptionFailureTolerance=24` if unset.
- `voice.connectTimeoutMs` controls the initial `@discordjs/voice` Ready wait for `/vc join` and auto-join attempts. Default: `30000`.
- `voice.reconnectGraceMs` controls how long OpenClaw waits for a disconnected voice session to begin reconnecting before destroying it. Default: `15000`.
- Voice playback does not stop just because another user starts speaking. To avoid feedback loops, OpenClaw ignores new voice capture while TTS is playing; speak after playback finishes for the next turn.
- In `stt-tts` mode, voice playback does not stop just because another user starts speaking. To avoid feedback loops, OpenClaw ignores new voice capture while TTS is playing; speak after playback finishes for the next turn. Realtime modes forward speaker starts as barge-in signals to the realtime provider.
- `voice.captureSilenceGraceMs` controls how long OpenClaw waits after Discord reports a speaker has stopped before finalizing that audio segment for STT. Default: `2500`; raise this if Discord splits normal pauses into choppy partial transcripts.
- When ElevenLabs is the selected TTS provider, Discord voice playback uses streaming TTS and starts from the provider response stream. Providers without streaming support fall back to the synthesized temp-file path.
- OpenClaw also watches receive decrypt failures and auto-recovers by leaving/rejoining the voice channel after repeated failures in a short window.
@@ -1219,7 +1222,7 @@ Notes:
- `The operation was aborted` receive events are expected when OpenClaw finalizes a captured speaker segment; they are verbose diagnostics, not warnings.
- Verbose Discord voice logs include a bounded one-line STT transcript preview for each accepted speaker segment, so debugging shows both the user side and the agent reply side without dumping unbounded transcript text.
Voice channel pipeline:
STT plus TTS pipeline:
- Discord PCM capture is converted to a WAV temp file.
- `tools.media.audio` handles STT, for example `openai/gpt-4o-mini-transcribe`.
@@ -1227,7 +1230,51 @@ Voice channel pipeline:
- `voice.model`, when set, overrides only the response LLM for this voice-channel turn.
- `voice.tts` is merged over `messages.tts`; streaming-capable providers feed the player directly, otherwise the resulting audio file is played in the joined channel.
Credentials are resolved per component: LLM route auth for `voice.model`, STT auth for `tools.media.audio`, and TTS auth for `messages.tts`/`voice.tts`.
Realtime talk-buffer example:
```json5
{
channels: {
discord: {
voice: {
enabled: true,
mode: "talk-buffer",
model: "openai-codex/gpt-5.5",
realtime: {
provider: "openai",
model: "gpt-realtime-2",
voice: "cedar",
},
},
},
},
}
```
Realtime bidi example:
```json5
{
channels: {
discord: {
voice: {
enabled: true,
mode: "bidi",
model: "openai-codex/gpt-5.5",
realtime: {
provider: "openai",
model: "gpt-realtime-2",
voice: "cedar",
toolPolicy: "safe-read-only",
consultPolicy: "always",
},
},
},
},
}
```
Credentials are resolved per component: LLM route auth for `voice.model`, STT auth for `tools.media.audio`, TTS auth for `messages.tts`/`voice.tts`, and realtime provider auth for `voice.realtime.providers` or the provider's normal auth config.
### Voice messages

View File

@@ -92,7 +92,7 @@ iMessage and BlueBubbles share a lot of channel-level config. The keys that chan
| `channels.bluebubbles.groupAllowFrom` | `channels.imessage.groupAllowFrom` | Same. |
| `channels.bluebubbles.groups` | `channels.imessage.groups` | **Copy this verbatim, including any `groups: { "*": { ... } }` wildcard entry.** Per-group `requireMention`, `tools`, `toolsBySender` carry over. With `groupPolicy: "allowlist"`, an empty or missing `groups` block silently drops every group message — see "Group registry footgun" below. |
| `channels.bluebubbles.sendReadReceipts` | `channels.imessage.sendReadReceipts` | Default `true`. With the bundled plugin this only fires when the private API probe is up. |
| `channels.bluebubbles.includeAttachments` | `channels.imessage.includeAttachments` | Same. |
| `channels.bluebubbles.includeAttachments` | `channels.imessage.includeAttachments` | Same shape, **same off-by-default**. If you had attachments flowing on BlueBubbles you must re-set this explicitly on the iMessage block — it does not carry over implicitly, and inbound photos/media will be silently dropped with no `Inbound message` log line until you do. |
| `channels.bluebubbles.attachmentRoots` | `channels.imessage.attachmentRoots` | Local roots; same wildcard rules. |
| _(N/A)_ | `channels.imessage.remoteAttachmentRoots` | Only used when `remoteHost` is set for SCP fetches. |
| `channels.bluebubbles.mediaMaxMb` | `channels.imessage.mediaMaxMb` | Default 16 MB on iMessage (BlueBubbles default was 8 MB). Set explicitly if you want to keep the lower cap. |

View File

@@ -403,7 +403,7 @@ See [ACP Agents](/tools/acp-agents) for shared ACP binding behavior.
<AccordionGroup>
<Accordion title="Attachments and media">
- inbound attachment ingestion is optional: `channels.imessage.includeAttachments`
- inbound attachment ingestion is **off by default** — set `channels.imessage.includeAttachments: true` to forward photos, voice memos, video, and other attachments to the agent. With it disabled, attachment-only iMessages are dropped before reaching the agent and may produce no `Inbound message` log line at all.
- remote attachment paths can be fetched via SCP when `remoteHost` is set
- attachment paths must match allowed roots:
- `channels.imessage.attachmentRoots` (local)

View File

@@ -49,6 +49,7 @@ Quick rule:
| Session modes | Partial | `session/set_mode` is supported and the bridge exposes initial Gateway-backed session controls for thought level, tool verbosity, reasoning, usage detail, and elevated actions. Broader ACP-native mode/config surfaces are still out of scope. |
| Session info and usage updates | Partial | The bridge emits `session_info_update` and best-effort `usage_update` notifications from cached Gateway session snapshots. Usage is approximate and only sent when Gateway token totals are marked fresh. |
| Tool streaming | Partial | `tool_call` / `tool_call_update` events include raw I/O, text content, and best-effort file locations when Gateway tool args/results expose them. Embedded terminals and richer diff-native output are still not exposed. |
| Exec approvals | Partial | Gateway exec approval prompts during active ACP prompt turns are relayed to the ACP client with `session/request_permission`. |
| Per-session MCP servers (`mcpServers`) | Unsupported | Bridge mode rejects per-session MCP server requests. Configure MCP on the OpenClaw gateway or agent instead. |
| Client filesystem methods (`fs/read_text_file`, `fs/write_text_file`) | Unsupported | The bridge does not call ACP client filesystem methods. |
| Client terminal methods (`terminal/*`) | Unsupported | The bridge does not create ACP client terminals or stream terminal ids through tool calls. |
@@ -76,6 +77,8 @@ Quick rule:
- Tool follow-along data is best-effort. The bridge can surface file paths that
appear in known tool args/results, but it does not yet emit ACP terminals or
structured file diffs.
- Exec approval relay is scoped to the active ACP prompt turn; approvals from
other Gateway sessions are ignored.
## Usage

View File

@@ -332,12 +332,16 @@ flowchart LR
I --> M["Main Reply"]
```
The blocking memory sub-agent can use only the available memory recall tools:
The blocking memory sub-agent can use only the configured memory recall tools.
By default those are:
- `memory_recall`
- `memory_search`
- `memory_get`
When `plugins.slots.memory` is `memory-lancedb`, the default is `memory_recall`
instead. Set `config.toolsAllow` when another memory provider exposes a
different recall tool contract.
If the connection is weak, it should return `NONE`.
## Query modes
@@ -462,6 +466,110 @@ skips recall for that turn.
`config.modelFallbackPolicy` is retained only as a deprecated compatibility
field for older configs. It no longer changes runtime behavior.
## Memory tools
By default Active Memory lets the blocking recall sub-agent call
`memory_search` and `memory_get`. That matches the built-in `memory-core`
contract. When `plugins.slots.memory` selects `memory-lancedb` and
`config.toolsAllow` is unset, Active Memory keeps the existing LanceDB behavior
and uses `memory_recall` instead.
If you use another memory plugin, set `config.toolsAllow` to the exact tool
names that plugin registers. Active Memory lists those tools in the recall
prompt and passes the same list to the embedded sub-agent. If none of the
configured tools are available, or the memory sub-agent fails, Active Memory
skips recall for that turn and the main reply continues without memory context.
`toolsAllow` only accepts concrete memory tool names. Wildcards, `group:*`
entries, and core agent tools such as `read`, `exec`, `message`, and
`web_search` are ignored before the hidden memory sub-agent starts.
Default-behavior note: Active Memory no longer includes `memory_recall` in the
memory-core default allowlist. Existing `memory-lancedb` setups keep working
when `plugins.slots.memory` is set to `memory-lancedb`. Explicit `toolsAllow`
always overrides the automatic default.
### Built-in memory-core
The default setup does not need an explicit `toolsAllow`:
```json5
{
plugins: {
entries: {
"active-memory": {
enabled: true,
config: {
agents: ["main"],
// Default: ["memory_search", "memory_get"]
},
},
},
},
}
```
### LanceDB memory
The bundled `memory-lancedb` plugin exposes `memory_recall`. Selecting the
memory slot is enough for Active Memory to use that recall tool:
```json5
{
plugins: {
slots: {
memory: "memory-lancedb",
},
entries: {
"memory-lancedb": {
enabled: true,
config: {
embedding: {
provider: "openai",
model: "text-embedding-3-small",
},
},
},
"active-memory": {
enabled: true,
config: {
agents: ["main"],
promptAppend: "Use memory_recall for long-term user preferences, past decisions, and previously discussed topics. If recall finds nothing useful, return NONE.",
},
},
},
},
}
```
### Lossless Claw
Lossless Claw is a context-engine plugin with its own recall tools. Install and
configure it as a context engine first; see [Context engine](/concepts/context-engine).
Then let Active Memory use the Lossless Claw recall tools:
```json5
{
plugins: {
entries: {
"lossless-claw": {
enabled: true,
},
"active-memory": {
enabled: true,
config: {
agents: ["main"],
toolsAllow: ["lcm_grep", "lcm_describe", "lcm_expand_query"],
promptAppend: "Use lcm_grep first for compacted conversation recall. Use lcm_describe to inspect a specific summary. Use lcm_expand_query only when the latest user message needs exact details that may have been compacted away. Return NONE if the retrieved context is not clearly useful.",
},
},
},
},
}
```
Do not include `lcm_expand` in `toolsAllow` for the main Active Memory sub-agent.
Lossless Claw uses that as a lower-level delegated expansion tool.
## Advanced escape hatches
These options are intentionally not part of the recommended setup.
@@ -488,6 +596,9 @@ Memory prompt and before the conversation context:
promptAppend: "Prefer stable long-term preferences over one-off events."
```
Use `promptAppend` with custom `toolsAllow` when a non-core memory plugin needs
provider-specific tool order or query-shaping instructions.
`config.promptOverride` replaces the default Active Memory prompt. OpenClaw
still appends the conversation context afterward:
@@ -558,25 +669,26 @@ plugins.entries.active-memory
The most important fields are:
| Key | Type | Meaning |
| ---------------------------- | ---------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `enabled` | `boolean` | Enables the plugin itself |
| `config.agents` | `string[]` | Agent ids that may use active memory |
| `config.model` | `string` | Optional blocking memory sub-agent model ref; when unset, active memory uses the current session model |
| `config.allowedChatTypes` | `("direct" \| "group" \| "channel")[]` | Session types that may run Active Memory; defaults to direct-message style sessions |
| `config.allowedChatIds` | `string[]` | Optional per-conversation allowlist applied after `allowedChatTypes`; non-empty lists fail closed |
| `config.deniedChatIds` | `string[]` | Optional per-conversation denylist that overrides allowed session types and allowed ids |
| `config.queryMode` | `"message" \| "recent" \| "full"` | Controls how much conversation the blocking memory sub-agent sees |
| `config.promptStyle` | `"balanced" \| "strict" \| "contextual" \| "recall-heavy" \| "precision-heavy" \| "preference-only"` | Controls how eager or strict the blocking memory sub-agent is when deciding whether to return memory |
| `config.thinking` | `"off" \| "minimal" \| "low" \| "medium" \| "high" \| "xhigh" \| "adaptive" \| "max"` | Advanced thinking override for the blocking memory sub-agent; default `off` for speed |
| `config.promptOverride` | `string` | Advanced full prompt replacement; not recommended for normal use |
| `config.promptAppend` | `string` | Advanced extra instructions appended to the default or overridden prompt |
| `config.timeoutMs` | `number` | Hard timeout for the blocking memory sub-agent, capped at 120000 ms |
| `config.setupGraceTimeoutMs` | `number` | Advanced extra setup budget before the recall timeout expires; defaults to 0 and is capped at 30000 ms. See [Cold-start grace](#cold-start-grace) for v2026.4.x upgrade guidance |
| `config.maxSummaryChars` | `number` | Maximum total characters allowed in the active-memory summary |
| `config.logging` | `boolean` | Emits active memory logs while tuning |
| `config.persistTranscripts` | `boolean` | Keeps blocking memory sub-agent transcripts on disk instead of deleting temp files |
| `config.transcriptDir` | `string` | Relative blocking memory sub-agent transcript directory under the agent sessions folder |
| Key | Type | Meaning |
| ---------------------------- | ---------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `enabled` | `boolean` | Enables the plugin itself |
| `config.agents` | `string[]` | Agent ids that may use active memory |
| `config.model` | `string` | Optional blocking memory sub-agent model ref; when unset, active memory uses the current session model |
| `config.allowedChatTypes` | `("direct" \| "group" \| "channel")[]` | Session types that may run Active Memory; defaults to direct-message style sessions |
| `config.allowedChatIds` | `string[]` | Optional per-conversation allowlist applied after `allowedChatTypes`; non-empty lists fail closed |
| `config.deniedChatIds` | `string[]` | Optional per-conversation denylist that overrides allowed session types and allowed ids |
| `config.queryMode` | `"message" \| "recent" \| "full"` | Controls how much conversation the blocking memory sub-agent sees |
| `config.promptStyle` | `"balanced" \| "strict" \| "contextual" \| "recall-heavy" \| "precision-heavy" \| "preference-only"` | Controls how eager or strict the blocking memory sub-agent is when deciding whether to return memory |
| `config.toolsAllow` | `string[]` | Concrete memory tool names the blocking memory sub-agent may call; defaults to `["memory_search", "memory_get"]`, or `["memory_recall"]` when `plugins.slots.memory` is `memory-lancedb`; wildcards, `group:*` entries, and core agent tools are ignored |
| `config.thinking` | `"off" \| "minimal" \| "low" \| "medium" \| "high" \| "xhigh" \| "adaptive" \| "max"` | Advanced thinking override for the blocking memory sub-agent; default `off` for speed |
| `config.promptOverride` | `string` | Advanced full prompt replacement; not recommended for normal use |
| `config.promptAppend` | `string` | Advanced extra instructions appended to the default or overridden prompt |
| `config.timeoutMs` | `number` | Hard timeout for the blocking memory sub-agent, capped at 120000 ms |
| `config.setupGraceTimeoutMs` | `number` | Advanced extra setup budget before the recall timeout expires; defaults to 0 and is capped at 30000 ms. See [Cold-start grace](#cold-start-grace) for v2026.4.x upgrade guidance |
| `config.maxSummaryChars` | `number` | Maximum total characters allowed in the active-memory summary |
| `config.logging` | `boolean` | Emits active memory logs while tuning |
| `config.persistTranscripts` | `boolean` | Keeps blocking memory sub-agent transcripts on disk instead of deleting temp files |
| `config.transcriptDir` | `string` | Relative blocking memory sub-agent transcript directory under the agent sessions folder |
Useful tuning fields:
@@ -692,8 +804,9 @@ If active memory is too slow:
Active Memory rides on the configured memory plugin's recall pipeline, so most
recall surprises are embedding-provider problems, not Active Memory bugs. The
default `memory-core` path uses `memory_search`; `memory-lancedb` uses
`memory_recall`.
default `memory-core` path uses `memory_search` and `memory_get`; the
`memory-lancedb` slot uses `memory_recall`. If you use another memory plugin,
confirm `config.toolsAllow` names the tools that plugin actually registers.
<AccordionGroup>
<Accordion title="Embedding provider switched or stopped working">

View File

@@ -278,7 +278,7 @@ Optional:
- `OPENCLAW_QA_TELEGRAM_CAPTURE_CONTENT=1` keeps message bodies in observed-message artifacts (default redacts).
Scenarios (`extensions/qa-lab/src/live-transports/telegram/telegram-live.runtime.ts:44`):
Scenarios (`extensions/qa-lab/src/live-transports/telegram/telegram-live.runtime.ts`):
- `telegram-canary`
- `telegram-mention-gating`
@@ -287,10 +287,17 @@ Scenarios (`extensions/qa-lab/src/live-transports/telegram/telegram-live.runtime
- `telegram-commands-command`
- `telegram-tools-compact-command`
- `telegram-whoami-command`
- `telegram-status-command`
- `telegram-other-bot-command-gating`
- `telegram-context-command`
- `telegram-current-session-status-tool`
- `telegram-reply-chain-exact-marker`
- `telegram-stream-final-single-message`
- `telegram-long-final-reuses-preview`
- `telegram-long-final-three-chunks`
The implicit default set always covers canary, mention gating, native command replies, command addressing, and bot-to-bot group replies. `mock-openai` defaults also include deterministic reply-chain and final-message streaming checks. `telegram-current-session-status-tool` remains opt-in because it is only stable when threaded directly after canary, not after arbitrary native command replies. Use `pnpm openclaw qa telegram --list-scenarios --provider-mode mock-openai` to print the current default/optional split with regression refs.
Output artifacts:
- `telegram-qa-report.md`

View File

@@ -510,6 +510,10 @@ See [Inferred commitments](/concepts/commitments).
value, so repeated failures from one localhost origin do not automatically
lock out a different origin.
- `tailscale.mode`: `serve` (tailnet only, loopback bind) or `funnel` (public, requires auth).
- `tailscale.preserveFunnel`: when `true` and `tailscale.mode = "serve"`, OpenClaw
checks `tailscale funnel status` before re-applying Serve at startup and skips
it if an externally configured Funnel route already covers the gateway port.
Default `false`.
- `controlUi.allowedOrigins`: explicit browser-origin allowlist for Gateway WebSocket connects. Required when browser clients are expected from non-loopback origins.
- `controlUi.chatMessageMaxWidth`: optional max-width for grouped Control UI chat messages. Accepts constrained CSS width values such as `960px`, `82%`, `min(1280px, 82%)`, and `calc(100% - 2rem)`.
- `controlUi.dangerouslyAllowHostHeaderOriginFallback`: dangerous mode that enables Host-header origin fallback for deployments that intentionally rely on Host-header origin policy.

View File

@@ -116,6 +116,11 @@ openclaw gateway --tailscale funnel --auth password
- `tailscale.mode: "funnel"` refuses to start unless auth mode is `password` to avoid public exposure.
- Set `gateway.tailscale.resetOnExit` if you want OpenClaw to undo `tailscale serve`
or `tailscale funnel` configuration on shutdown.
- Set `gateway.tailscale.preserveFunnel: true` to keep an externally configured
`tailscale funnel` route alive across gateway restarts. When enabled and the
gateway runs in `mode: "serve"`, OpenClaw checks `tailscale funnel status`
before re-applying Serve and skips it when a Funnel route already covers the
gateway port. The OpenClaw-managed Funnel password-only policy is unchanged.
- `gateway.bind: "tailnet"` is a direct Tailnet bind (no HTTPS, no Serve/Funnel).
- `gateway.bind: "auto"` prefers loopback; use `tailnet` if you want Tailnet-only.
- Serve/Funnel only expose the **Gateway control UI + WS**. Nodes connect over

View File

@@ -311,6 +311,7 @@ gh workflow run package-acceptance.yml --ref main \
- Runs the Telegram live QA lane against a real private group using the driver and SUT bot tokens from env.
- Requires `OPENCLAW_QA_TELEGRAM_GROUP_ID`, `OPENCLAW_QA_TELEGRAM_DRIVER_BOT_TOKEN`, and `OPENCLAW_QA_TELEGRAM_SUT_BOT_TOKEN`. The group id must be the numeric Telegram chat id.
- Supports `--credential-source convex` for shared pooled credentials. Use env mode by default, or set `OPENCLAW_QA_CREDENTIAL_SOURCE=convex` to opt into pooled leases.
- Defaults cover canary, mention gating, command addressing, `/status`, bot-to-bot mentioned replies, and core native command replies. `mock-openai` defaults also cover deterministic reply-chain and Telegram final-message streaming regressions. Use `--list-scenarios` for optional probes such as `session_status`.
- Exits non-zero when any scenario fails. Use `--allow-failures` when you
want artifacts without a failing exit code.
- Requires two distinct bots in the same private group, with the SUT bot exposing a Telegram username.

View File

@@ -60,13 +60,13 @@ OpenClaw dynamically discovers available models from the Kilo Gateway at startup
Any model available on the gateway can be used with the `kilocode/` prefix:
| Model ref | Notes |
| -------------------------------------- | ---------------------------------- |
| `kilocode/kilo/auto` | Default — smart routing |
| `kilocode/anthropic/claude-sonnet-4` | Anthropic via Kilo |
| `kilocode/openai/gpt-5.5` | OpenAI via Kilo |
| `kilocode/google/gemini-3-pro-preview` | Google via Kilo |
| ...and many more | Use `/models kilocode` to list all |
| Model ref | Notes |
| ---------------------------------------- | ---------------------------------- |
| `kilocode/kilo/auto` | Default — smart routing |
| `kilocode/anthropic/claude-sonnet-4` | Anthropic via Kilo |
| `kilocode/openai/gpt-5.5` | OpenAI via Kilo |
| `kilocode/google/gemini-3.1-pro-preview` | Google via Kilo |
| ...and many more | Use `/models kilocode` to list all |
<Tip>
At startup, OpenClaw queries `GET https://api.kilo.ai/api/gateway/models` and merges

View File

@@ -125,7 +125,7 @@ Current source-of-truth:
<AccordionGroup>
<Accordion title="Sessions and runs">
- `/new [model]` starts a new session; `/reset` is the reset alias.
- Control UI intercepts typed `/new` to create and switch to a fresh dashboard session; typed `/reset` still runs the Gateway's in-place reset.
- Control UI intercepts typed `/new` to create and switch to a fresh dashboard session, except when `session.dmScope: "main"` is configured and the current parent is the agent's main session; in that case `/new` resets the main session in place. Typed `/reset` still runs the Gateway's in-place reset.
- `/reset soft [message]` keeps the current transcript, drops reused CLI backend session ids, and reruns startup/system-prompt loading in-place.
- `/compact [instructions]` compacts the session context. See [Compaction](/concepts/compaction).
- `/stop` aborts the current run.

View File

@@ -106,7 +106,7 @@ title: "Thinking levels"
- `stream` (Telegram only): streams reasoning into the Telegram draft bubble while the reply is generating, then sends the final answer without reasoning.
- Alias: `/reason`.
- Send `/reasoning` (or `/reasoning:`) with no argument to see the current reasoning level.
- Resolution order: inline directive, then session override, then per-agent default (`agents.list[].reasoningDefault`), then fallback (`off`).
- Resolution order: inline directive, then session override, then per-agent default (`agents.list[].reasoningDefault`), then global default (`agents.defaults.reasoningDefault`), then fallback (`off`).
Malformed local-model reasoning tags are handled conservatively. Closed `<think>...</think>` blocks stay hidden on normal replies, and unclosed reasoning after already visible text is also hidden. If a reply is fully wrapped in a single unclosed opening tag and would otherwise deliver as empty text, OpenClaw removes the malformed opening tag and delivers the remaining text.

View File

@@ -165,7 +165,7 @@ Imported themes are stored only in the current browser profile. They are not wri
- Consecutive duplicate text-only messages render as one bubble with a count badge. Messages that carry images, attachments, tool output, or canvas previews are left uncollapsed.
- The chat header model and thinking pickers patch the active session immediately through `sessions.patch`; they are persistent session overrides, not one-turn-only send options.
- If you send a message while a model picker change for the same session is still saving, the composer waits for that session patch before calling `chat.send` so the send uses the selected model.
- Typing `/new` in the Control UI creates and switches to the same fresh dashboard session as New Chat. Typing `/reset` keeps the Gateway's explicit in-place reset for the current session.
- Typing `/new` in the Control UI creates and switches to the same fresh dashboard session as New Chat, except when `session.dmScope: "main"` is configured and the current parent is the agent's main session; in that case it resets the main session in place. Typing `/reset` keeps the Gateway's explicit in-place reset for the current session.
- The chat model picker requests the Gateway's configured model view. If `agents.defaults.models` is present, that allowlist drives the picker. Otherwise the picker shows explicit `models.providers.*.models` entries plus providers with usable auth. The full catalog stays available through the debug `models.list` RPC with `view: "all"`.
- When fresh Gateway session usage reports include current context tokens, the chat composer area shows a compact context usage indicator. It switches to warning styling at high context pressure and, at recommended compaction levels, shows a compact button that runs the normal session compaction path. Stale token snapshots are hidden until the Gateway reports fresh usage again.

View File

@@ -69,7 +69,11 @@ function expectWrapperToContainPathSuffix(wrapper: string, pathSuffix: string[])
const nativeSuffix = pathSuffix.join(path.sep);
const escapedNativeSuffix = JSON.stringify(nativeSuffix).slice(1, -1);
const posixSuffix = pathSuffix.join("/");
expect(wrapper.includes(escapedNativeSuffix) || wrapper.includes(posixSuffix)).toBe(true);
if (wrapper.includes(escapedNativeSuffix)) {
expect(wrapper).toContain(escapedNativeSuffix);
} else {
expect(wrapper).toContain(posixSuffix);
}
}
afterEach(async () => {

View File

@@ -12,7 +12,8 @@ describe("acpx package manifest", () => {
fs.readFileSync(new URL("../package.json", import.meta.url), "utf8"),
) as AcpxPackageManifest;
expect(packageJson.dependencies?.acpx).toEqual(expect.any(String));
expect(packageJson.dependencies?.acpx).toBeTypeOf("string");
expect(packageJson.dependencies?.acpx).not.toBe("");
expect(packageJson.dependencies?.["@zed-industries/codex-acp"]).toBe("0.13.0");
expect(packageJson.dependencies?.["@agentclientprotocol/claude-agent-acp"]).toBe("0.32.0");
expect(packageJson.devDependencies?.["@agentclientprotocol/claude-agent-acp"]).toBeUndefined();

View File

@@ -28,6 +28,20 @@ function cleanupDeps(processes: AcpxProcessInfo[]) {
};
}
/**
 * Collects the mapped value of every item that satisfies the predicate.
 * Behaves exactly like a manual accumulate loop: predicate gates inclusion,
 * map transforms only the matching items, input order is preserved.
 */
function collectMatching<T, U>(
  items: readonly T[],
  predicate: (item: T) => boolean,
  map: (item: T) => U,
): U[] {
  return items.filter(predicate).map(map);
}
describe("process reaper", () => {
it("recognizes generated Codex and Claude wrappers only under the configured root", () => {
expect(
@@ -237,9 +251,13 @@ describe("process reaper", () => {
expect(result.skippedReason).toBeUndefined();
expect(result.inspectedPids).toEqual([400, 401, 402, 403, 404, 405]);
expect(killed.filter((entry) => entry.signal === "SIGTERM").map((entry) => entry.pid)).toEqual([
402, 401, 400, 404, 403, 405,
]);
expect(
collectMatching(
killed,
(entry) => entry.signal === "SIGTERM",
(entry) => entry.pid,
),
).toEqual([402, 401, 400, 404, 403, 405]);
});
it("keeps startup scans quiet when process listing is unavailable", async () => {

View File

@@ -143,8 +143,8 @@ describe("AcpxRuntime fresh reset wrapper", () => {
});
it("exposes assertSupportedRuntimeSessionMode as a typed guard", () => {
expect(() => __testing.assertSupportedRuntimeSessionMode("persistent")).not.toThrow();
expect(() => __testing.assertSupportedRuntimeSessionMode("oneshot")).not.toThrow();
expect(__testing.assertSupportedRuntimeSessionMode("persistent")).toBeUndefined();
expect(__testing.assertSupportedRuntimeSessionMode("oneshot")).toBeUndefined();
expect(() => __testing.assertSupportedRuntimeSessionMode("run" as never)).toThrow(
AcpRuntimeError,
);

View File

@@ -22,6 +22,34 @@ describe("active-memory manifest config schema", () => {
expect(result.ok).toBe(true);
});
it("accepts custom toolsAllow entries", () => {
const result = validateJsonSchemaValue({
schema: manifest.configSchema,
cacheKey: "active-memory.manifest.tools-allow",
value: {
enabled: true,
agents: ["main"],
toolsAllow: ["lcm_grep", "lcm_describe", "lcm_expand_query"],
},
});
expect(result.ok).toBe(true);
});
it("rejects wildcard and group toolsAllow entries", () => {
const result = validateJsonSchemaValue({
schema: manifest.configSchema,
cacheKey: "active-memory.manifest.tools-allow.reserved",
value: {
enabled: true,
agents: ["main"],
toolsAllow: ["*", "group:plugins"],
},
});
expect(result.ok).toBe(false);
});
it("accepts timeoutMs values at the runtime ceiling", () => {
const result = validateJsonSchemaValue({
schema: manifest.configSchema,

View File

@@ -67,6 +67,19 @@ describe("active-memory plugin", () => {
},
};
};
// Rebuilds the shared configFile fixture with `plugins.slots.memory` set to
// the given id, preserving any other plugin entries and slot assignments.
const setMemorySlot = (memory: string) => {
  const existingPlugins = configFile.plugins as Record<string, unknown> | undefined;
  const existingSlots = existingPlugins?.slots as Record<string, unknown> | undefined;
  const nextSlots = { ...existingSlots, memory };
  configFile = {
    ...configFile,
    plugins: { ...existingPlugins, slots: nextSlots },
  };
};
const api: any = {
get pluginConfig() {
return pluginConfig;
@@ -117,6 +130,12 @@ describe("active-memory plugin", () => {
| undefined;
return entries?.find((entry) => entry.pluginId === "active-memory")?.lines ?? [];
};
// Asserts that at least one of the lines contains the given substring,
// using vitest's asymmetric matchers for readable failure output.
const expectLinesToContain = (lines: string[], text: string) => {
  const hasMatchingLine = expect.arrayContaining([expect.stringContaining(text)]);
  expect(lines).toEqual(hasMatchingLine);
};
// Asserts that none of the lines contain the given substring — the inverse
// of expectLinesToContain, built from the same asymmetric matchers.
const expectLinesNotToContain = (lines: string[], text: string) => {
  const hasMatchingLine = expect.arrayContaining([expect.stringContaining(text)]);
  expect(lines).not.toEqual(hasMatchingLine);
};
const writeTranscriptJsonl = async (sessionFile: string, records: unknown[], suffix = "\n") => {
await fs.mkdir(path.dirname(sessionFile), { recursive: true });
await fs.writeFile(
@@ -141,7 +160,7 @@ describe("active-memory plugin", () => {
};
const makeMemoryToolAllowlistError = (
reason: string,
sources = "runtime toolsAllow: memory_recall, memory_search, memory_get",
sources = "runtime toolsAllow: memory_search, memory_get",
) =>
new Error(
`No callable tools remain after resolving explicit tool allowlist ` +
@@ -1279,16 +1298,17 @@ describe("active-memory plugin", () => {
);
expect(runParams?.prompt).toContain("Use only the available memory tools.");
expect(runParams?.prompt).toContain(
"Use the bounded search query as the memory_search or memory_recall query.",
"Use the bounded search query with the configured memory tools.",
);
expect(runParams?.prompt).toContain("Prefer memory_recall when available.");
expect(runParams?.prompt).toContain("Configured memory tools: memory_search, memory_get.");
expect(runParams?.prompt).toContain(
"If memory_recall is unavailable, use memory_search and memory_get.",
"If the available memory tools find nothing useful, reply with NONE.",
);
expect(runParams?.toolsAllow).toEqual(["memory_recall", "memory_search", "memory_get"]);
expect(runParams?.prompt).not.toContain("memory_recall");
expect(runParams?.toolsAllow).toEqual(["memory_search", "memory_get"]);
expect(runParams?.allowGatewaySubagentBinding).toBe(true);
expect(runParams?.prompt).toContain(
"When searching for preference or habit recall, use a permissive recall limit or memory_search threshold before deciding that no useful memory exists.",
"When searching for preference or habit recall, use permissive search limits or thresholds before deciding that no useful memory exists.",
);
expect(runParams?.prompt).toContain(
"If the user is directly asking about favorites, preferences, habits, routines, or personal facts, treat that as a strong recall signal.",
@@ -1312,6 +1332,187 @@ describe("active-memory plugin", () => {
);
});
// Custom `toolsAllow` entries from the plugin config must reach the recall
// subagent trimmed and de-duplicated, and the default prompt must advertise
// exactly those tools instead of the built-in memory_recall guidance.
it("passes custom configured memory tools and reflects them in the default prompt", async () => {
// Deliberately messy input: padded whitespace, an empty entry, and a duplicate.
api.pluginConfig = {
agents: ["main"],
toolsAllow: [" lcm_grep ", "lcm_describe", "", "lcm_expand_query", "lcm_grep"],
};
plugin.register(api as unknown as OpenClawPluginApi);
await hooks.before_prompt_build(
{
prompt: "What did we decide about active memory?",
messages: [],
},
{
agentId: "main",
trigger: "user",
sessionKey: "agent:main:main",
messageProvider: "webchat",
},
);
// Inspect the most recent embedded subagent invocation.
const runParams = runEmbeddedPiAgent.mock.calls.at(-1)?.[0];
expect(runParams?.toolsAllow).toEqual(["lcm_grep", "lcm_describe", "lcm_expand_query"]);
expect(runParams?.prompt).toContain(
"Configured memory tools: lcm_grep, lcm_describe, lcm_expand_query.",
);
// The legacy memory_recall-specific prompt lines must not leak through.
expect(runParams?.prompt).not.toContain("Prefer memory_recall");
expect(runParams?.prompt).not.toContain("If memory_recall is unavailable");
});
it("uses memory_recall by default when the memory slot selects LanceDB", async () => {
setMemorySlot("memory-lancedb");
await hooks.before_prompt_build(
{
prompt: "What did we decide about active memory?",
messages: [],
},
{
agentId: "main",
trigger: "user",
sessionKey: "agent:main:main",
messageProvider: "webchat",
},
);
const runParams = runEmbeddedPiAgent.mock.calls.at(-1)?.[0];
expect(runParams?.toolsAllow).toEqual(["memory_recall"]);
expect(runParams?.prompt).toContain("Configured memory tools: memory_recall.");
});
it("keeps explicit custom memory tools authoritative when the memory slot selects LanceDB", async () => {
setMemorySlot("memory-lancedb");
api.pluginConfig = {
agents: ["main"],
toolsAllow: ["lcm_grep"],
};
await hooks.before_prompt_build(
{
prompt: "What did we decide about active memory?",
messages: [],
},
{
agentId: "main",
trigger: "user",
sessionKey: "agent:main:main",
messageProvider: "webchat",
},
);
const runParams = runEmbeddedPiAgent.mock.calls.at(-1)?.[0];
expect(runParams?.toolsAllow).toEqual(["lcm_grep"]);
expect(runParams?.prompt).toContain("Configured memory tools: lcm_grep.");
});
it("drops wildcard group and core tools from custom memory tools", async () => {
api.pluginConfig = {
agents: ["main"],
toolsAllow: [
"*",
"agents_list",
"apply_patch",
"canvas",
"cron",
"edit",
"gateway",
"heartbeat_respond",
"heartbeat_response",
"image",
"image_generate",
"music_generate",
"nodes",
"pdf",
"process",
"session_status",
"sessions_history",
"sessions_list",
"sessions_send",
"sessions_spawn",
"sessions_yield",
"tts",
"video_generate",
"group:plugins",
"read",
"exec",
"message",
"lcm_grep",
"web_search",
"lcm_describe",
],
};
plugin.register(api as unknown as OpenClawPluginApi);
await hooks.before_prompt_build(
{
prompt: "What did we decide about active memory?",
messages: [],
},
{
agentId: "main",
trigger: "user",
sessionKey: "agent:main:main",
messageProvider: "webchat",
},
);
const runParams = runEmbeddedPiAgent.mock.calls.at(-1)?.[0];
expect(runParams?.toolsAllow).toEqual(["lcm_grep", "lcm_describe"]);
expect(runParams?.prompt).toContain("Configured memory tools: lcm_grep, lcm_describe.");
});
// When every configured toolsAllow entry is reserved (wildcards, groups,
// core tools), the normalized list is empty and the plugin must fall back
// to the built-in memory-core default pair.
it("falls back to default memory tools when custom memory tools only contain reserved entries", async () => {
api.pluginConfig = {
agents: ["main"],
toolsAllow: ["*", "group:plugins", "read", "exec", "message", "web_search"],
};
plugin.register(api as unknown as OpenClawPluginApi);
await hooks.before_prompt_build(
{
prompt: "What did we decide about active memory?",
messages: [],
},
{
agentId: "main",
trigger: "user",
sessionKey: "agent:main:main",
messageProvider: "webchat",
},
);
// The subagent must receive the memory-core defaults, and the prompt must
// advertise them as the configured tools.
const runParams = runEmbeddedPiAgent.mock.calls.at(-1)?.[0];
expect(runParams?.toolsAllow).toEqual(["memory_search", "memory_get"]);
expect(runParams?.prompt).toContain("Configured memory tools: memory_search, memory_get.");
});
it("falls back to LanceDB compat tools when custom memory tools only contain reserved entries", async () => {
setMemorySlot("memory-lancedb");
api.pluginConfig = {
agents: ["main"],
toolsAllow: ["*", "group:plugins", "read", "exec", "message", "web_search"],
};
await hooks.before_prompt_build(
{
prompt: "What did we decide about active memory?",
messages: [],
},
{
agentId: "main",
trigger: "user",
sessionKey: "agent:main:main",
messageProvider: "webchat",
},
);
const runParams = runEmbeddedPiAgent.mock.calls.at(-1)?.[0];
expect(runParams?.toolsAllow).toEqual(["memory_recall"]);
expect(runParams?.prompt).toContain("Configured memory tools: memory_recall.");
});
it("defaults prompt style by query mode when no promptStyle is configured", async () => {
api.pluginConfig = {
agents: ["main"],
@@ -1865,7 +2066,7 @@ describe("active-memory plugin", () => {
);
expect(result).toBeUndefined();
expect(hasDebugLine("no memory tools registered")).toBe(true);
expect(hasDebugLine("no configured memory tools available")).toBe(true);
expect(hasWarnLine("No callable tools remain")).toBe(false);
const lines = getActiveMemoryLines(sessionKey);
expect(lines).toEqual([expect.stringContaining("🧩 Active Memory: status=empty")]);
@@ -1880,7 +2081,7 @@ describe("active-memory plugin", () => {
};
const error = makeMemoryToolAllowlistError(
"no registered tools matched",
"tools.allow: *, lobster; runtime toolsAllow: memory_recall, memory_search, memory_get",
"tools.allow: *, lobster; runtime toolsAllow: memory_search, memory_get",
);
expect(__testing.isMissingRegisteredMemoryToolsError(error)).toBe(true);
runEmbeddedPiAgent.mockRejectedValueOnce(error);
@@ -1891,14 +2092,46 @@ describe("active-memory plugin", () => {
);
expect(result).toBeUndefined();
expect(hasDebugLine("no memory tools registered")).toBe(true);
expect(hasDebugLine("no configured memory tools available")).toBe(true);
expect(hasWarnLine("No callable tools remain")).toBe(false);
expect(getActiveMemoryLines(sessionKey)).toEqual([
expect.stringContaining("🧩 Active Memory: status=empty"),
]);
});
it("keeps memory-tool allowlist errors visible when upstream policy can filter memory tools", async () => {
it("skips missing custom memory tools using the resolved custom allowlist", async () => {
api.pluginConfig = {
agents: ["main"],
toolsAllow: ["lcm_grep", "lcm_describe", "lcm_expand_query"],
logging: true,
};
plugin.register(api as unknown as OpenClawPluginApi);
const sessionKey = "agent:main:missing-custom-memory-tools";
hoisted.sessionStore[sessionKey] = {
sessionId: "s-missing-custom-memory-tools",
updatedAt: 0,
};
const toolsAllow = ["lcm_grep", "lcm_describe", "lcm_expand_query"];
const error = makeMemoryToolAllowlistError(
"no registered tools matched",
`runtime toolsAllow: ${toolsAllow.join(", ")}`,
);
expect(__testing.isMissingRegisteredMemoryToolsError(error, toolsAllow)).toBe(true);
runEmbeddedPiAgent.mockRejectedValueOnce(error);
const result = await hooks.before_prompt_build(
{ prompt: "what did we decide? missing custom memory tools", messages: [] },
{ agentId: "main", trigger: "user", sessionKey, messageProvider: "webchat" },
);
expect(result).toBeUndefined();
expect(hasDebugLine("no configured memory tools available")).toBe(true);
expect(getActiveMemoryLines(sessionKey)).toEqual([
expect.stringContaining("🧩 Active Memory: status=empty"),
]);
});
it("skips memory-tool allowlist errors when upstream policy filters memory tools", async () => {
const sessionKey = "agent:main:memory-tools-filtered-by-policy";
hoisted.sessionStore[sessionKey] = {
sessionId: "s-memory-tools-filtered-by-policy",
@@ -1906,9 +2139,9 @@ describe("active-memory plugin", () => {
};
const error = makeMemoryToolAllowlistError(
"no registered tools matched",
"tools.allow: read, exec; runtime toolsAllow: memory_recall, memory_search, memory_get",
"tools.allow: read, exec; runtime toolsAllow: memory_search, memory_get",
);
expect(__testing.isMissingRegisteredMemoryToolsError(error)).toBe(false);
expect(__testing.isMissingRegisteredMemoryToolsError(error)).toBe(true);
runEmbeddedPiAgent.mockRejectedValueOnce(error);
const result = await hooks.before_prompt_build(
@@ -1917,38 +2150,41 @@ describe("active-memory plugin", () => {
);
expect(result).toBeUndefined();
expect(hasDebugLine("no memory tools registered")).toBe(false);
expect(hasWarnLine("No callable tools remain")).toBe(true);
expect(hasDebugLine("no configured memory tools available")).toBe(true);
expect(hasWarnLine("No callable tools remain")).toBe(false);
expect(getActiveMemoryLines(sessionKey)).toEqual([
expect.stringContaining("🧩 Active Memory: status=unavailable"),
expect.stringContaining("🧩 Active Memory: status=empty"),
]);
});
it.each([
["disabled tools", "tools are disabled for this run"],
["models without tool support", "the selected model does not support tools"],
])("keeps allowlist errors for %s visible", async (_label, reason) => {
const sessionKey = `agent:main:${reason.replace(/\W+/g, "-")}`;
hoisted.sessionStore[sessionKey] = {
sessionId: `s-${reason.replace(/\W+/g, "-")}`,
updatedAt: 0,
};
const error = makeMemoryToolAllowlistError(reason);
expect(__testing.isMissingRegisteredMemoryToolsError(error)).toBe(false);
runEmbeddedPiAgent.mockRejectedValueOnce(error);
])(
"skips allowlist errors for %s without surfacing to the main thread",
async (_label, reason) => {
const sessionKey = `agent:main:${reason.replace(/\W+/g, "-")}`;
hoisted.sessionStore[sessionKey] = {
sessionId: `s-${reason.replace(/\W+/g, "-")}`,
updatedAt: 0,
};
const error = makeMemoryToolAllowlistError(reason);
expect(__testing.isMissingRegisteredMemoryToolsError(error)).toBe(false);
runEmbeddedPiAgent.mockRejectedValueOnce(error);
const result = await hooks.before_prompt_build(
{ prompt: `what wings should i order? ${reason}`, messages: [] },
{ agentId: "main", trigger: "user", sessionKey, messageProvider: "webchat" },
);
const result = await hooks.before_prompt_build(
{ prompt: `what wings should i order? ${reason}`, messages: [] },
{ agentId: "main", trigger: "user", sessionKey, messageProvider: "webchat" },
);
expect(result).toBeUndefined();
expect(hasDebugLine("no memory tools registered")).toBe(false);
expect(hasWarnLine(reason)).toBe(true);
expect(getActiveMemoryLines(sessionKey)).toEqual([
expect.stringContaining("🧩 Active Memory: status=unavailable"),
]);
});
expect(result).toBeUndefined();
expect(hasDebugLine("no configured memory tools available")).toBe(false);
expect(hasWarnLine(reason)).toBe(true);
expect(getActiveMemoryLines(sessionKey)).toEqual([
expect.stringContaining("🧩 Active Memory: status=empty"),
]);
},
);
it("does not skip missing memory-tool allowlist errors after abort", async () => {
const sessionKey = "agent:main:missing-memory-tools-after-abort";
@@ -1970,7 +2206,7 @@ describe("active-memory plugin", () => {
);
expect(result).toBeUndefined();
expect(hasDebugLine("no memory tools registered")).toBe(false);
expect(hasDebugLine("no configured memory tools available")).toBe(false);
expect(getActiveMemoryLines(sessionKey)).toEqual([
expect.stringContaining("🧩 Active Memory: status=timeout"),
]);
@@ -2122,7 +2358,7 @@ describe("active-memory plugin", () => {
expect(result).toBeUndefined();
const lines = getActiveMemoryLines(sessionKey);
expect(lines).toEqual([expect.stringContaining("🧩 Active Memory: status=timeout")]);
expect(lines.some((line) => line.includes("timeout_partial"))).toBe(false);
expectLinesNotToContain(lines, "timeout_partial");
});
it("keeps timeout status when the timeout transcript path does not exist", async () => {
@@ -2152,7 +2388,7 @@ describe("active-memory plugin", () => {
expect(result).toBeUndefined();
const lines = getActiveMemoryLines(sessionKey);
expect(lines).toEqual([expect.stringContaining("🧩 Active Memory: status=timeout")]);
expect(lines.some((line) => line.includes("timeout_partial"))).toBe(false);
expectLinesNotToContain(lines, "timeout_partial");
});
it("does not inject embedded timeout boilerplate from partial transcripts", async () => {
@@ -2197,8 +2433,8 @@ describe("active-memory plugin", () => {
expect(result).toBeUndefined();
const lines = getActiveMemoryLines(sessionKey);
expect(lines).toEqual([expect.stringContaining("🧩 Active Memory: status=timeout")]);
expect(lines.some((line) => line.includes("timeout_partial"))).toBe(false);
expect(lines.some((line) => line.includes("LLM request timed out"))).toBe(false);
expectLinesNotToContain(lines, "timeout_partial");
expectLinesNotToContain(lines, "LLM request timed out");
});
it("returns partial transcript text when an aborted subagent rejects before the race timeout wins", async () => {
@@ -2252,7 +2488,7 @@ describe("active-memory plugin", () => {
expect(getActiveMemoryLines(sessionKey).join("\n")).not.toContain("partial abort summary");
});
it("keeps generic subagent errors unavailable without using partial transcript output", async () => {
it("skips generic subagent errors without using partial transcript output", async () => {
api.pluginConfig = {
agents: ["main"],
persistTranscripts: true,
@@ -2281,7 +2517,7 @@ describe("active-memory plugin", () => {
expect(result).toBeUndefined();
expect(getActiveMemoryLines(sessionKey)).toEqual([
expect.stringContaining("🧩 Active Memory: status=unavailable"),
expect.stringContaining("🧩 Active Memory: status=empty"),
]);
expect(getActiveMemoryLines(sessionKey).join("\n")).not.toContain(
"must not be surfaced from generic errors",
@@ -2521,7 +2757,7 @@ describe("active-memory plugin", () => {
const infoLines = vi
.mocked(api.logger.info)
.mock.calls.map((call: unknown[]) => String(call[0]));
expect(infoLines.some((line: string) => line.includes(" cached "))).toBe(false);
expectLinesNotToContain(infoLines, " cached ");
});
it("does not share cached recall results across session-id-only contexts", async () => {
@@ -2554,7 +2790,7 @@ describe("active-memory plugin", () => {
const infoLines = vi
.mocked(api.logger.info)
.mock.calls.map((call: unknown[]) => String(call[0]));
expect(infoLines.some((line: string) => line.includes(" cached "))).toBe(false);
expectLinesNotToContain(infoLines, " cached ");
});
it("ignores late subagent payloads once the active-memory timeout signal has fired", async () => {
@@ -2589,7 +2825,7 @@ describe("active-memory plugin", () => {
const infoLines = vi
.mocked(api.logger.info)
.mock.calls.map((call: unknown[]) => String(call[0]));
expect(infoLines.some((line: string) => line.includes("status=timeout"))).toBe(true);
expectLinesToContain(infoLines, "status=timeout");
expect(
infoLines.some(
(line: string) =>
@@ -2632,7 +2868,7 @@ describe("active-memory plugin", () => {
const infoLines = vi
.mocked(api.logger.info)
.mock.calls.map((call: unknown[]) => String(call[0]));
expect(infoLines.some((line: string) => line.includes("status=timeout"))).toBe(false);
expectLinesNotToContain(infoLines, "status=timeout");
});
it("returns timeout within a hard deadline even when the subagent never checks the abort signal", async () => {
@@ -2669,7 +2905,7 @@ describe("active-memory plugin", () => {
const infoLines = vi
.mocked(api.logger.info)
.mock.calls.map((call: unknown[]) => String(call[0]));
expect(infoLines.some((line: string) => line.includes("status=timeout"))).toBe(true);
expectLinesToContain(infoLines, "status=timeout");
// Hard deadline: wall-clock time must be near timeoutMs, not 30s.
expect(wallClockMs).toBeLessThan(CONFIGURED_TIMEOUT_MS + HARD_DEADLINE_MARGIN_MS);
});
@@ -2710,8 +2946,8 @@ describe("active-memory plugin", () => {
const infoLines = vi
.mocked(api.logger.info)
.mock.calls.map((call: unknown[]) => String(call[0]));
expect(infoLines.some((line: string) => line.includes("done status=empty"))).toBe(true);
expect(infoLines.some((line: string) => line.includes("done status=timeout"))).toBe(false);
expectLinesToContain(infoLines, "done status=empty");
expectLinesNotToContain(infoLines, "done status=timeout");
expect(getActiveMemoryLines(sessionKey)).toEqual([
expect.stringContaining("🧩 Active Memory: status=empty"),
expect.stringContaining("🔎 Active Memory Debug: backend=qmd searchMs=8 hits=0"),
@@ -2802,8 +3038,8 @@ describe("active-memory plugin", () => {
const infoLines = vi
.mocked(api.logger.info)
.mock.calls.map((call: unknown[]) => String(call[0]));
expect(infoLines.some((line: string) => line.includes("done status=empty"))).toBe(true);
expect(infoLines.some((line: string) => line.includes("done status=timeout"))).toBe(false);
expectLinesToContain(infoLines, "done status=empty");
expectLinesNotToContain(infoLines, "done status=timeout");
expect(getActiveMemoryLines(sessionKey)).toEqual([
expect.stringContaining("🧩 Active Memory: status=empty"),
expect.stringContaining(
@@ -2862,7 +3098,7 @@ describe("active-memory plugin", () => {
const warnLines = vi
.mocked(api.logger.warn)
.mock.calls.map((call: unknown[]) => String(call[0]));
expect(warnLines.some((line: string) => line.includes("before_prompt_build"))).toBe(true);
expectLinesToContain(warnLines, "before_prompt_build");
});
it("honors configured timeoutMs values above the former 60 000 ms ceiling", async () => {
@@ -3622,13 +3858,11 @@ describe("active-memory plugin", () => {
),
);
expect(
vi
.mocked(api.logger.info)
.mock.calls.some((call: unknown[]) =>
String(call[0]).includes(`transcript=${expectedDir}${path.sep}`),
),
).toBe(true);
expect(rmSpy.mock.calls.some(([target]) => String(target).startsWith(expectedDir))).toBe(false);
vi.mocked(api.logger.info).mock.calls.map((call: unknown[]) => String(call[0])),
).toContainEqual(expect.stringContaining(`transcript=${expectedDir}${path.sep}`));
expect(rmSpy.mock.calls.filter(([target]) => String(target).startsWith(expectedDir))).toEqual(
[],
);
});
it("falls back to the default transcript directory when transcriptDir is unsafe", async () => {
@@ -3733,8 +3967,8 @@ describe("active-memory plugin", () => {
const lines =
(store[sessionKey]?.pluginDebugEntries as Array<{ lines?: string[] }> | undefined)?.[0]
?.lines ?? [];
expect(lines.some((line) => line.includes("\u001b"))).toBe(false);
expect(lines.some((line) => line.includes("\r"))).toBe(false);
expectLinesNotToContain(lines, "\u001b");
expectLinesNotToContain(lines, "\r");
});
it("caps the active-memory cache size and evicts the oldest entries", () => {
@@ -3829,7 +4063,7 @@ describe("active-memory plugin", () => {
const infoLines = vi
.mocked(api.logger.info)
.mock.calls.map((call: unknown[]) => String(call[0]));
expect(infoLines.some((line: string) => line.includes("circuit breaker open"))).toBe(true);
expectLinesToContain(infoLines, "circuit breaker open");
});
it("resets circuit breaker after a successful recall", async () => {

View File

@@ -42,7 +42,43 @@ const DEFAULT_QMD_SEARCH_MODE = "search" as const;
const DEFAULT_TRANSCRIPT_DIR = "active-memory";
const DEFAULT_CIRCUIT_BREAKER_MAX_TIMEOUTS = 3;
const DEFAULT_CIRCUIT_BREAKER_COOLDOWN_MS = 60_000;
const ACTIVE_MEMORY_TOOL_ALLOWLIST = ["memory_recall", "memory_search", "memory_get"] as const;
const DEFAULT_ACTIVE_MEMORY_TOOLS_ALLOW = ["memory_search", "memory_get"] as const;
const LANCEDB_ACTIVE_MEMORY_TOOLS_ALLOW = ["memory_recall"] as const;
const MAX_ACTIVE_MEMORY_TOOLS_ALLOW = 32;
const ACTIVE_MEMORY_RESERVED_TOOLS_ALLOW = new Set([
"*",
"agents_list",
"apply_patch",
"browser",
"canvas",
"cron",
"edit",
"exec",
"gateway",
"heartbeat_respond",
"heartbeat_response",
"image",
"image_generate",
"message",
"music_generate",
"nodes",
"pdf",
"process",
"read",
"session_status",
"sessions_history",
"sessions_list",
"sessions_send",
"sessions_spawn",
"sessions_yield",
"subagents",
"tts",
"update_plan",
"video_generate",
"web_fetch",
"web_search",
"write",
]);
const TOGGLE_STATE_FILE = "session-toggles.json";
const DEFAULT_PARTIAL_TRANSCRIPT_MAX_CHARS = 32_000;
const DEFAULT_TRANSCRIPT_READ_MAX_LINES = 2_000;
@@ -101,6 +137,7 @@ type ActiveRecallPluginConfig = {
| "recall-heavy"
| "precision-heavy"
| "preference-only";
toolsAllow?: string[];
promptOverride?: string;
promptAppend?: string;
timeoutMs?: number;
@@ -141,6 +178,7 @@ type ResolvedActiveRecallPluginConfig = {
| "recall-heavy"
| "precision-heavy"
| "preference-only";
toolsAllow: string[];
promptOverride?: string;
promptAppend?: string;
timeoutMs: number;
@@ -399,6 +437,46 @@ function normalizeChatIdList(value: unknown): string[] {
return out;
}
/**
 * Normalizes a raw `toolsAllow` config value into a bounded, de-duplicated
 * list of custom memory tool names.
 *
 * Non-string entries, blank names, reserved names (wildcard/group/core
 * tools), and repeats are dropped; at most MAX_ACTIVE_MEMORY_TOOLS_ALLOW
 * names are kept, in first-seen order. Returns `undefined` when the value is
 * not an array or nothing usable remains, so callers fall back to defaults.
 */
function normalizeConfiguredToolsAllow(value: unknown): string[] | undefined {
  if (!Array.isArray(value)) {
    return undefined;
  }
  // A Set preserves insertion order, giving de-duplication for free.
  const accepted = new Set<string>();
  for (const entry of value) {
    if (accepted.size >= MAX_ACTIVE_MEMORY_TOOLS_ALLOW) {
      break;
    }
    if (typeof entry !== "string") {
      continue;
    }
    const name = entry.trim();
    if (name && !isReservedActiveMemoryToolsAllowEntry(name)) {
      accepted.add(name);
    }
  }
  return accepted.size > 0 ? [...accepted] : undefined;
}
// A toolsAllow entry is reserved when it names the "*" wildcard, any
// "group:" selector, or a core tool (all matched case-insensitively).
function isReservedActiveMemoryToolsAllowEntry(value: string): boolean {
  const name = value.trim().toLowerCase();
  if (name.startsWith("group:")) {
    return true;
  }
  return ACTIVE_MEMORY_RESERVED_TOOLS_ALLOW.has(name);
}
// Picks the built-in recall tool default: memory_recall when the memory
// slot selects LanceDB, otherwise the memory-core search/get pair. A fresh
// array is returned each call so callers may mutate freely.
function resolveDefaultToolsAllow(cfg: OpenClawConfig | undefined): string[] {
  if (cfg?.plugins?.slots?.memory === "memory-lancedb") {
    return [...LANCEDB_ACTIVE_MEMORY_TOOLS_ALLOW];
  }
  return [...DEFAULT_ACTIVE_MEMORY_TOOLS_ALLOW];
}
// Resolves the effective recall-subagent tool allowlist: an explicit,
// normalized plugin config wins; otherwise the slot-aware default applies.
function resolveToolsAllow(params: { pluginToolsAllow: unknown; cfg?: OpenClawConfig }): string[] {
  const configured = normalizeConfiguredToolsAllow(params.pluginToolsAllow);
  if (configured !== undefined) {
    return configured;
  }
  return resolveDefaultToolsAllow(params.cfg);
}
function normalizePromptConfigText(value: unknown): string | undefined {
const text = typeof value === "string" ? value.trim() : "";
return text ? text : undefined;
@@ -445,6 +523,13 @@ function resolvePersistentTranscriptBaseDir(api: OpenClawPluginApi, agentId: str
);
}
/**
 * Returns the transient workspace directory, throwing when it was never
 * initialized — guards transcript paths against an absent temp dir.
 *
 * @throws Error when tempDir is undefined or empty.
 */
function requireTransientWorkspaceDir(tempDir: string | undefined): string {
  if (tempDir === undefined || tempDir === "") {
    throw new Error("Active memory transient workspace was not initialized.");
  }
  return tempDir;
}
function resolveCanonicalSessionKeyFromSessionId(params: {
api: OpenClawPluginApi;
agentId: string;
@@ -497,7 +582,14 @@ function normalizeOptionalString(value: unknown): string | undefined {
return typeof value === "string" && value.trim() ? value.trim() : undefined;
}
function isMissingRegisteredMemoryToolsError(error: unknown): boolean {
// Mirrors the runtime's allowlist-source phrasing so allowlist error
// messages can be matched exactly against the configured tool list.
function formatRuntimeToolsAllowSource(toolsAllow: readonly string[]): string {
  const joined = Array.from(toolsAllow).join(", ");
  return `runtime toolsAllow: ${joined}`;
}
function isMissingRegisteredMemoryToolsError(
error: unknown,
toolsAllow: readonly string[] = DEFAULT_ACTIVE_MEMORY_TOOLS_ALLOW,
): boolean {
if (!(error instanceof Error)) {
return false;
}
@@ -509,24 +601,12 @@ function isMissingRegisteredMemoryToolsError(error: unknown): boolean {
return false;
}
const sources = message.slice(prefix.length, -suffix.length);
const runtimeSource = `runtime toolsAllow: ${ACTIVE_MEMORY_TOOL_ALLOWLIST.join(", ")}`;
const runtimeSource = formatRuntimeToolsAllowSource(toolsAllow);
const sourceParts = sources
.split(";")
.map((source) => source.trim())
.filter(Boolean);
if (!sourceParts.includes(runtimeSource)) {
return false;
}
return sourceParts.every((source) => {
if (source === runtimeSource) {
return true;
}
const entries = source
.slice(source.indexOf(":") + 1)
.split(",")
.map((entry) => entry.trim());
return entries.includes("*");
});
return sourceParts.includes(runtimeSource);
}
function resolveRecallRunChannelContext(params: {
@@ -791,7 +871,10 @@ function requiresAdminToMutateActiveMemoryGlobal(gatewayClientScopes?: readonly
const ACTIVE_MEMORY_GLOBAL_MUTATION_ADMIN_REQUIRED_TEXT =
"⚠️ /active-memory global enable/disable changes require operator.admin for gateway clients.";
function normalizePluginConfig(pluginConfig: unknown): ResolvedActiveRecallPluginConfig {
function normalizePluginConfig(
pluginConfig: unknown,
cfg?: OpenClawConfig,
): ResolvedActiveRecallPluginConfig {
const raw = (
pluginConfig && typeof pluginConfig === "object" ? pluginConfig : {}
) as ActiveRecallPluginConfig;
@@ -819,6 +902,7 @@ function normalizePluginConfig(pluginConfig: unknown): ResolvedActiveRecallPlugi
deniedChatIds: normalizeChatIdList(raw.deniedChatIds),
thinking: resolveThinkingLevel(raw.thinking),
promptStyle: resolvePromptStyle(raw.promptStyle, raw.queryMode),
toolsAllow: resolveToolsAllow({ pluginToolsAllow: raw.toolsAllow, cfg }),
promptOverride: normalizePromptConfigText(raw.promptOverride),
promptAppend: normalizePromptConfigText(raw.promptAppend),
timeoutMs: clampInt(
@@ -990,11 +1074,11 @@ function buildRecallPrompt(params: {
"Your job is to search memory and return only the most relevant memory context for that model.",
"You receive a bounded search query plus conversation context, including the user's latest message.",
"Use only the available memory tools.",
"Use the bounded search query as the memory_search or memory_recall query.",
"Use the bounded search query with the configured memory tools.",
`Configured memory tools: ${params.config.toolsAllow.join(", ")}.`,
"Do not use channel metadata, provider metadata, debug output, or the full conversation context as the memory tool query.",
"Prefer memory_recall when available.",
"If memory_recall is unavailable, use memory_search and memory_get.",
"When searching for preference or habit recall, use a permissive recall limit or memory_search threshold before deciding that no useful memory exists.",
"If the available memory tools find nothing useful, reply with NONE.",
"When searching for preference or habit recall, use permissive search limits or thresholds before deciding that no useful memory exists.",
"Do not answer the user directly.",
`Prompt style: ${params.config.promptStyle}.`,
...buildPromptStyleLines(params.config.promptStyle),
@@ -2398,9 +2482,10 @@ async function runRecallSubagent(params: {
params.config.transcriptDir,
)
: undefined;
const sessionFile = params.config.persistTranscripts
? path.join(persistedDir!, `${subagentSessionId}.jsonl`)
: path.join(tempDir!, "session.jsonl");
const sessionFile =
persistedDir !== undefined
? path.join(persistedDir, `${subagentSessionId}.jsonl`)
: path.join(requireTransientWorkspaceDir(tempDir), "session.jsonl");
params.onSessionFile?.(sessionFile);
if (persistedDir) {
await fs.mkdir(persistedDir, { recursive: true, mode: 0o700 });
@@ -2439,7 +2524,7 @@ async function runRecallSubagent(params: {
timeoutMs: embeddedTimeoutMs,
runId: subagentSessionId,
trigger: "manual",
toolsAllow: [...ACTIVE_MEMORY_TOOL_ALLOWLIST],
toolsAllow: [...params.config.toolsAllow],
disableMessageTool: true,
allowGatewaySubagentBinding: true,
bootstrapContextMode: "lightweight",
@@ -2482,9 +2567,19 @@ async function runRecallSubagent(params: {
const searchDebug = partialReply ? await readActiveMemorySearchDebug(sessionFile) : undefined;
attachPartialTimeoutData(error, partialReply, searchDebug);
}
if (!params.abortSignal?.aborted && isMissingRegisteredMemoryToolsError(error)) {
if (
!params.abortSignal?.aborted &&
isMissingRegisteredMemoryToolsError(error, params.config.toolsAllow)
) {
params.api.logger.debug?.(
`active-memory: no memory tools registered (memory-core or memory-lancedb required); skipping sub-agent`,
`active-memory: no configured memory tools available; skipping sub-agent`,
);
return { rawReply: "NONE" };
}
if (!params.abortSignal?.aborted) {
const message = toSingleLineLogValue(error instanceof Error ? error.message : String(error));
params.api.logger.warn?.(
`active-memory: memory sub-agent failed, skipping recall: ${message}`,
);
return { rawReply: "NONE" };
}
@@ -2751,10 +2846,10 @@ async function maybeResolveActiveRecall(params: {
}
const message = toSingleLineLogValue(error instanceof Error ? error.message : String(error));
if (params.config.logging) {
params.api.logger.warn?.(`${logPrefix} failed error=${message}`);
params.api.logger.warn?.(`${logPrefix} failed error=${message}; skipping recall`);
}
const result: ActiveRecallResult = {
status: "unavailable",
status: "empty",
elapsedMs: Date.now() - startedAt,
summary: null,
};
@@ -2777,7 +2872,17 @@ export default definePluginEntry({
name: "Active Memory",
description: "Proactively surfaces relevant memory before eligible conversational replies.",
register(api: OpenClawPluginApi) {
let config = normalizePluginConfig(api.pluginConfig);
const readCurrentConfig = (): OpenClawConfig | undefined => {
try {
return (
(api.runtime.config?.current?.() as OpenClawConfig | undefined) ??
(api.config as OpenClawConfig | undefined)
);
} catch {
return api.config as OpenClawConfig | undefined;
}
};
let config = normalizePluginConfig(api.pluginConfig, readCurrentConfig());
const warnDeprecatedModelFallbackPolicy = (pluginConfig: unknown) => {
if (hasDeprecatedModelFallbackPolicy(pluginConfig)) {
// Wording matters here: the previous text ("set config.modelFallback
@@ -2805,7 +2910,7 @@ export default definePluginEntry({
"active-memory",
api.pluginConfig as Record<string, unknown>,
);
config = normalizePluginConfig(livePluginConfig ?? { enabled: false });
config = normalizePluginConfig(livePluginConfig ?? { enabled: false }, readCurrentConfig());
if (livePluginConfig) {
warnDeprecatedModelFallbackPolicy(livePluginConfig);
}

View File

@@ -56,6 +56,14 @@
"preference-only"
]
},
"toolsAllow": {
"type": "array",
"items": {
"type": "string",
"pattern": "^(?!\\*$)(?![Gg][Rr][Oo][Uu][Pp]:).+"
},
"maxItems": 32
},
"promptOverride": { "type": "string" },
"promptAppend": { "type": "string" },
"maxSummaryChars": { "type": "integer", "minimum": 40, "maximum": 1000 },
@@ -129,6 +137,10 @@
"label": "Prompt Style",
"help": "Choose how eager or strict the blocking memory sub-agent should be when deciding whether to return memory."
},
"toolsAllow": {
"label": "Allowed Memory Tools",
"help": "Advanced: tool names the blocking memory sub-agent may use. Defaults to memory_search and memory_get, or memory_recall when plugins.slots.memory selects memory-lancedb; configure this for other non-core memory providers. Wildcards, group entries, and core agent tools are ignored."
},
"thinking": {
"label": "Thinking Override",
"help": "Advanced: optional thinking level for the blocking memory sub-agent. Defaults to off for speed."

View File

@@ -398,11 +398,12 @@ describe("bedrock mantle discovery", () => {
fetchFn: mockFetch as unknown as typeof fetch,
});
expect(provider).not.toBeNull();
expect(provider?.baseUrl).toBe("https://bedrock-mantle.us-east-1.api.aws/v1");
expect(provider?.api).toBe("openai-completions");
expect(provider?.auth).toBe("api-key");
expect(provider?.apiKey).toBe("env:AWS_BEARER_TOKEN_BEDROCK");
expect(provider).toMatchObject({
baseUrl: "https://bedrock-mantle.us-east-1.api.aws/v1",
api: "openai-completions",
auth: "api-key",
apiKey: "env:AWS_BEARER_TOKEN_BEDROCK",
});
expect(provider?.models).toHaveLength(2);
expect(
provider?.models?.find((model) => model.id === "anthropic.claude-opus-4-7"),
@@ -447,8 +448,7 @@ describe("bedrock mantle discovery", () => {
tokenProviderFactory,
});
expect(provider).not.toBeNull();
expect(provider?.apiKey).toBe(MANTLE_IAM_TOKEN_MARKER);
expect(provider).toMatchObject({ apiKey: MANTLE_IAM_TOKEN_MARKER });
expect(tokenProvider).toHaveBeenCalledTimes(1);
expect(mockFetch).toHaveBeenCalledWith(
"https://bedrock-mantle.us-east-1.api.aws/v1/models",

View File

@@ -23,6 +23,16 @@ function createModel(id: string, name: string): ModelDefinitionConfig {
};
}
/**
 * Returns the legacy extended thinking level ids ("xhigh" / "max") found in
 * the given level list, preserving their original order (duplicates included).
 * A missing list yields an empty array.
 */
function collectLegacyExtendedLevelIds(levels: readonly { id: string }[] | undefined): string[] {
  const legacyIds = new Set(["xhigh", "max"]);
  return (levels ?? []).map((level) => level.id).filter((id) => legacyIds.has(id));
}
describe("anthropic provider policy public artifact", () => {
it("normalizes Anthropic provider config", () => {
expect(
@@ -117,9 +127,7 @@ describe("anthropic provider policy public artifact", () => {
if (!profile) {
throw new Error("Expected Anthropic policy profile");
}
expect(
profile.levels.map((level) => level.id).filter((id) => id === "xhigh" || id === "max"),
).toEqual([]);
expect(collectLegacyExtendedLevelIds(profile.levels)).toEqual([]);
});
it("does not expose Anthropic thinking profiles for unrelated providers", () => {

View File

@@ -20,17 +20,19 @@ describe("arcee provider plugin", () => {
providers: [provider],
choice: "arceeai-api-key",
});
expect(directChoice).not.toBeNull();
expect(directChoice?.provider.id).toBe("arcee");
expect(directChoice?.method.id).toBe("arcee-platform");
expect(directChoice).toMatchObject({
provider: { id: "arcee" },
method: { id: "arcee-platform" },
});
const orChoice = resolveProviderPluginChoice({
providers: [provider],
choice: "arceeai-openrouter",
});
expect(orChoice).not.toBeNull();
expect(orChoice?.provider.id).toBe("arcee");
expect(orChoice?.method.id).toBe("openrouter");
expect(orChoice).toMatchObject({
provider: { id: "arcee" },
method: { id: "openrouter" },
});
});
it("stores the OpenRouter onboarding path under the OpenRouter auth profile", async () => {

View File

@@ -13,12 +13,15 @@ beforeEach(() => {
});
function createDeferred<T = void>() {
let resolve!: (value: T | PromiseLike<T>) => void;
let reject!: (reason?: unknown) => void;
let resolve: ((value: T | PromiseLike<T>) => void) | undefined;
let reject: ((reason?: unknown) => void) | undefined;
const promise = new Promise<T>((res, rej) => {
resolve = res;
reject = rej;
});
if (!resolve || !reject) {
throw new Error("Expected deferred callbacks to be initialized");
}
return { promise, resolve, reject };
}

View File

@@ -200,7 +200,7 @@ describe("fuzz: isDirectCdpWebSocketEndpoint", () => {
}
});
it("never throws on random input (including invalid URLs)", () => {
it("returns booleans for random input including invalid URLs", () => {
const rng = makeRng(0x2004);
const junkPool = [
"",
@@ -215,7 +215,6 @@ describe("fuzz: isDirectCdpWebSocketEndpoint", () => {
];
for (let i = 0; i < ITERATIONS; i += 1) {
const input = rng() < 0.5 ? pick(rng, junkPool) : String.fromCharCode(randInt(rng, 0, 0x7f));
expect(() => isDirectCdpWebSocketEndpoint(input)).not.toThrow();
expect(typeof isDirectCdpWebSocketEndpoint(input)).toBe("boolean");
}
});
@@ -271,12 +270,12 @@ describe("fuzz: normalizeCdpHttpBaseForJsonEndpoints", () => {
}
});
it("falls back safely for non-URL-ish inputs (never throws)", () => {
it("returns normalized strings for non-URL-ish inputs", () => {
const rng = makeRng(0x3003);
// These inputs either trigger the catch branch (empty / "garbage" /
// bare "ws://" / "wss://") or are accepted by WHATWG URL as
// special-scheme absolute URLs (e.g. "ws:host/path" becomes
// "ws://host/path"). Either way the helper must never throw.
// "ws://host/path"). Both paths must return strings.
const junk = [
"ws:/devtools/browser/abc",
"wss:/devtools/browser/abc",
@@ -289,7 +288,6 @@ describe("fuzz: normalizeCdpHttpBaseForJsonEndpoints", () => {
];
for (let i = 0; i < ITERATIONS; i += 1) {
const input = pick(rng, junk);
expect(() => normalizeCdpHttpBaseForJsonEndpoints(input)).not.toThrow();
const out = normalizeCdpHttpBaseForJsonEndpoints(input);
expect(typeof out).toBe("string");
// Scheme swap invariant: whatever branch ran, ws:/wss: never
@@ -377,11 +375,10 @@ describe("fuzz: redactCdpUrl", () => {
expect(redactCdpUrl(" ")).toBe("");
});
it("falls back to redactSensitiveText for non-URL-ish inputs (never throws)", () => {
it("falls back to redactSensitiveText for non-URL-ish inputs", () => {
const rng = makeRng(0x5002);
for (let i = 0; i < ITERATIONS; i += 1) {
const junk = pick(rng, ["not-a-url", "http://", "ws://", "::::", "Bearer ey.SECRET.xyz"]);
expect(() => redactCdpUrl(junk)).not.toThrow();
const out = redactCdpUrl(junk);
expect(typeof out).toBe("string");
}
@@ -405,7 +402,7 @@ describe("fuzz: appendCdpPath", () => {
});
describe("fuzz: getHeadersWithAuth", () => {
it("never throws and always returns a mergedHeaders object", () => {
it("always returns a mergedHeaders object", () => {
const rng = makeRng(0x7001);
for (let i = 0; i < ITERATIONS; i += 1) {
const withAuth = rng() < 0.3;

View File

@@ -35,6 +35,16 @@ function sendCdpResult(socket: WebSocket, id: number | undefined, result: Record
socket.send(JSON.stringify({ id, result }));
}
/**
 * Counts how many items in the list satisfy the predicate.
 * Equivalent to `items.filter(predicate).length` without the
 * intermediate array allocation.
 */
function countMatching<T>(items: readonly T[], predicate: (item: T) => boolean): number {
  return items.reduce((total, item) => (predicate(item) ? total + 1 : total), 0);
}
function replyToPageEnable(msg: CdpMockMessage, socket: WebSocket): boolean {
if (msg.method !== "Page.enable") {
return false;
@@ -197,7 +207,7 @@ describe("cdp internal", () => {
}
if (msg.method === "Runtime.evaluate") {
// Pre-capture viewport probe + post-capture probe.
const isPre = events.filter((m) => m === "Runtime.evaluate").length === 1;
const isPre = countMatching(events, (m) => m === "Runtime.evaluate") === 1;
socket.send(
JSON.stringify({
id: msg.id,

View File

@@ -349,10 +349,13 @@ describe("chrome MCP page parsing", () => {
it("reuses a single pending session for concurrent requests", async () => {
let factoryCalls = 0;
let releaseFactory!: () => void;
let releaseFactory: (() => void) | undefined;
const factoryGate = new Promise<void>((resolve) => {
releaseFactory = resolve;
});
if (!releaseFactory) {
throw new Error("Expected Chrome MCP factory release callback to be initialized");
}
const factory: ChromeMcpSessionFactory = async () => {
factoryCalls += 1;

View File

@@ -706,14 +706,15 @@ describe("browser config", () => {
},
});
const profile = resolveProfile(resolved, "chrome-live");
expect(profile).not.toBeNull();
expect(profile?.driver).toBe("existing-session");
expect(profile?.attachOnly).toBe(true);
expect(profile?.cdpPort).toBe(0);
expect(profile?.cdpUrl).toBe("");
expect(profile?.cdpIsLoopback).toBe(true);
expect(profile).toMatchObject({
driver: "existing-session",
attachOnly: true,
cdpPort: 0,
cdpUrl: "",
cdpIsLoopback: true,
color: "#00AA00",
});
expect(profile?.userDataDir).toBeUndefined();
expect(profile?.color).toBe("#00AA00");
});
it("expands tilde-prefixed userDataDir for existing-session profiles", () => {

View File

@@ -1,6 +1,16 @@
import { describe, expect, it } from "vitest";
import { buildBrowserDoctorReport } from "./doctor.js";
/**
 * Extracts the ids of all doctor-report checks whose status is "warn",
 * in the order they appear in the report.
 */
function collectWarningCheckIds(checks: readonly { id: string; status: string }[]): string[] {
  return checks.filter((check) => check.status === "warn").map((check) => check.id);
}
describe("buildBrowserDoctorReport", () => {
it("reports stopped managed browsers as launchable diagnostics", () => {
const report = buildBrowserDoctorReport({
@@ -101,9 +111,11 @@ describe("buildBrowserDoctorReport", () => {
});
expect(report.ok).toBe(true);
expect(
report.checks.filter((check) => check.status === "warn").map((check) => check.id),
).toEqual(["managed-executable", "display", "linux-sandbox"]);
expect(collectWarningCheckIds(report.checks)).toEqual([
"managed-executable",
"display",
"linux-sandbox",
]);
expect(report.checks.find((check) => check.id === "display")).toMatchObject({
summary: "No DISPLAY or WAYLAND_DISPLAY is set while headed mode is selected (config)",
});

View File

@@ -21,6 +21,11 @@ export async function writeExternalFileWithinOutputRoot(params: {
rootDir,
path: outputPath,
write: params.write,
}).catch((err: unknown) => {
if (err instanceof Error && /file not found/i.test(err.message)) {
throw new Error("output directory changed while writing file");
}
throw err;
});
return result.path;
}

View File

@@ -18,7 +18,8 @@ describeLive("browser (live): remote CDP tab persistence", () => {
await pw.closePlaywrightBrowserConnection().catch(() => {});
const created = await pw.createPageViaPlaywright({ cdpUrl: CDP_URL, url: "about:blank" });
expect(created.targetId).toEqual(expect.any(String));
expect(created.targetId).toBeTypeOf("string");
expect(created.targetId).not.toBe("");
try {
await waitFor(
async () => {

View File

@@ -168,6 +168,18 @@ describe("pw-session ensurePageState", () => {
expect(path.basename(managedPathB ?? "")).toMatch(/-report\.pdf$/);
expect(saveAsA.mock.calls[0]?.[0]).not.toBe(managedPathA);
expect(saveAsB.mock.calls[0]?.[0]).not.toBe(managedPathB);
for (const call of [saveAsA.mock.calls[0], saveAsB.mock.calls[0]]) {
const savedPath = call?.[0];
expect(savedPath).toEqual(expect.any(String));
if (typeof savedPath !== "string") {
throw new Error("Expected saved download path");
}
const savedParentName = path.basename(path.dirname(savedPath));
expect(
savedParentName.includes("fs-safe-output") ||
savedParentName === path.basename(DEFAULT_DOWNLOAD_DIR),
).toBe(true);
}
await expect(fs.readFile(managedPathA ?? "", "utf8")).resolves.toBe("download-a");
await expect(fs.readFile(managedPathB ?? "", "utf8")).resolves.toBe("download-b");
});

View File

@@ -34,10 +34,13 @@ vi.mock("./pw-session.js", () => {
const { evaluateViaPlaywright } = await import("./pw-tools-core.interactions.js");
function createPendingEval() {
let evalCalled!: () => void;
let evalCalled: (() => void) | undefined;
const evalCalledPromise = new Promise<void>((resolve) => {
evalCalled = resolve;
});
if (!evalCalled) {
throw new Error("Expected evaluate callback to be initialized");
}
return {
evalCalledPromise,
resolveEvalCalled: evalCalled,

View File

@@ -137,7 +137,11 @@ describe("pw-tools-core", () => {
const savedPath = params.saveAs.mock.calls[0]?.[0];
expect(typeof savedPath).toBe("string");
expect(savedPath).not.toBe(params.targetPath);
expect(path.basename(path.dirname(String(savedPath)))).toContain("fs-safe-output");
const savedParentName = path.basename(path.dirname(String(savedPath)));
expect(
savedParentName.includes("fs-safe-output") ||
savedParentName === path.basename(path.dirname(params.targetPath)),
).toBe(true);
expect(path.basename(String(savedPath))).toContain(path.basename(params.targetPath));
expect(path.basename(String(savedPath))).toMatch(/\.part$/);
expect(await fs.readFile(params.targetPath, "utf8")).toBe(params.content);
@@ -208,6 +212,57 @@ describe("pw-tools-core", () => {
});
});
it.runIf(process.platform !== "win32")(
"does not write outside the output root when a download parent is swapped after save",
async () => {
await withTempDir(async (tempDir) => {
const rootDir = path.join(tempDir, "downloads");
const targetParent = path.join(rootDir, "race");
const outsideDir = path.join(tempDir, "outside");
const targetPath = path.join(targetParent, "file.bin");
const outsideTargetPath = path.join(outsideDir, "file.bin");
await fs.mkdir(targetParent, { recursive: true });
await fs.mkdir(outsideDir);
const harness = createDownloadEventHarness();
let parentSwappedBeforeFinalize = false;
const saveAs = vi.fn(async (outPath: string) => {
await fs.writeFile(outPath, "race-content", "utf8");
const beforeSwap = await fs.lstat(targetParent);
expect(beforeSwap.isDirectory()).toBe(true);
expect(beforeSwap.isSymbolicLink()).toBe(false);
await fs.rm(targetParent, { recursive: true, force: true });
await fs.symlink(outsideDir, targetParent);
const afterSwap = await fs.lstat(targetParent);
expect(afterSwap.isSymbolicLink()).toBe(true);
parentSwappedBeforeFinalize = true;
});
const p = mod.waitForDownloadViaPlaywright({
cdpUrl: "http://127.0.0.1:18792",
targetId: "T1",
path: targetPath,
rootDir,
timeoutMs: 1000,
});
await Promise.resolve();
harness.expectArmed();
harness.trigger({
url: () => "https://example.com/file.bin",
suggestedFilename: () => "file.bin",
saveAs,
});
await expect(p).rejects.toThrow(/path alias|outside workspace|directory changed/i);
expect(parentSwappedBeforeFinalize).toBe(true);
expect(saveAs).toHaveBeenCalledOnce();
await expect(fs.access(outsideTargetPath)).rejects.toThrow();
await expect(fs.readdir(outsideDir)).resolves.toEqual([]);
});
},
);
it("marks explicit download waiters as owning the next download until cleanup", async () => {
const harness = createDownloadEventHarness();
const state = sessionMocks.ensurePageState();

View File

@@ -27,8 +27,15 @@ export function installBrowserCommonMiddleware(app: Express) {
abort();
}
});
// Make the signal available to browser route handlers (best-effort).
(req as unknown as { signal?: AbortSignal }).signal = ctrl.signal;
// Make the signal available to browser route handlers on Node versions
// whose IncomingMessage does not already expose a native read-only signal.
const requestWithSignal = req as Request & { signal?: AbortSignal };
if (!(requestWithSignal.signal instanceof AbortSignal)) {
Object.defineProperty(req, "signal", {
value: ctrl.signal,
configurable: true,
});
}
next();
});
app.use(express.json({ limit: "1mb" }));

View File

@@ -51,7 +51,7 @@ const pwMocks = getPwMocks();
describe("browser control server", () => {
installAgentContractHooks();
const slowTimeoutMs = process.platform === "win32" ? 40_000 : 20_000;
const slowTimeoutMs = 60_000;
it(
"returns ACT_KIND_REQUIRED when kind is missing",

View File

@@ -1,9 +1,15 @@
import fs from "node:fs/promises";
import path from "node:path";
import {
findExistingAncestor,
pathScope as sdkPathScope,
} from "openclaw/plugin-sdk/security-runtime";
export { createSubsystemLogger } from "openclaw/plugin-sdk/logging-core";
export {
ensurePortAvailable,
extractErrorCode,
formatErrorMessage,
ensureAbsoluteDirectory,
hasProxyEnvConfigured,
isNotFoundPathError,
isPathInside,
@@ -28,3 +34,33 @@ export {
wrapExternalContent,
} from "openclaw/plugin-sdk/security-runtime";
export type { LookupFn, SsrFPolicy } from "openclaw/plugin-sdk/security-runtime";
/**
 * Resolves `dirPath` to an absolute path and ensures it exists as a real
 * directory, creating missing segments through the SDK path scope so the
 * creation stays confined to the deepest already-existing ancestor.
 *
 * Result contract:
 * - `{ ok: true, path }` when the absolute path is (or was created as) a
 *   non-symlink directory.
 * - `{ ok: false, error }` with a uniform "Invalid path" message when no
 *   existing ancestor can be found, when the path exists but is a symlink or
 *   a non-directory, or when scoped creation fails.
 *
 * @param dirPath  Directory path to resolve and ensure (relative allowed).
 * @param options  `scopeLabel` customizes the error label (default
 *                 "directory"); `mode` is forwarded to directory creation.
 */
export async function ensureAbsoluteDirectory(
  dirPath: string,
  options?: { scopeLabel?: string; mode?: number },
): Promise<{ ok: true; path: string } | { ok: false; error: Error }> {
  const absolutePath = path.resolve(dirPath);
  const scopeLabel = options?.scopeLabel ?? "directory";
  // Uniform failure result: callers get the same message for every invalid
  // shape so the error does not leak which check rejected the path.
  const invalid = () => ({
    ok: false as const,
    error: new Error(`Invalid path: must stay within ${scopeLabel}`),
  });
  const existingAncestor = await findExistingAncestor(absolutePath);
  if (!existingAncestor) {
    return invalid();
  }
  if (existingAncestor !== absolutePath) {
    // Some trailing segments do not exist yet: create them through the SDK
    // scope anchored at the deepest existing ancestor.
    const ensured = await sdkPathScope(existingAncestor, { label: scopeLabel }).ensureDir(
      path.relative(existingAncestor, absolutePath),
      { mode: options?.mode },
    );
    return ensured.ok ? ensured : { ok: false, error: new Error(ensured.error) };
  }
  // The full path already exists: accept it only if it is a genuine
  // directory and not a symlink (lstat deliberately does not follow links).
  try {
    const stat = await fs.lstat(absolutePath);
    if (stat.isDirectory() && !stat.isSymbolicLink()) {
      return { ok: true, path: absolutePath };
    }
  } catch {
    // lstat failure (e.g. raced deletion) falls through to the uniform result.
  }
  return invalid();
}

View File

@@ -41,6 +41,19 @@ function mockSuccessfulBytePlusTask(params?: { model?: string }) {
});
}
/**
 * Returns the JSON body of the first recorded BytePlus video POST request,
 * failing the test with a descriptive error when either the request or its
 * body was never captured by the mock.
 */
function requireBytePlusPostBody(): Record<string, unknown> {
  const [firstCall] = postJsonRequestMock.mock.calls;
  const request = firstCall?.[0] as { body?: Record<string, unknown> } | undefined;
  if (!request) {
    throw new Error("expected BytePlus video request");
  }
  const { body } = request;
  if (!body) {
    throw new Error("expected BytePlus video request body");
  }
  return body;
}
describe("byteplus video generation provider", () => {
it("declares explicit mode capabilities", () => {
expectExplicitVideoGenerationCapabilities(buildBytePlusVideoGenerationProvider());
@@ -63,7 +76,11 @@ describe("byteplus video generation provider", () => {
}),
);
expect(result.videos).toHaveLength(1);
expect(result.videos[0]?.fileName).toBe("video-1.webm");
const [video] = result.videos;
if (!video) {
throw new Error("Expected generated BytePlus video");
}
expect(video.fileName).toBe("video-1.webm");
expect(result.metadata).toEqual(
expect.objectContaining({
taskId: "task_123",
@@ -84,8 +101,7 @@ describe("byteplus video generation provider", () => {
cfg: {},
});
const request = postJsonRequestMock.mock.calls[0]?.[0] as { body?: Record<string, unknown> };
expect(request.body).toMatchObject({
expect(requireBytePlusPostBody()).toMatchObject({
model: "seedance-1-0-lite-i2v-250428",
resolution: "720p",
content: [
@@ -115,8 +131,7 @@ describe("byteplus video generation provider", () => {
cfg: {},
});
const request = postJsonRequestMock.mock.calls[0]?.[0] as { body?: Record<string, unknown> };
expect(request.body).toMatchObject({
expect(requireBytePlusPostBody()).toMatchObject({
model: "seedance-1-0-pro-250528",
seed: 42,
resolution: "480p",

View File

@@ -68,8 +68,12 @@ describe("canvas CLI", () => {
}),
);
expect(writtenFiles).toHaveLength(1);
expect(writtenFiles[0]?.filePath).toMatch(/openclaw-canvas-snapshot-.*\.png$/);
expect(writtenFiles[0]?.base64).toBe("aGk=");
const [writtenFile] = writtenFiles;
if (!writtenFile) {
throw new Error("Expected canvas snapshot file");
}
expect(writtenFile.filePath).toMatch(/openclaw-canvas-snapshot-.*\.png$/);
expect(writtenFile.base64).toBe("aGk=");
expect(runtime.log).toHaveBeenCalledWith(expect.stringMatching(/^MEDIA:.*\.png$/));
});
});

View File

@@ -12,8 +12,11 @@ describe("migrateLegacyCanvasHostConfig", () => {
},
} as OpenClawConfig);
expect(result?.changes).toEqual(["migrated canvasHost to plugins.entries.canvas.config.host"]);
expect(result?.config).toEqual({
if (!result) {
throw new Error("expected Canvas config migration result");
}
expect(result.changes).toEqual(["migrated canvasHost to plugins.entries.canvas.config.host"]);
expect(result.config).toEqual({
plugins: {
entries: {
canvas: {
@@ -51,7 +54,10 @@ describe("migrateLegacyCanvasHostConfig", () => {
},
} as OpenClawConfig);
expect(result?.config).toEqual({
if (!result) {
throw new Error("expected Canvas config migration result");
}
expect(result.config).toEqual({
plugins: {
entries: {
canvas: {

View File

@@ -4,6 +4,8 @@ import { resolvePreferredOpenClawTmpDir, withTempWorkspace } from "openclaw/plug
import { describe, expect, it } from "vitest";
import { normalizeUrlPath, resolveFileWithinRoot } from "./file-resolver.js";
type ResolvedFile = NonNullable<Awaited<ReturnType<typeof resolveFileWithinRoot>>>;
async function withCanvasTemp<T>(prefix: string, run: (dir: string) => Promise<T>): Promise<T> {
return await withTempWorkspace(
{ rootDir: resolvePreferredOpenClawTmpDir(), prefix },
@@ -11,6 +13,16 @@ async function withCanvasTemp<T>(prefix: string, run: (dir: string) => Promise<T
);
}
/**
 * Narrows a `resolveFileWithinRoot` result to a non-null resolved file.
 * Asserts the expected shape through vitest first (so a mismatch surfaces
 * as an assertion failure with a diff), then throws for the null case to
 * satisfy the type system before returning.
 */
function expectResolvedFile(
  resolution: Awaited<ReturnType<typeof resolveFileWithinRoot>>,
): ResolvedFile {
  expect(resolution).toEqual(expect.objectContaining({ handle: expect.any(Object) }));
  if (resolution === null) {
    throw new Error("Expected resolved file within root");
  }
  return resolution;
}
describe("resolveFileWithinRoot", () => {
it("normalizes URL paths", () => {
expect(normalizeUrlPath("/nested/../file.txt")).toBe("/file.txt");
@@ -23,11 +35,11 @@ describe("resolveFileWithinRoot", () => {
await fs.writeFile(path.join(root, "docs", "index.html"), "<h1>docs</h1>");
const result = await resolveFileWithinRoot(root, "/docs");
expect(result).not.toBeNull();
const resolved = expectResolvedFile(result);
try {
await expect(result?.handle.readFile({ encoding: "utf8" })).resolves.toBe("<h1>docs</h1>");
await expect(resolved.handle.readFile({ encoding: "utf8" })).resolves.toBe("<h1>docs</h1>");
} finally {
await result?.handle.close().catch(() => {});
await resolved.handle.close().catch(() => {});
}
});
});

View File

@@ -260,7 +260,7 @@ describe("canvas host", () => {
const dir = await createCaseDir();
const index = path.join(dir, "index.html");
await fs.writeFile(index, "<html><body>v1</body></html>", "utf8");
let resolveReload!: () => void;
let resolveReload: (() => void) | undefined;
const reloadSent = new Promise<void>((resolve) => {
resolveReload = resolve;
});
@@ -306,6 +306,9 @@ describe("canvas host", () => {
send: (message: string) => {
ws.sent.push(message);
if (message === "reload") {
if (!resolveReload) {
throw new Error("Expected Canvas reload resolver to be initialized");
}
resolveReload();
}
},
@@ -342,7 +345,11 @@ describe("canvas host", () => {
Buffer.alloc(0),
);
expect(upgraded).toBe(true);
expect(TrackingWebSocketServerClass.latestInstance?.connectionCount).toBe(1);
const latestServer = TrackingWebSocketServerClass.latestInstance;
if (!latestServer) {
throw new Error("expected Canvas host websocket server");
}
expect(latestServer.connectionCount).toBe(1);
const ws = TrackingWebSocketServerClass.latestSocket;
if (!ws) {
throw new Error("expected Canvas host websocket");

View File

@@ -53,6 +53,17 @@ function createAuthEchoFetchMock() {
});
}
/**
 * Returns the discovered Chutes model at `index`, throwing a descriptive
 * error when the list is shorter than expected so tests fail with a clear
 * message instead of an undefined-access downstream.
 */
function requireChutesModel(
  models: Awaited<ReturnType<typeof discoverChutesModels>>,
  index: number,
): Awaited<ReturnType<typeof discoverChutesModels>>[number] {
  const model = models[index];
  if (model) {
    return model;
  }
  throw new Error(`expected Chutes model at index ${index}`);
}
describe("chutes-models", () => {
beforeEach(() => {
clearChutesModelCacheForTests();
@@ -68,7 +79,10 @@ describe("chutes-models", () => {
expect(def.cost).toEqual(entry.cost);
expect(def.contextWindow).toBe(entry.contextWindow);
expect(def.maxTokens).toBe(entry.maxTokens);
expect(def.compat?.supportsUsageInStreaming).toBe(false);
if (!def.compat) {
throw new Error("expected Chutes model compat");
}
expect(def.compat.supportsUsageInStreaming).toBe(false);
});
it("discoverChutesModels returns static catalog when accessToken is empty", async () => {
@@ -80,7 +94,7 @@ describe("chutes-models", () => {
it("discoverChutesModels returns static catalog in test env by default", async () => {
const models = await discoverChutesModels("test-token");
expect(models).toHaveLength(CHUTES_MODEL_CATALOG.length);
expect(models[0]?.id).toBe("Qwen/Qwen3-32B");
expect(requireChutesModel(models, 0).id).toBe("Qwen/Qwen3-32B");
});
it("discoverChutesModels correctly maps API response when not in test env", async () => {
@@ -105,9 +119,14 @@ describe("chutes-models", () => {
const models = await discoverChutesModels("test-token-real-fetch");
expect(models.length).toBeGreaterThan(0);
if (models.length === 3) {
expect(models[0]?.id).toBe("zai-org/GLM-4.7-TEE");
expect(models[1]?.reasoning).toBe(true);
expect(models[1]?.compat?.supportsUsageInStreaming).toBe(false);
const firstModel = requireChutesModel(models, 0);
const secondModel = requireChutesModel(models, 1);
expect(firstModel.id).toBe("zai-org/GLM-4.7-TEE");
expect(secondModel.reasoning).toBe(true);
if (!secondModel.compat) {
throw new Error("expected Chutes API model compat");
}
expect(secondModel.compat.supportsUsageInStreaming).toBe(false);
}
});
});
@@ -208,9 +227,9 @@ describe("chutes-models", () => {
const modelsA = await discoverChutesModels("chutes-token-a");
const modelsB = await discoverChutesModels("chutes-token-b");
const modelsASecond = await discoverChutesModels("chutes-token-a");
expect(modelsA[0]?.id).toBe("private/model-a");
expect(modelsB[0]?.id).toBe("private/model-b");
expect(modelsASecond[0]?.id).toBe("private/model-a");
expect(requireChutesModel(modelsA, 0).id).toBe("private/model-a");
expect(requireChutesModel(modelsB, 0).id).toBe("private/model-b");
expect(requireChutesModel(modelsASecond, 0).id).toBe("private/model-a");
expect(mockFetch).toHaveBeenCalledTimes(2);
});
});

View File

@@ -6,14 +6,20 @@ import plugin from "./index.js";
function registerProvider() {
const captured = capturePluginRegistration(plugin);
const provider = captured.providers[0];
expect(provider?.id).toBe("cloudflare-ai-gateway");
if (!provider) {
throw new Error("expected Cloudflare AI Gateway provider");
}
expect(provider.id).toBe("cloudflare-ai-gateway");
return provider;
}
describe("cloudflare-ai-gateway plugin", () => {
it("registers a stream wrapper that strips Anthropic thinking assistant prefill", () => {
const provider = registerProvider();
expect(provider?.wrapStreamFn).toBeTypeOf("function");
expect(provider.wrapStreamFn).toBeTypeOf("function");
if (!provider.wrapStreamFn) {
throw new Error("expected Cloudflare AI Gateway stream wrapper");
}
let capturedPayload: Record<string, unknown> | undefined;
const baseStreamFn: StreamFn = (_model, _context, options) => {
@@ -29,19 +35,26 @@ describe("cloudflare-ai-gateway plugin", () => {
return {} as ReturnType<StreamFn>;
};
const wrapped = provider?.wrapStreamFn?.({
const wrapped = provider.wrapStreamFn({
provider: "cloudflare-ai-gateway",
modelId: "claude-sonnet-4-6",
model: { api: "anthropic-messages" },
streamFn: baseStreamFn,
} as never);
expect(wrapped).toBeTypeOf("function");
if (!wrapped) {
throw new Error("expected Cloudflare AI Gateway wrapped stream function");
}
void wrapped?.(
void wrapped(
{ provider: "cloudflare-ai-gateway", api: "anthropic-messages" } as never,
{} as never,
{},
);
expect(capturedPayload?.messages).toEqual([{ role: "user", content: "Return JSON." }]);
if (!capturedPayload) {
throw new Error("expected Cloudflare AI Gateway payload capture");
}
expect(capturedPayload.messages).toEqual([{ role: "user", content: "Return JSON." }]);
});
});

View File

@@ -84,7 +84,8 @@ describe("codex plugin", () => {
};
delete (api as { onConversationBindingResolved?: unknown }).onConversationBindingResolved;
expect(() => plugin.register(api)).not.toThrow();
plugin.register(api);
expect(api.registerProvider).toHaveBeenCalledWith(expect.objectContaining({ id: "codex" }));
});
it("only claims the codex provider by default", () => {

View File

@@ -26,6 +26,7 @@ function threadStartResult() {
return {
thread: {
id: "thread-1",
sessionId: "session-1",
forkedFromId: null,
preview: "",
ephemeral: true,

View File

@@ -112,7 +112,7 @@ function createCodexAuthProfileHarness(params: { startMethod: "thread/start" | "
seenAuthProfileIds,
seenAgentDirs,
async waitForMethod(method: string) {
await vi.waitFor(() => expect(requests.some((entry) => entry.method === method)).toBe(true), {
await vi.waitFor(() => expect(requests.map((entry) => entry.method)).toContain(method), {
interval: 1,
});
},

View File

@@ -261,7 +261,9 @@ describe("Codex plugin thread config", () => {
pluginName: "google-calendar",
});
expect(config.diagnostics).toEqual([]);
expect(request.mock.calls.filter(([method]) => method === "app/list")).toHaveLength(1);
expect(
request.mock.calls.reduce((count, [method]) => count + (method === "app/list" ? 1 : 0), 0),
).toBe(1);
});
it("does not expose plugin apps missing from the app inventory snapshot", async () => {
@@ -391,10 +393,8 @@ describe("Codex plugin thread config", () => {
});
expect(config.diagnostics).toEqual([]);
expect(request.mock.calls.map(([method]) => method)).toContain("plugin/install");
expect(request.mock.calls.filter(([method]) => method === "app/list").length).toBeGreaterThan(
0,
);
expect(appListParams.some((params) => params.forceRefetch)).toBe(true);
expect(request.mock.calls.some(([method]) => method === "app/list")).toBe(true);
expect(appListParams.map((params) => params.forceRefetch)).toContain(true);
});
it("surfaces critical post-install refresh failures and keeps plugin apps disabled", async () => {

View File

@@ -147,7 +147,7 @@ function createStartedThreadHarness(
return {
requests,
async waitForMethod(method: string) {
await vi.waitFor(() => expect(requests.some((entry) => entry.method === method)).toBe(true), {
await vi.waitFor(() => expect(requests.map((entry) => entry.method)).toContain(method), {
interval: 1,
});
},

View File

@@ -1749,16 +1749,20 @@ describe("runCodexAppServerAttempt", () => {
const sessionFile = path.join(tempDir, "session.jsonl");
const workspaceDir = path.join(tempDir, "workspace");
const resetsAt = Math.ceil(Date.now() / 1000) + 120;
let harness!: ReturnType<typeof createStartedThreadHarness>;
harness = createStartedThreadHarness(async (method) => {
const harnessRef: { current?: ReturnType<typeof createStartedThreadHarness> } = {};
const harness = createStartedThreadHarness(async (method) => {
if (method === "turn/start") {
await harness.notify(rateLimitsUpdated(resetsAt));
if (!harnessRef.current) {
throw new Error("Expected Codex app-server harness to be initialized");
}
await harnessRef.current.notify(rateLimitsUpdated(resetsAt));
throw Object.assign(new Error("You've reached your usage limit."), {
data: { codexErrorInfo: "usageLimitExceeded" },
});
}
return undefined;
});
harnessRef.current = harness;
const runError = runCodexAppServerAttempt(createParams(sessionFile, workspaceDir)).catch(
(error: unknown) => error,
@@ -1982,13 +1986,12 @@ describe("runCodexAppServerAttempt", () => {
await waitForMethod("turn/start");
expect(queueAgentHarnessMessage("session-1", "more context", { debounceMs: 1 })).toBe(true);
await vi.waitFor(
() => expect(requests.some((entry) => entry.method === "turn/steer")).toBe(true),
{ interval: 1 },
);
await vi.waitFor(() => expect(requests.map((entry) => entry.method)).toContain("turn/steer"), {
interval: 1,
});
expect(abortAgentHarnessRun("session-1")).toBe(true);
await vi.waitFor(
() => expect(requests.some((entry) => entry.method === "turn/interrupt")).toBe(true),
() => expect(requests.map((entry) => entry.method)).toContain("turn/interrupt"),
{ interval: 1 },
);
@@ -2164,7 +2167,7 @@ describe("runCodexAppServerAttempt", () => {
params.onBlockReply = vi.fn();
const run = runCodexAppServerAttempt(params);
await vi.waitFor(
() => expect(request.mock.calls.some(([method]) => method === "turn/start")).toBe(true),
() => expect(request.mock.calls.map(([method]) => method)).toContain("turn/start"),
{ interval: 1 },
);
await vi.waitFor(() => expect(handleRequest).toBeTypeOf("function"), { interval: 1 });
@@ -2409,7 +2412,7 @@ describe("runCodexAppServerAttempt", () => {
};
const run = runCodexAppServerAttempt(params);
await vi.waitFor(() =>
expect(request.mock.calls.some(([method]) => method === "turn/start")).toBe(true),
expect(request.mock.calls.map(([method]) => method)).toContain("turn/start"),
);
await notify({
method: "turn/completed",

View File

@@ -8,6 +8,8 @@ import {
resolveCodexTrajectoryPointerFlags,
} from "./trajectory.js";
type CodexTrajectoryRecorder = NonNullable<ReturnType<typeof createCodexTrajectoryRecorder>>;
const tempDirs: string[] = [];
function makeTempDir(): string {
@@ -22,6 +24,16 @@ afterEach(() => {
}
});
/**
 * Narrows a possibly-null trajectory recorder result to a usable recorder.
 * Records a vitest expectation first so a missing recorder surfaces as a
 * test failure, then throws to satisfy the type-level narrowing.
 */
function expectTrajectoryRecorder(
  recorder: ReturnType<typeof createCodexTrajectoryRecorder>,
): CodexTrajectoryRecorder {
  expect(recorder).toEqual(expect.objectContaining({ recordEvent: expect.any(Function) }));
  if (recorder !== null) {
    return recorder;
  }
  throw new Error("Expected Codex trajectory recorder");
}
describe("Codex trajectory recorder", () => {
it("keeps write flags usable when O_NOFOLLOW is unavailable", () => {
const constants = {
@@ -52,13 +64,13 @@ describe("Codex trajectory recorder", () => {
env: {},
});
expect(recorder).not.toBeNull();
recorder?.recordEvent("session.started", {
const trajectoryRecorder = expectTrajectoryRecorder(recorder);
trajectoryRecorder.recordEvent("session.started", {
apiKey: "secret",
headers: [{ name: "Authorization", value: "Bearer sk-test-secret-token" }],
command: "curl -H 'Authorization: Bearer sk-other-secret-token'",
});
await recorder?.flush();
await trajectoryRecorder.flush();
const filePath = path.join(tmpDir, "session.trajectory.jsonl");
const content = fs.readFileSync(filePath, "utf8");
@@ -82,8 +94,9 @@ describe("Codex trajectory recorder", () => {
env: { OPENCLAW_TRAJECTORY_DIR: tmpDir },
});
recorder?.recordEvent("session.started");
await recorder?.flush();
const trajectoryRecorder = expectTrajectoryRecorder(recorder);
trajectoryRecorder.recordEvent("session.started");
await trajectoryRecorder.flush();
expect(fs.existsSync(path.join(tmpDir, "___evil_session.jsonl"))).toBe(true);
});
@@ -119,8 +132,9 @@ describe("Codex trajectory recorder", () => {
env: {},
});
recorder?.recordEvent("session.started");
await recorder?.flush();
const trajectoryRecorder = expectTrajectoryRecorder(recorder);
trajectoryRecorder.recordEvent("session.started");
await trajectoryRecorder.flush();
expect(fs.existsSync(path.join(targetDir, "session.trajectory.jsonl"))).toBe(false);
});
@@ -137,12 +151,13 @@ describe("Codex trajectory recorder", () => {
env: {},
});
recorder?.recordEvent("context.compiled", {
const trajectoryRecorder = expectTrajectoryRecorder(recorder);
trajectoryRecorder.recordEvent("context.compiled", {
fields: Object.fromEntries(
Array.from({ length: 100 }, (_, index) => [`field-${index}`, "x".repeat(3_000)]),
),
});
await recorder?.flush();
await trajectoryRecorder.flush();
const parsed = JSON.parse(
fs.readFileSync(path.join(tmpDir, "session.trajectory.jsonl"), "utf8"),

View File

@@ -10,6 +10,18 @@ function createParams(): EmbeddedRunAttemptParams {
} as unknown as EmbeddedRunAttemptParams;
}
/**
 * Pulls the text of the first block-reply payload recorded on the
 * params' onBlockReply mock, failing loudly when the callback is absent
 * or the first call carried no string text.
 */
function expectFirstBlockReplyText(params: EmbeddedRunAttemptParams): string {
  const { onBlockReply } = params;
  if (onBlockReply === undefined) {
    throw new Error("Expected onBlockReply callback");
  }
  const [firstCall] = vi.mocked(onBlockReply).mock.calls;
  const payload = firstCall?.[0];
  if (typeof payload?.text === "string") {
    return payload.text;
  }
  throw new Error("Expected first block reply text");
}
describe("Codex app-server user input bridge", () => {
it("prompts the originating chat and resolves request_user_input from the next queued message", async () => {
const params = createParams();
@@ -161,9 +173,7 @@ describe("Codex app-server user input bridge", () => {
});
await vi.waitFor(() => expect(params.onBlockReply).toHaveBeenCalledTimes(1));
const payload = vi.mocked(params.onBlockReply!).mock.calls[0]?.[0];
expect(payload).toEqual(expect.objectContaining({ text: expect.any(String) }));
const text = payload?.text ?? "";
const text = expectFirstBlockReplyText(params);
expect(text).toContain("Mode &lt;\uff20U123&gt;");
expect(text).toContain("Pick \uff3btrusted\uff3d\uff08https://evil\uff09 \uff20here");
expect(text).toContain(

View File

@@ -55,14 +55,17 @@ describe("transcribeDeepgramAudio", () => {
expect(seenUrl).toBe(
"https://api.example.com/v1/listen?model=nova-3&language=en&punctuate=false&smart_format=true",
);
expect(seenInit?.method).toBe("POST");
expect(seenInit?.signal).toBeInstanceOf(AbortSignal);
if (!seenInit) {
throw new Error("Expected Deepgram fetch request init");
}
expect(seenInit.method).toBe("POST");
expect(seenInit.signal).toBeInstanceOf(AbortSignal);
const headers = new Headers(seenInit?.headers);
const headers = new Headers(seenInit.headers);
expect(headers.get("authorization")).toBe("Token test-key");
expect(headers.get("x-custom")).toBe("1");
expect(headers.get("content-type")).toBe("audio/wav");
expect(seenInit?.body).toBeInstanceOf(Uint8Array);
expect(seenInit.body).toBeInstanceOf(Uint8Array);
});
it("throws when the provider response omits transcript", async () => {

View File

@@ -116,9 +116,14 @@ describe("deepinfra image generation provider", () => {
},
}),
);
expect(result.images[0]?.mimeType).toBe("image/jpeg");
expect(result.images[0]?.fileName).toBe("image-1.jpg");
expect(result.images[0]?.revisedPrompt).toBe("red square");
expect(result.images).toHaveLength(1);
const [firstImage] = result.images;
if (!firstImage) {
throw new Error("Expected generated DeepInfra image");
}
expect(firstImage.mimeType).toBe("image/jpeg");
expect(firstImage.fileName).toBe("image-1.jpg");
expect(firstImage.revisedPrompt).toBe("red square");
expect(release).toHaveBeenCalledOnce();
});
@@ -152,11 +157,20 @@ describe("deepinfra image generation provider", () => {
url: "https://api.deepinfra.com/v1/openai/images/edits",
}),
);
const form = postMultipartRequestMock.mock.calls[0]?.[0].body as FormData;
const firstCall = postMultipartRequestMock.mock.calls[0];
if (!firstCall) {
throw new Error("Expected DeepInfra multipart request");
}
const form = firstCall[0].body as FormData;
expect(form.get("model")).toBe("black-forest-labs/FLUX-1-schnell");
expect(form.get("prompt")).toBe("make it neon");
expect(form.get("response_format")).toBe("b64_json");
expect(form.get("image")).toBeInstanceOf(File);
expect(result.images[0]?.mimeType).toBe("image/png");
expect(result.images).toHaveLength(1);
const [image] = result.images;
if (!image) {
throw new Error("Expected edited DeepInfra image");
}
expect(image.mimeType).toBe("image/png");
});
});

View File

@@ -115,9 +115,10 @@ describe("DeepInfra provider config", () => {
try {
const result = resolveEnvApiKey("deepinfra");
expect(result).not.toBeNull();
expect(result?.apiKey).toBe("test-deepinfra-key");
expect(result?.source).toContain("DEEPINFRA_API_KEY");
expect(result).toMatchObject({
apiKey: "test-deepinfra-key",
source: expect.stringContaining("DEEPINFRA_API_KEY"),
});
} finally {
envSnapshot.restore();
}

View File

@@ -104,10 +104,15 @@ describe("deepinfra video generation provider", () => {
cfg: {},
});
expect(result.videos[0]).toMatchObject({
expect(result.videos).toHaveLength(1);
const [video] = result.videos;
if (!video) {
throw new Error("Expected generated DeepInfra video");
}
expect(video).toMatchObject({
mimeType: "video/webm",
fileName: "video-1.webm",
});
expect(result.videos[0]?.buffer).toEqual(Buffer.from("webm-data"));
expect(video.buffer).toEqual(Buffer.from("webm-data"));
});
});

View File

@@ -16,6 +16,8 @@ type PayloadCapture = {
payload?: Record<string, unknown>;
};
type RegisteredProvider = Awaited<ReturnType<typeof registerSingleProviderPlugin>>;
const emptyUsage = {
input: 0,
output: 0,
@@ -25,6 +27,15 @@ const emptyUsage = {
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
};
/**
 * Returns the provider's optional thinking-profile resolver, throwing when
 * the DeepSeek provider registration left it undefined.
 */
function requireThinkingProfileResolver(
  provider: RegisteredProvider,
): NonNullable<RegisteredProvider["resolveThinkingProfile"]> {
  const resolver = provider.resolveThinkingProfile;
  if (resolver) {
    return resolver;
  }
  throw new Error("DeepSeek provider did not register a thinking profile resolver");
}
const readToolCall = { type: "toolCall", id: "call_1", name: "read", arguments: {} };
const readToolResult = {
role: "toolResult",
@@ -141,9 +152,10 @@ describe("deepseek provider plugin", () => {
expect(provider.label).toBe("DeepSeek");
expect(provider.envVars).toEqual(["DEEPSEEK_API_KEY"]);
expect(provider.auth).toHaveLength(1);
expect(resolved).not.toBeNull();
expect(resolved?.provider.id).toBe("deepseek");
expect(resolved?.method.id).toBe("api-key");
expect(resolved).toMatchObject({
provider: { id: "deepseek" },
method: { id: "api-key" },
});
});
it("builds the static DeepSeek model catalog", async () => {
@@ -189,7 +201,7 @@ describe("deepseek provider plugin", () => {
it("advertises max thinking levels for DeepSeek V4 models only", async () => {
const provider = await registerSingleProviderPlugin(deepseekPlugin);
const resolveThinkingProfile = provider.resolveThinkingProfile!;
const resolveThinkingProfile = requireThinkingProfileResolver(provider);
const expectedV4Levels = ["off", "minimal", "low", "medium", "high", "xhigh", "max"];
expect(

View File

@@ -133,6 +133,13 @@ function requireText(result: { text?: unknown } | null | undefined): string {
return result.text;
}
/**
 * Narrows the optional mediaUrl on a send-options object to a non-empty
 * string, throwing when the pair command produced none.
 */
function requireMediaUrl(opts: { mediaUrl?: string }): string {
  const { mediaUrl } = opts;
  if (mediaUrl) {
    return mediaUrl;
  }
  throw new Error("pair command did not send a media URL");
}
function createChannelRuntime(
runtimeKey: string,
sendKey: string,
@@ -479,11 +486,12 @@ describe("device-pair /pair qr", () => {
expect(caption).toContain("Scan this QR code with the OpenClaw iOS app:");
expect(caption).toContain("IMPORTANT: After pairing finishes, run /pair cleanup.");
expect(caption).toContain("If this QR code leaks, run /pair cleanup immediately.");
expect(opts.mediaUrl).toMatch(/pair-qr\.png$/);
expect(opts.mediaLocalRoots).toEqual([path.dirname(opts.mediaUrl!)]);
const mediaUrl = requireMediaUrl(opts);
expect(mediaUrl).toMatch(/pair-qr\.png$/);
expect(opts.mediaLocalRoots).toEqual([path.dirname(mediaUrl)]);
expect(opts).toMatchObject(testCase.expectedOpts);
expect(sentPng).toBe("fakepng");
await expect(fs.access(opts.mediaUrl!)).rejects.toThrow();
await expect(fs.access(mediaUrl)).rejects.toThrow();
expect(text).toContain("QR code sent above.");
expect(text).toContain("IMPORTANT: Run /pair cleanup after pairing finishes.");
});

View File

@@ -68,7 +68,7 @@ describe("diffs tool rendered output guards", () => {
});
expect(screenshotter.screenshotHtml).toHaveBeenCalledTimes(1);
expect((result?.details as Record<string, unknown>).filePath).toEqual(expect.any(String));
expect((result?.details as Record<string, unknown>).filePath).toMatch(/preview\.png$/);
});
});

View File

@@ -190,8 +190,10 @@ describe("diffs tool", () => {
});
expectArtifactOnlyFileResult(screenshotter, result);
expect((result?.details as Record<string, unknown>).artifactId).toEqual(expect.any(String));
expect((result?.details as Record<string, unknown>).expiresAt).toEqual(expect.any(String));
expect(requireString(readDetails(result).artifactId, "artifactId")).toMatch(/^[a-f0-9]{20}$/u);
expect(requireString(readDetails(result).expiresAt, "expiresAt")).toMatch(
/^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$/u,
);
});
it("honors ttlSeconds for artifact-only file output", async () => {

View File

@@ -142,8 +142,11 @@ describe("fetchDiscord", () => {
});
expect(result).toEqual({ id: "42" });
expect(request?.method).toBe("POST");
expect(request?.body).toBe(JSON.stringify({ content: "hello" }));
expect(new Headers(request?.headers).get("content-type")).toBe("application/json");
if (!request) {
throw new Error("expected Discord request init");
}
expect(request.method).toBe("POST");
expect(request.body).toBe(JSON.stringify({ content: "hello" }));
expect(new Headers(request.headers).get("content-type")).toBe("application/json");
});
});

View File

@@ -19,20 +19,61 @@ beforeAll(async () => {
({ discordPlugin } = await import("./channel.js"));
});
type DiscordMessageAdapter = NonNullable<typeof discordPlugin.message>;
type DiscordMessageSender = NonNullable<DiscordMessageAdapter["send"]>;
/**
 * Fetches the discord plugin's channel message adapter, throwing when the
 * plugin does not expose one.
 */
function requireDiscordMessageAdapter(): DiscordMessageAdapter {
  const adapter = discordPlugin.message;
  if (adapter) {
    return adapter;
  }
  throw new Error("Expected discord plugin to expose a channel message adapter");
}
/** Narrows the adapter's optional text sender, throwing when it is missing. */
function requireTextSender(
  adapter: DiscordMessageAdapter,
): NonNullable<DiscordMessageSender["text"]> {
  const sender = adapter.send?.text;
  if (sender) {
    return sender;
  }
  throw new Error("Expected discord message adapter text sender");
}
/** Narrows the adapter's optional media sender, throwing when it is missing. */
function requireMediaSender(
  adapter: DiscordMessageAdapter,
): NonNullable<DiscordMessageSender["media"]> {
  const sender = adapter.send?.media;
  if (sender) {
    return sender;
  }
  throw new Error("Expected discord message adapter media sender");
}
/** Narrows the adapter's optional payload sender, throwing when it is missing. */
function requirePayloadSender(
  adapter: DiscordMessageAdapter,
): NonNullable<DiscordMessageSender["payload"]> {
  const sender = adapter.send?.payload;
  if (sender) {
    return sender;
  }
  throw new Error("Expected discord message adapter payload sender");
}
describe("discord channel message adapter", () => {
beforeEach(() => {
resetDiscordOutboundMocks(hoisted);
});
it("backs declared durable-final capabilities with outbound send proofs", async () => {
const adapter = discordPlugin.message;
if (!adapter) {
throw new Error("Expected discord plugin to expose a channel message adapter");
}
const adapter = requireDiscordMessageAdapter();
const sendText = requireTextSender(adapter);
const sendMedia = requireMediaSender(adapter);
const sendPayload = requirePayloadSender(adapter);
const proveText = async () => {
resetDiscordOutboundMocks(hoisted);
const result = await adapter.send!.text!({
const result = await sendText({
cfg: {},
to: "channel:123456",
text: "hello",
@@ -49,7 +90,7 @@ describe("discord channel message adapter", () => {
const proveMedia = async () => {
resetDiscordOutboundMocks(hoisted);
const result = await adapter.send!.media!({
const result = await sendMedia({
cfg: {},
to: "channel:123456",
text: "caption",
@@ -69,7 +110,7 @@ describe("discord channel message adapter", () => {
const provePayload = async () => {
resetDiscordOutboundMocks(hoisted);
const result = await adapter.send!.payload!({
const result = await sendPayload({
cfg: {},
to: "channel:123456",
text: "payload",
@@ -86,7 +127,7 @@ describe("discord channel message adapter", () => {
const proveReplyThreadSilent = async () => {
resetDiscordOutboundMocks(hoisted);
const result = await adapter.send!.text!({
const result = await sendText({
cfg: {},
to: "channel:parent-1",
text: "threaded",
@@ -110,7 +151,7 @@ describe("discord channel message adapter", () => {
await verifyChannelMessageAdapterCapabilityProofs({
adapterName: "discordMessageAdapter",
adapter: adapter,
adapter,
proofs: {
text: proveText,
media: proveMedia,
@@ -119,43 +160,44 @@ describe("discord channel message adapter", () => {
replyTo: proveReplyThreadSilent,
thread: proveReplyThreadSilent,
messageSendingHooks: () => {
expect(adapter.send!.text).toBeTypeOf("function");
expect(sendText).toBeTypeOf("function");
},
},
});
});
it("backs declared live preview finalizer capabilities with adapter proofs", async () => {
const adapter = discordPlugin.message;
const adapter = requireDiscordMessageAdapter();
const sendText = requireTextSender(adapter);
await verifyChannelMessageLiveCapabilityAdapterProofs({
adapterName: "discordMessageAdapter",
adapter: adapter!,
adapter,
proofs: {
draftPreview: () => {
expect(adapter!.live?.finalizer?.capabilities?.discardPending).toBe(true);
expect(adapter.live?.finalizer?.capabilities?.discardPending).toBe(true);
},
previewFinalization: () => {
expect(adapter!.live?.finalizer?.capabilities?.finalEdit).toBe(true);
expect(adapter.live?.finalizer?.capabilities?.finalEdit).toBe(true);
},
progressUpdates: () => {
expect(adapter!.live?.capabilities?.draftPreview).toBe(true);
expect(adapter.live?.capabilities?.draftPreview).toBe(true);
},
},
});
await verifyChannelMessageLiveFinalizerProofs({
adapterName: "discordMessageAdapter",
adapter: adapter!,
adapter,
proofs: {
finalEdit: () => {
expect(adapter!.live?.capabilities?.previewFinalization).toBe(true);
expect(adapter.live?.capabilities?.previewFinalization).toBe(true);
},
normalFallback: () => {
expect(adapter!.send!.text).toBeTypeOf("function");
expect(sendText).toBeTypeOf("function");
},
discardPending: () => {
expect(adapter!.live?.capabilities?.draftPreview).toBe(true);
expect(adapter.live?.capabilities?.draftPreview).toBe(true);
},
},
});

View File

@@ -467,12 +467,14 @@ describe("discordPlugin outbound", () => {
});
it("does not block Discord monitor startup on the startup probe", async () => {
let resolveProbe!: (value: {
ok: true;
bot: { username: string };
application: { intents: { messageContent: "limited" } };
elapsedMs: number;
}) => void;
let resolveProbe:
| ((value: {
ok: true;
bot: { username: string };
application: { intents: { messageContent: "limited" } };
elapsedMs: number;
}) => void)
| undefined;
probeDiscordMock.mockReturnValue(
new Promise((resolve) => {
resolveProbe = resolve;
@@ -501,8 +503,11 @@ describe("discordPlugin outbound", () => {
includeApplication: true,
}),
);
expect(statusPatches.filter((patch) => "bot" in patch || "application" in patch)).toEqual([]);
expect(statusPatches.some((patch) => "bot" in patch || "application" in patch)).toBe(false);
if (!resolveProbe) {
throw new Error("Expected Discord startup probe resolver to be initialized");
}
resolveProbe({
ok: true,
bot: { username: "AsyncBob" },

View File

@@ -147,6 +147,47 @@ describe("discord config schema", () => {
expect(cfg.voice?.model).toBe("openai/gpt-5.4-mini");
});
it("accepts Discord realtime voice modes", () => {
const cfg = expectValidDiscordConfig({
voice: {
mode: "bidi",
model: "openai-codex/gpt-5.5",
realtime: {
provider: "openai",
model: "gpt-realtime-2",
voice: "cedar",
toolPolicy: "safe-read-only",
consultPolicy: "always",
providers: {
openai: {
apiKey: "sk-test",
voice: "marin",
},
},
},
},
});
expect(cfg.voice?.mode).toBe("bidi");
expect(cfg.voice?.model).toBe("openai-codex/gpt-5.5");
expect(cfg.voice?.realtime?.provider).toBe("openai");
expect(cfg.voice?.realtime?.model).toBe("gpt-realtime-2");
expect(cfg.voice?.realtime?.voice).toBe("cedar");
expect(cfg.voice?.realtime?.toolPolicy).toBe("safe-read-only");
expect(cfg.voice?.realtime?.consultPolicy).toBe("always");
});
it("rejects invalid Discord realtime voice modes", () => {
for (const voice of [
{ mode: "realtime" },
{ mode: "bidi", realtime: { toolPolicy: "dangerous" } },
{ mode: "talk-buffer", realtime: { consultPolicy: "substantive" } },
{ mode: "talk-buffer", realtime: { debounceMs: 10_001 } },
]) {
expectInvalidDiscordConfig({ voice });
}
});
it("accepts Discord voice timing overrides", () => {
const cfg = expectValidDiscordConfig({
voice: {

View File

@@ -179,7 +179,36 @@ export const discordChannelConfigUiHints = {
},
"voice.model": {
label: "Discord Voice Model",
help: "Optional LLM model override for Discord voice channel responses (for example openai/gpt-5.4-mini). Leave unset to inherit the routed agent model.",
help: "Optional LLM model override for Discord voice channel responses and realtime agent consults (for example openai-codex/gpt-5.5). Leave unset to inherit the routed agent model.",
},
"voice.mode": {
label: "Discord Voice Mode",
help: "Conversation mode: stt-tts uses batch speech-to-text plus TTS, talk-buffer uses a realtime voice shell with the OpenClaw agent as the brain, and bidi lets the realtime provider converse directly with the OpenClaw consult tool.",
},
"voice.realtime.provider": {
label: "Discord Realtime Provider",
help: "Realtime voice provider for talk-buffer or bidi Discord voice modes, such as openai.",
},
"voice.realtime.model": {
label: "Discord Realtime Model",
help: "Provider realtime session model, such as gpt-realtime-2. This is separate from voice.model, which remains the OpenClaw agent brain model.",
},
"voice.realtime.voice": {
label: "Discord Realtime Voice",
help: "Provider realtime output voice, such as cedar.",
},
"voice.realtime.toolPolicy": {
label: "Discord Realtime Tool Policy",
help: "Tool policy for the OpenClaw agent consult tool in bidi mode: safe-read-only, owner, or none.",
},
"voice.realtime.consultPolicy": {
label: "Discord Realtime Consult Policy",
help: "Use always to strongly prefer the OpenClaw agent brain for substantive bidi turns.",
},
"voice.realtime.providers": {
label: "Discord Realtime Provider Settings",
help: "Provider-specific realtime voice settings keyed by provider id.",
advanced: true,
},
"voice.autoJoin": {
label: "Discord Voice Auto-Join",

View File

@@ -484,15 +484,15 @@ describe("GatewayPlugin", () => {
expect(gateway.ws).toBeNull();
expect(gateway.firstHeartbeatTimeout).toBeUndefined();
expect(gateway.heartbeatInterval).toBeUndefined();
expect(() => vi.advanceTimersByTime(20)).not.toThrow();
vi.advanceTimersByTime(20);
expect(send).not.toHaveBeenCalled();
expect(() =>
expect(
(
gateway as unknown as {
sendHeartbeat(): void;
}
).sendHeartbeat(),
).not.toThrow();
).toBeUndefined();
});
it("clears stale heartbeat timers before early reconnect exits", () => {

View File

@@ -38,6 +38,17 @@ vi.mock("openclaw/plugin-sdk/conversation-runtime", async () => {
// Minimal Guild stub carrying only id and name (widened to Guild via `as`).
const fakeGuild = (id: string, name: string) => ({ id, name }) as Guild;
/**
 * Runs normalizeDiscordAllowList and narrows away the null case, throwing
 * when normalization failed.
 */
function expectNormalizedAllowList(
  entries: string[],
  prefixes: string[],
): NonNullable<ReturnType<typeof normalizeDiscordAllowList>> {
  const normalized = normalizeDiscordAllowList(entries, prefixes);
  if (normalized !== null) {
    return normalized;
  }
  throw new Error("Expected allow list to be normalized");
}
const makeEntries = (
entries: Record<string, Partial<DiscordGuildEntryResolved>>,
): Record<string, DiscordGuildEntryResolved> => {
@@ -226,14 +237,10 @@ describe("discord allowlist helpers", () => {
});
it("matches ids by default and names only when enabled", () => {
const allow = normalizeDiscordAllowList(
const allow = expectNormalizedAllowList(
["123", "steipete", "Friends of OpenClaw"],
["discord:", "user:", "guild:", "channel:"],
);
expect(allow).not.toBeNull();
if (!allow) {
throw new Error("Expected allow list to be normalized");
}
expect(allowListMatches(allow, { id: "123" })).toBe(true);
expect(allowListMatches(allow, { name: "steipete" })).toBe(false);
expect(allowListMatches(allow, { name: "friends-of-openclaw" })).toBe(false);
@@ -245,11 +252,7 @@ describe("discord allowlist helpers", () => {
});
it("matches pk-prefixed allowlist entries", () => {
const allow = normalizeDiscordAllowList(["pk:member-123"], ["discord:", "user:", "pk:"]);
expect(allow).not.toBeNull();
if (!allow) {
throw new Error("Expected allow list to be normalized");
}
const allow = expectNormalizedAllowList(["pk:member-123"], ["discord:", "user:", "pk:"]);
expect(allowListMatches(allow, { id: "member-123" })).toBe(true);
expect(allowListMatches(allow, { id: "member-999" })).toBe(false);
});
@@ -266,7 +269,11 @@ describe("discord allowlist helpers", () => {
allowFrom: ["*", "user:123"],
sender: { id: "123" },
});
expect(explicitOwner.ownerAllowList).not.toBeNull();
if (explicitOwner.ownerAllowList === null) {
throw new Error("Expected explicit owner allowlist");
}
expect(explicitOwner.ownerAllowList.allowAll).toBe(false);
expect(explicitOwner.ownerAllowList.ids).toEqual(new Set(["123"]));
expect(explicitOwner.ownerAllowed).toBe(true);
});
});

View File

@@ -203,9 +203,12 @@ describe("Discord ACP bind here end-to-end flow", () => {
allowFrom: ["*"],
});
expect(preflight).not.toBeNull();
expect(preflight?.boundSessionKey).toBe(binding.targetSessionKey);
expect(preflight?.route.sessionKey).toBe(binding.targetSessionKey);
expect(preflight?.route.agentId).toBe("codex");
expect(preflight).toMatchObject({
boundSessionKey: binding.targetSessionKey,
route: {
sessionKey: binding.targetSessionKey,
agentId: "codex",
},
});
});
});

View File

@@ -50,7 +50,7 @@ describe("discord wildcard component registration ids", () => {
const components = createWildcardComponents();
const customIds = components.map((component) => component.customId);
expect(customIds.filter((id) => id === "*")).toEqual([]);
expect(customIds.some((id) => id === "*")).toBe(false);
expect(new Set(customIds).size).toBe(customIds.length);
});

View File

@@ -98,10 +98,10 @@ describe("createDiscordGatewaySupervisor", () => {
});
expect(supervisor.drainPending(() => "continue")).toBe("continue");
expect(() => supervisor.attachLifecycle(() => {})).not.toThrow();
expect(() => supervisor.detachLifecycle()).not.toThrow();
expect(() => supervisor.dispose()).not.toThrow();
expect(() => supervisor.dispose()).not.toThrow();
supervisor.attachLifecycle(() => {});
supervisor.detachLifecycle();
supervisor.dispose();
supervisor.dispose();
});
it("keeps suppressing late gateway errors after dispose", () => {
@@ -115,9 +115,7 @@ describe("createDiscordGatewaySupervisor", () => {
supervisor.dispose();
expect(() =>
emitter.emit("error", new Error("Max reconnect attempts (0) reached after close code 1005")),
).not.toThrow();
emitter.emit("error", new Error("Max reconnect attempts (0) reached after close code 1005"));
expect(runtime.error).toHaveBeenCalledWith(
expect.stringContaining("suppressed late gateway reconnect-exhausted error after dispose"),
);

View File

@@ -88,7 +88,20 @@ describe("buildDiscordInboundJob", () => {
},
ownerId: "user-1",
});
expect(() => JSON.stringify(job.payload)).not.toThrow();
expect(JSON.parse(JSON.stringify(job.payload))).toEqual(
expect.objectContaining({
threadChannel: {
id: "thread-1",
name: "codex",
parentId: "forum-1",
parent: {
id: "forum-1",
name: "Forum",
},
ownerId: "user-1",
},
}),
);
});
it("normalizes partial thread channels without reading throwing getters", async () => {
@@ -115,7 +128,13 @@ describe("buildDiscordInboundJob", () => {
parent: undefined,
ownerId: undefined,
});
expect(() => JSON.stringify(job.payload)).not.toThrow();
expect(JSON.parse(JSON.stringify(job.payload))).toEqual(
expect.objectContaining({
threadChannel: {
id: "thread-1",
},
}),
);
});
it("re-materializes the process context with an overridden abort signal", async () => {

View File

@@ -278,10 +278,11 @@ describe("preflightDiscordMessage configured ACP bindings", () => {
}),
);
expect(result).not.toBeNull();
expect(resolveConfiguredBindingRouteMock).toHaveBeenCalledTimes(1);
expect(ensureConfiguredBindingRouteReadyMock).toHaveBeenCalledTimes(1);
expect(result?.boundSessionKey).toBe("agent:codex:acp:binding:discord:default:abc123");
expect(result).toMatchObject({
boundSessionKey: "agent:codex:acp:binding:discord:default:abc123",
});
});
it("accepts plain messages in configured ACP-bound channels without a mention", async () => {
@@ -309,9 +310,10 @@ describe("preflightDiscordMessage configured ACP bindings", () => {
}),
);
expect(result).not.toBeNull();
expect(ensureConfiguredBindingRouteReadyMock).toHaveBeenCalledTimes(1);
expect(result?.boundSessionKey).toBe("agent:codex:acp:binding:discord:default:abc123");
expect(result).toMatchObject({
boundSessionKey: "agent:codex:acp:binding:discord:default:abc123",
});
});
it("hydrates empty guild message payloads from REST before ensuring configured ACP bindings", async () => {

View File

@@ -87,6 +87,17 @@ function createPreflightArgs(params: {
return createDiscordPreflightArgs(params);
}
type DiscordPreflightResult = NonNullable<Awaited<ReturnType<typeof preflightDiscordMessage>>>;
/**
 * Narrows a preflightDiscordMessage result to its non-null shape, throwing
 * when the preflight produced no result.
 */
function expectPreflightResult(
  result: Awaited<ReturnType<typeof preflightDiscordMessage>>,
): DiscordPreflightResult {
  if (result !== null) {
    return result;
  }
  throw new Error("Expected Discord preflight result");
}
function createThreadClient(params: { threadId: string; parentId: string }): DiscordClient {
return {
fetchChannel: async (channelId: string) => {
@@ -386,8 +397,8 @@ describe("preflightDiscordMessage", () => {
} as DiscordConfig,
});
expect(result).not.toBeNull();
expect(result?.threadBinding).toMatchObject({
const preflight = expectPreflightResult(result);
expect(preflight.threadBinding).toMatchObject({
conversation: {
channel: "discord",
accountId: "default",
@@ -468,11 +479,11 @@ describe("preflightDiscordMessage", () => {
},
});
expect(result).not.toBeNull();
expect(result?.route.agentId).toBe("newagent");
expect(result?.route.sessionKey).toBe(`agent:newagent:discord:channel:${channelId}`);
expect(result?.boundSessionKey).toBeUndefined();
expect(result?.threadBinding).toBeUndefined();
const preflight = expectPreflightResult(result);
expect(preflight.route.agentId).toBe("newagent");
expect(preflight.route.sessionKey).toBe(`agent:newagent:discord:channel:${channelId}`);
expect(preflight.boundSessionKey).toBeUndefined();
expect(preflight.threadBinding).toBeUndefined();
});
it("preflights direct-message voice notes without mention gating", async () => {
@@ -512,9 +523,9 @@ describe("preflightDiscordMessage", () => {
}),
}),
);
expect(result).not.toBeNull();
expect(result?.isDirectMessage).toBe(true);
expect(result?.preflightAudioTranscript).toBe("hello openclaw from dm audio");
const preflight = expectPreflightResult(result);
expect(preflight.isDirectMessage).toBe(true);
expect(preflight.preflightAudioTranscript).toBe("hello openclaw from dm audio");
});
it("keeps no-guild messages direct when channel lookup is unavailable", async () => {
@@ -542,11 +553,11 @@ describe("preflightDiscordMessage", () => {
} as DiscordConfig,
});
expect(result).not.toBeNull();
expect(result?.channelInfo).toBeNull();
expect(result?.isDirectMessage).toBe(true);
expect(result?.isGroupDm).toBe(false);
expect(result?.route.sessionKey).toBe("agent:main:discord:direct:user-1");
const preflight = expectPreflightResult(result);
expect(preflight.channelInfo).toBeNull();
expect(preflight.isDirectMessage).toBe(true);
expect(preflight.isGroupDm).toBe(false);
expect(preflight.route.sessionKey).toBe("agent:main:discord:direct:user-1");
});
it("falls back to the default discord account for omitted-account dm authorization", async () => {
@@ -628,8 +639,7 @@ describe("preflightDiscordMessage", () => {
registerBindingAdapter: true,
});
expect(result).not.toBeNull();
expect(result?.boundSessionKey).toBe(threadBinding.targetSessionKey);
expect(expectPreflightResult(result).boundSessionKey).toBe(threadBinding.targetSessionKey);
});
it("drops hydrated bound-thread webhook copies after fetching an empty payload", async () => {
@@ -759,9 +769,9 @@ describe("preflightDiscordMessage", () => {
config: expect.objectContaining({ enabled: true }),
}),
);
expect(result).not.toBeNull();
expect(result?.sender.isPluralKit).toBe(true);
expect(result?.canonicalMessageId).toBe("orig-123");
const preflight = expectPreflightResult(result);
expect(preflight.sender.isPluralKit).toBe(true);
expect(preflight.canonicalMessageId).toBe("orig-123");
});
it("skips PluralKit lookup for bound-thread webhook echoes", async () => {
@@ -837,9 +847,9 @@ describe("preflightDiscordMessage", () => {
}),
);
expect(result).not.toBeNull();
expect(result?.boundSessionKey).toBe(threadBinding.targetSessionKey);
expect(result?.shouldRequireMention).toBe(false);
const preflight = expectPreflightResult(result);
expect(preflight.boundSessionKey).toBe(threadBinding.targetSessionKey);
expect(preflight.shouldRequireMention).toBe(false);
});
it("drops bot messages without mention when allowBots=mentions", async () => {
@@ -878,7 +888,7 @@ describe("preflightDiscordMessage", () => {
const result = await runMentionOnlyBotPreflight({ channelId, guildId, message });
expect(result).not.toBeNull();
expect(expectPreflightResult(result)).toEqual(expect.any(Object));
});
it("hydrates mention metadata from REST when bot mention syntax is present but mentions are missing", async () => {
@@ -924,7 +934,7 @@ describe("preflightDiscordMessage", () => {
botUserId: botId,
});
expect(result).not.toBeNull();
expect(expectPreflightResult(result)).toEqual(expect.any(Object));
});
it("still drops bot control commands without a real mention when allowBots=mentions", async () => {
@@ -963,7 +973,7 @@ describe("preflightDiscordMessage", () => {
const result = await runMentionOnlyBotPreflight({ channelId, guildId, message });
expect(result).not.toBeNull();
expect(expectPreflightResult(result)).toEqual(expect.any(Object));
});
it("routes ordinary guild text control commands through authorization instead of dropping them", async () => {
@@ -1005,11 +1015,11 @@ describe("preflightDiscordMessage", () => {
},
});
expect(result).not.toBeNull();
expect(result?.baseText).toBe("/steer keep digging");
expect(result?.commandAuthorized).toBe(true);
expect(result?.shouldRequireMention).toBe(true);
expect(result?.shouldBypassMention).toBe(true);
const preflight = expectPreflightResult(result);
expect(preflight.baseText).toBe("/steer keep digging");
expect(preflight.commandAuthorized).toBe(true);
expect(preflight.shouldRequireMention).toBe(true);
expect(preflight.shouldBypassMention).toBe(true);
});
it("still drops Discord native command echo messages", async () => {
@@ -1138,9 +1148,9 @@ describe("preflightDiscordMessage", () => {
},
});
expect(result).not.toBeNull();
expect(result?.shouldRequireMention).toBe(true);
expect(result?.wasMentioned).toBe(true);
const preflight = expectPreflightResult(result);
expect(preflight.shouldRequireMention).toBe(true);
expect(preflight.wasMentioned).toBe(true);
});
it("accepts allowlisted guild messages when guild object is missing", async () => {
@@ -1173,10 +1183,10 @@ describe("preflightDiscordMessage", () => {
includeGuildObject: false,
});
expect(result).not.toBeNull();
expect(result?.guildInfo?.id).toBe("guild-1");
expect(result?.channelConfig?.allowed).toBe(true);
expect(result?.shouldRequireMention).toBe(false);
const preflight = expectPreflightResult(result);
expect(preflight.guildInfo?.id).toBe("guild-1");
expect(preflight.channelConfig?.allowed).toBe(true);
expect(preflight.shouldRequireMention).toBe(false);
});
it("inherits parent thread allowlist when guild object is missing", async () => {
@@ -1221,11 +1231,11 @@ describe("preflightDiscordMessage", () => {
},
});
expect(result).not.toBeNull();
expect(result?.guildInfo?.id).toBe("guild-1");
expect(result?.threadParentId).toBe(parentId);
expect(result?.channelConfig?.allowed).toBe(true);
expect(result?.shouldRequireMention).toBe(false);
const preflight = expectPreflightResult(result);
expect(preflight.guildInfo?.id).toBe("guild-1");
expect(preflight.threadParentId).toBe(parentId);
expect(preflight.channelConfig?.allowed).toBe(true);
expect(preflight.shouldRequireMention).toBe(false);
});
it("handles partial thread channel owner getters during mention preflight", async () => {
@@ -1284,9 +1294,9 @@ describe("preflightDiscordMessage", () => {
},
});
expect(result).not.toBeNull();
expect(result?.threadParentId).toBe(parentId);
expect(result?.shouldRequireMention).toBe(false);
const preflight = expectPreflightResult(result);
expect(preflight.threadParentId).toBe(parentId);
expect(preflight.shouldRequireMention).toBe(false);
});
it("drops guild messages that mention another user when ignoreOtherMentions=true", async () => {
@@ -1326,8 +1336,7 @@ describe("preflightDiscordMessage", () => {
const result = await runIgnoreOtherMentionsPreflight({ channelId, guildId, message });
expect(result).not.toBeNull();
expect(result?.hasAnyMention).toBe(true);
expect(expectPreflightResult(result).hasAnyMention).toBe(true);
});
it("ignores bot-sent @everyone mentions for detection", async () => {
@@ -1367,8 +1376,7 @@ describe("preflightDiscordMessage", () => {
},
});
expect(result).not.toBeNull();
expect(result?.hasAnyMention).toBe(false);
expect(expectPreflightResult(result).hasAnyMention).toBe(false);
});
it("does not treat bot-sent @everyone as wasMentioned", async () => {
@@ -1408,8 +1416,7 @@ describe("preflightDiscordMessage", () => {
},
});
expect(result).not.toBeNull();
expect(result?.wasMentioned).toBe(false);
expect(expectPreflightResult(result).wasMentioned).toBe(false);
});
it("uses attachment content_type for guild audio preflight mention detection", async () => {
@@ -1477,9 +1484,9 @@ describe("preflightDiscordMessage", () => {
}),
}),
);
expect(result).not.toBeNull();
expect(result?.wasMentioned).toBe(true);
expect(result?.preflightAudioTranscript).toBe("hey openclaw");
const preflight = expectPreflightResult(result);
expect(preflight.wasMentioned).toBe(true);
expect(preflight.preflightAudioTranscript).toBe("hey openclaw");
});
it("does not transcribe guild audio from unauthorized members", async () => {
@@ -1622,7 +1629,7 @@ describe("preflightDiscordMessage", () => {
"guild-1": { channels: { [channelId]: { enabled: true, requireMention: true } } },
},
});
expect(result).not.toBeNull();
expect(expectPreflightResult(result)).toEqual(expect.any(Object));
} finally {
routeSpy.mockRestore();
ensureSpy.mockRestore();
@@ -1694,7 +1701,7 @@ describe("shouldIgnoreBoundThreadWebhookMessage", () => {
webhookId: "wh-1",
webhookToken: "tok-1",
});
expect(binding).not.toBeNull();
expect(binding).toEqual(expect.any(Object));
manager.unbindThread({
threadId: "thread-1",

View File

@@ -716,7 +716,7 @@ describe("processDiscordMessage ack reactions", () => {
it("shows stall emojis for long no-progress runs", async () => {
vi.useFakeTimers();
let releaseDispatch!: () => void;
let releaseDispatch: (() => void) | undefined;
const dispatchGate = new Promise<void>((resolve) => {
releaseDispatch = () => resolve();
});
@@ -729,6 +729,9 @@ describe("processDiscordMessage ack reactions", () => {
const runPromise = runProcessDiscordMessage(ctx);
await vi.advanceTimersByTimeAsync(30_001);
if (!releaseDispatch) {
throw new Error("Expected Discord dispatch release callback to be initialized");
}
releaseDispatch();
await vi.runAllTimersAsync();
@@ -1680,7 +1683,8 @@ describe("processDiscordMessage draft streaming", () => {
await runProcessDiscordMessage(ctx);
expect(draftStream.update).toHaveBeenCalledWith("🧩 First\n🧩 Second\n🧩 Third");
expect(draftStream.update).toHaveBeenNthCalledWith(1, "Clawing...\n🧩 First\n🧩 Second");
expect(draftStream.update).toHaveBeenNthCalledWith(2, "🧩 First\n🧩 Second\n🧩 Third");
});
it("skips empty apply_patch starts and renders the patch summary", async () => {
@@ -1725,8 +1729,8 @@ describe("processDiscordMessage draft streaming", () => {
kind: "analysis",
title: "Reasoning",
});
await params?.replyOptions?.onReasoningStream?.({ text: "Reading " });
await params?.replyOptions?.onReasoningStream?.({ text: "the event projector" });
await params?.replyOptions?.onReasoningStream?.({ text: "Reading" });
await params?.replyOptions?.onReasoningStream?.({ text: "Reading the event projector" });
return createNoQueuedDispatchResult();
});
@@ -1744,7 +1748,7 @@ describe("processDiscordMessage draft streaming", () => {
await runProcessDiscordMessage(ctx);
expect(draftStream.update).toHaveBeenCalledWith(
"Clawing...\n🛠 Exec\n• Reading the event projector",
"Clawing...\n🛠 Exec\n• _Reading the event projector_",
);
expect(draftStream.update).not.toHaveBeenCalledWith(expect.stringContaining("Reasoning"));
});
@@ -1754,9 +1758,9 @@ describe("processDiscordMessage draft streaming", () => {
dispatchInboundMessage.mockImplementationOnce(async (params?: DispatchInboundParams) => {
await params?.replyOptions?.onToolStart?.({ name: "exec", phase: "start" });
await params?.replyOptions?.onReasoningStream?.({ text: "Reasoning:\n_Checking files_" });
await params?.replyOptions?.onReasoningStream?.({ text: "Checking files" });
await params?.replyOptions?.onReasoningStream?.({
text: "Reasoning:\n_Checking files and tests_",
text: "Checking files and tests",
});
return createNoQueuedDispatchResult();
});

View File

@@ -1,4 +1,8 @@
import { resolveAckReaction, resolveHumanDelayConfig } from "openclaw/plugin-sdk/agent-runtime";
import {
formatReasoningMessage,
resolveAckReaction,
resolveHumanDelayConfig,
} from "openclaw/plugin-sdk/agent-runtime";
import {
createStatusReactionController,
DEFAULT_TIMING,
@@ -665,7 +669,10 @@ export async function processDiscordMessage(
draftPreview.suppressDefaultToolProgressMessages ? true : undefined,
onReasoningStream: async (payload) => {
await statusReactions.setThinking();
await draftPreview.pushReasoningProgress(payload?.text);
const formattedText = payload?.text
? formatReasoningMessage(payload.text)
: undefined;
await draftPreview.pushReasoningProgress(formattedText);
},
onToolStart: async (payload) => {
if (isProcessAborted(abortSignal)) {

View File

@@ -440,7 +440,7 @@ describe("createDiscordMessageHandler queue behavior", () => {
await flushQueueWork();
expect(processDiscordMessageMock).toHaveBeenCalledTimes(1);
expect(capturedAbortSignals[0]?.aborted).not.toBe(true);
expect(capturedAbortSignals).toEqual([undefined]);
expect(params.runtime.error).not.toHaveBeenCalledWith(expect.stringContaining("timed out"));
firstRun.resolve();
@@ -448,7 +448,7 @@ describe("createDiscordMessageHandler queue behavior", () => {
await flushQueueWork();
expect(processDiscordMessageMock).toHaveBeenCalledTimes(2);
expect(capturedAbortSignals[1]?.aborted).not.toBe(true);
expect(capturedAbortSignals).toEqual([undefined, undefined]);
secondRun.resolve();
await secondRun.promise;

View File

@@ -392,9 +392,8 @@ describe("Discord model picker rendering", () => {
return parsed?.action === "provider";
});
expect(providerButtons).toHaveLength(Object.keys(entries).length);
expect(allButtons.some((component) => (component.custom_id ?? "").includes(";a=nav;"))).toBe(
false,
);
const customIds = allButtons.map((component) => component.custom_id ?? "");
expect(customIds).not.toEqual(expect.arrayContaining([expect.stringContaining(";a=nav;")]));
});
it("does not render navigation buttons even when provider count exceeds one page", () => {
@@ -419,9 +418,8 @@ describe("Discord model picker rendering", () => {
expect(rows.length).toBeGreaterThan(0);
const allButtons = rows.flatMap((row) => row.components ?? []);
expect(allButtons.some((component) => (component.custom_id ?? "").includes(";a=nav;"))).toBe(
false,
);
const customIds = allButtons.map((component) => component.custom_id ?? "");
expect(customIds).not.toEqual(expect.arrayContaining([expect.stringContaining(";a=nav;")]));
});
it("supports classic fallback rendering with content + action rows", () => {
@@ -497,7 +495,12 @@ describe("Discord model picker rendering", () => {
throw new Error("models view did not render a provider select");
}
expect(providerSelect.options?.length).toBe(2);
expect(providerSelect.options?.find((option) => option.value === "openai")?.default).toBe(true);
expect(providerSelect.options).toContainEqual(
expect.objectContaining({
value: "openai",
default: true,
}),
);
const parsedProviderState = parseDiscordModelPickerCustomId(providerSelect.custom_id ?? "");
expect(parsedProviderState?.action).toBe("provider");
@@ -508,7 +511,12 @@ describe("Discord model picker rendering", () => {
throw new Error("models view did not render a model select");
}
expect(modelSelect.options?.length).toBe(3);
expect(modelSelect.options?.find((option) => option.value === "o3")?.default).toBe(true);
expect(modelSelect.options).toContainEqual(
expect.objectContaining({
value: "o3",
default: true,
}),
);
const parsedModelSelectState = parseDiscordModelPickerCustomId(modelSelect.custom_id ?? "");
expect(parsedModelSelectState?.action).toBe("model");
@@ -579,7 +587,12 @@ describe("Discord model picker rendering", () => {
expect(runtimeSelect.options?.find((option) => option.value === "pi")?.label).toBe(
"OpenClaw Pi Default",
);
expect(runtimeSelect.options?.find((option) => option.value === "codex")?.default).toBe(true);
expect(runtimeSelect.options).toContainEqual(
expect.objectContaining({
value: "codex",
default: true,
}),
);
const submitButton = rows[3]?.components?.at(-1);
const submitState = requireValue(

View File

@@ -392,7 +392,11 @@ describe("discord component interactions", () => {
await button.run(secondInteraction, { cid: "btn_1" } as ComponentData);
expect(dispatchReplyMock).toHaveBeenCalledTimes(2);
expect(resolveDiscordComponentEntry({ id: "btn_1", consume: false })).not.toBeNull();
const entry = resolveDiscordComponentEntry({ id: "btn_1", consume: false });
if (!entry) {
throw new Error("expected reusable Discord component entry");
}
expect(entry.id).toBe("btn_1");
});
it("blocks buttons when allowedUsers does not match", async () => {
@@ -411,7 +415,11 @@ describe("discord component interactions", () => {
ephemeral: true,
});
expect(dispatchReplyMock).not.toHaveBeenCalled();
expect(resolveDiscordComponentEntry({ id: "btn_1", consume: false })).not.toBeNull();
const entry = resolveDiscordComponentEntry({ id: "btn_1", consume: false });
if (!entry) {
throw new Error("expected unauthorized Discord component entry to remain active");
}
expect(entry.id).toBe("btn_1");
});
it("blocks buttons from guilds removed from the allowlist", async () => {
@@ -590,7 +598,11 @@ describe("discord component interactions", () => {
const { acknowledge } = await runModalSubmission({ reusable: true });
expect(acknowledge).toHaveBeenCalledTimes(1);
expect(resolveDiscordModalEntry({ id: "mdl_1", consume: false })).not.toBeNull();
const entry = resolveDiscordModalEntry({ id: "mdl_1", consume: false });
if (!entry) {
throw new Error("expected reusable Discord modal entry");
}
expect(entry.id).toBe("mdl_1");
});
it("passes false auth to plugin Discord interactions for non-allowlisted guild users", async () => {

View File

@@ -245,20 +245,23 @@ describe("resolveDiscordPresenceUpdate", () => {
it("returns status-only presence when activity is omitted", () => {
const presence = resolveDiscordPresenceUpdate({ status: "dnd" });
expect(presence).not.toBeNull();
expect(presence?.status).toBe("dnd");
expect(presence?.activities).toEqual([]);
expect(presence).toMatchObject({
status: "dnd",
activities: [],
});
});
it("defaults to custom activity type when activity is set without type", () => {
const presence = resolveDiscordPresenceUpdate({ activity: "Focus time" });
expect(presence).not.toBeNull();
expect(presence?.status).toBe("online");
expect(presence?.activities).toHaveLength(1);
expect(presence?.activities[0]).toMatchObject({
type: 4,
name: "Custom Status",
state: "Focus time",
expect(presence).toMatchObject({
status: "online",
activities: [
expect.objectContaining({
type: 4,
name: "Custom Status",
state: "Focus time",
}),
],
});
});
@@ -268,12 +271,14 @@ describe("resolveDiscordPresenceUpdate", () => {
activityType: 1,
activityUrl: "https://twitch.tv/openclaw",
});
expect(presence).not.toBeNull();
expect(presence?.activities).toHaveLength(1);
expect(presence?.activities[0]).toMatchObject({
type: 1,
name: "Live",
url: "https://twitch.tv/openclaw",
expect(presence).toMatchObject({
activities: [
expect.objectContaining({
type: 1,
name: "Live",
url: "https://twitch.tv/openclaw",
}),
],
});
});
});
@@ -331,17 +336,16 @@ describe("resolveDiscordAutoThreadContext", () => {
continue;
}
expect(context, testCase.name).not.toBeNull();
expect(context?.To, testCase.name).toBe("channel:thread");
expect(context?.From, testCase.name).toBe("discord:channel:thread");
expect(context?.OriginatingTo, testCase.name).toBe("channel:thread");
expect(context?.SessionKey, testCase.name).toBe(
buildAgentSessionKey({
expect(context, testCase.name).toMatchObject({
To: "channel:thread",
From: "discord:channel:thread",
OriginatingTo: "channel:thread",
SessionKey: buildAgentSessionKey({
agentId: "agent",
channel: "discord",
peer: { kind: "channel", id: "thread" },
}),
);
});
expect(context?.ParentSessionKey, testCase.name).toBe(testCase.expectedParentSessionKey);
expect(context?.ModelParentSessionKey, testCase.name).toBe(
testCase.expectedModelParentSessionKey,

View File

@@ -1,44 +1,61 @@
import { describe, expect, it } from "vitest";
import { resolveDiscordPresenceUpdate } from "./presence.js";
type DiscordPresenceUpdate = NonNullable<ReturnType<typeof resolveDiscordPresenceUpdate>>;
function expectPresenceUpdate(
result: ReturnType<typeof resolveDiscordPresenceUpdate>,
): DiscordPresenceUpdate {
expect(result).toEqual(expect.objectContaining({ activities: expect.any(Array) }));
if (result === null) {
throw new Error("Expected Discord presence update");
}
return result;
}
describe("resolveDiscordPresenceUpdate", () => {
it("returns online presence when no config is provided", () => {
const result = resolveDiscordPresenceUpdate({});
expect(result).not.toBeNull();
expect(result!.status).toBe("online");
expect(result!.activities).toEqual([]);
const result = expectPresenceUpdate(resolveDiscordPresenceUpdate({}));
expect(result.status).toBe("online");
expect(result.activities).toEqual([]);
});
it("uses configured status", () => {
const result = resolveDiscordPresenceUpdate({ status: "dnd" });
expect(result!.status).toBe("dnd");
const result = expectPresenceUpdate(resolveDiscordPresenceUpdate({ status: "dnd" }));
expect(result.status).toBe("dnd");
});
it("includes activity when configured", () => {
const result = resolveDiscordPresenceUpdate({ activity: "Helping humans" });
expect(result!.status).toBe("online");
expect(result!.activities).toHaveLength(1);
expect(result!.activities[0].state).toBe("Helping humans");
const result = expectPresenceUpdate(
resolveDiscordPresenceUpdate({ activity: "Helping humans" }),
);
expect(result.status).toBe("online");
expect(result.activities).toHaveLength(1);
expect(result.activities[0].state).toBe("Helping humans");
});
it("uses custom activity type by default", () => {
const result = resolveDiscordPresenceUpdate({ activity: "test" });
expect(result!.activities[0].type).toBe(4);
expect(result!.activities[0].name).toBe("Custom Status");
const result = expectPresenceUpdate(resolveDiscordPresenceUpdate({ activity: "test" }));
expect(result.activities[0].type).toBe(4);
expect(result.activities[0].name).toBe("Custom Status");
});
it("respects explicit activityType", () => {
const result = resolveDiscordPresenceUpdate({ activity: "test", activityType: 3 });
expect(result!.activities[0].type).toBe(3);
expect(result!.activities[0].name).toBe("test");
const result = expectPresenceUpdate(
resolveDiscordPresenceUpdate({ activity: "test", activityType: 3 }),
);
expect(result.activities[0].type).toBe(3);
expect(result.activities[0].name).toBe("test");
});
it("sets streaming URL for type 1", () => {
const result = resolveDiscordPresenceUpdate({
activity: "Live",
activityType: 1,
activityUrl: "https://twitch.tv/test",
});
expect(result!.activities[0].url).toBe("https://twitch.tv/test");
const result = expectPresenceUpdate(
resolveDiscordPresenceUpdate({
activity: "Live",
activityType: 1,
activityUrl: "https://twitch.tv/test",
}),
);
expect(result.activities[0].url).toBe("https://twitch.tv/test");
});
});

View File

@@ -327,9 +327,9 @@ describe("monitorDiscordProvider", () => {
expect(monitorLifecycleMock).not.toHaveBeenCalled();
expect(disconnect).toHaveBeenCalledTimes(1);
expect(() =>
expect(
emitter.emit("error", new Error("Max reconnect attempts (0) reached after code 1005")),
).not.toThrow();
).toBe(true);
expect(runtime.error).toHaveBeenCalledWith(
expect.stringContaining("suppressed late gateway reconnect-exhausted error after dispose"),
);
@@ -1105,26 +1105,26 @@ describe("monitorDiscordProvider", () => {
});
await vi.waitFor(() =>
expect(
vi
.mocked(runtime.log)
.mock.calls.some((call) => String(call[0]).includes("deploy-commands:done")),
).toBe(true),
expect(vi.mocked(runtime.log).mock.calls.map((call) => String(call[0]))).toEqual(
expect.arrayContaining([expect.stringContaining("deploy-commands:done")]),
),
);
const messages = vi.mocked(runtime.log).mock.calls.map((call) => String(call[0]));
expect(messages.some((msg) => msg.includes("fetch-application-id:start"))).toBe(true);
expect(messages.some((msg) => msg.includes("fetch-application-id:done"))).toBe(true);
expect(messages.some((msg) => msg.includes("deploy-commands:schedule"))).toBe(true);
expect(messages.some((msg) => msg.includes("deploy-commands:scheduled"))).toBe(true);
expect(messages.some((msg) => msg.includes("deploy-commands:done"))).toBe(true);
expect(messages.some((msg) => msg.includes("fetch-bot-identity:start"))).toBe(true);
expect(messages.some((msg) => msg.includes("fetch-bot-identity:done"))).toBe(true);
expect(
messages.some(
(msg) => msg.includes("gateway-debug") && msg.includes("Gateway websocket opened"),
),
).toBe(true);
expect(messages).toEqual(
expect.arrayContaining([
expect.stringContaining("fetch-application-id:start"),
expect.stringContaining("fetch-application-id:done"),
expect.stringContaining("deploy-commands:schedule"),
expect.stringContaining("deploy-commands:scheduled"),
expect.stringContaining("deploy-commands:done"),
expect.stringContaining("fetch-bot-identity:start"),
expect.stringContaining("fetch-bot-identity:done"),
]),
);
expect(messages).toEqual(
expect.arrayContaining([expect.stringMatching(/gateway-debug.*Gateway websocket opened/)]),
);
});
it("keeps Discord startup chatter quiet by default", async () => {
@@ -1136,6 +1136,8 @@ describe("monitorDiscordProvider", () => {
});
const messages = vi.mocked(runtime.log).mock.calls.map((call) => String(call[0]));
expect(messages.some((msg) => msg.includes("discord startup ["))).toBe(false);
expect(messages).not.toEqual(
expect.arrayContaining([expect.stringContaining("discord startup [")]),
);
});
});

View File

@@ -288,7 +288,7 @@ describe("thread binding lifecycle", () => {
webhookToken: "tok-1",
introText: "intro",
});
expect(binding).not.toBeNull();
expect(binding).toEqual(expect.any(Object));
hoisted.sendMessageDiscord.mockClear();
hoisted.sendWebhookMessageDiscord.mockClear();
@@ -327,7 +327,7 @@ describe("thread binding lifecycle", () => {
webhookId: "wh-1",
webhookToken: "tok-1",
});
expect(binding).not.toBeNull();
expect(binding).toEqual(expect.any(Object));
hoisted.sendMessageDiscord.mockClear();
await vi.advanceTimersByTimeAsync(120_000);
@@ -656,7 +656,7 @@ describe("thread binding lifecycle", () => {
vi.setSystemTime(new Date("2026-02-20T00:00:30.000Z"));
const touched = manager.touchThread({ threadId: "thread-1", persist: false });
expect(touched).not.toBeNull();
expect(touched).toEqual(expect.any(Object));
const record = requireBinding(manager, "thread-1");
expect(record.lastActivityAt).toBe(new Date("2026-02-20T00:00:30.000Z").getTime());
@@ -746,7 +746,7 @@ describe("thread binding lifecycle", () => {
targetSessionKey: "agent:main:subagent:child-1",
agentId: "main",
});
expect(first).not.toBeNull();
expect(first).toEqual(expect.any(Object));
expect(hoisted.restPost).toHaveBeenCalledTimes(1);
manager.unbindThread({
@@ -761,9 +761,10 @@ describe("thread binding lifecycle", () => {
targetSessionKey: "agent:main:subagent:child-2",
agentId: "main",
});
expect(second).not.toBeNull();
expect(second?.webhookId).toBe("wh-created");
expect(second?.webhookToken).toBe("tok-created");
expect(second).toMatchObject({
webhookId: "wh-created",
webhookToken: "tok-created",
});
expect(hoisted.restPost).toHaveBeenCalledTimes(1);
});
@@ -796,7 +797,7 @@ describe("thread binding lifecycle", () => {
agentId: "main",
});
expect(childBinding).not.toBeNull();
expect(childBinding).toEqual(expect.any(Object));
expect(hoisted.createThreadDiscord).toHaveBeenCalledTimes(1);
expect(hoisted.createThreadDiscord).toHaveBeenCalledWith(
"parent-1",
@@ -836,8 +837,7 @@ describe("thread binding lifecycle", () => {
agentId: "main",
});
expect(childBinding).not.toBeNull();
expect(childBinding?.channelId).toBe("parent-1");
expect(childBinding).toMatchObject({ channelId: "parent-1" });
expect(hoisted.restGet).toHaveBeenCalledTimes(1);
expect(hoisted.createThreadDiscord).toHaveBeenCalledWith(
"parent-1",
@@ -879,7 +879,7 @@ describe("thread binding lifecycle", () => {
agentId: "main",
});
expect(childBinding).not.toBeNull();
expect(childBinding).toEqual(expect.any(Object));
const firstClientArgs = hoisted.createDiscordRestClient.mock.calls[0]?.[0] as
| { accountId?: string; token?: string }
| undefined;
@@ -929,7 +929,7 @@ describe("thread binding lifecycle", () => {
agentId: "main",
});
expect(bound).not.toBeNull();
expect(bound).toEqual(expect.any(Object));
const usedRefreshedCfg = hoisted.createDiscordRestClient.mock.calls.some((call) => {
if (call?.[1] === refreshedCfg) {
return true;
@@ -986,7 +986,7 @@ describe("thread binding lifecycle", () => {
agentId: "main",
});
expect(bound).not.toBeNull();
expect(bound).toEqual(expect.any(Object));
expect(hoisted.createThreadDiscord).toHaveBeenCalledWith(
"parent-runtime",
expect.objectContaining({ autoArchiveMinutes: 60 }),

View File

@@ -30,7 +30,8 @@ describe("thread binding manager state", () => {
enableSweeper: false,
});
expect(getThreadBindingManager("work")).not.toBeNull();
expect(viaAlternateLoader.getThreadBindingManager("work")).not.toBeNull();
const direct = getThreadBindingManager("work");
expect(direct).toEqual(expect.any(Object));
expect(viaAlternateLoader.getThreadBindingManager("work")).toBe(direct);
});
});

View File

@@ -10,6 +10,16 @@ await installDiscordOutboundModuleSpies(hoisted);
const { discordOutbound } = await import("./outbound-adapter.js");
type DiscordSendPayload = NonNullable<typeof discordOutbound.sendPayload>;
function requireDiscordSendPayload(): DiscordSendPayload {
const sendPayload = discordOutbound.sendPayload;
if (!sendPayload) {
throw new Error("Expected Discord outbound sendPayload");
}
return sendPayload;
}
describe("discordOutbound shared interactive ordering", () => {
beforeEach(() => {
resetDiscordOutboundMocks(hoisted);
@@ -20,7 +30,8 @@ describe("discordOutbound shared interactive ordering", () => {
});
it("keeps shared text blocks in authored order without hoisting fallback text", async () => {
const result = await discordOutbound.sendPayload!({
const sendPayload = requireDiscordSendPayload();
const result = await sendPayload({
cfg: {},
to: "channel:123456",
text: "",

View File

@@ -6,6 +6,16 @@ import {
import { describe, vi } from "vitest";
import { discordOutbound } from "./outbound-adapter.js";
type DiscordSendPayload = NonNullable<typeof discordOutbound.sendPayload>;
function requireDiscordSendPayload(): DiscordSendPayload {
const sendPayload = discordOutbound.sendPayload;
if (!sendPayload) {
throw new Error("Expected Discord outbound sendPayload");
}
return sendPayload;
}
function createDiscordHarness(params: OutboundPayloadHarnessParams) {
const sendDiscord = vi.fn();
primeChannelOutboundSendMock(
@@ -22,8 +32,9 @@ function createDiscordHarness(params: OutboundPayloadHarnessParams) {
sendDiscord,
},
};
const sendPayload = requireDiscordSendPayload();
return {
run: async () => await discordOutbound.sendPayload!(ctx),
run: async () => await sendPayload(ctx),
sendMock: sendDiscord,
to: ctx.to,
};

View File

@@ -51,7 +51,10 @@ describe("createDiscordRequestClient", () => {
client.abortAllRequests();
await expect(request).rejects.toThrow();
expect(abortable.receivedSignal?.aborted).toBe(true);
if (!abortable.receivedSignal) {
throw new Error("Expected proxied fetch abort signal");
}
expect(abortable.receivedSignal.aborted).toBe(true);
});
it("provides the REST client's timeout signal even without a caller signal", async () => {

View File

@@ -13,7 +13,11 @@ describe("resolve-allowlist-common", () => {
];
it("resolves and filters guilds by id or name", () => {
expect(findDiscordGuildByName(guilds, "Main Guild")?.id).toBe("1");
const mainGuild = findDiscordGuildByName(guilds, "Main Guild");
if (!mainGuild) {
throw new Error("expected Main Guild lookup result");
}
expect(mainGuild.id).toBe("1");
expect(filterDiscordGuilds(guilds, { guildId: "2" })).toEqual([guilds[1]]);
expect(filterDiscordGuilds(guilds, { guildName: "main-guild" })).toEqual([guilds[0]]);
});

View File

@@ -82,11 +82,13 @@ describe("discord guild permission authorization", () => {
"user-1",
EMPTY_DISCORD_TEST_OPTS,
);
expect(result).not.toBeNull();
expect((result! & PermissionFlagsBits.ViewChannel) === PermissionFlagsBits.ViewChannel).toBe(
if (result === null) {
throw new Error("Expected guild permissions bitfield");
}
expect((result & PermissionFlagsBits.ViewChannel) === PermissionFlagsBits.ViewChannel).toBe(
true,
);
expect((result! & PermissionFlagsBits.KickMembers) === PermissionFlagsBits.KickMembers).toBe(
expect((result & PermissionFlagsBits.KickMembers) === PermissionFlagsBits.KickMembers).toBe(
true,
);
});

View File

@@ -50,6 +50,18 @@ describe("ensureOggOpus", () => {
runFfprobeMock.mockReset();
runFfmpegMock.mockReset();
});
// Asserts that ffmpeg was pointed at a staged temp file named
// ".fs-safe-output-<...>-<finalBasename>.part" instead of writing the
// final destination path directly.
function expectStagedFfmpegOutput(ffmpegOutputPath: string | undefined, finalPath: string) {
  expect(ffmpegOutputPath).toBeTypeOf("string");
  if (typeof ffmpegOutputPath !== "string") {
    throw new Error("missing ffmpeg output path");
  }
  // Staged output must never be the final path itself.
  expect(ffmpegOutputPath).not.toBe(finalPath);
  const stagedName = path.basename(ffmpegOutputPath);
  const expectedSuffix = `-${path.basename(finalPath)}.part`;
  expect(stagedName.startsWith(".fs-safe-output-")).toBe(true);
  expect(stagedName.endsWith(expectedSuffix)).toBe(true);
}
it("rejects URL/protocol input paths", async () => {
await expect(ensureOggOpus("https://example.com/audio.ogg")).rejects.toThrow(
/local file path/i,
@@ -90,8 +102,7 @@ describe("ensureOggOpus", () => {
expect.arrayContaining(["-t", "1200", "-ar", "48000", "/tmp/input.ogg"]),
);
const ffmpegOutputPath = (runFfmpegMock.mock.calls[0]?.[0] as string[] | undefined)?.at(-1);
expect(ffmpegOutputPath).not.toBe(result.path);
expect(path.basename(ffmpegOutputPath ?? "")).toBe(path.basename(result.path));
expectStagedFfmpegOutput(ffmpegOutputPath, result.path);
await expect(fs.readFile(result.path, "utf8")).resolves.toBe("ogg");
});
@@ -113,8 +124,7 @@ describe("ensureOggOpus", () => {
expect.arrayContaining(["-vn", "-sn", "-dn", "/tmp/input.mp3"]),
);
const ffmpegOutputPath = (runFfmpegMock.mock.calls[0]?.[0] as string[] | undefined)?.at(-1);
expect(ffmpegOutputPath).not.toBe(result.path);
expect(path.basename(ffmpegOutputPath ?? "")).toBe(path.basename(result.path));
expectStagedFfmpegOutput(ffmpegOutputPath, result.path);
await expect(fs.readFile(result.path, "utf8")).resolves.toBe("ogg");
});
});

View File

@@ -1,6 +1,7 @@
import fs from "node:fs/promises";
import { createRequire } from "node:module";
import type { Readable } from "node:stream";
import { resamplePcm } from "openclaw/plugin-sdk/realtime-voice";
import { logVerbose, shouldLogVerbose } from "openclaw/plugin-sdk/runtime-env";
import { formatErrorMessage } from "openclaw/plugin-sdk/ssrf-runtime";
import { tempWorkspace, resolvePreferredOpenClawTmpDir } from "openclaw/plugin-sdk/temp-path";
@@ -140,6 +141,67 @@ export async function decodeOpusStream(
return chunks.length > 0 ? Buffer.concat(chunks) : Buffer.alloc(0);
}
/**
 * Consumes an Opus-framed readable stream and forwards each decoded
 * 48 kHz stereo PCM chunk to `params.onChunk`.
 *
 * Resolves a decoder backend up front and returns silently when none is
 * available. Decode failures end consumption without throwing; they are
 * only surfaced via verbose logging (best-effort by design).
 */
export async function decodeOpusStreamChunks(
  stream: Readable,
  params: {
    onChunk: (pcm48kStereo: Buffer) => void;
    onVerbose: (message: string) => void;
    onWarn: (message: string) => void;
  },
): Promise<void> {
  const backend = createOpusDecoder({ onWarn: params.onWarn });
  if (!backend) {
    return;
  }
  params.onVerbose(`opus decoder: ${backend.name}`);
  try {
    for await (const frame of stream) {
      // Skip anything that is not a non-empty Buffer.
      const usable = Boolean(frame) && frame instanceof Buffer && frame.length > 0;
      if (!usable) {
        continue;
      }
      const pcm = backend.decoder.decode(frame);
      if (pcm && pcm.length > 0) {
        params.onChunk(Buffer.from(pcm));
      }
    }
  } catch (err) {
    // Best-effort: a decode error terminates the stream quietly unless
    // verbose logging is enabled.
    if (shouldLogVerbose()) {
      logVerbose(`discord voice: opus decode failed: ${formatErrorMessage(err)}`);
    }
  }
}
/**
 * Converts Discord's interleaved 16-bit LE stereo PCM (48 kHz, 4 bytes per
 * frame: L then R) to mono 24 kHz PCM for the realtime voice pipeline.
 * Returns an empty buffer when the input holds no complete frame.
 */
export function convertDiscordPcm48kStereoToRealtimePcm24kMono(pcm: Buffer): Buffer {
  const totalFrames = Math.floor(pcm.length / 4);
  if (totalFrames === 0) {
    return Buffer.alloc(0);
  }
  // Downmix by averaging left and right; result stays at 48 kHz.
  const downmixed = Buffer.alloc(totalFrames * 2);
  for (let i = 0; i < totalFrames; i++) {
    const base = i * 4;
    const avg = (pcm.readInt16LE(base) + pcm.readInt16LE(base + 2)) / 2;
    downmixed.writeInt16LE(Math.round(avg), i * 2);
  }
  // Resample 48 kHz -> 24 kHz.
  return resamplePcm(downmixed, SAMPLE_RATE, 24_000);
}
/**
 * Converts realtime-pipeline mono 24 kHz 16-bit LE PCM into Discord's
 * interleaved stereo 48 kHz format by upsampling first, then duplicating
 * each mono sample into both channels. Returns an empty buffer when the
 * upsampled data holds no complete sample.
 */
export function convertRealtimePcm24kMonoToDiscordPcm48kStereo(pcm: Buffer): Buffer {
  const upsampled = resamplePcm(pcm, 24_000, SAMPLE_RATE);
  const totalSamples = Math.floor(upsampled.length / 2);
  if (totalSamples === 0) {
    return Buffer.alloc(0);
  }
  // Each mono sample becomes one 4-byte stereo frame (L = R = sample).
  const interleaved = Buffer.alloc(totalSamples * 4);
  for (let i = 0; i < totalSamples; i++) {
    const value = upsampled.readInt16LE(i * 2);
    const base = i * 4;
    interleaved.writeInt16LE(value, base);
    interleaved.writeInt16LE(value, base + 2);
  }
  return interleaved;
}
function estimateDurationSeconds(pcm: Buffer): number {
const bytesPerSample = (BIT_DEPTH / 8) * CHANNELS;
if (bytesPerSample <= 0) {

Some files were not shown because too many files have changed in this diff Show More