chore: merge main into Codex dynamic tools

This commit is contained in:
Peter Steinberger
2026-05-01 12:23:17 +01:00
598 changed files with 15255 additions and 6253 deletions

View File

@@ -10,6 +10,9 @@ description: Run Blacksmith Testbox for CI-parity checks, secrets, hosted servic
Use Testbox when you need remote CI parity, injected secrets, hosted services,
or an OS/runtime image that your local machine cannot provide cheaply.
For OpenClaw, Crabbox is a supported alternative when Blacksmith is unavailable
or owned cloud capacity is preferable.
Do not default to Testbox for every local test/build loop. If the repo has
documented local commands for normal iteration, use those first so you keep
warm caches, local build state, and fast feedback.

View File

@@ -0,0 +1,81 @@
---
name: crabbox
description: Use Crabbox for OpenClaw remote Linux validation, warmed reusable boxes, GitHub Actions hydration, sync timing, logs, results, caches, and lease cleanup.
---
# Crabbox
Use Crabbox when OpenClaw needs remote Linux proof on owned capacity, a large
runner class, reusable warm state, or a Blacksmith alternative.
## Before Running
- Run from the repo root. Crabbox sync mirrors the current checkout.
- Prefer local targeted tests for tight edit loops.
- Prefer Blacksmith Testbox when the task explicitly asks for Blacksmith or a
Blacksmith-specific CI comparison.
- Use Crabbox for broad OpenClaw gates when owned AWS/Hetzner capacity is the
right remote lane.
- Check `.crabbox.yaml` for repo defaults before adding flags.
- Install with `brew install openclaw/tap/crabbox`; auth is required before use:
`printf '%s' "$CRABBOX_COORDINATOR_TOKEN" | crabbox login --url https://crabbox-coordinator.steipete.workers.dev --provider aws --token-stdin`.
- On macOS the user config is `~/Library/Application Support/crabbox/config.yaml`;
it must include `broker.url`, `broker.token`, and usually `provider: aws`.
## OpenClaw Flow
Warm a reusable box:
```sh
pnpm crabbox:warmup -- --idle-timeout 90m
```
Hydrate it through the repository workflow:
```sh
pnpm crabbox:hydrate -- --id <cbx_id-or-slug>
```
Run broad proof:
```sh
pnpm crabbox:run -- --id <cbx_id-or-slug> --shell "OPENCLAW_TESTBOX=1 pnpm check:changed"
pnpm crabbox:run -- --id <cbx_id-or-slug> --shell "corepack enable && pnpm install --frozen-lockfile && pnpm test"
```
Stop boxes you created before handoff:
```sh
pnpm crabbox:stop -- <cbx_id-or-slug>
```
## Useful Commands
```sh
crabbox status --id <id-or-slug> --wait
crabbox inspect --id <id-or-slug> --json
crabbox sync-plan
crabbox history --lease <id-or-slug>
crabbox logs <run_id>
crabbox results <run_id>
crabbox cache stats --id <id-or-slug>
crabbox ssh --id <id-or-slug>
```
Use `--debug` on `run` when measuring sync timing.
## Hydration Boundary
`.github/workflows/crabbox-hydrate.yml` is repo-specific on purpose. It owns
OpenClaw checkout, setup-node, pnpm setup, provider env hydration, ready marker,
and keepalive. Crabbox owns runner registration, workflow dispatch, SSH sync,
command execution, logs/results, local lease claims, and idle cleanup.
Do not add OpenClaw-specific setup to Crabbox. Put repo setup in the hydration
workflow and generic lease/sync behavior in Crabbox.
## Cleanup
Crabbox has coordinator-owned idle expiry and local lease claims, so OpenClaw
does not need a custom ledger. Default idle timeout is 30 minutes unless config
or flags set a different value. Still stop boxes you created when done.

View File

@@ -9,6 +9,7 @@ capacity:
- eu-west-1
actions:
workflow: .github/workflows/crabbox-hydrate.yml
job: hydrate
ref: main
runnerLabels:
- crabbox
@@ -26,14 +27,8 @@ sync:
baseRef: main
exclude:
- .artifacts
- .cache
- .codex
- .DS_Store
- .turbo
- coverage
- dist
- dist-runtime
- node_modules
- playwright-report
- test-results
env:

View File

@@ -7,10 +7,19 @@ on:
description: "Crabbox lease ID"
required: true
type: string
ref:
description: "Git ref to hydrate"
required: false
type: string
crabbox_runner_label:
description: "Dynamic Crabbox runner label"
required: true
type: string
crabbox_job:
description: "Hydration job identifier expected by Crabbox"
required: false
default: "hydrate"
type: string
crabbox_keep_alive_minutes:
description: "Minutes to keep the hydrated job alive"
required: false
@@ -30,6 +39,8 @@ jobs:
timeout-minutes: 120
steps:
- uses: actions/checkout@v6
with:
ref: ${{ inputs.ref || github.ref }}
- name: Setup Node environment
uses: ./.github/actions/setup-node-env
@@ -81,12 +92,37 @@ jobs:
shell: bash
run: |
set -euo pipefail
job="${{ inputs.crabbox_job }}"
if [ -z "$job" ]; then job=hydrate; fi
mkdir -p "$HOME/.crabbox/actions"
state="$HOME/.crabbox/actions/${{ inputs.crabbox_id }}.env"
env_file="$HOME/.crabbox/actions/${{ inputs.crabbox_id }}.env.sh"
services_file="$HOME/.crabbox/actions/${{ inputs.crabbox_id }}.services"
write_export() {
key="$1"
value="${!key-}"
if [ -n "$value" ]; then
printf 'export %s=%q\n' "$key" "$value"
fi
}
{
for key in CI GITHUB_ACTIONS GITHUB_WORKSPACE GITHUB_REPOSITORY GITHUB_RUN_ID GITHUB_RUN_NUMBER GITHUB_RUN_ATTEMPT GITHUB_REF GITHUB_REF_NAME GITHUB_SHA GITHUB_EVENT_NAME GITHUB_ACTOR RUNNER_OS RUNNER_ARCH RUNNER_TEMP RUNNER_TOOL_CACHE; do
write_export "$key"
done
} > "${env_file}.tmp"
mv "${env_file}.tmp" "$env_file"
{
echo "# Docker containers visible from the hydrated runner"
docker ps --format '{{.Names}}\t{{.Image}}\t{{.Ports}}' 2>/dev/null || true
} > "${services_file}.tmp"
mv "${services_file}.tmp" "$services_file"
tmp="${state}.tmp"
{
echo "WORKSPACE=${GITHUB_WORKSPACE}"
echo "RUN_ID=${GITHUB_RUN_ID}"
echo "JOB=${job}"
echo "ENV_FILE=${env_file}"
echo "SERVICES_FILE=${services_file}"
echo "READY_AT=$(date -u +%Y-%m-%dT%H:%M:%SZ)"
} > "$tmp"
mv "$tmp" "$state"

View File

@@ -38,6 +38,16 @@ on:
required: false
default: openclaw@latest
type: string
published_upgrade_survivor_baselines:
description: Optional exact baseline list for published-upgrade-survivor lane expansion
required: false
default: ""
type: string
published_upgrade_survivor_scenarios:
description: Optional scenario list for published-upgrade-survivor lane expansion
required: false
default: ""
type: string
package_artifact_name:
description: Existing workflow artifact containing openclaw-current.tgz; blank packs the selected ref
required: false
@@ -123,6 +133,16 @@ on:
required: false
default: openclaw@latest
type: string
published_upgrade_survivor_baselines:
description: Optional exact baseline list for published-upgrade-survivor lane expansion
required: false
default: ""
type: string
published_upgrade_survivor_scenarios:
description: Optional scenario list for published-upgrade-survivor lane expansion
required: false
default: ""
type: string
package_artifact_name:
description: Existing workflow artifact containing openclaw-current.tgz; blank packs the selected ref
required: false
@@ -695,6 +715,8 @@ jobs:
OPENCLAW_DOCKER_E2E_SELECTED_SHA: ${{ needs.validate_selected_ref.outputs.selected_sha }}
OPENCLAW_CURRENT_PACKAGE_TGZ: .artifacts/docker-e2e-package/openclaw-current.tgz
OPENCLAW_UPGRADE_SURVIVOR_BASELINE_SPEC: ${{ inputs.published_upgrade_survivor_baseline }}
OPENCLAW_UPGRADE_SURVIVOR_BASELINE_SPECS: ${{ inputs.published_upgrade_survivor_baselines }}
OPENCLAW_UPGRADE_SURVIVOR_SCENARIOS: ${{ inputs.published_upgrade_survivor_scenarios }}
OPENCLAW_SKIP_DOCKER_BUILD: "1"
INCLUDE_OPENWEBUI: ${{ inputs.include_openwebui }}
DOCKER_E2E_CHUNK: ${{ matrix.chunk_id }}
@@ -929,6 +951,8 @@ jobs:
OPENCLAW_DOCKER_E2E_SELECTED_SHA: ${{ needs.validate_selected_ref.outputs.selected_sha }}
OPENCLAW_CURRENT_PACKAGE_TGZ: .artifacts/docker-e2e-package/openclaw-current.tgz
OPENCLAW_UPGRADE_SURVIVOR_BASELINE_SPEC: ${{ inputs.published_upgrade_survivor_baseline }}
OPENCLAW_UPGRADE_SURVIVOR_BASELINE_SPECS: ${{ inputs.published_upgrade_survivor_baselines }}
OPENCLAW_UPGRADE_SURVIVOR_SCENARIOS: ${{ inputs.published_upgrade_survivor_scenarios }}
OPENCLAW_SKIP_DOCKER_BUILD: "1"
INCLUDE_OPENWEBUI: ${{ inputs.include_openwebui }}
DOCKER_E2E_LANES: ${{ matrix.group.docker_lanes }}

View File

@@ -69,6 +69,16 @@ on:
required: false
default: openclaw@latest
type: string
published_upgrade_survivor_baselines:
description: Optional baseline list for published-upgrade-survivor; use release-history for last 6 plus key legacy releases
required: false
default: ""
type: string
published_upgrade_survivor_scenarios:
description: Optional scenario list for published-upgrade-survivor; use reported-issues for known upgrade failure shapes
required: false
default: ""
type: string
telegram_mode:
description: Optional Telegram QA lane for the resolved package candidate
required: true
@@ -139,6 +149,16 @@ on:
required: false
default: openclaw@latest
type: string
published_upgrade_survivor_baselines:
description: Optional baseline list for published-upgrade-survivor; use release-history for last 6 plus key legacy releases
required: false
default: ""
type: string
published_upgrade_survivor_scenarios:
description: Optional scenario list for published-upgrade-survivor; use reported-issues for known upgrade failure shapes
required: false
default: ""
type: string
telegram_mode:
description: Optional Telegram QA lane for the resolved package candidate
required: false
@@ -275,6 +295,8 @@ jobs:
package_source_sha: ${{ steps.resolve.outputs.package_source_sha }}
package_sha256: ${{ steps.resolve.outputs.sha256 }}
package_version: ${{ steps.resolve.outputs.package_version }}
published_upgrade_survivor_baselines: ${{ steps.upgrade_survivor_baselines.outputs.baselines }}
published_upgrade_survivor_scenarios: ${{ inputs.published_upgrade_survivor_scenarios }}
telegram_enabled: ${{ steps.profile.outputs.telegram_enabled }}
telegram_mode: ${{ steps.profile.outputs.telegram_mode }}
steps:
@@ -405,6 +427,44 @@ jobs:
echo "package_artifact_name=${PACKAGE_ARTIFACT_NAME}"
} >> "$GITHUB_OUTPUT"
- name: Resolve published upgrade survivor baselines
id: upgrade_survivor_baselines
env:
FALLBACK_BASELINE: ${{ inputs.published_upgrade_survivor_baseline }}
REQUESTED_BASELINES: ${{ inputs.published_upgrade_survivor_baselines }}
GH_TOKEN: ${{ github.token }}
shell: bash
run: |
set -euo pipefail
if [[ -z "${REQUESTED_BASELINES// }" ]]; then
echo "baselines=" >> "$GITHUB_OUTPUT"
exit 0
fi
releases_json=""
npm_versions_json=""
if [[ "$REQUESTED_BASELINES" == *"release-history"* ]]; then
releases_json=".artifacts/package-candidate-input/openclaw-releases.json"
npm_versions_json=".artifacts/package-candidate-input/openclaw-npm-versions.json"
mkdir -p "$(dirname "$releases_json")"
gh release list --repo "$GITHUB_REPOSITORY" --limit 100 --json tagName,publishedAt,isPrerelease > "$releases_json"
npm view openclaw versions --json > "$npm_versions_json"
fi
args=(
--requested "$REQUESTED_BASELINES"
--fallback "$FALLBACK_BASELINE"
--github-output "$GITHUB_OUTPUT"
)
if [[ -n "$releases_json" ]]; then
args+=(
--releases-json "$releases_json"
--npm-versions-json "$npm_versions_json"
--history-count 6
--include-version 2026.4.23
--pre-date 2026-03-15T00:00:00Z
)
fi
node scripts/resolve-upgrade-survivor-baselines.mjs "${args[@]}" >/dev/null
- name: Upload package-under-test artifact
uses: actions/upload-artifact@v7
with:
@@ -424,6 +484,8 @@ jobs:
SUITE_PROFILE: ${{ inputs.suite_profile }}
WORKFLOW_REF: ${{ inputs.workflow_ref }}
PUBLISHED_UPGRADE_SURVIVOR_BASELINE: ${{ inputs.published_upgrade_survivor_baseline }}
PUBLISHED_UPGRADE_SURVIVOR_BASELINES: ${{ steps.upgrade_survivor_baselines.outputs.baselines }}
PUBLISHED_UPGRADE_SURVIVOR_SCENARIOS: ${{ inputs.published_upgrade_survivor_scenarios }}
shell: bash
run: |
{
@@ -438,6 +500,8 @@ jobs:
echo "- SHA-256: \`${PACKAGE_SHA256}\`"
echo "- Profile: \`${SUITE_PROFILE}\`"
echo "- Published upgrade survivor baseline: \`${PUBLISHED_UPGRADE_SURVIVOR_BASELINE}\`"
echo "- Published upgrade survivor baselines: \`${PUBLISHED_UPGRADE_SURVIVOR_BASELINES}\`"
echo "- Published upgrade survivor scenarios: \`${PUBLISHED_UPGRADE_SURVIVOR_SCENARIOS}\`"
} >> "$GITHUB_STEP_SUMMARY"
docker_acceptance:
@@ -451,6 +515,8 @@ jobs:
include_openwebui: ${{ needs.resolve_package.outputs.include_openwebui == 'true' }}
docker_lanes: ${{ needs.resolve_package.outputs.docker_lanes }}
published_upgrade_survivor_baseline: ${{ inputs.published_upgrade_survivor_baseline }}
published_upgrade_survivor_baselines: ${{ needs.resolve_package.outputs.published_upgrade_survivor_baselines }}
published_upgrade_survivor_scenarios: ${{ needs.resolve_package.outputs.published_upgrade_survivor_scenarios }}
package_artifact_name: ${{ needs.resolve_package.outputs.package_artifact_name }}
include_live_suites: ${{ needs.resolve_package.outputs.include_live_suites == 'true' }}
live_models_only: false

2
.gitignore vendored
View File

@@ -104,6 +104,8 @@ USER.md
.agents/skills/*
!.agents/skills/blacksmith-testbox/
!.agents/skills/blacksmith-testbox/**
!.agents/skills/crabbox/
!.agents/skills/crabbox/**
!.agents/skills/gitcrawl/
!.agents/skills/gitcrawl/**
!.agents/skills/openclaw-ghsa-maintainer/

View File

@@ -6,6 +6,11 @@ Docs: https://docs.openclaw.ai
### Changes
- Agents/workspace: add `agents.defaults.skipOptionalBootstrapFiles` for skipping selected optional workspace files during bootstrap without disabling required workspace setup. (#62110) Thanks @mainstay22.
- Plugins/CLI: add first-class `git:` plugin installs with ref checkout, commit metadata, normal scanner/staging, and `plugins update` support for recorded git sources. Thanks @badlogic.
- Voice Call/Google Meet: add Twilio Meet join phase logs around pre-connect DTMF, realtime stream setup, and initial greeting handoff for easier live-call debugging. Thanks @donkeykong91 and @PfanP.
- macOS app: move recent session context rows into a Context submenu while keeping usage and cost details root-level, so the menu bar companion stays compact with many active sessions. Thanks @guti.
- Gateway/SDK: add SDK-facing tools.invoke RPC with shared HTTP policy, typed approval/refusal results, and SDK helper support. Refs #74705. Thanks @BunsDev and @ai-hpc.
- Messages/docs: clarify that `BodyForAgent` is the primary inbound model text while `Body` is the legacy envelope fallback, and add Signal coverage so channel hardening patches target the real prompt path. Refs #66198. Thanks @defonota3box.
- Control UI/Usage: add UTC quarter-hour token buckets for the Usage Mosaic and reuse them for hour filtering, keeping the legacy session-span fallback for older summaries. (#74337) Thanks @konanok.
- BlueBubbles: add opt-in `channels.bluebubbles.replyContextApiFallback` that fetches the original message from the BlueBubbles HTTP API when the in-memory reply-context cache misses (multi-instance deployments sharing one BB account, post-restart, after long-lived TTL/LRU eviction). Off by default; channel-level setting propagates to accounts that omit the flag through `mergeAccountConfig`; routed through the typed `BlueBubblesClient` so every fetch is SSRF-guarded by the same three-mode policy as every other BB client request; reply-id shape is validated and part-index prefixes (`p:0/<guid>`) are stripped before the request; concurrent webhooks for the same `replyToId` coalesce into one fetch and successful responses populate the reply cache for subsequent hits. Also promotes BlueBubbles attachment download failures from verbose to runtime error so silently-dropped inbound images are visible at default log level, and extends `sanitizeForLog` to redact `?password=…`/`?token=…` query params and `Authorization:` headers before they reach the log sink (CWE-532). (#71820) Thanks @coletebou and @zqchris.
@@ -14,11 +19,32 @@ Docs: https://docs.openclaw.ai
### Fixes
- Discord/voice: lengthen the default voice join Ready wait, add configurable `voice.connectTimeoutMs`/`voice.reconnectGraceMs`, and warn before destroying unrecovered disconnected sessions so slow Discord voice handshakes and reconnects no longer fail silently. Fixes #63098; refs #39825 and #65039. Thanks @darealgege, @kzicherman, and @ayochim.
- Gateway/health: refresh cached health RPC snapshots when channel runtime state diverges, so Discord and other channel status reads no longer report stale running or connected values until the cache TTL expires. (#75423) Thanks @clawsweeper.
- Discord/voice: merge configured media-understanding providers such as Deepgram into partial active provider registries, so follow-up voice turns keep transcribing after another media plugin is already active. Fixes #65687. Thanks @OneMintJulep.
- WhatsApp: stage `qrcode` through root mirrored runtime dependencies so packaged QR pairing can render from staged plugin-runtime-deps installs. Fixes #75394. Thanks @FelipeX2001.
- Discord/voice: apply per-channel Discord `systemPrompt` overrides to voice transcript turns by forwarding the trusted channel prompt through the voice agent run. Fixes #47095. Thanks @qearlyao.
- Discord/voice: run voice-channel turns under a voice-output policy that hides the agent `tts` tool and asks for spoken reply text, so `/vc join` sessions synthesize and play agent replies instead of ending with `NO_REPLY`. Fixes #61536. Thanks @aounakram.
- Plugins/runtime-deps: include packaged OpenClaw identity in bundled plugin loader cache keys, so same-path package upgrades stop reusing stale versioned runtime-deps mirrors. Fixes #75045. Thanks @sahilsatralkar.
- Plugin SDK: restore reply-prefix and reply-pipeline helpers on the deprecated root/compat SDK surface so external plugins still using `openclaw/plugin-sdk` do not fail message dispatch after update. Fixes #75171. Thanks @zhangxiliang.
- Plugins/runtime-deps: prune inactive same-package versioned runtime-deps roots after bundled dependency repair, so upgrades do not leave old `openclaw-<version>-<hash>` package caches behind after doctor runs. Thanks @vincentkoc.
- Plugins/runtime-deps: prune legacy version-scoped plugin runtime-deps roots during bundled dependency repair and cover the path in Package Acceptance's upgrade-survivor matrix, so upgrades from 2026.4.x no longer leave stale per-plugin runtime trees after doctor runs. Thanks @vincentkoc.
- Plugins/runtime-deps: keep Gateway startup plugin imports and runtime plugin fallback loads verify-only after startup/config repair planning, so packaged installs no longer spawn package-manager repair from hot paths after readiness. Refs #75283 and #75069. Thanks @brokemac79 and @xiaohuaxi.
- Plugins/runtime-deps: treat package.json runtime-deps manifests as supersets when generated materialization metadata is absent, so bundled plugin activation stops restaging already-installed dependency subsets on every activation. Fixes #75429. (#75431) Thanks @loyur.
- Voice Call/realtime: add default-off fast memory/session context for `openclaw_agent_consult`, giving live calls a bounded answer-or-miss path before the full agent consult. Fixes #71849. Thanks @amzzzzzzz.
- Google Meet: interrupt Realtime provider output when local barge-in clears playback, so command-pair audio stops model speech instead of only restarting Chrome playback. Fixes #73850. (#73834) Thanks @shhtheonlyperson.
- Gateway/config: cap oversized plugin-owned schemas in the full `config.schema` response so large installed plugin sets cannot balloon Gateway RSS or crash schema clients. Thanks @vincentkoc.
- Plugins/update: skip ClawHub and marketplace plugin updates when the bundled version is newer than the recorded installed version, so `openclaw update` no longer overwrites working bundled plugins with older external packages. Fixes #75447. Thanks @amknight.
- Gateway/sessions: use bounded tail reads for sessions-list transcript usage fallbacks and cap bulk title/last-message hydration, keeping large session stores responsive when rows request derived previews. Thanks @vincentkoc.
- Gateway/chat: bound chat-history transcript reads to the requested display window so large session logs no longer OOM the Gateway when clients ask for a small history page. Thanks @vincentkoc.
- Voice Call/Twilio: honor stored pre-connect TwiML before realtime webhook shortcuts and reject DTMF sequences outside conversation mode, so Meet PIN entry cannot be skipped or silently dropped. Thanks @donkeykong91 and @PfanP.
- Docs/sandboxing: clarify that sandbox setup scripts (`sandbox-setup.sh`, `sandbox-common-setup.sh`, `sandbox-browser-setup.sh`) are only available from a source checkout, and add inline `docker build` commands for npm-installed users so sandbox image setup works without cloning the repo. Fixes #75485. Thanks @amknight.
- Google Meet/Voice Call: play Twilio Meet DTMF before opening the realtime media stream and carry the intro as the initial Voice Call message, so the greeting is generated after Meet admits the phone participant instead of racing a live-call TwiML update. Thanks @donkeykong91 and @PfanP.
- Google Meet/Voice Call: make Twilio setup preflight honor explicit `--transport twilio` and fail local/private Voice Call webhook URLs, including IPv6 loopback and unique-local forms, before joins. Thanks @donkeykong91 and @PfanP.
- Voice Call/Twilio: retry transient 21220 live-call TwiML updates and catch answered-path initial-greeting failures, so a fast answered callback no longer crashes the Gateway or drops the Twilio greeting/listen transition. (#74606) Thanks @Sivan22.
- CLI/startup: preserve `OPENCLAW_HIDE_BANNER` banner suppression for route-first startup callers that rely on the default process environment while keeping read-only status/channel paths from repairing bundled plugin runtime dependencies. Refs #75183.
- Voice Call/Twilio: register accepted media streams immediately but wait for realtime transcription readiness before speaking the initial greeting, so reconnect grace handling stays live while OpenAI STT startup is no longer starved by TTS. Fixes #75197. (#75257) Thanks @donkeykong91 and @PfanP.
- Voice Call CLI: run gateway-delegated `voicecall continue` through operation-id polling and protocol-shaped errors, so long conversational turns keep their transcript result without blocking a single Gateway RPC. (#75459) Thanks @serrurco and @DougButdorf.
- Voice Call CLI: delegate operational `voicecall` commands to the running Gateway runtime and skip webhook startup during CLI-only plugin loading, preventing webhook port conflicts and `setup --json` hangs. Fixes #72345. Thanks @serrurco and @DougButdorf.
- Agents/pi-embedded-runner: extract the `abortable` provider-call wrapper from `runEmbeddedAttempt` to module scope so its promise handlers no longer close over the run lexical context, releasing transcripts, tool buffers, and subscription callbacks when a provider call hangs past abort. (#74182) Thanks @cjboy007.
- Docker: restore `python3` in the gateway runtime image after the slim-runtime switch. Fixes #75041.
@@ -33,12 +59,19 @@ Docs: https://docs.openclaw.ai
- Security/config-audit: redact CLI argv and execArgv secrets before persisting config audit records, covering write, observe, and recovery paths. Fixes #60826. Thanks @koshaji.
- Gateway/models: keep default and configured model-list views responsive when provider catalog discovery stalls, without hiding real catalog load failures, while `--all` still waits for the exact full catalog. Fixes #75297; refs #74404. Thanks @lisandromachado and @najef1979-code.
- Plugins/runtime-deps: accept already materialized package-level runtime-deps supersets as converged, so later lazy plugin activation no longer prunes and relaunches `pnpm install` after gateway startup pre-staging, reducing event-loop pressure from repeated runtime-deps repair on packaged installs. Fixes #75283; refs #75297 and #72338. Thanks @brokemac79, @lisandromachado, and @midhunmonachan.
- Plugins/runtime-deps: remove OpenClaw-owned legacy runtime-deps symlinks before replacing staged bundled plugin dependencies, so updates can recover from older symlinked installs instead of failing the symlink safety guard. Thanks @goldmar.
- Discord: retry queued REST 429s against learned bucket/global cooldowns and reacquire fresh voice upload URLs after CDN upload rate limits, so outbound sends recover without reusing stale single-use upload URLs. Thanks @discord.
- TTS/providers: keep bundled speech-provider compat fallback available when plugins are globally disabled, so cold gateway and CLI startup can still resolve fallback speech providers instead of leaving explicit TTS provider selection with no registered providers. Refs #75265. Thanks @sliekens.
- Discord: collapse repeated native slash-command deploy rate-limit startup logs into one non-fatal warning while keeping per-request REST timing in verbose output. Thanks @discord.
- Discord: report native slash-command deploy aborts as REST timeouts with method, path, timeout budget, and observed duration, so startup logs explain slow Discord API calls instead of showing a generic aborted operation. Thanks @discord.
- Security/logging: redact payment credential field names such as card number, CVC/CVV, shared payment token, and payment credential across default log and tool-payload redaction patterns so wallet-style MCP tools do not expose raw payment credentials in UI events or transcripts. Thanks @stainlu.
- Providers/OpenAI Codex: preserve existing wrapped Codex streams during OpenAI attribution so PI OAuth bearer injection reaches ChatGPT/Codex Responses, and strip native Codex-only unsupported payload fields without touching custom compatible endpoints. (#75111) Thanks @keshavbotagent.
- Plugins/runtime-deps: materialize newly required bundled plugin packages after local `openclaw onboard` and `openclaw configure` config writes, while keeping remote setup read-only, so first Gateway startup no longer discovers missing channel/provider deps after setup claimed success. Fixes #75309; refs #75069. Thanks @scottgl9 and @xiaohuaxi.
- Plugins/runtime-deps: expire stale legacy install locks whose live PID cannot be tied to the current process incarnation, so Docker PID reuse no longer leaves bundled dependency repair stuck behind old `.openclaw-runtime-deps.lock` directories. Fixes #74948; refs #74950 and #74346. Thanks @dchekmarev.
- Plugins/runtime-deps: recover interrupted bundled runtime-dependency installs whose package sentinels exist but generated materialization is incomplete, forcing npm/pnpm repair in Gateway startup, doctor, and lazy plugin loads instead of leaving channels crash-looping on missing packages. Fixes #75309; refs #75310, #75296, and #75304. Thanks @scottgl9.
- Plugins/runtime-deps: treat no-main and export-map package sentinels without reachable entry files as incomplete, so Gateway startup, doctor, and lazy plugin loads repair interrupted bundled dependency installs instead of accepting package.json-only partial installs. Fixes #75309; refs #75183. Thanks @shakkernerd.
- Plugins/runtime-deps: keep runtime inspection and channel maintenance commands from downloading bundled plugin dependencies, route explicit repairs through `openclaw plugins deps --repair`, and still allow Gateway/DO paths to repair missing deps before import. Refs #75069. Thanks @xiaohuaxi.
- Updates: force non-deferred, no-cooldown update restarts after package-manager updates requested through the live Gateway control plane and fail release validation on post-swap stale chunk import crashes, so Telegram/Discord imports do not stay pointed at removed dist files. Fixes #75206. Thanks @xonaman and @faux123.
- Agents/tool-result guard: use the resolved runtime context token budget for non-context-engine tool-result overflow checks, so long tool-heavy sessions no longer compact early when `contextTokens` is larger than native `contextWindow`. Fixes #74917. Thanks @kAIborg24.
- Gateway/systemd: exit with sysexits 78 for supervised lock and `EADDRINUSE` conflicts so `RestartPreventExitStatus=78` stops `Restart=always` restart loops instead of repeatedly reloading plugins against an occupied port. Fixes #75115. Thanks @yhyatt.
- Agents/runtime: skip blank visible user prompts at the embedded-runner boundary before provider submission while still allowing internal runtime-only turns and media-only prompts, so Telegram/group sessions no longer leak raw empty-input provider errors when replay history exists. Fixes #74137. Thanks @yelog, @Gracker, and @nhaener.
@@ -70,6 +103,8 @@ Docs: https://docs.openclaw.ai
- Plugins/runtime-deps: hash the OS-canonical `packageRoot` via `fs.realpathSync.native` (with `path.resolve` fallback) when computing the bundled runtime-deps stage key, so loader and channel `bundled-root` callers no longer derive divergent stage directories under `~/.openclaw/plugin-runtime-deps/openclaw-<version>-<hash>/` and bundled channels stop failing with `ENOENT` on shared dist chunks under Windows npm symlinks, junctions, or PM2 multi-instance worker layouts. Fixes #74963. (#75048) Thanks @openperf and @vincentkoc.
- fix(logging): add redaction patterns for Tencent Cloud, Alibaba Cloud, HuggingFace and Replicate API keys (#58162). Thanks @gavyngong
- Pairing: surface unexpected allowlist filesystem stat errors instead of treating the allowlist as missing, so permission and I/O failures are visible during pairing authorization checks. (#63324) Thanks @franciscomaestre.
- macOS app: reserve layout space for exec approval command details so the allow dialog no longer overlaps the command, context, and action buttons. (#75470) Thanks @ngutman.
- Agents/failover: carry `sessionId`, `lane`, `provider`, `model`, and `profileId` attribution through `FailoverError` and `describeFailoverError`/`coerceToFailoverError` so structured error logs (e.g. `gateway.err.log` ingestion) can attribute exhausted-fallback wrapper errors to the originating session and last-attempted provider instead of dropping the metadata after the per-profile errors. Fixes #42713. (#73506) Thanks @wenxu007.
## 2026.4.29
@@ -112,6 +147,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Voice Call: resolve SecretRef-backed Twilio auth tokens and realtime/streaming provider API keys before initializing call providers, so SecretRef-backed voice-call credentials reach runtime as strings. (#73632) Thanks @VACInc.
- Security/outbound: strip re-formed HTML tags during plain-text sanitization so nested tag fragments cannot leave a CodeQL-detected `<script>` sequence behind. Thanks @vincentkoc.
- Security/secrets: compare credential bytes with padded timing-safe buffers instead of hashing candidate passwords before equality checks. Thanks @vincentkoc.
- Security/QQBot: sanitize debug log arguments before writing to `console.*`, so gateway payload fields cannot forge extra log lines when debug logging is enabled. Thanks @vincentkoc.

View File

@@ -0,0 +1,39 @@
import SwiftUI
/// Root label row for the menu bar "Context" submenu: renders a fixed
/// "Context" title on the left, a trailing subtitle in monospaced digits,
/// and a chevron indicating that a submenu opens from this row.
struct ContextRootMenuLabelView: View {
// Trailing detail text shown after the title — presumably a usage/count
// summary supplied by the hosting menu; verify against the caller.
let subtitle: String
// Row width supplied by the hosting menu item; clamped to >= 1 in `body`.
let width: CGFloat
// Menu-row highlight state from the environment, used to pick the palette.
@Environment(\.menuItemHighlighted) private var isHighlighted
// Resolves the color palette for the current highlight state so the title,
// subtitle, and chevron all swap colors together.
private var palette: MenuItemHighlightColors.Palette {
MenuItemHighlightColors.palette(self.isHighlighted)
}
var body: some View {
HStack(alignment: .firstTextBaseline, spacing: 8) {
Text("Context")
.font(.callout.weight(.semibold))
.foregroundStyle(self.palette.primary)
.lineLimit(1)
.layoutPriority(1)
Spacer(minLength: 8)
// Subtitle uses monospaced digits so changing numbers do not jitter,
// and gets higher layout priority than the title when space is tight.
Text(self.subtitle)
.font(.caption.monospacedDigit())
.foregroundStyle(self.palette.secondary)
.lineLimit(1)
.truncationMode(.tail)
.layoutPriority(2)
// Chevron signals the submenu, mirroring native menu affordances.
Image(systemName: "chevron.right")
.font(.caption.weight(.semibold))
.foregroundStyle(self.palette.secondary)
.padding(.leading, 2)
}
.padding(.vertical, 8)
.padding(.leading, 22)
.padding(.trailing, 14)
// Clamp to at least 1pt wide so a zero/negative width from the host
// cannot produce an invalid frame.
.frame(width: max(1, self.width), alignment: .leading)
}
}

View File

@@ -253,12 +253,11 @@ enum ExecApprovalsPromptPresenter {
}
@MainActor
private static func buildAccessoryView(_ request: ExecApprovalPromptRequest) -> NSView {
static func buildAccessoryView(_ request: ExecApprovalPromptRequest) -> NSView {
let stack = NSStackView()
stack.orientation = .vertical
stack.spacing = 8
stack.alignment = .leading
stack.translatesAutoresizingMaskIntoConstraints = false
stack.widthAnchor.constraint(greaterThanOrEqualToConstant: 380).isActive = true
let commandTitle = NSTextField(labelWithString: "Command")
@@ -337,6 +336,10 @@ enum ExecApprovalsPromptPresenter {
footer.font = NSFont.systemFont(ofSize: NSFont.smallSystemFontSize)
stack.addArrangedSubview(footer)
// NSAlert reserves accessory space from the view frame, not from Auto Layout constraints.
// Give the top-level accessory an explicit frame so its subviews do not paint over the
// alert title, message, and buttons while the frame remains zero-sized.
stack.frame = NSRect(origin: .zero, size: stack.fittingSize)
return stack
}

View File

@@ -176,99 +176,31 @@ extension MenuSessionsInjector {
let channelState = ControlChannel.shared.state
var cursor = insertIndex
var headerView: NSView?
if let snapshot = self.cachedSnapshot {
let now = Date()
let mainKey = self.mainSessionKey
let rows = snapshot.rows.filter { row in
if row.key == "main", mainKey != "main" { return false }
if row.key == mainKey { return true }
guard let updatedAt = row.updatedAt else { return false }
return now.timeIntervalSince(updatedAt) <= self.activeWindowSeconds
}.sorted { lhs, rhs in
if lhs.key == mainKey { return true }
if rhs.key == mainKey { return false }
return (lhs.updatedAt ?? .distantPast) > (rhs.updatedAt ?? .distantPast)
}
if !rows.isEmpty {
let previewKeys = rows.prefix(20).map(\.key)
let task = Task {
await SessionMenuPreviewLoader.prewarm(sessionKeys: previewKeys, maxItems: 10)
}
self.previewTasks.append(task)
}
let headerItem = NSMenuItem()
headerItem.tag = self.tag
headerItem.isEnabled = false
let statusText = self
.cachedErrorText ?? (isConnected ? nil : self.controlChannelStatusText(for: channelState))
let hosted = self.makeHostedView(
rootView: AnyView(MenuSessionsHeaderView(
count: rows.count,
statusText: statusText)),
width: width,
highlighted: false)
headerItem.view = hosted
headerView = hosted
menu.insertItem(headerItem, at: cursor)
cursor += 1
if rows.isEmpty {
menu.insertItem(
self.makeMessageItem(text: "No active sessions", symbolName: "minus", width: width),
at: cursor)
cursor += 1
} else {
for row in rows {
let item = NSMenuItem()
item.tag = self.tag
item.isEnabled = true
item.submenu = self.buildSubmenu(for: row, storePath: snapshot.storePath)
item.view = self.makeHostedView(
rootView: AnyView(SessionMenuLabelView(row: row, width: width)),
width: width,
highlighted: true)
menu.insertItem(item, at: cursor)
cursor += 1
}
}
} else {
let headerItem = NSMenuItem()
headerItem.tag = self.tag
headerItem.isEnabled = false
let statusText = isConnected
? (self.cachedErrorText ?? "Loading sessions…")
: self.controlChannelStatusText(for: channelState)
let hosted = self.makeHostedView(
rootView: AnyView(MenuSessionsHeaderView(
count: 0,
statusText: statusText)),
width: width,
highlighted: false)
headerItem.view = hosted
headerView = hosted
menu.insertItem(headerItem, at: cursor)
cursor += 1
if !isConnected {
menu.insertItem(
self.makeMessageItem(
text: "Connect the gateway to see sessions",
symbolName: "bolt.slash",
width: width),
at: cursor)
cursor += 1
}
}
let item = NSMenuItem(title: "Context", action: nil, keyEquivalent: "")
item.tag = self.tag
item.isEnabled = true
item.submenu = self.buildContextSubmenu(
width: width,
isConnected: isConnected,
channelState: channelState)
let hosted = self.makeHostedView(
rootView: AnyView(ContextRootMenuLabelView(
subtitle: self.contextRootSubtitle(
isConnected: isConnected,
channelState: channelState),
width: width)),
width: width,
highlighted: true)
item.view = hosted
menu.insertItem(item, at: cursor)
cursor += 1
cursor = self.insertUsageSection(into: menu, at: cursor, width: width)
cursor = self.insertCostUsageSection(into: menu, at: cursor, width: width)
DispatchQueue.main.async { [weak self, weak headerView] in
guard let self, let headerView else { return }
self.captureMenuWidthIfAvailable(from: headerView)
DispatchQueue.main.async { [weak self, weak hosted] in
guard let self, let hosted else { return }
self.captureMenuWidthIfAvailable(from: hosted)
}
}
@@ -346,6 +278,125 @@ extension MenuSessionsInjector {
_ = cursor
}
// Builds the "Context" submenu: a sessions header followed by either session
// rows or an empty-state / connect message, depending on snapshot and
// connection state.
// - Parameters:
//   - width: requested hosted-view width; floored to 300pt below.
//   - isConnected: whether the gateway control channel is connected.
//   - channelState: current control-channel state, used for status text.
// - Returns: a fully populated NSMenu ready to attach to the root item.
private func buildContextSubmenu(
width: CGFloat,
isConnected: Bool,
channelState: ControlChannel.ConnectionState) -> NSMenu
{
let menu = NSMenu()
// Shadow the parameter with a floor so narrow parent menus still get a
// usable submenu width.
let width = max(300, width)
// Insertion index; advanced after every item added to the submenu.
var cursor = 0
if let snapshot = self.cachedSnapshot {
let rows = self.activeRows(from: snapshot)
if !rows.isEmpty {
// Prewarm previews for the first rows off the current path; retain the
// task handle so it can be managed with the menu lifecycle.
let previewKeys = rows.prefix(20).map(\.key)
let task = Task {
await SessionMenuPreviewLoader.prewarm(sessionKeys: previewKeys, maxItems: 10)
}
self.previewTasks.append(task)
}
// Non-interactive header showing the active-session count and, when
// relevant, an error or disconnected-status line.
let headerItem = NSMenuItem()
headerItem.tag = self.tag
headerItem.isEnabled = false
let statusText = self.cachedErrorText
?? (isConnected ? nil : self.controlChannelStatusText(for: channelState))
headerItem.view = self.makeHostedView(
rootView: AnyView(MenuSessionsHeaderView(
count: rows.count,
statusText: statusText)),
width: width,
highlighted: false)
menu.insertItem(headerItem, at: cursor)
cursor += 1
if rows.isEmpty {
// Snapshot exists but nothing is active: show a placeholder row.
menu.insertItem(
self.makeMessageItem(text: "No active sessions", symbolName: "minus", width: width),
at: cursor)
cursor += 1
} else {
// One item per active session, each with its own detail submenu.
// representedObject carries the session key for tests/lookup.
for row in rows {
let item = NSMenuItem()
item.tag = self.tag
item.isEnabled = true
item.representedObject = row.key
item.submenu = self.buildSubmenu(for: row, storePath: snapshot.storePath)
item.view = self.makeHostedView(
rootView: AnyView(SessionMenuLabelView(row: row, width: width)),
width: width,
highlighted: true)
menu.insertItem(item, at: cursor)
cursor += 1
}
}
} else {
// No snapshot yet: header shows loading text (or cached error) while
// connected, otherwise the control-channel status.
let headerItem = NSMenuItem()
headerItem.tag = self.tag
headerItem.isEnabled = false
let statusText = isConnected
? (self.cachedErrorText ?? "Loading sessions…")
: self.controlChannelStatusText(for: channelState)
headerItem.view = self.makeHostedView(
rootView: AnyView(MenuSessionsHeaderView(
count: 0,
statusText: statusText)),
width: width,
highlighted: false)
menu.insertItem(headerItem, at: cursor)
cursor += 1
if !isConnected {
// Disconnected and no data: prompt the user to connect the gateway.
menu.insertItem(
self.makeMessageItem(
text: "Connect the gateway to see sessions",
symbolName: "bolt.slash",
width: width),
at: cursor)
cursor += 1
}
}
// cursor is intentionally unused past this point; silence the warning.
_ = cursor
return menu
}
// Subtitle for the root "Context" item: the active-session count when a
// snapshot is cached; otherwise a cached error or "Loading…" while connected;
// otherwise the control-channel status text.
private func contextRootSubtitle(
isConnected: Bool,
channelState: ControlChannel.ConnectionState) -> String
{
if let snapshot = self.cachedSnapshot {
return self.sessionsSubtitle(count: self.activeRows(from: snapshot).count)
}
if isConnected {
return self.cachedErrorText ?? "Loading…"
}
return self.controlChannelStatusText(for: channelState)
}
// Filters snapshot rows down to the "active" set and orders them for display:
// the resolved main session always sorts first, the rest by most-recent
// update. Rows without an update timestamp are excluded unless they are the
// main session.
private func activeRows(from snapshot: SessionStoreSnapshot) -> [SessionRow] {
let now = Date()
let mainKey = self.mainSessionKey
return snapshot.rows.filter { row in
// Hide the literal "main" row when the main session resolves to a
// different key, so the session is not listed twice.
if row.key == "main", mainKey != "main" { return false }
// The resolved main session is always considered active, regardless of age.
if row.key == mainKey { return true }
guard let updatedAt = row.updatedAt else { return false }
// Everything else must have been updated within the active window.
return now.timeIntervalSince(updatedAt) <= self.activeWindowSeconds
}.sorted { lhs, rhs in
if lhs.key == mainKey { return true }
if rhs.key == mainKey { return false }
return (lhs.updatedAt ?? .distantPast) > (rhs.updatedAt ?? .distantPast)
}
}
// Human-readable session count for the header subtitle, with the 24-hour
// active-window suffix (for example "1 session · 24h" or "3 sessions · 24h").
private func sessionsSubtitle(count: Int) -> String {
count == 1 ? "1 session · 24h" : "\(count) sessions · 24h"
}
private func insertUsageSection(into menu: NSMenu, at cursor: Int, width: CGFloat) -> Int {
let rows = self.usageRows
if rows.isEmpty {

View File

@@ -3830,6 +3830,100 @@ public struct ToolsEffectiveResult: Codable, Sendable {
}
}
/// Wire parameters for the tools-invoke RPC. Stored property names are
/// all-lowercase and mapped to camelCase JSON keys through `CodingKeys`
/// (this shape is consistent with generated protocol models in this file).
public struct ToolsInvokeParams: Codable, Sendable {
/// Name of the tool to invoke.
public let name: String
/// Optional tool arguments, passed through as loosely-typed values.
public let args: [String: AnyCodable]?
/// Optional session key; encoded as `sessionKey` in JSON.
public let sessionkey: String?
/// Optional agent id; encoded as `agentId` in JSON.
public let agentid: String?
/// Optional confirmation flag — presumably pre-confirms approval-gated
/// tools; verify against the gateway handler.
public let confirm: Bool?
/// Optional idempotency key; encoded as `idempotencyKey` in JSON.
public let idempotencykey: String?
public init(
name: String,
args: [String: AnyCodable]?,
sessionkey: String?,
agentid: String?,
confirm: Bool?,
idempotencykey: String?)
{
self.name = name
self.args = args
self.sessionkey = sessionkey
self.agentid = agentid
self.confirm = confirm
self.idempotencykey = idempotencykey
}
// Maps lowercase Swift property names onto the camelCase wire format.
private enum CodingKeys: String, CodingKey {
case name
case args
case sessionkey = "sessionKey"
case agentid = "agentId"
case confirm
case idempotencykey = "idempotencyKey"
}
}
/// Structured error payload for a failed tool invocation: a machine-readable
/// code, a human-readable message, and optional loosely-typed details.
public struct ToolsInvokeError: Codable, Sendable {
/// Machine-readable error code.
public let code: String
/// Human-readable error description.
public let message: String
/// Optional additional error context.
public let details: AnyCodable?
public init(
code: String,
message: String,
details: AnyCodable?)
{
self.code = code
self.message = message
self.details = details
}
private enum CodingKeys: String, CodingKey {
case code
case message
case details
}
}
/// Result envelope for the tools-invoke RPC. Lowercase stored properties map
/// to camelCase JSON keys via `CodingKeys`, matching the other generated
/// protocol models in this file.
public struct ToolsInvokeResult: Codable, Sendable {
/// Whether the invocation succeeded.
public let ok: Bool
/// Name of the invoked tool; encoded as `toolName` in JSON.
public let toolname: String
/// Optional tool output, loosely typed.
public let output: AnyCodable?
/// Encoded as `requiresApproval`; presumably set when the call is pending
/// approval rather than completed — verify against the gateway handler.
public let requiresapproval: Bool?
/// Optional approval identifier; encoded as `approvalId` in JSON.
public let approvalid: String?
/// Optional source metadata, loosely typed.
public let source: AnyCodable?
/// Optional error payload keyed by field name.
public let error: [String: AnyCodable]?
public init(
ok: Bool,
toolname: String,
output: AnyCodable?,
requiresapproval: Bool?,
approvalid: String?,
source: AnyCodable?,
error: [String: AnyCodable]?)
{
self.ok = ok
self.toolname = toolname
self.output = output
self.requiresapproval = requiresapproval
self.approvalid = approvalid
self.source = source
self.error = error
}
// Maps lowercase Swift property names onto the camelCase wire format.
private enum CodingKeys: String, CodingKey {
case ok
case toolname = "toolName"
case output
case requiresapproval = "requiresApproval"
case approvalid = "approvalId"
case source
case error
}
}
public struct SkillsBinsParams: Codable, Sendable {}
public struct SkillsBinsResult: Codable, Sendable {

View File

@@ -0,0 +1,31 @@
import AppKit
import Testing
@testable import OpenClaw
// Layout regression tests for the exec-approval alert accessory view.
// Serialized and main-actor bound because they construct AppKit views/alerts.
@Suite(.serialized)
@MainActor
struct ExecApprovalPromptLayoutTests {
// Asserts the accessory view carries an explicit nonzero frame and that an
// NSAlert adopts that frame unchanged — NSAlert reserves accessory space
// from the view's frame (presumably not from Auto Layout constraints;
// see the presenter's explicit frame assignment).
@Test func `accessory view reserves nonzero alert layout space`() {
let accessory = ExecApprovalsPromptPresenter.buildAccessoryView(
ExecApprovalPromptRequest(
command: "/bin/sh -lc \"hostname; uptime; echo '---'\"",
cwd: "/Users/example/projects/openclaw",
host: "node",
security: "allowlist",
ask: "on-miss",
agentId: "main",
resolvedPath: "/bin/sh",
sessionKey: "session-1"))
// Minimum expected footprint for the command/details stack.
#expect(accessory.frame.width >= 380)
#expect(accessory.frame.height >= 160)
let alert = NSAlert()
alert.messageText = "Allow this command?"
alert.informativeText = "Review the command details before allowing."
alert.accessoryView = accessory
// The alert must keep the accessory's explicit frame dimensions.
#expect(alert.accessoryView?.frame.width == accessory.frame.width)
#expect(alert.accessoryView?.frame.height == accessory.frame.height)
}
}

View File

@@ -35,7 +35,9 @@ struct MenuSessionsInjectorTests {
menu.addItem(NSMenuItem(title: "Send Heartbeats", action: nil, keyEquivalent: ""))
injector.injectForTesting(into: menu)
#expect(menu.items.contains { $0.tag == 9_415_557 })
let contextItem = menu.items.first { $0.tag == 9_415_557 && $0.title == "Context" }
#expect(contextItem != nil)
#expect(contextItem?.submenu != nil)
}
@Test func `injects session rows`() throws {
@@ -114,8 +116,12 @@ struct MenuSessionsInjectorTests {
menu.addItem(NSMenuItem(title: "Settings…", action: nil, keyEquivalent: ""))
injector.injectForTesting(into: menu)
#expect(menu.items.contains { $0.tag == 9_415_557 })
let contextItem = try #require(menu.items.first { $0.tag == 9_415_557 && $0.title == "Context" })
let contextSubmenu = try #require(contextItem.submenu)
#expect(menu.items.filter { $0.tag == 9_415_557 && $0.title == "Context" }.count == 1)
#expect(menu.items.contains { $0.tag == 9_415_557 && $0.isSeparatorItem })
#expect(contextSubmenu.items.compactMap { $0.representedObject as? String }.filter { ["main", "discord:group:alpha"].contains($0) }.count == 2)
#expect(contextSubmenu.items.allSatisfy { $0.title != "Usage cost (30 days)" })
let sendHeartbeatsIndex = try #require(menu.items.firstIndex(where: { $0.title == "Send Heartbeats" }))
let openDashboardIndex = try #require(menu.items.firstIndex(where: { $0.title == "Open Dashboard" }))
let firstInjectedIndex = try #require(menu.items.firstIndex(where: { $0.tag == 9_415_557 }))
@@ -160,6 +166,8 @@ struct MenuSessionsInjectorTests {
injector.injectForTesting(into: menu)
let contextItem = menu.items.first { $0.tag == 9_415_557 && $0.title == "Context" }
#expect(contextItem?.submenu?.items.allSatisfy { $0.title != "Usage cost (30 days)" } == true)
let usageCostItem = menu.items.first { $0.title == "Usage cost (30 days)" }
#expect(usageCostItem != nil)
#expect(usageCostItem?.submenu != nil)

View File

@@ -3830,6 +3830,100 @@ public struct ToolsEffectiveResult: Codable, Sendable {
}
}
public struct ToolsInvokeParams: Codable, Sendable {
public let name: String
public let args: [String: AnyCodable]?
public let sessionkey: String?
public let agentid: String?
public let confirm: Bool?
public let idempotencykey: String?
public init(
name: String,
args: [String: AnyCodable]?,
sessionkey: String?,
agentid: String?,
confirm: Bool?,
idempotencykey: String?)
{
self.name = name
self.args = args
self.sessionkey = sessionkey
self.agentid = agentid
self.confirm = confirm
self.idempotencykey = idempotencykey
}
private enum CodingKeys: String, CodingKey {
case name
case args
case sessionkey = "sessionKey"
case agentid = "agentId"
case confirm
case idempotencykey = "idempotencyKey"
}
}
public struct ToolsInvokeError: Codable, Sendable {
public let code: String
public let message: String
public let details: AnyCodable?
public init(
code: String,
message: String,
details: AnyCodable?)
{
self.code = code
self.message = message
self.details = details
}
private enum CodingKeys: String, CodingKey {
case code
case message
case details
}
}
public struct ToolsInvokeResult: Codable, Sendable {
public let ok: Bool
public let toolname: String
public let output: AnyCodable?
public let requiresapproval: Bool?
public let approvalid: String?
public let source: AnyCodable?
public let error: [String: AnyCodable]?
public init(
ok: Bool,
toolname: String,
output: AnyCodable?,
requiresapproval: Bool?,
approvalid: String?,
source: AnyCodable?,
error: [String: AnyCodable]?)
{
self.ok = ok
self.toolname = toolname
self.output = output
self.requiresapproval = requiresapproval
self.approvalid = approvalid
self.source = source
self.error = error
}
private enum CodingKeys: String, CodingKey {
case ok
case toolname = "toolName"
case output
case requiresapproval = "requiresApproval"
case approvalid = "approvalId"
case source
case error
}
}
public struct SkillsBinsParams: Codable, Sendable {}
public struct SkillsBinsResult: Codable, Sendable {

View File

@@ -1,4 +1,4 @@
94e043a9bbdcbb7196c52ecae5e4d4002f2be4983abbcd241146517db844c3e8 config-baseline.json
0a259216178a582c567d1fa48c5236bff4bbd27c3e6af838ffcd042459ffce3c config-baseline.core.json
da8e055ebba0730498703d209f9e2cfaa1484a83f3240e611dcdd7280e22a525 config-baseline.channel.json
463cb7a495b1463847ca3ba46f172af8d505aa1781a07188f57368ddc8c1bfb1 config-baseline.plugin.json
1deb67d0a40456e77cb67685f6ae2f14a8ddc2c4be488d4b1a1f1127598982dd config-baseline.json
ac7537ed5b5a2d9e7fa50977aa99f5e0babfbe1a93c7c14b93a184b36bb4f539 config-baseline.core.json
f3326cd9490169afefe93625f63699266b75db93855ed439c9692e3c286a990c config-baseline.channel.json
7731a0b93cb335b56fac4c807447ba659fea51ea7a6cd844dc0ef5616669ee75 config-baseline.plugin.json

View File

@@ -1048,6 +1048,8 @@ Auto-join example:
],
daveEncryption: true,
decryptionFailureTolerance: 24,
connectTimeoutMs: 30000,
reconnectGraceMs: 15000,
tts: {
provider: "openai",
openai: { voice: "onyx" },
@@ -1063,11 +1065,14 @@ Notes:
- `voice.tts` overrides `messages.tts` for voice playback only.
- `voice.model` overrides the LLM used for Discord voice channel responses only. Leave it unset to inherit the routed agent model.
- STT uses `tools.media.audio`; `voice.model` does not affect transcription.
- Per-channel Discord `systemPrompt` overrides apply to voice transcript turns for that voice channel.
- Voice transcript turns derive owner status from Discord `allowFrom` (or `dm.allowFrom`); non-owner speakers cannot access owner-only tools (for example `gateway` and `cron`).
- Voice is enabled by default; set `channels.discord.voice.enabled=false` to disable voice runtime and the `GuildVoiceStates` gateway intent.
- `channels.discord.intents.voiceStates` can explicitly override voice-state intent subscription. Leave it unset for the intent to follow `voice.enabled`.
- `voice.daveEncryption` and `voice.decryptionFailureTolerance` pass through to `@discordjs/voice` join options.
- `@discordjs/voice` defaults are `daveEncryption=true` and `decryptionFailureTolerance=24` if unset.
- `voice.connectTimeoutMs` controls the initial `@discordjs/voice` Ready wait for `/vc join` and auto-join attempts. Default: `30000`.
- `voice.reconnectGraceMs` controls how long OpenClaw waits for a disconnected voice session to begin reconnecting before destroying it. Default: `15000`.
- OpenClaw also watches receive decrypt failures and auto-recovers by leaving/rejoining the voice channel after repeated failures in a short window.
- If receive logs repeatedly show `DecryptionFailed(UnencryptedWhenPassthroughDisabled)` after updating, collect a dependency report and logs. The bundled `@discordjs/voice` line includes the upstream padding fix from discord.js PR #11449, which closed discord.js issue #11419.
@@ -1075,7 +1080,7 @@ Voice channel pipeline:
- Discord PCM capture is converted to a WAV temp file.
- `tools.media.audio` handles STT, for example `openai/gpt-4o-mini-transcribe`.
- The transcript is sent through normal Discord ingress and routing.
- The transcript is sent through Discord ingress and routing while the response LLM runs with a voice-output policy that hides the agent `tts` tool and asks for returned text, because Discord voice owns final TTS playback.
- `voice.model`, when set, overrides only the response LLM for this voice-channel turn.
- `voice.tts` is merged over `messages.tts`; the resulting audio is played in the joined channel.

View File

@@ -188,7 +188,7 @@ Keep `workflow_ref` and `package_ref` separate. `workflow_ref` is the trusted wo
The `package` profile uses offline plugin coverage so published-package validation is not gated on live ClawHub availability. The optional Telegram lane reuses the `package-under-test` artifact in `NPM Telegram Beta E2E`, with the published npm spec path kept for standalone dispatches.
Release checks call Package Acceptance with `source=ref`, `package_ref=<release-ref>`, `workflow_ref=<release workflow ref>`, `suite_profile=custom`, `docker_lanes='bundled-channel-deps-compat plugins-offline'`, and `telegram_mode=mock-openai`. Release-path Docker chunks cover the overlapping package/update/plugin lanes; Package Acceptance keeps the artifact-native bundled-channel compat, offline plugin, and Telegram proof against the same resolved package tarball. Cross-OS release checks still cover OS-specific onboarding, installer, and platform behavior; package/update product validation should start with Package Acceptance. The `published-upgrade-survivor` Docker lane validates one published package baseline per run. In Package Acceptance, the resolved `package-under-test` tarball is always the candidate and `published_upgrade_survivor_baseline` selects the published baseline, defaulting to `openclaw@latest`; failed-lane rerun commands preserve that baseline. Local runs can set `OPENCLAW_UPGRADE_SURVIVOR_BASELINE_SPEC` to an exact package such as `openclaw@2026.4.15`. The published lane configures the baseline with a baked `openclaw config set` command recipe, then records recipe steps in `summary.json`. Broader previous-version coverage should shard Package Acceptance across exact `published_upgrade_survivor_baseline` values. The Windows packaged and installer fresh lanes also verify that an installed package can import a browser-control override from a raw absolute Windows path. The OpenAI cross-OS agent-turn smoke defaults to `OPENCLAW_CROSS_OS_OPENAI_MODEL` when set, otherwise `openai/gpt-5.4-mini`, so the install and gateway proof stays fast and deterministic.
Release checks call Package Acceptance with `source=ref`, `package_ref=<release-ref>`, `workflow_ref=<release workflow ref>`, `suite_profile=custom`, `docker_lanes='bundled-channel-deps-compat plugins-offline'`, and `telegram_mode=mock-openai`. Release-path Docker chunks cover the overlapping package/update/plugin lanes; Package Acceptance keeps the artifact-native bundled-channel compat, offline plugin, and Telegram proof against the same resolved package tarball. Cross-OS release checks still cover OS-specific onboarding, installer, and platform behavior; package/update product validation should start with Package Acceptance. The `published-upgrade-survivor` Docker lane validates one published package baseline per run. In Package Acceptance, the resolved `package-under-test` tarball is always the candidate and `published_upgrade_survivor_baseline` selects the fallback published baseline, defaulting to `openclaw@latest`; failed-lane rerun commands preserve that baseline. Set `published_upgrade_survivor_baselines=release-history` to expand the lane across a deduped history matrix: the latest six stable releases, `2026.4.23`, and the latest stable release before `2026-03-15`. Set `published_upgrade_survivor_scenarios=reported-issues` to expand the same baselines across issue-shaped fixtures for Feishu config/runtime-deps, preserved bootstrap/persona files, tilde log paths, and stale versioned runtime-deps roots. Local aggregate runs can pass exact package specs with `OPENCLAW_UPGRADE_SURVIVOR_BASELINE_SPECS`, keep a single lane with `OPENCLAW_UPGRADE_SURVIVOR_BASELINE_SPEC` such as `openclaw@2026.4.15`, or set `OPENCLAW_UPGRADE_SURVIVOR_SCENARIOS` for the scenario matrix. The published lane configures the baseline with a baked `openclaw config set` command recipe, records recipe steps in `summary.json`, and probes `/healthz`, `/readyz`, plus RPC status after Gateway start. 
The Windows packaged and installer fresh lanes also verify that an installed package can import a browser-control override from a raw absolute Windows path. The OpenAI cross-OS agent-turn smoke defaults to `OPENCLAW_CROSS_OS_OPENAI_MODEL` when set, otherwise `openai/gpt-5.4-mini`, so the install and gateway proof stays fast and deterministic.
### Legacy compatibility windows
@@ -419,6 +419,17 @@ The sanity check fails fast when required root files such as `pnpm-lock.yaml` di
`pnpm testbox:run` also terminates a local Blacksmith CLI invocation that stays in the sync phase for more than five minutes without post-sync output. Set `OPENCLAW_TESTBOX_SYNC_TIMEOUT_MS=0` to disable that guard, or use a larger millisecond value for unusually large local diffs.
Crabbox is the repo-owned second remote-box path for Linux proof when Blacksmith is unavailable or when owned cloud capacity is preferable. Warm a box, hydrate it through the project workflow, then run commands through the Crabbox CLI:
```bash
pnpm crabbox:warmup -- --idle-timeout 90m
pnpm crabbox:hydrate -- --id <cbx_id>
pnpm crabbox:run -- --id <cbx_id> --shell "OPENCLAW_TESTBOX=1 pnpm check:changed"
pnpm crabbox:stop -- <cbx_id>
```
`.crabbox.yaml` owns provider, sync, and GitHub Actions hydration defaults. It excludes local `.git` so the hydrated Actions checkout keeps its own remote Git metadata instead of syncing maintainer-local remotes and object stores, and it excludes local runtime/build artifacts that should never be transferred. `.github/workflows/crabbox-hydrate.yml` owns checkout, Node/pnpm setup, `origin/main` fetch, and the non-secret environment handoff that later `crabbox run --id <cbx_id>` commands source.
## Related
- [Install overview](/install)

View File

@@ -51,6 +51,8 @@ openclaw channels remove --channel telegram --delete
`openclaw channels add --help` shows per-channel flags (token, private key, app token, signal-cli paths, etc).
</Tip>
`channels remove` only operates on installed/configured channel plugins. Use `channels add` first for installable catalog channels.
Common non-interactive add surfaces include:
- bot-token channels: `--token`, `--bot-token`, `--app-token`, `--token-file`
@@ -132,6 +134,7 @@ Notes:
- Use `--kind user|group|auto` to force the target type.
- Resolution prefers active matches when multiple entries share the same name.
- `channels resolve` is read-only. If a selected account is configured via SecretRef but that credential is unavailable in the current command path, the command returns degraded unresolved results with notes instead of aborting the entire run.
- `channels resolve` does not install channel plugins. Use `channels add --channel <name>` before resolving names for an installable catalog channel.
## Related

View File

@@ -52,6 +52,7 @@ Available sections:
Notes:
- Choosing where the Gateway runs always updates `gateway.mode`. You can select "Continue" without other sections if that is all you need.
- After local config writes, configure materializes newly required bundled plugin runtime dependencies. This is a narrow package-manager repair step, not a full `openclaw doctor` run. Remote gateway config does not install local plugin dependencies.
- Channel-oriented services (Slack/Discord/Matrix/Microsoft Teams) prompt for channel/room allowlists during setup. You can enter names or IDs; the wizard resolves names to IDs when possible.
- If you run the daemon install step, token auth requires a token, and `gateway.auth.token` is SecretRef-managed, configure validates the SecretRef but does not persist resolved plaintext token values into supervisor service environment metadata.
- If token auth requires a token and the configured token SecretRef is unresolved, configure blocks daemon install with actionable remediation guidance.

View File

@@ -146,7 +146,7 @@ When you set `--url`, the CLI does not fall back to config or environment creden
openclaw gateway health --url ws://127.0.0.1:18789
```
The HTTP `/healthz` endpoint is a liveness probe: it returns once the server can answer HTTP. The HTTP `/readyz` endpoint is stricter and stays red while startup sidecars, channels, or configured hooks are still settling. Local or authenticated detailed readiness responses include an `eventLoop` diagnostic block with event-loop delay, event-loop utilization, CPU core ratio, and a `degraded` flag.
The HTTP `/healthz` endpoint is a liveness probe: it returns once the server can answer HTTP. The HTTP `/readyz` endpoint is stricter and stays red while startup plugin runtime dependencies, sidecars, channels, or configured hooks are still settling. Local or authenticated detailed readiness responses include an `eventLoop` diagnostic block with event-loop delay, event-loop utilization, CPU core ratio, and a `degraded` flag.
### `gateway usage-cost`

View File

@@ -119,6 +119,8 @@ Gateway token options in non-interactive mode:
- With `--install-daemon`, if token mode requires a token and the configured token SecretRef is unresolved, onboarding fails closed with remediation guidance.
- With `--install-daemon`, if both `gateway.auth.token` and `gateway.auth.password` are configured and `gateway.auth.mode` is unset, onboarding blocks install until mode is set explicitly.
- Local onboarding writes `gateway.mode="local"` into the config. If a later config file is missing `gateway.mode`, treat that as config damage or an incomplete manual edit, not as a valid local-mode shortcut.
- Local onboarding materializes newly required bundled plugin runtime dependencies after writing config, before workspace/bootstrap, daemon install, or health checks continue. This is a narrow package-manager repair step, not a full `openclaw doctor` run.
- Remote onboarding only writes connection info for the remote Gateway and does not install local bundled plugin dependencies.
- `--allow-unconfigured` is a separate gateway runtime escape hatch. It does not mean onboarding may omit `gateway.mode`.
Example:

View File

@@ -33,6 +33,7 @@ openclaw plugins list --verbose
openclaw plugins list --json
openclaw plugins install <path-or-spec>
openclaw plugins inspect <id>
openclaw plugins inspect <id> --runtime
openclaw plugins inspect <id> --json
openclaw plugins inspect --all
openclaw plugins info <id>
@@ -70,6 +71,8 @@ Native OpenClaw plugins must ship `openclaw.plugin.json` with an inline JSON Sch
openclaw plugins install <package> # ClawHub first, then npm
openclaw plugins install clawhub:<package> # ClawHub only
openclaw plugins install npm:<package> # npm only
openclaw plugins install git:github.com/<owner>/<repo> # git repo
openclaw plugins install git:github.com/<owner>/<repo>@<ref>
openclaw plugins install <package> --force # overwrite existing install
openclaw plugins install <package> --pin # pin version
openclaw plugins install <package> --dangerously-force-unsafe-install
@@ -107,7 +110,7 @@ current OpenClaw or a local checkout until a newer npm package is published.
</Accordion>
<Accordion title="--pin scope">
`--pin` applies to npm installs only. It is not supported with `--marketplace`, because marketplace installs persist marketplace source metadata instead of an npm spec.
`--pin` applies to npm installs only. It is not supported with `git:` installs; use an explicit git ref such as `git:github.com/acme/plugin@v1.2.3` when you want a pinned source. It is not supported with `--marketplace`, because marketplace installs persist marketplace source metadata instead of an npm spec.
</Accordion>
<Accordion title="--dangerously-force-unsafe-install">
`--dangerously-force-unsafe-install` is a break-glass option for false positives in the built-in dangerous-code scanner. It allows the install to continue even when the built-in scanner reports `critical` findings, but it does **not** bypass plugin `before_install` hook policy blocks and does **not** bypass scan failures.
@@ -128,6 +131,14 @@ current OpenClaw or a local checkout until a newer npm package is published.
If a bare install spec matches a bundled plugin id (for example `diffs`), OpenClaw installs the bundled plugin directly. To install an npm package with the same name, use an explicit scoped spec (for example `@scope/diffs`).
</Accordion>
<Accordion title="Git repositories">
Use `git:<repo>` to install directly from a git repository. Supported forms include `git:github.com/owner/repo`, `git:owner/repo`, full `https://`, `ssh://`, `git://`, `file://`, and `git@host:owner/repo.git` clone URLs. Add `@<ref>` or `#<ref>` to check out a branch, tag, or commit before install.
Git installs clone into a temporary directory, check out the requested ref when present, then use the normal plugin directory installer. That means manifest validation, dangerous-code scanning, runtime dependency staging, and install records behave like local-path installs. Recorded git installs include the source URL/ref plus the resolved commit so `openclaw plugins update` can re-resolve the source later.
After installing from git, use `openclaw plugins inspect <id> --runtime --json` to verify runtime registrations such as gateway methods and CLI commands. If the plugin registered a CLI root with `api.registerCli`, execute that command directly through the OpenClaw root CLI, for example `openclaw demo-plugin ping`.
</Accordion>
<Accordion title="Archives">
Supported archives: `.zip`, `.tgz`, `.tar.gz`, `.tar`. Native OpenClaw plugin archives must contain a valid `openclaw.plugin.json` at the extracted plugin root; archives that only contain `package.json` are rejected before OpenClaw writes install records.
@@ -234,7 +245,7 @@ directory remains inert so normal packaged installs still use compiled dist.
For runtime hook debugging:
- `openclaw plugins inspect <id> --json` shows registered hooks and diagnostics from a module-loaded inspection pass.
- `openclaw plugins inspect <id> --runtime --json` shows registered hooks and diagnostics from a module-loaded inspection pass. Runtime inspection never downloads missing bundled runtime dependencies; use `openclaw plugins deps --repair` when repair is needed.
- `openclaw gateway status --deep --require-rpc` confirms the reachable Gateway, service/process hints, config path, and RPC health.
- Non-bundled conversation hooks (`llm_input`, `llm_output`, `before_agent_finalize`, `agent_end`) require `plugins.entries.<id>.hooks.allowConversationAccess=true`.
@@ -269,6 +280,8 @@ openclaw plugins deps --json
Use `--repair` when a packaged install reports missing bundled runtime dependencies during Gateway startup or `plugins doctor`. Repair installs only missing enabled bundled-plugin deps with lifecycle scripts disabled. Use `--prune` to remove stale unknown external runtime-dependency roots left behind by older packaged layouts.
For the full plan, staging, and repair lifecycle, see [Plugin dependency resolution](/plugins/dependency-resolution).
### Uninstall
```bash
@@ -319,10 +332,13 @@ Updates apply to tracked plugin installs in the managed plugin index and tracked
```bash
openclaw plugins inspect <id>
openclaw plugins inspect <id> --runtime
openclaw plugins inspect <id> --json
```
Deep introspection for a single plugin. Shows identity, load status, source, registered capabilities, hooks, tools, commands, services, gateway methods, HTTP routes, policy flags, diagnostics, install metadata, bundle capabilities, and any detected MCP or LSP server support.
Inspect shows identity, load status, source, manifest capabilities, policy flags, diagnostics, install metadata, bundle capabilities, and any detected MCP or LSP server support without importing the plugin runtime by default. Add `--runtime` to load the plugin module and include registered hooks, tools, commands, services, gateway methods, and HTTP routes. Runtime inspection fails with a repair hint when bundled runtime dependencies are missing; use `openclaw plugins deps --repair` to repair them explicitly.
Plugin-owned CLI commands are installed as root `openclaw` command groups. After `inspect --runtime` shows a command under `cliCommands`, run it as `openclaw <command> ...`; for example, a plugin that registers `demo-git` can be verified with `openclaw demo-git ping`.
Each plugin is classified by what it actually registers at runtime:

View File

@@ -82,7 +82,11 @@ install method aligned:
- `beta` → prefers npm dist-tag `beta`, but falls back to `latest` when beta is
missing or older than the current stable release.
The Gateway core auto-updater (when enabled via config) reuses this same update path.
The Gateway core auto-updater (when enabled via config) launches the CLI update path
outside the live Gateway request handler. Control-plane `update.run` package-manager
updates force a non-deferred, no-cooldown update restart after the package swap,
because the old Gateway process may still have in-memory chunks that point at
files removed by the new package.
For package-manager installs, `openclaw update` resolves the target package
version before invoking the package manager. npm global installs use a staged
@@ -151,7 +155,7 @@ If an exact pinned npm plugin update resolves to an artifact whose integrity dif
<Note>
Post-update plugin sync failures fail the update result and stop restart follow-up work. Fix the plugin install or update error, then rerun `openclaw update`.
When the updated Gateway starts, enabled bundled plugin runtime dependencies are staged before plugin activation. Update-triggered restarts drain any active runtime-dependency staging before closing the Gateway, so service-manager restarts do not interrupt an in-flight npm install.
When the updated Gateway starts, enabled bundled plugin runtime dependencies are staged before plugin activation. Package-manager `update.run` restarts bypass the normal idle deferral and restart cooldown after the package tree has been swapped, so the old process cannot keep lazy-loading removed chunks. Service-manager restarts still drain runtime-dependency staging before closing the Gateway.
If pnpm bootstrap still fails, the updater stops early with a package-manager-specific error instead of trying `npm run build` inside the checkout.
</Note>

View File

@@ -25,24 +25,24 @@ resources.
`@openclaw/sdk` ships with:
| Surface | Status | What it does |
| ------------------------- | ------- | ---------------------------------------------------------------------------- |
| `OpenClaw` | Ready | Main client entry point. Owns transport, connection, requests, and events. |
| `GatewayClientTransport` | Ready | WebSocket transport backed by the Gateway client. |
| `oc.agents` | Ready | Lists, creates, updates, deletes, and gets agent handles. |
| `Agent.run()` | Ready | Starts a Gateway `agent` run and returns a `Run`. |
| `oc.runs` | Ready | Creates, gets, waits for, cancels, and streams runs. |
| `Run.events()` | Ready | Streams normalized per-run events with replay for fast runs. |
| `Run.wait()` | Ready | Calls `agent.wait` and returns a stable `RunResult`. |
| `Run.cancel()` | Ready | Calls `sessions.abort` by run id, with session key when available. |
| `oc.sessions` | Ready | Creates, resolves, sends to, patches, compacts, and gets session handles. |
| `Session.send()` | Ready | Calls `sessions.send` and returns a `Run`. |
| `oc.models` | Ready | Calls `models.list` and the current `models.authStatus` status RPC. |
| `oc.tools` | Partial | Lists tool catalog and effective tools; direct tool invocation is not wired. |
| `oc.artifacts` | Ready | Lists, gets, and downloads Gateway transcript artifacts. |
| `oc.approvals` | Ready | Lists and resolves exec approvals through Gateway approval RPCs. |
| `oc.rawEvents()` | Ready | Exposes raw Gateway events for advanced consumers. |
| `normalizeGatewayEvent()` | Ready | Converts raw Gateway events into the stable SDK event shape. |
| Surface | Status | What it does |
| ------------------------- | ------ | -------------------------------------------------------------------------- |
| `OpenClaw` | Ready | Main client entry point. Owns transport, connection, requests, and events. |
| `GatewayClientTransport` | Ready | WebSocket transport backed by the Gateway client. |
| `oc.agents` | Ready | Lists, creates, updates, deletes, and gets agent handles. |
| `Agent.run()` | Ready | Starts a Gateway `agent` run and returns a `Run`. |
| `oc.runs` | Ready | Creates, gets, waits for, cancels, and streams runs. |
| `Run.events()` | Ready | Streams normalized per-run events with replay for fast runs. |
| `Run.wait()` | Ready | Calls `agent.wait` and returns a stable `RunResult`. |
| `Run.cancel()` | Ready | Calls `sessions.abort` by run id, with session key when available. |
| `oc.sessions` | Ready | Creates, resolves, sends to, patches, compacts, and gets session handles. |
| `Session.send()` | Ready | Calls `sessions.send` and returns a `Run`. |
| `oc.models` | Ready | Calls `models.list` and the current `models.authStatus` status RPC. |
| `oc.tools` | Ready | Lists, scopes, and invokes Gateway tools through the policy pipeline. |
| `oc.artifacts` | Ready | Lists, gets, and downloads Gateway transcript artifacts. |
| `oc.approvals` | Ready | Lists and resolves exec approvals through Gateway approval RPCs. |
| `oc.rawEvents()` | Ready | Exposes raw Gateway events for advanced consumers. |
| `normalizeGatewayEvent()` | Ready | Converts raw Gateway events into the stable SDK event shape. |
The SDK also exports the core types used by those surfaces:
`AgentRunParams`, `RunResult`, `RunStatus`, `OpenClawEvent`,
@@ -216,11 +216,19 @@ await oc.models.list();
await oc.models.status({ probe: false }); // calls models.authStatus
```
Tool helpers expose the Gateway catalog and effective tool view:
Tool helpers expose the Gateway catalog, effective tool view, and direct
Gateway tool invocation. `oc.tools.invoke()` returns a typed envelope instead
of throwing for policy or approval refusals.
```typescript
await oc.tools.list();
await oc.tools.effective({ sessionKey: "main" });
await oc.tools.invoke("tool-name", {
args: { input: "value" },
sessionKey: "main",
confirm: false,
idempotencyKey: "tool-call-1",
});
```
Artifact helpers expose the Gateway artifact projection for session, run, or
@@ -256,8 +264,6 @@ await oc.tasks.list();
await oc.tasks.get("task-id");
await oc.tasks.cancel("task-id");
await oc.tools.invoke("tool-name", {});
await oc.environments.list();
await oc.environments.create({});
await oc.environments.status("environment-id");

View File

@@ -1182,6 +1182,7 @@
"tools/plugin",
"plugins/community",
"plugins/bundles",
"plugins/dependency-resolution",
"plugins/codex-harness",
"plugins/codex-computer-use",
"plugins/google-meet",

View File

@@ -67,6 +67,20 @@ Disables automatic creation of workspace bootstrap files (`AGENTS.md`, `SOUL.md`
}
```
### `agents.defaults.skipOptionalBootstrapFiles`
Skips creation of selected optional workspace files while still writing required bootstrap files. Valid values: `SOUL.md`, `USER.md`, `HEARTBEAT.md`, and `IDENTITY.md`.
```json5
{
agents: {
defaults: {
skipOptionalBootstrapFiles: ["SOUL.md", "USER.md"],
},
},
}
```
### `agents.defaults.contextInjection`
Controls when workspace bootstrap files are injected into the system prompt. Default: `"always"`.
@@ -908,13 +922,15 @@ noVNC observer access uses VNC auth by default and OpenClaw emits a short-lived
Browser sandboxing and `sandbox.docker.binds` are Docker-only.
Build images:
Build images (from a source checkout):
```bash
scripts/sandbox-setup.sh # main sandbox image
scripts/sandbox-browser-setup.sh # optional browser image
```
For npm installs without a source checkout, see [Sandboxing § Images and setup](/gateway/sandboxing#images-and-setup) for inline `docker build` commands.
### `agents.list` (per-agent overrides)
Use `agents.list[].tts` to give an agent its own TTS provider, voice, model,

View File

@@ -297,6 +297,8 @@ WhatsApp runs through the gateway's web channel (Baileys Web). It starts automat
],
daveEncryption: true,
decryptionFailureTolerance: 24,
connectTimeoutMs: 30000,
reconnectGraceMs: 15000,
tts: {
provider: "openai",
openai: { voice: "alloy" },
@@ -339,6 +341,8 @@ WhatsApp runs through the gateway's web channel (Baileys Web). It starts automat
- `channels.discord.voice` enables Discord voice channel conversations and optional auto-join + LLM + TTS overrides.
- `channels.discord.voice.model` optionally overrides the LLM model used for Discord voice channel responses.
- `channels.discord.voice.daveEncryption` and `channels.discord.voice.decryptionFailureTolerance` pass through to `@discordjs/voice` DAVE options (`true` and `24` by default).
- `channels.discord.voice.connectTimeoutMs` controls the initial `@discordjs/voice` Ready wait for `/vc join` and auto-join attempts (`30000` by default).
- `channels.discord.voice.reconnectGraceMs` controls how long a disconnected voice session may take to enter reconnect signalling before OpenClaw destroys it (`15000` by default).
- OpenClaw additionally attempts voice receive recovery by leaving/rejoining a voice session after repeated decrypt failures.
- `channels.discord.streaming` is the canonical stream mode key. Legacy `streamMode` and boolean `streaming` values are auto-migrated.
- `channels.discord.autoPresence` maps runtime availability to bot presence (healthy => online, degraded => idle, exhausted => dnd) and allows optional status text overrides.

View File

@@ -333,7 +333,7 @@ cannot roll back unrelated user settings.
}
```
Build the image first: `scripts/sandbox-setup.sh`
Build the image first — from a source checkout run `scripts/sandbox-setup.sh`, or from an npm install see the inline `docker build` command in [Sandboxing § Images and setup](/gateway/sandboxing#images-and-setup).
See [Sandboxing](/gateway/sandboxing) for the full guide and [full reference](/gateway/config-agents#agentsdefaultssandbox) for all options.

View File

@@ -342,7 +342,7 @@ That stages grounded durable candidates into the short-term dreaming store while
<Accordion title="7b. Bundled plugin runtime deps">
Doctor verifies runtime dependencies only for bundled plugins that are active in the current config or enabled by their bundled manifest default, for example `plugins.entries.discord.enabled: true`, legacy `channels.discord.enabled: true`, configured `models.providers.*` / agent model refs, or a default-enabled bundled plugin without provider ownership. If any are missing, doctor reports the packages and installs them in `openclaw doctor --fix` / `openclaw doctor --repair` mode. External plugins still use `openclaw plugins install` / `openclaw plugins update`; doctor does not install dependencies for arbitrary plugin paths.
During doctor repair, bundled runtime-dependency npm installs report spinner progress in TTY sessions and periodic line progress in piped/headless output. The Gateway and local CLI can also repair active bundled plugin runtime dependencies on demand before importing a bundled plugin. These installs are scoped to the plugin runtime install root, run with scripts disabled, do not write a package lock, and are guarded by an install-root lock so concurrent CLI or Gateway starts do not mutate the same `node_modules` tree at the same time.
During doctor repair, bundled runtime-dependency npm installs report spinner progress in TTY sessions and periodic line progress in piped/headless output. Gateway startup and config reload enter plugin-plan mode before importing bundled plugin runtime modules; normal runtime imports are verify-only and do not spawn package-manager repair. These installs are scoped to the plugin runtime install root, run with scripts disabled, do not write a package lock, and are guarded by an install-root lock so concurrent CLI or Gateway starts do not mutate the same `node_modules` tree at the same time. Stale legacy locks from killed Docker/container starts are reclaimed when their owner metadata cannot prove a current process incarnation and the lock files are old.
</Accordion>
<Accordion title="8. Gateway service migrations and cleanup hints">

View File

@@ -378,7 +378,7 @@ enumeration of `src/gateway/server-methods/*.ts`.
- `config.apply` validates + replaces the full config payload.
- `config.schema` returns the live config schema payload used by Control UI and CLI tooling: schema, `uiHints`, version, and generation metadata, including plugin + channel schema metadata when the runtime can load it. The schema includes field `title` / `description` metadata derived from the same labels and help text used by the UI, including nested object, wildcard, array-item, and `anyOf` / `oneOf` / `allOf` composition branches when matching field documentation exists.
- `config.schema.lookup` returns a path-scoped lookup payload for one config path: normalized path, a shallow schema node, matched hint + `hintPath`, and immediate child summaries for UI/CLI drill-down. Lookup schema nodes keep the user-facing docs and common validation fields (`title`, `description`, `type`, `enum`, `const`, `format`, `pattern`, numeric/string/array/object bounds, and flags like `additionalProperties`, `deprecated`, `readOnly`, `writeOnly`). Child summaries expose `key`, normalized `path`, `type`, `required`, `hasChildren`, plus the matched `hint` / `hintPath`.
- `update.run` runs the gateway update flow and schedules a restart only when the update itself succeeded.
- `update.run` runs the gateway update flow and schedules a restart only when the update itself succeeded. Package-manager updates force a non-deferred, no-cooldown update restart after the package swap so the old Gateway process does not keep lazy-loading from a replaced `dist` tree.
- `update.status` returns the latest cached update restart sentinel, including the post-restart running version when available.
- `wizard.start`, `wizard.next`, `wizard.status`, and `wizard.cancel` expose the onboarding wizard over WS RPC.
@@ -443,7 +443,7 @@ enumeration of `src/gateway/server-methods/*.ts`.
<Accordion title="Automation, skills, and tools">
- Automation: `wake` schedules an immediate or next-heartbeat wake text injection; `cron.list`, `cron.status`, `cron.add`, `cron.update`, `cron.remove`, `cron.run`, `cron.runs` manage scheduled work.
- Skills and tools: `commands.list`, `skills.*`, `tools.catalog`, `tools.effective`.
- Skills and tools: `commands.list`, `skills.*`, `tools.catalog`, `tools.effective`, `tools.invoke`.
</Accordion>
</AccordionGroup>
@@ -501,6 +501,15 @@ enumeration of `src/gateway/server-methods/*.ts`.
caller-supplied auth or delivery context.
- The response is session-scoped and reflects what the active conversation can use right now,
including core, plugin, and channel tools.
- Operators may call `tools.invoke` (`operator.write`) to invoke one available tool through the
same gateway policy path as `/tools/invoke`.
- `name` is required. `args`, `sessionKey`, `agentId`, `confirm`, and
`idempotencyKey` are optional.
- If both `sessionKey` and `agentId` are present, the resolved session agent must match
`agentId`.
- The response is an SDK-facing envelope with `ok`, `toolName`, optional `output`, and typed
`error` fields. Approval or policy refusals return `ok:false` in the payload rather than
bypassing the gateway tool policy pipeline.
- Operators may call `skills.status` (`operator.read`) to fetch the visible
skill inventory for an agent.
- `agentId` is optional; omit it to read the default agent workspace.

View File

@@ -363,31 +363,66 @@ Example (read-only source + an extra data directory):
Default Docker image: `openclaw-sandbox:bookworm-slim`
<Note>
**Source checkout vs npm install**
The `scripts/sandbox-setup.sh`, `scripts/sandbox-common-setup.sh`, and `scripts/sandbox-browser-setup.sh` helper scripts are only available when running from a [source checkout](https://github.com/openclaw/openclaw). They are not included in the npm package.
If you installed OpenClaw via `npm install -g openclaw`, use the inline `docker build` commands shown below instead.
</Note>
<Steps>
<Step title="Build the default image">
From a source checkout:
```bash
scripts/sandbox-setup.sh
```
From an npm install (no source checkout needed):
```bash
docker build -t openclaw-sandbox:bookworm-slim - <<'DOCKERFILE'
FROM debian:bookworm-slim
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y --no-install-recommends \
bash ca-certificates curl git jq python3 ripgrep \
&& rm -rf /var/lib/apt/lists/*
RUN useradd --create-home --shell /bin/bash sandbox
USER sandbox
WORKDIR /home/sandbox
CMD ["sleep", "infinity"]
DOCKERFILE
```
The default image does **not** include Node. If a skill needs Node (or other runtimes), either bake a custom image or install via `sandbox.docker.setupCommand` (requires network egress + writable root + root user).
OpenClaw does not silently substitute plain `debian:bookworm-slim` when `openclaw-sandbox:bookworm-slim` is missing. Sandbox runs that target the default image fail fast with a build instruction until you run `scripts/sandbox-setup.sh`, because the bundled image carries `python3` for sandbox write/edit helpers.
OpenClaw does not silently substitute plain `debian:bookworm-slim` when `openclaw-sandbox:bookworm-slim` is missing. Sandbox runs that target the default image fail fast with a build instruction until you build it, because the bundled image carries `python3` for sandbox write/edit helpers.
</Step>
<Step title="Optional: build the common image">
For a more functional sandbox image with common tooling (for example `curl`, `jq`, `nodejs`, `python3`, `git`):
From a source checkout:
```bash
scripts/sandbox-common-setup.sh
```
From an npm install, build the default image first (see above), then build the common image on top using the [`Dockerfile.sandbox-common`](https://github.com/openclaw/openclaw/blob/main/Dockerfile.sandbox-common) from the repository.
Then set `agents.defaults.sandbox.docker.image` to `openclaw-sandbox-common:bookworm-slim`.
</Step>
<Step title="Optional: build the sandbox browser image">
From a source checkout:
```bash
scripts/sandbox-browser-setup.sh
```
From an npm install, build using the [`Dockerfile.sandbox-browser`](https://github.com/openclaw/openclaw/blob/main/Dockerfile.sandbox-browser) from the repository.
</Step>
</Steps>

View File

@@ -600,7 +600,7 @@ These Docker runners split into two buckets:
`OPENCLAW_LIVE_GATEWAY_MODEL_TIMEOUT_MS=90000`. Override those env vars when you
explicitly want the larger exhaustive scan.
- `test:docker:all` builds the live Docker image once via `test:docker:live-build`, packs OpenClaw once as an npm tarball through `scripts/package-openclaw-for-docker.mjs`, then builds/reuses two `scripts/e2e/Dockerfile` images. The bare image is only the Node/Git runner for install/update/plugin-dependency lanes; those lanes mount the prebuilt tarball. The functional image installs the same tarball into `/app` for built-app functionality lanes. Docker lane definitions live in `scripts/lib/docker-e2e-scenarios.mjs`; planner logic lives in `scripts/lib/docker-e2e-plan.mjs`; `scripts/test-docker-all.mjs` executes the selected plan. The aggregate uses a weighted local scheduler: `OPENCLAW_DOCKER_ALL_PARALLELISM` controls process slots, while resource caps keep heavy live, npm-install, and multi-service lanes from all starting at once. If a single lane is heavier than the active caps, the scheduler can still start it when the pool is empty and then keeps it running alone until capacity is available again. Defaults are 10 slots, `OPENCLAW_DOCKER_ALL_LIVE_LIMIT=9`, `OPENCLAW_DOCKER_ALL_NPM_LIMIT=10`, and `OPENCLAW_DOCKER_ALL_SERVICE_LIMIT=7`; tune `OPENCLAW_DOCKER_ALL_WEIGHT_LIMIT` or `OPENCLAW_DOCKER_ALL_DOCKER_LIMIT` only when the Docker host has more headroom. The runner performs a Docker preflight by default, removes stale OpenClaw E2E containers, prints status every 30 seconds, stores successful lane timings in `.artifacts/docker-tests/lane-timings.json`, and uses those timings to start longer lanes first on later runs. Use `OPENCLAW_DOCKER_ALL_DRY_RUN=1` to print the weighted lane manifest without building or running Docker, or `node scripts/test-docker-all.mjs --plan-json` to print the CI plan for selected lanes, package/image needs, and credentials.
- `Package Acceptance` is the GitHub-native package gate for "does this installable tarball work as a product?" It resolves one candidate package from `source=npm`, `source=ref`, `source=url`, or `source=artifact`, uploads it as `package-under-test`, then runs the reusable Docker E2E lanes against that exact tarball instead of repacking the selected ref. `workflow_ref` selects the trusted workflow/harness scripts, while `package_ref` selects the source commit/branch/tag to pack when `source=ref`; this lets current acceptance logic validate older trusted commits. Profiles are ordered by breadth: `smoke` is quick install/channel/agent plus gateway/config, `package` is the package/update/plugin contract plus the keyless upgrade-survivor fixture, the published-baseline upgrade survivor lane, and the default native replacement for most Parallels package/update coverage, `product` adds MCP channels, cron/subagent cleanup, OpenAI web search, and OpenWebUI, and `full` runs the release-path Docker chunks with OpenWebUI. For `published-upgrade-survivor`, Package Acceptance always uses `package-under-test` as the candidate and `published_upgrade_survivor_baseline` as the published baseline, defaulting to `openclaw@latest`; shard broader coverage by dispatching multiple runs with exact baseline values. The published lane configures its baseline with a baked `openclaw config set` command recipe, then records recipe steps in the lane summary. Release validation runs a custom package delta (`bundled-channel-deps-compat plugins-offline`) plus Telegram package QA because the release-path Docker chunks already cover the overlapping package/update/plugin lanes. Targeted GitHub Docker rerun commands generated from artifacts include prior package artifact, prepared image inputs, and the published upgrade-survivor baseline when available, so failed lanes can avoid rebuilding the package and images.
- `Package Acceptance` is the GitHub-native package gate for "does this installable tarball work as a product?" It resolves one candidate package from `source=npm`, `source=ref`, `source=url`, or `source=artifact`, uploads it as `package-under-test`, then runs the reusable Docker E2E lanes against that exact tarball instead of repacking the selected ref. `workflow_ref` selects the trusted workflow/harness scripts, while `package_ref` selects the source commit/branch/tag to pack when `source=ref`; this lets current acceptance logic validate older trusted commits. Profiles are ordered by breadth: `smoke` is quick install/channel/agent plus gateway/config, `package` is the package/update/plugin contract plus the keyless upgrade-survivor fixture, the published-baseline upgrade survivor lane, and the default native replacement for most Parallels package/update coverage, `product` adds MCP channels, cron/subagent cleanup, OpenAI web search, and OpenWebUI, and `full` runs the release-path Docker chunks with OpenWebUI. For `published-upgrade-survivor`, Package Acceptance always uses `package-under-test` as the candidate and `published_upgrade_survivor_baseline` as the fallback published baseline, defaulting to `openclaw@latest`; set `published_upgrade_survivor_baselines=release-history` to shard the lane across a deduped matrix of the latest six stable releases, `2026.4.23`, and the latest stable release before `2026-03-15`. The published lane configures its baseline with a baked `openclaw config set` command recipe, then records recipe steps in the lane summary. Release validation runs a custom package delta (`bundled-channel-deps-compat plugins-offline`) plus Telegram package QA because the release-path Docker chunks already cover the overlapping package/update/plugin lanes. 
Targeted GitHub Docker rerun commands generated from artifacts include prior package artifact, prepared image inputs, and the published upgrade-survivor baseline list when available, so failed lanes can avoid rebuilding the package and images.
- Build and release checks run `scripts/check-cli-bootstrap-imports.mjs` after tsdown. The guard walks the static built graph from `dist/entry.js` and `dist/cli/run-main.js` and fails if pre-dispatch startup imports package dependencies such as Commander, prompt UI, undici, or logging before command dispatch; it also keeps the bundled gateway run chunk under budget and rejects static imports of known cold gateway paths. Packaged CLI smoke also covers root help, onboard help, doctor help, status, config schema, and a model-list command.
- Package Acceptance legacy compatibility is capped at `2026.4.25` (`2026.4.25-beta.*` included). Through that cutoff, the harness tolerates only shipped-package metadata gaps: omitted private QA inventory entries, missing `gateway install --wrapper`, missing patch files in the tarball-derived git fixture, missing persisted `update.channel`, legacy plugin install-record locations, missing marketplace install-record persistence, and config metadata migration during `plugins update`. For packages after `2026.4.25`, those paths are strict failures.
- Container smoke runners: `test:docker:openwebui`, `test:docker:onboard`, `test:docker:npm-onboard-channel-agent`, `test:docker:update-channel-switch`, `test:docker:upgrade-survivor`, `test:docker:published-upgrade-survivor`, `test:docker:session-runtime-context`, `test:docker:agents-delete-shared-workspace`, `test:docker:gateway-network`, `test:docker:browser-cdp-snapshot`, `test:docker:mcp-channels`, `test:docker:pi-bundle-mcp-tools`, `test:docker:cron-mcp-cleanup`, `test:docker:plugins`, `test:docker:plugin-update`, and `test:docker:config-reload` boot one or more real containers and verify higher-level integration paths.
@@ -618,7 +618,7 @@ The live-model Docker runners also bind-mount only the needed CLI auth homes (or
- Npm tarball onboarding/channel/agent smoke: `pnpm test:docker:npm-onboard-channel-agent` installs the packed OpenClaw tarball globally in Docker, configures OpenAI via env-ref onboarding plus Telegram by default, verifies doctor repairs activated plugin runtime deps, and runs one mocked OpenAI agent turn. Reuse a prebuilt tarball with `OPENCLAW_CURRENT_PACKAGE_TGZ=/path/to/openclaw-*.tgz`, skip the host rebuild with `OPENCLAW_NPM_ONBOARD_HOST_BUILD=0`, or switch channel with `OPENCLAW_NPM_ONBOARD_CHANNEL=discord`.
- Update channel switch smoke: `pnpm test:docker:update-channel-switch` installs the packed OpenClaw tarball globally in Docker, switches from package `stable` to git `dev`, verifies the persisted channel and plugin post-update work, then switches back to package `stable` and checks update status.
- Upgrade survivor smoke: `pnpm test:docker:upgrade-survivor` installs the packed OpenClaw tarball over a dirty old-user fixture with agents, channel config, plugin allowlists, stale plugin runtime-deps state, and existing workspace/session files. It runs package update plus non-interactive doctor without live provider or channel keys, then starts a loopback Gateway and checks config/state preservation plus startup/status budgets.
- Published upgrade survivor smoke: `pnpm test:docker:published-upgrade-survivor` installs `openclaw@latest` by default, seeds realistic existing-user files, configures that baseline with a baked command recipe, validates the resulting config, updates that published install to the candidate tarball, runs non-interactive doctor, writes `.artifacts/upgrade-survivor/summary.json`, then starts a loopback Gateway and checks configured intents, state preservation, startup, and status budgets. Override the baseline with `OPENCLAW_UPGRADE_SURVIVOR_BASELINE_SPEC`; Package Acceptance exposes the same value as `published_upgrade_survivor_baseline`.
- Published upgrade survivor smoke: `pnpm test:docker:published-upgrade-survivor` installs `openclaw@latest` by default, seeds realistic existing-user files, configures that baseline with a baked command recipe, validates the resulting config, updates that published install to the candidate tarball, runs non-interactive doctor, writes `.artifacts/upgrade-survivor/summary.json`, then starts a loopback Gateway and checks configured intents, state preservation, startup, `/healthz`, `/readyz`, and RPC status budgets. Override one baseline with `OPENCLAW_UPGRADE_SURVIVOR_BASELINE_SPEC`, ask the aggregate scheduler to expand exact baselines with `OPENCLAW_UPGRADE_SURVIVOR_BASELINE_SPECS`, and expand issue-shaped fixtures with `OPENCLAW_UPGRADE_SURVIVOR_SCENARIOS` such as `reported-issues`; Package Acceptance exposes those as `published_upgrade_survivor_baseline`, `published_upgrade_survivor_baselines`, and `published_upgrade_survivor_scenarios`.
- Session runtime context smoke: `pnpm test:docker:session-runtime-context` verifies hidden runtime context transcript persistence plus doctor repair of affected duplicated prompt-rewrite branches.
- Bun global install smoke: `bash scripts/e2e/bun-global-install-smoke.sh` packs the current tree, installs it with `bun install -g` in an isolated home, and verifies `openclaw infer image providers --json` returns bundled image providers instead of hanging. Reuse a prebuilt tarball with `OPENCLAW_BUN_GLOBAL_SMOKE_PACKAGE_TGZ=/path/to/openclaw-*.tgz`, skip the host build with `OPENCLAW_BUN_GLOBAL_SMOKE_HOST_BUILD=0`, or copy `dist/` from a built Docker image with `OPENCLAW_BUN_GLOBAL_SMOKE_DIST_IMAGE=openclaw-dockerfile-smoke:local`.
- Installer Docker smoke: `bash scripts/test-install-sh-docker.sh` shares one npm cache across its root, update, and direct-npm containers. Update smoke defaults to npm `latest` as the stable baseline before upgrading to the candidate tarball. Override with `OPENCLAW_INSTALL_SMOKE_UPDATE_BASELINE=2026.4.22` locally, or with the Install Smoke workflow's `update_baseline_version` input on GitHub. Non-root installer checks keep an isolated npm cache so root-owned cache entries do not mask user-local install behavior. Set `OPENCLAW_INSTALL_SMOKE_NPM_CACHE_DIR=/path/to/cache` to reuse the root/update/direct-npm cache across local reruns.

View File

@@ -202,9 +202,11 @@ This is idempotent and safe to run multiple times.
# Check sandbox image
sudo docker images | grep openclaw-sandbox
# Build sandbox image if missing
# Build sandbox image if missing (requires source checkout)
cd /opt/openclaw/openclaw
sudo -u openclaw ./scripts/sandbox-setup.sh
# For npm installs without a source checkout, see
# https://docs.openclaw.ai/gateway/sandboxing#images-and-setup
```
</Accordion>

View File

@@ -452,18 +452,21 @@ For full configuration, images, security notes, and multi-agent profiles, see:
}
```
Build the default sandbox image:
Build the default sandbox image (from a source checkout):
```bash
scripts/sandbox-setup.sh
```
For npm installs without a source checkout, see [Sandboxing § Images and setup](/gateway/sandboxing#images-and-setup) for inline `docker build` commands.
## Troubleshooting
<AccordionGroup>
<Accordion title="Image missing or sandbox container not starting">
Build the sandbox image with
[`scripts/sandbox-setup.sh`](https://github.com/openclaw/openclaw/blob/main/scripts/sandbox-setup.sh)
(source checkout) or the inline `docker build` command from [Sandboxing § Images and setup](/gateway/sandboxing#images-and-setup) (npm install),
or set `agents.defaults.sandbox.docker.image` to your custom image.
Containers are auto-created per session on demand.
</Accordion>

View File

@@ -168,6 +168,13 @@ The auto-updater is off by default. Enable it in `~/.openclaw/openclaw.json`:
The gateway also logs an update hint on startup (disable with `update.checkOnStart: false`).
For downgrade or incident recovery, set `OPENCLAW_NO_AUTO_UPDATE=1` in the gateway environment to block automatic applies even when `update.auto.enabled` is configured. Startup update hints can still run unless `update.checkOnStart` is also disabled.
Package-manager updates requested through the live Gateway control-plane handler
force a non-deferred, no-cooldown update restart after the package swap. That
avoids leaving an old in-memory process around long enough to lazy-load chunks
from a package tree that has already been replaced. Shell `openclaw update`
remains the preferred path for supervised installs because it can stop and
restart the service around the update.
## After updating
<Steps>

View File

@@ -11,8 +11,9 @@ title: "Menu bar"
- We surface the current agent work state in the menu bar icon and in the first status row of the menu.
- Health status is hidden while work is active; it returns when all sessions are idle.
- The “Nodes” block in the menu lists **devices** only (paired nodes via `node.list`), not client/presence entries.
- A “Usage” section appears under Context when provider usage snapshots are available.
- A root “Context” submenu contains recent sessions instead of expanding them directly in the root menu.
- The “Nodes” block in the root menu lists **devices** only (paired nodes via `node.list`), not client/presence entries.
- A root “Usage” section appears below Context when provider usage snapshots are available, followed by usage-cost details when available.
## State model
@@ -45,6 +46,14 @@ title: "Menu bar"
- `workingOther`: badge with glyph, muted tint, no scurry.
- `overridden`: uses the chosen glyph/tint regardless of activity.
## Context submenu
- The root menu shows one “Context” row with a session count/status and opens a submenu.
- The Context submenu header shows the active session count for the last 24 hours.
- Each session row keeps its token bar, age, preview, thinking/verbose, reset, compact, and delete actions.
- Loading, disconnected, and session-load error messages appear inside the Context submenu.
- Provider usage and usage-cost details stay root-level below Context so they remain glanceable without opening the submenu.
## Status row text (menu)
- While work is active: `<Session role> · <activity label>`

View File

@@ -253,6 +253,47 @@ Users enable optional tools in config:
- Use `optional: true` for tools with side effects or extra binary requirements
- Users can enable all tools from a plugin by adding the plugin id to `tools.allow`
## Registering CLI commands
Plugins can add root `openclaw` command groups with `api.registerCli`. Provide
`descriptors` for every top-level command root so OpenClaw can show and route
the command without eagerly loading every plugin runtime.
```typescript
register(api) {
api.registerCli(
({ program }) => {
const demo = program
.command("demo-plugin")
.description("Run demo plugin commands");
demo
.command("ping")
.description("Check that the plugin CLI is executable")
.action(() => {
console.log("demo-plugin:pong");
});
},
{
descriptors: [
{
name: "demo-plugin",
description: "Run demo plugin commands",
hasSubcommands: true,
},
],
},
);
}
```
After install, verify the runtime registration and execute the command:
```bash
openclaw plugins inspect demo-plugin --runtime --json
openclaw demo-plugin ping
```
## Import conventions
Always import from focused `openclaw/plugin-sdk/<subpath>` paths:

View File

@@ -0,0 +1,214 @@
---
summary: "How OpenClaw plans, stages, and repairs bundled plugin runtime dependencies"
read_when:
- You are debugging bundled plugin runtime dependency repair
- You are changing plugin startup, doctor, or package-manager install behavior
- You are maintaining packaged OpenClaw installs or bundled plugin manifests
title: "Plugin dependency resolution"
sidebarTitle: "Dependencies"
---
OpenClaw does not install every bundled plugin dependency tree at package install
time. It first derives an effective plugin plan from config and plugin metadata,
then stages runtime dependencies only for bundled OpenClaw-owned plugins that
the plan can actually load.
This page covers packaged runtime dependencies for bundled OpenClaw plugins.
Third-party plugins and custom plugin paths still use explicit plugin
installation commands such as `openclaw plugins install` and
`openclaw plugins update`.
## Responsibility split
OpenClaw owns the plan and policy:
- which plugins are active for this config
- which dependency roots are writable or read-only
- when repair is allowed
- which plugin ids are staged for startup
- final checks before importing plugin runtime modules
The package manager owns dependency convergence:
- package graph resolution
- production, optional, and peer dependency handling
- `node_modules` layout
- package integrity
- lock and install metadata
In practice, OpenClaw should decide what needs to exist. `pnpm` or `npm` should
make the filesystem match that decision.
OpenClaw also owns the per-install-root coordination lock. Package managers
protect their own install transaction, but they do not serialize OpenClaw's
manifest writes, isolated-stage copy/rename, final validation, or plugin import
against another Gateway, doctor, or CLI process touching the same runtime
dependency root.
## Effective plugin plan
The effective plugin plan is derived from config plus discovered plugin
metadata. These inputs can activate bundled plugin runtime dependencies:
- `plugins.entries.<id>.enabled`
- `plugins.allow`, `plugins.deny`, and `plugins.enabled`
- legacy channel config such as `channels.telegram.enabled`
- configured providers, models, or CLI backend references that require a plugin
- bundled manifest defaults such as `enabledByDefault`
- the installed plugin index and bundled manifest metadata
Explicit disablement wins. A disabled plugin, denied plugin id, disabled plugin
system, or disabled channel does not trigger runtime dependency repair. Persisted
auth state alone also does not activate a bundled channel or provider.
The plugin plan is the stable input. The generated dependency materialization is
an output of that plan.
## Startup flow
Gateway startup parses config and builds the startup plugin lookup table before
plugin runtime modules are loaded. Startup then stages runtime dependencies only
for the `startupPluginIds` selected by that plan.
For packaged installs, dependency staging is allowed before plugin import. After
staging, the runtime loader imports startup plugins with install repair disabled;
at that point missing dependency materialization is treated as a load failure,
not another repair loop.
When startup dependency staging is deferred behind the HTTP bind, Gateway
readiness stays blocked on the `plugin-runtime-deps` reason until the selected
startup plugin dependencies are materialized and the startup plugin runtime has
loaded.
## When repair runs
Runtime dependency repair should run when one of these is true:
- the effective plugin plan changed and adds bundled plugins that need runtime
dependencies
- the generated dependency manifest no longer matches the effective plan
- expected installed package sentinels are missing or incomplete
- `openclaw doctor --fix` or `openclaw plugins deps --repair` was requested
Runtime dependency repair should not run just because OpenClaw started. A normal
startup with an unchanged plan and complete dependency materialization should
skip package-manager work.
Commands that edit config, enable plugins, or repair doctor findings can enter
plugin plan mode once, materialize the newly required bundled dependencies, then
return to the normal command flow. Local `openclaw onboard` and
`openclaw configure` do this automatically after they successfully write config,
so the next Gateway run does not discover missing bundled plugin packages after
startup has already begun. Remote onboarding/configure stays read-only for local
runtime deps.
## Hot reload rule
Hot reload paths that can change active plugins must go back through plugin plan
mode before loading plugin runtime. The reload should compare the new effective
plugin plan with the previous one, stage missing dependencies for newly active
bundled plugins, then load or restart the affected runtime.
If a config reload does not change the effective plugin plan, it should not
repair bundled runtime dependencies.
## Package manager execution
OpenClaw writes a generated install manifest for the selected bundled runtime
dependencies and runs the package manager in the runtime dependency install
root. It prefers `pnpm` when available and falls back to the Node-bundled `npm`
runner.
The `pnpm` path uses production dependencies, disables lifecycle scripts, ignores
the workspace, and keeps the store inside the install root:
```bash
pnpm install \
--prod \
--ignore-scripts \
--ignore-workspace \
--config.frozen-lockfile=false \
--config.minimum-release-age=0 \
--config.store-dir=<install-root>/.openclaw-pnpm-store \
--config.node-linker=hoisted \
--config.virtual-store-dir=.pnpm
```
The `npm` fallback uses the safe npm install wrapper with production
dependencies, lifecycle scripts disabled, workspace mode disabled, audit
disabled, fund output disabled, legacy peer dependency behavior, and package-lock
output enabled for the generated install root.
After install, OpenClaw validates the staged dependency tree before making it
visible to the runtime dependency root. Isolated staging is copied into the
runtime dependency root and validated again.
The whole repair/materialization section is guarded by an install-root lock.
Current lock owners record PID, process start-time when available, and creation
time. Legacy locks without process start-time or creation-time evidence are only
reclaimed by filesystem age; this lets locks abandoned by recycled Docker PID 1
processes recover, without age-expiring locks held by normal long-running
current installs.
## Install roots
Packaged installs must not mutate read-only package directories. OpenClaw can
read dependency roots from packaged layers, but writes generated runtime
dependencies to a writable stage such as:
- `OPENCLAW_PLUGIN_STAGE_DIR`
- `$STATE_DIRECTORY`
- `~/.openclaw/plugin-runtime-deps`
- `/var/lib/openclaw/plugin-runtime-deps` in container-style installs
The writable root is the final materialization target. Older read-only roots are
kept as compatibility layers only when needed.
When a packaged OpenClaw update changes the versioned writable root but the
selected bundled-plugin dependency plan is still satisfied by a previous staged
root, repair reuses that previous `node_modules` tree instead of running the
package manager again. The new versioned root still gets its own current package
runtime mirror, so plugin code comes from the current OpenClaw package while
unchanged dependency trees are shared across updates. Reuse skips previous roots
with an active OpenClaw runtime-dependency lock, so a new root does not link to a
dependency tree that another Gateway, doctor, or CLI process is currently
repairing.
## Doctor and CLI commands
Use `plugins deps` to inspect or repair bundled plugin runtime dependency
materialization:
```bash
openclaw plugins deps
openclaw plugins deps --json
openclaw plugins deps --repair
openclaw plugins deps --prune
```
Use doctor when the dependency state is part of broader install health:
```bash
openclaw doctor
openclaw doctor --fix
```
`plugins deps` and doctor operate on OpenClaw-owned bundled plugin runtime
dependencies selected by the effective plugin plan. They are not third-party
plugin install or update commands.
## Troubleshooting
If a packaged install reports missing bundled runtime dependencies:
1. Run `openclaw plugins deps --json` to inspect the selected plan and missing
packages.
2. Run `openclaw plugins deps --repair` or `openclaw doctor --fix` to repair the
writable dependency stage.
3. If the install root is read-only, set `OPENCLAW_PLUGIN_STAGE_DIR` to a
writable path and rerun repair.
4. Restart Gateway after repair if the missing dependency blocked startup plugin
loading.
In source checkouts, the workspace install usually provides bundled plugin
dependencies. Run `pnpm install` for source dependency repair instead of using
packaged runtime dependency repair as the first step.

View File

@@ -924,6 +924,16 @@ Defaults:
and writing audio in `chrome.audioFormat`
- `chrome.audioOutputCommand`: SoX command reading audio in `chrome.audioFormat`
and writing to CoreAudio `BlackHole 2ch`
- `chrome.bargeInInputCommand`: optional local microphone command that writes
signed 16-bit little-endian mono PCM for human barge-in detection while
assistant playback is active. This currently applies to the Gateway-hosted
`chrome` command-pair bridge.
- `chrome.bargeInRmsThreshold: 650`: RMS level that counts as a human
interruption on `chrome.bargeInInputCommand`
- `chrome.bargeInPeakThreshold: 2500`: peak level that counts as a human
interruption on `chrome.bargeInInputCommand`
- `chrome.bargeInCooldownMs: 900`: minimum delay between repeated human
interruption clears
- `realtime.provider: "openai"`
- `realtime.toolPolicy: "safe-read-only"`
- `realtime.instructions`: brief spoken replies, with
@@ -946,6 +956,24 @@ Optional overrides:
chrome: {
guestName: "OpenClaw Agent",
waitForInCallMs: 30000,
bargeInInputCommand: [
"sox",
"-q",
"-t",
"coreaudio",
"External Microphone",
"-r",
"24000",
"-c",
"1",
"-b",
"16",
"-e",
"signed-integer",
"-t",
"raw",
"-",
],
},
chromeNode: {
node: "parallels-macos",
@@ -1028,6 +1056,8 @@ a session ended.
not send the intro/test phrase into the audio bridge.
- `providerConnected` / `realtimeReady`: realtime voice bridge state
- `lastInputAt` / `lastOutputAt`: last audio seen from or sent to the bridge
- `lastSuppressedInputAt` / `suppressedInputBytes`: loopback input ignored while
assistant playback is active
```json
{
@@ -1133,6 +1163,8 @@ Expected Twilio state:
`twilio-voice-call-credentials`, and `twilio-voice-call-webhook` checks.
- `voicecall` is available in the CLI after Gateway reload.
- The returned session has `transport: "twilio"` and a `twilio.voiceCallId`.
- `openclaw logs --follow` shows DTMF TwiML served before realtime TwiML, then a
realtime bridge with the initial greeting queued.
- `googlemeet leave <sessionId>` hangs up the delegated voice call.
## Troubleshooting
@@ -1407,6 +1439,10 @@ participant:
active.
- Run `openclaw voicecall tail` and check that Twilio webhooks are arriving at
the Gateway.
- Run `openclaw logs --follow` and look for the Twilio Meet sequence: Google
Meet delegates the join, Voice Call stores pre-connect DTMF TwiML, serves
that initial TwiML, then serves realtime TwiML and starts the realtime bridge
with `initialGreeting=queued`.
- Re-run `openclaw googlemeet setup --transport twilio`; a green setup check is
required but does not prove the meeting PIN sequence is correct.
- Confirm the dial-in number belongs to the same Meet invitation and region as
@@ -1414,9 +1450,9 @@ participant:
- Increase the leading pauses in `--dtmf-sequence` if Meet answers slowly, for
example `wwww123456#`.
- If the participant joins but you do not hear the greeting, check
`openclaw voicecall tail` for a Twilio stream start followed by realtime
provider readiness. The greeting is now generated from the initial
`voicecall.start` message after the stream connects.
`openclaw logs --follow` for realtime TwiML, realtime bridge startup, and
`initialGreeting=queued`. The greeting is generated from the initial
`voicecall.start` message after the realtime bridge connects.
If webhooks do not arrive, debug the Voice Call plugin first: the provider must
reach `plugins.entries.voice-call.config.publicUrl` or the configured tunnel.
@@ -1442,6 +1478,14 @@ For clean duplex audio, route Meet output and Meet microphone through separate
virtual devices or a Loopback-style virtual device graph. A single shared
BlackHole device can echo other participants back into the call.
With the command-pair Chrome bridge, `chrome.bargeInInputCommand` can listen to a
separate local microphone and clear assistant playback when the human starts
talking. This keeps human speech ahead of assistant output even when the shared
BlackHole loopback input is temporarily suppressed during assistant playback.
Like `chrome.audioInputCommand` and `chrome.audioOutputCommand`, it is an
operator-configured local command. Use an explicit trusted command path or
argument list, and do not point it at scripts from untrusted locations.
`googlemeet speak` triggers the active realtime audio bridge for a Chrome
session. `googlemeet leave` stops that bridge. For Twilio sessions delegated
through the Voice Call plugin, `leave` also hangs up the underlying voice call.

View File

@@ -593,6 +593,7 @@ API key auth, and dynamic model resolution.
connect: async () => {},
sendAudio: () => {},
setMediaTimestamp: () => {},
handleBargeIn: () => {},
submitToolResult: () => {},
acknowledgeMark: () => {},
close: () => {},
@@ -600,6 +601,10 @@ API key auth, and dynamic model resolution.
}),
});
```
Implement `handleBargeIn` when a transport can detect that a human is
interrupting assistant playback and the provider supports truncating or
clearing the active audio response.
</Tab>
<Tab title="Media understanding">
```typescript

View File

@@ -96,7 +96,7 @@ skips starting the runtime. Commands, RPC calls, and agent tools still
return the exact missing provider configuration when used.
<Note>
Voice-call credentials accept SecretRefs. `plugins.entries.voice-call.config.twilio.authToken` and `plugins.entries.voice-call.config.tts.providers.*.apiKey` resolve through the standard SecretRef surface; see [SecretRef credential surface](/reference/secretref-credential-surface).
Voice-call credentials accept SecretRefs. `plugins.entries.voice-call.config.twilio.authToken`, `plugins.entries.voice-call.config.realtime.providers.*.apiKey`, `plugins.entries.voice-call.config.streaming.providers.*.apiKey`, and `plugins.entries.voice-call.config.tts.providers.*.apiKey` resolve through the standard SecretRef surface; see [SecretRef credential surface](/reference/secretref-credential-surface).
</Note>
```json5
@@ -210,6 +210,7 @@ Current runtime behaviour:
- Bundled realtime voice providers: Google Gemini Live (`google`) and OpenAI (`openai`), registered by their provider plugins.
- Provider-owned raw config lives under `realtime.providers.<providerId>`.
- Voice Call exposes the shared `openclaw_agent_consult` realtime tool by default. The realtime model can call it when the caller asks for deeper reasoning, current information, or normal OpenClaw tools.
- `realtime.fastContext.enabled` is off by default. When enabled, Voice Call first searches indexed memory/session context for the consult question and returns those snippets to the realtime model within `realtime.fastContext.timeoutMs`; it then falls back to the full consult agent only if `realtime.fastContext.fallbackToConsult` is true.
- If `realtime.provider` points at an unregistered provider, or no realtime voice provider is registered at all, Voice Call logs a warning and skips realtime media instead of failing the whole plugin.
- Consult session keys reuse the existing voice session when available, then fall back to the caller/callee phone number so follow-up consult calls keep context during the call.
@@ -723,6 +724,7 @@ Then inspect runtime state:
```bash
openclaw voicecall status --call-id <id>
openclaw voicecall tail
openclaw logs --follow
```
Common causes:
@@ -775,6 +777,19 @@ For Twilio calls, Voice Call serves the DTMF TwiML first, redirects back to the
webhook, then opens the realtime media stream so the saved intro is generated
after the phone participant has joined the meeting.
Use `openclaw logs --follow` for the live phase trace. A healthy Twilio Meet
join logs this order:
- Google Meet delegates the Twilio join to Voice Call.
- Voice Call stores pre-connect DTMF TwiML.
- Twilio initial TwiML is consumed and served before realtime handling.
- Voice Call serves realtime TwiML for the Twilio call.
- The realtime bridge starts with the initial greeting queued.
`openclaw voicecall tail` still shows persisted call records; it is useful for
call state and transcripts, but not every webhook/realtime transition appears
there.
### Realtime call has no speech
Confirm only one audio mode is enabled. `realtime.enabled` and
@@ -785,8 +800,8 @@ For realtime Twilio calls, also verify:
- A realtime provider plugin is loaded and registered.
- `realtime.provider` is unset or names a registered provider.
- The provider API key is available to the Gateway process.
- `openclaw voicecall tail` shows the media stream accepted and realtime
provider readiness before the initial greeting.
- `openclaw logs --follow` shows realtime TwiML served, the realtime bridge
started, and the initial greeting queued.
## Related

View File

@@ -50,6 +50,8 @@ Scope intent:
- `plugins.entries.firecrawl.config.webSearch.apiKey`
- `plugins.entries.minimax.config.webSearch.apiKey`
- `plugins.entries.tavily.config.webSearch.apiKey`
- `plugins.entries.voice-call.config.realtime.providers.*.apiKey`
- `plugins.entries.voice-call.config.streaming.providers.*.apiKey`
- `plugins.entries.voice-call.config.tts.providers.*.apiKey`
- `plugins.entries.voice-call.config.twilio.authToken`
- `tools.web.search.apiKey`

View File

@@ -589,6 +589,20 @@
"secretShape": "secret_input",
"optIn": true
},
{
"id": "plugins.entries.voice-call.config.realtime.providers.*.apiKey",
"configFile": "openclaw.json",
"path": "plugins.entries.voice-call.config.realtime.providers.*.apiKey",
"secretShape": "secret_input",
"optIn": true
},
{
"id": "plugins.entries.voice-call.config.streaming.providers.*.apiKey",
"configFile": "openclaw.json",
"path": "plugins.entries.voice-call.config.streaming.providers.*.apiKey",
"secretShape": "secret_input",
"optIn": true
},
{
"id": "plugins.entries.voice-call.config.tts.providers.*.apiKey",
"configFile": "openclaw.json",

View File

@@ -43,7 +43,7 @@ title: "Tests"
- `pnpm test:docker:openwebui`: Starts Dockerized OpenClaw + Open WebUI, signs in through Open WebUI, checks `/api/models`, then runs a real proxied chat through `/api/chat/completions`. Requires a usable live model key (for example OpenAI in `~/.profile`), pulls an external Open WebUI image, and is not expected to be CI-stable like the normal unit/e2e suites.
- `pnpm test:docker:mcp-channels`: Starts a seeded Gateway container and a second client container that spawns `openclaw mcp serve`, then verifies routed conversation discovery, transcript reads, attachment metadata, live event queue behavior, outbound send routing, and Claude-style channel + permission notifications over the real stdio bridge. The Claude notification assertion reads the raw stdio MCP frames directly so the smoke reflects what the bridge actually emits.
- `pnpm test:docker:upgrade-survivor`: Installs the packed OpenClaw tarball over a dirty old-user fixture, runs package update plus non-interactive doctor without live provider or channel keys, then starts a loopback Gateway and checks that agents, channel config, plugin allowlists, workspace/session files, stale plugin runtime-deps state, startup, and RPC status survive.
- `pnpm test:docker:published-upgrade-survivor`: Installs `openclaw@latest` by default, seeds realistic existing-user files without live provider or channel keys, configures that baseline with a baked `openclaw config set` command recipe, updates that published install to the packed OpenClaw tarball, runs non-interactive doctor, writes `.artifacts/upgrade-survivor/summary.json`, then starts a loopback Gateway and checks that configured intents, workspace/session files, stale plugin config/runtime-deps state, startup, and RPC status survive or repair cleanly. Override the baseline with `OPENCLAW_UPGRADE_SURVIVOR_BASELINE_SPEC`; Package Acceptance exposes the same value as `published_upgrade_survivor_baseline`.
- `pnpm test:docker:published-upgrade-survivor`: Installs `openclaw@latest` by default, seeds realistic existing-user files without live provider or channel keys, configures that baseline with a baked `openclaw config set` command recipe, updates that published install to the packed OpenClaw tarball, runs non-interactive doctor, writes `.artifacts/upgrade-survivor/summary.json`, then starts a loopback Gateway and checks that configured intents, workspace/session files, stale plugin config/runtime-deps state, startup, `/healthz`, `/readyz`, and RPC status survive or repair cleanly. Override one baseline with `OPENCLAW_UPGRADE_SURVIVOR_BASELINE_SPEC`, expand an exact matrix with `OPENCLAW_UPGRADE_SURVIVOR_BASELINE_SPECS`, or add scenario fixtures with `OPENCLAW_UPGRADE_SURVIVOR_SCENARIOS=reported-issues`; Package Acceptance exposes those as `published_upgrade_survivor_baseline`, `published_upgrade_survivor_baselines`, and `published_upgrade_survivor_scenarios`.
## Local PR gate

View File

@@ -41,7 +41,8 @@ directly to existing OpenClaw channel conversations, use
Usually yes. Fresh installs ship the bundled `acpx` runtime plugin enabled
by default with a plugin-local pinned `acpx` binary that OpenClaw probes
and self-repairs on startup. Run `/acp doctor` for a readiness check.
and self-repairs immediately after the Gateway HTTP listener is live. Run
`/acp doctor` for a readiness check.
OpenClaw only teaches agents about ACP spawning when ACP is **truly
usable**: ACP must be enabled, dispatch must not be disabled, the current

View File

@@ -30,6 +30,9 @@ temporary set of OpenClaw-owned plugin packages while that migration finishes.
# From npm
openclaw plugins install npm:@acme/openclaw-plugin
# From git
openclaw plugins install git:github.com/acme/openclaw-plugin@v1.0.0
# From a local directory or archive
openclaw plugins install ./my-plugin
openclaw plugins install ./my-plugin.tgz
@@ -45,6 +48,20 @@ temporary set of OpenClaw-owned plugin packages while that migration finishes.
Then configure under `plugins.entries.\<id\>.config` in your config file.
</Step>
<Step title="Verify the plugin">
```bash
openclaw plugins inspect <plugin-id> --runtime --json
# If the plugin registered a CLI root, run one command from that root.
openclaw <plugin-command> --help
```
Use `--runtime` when you need to prove registered tools, services, gateway
methods, hooks, or plugin-owned CLI commands. Plain `inspect` is a cold
manifest/registry check and intentionally avoids importing plugin runtime.
</Step>
</Steps>
If you prefer chat-native control, enable `commands.plugins: true` and use:
@@ -56,8 +73,8 @@ If you prefer chat-native control, enable `commands.plugins: true` and use:
```
The install path uses the same resolver as the CLI: local path/archive, explicit
`clawhub:<pkg>`, explicit `npm:<pkg>`, or bare package spec (ClawHub first, then
npm fallback).
`clawhub:<pkg>`, explicit `npm:<pkg>`, explicit `git:<repo>`, or bare package
spec (ClawHub first, then npm fallback).
If config is invalid, install normally fails closed and points you at
`openclaw doctor --fix`. The only recovery exception is a narrow bundled-plugin
@@ -93,6 +110,8 @@ repair; explicit bundled channel enablement (`channels.<id>.enabled: true`) can
still repair that channel's plugin dependencies.
External plugins and custom load paths must still be installed through
`openclaw plugins install`.
See [Plugin dependency resolution](/plugins/dependency-resolution) for the full
planning and staging lifecycle.
## Plugin types
@@ -309,7 +328,7 @@ do not run in live chat traffic, check these first:
- Restart the live Gateway after plugin install/config/code changes. In wrapper
containers, PID 1 may only be a supervisor; restart or signal the child
`openclaw gateway run` process.
- Use `openclaw plugins inspect <id> --json` to confirm hook registrations and
- Use `openclaw plugins inspect <id> --runtime --json` to confirm hook registrations and
diagnostics. Non-bundled conversation hooks such as `llm_input`,
`llm_output`, `before_agent_finalize`, and `agent_end` need
`plugins.entries.<id>.hooks.allowConversationAccess=true`.
@@ -336,7 +355,7 @@ Debug steps:
- Run `openclaw plugins list --enabled --verbose` to see every enabled plugin
and origin.
- Run `openclaw plugins inspect <id> --json` for each suspected plugin and
- Run `openclaw plugins inspect <id> --runtime --json` for each suspected plugin and
compare `channels`, `channelConfigs`, `tools`, and diagnostics.
- Run `openclaw plugins registry --refresh` after installing or removing
plugin packages so persisted metadata reflects the current install.
@@ -381,7 +400,8 @@ openclaw plugins list # compact inventory
openclaw plugins list --enabled # only enabled plugins
openclaw plugins list --verbose # per-plugin detail lines
openclaw plugins list --json # machine-readable inventory
openclaw plugins inspect <id> # deep detail
openclaw plugins inspect <id> # static detail
openclaw plugins inspect <id> --runtime # registered hooks/tools/CLI/gateway methods
openclaw plugins inspect <id> --json # machine-readable
openclaw plugins inspect --all # fleet-wide table
openclaw plugins info <id> # inspect alias
@@ -393,6 +413,8 @@ openclaw doctor --fix # repair plugin registry state
openclaw plugins install <package> # install (ClawHub first, then npm)
openclaw plugins install clawhub:<pkg> # install from ClawHub only
openclaw plugins install npm:<pkg> # install from npm only
openclaw plugins install git:<repo> # install from git
openclaw plugins install git:<repo>@<ref> # install from git ref
openclaw plugins install <spec> --force # overwrite existing install
openclaw plugins install <path> # install from local path
openclaw plugins install -l <path> # link (no copy) for dev
@@ -408,6 +430,12 @@ openclaw plugins uninstall <id> --keep-files
openclaw plugins marketplace list <source>
openclaw plugins marketplace list <source> --json
# Verify runtime registrations after install.
openclaw plugins inspect <id> --runtime --json
# Run plugin-owned CLI commands directly from the OpenClaw root CLI.
openclaw <plugin-command> --help
openclaw plugins enable <id>
openclaw plugins disable <id>
```

View File

@@ -247,7 +247,7 @@ User-invocable skills are also exposed as slash commands:
- In multi-account channels, config-targeted `/allowlist --account <id>` and `/config set channels.<provider>.accounts.<id>...` also honor the target account's `configWrites`.
- `/usage` controls the per-response usage footer; `/usage cost` prints a local cost summary from OpenClaw session logs.
- `/restart` is enabled by default; set `commands.restart: false` to disable it.
- `/plugins install <spec>` accepts the same plugin specs as `openclaw plugins install`: local path/archive, npm package, or `clawhub:<pkg>`.
- `/plugins install <spec>` accepts the same plugin specs as `openclaw plugins install`: local path/archive, npm package, `git:<repo>`, or `clawhub:<pkg>`.
- `/plugins enable|disable` updates plugin config and may prompt for a restart.
</Accordion>

View File

@@ -1,6 +1,4 @@
import { buildPluginConfigSchema } from "openclaw/plugin-sdk/core";
import { z } from "openclaw/plugin-sdk/zod";
import type { OpenClawPluginConfigSchema } from "../runtime-api.js";
export const ACPX_PERMISSION_MODES = ["approve-all", "approve-reads", "deny-all"] as const;
export type AcpxPermissionMode = (typeof ACPX_PERMISSION_MODES)[number];
@@ -117,7 +115,3 @@ export const AcpxPluginConfigSchema = z.strictObject({
)
.optional(),
});
export function createAcpxPluginConfigSchema(): OpenClawPluginConfigSchema {
return buildPluginConfigSchema(AcpxPluginConfigSchema);
}

View File

@@ -14,15 +14,12 @@ import type {
ResolvedAcpxPluginConfig,
} from "./config-schema.js";
export {
ACPX_NON_INTERACTIVE_POLICIES,
ACPX_PERMISSION_MODES,
type AcpxMcpServer,
type AcpxNonInteractivePermissionPolicy,
type AcpxPermissionMode,
type AcpxPluginConfig,
type McpServerConfig,
type ResolvedAcpxPluginConfig,
createAcpxPluginConfigSchema,
} from "./config-schema.js";
export const ACPX_PLUGIN_TOOLS_MCP_SERVER_NAME = "openclaw-plugin-tools";
@@ -104,8 +101,6 @@ export function resolveAcpxPluginRoot(moduleUrl: string = import.meta.url): stri
);
}
export const ACPX_PLUGIN_ROOT = resolveAcpxPluginRoot();
const DEFAULT_PERMISSION_MODE: AcpxPermissionMode = "approve-reads";
const DEFAULT_NON_INTERACTIVE_POLICY: AcpxNonInteractivePermissionPolicy = "fail";
const DEFAULT_QUEUE_OWNER_TTL_SECONDS = 0.1;

View File

@@ -51,8 +51,8 @@ function isSupportedRegion(region: string): boolean {
// Bearer token resolution
// ---------------------------------------------------------------------------
export type MantleBearerTokenProvider = () => Promise<string>;
export type MantleBearerTokenProviderFactory = (opts?: {
type MantleBearerTokenProvider = () => Promise<string>;
type MantleBearerTokenProviderFactory = (opts?: {
region?: string;
expiresInSeconds?: number;
}) => MantleBearerTokenProvider;

View File

@@ -10,7 +10,7 @@ import { normalizeLowercaseStringOrEmpty } from "openclaw/plugin-sdk/text-runtim
// Types & constants
// ---------------------------------------------------------------------------
export type BedrockEmbeddingClient = {
type BedrockEmbeddingClient = {
region: string;
model: string;
dimensions?: number;
@@ -162,7 +162,7 @@ async function loadCredentialProviderSdk(): Promise<AwsCredentialProviderSdk | n
const MODEL_PREFIX_RE = /^(?:bedrock|amazon-bedrock|aws)\//;
const REGION_RE = /bedrock-runtime\.([a-z0-9-]+)\./;
export function normalizeBedrockEmbeddingModel(model: string): string {
function normalizeBedrockEmbeddingModel(model: string): string {
const trimmed = model.trim();
return trimmed ? trimmed.replace(MODEL_PREFIX_RE, "") : DEFAULT_BEDROCK_EMBEDDING_MODEL;
}
@@ -337,7 +337,7 @@ export async function createBedrockEmbeddingProvider(
// Client resolution
// ---------------------------------------------------------------------------
export function resolveBedrockEmbeddingClient(
function resolveBedrockEmbeddingClient(
options: MemoryEmbeddingProviderCreateOptions,
): BedrockEmbeddingClient {
const model = normalizeBedrockEmbeddingModel(options.model);

View File

@@ -165,7 +165,7 @@ function toCanonicalAnthropicModelRef(ref: string): string {
: ref;
}
export function normalizeAnthropicProviderConfig<T extends { api?: string; models?: unknown[] }>(
function normalizeAnthropicProviderConfig<T extends { api?: string; models?: unknown[] }>(
providerConfig: T,
): T {
if (

View File

@@ -23,7 +23,7 @@ function resolveClaudeCliSyntheticAuth() {
};
}
export const anthropicProviderDiscovery: ProviderPlugin = {
const anthropicProviderDiscovery: ProviderPlugin = {
id: CLAUDE_CLI_BACKEND_ID,
label: "Claude CLI",
docsPath: "/providers/models",

View File

@@ -34,10 +34,6 @@ const arceeOpenRouterPresetAppliers = createModelCatalogPresetAppliers({
}),
});
export function applyArceeProviderConfig(cfg: OpenClawConfig): OpenClawConfig {
return arceePresetAppliers.applyProviderConfig(cfg);
}
export function applyArceeConfig(cfg: OpenClawConfig): OpenClawConfig {
return arceePresetAppliers.applyConfig(cfg);
}

View File

@@ -19,10 +19,6 @@ export function normalizeArceeOpenRouterBaseUrl(baseUrl: string | undefined): st
return undefined;
}
export function isArceeOpenRouterBaseUrl(baseUrl: string | undefined): boolean {
return normalizeArceeOpenRouterBaseUrl(baseUrl) === OPENROUTER_BASE_URL;
}
export function toArceeOpenRouterModelId(modelId: string): string {
const normalized = modelId.trim();
if (!normalized || normalized.startsWith("arcee/")) {

View File

@@ -12,7 +12,7 @@ export const DEFAULT_AZURE_SPEECH_AUDIO_FORMAT = "audio-24khz-48kbitrate-mono-mp
export const DEFAULT_AZURE_SPEECH_VOICE_NOTE_FORMAT = "ogg-24khz-16bit-mono-opus";
export const DEFAULT_AZURE_SPEECH_TELEPHONY_FORMAT = "raw-8khz-8bit-mono-mulaw";
export type AzureSpeechVoiceEntry = {
type AzureSpeechVoiceEntry = {
ShortName?: string;
DisplayName?: string;
LocalName?: string;
@@ -52,11 +52,11 @@ function azureSpeechUrl(params: {
return `${baseUrl}${params.path}`;
}
export function escapeXmlText(text: string): string {
function escapeXmlText(text: string): string {
return text.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;");
}
export function escapeXmlAttr(value: string): string {
function escapeXmlAttr(value: string): string {
return escapeXmlText(value).replace(/"/g, "&quot;").replace(/'/g, "&apos;");
}

View File

@@ -92,9 +92,3 @@ export function resolveBlueBubblesEffectiveAllowPrivateNetwork(params: {
}): boolean {
return resolveBlueBubblesEffectiveAllowPrivateNetworkFromConfig(params);
}
export function listEnabledBlueBubblesAccounts(cfg: OpenClawConfig): ResolvedBlueBubblesAccount[] {
return listBlueBubblesAccountIds(cfg)
.map((accountId) => resolveBlueBubblesAccount({ cfg, accountId }))
.filter((account) => account.enabled);
}

View File

@@ -68,7 +68,7 @@ export function createTimestampedNewMessagePayloadForTest(
});
}
export function createMessageReactionPayloadForTest(dataOverrides: Record<string, unknown> = {}) {
function createMessageReactionPayloadForTest(dataOverrides: Record<string, unknown> = {}) {
return {
type: "message-reaction",
data: {
@@ -128,7 +128,7 @@ export function createMockRequest(
return req;
}
export function createMockRequestForTest(params: WebhookRequestParams = {}): IncomingMessage {
function createMockRequestForTest(params: WebhookRequestParams = {}): IncomingMessage {
return createMockRequest(
params.method ?? "POST",
params.url ?? "/bluebubbles-webhook",
@@ -198,7 +198,7 @@ export function createHangingWebhookRequestForTest(
return { req, destroyMock };
}
export function createMockResponse(): ServerResponse & { body: string; statusCode: number } {
function createMockResponse(): ServerResponse & { body: string; statusCode: number } {
const res = {
statusCode: 200,
body: "",
@@ -210,7 +210,7 @@ export function createMockResponse(): ServerResponse & { body: string; statusCod
return res;
}
export async function flushAsync() {
async function flushAsync() {
for (let i = 0; i < 2; i += 1) {
await new Promise<void>((resolve) => setImmediate(resolve));
}
@@ -269,7 +269,7 @@ export function trackWebhookRegistrationForTest<T extends { unregister: () => vo
return registration;
}
export function registerWebhookTargetForTest(params: {
function registerWebhookTargetForTest(params: {
core: PluginRuntime;
account?: ResolvedBlueBubblesAccount;
config?: OpenClawConfig;
@@ -292,7 +292,7 @@ export function registerWebhookTargetForTest(params: {
});
}
export function registerWebhookTargetsForTest(params: {
function registerWebhookTargetsForTest(params: {
core: PluginRuntime;
accounts: Array<{
account: ResolvedBlueBubblesAccount;

View File

@@ -133,11 +133,6 @@ export function isMacOS26OrHigher(accountId?: string): boolean {
return major !== null && major >= 26;
}
/** Clear the server info cache (for testing) */
export function clearServerInfoCache(): void {
serverInfoCache.clear();
}
export async function probeBlueBubbles(params: {
baseUrl?: string | null;
password?: string | null;

View File

@@ -12,10 +12,6 @@ export function clearBlueBubblesRuntime(): void {
runtimeStore.clearRuntime();
}
export function tryGetBlueBubblesRuntime(): PluginRuntime | null {
return runtimeStore.tryGetRuntime();
}
export function getBlueBubblesRuntime(): PluginRuntime {
return runtimeStore.getRuntime();
}

View File

@@ -126,15 +126,6 @@ export type BlueBubblesAccountConfig = {
coalesceSameSenderDms?: boolean;
};
export type BlueBubblesConfig = Omit<BlueBubblesAccountConfig, "actions"> & {
/** Optional per-account BlueBubbles configuration (multi-account). */
accounts?: Record<string, BlueBubblesAccountConfig>;
/** Optional default account id when multiple accounts are configured. */
defaultAccount?: string;
/** Per-action tool gating (default: true for all). */
actions?: BlueBubblesActionConfig;
};
export type BlueBubblesSendTarget =
| { kind: "chat_id"; chatId: number }
| { kind: "chat_guid"; chatGuid: string }
@@ -173,19 +164,6 @@ export function normalizeBlueBubblesServerUrl(raw: string): string {
return withScheme.replace(/\/+$/, "");
}
export function buildBlueBubblesApiUrl(params: {
baseUrl: string;
path: string;
password?: string;
}): string {
const normalized = normalizeBlueBubblesServerUrl(params.baseUrl);
const url = new URL(params.path, `${normalized}/`);
if (params.password) {
url.searchParams.set("password", params.password);
}
return url.toString();
}
// Overridable guard for testing; production code uses fetchWithSsrFGuard.
let _fetchGuard = fetchWithSsrFGuard;

View File

@@ -4,9 +4,6 @@
"private": true,
"description": "OpenClaw Brave plugin",
"type": "module",
"dependencies": {
"typebox": "1.1.34"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*"
},

View File

@@ -2,7 +2,6 @@ import {
normalizeLowercaseStringOrEmpty,
normalizeOptionalString,
} from "openclaw/plugin-sdk/text-runtime";
import { Type } from "typebox";
export type BraveConfig = {
mode?: string;
@@ -119,7 +118,6 @@ const BRAVE_SEARCH_LANG_ALIASES: Record<string, string> = {
};
const BRAVE_UI_LANG_LOCALE = /^([a-z]{2})-([a-z]{2})$/i;
const MAX_BRAVE_SEARCH_COUNT = 10;
function normalizeBraveSearchLang(value: string | undefined): string | undefined {
if (!value) {
@@ -226,54 +224,3 @@ export function mapBraveLlmContextResults(
siteName: resolveSiteName(entry.url) || undefined,
}));
}
export function createBraveSchema() {
return Type.Object({
query: Type.String({ description: "Search query string." }),
count: Type.Optional(
Type.Number({
description: "Number of results to return (1-10).",
minimum: 1,
maximum: MAX_BRAVE_SEARCH_COUNT,
}),
),
country: Type.Optional(
Type.String({
description:
"2-letter country code for region-specific results (e.g., 'DE', 'US', 'ALL'). Default: 'US'.",
}),
),
language: Type.Optional(
Type.String({
description: "ISO 639-1 language code for results (e.g., 'en', 'de', 'fr').",
}),
),
freshness: Type.Optional(
Type.String({
description: "Filter by time: 'day' (24h), 'week', 'month', or 'year'.",
}),
),
date_after: Type.Optional(
Type.String({
description: "Only results published after this date (YYYY-MM-DD).",
}),
),
date_before: Type.Optional(
Type.String({
description: "Only results published before this date (YYYY-MM-DD).",
}),
),
search_lang: Type.Optional(
Type.String({
description:
"Brave language code for search results (e.g., 'en', 'de', 'en-gb', 'zh-hans', 'zh-hant', 'pt-br').",
}),
),
ui_lang: Type.Optional(
Type.String({
description:
"Locale code for UI elements in language-region format (e.g., 'en-US', 'de-DE', 'fr-FR', 'tr-TR'). Must include region subtag.",
}),
),
});
}

View File

@@ -21,14 +21,6 @@ export type BrowserActResponse = {
results?: Array<{ ok: boolean; error?: string }>;
};
export type BrowserDownloadPayload = {
url: string;
suggestedFilename: string;
path: string;
};
type BrowserDownloadResult = { ok: true; targetId: string; download: BrowserDownloadPayload };
const BROWSER_ACT_REQUEST_TIMEOUT_SLACK_MS = 5_000;
function normalizePositiveTimeoutMs(value: unknown): number | undefined {
@@ -52,21 +44,6 @@ function resolveBrowserActRequestTimeoutMs(req: BrowserActRequest): number {
return Math.max(...candidateTimeouts);
}
async function postDownloadRequest(
baseUrl: string | undefined,
route: "/wait/download" | "/download",
body: Record<string, unknown>,
profile?: string,
): Promise<BrowserDownloadResult> {
const q = buildProfileQuery(profile);
return await fetchBrowserJson<BrowserDownloadResult>(withBaseUrl(baseUrl, `${route}${q}`), {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(body),
timeoutMs: 20000,
});
}
export async function browserNavigate(
baseUrl: string | undefined,
opts: {
@@ -136,50 +113,6 @@ export async function browserArmFileChooser(
});
}
export async function browserWaitForDownload(
baseUrl: string | undefined,
opts: {
path?: string;
targetId?: string;
timeoutMs?: number;
profile?: string;
},
): Promise<BrowserDownloadResult> {
return await postDownloadRequest(
baseUrl,
"/wait/download",
{
targetId: opts.targetId,
path: opts.path,
timeoutMs: opts.timeoutMs,
},
opts.profile,
);
}
export async function browserDownload(
baseUrl: string | undefined,
opts: {
ref: string;
path: string;
targetId?: string;
timeoutMs?: number;
profile?: string;
},
): Promise<BrowserDownloadResult> {
return await postDownloadRequest(
baseUrl,
"/download",
{
targetId: opts.targetId,
ref: opts.ref,
path: opts.path,
timeoutMs: opts.timeoutMs,
},
opts.profile,
);
}
export async function browserAct(
baseUrl: string | undefined,
req: BrowserActRequest,

View File

@@ -1,11 +1,7 @@
import type { BrowserActionPathResult, BrowserActionTargetOk } from "./client-actions-types.js";
import type { BrowserActionPathResult } from "./client-actions-types.js";
import { buildProfileQuery, withBaseUrl } from "./client-actions-url.js";
import { fetchBrowserJson } from "./client-fetch.js";
import type {
BrowserConsoleMessage,
BrowserNetworkRequest,
BrowserPageError,
} from "./pw-session.js";
import type { BrowserConsoleMessage } from "./pw-session.js";
function buildQuerySuffix(params: Array<[string, string | boolean | undefined]>): string {
const query = new URLSearchParams();
@@ -51,137 +47,3 @@ export async function browserPdfSave(
timeoutMs: 20000,
});
}
export async function browserPageErrors(
baseUrl: string | undefined,
opts: { targetId?: string; clear?: boolean; profile?: string } = {},
): Promise<{ ok: true; targetId: string; url?: string; errors: BrowserPageError[] }> {
const suffix = buildQuerySuffix([
["targetId", opts.targetId],
["clear", typeof opts.clear === "boolean" ? opts.clear : undefined],
["profile", opts.profile],
]);
return await fetchBrowserJson<{
ok: true;
targetId: string;
url?: string;
errors: BrowserPageError[];
}>(withBaseUrl(baseUrl, `/errors${suffix}`), { timeoutMs: 20000 });
}
export async function browserRequests(
baseUrl: string | undefined,
opts: {
targetId?: string;
filter?: string;
clear?: boolean;
profile?: string;
} = {},
): Promise<{ ok: true; targetId: string; url?: string; requests: BrowserNetworkRequest[] }> {
const suffix = buildQuerySuffix([
["targetId", opts.targetId],
["filter", opts.filter],
["clear", typeof opts.clear === "boolean" ? opts.clear : undefined],
["profile", opts.profile],
]);
return await fetchBrowserJson<{
ok: true;
targetId: string;
url?: string;
requests: BrowserNetworkRequest[];
}>(withBaseUrl(baseUrl, `/requests${suffix}`), { timeoutMs: 20000 });
}
export async function browserTraceStart(
baseUrl: string | undefined,
opts: {
targetId?: string;
screenshots?: boolean;
snapshots?: boolean;
sources?: boolean;
profile?: string;
} = {},
): Promise<BrowserActionTargetOk> {
const q = buildProfileQuery(opts.profile);
return await fetchBrowserJson<BrowserActionTargetOk>(withBaseUrl(baseUrl, `/trace/start${q}`), {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
targetId: opts.targetId,
screenshots: opts.screenshots,
snapshots: opts.snapshots,
sources: opts.sources,
}),
timeoutMs: 20000,
});
}
export async function browserTraceStop(
baseUrl: string | undefined,
opts: { targetId?: string; path?: string; profile?: string } = {},
): Promise<BrowserActionPathResult> {
const q = buildProfileQuery(opts.profile);
return await fetchBrowserJson<BrowserActionPathResult>(withBaseUrl(baseUrl, `/trace/stop${q}`), {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ targetId: opts.targetId, path: opts.path }),
timeoutMs: 20000,
});
}
export async function browserHighlight(
baseUrl: string | undefined,
opts: { ref: string; targetId?: string; profile?: string },
): Promise<BrowserActionTargetOk> {
const q = buildProfileQuery(opts.profile);
return await fetchBrowserJson<BrowserActionTargetOk>(withBaseUrl(baseUrl, `/highlight${q}`), {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ targetId: opts.targetId, ref: opts.ref }),
timeoutMs: 20000,
});
}
export async function browserResponseBody(
baseUrl: string | undefined,
opts: {
url: string;
targetId?: string;
timeoutMs?: number;
maxChars?: number;
profile?: string;
},
): Promise<{
ok: true;
targetId: string;
response: {
url: string;
status?: number;
headers?: Record<string, string>;
body: string;
truncated?: boolean;
};
}> {
const q = buildProfileQuery(opts.profile);
return await fetchBrowserJson<{
ok: true;
targetId: string;
response: {
url: string;
status?: number;
headers?: Record<string, string>;
body: string;
truncated?: boolean;
};
}>(withBaseUrl(baseUrl, `/response/body${q}`), {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
targetId: opts.targetId,
url: opts.url,
timeoutMs: opts.timeoutMs,
maxChars: opts.maxChars,
}),
timeoutMs: 20000,
});
}

View File

@@ -1,278 +0,0 @@
import type { BrowserActionOk, BrowserActionTargetOk } from "./client-actions-types.js";
import { buildProfileQuery, withBaseUrl } from "./client-actions-url.js";
import { fetchBrowserJson } from "./client-fetch.js";
type TargetedProfileOptions = {
targetId?: string;
profile?: string;
};
type HttpCredentialsOptions = TargetedProfileOptions & {
username?: string;
password?: string;
clear?: boolean;
};
type GeolocationOptions = TargetedProfileOptions & {
latitude?: number;
longitude?: number;
accuracy?: number;
origin?: string;
clear?: boolean;
};
function buildStateQuery(params: { targetId?: string; key?: string; profile?: string }): string {
const query = new URLSearchParams();
if (params.targetId) {
query.set("targetId", params.targetId);
}
if (params.key) {
query.set("key", params.key);
}
if (params.profile) {
query.set("profile", params.profile);
}
const suffix = query.toString();
return suffix ? `?${suffix}` : "";
}
async function postProfileJson<T>(
baseUrl: string | undefined,
params: { path: string; profile?: string; body: unknown },
): Promise<T> {
const query = buildProfileQuery(params.profile);
return await fetchBrowserJson<T>(withBaseUrl(baseUrl, `${params.path}${query}`), {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(params.body),
timeoutMs: 20000,
});
}
async function postTargetedProfileJson(
baseUrl: string | undefined,
params: {
path: string;
opts: { targetId?: string; profile?: string };
body: Record<string, unknown>;
},
): Promise<BrowserActionTargetOk> {
return await postProfileJson<BrowserActionTargetOk>(baseUrl, {
path: params.path,
profile: params.opts.profile,
body: {
targetId: params.opts.targetId,
...params.body,
},
});
}
export async function browserCookies(
baseUrl: string | undefined,
opts: { targetId?: string; profile?: string } = {},
): Promise<{ ok: true; targetId: string; cookies: unknown[] }> {
const suffix = buildStateQuery({ targetId: opts.targetId, profile: opts.profile });
return await fetchBrowserJson<{
ok: true;
targetId: string;
cookies: unknown[];
}>(withBaseUrl(baseUrl, `/cookies${suffix}`), { timeoutMs: 20000 });
}
export async function browserCookiesSet(
baseUrl: string | undefined,
opts: {
cookie: Record<string, unknown>;
targetId?: string;
profile?: string;
},
): Promise<BrowserActionTargetOk> {
return await postProfileJson<BrowserActionTargetOk>(baseUrl, {
path: "/cookies/set",
profile: opts.profile,
body: { targetId: opts.targetId, cookie: opts.cookie },
});
}
export async function browserCookiesClear(
baseUrl: string | undefined,
opts: { targetId?: string; profile?: string } = {},
): Promise<BrowserActionTargetOk> {
return await postProfileJson<BrowserActionTargetOk>(baseUrl, {
path: "/cookies/clear",
profile: opts.profile,
body: { targetId: opts.targetId },
});
}
export async function browserStorageGet(
baseUrl: string | undefined,
opts: {
kind: "local" | "session";
key?: string;
targetId?: string;
profile?: string;
},
): Promise<{ ok: true; targetId: string; values: Record<string, string> }> {
const suffix = buildStateQuery({ targetId: opts.targetId, key: opts.key, profile: opts.profile });
return await fetchBrowserJson<{
ok: true;
targetId: string;
values: Record<string, string>;
}>(withBaseUrl(baseUrl, `/storage/${opts.kind}${suffix}`), { timeoutMs: 20000 });
}
export async function browserStorageSet(
baseUrl: string | undefined,
opts: {
kind: "local" | "session";
key: string;
value: string;
targetId?: string;
profile?: string;
},
): Promise<BrowserActionTargetOk> {
return await postProfileJson<BrowserActionTargetOk>(baseUrl, {
path: `/storage/${opts.kind}/set`,
profile: opts.profile,
body: {
targetId: opts.targetId,
key: opts.key,
value: opts.value,
},
});
}
export async function browserStorageClear(
baseUrl: string | undefined,
opts: { kind: "local" | "session"; targetId?: string; profile?: string },
): Promise<BrowserActionTargetOk> {
return await postProfileJson<BrowserActionTargetOk>(baseUrl, {
path: `/storage/${opts.kind}/clear`,
profile: opts.profile,
body: { targetId: opts.targetId },
});
}
export async function browserSetOffline(
baseUrl: string | undefined,
opts: { offline: boolean; targetId?: string; profile?: string },
): Promise<BrowserActionTargetOk> {
return await postProfileJson<BrowserActionTargetOk>(baseUrl, {
path: "/set/offline",
profile: opts.profile,
body: { targetId: opts.targetId, offline: opts.offline },
});
}
export async function browserSetHeaders(
baseUrl: string | undefined,
opts: {
headers: Record<string, string>;
targetId?: string;
profile?: string;
},
): Promise<BrowserActionTargetOk> {
return await postProfileJson<BrowserActionTargetOk>(baseUrl, {
path: "/set/headers",
profile: opts.profile,
body: { targetId: opts.targetId, headers: opts.headers },
});
}
export async function browserSetHttpCredentials(
baseUrl: string | undefined,
opts: HttpCredentialsOptions = {},
): Promise<BrowserActionTargetOk> {
return await postTargetedProfileJson(baseUrl, {
path: "/set/credentials",
opts,
body: {
username: opts.username,
password: opts.password,
clear: opts.clear,
},
});
}
export async function browserSetGeolocation(
baseUrl: string | undefined,
opts: GeolocationOptions = {},
): Promise<BrowserActionTargetOk> {
return await postTargetedProfileJson(baseUrl, {
path: "/set/geolocation",
opts,
body: {
latitude: opts.latitude,
longitude: opts.longitude,
accuracy: opts.accuracy,
origin: opts.origin,
clear: opts.clear,
},
});
}
export async function browserSetMedia(
baseUrl: string | undefined,
opts: {
colorScheme: "dark" | "light" | "no-preference" | "none";
targetId?: string;
profile?: string;
},
): Promise<BrowserActionTargetOk> {
return await postProfileJson<BrowserActionTargetOk>(baseUrl, {
path: "/set/media",
profile: opts.profile,
body: {
targetId: opts.targetId,
colorScheme: opts.colorScheme,
},
});
}
export async function browserSetTimezone(
baseUrl: string | undefined,
opts: { timezoneId: string; targetId?: string; profile?: string },
): Promise<BrowserActionTargetOk> {
return await postProfileJson<BrowserActionTargetOk>(baseUrl, {
path: "/set/timezone",
profile: opts.profile,
body: {
targetId: opts.targetId,
timezoneId: opts.timezoneId,
},
});
}
export async function browserSetLocale(
baseUrl: string | undefined,
opts: { locale: string; targetId?: string; profile?: string },
): Promise<BrowserActionTargetOk> {
return await postProfileJson<BrowserActionTargetOk>(baseUrl, {
path: "/set/locale",
profile: opts.profile,
body: { targetId: opts.targetId, locale: opts.locale },
});
}
export async function browserSetDevice(
baseUrl: string | undefined,
opts: { name: string; targetId?: string; profile?: string },
): Promise<BrowserActionTargetOk> {
return await postProfileJson<BrowserActionTargetOk>(baseUrl, {
path: "/set/device",
profile: opts.profile,
body: { targetId: opts.targetId, name: opts.name },
});
}
export async function browserClearPermissions(
baseUrl: string | undefined,
opts: { targetId?: string; profile?: string } = {},
): Promise<BrowserActionOk> {
return await postProfileJson<BrowserActionOk>(baseUrl, {
path: "/set/geolocation",
profile: opts.profile,
body: { targetId: opts.targetId, clear: true },
});
}

View File

@@ -15,5 +15,3 @@ export type BrowserActionPathResult = {
labelsCount?: number;
labelsSkipped?: number;
};
export type BrowserActionTargetOk = { ok: true; targetId: string };

View File

@@ -1,4 +1,3 @@
export * from "./client-actions-core.js";
export * from "./client-actions-observe.js";
export * from "./client-actions-state.js";
export * from "./client-actions-types.js";

View File

@@ -23,7 +23,6 @@ type HarnessState = {
attachOnly?: boolean;
}
>;
createTargetId: string | null;
prevGatewayPort: string | undefined;
prevGatewayToken: string | undefined;
prevGatewayPassword: string | undefined;
@@ -37,7 +36,6 @@ const state: HarnessState = {
cfgEvaluateEnabled: true,
cfgDefaultProfile: "openclaw",
cfgProfiles: {},
createTargetId: null,
prevGatewayPort: undefined,
prevGatewayToken: undefined,
prevGatewayPassword: undefined,
@@ -59,14 +57,6 @@ export function restoreGatewayPortEnv(prevGatewayPort: string | undefined): void
process.env.OPENCLAW_GATEWAY_PORT = prevGatewayPort;
}
export function setBrowserControlServerCreateTargetId(targetId: string | null): void {
state.createTargetId = targetId;
}
export function setBrowserControlServerAttachOnly(attachOnly: boolean): void {
state.cfgAttachOnly = attachOnly;
}
export function setBrowserControlServerEvaluateEnabled(enabled: boolean): void {
state.cfgEvaluateEnabled = enabled;
}
@@ -360,10 +350,6 @@ const chromeMcpMocks = vi.hoisted(() => ({
uploadChromeMcpFile: vi.fn(async () => {}),
}));
export function getChromeMcpMocks(): Record<string, MockFn> {
return chromeMcpMocks as unknown as Record<string, MockFn>;
}
const chromeUserDataDir = vi.hoisted(() => ({ dir: "/tmp/openclaw" }));
installChromeUserDataDirHooks(chromeUserDataDir);
@@ -435,10 +421,6 @@ vi.mock("../config/config.js", async () => {
const launchCalls = vi.hoisted(() => [] as Array<{ port: number }>);
export function getLaunchCalls() {
return launchCalls;
}
vi.mock("./chrome.js", () => ({
isChromeCdpReady: vi.fn(async () => state.reachable),
isChromeReachable: vi.fn(async () => state.reachable),
@@ -535,7 +517,6 @@ export async function resetBrowserControlServerTestContext(): Promise<void> {
state.cfgEvaluateEnabled = true;
state.cfgDefaultProfile = "openclaw";
state.cfgProfiles = defaultProfilesForState(state.testPort);
state.createTargetId = null;
mockClearAll(pwMocks);
mockClearAll(cdpMocks);
@@ -583,9 +564,6 @@ export function installBrowserControlServerHooks() {
beforeEach(async () => {
vi.useRealTimers();
cdpMocks.createTargetViaCdp.mockImplementation(async () => {
if (state.createTargetId) {
return { targetId: state.createTargetId };
}
throw new Error("cdp disabled");
});

View File

@@ -15,8 +15,6 @@ const BYTEPLUS_CODING_MANIFEST_PROVIDER = buildManifestModelProviderConfig({
export const BYTEPLUS_BASE_URL = BYTEPLUS_MANIFEST_PROVIDER.baseUrl;
export const BYTEPLUS_CODING_BASE_URL = BYTEPLUS_CODING_MANIFEST_PROVIDER.baseUrl;
export const BYTEPLUS_DEFAULT_MODEL_ID = "seed-1-8-251228";
export const BYTEPLUS_CODING_DEFAULT_MODEL_ID = "ark-code-latest";
export const BYTEPLUS_DEFAULT_MODEL_REF = `byteplus/${BYTEPLUS_DEFAULT_MODEL_ID}`;
export const BYTEPLUS_DEFAULT_COST = {
input: 0.0001,

View File

@@ -1,7 +1,7 @@
import type { ProviderPlugin } from "openclaw/plugin-sdk/provider-model-shared";
import { buildBytePlusCodingProvider, buildBytePlusProvider } from "./provider-catalog.js";
export const bytePlusProviderDiscovery: ProviderPlugin[] = [
const bytePlusProviderDiscovery: ProviderPlugin[] = [
{
id: "byteplus",
label: "BytePlus",

View File

@@ -21,10 +21,6 @@ const cerebrasPresetAppliers = createModelCatalogPresetAppliers({
}),
});
export function applyCerebrasProviderConfig(cfg: OpenClawConfig): OpenClawConfig {
return cerebrasPresetAppliers.applyProviderConfig(cfg);
}
export function applyCerebrasConfig(cfg: OpenClawConfig): OpenClawConfig {
return cerebrasPresetAppliers.applyConfig(cfg);
}

View File

@@ -8,7 +8,7 @@ import {
resolveCloudflareAiGatewayBaseUrl,
} from "./models.js";
export type CloudflareAiGatewayCredential =
type CloudflareAiGatewayCredential =
| {
type?: string;
keyRef?: unknown;
@@ -20,9 +20,7 @@ export type CloudflareAiGatewayCredential =
}
| undefined;
export function resolveCloudflareAiGatewayApiKey(
cred: CloudflareAiGatewayCredential,
): string | undefined {
function resolveCloudflareAiGatewayApiKey(cred: CloudflareAiGatewayCredential): string | undefined {
if (!cred || cred.type !== "api_key") {
return undefined;
}
@@ -35,7 +33,7 @@ export function resolveCloudflareAiGatewayApiKey(
return normalizeOptionalString(cred.key);
}
export function resolveCloudflareAiGatewayMetadata(cred: CloudflareAiGatewayCredential): {
function resolveCloudflareAiGatewayMetadata(cred: CloudflareAiGatewayCredential): {
accountId?: string;
gatewayId?: string;
} {

View File

@@ -1,18 +1,11 @@
import {
GPT5_BEHAVIOR_CONTRACT,
GPT5_FRIENDLY_PROMPT_OVERLAY,
isGpt5ModelId,
renderGpt5PromptOverlay,
resolveGpt5SystemPromptContribution,
} from "openclaw/plugin-sdk/provider-model-shared";
export const CODEX_FRIENDLY_PROMPT_OVERLAY = GPT5_FRIENDLY_PROMPT_OVERLAY;
export const CODEX_GPT5_BEHAVIOR_CONTRACT = GPT5_BEHAVIOR_CONTRACT;
export function shouldApplyCodexPromptOverlay(params: { modelId?: string }): boolean {
return isGpt5ModelId(params.modelId);
}
export function resolveCodexSystemPromptContribution(
params: Parameters<typeof resolveGpt5SystemPromptContribution>[0],
) {

View File

@@ -2,14 +2,11 @@ import type {
ClientRequest as GeneratedClientRequest,
InitializeParams as GeneratedInitializeParams,
InitializeResponse as GeneratedInitializeResponse,
ServerNotification as GeneratedServerNotification,
ServerRequest as GeneratedServerRequest,
ServiceTier as GeneratedServiceTier,
v2,
} from "./protocol-generated/typescript/index.js";
import type { JsonValue as GeneratedJsonValue } from "./protocol-generated/typescript/serde_json/JsonValue.js";
export type JsonPrimitive = null | boolean | number | string;
export type JsonValue = GeneratedJsonValue;
export type JsonObject = { [key: string]: JsonValue };
export type CodexServiceTier = GeneratedServiceTier;
@@ -65,29 +62,17 @@ export type CodexTurnStartParams = v2.TurnStartParams;
export type CodexSandboxPolicy = v2.SandboxPolicy;
export type CodexTurnSteerParams = v2.TurnSteerParams;
export type CodexTurnInterruptParams = {
threadId: string;
turnId: string;
};
export type CodexTurnStartResponse = v2.TurnStartResponse;
export type CodexThread = v2.Thread;
export type CodexTurn = v2.Turn;
export type CodexThreadItem = v2.ThreadItem;
export type CodexKnownServerNotification = GeneratedServerNotification;
export type CodexServerNotification = {
method: string;
params?: JsonValue;
};
export type CodexKnownServerRequest = GeneratedServerRequest;
export type CodexDynamicToolCallParams = v2.DynamicToolCallParams;
export type CodexDynamicToolCallResponse = v2.DynamicToolCallResponse;
@@ -123,10 +108,3 @@ export function isJsonObject(value: JsonValue | undefined): value is JsonObject
export function isRpcResponse(message: RpcMessage): message is RpcResponse {
return "id" in message && !("method" in message);
}
export function coerceJsonObject(value: unknown): JsonObject | undefined {
if (!value || typeof value !== "object" || Array.isArray(value)) {
return undefined;
}
return value as JsonObject;
}

View File

@@ -9,6 +9,7 @@ import {
} from "openclaw/plugin-sdk/agent-harness";
import {
buildAgentRuntimePlan,
embeddedAgentLog,
nativeHookRelayTesting,
onAgentEvent,
resetAgentEventsForTest,
@@ -303,6 +304,20 @@ function createMessageDynamicTool(
};
}
function createNamedDynamicTool(
name: string,
): Parameters<typeof startOrResumeThread>[0]["dynamicTools"][number] {
return {
name,
description: `${name} test tool`,
inputSchema: {
type: "object",
properties: {},
additionalProperties: false,
},
};
}
function extractRelayIdFromThreadRequest(params: unknown): string {
const command = (
params as {
@@ -371,26 +386,40 @@ describe("runCodexAppServerAttempt", () => {
it("starts Codex threads without duplicate OpenClaw workspace tools by default", async () => {
const sessionFile = path.join(tempDir, "session.jsonl");
const workspaceDir = path.join(tempDir, "workspace");
const harness = createStartedThreadHarness();
const params = {
...createParams(sessionFile, workspaceDir),
disableTools: false,
provider: "openai",
modelId: "gpt-5.5",
model: createCodexTestModel("openai"),
agentDir: tempDir,
senderIsOwner: true,
} as EmbeddedRunAttemptParams;
const appServer = createThreadLifecycleAppServerOptions();
const request = vi.fn(async (method: string, _params: unknown) => {
if (method === "thread/start") {
return threadStartResult();
}
throw new Error(`unexpected method: ${method}`);
});
const dynamicTools = __testing.applyCodexDynamicToolProfile(
[
"read",
"write",
"edit",
"apply_patch",
"exec",
"process",
"update_plan",
"web_search",
"message",
].map(createNamedDynamicTool),
{},
);
const run = runCodexAppServerAttempt(params);
await harness.waitForMethod("turn/start");
await harness.completeTurn({ threadId: "thread-1", turnId: "turn-1" });
await run;
await startOrResumeThread({
client: { request } as never,
params: createParams(sessionFile, workspaceDir),
cwd: workspaceDir,
dynamicTools,
appServer,
});
const startRequest = harness.requests.find((request) => request.method === "thread/start");
const startRequest = request.mock.calls.find(([method]) => method === "thread/start");
const dynamicToolNames = (
(startRequest?.params as { dynamicTools?: Array<{ name: string }> } | undefined)
?.dynamicTools ?? []
(startRequest?.[1] as { dynamicTools?: Array<{ name: string }> } | undefined)?.dynamicTools ??
[]
).map((tool) => tool.name);
expect(dynamicToolNames).toContain("message");
@@ -437,13 +466,61 @@ describe("runCodexAppServerAttempt", () => {
await expect(response).resolves.toEqual({
success: false,
contentItems: [
{ type: "inputText", text: "OpenClaw dynamic tool call timed out after 1ms." },
{
type: "inputText",
text: "OpenClaw dynamic tool call timed out after 1ms while running tool message.",
},
],
});
expect(capturedSignal?.aborted).toBe(true);
expect(onTimeout).toHaveBeenCalledTimes(1);
});
it("logs process poll timeout context separately from session idle", async () => {
vi.useFakeTimers();
const warn = vi.spyOn(embeddedAgentLog, "warn").mockImplementation(() => undefined);
const response = __testing.handleDynamicToolCallWithTimeout({
call: {
threadId: "thread-1",
turnId: "turn-1",
callId: "call-timeout",
namespace: null,
tool: "process",
arguments: { action: "poll", sessionId: "rapid-crustacean", timeout: 30_000 },
},
toolBridge: {
handleToolCall: vi.fn(() => new Promise<never>(() => undefined)),
},
signal: new AbortController().signal,
timeoutMs: 1,
});
await vi.advanceTimersByTimeAsync(1);
await expect(response).resolves.toEqual({
success: false,
contentItems: [
{
type: "inputText",
text: "OpenClaw dynamic tool call timed out after 1ms while waiting for process action=poll sessionId=rapid-crustacean. This is a tool RPC timeout, not a session idle timeout.",
},
],
});
expect(warn).toHaveBeenCalledWith("codex dynamic tool call timed out", {
tool: "process",
toolCallId: "call-timeout",
threadId: "thread-1",
turnId: "turn-1",
timeoutMs: 1,
timeoutKind: "codex_dynamic_tool_rpc",
processAction: "poll",
processSessionId: "rapid-crustacean",
processRequestedTimeoutMs: 30_000,
consoleMessage:
"codex process tool timeout: action=poll sessionId=rapid-crustacean toolTimeoutMs=1 requestedWaitMs=30000; per-tool-call watchdog, not session idle; repeated lines usually mean process-poll retry churn, not model progress",
});
});
it("releases the session when Codex never completes after a dynamic tool response", async () => {
let handleRequest:
| ((request: { id: string; method: string; params?: unknown }) => Promise<unknown>)

View File

@@ -102,6 +102,7 @@ const CODEX_NATIVE_FIRST_DYNAMIC_TOOL_EXCLUDES = [
"process",
"update_plan",
] as const;
const LOG_FIELD_MAX_LENGTH = 160;
type OpenClawCodingToolsOptions = NonNullable<
Parameters<(typeof import("openclaw/plugin-sdk/agent-harness"))["createOpenClawCodingTools"]>[0]
@@ -144,6 +145,93 @@ type CodexSteeringQueueOptions = {
debounceMs?: number;
};
type DynamicToolTimeoutDetails = {
responseMessage: string;
consoleMessage: string;
meta: Record<string, unknown>;
};
function normalizeLogField(value: unknown): string | undefined {
if (typeof value !== "string") {
return undefined;
}
const normalized = value
.replaceAll(String.fromCharCode(27), " ")
.replaceAll("\r", " ")
.replaceAll("\n", " ")
.replaceAll("\t", " ")
.trim();
if (!normalized) {
return undefined;
}
return normalized.length > LOG_FIELD_MAX_LENGTH
? `${normalized.slice(0, LOG_FIELD_MAX_LENGTH - 3)}...`
: normalized;
}
function readNumericTimeoutMs(value: unknown): number | undefined {
if (typeof value === "number" && Number.isFinite(value)) {
return Math.max(0, Math.floor(value));
}
if (typeof value === "string") {
const parsed = Number.parseInt(value.trim(), 10);
if (Number.isFinite(parsed)) {
return Math.max(0, Math.floor(parsed));
}
}
return undefined;
}
function formatDynamicToolTimeoutDetails(params: {
call: CodexDynamicToolCallParams;
timeoutMs: number;
}): DynamicToolTimeoutDetails {
const tool = normalizeLogField(params.call.tool) ?? "unknown";
const baseMeta: Record<string, unknown> = {
tool: params.call.tool,
toolCallId: params.call.callId,
threadId: params.call.threadId,
turnId: params.call.turnId,
timeoutMs: params.timeoutMs,
timeoutKind: "codex_dynamic_tool_rpc",
};
if (tool !== "process" || !isJsonObject(params.call.arguments)) {
return {
responseMessage: `OpenClaw dynamic tool call timed out after ${params.timeoutMs}ms while running tool ${tool}.`,
consoleMessage: `codex dynamic tool timeout: tool=${tool} toolTimeoutMs=${params.timeoutMs}; per-tool-call watchdog, not session idle`,
meta: baseMeta,
};
}
const action = normalizeLogField(params.call.arguments.action);
const sessionId = normalizeLogField(params.call.arguments.sessionId);
const requestedTimeoutMs = readNumericTimeoutMs(params.call.arguments.timeout);
const actionPart = action ? ` action=${action}` : "";
const sessionPart = sessionId ? ` sessionId=${sessionId}` : "";
const requestedPart =
requestedTimeoutMs === undefined ? "" : ` requestedWaitMs=${requestedTimeoutMs}`;
const retryHint =
action === "poll"
? "; repeated lines usually mean process-poll retry churn, not model progress"
: "";
const responseTarget =
action || sessionId
? ` while waiting for process${actionPart}${sessionPart}`
: " while waiting for the process tool";
return {
responseMessage: `OpenClaw dynamic tool call timed out after ${params.timeoutMs}ms${responseTarget}. This is a tool RPC timeout, not a session idle timeout.`,
consoleMessage: `codex process tool timeout:${actionPart}${sessionPart} toolTimeoutMs=${params.timeoutMs}${requestedPart}; per-tool-call watchdog, not session idle${retryHint}`,
meta: {
...baseMeta,
processAction: action,
processSessionId: sessionId,
processRequestedTimeoutMs: requestedTimeoutMs,
},
};
}
function createCodexSteeringQueue(params: {
client: CodexAppServerClient;
threadId: string;
@@ -1127,17 +1215,14 @@ async function handleDynamicToolCallWithTimeout(params: {
const timeoutMs = Math.max(1, Math.min(CODEX_DYNAMIC_TOOL_TIMEOUT_MS, params.timeoutMs));
timeout = setTimeout(() => {
timedOut = true;
const message = `OpenClaw dynamic tool call timed out after ${timeoutMs}ms.`;
controller.abort(new Error(message));
const timeoutDetails = formatDynamicToolTimeoutDetails({ call: params.call, timeoutMs });
controller.abort(new Error(timeoutDetails.responseMessage));
params.onTimeout?.();
embeddedAgentLog.warn("codex dynamic tool call timed out", {
tool: params.call.tool,
toolCallId: params.call.callId,
threadId: params.call.threadId,
turnId: params.call.turnId,
timeoutMs,
...timeoutDetails.meta,
consoleMessage: timeoutDetails.consoleMessage,
});
resolve(failedDynamicToolResponse(message));
resolve(failedDynamicToolResponse(timeoutDetails.responseMessage));
}, timeoutMs);
timeout.unref?.();
});

View File

@@ -352,9 +352,3 @@ function enqueueBoundTurn<T>(key: string, run: () => Promise<T>): Promise<T> {
});
return next;
}
export const __testing = {
resetQueues() {
getGlobalState().queues.clear();
},
};

View File

@@ -253,9 +253,3 @@ function permissionsForMode(mode: PermissionsMode): {
? { approvalPolicy: "never", sandbox: "danger-full-access" }
: { approvalPolicy: "on-request", sandbox: "workspace-write" };
}
export const __testing = {
resetActiveTurns() {
getActiveTurns().clear();
},
};

View File

@@ -4,10 +4,6 @@ import {
type JsonObject,
} from "./app-server/protocol.js";
export type CodexConversationTurnCollector = ReturnType<
typeof createCodexConversationTurnCollector
>;
export function createCodexConversationTurnCollector(threadId: string) {
let turnId: string | undefined;
let completed = false;

View File

@@ -39,11 +39,11 @@ const DEFAULT_TIMEOUT_MS = 5 * 60_000;
export const DEFAULT_COMFY_MODEL = "workflow";
export type ComfyMode = "local" | "cloud";
export type ComfyCapability = "image" | "music" | "video";
export type ComfyOutputKind = "audio" | "gifs" | "images" | "videos";
export type ComfyWorkflow = Record<string, unknown>;
export type ComfyProviderConfig = Record<string, unknown>;
type ComfyMode = "local" | "cloud";
type ComfyCapability = "image" | "music" | "video";
type ComfyOutputKind = "audio" | "gifs" | "images" | "videos";
type ComfyWorkflow = Record<string, unknown>;
type ComfyProviderConfig = Record<string, unknown>;
type ComfyFetchGuardParams = Parameters<typeof fetchWithSsrFGuard>[0];
type ComfyDispatcherPolicy = ComfyFetchGuardParams["dispatcherPolicy"];
type ComfyPromptResponse = {
@@ -84,20 +84,20 @@ type ComfyApiKeyResolution =
status: "configured_unavailable";
};
export type ComfySourceImage = {
type ComfySourceImage = {
buffer: Buffer;
mimeType: string;
fileName?: string;
};
export type ComfyGeneratedAsset = {
type ComfyGeneratedAsset = {
buffer: Buffer;
mimeType: string;
fileName: string;
nodeId: string;
};
export type ComfyWorkflowResult = {
type ComfyWorkflowResult = {
assets: ComfyGeneratedAsset[];
model: string;
promptId: string;
@@ -137,7 +137,7 @@ function stripNestedCapabilityConfig(config: ComfyProviderConfig): ComfyProvider
return next;
}
export function getComfyCapabilityConfig(
function getComfyCapabilityConfig(
config: ComfyProviderConfig,
capability: ComfyCapability,
): ComfyProviderConfig {
@@ -149,7 +149,7 @@ export function getComfyCapabilityConfig(
return { ...shared, ...nested };
}
export function resolveComfyMode(config: ComfyProviderConfig): ComfyMode {
function resolveComfyMode(config: ComfyProviderConfig): ComfyMode {
return normalizeOptionalString(config.mode) === "cloud" ? "cloud" : "local";
}

View File

@@ -10,8 +10,8 @@ export const DEEPINFRA_MODELS_URL = `${DEEPINFRA_BASE_URL}/models?sort_by=opencl
export const DEEPINFRA_DEFAULT_MODEL_ID = "deepseek-ai/DeepSeek-V3.2";
export const DEEPINFRA_DEFAULT_MODEL_REF = `deepinfra/${DEEPINFRA_DEFAULT_MODEL_ID}`;
export const DEEPINFRA_DEFAULT_CONTEXT_WINDOW = 128000;
export const DEEPINFRA_DEFAULT_MAX_TOKENS = 8192;
const DEEPINFRA_DEFAULT_CONTEXT_WINDOW = 128000;
const DEEPINFRA_DEFAULT_MAX_TOKENS = 8192;
export const DEEPINFRA_MODEL_CATALOG: ModelDefinitionConfig[] = [
{

View File

@@ -7,7 +7,7 @@ import { buildDeepSeekModelDefinition, DEEPSEEK_BASE_URL, DEEPSEEK_MODEL_CATALOG
export const DEEPSEEK_DEFAULT_MODEL_REF = "deepseek/deepseek-v4-flash";
export function applyDeepSeekProviderConfig(cfg: OpenClawConfig): OpenClawConfig {
function applyDeepSeekProviderConfig(cfg: OpenClawConfig): OpenClawConfig {
const models = { ...cfg.agents?.defaults?.models };
models[DEEPSEEK_DEFAULT_MODEL_REF] = {
...models[DEEPSEEK_DEFAULT_MODEL_REF],

View File

@@ -1,7 +1,7 @@
import type { ProviderPlugin } from "openclaw/plugin-sdk/provider-model-shared";
import { buildDeepSeekProvider } from "./provider-catalog.js";
export const deepSeekProviderDiscovery: ProviderPlugin = {
const deepSeekProviderDiscovery: ProviderPlugin = {
id: "deepseek",
label: "DeepSeek",
docsPath: "/providers/deepseek",

View File

@@ -15,7 +15,6 @@ import {
type DiffLayout,
type DiffMode,
type DiffOutputFormat,
type DiffPresentationDefaults,
type DiffTheme,
type DiffToolDefaults,
} from "./types.js";
@@ -314,31 +313,6 @@ export function resolveDiffsPluginViewerBaseUrl(config: unknown): string | undef
return normalized ? normalizeViewerBaseUrl(normalized) : undefined;
}
export function toPresentationDefaults(defaults: DiffToolDefaults): DiffPresentationDefaults {
const {
fontFamily,
fontSize,
lineSpacing,
layout,
showLineNumbers,
diffIndicators,
wordWrap,
background,
theme,
} = defaults;
return {
fontFamily,
fontSize,
lineSpacing,
layout,
showLineNumbers,
diffIndicators,
wordWrap,
background,
theme,
};
}
function normalizeFontFamily(fontFamily?: string): string {
const normalized = fontFamily?.trim();
return normalized || DEFAULT_DIFFS_TOOL_DEFAULTS.fontFamily;

View File

@@ -1,4 +1,3 @@
import type { AgentToolResult } from "@mariozechner/pi-agent-core";
import { resolveDefaultDiscordAccountId } from "../accounts.js";
import { createDiscordRuntimeAccountContext } from "../client.js";
import {
@@ -33,10 +32,6 @@ export type DiscordMessagingActionContext = {
normalizeMessage: (message: unknown) => unknown;
};
export type DiscordMessagingActionHandler = (
ctx: DiscordMessagingActionContext,
) => Promise<AgentToolResult<unknown> | undefined>;
export function createDiscordMessagingActionContext(params: {
action: string;
input: Record<string, unknown>;

View File

@@ -32,14 +32,6 @@ export function extractDiscordChannelId(sessionKey?: string | null): string | nu
return match ? match[1] : null;
}
export function extractDiscordThreadId(sessionKey?: string | null): string | null {
if (!sessionKey) {
return null;
}
const match = sessionKey.match(/discord:(?:channel|group):\d+:thread:(\d+)/);
return match ? match[1] : null;
}
function extractDiscordSessionKind(sessionKey?: string | null): "channel" | "group" | "dm" | null {
if (!sessionKey) {
return null;
@@ -220,16 +212,8 @@ export function createDiscordNativeApprovalAdapter(
}
let cachedDiscordApprovalCapability: ReturnType<typeof createDiscordApprovalCapability> | undefined;
let cachedDiscordNativeApprovalAdapter:
| ReturnType<typeof createDiscordNativeApprovalAdapter>
| undefined;
export function getDiscordApprovalCapability() {
cachedDiscordApprovalCapability ??= createDiscordApprovalCapability();
return cachedDiscordApprovalCapability;
}
export function getDiscordNativeApprovalAdapter() {
cachedDiscordNativeApprovalAdapter ??= createDiscordNativeApprovalAdapter();
return cachedDiscordNativeApprovalAdapter;
}

View File

@@ -109,8 +109,6 @@ export type DiscordModalFieldSpec = {
style?: "short" | "paragraph";
};
export type DiscordComponentModalFieldSpec = DiscordModalFieldSpec;
export type DiscordModalSpec = {
title: string;
callbackData?: string;
@@ -165,8 +163,6 @@ export type DiscordModalFieldDefinition = {
style?: "short" | "paragraph";
};
export type DiscordComponentModalFieldDefinition = DiscordModalFieldDefinition;
export type DiscordModalEntry = {
id: string;
title: string;
@@ -182,8 +178,6 @@ export type DiscordModalEntry = {
allowedUsers?: string[];
};
export type DiscordComponentModalEntry = DiscordModalEntry;
export type DiscordComponentBuildResult = {
components: TopLevelComponents[];
entries: DiscordComponentEntry[];

View File

@@ -147,6 +147,29 @@ describe("discord config schema", () => {
expect(cfg.voice?.model).toBe("openai/gpt-5.4-mini");
});
it("accepts Discord voice timing overrides", () => {
const cfg = expectValidDiscordConfig({
voice: {
connectTimeoutMs: 45_000,
reconnectGraceMs: 20_000,
},
});
expect(cfg.voice?.connectTimeoutMs).toBe(45_000);
expect(cfg.voice?.reconnectGraceMs).toBe(20_000);
});
it("rejects invalid Discord voice timing overrides", () => {
for (const voice of [
{ connectTimeoutMs: 0 },
{ connectTimeoutMs: 120_001 },
{ reconnectGraceMs: -1 },
{ reconnectGraceMs: 1.5 },
]) {
expectInvalidDiscordConfig({ voice });
}
});
it("coerces safe-integer numeric allowlist entries to strings", () => {
const cfg = expectValidDiscordConfig({
allowFrom: [123],

View File

@@ -161,6 +161,14 @@ export const discordChannelConfigUiHints = {
label: "Discord Voice Decrypt Failure Tolerance",
help: "Consecutive decrypt failures before DAVE attempts session recovery (passed to @discordjs/voice; default: 24).",
},
"voice.connectTimeoutMs": {
label: "Discord Voice Connect Timeout (ms)",
help: "Initial @discordjs/voice Ready wait before a join is treated as failed. Default: 30000.",
},
"voice.reconnectGraceMs": {
label: "Discord Voice Reconnect Grace (ms)",
help: "Grace period for a disconnected Discord voice session to enter Signalling or Connecting before OpenClaw destroys it. Default: 15000.",
},
"voice.tts": {
label: "Discord Voice Text-to-Speech",
help: "Optional TTS overrides for Discord voice playback (merged with messages.tts).",

View File

@@ -2,7 +2,6 @@ export { resolveInteractionContextWithDmAuth } from "./agent-components-dm-auth.
export {
ensureAgentComponentInteractionAllowed,
ensureComponentUserAllowed,
ensureGuildComponentMemberAllowed,
resolveAuthorizedComponentInteraction,
resolveComponentCommandAuthorized,
} from "./agent-components-guild-auth.js";

View File

@@ -32,7 +32,7 @@ function resolveComponentRuntimeGroupPolicy(ctx: AgentComponentContext) {
}).groupPolicy;
}
export async function ensureGuildComponentMemberAllowed(params: {
async function ensureGuildComponentMemberAllowed(params: {
interaction: AgentComponentInteraction;
guildInfo: ReturnType<typeof resolveDiscordGuildEntry>;
channelId: string;

View File

@@ -1,18 +1,6 @@
export const AGENT_BUTTON_KEY = "agent";
export const AGENT_SELECT_KEY = "agentsel";
/**
* The component custom id only carries the logical button id. Channel binding
* comes from Discord's trusted interaction payload.
*/
export function buildAgentButtonCustomId(componentId: string): string {
return `${AGENT_BUTTON_KEY}:componentId=${encodeURIComponent(componentId)}`;
}
export function buildAgentSelectCustomId(componentId: string): string {
return `${AGENT_SELECT_KEY}:componentId=${encodeURIComponent(componentId)}`;
}
export {
ackComponentInteraction,
resolveAgentComponentRoute,
@@ -22,7 +10,6 @@ export {
export {
ensureAgentComponentInteractionAllowed,
ensureComponentUserAllowed,
ensureGuildComponentMemberAllowed,
resolveAuthorizedComponentInteraction,
resolveComponentCommandAuthorized,
resolveInteractionContextWithDmAuth,
@@ -43,7 +30,5 @@ export type {
AgentComponentMessageInteraction,
ComponentInteractionContext,
DiscordChannelContext,
DiscordUser,
} from "./agent-components.types.js";
export { resolveDiscordGuildEntry } from "./allow-list.js";
export { resolvePinnedMainDmOwnerFromAllowlist } from "./agent-components-helpers.runtime.js";

View File

@@ -79,11 +79,3 @@ export function resolveDiscordSenderIdentity(params: {
isPluralKit: false,
};
}
export function resolveDiscordSenderLabel(params: {
author: User;
member?: DiscordMemberLike | null;
pluralkitInfo?: PluralKitMessageInfo | null;
}): string {
return resolveDiscordSenderIdentity(params).label;
}

View File

@@ -11,7 +11,6 @@ import {
import {
createAccountScopedAllowFromSection,
createAccountScopedGroupAccessSection,
createAllowlistSetupWizardProxy,
createLegacyCompatChannelDmPolicy,
parseMentionOrPrefixedId,
patchChannelConfigForAccount,
@@ -179,11 +178,3 @@ export function createDiscordSetupWizardBase(handlers: {
disable: (cfg: OpenClawConfig) => setSetupChannelEnabled(cfg, channel, false),
} satisfies ChannelSetupWizard;
}
export function createDiscordSetupWizardProxy(loadWizard: () => Promise<ChannelSetupWizard>) {
return createAllowlistSetupWizardProxy({
loadWizard,
createBase: createDiscordSetupWizardBase,
fallbackResolvedGroupAllowlist: (entries) =>
entries.map((input) => ({ input, resolved: false })),
});
}

View File

@@ -62,7 +62,8 @@ describe("authorizeDiscordVoiceIngress", () => {
},
});
expect(access).toEqual({ ok: true });
expect(access).toMatchObject({ ok: true });
expect(access.ok && access.channelConfig?.users).toEqual(["discord:u-owner"]);
});
it("allows slug-keyed guild configs when manager context only has guild name", async () => {
@@ -91,7 +92,7 @@ describe("authorizeDiscordVoiceIngress", () => {
},
});
expect(access).toEqual({ ok: true });
expect(access).toMatchObject({ ok: true });
});
it("allows wildcard guild configs when only the guild id is available", async () => {
@@ -119,7 +120,7 @@ describe("authorizeDiscordVoiceIngress", () => {
},
});
expect(access).toEqual({ ok: true });
expect(access).toMatchObject({ ok: true });
});
it("blocks commands when channel id is unavailable for an allowlisted channel", async () => {
@@ -211,6 +212,6 @@ describe("authorizeDiscordVoiceIngress", () => {
},
});
expect(access).toEqual({ ok: true });
expect(access).toMatchObject({ ok: true });
});
});

View File

@@ -6,6 +6,7 @@ import type { Guild } from "../internal/discord.js";
import {
isDiscordGroupAllowedByPolicy,
resolveDiscordChannelConfigWithFallback,
type DiscordChannelConfigResolved,
resolveDiscordGuildEntry,
resolveDiscordMemberAccessState,
resolveDiscordOwnerAccess,
@@ -30,7 +31,9 @@ export async function authorizeDiscordVoiceIngress(params: {
memberRoleIds: string[];
ownerAllowFrom?: string[];
sender: { id: string; name?: string; tag?: string };
}): Promise<{ ok: true } | { ok: false; message: string }> {
}): Promise<
{ ok: true; channelConfig?: DiscordChannelConfigResolved | null } | { ok: false; message: string }
> {
const groupPolicy =
params.groupPolicy ??
resolveOpenProviderRuntimeGroupPolicy({
@@ -116,6 +119,6 @@ export async function authorizeDiscordVoiceIngress(params: {
authorizers,
modeWhenAccessGroupsOff: "configured",
})
? { ok: true }
? { ok: true, channelConfig }
: { ok: false, message: "You are not authorized to use this command." };
}

View File

@@ -347,14 +347,63 @@ describe("DiscordVoiceManager", () => {
);
});
it("keeps the shorter timeout for initial voice connection readiness", async () => {
it("uses the default timeout for initial voice connection readiness", async () => {
const connection = createConnectionMock();
joinVoiceChannelMock.mockReturnValueOnce(connection);
const manager = createManager();
await manager.join({ guildId: "g1", channelId: "1001" });
expect(entersStateMock).toHaveBeenCalledWith(connection, "ready", 15_000);
expect(entersStateMock).toHaveBeenCalledWith(connection, "ready", 30_000);
});
it("uses configured voice connection and reconnect timeouts", async () => {
const connection = createConnectionMock();
joinVoiceChannelMock.mockReturnValueOnce(connection);
const manager = createManager({
voice: {
connectTimeoutMs: 45_000,
reconnectGraceMs: 20_000,
},
});
await manager.join({ guildId: "g1", channelId: "1001" });
expect(entersStateMock).toHaveBeenCalledWith(connection, "ready", 45_000);
entersStateMock.mockClear();
entersStateMock.mockRejectedValueOnce(new Error("still disconnected"));
entersStateMock.mockRejectedValueOnce(new Error("still disconnected"));
const disconnected = connection.handlers.get("disconnected");
expect(disconnected).toBeTypeOf("function");
await disconnected?.();
expect(entersStateMock).toHaveBeenCalledWith(connection, "signalling", 20_000);
expect(entersStateMock).toHaveBeenCalledWith(connection, "connecting", 20_000);
expect(connection.destroy).toHaveBeenCalledTimes(1);
expect(manager.status()).toEqual([]);
});
it("uses the default reconnect grace before destroying disconnected sessions", async () => {
const connection = createConnectionMock();
joinVoiceChannelMock.mockReturnValueOnce(connection);
const manager = createManager();
await manager.join({ guildId: "g1", channelId: "1001" });
entersStateMock.mockClear();
entersStateMock.mockRejectedValueOnce(new Error("still disconnected"));
entersStateMock.mockRejectedValueOnce(new Error("still disconnected"));
const disconnected = connection.handlers.get("disconnected");
expect(disconnected).toBeTypeOf("function");
await disconnected?.();
expect(entersStateMock).toHaveBeenCalledWith(connection, "signalling", 15_000);
expect(entersStateMock).toHaveBeenCalledWith(connection, "connecting", 15_000);
expect(connection.destroy).toHaveBeenCalledTimes(1);
expect(manager.status()).toEqual([]);
});
it("stores guild metadata on joined voice sessions", async () => {
@@ -539,6 +588,79 @@ describe("DiscordVoiceManager", () => {
expect(commandArgs?.model).toBe("openai/gpt-5.4-mini");
});
it("runs voice replies under Discord voice output policy", async () => {
agentCommandMock.mockResolvedValueOnce({
payloads: [{ text: "hello back" }],
} as never);
const client = createClient();
client.fetchMember.mockResolvedValue({
nickname: "Guest Nick",
user: {
id: "u-guest",
username: "guest",
globalName: "Guest",
discriminator: "4321",
},
});
const manager = createManager({ groupPolicy: "open" }, client, {
commands: { useAccessGroups: false },
});
await processVoiceSegment(manager, "u-guest");
const commandArgs = agentCommandMock.mock.calls.at(-1)?.[0] as
| { message?: string; messageChannel?: string; messageProvider?: string }
| undefined;
expect(commandArgs?.messageChannel).toBe("discord");
expect(commandArgs?.messageProvider).toBe("discord-voice");
expect(commandArgs?.message).toContain("Do not call the tts tool");
expect(textToSpeechMock).toHaveBeenCalledWith(
expect.objectContaining({
channel: "discord",
text: "hello back",
}),
);
});
it("passes per-channel system prompt overrides to voice agent runs", async () => {
const client = createClient();
client.fetchMember.mockResolvedValue({
nickname: "Guest Nick",
user: {
id: "u-guest",
username: "guest",
globalName: "Guest",
discriminator: "4321",
},
});
const manager = createManager(
{
groupPolicy: "open",
guilds: {
g1: {
channels: {
"1001": {
systemPrompt: " Use short voice replies. ",
},
},
},
},
},
client,
{
commands: { useAccessGroups: false },
},
);
await processVoiceSegment(manager, "u-guest");
const commandArgs = agentCommandMock.mock.calls.at(-1)?.[0] as
| { extraSystemPrompt?: string }
| undefined;
expect(commandArgs?.extraSystemPrompt).toBe("Use short voice replies.");
});
it("reuses speaker context cache for repeated segments from the same speaker", async () => {
const client = createClient();
client.fetchMember.mockResolvedValue({

View File

@@ -35,8 +35,10 @@ import {
CAPTURE_FINALIZE_GRACE_MS,
isVoiceChannel,
logVoiceVerbose,
resolveVoiceTimeoutMs,
MIN_SEGMENT_SECONDS,
VOICE_CONNECT_READY_TIMEOUT_MS,
VOICE_RECONNECT_GRACE_MS,
type VoiceOperationResult,
type VoiceSessionEntry,
} from "./session.js";
@@ -172,13 +174,22 @@ export class DiscordVoiceManager {
return { ok: false, message: "Discord voice plugin is not available." };
}
const voiceConfig = this.params.discordConfig.voice;
const adapterCreator = voicePlugin.getGatewayAdapterCreator(guildId);
const daveEncryption = this.params.discordConfig.voice?.daveEncryption;
const decryptionFailureTolerance = this.params.discordConfig.voice?.decryptionFailureTolerance;
const daveEncryption = voiceConfig?.daveEncryption;
const decryptionFailureTolerance = voiceConfig?.decryptionFailureTolerance;
const connectReadyTimeoutMs = resolveVoiceTimeoutMs(
voiceConfig?.connectTimeoutMs,
VOICE_CONNECT_READY_TIMEOUT_MS,
);
const reconnectGraceMs = resolveVoiceTimeoutMs(
voiceConfig?.reconnectGraceMs,
VOICE_RECONNECT_GRACE_MS,
);
logVoiceVerbose(
`join: DAVE settings encryption=${daveEncryption === false ? "off" : "on"} tolerance=${
decryptionFailureTolerance ?? "default"
}`,
} connectTimeout=${connectReadyTimeoutMs}ms reconnectGrace=${reconnectGraceMs}ms`,
);
const voiceSdk = loadDiscordVoiceSdk();
const connection = voiceSdk.joinVoiceChannel({
@@ -195,10 +206,13 @@ export class DiscordVoiceManager {
await voiceSdk.entersState(
connection,
voiceSdk.VoiceConnectionStatus.Ready,
VOICE_CONNECT_READY_TIMEOUT_MS,
connectReadyTimeoutMs,
);
logVoiceVerbose(`join: connected to guild ${guildId} channel ${channelId}`);
} catch (err) {
logger.warn(
`discord voice: join failed before ready: guild ${guildId} channel ${channelId} timeout=${connectReadyTimeoutMs}ms error=${formatErrorMessage(err)}`,
);
connection.destroy();
return { ok: false, message: `Failed to join voice channel: ${formatErrorMessage(err)}` };
}
@@ -289,11 +303,26 @@ export class DiscordVoiceManager {
disconnectedHandler = async () => {
try {
logVoiceVerbose(
`disconnected: attempting recovery guild ${guildId} channel ${channelId} grace=${reconnectGraceMs}ms`,
);
await Promise.race([
voiceSdk.entersState(connection, voiceSdk.VoiceConnectionStatus.Signalling, 5_000),
voiceSdk.entersState(connection, voiceSdk.VoiceConnectionStatus.Connecting, 5_000),
voiceSdk.entersState(
connection,
voiceSdk.VoiceConnectionStatus.Signalling,
reconnectGraceMs,
),
voiceSdk.entersState(
connection,
voiceSdk.VoiceConnectionStatus.Connecting,
reconnectGraceMs,
),
]);
} catch {
logVoiceVerbose(`disconnected: recovery started guild ${guildId} channel ${channelId}`);
} catch (err) {
logger.warn(
`discord voice: disconnect recovery failed: guild ${guildId} channel ${channelId} timeout=${reconnectGraceMs}ms error=${formatErrorMessage(err)}; destroying connection`,
);
clearSessionIfCurrent();
connection.destroy();
}

Some files were not shown because too many files have changed in this diff Show More