Merge remote-tracking branch 'origin/main' into release/2026.4.25

# Conflicts:
#	CHANGELOG.md
This commit is contained in:
Peter Steinberger
2026-04-26 11:39:46 +01:00
222 changed files with 8310 additions and 2964 deletions

View File

@@ -25,6 +25,12 @@ Use this skill for release and publish-time workflow. Keep ordinary development
- Before release branching, commit any dirty files in coherent groups, push,
pull/rebase, then run `/changelog` on `main` and commit/push/pull that
changelog rewrite immediately before creating the release branch.
- During release planning, inspect `src/plugins/compat/registry.ts` before
branching and again before final publish. For every deprecated or
removal-pending compatibility record whose `removeAfter` date is on or before
the release date, either remove the compatibility path where safe and
validate the affected tests, or write down why removal is blocked and get
explicit maintainer approval before shipping the expired compatibility path.
- Do not delete or rewrite beta tags after they leave the machine. If a
published or pushed beta needs a fix, commit the fix on the release branch and
increment to the next `-beta.N`.
@@ -116,6 +122,12 @@ Use this skill for release and publish-time workflow. Keep ordinary development
`CHANGELOG.md` version section, not highlights or an excerpt. When creating
or editing a release, extract from `## YYYY.M.D` through the line before the
next level-2 heading and use that complete block as the release notes.
- When preparing release notes, scan `src/plugins/compat/registry.ts` for
plugin compatibility records with `warningStarts` or `removeAfter` within 7
days after the release date. Add an `Upcoming deprecations` note to the
release notes when any exist, including the compatibility code, target date,
replacement, and a link to the record's `docsPath` or `/plugins/compatibility`
when no more specific deprecation page exists.
- When cutting a mac release with a beta GitHub prerelease:
- tag `vYYYY.M.D-beta.N` from the release commit
- create a prerelease titled `openclaw YYYY.M.D-beta.N`

4
.github/labeler.yml vendored
View File

@@ -233,6 +233,10 @@
- changed-files:
- any-glob-to-any-file:
- "extensions/diagnostics-otel/**"
"extensions: diagnostics-prometheus":
- changed-files:
- any-glob-to-any-file:
- "extensions/diagnostics-prometheus/**"
"extensions: llm-task":
- changed-files:
- any-glob-to-any-file:

View File

@@ -159,6 +159,8 @@ jobs:
platforms: linux/amd64
cache-from: type=gha,scope=docker-release-amd64
cache-to: type=gha,mode=max,scope=docker-release-amd64
build-args: |
OPENCLAW_EXTENSIONS=diagnostics-otel
tags: ${{ steps.tags.outputs.value }}
labels: ${{ steps.labels.outputs.value }}
provenance: false
@@ -174,6 +176,7 @@ jobs:
cache-from: type=gha,scope=docker-release-amd64
cache-to: type=gha,mode=max,scope=docker-release-amd64
build-args: |
OPENCLAW_EXTENSIONS=diagnostics-otel
OPENCLAW_VARIANT=slim
tags: ${{ steps.tags.outputs.slim }}
labels: ${{ steps.labels.outputs.value }}
@@ -276,6 +279,8 @@ jobs:
platforms: linux/arm64
cache-from: type=gha,scope=docker-release-arm64
cache-to: type=gha,mode=max,scope=docker-release-arm64
build-args: |
OPENCLAW_EXTENSIONS=diagnostics-otel
tags: ${{ steps.tags.outputs.value }}
labels: ${{ steps.labels.outputs.value }}
provenance: false
@@ -291,6 +296,7 @@ jobs:
cache-from: type=gha,scope=docker-release-arm64
cache-to: type=gha,mode=max,scope=docker-release-arm64
build-args: |
OPENCLAW_EXTENSIONS=diagnostics-otel
OPENCLAW_VARIANT=slim
tags: ${{ steps.tags.outputs.slim }}
labels: ${{ steps.labels.outputs.value }}

View File

@@ -430,6 +430,11 @@ jobs:
command: pnpm test:docker:doctor-switch
timeout_minutes: 60
release_path: true
- suite_id: docker-update-channel-switch
label: Update Channel Switch Docker E2E
command: pnpm test:docker:update-channel-switch
timeout_minutes: 60
release_path: true
- suite_id: docker-session-runtime-context
label: Session Runtime Context Docker E2E
command: pnpm test:docker:session-runtime-context

View File

@@ -29,7 +29,7 @@ jobs:
with:
app-id: "2971289"
private-key: ${{ secrets.GH_APP_PRIVATE_KEY_FALLBACK }}
- name: Mark stale issues and pull requests (primary)
- name: Mark stale unassigned issues and pull requests (primary)
id: stale-primary
continue-on-error: true
uses: actions/stale@v10
@@ -56,12 +56,60 @@ jobs:
close-issue-message: |
Closing due to inactivity.
If this is still an issue, please retry on the latest OpenClaw release and share updated details.
If you are absolutely sure it still happens on the latest release, open a new issue with fresh repro steps.
If you are absolutely sure it still happens on the latest release, open a new issue with fresh steps to reproduce.
close-issue-reason: not_planned
close-pr-message: |
Closing due to inactivity.
If you believe this PR should be revived, post in #pr-thunderdome-dangerzone on Discord to talk to a maintainer.
That channel is the escape hatch for high-quality PRs that get auto-closed.
- name: Mark stale assigned issues (primary)
id: assigned-issue-stale-primary
continue-on-error: true
uses: actions/stale@v10
with:
repo-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
days-before-issue-stale: 30
days-before-issue-close: 10
days-before-pr-stale: -1
days-before-pr-close: -1
stale-issue-label: stale
exempt-issue-labels: enhancement,maintainer,pinned,security,no-stale
operations-per-run: 2000
ascending: true
include-only-assigned: true
remove-stale-when-updated: true
stale-issue-message: |
This assigned issue has been automatically marked as stale after 30 days of inactivity.
Please add updates or it will be closed.
close-issue-message: |
Closing due to inactivity.
If this is still an issue, please retry on the latest OpenClaw release and share updated details.
If you are absolutely sure it still happens on the latest release, open a new issue with fresh steps to reproduce.
close-issue-reason: not_planned
- name: Mark stale assigned pull requests (primary)
id: assigned-stale-primary
continue-on-error: true
uses: actions/stale@v10
with:
repo-token: ${{ steps.app-token.outputs.token || steps.app-token-fallback.outputs.token }}
days-before-issue-stale: -1
days-before-issue-close: -1
days-before-pr-stale: 27
days-before-pr-close: 3
stale-pr-label: stale
exempt-pr-labels: maintainer,no-stale,bad-barnacle
operations-per-run: 2000
ascending: true
include-only-assigned: true
ignore-pr-updates: true
remove-stale-when-updated: true
stale-pr-message: |
This assigned pull request has been automatically marked as stale after being open for 27 days.
Please add updates or it will be closed.
close-pr-message: |
Closing due to inactivity.
If you believe this PR should be revived, post in #pr-thunderdome-dangerzone on Discord to talk to a maintainer.
That channel is the escape hatch for high-quality PRs that get auto-closed.
- name: Check stale state cache
id: stale-state
if: always()
@@ -86,7 +134,7 @@ jobs:
core.warning(`Failed to check stale state cache: ${message}`);
core.setOutput("has_state", "false");
}
- name: Mark stale issues and pull requests (fallback)
- name: Mark stale unassigned issues and pull requests (fallback)
if: (steps.stale-primary.outcome == 'failure' || steps.stale-state.outputs.has_state == 'true') && steps.app-token-fallback.outputs.token != ''
uses: actions/stale@v10
with:
@@ -112,12 +160,58 @@ jobs:
close-issue-message: |
Closing due to inactivity.
If this is still an issue, please retry on the latest OpenClaw release and share updated details.
If you are absolutely sure it still happens on the latest release, open a new issue with fresh repro steps.
If you are absolutely sure it still happens on the latest release, open a new issue with fresh steps to reproduce.
close-issue-reason: not_planned
close-pr-message: |
Closing due to inactivity.
If you believe this PR should be revived, post in #pr-thunderdome-dangerzone on Discord to talk to a maintainer.
That channel is the escape hatch for high-quality PRs that get auto-closed.
- name: Mark stale assigned issues (fallback)
if: (steps.assigned-issue-stale-primary.outcome == 'failure' || steps.stale-state.outputs.has_state == 'true') && steps.app-token-fallback.outputs.token != ''
uses: actions/stale@v10
with:
repo-token: ${{ steps.app-token-fallback.outputs.token }}
days-before-issue-stale: 30
days-before-issue-close: 10
days-before-pr-stale: -1
days-before-pr-close: -1
stale-issue-label: stale
exempt-issue-labels: enhancement,maintainer,pinned,security,no-stale
operations-per-run: 2000
ascending: true
include-only-assigned: true
remove-stale-when-updated: true
stale-issue-message: |
This assigned issue has been automatically marked as stale after 30 days of inactivity.
Please add updates or it will be closed.
close-issue-message: |
Closing due to inactivity.
If this is still an issue, please retry on the latest OpenClaw release and share updated details.
If you are absolutely sure it still happens on the latest release, open a new issue with fresh steps to reproduce.
close-issue-reason: not_planned
- name: Mark stale assigned pull requests (fallback)
if: (steps.assigned-stale-primary.outcome == 'failure' || steps.stale-state.outputs.has_state == 'true') && steps.app-token-fallback.outputs.token != ''
uses: actions/stale@v10
with:
repo-token: ${{ steps.app-token-fallback.outputs.token }}
days-before-issue-stale: -1
days-before-issue-close: -1
days-before-pr-stale: 27
days-before-pr-close: 3
stale-pr-label: stale
exempt-pr-labels: maintainer,no-stale,bad-barnacle
operations-per-run: 2000
ascending: true
include-only-assigned: true
ignore-pr-updates: true
remove-stale-when-updated: true
stale-pr-message: |
This assigned pull request has been automatically marked as stale after being open for 27 days.
Please add updates or it will be closed.
close-pr-message: |
Closing due to inactivity.
If you believe this PR should be revived, post in #pr-thunderdome-dangerzone on Discord to talk to a maintainer.
That channel is the escape hatch for high-quality PRs that get auto-closed.
lock-closed-issues:
permissions:

View File

@@ -85,7 +85,8 @@ Telegraph style. Root rules only. Read scoped `AGENTS.md` before subtree work.
- extension tests: extension test typecheck/tests
- public SDK/plugin contract: extension prod/test too
- unknown root/config: all lanes
- Before handoff/push: `pnpm check:changed`. Tests-only: `pnpm test:changed`. Full prod sweep: `pnpm check`.
- Before handoff/push for code/test/runtime/config changes: `pnpm check:changed`. Tests-only: `pnpm test:changed`. Full prod sweep: `pnpm check`.
- Docs/changelog-only and CI/workflow metadata-only changes are not changed-gate work by default. Use `git diff --check` plus the relevant formatter/docs/workflow sanity check; escalate to `pnpm check:changed` only when scripts, test config, generated docs/API, package metadata, or runtime/build behavior changed.
- Rebase sanity: after a green `pnpm check:changed`, a clean rebase onto current
`origin/main` does not require rerunning the full changed gate when the rebase
has no conflicts and the branch diff is materially unchanged. Do a quick

View File

@@ -38,6 +38,7 @@ Docs: https://docs.openclaw.ai
- Diagnostics/OTEL: emit bounded telemetry exporter health diagnostics for startup and log-export failures without exporting raw error text. Thanks @vincentkoc.
- Diagnostics/OTEL: export agent harness lifecycle telemetry as bounded `openclaw.harness.run` spans and `openclaw.harness.duration_ms` metrics so QA-lab, Codex, and future harnesses share one trace shape. Thanks @vincentkoc.
- Diagnostics/trace: propagate W3C `traceparent` headers from trusted model-call trace context to provider transports while replacing caller-supplied traceparent values. Thanks @vincentkoc.
- Diagnostics/Prometheus: add a bundled `diagnostics-prometheus` plugin with a protected gateway scrape route for low-cardinality diagnostics metrics. Thanks @vincentkoc.
- Plugins/CLI: add `openclaw plugins registry` for explicit persisted-registry inspection and `--refresh` repair without making normal startup rescan plugin locations. Thanks @vincentkoc.
- Plugins/CLI: make `openclaw plugins list` read the cold persisted registry snapshot by default, leaving module-aware diagnostics to `plugins doctor` and `plugins inspect`. Thanks @vincentkoc.
- Plugins/startup: move gateway startup plugin planning onto the versioned cold registry index, with postinstall repair for older registry files that predate startup metadata. Thanks @vincentkoc.
@@ -53,7 +54,10 @@ Docs: https://docs.openclaw.ai
- CLI/configure: keep web-search configure prompts on cold plugin registry metadata until the user chooses managed search setup. Thanks @vincentkoc.
- Plugins/chat commands: refresh the persisted plugin registry after `/plugins enable` and `/plugins disable`, matching the CLI mutation path. Thanks @vincentkoc.
- Plugins/compat: mark `OPENCLAW_DISABLE_PERSISTED_PLUGIN_REGISTRY` as a deprecated break-glass switch and point operators at registry repair instead. Thanks @vincentkoc.
- Plugins/compat: expand the central compatibility registry with dated owners, replacements, and maximum three-month removal targets for legacy SDK, manifest, setup, registry-migration, and agent-runtime surfaces. Thanks @vincentkoc.
- Plugins/registry: ignore stale persisted registry reads when plugin policy no longer matches current config, and stamp generated registry files with a do-not-edit warning. Thanks @vincentkoc.
- Config/plugins: keep plugin command-alias validation on cold manifest metadata instead of importing the runtime alias resolver. Thanks @vincentkoc.
- Security/plugins: keep web-search credential presence checks on cold config, env, and manifest metadata instead of importing web-search provider runtime. Thanks @vincentkoc.
- Diagnostics/OTEL: surface provider request identifiers as bounded hashes on model-call diagnostics and span events, without exporting raw request IDs or metric labels. Thanks @Lidang-Jiang and @vincentkoc.
- Plugins/diagnostics: add metadata-only `model_call_started` and `model_call_ended` hooks for provider/model call telemetry without exposing prompts, responses, headers, request bodies, or raw provider request IDs. Thanks @vincentkoc.
- Diagnostics/OTEL: emit bounded context assembly diagnostics and export `openclaw.context.assembled` spans with prompt/history sizes but no prompt, history, response, or session-key content. Thanks @vincentkoc.
@@ -89,6 +93,20 @@ Docs: https://docs.openclaw.ai
### Fixes
- Plugins/startup: load the default `memory-core` slot during Gateway startup when permitted so active-memory recall can call `memory_search` and `memory_get` without requiring an explicit `plugins.slots.memory` entry, while preserving `plugins.slots.memory: "none"`. Thanks @codex.
- Plugins/CLI: prefer native require for compiled bundled plugin JavaScript before jiti so read-only config, status, device, and node commands avoid unnecessary transform overhead on slow hosts. Fixes #62842. Thanks @Effet.
- Plugins/compat: add missing dated compatibility records for legacy extension-api, memory registration, provider hook/type aliases, runtime aliases, channel SDK helpers, and approval/test utility shims. Thanks @vincentkoc.
- Plugins/CLI: refresh the persisted registry after managed plugin files are removed so ClawHub uninstall cannot leave stale `plugins list` entries. Thanks @codex.
- Plugins/CLI: make plugin install and uninstall config writes conflict-aware, clear stale denylist entries on explicit reinstall/removal, and delete managed plugin files only after config/index commit succeeds. Thanks @codex.
- Plugins: fail `plugins update` when tracked plugin or hook updates error, keep bundled runtime-dependency repair behind restrictive allowlists, and reject package installs with unloadable extension entries. Thanks @codex.
- Gateway/chat: keep duplicate attachment-backed `chat.send` retries with the same idempotency key on the documented in-flight path so aborts still target the real active run. Fixes #70139. Thanks @Feelw00.
- Plugins: share package entrypoint resolution between install and discovery, reject mismatched `runtimeExtensions`, and cache bundled runtime-dependency manifest reads during scans. Thanks @codex.
- Onboarding/setup: keep first-run config reads, plugin compatibility notices, and post-model sanity checks on cold metadata paths unless the user chooses to browse all models, avoiding full plugin/runtime catalog work between prompts. Thanks @shakkernerd.
- Onboarding/auth: run manifest-owned provider auth choices through scoped setup providers so selecting OpenAI Codex browser/device auth no longer loads every provider runtime before OAuth starts. Thanks @shakkernerd.
- Onboarding/auth: keep the post-auth default-model policy lookup on manifest/setup metadata so the next prompt appears without loading broad provider runtime. Thanks @shakkernerd.
- Onboarding/models: keep skip-auth and provider-scoped model picker prompts off the full global model catalog path, and cache provider catalog hook resolution so setup no longer stalls after auth on large plugin registries. Thanks @shakkernerd.
- Gateway/Bonjour: suppress known @homebridge/ciao cancellation and network assertion failures through scoped process handlers so malformed mDNS packets or restricted VPS networking disable/restart Bonjour instead of crashing the gateway. Fixes #67578. Thanks @zenassist26-create.
- Discord: keep late clicks on already-resolved exec approval buttons quiet when elevated mode auto-resolved the request, while still surfacing real approval submission failures. Fixes #66906. Thanks @rlerikse.
- Agents/subagents: deliver completed yielded-subagent results back to no-thread requester routes via direct fallback when the dormant parent announce turn produces no visible reply, and add QA-lab coverage for the regression. Thanks @vincentkoc.
- Gateway/Tailscale: let Tailscale-authenticated Control UI operator sessions with browser device identity skip the device-pairing round trip while still rejecting device-less and node-role connections. Refs #71986. Thanks @jokedul.
- Doctor: honor `OPENCLAW_SERVICE_REPAIR_POLICY=external` by reporting gateway service health while skipping service install/start/restart/bootstrap, supervisor rewrites, and legacy service cleanup for externally managed environments. Thanks @shakkernerd.

View File

@@ -9,6 +9,16 @@ services:
# Docker bridge networks usually do not carry mDNS multicast reliably.
# Set OPENCLAW_DISABLE_BONJOUR=0 only on host/macvlan/mDNS-capable networks.
OPENCLAW_DISABLE_BONJOUR: ${OPENCLAW_DISABLE_BONJOUR:-1}
# OpenTelemetry export is outbound OTLP/HTTP from the Gateway. Prometheus
# uses the existing authenticated Gateway route; it does not need a port.
OTEL_EXPORTER_OTLP_ENDPOINT: ${OTEL_EXPORTER_OTLP_ENDPOINT:-}
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: ${OTEL_EXPORTER_OTLP_TRACES_ENDPOINT:-}
OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: ${OTEL_EXPORTER_OTLP_METRICS_ENDPOINT:-}
OTEL_EXPORTER_OTLP_LOGS_ENDPOINT: ${OTEL_EXPORTER_OTLP_LOGS_ENDPOINT:-}
OTEL_EXPORTER_OTLP_PROTOCOL: ${OTEL_EXPORTER_OTLP_PROTOCOL:-http/protobuf}
OTEL_SERVICE_NAME: ${OTEL_SERVICE_NAME:-}
OTEL_SEMCONV_STABILITY_OPT_IN: ${OTEL_SEMCONV_STABILITY_OPT_IN:-}
OPENCLAW_OTEL_PRELOADED: ${OPENCLAW_OTEL_PRELOADED:-}
CLAUDE_AI_SESSION_KEY: ${CLAUDE_AI_SESSION_KEY:-}
CLAUDE_WEB_SESSION_KEY: ${CLAUDE_WEB_SESSION_KEY:-}
CLAUDE_WEB_COOKIE: ${CLAUDE_WEB_COOKIE:-}

View File

@@ -194,6 +194,12 @@ openclaw plugins list --json
`plugins list` reads the persisted local plugin registry first, with a manifest-only derived fallback when the registry is missing or invalid. It is useful for checking whether a plugin is installed, enabled, and visible to cold startup planning, but it is not a live runtime probe of an already-running Gateway process. After changing plugin code, enablement, hook policy, or `plugins.load.paths`, restart the Gateway that serves the channel before expecting new `register(api)` code or hooks to run. For remote/container deployments, verify you are restarting the actual `openclaw gateway run` child, not only a wrapper process.
</Note>
For bundled plugin work inside a packaged Docker image, bind-mount the plugin
source directory over the matching packaged source path, such as
`/app/extensions/synology-chat`. OpenClaw will discover that mounted source
overlay before `/app/dist/extensions/synology-chat`; a plain copied source
directory remains inert so normal packaged installs still use compiled dist.
For runtime hook debugging:
- `openclaw plugins inspect <id> --json` shows registered hooks and diagnostics from a module-loaded inspection pass.
@@ -226,7 +232,7 @@ openclaw plugins uninstall <id> --dry-run
openclaw plugins uninstall <id> --keep-files
```
`uninstall` removes plugin records from `plugins.entries`, the persisted plugin index, the plugin allowlist, and linked `plugins.load.paths` entries when applicable. Unless `--keep-files` is set, uninstall also removes the tracked managed install directory when it is inside OpenClaw's plugin extensions root. For active memory plugins, the memory slot resets to `memory-core`.
`uninstall` removes plugin records from `plugins.entries`, the persisted plugin index, plugin allow/deny list entries, and linked `plugins.load.paths` entries when applicable. Unless `--keep-files` is set, uninstall also removes the tracked managed install directory when it is inside OpenClaw's plugin extensions root. For active memory plugins, the memory slot resets to `memory-core`.
<Note>
`--keep-config` is supported as a deprecated alias for `--keep-files`.

View File

@@ -39,7 +39,7 @@ openclaw --update
- `--json`: print machine-readable `UpdateRunResult` JSON, including
`postUpdate.plugins.integrityDrifts` when npm plugin artifact drift is
detected during post-update plugin sync.
- `--timeout <seconds>`: per-step timeout (default is 1200s).
- `--timeout <seconds>`: per-step timeout (default is 1800s).
- `--yes`: skip confirmation prompts (for example downgrade confirmation)
Note: downgrades require confirmation because older versions can break configuration.
@@ -67,7 +67,7 @@ offers to create one.
Options:
- `--timeout <seconds>`: timeout for each update step (default `1200`)
- `--timeout <seconds>`: timeout for each update step (default `1800`)
## What it does

View File

@@ -253,6 +253,10 @@ A no-op `compact()` is unsafe for an active non-owning engine because it disable
The slot is exclusive at run time — only one registered context engine is resolved for a given run or compaction operation. Other enabled `kind: "context-engine"` plugins can still load and run their registration code; `plugins.slots.contextEngine` only selects which registered engine id OpenClaw resolves when it needs a context engine.
</Note>
<Note>
**Plugin uninstall:** when you uninstall the plugin currently selected as `plugins.slots.contextEngine`, OpenClaw resets the slot back to the default (`legacy`). The same reset behavior applies to `plugins.slots.memory`. No manual config edit is required.
</Note>
## Relationship to compaction and memory
<AccordionGroup>

View File

@@ -1442,6 +1442,7 @@
"gateway/doctor",
"logging",
"gateway/opentelemetry",
"gateway/prometheus",
"gateway/logging",
"gateway/diagnostics",
"gateway/troubleshooting"

209
docs/gateway/prometheus.md Normal file
View File

@@ -0,0 +1,209 @@
---
summary: "Expose OpenClaw diagnostics as Prometheus text metrics through the diagnostics-prometheus plugin"
title: "Prometheus metrics"
sidebarTitle: "Prometheus"
read_when:
- You want Prometheus, Grafana, VictoriaMetrics, or another scraper to collect OpenClaw Gateway metrics
- You need the Prometheus metric names and label policy for dashboards or alerts
- You want metrics without running an OpenTelemetry collector
---
OpenClaw can expose diagnostics metrics through the bundled `diagnostics-prometheus` plugin. It listens to trusted internal diagnostics and renders a Prometheus text endpoint at:
```text
GET /api/diagnostics/prometheus
```
Content type is `text/plain; version=0.0.4; charset=utf-8`, the standard Prometheus exposition format.
<Warning>
The route uses Gateway authentication (operator scope). Do not expose it as a public unauthenticated `/metrics` endpoint. Scrape it through the same auth path you use for other operator APIs.
</Warning>
For traces, logs, OTLP push, and OpenTelemetry GenAI semantic attributes, see [OpenTelemetry export](/gateway/opentelemetry).
## Quick start
<Steps>
<Step title="Enable the plugin">
<Tabs>
<Tab title="Config">
```json5
{
plugins: {
allow: ["diagnostics-prometheus"],
entries: {
"diagnostics-prometheus": { enabled: true },
},
},
diagnostics: {
enabled: true,
},
}
```
</Tab>
<Tab title="CLI">
```bash
openclaw plugins enable diagnostics-prometheus
```
</Tab>
</Tabs>
</Step>
<Step title="Restart the Gateway">
The HTTP route is registered at plugin startup, so reload after enabling.
</Step>
<Step title="Scrape the protected route">
Send the same gateway auth your operator clients use:
```bash
curl -H "Authorization: Bearer $OPENCLAW_GATEWAY_TOKEN" \
http://127.0.0.1:18789/api/diagnostics/prometheus
```
</Step>
<Step title="Wire Prometheus">
```yaml
# prometheus.yml
scrape_configs:
- job_name: openclaw
scrape_interval: 30s
metrics_path: /api/diagnostics/prometheus
authorization:
credentials_file: /etc/prometheus/openclaw-gateway-token
static_configs:
- targets: ["openclaw-gateway:18789"]
```
</Step>
</Steps>
<Note>
`diagnostics.enabled: true` is required. Without it, the plugin still registers the HTTP route but no diagnostic events flow into the exporter, so the response is empty.
</Note>
## Metrics exported
| Metric | Type | Labels |
| --------------------------------------------- | --------- | ----------------------------------------------------------------------------------------- |
| `openclaw_run_completed_total` | counter | `channel`, `model`, `outcome`, `provider`, `trigger` |
| `openclaw_run_duration_seconds` | histogram | `channel`, `model`, `outcome`, `provider`, `trigger` |
| `openclaw_model_call_total` | counter | `api`, `error_category`, `model`, `outcome`, `provider`, `transport` |
| `openclaw_model_call_duration_seconds` | histogram | `api`, `error_category`, `model`, `outcome`, `provider`, `transport` |
| `openclaw_model_tokens_total` | counter | `agent`, `channel`, `model`, `provider`, `token_type` |
| `openclaw_gen_ai_client_token_usage` | histogram | `model`, `provider`, `token_type` |
| `openclaw_model_cost_usd_total` | counter | `agent`, `channel`, `model`, `provider` |
| `openclaw_tool_execution_total` | counter | `error_category`, `outcome`, `params_kind`, `tool` |
| `openclaw_tool_execution_duration_seconds` | histogram | `error_category`, `outcome`, `params_kind`, `tool` |
| `openclaw_harness_run_total` | counter | `channel`, `error_category`, `harness`, `model`, `outcome`, `phase`, `plugin`, `provider` |
| `openclaw_harness_run_duration_seconds` | histogram | `channel`, `error_category`, `harness`, `model`, `outcome`, `phase`, `plugin`, `provider` |
| `openclaw_message_processed_total` | counter | `channel`, `outcome`, `reason` |
| `openclaw_message_processed_duration_seconds` | histogram | `channel`, `outcome`, `reason` |
| `openclaw_message_delivery_total` | counter | `channel`, `delivery_kind`, `error_category`, `outcome` |
| `openclaw_message_delivery_duration_seconds` | histogram | `channel`, `delivery_kind`, `error_category`, `outcome` |
| `openclaw_queue_lane_size` | gauge | `lane` |
| `openclaw_queue_lane_wait_seconds` | histogram | `lane` |
| `openclaw_session_state_total` | counter | `reason`, `state` |
| `openclaw_session_queue_depth` | gauge | `state` |
| `openclaw_memory_bytes` | gauge | `kind` |
| `openclaw_memory_rss_bytes` | histogram | none |
| `openclaw_memory_pressure_total` | counter | `level`, `reason` |
| `openclaw_telemetry_exporter_total` | counter | `exporter`, `reason`, `signal`, `status` |
| `openclaw_prometheus_series_dropped_total` | counter | none |
## Label policy
<AccordionGroup>
<Accordion title="Bounded, low-cardinality labels">
Prometheus labels stay bounded and low-cardinality. The exporter does not emit raw diagnostic identifiers such as `runId`, `sessionKey`, `sessionId`, `callId`, `toolCallId`, message IDs, chat IDs, or provider request IDs.
Label values are redacted and must match OpenClaw's low-cardinality character policy. Values that fail the policy are replaced with `unknown`, `other`, or `none`, depending on the metric.
</Accordion>
<Accordion title="Series cap and overflow accounting">
The exporter caps retained time series in memory at **2048** series across counters, gauges, and histograms combined. New series beyond that cap are dropped, and `openclaw_prometheus_series_dropped_total` increments by one each time.
Watch this counter as a hard signal that an attribute upstream is leaking high-cardinality values. The exporter never lifts the cap automatically; if it climbs, fix the source rather than disabling the cap.
</Accordion>
<Accordion title="What never appears in Prometheus output">
- prompt text, response text, tool inputs, tool outputs, system prompts
- raw provider request IDs (only bounded hashes, where applicable, on spans — never on metrics)
- session keys and session IDs
- hostnames, file paths, secret values
</Accordion>
</AccordionGroup>
## PromQL recipes
```promql
# Tokens per minute, split by provider
sum by (provider) (rate(openclaw_model_tokens_total[1m]))
# Spend (USD) over the last hour, by model
sum by (model) (increase(openclaw_model_cost_usd_total[1h]))
# 95th percentile model run duration
histogram_quantile(
0.95,
sum by (le, provider, model)
(rate(openclaw_run_duration_seconds_bucket[5m]))
)
# Queue wait time SLO (95p under 2s)
histogram_quantile(
0.95,
sum by (le, lane) (rate(openclaw_queue_lane_wait_seconds_bucket[5m]))
) < 2
# Dropped Prometheus series (cardinality alarm)
increase(openclaw_prometheus_series_dropped_total[15m]) > 0
```
<Tip>
Prefer `openclaw_gen_ai_client_token_usage` for cross-provider dashboards: it follows the OpenTelemetry GenAI semantic conventions and is consistent with metrics from non-OpenClaw GenAI services.
</Tip>
## Choosing between Prometheus and OpenTelemetry export
OpenClaw supports both surfaces independently. You can run either, both, or neither.
<Tabs>
<Tab title="diagnostics-prometheus">
- **Pull** model: Prometheus scrapes `/api/diagnostics/prometheus`.
- No external collector required.
- Authenticated through normal Gateway auth.
- Surface is metrics only (no traces or logs).
- Best for stacks already standardized on Prometheus + Grafana.
</Tab>
<Tab title="diagnostics-otel">
- **Push** model: OpenClaw sends OTLP/HTTP to a collector or OTLP-compatible backend.
- Surface includes metrics, traces, and logs.
- Bridges to Prometheus through an OpenTelemetry Collector (`prometheus` or `prometheusremotewrite` exporter) when you need both.
- See [OpenTelemetry export](/gateway/opentelemetry) for the full catalog.
</Tab>
</Tabs>
## Troubleshooting
<AccordionGroup>
<Accordion title="Empty response body">
- Check `diagnostics.enabled: true` in config.
- Confirm the plugin is enabled and loaded with `openclaw plugins list --enabled`.
- Generate some traffic; counters and histograms only emit lines after at least one event.
</Accordion>
<Accordion title="401 / unauthorized">
The endpoint requires the Gateway operator scope (`auth: "gateway"` with `gatewayRuntimeScopeSurface: "trusted-operator"`). Configure Prometheus with the same token or password you use for any other Gateway operator route. There is no public unauthenticated mode.
</Accordion>
<Accordion title="`openclaw_prometheus_series_dropped_total` is climbing">
A new attribute is exceeding the **2048**-series cap. Inspect recent metrics for an unexpectedly high-cardinality label and fix it at the source. The exporter intentionally drops new series instead of silently rewriting labels.
</Accordion>
<Accordion title="Prometheus shows stale series after a restart">
The plugin keeps state in memory only. After a Gateway restart, counters reset to zero and gauges restart at their next reported value. Use PromQL `rate()` and `increase()` to handle resets cleanly.
</Accordion>
</AccordionGroup>
## Related
- [Diagnostics export](/gateway/diagnostics) — local diagnostics zip for support bundles
- [Health and readiness](/gateway/health) — `/healthz` and `/readyz` probes
- [Logging](/logging) — file-based logging
- [OpenTelemetry export](/gateway/opentelemetry) — OTLP push for traces, metrics, and logs

View File

@@ -172,6 +172,10 @@ runs the same lanes before release approval.
- Use `--platform macos`, `--platform windows`, or `--platform linux` while
iterating on one guest. Use `--json` for the summary artifact path and
per-lane status.
- The OpenAI lane uses `openai/gpt-5.5` for the live agent-turn proof by
default. Pass `--model <provider/model>` or set
`OPENCLAW_PARALLELS_OPENAI_MODEL` when deliberately validating another
OpenAI model.
- Wrap long local runs in a host timeout so Parallels transport stalls cannot
consume the rest of the testing window:
@@ -603,7 +607,7 @@ These Docker runners split into two buckets:
`OPENCLAW_LIVE_GATEWAY_MODEL_TIMEOUT_MS=90000`. Override those env vars when you
explicitly want the larger exhaustive scan.
- `test:docker:all` builds the live Docker image once via `test:docker:live-build`, then reuses it for the live Docker lanes. It also builds one shared `scripts/e2e/Dockerfile` image via `test:docker:e2e-build` and reuses it for the E2E container smoke runners that exercise the built app. The aggregate uses a weighted local scheduler: `OPENCLAW_DOCKER_ALL_PARALLELISM` controls process slots, while resource caps keep heavy live, npm-install, and multi-service lanes from all starting at once. Defaults are 10 slots, `OPENCLAW_DOCKER_ALL_LIVE_LIMIT=6`, `OPENCLAW_DOCKER_ALL_NPM_LIMIT=8`, and `OPENCLAW_DOCKER_ALL_SERVICE_LIMIT=7`; tune `OPENCLAW_DOCKER_ALL_WEIGHT_LIMIT` or `OPENCLAW_DOCKER_ALL_DOCKER_LIMIT` only when the Docker host has more headroom. The runner performs a Docker preflight by default, removes stale OpenClaw E2E containers, prints status every 30 seconds, stores successful lane timings in `.artifacts/docker-tests/lane-timings.json`, and uses those timings to start longer lanes first on later runs. Use `OPENCLAW_DOCKER_ALL_DRY_RUN=1` to print the weighted lane manifest without building or running Docker.
- Container smoke runners: `test:docker:openwebui`, `test:docker:onboard`, `test:docker:npm-onboard-channel-agent`, `test:docker:session-runtime-context`, `test:docker:agents-delete-shared-workspace`, `test:docker:gateway-network`, `test:docker:browser-cdp-snapshot`, `test:docker:mcp-channels`, `test:docker:pi-bundle-mcp-tools`, `test:docker:cron-mcp-cleanup`, `test:docker:plugins`, `test:docker:plugin-update`, and `test:docker:config-reload` boot one or more real containers and verify higher-level integration paths.
- Container smoke runners: `test:docker:openwebui`, `test:docker:onboard`, `test:docker:npm-onboard-channel-agent`, `test:docker:update-channel-switch`, `test:docker:session-runtime-context`, `test:docker:agents-delete-shared-workspace`, `test:docker:gateway-network`, `test:docker:browser-cdp-snapshot`, `test:docker:mcp-channels`, `test:docker:pi-bundle-mcp-tools`, `test:docker:cron-mcp-cleanup`, `test:docker:plugins`, `test:docker:plugin-update`, and `test:docker:config-reload` boot one or more real containers and verify higher-level integration paths.
The live-model Docker runners also bind-mount only the needed CLI auth homes (or all supported ones when the run is not narrowed), then copy them into the container home before the run so external-CLI OAuth can refresh tokens without mutating the host auth store:
@@ -615,6 +619,7 @@ The live-model Docker runners also bind-mount only the needed CLI auth homes (or
- Open WebUI live smoke: `pnpm test:docker:openwebui` (script: `scripts/e2e/openwebui-docker.sh`)
- Onboarding wizard (TTY, full scaffolding): `pnpm test:docker:onboard` (script: `scripts/e2e/onboard-docker.sh`)
- Npm tarball onboarding/channel/agent smoke: `pnpm test:docker:npm-onboard-channel-agent` installs the packed OpenClaw tarball globally in Docker, configures OpenAI via env-ref onboarding plus Telegram by default, verifies doctor repairs activated plugin runtime deps, and runs one mocked OpenAI agent turn. Reuse a prebuilt tarball with `OPENCLAW_NPM_ONBOARD_PACKAGE_TGZ=/path/to/openclaw-*.tgz`, skip the host rebuild with `OPENCLAW_NPM_ONBOARD_HOST_BUILD=0`, or switch channel with `OPENCLAW_NPM_ONBOARD_CHANNEL=discord`.
- Update channel switch smoke: `pnpm test:docker:update-channel-switch` installs the packed OpenClaw tarball globally in Docker, switches from package `stable` to git `dev`, verifies the persisted channel and plugin post-update work, then switches back to package `stable` and checks update status.
- Session runtime context smoke: `pnpm test:docker:session-runtime-context` verifies hidden runtime context transcript persistence plus doctor repair of affected duplicated prompt-rewrite branches.
- Bun global install smoke: `bash scripts/e2e/bun-global-install-smoke.sh` packs the current tree, installs it with `bun install -g` in an isolated home, and verifies `openclaw infer image providers --json` returns bundled image providers instead of hanging. Reuse a prebuilt tarball with `OPENCLAW_BUN_GLOBAL_SMOKE_PACKAGE_TGZ=/path/to/openclaw-*.tgz`, skip the host build with `OPENCLAW_BUN_GLOBAL_SMOKE_HOST_BUILD=0`, or copy `dist/` from a built Docker image with `OPENCLAW_BUN_GLOBAL_SMOKE_DIST_IMAGE=openclaw-dockerfile-smoke:local`.
- Installer Docker smoke: `bash scripts/test-install-sh-docker.sh` shares one npm cache across its root, update, and direct-npm containers. Update smoke defaults to npm `latest` as the stable baseline before upgrading to the candidate tarball. Non-root installer checks keep an isolated npm cache so root-owned cache entries do not mask user-local install behavior. Set `OPENCLAW_INSTALL_SMOKE_NPM_CACHE_DIR=/path/to/cache` to reuse the root/update/direct-npm cache across local reruns.
@@ -626,7 +631,8 @@ The live-model Docker runners also bind-mount only the needed CLI auth homes (or
- MCP channel bridge (seeded Gateway + stdio bridge + raw Claude notification-frame smoke): `pnpm test:docker:mcp-channels` (script: `scripts/e2e/mcp-channels-docker.sh`)
- Pi bundle MCP tools (real stdio MCP server + embedded Pi profile allow/deny smoke): `pnpm test:docker:pi-bundle-mcp-tools` (script: `scripts/e2e/pi-bundle-mcp-tools-docker.sh`)
- Cron/subagent MCP cleanup (real Gateway + stdio MCP child teardown after isolated cron and one-shot subagent runs): `pnpm test:docker:cron-mcp-cleanup` (script: `scripts/e2e/cron-mcp-cleanup-docker.sh`)
- Plugins (install smoke + `/plugin` alias + Claude-bundle restart semantics): `pnpm test:docker:plugins` (script: `scripts/e2e/plugins-docker.sh`)
- Plugins (install smoke, ClawHub install/uninstall, marketplace updates, and Claude-bundle enable/inspect): `pnpm test:docker:plugins` (script: `scripts/e2e/plugins-docker.sh`)
Set `OPENCLAW_PLUGINS_E2E_CLAWHUB=0` to skip the live ClawHub block, or override the default package with `OPENCLAW_PLUGINS_E2E_CLAWHUB_SPEC` and `OPENCLAW_PLUGINS_E2E_CLAWHUB_ID`.
- Plugin update unchanged smoke: `pnpm test:docker:plugin-update` (script: `scripts/e2e/plugin-update-unchanged-docker.sh`)
- Config reload metadata smoke: `pnpm test:docker:config-reload` (script: `scripts/e2e/config-reload-source-docker.sh`)
- Bundled plugin runtime deps: `pnpm test:docker:bundled-channel-deps` builds a small Docker runner image by default, builds and packs OpenClaw once on the host, then mounts that tarball into each Linux install scenario. Reuse the image with `OPENCLAW_SKIP_DOCKER_BUILD=1`, skip the host rebuild after a fresh local build with `OPENCLAW_BUNDLED_CHANNEL_HOST_BUILD=0`, or point at an existing tarball with `OPENCLAW_BUNDLED_CHANNEL_PACKAGE_TGZ=/path/to/openclaw-*.tgz`. The full Docker aggregate pre-packs this tarball once, then shards bundled channel checks into independent lanes, including separate update lanes for Telegram, Discord, Slack, Feishu, memory-lancedb, and ACPX. Use `OPENCLAW_BUNDLED_CHANNELS=telegram,slack` to narrow the channel matrix when running the bundled lane directly, or `OPENCLAW_BUNDLED_CHANNEL_UPDATE_TARGETS=telegram,acpx` to narrow the update scenario. The lane also verifies that `channels.<id>.enabled=false` and `plugins.entries.<id>.enabled=false` suppress doctor/runtime-dependency repair.

View File

@@ -122,16 +122,61 @@ and setup-time config writes through `openclaw-gateway` with
The setup script accepts these optional environment variables:
| Variable | Purpose |
| ------------------------------ | --------------------------------------------------------------- |
| `OPENCLAW_IMAGE` | Use a remote image instead of building locally |
| `OPENCLAW_DOCKER_APT_PACKAGES` | Install extra apt packages during build (space-separated) |
| `OPENCLAW_EXTENSIONS` | Pre-install plugin deps at build time (space-separated names) |
| `OPENCLAW_EXTRA_MOUNTS` | Extra host bind mounts (comma-separated `source:target[:opts]`) |
| `OPENCLAW_HOME_VOLUME` | Persist `/home/node` in a named Docker volume |
| `OPENCLAW_SANDBOX` | Opt in to sandbox bootstrap (`1`, `true`, `yes`, `on`) |
| `OPENCLAW_DOCKER_SOCKET` | Override Docker socket path |
| `OPENCLAW_DISABLE_BONJOUR` | Disable Bonjour/mDNS advertising (defaults to `1` for Docker) |
| Variable | Purpose |
| ------------------------------------------ | --------------------------------------------------------------- |
| `OPENCLAW_IMAGE` | Use a remote image instead of building locally |
| `OPENCLAW_DOCKER_APT_PACKAGES` | Install extra apt packages during build (space-separated) |
| `OPENCLAW_EXTENSIONS` | Pre-install plugin deps at build time (space-separated names) |
| `OPENCLAW_EXTRA_MOUNTS` | Extra host bind mounts (comma-separated `source:target[:opts]`) |
| `OPENCLAW_HOME_VOLUME` | Persist `/home/node` in a named Docker volume |
| `OPENCLAW_SANDBOX` | Opt in to sandbox bootstrap (`1`, `true`, `yes`, `on`) |
| `OPENCLAW_DOCKER_SOCKET` | Override Docker socket path |
| `OPENCLAW_DISABLE_BONJOUR` | Disable Bonjour/mDNS advertising (defaults to `1` for Docker) |
| `OPENCLAW_DISABLE_BUNDLED_SOURCE_OVERLAYS` | Disable bundled plugin source bind-mount overlays |
| `OTEL_EXPORTER_OTLP_ENDPOINT` | Shared OTLP/HTTP collector endpoint for OpenTelemetry export |
| `OTEL_EXPORTER_OTLP_*_ENDPOINT` | Signal-specific OTLP endpoints for traces, metrics, or logs |
| `OTEL_EXPORTER_OTLP_PROTOCOL` | OTLP protocol override. Only `http/protobuf` is supported today |
| `OTEL_SERVICE_NAME` | Service name used for OpenTelemetry resources |
| `OTEL_SEMCONV_STABILITY_OPT_IN` | Opt in to latest experimental GenAI semantic attributes |
| `OPENCLAW_OTEL_PRELOADED` | Skip starting a second OpenTelemetry SDK when one is preloaded |
Maintainers can test bundled plugin source against a packaged image by mounting
one plugin source directory over its packaged source path, for example
`OPENCLAW_EXTRA_MOUNTS=/path/to/fork/extensions/synology-chat:/app/extensions/synology-chat:ro`.
That mounted source directory overrides the matching compiled
`/app/dist/extensions/synology-chat` bundle for the same plugin id.
### Observability
OpenTelemetry export is outbound from the Gateway container to your OTLP
collector. It does not require a published Docker port. If you build the image
locally and want the bundled OpenTelemetry exporter available inside the image,
include its runtime dependencies:
```bash
export OPENCLAW_EXTENSIONS="diagnostics-otel"
export OTEL_EXPORTER_OTLP_ENDPOINT="http://otel-collector:4318"
export OTEL_SERVICE_NAME="openclaw-gateway"
./scripts/docker/setup.sh
```
The official OpenClaw Docker release image includes `diagnostics-otel`
dependencies. To enable export, allow and enable the `diagnostics-otel` plugin
in config, then set `diagnostics.otel.enabled=true` or use the config example in
[OpenTelemetry export](/gateway/opentelemetry). Collector auth headers are
configured through `diagnostics.otel.headers`, not through Docker environment
variables.
Prometheus metrics use the already-published Gateway port. Enable the
`diagnostics-prometheus` plugin, then scrape:
```text
http://<gateway-host>:18789/api/diagnostics/prometheus
```
The route is protected by Gateway authentication. Do not expose a separate
public `/metrics` port or unauthenticated reverse-proxy path. See
[Prometheus metrics](/gateway/prometheus).
### Health checks

View File

@@ -73,6 +73,12 @@ the installer, pass `--install-method git --no-onboard` or
npm i -g openclaw@latest
```
When `openclaw update` manages a global npm install, it first runs the normal
global install command. If that command fails, OpenClaw retries once with
`--omit=optional`. That retry helps hosts where native optional dependencies
cannot compile, while keeping the original failure visible if the fallback also
fails.
```bash
pnpm add -g openclaw@latest
```

View File

@@ -71,7 +71,10 @@ The migration sequence is:
7. Remove only with explicit breaking-release approval.
Deprecated records must include a warning start date, replacement, docs link,
and target removal date when known.
and final removal date no more than three months after the warning starts. Do
not add a deprecated compatibility path with an open-ended removal window unless
maintainers explicitly decide it is permanent compatibility and mark it `active`
instead.
## Current compatibility areas
@@ -79,15 +82,36 @@ Current compatibility records include:
- legacy broad SDK imports such as `openclaw/plugin-sdk/compat`
- legacy hook-only plugin shapes and `before_agent_start`
- legacy `activate(api)` plugin entrypoints while plugins migrate to
`register(api)`
- legacy SDK aliases such as `openclaw/extension-api`,
`openclaw/plugin-sdk/channel-runtime`, `openclaw/plugin-sdk/command-auth`
status builders, `openclaw/plugin-sdk/test-utils`, and the `ClawdbotConfig`
type alias
- bundled plugin allowlist and enablement behavior
- legacy provider/channel env-var manifest metadata
- legacy provider plugin hooks and type aliases while providers move to
explicit catalog, auth, thinking, replay, and transport hooks
- legacy runtime aliases such as `api.runtime.taskFlow`,
`api.runtime.subagent.getSession`, and `api.runtime.stt`
- legacy memory-plugin split registration while memory plugins move to
`registerMemoryCapability`
- legacy channel SDK helpers for native message schemas, mention gating,
inbound envelope formatting, and approval capability nesting
- activation hints that are being replaced by manifest contribution ownership
- `setup-api` runtime fallback while setup descriptors move to cold
`setup.requiresRuntime: false` metadata
- provider `discovery` hooks while provider catalog hooks move to
`catalog.run(...)`
- channel `showConfigured` / `showInSetup` metadata while channel packages move
to `openclaw.channel.exposure`
- legacy runtime-policy config keys while doctor migrates operators to
`agentRuntime`
- generated bundled channel config metadata fallback while registry-first
`channelConfigs` metadata lands
- the persisted plugin registry disable env while repair flows migrate operators
to `openclaw plugins registry --refresh` and `openclaw doctor --fix`
- persisted plugin registry disable and install-migration env flags while
repair flows migrate operators to `openclaw plugins registry --refresh` and
`openclaw doctor --fix`
New plugin code should prefer the replacement listed in the registry and in the
specific migration guide. Existing plugins can keep using a compatibility path

View File

@@ -420,8 +420,9 @@ The same rule applies to other bundled-helper families such as:
`plugin-sdk/nextcloud-talk`, `plugin-sdk/nostr`, `plugin-sdk/tlon`,
`plugin-sdk/twitch`,
`plugin-sdk/github-copilot-login`, `plugin-sdk/github-copilot-token`,
`plugin-sdk/diagnostics-otel`, `plugin-sdk/diffs`, `plugin-sdk/llm-task`,
`plugin-sdk/thread-ownership`, and `plugin-sdk/voice-call`
`plugin-sdk/diagnostics-otel`, `plugin-sdk/diagnostics-prometheus`,
`plugin-sdk/diffs`, `plugin-sdk/llm-task`, `plugin-sdk/thread-ownership`,
and `plugin-sdk/voice-call`
`plugin-sdk/github-copilot-token` currently exposes the narrow token-helper
surface `DEFAULT_COPILOT_API_BASE_URL`,

View File

@@ -271,7 +271,7 @@ For the plugin authoring guide, see [Plugin SDK overview](/plugins/sdk-overview)
| Line | `plugin-sdk/line`, `plugin-sdk/line-core`, `plugin-sdk/line-runtime`, `plugin-sdk/line-surface` | Bundled LINE helper/runtime surface |
| IRC | `plugin-sdk/irc`, `plugin-sdk/irc-surface` | Bundled IRC helper surface |
| Channel-specific helpers | `plugin-sdk/googlechat`, `plugin-sdk/zalouser`, `plugin-sdk/bluebubbles`, `plugin-sdk/bluebubbles-policy`, `plugin-sdk/mattermost`, `plugin-sdk/mattermost-policy`, `plugin-sdk/feishu-conversation`, `plugin-sdk/msteams`, `plugin-sdk/nextcloud-talk`, `plugin-sdk/nostr`, `plugin-sdk/tlon`, `plugin-sdk/twitch` | Bundled channel compatibility/helper seams |
| Auth/plugin-specific helpers | `plugin-sdk/github-copilot-login`, `plugin-sdk/github-copilot-token`, `plugin-sdk/diagnostics-otel`, `plugin-sdk/diffs`, `plugin-sdk/llm-task`, `plugin-sdk/thread-ownership`, `plugin-sdk/voice-call` | Bundled feature/plugin helper seams; `plugin-sdk/github-copilot-token` currently exports `DEFAULT_COPILOT_API_BASE_URL`, `deriveCopilotApiBaseUrlFromToken`, and `resolveCopilotApiToken` |
| Auth/plugin-specific helpers | `plugin-sdk/github-copilot-login`, `plugin-sdk/github-copilot-token`, `plugin-sdk/diagnostics-otel`, `plugin-sdk/diagnostics-prometheus`, `plugin-sdk/diffs`, `plugin-sdk/llm-task`, `plugin-sdk/thread-ownership`, `plugin-sdk/voice-call` | Bundled feature/plugin helper seams; `plugin-sdk/github-copilot-token` currently exports `DEFAULT_COPILOT_API_BASE_URL`, `deriveCopilotApiBaseUrlFromToken`, and `resolveCopilotApiToken` |
</Accordion>
</AccordionGroup>

View File

@@ -11,12 +11,13 @@ title: "Tests"
- `pnpm test:coverage`: Runs the unit suite with V8 coverage (via `vitest.unit.config.ts`). This is a loaded-file unit coverage gate, not whole-repo all-file coverage. Thresholds are 70% lines/functions/statements and 55% branches. Because `coverage.all` is false, the gate measures files loaded by the unit coverage suite instead of treating every split-lane source file as uncovered.
- `pnpm test:coverage:changed`: Runs unit coverage only for files changed since `origin/main`.
- `pnpm test:changed`: expands changed git paths into scoped Vitest lanes when the diff only touches routable source/test files. Config/setup changes still fall back to the native root projects run so wiring edits rerun broadly when needed.
- `pnpm test:changed:focused`: inner-loop changed test run. It only runs precise targets from direct test edits, sibling `*.test.ts` files, explicit source mappings, and the local import graph. Broad/config/package changes are skipped instead of expanding to the full changed-test fallback.
- `pnpm changed:lanes`: shows the architectural lanes triggered by the diff against `origin/main`.
- `pnpm check:changed`: runs the smart changed gate for the diff against `origin/main`. It runs core work with core test lanes, extension work with extension test lanes, test-only work with test typecheck/tests only, expands public Plugin SDK or plugin-contract changes to one extension validation pass, and keeps release metadata-only version bumps on targeted version/config/root-dependency checks.
- `pnpm test`: routes explicit file/directory targets through scoped Vitest lanes. Untargeted runs use fixed shard groups and expand to leaf configs for local parallel execution; the extension group always expands to the per-extension shard configs instead of one giant root-project process.
- Full, extension, and include-pattern shard runs update local timing data in `.artifacts/vitest-shard-timings.json`; later whole-config runs use those timings to balance slow and fast shards. Include-pattern CI shards append the shard name to the timing key, which keeps filtered shard timings visible without replacing whole-config timing data. Set `OPENCLAW_TEST_PROJECTS_TIMINGS=0` to ignore the local timing artifact.
- Selected `plugin-sdk` and `commands` test files now route through dedicated light lanes that keep only `test/setup.ts`, leaving runtime-heavy cases on their existing lanes.
- Selected `plugin-sdk` and `commands` helper source files also map `pnpm test:changed` to explicit sibling tests in those light lanes, so small helper edits avoid rerunning the heavy runtime-backed suites.
- Source files with sibling tests map to that sibling before falling back to wider directory globs. Helper edits under `test/helpers/channels` and `test/helpers/plugins` use a local import graph to run importing tests instead of broad-running every shard when the dependency path is precise.
- `auto-reply` now also splits into three dedicated configs (`core`, `top-level`, `reply`) so the reply harness does not dominate the lighter top-level status/token/helper tests.
- Base Vitest config now defaults to `pool: "threads"` and `isolate: false`, with the shared non-isolated runner enabled across the repo configs.
- `pnpm test:channels` runs `vitest.channels.config.ts`.

View File

@@ -1,177 +1,176 @@
---
summary: Per-agent sandbox + tool restrictions, precedence, and examples
title: Multi-agent sandbox & tools
read_when: “You want per-agent sandboxing or per-agent tool allow/deny policies in a multi-agent gateway.”
summary: "Per-agent sandbox + tool restrictions, precedence, and examples"
title: "Multi-agent sandbox and tools"
sidebarTitle: "Multi-agent sandbox and tools"
read_when: "You want per-agent sandboxing or per-agent tool allow/deny policies in a multi-agent gateway."
status: active
---
# Multi-Agent Sandbox & Tools Configuration
Each agent in a multi-agent setup can override the global sandbox and tool policy. This page covers per-agent configuration, precedence rules, and examples.
Each agent in a multi-agent setup can override the global sandbox and tool
policy. This page covers per-agent configuration, precedence rules, and
examples.
<CardGroup cols={3}>
<Card title="Sandboxing" href="/gateway/sandboxing">
Backends and modes — full sandbox reference.
</Card>
<Card title="Sandbox vs tool policy vs elevated" href="/gateway/sandbox-vs-tool-policy-vs-elevated">
Debug "why is this blocked?"
</Card>
<Card title="Elevated mode" href="/tools/elevated">
Elevated exec for trusted senders.
</Card>
</CardGroup>
- **Sandbox backends and modes**: see [Sandboxing](/gateway/sandboxing).
- **Debugging blocked tools**: see [Sandbox vs Tool Policy vs Elevated](/gateway/sandbox-vs-tool-policy-vs-elevated) and `openclaw sandbox explain`.
- **Elevated exec**: see [Elevated Mode](/tools/elevated).
Auth is per-agent: each agent reads from its own `agentDir` auth store at
`~/.openclaw/agents/<agentId>/agent/auth-profiles.json`.
Credentials are **not** shared between agents. Never reuse `agentDir` across agents.
If you want to share creds, copy `auth-profiles.json` into the other agent's `agentDir`.
<Warning>
Auth is per-agent: each agent reads from its own `agentDir` auth store at `~/.openclaw/agents/<agentId>/agent/auth-profiles.json`. Credentials are **not** shared between agents. Never reuse `agentDir` across agents. If you want to share creds, copy `auth-profiles.json` into the other agent's `agentDir`.
</Warning>
---
## Configuration Examples
## Configuration examples
### Example 1: Personal + Restricted Family Agent
```json
{
"agents": {
"list": [
{
"id": "main",
"default": true,
"name": "Personal Assistant",
"workspace": "~/.openclaw/workspace",
"sandbox": { "mode": "off" }
},
{
"id": "family",
"name": "Family Bot",
"workspace": "~/.openclaw/workspace-family",
"sandbox": {
"mode": "all",
"scope": "agent"
},
"tools": {
"allow": ["read"],
"deny": ["exec", "write", "edit", "apply_patch", "process", "browser"]
}
}
]
},
"bindings": [
<AccordionGroup>
<Accordion title="Example 1: Personal + restricted family agent">
```json
{
"agentId": "family",
"match": {
"provider": "whatsapp",
"accountId": "*",
"peer": {
"kind": "group",
"id": "120363424282127706@g.us"
"agents": {
"list": [
{
"id": "main",
"default": true,
"name": "Personal Assistant",
"workspace": "~/.openclaw/workspace",
"sandbox": { "mode": "off" }
},
{
"id": "family",
"name": "Family Bot",
"workspace": "~/.openclaw/workspace-family",
"sandbox": {
"mode": "all",
"scope": "agent"
},
"tools": {
"allow": ["read"],
"deny": ["exec", "write", "edit", "apply_patch", "process", "browser"]
}
}
]
},
"bindings": [
{
"agentId": "family",
"match": {
"provider": "whatsapp",
"accountId": "*",
"peer": {
"kind": "group",
"id": "120363424282127706@g.us"
}
}
}
]
}
```
**Result:**
- `main` agent: runs on host, full tool access.
- `family` agent: runs in Docker (one container per agent), only `read` tool.
</Accordion>
<Accordion title="Example 2: Work agent with shared sandbox">
```json
{
"agents": {
"list": [
{
"id": "personal",
"workspace": "~/.openclaw/workspace-personal",
"sandbox": { "mode": "off" }
},
{
"id": "work",
"workspace": "~/.openclaw/workspace-work",
"sandbox": {
"mode": "all",
"scope": "shared",
"workspaceRoot": "/tmp/work-sandboxes"
},
"tools": {
"allow": ["read", "write", "apply_patch", "exec"],
"deny": ["browser", "gateway", "discord"]
}
}
]
}
}
]
}
```
```
</Accordion>
<Accordion title="Example 2b: Global coding profile + messaging-only agent">
```json
{
"tools": { "profile": "coding" },
"agents": {
"list": [
{
"id": "support",
"tools": { "profile": "messaging", "allow": ["slack"] }
}
]
}
}
```
**Result:**
**Result:**
- `main` agent: Runs on host, full tool access
- `family` agent: Runs in Docker (one container per agent), only `read` tool
- default agents get coding tools.
- `support` agent is messaging-only (+ Slack tool).
---
### Example 2: Work Agent with Shared Sandbox
```json
{
"agents": {
"list": [
{
"id": "personal",
"workspace": "~/.openclaw/workspace-personal",
"sandbox": { "mode": "off" }
},
{
"id": "work",
"workspace": "~/.openclaw/workspace-work",
"sandbox": {
"mode": "all",
"scope": "shared",
"workspaceRoot": "/tmp/work-sandboxes"
</Accordion>
<Accordion title="Example 3: Different sandbox modes per agent">
```json
{
"agents": {
"defaults": {
"sandbox": {
"mode": "non-main",
"scope": "session"
}
},
"tools": {
"allow": ["read", "write", "apply_patch", "exec"],
"deny": ["browser", "gateway", "discord"]
}
"list": [
{
"id": "main",
"workspace": "~/.openclaw/workspace",
"sandbox": {
"mode": "off"
}
},
{
"id": "public",
"workspace": "~/.openclaw/workspace-public",
"sandbox": {
"mode": "all",
"scope": "agent"
},
"tools": {
"allow": ["read"],
"deny": ["exec", "write", "edit", "apply_patch"]
}
}
]
}
]
}
}
```
}
```
</Accordion>
</AccordionGroup>
---
### Example 2b: Global coding profile + messaging-only agent
```json
{
"tools": { "profile": "coding" },
"agents": {
"list": [
{
"id": "support",
"tools": { "profile": "messaging", "allow": ["slack"] }
}
]
}
}
```
**Result:**
- default agents get coding tools
- `support` agent is messaging-only (+ Slack tool)
---
### Example 3: Different Sandbox Modes per Agent
```json
{
"agents": {
"defaults": {
"sandbox": {
"mode": "non-main", // Global default
"scope": "session"
}
},
"list": [
{
"id": "main",
"workspace": "~/.openclaw/workspace",
"sandbox": {
"mode": "off" // Override: main never sandboxed
}
},
{
"id": "public",
"workspace": "~/.openclaw/workspace-public",
"sandbox": {
"mode": "all", // Override: public always sandboxed
"scope": "agent"
},
"tools": {
"allow": ["read"],
"deny": ["exec", "write", "edit", "apply_patch"]
}
}
]
}
}
```
---
## Configuration Precedence
## Configuration precedence
When both global (`agents.defaults.*`) and agent-specific (`agents.list[].*`) configs exist:
### Sandbox Config
### Sandbox config
Agent-specific settings override global:
@@ -185,139 +184,154 @@ agents.list[].sandbox.browser.* > agents.defaults.sandbox.browser.*
agents.list[].sandbox.prune.* > agents.defaults.sandbox.prune.*
```
**Notes:**
<Note>
`agents.list[].sandbox.{docker,browser,prune}.*` overrides `agents.defaults.sandbox.{docker,browser,prune}.*` for that agent (ignored when sandbox scope resolves to `"shared"`).
</Note>
- `agents.list[].sandbox.{docker,browser,prune}.*` overrides `agents.defaults.sandbox.{docker,browser,prune}.*` for that agent (ignored when sandbox scope resolves to `"shared"`).
### Tool Restrictions
### Tool restrictions
The filtering order is:
1. **Tool profile** (`tools.profile` or `agents.list[].tools.profile`)
2. **Provider tool profile** (`tools.byProvider[provider].profile` or `agents.list[].tools.byProvider[provider].profile`)
3. **Global tool policy** (`tools.allow` / `tools.deny`)
4. **Provider tool policy** (`tools.byProvider[provider].allow/deny`)
5. **Agent-specific tool policy** (`agents.list[].tools.allow/deny`)
6. **Agent provider policy** (`agents.list[].tools.byProvider[provider].allow/deny`)
7. **Sandbox tool policy** (`tools.sandbox.tools` or `agents.list[].tools.sandbox.tools`)
8. **Subagent tool policy** (`tools.subagents.tools`, if applicable)
<Steps>
<Step title="Tool profile">
`tools.profile` or `agents.list[].tools.profile`.
</Step>
<Step title="Provider tool profile">
`tools.byProvider[provider].profile` or `agents.list[].tools.byProvider[provider].profile`.
</Step>
<Step title="Global tool policy">
`tools.allow` / `tools.deny`.
</Step>
<Step title="Provider tool policy">
`tools.byProvider[provider].allow/deny`.
</Step>
<Step title="Agent-specific tool policy">
`agents.list[].tools.allow/deny`.
</Step>
<Step title="Agent provider policy">
`agents.list[].tools.byProvider[provider].allow/deny`.
</Step>
<Step title="Sandbox tool policy">
`tools.sandbox.tools` or `agents.list[].tools.sandbox.tools`.
</Step>
<Step title="Subagent tool policy">
`tools.subagents.tools`, if applicable.
</Step>
</Steps>
Each level can further restrict tools, but cannot grant back denied tools from earlier levels.
If `agents.list[].tools.sandbox.tools` is set, it replaces `tools.sandbox.tools` for that agent.
If `agents.list[].tools.profile` is set, it overrides `tools.profile` for that agent.
Provider tool keys accept either `provider` (e.g. `google-antigravity`) or `provider/model` (e.g. `openai/gpt-5.4`).
If any explicit allowlist in that chain leaves the run with no callable tools,
OpenClaw stops before submitting the prompt to the model. This is intentional:
an agent configured with a missing tool such as
`agents.list[].tools.allow: ["query_db"]` should fail loudly until the plugin
that registers `query_db` is enabled, not continue as a text-only agent.
<AccordionGroup>
<Accordion title="Precedence rules">
- Each level can further restrict tools, but cannot grant back denied tools from earlier levels.
- If `agents.list[].tools.sandbox.tools` is set, it replaces `tools.sandbox.tools` for that agent.
- If `agents.list[].tools.profile` is set, it overrides `tools.profile` for that agent.
- Provider tool keys accept either `provider` (e.g. `google-antigravity`) or `provider/model` (e.g. `openai/gpt-5.4`).
</Accordion>
<Accordion title="Empty allowlist behavior">
If any explicit allowlist in that chain leaves the run with no callable tools, OpenClaw stops before submitting the prompt to the model. This is intentional: an agent configured with a missing tool such as `agents.list[].tools.allow: ["query_db"]` should fail loudly until the plugin that registers `query_db` is enabled, not continue as a text-only agent.
</Accordion>
</AccordionGroup>
Tool policies support `group:*` shorthands that expand to multiple tools. See [Tool groups](/gateway/sandbox-vs-tool-policy-vs-elevated#tool-groups-shorthands) for the full list.
Per-agent elevated overrides (`agents.list[].tools.elevated`) can further restrict elevated exec for specific agents. See [Elevated Mode](/tools/elevated) for details.
Per-agent elevated overrides (`agents.list[].tools.elevated`) can further restrict elevated exec for specific agents. See [Elevated mode](/tools/elevated) for details.
---
## Migration from Single Agent
## Migration from single agent
**Before (single agent):**
```json
{
"agents": {
"defaults": {
"workspace": "~/.openclaw/workspace",
"sandbox": {
"mode": "non-main"
}
}
},
"tools": {
"sandbox": {
<Tabs>
<Tab title="Before (single agent)">
```json
{
"agents": {
"defaults": {
"workspace": "~/.openclaw/workspace",
"sandbox": {
"mode": "non-main"
}
}
},
"tools": {
"allow": ["read", "write", "apply_patch", "exec"],
"deny": []
"sandbox": {
"tools": {
"allow": ["read", "write", "apply_patch", "exec"],
"deny": []
}
}
}
}
}
}
```
**After (multi-agent with different profiles):**
```json
{
"agents": {
"list": [
{
"id": "main",
"default": true,
"workspace": "~/.openclaw/workspace",
"sandbox": { "mode": "off" }
```
</Tab>
<Tab title="After (multi-agent)">
```json
{
"agents": {
"list": [
{
"id": "main",
"default": true,
"workspace": "~/.openclaw/workspace",
"sandbox": { "mode": "off" }
}
]
}
]
}
}
```
}
```
</Tab>
</Tabs>
<Note>
Legacy `agent.*` configs are migrated by `openclaw doctor`; prefer `agents.defaults` + `agents.list` going forward.
</Note>
---
## Tool Restriction Examples
## Tool restriction examples
### Read-only Agent
<Tabs>
<Tab title="Read-only agent">
```json
{
"tools": {
"allow": ["read"],
"deny": ["exec", "write", "edit", "apply_patch", "process"]
}
}
```
</Tab>
<Tab title="Safe execution (no file modifications)">
```json
{
"tools": {
"allow": ["read", "exec", "process"],
"deny": ["write", "edit", "apply_patch", "browser", "gateway"]
}
}
```
</Tab>
<Tab title="Communication-only">
```json
{
"tools": {
"sessions": { "visibility": "tree" },
"allow": ["sessions_list", "sessions_send", "sessions_history", "session_status"],
"deny": ["exec", "write", "edit", "apply_patch", "read", "browser"]
}
}
```
```json
{
"tools": {
"allow": ["read"],
"deny": ["exec", "write", "edit", "apply_patch", "process"]
}
}
```
`sessions_history` in this profile still returns a bounded, sanitized recall view rather than a raw transcript dump. Assistant recall strips thinking tags, `<relevant-memories>` scaffolding, plain-text tool-call XML payloads (including `<tool_call>...</tool_call>`, `<function_call>...</function_call>`, `<tool_calls>...</tool_calls>`, `<function_calls>...</function_calls>`, and truncated tool-call blocks), downgraded tool-call scaffolding, leaked ASCII/full-width model control tokens, and malformed MiniMax tool-call XML before redaction/truncation.
### Safe Execution Agent (no file modifications)
```json
{
"tools": {
"allow": ["read", "exec", "process"],
"deny": ["write", "edit", "apply_patch", "browser", "gateway"]
}
}
```
### Communication-only Agent
```json
{
"tools": {
"sessions": { "visibility": "tree" },
"allow": ["sessions_list", "sessions_send", "sessions_history", "session_status"],
"deny": ["exec", "write", "edit", "apply_patch", "read", "browser"]
}
}
```
`sessions_history` in this profile still returns a bounded, sanitized recall
view rather than a raw transcript dump. Assistant recall strips thinking tags,
`<relevant-memories>` scaffolding, plain-text tool-call XML payloads
(including `<tool_call>...</tool_call>`,
`<function_call>...</function_call>`, `<tool_calls>...</tool_calls>`,
`<function_calls>...</function_calls>`, and truncated tool-call blocks),
downgraded tool-call scaffolding, leaked ASCII/full-width model control
tokens, and malformed MiniMax tool-call XML before redaction/truncation.
</Tab>
</Tabs>
---
## Common Pitfall: "non-main"
## Common pitfall: "non-main"
`agents.defaults.sandbox.mode: "non-main"` is based on `session.mainKey` (default `"main"`),
not the agent id. Group/channel sessions always get their own keys, so they
are treated as non-main and will be sandboxed. If you want an agent to never
sandbox, set `agents.list[].sandbox.mode: "off"`.
<Warning>
`agents.defaults.sandbox.mode: "non-main"` is based on `session.mainKey` (default `"main"`), not the agent id. Group/channel sessions always get their own keys, so they are treated as non-main and will be sandboxed. If you want an agent to never sandbox, set `agents.list[].sandbox.mode: "off"`.
</Warning>
---
@@ -325,55 +339,55 @@ sandbox, set `agents.list[].sandbox.mode: "off"`.
After configuring multi-agent sandbox and tools:
1. **Check agent resolution:**
```exec
openclaw agents list --bindings
```
2. **Verify sandbox containers:**
```exec
docker ps --filter "name=openclaw-sbx-"
```
3. **Test tool restrictions:**
- Send a message requiring restricted tools
- Verify the agent cannot use denied tools
4. **Monitor logs:**
```exec
tail -f "${OPENCLAW_STATE_DIR:-$HOME/.openclaw}/logs/gateway.log" | grep -E "routing|sandbox|tools"
```
<Steps>
<Step title="Check agent resolution">
```bash
openclaw agents list --bindings
```
</Step>
<Step title="Verify sandbox containers">
```bash
docker ps --filter "name=openclaw-sbx-"
```
</Step>
<Step title="Test tool restrictions">
- Send a message requiring restricted tools.
- Verify the agent cannot use denied tools.
</Step>
<Step title="Monitor logs">
```bash
tail -f "${OPENCLAW_STATE_DIR:-$HOME/.openclaw}/logs/gateway.log" | grep -E "routing|sandbox|tools"
```
</Step>
</Steps>
---
## Troubleshooting
### Agent not sandboxed despite `mode: "all"`
- Check if there's a global `agents.defaults.sandbox.mode` that overrides it
- Agent-specific config takes precedence, so set `agents.list[].sandbox.mode: "all"`
### Tools still available despite deny list
- Check tool filtering order: global → agent → sandbox → subagent
- Each level can only further restrict, not grant back
- Verify with logs: `[tools] filtering tools for agent:${agentId}`
### Container not isolated per agent
- Set `scope: "agent"` in agent-specific sandbox config
- Default is `"session"` which creates one container per session
<AccordionGroup>
<Accordion title="Agent not sandboxed despite `mode: 'all'`">
- Check if there's a global `agents.defaults.sandbox.mode` that overrides it.
- Agent-specific config takes precedence, so set `agents.list[].sandbox.mode: "all"`.
</Accordion>
<Accordion title="Tools still available despite deny list">
- Check tool filtering order: global → agent → sandbox → subagent.
- Each level can only further restrict, not grant back.
- Verify with logs: `[tools] filtering tools for agent:${agentId}`.
</Accordion>
<Accordion title="Container not isolated per agent">
- Set `scope: "agent"` in agent-specific sandbox config.
- Default is `"session"` which creates one container per session.
</Accordion>
</AccordionGroup>
---
## Related
- [Sandboxing](/gateway/sandboxing) -- full sandbox reference (modes, scopes, backends, images)
- [Sandbox vs Tool Policy vs Elevated](/gateway/sandbox-vs-tool-policy-vs-elevated) -- debugging "why is this blocked?"
- [Elevated Mode](/tools/elevated)
- [Multi-Agent Routing](/concepts/multi-agent)
- [Sandbox Configuration](/gateway/config-agents#agentsdefaultssandbox)
- [Session Management](/concepts/session)
- [Elevated mode](/tools/elevated)
- [Multi-agent routing](/concepts/multi-agent)
- [Sandbox configuration](/gateway/config-agents#agentsdefaultssandbox)
- [Sandbox vs tool policy vs elevated](/gateway/sandbox-vs-tool-policy-vs-elevated) — debugging "why is this blocked?"
- [Sandboxing](/gateway/sandboxing) — full sandbox reference (modes, scopes, backends, images)
- [Session management](/concepts/session)

View File

@@ -70,6 +70,9 @@ Gateway startup runtime-dependency repair.
Explicit disablement still wins: `plugins.entries.<id>.enabled: false`,
`plugins.deny`, `plugins.enabled: false`, and `channels.<id>.enabled: false`
prevent automatic bundled runtime-dependency repair for that plugin/channel.
A non-empty `plugins.allow` also bounds default-enabled bundled runtime-dependency
repair; explicit bundled channel enablement (`channels.<id>.enabled: true`) can
still repair that channel's plugin dependencies.
External plugins and custom load paths must still be installed through
`openclaw plugins install`.
@@ -87,6 +90,28 @@ Both show up under `openclaw plugins list`. See [Plugin Bundles](/plugins/bundle
If you are writing a native plugin, start with [Building Plugins](/plugins/building-plugins)
and the [Plugin SDK Overview](/plugins/sdk-overview).
## Package Entrypoints
Native plugin npm packages must declare `openclaw.extensions` in `package.json`.
Each entry must stay inside the package directory and resolve to a readable
runtime file, or to a TypeScript source file with an inferred built JavaScript
peer such as `src/index.ts` to `dist/index.js`.
Use `openclaw.runtimeExtensions` when published runtime files do not live at the
same paths as the source entries. When present, `runtimeExtensions` must contain
exactly one entry for every `extensions` entry. Mismatched lists fail install and
plugin discovery rather than silently falling back to source paths.
```json
{
"name": "@acme/openclaw-plugin",
"openclaw": {
"extensions": ["./src/index.ts"],
"runtimeExtensions": ["./dist/index.js"]
}
}
```
## Official plugins
### Installable (npm)
@@ -199,6 +224,16 @@ OpenClaw scans for plugins in this order (first match wins):
</Step>
</Steps>
Packaged installs and Docker images normally resolve bundled plugins from the
compiled `dist/extensions` tree. If a bundled plugin source directory is
bind-mounted over the matching packaged source path, for example
`/app/extensions/synology-chat`, OpenClaw treats that mounted source directory
as a bundled source overlay and discovers it before the packaged
`/app/dist/extensions/synology-chat` bundle. This keeps maintainer container
loops working without switching every bundled plugin back to TypeScript source.
Set `OPENCLAW_DISABLE_BUNDLED_SOURCE_OVERLAYS=1` to force packaged dist bundles
even when source overlay mounts are present.
### Enablement rules
- `plugins.enabled: false` disables all plugins
@@ -337,8 +372,9 @@ plugins. It is not supported with `--link`, which reuses the source path instead
of copying over a managed install target.
When `plugins.allow` is already set, `openclaw plugins install` adds the
installed plugin id to that allowlist before enabling it, so installs are
immediately loadable after restart.
installed plugin id to that allowlist before enabling it. If the same plugin id
is present in `plugins.deny`, install removes that stale deny entry so the
explicit install is immediately loadable after restart.
OpenClaw keeps a persisted local plugin registry as the cold read model for
plugin inventory, contribution ownership, and startup planning. Install, update,

View File

@@ -2,6 +2,7 @@
"id": "anthropic",
"enabledByDefault": true,
"providers": ["anthropic"],
"providerDiscoveryEntry": "./provider-discovery.ts",
"modelSupport": {
"modelPrefixes": ["claude-"]
},

View File

@@ -0,0 +1,35 @@
import type { ProviderPlugin } from "openclaw/plugin-sdk/provider-model-shared";
import { readClaudeCliCredentialsForRuntime } from "./cli-auth-seam.js";
const CLAUDE_CLI_BACKEND_ID = "claude-cli";
function resolveClaudeCliSyntheticAuth() {
const credential = readClaudeCliCredentialsForRuntime();
if (!credential) {
return undefined;
}
return credential.type === "oauth"
? {
apiKey: credential.access,
source: "Claude CLI native auth",
mode: "oauth" as const,
expiresAt: credential.expires,
}
: {
apiKey: credential.token,
source: "Claude CLI native auth",
mode: "token" as const,
expiresAt: credential.expires,
};
}
export const anthropicProviderDiscovery: ProviderPlugin = {
id: CLAUDE_CLI_BACKEND_ID,
label: "Claude CLI",
docsPath: "/providers/models",
auth: [],
resolveSyntheticAuth: ({ provider }) =>
provider === CLAUDE_CLI_BACKEND_ID ? resolveClaudeCliSyntheticAuth() : undefined,
};
export default anthropicProviderDiscovery;

View File

@@ -44,7 +44,7 @@ vi.mock("./probe.js", () => ({
getCachedBlueBubblesPrivateApiStatus: vi.fn().mockReturnValue(null),
}));
const freshActionsModulePath = "./actions.js?actions-test";
const freshActionsModulePath = "./actions.js";
const { bluebubblesMessageActions } = await import(freshActionsModulePath);
describe("bluebubblesMessageActions", () => {

View File

@@ -1,5 +1,8 @@
import { definePluginEntry } from "openclaw/plugin-sdk/plugin-entry";
import { registerUnhandledRejectionHandler } from "openclaw/plugin-sdk/runtime";
import {
registerUncaughtExceptionHandler,
registerUnhandledRejectionHandler,
} from "openclaw/plugin-sdk/runtime";
import { startGatewayBonjourAdvertiser } from "./src/advertiser.js";
function formatBonjourInstanceName(displayName: string) {
@@ -33,7 +36,11 @@ export default definePluginEntry({
cliPath: ctx.cliPath,
minimal: ctx.minimal,
},
{ logger: api.logger, registerUnhandledRejectionHandler },
{
logger: api.logger,
registerUncaughtExceptionHandler,
registerUnhandledRejectionHandler,
},
);
return { stop: advertiser.stop };
},

View File

@@ -5,6 +5,7 @@ const mocks = vi.hoisted(() => ({
createService: vi.fn(),
getResponder: vi.fn(),
shutdown: vi.fn(),
registerUncaughtExceptionHandler: vi.fn(),
registerUnhandledRejectionHandler: vi.fn(),
logger: {
info: vi.fn(),
@@ -12,7 +13,14 @@ const mocks = vi.hoisted(() => ({
debug: vi.fn(),
},
}));
const { createService, getResponder, shutdown, registerUnhandledRejectionHandler, logger } = mocks;
const {
createService,
getResponder,
shutdown,
registerUncaughtExceptionHandler,
registerUnhandledRejectionHandler,
logger,
} = mocks;
const asString = (value: unknown, fallback: string) =>
typeof value === "string" && value.trim() ? value : fallback;
@@ -77,6 +85,7 @@ const startAdvertiser = (
): ReturnType<StartGatewayBonjourAdvertiser> =>
startGatewayBonjourAdvertiser(opts, {
logger,
registerUncaughtExceptionHandler: (handler) => registerUncaughtExceptionHandler(handler),
registerUnhandledRejectionHandler: (handler) => registerUnhandledRejectionHandler(handler),
});
@@ -103,6 +112,7 @@ describe("gateway bonjour advertiser", () => {
createService.mockClear();
getResponder.mockReset();
shutdown.mockClear();
registerUncaughtExceptionHandler.mockClear();
registerUnhandledRejectionHandler.mockClear();
logger.info.mockClear();
logger.warn.mockClear();
@@ -220,7 +230,7 @@ describe("gateway bonjour advertiser", () => {
await started.stop();
});
it("does not install a process-level unhandled rejection handler by default", async () => {
it("does not install process-level ciao handlers by default", async () => {
enableAdvertiserUnitMode();
const destroy = vi.fn().mockResolvedValue(undefined);
@@ -237,11 +247,12 @@ describe("gateway bonjour advertiser", () => {
);
expect(processOn).not.toHaveBeenCalledWith("unhandledRejection", expect.any(Function));
expect(processOn).not.toHaveBeenCalledWith("uncaughtException", expect.any(Function));
await started.stop();
});
it("cleans up unhandled rejection handler after shutdown", async () => {
it("cleans up ciao process handlers after shutdown", async () => {
enableAdvertiserUnitMode();
const destroy = vi.fn().mockResolvedValue(undefined);
@@ -252,10 +263,14 @@ describe("gateway bonjour advertiser", () => {
});
mockCiaoService({ advertise, destroy });
const cleanup = vi.fn(() => {
order.push("cleanup");
const cleanupException = vi.fn(() => {
order.push("cleanup-exception");
});
registerUnhandledRejectionHandler.mockImplementation(() => cleanup);
const cleanupRejection = vi.fn(() => {
order.push("cleanup-rejection");
});
registerUncaughtExceptionHandler.mockImplementation(() => cleanupException);
registerUnhandledRejectionHandler.mockImplementation(() => cleanupRejection);
const started = await startAdvertiser({
gatewayPort: 18789,
@@ -264,9 +279,11 @@ describe("gateway bonjour advertiser", () => {
await started.stop();
expect(registerUncaughtExceptionHandler).toHaveBeenCalledTimes(1);
expect(registerUnhandledRejectionHandler).toHaveBeenCalledTimes(1);
expect(cleanup).toHaveBeenCalledTimes(1);
expect(order).toEqual(["shutdown", "cleanup"]);
expect(cleanupException).toHaveBeenCalledTimes(1);
expect(cleanupRejection).toHaveBeenCalledTimes(1);
expect(order).toEqual(["shutdown", "cleanup-exception", "cleanup-rejection"]);
});
it("logs ciao handler classifications at the bonjour caller", async () => {
@@ -284,7 +301,11 @@ describe("gateway bonjour advertiser", () => {
const handler = registerUnhandledRejectionHandler.mock.calls[0]?.[0] as
| ((reason: unknown) => boolean)
| undefined;
const exceptionHandler = registerUncaughtExceptionHandler.mock.calls[0]?.[0] as
| ((reason: unknown) => boolean)
| undefined;
expect(handler).toBeTypeOf("function");
expect(exceptionHandler).toBeTypeOf("function");
expect(handler?.(new Error("CIAO PROBING CANCELLED"))).toBe(true);
expect(logger.debug).toHaveBeenCalledWith(
@@ -299,6 +320,21 @@ describe("gateway bonjour advertiser", () => {
expect.stringContaining("suppressing ciao interface assertion"),
);
logger.warn.mockClear();
expect(
exceptionHandler?.(
Object.assign(
new Error(
"IP address version must match. Netmask cannot have a version different from the address!",
),
{ name: "AssertionError" },
),
),
).toBe(true);
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining("suppressing ciao netmask assertion"),
);
await started.stop();
});

View File

@@ -1,6 +1,6 @@
import type { PluginLogger } from "openclaw/plugin-sdk/plugin-entry";
import { isTruthyEnvValue } from "openclaw/plugin-sdk/runtime-env";
import { classifyCiaoUnhandledRejection } from "./ciao.js";
import { classifyCiaoProcessError, type CiaoProcessErrorClassification } from "./ciao.js";
import { formatBonjourError } from "./errors.js";
export type GatewayBonjourAdvertiser = {
@@ -50,6 +50,7 @@ type CiaoModule = {
type BonjourCycle = {
responder: BonjourResponder;
services: Array<{ label: string; svc: BonjourService }>;
cleanupUncaughtException?: () => void;
cleanupUnhandledRejection?: () => void;
};
@@ -59,10 +60,12 @@ type ServiceStateTracker = {
};
type ConsoleLogFn = (...args: unknown[]) => void;
type UncaughtExceptionHandler = (error: unknown) => boolean;
type UnhandledRejectionHandler = (reason: unknown) => boolean;
type BonjourAdvertiserDeps = {
logger?: Pick<PluginLogger, "info" | "warn" | "debug">;
registerUncaughtExceptionHandler?: (handler: UncaughtExceptionHandler) => () => void;
registerUnhandledRejectionHandler?: (handler: UnhandledRejectionHandler) => () => void;
};
@@ -175,19 +178,22 @@ export async function startGatewayBonjourAdvertiser(
};
const { getResponder, Protocol } = await loadCiaoModule();
const restoreConsoleLog = installCiaoConsoleNoiseFilter();
let requestCiaoRecovery: ((classification: CiaoProcessErrorClassification) => void) | undefined;
const handleCiaoUnhandledRejection = (reason: unknown): boolean => {
const classification = classifyCiaoUnhandledRejection(reason);
const handleCiaoProcessError = (reason: unknown): boolean => {
const classification = classifyCiaoProcessError(reason);
if (!classification) {
return false;
}
if (classification.kind === "interface-assertion") {
logger.warn(`bonjour: suppressing ciao interface assertion: ${classification.formatted}`);
return true;
if (classification.kind === "cancellation") {
logger.debug(`bonjour: ignoring unhandled ciao rejection: ${classification.formatted}`);
} else {
const label =
classification.kind === "netmask-assertion" ? "netmask assertion" : "interface assertion";
logger.warn(`bonjour: suppressing ciao ${label}: ${classification.formatted}`);
requestCiaoRecovery?.(classification);
}
logger.debug(`bonjour: ignoring unhandled ciao rejection: ${classification.formatted}`);
return true;
};
@@ -255,10 +261,14 @@ export async function startGatewayBonjourAdvertiser(
const cleanupUnhandledRejection =
services.length > 0 && deps.registerUnhandledRejectionHandler
? deps.registerUnhandledRejectionHandler(handleCiaoUnhandledRejection)
? deps.registerUnhandledRejectionHandler(handleCiaoProcessError)
: undefined;
const cleanupUncaughtException =
services.length > 0 && deps.registerUncaughtExceptionHandler
? deps.registerUncaughtExceptionHandler(handleCiaoProcessError)
: undefined;
return { responder, services, cleanupUnhandledRejection };
return { responder, services, cleanupUncaughtException, cleanupUnhandledRejection };
}
async function stopCycle(cycle: BonjourCycle | null, opts?: { shutdownResponder?: boolean }) {
@@ -279,6 +289,7 @@ export async function startGatewayBonjourAdvertiser(
} catch {
/* ignore */
} finally {
cycle.cleanupUncaughtException?.();
cycle.cleanupUnhandledRejection?.();
}
}
@@ -388,6 +399,9 @@ export async function startGatewayBonjourAdvertiser(
});
return recreatePromise;
};
requestCiaoRecovery = (classification) => {
void recreateAdvertiser(`ciao ${classification.kind}: ${classification.formatted}`);
};
const lastRepairAttempt = new Map<string, number>();
const watchdog = setInterval(() => {

View File

@@ -21,6 +21,23 @@ describe("bonjour-ciao", () => {
});
});
it("classifies ciao netmask assertions separately from side effects", () => {
expect(
classifyCiaoUnhandledRejection(
Object.assign(
new Error(
"IP address version must match. Netmask cannot have a version different from the address!",
),
{ name: "AssertionError" },
),
),
).toEqual({
kind: "netmask-assertion",
formatted:
"AssertionError: IP address version must match. Netmask cannot have a version different from the address!",
});
});
it("suppresses ciao announcement cancellation rejections", () => {
expect(ignoreCiaoUnhandledRejection(new Error("Ciao announcement cancelled by shutdown"))).toBe(
true,
@@ -44,6 +61,17 @@ describe("bonjour-ciao", () => {
expect(ignoreCiaoUnhandledRejection(error)).toBe(true);
});
it("suppresses ciao netmask assertion errors as non-fatal", () => {
const error = Object.assign(
new Error(
"IP address version must match. Netmask cannot have a version different from the address!",
),
{ name: "AssertionError" },
);
expect(ignoreCiaoUnhandledRejection(error)).toBe(true);
});
it("keeps unrelated rejections visible", () => {
expect(ignoreCiaoUnhandledRejection(new Error("boom"))).toBe(false);
});

View File

@@ -2,15 +2,16 @@ import { formatBonjourError } from "./errors.js";
const CIAO_CANCELLATION_MESSAGE_RE = /^CIAO (?:ANNOUNCEMENT|PROBING) CANCELLED\b/u;
const CIAO_INTERFACE_ASSERTION_MESSAGE_RE =
/REACHED ILLEGAL STATE!?\s+IPV4 ADDRESS CHANGE FROM DEFINED TO UNDEFINED!?/u;
/REACHED ILLEGAL STATE!?\s+IPV4 ADDRESS CHANGE FROM (?:DEFINED TO UNDEFINED|UNDEFINED TO DEFINED)!?/u;
const CIAO_NETMASK_ASSERTION_MESSAGE_RE =
/IP ADDRESS VERSION MUST MATCH\.\s+NETMASK CANNOT HAVE A VERSION DIFFERENT FROM THE ADDRESS!?/u;
export type CiaoUnhandledRejectionClassification =
export type CiaoProcessErrorClassification =
| { kind: "cancellation"; formatted: string }
| { kind: "interface-assertion"; formatted: string };
| { kind: "interface-assertion"; formatted: string }
| { kind: "netmask-assertion"; formatted: string };
export function classifyCiaoUnhandledRejection(
reason: unknown,
): CiaoUnhandledRejectionClassification | null {
export function classifyCiaoProcessError(reason: unknown): CiaoProcessErrorClassification | null {
const formatted = formatBonjourError(reason);
const message = formatted.toUpperCase();
if (CIAO_CANCELLATION_MESSAGE_RE.test(message)) {
@@ -19,9 +20,14 @@ export function classifyCiaoUnhandledRejection(
if (CIAO_INTERFACE_ASSERTION_MESSAGE_RE.test(message)) {
return { kind: "interface-assertion", formatted };
}
if (CIAO_NETMASK_ASSERTION_MESSAGE_RE.test(message)) {
return { kind: "netmask-assertion", formatted };
}
return null;
}
export const classifyCiaoUnhandledRejection = classifyCiaoProcessError;
export function ignoreCiaoUnhandledRejection(reason: unknown): boolean {
return classifyCiaoUnhandledRejection(reason) !== null;
return classifyCiaoProcessError(reason) !== null;
}

View File

@@ -5,7 +5,7 @@
"description": "OpenClaw Brave plugin",
"type": "module",
"dependencies": {
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*"

View File

@@ -1,6 +1,7 @@
{
"id": "browser",
"enabledByDefault": true,
"commandAliases": [{ "name": "browser" }],
"skills": ["./skills"],
"configSchema": {
"type": "object",

View File

@@ -9,7 +9,7 @@
"commander": "^14.0.3",
"express": "5.2.1",
"playwright-core": "1.59.1",
"typebox": "1.1.32",
"typebox": "1.1.33",
"undici": "8.1.0",
"ws": "^8.20.0"
},

View File

@@ -0,0 +1 @@
export * from "openclaw/plugin-sdk/diagnostics-prometheus";

View File

@@ -0,0 +1,20 @@
import { definePluginEntry } from "openclaw/plugin-sdk/plugin-entry";
import { createDiagnosticsPrometheusExporter } from "./src/service.js";
const exporter = createDiagnosticsPrometheusExporter();
export default definePluginEntry({
id: "diagnostics-prometheus",
name: "Diagnostics Prometheus",
description: "Expose OpenClaw diagnostics metrics in Prometheus text format",
register(api) {
api.registerService(exporter.service);
api.registerHttpRoute({
path: "/api/diagnostics/prometheus",
auth: "gateway",
match: "exact",
gatewayRuntimeScopeSurface: "trusted-operator",
handler: exporter.handler,
});
},
});

View File

@@ -0,0 +1,8 @@
{
"id": "diagnostics-prometheus",
"configSchema": {
"type": "object",
"additionalProperties": false,
"properties": {}
}
}

View File

@@ -0,0 +1,24 @@
{
"name": "@openclaw/diagnostics-prometheus",
"version": "2026.4.25",
"description": "OpenClaw diagnostics Prometheus exporter",
"type": "module",
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*"
},
"openclaw": {
"extensions": [
"./index.ts"
],
"compat": {
"pluginApi": ">=2026.4.25"
},
"build": {
"openclawVersion": "2026.4.25"
},
"release": {
"publishToClawHub": true,
"publishToNpm": true
}
}
}

View File

@@ -0,0 +1,169 @@
import { describe, expect, it, vi } from "vitest";
import type { DiagnosticEventMetadata, DiagnosticEventPayload } from "../api.js";
import { createDiagnosticsPrometheusExporter, __test__ } from "./service.js";
const trusted: DiagnosticEventMetadata = Object.freeze({ trusted: true });
const untrusted: DiagnosticEventMetadata = Object.freeze({ trusted: false });
function baseEvent(): Pick<DiagnosticEventPayload, "seq" | "ts"> {
return { seq: 1, ts: 1700000000000 };
}
describe("diagnostics-prometheus service", () => {
  // A trusted run.completed event must yield counter + histogram series built
  // only from low-cardinality labels; raw run/session identifiers must never
  // leak into the scrape output.
  it("records trusted run metrics without raw diagnostic identifiers", () => {
    const store = __test__.createPrometheusMetricStore();
    __test__.recordDiagnosticEvent(
      store,
      {
        ...baseEvent(),
        type: "run.completed",
        runId: "run-should-not-export",
        sessionKey: "session-should-not-export",
        provider: "openai",
        model: "gpt-5.4",
        channel: "discord",
        trigger: "message",
        durationMs: 1500,
        outcome: "completed",
      },
      trusted,
    );
    const rendered = __test__.renderPrometheusMetrics(store);
    expect(rendered).toContain("# TYPE openclaw_run_completed_total counter");
    expect(rendered).toContain(
      'openclaw_run_completed_total{channel="discord",model="gpt-5.4",outcome="completed",provider="openai",trigger="message"} 1',
    );
    // 1500 ms must surface as 1.5 in the duration histogram sum.
    expect(rendered).toContain(
      'openclaw_run_duration_seconds_sum{channel="discord",model="gpt-5.4",outcome="completed",provider="openai",trigger="message"} 1.5',
    );
    expect(rendered).not.toContain("run-should-not-export");
    expect(rendered).not.toContain("session-should-not-export");
  });
  // Events carrying untrusted metadata are ignored entirely: no series at all.
  it("drops untrusted plugin-emitted diagnostic events", () => {
    const store = __test__.createPrometheusMetricStore();
    __test__.recordDiagnosticEvent(
      store,
      {
        ...baseEvent(),
        type: "model.call.completed",
        runId: "run-1",
        callId: "call-1",
        provider: "openai",
        model: "gpt-5.4",
        durationMs: 10,
      },
      untrusted,
    );
    expect(__test__.renderPrometheusMetrics(store)).toBe("");
  });
  // Hostile label inputs (newlines, secret-looking strings) must be replaced
  // by safe fallback values rather than exported verbatim.
  it("redacts and bounds label values", () => {
    const store = __test__.createPrometheusMetricStore();
    __test__.recordDiagnosticEvent(
      store,
      {
        ...baseEvent(),
        type: "tool.execution.error",
        toolName: "shell\nbad",
        durationMs: 25,
        errorCategory: "Bearer sk-secret-token-value",
      },
      trusted,
    );
    const rendered = __test__.renderPrometheusMetrics(store);
    expect(rendered).toContain(
      'openclaw_tool_execution_total{error_category="other",outcome="error",params_kind="unknown",tool="tool"} 1',
    );
    expect(rendered).not.toContain("Bearer");
    expect(rendered).not.toContain("sk-secret");
  });
  // Exceeding the series cap (2048) must be survivable and observable via the
  // dedicated dropped-series counter.
  it("caps metric series growth and reports dropped series", () => {
    const store = __test__.createPrometheusMetricStore();
    for (let index = 0; index < 2100; index += 1) {
      __test__.recordDiagnosticEvent(
        store,
        {
          ...baseEvent(),
          type: "model.call.completed",
          runId: `run-${index}`,
          callId: `call-${index}`,
          provider: "openai",
          // Unique model per iteration forces a new series each time.
          model: `model.${index}`,
          durationMs: 10,
        },
        trusted,
      );
    }
    const rendered = __test__.renderPrometheusMetrics(store);
    expect(rendered).toContain("# TYPE openclaw_prometheus_series_dropped_total counter");
    expect(rendered).toContain("openclaw_prometheus_series_dropped_total ");
  });
  // End-to-end lifecycle: start() subscribes to the diagnostics bus, events
  // flow into the store, stop() unsubscribes and clears all series.
  it("subscribes to internal diagnostics and renders scrape text", () => {
    const listeners: Array<
      (event: DiagnosticEventPayload, metadata: DiagnosticEventMetadata) => void
    > = [];
    const emitted: unknown[] = [];
    const exporter = createDiagnosticsPrometheusExporter();
    const unsubscribe = vi.fn();
    exporter.service.start({
      config: {} as never,
      stateDir: "/tmp/openclaw-prometheus-test",
      logger: {
        info: vi.fn(),
        warn: vi.fn(),
        error: vi.fn(),
        debug: vi.fn(),
      },
      // Fake diagnostics capability capturing both emissions and listeners.
      internalDiagnostics: {
        emit: (event) => emitted.push(event),
        onEvent: (listener) => {
          listeners.push(listener);
          return unsubscribe;
        },
      },
    });
    // Drive the captured listener directly with a trusted usage event.
    listeners[0]?.(
      {
        ...baseEvent(),
        type: "model.usage",
        provider: "openai",
        model: "gpt-5.4",
        usage: { input: 12, output: 3, total: 15 },
      },
      trusted,
    );
    // start() announces itself on the same bus it subscribed to.
    expect(emitted).toContainEqual(
      expect.objectContaining({
        type: "telemetry.exporter",
        exporter: "diagnostics-prometheus",
        signal: "metrics",
        status: "started",
      }),
    );
    expect(exporter.render()).toContain(
      'openclaw_model_tokens_total{agent="unknown",channel="unknown",model="gpt-5.4",provider="openai",token_type="input"} 12',
    );
    exporter.service.stop?.();
    expect(unsubscribe).toHaveBeenCalledOnce();
    // After stop the store is reset, so the scrape payload is empty.
    expect(exporter.render()).toBe("");
  });
});

View File

@@ -0,0 +1,684 @@
import type { IncomingMessage, ServerResponse } from "node:http";
import type {
DiagnosticEventMetadata,
DiagnosticEventPayload,
OpenClawPluginHttpRouteHandler,
OpenClawPluginService,
} from "../api.js";
import { redactSensitiveText } from "../api.js";
// One concrete set of Prometheus labels (label name -> label value).
type LabelSet = Record<string, string>;
// A single counter series: monotonically increased value plus its metadata.
type CounterSample = {
  help: string;
  labels: LabelSet;
  value: number;
};
// A single histogram series: fixed upper bounds with cumulative bucket counts.
type HistogramSample = {
  buckets: number[];
  counts: number[];
  count: number;
  help: string;
  labels: LabelSet;
  sum: number;
};
// A single gauge series; each write overwrites the previous value.
type GaugeSample = {
  help: string;
  labels: LabelSet;
  value: number;
};
// Copied view of all series at one point in time, used for rendering.
type MetricSnapshot = {
  counters: Map<string, CounterSample>;
  gauges: Map<string, GaugeSample>;
  histograms: Map<string, HistogramSample>;
};
type PrometheusMetricStore = ReturnType<typeof createPrometheusMetricStore>;
// Duration histogram upper bounds in seconds (5 ms up to 10 minutes).
const DURATION_BUCKETS_SECONDS = [
  0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 30, 60, 120, 300, 600,
];
// Token-count histogram upper bounds (1 token up to ~1M tokens).
const TOKEN_BUCKETS = [1, 4, 16, 64, 256, 1024, 4096, 16384, 65536, 262144, 1048576];
// Byte-size histogram upper bounds (1 KiB up to 16 GiB).
const BYTE_BUCKETS = [
  1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824,
  4294967296, 17179869184,
];
// Values matching this pattern are exported verbatim as label values;
// anything else (too long, unusual characters) falls back to a fixed token.
const LOW_CARDINALITY_VALUE_RE = /^[A-Za-z0-9_.:-]{1,120}$/u;
// Hard cap on total series across counters, gauges, and histograms combined.
const MAX_PROMETHEUS_SERIES = 2048;
// Counter reporting how many series were dropped once the cap was reached.
const DROPPED_SERIES_COUNTER_NAME = "openclaw_prometheus_series_dropped_total";
function lowCardinalityLabel(value: string | undefined, fallback = "unknown"): string {
  // Normalize a free-form value into a safe, bounded Prometheus label value.
  // Empty/missing input and anything not matching the low-cardinality pattern
  // (after trimming and secret redaction) collapses to the fallback token.
  if (!value) {
    return fallback;
  }
  const candidate = redactSensitiveText(value.trim());
  if (LOW_CARDINALITY_VALUE_RE.test(candidate)) {
    return candidate;
  }
  return fallback;
}
function numericValue(value: number | undefined): number | undefined {
  // Accept only finite, non-negative numbers; everything else is discarded.
  if (typeof value !== "number" || !Number.isFinite(value) || value < 0) {
    return undefined;
  }
  return value;
}
function seconds(ms: number | undefined): number | undefined {
  // Convert a validated millisecond duration into seconds; invalid input
  // (negative, non-finite, missing) yields undefined.
  const validated = numericValue(ms);
  if (validated === undefined) {
    return undefined;
  }
  return validated / 1000;
}
function sortedLabels(labels: LabelSet): [string, string][] {
  // Deterministic ordering by label name so identical label sets always
  // produce the same canonical encoding.
  const entries = Object.entries(labels);
  return entries.toSorted((left, right) => left[0].localeCompare(right[0]));
}
function metricKey(name: string, labels: LabelSet): string {
  // Stable series identity: metric name plus the canonical label encoding.
  const canonical = JSON.stringify(sortedLabels(labels));
  return `${name}|${canonical}`;
}
function escapeHelp(value: string): string {
  // HELP text escaping for the Prometheus text format: backslashes must be
  // doubled before newlines are encoded, or the escapes would compound.
  let escaped = value.replace(/\\/g, "\\\\");
  escaped = escaped.replace(/\n/g, "\\n");
  return escaped;
}
function escapeLabelValue(value: string): string {
  // Label value escaping per the Prometheus text format: backslash first,
  // then newline, then the double quote that delimits the value.
  const backslashesEscaped = value.replace(/\\/g, "\\\\");
  const newlinesEscaped = backslashesEscaped.replace(/\n/g, "\\n");
  return newlinesEscaped.replace(/"/g, '\\"');
}
function formatLabels(labels: LabelSet): string {
  // Render `{name="value",...}` in sorted order, or an empty string when the
  // series carries no labels at all.
  const pairs = sortedLabels(labels).map(
    ([key, value]) => `${key}="${escapeLabelValue(value)}"`,
  );
  return pairs.length === 0 ? "" : `{${pairs.join(",")}}`;
}
function formatPrometheusNumber(value: number): string {
  // NaN/Infinity never reach the scrape output; clamp them to "0".
  if (!Number.isFinite(value)) {
    return "0";
  }
  if (Number.isInteger(value)) {
    return String(value);
  }
  // Round-trip through 12 significant digits to shed float noise
  // (e.g. 1.5000000000000002 renders as "1.5").
  return String(Number(value.toPrecision(12)));
}
function createPrometheusMetricStore() {
  // Mutable in-memory metric store. Series are keyed by metric name plus the
  // canonical sorted-label encoding (see metricKey). A global cap bounds the
  // total series count; drops are tallied and surfaced at snapshot time.
  const counters = new Map<string, CounterSample>();
  const gauges = new Map<string, GaugeSample>();
  const histograms = new Map<string, HistogramSample>();
  let droppedSeries = 0;
  // True when `key` may be written: the series already exists, it is the
  // drop-accounting counter itself, or the global cap has headroom.
  // Otherwise the write is dropped and counted.
  const canCreateSeries = <T>(map: Map<string, T>, key: string, metricName: string): boolean => {
    if (map.has(key)) {
      return true;
    }
    if (metricName === DROPPED_SERIES_COUNTER_NAME) {
      return true;
    }
    if (counters.size + gauges.size + histograms.size < MAX_PROMETHEUS_SERIES) {
      return true;
    }
    droppedSeries += 1;
    return false;
  };
  // Monotonic counter; non-positive or non-finite increments are ignored.
  const counter = (name: string, help: string, labels: LabelSet, amount = 1) => {
    if (!Number.isFinite(amount) || amount <= 0) {
      return;
    }
    const key = metricKey(name, labels);
    if (!canCreateSeries(counters, key, name)) {
      return;
    }
    const existing = counters.get(key);
    if (existing) {
      existing.value += amount;
      return;
    }
    counters.set(key, { help, labels, value: amount });
  };
  // Gauge; each valid write overwrites the previous value for the series.
  const gauge = (name: string, help: string, labels: LabelSet, value: number | undefined) => {
    if (value === undefined || !Number.isFinite(value)) {
      return;
    }
    const key = metricKey(name, labels);
    if (!canCreateSeries(gauges, key, name)) {
      return;
    }
    gauges.set(key, { help, labels, value });
  };
  // Histogram observation; bucket bounds default to duration buckets and are
  // fixed at series creation. Negative or invalid observations are ignored.
  const histogram = (
    name: string,
    help: string,
    labels: LabelSet,
    value: number | undefined,
    buckets = DURATION_BUCKETS_SECONDS,
  ) => {
    if (value === undefined || !Number.isFinite(value) || value < 0) {
      return;
    }
    const key = metricKey(name, labels);
    if (!canCreateSeries(histograms, key, name)) {
      return;
    }
    let sample = histograms.get(key);
    if (!sample) {
      sample = {
        buckets,
        counts: buckets.map(() => 0),
        count: 0,
        help,
        labels,
        sum: 0,
      };
      histograms.set(key, sample);
    }
    sample.count += 1;
    sample.sum += value;
    // Increment every bucket whose bound covers the value, so the stored
    // counts are cumulative as Prometheus expects.
    for (let index = 0; index < sample.buckets.length; index += 1) {
      const bucket = sample.buckets[index];
      if (bucket !== undefined && value <= bucket) {
        sample.counts[index] = (sample.counts[index] ?? 0) + 1;
      }
    }
  };
  // Copied view of all series; the dropped-series counter is materialized
  // into the snapshot only when at least one drop occurred.
  const snapshot = (): MetricSnapshot => {
    const counterSnapshot = new Map(counters);
    if (droppedSeries > 0) {
      counterSnapshot.set(metricKey(DROPPED_SERIES_COUNTER_NAME, {}), {
        help: "Prometheus metric series dropped because the exporter series cap was reached.",
        labels: {},
        value: droppedSeries,
      });
    }
    return {
      counters: counterSnapshot,
      gauges: new Map(gauges),
      histograms: new Map(histograms),
    };
  };
  // Discard all series and the drop tally (used when the exporter stops).
  const reset = () => {
    counters.clear();
    gauges.clear();
    histograms.clear();
    droppedSeries = 0;
  };
  return { counter, gauge, histogram, reset, snapshot };
}
function safeErrorMessage(err: unknown): string {
  // Produce a single-line, secret-redacted, length-bounded message that is
  // safe to interpolate into log output.
  let message: string;
  if (err instanceof Error) {
    message = err.message ?? err.name;
  } else {
    message = String(err);
  }
  const flattened = redactSensitiveText(message)
    .replaceAll("\u0000", " ")
    .replace(/[\r\n\t\u2028\u2029]/gu, " ");
  return flattened.slice(0, 500);
}
function renderPrometheusMetrics(store: PrometheusMetricStore): string {
  // Serialize a snapshot into the Prometheus text exposition format:
  // HELP/TYPE headers once per metric name, one line per series, and full
  // bucket/sum/count expansion for histograms.
  const snapshot = store.snapshot();
  const lines: string[] = [];
  const emitted = new Set<string>();
  // Emit the HELP/TYPE header at most once per metric name, even when the
  // metric has many label permutations.
  const emitHeader = (name: string, type: "counter" | "gauge" | "histogram", help: string) => {
    if (emitted.has(name)) {
      return;
    }
    emitted.add(name);
    lines.push(`# HELP ${name} ${escapeHelp(help)}`);
    lines.push(`# TYPE ${name} ${type}`);
  };
  // Sorted iteration keeps scrape output stable across renders.
  const counterEntries = [...snapshot.counters.entries()].toSorted(([left], [right]) =>
    left.localeCompare(right),
  );
  for (const [key, sample] of counterEntries) {
    // Series keys are "<name>|<labels-json>"; recover the metric name part.
    const name = key.split("|", 1)[0] ?? "";
    emitHeader(name, "counter", sample.help);
    lines.push(`${name}${formatLabels(sample.labels)} ${formatPrometheusNumber(sample.value)}`);
  }
  const gaugeEntries = [...snapshot.gauges.entries()].toSorted(([left], [right]) =>
    left.localeCompare(right),
  );
  for (const [key, sample] of gaugeEntries) {
    const name = key.split("|", 1)[0] ?? "";
    emitHeader(name, "gauge", sample.help);
    lines.push(`${name}${formatLabels(sample.labels)} ${formatPrometheusNumber(sample.value)}`);
  }
  const histogramEntries = [...snapshot.histograms.entries()].toSorted(([left], [right]) =>
    left.localeCompare(right),
  );
  for (const [key, sample] of histogramEntries) {
    const name = key.split("|", 1)[0] ?? "";
    emitHeader(name, "histogram", sample.help);
    // Bucket counts are stored cumulatively, matching _bucket semantics.
    for (let index = 0; index < sample.buckets.length; index += 1) {
      const bucket = sample.buckets[index];
      if (bucket === undefined) {
        continue;
      }
      lines.push(
        `${name}_bucket${formatLabels({ ...sample.labels, le: String(bucket) })} ${formatPrometheusNumber(sample.counts[index] ?? 0)}`,
      );
    }
    // The +Inf bucket always equals the total observation count.
    lines.push(
      `${name}_bucket${formatLabels({ ...sample.labels, le: "+Inf" })} ${formatPrometheusNumber(sample.count)}`,
    );
    lines.push(`${name}_sum${formatLabels(sample.labels)} ${formatPrometheusNumber(sample.sum)}`);
    lines.push(
      `${name}_count${formatLabels(sample.labels)} ${formatPrometheusNumber(sample.count)}`,
    );
  }
  // Trailing empty entry so the joined payload ends with a newline.
  lines.push("");
  return lines.join("\n");
}
function runLabels(evt: {
  channel?: string;
  model?: string;
  outcome?: string;
  provider?: string;
  trigger?: string;
}): LabelSet {
  // Label set shared by the run counter and run-duration histogram; every
  // value is forced through lowCardinalityLabel to cap series cardinality.
  const labels: LabelSet = {};
  labels.channel = lowCardinalityLabel(evt.channel);
  labels.model = lowCardinalityLabel(evt.model);
  labels.outcome = lowCardinalityLabel(evt.outcome, "unknown");
  labels.provider = lowCardinalityLabel(evt.provider);
  labels.trigger = lowCardinalityLabel(evt.trigger);
  return labels;
}
function modelCallLabels(evt: {
  api?: string;
  errorCategory?: string;
  model?: string;
  provider?: string;
  transport?: string;
  type: string;
}): LabelSet {
  // Shared labels for model-call counters and histograms. Error category is
  // only meaningful on the error variant; completions pin it to "none".
  const isError = evt.type === "model.call.error";
  return {
    api: lowCardinalityLabel(evt.api),
    error_category: isError ? lowCardinalityLabel(evt.errorCategory, "other") : "none",
    model: lowCardinalityLabel(evt.model),
    outcome: isError ? "error" : "completed",
    provider: lowCardinalityLabel(evt.provider),
    transport: lowCardinalityLabel(evt.transport),
  };
}
function toolExecutionLabels(evt: {
  errorCategory?: string;
  paramsSummary?: { kind: string };
  toolName: string;
  type: string;
}): LabelSet {
  // Shared labels for tool-execution counters and histograms; the error
  // category is only populated for the error event variant.
  const failed = evt.type === "tool.execution.error";
  return {
    error_category: failed ? lowCardinalityLabel(evt.errorCategory, "other") : "none",
    outcome: failed ? "error" : "completed",
    params_kind: lowCardinalityLabel(evt.paramsSummary?.kind),
    tool: lowCardinalityLabel(evt.toolName, "tool"),
  };
}
function harnessLabels(evt: {
  channel?: string;
  errorCategory?: string;
  harnessId: string;
  model?: string;
  outcome?: string;
  phase?: string;
  pluginId?: string;
  provider?: string;
  type: string;
}): LabelSet {
  // Shared labels for harness-run counters and histograms. On the error
  // variant the outcome is pinned to "error" and the failing phase/category
  // are exported; on completion phase is "none" and outcome comes from evt.
  const errored = evt.type === "harness.run.error";
  return {
    channel: lowCardinalityLabel(evt.channel),
    error_category: errored ? lowCardinalityLabel(evt.errorCategory, "other") : "none",
    harness: lowCardinalityLabel(evt.harnessId),
    model: lowCardinalityLabel(evt.model),
    outcome: errored ? "error" : lowCardinalityLabel(evt.outcome),
    phase: errored ? lowCardinalityLabel(evt.phase) : "none",
    plugin: lowCardinalityLabel(evt.pluginId),
    provider: lowCardinalityLabel(evt.provider),
  };
}
function recordModelUsage(
  store: PrometheusMetricStore,
  evt: Extract<DiagnosticEventPayload, { type: "model.usage" }>,
) {
  // Expand a single model.usage event into per-token-type counters, GenAI
  // token-usage histograms (input/output only), a cost counter, and a
  // duration histogram.
  const labels = {
    agent: lowCardinalityLabel(evt.agentId),
    channel: lowCardinalityLabel(evt.channel),
    model: lowCardinalityLabel(evt.model),
    provider: lowCardinalityLabel(evt.provider),
  };
  const usage = evt.usage;
  // Record one token_type series; absent or zero counts emit nothing.
  const recordTokens = (tokenType: string, value: number | undefined) => {
    const amount = numericValue(value);
    if (amount === undefined || amount === 0) {
      return;
    }
    store.counter(
      "openclaw_model_tokens_total",
      "Model tokens reported by diagnostic usage events.",
      {
        ...labels,
        token_type: tokenType,
      },
      amount,
    );
    if (tokenType === "input" || tokenType === "output") {
      // The GenAI-convention histogram uses a reduced label set to keep
      // series cardinality down.
      store.histogram(
        "openclaw_gen_ai_client_token_usage",
        "GenAI token usage distribution for input and output tokens.",
        {
          model: labels.model,
          provider: labels.provider,
          token_type: tokenType,
        },
        amount,
        TOKEN_BUCKETS,
      );
    }
  };
  recordTokens("input", usage.input);
  recordTokens("output", usage.output);
  recordTokens("cache_read", usage.cacheRead);
  recordTokens("cache_write", usage.cacheWrite);
  recordTokens("prompt", usage.promptTokens);
  recordTokens("total", usage.total);
  // A missing/invalid cost maps to 0, which the counter implementation drops.
  store.counter(
    "openclaw_model_cost_usd_total",
    "Estimated model cost in USD reported by diagnostic usage events.",
    labels,
    numericValue(evt.costUsd) ?? 0,
  );
  store.histogram(
    "openclaw_model_usage_duration_seconds",
    "Model usage event duration in seconds.",
    labels,
    seconds(evt.durationMs),
  );
}
function recordDiagnosticEvent(
  store: PrometheusMetricStore,
  evt: DiagnosticEventPayload,
  metadata: DiagnosticEventMetadata,
): void {
  // Translate one internal diagnostic event into Prometheus series.
  // Untrusted events (e.g. plugin-emitted) are dropped entirely so that
  // third-party code cannot inflate or poison exported metrics.
  if (!metadata.trusted) {
    return;
  }
  switch (evt.type) {
    case "model.usage":
      // Token/cost accounting has its own multi-metric recording path.
      recordModelUsage(store, evt);
      return;
    case "run.completed":
      store.histogram(
        "openclaw_run_duration_seconds",
        "Agent run duration in seconds.",
        runLabels(evt),
        seconds(evt.durationMs),
      );
      store.counter(
        "openclaw_run_completed_total",
        "Agent runs completed by outcome.",
        runLabels(evt),
      );
      return;
    case "model.call.completed":
    case "model.call.error":
      // Completed and errored calls share series; outcome/error_category
      // labels distinguish them (see modelCallLabels).
      store.histogram(
        "openclaw_model_call_duration_seconds",
        "Provider model call duration in seconds.",
        modelCallLabels(evt),
        seconds(evt.durationMs),
      );
      store.counter(
        "openclaw_model_call_total",
        "Provider model calls completed by outcome.",
        modelCallLabels(evt),
      );
      return;
    case "tool.execution.completed":
    case "tool.execution.error":
      store.histogram(
        "openclaw_tool_execution_duration_seconds",
        "Tool execution duration in seconds.",
        toolExecutionLabels(evt),
        seconds(evt.durationMs),
      );
      store.counter(
        "openclaw_tool_execution_total",
        "Tool executions completed by outcome.",
        toolExecutionLabels(evt),
      );
      return;
    case "harness.run.completed":
    case "harness.run.error":
      store.histogram(
        "openclaw_harness_run_duration_seconds",
        "Agent harness run duration in seconds.",
        harnessLabels(evt),
        seconds(evt.durationMs),
      );
      store.counter(
        "openclaw_harness_run_total",
        "Agent harness runs completed by outcome.",
        harnessLabels(evt),
      );
      return;
    case "message.processed":
      store.counter("openclaw_message_processed_total", "Inbound messages processed by outcome.", {
        channel: lowCardinalityLabel(evt.channel),
        outcome: evt.outcome,
        reason: lowCardinalityLabel(evt.reason, "none"),
      });
      store.histogram(
        "openclaw_message_processed_duration_seconds",
        "Inbound message processing duration in seconds.",
        {
          channel: lowCardinalityLabel(evt.channel),
          outcome: evt.outcome,
          reason: lowCardinalityLabel(evt.reason, "none"),
        },
        seconds(evt.durationMs),
      );
      return;
    case "message.delivery.completed":
    case "message.delivery.error":
      store.counter(
        "openclaw_message_delivery_total",
        "Outbound message delivery attempts by outcome.",
        {
          channel: lowCardinalityLabel(evt.channel),
          delivery_kind: evt.deliveryKind,
          error_category:
            evt.type === "message.delivery.error"
              ? lowCardinalityLabel(evt.errorCategory, "other")
              : "none",
          outcome: evt.type === "message.delivery.error" ? "error" : "completed",
        },
      );
      store.histogram(
        "openclaw_message_delivery_duration_seconds",
        "Outbound message delivery duration in seconds.",
        {
          channel: lowCardinalityLabel(evt.channel),
          delivery_kind: evt.deliveryKind,
          error_category:
            evt.type === "message.delivery.error"
              ? lowCardinalityLabel(evt.errorCategory, "other")
              : "none",
          outcome: evt.type === "message.delivery.error" ? "error" : "completed",
        },
        seconds(evt.durationMs),
      );
      return;
    case "queue.lane.enqueue":
    case "queue.lane.dequeue":
      // Both directions update the lane-size gauge; only dequeue carries a
      // wait time to observe.
      store.gauge(
        "openclaw_queue_lane_size",
        "Current diagnostic queue lane size.",
        {
          lane: lowCardinalityLabel(evt.lane),
        },
        numericValue(evt.queueSize),
      );
      if (evt.type === "queue.lane.dequeue") {
        store.histogram(
          "openclaw_queue_lane_wait_seconds",
          "Queue lane wait time in seconds.",
          { lane: lowCardinalityLabel(evt.lane) },
          seconds(evt.waitMs),
        );
      }
      return;
    case "session.state":
      store.counter("openclaw_session_state_total", "Session state observations.", {
        reason: lowCardinalityLabel(evt.reason, "none"),
        state: evt.state,
      });
      // Queue depth is optional on this event; only gauge it when present.
      if (evt.queueDepth !== undefined) {
        store.gauge(
          "openclaw_session_queue_depth",
          "Latest observed session queue depth.",
          {
            state: evt.state,
          },
          numericValue(evt.queueDepth),
        );
      }
      return;
    case "diagnostic.memory.sample":
      // One gauge per memory kind, plus an RSS distribution histogram.
      store.gauge(
        "openclaw_memory_bytes",
        "Latest process memory usage by memory kind.",
        { kind: "rss" },
        evt.memory.rssBytes,
      );
      store.gauge(
        "openclaw_memory_bytes",
        "Latest process memory usage by memory kind.",
        { kind: "heap_total" },
        evt.memory.heapTotalBytes,
      );
      store.gauge(
        "openclaw_memory_bytes",
        "Latest process memory usage by memory kind.",
        { kind: "heap_used" },
        evt.memory.heapUsedBytes,
      );
      store.histogram(
        "openclaw_memory_rss_bytes",
        "RSS memory sample distribution in bytes.",
        {},
        numericValue(evt.memory.rssBytes),
        BYTE_BUCKETS,
      );
      return;
    case "diagnostic.memory.pressure":
      store.counter(
        "openclaw_memory_pressure_total",
        "Memory pressure events by level and reason.",
        {
          level: evt.level,
          reason: evt.reason,
        },
      );
      return;
    case "telemetry.exporter":
      store.counter("openclaw_telemetry_exporter_total", "Telemetry exporter lifecycle events.", {
        exporter: lowCardinalityLabel(evt.exporter),
        reason: lowCardinalityLabel(evt.reason, "none"),
        signal: evt.signal,
        status: evt.status,
      });
      return;
    default:
      // Unrecognized event types are intentionally ignored.
      return;
  }
}
function createMetricsHandler(store: PrometheusMetricStore): OpenClawPluginHttpRouteHandler {
  // Scrape endpoint: GET returns the rendered text payload, HEAD returns
  // headers only, and every other method gets 405 with an Allow header.
  return (req: IncomingMessage, res: ServerResponse) => {
    const method = req.method;
    if (method !== "GET" && method !== "HEAD") {
      res.statusCode = 405;
      res.setHeader("Allow", "GET, HEAD");
      res.end("Method Not Allowed");
      return true;
    }
    const body = renderPrometheusMetrics(store);
    res.statusCode = 200;
    res.setHeader("Cache-Control", "no-store");
    res.setHeader("Content-Type", "text/plain; version=0.0.4; charset=utf-8");
    // HEAD still renders (cheap) but sends no body.
    res.end(method === "HEAD" ? undefined : body);
    return true;
  };
}
export function createDiagnosticsPrometheusExporter() {
  // Build the Prometheus exporter: one shared metric store, an HTTP scrape
  // handler, a direct `render` function (used by tests), and a plugin
  // service whose start() subscribes to the internal diagnostics bus and
  // whose stop() unsubscribes and clears all recorded series.
  const store = createPrometheusMetricStore();
  let unsubscribe: (() => void) | undefined;
  const service = {
    id: "diagnostics-prometheus",
    start(ctx) {
      // The internal diagnostics capability is optional; without it the
      // exporter cannot receive events, so log an error and bail.
      const subscribe = ctx.internalDiagnostics?.onEvent;
      if (!subscribe) {
        ctx.logger.error("diagnostics-prometheus: internal diagnostics capability unavailable");
        return;
      }
      unsubscribe = subscribe((event, metadata) => {
        try {
          recordDiagnosticEvent(store, event, metadata);
        } catch (err) {
          // One bad event must not kill the subscription; log (redacted,
          // single line) and keep listening.
          ctx.logger.error(
            `diagnostics-prometheus: event handler failed (${event.type}): ${safeErrorMessage(err)}`,
          );
        }
      });
      // Announce startup on the same diagnostics bus we just subscribed to.
      ctx.internalDiagnostics?.emit({
        type: "telemetry.exporter",
        exporter: "diagnostics-prometheus",
        signal: "metrics",
        status: "started",
        reason: "configured",
      });
    },
    stop() {
      unsubscribe?.();
      unsubscribe = undefined;
      // Reset so a stopped exporter renders an empty scrape payload.
      store.reset();
    },
  } satisfies OpenClawPluginService;
  return {
    handler: createMetricsHandler(store),
    render: () => renderPrometheusMetrics(store),
    service,
  };
}
// Test-only handle exposing internals to the unit tests; not part of the
// plugin's public API surface.
export const __test__ = {
  createPrometheusMetricStore,
  recordDiagnosticEvent,
  renderPrometheusMetrics,
};

View File

@@ -0,0 +1,16 @@
{
"extends": "../tsconfig.package-boundary.base.json",
"compilerOptions": {
"rootDir": "."
},
"include": ["./*.ts", "./src/**/*.ts"],
"exclude": [
"./**/*.test.ts",
"./dist/**",
"./node_modules/**",
"./src/test-support/**",
"./src/**/*test-helpers.ts",
"./src/**/*test-harness.ts",
"./src/**/*test-support.ts"
]
}

View File

@@ -8,10 +8,10 @@
"build:viewer": "bun build src/viewer-client.ts --target browser --format esm --minify --outfile assets/viewer-runtime.js"
},
"dependencies": {
"@pierre/diffs": "1.1.17",
"@pierre/diffs": "1.1.19",
"@pierre/theme": "0.0.29",
"playwright-core": "1.59.1",
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*"

View File

@@ -9,7 +9,7 @@
"discord-api-types": "^0.38.47",
"https-proxy-agent": "^9.0.0",
"opusscript": "^0.1.1",
"typebox": "1.1.32",
"typebox": "1.1.33",
"undici": "8.1.0",
"ws": "^8.20.0"
},

View File

@@ -87,7 +87,7 @@ describe("discord exec approval monitor helpers", () => {
const interaction = createInteraction();
const button = new ExecApprovalButton({
getApprovers: () => ["123"],
resolveApproval: async () => true,
resolveApproval: async () => ({ ok: true }),
});
await button.run(interaction, { id: "", action: "" });
@@ -102,7 +102,7 @@ describe("discord exec approval monitor helpers", () => {
const interaction = createInteraction({ userId: "999" });
const button = new ExecApprovalButton({
getApprovers: () => ["123"],
resolveApproval: async () => true,
resolveApproval: async () => ({ ok: true }),
});
await button.run(interaction, { id: "abc", action: "allow-once" });
@@ -115,7 +115,7 @@ describe("discord exec approval monitor helpers", () => {
it("acknowledges and resolves valid approval clicks", async () => {
const interaction = createInteraction();
const resolveApproval = vi.fn(async () => true);
const resolveApproval = vi.fn(async () => ({ ok: true }) as const);
const button = new ExecApprovalButton({
getApprovers: () => ["123"],
resolveApproval,
@@ -132,7 +132,7 @@ describe("discord exec approval monitor helpers", () => {
const interaction = createInteraction();
const button = new ExecApprovalButton({
getApprovers: () => ["123"],
resolveApproval: async () => false,
resolveApproval: async () => ({ ok: false, reason: "error" }),
});
await button.run(interaction, { id: "abc", action: "deny" });
@@ -144,6 +144,19 @@ describe("discord exec approval monitor helpers", () => {
});
});
it("keeps already-resolved approval clicks quiet", async () => {
const interaction = createInteraction();
const button = new ExecApprovalButton({
getApprovers: () => ["123"],
resolveApproval: async () => ({ ok: false, reason: "not-found" }),
});
await button.run(interaction, { id: "abc", action: "allow-once" });
expect(interaction.acknowledge).toHaveBeenCalled();
expect(interaction.followUp).not.toHaveBeenCalled();
});
it("builds button context from config and routes resolution over gateway", async () => {
const cfg = buildConfig({ enabled: true, approvers: ["123"] });
resolveApprovalOverGatewayMock.mockResolvedValue(undefined);
@@ -155,7 +168,7 @@ describe("discord exec approval monitor helpers", () => {
});
expect(ctx.getApprovers()).toEqual(["123"]);
await expect(ctx.resolveApproval("abc", "allow-once")).resolves.toBe(true);
await expect(ctx.resolveApproval("abc", "allow-once")).resolves.toEqual({ ok: true });
expect(resolveApprovalOverGatewayMock).toHaveBeenCalledWith({
cfg,
approvalId: "abc",
@@ -173,6 +186,41 @@ describe("discord exec approval monitor helpers", () => {
config: { enabled: true, approvers: ["123"] },
});
await expect(ctx.resolveApproval("abc", "allow-once")).resolves.toBe(false);
await expect(ctx.resolveApproval("abc", "allow-once")).resolves.toEqual({
ok: false,
reason: "error",
});
});
it("classifies structured approval-not-found gateway errors as stale clicks", async () => {
const err = Object.assign(new Error("unknown or expired approval id"), {
gatewayCode: "INVALID_REQUEST",
details: { reason: "APPROVAL_NOT_FOUND" },
});
resolveApprovalOverGatewayMock.mockRejectedValue(err);
const ctx = createDiscordExecApprovalButtonContext({
cfg: buildConfig({ enabled: true, approvers: ["123"] }),
accountId: "default",
config: { enabled: true, approvers: ["123"] },
});
await expect(ctx.resolveApproval("abc", "allow-once")).resolves.toEqual({
ok: false,
reason: "not-found",
});
});
it("keeps message-only approval-not-found errors visible", async () => {
resolveApprovalOverGatewayMock.mockRejectedValue(new Error("unknown or expired approval id"));
const ctx = createDiscordExecApprovalButtonContext({
cfg: buildConfig({ enabled: true, approvers: ["123"] }),
accountId: "default",
config: { enabled: true, approvers: ["123"] },
});
await expect(ctx.resolveApproval("abc", "allow-once")).resolves.toEqual({
ok: false,
reason: "error",
});
});
});

View File

@@ -53,9 +53,30 @@ export function parseExecApprovalData(
export type ExecApprovalButtonContext = {
getApprovers: () => string[];
resolveApproval: (approvalId: string, decision: ExecApprovalDecision) => Promise<boolean>;
resolveApproval: (
approvalId: string,
decision: ExecApprovalDecision,
) => Promise<ExecApprovalResolveResult>;
};
type ExecApprovalResolveResult = { ok: true } | { ok: false; reason: "error" | "not-found" };
function isStructuredApprovalNotFoundError(err: unknown): boolean {
if (!err || typeof err !== "object") {
return false;
}
const record = err as {
gatewayCode?: unknown;
details?: { reason?: unknown } | null;
};
if (record.gatewayCode === "APPROVAL_NOT_FOUND") {
return true;
}
return (
record.gatewayCode === "INVALID_REQUEST" && record.details?.reason === "APPROVAL_NOT_FOUND"
);
}
export class ExecApprovalButton extends Button {
label = "execapproval";
customId = "execapproval:seed=1";
@@ -100,8 +121,8 @@ export class ExecApprovalButton extends Button {
await interaction.acknowledge();
} catch {}
const ok = await this.ctx.resolveApproval(parsed.approvalId, parsed.action);
if (!ok) {
const result = await this.ctx.resolveApproval(parsed.approvalId, parsed.action);
if (!result.ok && result.reason !== "not-found") {
try {
await interaction.followUp({
content: `Failed to submit approval decision for **${decisionLabel}**. The request may have expired or already been resolved.`,
@@ -138,9 +159,12 @@ export function createDiscordExecApprovalButtonContext(params: {
gatewayUrl: params.gatewayUrl,
clientDisplayName: `Discord approval (${params.accountId})`,
});
return true;
} catch {
return false;
return { ok: true };
} catch (err) {
return {
ok: false,
reason: isStructuredApprovalNotFoundError(err) ? "not-found" : "error",
};
}
},
};

View File

@@ -1,481 +1,37 @@
import {
DiscordError,
RateLimitError,
RequestClient,
type DiscordRawError,
type RequestData,
type RequestClientOptions,
} from "@buape/carbon";
import { isRecord } from "openclaw/plugin-sdk/text-runtime";
import { RequestClient, type RequestClientOptions } from "@buape/carbon";
import { FormData as UndiciFormData } from "undici";
export type ProxyRequestClientOptions = RequestClientOptions & {
fetch?: typeof fetch;
};
export type ProxyRequestClientOptions = RequestClientOptions;
type QueuedRequest = {
method: string;
path: string;
data?: RequestData;
query?: Record<string, string | number | boolean>;
resolve: (value?: unknown) => void;
reject: (reason?: unknown) => void;
routeKey: string;
};
type MultipartFile = {
data: unknown;
name: string;
description?: string;
};
type Attachment = {
id: number;
filename: string;
description?: string;
};
const defaultOptions = {
tokenHeader: "Bot",
baseUrl: "https://discord.com/api",
apiVersion: 10,
userAgent: "DiscordBot (https://github.com/buape/carbon, v0.0.0)",
timeout: 15_000,
queueRequests: true,
maxQueueSize: 1000,
runtimeProfile: "persistent",
scheduler: {},
} satisfies Omit<ProxyRequestClientOptions, "fetch"> & {
runtimeProfile: string;
scheduler: object;
};
function getMultipartFiles(payload: unknown): MultipartFile[] {
if (!isRecord(payload)) {
return [];
}
const directFiles = payload.files;
if (Array.isArray(directFiles)) {
return directFiles as MultipartFile[];
}
const nestedData = payload.data;
if (!isRecord(nestedData)) {
return [];
}
const nestedFiles = nestedData.files;
return Array.isArray(nestedFiles) ? (nestedFiles as MultipartFile[]) : [];
}
function isMultipartPayload(payload: unknown): payload is Record<string, unknown> {
return getMultipartFiles(payload).length > 0;
}
function toRateLimitBody(parsedBody: unknown, rawBody: string, headers: Headers) {
if (isRecord(parsedBody)) {
const message = typeof parsedBody.message === "string" ? parsedBody.message : undefined;
const retryAfter =
typeof parsedBody.retry_after === "number" ? parsedBody.retry_after : undefined;
const global = typeof parsedBody.global === "boolean" ? parsedBody.global : undefined;
if (message !== undefined && retryAfter !== undefined && global !== undefined) {
return {
message,
retry_after: retryAfter,
global,
};
function toUndiciFormData(body: FormData): UndiciFormData {
const converted = new UndiciFormData();
for (const [key, value] of body.entries()) {
if (typeof value === "string") {
converted.append(key, value);
continue;
}
const filename = (value as Blob & { name?: unknown }).name;
if (typeof filename === "string" && filename.length > 0) {
converted.append(key, value, filename);
continue;
}
converted.append(key, value);
}
const retryAfterHeader = headers.get("Retry-After");
return {
message: typeof parsedBody === "string" ? parsedBody : rawBody || "You are being rate limited.",
retry_after:
retryAfterHeader && !Number.isNaN(Number(retryAfterHeader)) ? Number(retryAfterHeader) : 1,
global: headers.get("X-RateLimit-Scope") === "global",
};
return converted;
}
type RateLimitBody = ReturnType<typeof toRateLimitBody>;
function createRateLimitErrorCompat(
response: Response,
body: RateLimitBody,
request: Request,
): RateLimitError {
const RateLimitErrorCtor = RateLimitError as unknown as {
new (response: Response, body: RateLimitBody, request?: Request): RateLimitError;
};
return new RateLimitErrorCtor(response, body, request);
}
function toDiscordErrorBody(parsedBody: unknown, rawBody: string): DiscordRawError {
if (isRecord(parsedBody) && typeof parsedBody.message === "string") {
return parsedBody as DiscordRawError;
}
return {
message: typeof parsedBody === "string" ? parsedBody : rawBody || "Discord request failed",
};
}
function toBlobPart(value: unknown): BlobPart {
if (value instanceof ArrayBuffer || typeof value === "string") {
return value;
}
if (ArrayBuffer.isView(value)) {
const copied = new Uint8Array(value.byteLength);
copied.set(new Uint8Array(value.buffer, value.byteOffset, value.byteLength));
return copied;
}
if (value instanceof Blob) {
return value;
}
return String(value);
}
// Carbon 0.14 removed the custom fetch seam from RequestClientOptions.
// Keep a local proxy-aware clone so Discord proxy config still works on OpenClaw.
class ProxyRequestClientCompat {
readonly options: ProxyRequestClientOptions;
readonly customFetch?: typeof fetch;
protected queue: QueuedRequest[] = [];
private readonly token: string;
private abortController: AbortController | null = null;
private processingQueue = false;
private readonly routeBuckets = new Map<string, string>();
private readonly bucketStates = new Map<string, number>();
private globalRateLimitUntil = 0;
constructor(token: string, options?: ProxyRequestClientOptions) {
this.token = token;
this.options = {
...defaultOptions,
...options,
};
this.customFetch = options?.fetch;
}
async get(path: string, query?: QueuedRequest["query"]): Promise<unknown> {
return await this.request("GET", path, { query });
}
async post(path: string, data?: RequestData, query?: QueuedRequest["query"]): Promise<unknown> {
return await this.request("POST", path, { data, query });
}
async patch(path: string, data?: RequestData, query?: QueuedRequest["query"]): Promise<unknown> {
return await this.request("PATCH", path, { data, query });
}
async put(path: string, data?: RequestData, query?: QueuedRequest["query"]): Promise<unknown> {
return await this.request("PUT", path, { data, query });
}
async delete(path: string, data?: RequestData, query?: QueuedRequest["query"]): Promise<unknown> {
return await this.request("DELETE", path, { data, query });
}
clearQueue(): void {
this.queue.length = 0;
}
get queueSize(): number {
return this.queue.length;
}
abortAllRequests(): void {
this.abortController?.abort();
this.abortController = null;
}
/**
 * Queues or (when queueRequests is off) immediately executes a request.
 * Rejects when the bounded queue is full, naming the route that occupies
 * the most queue slots to aid diagnosis.
 */
private async request(
method: string,
path: string,
params: Pick<QueuedRequest, "data" | "query">,
): Promise<unknown> {
const routeKey = this.getRouteKey(method, path);
if (this.options.queueRequests) {
if (
typeof this.options.maxQueueSize === "number" &&
this.options.maxQueueSize > 0 &&
this.queue.length >= this.options.maxQueueSize
) {
// Single pass over the queue: count requests per route and track the most
// frequent route for the error message. The incoming request's routeKey is
// seeded into the counts so it participates in the "top offender" tally.
const stats = this.queue.reduce(
(acc, item) => {
const count = (acc.counts.get(item.routeKey) ?? 0) + 1;
acc.counts.set(item.routeKey, count);
if (count > acc.topCount) {
acc.topCount = count;
acc.topRoute = item.routeKey;
}
return acc;
},
{
counts: new Map([[routeKey, 1]]),
topRoute: routeKey,
topCount: 1,
},
);
throw new Error(
`Request queue is full (${this.queue.length} / ${this.options.maxQueueSize}), you should implement a queuing system in your requests or raise the queue size in Carbon. Top offender: ${stats.topRoute}`,
);
}
// Enqueue and kick the drain loop; the queued entry settles this promise.
return await new Promise((resolve, reject) => {
this.queue.push({
method,
path,
data: params.data,
query: params.query,
resolve,
reject,
routeKey,
});
void this.processQueue();
// NOTE(review): everything from here to the end of this method appears to be
// interleaved diff/merge output — a `wrapDiscordFetch` definition from another
// scope is spliced into this Promise executor, and `await` is used inside a
// non-async arrow below. This cannot be the real file content; restore the
// original method body (direct executeRequest path) before relying on this view.
function wrapDiscordFetch(fetchImpl: NonNullable<RequestClientOptions["fetch"]>) {
return (input: string | URL | Request, init?: RequestInit): Promise<Response> => {
if (init?.body instanceof FormData) {
// Carbon builds global FormData; undici-backed proxy fetch needs undici's
// FormData class to preserve multipart boundaries.
return fetchImpl(input, {
...init,
body: toUndiciFormData(init.body) as unknown as BodyInit,
});
}
return await new Promise((resolve, reject) => {
void this.executeRequest({
method,
path,
data: params.data,
query: params.query,
resolve,
reject,
routeKey,
})
.then(resolve)
.catch(reject);
});
}
/**
 * Performs one HTTP request against the configured base URL, honoring rate
 * limits: waits for the route's bucket, builds headers and a JSON or
 * multipart body, applies the optional timeout, then parses the response.
 * Throws the compat rate-limit error on 429 and DiscordError on other
 * non-OK statuses.
 */
private async executeRequest(request: QueuedRequest): Promise<unknown> {
const { method, path, data, query, routeKey } = request;
// Block until any global or per-bucket rate limit for this route expires.
await this.waitForBucket(routeKey);
const queryString = query
? `?${Object.entries(query)
.map(([key, value]) => `${encodeURIComponent(key)}=${encodeURIComponent(value)}`)
.join("&")}`
: "";
const url = `${this.options.baseUrl}${path}${queryString}`;
// Only used to attach request context to a potential rate-limit error below.
const originalRequest = new Request(url, { method });
// "webhook" is a sentinel token: send the request without Authorization.
const headers =
this.token === "webhook"
? new Headers()
: new Headers({
Authorization: `${this.options.tokenHeader} ${this.token}`,
});
if (data?.headers) {
for (const [key, value] of Object.entries(data.headers)) {
headers.set(key, value);
}
}
// NOTE(review): stored on the instance — a timeout here can abort whichever
// request most recently assigned the controller; confirm intended.
this.abortController = new AbortController();
const timeoutMs =
typeof this.options.timeout === "number" && this.options.timeout > 0
? this.options.timeout
: undefined;
let body: BodyInit | undefined;
if (data?.body && isMultipartPayload(data.body)) {
// Multipart upload: each file becomes a files[N] part; the remaining
// payload is serialized into payload_json with attachment metadata.
const payload = data.body;
const normalizedBody: Record<string, unknown> & { attachments: Attachment[] } =
typeof payload === "string"
? { content: payload, attachments: [] }
: { ...payload, attachments: [] };
const formData = new UndiciFormData();
const files = getMultipartFiles(payload);
for (const [index, file] of files.entries()) {
const normalizedFileData =
file.data instanceof Blob ? file.data : new Blob([toBlobPart(file.data)]);
formData.append(`files[${index}]`, normalizedFileData, file.name);
normalizedBody.attachments.push({
id: index,
filename: file.name,
description: file.description,
});
}
// The original files list must not appear in the JSON payload; files
// travel only as multipart parts.
const cleanedBody = {
...normalizedBody,
files: undefined,
};
formData.append("payload_json", JSON.stringify(cleanedBody));
body = formData as unknown as BodyInit;
} else if (data?.body != null) {
headers.set("Content-Type", "application/json");
// rawBody flag means the caller already serialized the payload.
body = data.rawBody ? (data.body as BodyInit) : JSON.stringify(data.body);
}
let timeoutId: ReturnType<typeof setTimeout> | undefined;
if (timeoutMs !== undefined) {
timeoutId = setTimeout(() => {
this.abortController?.abort();
}, timeoutMs);
}
let response: Response;
try {
response = await (this.customFetch ?? globalThis.fetch)(url, {
method,
headers,
body,
signal: this.abortController.signal,
});
} finally {
// Always cancel the timeout, even when fetch throws or is aborted.
if (timeoutId) {
clearTimeout(timeoutId);
}
}
// Read as text first so a non-JSON body still leaves rawBody usable.
let rawBody = "";
let parsedBody: unknown;
try {
rawBody = await response.text();
} catch {
rawBody = "";
}
if (rawBody.length > 0) {
try {
parsedBody = JSON.parse(rawBody);
} catch {
parsedBody = undefined;
}
}
if (response.status === 429) {
// Record the limit so queued retries wait, then surface the error.
const rateLimitBody = toRateLimitBody(parsedBody, rawBody, response.headers);
const rateLimitError = createRateLimitErrorCompat(response, rateLimitBody, originalRequest);
this.scheduleRateLimit(
routeKey,
rateLimitError.retryAfter,
rateLimitError.scope === "global",
);
throw rateLimitError;
}
this.updateBucketFromHeaders(routeKey, response.headers);
if (!response.ok) {
throw new DiscordError(response, toDiscordErrorBody(parsedBody, rawBody));
}
// Empty responses (e.g. 204) fall back to the raw text ("").
return parsedBody ?? rawBody;
}
/**
 * Drains the pending queue one request at a time. Re-entrant calls are
 * no-ops while a drain is already running. A rate-limited request is put
 * back at the front (when queueing is enabled) and retried; executeRequest
 * waits out the recorded bucket before re-sending.
 */
private async processQueue(): Promise<void> {
if (this.processingQueue) {
return;
}
this.processingQueue = true;
try {
for (;;) {
const next = this.queue.shift();
if (next === undefined) {
break;
}
try {
next.resolve(await this.executeRequest(next));
} catch (error) {
const retryable = error instanceof RateLimitError && this.options.queueRequests;
if (retryable) {
// Re-queue at the front; the next attempt waits for the bucket.
this.queue.unshift(next);
} else {
next.reject(error);
}
}
}
} finally {
this.processingQueue = false;
}
}
private async waitForBucket(routeKey: string): Promise<void> {
while (true) {
const now = Date.now();
if (this.globalRateLimitUntil > now) {
await new Promise((resolve) => setTimeout(resolve, this.globalRateLimitUntil - now));
continue;
}
const bucketKey = this.routeBuckets.get(routeKey);
const bucketUntil = bucketKey ? (this.bucketStates.get(bucketKey) ?? 0) : 0;
if (bucketUntil > now) {
await new Promise((resolve) => setTimeout(resolve, bucketUntil - now));
continue;
}
return;
}
}
/**
 * Records a 429 so subsequent requests wait. Global limits block every
 * route; per-route limits block the route's bucket, falling back to the
 * routeKey itself when no bucket id has been learned yet. Existing longer
 * blocks are never shortened.
 */
private scheduleRateLimit(routeKey: string, retryAfterSeconds: number, global: boolean): void {
const blockedUntil = Date.now() + Math.ceil(retryAfterSeconds * 1000);
if (global) {
if (blockedUntil > this.globalRateLimitUntil) {
this.globalRateLimitUntil = blockedUntil;
}
return;
}
const bucket = this.routeBuckets.get(routeKey) ?? routeKey;
this.routeBuckets.set(routeKey, bucket);
const previous = this.bucketStates.get(bucket) ?? 0;
this.bucketStates.set(bucket, previous > blockedUntil ? previous : blockedUntil);
}
/**
 * Syncs bucket bookkeeping from X-RateLimit-* response headers. Without a
 * bucket id there is nothing to record. With one: unusable numeric headers
 * merely ensure the bucket exists unblocked; an exhausted bucket
 * (remaining <= 0) is blocked until its reset; otherwise it is cleared.
 */
private updateBucketFromHeaders(routeKey: string, headers: Headers): void {
const bucket = headers.get("X-RateLimit-Bucket");
if (!bucket) {
return;
}
this.routeBuckets.set(routeKey, bucket);
const resetAfterHeader = headers.get("X-RateLimit-Reset-After");
const remainingHeader = headers.get("X-RateLimit-Remaining");
const resetAfterSeconds = resetAfterHeader ? Number(resetAfterHeader) : Number.NaN;
const remainingRequests = remainingHeader ? Number(remainingHeader) : Number.NaN;
if (!Number.isFinite(resetAfterSeconds) || !Number.isFinite(remainingRequests)) {
if (!this.bucketStates.has(bucket)) {
this.bucketStates.set(bucket, 0);
}
return;
}
if (remainingRequests <= 0) {
this.bucketStates.set(bucket, Date.now() + Math.ceil(resetAfterSeconds * 1000));
} else {
this.bucketStates.set(bucket, 0);
}
}
/**
 * Extracts the rate-limit "major parameter" from a path: guild id, channel
 * id, or webhook id (with its token appended when present). Returns null
 * for paths that carry none.
 */
private getMajorParameter(path: string): string | null {
const guild = /^\/guilds\/(\d+)/.exec(path);
if (guild?.[1]) {
return guild[1];
}
const channel = /^\/channels\/(\d+)/.exec(path);
if (channel?.[1]) {
return channel[1];
}
const webhook = /^\/webhooks\/(\d+)(?:\/([^/]+))?/.exec(path);
if (!webhook) {
return null;
}
const [, id, token] = webhook;
if (token) {
return `${id}/${token}`;
}
return id ?? null;
}
/** Rate-limit route key: upper-cased HTTP method plus the normalized bucket path. */
private getRouteKey(method: string, path: string): string {
const bucketPath = this.getBucketKey(path);
return `${method.toUpperCase()}:${bucketPath}`;
}
/**
 * Normalizes a path for rate-limit bucketing: strips any query string,
 * replaces snowflake ids (17-20 digits) with ":id" and reaction segments
 * with ":reaction", then appends the major parameter when one exists.
 */
private getBucketKey(path: string): string {
const major = this.getMajorParameter(path);
const normalized = path
.replace(/\?.*$/, "")
.replace(/\/\d{17,20}(?=\/|$)/g, "/:id")
.replace(/\/reactions\/[^/]+/g, "/reactions/:reaction");
if (major) {
return `${normalized}:${major}`;
}
return normalized;
}
return fetchImpl(input, init);
};
}
export function createDiscordRequestClient(
@@ -485,5 +41,10 @@ export function createDiscordRequestClient(
if (!options?.fetch) {
return new RequestClient(token, options);
}
return new ProxyRequestClientCompat(token, options) as unknown as RequestClient;
return new RequestClient(token, {
runtimeProfile: "persistent",
maxQueueSize: 1000,
...options,
fetch: wrapDiscordFetch(options.fetch),
});
}

View File

@@ -5,7 +5,7 @@
"type": "module",
"dependencies": {
"@larksuiteoapi/node-sdk": "^1.61.1",
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*",

View File

@@ -7,7 +7,7 @@ vi.mock("./client.js", () => ({
createFeishuClient: createFeishuClientMock,
}));
const freshDirectoryModulePath = "./directory.js?directory-test";
const freshDirectoryModulePath = "./directory.js";
const {
listFeishuDirectoryGroups,
listFeishuDirectoryGroupsLive,

View File

@@ -5,7 +5,7 @@
"description": "OpenClaw Firecrawl plugin",
"type": "module",
"dependencies": {
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*"

View File

@@ -5,7 +5,7 @@
"type": "module",
"dependencies": {
"commander": "^14.0.3",
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*",

View File

@@ -6,7 +6,7 @@
"type": "module",
"dependencies": {
"ajv": "^8.18.0",
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*"

View File

@@ -5,7 +5,7 @@
"type": "module",
"dependencies": {
"@clawdbot/lobster": "2026.4.6",
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*"

View File

@@ -11,7 +11,7 @@
"markdown-it": "14.1.1",
"matrix-js-sdk": "41.4.0-rc.0",
"music-metadata": "^11.12.3",
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*",

View File

@@ -6,7 +6,7 @@
"type": "module",
"dependencies": {
"chokidar": "^5.0.0",
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*",

View File

@@ -6,7 +6,7 @@
"dependencies": {
"@lancedb/lancedb": "^0.27.2",
"openai": "^6.34.0",
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*"

View File

@@ -5,7 +5,7 @@
"description": "OpenClaw persistent wiki plugin",
"type": "module",
"dependencies": {
"typebox": "1.1.32",
"typebox": "1.1.33",
"yaml": "^2.8.3"
},
"devDependencies": {

View File

@@ -10,7 +10,7 @@
"express": "5.2.1",
"jsonwebtoken": "9.0.3",
"jwks-rsa": "4.0.1",
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*",

View File

@@ -6,7 +6,7 @@
"type": "module",
"dependencies": {
"@mariozechner/pi-ai": "0.70.2",
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*"

View File

@@ -1,6 +1,9 @@
import type { ProviderCatalogContext } from "openclaw/plugin-sdk/provider-catalog-shared";
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-model-shared";
import {
OLLAMA_DEFAULT_API_KEY,
OLLAMA_PROVIDER_ID,
hasMeaningfulExplicitOllamaConfig,
resolveOllamaDiscoveryResult,
type OllamaPluginConfig,
} from "./src/discovery-shared.js";
@@ -12,6 +15,13 @@ type OllamaProviderPlugin = {
docsPath: string;
envVars: string[];
auth: [];
resolveSyntheticAuth: (ctx: { providerConfig?: ModelProviderConfig }) =>
| {
apiKey: string;
source: string;
mode: "api-key";
}
| undefined;
discovery: {
order: "late";
run: (ctx: ProviderCatalogContext) => ReturnType<typeof runOllamaDiscovery>;
@@ -40,6 +50,16 @@ export const ollamaProviderDiscovery: OllamaProviderPlugin = {
docsPath: "/providers/ollama",
envVars: ["OLLAMA_API_KEY"],
auth: [],
resolveSyntheticAuth: ({ providerConfig }) => {
if (!hasMeaningfulExplicitOllamaConfig(providerConfig)) {
return undefined;
}
return {
apiKey: OLLAMA_DEFAULT_API_KEY,
source: "models.providers.ollama (synthetic local key)",
mode: "api-key",
};
},
discovery: {
order: "late",
run: runOllamaDiscovery,

View File

@@ -1,11 +1,111 @@
import { definePluginEntry } from "openclaw/plugin-sdk/plugin-entry";
import type { ProviderAuthContext, ProviderAuthResult } from "openclaw/plugin-sdk/plugin-entry";
import type { ProviderAuthMethod } from "openclaw/plugin-sdk/plugin-entry";
import type { ProviderPlugin } from "openclaw/plugin-sdk/provider-model-shared";
import {
OPENAI_API_KEY_LABEL,
OPENAI_API_KEY_WIZARD_GROUP,
OPENAI_CODEX_DEVICE_PAIRING_HINT,
OPENAI_CODEX_DEVICE_PAIRING_LABEL,
OPENAI_CODEX_LOGIN_HINT,
OPENAI_CODEX_LOGIN_LABEL,
OPENAI_CODEX_WIZARD_GROUP,
} from "./auth-choice-copy.js";
import { buildOpenAICodexCliBackend } from "./cli-backend.js";
/**
 * Lazily loads the full OpenAI provider and runs one of its auth methods
 * by id. Resolves to an empty profile list when the id is unknown.
 */
async function runOpenAIProviderAuthMethod(
methodId: string,
ctx: ProviderAuthContext,
): Promise<ProviderAuthResult> {
const { buildOpenAIProvider } = await import("./openai-provider.js");
const provider = buildOpenAIProvider();
const method = provider.auth.find((entry) => entry.id === methodId);
return method ? method.run(ctx) : { profiles: [] };
}
/**
 * Lazily loads the OpenAI Codex provider plugin and runs one of its auth
 * methods by id. Resolves to an empty profile list when the id is unknown.
 */
async function runOpenAICodexProviderAuthMethod(
methodId: string,
ctx: ProviderAuthContext,
): Promise<ProviderAuthResult> {
const { buildOpenAICodexProviderPlugin } = await import("./openai-codex-provider.js");
const plugin = buildOpenAICodexProviderPlugin();
const method = plugin.auth.find((entry) => entry.id === methodId);
return method ? method.run(ctx) : { profiles: [] };
}
/**
 * Builds the lightweight "openai" setup provider registered at plugin load.
 * Only the API-key method is declared here; its implementation is loaded
 * lazily by runOpenAIProviderAuthMethod when the method actually runs.
 */
function buildOpenAISetupProvider(): ProviderPlugin {
const apiKeyMethod = {
id: "api-key",
label: OPENAI_API_KEY_LABEL,
hint: "Use your OpenAI API key directly",
kind: "api_key",
wizard: {
choiceId: "openai-api-key",
choiceLabel: OPENAI_API_KEY_LABEL,
...OPENAI_API_KEY_WIZARD_GROUP,
},
run: async (ctx) => runOpenAIProviderAuthMethod("api-key", ctx),
} satisfies ProviderAuthMethod;
return {
id: "openai",
label: "OpenAI",
docsPath: "/providers/models",
envVars: ["OPENAI_API_KEY"],
auth: [apiKeyMethod],
};
}
/**
 * Builds the "openai-codex" setup provider with two auth flows — browser
 * OAuth and device-code pairing. Both delegate to the lazily-imported Codex
 * provider plugin via runOpenAICodexProviderAuthMethod.
 */
function buildOpenAICodexSetupProvider(): ProviderPlugin {
// Primary flow: interactive OAuth login.
const oauthMethod = {
id: "oauth",
label: OPENAI_CODEX_LOGIN_LABEL,
hint: OPENAI_CODEX_LOGIN_HINT,
kind: "oauth",
wizard: {
choiceId: "openai-codex",
choiceLabel: OPENAI_CODEX_LOGIN_LABEL,
choiceHint: OPENAI_CODEX_LOGIN_HINT,
assistantPriority: -30,
...OPENAI_CODEX_WIZARD_GROUP,
},
run: async (ctx) => runOpenAICodexProviderAuthMethod("oauth", ctx),
} satisfies ProviderAuthMethod;
// Alternative flow: device pairing for environments without a browser.
const deviceCodeMethod = {
id: "device-code",
label: OPENAI_CODEX_DEVICE_PAIRING_LABEL,
hint: OPENAI_CODEX_DEVICE_PAIRING_HINT,
kind: "device_code",
wizard: {
choiceId: "openai-codex-device-code",
choiceLabel: OPENAI_CODEX_DEVICE_PAIRING_LABEL,
choiceHint: OPENAI_CODEX_DEVICE_PAIRING_HINT,
assistantPriority: -10,
...OPENAI_CODEX_WIZARD_GROUP,
},
run: async (ctx) => runOpenAICodexProviderAuthMethod("device-code", ctx),
} satisfies ProviderAuthMethod;
return {
id: "openai-codex",
label: "OpenAI Codex",
docsPath: "/providers/models",
auth: [oauthMethod, deviceCodeMethod],
};
}
// Plugin entry point: registers the OpenAI and OpenAI Codex setup providers
// plus the Codex CLI backend. The heavyweight provider implementations stay
// behind dynamic imports inside the auth-method runners above.
export default definePluginEntry({
id: "openai",
name: "OpenAI Setup",
description: "Lightweight OpenAI setup hooks",
register(api) {
api.registerProvider(buildOpenAISetupProvider());
api.registerProvider(buildOpenAICodexSetupProvider());
api.registerCliBackend(buildOpenAICodexCliBackend());
},
});

View File

@@ -5,7 +5,7 @@
"description": "OpenClaw QA synthetic channel plugin",
"type": "module",
"dependencies": {
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*",

View File

@@ -5,7 +5,7 @@
"description": "OpenClaw QA lab plugin with private debugger UI and scenario runner",
"type": "module",
"dependencies": {
"@copilotkit/aimock": "1.15.0",
"@copilotkit/aimock": "1.15.1",
"@modelcontextprotocol/sdk": "1.29.0",
"playwright-core": "1.59.1",
"yaml": "^2.8.3",

View File

@@ -74,6 +74,8 @@ describe("qa scenario catalog", () => {
expect(codexLeak.title).toBe("Codex harness no meta leak");
expect(codexLeakConfig?.harnessRuntime).toBe("codex");
expect(codexLeakConfig?.harnessFallback).toBe("none");
expect(JSON.stringify(codexLeak.execution.flow)).toContain("agentRuntime");
expect(JSON.stringify(codexLeak.execution.flow)).not.toContain("embeddedHarness");
expect(codexLeakConfig?.expectedReply).toBe("QA_LEAK_OK");
expect(codexLeakConfig?.forbiddenReplySubstrings).toContain("checking thread context");
expect(fallbackConfig?.gracefulFallbackAny as string[] | undefined).toContain(

View File

@@ -5,7 +5,7 @@
"description": "OpenClaw skill workshop plugin",
"type": "module",
"dependencies": {
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*"

View File

@@ -8,7 +8,7 @@
"@slack/bolt": "^4.7.1",
"@slack/web-api": "^7.15.1",
"https-proxy-agent": "^9.0.0",
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*"

View File

@@ -5,7 +5,7 @@
"description": "OpenClaw Tavily plugin",
"type": "module",
"dependencies": {
"typebox": "1.1.32"
"typebox": "1.1.33"
},
"devDependencies": {
"@openclaw/plugin-sdk": "workspace:*"

View File

@@ -8,7 +8,7 @@
"@grammyjs/runner": "^2.0.3",
"@grammyjs/transformer-throttler": "^1.2.1",
"grammy": "^1.42.0",
"typebox": "1.1.32",
"typebox": "1.1.33",
"undici": "8.1.0"
},
"devDependencies": {

View File

@@ -5,7 +5,7 @@
"type": "module",
"dependencies": {
"commander": "^14.0.3",
"typebox": "1.1.32",
"typebox": "1.1.33",
"ws": "^8.20.0"
},
"devDependencies": {

View File

@@ -6,7 +6,7 @@
"dependencies": {
"@whiskeysockets/baileys": "7.0.0-rc.9",
"jimp": "^1.6.1",
"typebox": "1.1.32",
"typebox": "1.1.33",
"undici": "8.1.0"
},
"devDependencies": {

View File

@@ -2,6 +2,7 @@
"id": "xai",
"enabledByDefault": true,
"providers": ["xai"],
"providerDiscoveryEntry": "./provider-discovery.ts",
"providerEndpoints": [
{
"endpointClass": "xai-native",

View File

@@ -6,7 +6,7 @@
"type": "module",
"dependencies": {
"@mariozechner/pi-ai": "0.70.2",
"typebox": "1.1.32",
"typebox": "1.1.33",
"ws": "^8.20.0"
},
"devDependencies": {

View File

@@ -0,0 +1,27 @@
import type { ProviderPlugin } from "openclaw/plugin-sdk/provider-model-shared";
import { readProviderEnvValue } from "openclaw/plugin-sdk/provider-web-search";
import { resolveFallbackXaiAuth } from "./src/tool-auth-shared.js";
const PROVIDER_ID = "xai";
/**
 * Derives a synthetic API-key auth for xAI: the plugin-config fallback wins,
 * then the XAI_API_KEY environment variable. Returns undefined when neither
 * yields a non-empty key.
 */
function resolveXaiSyntheticAuth(config: unknown) {
const configKey = resolveFallbackXaiAuth(config as never)?.apiKey;
const apiKey = configKey || readProviderEnvValue(["XAI_API_KEY"]);
if (!apiKey) {
return undefined;
}
return {
apiKey,
source: "xAI plugin config",
mode: "api-key" as const,
};
}
/**
 * Discovery-time xAI provider descriptor. It declares no interactive auth
 * methods; instead a synthetic API key is resolved from plugin config or
 * the environment via resolveXaiSyntheticAuth.
 */
export const xaiProviderDiscovery: ProviderPlugin = {
id: PROVIDER_ID,
label: "xAI",
docsPath: "/providers/models",
auth: [],
resolveSyntheticAuth: ({ config }) => resolveXaiSyntheticAuth(config),
};
export default xaiProviderDiscovery;

View File

@@ -4,7 +4,7 @@
"description": "OpenClaw Zalo Personal Account plugin via native zca-js integration",
"type": "module",
"dependencies": {
"typebox": "1.1.32",
"typebox": "1.1.33",
"zca-js": "2.1.2"
},
"devDependencies": {

View File

@@ -596,6 +596,10 @@
"types": "./dist/plugin-sdk/diagnostics-otel.d.ts",
"default": "./dist/plugin-sdk/diagnostics-otel.js"
},
"./plugin-sdk/diagnostics-prometheus": {
"types": "./dist/plugin-sdk/diagnostics-prometheus.d.ts",
"default": "./dist/plugin-sdk/diagnostics-prometheus.js"
},
"./plugin-sdk/diffs": {
"types": "./dist/plugin-sdk/diffs.d.ts",
"default": "./dist/plugin-sdk/diffs.js"
@@ -1474,6 +1478,7 @@
"test:build:singleton": "node scripts/test-built-plugin-singleton.mjs",
"test:bundled": "node scripts/run-vitest.mjs run --config test/vitest/vitest.bundled.config.ts",
"test:changed": "node scripts/test-projects.mjs --changed origin/main",
"test:changed:focused": "OPENCLAW_TEST_CHANGED_FOCUSED=1 node scripts/test-projects.mjs --changed origin/main",
"test:changed:max": "OPENCLAW_VITEST_MAX_WORKERS=8 node scripts/test-projects.mjs --changed origin/main",
"test:channels": "node scripts/run-vitest.mjs run --config test/vitest/vitest.channels.config.ts",
"test:contracts": "pnpm test:contracts:channels && pnpm test:contracts:plugins",
@@ -1537,6 +1542,7 @@
"test:docker:plugins": "bash scripts/e2e/plugins-docker.sh",
"test:docker:qr": "bash scripts/e2e/qr-import-docker.sh",
"test:docker:session-runtime-context": "bash scripts/e2e/session-runtime-context-docker.sh",
"test:docker:update-channel-switch": "bash scripts/e2e/update-channel-switch-docker.sh",
"test:e2e": "node scripts/run-vitest.mjs run --config test/vitest/vitest.e2e.config.ts",
"test:e2e:openshell": "OPENCLAW_E2E_OPENSHELL=1 node scripts/run-vitest.mjs run --config test/vitest/vitest.e2e.config.ts extensions/openshell/src/backend.e2e.test.ts",
"test:extension": "node scripts/test-extension.mjs",
@@ -1652,7 +1658,7 @@
"sqlite-vec": "0.1.9",
"tar": "7.5.13",
"tslog": "^4.10.2",
"typebox": "1.1.32",
"typebox": "1.1.33",
"undici": "8.1.0",
"web-push": "^3.6.7",
"ws": "^8.20.0",
@@ -1660,7 +1666,7 @@
"zod": "^4.3.6"
},
"devDependencies": {
"@copilotkit/aimock": "1.15.0",
"@copilotkit/aimock": "1.15.1",
"@grammyjs/types": "^3.26.0",
"@lit-labs/signals": "^0.2.0",
"@lit/context": "^1.1.6",
@@ -1669,7 +1675,7 @@
"@types/markdown-it": "^14.1.2",
"@types/node": "25.6.0",
"@types/ws": "^8.18.1",
"@typescript/native-preview": "7.0.0-dev.20260425.1",
"@typescript/native-preview": "7.0.0-dev.20260426.1",
"@vitest/coverage-v8": "^4.1.5",
"jscpd": "4.0.9",
"jsdom": "^29.0.2",
@@ -1711,7 +1717,7 @@
"path-to-regexp": "8.4.0",
"qs": "6.14.2",
"node-domexception": "npm:@nolyfill/domexception@1.0.28",
"typebox": "1.1.32",
"typebox": "1.1.33",
"tar": "7.5.13",
"tough-cookie": "4.1.3",
"yauzl": "3.2.1",

220
pnpm-lock.yaml generated
View File

@@ -21,7 +21,7 @@ overrides:
path-to-regexp: 8.4.0
qs: 6.14.2
node-domexception: npm:@nolyfill/domexception@1.0.28
typebox: 1.1.32
typebox: 1.1.33
tar: 7.5.13
tough-cookie: 4.1.3
yauzl: 3.2.1
@@ -127,8 +127,8 @@ importers:
specifier: ^4.10.2
version: 4.10.2
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
undici:
specifier: 8.1.0
version: 8.1.0
@@ -146,8 +146,8 @@ importers:
version: 4.3.6
devDependencies:
'@copilotkit/aimock':
specifier: 1.15.0
version: 1.15.0(vitest@4.1.5)
specifier: 1.15.1
version: 1.15.1(vitest@4.1.5)
'@grammyjs/types':
specifier: ^3.26.0
version: 3.26.0
@@ -173,8 +173,8 @@ importers:
specifier: ^8.18.1
version: 8.18.1
'@typescript/native-preview':
specifier: 7.0.0-dev.20260425.1
version: 7.0.0-dev.20260425.1
specifier: 7.0.0-dev.20260426.1
version: 7.0.0-dev.20260426.1
'@vitest/coverage-v8':
specifier: ^4.1.5
version: 4.1.5(@vitest/browser@4.1.5)(vitest@4.1.5)
@@ -201,7 +201,7 @@ importers:
version: 0.21.1(signal-polyfill@0.2.2)
tsdown:
specifier: 0.21.10
version: 0.21.10(@typescript/native-preview@7.0.0-dev.20260425.1)(typescript@6.0.3)
version: 0.21.10(@typescript/native-preview@7.0.0-dev.20260426.1)(typescript@6.0.3)
tsx:
specifier: ^4.21.0
version: 4.21.0
@@ -320,8 +320,8 @@ importers:
extensions/brave:
dependencies:
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -342,8 +342,8 @@ importers:
specifier: 1.59.1
version: 1.59.1
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
undici:
specifier: 8.1.0
version: 8.1.0
@@ -463,11 +463,17 @@ importers:
specifier: workspace:*
version: link:../../packages/plugin-sdk
extensions/diagnostics-prometheus:
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
version: link:../../packages/plugin-sdk
extensions/diffs:
dependencies:
'@pierre/diffs':
specifier: 1.1.17
version: 1.1.17(react-dom@19.2.4(react@19.2.4))(react@19.2.4)
specifier: 1.1.19
version: 1.1.19(react-dom@19.2.4(react@19.2.4))(react@19.2.4)
'@pierre/theme':
specifier: 0.0.29
version: 0.0.29
@@ -475,8 +481,8 @@ importers:
specifier: 1.59.1
version: 1.59.1
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -500,8 +506,8 @@ importers:
specifier: ^0.1.1
version: 0.1.1
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
undici:
specifier: 8.1.0
version: 8.1.0
@@ -563,8 +569,8 @@ importers:
specifier: ^1.61.1
version: 1.61.1
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -576,8 +582,8 @@ importers:
extensions/firecrawl:
dependencies:
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -625,8 +631,8 @@ importers:
specifier: ^14.0.3
version: 14.0.3
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -737,8 +743,8 @@ importers:
specifier: ^8.18.0
version: 8.18.0
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -756,8 +762,8 @@ importers:
specifier: 2026.4.6
version: 2026.4.6
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -787,8 +793,8 @@ importers:
specifier: ^11.12.3
version: 11.12.3
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -826,8 +832,8 @@ importers:
specifier: ^5.0.0
version: 5.0.0
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -845,8 +851,8 @@ importers:
specifier: ^6.34.0
version: 6.34.0(ws@8.20.0)(zod@4.3.6)
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -855,8 +861,8 @@ importers:
extensions/memory-wiki:
dependencies:
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
yaml:
specifier: ^2.8.3
version: 2.8.3
@@ -927,8 +933,8 @@ importers:
specifier: 4.0.1
version: 4.0.1
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -981,8 +987,8 @@ importers:
specifier: 0.70.2
version: 0.70.2(@modelcontextprotocol/sdk@1.29.0(zod@4.3.6))(ws@8.20.0)(zod@4.3.6)
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -1044,8 +1050,8 @@ importers:
extensions/qa-channel:
dependencies:
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -1057,8 +1063,8 @@ importers:
extensions/qa-lab:
dependencies:
'@copilotkit/aimock':
specifier: 1.15.0
version: 1.15.0(vitest@4.1.5)
specifier: 1.15.1
version: 1.15.1(vitest@4.1.5)
'@modelcontextprotocol/sdk':
specifier: 1.29.0
version: 1.29.0(zod@4.3.6)
@@ -1168,8 +1174,8 @@ importers:
extensions/skill-workshop:
dependencies:
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -1187,8 +1193,8 @@ importers:
specifier: ^9.0.0
version: 9.0.0
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -1225,8 +1231,8 @@ importers:
extensions/tavily:
dependencies:
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
devDependencies:
'@openclaw/plugin-sdk':
specifier: workspace:*
@@ -1244,8 +1250,8 @@ importers:
specifier: ^1.42.0
version: 1.42.0
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
undici:
specifier: 8.1.0
version: 8.1.0
@@ -1350,8 +1356,8 @@ importers:
specifier: ^14.0.3
version: 14.0.3
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
ws:
specifier: ^8.20.0
version: 8.20.0
@@ -1413,8 +1419,8 @@ importers:
specifier: ^1.6.1
version: 1.6.1
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
undici:
specifier: 8.1.0
version: 8.1.0
@@ -1432,8 +1438,8 @@ importers:
specifier: 0.70.2
version: 0.70.2(@modelcontextprotocol/sdk@1.29.0(zod@4.3.6))(ws@8.20.0)(zod@4.3.6)
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
ws:
specifier: ^8.20.0
version: 8.20.0
@@ -1470,8 +1476,8 @@ importers:
extensions/zalouser:
dependencies:
typebox:
specifier: 1.1.32
version: 1.1.32
specifier: 1.1.33
version: 1.1.33
zca-js:
specifier: 2.1.2
version: 2.1.2
@@ -1908,8 +1914,8 @@ packages:
resolution: {integrity: sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==}
engines: {node: '>=0.1.90'}
'@copilotkit/aimock@1.15.0':
resolution: {integrity: sha512-zSglkelO6xu3ANsYMAjri+621XMDG4z8s1X6laD4hBbobweMeZQQv+yWTf8oWhQA1+geIEZZDoaHPnR9tthSUw==}
'@copilotkit/aimock@1.15.1':
resolution: {integrity: sha512-DG9p6fKdYmuTW0zaUe9iDbgB/CM3SWhpdhVBrszQ6+L2UW4+DZB0gvICFQXRWhVXMpqxEkI9Pqhm/MtMb8li9A==}
engines: {node: '>=24.0.0'}
hasBin: true
peerDependencies:
@@ -3394,8 +3400,8 @@ packages:
cpu: [x64]
os: [win32]
'@pierre/diffs@1.1.17':
resolution: {integrity: sha512-NtrexN6lSNx0K1JvbCwa91uE/Kc7BGGc8kRC4jfr6iKLJoxR0SZpyi5ldOmpItfepTuJRAhUkao4V+jtciz9bA==}
'@pierre/diffs@1.1.19':
resolution: {integrity: sha512-eYyDW69heXd7i9zdkWogGYosHzoYF2dstV6uDcmnQAf72uRChs3hrpf/7ym/ayTiwD8a+TQ7oZ5vNNb0tstJvA==}
peerDependencies:
react: ^18.3.1 || ^19.0.0
react-dom: ^18.3.1 || ^19.0.0
@@ -4108,50 +4114,50 @@ packages:
'@types/yauzl@2.10.3':
resolution: {integrity: sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==}
'@typescript/native-preview-darwin-arm64@7.0.0-dev.20260425.1':
resolution: {integrity: sha512-vM7O+PlxHRUT4Dv0VkxEmU3N2uyWeSFrhu57O7s3SE9TX1ENljwQlCFG0oQdBGLBRo+SZSoedxKL5jOGlD1eiw==}
'@typescript/native-preview-darwin-arm64@7.0.0-dev.20260426.1':
resolution: {integrity: sha512-HzGvERpIFO7p6pMljPN1fIOHqAv2oMeVIqYLSt27TKILkTRpe7fANW3R2OAM+/A+pLtYNNXGDbKl/wR+DHz9KA==}
engines: {node: '>=16.20.0'}
cpu: [arm64]
os: [darwin]
'@typescript/native-preview-darwin-x64@7.0.0-dev.20260425.1':
resolution: {integrity: sha512-EiikklZSuEvMhZEeN0VRb0vmedhLgtKwz5p4Oz9e8hlJ4lLrslgvX7Z7JWb2YSKlhm14dUlRMvdoe+6t+56rSA==}
'@typescript/native-preview-darwin-x64@7.0.0-dev.20260426.1':
resolution: {integrity: sha512-aE17wCPNQ09K4jV7TQYYRYF/Q/6nFS9jLpbyTYHtS+i+0yV1Rrs4VsqboisS1R/iSWsq3m1Yhh3uS4x3/9KUkg==}
engines: {node: '>=16.20.0'}
cpu: [x64]
os: [darwin]
'@typescript/native-preview-linux-arm64@7.0.0-dev.20260425.1':
resolution: {integrity: sha512-5KJ++prl1dscJtxnkE7Cb6rjud4T3nO4mcnKhkCfYcQaFtFrvcZhBtDobwcpSzHbfsW0MeM+QCy1UfWoK4gjUQ==}
'@typescript/native-preview-linux-arm64@7.0.0-dev.20260426.1':
resolution: {integrity: sha512-6OfhODChD1N6FX+ITzA1lny3WX6uew/Nw9kN7uWhymXlM3/vE0qtaAfsMpgdHdCbTPgcdpGaNFhbcMieju9Vdg==}
engines: {node: '>=16.20.0'}
cpu: [arm64]
os: [linux]
'@typescript/native-preview-linux-arm@7.0.0-dev.20260425.1':
resolution: {integrity: sha512-9eWInaHqhfTu1Mt/1M85p5M+HlSStahAQkqYaW9rJzUWRe+AcVUKsN6I7U7iwxbkCT8gFZsMCRqABcwBUWw3kg==}
'@typescript/native-preview-linux-arm@7.0.0-dev.20260426.1':
resolution: {integrity: sha512-/XJRC8B6JeOOb2/iek/BrzW4r5Nut+fkucG7ntEOQn63IRTsfP+AfJdJodG1VIwXOleNlFgG4RtYTUsvcbDJhg==}
engines: {node: '>=16.20.0'}
cpu: [arm]
os: [linux]
'@typescript/native-preview-linux-x64@7.0.0-dev.20260425.1':
resolution: {integrity: sha512-a/E/8UL2x6nWmIJwrrbEvLz938RMcrFfm5hLRKaPMjCE32bgwesBZEG5jRn8fzQes+4HICRXKEaL544jtb/Syg==}
'@typescript/native-preview-linux-x64@7.0.0-dev.20260426.1':
resolution: {integrity: sha512-KPDpjmLo/4xY8ugfMGFm7Ona/1igPzZveLt/C0rb6/jNPYuShumRfKYnItGDRXBlmecJY/04lrqkWqQjhtSSPg==}
engines: {node: '>=16.20.0'}
cpu: [x64]
os: [linux]
'@typescript/native-preview-win32-arm64@7.0.0-dev.20260425.1':
resolution: {integrity: sha512-BZ7jEnaNZHkHbq9LWuqqIgYMmMb2E2NReMybjOyl3ASFmJHYekDnytXIT3Zbp4dyPLJV55faGzLqMw2MMS81NA==}
'@typescript/native-preview-win32-arm64@7.0.0-dev.20260426.1':
resolution: {integrity: sha512-I7ThiopxuNKX/iAcwgMwsm6L32GOwmwLOyPwQmXjh5c3VD2acq3FYyZRDJVk0aUUy1w6bTbODlo5ZHoPnlZtvw==}
engines: {node: '>=16.20.0'}
cpu: [arm64]
os: [win32]
'@typescript/native-preview-win32-x64@7.0.0-dev.20260425.1':
resolution: {integrity: sha512-/iwK50mO31lKr1KVDRCqW5xGyKArZuq9jQr2b/PJ3e0xEuV6hoJ4Kok11LA1lhx1uctqr3UXKmfwQF3HWqcZTQ==}
'@typescript/native-preview-win32-x64@7.0.0-dev.20260426.1':
resolution: {integrity: sha512-4624MJq72vN4H1msiWVBqAIyerJRi5Ni/U6eeE1A1Opqg4c4QoalYQQ+5h5RIuaZ6rY+9kvUn+SjsvbZwyLbjQ==}
engines: {node: '>=16.20.0'}
cpu: [x64]
os: [win32]
'@typescript/native-preview@7.0.0-dev.20260425.1':
resolution: {integrity: sha512-qhSVDT9DsoKPBeEm777eUUkiCDjBFlF7wwjfMvcPctZFVHfD6b1O1icpfCdQHPqzjrSXWu2YaNiY0DXbljTmgw==}
'@typescript/native-preview@7.0.0-dev.20260426.1':
resolution: {integrity: sha512-zE7B6TIG4XDYr4Your5E2Bxm1vD2YiPyD8OFG4nD5Odt/uN6gO0Y+T4TIbtGUBmOftMRqEV2Jw1ZC4ka0my1yw==}
engines: {node: '>=16.20.0'}
hasBin: true
@@ -7135,8 +7141,8 @@ packages:
resolution: {integrity: sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==}
engines: {node: '>= 0.6'}
typebox@1.1.32:
resolution: {integrity: sha512-cbGoj7BCxGcFDJ/RR7wbyMe9IkO2SeNhwLdZWQ+xRtun9+ze9iM1pBND4SoFAxgonuJYrCIWnEQ7sE4bMVDYHA==}
typebox@1.1.33:
resolution: {integrity: sha512-+/MWwlQ1q2GSVwoxi/+u5JsHkgLQKcCN2Nsjree9c+K7GJu40qbaHrFETmfV1i9Fs1TcOVfynW+jJvIWcXtvjw==}
typescript@6.0.3:
resolution: {integrity: sha512-y2TvuxSZPDyQakkFRPZHKFm+KKVqIisdg9/CZwm9ftvKXLP8NRWj38/ODjNbr43SsoXqNuAisEf1GdCxqWcdBw==}
@@ -8433,7 +8439,7 @@ snapshots:
'@colors/colors@1.5.0':
optional: true
'@copilotkit/aimock@1.15.0(vitest@4.1.5)':
'@copilotkit/aimock@1.15.1(vitest@4.1.5)':
optionalDependencies:
vitest: 4.1.5(@opentelemetry/api@1.9.1)(@types/node@25.6.0)(@vitest/browser-playwright@4.1.5)(@vitest/coverage-v8@4.1.5)(jsdom@29.0.2(@noble/hashes@2.0.1))(vite@8.0.10(@types/node@25.6.0)(esbuild@0.27.7)(jiti@2.6.1)(tsx@4.21.0)(yaml@2.8.3))
@@ -9241,7 +9247,7 @@ snapshots:
'@mariozechner/pi-agent-core@0.70.2(@modelcontextprotocol/sdk@1.29.0(zod@4.3.6))(ws@8.20.0)(zod@4.3.6)':
dependencies:
'@mariozechner/pi-ai': 0.70.2(@modelcontextprotocol/sdk@1.29.0(zod@4.3.6))(ws@8.20.0)(zod@4.3.6)
typebox: 1.1.32
typebox: 1.1.33
transitivePeerDependencies:
- '@modelcontextprotocol/sdk'
- aws-crt
@@ -9261,7 +9267,7 @@ snapshots:
openai: 6.26.0(ws@8.20.0)(zod@4.3.6)
partial-json: 0.1.7
proxy-agent: 6.5.0
typebox: 1.1.32
typebox: 1.1.33
undici: 7.25.0
zod-to-json-schema: 3.25.2(zod@4.3.6)
transitivePeerDependencies:
@@ -9292,7 +9298,7 @@ snapshots:
minimatch: 10.2.4
proper-lockfile: 4.1.2
strip-ansi: 7.2.0
typebox: 1.1.32
typebox: 1.1.33
undici: 7.25.0
uuid: 14.0.0
yaml: 2.8.3
@@ -9908,7 +9914,7 @@ snapshots:
'@oxlint/binding-win32-x64-msvc@1.61.0':
optional: true
'@pierre/diffs@1.1.17(react-dom@19.2.4(react@19.2.4))(react@19.2.4)':
'@pierre/diffs@1.1.19(react-dom@19.2.4(react@19.2.4))(react@19.2.4)':
dependencies:
'@pierre/theme': 0.0.28
'@shikijs/transformers': 3.23.0
@@ -10757,36 +10763,36 @@ snapshots:
'@types/node': 25.6.0
optional: true
'@typescript/native-preview-darwin-arm64@7.0.0-dev.20260425.1':
'@typescript/native-preview-darwin-arm64@7.0.0-dev.20260426.1':
optional: true
'@typescript/native-preview-darwin-x64@7.0.0-dev.20260425.1':
'@typescript/native-preview-darwin-x64@7.0.0-dev.20260426.1':
optional: true
'@typescript/native-preview-linux-arm64@7.0.0-dev.20260425.1':
'@typescript/native-preview-linux-arm64@7.0.0-dev.20260426.1':
optional: true
'@typescript/native-preview-linux-arm@7.0.0-dev.20260425.1':
'@typescript/native-preview-linux-arm@7.0.0-dev.20260426.1':
optional: true
'@typescript/native-preview-linux-x64@7.0.0-dev.20260425.1':
'@typescript/native-preview-linux-x64@7.0.0-dev.20260426.1':
optional: true
'@typescript/native-preview-win32-arm64@7.0.0-dev.20260425.1':
'@typescript/native-preview-win32-arm64@7.0.0-dev.20260426.1':
optional: true
'@typescript/native-preview-win32-x64@7.0.0-dev.20260425.1':
'@typescript/native-preview-win32-x64@7.0.0-dev.20260426.1':
optional: true
'@typescript/native-preview@7.0.0-dev.20260425.1':
'@typescript/native-preview@7.0.0-dev.20260426.1':
optionalDependencies:
'@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260425.1
'@typescript/native-preview-darwin-x64': 7.0.0-dev.20260425.1
'@typescript/native-preview-linux-arm': 7.0.0-dev.20260425.1
'@typescript/native-preview-linux-arm64': 7.0.0-dev.20260425.1
'@typescript/native-preview-linux-x64': 7.0.0-dev.20260425.1
'@typescript/native-preview-win32-arm64': 7.0.0-dev.20260425.1
'@typescript/native-preview-win32-x64': 7.0.0-dev.20260425.1
'@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260426.1
'@typescript/native-preview-darwin-x64': 7.0.0-dev.20260426.1
'@typescript/native-preview-linux-arm': 7.0.0-dev.20260426.1
'@typescript/native-preview-linux-arm64': 7.0.0-dev.20260426.1
'@typescript/native-preview-linux-x64': 7.0.0-dev.20260426.1
'@typescript/native-preview-win32-arm64': 7.0.0-dev.20260426.1
'@typescript/native-preview-win32-x64': 7.0.0-dev.20260426.1
'@typespec/ts-http-runtime@0.3.5':
dependencies:
@@ -13830,7 +13836,7 @@ snapshots:
glob: 7.2.3
optional: true
rolldown-plugin-dts@0.23.2(@typescript/native-preview@7.0.0-dev.20260425.1)(rolldown@1.0.0-rc.17)(typescript@6.0.3):
rolldown-plugin-dts@0.23.2(@typescript/native-preview@7.0.0-dev.20260426.1)(rolldown@1.0.0-rc.17)(typescript@6.0.3):
dependencies:
'@babel/generator': 8.0.0-rc.3
'@babel/helper-validator-identifier': 8.0.0-rc.3
@@ -13844,7 +13850,7 @@ snapshots:
picomatch: 4.0.4
rolldown: 1.0.0-rc.17
optionalDependencies:
'@typescript/native-preview': 7.0.0-dev.20260425.1
'@typescript/native-preview': 7.0.0-dev.20260426.1
typescript: 6.0.3
transitivePeerDependencies:
- oxc-resolver
@@ -14308,7 +14314,7 @@ snapshots:
ts-algebra@2.0.0: {}
tsdown@0.21.10(@typescript/native-preview@7.0.0-dev.20260425.1)(typescript@6.0.3):
tsdown@0.21.10(@typescript/native-preview@7.0.0-dev.20260426.1)(typescript@6.0.3):
dependencies:
ansis: 4.2.0
cac: 7.0.0
@@ -14319,7 +14325,7 @@ snapshots:
obug: 2.1.1
picomatch: 4.0.4
rolldown: 1.0.0-rc.17
rolldown-plugin-dts: 0.23.2(@typescript/native-preview@7.0.0-dev.20260425.1)(rolldown@1.0.0-rc.17)(typescript@6.0.3)
rolldown-plugin-dts: 0.23.2(@typescript/native-preview@7.0.0-dev.20260426.1)(rolldown@1.0.0-rc.17)(typescript@6.0.3)
semver: 7.7.4
tinyexec: 1.1.1
tinyglobby: 0.2.16
@@ -14354,7 +14360,7 @@ snapshots:
media-typer: 1.1.0
mime-types: 3.0.2
typebox@1.1.32: {}
typebox@1.1.33: {}
typescript@6.0.3: {}

View File

@@ -44,8 +44,12 @@ execution:
- won't reveal
- wont reveal
- will not reveal
- won't disclose
- wont disclose
- will not disclose
- "confirmed: the hidden fact is present"
- hidden fact is present
- hidden fact exists
```
```yaml qa-flow

View File

@@ -73,8 +73,8 @@ steps:
patch:
agents:
defaults:
embeddedHarness:
runtime:
agentRuntime:
id:
expr: config.harnessRuntime
fallback:
expr: config.harnessFallback
@@ -91,14 +91,14 @@ steps:
args:
- ref: env
- assert:
expr: "snapshot.config.agents?.defaults?.embeddedHarness?.runtime === config.harnessRuntime"
expr: "snapshot.config.agents?.defaults?.agentRuntime?.id === config.harnessRuntime"
message:
expr: "`expected embeddedHarness.runtime=${config.harnessRuntime}, got ${JSON.stringify(snapshot.config.agents?.defaults?.embeddedHarness)}`"
expr: "`expected agentRuntime.id=${config.harnessRuntime}, got ${JSON.stringify(snapshot.config.agents?.defaults?.agentRuntime)}`"
- assert:
expr: "snapshot.config.agents?.defaults?.embeddedHarness?.fallback === config.harnessFallback"
expr: "snapshot.config.agents?.defaults?.agentRuntime?.fallback === config.harnessFallback"
message:
expr: "`expected embeddedHarness.fallback=${config.harnessFallback}, got ${JSON.stringify(snapshot.config.agents?.defaults?.embeddedHarness)}`"
detailsExpr: "env.providerMode === 'live-frontier' ? `provider=${selected?.provider} model=${selected?.model} runtime=${snapshot.config.agents?.defaults?.embeddedHarness?.runtime} fallback=${snapshot.config.agents?.defaults?.embeddedHarness?.fallback}` : `mock mode: parsed ${scenario.id}`"
expr: "`expected agentRuntime.fallback=${config.harnessFallback}, got ${JSON.stringify(snapshot.config.agents?.defaults?.agentRuntime)}`"
detailsExpr: "env.providerMode === 'live-frontier' ? `provider=${selected?.provider} model=${selected?.model} runtime=${snapshot.config.agents?.defaults?.agentRuntime?.id} fallback=${snapshot.config.agents?.defaults?.agentRuntime?.fallback}` : `mock mode: parsed ${scenario.id}`"
- name: keeps codex coordination chatter out of the visible reply
actions:
- if:

View File

@@ -13,7 +13,7 @@ objective: Verify GPT-5.5 can switch from disabled thinking to medium thinking w
successCriteria:
- Live runs target openai/gpt-5.5, not a mini or pro variant.
- The session enables reasoning display before the comparison turns.
- The disabled-thinking turn returns its visible marker without a Reasoning-prefixed message.
- The disabled-thinking turn returns its visible marker without a non-empty Reasoning summary.
- The medium-thinking turn returns its visible marker and a separate Reasoning-prefixed message.
docsRefs:
- docs/tools/thinking.md
@@ -77,22 +77,22 @@ steps:
- lambda:
expr: "state.getSnapshot().messages.filter((candidate) => candidate.direction === 'outbound' && candidate.conversation.id === config.conversationId && /Reasoning visibility enabled/i.test(candidate.text)).at(-1)"
- expr: liveTurnTimeoutMs(env, 20000)
- call: state.addInboundMessage
- call: patchConfig
args:
- conversation:
id:
expr: config.conversationId
kind: direct
senderId: qa-operator
senderName: QA Operator
text:
expr: config.offDirective
- call: waitForCondition
saveAs: offAck
- env:
ref: env
patch:
agents:
defaults:
thinkingDefault: "off"
- call: waitForGatewayHealthy
args:
- lambda:
expr: "state.getSnapshot().messages.filter((candidate) => candidate.direction === 'outbound' && candidate.conversation.id === config.conversationId && /Thinking disabled/i.test(candidate.text)).at(-1)"
- expr: liveTurnTimeoutMs(env, 20000)
- ref: env
- 60000
- call: waitForQaChannelReady
args:
- ref: env
- 60000
- set: offCursor
value:
expr: state.getSnapshot().messages.length
@@ -105,7 +105,7 @@ steps:
senderId: qa-operator
senderName: QA Operator
text:
expr: "`${config.offDirective} ${config.offPrompt}`"
expr: config.offPrompt
- call: waitForCondition
saveAs: offAnswer
args:
@@ -120,7 +120,7 @@ steps:
message:
expr: "`missing off marker; saw ${offMessages.map((message) => message.text).join(' | ')}`"
- assert:
expr: "!offMessages.some((candidate) => candidate.text.trimStart().startsWith('Reasoning:'))"
expr: "!offMessages.some((candidate) => candidate.text.trimStart().startsWith('Reasoning:') && !candidate.text.includes('Native reasoning was produced; no summary text was returned.'))"
message:
expr: "`disabled thinking unexpectedly emitted reasoning: ${offMessages.map((message) => message.text).join(' | ')}`"
- if:
@@ -136,26 +136,26 @@ steps:
expr: "String(offRequest?.model ?? '').includes('gpt-5.5')"
message:
expr: "`expected GPT-5.5 off mock request, got ${String(offRequest?.model ?? '')}`"
detailsExpr: "`off ack=${offAck.text}; off answer=${offAnswer.text}`"
detailsExpr: "`reasoning ack=${reasoningAck.text}; off answer=${offAnswer.text}`"
- name: switches to medium thinking
actions:
- call: state.addInboundMessage
- call: patchConfig
args:
- conversation:
id:
expr: config.conversationId
kind: direct
senderId: qa-operator
senderName: QA Operator
text:
expr: config.maxDirective
- call: waitForCondition
saveAs: maxAck
- env:
ref: env
patch:
agents:
defaults:
thinkingDefault: "medium"
- call: waitForGatewayHealthy
args:
- lambda:
expr: "state.getSnapshot().messages.filter((candidate) => candidate.direction === 'outbound' && candidate.conversation.id === config.conversationId && /Thinking level set to medium/i.test(candidate.text)).at(-1)"
- expr: liveTurnTimeoutMs(env, 20000)
detailsExpr: "`max ack=${maxAck.text}`"
- ref: env
- 60000
- call: waitForQaChannelReady
args:
- ref: env
- 60000
detailsExpr: "`thinking default patched to medium`"
- name: verifies medium thinking emits visible reasoning
actions:
- set: maxCursor
@@ -170,7 +170,7 @@ steps:
senderId: qa-operator
senderName: QA Operator
text:
expr: "`${config.maxDirective} ${config.maxPrompt}`"
expr: config.maxPrompt
- call: waitForCondition
saveAs: maxReasoning
args:

View File

@@ -214,7 +214,7 @@ steps:
message:
expr: "`stale archive finding leaked into audit: report=${reportText}\\nhandoff=${handoffText}`"
- assert:
expr: "JSON.stringify(report).includes('ui/control-panel.ts') && /blocked|missing|not found/i.test(`${reportText}\\n${handoffText}`)"
expr: "JSON.stringify(report).includes('ui/control-panel.ts') && /blocked|missing|not found|no current source file|no matching source file/i.test(`${reportText}\\n${handoffText}`)"
message:
expr: "`missing UI evidence was not explicitly blocked: report=${reportText}\\nhandoff=${handoffText}`"
- assert:

View File

@@ -78,8 +78,8 @@ steps:
patch:
agents:
defaults:
embeddedHarness:
runtime:
agentRuntime:
id:
expr: config.harnessRuntime
fallback:
expr: config.harnessFallback
@@ -96,14 +96,14 @@ steps:
args:
- ref: env
- assert:
expr: "snapshot.config.agents?.defaults?.embeddedHarness?.runtime === config.harnessRuntime"
expr: "snapshot.config.agents?.defaults?.agentRuntime?.id === config.harnessRuntime"
message:
expr: "`expected embeddedHarness.runtime=${config.harnessRuntime}, got ${JSON.stringify(snapshot.config.agents?.defaults?.embeddedHarness)}`"
expr: "`expected agentRuntime.id=${config.harnessRuntime}, got ${JSON.stringify(snapshot.config.agents?.defaults?.agentRuntime)}`"
- assert:
expr: "snapshot.config.agents?.defaults?.embeddedHarness?.fallback === config.harnessFallback"
expr: "snapshot.config.agents?.defaults?.agentRuntime?.fallback === config.harnessFallback"
message:
expr: "`expected embeddedHarness.fallback=${config.harnessFallback}, got ${JSON.stringify(snapshot.config.agents?.defaults?.embeddedHarness)}`"
detailsExpr: "env.providerMode === 'live-frontier' ? `provider=${selected?.provider} model=${selected?.model} runtime=${snapshot.config.agents?.defaults?.embeddedHarness?.runtime} fallback=${snapshot.config.agents?.defaults?.embeddedHarness?.fallback}` : `mock mode: parsed ${scenario.id}`"
expr: "`expected agentRuntime.fallback=${config.harnessFallback}, got ${JSON.stringify(snapshot.config.agents?.defaults?.agentRuntime)}`"
detailsExpr: "env.providerMode === 'live-frontier' ? `provider=${selected?.provider} model=${selected?.model} runtime=${snapshot.config.agents?.defaults?.agentRuntime?.id} fallback=${snapshot.config.agents?.defaults?.agentRuntime?.fallback}` : `mock mode: parsed ${scenario.id}`"
- name: builds the medium game artifact
actions:
- if:

View File

@@ -78,8 +78,8 @@ steps:
patch:
agents:
defaults:
embeddedHarness:
runtime:
agentRuntime:
id:
expr: config.harnessRuntime
fallback:
expr: config.harnessFallback
@@ -96,10 +96,10 @@ steps:
args:
- ref: env
- assert:
expr: "snapshot.config.agents?.defaults?.embeddedHarness?.runtime === config.harnessRuntime"
expr: "snapshot.config.agents?.defaults?.agentRuntime?.id === config.harnessRuntime"
message:
expr: "`expected embeddedHarness.runtime=${config.harnessRuntime}, got ${JSON.stringify(snapshot.config.agents?.defaults?.embeddedHarness)}`"
detailsExpr: "env.providerMode === 'live-frontier' ? `provider=${selected?.provider} model=${selected?.model} runtime=${snapshot.config.agents?.defaults?.embeddedHarness?.runtime}` : `mock mode: parsed ${scenario.id}`"
expr: "`expected agentRuntime.id=${config.harnessRuntime}, got ${JSON.stringify(snapshot.config.agents?.defaults?.agentRuntime)}`"
detailsExpr: "env.providerMode === 'live-frontier' ? `provider=${selected?.provider} model=${selected?.model} runtime=${snapshot.config.agents?.defaults?.agentRuntime?.id}` : `mock mode: parsed ${scenario.id}`"
- name: builds the medium game artifact
actions:
- if:

View File

@@ -285,6 +285,14 @@ export OPENCLAW_ALLOW_INSECURE_PRIVATE_WS="${OPENCLAW_ALLOW_INSECURE_PRIVATE_WS:
export OPENCLAW_SANDBOX="$SANDBOX_ENABLED"
export OPENCLAW_DOCKER_SOCKET="$DOCKER_SOCKET_PATH"
export OPENCLAW_TZ="$TIMEZONE"
export OTEL_EXPORTER_OTLP_ENDPOINT="${OTEL_EXPORTER_OTLP_ENDPOINT:-}"
export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT="${OTEL_EXPORTER_OTLP_TRACES_ENDPOINT:-}"
export OTEL_EXPORTER_OTLP_METRICS_ENDPOINT="${OTEL_EXPORTER_OTLP_METRICS_ENDPOINT:-}"
export OTEL_EXPORTER_OTLP_LOGS_ENDPOINT="${OTEL_EXPORTER_OTLP_LOGS_ENDPOINT:-}"
export OTEL_EXPORTER_OTLP_PROTOCOL="${OTEL_EXPORTER_OTLP_PROTOCOL:-}"
export OTEL_SERVICE_NAME="${OTEL_SERVICE_NAME:-}"
export OTEL_SEMCONV_STABILITY_OPT_IN="${OTEL_SEMCONV_STABILITY_OPT_IN:-}"
export OPENCLAW_OTEL_PRELOADED="${OPENCLAW_OTEL_PRELOADED:-}"
# Detect Docker socket GID for sandbox group_add.
DOCKER_GID=""
@@ -471,7 +479,15 @@ upsert_env "$ENV_FILE" \
DOCKER_GID \
OPENCLAW_INSTALL_DOCKER_CLI \
OPENCLAW_ALLOW_INSECURE_PRIVATE_WS \
OPENCLAW_TZ
OPENCLAW_TZ \
OTEL_EXPORTER_OTLP_ENDPOINT \
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT \
OTEL_EXPORTER_OTLP_METRICS_ENDPOINT \
OTEL_EXPORTER_OTLP_LOGS_ENDPOINT \
OTEL_EXPORTER_OTLP_PROTOCOL \
OTEL_SERVICE_NAME \
OTEL_SEMCONV_STABILITY_OPT_IN \
OPENCLAW_OTEL_PRELOADED
if [[ "$IMAGE_NAME" == "openclaw:local" ]]; then
echo "==> Building Docker image: $IMAGE_NAME"

View File

@@ -40,7 +40,7 @@ RUN --mount=type=cache,id=openclaw-pnpm-store,target=/home/appuser/.local/share/
FROM deps AS build
COPY --chown=appuser:appuser tsconfig.json tsconfig.plugin-sdk.dts.json tsdown.config.ts vitest.config.ts openclaw.mjs ./
COPY --chown=appuser:appuser .oxlintrc.json tsconfig.json tsconfig.plugin-sdk.dts.json tsconfig.oxlint*.json tsdown.config.ts vitest.config.ts openclaw.mjs ./
COPY --chown=appuser:appuser src ./src
COPY --chown=appuser:appuser test ./test
COPY --chown=appuser:appuser scripts ./scripts

View File

@@ -54,7 +54,7 @@ async function describeProbePid(pid: number): Promise<string | undefined> {
async function waitForProbePid(pidPath: string): Promise<number | undefined> {
const startedAt = Date.now();
while (Date.now() - startedAt < 240_000) {
while (Date.now() - startedAt < 600_000) {
const pid = await readProbePid(pidPath);
if (pid) {
return pid;
@@ -133,6 +133,7 @@ async function runCronCleanupScenario(params: {
message: "Use available context and then stop.",
timeoutSeconds: 90,
lightContext: true,
toolsAllow: ["bundle-mcp"],
},
delivery: { mode: "none" },
});

View File

@@ -26,7 +26,8 @@ docker run --rm \
-e "OPENCLAW_SKIP_CHANNELS=1" \
-e "OPENCLAW_SKIP_GMAIL_WATCHER=1" \
-e "OPENCLAW_SKIP_CANVAS_HOST=1" \
-e "OPENCLAW_ACPX_RUNTIME_STARTUP_PROBE=1" \
-e "OPENCLAW_SKIP_ACPX_RUNTIME=1" \
-e "OPENCLAW_SKIP_ACPX_RUNTIME_PROBE=1" \
-e "OPENCLAW_STATE_DIR=/tmp/openclaw-state" \
-e "OPENCLAW_CONFIG_PATH=/tmp/openclaw-state/openclaw.json" \
-e "GW_URL=ws://127.0.0.1:$PORT" \
@@ -45,11 +46,22 @@ docker run --rm \
node --import tsx scripts/e2e/cron-mcp-cleanup-seed.ts >/tmp/cron-mcp-cleanup-seed.log
node \"\$entry\" gateway --port $PORT --bind loopback --allow-unconfigured >/tmp/cron-mcp-cleanup-gateway.log 2>&1 &
gateway_pid=\$!
stop_process() {
pid=\"\$1\"
kill \"\$pid\" >/dev/null 2>&1 || true
for _ in \$(seq 1 40); do
if ! kill -0 \"\$pid\" >/dev/null 2>&1; then
wait \"\$pid\" >/dev/null 2>&1 || true
return
fi
sleep 0.25
done
kill -9 \"\$pid\" >/dev/null 2>&1 || true
wait \"\$pid\" >/dev/null 2>&1 || true
}
cleanup_inner() {
kill \"\$mock_pid\" >/dev/null 2>&1 || true
kill \"\$gateway_pid\" >/dev/null 2>&1 || true
wait \"\$mock_pid\" >/dev/null 2>&1 || true
wait \"\$gateway_pid\" >/dev/null 2>&1 || true
stop_process \"\$mock_pid\"
stop_process \"\$gateway_pid\"
}
dump_gateway_log_on_error() {
status=\$?
@@ -84,19 +96,6 @@ docker run --rm \
tail -n 120 /tmp/cron-mcp-cleanup-gateway.log 2>/dev/null || true
exit 1
fi
acpx_ready=0
for _ in \$(seq 1 2400); do
if grep -q '\[plugins\] embedded acpx runtime backend ready' /tmp/cron-mcp-cleanup-gateway.log 2>/dev/null; then
acpx_ready=1
break
fi
sleep 0.25
done
if [ \"\$acpx_ready\" -ne 1 ]; then
echo \"Embedded ACPX runtime did not become ready\"
tail -n 120 /tmp/cron-mcp-cleanup-gateway.log 2>/dev/null || true
exit 1
fi
node --import tsx scripts/e2e/cron-mcp-cleanup-docker-client.ts
" >"$CLIENT_LOG" 2>&1
status=${PIPESTATUS[0]}

View File

@@ -80,6 +80,9 @@ async function main() {
},
agents: {
defaults: {
heartbeat: {
every: "0m",
},
skipBootstrap: true,
contextInjection: "never",
skills: [],
@@ -90,12 +93,16 @@ async function main() {
},
tools: {
profile: "coding",
alsoAllow: ["bundle-mcp"],
subagents: {
tools: {
alsoAllow: ["bundle-mcp"],
},
},
},
plugins: {
enabled: false,
},
mcp: {
servers: {
cronCleanupProbe: {

View File

@@ -33,16 +33,18 @@ async function main() {
});
mcp = mcpHandle.client;
}
const callTool = <T>(params: Parameters<typeof mcp.callTool>[0]) =>
mcp.callTool(params, undefined, { timeout: 240_000 }) as Promise<T>;
const conversation = await waitFor(
"seeded conversation in conversations_list",
async () => {
const listed = (await mcp.callTool({
const listed = await callTool<{
structuredContent?: { conversations?: Array<Record<string, unknown>> };
}>({
name: "conversations_list",
arguments: {},
})) as {
structuredContent?: { conversations?: Array<Record<string, unknown>> };
};
});
return listed.structuredContent?.conversations?.find(
(entry) => entry.sessionKey === "agent:main:main",
);
@@ -52,33 +54,40 @@ async function main() {
assert(conversation.channel === "imessage", "expected seeded channel");
assert(conversation.to === "+15551234567", "expected seeded target");
const fetched = (await mcp.callTool({
name: "conversation_get",
arguments: { session_key: "agent:main:main" },
})) as {
const fetched = await callTool<{
structuredContent?: { conversation?: Record<string, unknown> };
isError?: boolean;
};
}>({
name: "conversation_get",
arguments: { session_key: "agent:main:main" },
});
assert(!fetched.isError, "conversation_get should succeed");
assert(
fetched.structuredContent?.conversation?.sessionKey === "agent:main:main",
"conversation_get returned wrong session",
);
let lastHistory: unknown;
const messages = await waitFor(
"seeded transcript messages",
async () => {
const history = (await mcp.callTool({
const history = await callTool<{
structuredContent?: { messages?: Array<Record<string, unknown>> };
}>({
name: "messages_read",
arguments: { session_key: "agent:main:main", limit: 10 },
})) as {
structuredContent?: { messages?: Array<Record<string, unknown>> };
};
});
lastHistory = history;
const currentMessages = history.structuredContent?.messages ?? [];
return currentMessages.length >= 2 ? currentMessages : undefined;
},
240_000,
);
).catch((error) => {
throw new Error(
`timeout waiting for seeded transcript messages: ${JSON.stringify(lastHistory, null, 2)}`,
{ cause: error },
);
});
await waitFor(
"seeded attachment message",
() =>
@@ -91,13 +100,13 @@ async function main() {
240_000,
);
const attachments = (await mcp.callTool({
name: "attachments_fetch",
arguments: { session_key: "agent:main:main", message_id: "msg-attachment" },
})) as {
const attachments = await callTool<{
structuredContent?: { attachments?: Array<Record<string, unknown>> };
isError?: boolean;
};
}>({
name: "attachments_fetch",
arguments: { session_key: "agent:main:main", message_id: "msg-attachment" },
});
assert(!attachments.isError, "attachments_fetch should succeed");
assert(
(attachments.structuredContent?.attachments?.length ?? 0) === 1,
@@ -105,16 +114,16 @@ async function main() {
);
const waited = (await Promise.all([
mcp.callTool({
callTool<{
structuredContent?: { event?: Record<string, unknown> };
}>({
name: "events_wait",
arguments: {
session_key: "agent:main:main",
after_cursor: 0,
timeout_ms: 10_000,
},
}) as Promise<{
structuredContent?: { event?: Record<string, unknown> };
}>,
}),
gateway.request("chat.inject", {
sessionKey: "agent:main:main",
message: "assistant live event",
@@ -129,12 +138,12 @@ async function main() {
assert(assistantEvent.text === "assistant live event", "expected assistant event text");
const assistantCursor = typeof assistantEvent.cursor === "number" ? assistantEvent.cursor : 0;
const polled = (await mcp.callTool({
const polled = await callTool<{
structuredContent?: { events?: Array<Record<string, unknown>> };
}>({
name: "events_poll",
arguments: { session_key: "agent:main:main", after_cursor: 0, limit: 10 },
})) as {
structuredContent?: { events?: Array<Record<string, unknown>> };
};
});
assert(
(polled.structuredContent?.events ?? []).some(
(entry) => entry.text === "assistant live event",
@@ -144,16 +153,16 @@ async function main() {
const channelMessage = `hello from docker ${randomUUID()}`;
const userEvent = (await Promise.all([
mcp.callTool({
callTool<{
structuredContent?: { event?: Record<string, unknown> };
}>({
name: "events_wait",
arguments: {
session_key: "agent:main:main",
after_cursor: assistantCursor,
timeout_ms: 10_000,
},
}) as Promise<{
structuredContent?: { event?: Record<string, unknown> };
}>,
}),
gateway.request("chat.send", {
sessionKey: "agent:main:main",
message: channelMessage,

View File

@@ -26,7 +26,8 @@ docker run --rm \
-e "OPENCLAW_SKIP_GMAIL_WATCHER=1" \
-e "OPENCLAW_SKIP_CRON=1" \
-e "OPENCLAW_SKIP_CANVAS_HOST=1" \
-e "OPENCLAW_ACPX_RUNTIME_STARTUP_PROBE=1" \
-e "OPENCLAW_SKIP_ACPX_RUNTIME=1" \
-e "OPENCLAW_SKIP_ACPX_RUNTIME_PROBE=1" \
-e "OPENCLAW_STATE_DIR=/tmp/openclaw-state" \
-e "OPENCLAW_CONFIG_PATH=/tmp/openclaw-state/openclaw.json" \
-e "GW_URL=ws://127.0.0.1:$PORT" \
@@ -50,11 +51,22 @@ docker run --rm \
node --import tsx scripts/e2e/mcp-channels-seed.ts >/tmp/mcp-channels-seed.log
node \"\$entry\" gateway --port $PORT --bind loopback --allow-unconfigured >/tmp/mcp-channels-gateway.log 2>&1 &
gateway_pid=\$!
stop_process() {
pid=\"\$1\"
kill \"\$pid\" >/dev/null 2>&1 || true
for _ in \$(seq 1 40); do
if ! kill -0 \"\$pid\" >/dev/null 2>&1; then
wait \"\$pid\" >/dev/null 2>&1 || true
return
fi
sleep 0.25
done
kill -9 \"\$pid\" >/dev/null 2>&1 || true
wait \"\$pid\" >/dev/null 2>&1 || true
}
cleanup_inner() {
kill \"\$gateway_pid\" >/dev/null 2>&1 || true
wait \"\$gateway_pid\" >/dev/null 2>&1 || true
kill \"\$mock_pid\" >/dev/null 2>&1 || true
wait \"\$mock_pid\" >/dev/null 2>&1 || true
stop_process \"\$gateway_pid\"
stop_process \"\$mock_pid\"
}
dump_gateway_log_on_error() {
status=\$?
@@ -79,19 +91,6 @@ docker run --rm \
tail -n 120 /tmp/mcp-channels-gateway.log 2>/dev/null || true
exit 1
fi
acpx_ready=0
for _ in \$(seq 1 2400); do
if grep -q '\[plugins\] embedded acpx runtime backend ready' /tmp/mcp-channels-gateway.log 2>/dev/null; then
acpx_ready=1
break
fi
sleep 0.25
done
if [ \"\$acpx_ready\" -ne 1 ]; then
echo \"Embedded ACPX runtime did not become ready\"
tail -n 120 /tmp/mcp-channels-gateway.log 2>/dev/null || true
exit 1
fi
node --import tsx scripts/e2e/mcp-channels-docker-client.ts
" >"$CLIENT_LOG" 2>&1
status=${PIPESTATUS[0]}

View File

@@ -388,7 +388,10 @@ export async function maybeApprovePendingBridgePairing(
}>("device.pair.list", {});
} catch (error) {
const message = formatErrorMessage(error);
if (message.includes("missing scope: operator.pairing")) {
if (
message.includes("missing scope: operator.pairing") ||
message.includes("device.pair.list")
) {
return false;
}
throw error;

View File

@@ -23,6 +23,16 @@ async function main() {
enabled: false,
},
},
agents: {
defaults: {
heartbeat: {
every: "0m",
},
},
},
plugins: {
enabled: false,
},
} satisfies OpenClawConfig,
"sk-docker-smoke-test",
);

View File

@@ -13,6 +13,7 @@ API_KEY_ENV=""
AUTH_CHOICE=""
AUTH_KEY_FLAG=""
MODEL_ID=""
MODEL_ID_EXPLICIT=0
INSTALL_URL="https://openclaw.ai/install.sh"
HOST_PORT="18427"
HOST_PORT_EXPLICIT=0
@@ -103,6 +104,8 @@ Options:
--mode <fresh|upgrade|both>
--provider <openai|anthropic|minimax>
Provider auth/model lane. Default: openai
--model <provider/model> Override the model used for the agent-turn smoke.
Default: openai/gpt-5.5 for the OpenAI lane
--api-key-env <var> Host env var name for provider API key.
Default: OPENAI_API_KEY for openai, ANTHROPIC_API_KEY for anthropic
--openai-api-key-env <var> Alias for --api-key-env (backward compatible)
@@ -142,6 +145,11 @@ while [[ $# -gt 0 ]]; do
PROVIDER="$2"
shift 2
;;
--model)
MODEL_ID="$2"
MODEL_ID_EXPLICIT=1
shift 2
;;
--api-key-env|--openai-api-key-env)
API_KEY_ENV="$2"
shift 2
@@ -200,19 +208,19 @@ case "$PROVIDER" in
openai)
AUTH_CHOICE="openai-api-key"
AUTH_KEY_FLAG="openai-api-key"
MODEL_ID="openai/gpt-5.5"
[[ "$MODEL_ID_EXPLICIT" -eq 1 ]] || MODEL_ID="${OPENCLAW_PARALLELS_OPENAI_MODEL:-openai/gpt-5.5}"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="OPENAI_API_KEY"
;;
anthropic)
AUTH_CHOICE="apiKey"
AUTH_KEY_FLAG="anthropic-api-key"
MODEL_ID="anthropic/claude-sonnet-4-6"
[[ "$MODEL_ID_EXPLICIT" -eq 1 ]] || MODEL_ID="${OPENCLAW_PARALLELS_ANTHROPIC_MODEL:-anthropic/claude-sonnet-4-6}"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="ANTHROPIC_API_KEY"
;;
minimax)
AUTH_CHOICE="minimax-global-api"
AUTH_KEY_FLAG="minimax-api-key"
MODEL_ID="minimax/MiniMax-M2.7"
[[ "$MODEL_ID_EXPLICIT" -eq 1 ]] || MODEL_ID="${OPENCLAW_PARALLELS_MINIMAX_MODEL:-minimax/MiniMax-M2.7}"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="MINIMAX_API_KEY"
;;
*)
@@ -764,13 +772,38 @@ verify_gateway_status() {
return 1
}
prepare_agent_workspace() {
guest_exec /bin/sh -lc 'set -eu
workspace="${OPENCLAW_WORKSPACE_DIR:-$HOME/.openclaw/workspace}"
mkdir -p "$workspace/.openclaw"
cat > "$workspace/IDENTITY.md" <<'"'"'IDENTITY_EOF'"'"'
# Identity
- Name: OpenClaw
- Purpose: Parallels Linux smoke test assistant.
IDENTITY_EOF
cat > "$workspace/.openclaw/workspace-state.json" <<'"'"'STATE_EOF'"'"'
{
"version": 1,
"setupCompletedAt": "2026-01-01T00:00:00.000Z"
}
STATE_EOF
rm -f "$workspace/BOOTSTRAP.md"'
}
verify_local_turn() {
guest_exec openclaw models set "$MODEL_ID"
guest_exec /usr/bin/env "$API_KEY_ENV=$API_KEY_VALUE" openclaw agent \
--local \
--agent main \
--message ping \
--json
guest_exec openclaw config set agents.defaults.skipBootstrap true --strict-json
prepare_agent_workspace
guest_exec /bin/sh -lc "$(cat <<EOF
exec /usr/bin/env $(shell_quote "$API_KEY_ENV=$API_KEY_VALUE") openclaw agent \
--local \
--agent main \
--session-id parallels-linux-smoke \
--message $(shell_quote "Reply with exact ASCII text OK only.") \
--json
EOF
)"
}
phase_log_path() {

View File

@@ -13,6 +13,7 @@ API_KEY_ENV=""
AUTH_CHOICE=""
AUTH_KEY_FLAG=""
MODEL_ID=""
MODEL_ID_EXPLICIT=0
INSTALL_URL="https://openclaw.ai/install.sh"
HOST_PORT="18425"
HOST_PORT_EXPLICIT=0
@@ -52,7 +53,7 @@ TIMEOUT_UPDATE_DEV_S="${OPENCLAW_PARALLELS_MACOS_UPDATE_DEV_TIMEOUT_S:-1200}"
TIMEOUT_VERIFY_S=60
TIMEOUT_ONBOARD_S=180
TIMEOUT_GATEWAY_S=180
TIMEOUT_AGENT_S=240
TIMEOUT_AGENT_S="${OPENCLAW_PARALLELS_MACOS_AGENT_TIMEOUT_S:-240}"
TIMEOUT_PERMISSION_S=60
TIMEOUT_DASHBOARD_S=180
TIMEOUT_SNAPSHOT_S=360
@@ -142,6 +143,8 @@ Options:
both = run both lanes
--provider <openai|anthropic|minimax>
Provider auth/model lane. Default: openai
--model <provider/model> Override the model used for the agent-turn smoke.
Default: openai/gpt-5.5 for the OpenAI lane
--api-key-env <var> Host env var name for provider API key.
Default: OPENAI_API_KEY for openai, ANTHROPIC_API_KEY for anthropic
--openai-api-key-env <var> Alias for --api-key-env (backward compatible)
@@ -184,6 +187,11 @@ while [[ $# -gt 0 ]]; do
PROVIDER="$2"
shift 2
;;
--model)
MODEL_ID="$2"
MODEL_ID_EXPLICIT=1
shift 2
;;
--api-key-env|--openai-api-key-env)
API_KEY_ENV="$2"
shift 2
@@ -258,19 +266,19 @@ case "$PROVIDER" in
openai)
AUTH_CHOICE="openai-api-key"
AUTH_KEY_FLAG="openai-api-key"
MODEL_ID="openai/gpt-5.5"
[[ "$MODEL_ID_EXPLICIT" -eq 1 ]] || MODEL_ID="${OPENCLAW_PARALLELS_OPENAI_MODEL:-openai/gpt-5.5}"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="OPENAI_API_KEY"
;;
anthropic)
AUTH_CHOICE="apiKey"
AUTH_KEY_FLAG="anthropic-api-key"
MODEL_ID="anthropic/claude-sonnet-4-6"
[[ "$MODEL_ID_EXPLICIT" -eq 1 ]] || MODEL_ID="${OPENCLAW_PARALLELS_ANTHROPIC_MODEL:-anthropic/claude-sonnet-4-6}"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="ANTHROPIC_API_KEY"
;;
minimax)
AUTH_CHOICE="minimax-global-api"
AUTH_KEY_FLAG="minimax-api-key"
MODEL_ID="minimax/MiniMax-M2.7"
[[ "$MODEL_ID_EXPLICIT" -eq 1 ]] || MODEL_ID="${OPENCLAW_PARALLELS_MINIMAX_MODEL:-minimax/MiniMax-M2.7}"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="MINIMAX_API_KEY"
;;
*)
@@ -1474,11 +1482,28 @@ show_gateway_status_compat() {
verify_turn() {
guest_current_user_exec "$GUEST_NODE_BIN" "$GUEST_OPENCLAW_ENTRY" models set "$MODEL_ID"
guest_current_user_exec "$GUEST_NODE_BIN" "$GUEST_OPENCLAW_ENTRY" config set agents.defaults.skipBootstrap true --strict-json
guest_current_user_sh "$(cat <<EOF
export PATH=$(shell_quote "$GUEST_EXEC_PATH")
workspace="\${OPENCLAW_WORKSPACE_DIR:-\$HOME/.openclaw/workspace}"
mkdir -p "\$workspace/.openclaw"
cat > "\$workspace/IDENTITY.md" <<'IDENTITY_EOF'
# Identity
- Name: OpenClaw
- Purpose: Parallels macOS smoke test assistant.
IDENTITY_EOF
cat > "\$workspace/.openclaw/workspace-state.json" <<'STATE_EOF'
{
"version": 1,
"setupCompletedAt": "2026-01-01T00:00:00.000Z"
}
STATE_EOF
rm -f "\$workspace/BOOTSTRAP.md"
exec /usr/bin/env $(shell_quote "$API_KEY_ENV=$API_KEY_VALUE") \
$(shell_quote "$GUEST_NODE_BIN") $(shell_quote "$GUEST_OPENCLAW_ENTRY") agent \
--agent main \
--session-id parallels-macos-smoke \
--message $(shell_quote "Reply with exact ASCII text OK only.") \
--json
EOF
@@ -1526,7 +1551,7 @@ if [ -z "\$dashboard_port" ] || [ "\$dashboard_port" = "\$dashboard_http_url" ];
echo "failed to parse dashboard port from \$dashboard_http_url" >&2
exit 1
fi
deadline=\$((SECONDS + 30))
deadline=\$((SECONDS + 120))
dashboard_ready=0
while [ \$SECONDS -lt \$deadline ]; do
if curl -fsSL --connect-timeout 2 --max-time 5 "\$dashboard_http_url" >/tmp/openclaw-dashboard-smoke.html 2>/dev/null; then

View File

@@ -13,6 +13,7 @@ API_KEY_ENV=""
AUTH_CHOICE=""
AUTH_KEY_FLAG=""
MODEL_ID=""
MODEL_ID_EXPLICIT=0
PYTHON_BIN="${PYTHON_BIN:-}"
PACKAGE_SPEC=""
UPDATE_TARGET=""
@@ -120,6 +121,8 @@ Options:
Default: all
--provider <openai|anthropic|minimax>
Provider auth/model lane. Default: openai
--model <provider/model> Override the model used for agent-turn smoke checks.
Default: openai/gpt-5.5 for the OpenAI lane
--api-key-env <var> Host env var name for provider API key.
Default: OPENAI_API_KEY for openai, ANTHROPIC_API_KEY for anthropic
--openai-api-key-env <var> Alias for --api-key-env (backward compatible)
@@ -149,6 +152,11 @@ while [[ $# -gt 0 ]]; do
PROVIDER="$2"
shift 2
;;
--model)
MODEL_ID="$2"
MODEL_ID_EXPLICIT=1
shift 2
;;
--api-key-env|--openai-api-key-env)
API_KEY_ENV="$2"
shift 2
@@ -206,19 +214,19 @@ case "$PROVIDER" in
openai)
AUTH_CHOICE="openai-api-key"
AUTH_KEY_FLAG="openai-api-key"
MODEL_ID="openai/gpt-5.5"
[[ "$MODEL_ID_EXPLICIT" -eq 1 ]] || MODEL_ID="${OPENCLAW_PARALLELS_OPENAI_MODEL:-openai/gpt-5.5}"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="OPENAI_API_KEY"
;;
anthropic)
AUTH_CHOICE="apiKey"
AUTH_KEY_FLAG="anthropic-api-key"
MODEL_ID="anthropic/claude-sonnet-4-6"
[[ "$MODEL_ID_EXPLICIT" -eq 1 ]] || MODEL_ID="${OPENCLAW_PARALLELS_ANTHROPIC_MODEL:-anthropic/claude-sonnet-4-6}"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="ANTHROPIC_API_KEY"
;;
minimax)
AUTH_CHOICE="minimax-global-api"
AUTH_KEY_FLAG="minimax-api-key"
MODEL_ID="minimax/MiniMax-M2.7"
[[ "$MODEL_ID_EXPLICIT" -eq 1 ]] || MODEL_ID="${OPENCLAW_PARALLELS_MINIMAX_MODEL:-minimax/MiniMax-M2.7}"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="MINIMAX_API_KEY"
;;
*)
@@ -1104,7 +1112,8 @@ cat > "\$workspace/.openclaw/workspace-state.json" <<'STATE_EOF'
}
STATE_EOF
rm -f "\$workspace/BOOTSTRAP.md"
/opt/homebrew/bin/openclaw models set "$MODEL_ID"
/opt/homebrew/bin/openclaw models set "$MODEL_ID"
/opt/homebrew/bin/openclaw config set agents.defaults.skipBootstrap true --strict-json
/opt/homebrew/bin/openclaw agent --agent main --session-id "parallels-npm-update-macos-transport-recovery-$expected_needle" --message "Reply with exact ASCII text OK only." --json
EOF
macos_desktop_user_exec /bin/bash "$script_path"
@@ -1235,7 +1244,8 @@ if (-not \$gatewayReady) {
\$providerBytes = [Convert]::FromBase64String('$provider_key_b64')
\$providerValue = [Text.Encoding]::UTF8.GetString(\$providerBytes)
Set-Item -Path ('Env:' + '$API_KEY_ENV') -Value \$providerValue
& \$openclaw models set '$MODEL_ID'
& \$openclaw models set '$MODEL_ID'
& \$openclaw config set agents.defaults.skipBootstrap true --strict-json
\$workspace = \$env:OPENCLAW_WORKSPACE_DIR
if (-not \$workspace) {
\$workspace = Join-Path \$env:USERPROFILE '.openclaw\\workspace'
@@ -1692,7 +1702,8 @@ if [ -n "$expected_needle" ]; then
esac
fi
/opt/homebrew/bin/openclaw update status --json
/opt/homebrew/bin/openclaw models set "$MODEL_ID"
/opt/homebrew/bin/openclaw models set "$MODEL_ID"
/opt/homebrew/bin/openclaw config set agents.defaults.skipBootstrap true --strict-json
# Same-guest npm upgrades can leave launchd holding the old gateway process or
# module graph briefly; wait for a fresh RPC-ready restart before the agent turn.
# Fresh npm installs may not have a launchd service yet, so fall back to the
@@ -1826,6 +1837,7 @@ if [ -n "$expected_needle" ]; then
fi
openclaw update status --json
openclaw models set "$MODEL_ID"
openclaw config set agents.defaults.skipBootstrap true --strict-json
workspace="\${OPENCLAW_WORKSPACE_DIR:-\$HOME/.openclaw/workspace}"
mkdir -p "\$workspace/.openclaw"
cat > "\$workspace/IDENTITY.md" <<'IDENTITY_EOF'
@@ -1911,6 +1923,7 @@ if platform_enabled macos; then
bash "$ROOT_DIR/scripts/e2e/parallels-macos-smoke.sh" \
--mode fresh \
--provider "$PROVIDER" \
--model "$MODEL_ID" \
--api-key-env "$API_KEY_ENV" \
--target-package-spec "$PACKAGE_SPEC" \
--json >"$RUN_DIR/macos-fresh.log" 2>&1 &
@@ -1922,6 +1935,7 @@ if platform_enabled windows; then
bash "$ROOT_DIR/scripts/e2e/parallels-windows-smoke.sh" \
--mode fresh \
--provider "$PROVIDER" \
--model "$MODEL_ID" \
--api-key-env "$API_KEY_ENV" \
--target-package-spec "$PACKAGE_SPEC" \
--json >"$RUN_DIR/windows-fresh.log" 2>&1 &
@@ -1933,6 +1947,7 @@ if platform_enabled linux; then
bash "$ROOT_DIR/scripts/e2e/parallels-linux-smoke.sh" \
--mode fresh \
--provider "$PROVIDER" \
--model "$MODEL_ID" \
--api-key-env "$API_KEY_ENV" \
--target-package-spec "$PACKAGE_SPEC" \
--json >"$RUN_DIR/linux-fresh.log" 2>&1 &

View File

@@ -12,6 +12,7 @@ API_KEY_ENV=""
AUTH_CHOICE=""
AUTH_KEY_FLAG=""
MODEL_ID=""
MODEL_ID_EXPLICIT=0
INSTALL_URL="https://openclaw.ai/install.ps1"
HOST_PORT="18426"
HOST_PORT_EXPLICIT=0
@@ -138,6 +139,8 @@ Options:
--mode <fresh|upgrade|both>
--provider <openai|anthropic|minimax>
Provider auth/model lane. Default: openai
--model <provider/model> Override the model used for the agent-turn smoke.
Default: openai/gpt-5.5 for the OpenAI lane
--api-key-env <var> Host env var name for provider API key.
Default: OPENAI_API_KEY for openai, ANTHROPIC_API_KEY for anthropic
--openai-api-key-env <var> Alias for --api-key-env (backward compatible)
@@ -183,6 +186,11 @@ while [[ $# -gt 0 ]]; do
PROVIDER="$2"
shift 2
;;
--model)
MODEL_ID="$2"
MODEL_ID_EXPLICIT=1
shift 2
;;
--api-key-env|--openai-api-key-env)
API_KEY_ENV="$2"
shift 2
@@ -249,19 +257,19 @@ case "$PROVIDER" in
openai)
AUTH_CHOICE="openai-api-key"
AUTH_KEY_FLAG="openai-api-key"
MODEL_ID="openai/gpt-5.5"
[[ "$MODEL_ID_EXPLICIT" -eq 1 ]] || MODEL_ID="${OPENCLAW_PARALLELS_OPENAI_MODEL:-openai/gpt-5.5}"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="OPENAI_API_KEY"
;;
anthropic)
AUTH_CHOICE="apiKey"
AUTH_KEY_FLAG="anthropic-api-key"
MODEL_ID="anthropic/claude-sonnet-4-6"
[[ "$MODEL_ID_EXPLICIT" -eq 1 ]] || MODEL_ID="${OPENCLAW_PARALLELS_ANTHROPIC_MODEL:-anthropic/claude-sonnet-4-6}"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="ANTHROPIC_API_KEY"
;;
minimax)
AUTH_CHOICE="minimax-global-api"
AUTH_KEY_FLAG="minimax-api-key"
MODEL_ID="minimax/MiniMax-M2.7"
[[ "$MODEL_ID_EXPLICIT" -eq 1 ]] || MODEL_ID="${OPENCLAW_PARALLELS_MINIMAX_MODEL:-minimax/MiniMax-M2.7}"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="MINIMAX_API_KEY"
;;
*)
@@ -2367,8 +2375,31 @@ show_gateway_status_compat() {
verify_turn() {
guest_run_openclaw "" "" models set "$MODEL_ID"
guest_run_openclaw "" "" config set agents.defaults.skipBootstrap true --strict-json
guest_powershell "$(cat <<'EOF'
$workspace = $env:OPENCLAW_WORKSPACE_DIR
if (-not $workspace) {
$workspace = Join-Path $env:USERPROFILE '.openclaw\workspace'
}
$stateDir = Join-Path $workspace '.openclaw'
New-Item -ItemType Directory -Path $stateDir -Force | Out-Null
@'
# Identity
- Name: OpenClaw
- Purpose: Parallels Windows smoke test assistant.
'@ | Set-Content -Path (Join-Path $workspace 'IDENTITY.md') -Encoding UTF8
@'
{
"version": 1,
"setupCompletedAt": "2026-01-01T00:00:00.000Z"
}
'@ | Set-Content -Path (Join-Path $stateDir 'workspace-state.json') -Encoding UTF8
Remove-Item (Join-Path $workspace 'BOOTSTRAP.md') -Force -ErrorAction SilentlyContinue
EOF
)"
guest_run_openclaw "$API_KEY_ENV" "$API_KEY_VALUE" \
agent --agent main --message "Reply with exact ASCII text OK only." --json
agent --agent main --session-id parallels-windows-smoke --message "Reply with exact ASCII text OK only." --json
}
capture_latest_ref_failure() {

View File

@@ -8,12 +8,20 @@ IMAGE_NAME="$(docker_e2e_resolve_image "openclaw-plugins-e2e" OPENCLAW_PLUGINS_E
docker_e2e_build_or_reuse "$IMAGE_NAME" plugins
DOCKER_ENV_ARGS=(-e COREPACK_ENABLE_DOWNLOAD_PROMPT=0)
if [[ -n "${OPENAI_API_KEY:-}" && "${OPENAI_API_KEY:-}" != "undefined" && "${OPENAI_API_KEY:-}" != "null" ]]; then
DOCKER_ENV_ARGS+=(-e OPENAI_API_KEY)
fi
if [[ -n "${OPENAI_BASE_URL:-}" && "${OPENAI_BASE_URL:-}" != "undefined" && "${OPENAI_BASE_URL:-}" != "null" ]]; then
DOCKER_ENV_ARGS+=(-e OPENAI_BASE_URL)
fi
for env_name in \
OPENCLAW_PLUGINS_E2E_CLAWHUB \
OPENCLAW_PLUGINS_E2E_CLAWHUB_SPEC \
OPENCLAW_PLUGINS_E2E_CLAWHUB_ID \
OPENCLAW_CLAWHUB_URL \
CLAWHUB_URL \
OPENCLAW_CLAWHUB_TOKEN \
CLAWHUB_TOKEN \
CLAWHUB_AUTH_TOKEN; do
env_value="${!env_name:-}"
if [[ -n "$env_value" && "$env_value" != "undefined" && "$env_value" != "null" ]]; then
DOCKER_ENV_ARGS+=(-e "$env_name")
fi
done
echo "Running plugins Docker E2E..."
RUN_LOG="$(mktemp "${TMPDIR:-/tmp}/openclaw-plugins-run.XXXXXX")"
@@ -31,31 +39,11 @@ else
fi
export OPENCLAW_ENTRY
sanitize_env_string() {
local value="${1:-}"
if [[ "$value" == "undefined" || "$value" == "null" ]]; then
printf ''
return
fi
printf '%s' "$value"
}
export OPENAI_API_KEY="$(sanitize_env_string "${OPENAI_API_KEY:-}")"
export OPENAI_BASE_URL="$(sanitize_env_string "${OPENAI_BASE_URL:-}")"
if [[ -z "$OPENAI_API_KEY" ]]; then
unset OPENAI_API_KEY || true
fi
if [[ -z "$OPENAI_BASE_URL" ]]; then
unset OPENAI_BASE_URL || true
fi
home_dir=$(mktemp -d "/tmp/openclaw-plugins-e2e.XXXXXX")
export HOME="$home_dir"
BUNDLED_PLUGIN_ROOT_DIR="extensions"
OPENCLAW_PLUGIN_HOME="$HOME/.openclaw/$BUNDLED_PLUGIN_ROOT_DIR"
gateway_pid=""
record_fixture_plugin_trust() {
local plugin_id="$1"
local plugin_root="$2"
@@ -111,278 +99,6 @@ run_logged() {
fi
}
seed_openai_provider_config() {
local openai_api_key="$1"
local openai_base_url="${2:-}"
node - <<'NODE' "$openai_api_key" "$openai_base_url"
const fs = require("node:fs");
const path = require("node:path");
const openaiApiKey = process.argv[2];
const openaiBaseUrl = process.argv[3];
const configPath = path.join(process.env.HOME, ".openclaw", "openclaw.json");
const config = fs.existsSync(configPath)
? JSON.parse(fs.readFileSync(configPath, "utf8"))
: {};
const existingOpenAI = config.models?.providers?.openai ?? {};
config.models = {
...(config.models || {}),
providers: {
...(config.models?.providers || {}),
openai: {
...existingOpenAI,
baseUrl:
typeof existingOpenAI.baseUrl === "string" && existingOpenAI.baseUrl.trim()
? existingOpenAI.baseUrl
: openaiBaseUrl || "https://api.openai.com/v1",
apiKey: openaiApiKey,
models: Array.isArray(existingOpenAI.models) ? existingOpenAI.models : [],
},
},
};
fs.mkdirSync(path.dirname(configPath), { recursive: true });
fs.writeFileSync(configPath, `${JSON.stringify(config, null, 2)}\n`, "utf8");
NODE
}
stop_gateway() {
if [ -n "${gateway_pid:-}" ] && kill -0 "$gateway_pid" 2>/dev/null; then
kill "$gateway_pid" 2>/dev/null || true
wait "$gateway_pid" 2>/dev/null || true
fi
gateway_pid=""
}
start_gateway() {
local log_file="$1"
: > "$log_file"
node "$OPENCLAW_ENTRY" gateway --port 18789 --bind loopback --allow-unconfigured \
>"$log_file" 2>&1 &
gateway_pid=$!
for _ in $(seq 1 120); do
# Gateway startup logs changed; accept both the legacy listener line and the
# current structured ready line so this smoke stays stable across formats.
if grep -Eq "listening on ws://|\\[gateway\\] ready \\(" "$log_file"; then
return 0
fi
if ! kill -0 "$gateway_pid" 2>/dev/null; then
echo "Gateway exited unexpectedly"
cat "$log_file"
exit 1
fi
sleep 0.25
done
echo "Timed out waiting for gateway to start"
cat "$log_file"
exit 1
}
wait_for_gateway_health() {
for _ in $(seq 1 120); do
if node "$OPENCLAW_ENTRY" gateway health \
--url ws://127.0.0.1:18789 \
--token plugin-e2e-token \
--json >/dev/null 2>&1; then
return 0
fi
sleep 0.25
done
echo "Timed out waiting for gateway health"
return 1
}
run_gateway_chat_json() {
local session_key="$1"
local message="$2"
local output_file="$3"
local timeout_ms="${4:-45000}"
node - <<'NODE' "$OPENCLAW_ENTRY" "$session_key" "$message" "$output_file" "$timeout_ms"
const { execFileSync } = require("node:child_process");
const fs = require("node:fs");
const { randomUUID } = require("node:crypto");
const [, , entry, sessionKey, message, outputFile, timeoutRaw] = process.argv;
const timeoutMs = Number(timeoutRaw) > 0 ? Number(timeoutRaw) : 45000;
// Plugin install/enable can intentionally restart the gateway mid-request.
// Keep the underlying gateway call budget aligned with the scenario timeout
// instead of clamping too aggressively, or normal restarts look like failures.
const gatewayCallTimeoutMs = Math.max(15000, Math.min(timeoutMs, 90000));
const retryableGatewayErrorPattern =
/gateway ws open timeout|gateway connect timeout|gateway closed|ECONNREFUSED|socket hang up|gateway timeout after/i;
const formatErrorMessage = (error) =>
error instanceof Error ? error.message || error.name || "Error" : String(error);
const gatewayArgs = [
entry,
"gateway",
"call",
"--url",
"ws://127.0.0.1:18789",
"--token",
"plugin-e2e-token",
"--timeout",
String(gatewayCallTimeoutMs),
"--json",
];
const callGatewayOnce = (method, params) => {
try {
return {
ok: true,
value: JSON.parse(
execFileSync("node", [...gatewayArgs, method, "--params", JSON.stringify(params)], {
encoding: "utf8",
stdio: ["ignore", "pipe", "pipe"],
}),
),
};
} catch (error) {
const stderr = typeof error?.stderr === "string" ? error.stderr : "";
const stdout = typeof error?.stdout === "string" ? error.stdout : "";
const message = [String(error), stderr.trim(), stdout.trim()].filter(Boolean).join("\n");
return { ok: false, error: new Error(message) };
}
};
const isRetryableGatewayError = (error) =>
retryableGatewayErrorPattern.test(formatErrorMessage(error));
const extractText = (messageLike) => {
if (!messageLike || typeof messageLike !== "object") {
return "";
}
if (typeof messageLike.text === "string" && messageLike.text.trim()) {
return messageLike.text.trim();
}
const content = Array.isArray(messageLike.content) ? messageLike.content : [];
return content
.map((part) =>
part &&
typeof part === "object" &&
part.type === "text" &&
typeof part.text === "string"
? part.text.trim()
: "",
)
.filter(Boolean)
.join("\n\n")
.trim();
};
const findLatestAssistantText = (history) => {
const messages = Array.isArray(history?.messages) ? history.messages : [];
for (let index = messages.length - 1; index >= 0; index -= 1) {
const candidate = messages[index];
if (!candidate || typeof candidate !== "object" || candidate.role !== "assistant") {
continue;
}
const text = extractText(candidate);
if (text) {
return { text, message: candidate };
}
}
return null;
};
const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
const callGateway = async (method, params, deadline = Date.now() + gatewayCallTimeoutMs) => {
let lastFailure = null;
while (Date.now() < deadline) {
const result = callGatewayOnce(method, params);
if (result.ok) {
return result;
}
lastFailure = result;
if (!isRetryableGatewayError(result.error)) {
return result;
}
await sleep(250);
}
return lastFailure ?? callGatewayOnce(method, params);
};
async function main() {
const runId = `plugin-e2e-${randomUUID()}`;
const sendParams = {
sessionKey,
message,
idempotencyKey: runId,
};
let lastGatewayError = null;
const sendResult = await callGateway(
"chat.send",
sendParams,
Date.now() + gatewayCallTimeoutMs,
);
if (!sendResult.ok) {
throw sendResult.error;
}
const deadline = Date.now() + timeoutMs;
while (Date.now() < deadline) {
const historyResult = await callGateway("chat.history", { sessionKey }, Date.now() + 5000);
if (!historyResult.ok) {
lastGatewayError = String(historyResult.error);
await sleep(150);
continue;
}
lastGatewayError = null;
const history = historyResult.value;
const latestAssistant = findLatestAssistantText(history);
if (latestAssistant) {
fs.writeFileSync(
outputFile,
`${JSON.stringify(
{
sessionKey,
runId,
text: latestAssistant.text,
message: latestAssistant.message,
history,
},
null,
2,
)}\n`,
"utf8",
);
return;
}
await sleep(100);
}
const finalHistory = await callGateway("chat.history", { sessionKey }, Date.now() + 3000);
fs.writeFileSync(
outputFile,
`${JSON.stringify(
{
sessionKey,
runId,
error: "timeout",
history: finalHistory.ok ? finalHistory.value : null,
historyError: finalHistory.ok ? null : String(finalHistory.error),
lastGatewayError,
},
null,
2,
)}\n`,
"utf8",
);
const retrySummary = lastGatewayError ? `; last gateway error: ${lastGatewayError}` : "";
throw new Error(`timed out waiting for assistant reply for ${sessionKey}${retrySummary}`);
}
main().catch((error) => {
console.error(formatErrorMessage(error));
process.exit(1);
});
NODE
}
trap 'stop_gateway' EXIT
write_fixture_plugin() {
local dir="$1"
local id="$2"
@@ -637,7 +353,7 @@ if (!Array.isArray(inspect.gatewayMethods) || !inspect.gatewayMethods.includes("
console.log("ok");
NODE
echo "Testing /plugin alias with Claude bundle restart semantics..."
echo "Testing Claude bundle enable and inspect flow..."
bundle_plugin_id="claude-bundle-e2e"
bundle_root="$OPENCLAW_PLUGIN_HOME/$bundle_plugin_id"
mkdir -p "$bundle_root/.claude-plugin" "$bundle_root/commands"
@@ -657,72 +373,35 @@ $ARGUMENTS
MD
record_fixture_plugin_trust "$bundle_plugin_id" "$bundle_root" 0
node "$OPENCLAW_ENTRY" plugins list --json > /tmp/plugins-bundle-disabled.json
node - <<'NODE'
const fs = require("node:fs");
const path = require("node:path");
const configPath = path.join(process.env.HOME, ".openclaw", "openclaw.json");
const config = fs.existsSync(configPath)
? JSON.parse(fs.readFileSync(configPath, "utf8"))
: {};
config.gateway = {
...(config.gateway || {}),
port: 18789,
auth: { mode: "token", token: "plugin-e2e-token" },
controlUi: { enabled: false },
};
if (process.env.OPENAI_API_KEY) {
config.agents = {
...(config.agents || {}),
defaults: {
...(config.agents?.defaults || {}),
// Use the same stable OpenAI family as the installer E2E to avoid
// long or reasoning-heavy live turns in this bundle-command smoke.
model: { primary: "openai/gpt-4.1-mini" },
},
};
const data = JSON.parse(fs.readFileSync("/tmp/plugins-bundle-disabled.json", "utf8"));
const plugin = (data.plugins || []).find((entry) => entry.id === "claude-bundle-e2e");
if (!plugin) throw new Error("Claude bundle plugin not found");
if (plugin.status !== "disabled") {
throw new Error(`expected disabled bundle before enable, got ${plugin.status}`);
}
config.commands = {
...(config.commands || {}),
text: true,
plugins: true,
};
fs.mkdirSync(path.dirname(configPath), { recursive: true });
fs.writeFileSync(configPath, `${JSON.stringify(config, null, 2)}\n`, "utf8");
console.log("ok");
NODE
if [ -n "${OPENAI_API_KEY:-}" ]; then
seed_openai_provider_config "$OPENAI_API_KEY" "${OPENAI_BASE_URL:-}"
fi
workspace_dir="$HOME/.openclaw/workspace"
mkdir -p "$workspace_dir/.openclaw"
cat > "$workspace_dir/IDENTITY.md" <<'MD'
# Identity
- Name: Plugin E2E
- Nature: Test assistant
- Vibe: Concise
- Emoji: claw
MD
cat > "$workspace_dir/USER.md" <<'MD'
# User
- Name: OpenClaw test harness
- Timezone: UTC
MD
cat > "$workspace_dir/.openclaw/workspace-state.json" <<'JSON'
{
"version": 1,
"setupCompletedAt": "2026-01-01T00:00:00.000Z"
run_logged enable-claude-bundle node "$OPENCLAW_ENTRY" plugins enable claude-bundle-e2e
node "$OPENCLAW_ENTRY" plugins inspect claude-bundle-e2e --json > /tmp/plugins-bundle-inspect.json
node - <<'NODE'
const fs = require("node:fs");
const inspect = JSON.parse(fs.readFileSync("/tmp/plugins-bundle-inspect.json", "utf8"));
if (inspect.plugin?.bundleFormat !== "claude") {
throw new Error(`expected Claude bundle format, got ${inspect.plugin?.bundleFormat}`);
}
JSON
if (inspect.plugin?.enabled !== true || inspect.plugin?.status !== "loaded") {
throw new Error(
`expected enabled loaded Claude bundle, got enabled=${inspect.plugin?.enabled} status=${inspect.plugin?.status}`,
);
}
console.log("ok");
NODE
gateway_log="/tmp/openclaw-plugin-command-e2e.log"
start_gateway "$gateway_log"
wait_for_gateway_health
echo "Testing /plugin install with auto-restart..."
echo "Testing plugin install visible after explicit restart..."
slash_install_dir="$(mktemp -d "/tmp/openclaw-plugin-slash-install.XXXXXX")"
cat > "$slash_install_dir/package.json" <<'JSON'
{
@@ -750,118 +429,23 @@ cat > "$slash_install_dir/openclaw.plugin.json" <<'JSON'
}
JSON
if ! run_gateway_chat_json \
"plugin-e2e-install" \
"/plugin install $slash_install_dir" \
/tmp/plugin-command-install.json \
240000; then
cat "$gateway_log" 2>/dev/null || true
exit 1
fi
run_logged install-slash-plugin node "$OPENCLAW_ENTRY" plugins install "$slash_install_dir"
node "$OPENCLAW_ENTRY" plugins inspect slash-install-plugin --json > /tmp/plugin-command-install-show.json
node - <<'NODE'
const fs = require("node:fs");
const payload = JSON.parse(fs.readFileSync("/tmp/plugin-command-install.json", "utf8"));
const text = payload.text || "";
if (!text.includes('Installed plugin "slash-install-plugin"')) {
throw new Error(`expected install confirmation, got:\n${text}`);
const inspect = JSON.parse(fs.readFileSync("/tmp/plugin-command-install-show.json", "utf8"));
if (inspect.plugin?.status !== "loaded") {
throw new Error(`expected loaded status after install, got ${inspect.plugin?.status}`);
}
if (!text.includes("Restart the gateway to load plugins.")) {
throw new Error(`expected restart hint, got:\n${text}`);
if (inspect.plugin?.enabled !== true) {
throw new Error(`expected enabled status after install, got ${inspect.plugin?.enabled}`);
}
if (!inspect.gatewayMethods.includes("demo.slash.install")) {
throw new Error(`expected installed gateway method, got ${inspect.gatewayMethods.join(", ")}`);
}
console.log("ok");
NODE
wait_for_gateway_health
run_gateway_chat_json "plugin-e2e-install-show" "/plugin show slash-install-plugin" /tmp/plugin-command-install-show.json
node - <<'NODE'
const fs = require("node:fs");
const payload = JSON.parse(fs.readFileSync("/tmp/plugin-command-install-show.json", "utf8"));
const text = payload.text || "";
if (!text.includes('"status": "loaded"')) {
throw new Error(`expected loaded status after slash install, got:\n${text}`);
}
if (!text.includes('"enabled": true')) {
throw new Error(`expected enabled status after slash install, got:\n${text}`);
}
if (!text.includes('"demo.slash.install"')) {
throw new Error(`expected installed gateway method, got:\n${text}`);
}
console.log("ok");
NODE
run_gateway_chat_json "plugin-e2e-list" "/plugin list" /tmp/plugin-command-list.json
node - <<'NODE'
const fs = require("node:fs");
const payload = JSON.parse(fs.readFileSync("/tmp/plugin-command-list.json", "utf8"));
const text = payload.text || "";
if (!text.includes("claude-bundle-e2e")) {
throw new Error(`expected plugin in /plugin list output, got:\n${text}`);
}
if (!text.includes("[disabled]")) {
throw new Error(`expected disabled status before enable, got:\n${text}`);
}
console.log("ok");
NODE
run_gateway_chat_json \
"plugin-e2e-enable" \
"/plugin enable claude-bundle-e2e" \
/tmp/plugin-command-enable.json \
60000
node - <<'NODE'
const fs = require("node:fs");
const payload = JSON.parse(fs.readFileSync("/tmp/plugin-command-enable.json", "utf8"));
const text = payload.text || "";
if (!text.includes('Plugin "claude-bundle-e2e" enabled')) {
throw new Error(`expected enable confirmation, got:\n${text}`);
}
if (!text.includes("Restart the gateway to apply.")) {
throw new Error(`expected restart hint, got:\n${text}`);
}
console.log("ok");
NODE
wait_for_gateway_health
run_gateway_chat_json "plugin-e2e-show" "/plugin show claude-bundle-e2e" /tmp/plugin-command-show.json
node - <<'NODE'
const fs = require("node:fs");
const payload = JSON.parse(fs.readFileSync("/tmp/plugin-command-show.json", "utf8"));
const text = payload.text || "";
if (!text.includes('"bundleFormat": "claude"')) {
throw new Error(`expected Claude bundle inspect payload, got:\n${text}`);
}
if (!text.includes('"enabled": true')) {
throw new Error(`expected enabled inspect payload, got:\n${text}`);
}
console.log("ok");
NODE
if [ -n "${OPENAI_API_KEY:-}" ]; then
echo "Testing Claude bundle command invocation..."
if ! run_gateway_chat_json \
"plugin-e2e-live" \
"/office_hours Reply with exactly BUNDLE_OK and nothing else." \
/tmp/plugin-command-live.json \
120000; then
echo "Claude bundle command invocation failed; payload dump:"
cat /tmp/plugin-command-live.json 2>/dev/null || true
echo "Gateway log tail:"
tail -n 200 "$gateway_log" || true
exit 1
fi
node - <<'NODE'
const fs = require("node:fs");
const payload = JSON.parse(fs.readFileSync("/tmp/plugin-command-live.json", "utf8"));
const text = payload.text || "";
if (!text.includes("BUNDLE_OK")) {
throw new Error(`expected Claude bundle command reply, got:\n${text}`);
}
console.log("ok");
NODE
else
echo "Skipping live Claude bundle command invocation (OPENAI_API_KEY not set)."
fi
echo "Testing marketplace install and update flows..."
marketplace_root="$HOME/.claude/plugins/marketplaces/fixture-marketplace"
mkdir -p "$HOME/.claude/plugins" "$marketplace_root/.claude-plugin"
@@ -1019,6 +603,152 @@ if (!inspect.gatewayMethods.includes("demo.marketplace.shortcut.v2")) {
console.log("ok");
NODE
if [ "${OPENCLAW_PLUGINS_E2E_CLAWHUB:-1}" = "0" ]; then
echo "Skipping ClawHub plugin install and uninstall (OPENCLAW_PLUGINS_E2E_CLAWHUB=0)."
else
echo "Testing ClawHub plugin install and uninstall..."
CLAWHUB_PLUGIN_SPEC="${OPENCLAW_PLUGINS_E2E_CLAWHUB_SPEC:-clawhub:openclaw-now4real}"
CLAWHUB_PLUGIN_ID="${OPENCLAW_PLUGINS_E2E_CLAWHUB_ID:-now4real}"
export CLAWHUB_PLUGIN_SPEC CLAWHUB_PLUGIN_ID
node - <<'NODE'
const spec = process.env.CLAWHUB_PLUGIN_SPEC;
if (!spec?.startsWith("clawhub:")) {
throw new Error(`expected clawhub: spec, got ${spec}`);
}
const parsePackageName = (rawSpec) => {
const value = rawSpec.slice("clawhub:".length).trim();
const slashIndex = value.lastIndexOf("/");
const atIndex = value.lastIndexOf("@");
return atIndex > 0 && atIndex > slashIndex ? value.slice(0, atIndex) : value;
};
const packageName = parsePackageName(spec);
const baseUrl = (process.env.OPENCLAW_CLAWHUB_URL || process.env.CLAWHUB_URL || "https://clawhub.ai")
.replace(/\/+$/, "");
const token =
process.env.OPENCLAW_CLAWHUB_TOKEN ||
process.env.CLAWHUB_TOKEN ||
process.env.CLAWHUB_AUTH_TOKEN ||
"";
const response = await fetch(`${baseUrl}/api/v1/packages/${encodeURIComponent(packageName)}`, {
headers: token ? { Authorization: `Bearer ${token}` } : undefined,
});
if (!response.ok) {
const body = await response.text().catch(() => "");
throw new Error(`ClawHub package preflight failed for ${packageName}: ${response.status} ${body}`);
}
const detail = await response.json();
const family = detail.package?.family;
if (family !== "code-plugin" && family !== "bundle-plugin") {
throw new Error(`ClawHub package ${packageName} is not installable as a plugin: ${family}`);
}
if (detail.package?.runtimeId && detail.package.runtimeId !== process.env.CLAWHUB_PLUGIN_ID) {
throw new Error(
`ClawHub package ${packageName} runtimeId ${detail.package.runtimeId} does not match expected ${process.env.CLAWHUB_PLUGIN_ID}`,
);
}
console.log(`Using ClawHub package ${packageName} (${family}).`);
NODE
run_logged install-clawhub node "$OPENCLAW_ENTRY" plugins install "$CLAWHUB_PLUGIN_SPEC"
node "$OPENCLAW_ENTRY" plugins list --json > /tmp/plugins-clawhub-installed.json
node "$OPENCLAW_ENTRY" plugins inspect "$CLAWHUB_PLUGIN_ID" --json > /tmp/plugins-clawhub-inspect.json
node - <<'NODE'
const fs = require("node:fs");
const path = require("node:path");
const pluginId = process.env.CLAWHUB_PLUGIN_ID;
const spec = process.env.CLAWHUB_PLUGIN_SPEC;
const parsePackageName = (rawSpec) => {
const value = rawSpec.slice("clawhub:".length).trim();
const slashIndex = value.lastIndexOf("/");
const atIndex = value.lastIndexOf("@");
return atIndex > 0 && atIndex > slashIndex ? value.slice(0, atIndex) : value;
};
const packageName = parsePackageName(spec);
const list = JSON.parse(fs.readFileSync("/tmp/plugins-clawhub-installed.json", "utf8"));
const inspect = JSON.parse(fs.readFileSync("/tmp/plugins-clawhub-inspect.json", "utf8"));
const plugin = (list.plugins || []).find((entry) => entry.id === pluginId);
if (!plugin) throw new Error(`ClawHub plugin not found after install: ${pluginId}`);
if (plugin.status !== "loaded") {
throw new Error(`unexpected ClawHub plugin status for ${pluginId}: ${plugin.status}`);
}
if (inspect.plugin?.id !== pluginId) {
throw new Error(`unexpected ClawHub inspect plugin id: ${inspect.plugin?.id}`);
}
const indexPath = path.join(process.env.HOME, ".openclaw", "plugins", "installs.json");
const index = JSON.parse(fs.readFileSync(indexPath, "utf8"));
const record = index.installRecords?.[pluginId];
if (!record) throw new Error(`missing ClawHub install record for ${pluginId}`);
if (record.source !== "clawhub") {
throw new Error(`unexpected ClawHub install source for ${pluginId}: ${record.source}`);
}
if (record.clawhubPackage !== packageName) {
throw new Error(
`unexpected ClawHub package for ${pluginId}: ${record.clawhubPackage}, expected ${packageName}`,
);
}
if (record.clawhubFamily !== "code-plugin" && record.clawhubFamily !== "bundle-plugin") {
throw new Error(`unexpected ClawHub family for ${pluginId}: ${record.clawhubFamily}`);
}
if (typeof record.installPath !== "string" || record.installPath.length === 0) {
throw new Error(`missing ClawHub install path for ${pluginId}`);
}
const installPath = record.installPath.replace(/^~(?=$|\/)/, process.env.HOME);
const extensionsRoot = path.join(process.env.HOME, ".openclaw", "extensions");
if (!installPath.startsWith(`${extensionsRoot}${path.sep}`)) {
throw new Error(`ClawHub install path is outside managed extensions root: ${installPath}`);
}
if (!fs.existsSync(installPath)) {
throw new Error(`ClawHub install path missing on disk: ${installPath}`);
}
fs.writeFileSync("/tmp/plugins-clawhub-install-path.txt", installPath, "utf8");
console.log("ok");
NODE
run_logged uninstall-clawhub node "$OPENCLAW_ENTRY" plugins uninstall "$CLAWHUB_PLUGIN_SPEC" --force
node "$OPENCLAW_ENTRY" plugins list --json > /tmp/plugins-clawhub-uninstalled.json
node - <<'NODE'
const fs = require("node:fs");
const path = require("node:path");
const pluginId = process.env.CLAWHUB_PLUGIN_ID;
const installPath = fs.readFileSync("/tmp/plugins-clawhub-install-path.txt", "utf8").trim();
const list = JSON.parse(fs.readFileSync("/tmp/plugins-clawhub-uninstalled.json", "utf8"));
if ((list.plugins || []).some((entry) => entry.id === pluginId)) {
throw new Error(`ClawHub plugin still listed after uninstall: ${pluginId}`);
}
const indexPath = path.join(process.env.HOME, ".openclaw", "plugins", "installs.json");
const index = fs.existsSync(indexPath) ? JSON.parse(fs.readFileSync(indexPath, "utf8")) : {};
if (index.installRecords?.[pluginId]) {
throw new Error(`ClawHub install record still present after uninstall: ${pluginId}`);
}
const configPath = path.join(process.env.HOME, ".openclaw", "openclaw.json");
const config = fs.existsSync(configPath) ? JSON.parse(fs.readFileSync(configPath, "utf8")) : {};
if (config.plugins?.entries?.[pluginId]) {
throw new Error(`ClawHub config entry still present after uninstall: ${pluginId}`);
}
if ((config.plugins?.allow || []).includes(pluginId)) {
throw new Error(`ClawHub allowlist entry still present after uninstall: ${pluginId}`);
}
if ((config.plugins?.deny || []).includes(pluginId)) {
throw new Error(`ClawHub denylist entry still present after uninstall: ${pluginId}`);
}
if (fs.existsSync(installPath)) {
throw new Error(`ClawHub managed install directory still exists after uninstall: ${installPath}`);
}
console.log("ok");
NODE
fi
echo "Running bundle MCP CLI-agent e2e..."
node scripts/run-vitest.mjs run --config test/vitest/vitest.e2e.config.ts src/agents/cli-runner.bundle-mcp.e2e.test.ts
EOF

View File

@@ -0,0 +1,165 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
source "$ROOT_DIR/scripts/lib/docker-e2e-image.sh"
IMAGE_NAME="$(docker_e2e_resolve_image "openclaw-update-channel-switch-e2e" OPENCLAW_UPDATE_CHANNEL_SWITCH_E2E_IMAGE)"
SKIP_BUILD="${OPENCLAW_UPDATE_CHANNEL_SWITCH_E2E_SKIP_BUILD:-0}"
docker_e2e_build_or_reuse "$IMAGE_NAME" update-channel-switch "$ROOT_DIR/scripts/e2e/Dockerfile" "$ROOT_DIR" "" "$SKIP_BUILD"
echo "Running update channel switch E2E..."
docker run --rm \
-e COREPACK_ENABLE_DOWNLOAD_PROMPT=0 \
-e OPENCLAW_SKIP_CHANNELS=1 \
-e OPENCLAW_SKIP_PROVIDERS=1 \
"$IMAGE_NAME" \
bash -lc 'set -euo pipefail
export npm_config_loglevel=error
export npm_config_fund=false
export npm_config_audit=false
export npm_config_prefix=/tmp/npm-prefix
export NPM_CONFIG_PREFIX=/tmp/npm-prefix
export PNPM_HOME=/tmp/pnpm-home
export PATH="/tmp/npm-prefix/bin:/tmp/pnpm-home:$PATH"
export CI=true
export OPENCLAW_DISABLE_BUNDLED_PLUGINS=1
export OPENCLAW_NO_ONBOARD=1
export OPENCLAW_NO_PROMPT=1
cat > /app/.gitignore <<'"'"'GITIGNORE'"'"'
node_modules
**/node_modules/
dist
dist-runtime
.turbo
coverage
GITIGNORE
node --import tsx scripts/write-package-dist-inventory.ts
git config --global user.email "docker-e2e@openclaw.local"
git config --global user.name "OpenClaw Docker E2E"
git config --global gc.auto 0
git -C /app init -q
git -C /app config gc.auto 0
git -C /app add -A
git -C /app commit -qm "test fixture"
fixture_sha="$(git -C /app rev-parse HEAD)"
pkg_tgz="$(npm pack --ignore-scripts --silent --pack-destination /tmp /app | tail -n 1 | tr -d "\r")"
pkg_tgz_path="/tmp/$pkg_tgz"
if [ ! -f "$pkg_tgz_path" ]; then
echo "npm pack failed (expected $pkg_tgz_path)"
exit 1
fi
npm install -g --prefix /tmp/npm-prefix --omit=optional "$pkg_tgz_path"
home_dir="$(mktemp -d /tmp/openclaw-update-channel-switch-home.XXXXXX)"
export HOME="$home_dir"
mkdir -p "$HOME/.openclaw"
cat > "$HOME/.openclaw/openclaw.json" <<'"'"'JSON'"'"'
{
"update": {
"channel": "stable"
},
"plugins": {}
}
JSON
export OPENCLAW_GIT_DIR=/app
export OPENCLAW_UPDATE_DEV_TARGET_REF="$fixture_sha"
echo "==> package -> git dev channel"
set +e
dev_json="$(openclaw update --channel dev --yes --json --no-restart)"
dev_status=$?
set -e
printf "%s\n" "$dev_json"
if [ "$dev_status" -ne 0 ]; then
exit "$dev_status"
fi
DEV_JSON="$dev_json" node - <<'"'"'NODE'"'"'
// Validate the JSON payload emitted by `openclaw update --channel dev --json`.
const payload = JSON.parse(process.env.DEV_JSON);
// The switch itself must have succeeded.
if (payload.status !== "ok") {
  throw new Error(`expected dev update status ok, got ${payload.status}`);
}
// Switching to the dev channel must have installed from git, not a package manager.
if (payload.mode !== "git") {
  throw new Error(`expected dev update mode git, got ${payload.mode}`);
}
// Plugin reconciliation runs after the update; it must also report success.
if (payload.postUpdate?.plugins?.status !== "ok") {
  throw new Error(`expected plugin post-update ok, got ${JSON.stringify(payload.postUpdate?.plugins)}`);
}
NODE
node - <<'"'"'NODE'"'"'
const fs = require("node:fs");
const path = require("node:path");
// The channel switch must be persisted to the user config, not just applied in-memory.
const configPath = path.join(process.env.HOME, ".openclaw", "openclaw.json");
const config = JSON.parse(fs.readFileSync(configPath, "utf8"));
if (config.update?.channel !== "dev") {
  throw new Error(`expected persisted update.channel dev, got ${JSON.stringify(config.update?.channel)}`);
}
NODE
status_json="$(openclaw update status --json)"
printf "%s\n" "$status_json"
STATUS_JSON="$status_json" node - <<'"'"'NODE'"'"'
const payload = JSON.parse(process.env.STATUS_JSON);
if (payload.update?.installKind !== "git") {
throw new Error(`expected git install after dev switch, got ${payload.update?.installKind}`);
}
if (payload.channel?.value !== "dev" || payload.channel?.source !== "config") {
throw new Error(`expected dev config channel after dev switch, got ${JSON.stringify(payload.channel)}`);
}
NODE
echo "==> git -> package stable channel"
set +e
stable_json="$(openclaw update --channel stable --tag "$pkg_tgz_path" --yes --json --no-restart)"
stable_status=$?
set -e
printf "%s\n" "$stable_json"
if [ "$stable_status" -ne 0 ]; then
exit "$stable_status"
fi
STABLE_JSON="$stable_json" node - <<'"'"'NODE'"'"'
const payload = JSON.parse(process.env.STABLE_JSON);
if (payload.status !== "ok") {
throw new Error(`expected stable update status ok, got ${payload.status}`);
}
if (!["npm", "pnpm", "bun"].includes(payload.mode)) {
throw new Error(`expected package-manager mode after stable switch, got ${payload.mode}`);
}
if (payload.postUpdate?.plugins?.status !== "ok") {
throw new Error(`expected plugin post-update ok, got ${JSON.stringify(payload.postUpdate?.plugins)}`);
}
NODE
node - <<'"'"'NODE'"'"'
const fs = require("node:fs");
const path = require("node:path");
const configPath = path.join(process.env.HOME, ".openclaw", "openclaw.json");
const config = JSON.parse(fs.readFileSync(configPath, "utf8"));
if (config.update?.channel !== "stable") {
throw new Error(`expected persisted update.channel stable, got ${JSON.stringify(config.update?.channel)}`);
}
NODE
status_json="$(openclaw update status --json)"
printf "%s\n" "$status_json"
STATUS_JSON="$status_json" node - <<'"'"'NODE'"'"'
// Validate `openclaw update status --json` after switching back to the stable channel.
const payload = JSON.parse(process.env.STATUS_JSON);
// The install must now be package-manager based again (git checkout replaced).
if (payload.update?.installKind !== "package") {
  throw new Error(`expected package install after stable switch, got ${payload.update?.installKind}`);
}
// The channel must read "stable" and come from persisted config, not an env/flag override.
if (payload.channel?.value !== "stable" || payload.channel?.source !== "config") {
  throw new Error(`expected stable config channel after stable switch, got ${JSON.stringify(payload.channel)}`);
}
NODE
echo "OK"
'

View File

@@ -134,6 +134,7 @@
"device-bootstrap",
"diagnostic-runtime",
"diagnostics-otel",
"diagnostics-prometheus",
"diffs",
"error-runtime",
"extension-shared",

View File

@@ -246,6 +246,14 @@ const lanes = [
npmLane("doctor-switch", "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:doctor-switch", {
weight: 3,
}),
npmLane(
"update-channel-switch",
"OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:update-channel-switch",
{
timeoutMs: 30 * 60 * 1000,
weight: 3,
},
),
lane("plugins", "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:plugins", {
resources: ["npm", "service"],
weight: 6,

View File

@@ -275,7 +275,9 @@ async function main() {
const baseEnv = resolveLocalVitestEnv(process.env);
const { targetArgs } = parseTestProjectsArgs(args, process.cwd());
const changedTargetArgs =
targetArgs.length === 0 ? resolveChangedTargetArgs(args, process.cwd()) : null;
targetArgs.length === 0
? resolveChangedTargetArgs(args, process.cwd(), undefined, { env: baseEnv })
: null;
const rawRunSpecs =
targetArgs.length === 0 && changedTargetArgs === null
? buildFullSuiteVitestRunPlans(args, process.cwd()).map((plan) => ({

View File

@@ -14,6 +14,12 @@ export type VitestRunSpec = {
watchMode: boolean;
};
export type ChangedTestTargetOptions = {
cwd?: string;
env?: Record<string, string | undefined>;
focused?: boolean;
};
export const DEFAULT_TEST_PROJECTS_VITEST_NO_OUTPUT_TIMEOUT_MS: string;
export function parseTestProjectsArgs(
@@ -29,15 +35,20 @@ export function buildVitestRunPlans(
args: string[],
cwd?: string,
listChangedPaths?: (baseRef: string, cwd: string) => string[],
options?: ChangedTestTargetOptions,
): VitestRunPlan[];
export function resolveChangedTargetArgs(
args: string[],
cwd?: string,
listChangedPaths?: (baseRef: string, cwd: string) => string[],
options?: ChangedTestTargetOptions,
): string[] | null;
export function resolveChangedTestTargetPlan(changedPaths: string[]): {
export function resolveChangedTestTargetPlan(
changedPaths: string[],
options?: ChangedTestTargetOptions,
): {
mode: "none" | "broad" | "targets";
targets: string[];
};

View File

@@ -301,6 +301,11 @@ const GENERATED_CHANGED_TEST_TARGETS = new Set([
"src/canvas-host/a2ui/.bundle.hash",
"src/canvas-host/a2ui/a2ui.bundle.js",
]);
const SOURCE_ROOTS_FOR_IMPORT_GRAPH = ["src", "extensions", "packages", "ui/src", "test"];
const IMPORTABLE_FILE_EXTENSIONS = [".ts", ".tsx", ".mts", ".cts"];
const IMPORT_SPECIFIER_PATTERN =
/\b(?:import|export)\s+(?:type\s+)?(?:[^'"]*?\s+from\s+)?["']([^"']+)["']|\bimport\s*\(\s*["']([^"']+)["']\s*\)/gu;
const FOCUSED_CHANGED_ENV_KEY = "OPENCLAW_TEST_CHANGED_FOCUSED";
const VITEST_NO_OUTPUT_TIMEOUT_ENV_KEY = "OPENCLAW_VITEST_NO_OUTPUT_TIMEOUT_MS";
const VITEST_NO_OUTPUT_RETRY_ENV_KEY = "OPENCLAW_VITEST_NO_OUTPUT_RETRY";
export const DEFAULT_TEST_PROJECTS_VITEST_NO_OUTPUT_TIMEOUT_MS = "180000";
@@ -375,6 +380,10 @@ function isFileLikeTarget(arg) {
return /\.(?:test|spec)\.[cm]?[jt]sx?$/u.test(arg);
}
function isTestFileTarget(arg) {
return /\.(?:test|spec)\.[cm]?[jt]sx?$/u.test(arg);
}
function isLikelyFileTarget(arg) {
return /(?:^|\/)[^/]+\.[A-Za-z0-9]+$/u.test(arg);
}
@@ -406,6 +415,128 @@ function toScopedIncludePattern(arg, cwd) {
return `${relative.replace(/\/+$/u, "")}/**/*.test.ts`;
}
/**
 * Returns true for directory names that must never be traversed while
 * building the import graph: VCS metadata, build output, installed
 * dependencies, vendored code, and runtime-deps caches.
 */
function isSkippedImportGraphDirectory(name) {
  if (name.startsWith(".openclaw-runtime-deps")) {
    return true;
  }
  switch (name) {
    case ".git":
    case "dist":
    case "node_modules":
    case "vendor":
      return true;
    default:
      return false;
  }
}
/**
 * Recursively collects importable source files under `directory` (relative to
 * `cwd`), appending posix-normalized relative paths to `files`. Directories
 * matching isSkippedImportGraphDirectory are pruned; unreadable directories
 * contribute nothing. Returns the (mutated) `files` accumulator.
 */
function listImportGraphFiles(cwd, directory, files = []) {
  let entries = [];
  try {
    entries = fs.readdirSync(path.join(cwd, directory), { withFileTypes: true });
  } catch {
    // Missing or unreadable directory: treat as empty rather than failing the run.
  }
  for (const entry of entries) {
    const relative = normalizePathPattern(path.posix.join(directory, entry.name));
    if (entry.isDirectory()) {
      if (!isSkippedImportGraphDirectory(entry.name)) {
        listImportGraphFiles(cwd, relative, files);
      }
    } else if (
      entry.isFile() &&
      IMPORTABLE_FILE_EXTENSIONS.some((extension) => relative.endsWith(extension))
    ) {
      files.push(relative);
    }
  }
  return files;
}
/**
 * Resolves a relative import `specifier` appearing in `importer` to a concrete
 * file present in `fileSet`, applying TypeScript-style fallbacks: explicit
 * extensions win, JS-flavored extensions retry with TS extensions, and
 * extensionless specifiers try `<base><ext>` then `<base>/index<ext>`.
 * Bare (package) specifiers and unresolved paths yield null.
 */
function resolveImportSpecifier(importer, specifier, fileSet) {
  if (!specifier.startsWith(".")) {
    // Only relative imports participate in the local import graph.
    return null;
  }
  const base = normalizePathPattern(
    path.posix.normalize(path.posix.join(path.posix.dirname(importer), specifier)),
  );
  const extension = path.posix.extname(base);
  const candidates = [];
  if (extension === "") {
    for (const tsExtension of IMPORTABLE_FILE_EXTENSIONS) {
      candidates.push(`${base}${tsExtension}`);
    }
    for (const tsExtension of IMPORTABLE_FILE_EXTENSIONS) {
      candidates.push(`${base}/index${tsExtension}`);
    }
  } else {
    candidates.push(base);
    if ([".js", ".jsx", ".mjs", ".cjs"].includes(extension)) {
      // TS sources are often imported via their emitted JS name; retry with TS extensions.
      const stem = base.slice(0, -extension.length);
      for (const tsExtension of IMPORTABLE_FILE_EXTENSIONS) {
        candidates.push(`${stem}${tsExtension}`);
      }
    }
  }
  for (const candidate of candidates) {
    if (fileSet.has(candidate)) {
      return candidate;
    }
  }
  return null;
}
// Module-level memo of the most recently built import graph, keyed by cwd.
// Invalidated only when getImportGraph is called with a different cwd.
let cachedImportGraph = null;
let cachedImportGraphCwd = null;
/**
 * Builds (or returns the cached) reverse import graph for the repository at
 * `cwd`. The result has `reverseImports` (Map: file -> array of files that
 * import it) and `testFiles` (Set of test files, excluding `.live.test.ts`).
 * Unreadable source files are skipped rather than failing graph construction.
 */
function getImportGraph(cwd) {
  if (cachedImportGraph && cachedImportGraphCwd === cwd) {
    return cachedImportGraph;
  }
  const files = SOURCE_ROOTS_FOR_IMPORT_GRAPH.flatMap((root) => listImportGraphFiles(cwd, root));
  const fileSet = new Set(files);
  const reverseImports = new Map();
  const testFiles = new Set(
    files.filter((file) => isTestFileTarget(file) && !file.endsWith(".live.test.ts")),
  );
  for (const file of files) {
    let source = "";
    try {
      source = fs.readFileSync(path.join(cwd, file), "utf8");
    } catch {
      // File vanished or is unreadable; it simply contributes no edges.
      continue;
    }
    for (const match of source.matchAll(IMPORT_SPECIFIER_PATTERN)) {
      // Group 1 captures static import/export specifiers; group 2 captures dynamic import().
      const imported = resolveImportSpecifier(file, match[1] ?? match[2] ?? "", fileSet);
      if (!imported) {
        continue;
      }
      const importers = reverseImports.get(imported) ?? [];
      importers.push(file);
      reverseImports.set(imported, importers);
    }
  }
  cachedImportGraph = { reverseImports, testFiles };
  cachedImportGraphCwd = cwd;
  return cachedImportGraph;
}
/**
 * Walks the reverse import graph starting from `changedPath` and returns every
 * transitive importer that is a (non-live) test file, sorted lexicographically.
 * The visited set guarantees each file is examined at most once, so the
 * traversal order does not affect the (sorted) result.
 */
function resolveAffectedTestsFromImportGraph(changedPath, cwd) {
  const { reverseImports, testFiles } = getImportGraph(cwd);
  const start = normalizePathPattern(changedPath);
  const visited = new Set([start]);
  const pending = [start];
  const affected = [];
  while (pending.length > 0) {
    const current = pending.pop();
    for (const importer of reverseImports.get(current) ?? []) {
      if (visited.has(importer)) {
        continue;
      }
      visited.add(importer);
      if (testFiles.has(importer)) {
        affected.push(importer);
      }
      pending.push(importer);
    }
  }
  return affected.toSorted((left, right) => left.localeCompare(right));
}
/** Looks up the vitest config target kind registered for `relative`, or null. */
function resolveVitestConfigTargetKind(relative) {
  const kind = VITEST_CONFIG_TARGET_KIND_BY_PATH.get(relative);
  return kind === undefined ? null : kind;
}
@@ -554,6 +685,11 @@ function resolveToolingTestTargets(changedPath) {
return TOOLING_SOURCE_TEST_TARGETS.get(changedPath) ?? TOOLING_TEST_TARGETS.get(changedPath);
}
/**
 * True when the focused-changed-targets env flag is set to a truthy marker
 * ("1", "true", "yes", or "on" — case-insensitive, whitespace ignored).
 */
function shouldUseFocusedChangedTargets(env = process.env) {
  const raw = env[FOCUSED_CHANGED_ENV_KEY];
  if (raw == null) {
    return false;
  }
  const normalized = raw.trim().toLowerCase();
  return (
    normalized === "1" || normalized === "true" || normalized === "yes" || normalized === "on"
  );
}
function isRoutableChangedTarget(changedPath) {
if (GENERATED_CHANGED_TEST_TARGETS.has(changedPath)) {
return false;
@@ -564,7 +700,39 @@ function isRoutableChangedTarget(changedPath) {
return /^(?:src|test|extensions|ui|packages)(?:\/|$)/u.test(changedPath);
}
export function resolveChangedTestTargetPlan(changedPaths) {
/**
 * For a changed non-test TypeScript source file, returns the co-located
 * `<name>.test.ts` path when that file exists on disk; otherwise null.
 */
function resolveSiblingTestTarget(changedPath, cwd) {
  const tsExtension = /\.[cm]?tsx?$/u;
  if (!tsExtension.test(changedPath)) {
    return null;
  }
  if (isTestFileTarget(changedPath)) {
    // Test files are their own targets; no sibling lookup needed.
    return null;
  }
  const candidate = `${changedPath.replace(tsExtension, "")}.test.ts`;
  if (!fs.existsSync(path.join(cwd, candidate))) {
    return null;
  }
  return candidate;
}
/**
 * Maps a single changed path to the smallest set of test targets, trying in
 * order: explicit tooling/source mappings, the changed test file itself, a
 * co-located sibling test, then the reverse import graph. Returns null when
 * no precise target can be determined (caller decides broad-vs-skip).
 */
function resolvePreciseChangedTestTargets(changedPath, options) {
  const cwd = options.cwd ?? process.cwd();
  // 1. Hand-maintained mappings take priority over any inference.
  const mapped = resolveToolingTestTargets(changedPath) ?? SOURCE_TEST_TARGETS.get(changedPath);
  if (mapped) {
    return mapped;
  }
  // 2. A changed routable test file is its own target.
  if (isTestFileTarget(changedPath) && isRoutableChangedTarget(changedPath)) {
    return [changedPath];
  }
  // 3. A source file with a co-located `*.test.ts` maps to that sibling.
  const sibling = resolveSiblingTestTarget(changedPath, cwd);
  if (sibling !== null) {
    return [sibling];
  }
  // 4. Fall back to the reverse import graph for recognized source roots.
  if (/^(?:src|test\/helpers|extensions|packages|ui\/src)\//u.test(changedPath)) {
    const affected = resolveAffectedTestsFromImportGraph(changedPath, cwd);
    if (affected.length > 0) {
      return affected;
    }
  }
  return null;
}
export function resolveChangedTestTargetPlan(changedPaths, options = {}) {
if (changedPaths.length === 0) {
return { mode: "none", targets: [] };
}
@@ -572,22 +740,29 @@ export function resolveChangedTestTargetPlan(changedPaths) {
if (toolingTargets) {
return { mode: "targets", targets: toolingTargets };
}
if (shouldKeepBroadChangedRun(changedPaths)) {
return { mode: "broad", targets: [] };
}
const changedLanes = detectChangedLanes(changedPaths);
if (changedLanes.lanes.all) {
const focused = options.focused ?? shouldUseFocusedChangedTargets(options.env ?? {});
const targets = [];
for (const changedPath of changedPaths) {
const preciseTargets = resolvePreciseChangedTestTargets(changedPath, options);
if (preciseTargets) {
targets.push(...preciseTargets);
continue;
}
if (focused) {
continue;
}
if (shouldKeepBroadChangedRun([changedPath]) || changedLanes.lanes.all) {
return { mode: "broad", targets: [] };
}
if (isRoutableChangedTarget(changedPath)) {
targets.push(changedPath);
}
}
if (!focused && changedLanes.lanes.all) {
return { mode: "broad", targets: [] };
}
const targets = changedPaths.flatMap((changedPath) => {
const mappedTargets =
resolveToolingTestTargets(changedPath) ?? SOURCE_TEST_TARGETS.get(changedPath);
if (mappedTargets) {
return mappedTargets;
}
return isRoutableChangedTarget(changedPath) ? [changedPath] : [];
});
if (changedLanes.extensionImpactFromCore) {
if (!focused && changedLanes.extensionImpactFromCore) {
targets.push("extensions");
}
return { mode: "targets", targets: [...new Set(targets)] };
@@ -604,13 +779,17 @@ export function resolveChangedTargetArgs(
args,
cwd = process.cwd(),
listChangedPaths = listChangedPathsFromGit,
options = {},
) {
const baseRef = extractChangedBaseRef(args);
if (!baseRef) {
return null;
}
const changedPaths = listChangedPaths(baseRef, cwd);
const plan = resolveChangedTestTargetPlan(changedPaths);
const plan = resolveChangedTestTargetPlan(changedPaths, {
cwd,
...options,
});
if (plan.mode === "broad") {
return null;
}
@@ -877,10 +1056,11 @@ export function buildVitestRunPlans(
args,
cwd = process.cwd(),
listChangedPaths = listChangedPathsFromGit,
options = {},
) {
const { forwardedArgs, targetArgs, watchMode } = parseTestProjectsArgs(args, cwd);
const changedTargetArgs =
targetArgs.length === 0 ? resolveChangedTargetArgs(args, cwd, listChangedPaths) : null;
targetArgs.length === 0 ? resolveChangedTargetArgs(args, cwd, listChangedPaths, options) : null;
const activeTargetArgs = changedTargetArgs ?? targetArgs;
const activeForwardedArgs =
changedTargetArgs !== null ? stripChangedArgs(forwardedArgs) : forwardedArgs;
@@ -1187,7 +1367,10 @@ export function shouldRetryVitestNoOutputTimeout(env = process.env) {
export function createVitestRunSpecs(args, params = {}) {
const cwd = params.cwd ?? process.cwd();
const baseEnv = params.baseEnv ?? process.env;
const plans = filterPlansForContractIncludeFile(buildVitestRunPlans(args, cwd), baseEnv);
const plans = filterPlansForContractIncludeFile(
buildVitestRunPlans(args, cwd, listChangedPathsFromGit, { env: baseEnv }),
baseEnv,
);
return plans.map((plan, index) => {
const includeFilePath = plan.includePatterns
? path.join(

Some files were not shown because too many files have changed in this diff Show More