ci: split docker e2e images

This commit is contained in:
Peter Steinberger
2026-04-26 22:54:56 +01:00
parent 3a8961af0f
commit baaad52389
7 changed files with 300 additions and 57 deletions

View File

@@ -82,7 +82,8 @@ OPENCLAW_DOCKER_ALL_LANES=<lane> \
OPENCLAW_DOCKER_ALL_BUILD=0 \
OPENCLAW_DOCKER_ALL_PREFLIGHT=0 \
OPENCLAW_SKIP_DOCKER_BUILD=1 \
OPENCLAW_DOCKER_E2E_IMAGE='<prepared-image>' \
OPENCLAW_DOCKER_E2E_BARE_IMAGE='<prepared-bare-image>' \
OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE='<prepared-functional-image>' \
pnpm test:docker:all
```
@@ -99,7 +100,7 @@ docker_lanes: install-e2e bundled-channel-update-acpx
```
That skips the three chunk matrix and runs one targeted Docker job against the
prepared GHCR image. Release-path normal mode remains max three Docker chunk
prepared GHCR images. Release-path normal mode remains max three Docker chunk
jobs:
- `core`
@@ -108,7 +109,9 @@ jobs:
Every scheduler run writes `.artifacts/docker-tests/**/summary.json`. Read it
before rerunning. Lane entries include `command`, `rerunCommand`, status,
timing, timeout state, and log file path.
timing, timeout state, image kind, and log file path. The summary also includes
top-level phase timings for preflight, image build, package prep, lane pools,
and cleanup.
## Docker Expected Timings

View File

@@ -436,6 +436,8 @@ jobs:
OPENCLAW_GEMINI_SETTINGS_JSON: ${{ secrets.OPENCLAW_GEMINI_SETTINGS_JSON }}
FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
OPENCLAW_DOCKER_E2E_IMAGE: ${{ needs.prepare_docker_e2e_image.outputs.image }}
OPENCLAW_DOCKER_E2E_BARE_IMAGE: ${{ needs.prepare_docker_e2e_image.outputs.bare_image }}
OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE: ${{ needs.prepare_docker_e2e_image.outputs.functional_image }}
OPENCLAW_SKIP_DOCKER_BUILD: "1"
INCLUDE_OPENWEBUI: ${{ inputs.include_openwebui }}
DOCKER_E2E_CHUNK: ${{ matrix.chunk_id }}
@@ -467,7 +469,21 @@ jobs:
shell: bash
run: |
set -euo pipefail
docker pull "${OPENCLAW_DOCKER_E2E_IMAGE}"
case "${DOCKER_E2E_CHUNK}" in
core)
docker pull "${OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE}"
;;
package-update)
docker pull "${OPENCLAW_DOCKER_E2E_BARE_IMAGE}"
;;
plugins-integrations)
docker pull "${OPENCLAW_DOCKER_E2E_BARE_IMAGE}"
docker pull "${OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE}"
;;
*)
docker pull "${OPENCLAW_DOCKER_E2E_IMAGE}"
;;
esac
- name: Validate chunk credentials
shell: bash
@@ -534,6 +550,15 @@ jobs:
const rerun = String(lane.rerunCommand ?? "").replaceAll("`", "\\`");
console.log(`| \`${lane.name}\` | ${status} | ${lane.elapsedSeconds ?? ""} | ${lane.timedOut ? "yes" : "no"} | \`${rerun}\` |`);
}
const phases = Array.isArray(summary.phases) ? summary.phases : [];
if (phases.length > 0) {
console.log("");
console.log("| Phase | Seconds | Status | Image kind |");
console.log("| --- | ---: | --- | --- |");
for (const phase of phases) {
console.log(`| \`${phase.name}\` | ${phase.elapsedSeconds ?? ""} | ${phase.status ?? ""} | ${phase.imageKind ?? ""} |`);
}
}
NODE
- name: Upload Docker E2E chunk artifacts
@@ -596,6 +621,8 @@ jobs:
OPENCLAW_GEMINI_SETTINGS_JSON: ${{ secrets.OPENCLAW_GEMINI_SETTINGS_JSON }}
FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
OPENCLAW_DOCKER_E2E_IMAGE: ${{ needs.prepare_docker_e2e_image.outputs.image }}
OPENCLAW_DOCKER_E2E_BARE_IMAGE: ${{ needs.prepare_docker_e2e_image.outputs.bare_image }}
OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE: ${{ needs.prepare_docker_e2e_image.outputs.functional_image }}
OPENCLAW_SKIP_DOCKER_BUILD: "1"
INCLUDE_OPENWEBUI: ${{ inputs.include_openwebui }}
DOCKER_E2E_LANES: ${{ inputs.docker_lanes }}
@@ -623,11 +650,12 @@ jobs:
- name: Hydrate live auth/profile inputs
run: bash scripts/ci-hydrate-live-auth.sh
- name: Pull shared Docker E2E image
- name: Pull shared Docker E2E images
shell: bash
run: |
set -euo pipefail
docker pull "${OPENCLAW_DOCKER_E2E_IMAGE}"
docker pull "${OPENCLAW_DOCKER_E2E_BARE_IMAGE}"
docker pull "${OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE}"
- name: Validate targeted lane credentials
shell: bash
@@ -695,6 +723,15 @@ jobs:
const rerun = String(lane.rerunCommand ?? "").replaceAll("`", "\\`");
console.log(`| \`${lane.name}\` | ${status} | ${lane.elapsedSeconds ?? ""} | ${lane.timedOut ? "yes" : "no"} | \`${rerun}\` |`);
}
const phases = Array.isArray(summary.phases) ? summary.phases : [];
if (phases.length > 0) {
console.log("");
console.log("| Phase | Seconds | Status | Image kind |");
console.log("| --- | ---: | --- | --- |");
for (const phase of phases) {
console.log(`| \`${phase.name}\` | ${phase.elapsedSeconds ?? ""} | ${phase.status ?? ""} | ${phase.imageKind ?? ""} |`);
}
}
NODE
- name: Upload targeted Docker E2E artifacts
@@ -714,6 +751,7 @@ jobs:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
OPENCLAW_DOCKER_E2E_IMAGE: ${{ needs.prepare_docker_e2e_image.outputs.image }}
OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE: ${{ needs.prepare_docker_e2e_image.outputs.functional_image }}
OPENCLAW_SKIP_DOCKER_BUILD: "1"
steps:
- name: Checkout selected ref
@@ -758,6 +796,8 @@ jobs:
packages: write
outputs:
image: ${{ steps.image.outputs.image }}
bare_image: ${{ steps.image.outputs.bare_image }}
functional_image: ${{ steps.image.outputs.functional_image }}
env:
DOCKER_BUILD_SUMMARY: "false"
DOCKER_BUILD_RECORD_UPLOAD: "false"
@@ -768,7 +808,7 @@ jobs:
ref: ${{ needs.validate_selected_ref.outputs.selected_sha }}
fetch-depth: 1
- name: Resolve shared Docker E2E image tag
- name: Resolve shared Docker E2E image tags
id: image
shell: bash
env:
@@ -776,9 +816,14 @@ jobs:
run: |
set -euo pipefail
repository="${GITHUB_REPOSITORY,,}"
image="ghcr.io/${repository}-docker-e2e:${SELECTED_SHA}"
bare_image="ghcr.io/${repository}-docker-e2e-bare:${SELECTED_SHA}"
functional_image="ghcr.io/${repository}-docker-e2e-functional:${SELECTED_SHA}"
image="$functional_image"
echo "image=$image" >> "$GITHUB_OUTPUT"
echo "Shared Docker E2E image: \`$image\`" >> "$GITHUB_STEP_SUMMARY"
echo "bare_image=$bare_image" >> "$GITHUB_OUTPUT"
echo "functional_image=$functional_image" >> "$GITHUB_OUTPUT"
echo "Shared Docker E2E bare image: \`$bare_image\`" >> "$GITHUB_STEP_SUMMARY"
echo "Shared Docker E2E functional image: \`$functional_image\`" >> "$GITHUB_STEP_SUMMARY"
- name: Log in to GHCR
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4
@@ -790,16 +835,33 @@ jobs:
- name: Setup Docker builder
uses: useblacksmith/setup-docker-builder@ac083cc84672d01c60d5e8561d0a939b697de542 # v1
- name: Build and push shared Docker E2E image
- name: Build and push bare Docker E2E image
if: inputs.include_release_path_suites || inputs.docker_lanes != ''
uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0
with:
context: .
file: ./scripts/e2e/Dockerfile
target: build
platforms: linux/amd64
cache-from: type=gha,scope=docker-e2e
cache-to: type=gha,mode=max,scope=docker-e2e
tags: ${{ steps.image.outputs.image }}
cache-from: type=gha,scope=docker-e2e-bare
cache-to: type=gha,mode=max,scope=docker-e2e-bare
tags: ${{ steps.image.outputs.bare_image }}
sbom: true
provenance: mode=max
push: true
- name: Build and push functional Docker E2E image
uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0
with:
context: .
file: ./scripts/e2e/Dockerfile
target: functional
platforms: linux/amd64
cache-from: |
type=gha,scope=docker-e2e-bare
type=gha,scope=docker-e2e-functional
cache-to: type=gha,mode=max,scope=docker-e2e-functional
tags: ${{ steps.image.outputs.functional_image }}
sbom: true
provenance: mode=max
push: true

View File

@@ -92,7 +92,7 @@ Scope logic lives in `scripts/ci-changed-scope.mjs` and is covered by unit tests
CI workflow edits validate the Node CI graph plus workflow linting, but do not force Windows, Android, or macOS native builds by themselves; those platform lanes stay scoped to platform source changes.
CI routing-only edits, selected cheap core-test fixture edits, and narrow plugin contract helper/test-routing edits use a fast Node-only manifest path: preflight, security, and a single `checks-fast-core` task. That path avoids build artifacts, Node 22 compatibility, channel contracts, full core shards, bundled-plugin shards, and additional guard matrices when the changed files are limited to the routing or helper surfaces that the fast task exercises directly.
Windows Node checks are scoped to Windows-specific process/path wrappers, npm/pnpm/UI runner helpers, package manager config, and the CI workflow surfaces that execute that lane; unrelated source, plugin, install-smoke, and test-only changes stay on the Linux Node lanes so they do not reserve a 16-vCPU Windows worker for coverage that is already exercised by the normal test shards.
The separate `install-smoke` workflow reuses the same scope script through its own `preflight` job. It splits smoke coverage into `run_fast_install_smoke` and `run_full_install_smoke`. Pull requests run the fast path for Docker/package surfaces, bundled plugin package/manifest changes, and core plugin/channel/gateway/Plugin SDK surfaces that the Docker smoke jobs exercise. Source-only bundled plugin changes, test-only edits, and docs-only edits do not reserve Docker workers. The fast path builds the root Dockerfile image once, checks the CLI, runs the agents delete shared-workspace CLI smoke, runs the container gateway-network e2e, verifies a bundled extension build arg, and runs the bounded bundled-plugin Docker profile under a 240-second aggregate command timeout with each scenario's Docker run capped separately. The full path keeps QR package install and installer Docker/update coverage for nightly scheduled runs, manual dispatches, workflow-call release checks, and pull requests that truly touch installer/package/Docker surfaces. `main` pushes, including merge commits, do not force the full path; when changed-scope logic would request full coverage on a push, the workflow keeps the fast Docker smoke and leaves the full install smoke to nightly or release validation. The slow Bun global install image-provider smoke is separately gated by `run_bun_global_install_smoke`; it runs on the nightly schedule and from the release checks workflow, and manual `install-smoke` dispatches can opt into it, but pull requests and `main` pushes do not run it. QR and installer Docker tests keep their own install-focused Dockerfiles. 
Local `test:docker:all` prebuilds one shared live-test image and one shared `scripts/e2e/Dockerfile` built-app image, then runs the live/E2E smoke lanes with a weighted scheduler and `OPENCLAW_SKIP_DOCKER_BUILD=1`; tune the default main-pool slot count of 10 with `OPENCLAW_DOCKER_ALL_PARALLELISM` and the provider-sensitive tail-pool slot count of 10 with `OPENCLAW_DOCKER_ALL_TAIL_PARALLELISM`. Heavy lane caps default to `OPENCLAW_DOCKER_ALL_LIVE_LIMIT=6`, `OPENCLAW_DOCKER_ALL_NPM_LIMIT=8`, and `OPENCLAW_DOCKER_ALL_SERVICE_LIMIT=7` so npm install and multi-service lanes do not overcommit Docker while lighter lanes still fill available slots. Lane starts are staggered by 2 seconds by default to avoid local Docker daemon create storms; override with `OPENCLAW_DOCKER_ALL_START_STAGGER_MS=0` or another millisecond value. The local aggregate preflights Docker, removes stale OpenClaw E2E containers, emits active-lane status, persists lane timings for longest-first ordering, and supports `OPENCLAW_DOCKER_ALL_DRY_RUN=1` for scheduler inspection. It stops scheduling new pooled lanes after the first failure by default, and each lane has a 120-minute fallback timeout overrideable with `OPENCLAW_DOCKER_ALL_LANE_TIMEOUT_MS`; selected live/tail lanes use tighter per-lane caps. `OPENCLAW_DOCKER_ALL_LANES=<lane[,lane]>` runs exact scheduler lanes, including release-only lanes such as `install-e2e` and split bundled update lanes such as `bundled-channel-update-acpx`, while skipping the cleanup smoke so agents can reproduce one failed lane. The reusable live/E2E workflow builds and pushes one SHA-tagged GHCR Docker E2E image, then runs the release-path Docker suite as at most three chunked jobs with `OPENCLAW_SKIP_DOCKER_BUILD=1` so each chunk pulls the shared image once and executes multiple lanes through the same weighted scheduler (`OPENCLAW_DOCKER_ALL_PROFILE=release-path`, `OPENCLAW_DOCKER_ALL_CHUNK=core|package-update|plugins-integrations`). 
Each chunk uploads `.artifacts/docker-tests/` with lane logs, timings, `summary.json`, and per-lane rerun commands. The workflow `docker_lanes` input runs selected lanes against the prepared image instead of the three chunk jobs, which keeps failed-lane debugging bounded to one targeted Docker job; if a selected lane is a live Docker lane, the targeted job builds the live-test image locally for that rerun. When Open WebUI is requested with the release-path suite, it runs inside the plugins/integrations chunk instead of reserving a fourth Docker worker; Open WebUI keeps a standalone job only for openwebui-only dispatches. The scheduled live/E2E workflow runs the full release-path Docker suite daily. The bundled update matrix is split by update target so repeated npm update and doctor repair passes can shard with other bundled checks.
The separate `install-smoke` workflow reuses the same scope script through its own `preflight` job. It splits smoke coverage into `run_fast_install_smoke` and `run_full_install_smoke`. Pull requests run the fast path for Docker/package surfaces, bundled plugin package/manifest changes, and core plugin/channel/gateway/Plugin SDK surfaces that the Docker smoke jobs exercise. Source-only bundled plugin changes, test-only edits, and docs-only edits do not reserve Docker workers. The fast path builds the root Dockerfile image once, checks the CLI, runs the agents delete shared-workspace CLI smoke, runs the container gateway-network e2e, verifies a bundled extension build arg, and runs the bounded bundled-plugin Docker profile under a 240-second aggregate command timeout with each scenario's Docker run capped separately. The full path keeps QR package install and installer Docker/update coverage for nightly scheduled runs, manual dispatches, workflow-call release checks, and pull requests that truly touch installer/package/Docker surfaces. `main` pushes, including merge commits, do not force the full path; when changed-scope logic would request full coverage on a push, the workflow keeps the fast Docker smoke and leaves the full install smoke to nightly or release validation. The slow Bun global install image-provider smoke is separately gated by `run_bun_global_install_smoke`; it runs on the nightly schedule and from the release checks workflow, and manual `install-smoke` dispatches can opt into it, but pull requests and `main` pushes do not run it. QR and installer Docker tests keep their own install-focused Dockerfiles. Local `test:docker:all` prebuilds one shared live-test image plus two shared `scripts/e2e/Dockerfile` built-app images: a bare image for installer/update/plugin-dependency lanes and a functional image that pre-stages bundled plugin runtime dependencies for normal functionality lanes. 
The scheduler selects the image per lane with `OPENCLAW_DOCKER_E2E_BARE_IMAGE` and `OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE`, then runs lanes with `OPENCLAW_SKIP_DOCKER_BUILD=1`; tune the default main-pool slot count of 10 with `OPENCLAW_DOCKER_ALL_PARALLELISM` and the provider-sensitive tail-pool slot count of 10 with `OPENCLAW_DOCKER_ALL_TAIL_PARALLELISM`. Heavy lane caps default to `OPENCLAW_DOCKER_ALL_LIVE_LIMIT=6`, `OPENCLAW_DOCKER_ALL_NPM_LIMIT=8`, and `OPENCLAW_DOCKER_ALL_SERVICE_LIMIT=7` so npm install and multi-service lanes do not overcommit Docker while lighter lanes still fill available slots. Lane starts are staggered by 2 seconds by default to avoid local Docker daemon create storms; override with `OPENCLAW_DOCKER_ALL_START_STAGGER_MS=0` or another millisecond value. The local aggregate preflights Docker, removes stale OpenClaw E2E containers, emits active-lane status, persists lane timings for longest-first ordering, and supports `OPENCLAW_DOCKER_ALL_DRY_RUN=1` for scheduler inspection. It stops scheduling new pooled lanes after the first failure by default, and each lane has a 120-minute fallback timeout overrideable with `OPENCLAW_DOCKER_ALL_LANE_TIMEOUT_MS`; selected live/tail lanes use tighter per-lane caps. `OPENCLAW_DOCKER_ALL_LANES=<lane[,lane]>` runs exact scheduler lanes, including release-only lanes such as `install-e2e` and split bundled update lanes such as `bundled-channel-update-acpx`, while skipping the cleanup smoke so agents can reproduce one failed lane. The reusable live/E2E workflow builds and pushes one SHA-tagged bare GHCR Docker E2E image and one SHA-tagged functional GHCR Docker E2E image, then runs the release-path Docker suite as at most three chunked jobs with `OPENCLAW_SKIP_DOCKER_BUILD=1` so each chunk pulls the image kind it needs and executes multiple lanes through the same weighted scheduler (`OPENCLAW_DOCKER_ALL_PROFILE=release-path`, `OPENCLAW_DOCKER_ALL_CHUNK=core|package-update|plugins-integrations`). 
Each chunk uploads `.artifacts/docker-tests/` with lane logs, timings, `summary.json`, phase timings, and per-lane rerun commands. The workflow `docker_lanes` input runs selected lanes against the prepared images instead of the three chunk jobs, which keeps failed-lane debugging bounded to one targeted Docker job; if a selected lane is a live Docker lane, the targeted job builds the live-test image locally for that rerun. When Open WebUI is requested with the release-path suite, it runs inside the plugins/integrations chunk instead of reserving a fourth Docker worker; Open WebUI keeps a standalone job only for openwebui-only dispatches. The scheduled live/E2E workflow runs the full release-path Docker suite daily. The bundled update matrix is split by update target so repeated npm update and doctor repair passes can shard with other bundled checks.
Local changed-lane logic lives in `scripts/changed-lanes.mjs` and is executed by `scripts/check-changed.mjs`. That local gate is stricter about architecture boundaries than the broad CI platform scope: core production changes run core prod typecheck plus core tests, core test-only changes run only core test typecheck/tests, extension production changes run extension prod typecheck plus extension tests, and extension test-only changes run only extension test typecheck/tests. Public Plugin SDK or plugin-contract changes expand to extension validation because extensions depend on those core contracts. Release metadata-only version bumps run targeted version/config/root-dependency checks. Unknown root/config changes fail safe to all lanes.

View File

@@ -33,7 +33,7 @@ title: "Tests"
- Gateway integration: opt-in via `OPENCLAW_TEST_INCLUDE_GATEWAY=1 pnpm test` or `pnpm test:gateway`.
- `pnpm test:e2e`: Runs gateway end-to-end smoke tests (multi-instance WS/HTTP/node pairing). Defaults to `threads` + `isolate: false` with adaptive workers in `vitest.e2e.config.ts`; tune with `OPENCLAW_E2E_WORKERS=<n>` and set `OPENCLAW_E2E_VERBOSE=1` for verbose logs.
- `pnpm test:live`: Runs provider live tests (minimax/zai). Requires API keys and `LIVE=1` (or provider-specific `*_LIVE_TEST=1`) to unskip.
- `pnpm test:docker:all`: Builds the shared live-test image and Docker E2E image once, then runs the Docker smoke lanes with `OPENCLAW_SKIP_DOCKER_BUILD=1` through a weighted scheduler. `OPENCLAW_DOCKER_ALL_PARALLELISM=<n>` controls process slots and defaults to 10; `OPENCLAW_DOCKER_ALL_TAIL_PARALLELISM=<n>` controls the provider-sensitive tail pool and defaults to 10. Heavy lane caps default to `OPENCLAW_DOCKER_ALL_LIVE_LIMIT=9`, `OPENCLAW_DOCKER_ALL_NPM_LIMIT=10`, and `OPENCLAW_DOCKER_ALL_SERVICE_LIMIT=7`; provider caps default to one heavy lane per provider via `OPENCLAW_DOCKER_ALL_LIVE_CLAUDE_LIMIT=4`, `OPENCLAW_DOCKER_ALL_LIVE_CODEX_LIMIT=4`, and `OPENCLAW_DOCKER_ALL_LIVE_GEMINI_LIMIT=4`. Use `OPENCLAW_DOCKER_ALL_WEIGHT_LIMIT` or `OPENCLAW_DOCKER_ALL_DOCKER_LIMIT` for larger hosts. Lane starts are staggered by 2 seconds by default to avoid local Docker daemon create storms; override with `OPENCLAW_DOCKER_ALL_START_STAGGER_MS=<ms>`. The runner preflights Docker by default, cleans stale OpenClaw E2E containers, emits active-lane status every 30 seconds, shares provider CLI tool caches between compatible lanes, retries transient live-provider failures once by default (`OPENCLAW_DOCKER_ALL_LIVE_RETRIES=<n>`), and stores lane timings in `.artifacts/docker-tests/lane-timings.json` for longest-first ordering on later runs. Use `OPENCLAW_DOCKER_ALL_DRY_RUN=1` to print the lane manifest without running Docker, `OPENCLAW_DOCKER_ALL_STATUS_INTERVAL_MS=<ms>` to tune status output, or `OPENCLAW_DOCKER_ALL_TIMINGS=0` to disable timing reuse. Use `OPENCLAW_DOCKER_ALL_LIVE_MODE=skip` for deterministic/local lanes only or `OPENCLAW_DOCKER_ALL_LIVE_MODE=only` for live-provider lanes only; package aliases are `pnpm test:docker:local:all` and `pnpm test:docker:live:all`. Live-only mode merges main and tail live lanes into one longest-first pool so provider buckets can pack Claude, Codex, and Gemini work together. 
The runner stops scheduling new pooled lanes after the first failure unless `OPENCLAW_DOCKER_ALL_FAIL_FAST=0` is set, and each lane has a 120-minute fallback timeout overrideable with `OPENCLAW_DOCKER_ALL_LANE_TIMEOUT_MS`; selected live/tail lanes use tighter per-lane caps. CLI backend Docker setup commands have their own timeout via `OPENCLAW_LIVE_CLI_BACKEND_SETUP_TIMEOUT_SECONDS` (default 180). Per-lane logs are written under `.artifacts/docker-tests/<run-id>/`.
- `pnpm test:docker:all`: Builds the shared live-test image plus two Docker E2E images once, then runs the Docker smoke lanes with `OPENCLAW_SKIP_DOCKER_BUILD=1` through a weighted scheduler. The bare image (`OPENCLAW_DOCKER_E2E_BARE_IMAGE`) is used for installer/update/plugin-dependency lanes; the functional image (`OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE`) pre-stages bundled plugin runtime dependencies for normal functionality lanes. `OPENCLAW_DOCKER_ALL_PARALLELISM=<n>` controls process slots and defaults to 10; `OPENCLAW_DOCKER_ALL_TAIL_PARALLELISM=<n>` controls the provider-sensitive tail pool and defaults to 10. Heavy lane caps default to `OPENCLAW_DOCKER_ALL_LIVE_LIMIT=9`, `OPENCLAW_DOCKER_ALL_NPM_LIMIT=10`, and `OPENCLAW_DOCKER_ALL_SERVICE_LIMIT=7`; provider caps default to one heavy lane per provider via `OPENCLAW_DOCKER_ALL_LIVE_CLAUDE_LIMIT=4`, `OPENCLAW_DOCKER_ALL_LIVE_CODEX_LIMIT=4`, and `OPENCLAW_DOCKER_ALL_LIVE_GEMINI_LIMIT=4`. Use `OPENCLAW_DOCKER_ALL_WEIGHT_LIMIT` or `OPENCLAW_DOCKER_ALL_DOCKER_LIMIT` for larger hosts. Lane starts are staggered by 2 seconds by default to avoid local Docker daemon create storms; override with `OPENCLAW_DOCKER_ALL_START_STAGGER_MS=<ms>`. The runner preflights Docker by default, cleans stale OpenClaw E2E containers, emits active-lane status every 30 seconds, shares provider CLI tool caches between compatible lanes, retries transient live-provider failures once by default (`OPENCLAW_DOCKER_ALL_LIVE_RETRIES=<n>`), and stores lane timings in `.artifacts/docker-tests/lane-timings.json` for longest-first ordering on later runs. Use `OPENCLAW_DOCKER_ALL_DRY_RUN=1` to print the lane manifest without running Docker, `OPENCLAW_DOCKER_ALL_STATUS_INTERVAL_MS=<ms>` to tune status output, or `OPENCLAW_DOCKER_ALL_TIMINGS=0` to disable timing reuse. 
Use `OPENCLAW_DOCKER_ALL_LIVE_MODE=skip` for deterministic/local lanes only or `OPENCLAW_DOCKER_ALL_LIVE_MODE=only` for live-provider lanes only; package aliases are `pnpm test:docker:local:all` and `pnpm test:docker:live:all`. Live-only mode merges main and tail live lanes into one longest-first pool so provider buckets can pack Claude, Codex, and Gemini work together. The runner stops scheduling new pooled lanes after the first failure unless `OPENCLAW_DOCKER_ALL_FAIL_FAST=0` is set, and each lane has a 120-minute fallback timeout overrideable with `OPENCLAW_DOCKER_ALL_LANE_TIMEOUT_MS`; selected live/tail lanes use tighter per-lane caps. CLI backend Docker setup commands have their own timeout via `OPENCLAW_LIVE_CLI_BACKEND_SETUP_TIMEOUT_SECONDS` (default 180). Per-lane logs and `summary.json` phase timings are written under `.artifacts/docker-tests/<run-id>/`.
- `pnpm test:docker:browser-cdp-snapshot`: Builds a Chromium-backed source E2E container, starts raw CDP plus an isolated Gateway, runs `browser doctor --deep`, and verifies CDP role snapshots include link URLs, cursor-promoted clickables, iframe refs, and frame metadata.
- CLI backend live Docker probes can be run as focused lanes, for example `pnpm test:docker:live-cli-backend:codex`, `pnpm test:docker:live-cli-backend:codex:resume`, or `pnpm test:docker:live-cli-backend:codex:mcp`. Claude and Gemini have matching `:resume` and `:mcp` aliases.
- `pnpm test:docker:openwebui`: Starts Dockerized OpenClaw + Open WebUI, signs in through Open WebUI, checks `/api/models`, then runs a real proxied chat through `/api/chat/completions`. Requires a usable live model key (for example OpenAI in `~/.profile`), pulls an external Open WebUI image, and is not expected to be CI-stable like the normal unit/e2e suites.

View File

@@ -60,3 +60,9 @@ RUN mkdir -p dist/control-ui \
&& printf '%s\n' '<!doctype html><title>OpenClaw Control UI</title>' > dist/control-ui/index.html
CMD ["bash"]
# Functional image stage: extends the bare `build` stage and pre-stages
# bundled plugin runtime dependencies so functionality lanes can run without
# an install step. NOTE(review): behavior of the staging script assumed from
# its name and surrounding docs — confirm against scripts/stage-bundled-plugin-runtime-deps.mjs.
FROM build AS functional
RUN node scripts/stage-bundled-plugin-runtime-deps.mjs
CMD ["bash"]

View File

@@ -4,7 +4,7 @@ set -euo pipefail
# Resolve and build (or reuse) the shared Docker E2E image.
# Fix: the stale pre-split assignments of IMAGE_NAME/DOCKER_TARGET
# ("openclaw-docker-e2e:local" / target "build") were dead code immediately
# overwritten by the functional-image assignments below; they are removed.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
source "$ROOT_DIR/scripts/lib/docker-e2e-image.sh"
# Default to the functional image/target; OPENCLAW_DOCKER_E2E_TARGET overrides the stage.
IMAGE_NAME="$(docker_e2e_resolve_image "openclaw-docker-e2e-functional:local")"
DOCKER_TARGET="${OPENCLAW_DOCKER_E2E_TARGET:-functional}"
docker_e2e_build_or_reuse "$IMAGE_NAME" docker-e2e "$ROOT_DIR/scripts/e2e/Dockerfile" "$ROOT_DIR" "$DOCKER_TARGET"

View File

@@ -5,7 +5,9 @@ import path from "node:path";
import { fileURLToPath } from "node:url";
const ROOT_DIR = path.resolve(path.dirname(fileURLToPath(import.meta.url)), "..");
// Default local image tags for the split Docker E2E images.
// Fix: the stale pre-split `const DEFAULT_E2E_IMAGE = "openclaw-docker-e2e:local"`
// declaration duplicated the binding below, which is a SyntaxError
// (redeclaration of a `const`); only the functional-backed default remains.
const DEFAULT_E2E_BARE_IMAGE = "openclaw-docker-e2e-bare:local";
const DEFAULT_E2E_FUNCTIONAL_IMAGE = "openclaw-docker-e2e-functional:local";
// The generic E2E image defaults to the functional variant.
const DEFAULT_E2E_IMAGE = DEFAULT_E2E_FUNCTIONAL_IMAGE;
const DEFAULT_PARALLELISM = 10;
const DEFAULT_TAIL_PARALLELISM = 10;
const DEFAULT_FAILURE_TAIL_LINES = 80;
@@ -49,6 +51,7 @@ function lane(name, command, options = {}) {
return {
cacheKey: options.cacheKey,
command,
e2eImageKind: options.e2eImageKind ?? (options.live ? undefined : "functional"),
estimateSeconds: options.estimateSeconds,
live: options.live === true,
name,
@@ -104,6 +107,7 @@ function liveLane(name, command, options = {}) {
function npmLane(name, command, options = {}) {
return lane(name, command, {
...options,
e2eImageKind: options.e2eImageKind ?? "bare",
resources: ["npm", ...(options.resources ?? [])],
weight: options.weight ?? 2,
});
@@ -607,7 +611,8 @@ function laneSummary(poolLane) {
const timeout = poolLane.timeoutMs ? ` timeout=${Math.round(poolLane.timeoutMs / 1000)}s` : "";
const retries = poolLane.retries > 0 ? ` retries=${poolLane.retries}` : "";
const cache = poolLane.cacheKey ? ` cache=${poolLane.cacheKey}` : "";
return `${poolLane.name}(w=${laneWeight(poolLane)} r=${resources}${timeout}${retries}${cache})`;
const image = poolLane.e2eImageKind ? ` image=${poolLane.e2eImageKind}` : "";
return `${poolLane.name}(w=${laneWeight(poolLane)} r=${resources}${timeout}${retries}${cache}${image})`;
}
function sleep(ms) {
@@ -645,17 +650,47 @@ function shellQuote(value) {
}
/**
 * Build a copy-pasteable `pnpm test:docker:all` invocation that reruns a
 * single lane against the already-prepared images.
 *
 * Fix: the env array previously contained two OPENCLAW_DOCKER_E2E_IMAGE
 * entries (the stale `baseEnv`-only form plus the lane-aware form), so the
 * generated command assigned the variable twice; only the lane-aware entry
 * is kept.
 *
 * @param {string} name - Scheduler lane name to rerun.
 * @param {Record<string, string|undefined>} baseEnv - Base image env vars.
 * @returns {string} Shell command string with env prefix.
 */
function buildLaneRerunCommand(name, baseEnv) {
  const poolLane = findLaneByName(name);
  // live-* lanes rebuild the live-test image locally; everything else reuses
  // the prepared images with build disabled.
  const build = name.startsWith("live-") ? "1" : "0";
  const image = poolLane ? e2eImageForLane(poolLane, baseEnv) : baseEnv.OPENCLAW_DOCKER_E2E_IMAGE;
  const env = [
    ["OPENCLAW_DOCKER_ALL_LANES", name],
    ["OPENCLAW_DOCKER_ALL_BUILD", build],
    ["OPENCLAW_DOCKER_ALL_PREFLIGHT", "0"],
    ["OPENCLAW_SKIP_DOCKER_BUILD", "1"],
    ["OPENCLAW_DOCKER_E2E_IMAGE", image || DEFAULT_E2E_IMAGE],
    ["OPENCLAW_DOCKER_E2E_BARE_IMAGE", baseEnv.OPENCLAW_DOCKER_E2E_BARE_IMAGE],
    ["OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE", baseEnv.OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE],
  ];
  return `${env.map(([key, value]) => `${key}=${shellQuote(value)}`).join(" ")} pnpm test:docker:all`;
}
/**
 * Look up a lane definition by name across every pool the scheduler knows
 * about (release-path lanes including Open WebUI, main lanes, tail lanes).
 *
 * @param {string} name - Lane name to locate.
 * @returns {object|undefined} The matching lane, or undefined when absent.
 */
function findLaneByName(name) {
  const candidates = dedupeLanes([
    ...allReleasePathLanes({ includeOpenWebUI: true }),
    ...lanes,
    ...tailLanes,
  ]);
  for (const candidate of candidates) {
    if (candidate.name === name) {
      return candidate;
    }
  }
  return undefined;
}
/**
 * Resolve the concrete image reference for an image kind, falling back to
 * the generic OPENCLAW_DOCKER_E2E_IMAGE when the kind-specific variable is
 * unset or the kind is unrecognized.
 *
 * @param {string|undefined} kind - "bare", "functional", or anything else.
 * @param {Record<string, string|undefined>} baseEnv - Image env vars.
 * @returns {string|undefined} Image reference, if any is configured.
 */
function e2eImageForKind(kind, baseEnv) {
  switch (kind) {
    case "bare":
      return baseEnv.OPENCLAW_DOCKER_E2E_BARE_IMAGE || baseEnv.OPENCLAW_DOCKER_E2E_IMAGE;
    case "functional":
      return baseEnv.OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE || baseEnv.OPENCLAW_DOCKER_E2E_IMAGE;
    default:
      return baseEnv.OPENCLAW_DOCKER_E2E_IMAGE;
  }
}
/**
 * Convenience wrapper: resolve a lane's image by its declared image kind.
 *
 * @param {object} poolLane - Lane definition carrying `e2eImageKind`.
 * @param {Record<string, string|undefined>} baseEnv - Image env vars.
 * @returns {string|undefined} Image reference for the lane.
 */
function e2eImageForLane(poolLane, baseEnv) {
  const { e2eImageKind } = poolLane;
  return e2eImageForKind(e2eImageKind, baseEnv);
}
/**
 * Report whether any lane in the pool is tagged with the given image kind,
 * so callers can decide which prepared images must exist/be pulled.
 *
 * @param {object[]} poolLanes - Lane definitions to inspect.
 * @param {string} kind - Image kind to look for ("bare" / "functional").
 * @returns {boolean} True when at least one lane declares that kind.
 */
function lanesNeedE2eImageKind(poolLanes, kind) {
  for (const poolLane of poolLanes) {
    if (poolLane.e2eImageKind === kind) {
      return true;
    }
  }
  return false;
}
function timingSeconds(timingStore, poolLane) {
const fromStore = timingStore?.lanes?.[poolLane.name]?.durationSeconds;
if (typeof fromStore === "number" && Number.isFinite(fromStore) && fromStore > 0) {
@@ -734,6 +769,33 @@ async function writeRunSummary(logDir, summary) {
console.log(`==> Docker run summary: ${file}`);
}
/**
 * Whole seconds elapsed since the given epoch-milliseconds timestamp,
 * rounded to the nearest second.
 *
 * @param {number} startedAtMs - Start time from Date.now().
 * @returns {number} Rounded elapsed seconds.
 */
function phaseElapsedSeconds(startedAtMs) {
  const elapsedMs = Date.now() - startedAtMs;
  return Math.round(elapsedMs / 1000);
}
/**
 * Execute `fn` as a named, timed phase. A record (seeded from `details`)
 * is always appended to `phases` — pass or fail — with start/finish
 * timestamps, rounded elapsed seconds, and status; failures capture the
 * error message and are rethrown to the caller.
 *
 * @param {object[]} phases - Accumulator the phase record is pushed onto.
 * @param {string} name - Phase name used in the record and log line.
 * @param {object} details - Extra fields merged into the record.
 * @param {() => Promise<any>} fn - Work to run under the phase.
 * @returns {Promise<any>} Whatever `fn` resolves to.
 */
async function runPhase(phases, name, details, fn) {
  const phaseStart = Date.now();
  const record = {
    ...details,
    name,
    startedAt: new Date(phaseStart).toISOString(),
  };
  try {
    const value = await fn();
    record.status = "passed";
    return value;
  } catch (cause) {
    record.status = "failed";
    record.error = cause instanceof Error ? cause.message : String(cause);
    throw cause;
  } finally {
    // Always record timing and publish the record, even on failure.
    record.elapsedSeconds = phaseElapsedSeconds(phaseStart);
    record.finishedAt = new Date().toISOString();
    phases.push(record);
    console.log(`==> Phase ${record.status}: ${name} ${record.elapsedSeconds}s`);
  }
}
function printLaneManifest(label, poolLanes, timingStore) {
console.log(`==> ${label} lanes (${poolLanes.length})`);
for (const [index, poolLane] of poolLanes.entries()) {
@@ -858,19 +920,31 @@ async function runForeground(label, command, env) {
}
async function runForegroundGroup(entries, env) {
const results = await Promise.allSettled(
entries.map(async ([label, command]) => {
await runForeground(label, command, env);
}),
);
const failures = results
.map((result, index) => ({ result, entry: entries[index] }))
.filter(({ result }) => result.status === "rejected");
const failures = [];
for (const entry of entries) {
try {
const label = entry.label ?? entry[0];
const command = entry.command ?? entry[1];
const entryEnv = { ...env, ...entry.env };
const phases = entry.phases;
const details = entry.phaseDetails ?? {};
if (phases) {
await runPhase(phases, `build:${label}`, details, async () => {
await runForeground(label, command, entryEnv);
});
} else {
await runForeground(label, command, entryEnv);
}
} catch (error) {
failures.push({ entry, error });
}
}
if (failures.length > 0) {
throw new Error(
failures
.map(
({ result, entry }) => `${entry[0]}: ${result.reason?.message ?? String(result.reason)}`,
({ entry, error }) =>
`${entry.label ?? entry[0]}: ${error instanceof Error ? error.message : String(error)}`,
)
.join("\n"),
);
@@ -952,12 +1026,12 @@ async function prepareBundledChannelPackage(baseEnv, logDir) {
"cat /tmp/openclaw-pack.out",
].join("\n");
await runForeground(
"Pack bundled channel package once from Docker E2E image",
"Pack bundled channel package once from bare Docker E2E image",
[
"docker run --rm",
"-e COREPACK_ENABLE_DOWNLOAD_PROMPT=0",
`-v ${shellQuote(packDir)}:/tmp/openclaw-pack`,
shellQuote(baseEnv.OPENCLAW_DOCKER_E2E_IMAGE),
shellQuote(baseEnv.OPENCLAW_DOCKER_E2E_BARE_IMAGE),
"bash -lc",
shellQuote(packScript),
].join(" "),
@@ -978,10 +1052,18 @@ async function prepareBundledChannelPackage(baseEnv, logDir) {
console.log(`==> Bundled channel package: ${baseEnv.OPENCLAW_BUNDLED_CHANNEL_PACKAGE_TGZ}`);
}
function laneEnv(name, baseEnv, logDir, cacheKey) {
function laneEnv(poolLane, baseEnv, logDir, cacheKey) {
const env = {
...baseEnv,
};
const name = poolLane.name;
const image = e2eImageForLane(poolLane, baseEnv);
if (image) {
env.OPENCLAW_DOCKER_E2E_IMAGE = image;
}
if (poolLane.e2eImageKind) {
env.OPENCLAW_DOCKER_E2E_IMAGE_KIND = poolLane.e2eImageKind;
}
const cacheName = cacheKey || name;
if (!process.env.OPENCLAW_DOCKER_CLI_TOOLS_DIR) {
env.OPENCLAW_DOCKER_CLI_TOOLS_DIR = path.join(logDir, `${cacheName}-cli-tools`);
@@ -996,7 +1078,7 @@ async function runLane(lane, baseEnv, logDir, fallbackTimeoutMs) {
const { command, name } = lane;
const timeoutMs = lane.timeoutMs ?? fallbackTimeoutMs;
const logFile = path.join(logDir, `${name}.log`);
const env = laneEnv(name, baseEnv, logDir, lane.cacheKey);
const env = laneEnv(lane, baseEnv, logDir, lane.cacheKey);
await mkdir(env.OPENCLAW_DOCKER_CLI_TOOLS_DIR, { recursive: true });
await mkdir(env.OPENCLAW_DOCKER_CACHE_HOME_DIR, { recursive: true });
await fs.promises.writeFile(
@@ -1006,19 +1088,32 @@ async function runLane(lane, baseEnv, logDir, fallbackTimeoutMs) {
`==> [${name}] cache dir: ${env.OPENCLAW_DOCKER_CACHE_HOME_DIR}`,
`==> [${name}] timeout: ${timeoutMs}ms`,
`==> [${name}] retries: ${lane.retries ?? 0}`,
`==> [${name}] e2e image kind: ${lane.e2eImageKind ?? "none"}`,
`==> [${name}] e2e image: ${env.OPENCLAW_DOCKER_E2E_IMAGE ?? ""}`,
"",
].join("\n"),
);
console.log(`==> [${name}] start`);
const startedAt = Date.now();
const startedAtIso = new Date(startedAt).toISOString();
let result;
const attempts = [];
const maxAttempts = 1 + Math.max(0, lane.retries ?? 0);
for (let attempt = 1; attempt <= maxAttempts; attempt += 1) {
const attemptStartedAt = Date.now();
if (attempt > 1) {
await fs.promises.appendFile(logFile, `\n==> [${name}] retry attempt ${attempt}\n`);
console.log(`==> [${name}] retry ${attempt}/${maxAttempts}`);
}
result = await runShellCommand({ command, env, label: name, logFile, timeoutMs });
attempts.push({
attempt,
elapsedSeconds: phaseElapsedSeconds(attemptStartedAt),
finishedAt: new Date().toISOString(),
startedAt: new Date(attemptStartedAt).toISOString(),
status: result.status,
timedOut: result.timedOut,
});
if (result.status === 0 || attempt >= maxAttempts) {
break;
}
@@ -1039,10 +1134,15 @@ async function runLane(lane, baseEnv, logDir, fallbackTimeoutMs) {
}
return {
command,
attempts,
finishedAt: new Date().toISOString(),
image: env.OPENCLAW_DOCKER_E2E_IMAGE,
imageKind: lane.e2eImageKind,
logFile,
name,
elapsedSeconds,
rerunCommand: buildLaneRerunCommand(name, baseEnv),
startedAt: startedAtIso,
status: result.status,
timedOut: result.timedOut,
};
@@ -1255,6 +1355,7 @@ process.on("SIGTERM", () => {
async function main() {
const runStartedAt = new Date().toISOString();
const phases = [];
const parallelism = parsePositiveInt(
process.env.OPENCLAW_DOCKER_ALL_PARALLELISM,
DEFAULT_PARALLELISM,
@@ -1325,8 +1426,17 @@ async function main() {
await mkdir(logDir, { recursive: true });
const baseEnv = commandEnv({
OPENCLAW_DOCKER_E2E_IMAGE: process.env.OPENCLAW_DOCKER_E2E_IMAGE || DEFAULT_E2E_IMAGE,
OPENCLAW_DOCKER_E2E_BARE_IMAGE:
process.env.OPENCLAW_DOCKER_E2E_BARE_IMAGE ||
process.env.OPENCLAW_DOCKER_E2E_IMAGE ||
DEFAULT_E2E_BARE_IMAGE,
OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE:
process.env.OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE ||
process.env.OPENCLAW_DOCKER_E2E_IMAGE ||
DEFAULT_E2E_FUNCTIONAL_IMAGE,
});
baseEnv.OPENCLAW_DOCKER_E2E_IMAGE =
process.env.OPENCLAW_DOCKER_E2E_IMAGE || baseEnv.OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE;
appendExtension(baseEnv, "matrix");
appendExtension(baseEnv, "acpx");
appendExtension(baseEnv, "codex");
@@ -1383,6 +1493,8 @@ async function main() {
}`,
);
console.log(`==> Build shared Docker images: ${buildEnabled ? "yes" : "no"}`);
console.log(`==> Docker E2E bare image: ${baseEnv.OPENCLAW_DOCKER_E2E_BARE_IMAGE}`);
console.log(`==> Docker E2E functional image: ${baseEnv.OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE}`);
if (profile === RELEASE_PATH_PROFILE) {
console.log(`==> Include Open WebUI: ${includeOpenWebUI ? "yes" : "no"}`);
}
@@ -1406,30 +1518,65 @@ async function main() {
return;
}
await runDockerPreflight(baseEnv, {
cleanup: preflightCleanup,
enabled: preflightEnabled,
runTimeoutMs: preflightRunTimeoutMs,
});
await runPhase(
phases,
"docker-preflight",
{ cleanup: preflightCleanup, enabled: preflightEnabled },
async () => {
await runDockerPreflight(baseEnv, {
cleanup: preflightCleanup,
enabled: preflightEnabled,
runTimeoutMs: preflightRunTimeoutMs,
});
},
);
if (buildEnabled) {
const buildEntries = [];
const scheduledLanes = [...orderedLanes, ...orderedTailLanes];
if (scheduledLanes.some((poolLane) => poolLane.live)) {
buildEntries.push(["Build shared live-test image once", "pnpm test:docker:live-build"]);
buildEntries.push({
command: "pnpm test:docker:live-build",
label: "shared live-test image once",
phaseDetails: { imageKind: "live" },
phases,
});
}
if (scheduledLanes.some((poolLane) => !poolLane.live)) {
buildEntries.push([
`Build shared Docker E2E image once: ${baseEnv.OPENCLAW_DOCKER_E2E_IMAGE}`,
"pnpm test:docker:e2e-build",
]);
if (lanesNeedE2eImageKind(scheduledLanes, "bare")) {
buildEntries.push({
command: "pnpm test:docker:e2e-build",
env: {
OPENCLAW_DOCKER_E2E_IMAGE: baseEnv.OPENCLAW_DOCKER_E2E_BARE_IMAGE,
OPENCLAW_DOCKER_E2E_TARGET: "build",
},
label: `shared bare Docker E2E image once: ${baseEnv.OPENCLAW_DOCKER_E2E_BARE_IMAGE}`,
phaseDetails: { image: baseEnv.OPENCLAW_DOCKER_E2E_BARE_IMAGE, imageKind: "bare" },
phases,
});
}
if (lanesNeedE2eImageKind(scheduledLanes, "functional")) {
buildEntries.push({
command: "pnpm test:docker:e2e-build",
env: {
OPENCLAW_DOCKER_E2E_IMAGE: baseEnv.OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE,
OPENCLAW_DOCKER_E2E_TARGET: "functional",
},
label: `shared functional Docker E2E image once: ${baseEnv.OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE}`,
phaseDetails: {
image: baseEnv.OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE,
imageKind: "functional",
},
phases,
});
}
await runForegroundGroup(buildEntries, baseEnv);
} else {
console.log(`==> Shared Docker image builds: skipped`);
}
if (lanesNeedBundledPackage([...orderedLanes, ...orderedTailLanes])) {
await prepareBundledChannelPackage(baseEnv, logDir);
await runPhase(phases, "prepare-bundled-channel-package", { imageKind: "bare" }, async () => {
await prepareBundledChannelPackage(baseEnv, logDir);
});
} else {
console.log("==> Bundled channel package: not needed for selected lanes");
}
@@ -1442,7 +1589,9 @@ async function main() {
statusIntervalMs,
timeoutMs: laneTimeoutMs,
};
const mainResult = await runLanePool(orderedLanes, baseEnv, logDir, parallelism, options);
const mainResult = await runPhase(phases, "main-lane-pool", { lanes: orderedLanes.length }, () =>
runLanePool(orderedLanes, baseEnv, logDir, parallelism, options),
);
const failures = [...mainResult.failures];
const allResults = [...mainResult.results];
await writeTimingStore(timingStore, mainResult.results);
@@ -1451,7 +1600,12 @@ async function main() {
chunk: releaseChunk || undefined,
failures,
image: baseEnv.OPENCLAW_DOCKER_E2E_IMAGE,
images: {
bare: baseEnv.OPENCLAW_DOCKER_E2E_BARE_IMAGE,
functional: baseEnv.OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE,
},
lanes: allResults,
phases,
profile,
selectedLanes: selectedLaneNames.length > 0 ? selectedLaneNames : undefined,
startedAt: runStartedAt,
@@ -1463,11 +1617,17 @@ async function main() {
if (orderedTailLanes.length > 0) {
console.log("==> Running provider-sensitive Docker tail lanes");
const tailResult = await runLanePool(orderedTailLanes, baseEnv, logDir, tailParallelism, {
...options,
...tailSchedulerOptions,
poolLabel: "tail",
});
const tailResult = await runPhase(
phases,
"tail-lane-pool",
{ lanes: orderedTailLanes.length },
() =>
runLanePool(orderedTailLanes, baseEnv, logDir, tailParallelism, {
...options,
...tailSchedulerOptions,
poolLabel: "tail",
}),
);
failures.push(...tailResult.failures);
allResults.push(...tailResult.results);
await writeTimingStore(timingStore, tailResult.results);
@@ -1479,7 +1639,12 @@ async function main() {
chunk: releaseChunk || undefined,
failures,
image: baseEnv.OPENCLAW_DOCKER_E2E_IMAGE,
images: {
bare: baseEnv.OPENCLAW_DOCKER_E2E_BARE_IMAGE,
functional: baseEnv.OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE,
},
lanes: allResults,
phases,
profile,
selectedLanes: selectedLaneNames.length > 0 ? selectedLaneNames : undefined,
startedAt: runStartedAt,
@@ -1490,11 +1655,13 @@ async function main() {
}
if (profile === DEFAULT_PROFILE && selectedLaneNames.length === 0) {
await runForeground(
"Run cleanup smoke after parallel lanes",
"pnpm test:docker:cleanup",
baseEnv,
);
await runPhase(phases, "cleanup-smoke", {}, async () => {
await runForeground(
"Run cleanup smoke after parallel lanes",
"pnpm test:docker:cleanup",
baseEnv,
);
});
} else {
console.log("==> Cleanup smoke after parallel lanes: skipped for selected/release lanes");
}
@@ -1503,7 +1670,12 @@ async function main() {
chunk: releaseChunk || undefined,
failures,
image: baseEnv.OPENCLAW_DOCKER_E2E_IMAGE,
images: {
bare: baseEnv.OPENCLAW_DOCKER_E2E_BARE_IMAGE,
functional: baseEnv.OPENCLAW_DOCKER_E2E_FUNCTIONAL_IMAGE,
},
lanes: allResults,
phases,
profile,
selectedLanes: selectedLaneNames.length > 0 ? selectedLaneNames : undefined,
startedAt: runStartedAt,