mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-06 08:20:43 +00:00
ci: run release Docker chunks through scheduler
This commit is contained in:
@@ -488,66 +488,50 @@ jobs:
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
export OPENCLAW_DOCKER_ALL_PROFILE=release-path
|
||||
export OPENCLAW_DOCKER_ALL_CHUNK="${DOCKER_E2E_CHUNK}"
|
||||
export OPENCLAW_DOCKER_ALL_BUILD=0
|
||||
export OPENCLAW_DOCKER_ALL_PREFLIGHT=0
|
||||
export OPENCLAW_DOCKER_ALL_FAIL_FAST=0
|
||||
export OPENCLAW_DOCKER_ALL_INCLUDE_OPENWEBUI="${INCLUDE_OPENWEBUI}"
|
||||
export OPENCLAW_DOCKER_ALL_LOG_DIR=".artifacts/docker-tests/release-${DOCKER_E2E_CHUNK}"
|
||||
export OPENCLAW_DOCKER_ALL_TIMINGS_FILE=".artifacts/docker-tests/release-${DOCKER_E2E_CHUNK}-timings.json"
|
||||
|
||||
failures=()
|
||||
pnpm test:docker:all
|
||||
|
||||
run_lane() {
|
||||
local label="$1"
|
||||
shift
|
||||
|
||||
echo "::group::${label}"
|
||||
local status=0
|
||||
"$@" || status=$?
|
||||
echo "::endgroup::"
|
||||
|
||||
if [[ "$status" -ne 0 ]]; then
|
||||
failures+=("${label} exited ${status}")
|
||||
fi
|
||||
}
|
||||
|
||||
run_openwebui_lane() {
|
||||
if [[ "${INCLUDE_OPENWEBUI}" != "true" ]]; then
|
||||
echo "Skipping Open WebUI Docker E2E because include_openwebui=false."
|
||||
return 0
|
||||
fi
|
||||
run_lane "Open WebUI Docker E2E" pnpm test:docker:openwebui
|
||||
}
|
||||
|
||||
case "${DOCKER_E2E_CHUNK}" in
|
||||
core)
|
||||
run_lane "QR Import Docker E2E" pnpm test:docker:qr
|
||||
run_lane "Onboarding Docker E2E" pnpm test:docker:onboard
|
||||
run_lane "Gateway Network Docker E2E" pnpm test:docker:gateway-network
|
||||
run_lane "Config Reload Docker E2E" pnpm test:docker:config-reload
|
||||
run_lane "Session Runtime Context Docker E2E" pnpm test:docker:session-runtime-context
|
||||
run_lane "Pi Bundle MCP Tools Docker E2E" pnpm test:docker:pi-bundle-mcp-tools
|
||||
run_lane "MCP Channels Docker E2E" pnpm test:docker:mcp-channels
|
||||
;;
|
||||
package-update)
|
||||
run_lane "Installer Docker E2E" env OPENCLAW_E2E_MODELS=both pnpm test:install:e2e
|
||||
run_lane "Npm Onboard Channel Agent Docker E2E" pnpm test:docker:npm-onboard-channel-agent
|
||||
run_lane "Doctor Install Switch Docker E2E" pnpm test:docker:doctor-switch
|
||||
run_lane "Update Channel Switch Docker E2E" pnpm test:docker:update-channel-switch
|
||||
;;
|
||||
plugins-integrations)
|
||||
run_lane "Plugins Docker E2E" pnpm test:docker:plugins
|
||||
run_lane "Plugin Update Docker E2E" pnpm test:docker:plugin-update
|
||||
run_lane "Bundled Channel Runtime Deps Docker E2E" pnpm test:docker:bundled-channel-deps
|
||||
run_lane "Cron MCP Cleanup Docker E2E" pnpm test:docker:cron-mcp-cleanup
|
||||
run_lane "OpenAI Web Search Minimal Docker E2E" pnpm test:docker:openai-web-search-minimal
|
||||
run_openwebui_lane
|
||||
;;
|
||||
*)
|
||||
echo "Unknown Docker E2E chunk: ${DOCKER_E2E_CHUNK}" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
if (( ${#failures[@]} > 0 )); then
|
||||
printf 'Docker E2E chunk %s failed:\n' "${DOCKER_E2E_CHUNK}" >&2
|
||||
printf -- '- %s\n' "${failures[@]}" >&2
|
||||
exit 1
|
||||
- name: Summarize Docker E2E chunk
|
||||
if: always()
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
summary=".artifacts/docker-tests/release-${DOCKER_E2E_CHUNK}/summary.json"
|
||||
if [[ ! -f "$summary" ]]; then
|
||||
echo "Docker chunk summary missing: \`$summary\`" >> "$GITHUB_STEP_SUMMARY"
|
||||
exit 0
|
||||
fi
|
||||
node --input-type=module - "$summary" <<'NODE' >> "$GITHUB_STEP_SUMMARY"
|
||||
import fs from "node:fs";
|
||||
const summary = JSON.parse(fs.readFileSync(process.argv[2], "utf8"));
|
||||
const lanes = Array.isArray(summary.lanes) ? summary.lanes : [];
|
||||
console.log(`### Docker E2E chunk: ${summary.chunk ?? "unknown"}`);
|
||||
console.log("");
|
||||
console.log(`Status: \`${summary.status}\``);
|
||||
console.log("");
|
||||
console.log("| Lane | Status | Seconds | Timed out |");
|
||||
console.log("| --- | ---: | ---: | --- |");
|
||||
for (const lane of lanes) {
|
||||
const status = lane.status === 0 ? "pass" : `fail ${lane.status}`;
|
||||
console.log(`| \`${lane.name}\` | ${status} | ${lane.elapsedSeconds ?? ""} | ${lane.timedOut ? "yes" : "no"} |`);
|
||||
}
|
||||
NODE
|
||||
|
||||
- name: Upload Docker E2E chunk artifacts
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v7
|
||||
with:
|
||||
name: docker-e2e-${{ matrix.chunk_id }}
|
||||
path: .artifacts/docker-tests/
|
||||
if-no-files-found: ignore
|
||||
|
||||
validate_docker_openwebui:
|
||||
needs: [validate_selected_ref, prepare_docker_e2e_image]
|
||||
|
||||
@@ -92,7 +92,7 @@ Scope logic lives in `scripts/ci-changed-scope.mjs` and is covered by unit tests
|
||||
CI workflow edits validate the Node CI graph plus workflow linting, but do not force Windows, Android, or macOS native builds by themselves; those platform lanes stay scoped to platform source changes.
|
||||
CI routing-only edits, selected cheap core-test fixture edits, and narrow plugin contract helper/test-routing edits use a fast Node-only manifest path: preflight, security, and a single `checks-fast-core` task. That path avoids build artifacts, Node 22 compatibility, channel contracts, full core shards, bundled-plugin shards, and additional guard matrices when the changed files are limited to the routing or helper surfaces that the fast task exercises directly.
|
||||
Windows Node checks are scoped to Windows-specific process/path wrappers, npm/pnpm/UI runner helpers, package manager config, and the CI workflow surfaces that execute that lane; unrelated source, plugin, install-smoke, and test-only changes stay on the Linux Node lanes so they do not reserve a 16-vCPU Windows worker for coverage that is already exercised by the normal test shards.
|
||||
The separate `install-smoke` workflow reuses the same scope script through its own `preflight` job. It splits smoke coverage into `run_fast_install_smoke` and `run_full_install_smoke`. Pull requests run the fast path for Docker/package surfaces, bundled plugin package/manifest changes, and core plugin/channel/gateway/Plugin SDK surfaces that the Docker smoke jobs exercise. Source-only bundled plugin changes, test-only edits, and docs-only edits do not reserve Docker workers. The fast path builds the root Dockerfile image once, checks the CLI, runs the agents delete shared-workspace CLI smoke, runs the container gateway-network e2e, verifies a bundled extension build arg, and runs the bounded bundled-plugin Docker profile under a 240-second aggregate command timeout with each scenario's Docker run capped separately. The full path keeps QR package install and installer Docker/update coverage for nightly scheduled runs, manual dispatches, workflow-call release checks, and pull requests that truly touch installer/package/Docker surfaces. `main` pushes, including merge commits, do not force the full path; when changed-scope logic would request full coverage on a push, the workflow keeps the fast Docker smoke and leaves the full install smoke to nightly or release validation. The slow Bun global install image-provider smoke is separately gated by `run_bun_global_install_smoke`; it runs on the nightly schedule and from the release checks workflow, and manual `install-smoke` dispatches can opt into it, but pull requests and `main` pushes do not run it. QR and installer Docker tests keep their own install-focused Dockerfiles. 
Local `test:docker:all` prebuilds one shared live-test image and one shared `scripts/e2e/Dockerfile` built-app image, then runs the live/E2E smoke lanes with a weighted scheduler and `OPENCLAW_SKIP_DOCKER_BUILD=1`; tune the default main-pool slot count of 10 with `OPENCLAW_DOCKER_ALL_PARALLELISM` and the provider-sensitive tail-pool slot count of 10 with `OPENCLAW_DOCKER_ALL_TAIL_PARALLELISM`. Heavy lane caps default to `OPENCLAW_DOCKER_ALL_LIVE_LIMIT=6`, `OPENCLAW_DOCKER_ALL_NPM_LIMIT=8`, and `OPENCLAW_DOCKER_ALL_SERVICE_LIMIT=7` so npm install and multi-service lanes do not overcommit Docker while lighter lanes still fill available slots. Lane starts are staggered by 2 seconds by default to avoid local Docker daemon create storms; override with `OPENCLAW_DOCKER_ALL_START_STAGGER_MS=0` or another millisecond value. The local aggregate preflights Docker, removes stale OpenClaw E2E containers, emits active-lane status, persists lane timings for longest-first ordering, and supports `OPENCLAW_DOCKER_ALL_DRY_RUN=1` for scheduler inspection. It stops scheduling new pooled lanes after the first failure by default, and each lane has a 120-minute fallback timeout overrideable with `OPENCLAW_DOCKER_ALL_LANE_TIMEOUT_MS`; selected live/tail lanes use tighter per-lane caps. The reusable live/E2E workflow builds and pushes one SHA-tagged GHCR Docker E2E image, then runs the release-path Docker suite as at most three chunked jobs with `OPENCLAW_SKIP_DOCKER_BUILD=1` so each chunk pulls the shared image once and executes multiple lanes. When Open WebUI is requested with the release-path suite, it runs inside the plugins/integrations chunk instead of reserving a fourth Docker worker; Open WebUI keeps a standalone job only for openwebui-only dispatches. The scheduled live/E2E workflow runs the full release-path Docker suite daily. The bundled update matrix is split by update target so repeated npm update and doctor repair passes can shard with other bundled checks.
|
||||
The separate `install-smoke` workflow reuses the same scope script through its own `preflight` job. It splits smoke coverage into `run_fast_install_smoke` and `run_full_install_smoke`. Pull requests run the fast path for Docker/package surfaces, bundled plugin package/manifest changes, and core plugin/channel/gateway/Plugin SDK surfaces that the Docker smoke jobs exercise. Source-only bundled plugin changes, test-only edits, and docs-only edits do not reserve Docker workers. The fast path builds the root Dockerfile image once, checks the CLI, runs the agents delete shared-workspace CLI smoke, runs the container gateway-network e2e, verifies a bundled extension build arg, and runs the bounded bundled-plugin Docker profile under a 240-second aggregate command timeout with each scenario's Docker run capped separately. The full path keeps QR package install and installer Docker/update coverage for nightly scheduled runs, manual dispatches, workflow-call release checks, and pull requests that truly touch installer/package/Docker surfaces. `main` pushes, including merge commits, do not force the full path; when changed-scope logic would request full coverage on a push, the workflow keeps the fast Docker smoke and leaves the full install smoke to nightly or release validation. The slow Bun global install image-provider smoke is separately gated by `run_bun_global_install_smoke`; it runs on the nightly schedule and from the release checks workflow, and manual `install-smoke` dispatches can opt into it, but pull requests and `main` pushes do not run it. QR and installer Docker tests keep their own install-focused Dockerfiles. 
Local `test:docker:all` prebuilds one shared live-test image and one shared `scripts/e2e/Dockerfile` built-app image, then runs the live/E2E smoke lanes with a weighted scheduler and `OPENCLAW_SKIP_DOCKER_BUILD=1`; tune the default main-pool slot count of 10 with `OPENCLAW_DOCKER_ALL_PARALLELISM` and the provider-sensitive tail-pool slot count of 10 with `OPENCLAW_DOCKER_ALL_TAIL_PARALLELISM`. Heavy lane caps default to `OPENCLAW_DOCKER_ALL_LIVE_LIMIT=6`, `OPENCLAW_DOCKER_ALL_NPM_LIMIT=8`, and `OPENCLAW_DOCKER_ALL_SERVICE_LIMIT=7` so npm install and multi-service lanes do not overcommit Docker while lighter lanes still fill available slots. Lane starts are staggered by 2 seconds by default to avoid local Docker daemon create storms; override with `OPENCLAW_DOCKER_ALL_START_STAGGER_MS=0` or another millisecond value. The local aggregate preflights Docker, removes stale OpenClaw E2E containers, emits active-lane status, persists lane timings for longest-first ordering, and supports `OPENCLAW_DOCKER_ALL_DRY_RUN=1` for scheduler inspection. It stops scheduling new pooled lanes after the first failure by default, and each lane has a 120-minute fallback timeout overrideable with `OPENCLAW_DOCKER_ALL_LANE_TIMEOUT_MS`; selected live/tail lanes use tighter per-lane caps. The reusable live/E2E workflow builds and pushes one SHA-tagged GHCR Docker E2E image, then runs the release-path Docker suite as at most three chunked jobs with `OPENCLAW_SKIP_DOCKER_BUILD=1` so each chunk pulls the shared image once and executes multiple lanes through the same weighted scheduler (`OPENCLAW_DOCKER_ALL_PROFILE=release-path`, `OPENCLAW_DOCKER_ALL_CHUNK=core|package-update|plugins-integrations`). Each chunk uploads `.artifacts/docker-tests/` with lane logs, timings, and `summary.json`. 
When Open WebUI is requested with the release-path suite, it runs inside the plugins/integrations chunk instead of reserving a fourth Docker worker; Open WebUI keeps a standalone job only for openwebui-only dispatches. The scheduled live/E2E workflow runs the full release-path Docker suite daily. The bundled update matrix is split by update target so repeated npm update and doctor repair passes can shard with other bundled checks.
|
||||
|
||||
Local changed-lane logic lives in `scripts/changed-lanes.mjs` and is executed by `scripts/check-changed.mjs`. That local gate is stricter about architecture boundaries than the broad CI platform scope: core production changes run core prod typecheck plus core tests, core test-only changes run only core test typecheck/tests, extension production changes run extension prod typecheck plus extension tests, and extension test-only changes run only extension test typecheck/tests. Public Plugin SDK or plugin-contract changes expand to extension validation because extensions depend on those core contracts. Release metadata-only version bumps run targeted version/config/root-dependency checks. Unknown root/config changes fail safe to all lanes.
|
||||
|
||||
|
||||
@@ -15,6 +15,8 @@ const DEFAULT_LIVE_RETRIES = 1;
|
||||
const DEFAULT_STATUS_INTERVAL_MS = 30_000;
|
||||
const DEFAULT_PREFLIGHT_RUN_TIMEOUT_MS = 60_000;
|
||||
const DEFAULT_TIMINGS_FILE = path.join(ROOT_DIR, ".artifacts/docker-tests/lane-timings.json");
|
||||
const DEFAULT_PROFILE = "all";
|
||||
const RELEASE_PATH_PROFILE = "release-path";
|
||||
const LIVE_PROFILE_TIMEOUT_MS = 20 * 60 * 1000;
|
||||
const LIVE_CLI_TIMEOUT_MS = 20 * 60 * 1000;
|
||||
const LIVE_ACP_TIMEOUT_MS = 20 * 60 * 1000;
|
||||
@@ -367,6 +369,95 @@ const exclusiveLanes = [
|
||||
|
||||
const tailLanes = exclusiveLanes;
|
||||
|
||||
// Lane manifests for the CI release-path profile, keyed by chunk id.
// Every lane sets OPENCLAW_SKIP_DOCKER_BUILD=1 because the chunked CI jobs
// pull one shared, prebuilt Docker E2E image instead of building locally.
// Weights and resource tags (npm/service) feed the weighted scheduler so
// heavy lanes do not overcommit the Docker daemon.
const releasePathChunks = {
  // Core gateway/session flows.
  core: [
    lane("qr", "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:qr"),
    serviceLane("onboard", "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:onboard", {
      weight: 2,
    }),
    serviceLane("gateway-network", "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:gateway-network"),
    serviceLane("config-reload", "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:config-reload"),
    lane(
      "session-runtime-context",
      "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:session-runtime-context",
    ),
    lane(
      "pi-bundle-mcp-tools",
      "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:pi-bundle-mcp-tools",
    ),
    serviceLane("mcp-channels", "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:mcp-channels", {
      resources: ["npm"],
      weight: 3,
    }),
  ],
  // Installer / npm package update coverage.
  "package-update": [
    npmLane("install-e2e", "OPENCLAW_E2E_MODELS=both pnpm test:install:e2e", {
      resources: ["service"],
      weight: 4,
    }),
    npmLane(
      "npm-onboard-channel-agent",
      "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:npm-onboard-channel-agent",
      { resources: ["service"], weight: 3 },
    ),
    npmLane("doctor-switch", "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:doctor-switch", {
      weight: 3,
    }),
    npmLane(
      "update-channel-switch",
      "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:update-channel-switch",
      {
        // Channel switching repeatedly reinstalls packages; allow 30 minutes.
        timeoutMs: 30 * 60 * 1000,
        weight: 3,
      },
    ),
  ],
  // Plugin and third-party integration lanes (Open WebUI is appended here
  // on demand by releasePathChunkLanes, not listed statically).
  "plugins-integrations": [
    lane("plugins", "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:plugins", {
      resources: ["npm", "service"],
      weight: 6,
    }),
    npmLane("plugin-update", "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:plugin-update"),
    npmLane(
      "bundled-channel-deps",
      "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:bundled-channel-deps",
      { resources: ["service"], weight: 3 },
    ),
    serviceLane(
      "cron-mcp-cleanup",
      "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:cron-mcp-cleanup",
      {
        resources: ["npm"],
        weight: 3,
      },
    ),
    serviceLane(
      "openai-web-search-minimal",
      "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:openai-web-search-minimal",
      { timeoutMs: 8 * 60 * 1000 },
    ),
  ],
};
|
||||
|
||||
/**
 * Resolve the lane list for a release-path chunk.
 *
 * @param {string} chunk - Chunk id; must be a key of `releasePathChunks`.
 * @param {{ includeOpenWebUI?: boolean }} [options] - When `includeOpenWebUI`
 *   is truthy and the chunk is `plugins-integrations`, an Open WebUI lane is
 *   appended so that suite runs inside the chunk instead of a separate job.
 * @returns {Array} The chunk's lanes (base list, or base plus Open WebUI).
 * @throws {Error} When `chunk` is not a known release-path chunk id.
 */
function releasePathChunkLanes(chunk, options = {}) {
  const { includeOpenWebUI } = options;
  const base = releasePathChunks[chunk];
  if (!base) {
    throw new Error(
      `OPENCLAW_DOCKER_ALL_CHUNK must be one of: ${Object.keys(releasePathChunks).join(", ")}. Got: ${JSON.stringify(chunk)}`,
    );
  }
  // Open WebUI only piggybacks on the plugins/integrations chunk.
  const wantsOpenWebUI = chunk === "plugins-integrations" && includeOpenWebUI;
  if (!wantsOpenWebUI) {
    return base;
  }
  const openWebUILane = serviceLane(
    "openwebui",
    "OPENCLAW_SKIP_DOCKER_BUILD=1 pnpm test:docker:openwebui",
    {
      timeoutMs: OPENWEBUI_TIMEOUT_MS,
      weight: 5,
    },
  );
  return [...base, openWebUILane];
}
|
||||
|
||||
function parsePositiveInt(raw, fallback, label) {
|
||||
if (!raw) {
|
||||
return fallback;
|
||||
@@ -406,6 +497,16 @@ function parseLiveMode(raw) {
|
||||
);
|
||||
}
|
||||
|
||||
/**
 * Validate the OPENCLAW_DOCKER_ALL_PROFILE value.
 *
 * @param {string|undefined} raw - Raw env value; empty/undefined falls back
 *   to the default profile.
 * @returns {string} Either the default profile or the release-path profile.
 * @throws {Error} When `raw` is set to an unknown profile name.
 */
function parseProfile(raw) {
  const resolved = raw || DEFAULT_PROFILE;
  const knownProfiles = [DEFAULT_PROFILE, RELEASE_PATH_PROFILE];
  if (!knownProfiles.includes(resolved)) {
    throw new Error(
      `OPENCLAW_DOCKER_ALL_PROFILE must be one of: ${DEFAULT_PROFILE}, ${RELEASE_PATH_PROFILE}. Got: ${JSON.stringify(raw)}`,
    );
  }
  return resolved;
}
|
||||
|
||||
function applyLiveMode(poolLanes, mode) {
|
||||
if (mode === "all") {
|
||||
return poolLanes;
|
||||
@@ -565,6 +666,17 @@ async function writeTimingStore(timingStore, results) {
|
||||
console.log(`==> Docker lane timings: ${timingStore.file}`);
|
||||
}
|
||||
|
||||
/**
 * Persist the aggregate run outcome as `summary.json` under the lane log dir,
 * for CI steps (e.g. the chunk summarizer) to read after the run.
 *
 * @param {string} logDir - Directory for lane logs and the summary file.
 * @param {object} summary - Caller-provided fields (chunk, failures, image,
 *   lanes, profile, startedAt, status). `finishedAt` and `version` are
 *   stamped here and override any caller-provided values of the same name.
 * @returns {Promise<void>}
 */
async function writeRunSummary(logDir, summary) {
  // Early failure paths can reach this before any lane has created the log
  // directory; create it so the summary write never crashes the reporter.
  await fs.promises.mkdir(logDir, { recursive: true });
  const file = path.join(logDir, "summary.json");
  const payload = {
    ...summary,
    finishedAt: new Date().toISOString(),
    version: 1, // schema version for downstream consumers
  };
  // Trailing newline keeps the artifact friendly to line-based tooling.
  await fs.promises.writeFile(file, `${JSON.stringify(payload, null, 2)}\n`);
  console.log(`==> Docker run summary: ${file}`);
}
|
||||
|
||||
function printLaneManifest(label, poolLanes, timingStore) {
|
||||
console.log(`==> ${label} lanes (${poolLanes.length})`);
|
||||
for (const [index, poolLane] of poolLanes.entries()) {
|
||||
@@ -574,6 +686,13 @@ function printLaneManifest(label, poolLanes, timingStore) {
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Report whether any selected lane needs the prepacked bundled-channel
 * package, so the aggregate can skip preparing it otherwise.
 *
 * @param {Array<{name: string}>} poolLanes - Lanes selected for this run.
 * @returns {boolean} True if at least one lane consumes the bundled package.
 */
function lanesNeedBundledPackage(poolLanes) {
  const requiresBundledPackage = ({ name }) =>
    name === "npm-onboard-channel-agent" || name.startsWith("bundled-channel");
  return poolLanes.some(requiresBundledPackage);
}
|
||||
|
||||
function dockerPreflightContainerNames(raw) {
|
||||
return raw
|
||||
.split(/\r?\n/)
|
||||
@@ -1077,6 +1196,7 @@ process.on("SIGTERM", () => {
|
||||
});
|
||||
|
||||
async function main() {
|
||||
const runStartedAt = new Date().toISOString();
|
||||
const parallelism = parsePositiveInt(
|
||||
process.env.OPENCLAW_DOCKER_ALL_PARALLELISM,
|
||||
DEFAULT_PARALLELISM,
|
||||
@@ -1117,6 +1237,13 @@ async function main() {
|
||||
const preflightEnabled = parseBool(process.env.OPENCLAW_DOCKER_ALL_PREFLIGHT, true);
|
||||
const preflightCleanup = parseBool(process.env.OPENCLAW_DOCKER_ALL_PREFLIGHT_CLEANUP, true);
|
||||
const timingsEnabled = parseBool(process.env.OPENCLAW_DOCKER_ALL_TIMINGS, true);
|
||||
const buildEnabled = parseBool(process.env.OPENCLAW_DOCKER_ALL_BUILD, true);
|
||||
const profile = parseProfile(process.env.OPENCLAW_DOCKER_ALL_PROFILE);
|
||||
const releaseChunk = process.env.OPENCLAW_DOCKER_ALL_CHUNK || process.env.DOCKER_E2E_CHUNK || "";
|
||||
const includeOpenWebUI = parseBool(
|
||||
process.env.OPENCLAW_DOCKER_ALL_INCLUDE_OPENWEBUI ?? process.env.INCLUDE_OPENWEBUI,
|
||||
true,
|
||||
);
|
||||
const liveMode = parseLiveMode(process.env.OPENCLAW_DOCKER_ALL_LIVE_MODE);
|
||||
const liveRetries = parseNonNegativeInt(
|
||||
process.env.OPENCLAW_DOCKER_ALL_LIVE_RETRIES,
|
||||
@@ -1143,15 +1270,25 @@ async function main() {
|
||||
const timingStore = await loadTimingStore(timingsFile, timingsEnabled);
|
||||
const retriedMainLanes = applyLiveRetries(lanes, liveRetries);
|
||||
const retriedTailLanes = applyLiveRetries(tailLanes, liveRetries);
|
||||
const configuredLanes =
|
||||
liveMode === "only"
|
||||
const releaseLanes =
|
||||
profile === RELEASE_PATH_PROFILE
|
||||
? releasePathChunkLanes(releaseChunk, { includeOpenWebUI })
|
||||
: undefined;
|
||||
const configuredLanes = releaseLanes
|
||||
? releaseLanes
|
||||
: liveMode === "only"
|
||||
? applyLiveMode([...retriedMainLanes, ...retriedTailLanes], liveMode)
|
||||
: applyLiveMode(retriedMainLanes, liveMode);
|
||||
const configuredTailLanes = liveMode === "only" ? [] : applyLiveMode(retriedTailLanes, liveMode);
|
||||
const configuredTailLanes = releaseLanes
|
||||
? []
|
||||
: liveMode === "only"
|
||||
? []
|
||||
: applyLiveMode(retriedTailLanes, liveMode);
|
||||
const orderedLanes = orderLanes(configuredLanes, timingStore);
|
||||
const orderedTailLanes = orderLanes(configuredTailLanes, timingStore);
|
||||
|
||||
console.log(`==> Docker test logs: ${logDir}`);
|
||||
console.log(`==> Profile: ${profile}${releaseChunk ? ` chunk=${releaseChunk}` : ""}`);
|
||||
console.log(`==> Parallelism: ${parallelism}`);
|
||||
console.log(`==> Tail parallelism: ${tailParallelism}`);
|
||||
console.log(`==> Lane timeout: ${laneTimeoutMs}ms`);
|
||||
@@ -1166,6 +1303,10 @@ async function main() {
|
||||
preflightCleanup ? " cleanup=yes" : " cleanup=no"
|
||||
}`,
|
||||
);
|
||||
console.log(`==> Build shared Docker images: ${buildEnabled ? "yes" : "no"}`);
|
||||
if (profile === RELEASE_PATH_PROFILE) {
|
||||
console.log(`==> Include Open WebUI: ${includeOpenWebUI ? "yes" : "no"}`);
|
||||
}
|
||||
console.log(`==> Docker lane timings: ${timingStore.enabled ? timingsFile : "disabled"}`);
|
||||
console.log(`==> Live-test bundled plugin deps: ${baseEnv.OPENCLAW_DOCKER_BUILD_EXTENSIONS}`);
|
||||
const schedulerOptions = parseSchedulerOptions(process.env, parallelism);
|
||||
@@ -1189,17 +1330,24 @@ async function main() {
|
||||
runTimeoutMs: preflightRunTimeoutMs,
|
||||
});
|
||||
|
||||
await runForegroundGroup(
|
||||
[
|
||||
["Build shared live-test image once", "pnpm test:docker:live-build"],
|
||||
[
|
||||
`Build shared Docker E2E image once: ${baseEnv.OPENCLAW_DOCKER_E2E_IMAGE}`,
|
||||
"pnpm test:docker:e2e-build",
|
||||
],
|
||||
],
|
||||
baseEnv,
|
||||
);
|
||||
await prepareBundledChannelPackage(baseEnv, logDir);
|
||||
if (buildEnabled) {
|
||||
const buildEntries = [];
|
||||
if ([...orderedLanes, ...orderedTailLanes].some((poolLane) => poolLane.live)) {
|
||||
buildEntries.push(["Build shared live-test image once", "pnpm test:docker:live-build"]);
|
||||
}
|
||||
buildEntries.push([
|
||||
`Build shared Docker E2E image once: ${baseEnv.OPENCLAW_DOCKER_E2E_IMAGE}`,
|
||||
"pnpm test:docker:e2e-build",
|
||||
]);
|
||||
await runForegroundGroup(buildEntries, baseEnv);
|
||||
} else {
|
||||
console.log(`==> Shared Docker image builds: skipped`);
|
||||
}
|
||||
if (lanesNeedBundledPackage([...orderedLanes, ...orderedTailLanes])) {
|
||||
await prepareBundledChannelPackage(baseEnv, logDir);
|
||||
} else {
|
||||
console.log("==> Bundled channel package: not needed for selected lanes");
|
||||
}
|
||||
|
||||
const options = {
|
||||
...schedulerOptions,
|
||||
@@ -1214,30 +1362,65 @@ async function main() {
|
||||
const allResults = [...mainResult.results];
|
||||
await writeTimingStore(timingStore, mainResult.results);
|
||||
if (failFast && failures.length > 0) {
|
||||
await writeRunSummary(logDir, {
|
||||
chunk: releaseChunk || undefined,
|
||||
failures,
|
||||
image: baseEnv.OPENCLAW_DOCKER_E2E_IMAGE,
|
||||
lanes: allResults,
|
||||
profile,
|
||||
startedAt: runStartedAt,
|
||||
status: "failed",
|
||||
});
|
||||
await printFailureSummary(failures, tailLines);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
console.log("==> Running provider-sensitive Docker tail lanes");
|
||||
const tailResult = await runLanePool(orderedTailLanes, baseEnv, logDir, tailParallelism, {
|
||||
...options,
|
||||
...tailSchedulerOptions,
|
||||
poolLabel: "tail",
|
||||
});
|
||||
failures.push(...tailResult.failures);
|
||||
allResults.push(...tailResult.results);
|
||||
await writeTimingStore(timingStore, tailResult.results);
|
||||
if (orderedTailLanes.length > 0) {
|
||||
console.log("==> Running provider-sensitive Docker tail lanes");
|
||||
const tailResult = await runLanePool(orderedTailLanes, baseEnv, logDir, tailParallelism, {
|
||||
...options,
|
||||
...tailSchedulerOptions,
|
||||
poolLabel: "tail",
|
||||
});
|
||||
failures.push(...tailResult.failures);
|
||||
allResults.push(...tailResult.results);
|
||||
await writeTimingStore(timingStore, tailResult.results);
|
||||
} else {
|
||||
console.log("==> Provider-sensitive Docker tail lanes: none");
|
||||
}
|
||||
if (failures.length > 0) {
|
||||
await writeRunSummary(logDir, {
|
||||
chunk: releaseChunk || undefined,
|
||||
failures,
|
||||
image: baseEnv.OPENCLAW_DOCKER_E2E_IMAGE,
|
||||
lanes: allResults,
|
||||
profile,
|
||||
startedAt: runStartedAt,
|
||||
status: "failed",
|
||||
});
|
||||
await printFailureSummary(failures, tailLines);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
await runForeground(
|
||||
"Run cleanup smoke after parallel lanes",
|
||||
"pnpm test:docker:cleanup",
|
||||
baseEnv,
|
||||
);
|
||||
if (profile === DEFAULT_PROFILE) {
|
||||
await runForeground(
|
||||
"Run cleanup smoke after parallel lanes",
|
||||
"pnpm test:docker:cleanup",
|
||||
baseEnv,
|
||||
);
|
||||
} else {
|
||||
console.log("==> Cleanup smoke after parallel lanes: skipped for release-path chunk");
|
||||
}
|
||||
await writeTimingStore(timingStore, allResults);
|
||||
await writeRunSummary(logDir, {
|
||||
chunk: releaseChunk || undefined,
|
||||
failures,
|
||||
image: baseEnv.OPENCLAW_DOCKER_E2E_IMAGE,
|
||||
lanes: allResults,
|
||||
profile,
|
||||
startedAt: runStartedAt,
|
||||
status: "passed",
|
||||
});
|
||||
console.log("==> Docker test suite passed");
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user