diff --git a/scripts/e2e/npm-telegram-rtt-docker.sh b/scripts/e2e/npm-telegram-rtt-docker.sh
index 64ae123b487..5ccfefed915 100755
--- a/scripts/e2e/npm-telegram-rtt-docker.sh
+++ b/scripts/e2e/npm-telegram-rtt-docker.sh
@@ -91,7 +91,7 @@ docker_env=(
   -e OPENCLAW_NPM_TELEGRAM_PROVIDER_MODE="${OPENCLAW_NPM_TELEGRAM_PROVIDER_MODE:-mock-openai}"
   -e OPENCLAW_NPM_TELEGRAM_WARM_SAMPLES="${OPENCLAW_NPM_TELEGRAM_WARM_SAMPLES:-20}"
   -e OPENCLAW_NPM_TELEGRAM_SAMPLE_TIMEOUT_MS="${OPENCLAW_NPM_TELEGRAM_SAMPLE_TIMEOUT_MS:-30000}"
-  -e OPENCLAW_NPM_TELEGRAM_MAX_FAILURES="${OPENCLAW_NPM_TELEGRAM_MAX_FAILURES:-3}"
+  -e OPENCLAW_NPM_TELEGRAM_MAX_FAILURES="${OPENCLAW_NPM_TELEGRAM_MAX_FAILURES:-${OPENCLAW_NPM_TELEGRAM_WARM_SAMPLES:-20}}"
 )
 
 run_logged() {
diff --git a/scripts/e2e/npm-telegram-rtt-driver.mjs b/scripts/e2e/npm-telegram-rtt-driver.mjs
index c115508f418..d9800db8043 100755
--- a/scripts/e2e/npm-telegram-rtt-driver.mjs
+++ b/scripts/e2e/npm-telegram-rtt-driver.mjs
@@ -12,7 +12,9 @@ const canaryTimeoutMs = Number(
 );
 const warmSampleCount = Number(process.env.OPENCLAW_NPM_TELEGRAM_WARM_SAMPLES ?? "20");
 const sampleTimeoutMs = Number(process.env.OPENCLAW_NPM_TELEGRAM_SAMPLE_TIMEOUT_MS ?? "30000");
-const maxWarmFailures = Number(process.env.OPENCLAW_NPM_TELEGRAM_MAX_FAILURES ?? "3");
+const maxWarmFailures = Number(
+  process.env.OPENCLAW_NPM_TELEGRAM_MAX_FAILURES ?? String(warmSampleCount),
+);
 const successMarker = process.env.OPENCLAW_NPM_TELEGRAM_SUCCESS_MARKER ?? "OPENCLAW_E2E_OK";
 const scenarioIds = (
   process.env.OPENCLAW_NPM_TELEGRAM_SCENARIOS ?? "telegram-mentioned-message-reply"