Mirror of https://github.com/openclaw/openclaw.git (synced 2026-05-13 23:30:44 +00:00)
* fix(heartbeat): clamp scheduler delay to Node setTimeout cap (#71414)

  When `agents.defaults.heartbeat.every` resolves to more than 2_147_483_647 ms (~24.85 days), the previous scheduleNext() called setTimeout with the raw delay. Node clamps any delay above 2^31 - 1 to 1 ms, fires the callback, and the heartbeat re-arms with the same oversized value: a tight loop that floods the log with TimeoutOverflowWarning and crashes the gateway with exit code 1.

  Clamp the computed delay to HEARTBEAT_MAX_TIMEOUT_MS (2_147_483_647) before calling setTimeout. The worst case is now one heartbeat every ~24.85 days instead of a crash loop. Warn once per process when the clamp fires, so a misconfigured "365d" stays visible without flooding the log.

  This is a defense-in-depth fix at the scheduler layer; rejecting the value in loadConfig is a broader change with more blast radius and raises a separate question (some users may legitimately want "every: 365d" to mean "effectively never"). The clamped behaviour is closer to that intent than the crash is.

  Test: a new scheduler test sets heartbeat.every="365d" with fake timers, advances 60s, and asserts runSpy was never called (with the bug, it would be called ~60_000 times).

* style: format heartbeat scheduler clamp

* fix: share safe timeout delay clamp (#71478) (thanks @hclsys)

---------

Co-authored-by: Peter Steinberger <steipete@gmail.com>
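A minimal sketch of the clamp described above, not the actual implementation: the helper name clampTimeoutDelay, the warnedAboutClamp flag, and the scheduleNext signature are illustrative, while HEARTBEAT_MAX_TIMEOUT_MS, the setTimeout cap, and the warn-once behaviour come from the commit message.

// Node's setTimeout silently clamps any delay above 2^31 - 1 ms to 1 ms,
// so the computed delay must be capped before it reaches the timer.
const HEARTBEAT_MAX_TIMEOUT_MS = 2_147_483_647;

let warnedAboutClamp = false; // warn once per process, not on every re-arm

// Hypothetical shared helper (the commit history says a shared clamp landed in #71478).
function clampTimeoutDelay(delayMs: number, warn: (msg: string) => void): number {
  if (delayMs <= HEARTBEAT_MAX_TIMEOUT_MS) return delayMs;
  if (!warnedAboutClamp) {
    warnedAboutClamp = true;
    warn(
      `heartbeat delay ${delayMs}ms exceeds the setTimeout cap; ` +
        `clamping to ${HEARTBEAT_MAX_TIMEOUT_MS}ms (~24.85 days)`,
    );
  }
  return HEARTBEAT_MAX_TIMEOUT_MS;
}

function scheduleNext(delayMs: number, run: () => void, warn: (msg: string) => void): NodeJS.Timeout {
  // Before the fix this passed delayMs through unchecked; an oversized value
  // made Node fire after 1 ms and the runner re-armed in a tight loop.
  return setTimeout(run, clampTimeoutDelay(delayMs, warn));
}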
71 lines · 1.9 KiB · TypeScript
import { afterEach, describe, expect, it, vi } from "vitest";
import type { OpenClawConfig } from "../config/config.js";

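// Build a config where both the defaults and the single agent use the same interval.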
function createHeartbeatConfig(every: string): OpenClawConfig {
  return {
    agents: {
      defaults: { heartbeat: { every } },
      list: [{ id: "main", heartbeat: { every } }],
    },
  } as OpenClawConfig;
}

describe("startHeartbeatRunner timeout overflow warnings", () => {
|
|
afterEach(() => {
|
|
vi.useRealTimers();
|
|
vi.resetModules();
|
|
vi.restoreAllMocks();
|
|
});
|
|
|
|
it("warns once per runner lifetime when clamping an oversized scheduler delay", async () => {
|
|
const warn = vi.fn();
|
|
const noop = vi.fn();
|
|
const logger = {
|
|
subsystem: "gateway/heartbeat",
|
|
isEnabled: vi.fn(() => true),
|
|
trace: noop,
|
|
debug: noop,
|
|
info: noop,
|
|
warn,
|
|
error: noop,
|
|
fatal: noop,
|
|
raw: noop,
|
|
child: vi.fn(() => logger),
|
|
};
|
|
|
|
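    // Replace the subsystem logger factory so the runner logs through the spy logger above.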
vi.doMock("../logging/subsystem.js", async () => {
|
|
const actual =
|
|
await vi.importActual<typeof import("../logging/subsystem.js")>("../logging/subsystem.js");
|
|
return {
|
|
...actual,
|
|
createSubsystemLogger: vi.fn(() => logger),
|
|
};
|
|
});
|
|
|
|
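    // Import only after the mock is registered so both modules pick up the mocked logger.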
    const [{ startHeartbeatRunner }, { resetHeartbeatWakeStateForTests }] = await Promise.all([
      import("./heartbeat-runner.js"),
      import("./heartbeat-wake.js"),
    ]);

    vi.useFakeTimers();
    vi.setSystemTime(new Date(0));
    const cfg = createHeartbeatConfig("365d");
    const runnerA = startHeartbeatRunner({
      cfg,
      runOnce: vi.fn().mockResolvedValue({ status: "ran", durationMs: 1 }),
      stableSchedulerSeed: "seed-0",
    });
    const runnerB = startHeartbeatRunner({
      cfg,
      runOnce: vi.fn().mockResolvedValue({ status: "ran", durationMs: 1 }),
      stableSchedulerSeed: "seed-0",
    });

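    // Two runners share the oversized "365d" interval; each should warn exactly once.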
    expect(warn).toHaveBeenCalledTimes(2);

    runnerA.stop();
    runnerB.stop();
    resetHeartbeatWakeStateForTests();
  });
});
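The test above covers the warn-once contract; the commit message also describes the crash-loop regression test (advance 60s, assert the run spy never fires). A minimal sketch of that assertion under the same setup, with the runSpy name taken from the commit message and the test title invented here:

it("does not fire the clamped heartbeat within the first 60s", async () => {
  // Assumes the same subsystem-logger mock and afterEach cleanup as above.
  const { startHeartbeatRunner } = await import("./heartbeat-runner.js");

  vi.useFakeTimers();
  vi.setSystemTime(new Date(0));
  const runSpy = vi.fn().mockResolvedValue({ status: "ran", durationMs: 1 });
  const runner = startHeartbeatRunner({
    cfg: createHeartbeatConfig("365d"),
    runOnce: runSpy,
    stableSchedulerSeed: "seed-0",
  });

  // With the bug, the 1 ms-clamped timer re-armed in a loop and runSpy
  // would have been called roughly 60_000 times during this advance.
  await vi.advanceTimersByTimeAsync(60_000);
  expect(runSpy).not.toHaveBeenCalled();

  runner.stop();
});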