test: stabilize e2e module isolation

This commit is contained in:
Peter Steinberger
2026-03-23 11:46:10 +00:00
parent 4f92eaad37
commit e84ca730a3
9 changed files with 278 additions and 103 deletions

View File

@@ -1,5 +1,5 @@
import * as ssrf from "openclaw/plugin-sdk/infra-runtime";
import { afterEach, beforeAll, beforeEach, expect, vi, type Mock } from "vitest";
import { afterEach, beforeEach, expect, vi, type Mock } from "vitest";
import * as harness from "./bot.media.e2e-harness.js";
type StickerSpy = Mock<(...args: unknown[]) => unknown>;
@@ -17,8 +17,6 @@ export const TELEGRAM_TEST_TIMINGS = {
textFragmentGapMs: 30,
} as const;
const TELEGRAM_BOT_IMPORT_TIMEOUT_MS = process.platform === "win32" ? 180_000 : 150_000;
let createTelegramBotRef: typeof import("./bot.js").createTelegramBot;
let replySpyRef: ReturnType<typeof vi.fn>;
let onSpyRef: Mock;
@@ -117,21 +115,7 @@ export function watchTelegramFetch(): FetchMockHandle {
return createFetchMockHandle();
}
beforeEach(() => {
vi.useRealTimers();
lookupMock.mockResolvedValue([{ address: "93.184.216.34", family: 4 }]);
resolvePinnedHostnameSpy = vi
.spyOn(ssrf, "resolvePinnedHostname")
.mockImplementation((hostname) => resolvePinnedHostname(hostname, lookupMock));
});
afterEach(() => {
lookupMock.mockClear();
resolvePinnedHostnameSpy?.mockRestore();
resolvePinnedHostnameSpy = null;
});
beforeAll(async () => {
async function loadTelegramBotHarness() {
onSpyRef = harness.onSpy;
sendChatActionSpyRef = harness.sendChatActionSpy;
fetchRemoteMediaSpyRef = harness.fetchRemoteMediaSpy;
@@ -150,7 +134,23 @@ beforeAll(async () => {
});
const replyModule = await import("openclaw/plugin-sdk/reply-runtime");
replySpyRef = (replyModule as unknown as { __replySpy: ReturnType<typeof vi.fn> }).__replySpy;
}, TELEGRAM_BOT_IMPORT_TIMEOUT_MS);
}
// Re-load the telegram bot harness for every test so module-level state
// (spies, caches) cannot leak between tests. resetModules() must run before
// loadTelegramBotHarness() so the dynamic imports get fresh module instances.
beforeEach(async () => {
vi.resetModules();
await loadTelegramBotHarness();
// Some suites enable fake timers; restore real timers by default.
vi.useRealTimers();
// Pin DNS resolution to a stable public address so SSRF checks pass deterministically.
lookupMock.mockResolvedValue([{ address: "93.184.216.34", family: 4 }]);
resolvePinnedHostnameSpy = vi
.spyOn(ssrf, "resolvePinnedHostname")
.mockImplementation((hostname) => resolvePinnedHostname(hostname, lookupMock));
});
// Undo the per-test DNS spy and clear recorded lookups so the next test starts clean.
afterEach(() => {
lookupMock.mockClear();
resolvePinnedHostnameSpy?.mockRestore();
resolvePinnedHostnameSpy = null;
});
vi.mock("./sticker-cache.js", async (importOriginal) => {
const actual = await importOriginal<typeof import("./sticker-cache.js")>();

View File

@@ -47,9 +47,50 @@ vi.mock("./models-config.js", async (importOriginal) => {
};
});
// Registers vi.doMock stubs for the embedded-runner dependency graph.
// Must be called after vi.resetModules() and BEFORE importing
// ./pi-embedded-runner/run.js, because vi.doMock only affects imports that
// happen afterwards (unlike hoisted vi.mock).
const installRunEmbeddedMocks = () => {
// No global hook runner in tests.
vi.doMock("../plugins/hook-runner-global.js", () => ({
getGlobalHookRunner: vi.fn(() => undefined),
}));
// Context engine resolves to a no-op disposable.
vi.doMock("../context-engine/index.js", () => ({
ensureContextEnginesInitialized: vi.fn(),
resolveContextEngine: vi.fn(async () => ({
dispose: async () => undefined,
})),
}));
vi.doMock("./runtime-plugins.js", () => ({
ensureRuntimePluginsLoaded: vi.fn(),
}));
// Model resolution returns a synthetic zero-cost model so no network or
// registry lookup happens during the run.
vi.doMock("./pi-embedded-runner/model.js", () => ({
resolveModelAsync: async (provider: string, modelId: string) => ({
model: {
id: modelId,
name: modelId,
api: "openai-responses",
provider,
baseUrl: `https://example.com/${provider}`,
reasoning: false,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 16_000,
maxTokens: 2048,
},
error: undefined,
authStorage: {
setRuntimeApiKey: vi.fn(),
},
modelRegistry: {},
}),
}));
// Provider auth preparation is a no-op.
vi.doMock("../plugins/provider-runtime.js", () => ({
prepareProviderRuntimeAuth: vi.fn(async () => undefined),
}));
};
// Resolved lazily in beforeAll so the import happens only after the doMock
// stubs above are installed.
let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent;
beforeAll(async () => {
// Fresh module graph, then install mocks, then import — order matters for vi.doMock.
vi.resetModules();
installRunEmbeddedMocks();
({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js"));
});
@@ -233,6 +274,7 @@ async function runEmbeddedFallback(params: {
timeoutMs: 5_000,
runId: params.runId,
abortSignal: params.abortSignal,
enqueue: async (task) => await task(),
}),
});
}

View File

@@ -1,7 +1,9 @@
import fs from "node:fs/promises";
import path from "node:path";
import "./test-helpers/fast-coding-tools.js";
import { afterAll, beforeAll, describe, expect, it, vi } from "vitest";
import type { AssistantMessage } from "@mariozechner/pi-ai";
import { afterAll, beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
import type { EmbeddedRunAttemptResult } from "./pi-embedded-runner/run/types.js";
import {
cleanupEmbeddedPiRunnerTestWorkspace,
createEmbeddedPiRunnerOpenAiConfig,
@@ -10,6 +12,8 @@ import {
immediateEnqueue,
} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js";
const runEmbeddedAttemptMock = vi.fn<(params: unknown) => Promise<EmbeddedRunAttemptResult>>();
function createMockUsage(input: number, output: number) {
return {
input,
@@ -85,6 +89,67 @@ vi.mock("@mariozechner/pi-ai", async (importOriginal) => {
};
});
// Registers vi.doMock stubs for the embedded-runner dependency graph.
// This variant preserves the real module surface via importOriginal and only
// overrides the entry points the tests need to control. Must run after
// vi.resetModules() and before importing ./pi-embedded-runner/run.js.
const installRunEmbeddedMocks = () => {
// Disable the global hook/plugin machinery entirely.
vi.doMock("../plugins/hook-runner-global.js", () => ({
getGlobalHookRunner: vi.fn(() => undefined),
getGlobalPluginRegistry: vi.fn(() => null),
hasGlobalHooks: vi.fn(() => false),
initializeGlobalHookRunner: vi.fn(),
resetGlobalHookRunner: vi.fn(),
}));
// Context engine resolves to a no-op disposable.
vi.doMock("../context-engine/index.js", () => ({
ensureContextEnginesInitialized: vi.fn(),
resolveContextEngine: vi.fn(async () => ({
dispose: async () => undefined,
})),
}));
vi.doMock("./runtime-plugins.js", () => ({
ensureRuntimePluginsLoaded: vi.fn(),
}));
// Route every attempt through the suite-level spy so tests can script results.
vi.doMock("./pi-embedded-runner/run/attempt.js", () => ({
runEmbeddedAttempt: (params: unknown) => runEmbeddedAttemptMock(params),
}));
// Keep the real model module but replace resolveModelAsync with a synthetic
// zero-cost model so no registry/network lookup happens.
vi.doMock("./pi-embedded-runner/model.js", async (importOriginal) => {
const actual = await importOriginal<typeof import("./pi-embedded-runner/model.js")>();
return {
...actual,
resolveModelAsync: async (provider: string, modelId: string) => ({
model: {
id: modelId,
name: modelId,
api: "openai-responses",
provider,
baseUrl: `https://example.com/${provider}`,
reasoning: false,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 16_000,
maxTokens: 2048,
},
error: undefined,
authStorage: {
setRuntimeApiKey: vi.fn(),
},
modelRegistry: {},
}),
};
});
// Provider auth preparation is a no-op, everything else stays real.
vi.doMock("../plugins/provider-runtime.js", async (importOriginal) => {
const actual = await importOriginal<typeof import("../plugins/provider-runtime.js")>();
return {
...actual,
prepareProviderRuntimeAuth: vi.fn(async () => undefined),
};
});
// Avoid touching the on-disk models.json during tests.
vi.doMock("./models-config.js", async (importOriginal) => {
const mod = await importOriginal<typeof import("./models-config.js")>();
return {
...mod,
ensureOpenClawModelsJson: vi.fn(async () => ({ wrote: false })),
};
});
};
let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent;
let SessionManager: typeof import("@mariozechner/pi-coding-agent").SessionManager;
let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined;
@@ -95,6 +160,8 @@ let runCounter = 0;
beforeAll(async () => {
vi.useRealTimers();
vi.resetModules();
installRunEmbeddedMocks();
({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js"));
({ SessionManager } = await import("@mariozechner/pi-coding-agent"));
e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-embedded-agent-");
@@ -106,6 +173,14 @@ afterAll(async () => {
e2eWorkspace = undefined;
});
// Reset the attempt spy before every test. The default implementation throws
// so a test that forgets to queue a mockResolvedValueOnce fails loudly instead
// of silently consuming a stale scripted result.
beforeEach(() => {
vi.useRealTimers();
runEmbeddedAttemptMock.mockReset();
runEmbeddedAttemptMock.mockImplementation(async () => {
throw new Error("unexpected extra runEmbeddedAttempt call");
});
});
const nextSessionFile = () => {
sessionCounter += 1;
return path.join(workspaceDir, `session-${sessionCounter}.jsonl`);
@@ -113,6 +188,46 @@ const nextSessionFile = () => {
const nextRunId = (prefix = "run-embedded-test") => `${prefix}-${++runCounter}`;
const nextSessionKey = () => `agent:test:embedded:${nextRunId("session-key")}`;
// Zeroed token-usage record shared by every mock assistant message; tests
// here do not assert on usage accounting.
const baseUsage = {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
totalTokens: 0,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
};
// Builds a minimal AssistantMessage with sane defaults; callers override only
// the fields a given test cares about (typically `content`).
const buildAssistant = (overrides: Partial<AssistantMessage>): AssistantMessage => ({
role: "assistant",
content: [],
api: "openai-responses",
provider: "openai",
model: "mock-1",
usage: baseUsage,
stopReason: "stop",
timestamp: Date.now(),
...overrides,
});
// Builds an EmbeddedRunAttemptResult representing a clean, uneventful attempt;
// tests override individual fields (e.g. promptError, assistantTexts) to
// script the outcome fed to runEmbeddedAttemptMock.
const makeAttempt = (overrides: Partial<EmbeddedRunAttemptResult>): EmbeddedRunAttemptResult => ({
aborted: false,
timedOut: false,
timedOutDuringCompaction: false,
promptError: null,
sessionIdUsed: "session:test",
systemPromptReport: undefined,
messagesSnapshot: [],
assistantTexts: [],
toolMetas: [],
lastAssistant: undefined,
didSendViaMessagingTool: false,
messagingToolSentTexts: [],
messagingToolSentMediaUrls: [],
messagingToolSentTargets: [],
cloudCodeAssistFormatError: false,
...overrides,
});
const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string) => {
const sessionFile = nextSessionFile();
const sessionManager = SessionManager.open(sessionFile);
@@ -122,6 +237,15 @@ const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string
timestamp: Date.now(),
});
runEmbeddedAttemptMock.mockResolvedValueOnce(
makeAttempt({
assistantTexts: ["ok"],
lastAssistant: buildAssistant({
content: [{ type: "text", text: "ok" }],
}),
}),
);
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-1"]);
return await runEmbeddedPiAgent({
sessionId: "session:test",
@@ -168,6 +292,14 @@ const readSessionMessages = async (sessionFile: string) => {
const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessionKey: string) => {
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]);
runEmbeddedAttemptMock.mockResolvedValueOnce(
makeAttempt({
assistantTexts: ["ok"],
lastAssistant: buildAssistant({
content: [{ type: "text", text: "ok" }],
}),
}),
);
await runEmbeddedPiAgent({
sessionId: "session:test",
sessionKey,
@@ -189,21 +321,27 @@ describe("runEmbeddedPiAgent", () => {
const sessionFile = nextSessionFile();
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]);
const sessionKey = nextSessionKey();
const result = await runEmbeddedPiAgent({
sessionId: "session:test",
sessionKey,
sessionFile,
workspaceDir,
config: cfg,
prompt: "boom",
provider: "openai",
model: "mock-error",
timeoutMs: 5_000,
agentDir,
runId: nextRunId("prompt-error"),
enqueue: immediateEnqueue,
});
expect(result.payloads?.[0]?.isError).toBe(true);
runEmbeddedAttemptMock.mockResolvedValueOnce(
makeAttempt({
promptError: new Error("boom"),
}),
);
await expect(
runEmbeddedPiAgent({
sessionId: "session:test",
sessionKey,
sessionFile,
workspaceDir,
config: cfg,
prompt: "boom",
provider: "openai",
model: "mock-error",
timeoutMs: 5_000,
agentDir,
runId: nextRunId("prompt-error"),
enqueue: immediateEnqueue,
}),
).rejects.toThrow("boom");
try {
const messages = await readSessionMessages(sessionFile);

View File

@@ -58,7 +58,7 @@ const installRunEmbeddedMocks = () => {
vi.doMock("./pi-embedded-runner/run/attempt.js", () => ({
runEmbeddedAttempt: (params: unknown) => runEmbeddedAttemptMock(params),
}));
vi.doMock("../plugins/provider-runtime.runtime.js", () => ({
vi.doMock("../plugins/provider-runtime.js", () => ({
prepareProviderRuntimeAuth: async (params: {
provider: string;
context: { apiKey: string };

View File

@@ -1,7 +1,7 @@
import { EventEmitter } from "node:events";
import path from "node:path";
import { Readable } from "node:stream";
import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
import { beforeEach, describe, expect, it, vi } from "vitest";
import type { OpenClawConfig } from "../config/config.js";
import { createRestrictedAgentSandboxConfig } from "./test-helpers/sandbox-agent-config-fixtures.js";
@@ -119,7 +119,8 @@ function createWorkSetupCommandConfig(scope: "agent" | "shared"): OpenClawConfig
}
describe("Agent-specific sandbox config", () => {
beforeAll(async () => {
beforeEach(async () => {
vi.resetModules();
const [configModule, contextModule, runtimeModule] = await Promise.all([
import("./sandbox/config.js"),
import("./sandbox/context.js"),
@@ -128,9 +129,6 @@ describe("Agent-specific sandbox config", () => {
({ resolveSandboxConfigForAgent } = configModule);
({ resolveSandboxContext } = contextModule);
({ resolveSandboxRuntimeStatus } = runtimeModule);
});
beforeEach(() => {
spawnCalls.length = 0;
});

View File

@@ -1,9 +1,10 @@
import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
const noop = () => {};
const loadConfigMock = vi.fn(() => ({
let currentConfig = {
agents: { defaults: { subagents: { archiveAfterMinutes: 60 } } },
}));
};
const loadConfigMock = vi.fn(() => currentConfig);
vi.mock("../gateway/call.js", () => ({
callGateway: vi.fn(async (request: unknown) => {
@@ -44,16 +45,15 @@ vi.mock("./subagent-registry.store.js", () => ({
describe("subagent registry archive behavior", () => {
let mod: typeof import("./subagent-registry.js");
beforeAll(async () => {
mod = await import("./subagent-registry.js");
});
beforeEach(() => {
beforeEach(async () => {
vi.resetModules();
vi.useFakeTimers();
vi.setSystemTime(new Date("2026-01-01T00:00:00Z"));
loadConfigMock.mockReturnValue({
currentConfig = {
agents: { defaults: { subagents: { archiveAfterMinutes: 60 } } },
});
};
loadConfigMock.mockClear();
mod = await import("./subagent-registry.js");
});
afterEach(() => {
@@ -78,9 +78,9 @@ describe("subagent registry archive behavior", () => {
});
it("sets archiveAtMs and sweeps delete-mode run subagents", async () => {
loadConfigMock.mockReturnValue({
currentConfig = {
agents: { defaults: { subagents: { archiveAfterMinutes: 1 } } },
});
};
mod.registerSubagentRun({
runId: "run-delete-1",
@@ -140,9 +140,9 @@ describe("subagent registry archive behavior", () => {
});
it("recomputes archiveAtMs when replacing a delete-mode run after steer restart", async () => {
loadConfigMock.mockReturnValue({
currentConfig = {
agents: { defaults: { subagents: { archiveAfterMinutes: 1 } } },
});
};
mod.registerSubagentRun({
runId: "run-delete-old",
@@ -168,9 +168,9 @@ describe("subagent registry archive behavior", () => {
});
it("treats archiveAfterMinutes=0 as never archive", () => {
loadConfigMock.mockReturnValue({
currentConfig = {
agents: { defaults: { subagents: { archiveAfterMinutes: 0 } } },
});
};
mod.registerSubagentRun({
runId: "run-no-archive",

View File

@@ -1,4 +1,4 @@
import { beforeAll, describe, expect, it, vi } from "vitest";
import { beforeEach, describe, expect, it, vi } from "vitest";
import {
arrangeLegacyStateMigrationTest,
confirm,
@@ -14,7 +14,8 @@ let doctorCommand: typeof import("./doctor.js").doctorCommand;
let healthCommand: typeof import("./health.js").healthCommand;
describe("doctor command", () => {
beforeAll(async () => {
beforeEach(async () => {
vi.resetModules();
({ doctorCommand } = await import("./doctor.js"));
({ healthCommand } = await import("./health.js"));
});

View File

@@ -1,17 +1,25 @@
import fs from "node:fs";
import os from "node:os";
import path from "node:path";
import { beforeAll, describe, expect, it, vi } from "vitest";
import { createDoctorRuntime, mockDoctorConfigSnapshot, note } from "./doctor.e2e-harness.js";
import { beforeEach, describe, expect, it, vi } from "vitest";
import { createDoctorRuntime, mockDoctorConfigSnapshot } from "./doctor.e2e-harness.js";
import "./doctor.fast-path-mocks.js";
const terminalNoteMock = vi.fn();
vi.mock("../terminal/note.js", () => ({
note: (...args: unknown[]) => terminalNoteMock(...args),
}));
vi.doUnmock("./doctor-sandbox.js");
let doctorCommand: typeof import("./doctor.js").doctorCommand;
describe("doctor command", () => {
beforeAll(async () => {
beforeEach(async () => {
vi.resetModules();
({ doctorCommand } = await import("./doctor.js"));
terminalNoteMock.mockClear();
});
it("warns when per-agent sandbox docker/browser/prune overrides are ignored under shared scope", async () => {
@@ -41,12 +49,10 @@ describe("doctor command", () => {
},
});
note.mockClear();
await doctorCommand(createDoctorRuntime(), { nonInteractive: true });
expect(
note.mock.calls.some(([message, title]) => {
terminalNoteMock.mock.calls.some(([message, title]) => {
if (title !== "Sandbox" || typeof message !== "string") {
return false;
}
@@ -66,7 +72,6 @@ describe("doctor command", () => {
},
});
note.mockClear();
const homedirSpy = vi.spyOn(os, "homedir").mockReturnValue("/Users/steipete");
const realExists = fs.existsSync;
const legacyPath = path.join("/Users/steipete", "openclaw");
@@ -84,7 +89,9 @@ describe("doctor command", () => {
await doctorCommand(createDoctorRuntime(), { nonInteractive: true });
expect(note.mock.calls.some(([_, title]) => title === "Extra workspace")).toBe(false);
expect(terminalNoteMock.mock.calls.some(([_, title]) => title === "Extra workspace")).toBe(
false,
);
homedirSpy.mockRestore();
existsSpy.mockRestore();

View File

@@ -1,31 +1,28 @@
import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
import { beforeEach, describe, expect, it, vi } from "vitest";
const readConfigFileSnapshot = vi.fn();
const writeConfigFile = vi.fn().mockResolvedValue(undefined);
const loadConfig = vi.fn().mockReturnValue({});
const mocks = vi.hoisted(() => ({
currentConfig: {} as Record<string, unknown>,
writtenConfig: undefined as Record<string, unknown> | undefined,
}));
vi.mock("../config/config.js", async (importOriginal) => {
const actual = await importOriginal<typeof import("../config/config.js")>();
vi.mock("./models/shared.js", async (importOriginal) => {
const actual = await importOriginal<typeof import("./models/shared.js")>();
return {
...actual,
CONFIG_PATH: "/tmp/openclaw.json",
readConfigFileSnapshot,
writeConfigFile,
loadConfig,
updateConfig: async (mutator: (cfg: Record<string, unknown>) => Record<string, unknown>) => {
const next = mutator(JSON.parse(JSON.stringify(mocks.currentConfig)));
mocks.writtenConfig = next;
return next;
},
};
});
import { modelsFallbacksAddCommand } from "./models/fallbacks.js";
import { modelsSetCommand } from "./models/set.js";
function mockConfigSnapshot(config: Record<string, unknown> = {}) {
readConfigFileSnapshot.mockResolvedValue({
path: "/tmp/openclaw.json",
exists: true,
raw: "{}",
parsed: {},
valid: true,
config,
issues: [],
legacyIssues: [],
});
mocks.currentConfig = config;
mocks.writtenConfig = undefined;
}
function makeRuntime() {
@@ -33,11 +30,11 @@ function makeRuntime() {
}
function getWrittenConfig() {
return writeConfigFile.mock.calls[0]?.[0] as Record<string, unknown>;
return mocks.writtenConfig as Record<string, unknown>;
}
function expectWrittenPrimaryModel(model: string) {
expect(writeConfigFile).toHaveBeenCalledTimes(1);
expect(mocks.writtenConfig).toBeDefined();
const written = getWrittenConfig();
expect(written.agents).toEqual({
defaults: {
@@ -47,18 +44,10 @@ function expectWrittenPrimaryModel(model: string) {
});
}
let modelsSetCommand: typeof import("./models/set.js").modelsSetCommand;
let modelsFallbacksAddCommand: typeof import("./models/fallbacks.js").modelsFallbacksAddCommand;
describe("models set + fallbacks", () => {
beforeAll(async () => {
({ modelsSetCommand } = await import("./models/set.js"));
({ modelsFallbacksAddCommand } = await import("./models/fallbacks.js"));
});
beforeEach(() => {
readConfigFileSnapshot.mockClear();
writeConfigFile.mockClear();
mocks.currentConfig = {};
mocks.writtenConfig = undefined;
});
it("normalizes z.ai provider in models set", async () => {
@@ -76,7 +65,7 @@ describe("models set + fallbacks", () => {
await modelsFallbacksAddCommand("z-ai/glm-4.7", runtime);
expect(writeConfigFile).toHaveBeenCalledTimes(1);
expect(mocks.writtenConfig).toBeDefined();
const written = getWrittenConfig();
expect(written.agents).toEqual({
defaults: {
@@ -92,7 +81,7 @@ describe("models set + fallbacks", () => {
await modelsFallbacksAddCommand("anthropic/claude-opus-4-6", runtime);
expect(writeConfigFile).toHaveBeenCalledTimes(1);
expect(mocks.writtenConfig).toBeDefined();
const written = getWrittenConfig();
expect(written.agents).toEqual({
defaults: {
@@ -139,7 +128,7 @@ describe("models set + fallbacks", () => {
await modelsSetCommand("openrouter/hunter-alpha", runtime);
expect(writeConfigFile).toHaveBeenCalledTimes(1);
expect(mocks.writtenConfig).toBeDefined();
const written = getWrittenConfig();
expect(written.agents).toEqual({
defaults: {
@@ -159,7 +148,7 @@ describe("models set + fallbacks", () => {
await modelsSetCommand("anthropic/claude-opus-4-6", runtime);
expect(writeConfigFile).toHaveBeenCalledTimes(1);
expect(mocks.writtenConfig).toBeDefined();
const written = getWrittenConfig();
expect(written.agents).toEqual({
defaults: {