Tests: speed up QA lab startup

This commit is contained in:
Gustavo Madeira Santana
2026-04-17 22:19:17 -04:00
parent a09bf67fa5
commit a50ec27d3b
3 changed files with 80 additions and 30 deletions

View File

@@ -3,9 +3,11 @@ import { createServer } from "node:http";
import os from "node:os";
import path from "node:path";
import { setTimeout as sleep } from "node:timers/promises";
import { afterEach, describe, expect, it } from "vitest";
import { afterEach, describe, expect, it, vi } from "vitest";
import { startQaLabServer } from "./lab-server.js";
vi.mock("openclaw/plugin-sdk/qa-channel", async () => await import("../../qa-channel/api.js"));
const cleanups: Array<() => Promise<void>> = [];
afterEach(async () => {
@@ -79,6 +81,43 @@ async function waitForFile(filePath: string, timeoutMs = 5_000) {
throw new Error(`file did not appear: ${filePath}`);
}
async function createQaLabRepoRootFixture(params?: {
  uiHtml?: string;
  models?: Array<{
    key: string;
    name: string;
    input?: string;
    available?: boolean;
    missing?: boolean;
  }>;
}) {
  // Build a throwaway repo root containing the two artifacts the lab server
  // reads: a stub runner at dist/index.js that prints a JSON model list, and
  // a prebuilt web UI page. The temp dir is registered in `cleanups` so the
  // afterEach hook removes it.
  const repoRoot = await mkdtemp(path.join(os.tmpdir(), "qa-lab-repo-root-"));
  cleanups.push(async () => {
    await rm(repoRoot, { recursive: true, force: true });
  });

  for (const dir of ["dist", "extensions/qa-lab/web/dist"]) {
    await mkdir(path.join(repoRoot, dir), { recursive: true });
  }

  // Fill in per-model defaults so callers only spell out what they care about.
  const models = (params?.models ?? []).map((model) => ({
    key: model.key,
    name: model.name,
    input: model.input ?? model.key,
    available: model.available ?? true,
    missing: model.missing ?? false,
  }));

  // Double-stringify: the inner call builds the discovery payload, the outer
  // one turns it into a JS string literal embedded in the stub's source.
  const payloadLiteral = JSON.stringify(JSON.stringify({ models }));
  await writeFile(
    path.join(repoRoot, "dist/index.js"),
    `process.stdout.write(${payloadLiteral});\n`,
    "utf8",
  );

  const uiHtml =
    params?.uiHtml ?? "<!doctype html><html><body>qa lab fixture</body></html>";
  await writeFile(
    path.join(repoRoot, "extensions/qa-lab/web/dist/index.html"),
    uiHtml,
    "utf8",
  );

  return repoRoot;
}
describe("qa-lab server", () => {
it("serves bootstrap state and writes a self-check report", async () => {
const tempDir = await mkdtemp(path.join(os.tmpdir(), "qa-lab-test-"));
@@ -86,11 +125,13 @@ describe("qa-lab server", () => {
await rm(tempDir, { recursive: true, force: true });
});
const outputPath = path.join(tempDir, "self-check.md");
const repoRoot = await createQaLabRepoRootFixture();
const lab = await startQaLabServer({
host: "127.0.0.1",
port: 0,
outputPath,
repoRoot,
controlUiUrl: "http://127.0.0.1:18789/",
controlUiToken: "qa-token",
});
@@ -299,32 +340,16 @@ describe("qa-lab server", () => {
});
it("uses the explicit repo root for ui assets and runner model discovery", async () => {
const repoRoot = await mkdtemp(path.join(os.tmpdir(), "qa-lab-repo-root-"));
cleanups.push(async () => {
await rm(repoRoot, { recursive: true, force: true });
const repoRoot = await createQaLabRepoRootFixture({
models: [
{
key: "anthropic/qa-temp-model",
name: "QA Temp Model",
},
],
uiHtml:
"<!doctype html><html><head><title>Temp QA Lab UI</title></head><body>repo-root-ui</body></html>",
});
await mkdir(path.join(repoRoot, "dist"), { recursive: true });
await mkdir(path.join(repoRoot, "extensions/qa-lab/web/dist"), { recursive: true });
await writeFile(
path.join(repoRoot, "dist/index.js"),
[
"process.stdout.write(JSON.stringify({",
" models: [{",
' key: "anthropic/qa-temp-model",',
' name: "QA Temp Model",',
' input: "anthropic/qa-temp-model",',
" available: true,",
" missing: false,",
" }],",
"}));",
].join("\n"),
"utf8",
);
await writeFile(
path.join(repoRoot, "extensions/qa-lab/web/dist/index.html"),
"<!doctype html><html><head><title>Temp QA Lab UI</title></head><body>repo-root-ui</body></html>",
"utf8",
);
const lab = await startQaLabServer({
host: "127.0.0.1",

View File

@@ -98,6 +98,18 @@ describe("qa run config", () => {
).toEqual(["dm-chat-baseline", "thread-lifecycle"]);
});
it("keeps idle snapshots on static defaults so startup does not inspect auth profiles", () => {
  // Prime the runtime resolver with a sentinel value: if the idle snapshot
  // consulted it, the selection below could not match the static defaults.
  defaultQaRuntimeModelForMode.mockReturnValue("openai-codex/gpt-5.4");
  defaultQaRuntimeModelForMode.mockClear();

  const { selection } = createIdleQaRunnerSnapshot(scenarios);
  expect(selection).toMatchObject({
    providerMode: "live-frontier",
    primaryModel: "openai/gpt-5.4",
    alternateModel: "openai/gpt-5.4",
  });

  // Static defaults mean the runtime (auth-profile-reading) path never runs.
  expect(defaultQaRuntimeModelForMode).not.toHaveBeenCalled();
});
it("normalizes aimock selections", () => {
expect(
normalizeQaRunSelection(

View File

@@ -1,4 +1,5 @@
import path from "node:path";
import { defaultQaModelForMode as defaultStaticQaModelForMode } from "./model-selection.js";
import { defaultQaRuntimeModelForMode } from "./model-selection.runtime.js";
import {
DEFAULT_QA_LIVE_PROVIDER_MODE,
@@ -40,12 +41,22 @@ export function defaultQaModelForMode(mode: QaProviderMode, alternate = false) {
return defaultQaRuntimeModelForMode(mode, alternate ? { alternate: true } : undefined);
}
export function createDefaultQaRunSelection(scenarios: QaSeedScenario[]): QaLabRunSelection {
type QaDefaultModelResolver = (mode: QaProviderMode, alternate?: boolean) => string;
// Adapter: bridges the (mode, alternate) boolean-flag signature expected by
// QaDefaultModelResolver callers to the options-object signature of the
// static model table, without touching runtime auth-profile discovery.
function defaultStaticModelForMode(mode: QaProviderMode, alternate = false) {
  const options = alternate ? { alternate: true } : undefined;
  return defaultStaticQaModelForMode(mode, options);
}
export function createDefaultQaRunSelection(
scenarios: QaSeedScenario[],
options?: { resolveDefaultModel?: QaDefaultModelResolver },
): QaLabRunSelection {
const providerMode: QaProviderMode = DEFAULT_QA_LIVE_PROVIDER_MODE;
const resolveDefaultModel = options?.resolveDefaultModel ?? defaultQaModelForMode;
return {
providerMode,
primaryModel: defaultQaModelForMode(providerMode),
alternateModel: defaultQaModelForMode(providerMode, true),
primaryModel: resolveDefaultModel(providerMode),
alternateModel: resolveDefaultModel(providerMode, true),
fastMode: true,
scenarioIds: scenarios.map((scenario) => scenario.id),
};
@@ -101,7 +112,9 @@ export function normalizeQaRunSelection(
export function createIdleQaRunnerSnapshot(scenarios: QaSeedScenario[]): QaLabRunnerSnapshot {
return {
status: "idle",
selection: createDefaultQaRunSelection(scenarios),
selection: createDefaultQaRunSelection(scenarios, {
resolveDefaultModel: defaultStaticModelForMode,
}),
artifacts: null,
error: null,
};