test: update QA parity fixtures for GPT-5.5

Peter Steinberger
2026-04-25 18:05:13 +01:00
parent 39343088ed
commit 6b3e4b88d6
59 changed files with 407 additions and 399 deletions

View File

@@ -74,7 +74,7 @@ describe("qa agentic parity report", () => {
it("fails the parity gate when the candidate regresses against baseline", () => {
const comparison = buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5",
baselineLabel: "anthropic/claude-opus-4-6",
candidateSummary: {
scenarios: [
@@ -103,10 +103,10 @@ describe("qa agentic parity report", () => {
expect(comparison.pass).toBe(false);
expect(comparison.failures).toContain(
"openai/gpt-5.4 completion rate 80.0% is below anthropic/claude-opus-4-6 100.0%.",
"openai/gpt-5.5 completion rate 80.0% is below anthropic/claude-opus-4-6 100.0%.",
);
expect(comparison.failures).toContain(
"openai/gpt-5.4 unintended-stop rate 20.0% exceeds anthropic/claude-opus-4-6 0.0%.",
"openai/gpt-5.5 unintended-stop rate 20.0% exceeds anthropic/claude-opus-4-6 0.0%.",
);
});
@@ -120,7 +120,7 @@ describe("qa agentic parity report", () => {
{ name: "Extra non-parity lane", status: "pass" as const },
];
const comparison = buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5",
baselineLabel: "anthropic/claude-opus-4-6",
candidateSummary: {
scenarios: baselineScenarios.filter(
@@ -133,13 +133,13 @@ describe("qa agentic parity report", () => {
expect(comparison.pass).toBe(false);
expect(comparison.failures).toContain(
"Scenario coverage mismatch for Extra non-parity lane: openai/gpt-5.4=missing, anthropic/claude-opus-4-6=pass.",
"Scenario coverage mismatch for Extra non-parity lane: openai/gpt-5.5=missing, anthropic/claude-opus-4-6=pass.",
);
});
it("reports each missing required parity scenario exactly once (no double-counting)", () => {
const comparison = buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5",
baselineLabel: "anthropic/claude-opus-4-6",
candidateSummary: {
scenarios: [{ name: "Approval turn tool followthrough", status: "pass" }],
@@ -181,7 +181,7 @@ describe("qa agentic parity report", () => {
};
const comparison = buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5",
baselineLabel: "anthropic/claude-opus-4-6",
candidateSummary: summaryWithExtras,
baselineSummary: scopedSummary,
@@ -203,7 +203,7 @@ describe("qa agentic parity report", () => {
it("fails the parity gate when required parity scenarios are missing on both sides", () => {
const comparison = buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5",
baselineLabel: "anthropic/claude-opus-4-6",
candidateSummary: {
scenarios: [{ name: "Approval turn tool followthrough", status: "pass" }],
@@ -216,13 +216,13 @@ describe("qa agentic parity report", () => {
expect(comparison.pass).toBe(false);
expect(comparison.failures).toContain(
"Missing required parity scenario coverage for Image understanding from attachment: openai/gpt-5.4=missing, anthropic/claude-opus-4-6=missing.",
"Missing required parity scenario coverage for Image understanding from attachment: openai/gpt-5.5=missing, anthropic/claude-opus-4-6=missing.",
);
});
it("fails the parity gate when required parity scenarios are skipped", () => {
const comparison = buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5",
baselineLabel: "anthropic/claude-opus-4-6",
candidateSummary: {
scenarios: [
@@ -247,7 +247,7 @@ describe("qa agentic parity report", () => {
expect(comparison.pass).toBe(false);
expect(comparison.failures).toContain(
"Missing required parity scenario coverage for Compaction retry after mutating tool: openai/gpt-5.4=skip, anthropic/claude-opus-4-6=skip.",
"Missing required parity scenario coverage for Compaction retry after mutating tool: openai/gpt-5.5=skip, anthropic/claude-opus-4-6=skip.",
);
});
@@ -263,7 +263,7 @@ describe("qa agentic parity report", () => {
status: "fail",
});
const comparison = buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5",
baselineLabel: "anthropic/claude-opus-4-6",
candidateSummary: { scenarios: scenariosWithBothFail },
baselineSummary: { scenarios: scenariosWithBothFail },
@@ -272,7 +272,7 @@ describe("qa agentic parity report", () => {
expect(comparison.pass).toBe(false);
expect(comparison.failures).toContain(
"Required parity scenario Approval turn tool followthrough failed: openai/gpt-5.4=fail, anthropic/claude-opus-4-6=fail.",
"Required parity scenario Approval turn tool followthrough failed: openai/gpt-5.5=fail, anthropic/claude-opus-4-6=fail.",
);
// Metric comparisons are relative, so a same-on-both-sides failure
// must not appear as a relative metric failure. The required-scenario
@@ -289,7 +289,7 @@ describe("qa agentic parity report", () => {
status: "fail",
});
const comparison = buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5",
baselineLabel: "anthropic/claude-opus-4-6",
candidateSummary: { scenarios: candidateWithOneFail },
baselineSummary: { scenarios: FULL_PARITY_PASS_SCENARIOS },
@@ -298,7 +298,7 @@ describe("qa agentic parity report", () => {
expect(comparison.pass).toBe(false);
expect(comparison.failures).toContain(
"Required parity scenario Approval turn tool followthrough failed: openai/gpt-5.4=fail, anthropic/claude-opus-4-6=pass.",
"Required parity scenario Approval turn tool followthrough failed: openai/gpt-5.5=fail, anthropic/claude-opus-4-6=pass.",
);
});
@@ -306,7 +306,7 @@ describe("qa agentic parity report", () => {
// Cover the full second-wave pack on both sides so the suspicious-pass assertion
// below is the isolated gate failure under test (no coverage-gap noise).
const comparison = buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5",
baselineLabel: "anthropic/claude-opus-4-6",
candidateSummary: {
scenarios: FULL_PARITY_PASS_SCENARIOS,
@@ -490,7 +490,7 @@ status=done`,
expect(() =>
buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5",
baselineLabel: "anthropic/claude-opus-4-6",
candidateSummary: {
scenarios: parityPassScenarios,
@@ -512,7 +512,7 @@ status=done`,
expect(() =>
buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5",
baselineLabel: "anthropic/claude-opus-4-6",
candidateSummary: {
scenarios: parityPassScenarios,
@@ -520,25 +520,25 @@ status=done`,
},
baselineSummary: {
scenarios: parityPassScenarios,
-run: { primaryProvider: "openai", primaryModel: "gpt-5.4" },
+run: { primaryProvider: "openai", primaryModel: "gpt-5.5" },
},
comparedAt: "2026-04-11T00:00:00.000Z",
}),
).toThrow(
-/baseline summary run\.primaryProvider=openai and run\.primaryModel=gpt-5\.4 do not match --baseline-label/,
+/baseline summary run\.primaryProvider=openai and run\.primaryModel=gpt-5\.5 do not match --baseline-label/,
);
});
it("accepts matching run.primaryProvider labels without throwing", () => {
const comparison = buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5",
baselineLabel: "anthropic/claude-opus-4-6",
candidateSummary: {
scenarios: FULL_PARITY_PASS_SCENARIOS,
run: {
primaryProvider: "openai",
primaryModel: "openai/gpt-5.4",
primaryModelName: "gpt-5.4",
primaryModel: "openai/gpt-5.5",
primaryModelName: "gpt-5.5",
},
},
baselineSummary: {
@@ -558,7 +558,7 @@ status=done`,
// Pre-PR-L summaries don't carry a `run` block. The gate must still
// work against those, trusting the caller-supplied label.
const comparison = buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5",
baselineLabel: "anthropic/claude-opus-4-6",
candidateSummary: { scenarios: FULL_PARITY_PASS_SCENARIOS },
baselineSummary: { scenarios: FULL_PARITY_PASS_SCENARIOS },
@@ -569,14 +569,14 @@ status=done`,
it("skips provider verification for arbitrary display labels when run metadata is present", () => {
const comparison = buildQaAgenticParityComparison({
candidateLabel: "GPT-5.4 candidate",
candidateLabel: "GPT-5.5 candidate",
baselineLabel: "Opus 4.6 baseline",
candidateSummary: {
scenarios: FULL_PARITY_PASS_SCENARIOS,
run: {
primaryProvider: "openai",
primaryModel: "openai/gpt-5.4",
primaryModelName: "gpt-5.4",
primaryModel: "openai/gpt-5.5",
primaryModelName: "gpt-5.5",
},
},
baselineSummary: {
@@ -595,14 +595,14 @@ status=done`,
it("skips provider verification for mixed-case or decorated display labels", () => {
const comparison = buildQaAgenticParityComparison({
candidateLabel: "Candidate: GPT-5.4",
candidateLabel: "Candidate: GPT-5.5",
baselineLabel: "Opus 4.6 / baseline",
candidateSummary: {
scenarios: FULL_PARITY_PASS_SCENARIOS,
run: {
primaryProvider: "openai",
primaryModel: "openai/gpt-5.4",
primaryModelName: "gpt-5.4",
primaryModel: "openai/gpt-5.5",
primaryModelName: "gpt-5.5",
},
},
baselineSummary: {
@@ -622,14 +622,14 @@ status=done`,
it("throws when a structured label mismatches the recorded model even if the provider matches", () => {
expect(() =>
buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5",
baselineLabel: "anthropic/claude-opus-4-6",
candidateSummary: {
scenarios: FULL_PARITY_PASS_SCENARIOS,
run: {
primaryProvider: "openai",
primaryModel: "openai/gpt-5.4-alt",
primaryModelName: "gpt-5.4-alt",
primaryModel: "openai/gpt-5.5-alt",
primaryModelName: "gpt-5.5-alt",
},
},
baselineSummary: {
@@ -643,20 +643,20 @@ status=done`,
comparedAt: "2026-04-11T00:00:00.000Z",
}),
).toThrow(
-/candidate summary run\.primaryProvider=openai and run\.primaryModel=openai\/gpt-5\.4-alt do not match --candidate-label=openai\/gpt-5\.4/,
+/candidate summary run\.primaryProvider=openai and run\.primaryModel=openai\/gpt-5\.5-alt do not match --candidate-label=openai\/gpt-5\.5/,
);
});
it("accepts colon-delimited structured labels when provider and model both match", () => {
const comparison = buildQaAgenticParityComparison({
candidateLabel: "openai:gpt-5.4",
candidateLabel: "openai:gpt-5.5",
baselineLabel: "anthropic:claude-opus-4-6",
candidateSummary: {
scenarios: FULL_PARITY_PASS_SCENARIOS,
run: {
primaryProvider: "openai",
primaryModel: "openai/gpt-5.4",
primaryModelName: "gpt-5.4",
primaryModel: "openai/gpt-5.5",
primaryModelName: "gpt-5.5",
},
},
baselineSummary: {
@@ -678,7 +678,7 @@ status=done`,
// verdict is not disrupted by required-scenario coverage failures
// added by the second-wave expansion.
const comparison = buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5",
baselineLabel: "anthropic/claude-opus-4-6",
candidateSummary: { scenarios: FULL_PARITY_PASS_SCENARIOS },
baselineSummary: { scenarios: FULL_PARITY_PASS_SCENARIOS },
@@ -688,7 +688,7 @@ status=done`,
const report = renderQaAgenticParityMarkdownReport(comparison);
expect(report).toContain(
"# OpenClaw Agentic Parity Report — openai/gpt-5.4 vs anthropic/claude-opus-4-6",
"# OpenClaw Agentic Parity Report — openai/gpt-5.5 vs anthropic/claude-opus-4-6",
);
expect(report).toContain("| Completion rate | 100.0% | 100.0% |");
expect(report).toContain("### Approval turn tool followthrough");
@@ -697,20 +697,20 @@ status=done`,
it("parametrizes the markdown header from the comparison labels", () => {
// Regression for the loop-7 Copilot finding: callers that configure
-// non-gpt-5.4 / non-opus labels (for example an internal candidate vs
+// non-gpt-5.5 / non-opus labels (for example an internal candidate vs
// another candidate) must see the labels in the rendered H1 instead of
// the hardcoded "GPT-5.4 / Opus 4.6" title that would otherwise confuse
// the hardcoded "GPT-5.5 / Opus 4.6" title that would otherwise confuse
// readers of saved reports.
const comparison = buildQaAgenticParityComparison({
candidateLabel: "openai/gpt-5.4-alt",
baselineLabel: "openai/gpt-5.4",
candidateLabel: "openai/gpt-5.5-alt",
baselineLabel: "openai/gpt-5.5",
candidateSummary: { scenarios: [] },
baselineSummary: { scenarios: [] },
comparedAt: "2026-04-11T00:00:00.000Z",
});
const report = renderQaAgenticParityMarkdownReport(comparison);
expect(report).toContain(
"# OpenClaw Agentic Parity Report — openai/gpt-5.4-alt vs openai/gpt-5.4",
"# OpenClaw Agentic Parity Report — openai/gpt-5.5-alt vs openai/gpt-5.5",
);
});
});
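Aside: the relative-metric assertions above pin down the gate's failure-string format exactly. A minimal TypeScript sketch of that logic, assuming a MetricRates shape and helper name that are not in the shipped source — only the message format is taken from the fixtures:

// Hypothetical sketch of the relative metric gate these fixtures exercise.
type MetricRates = { completionRate: number; unintendedStopRate: number };

function collectMetricFailures(
  candidateLabel: string,
  baselineLabel: string,
  candidate: MetricRates,
  baseline: MetricRates,
): string[] {
  const failures: string[] = [];
  // Rates are fractions; the report prints them as one-decimal percentages.
  const pct = (value: number) => `${(value * 100).toFixed(1)}%`;
  // Comparisons are relative to the baseline, so a failure present on both
  // sides never surfaces here; the required-scenario check reports it instead.
  if (candidate.completionRate < baseline.completionRate) {
    failures.push(
      `${candidateLabel} completion rate ${pct(candidate.completionRate)} is below ${baselineLabel} ${pct(baseline.completionRate)}.`,
    );
  }
  if (candidate.unintendedStopRate > baseline.unintendedStopRate) {
    failures.push(
      `${candidateLabel} unintended-stop rate ${pct(candidate.unintendedStopRate)} exceeds ${baselineLabel} ${pct(baseline.unintendedStopRate)}.`,
    );
  }
  return failures;
}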

View File

@@ -225,7 +225,7 @@ type StructuredQaParityLabel = {
/**
* Only treat caller labels as provenance-checked identifiers when they are
* exact lower-case provider/model refs. Human-facing display labels like
* "GPT-5.4 candidate" or "Candidate: GPT-5.4" should render in the report
* "GPT-5.5 candidate" or "Candidate: GPT-5.5" should render in the report
* without being misread as structured provider ids.
*/
function parseStructuredLabelRef(label: string): StructuredQaParityLabel | null {
@@ -486,9 +486,9 @@ export function buildQaAgenticParityComparison(params: {
export function renderQaAgenticParityMarkdownReport(comparison: QaAgenticParityComparison): string {
// Title is parametrized from the candidate / baseline labels so reports
-// for any candidate/baseline pair (not only gpt-5.4 vs opus 4.6) render
+// for any candidate/baseline pair (not only gpt-5.5 vs opus 4.6) render
// with an accurate header. The default CLI labels are still
-// openai/gpt-5.4 vs anthropic/claude-opus-4-6, but the helper works for
+// openai/gpt-5.5 vs anthropic/claude-opus-4-6, but the helper works for
// any parity comparison a caller configures.
const lines = [
`# OpenClaw Agentic Parity Report — ${comparison.candidateLabel} vs ${comparison.baselineLabel}`,
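The doc comment above fully constrains what parseStructuredLabelRef accepts and rejects, and the label-verification specs earlier in this commit pin the delimiter set (slash- and colon-delimited refs parse; decorated display labels do not). A sketch consistent with both — the regex is an assumption, not the shipped implementation:

// Sketch of parseStructuredLabelRef's contract: only exact lower-case
// provider/model or provider:model refs parse; everything else is treated
// as a human-facing display label and returns null.
type StructuredQaParityLabelSketch = { provider: string; model: string };

function parseStructuredLabelRefSketch(label: string): StructuredQaParityLabelSketch | null {
  const match = /^([a-z][a-z0-9-]*)[/:]([a-z0-9][a-z0-9.-]*)$/.exec(label);
  if (!match) {
    return null; // e.g. "GPT-5.5 candidate", "Candidate: GPT-5.5", "Opus 4.6 / baseline"
  }
  return { provider: match[1], model: match[2] };
}

// parseStructuredLabelRefSketch("openai/gpt-5.5")  -> { provider: "openai", model: "gpt-5.5" }
// parseStructuredLabelRefSketch("openai:gpt-5.5")  -> { provider: "openai", model: "gpt-5.5" }
// parseStructuredLabelRefSketch("Opus 4.6 baseline") -> null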

View File

@@ -82,7 +82,7 @@ describe("runQaCharacterEval", () => {
});
const runJudge = makeRunJudge([
{
model: "openai/gpt-5.4",
model: "openai/gpt-5.5",
rank: 1,
score: 9.1,
summary: "Most natural.",
@@ -102,10 +102,10 @@ describe("runQaCharacterEval", () => {
const result = await runQaCharacterEval({
repoRoot: tempRoot,
outputDir: path.join(tempRoot, "character"),
models: ["openai/gpt-5.4", "codex-cli/test-model", "openai/gpt-5.4"],
models: ["openai/gpt-5.5", "codex-cli/test-model", "openai/gpt-5.5"],
scenarioId: "character-vibes-gollum",
candidateFastMode: true,
judgeModels: ["openai/gpt-5.4"],
judgeModels: ["openai/gpt-5.5"],
runSuite,
runJudge,
});
@@ -115,15 +115,15 @@ describe("runQaCharacterEval", () => {
1,
expect.objectContaining({
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
fastMode: true,
scenarioIds: ["character-vibes-gollum"],
}),
);
expect(runJudge).toHaveBeenCalledWith(
expect.objectContaining({
judgeModel: "openai/gpt-5.4",
judgeModel: "openai/gpt-5.5",
judgeThinkingDefault: "xhigh",
judgeFastMode: true,
timeoutMs: 300_000,
@@ -131,17 +131,17 @@ describe("runQaCharacterEval", () => {
);
expect(result.judgments).toHaveLength(1);
expect(result.judgments[0]?.rankings.map((ranking) => ranking.model)).toEqual([
"openai/gpt-5.4",
"openai/gpt-5.5",
"codex-cli/test-model",
]);
const report = await fs.readFile(result.reportPath, "utf8");
expect(report).toContain("Execution: local QA gateway child processes, not Docker");
expect(report).toContain("Judges: openai/gpt-5.4");
expect(report).toContain("Judges: openai/gpt-5.5");
expect(report).toContain("Judge model labels: visible");
expect(report).toContain("## Judge Rankings");
expect(report).toContain("### openai/gpt-5.4");
expect(report).toContain("reply from openai/gpt-5.4");
expect(report).toContain("### openai/gpt-5.5");
expect(report).toContain("reply from openai/gpt-5.5");
expect(report).toContain("reply from codex-cli/test-model");
expect(report).toContain("Judge thinking: xhigh");
expect(report).toContain("- Timeout: 5m");
@@ -162,7 +162,7 @@ describe("runQaCharacterEval", () => {
const runJudge = vi.fn(async (params: CharacterRunJudgeParams) => {
expect(params.prompt).toContain("## CANDIDATE candidate-01");
expect(params.prompt).toContain("## CANDIDATE candidate-02");
expect(params.prompt).not.toContain("openai/gpt-5.4");
expect(params.prompt).not.toContain("openai/gpt-5.5");
expect(params.prompt).not.toContain("codex-cli/test-model");
return makeJudgeReply([
{
@@ -183,8 +183,8 @@ describe("runQaCharacterEval", () => {
const result = await runQaCharacterEval({
repoRoot: tempRoot,
outputDir: path.join(tempRoot, "character"),
models: ["openai/gpt-5.4", "codex-cli/test-model"],
judgeModels: ["openai/gpt-5.4"],
models: ["openai/gpt-5.5", "codex-cli/test-model"],
judgeModels: ["openai/gpt-5.5"],
judgeBlindModels: true,
runSuite,
runJudge,
@@ -193,7 +193,7 @@ describe("runQaCharacterEval", () => {
expect(result.judgments[0]?.blindModels).toBe(true);
expect(result.judgments[0]?.rankings.map((ranking) => ranking.model)).toEqual([
"codex-cli/test-model",
"openai/gpt-5.4",
"openai/gpt-5.5",
]);
const report = await fs.readFile(result.reportPath, "utf8");
expect(report).toContain("Judge model labels: blind");
@@ -203,7 +203,7 @@ describe("runQaCharacterEval", () => {
it("defaults to the character eval model panel when no models are provided", async () => {
const runSuite = makeRunSuite();
const runJudge = makeRunJudge([
{ model: "openai/gpt-5.4", rank: 1, score: 8, summary: "ok" },
{ model: "openai/gpt-5.5", rank: 1, score: 8, summary: "ok" },
{ model: "openai/gpt-5.2", rank: 2, score: 7.5, summary: "ok" },
{ model: "openai/gpt-5", rank: 3, score: 7.2, summary: "ok" },
{ model: "anthropic/claude-opus-4-6", rank: 4, score: 7, summary: "ok" },
@@ -223,7 +223,7 @@ describe("runQaCharacterEval", () => {
expect(runSuite).toHaveBeenCalledTimes(8);
expect(runSuite.mock.calls.map(([params]) => params.primaryModel)).toEqual([
"openai/gpt-5.4",
"openai/gpt-5.5",
"openai/gpt-5.2",
"openai/gpt-5",
"anthropic/claude-opus-4-6",
@@ -254,7 +254,7 @@ describe("runQaCharacterEval", () => {
]);
expect(runJudge).toHaveBeenCalledTimes(2);
expect(runJudge.mock.calls.map(([params]) => params.judgeModel)).toEqual([
"openai/gpt-5.4",
"openai/gpt-5.5",
"anthropic/claude-opus-4-6",
]);
expect(runJudge.mock.calls.map(([params]) => params.judgeThinkingDefault)).toEqual([
@@ -275,7 +275,7 @@ describe("runQaCharacterEval", () => {
return makeReplySuiteResult(params);
});
const runJudge = makeRunJudge([
{ model: "openai/gpt-5.4", rank: 1, score: 8, summary: "ok" },
{ model: "openai/gpt-5.5", rank: 1, score: 8, summary: "ok" },
{ model: "anthropic/claude-sonnet-4-6", rank: 2, score: 7, summary: "ok" },
{ model: "moonshot/kimi-k2.5", rank: 3, score: 6, summary: "ok" },
]);
@@ -283,16 +283,16 @@ describe("runQaCharacterEval", () => {
const result = await runQaCharacterEval({
repoRoot: tempRoot,
outputDir: path.join(tempRoot, "character"),
models: ["openai/gpt-5.4", "anthropic/claude-sonnet-4-6", "moonshot/kimi-k2.5"],
models: ["openai/gpt-5.5", "anthropic/claude-sonnet-4-6", "moonshot/kimi-k2.5"],
candidateConcurrency: 2,
judgeModels: ["openai/gpt-5.4"],
judgeModels: ["openai/gpt-5.5"],
runSuite,
runJudge,
});
expect(maxActiveRuns).toBe(2);
expect(result.runs.map((run) => run.model)).toEqual([
"openai/gpt-5.4",
"openai/gpt-5.5",
"anthropic/claude-sonnet-4-6",
"moonshot/kimi-k2.5",
]);
@@ -355,7 +355,7 @@ describe("runQaCharacterEval", () => {
repoRoot: tempRoot,
outputDir: path.join(tempRoot, "character"),
models: ["qwen/qwen3.6-plus"],
judgeModels: ["openai/gpt-5.4"],
judgeModels: ["openai/gpt-5.5"],
runSuite,
runJudge,
});
@@ -383,7 +383,7 @@ describe("runQaCharacterEval", () => {
repoRoot: tempRoot,
outputDir: path.join(tempRoot, "character"),
models: ["qwen/qwen3.5-plus"],
judgeModels: ["openai/gpt-5.4"],
judgeModels: ["openai/gpt-5.5"],
runSuite,
runJudge,
});
@@ -412,7 +412,7 @@ describe("runQaCharacterEval", () => {
repoRoot: tempRoot,
outputDir: path.join(tempRoot, "character"),
models: ["qa/generic-fallback-model"],
judgeModels: ["openai/gpt-5.4"],
judgeModels: ["openai/gpt-5.5"],
runSuite,
runJudge,
});
@@ -441,7 +441,7 @@ describe("runQaCharacterEval", () => {
repoRoot: tempRoot,
outputDir: path.join(tempRoot, "character"),
models: ["google/gemini-test"],
judgeModels: ["openai/gpt-5.4"],
judgeModels: ["openai/gpt-5.5"],
runSuite,
runJudge,
});
@@ -463,20 +463,20 @@ describe("runQaCharacterEval", () => {
}),
);
const runJudge = makeRunJudge([
{ model: "codex/gpt-5.4", rank: 1, score: 0.5, summary: "failed" },
{ model: "codex/gpt-5.5", rank: 1, score: 0.5, summary: "failed" },
]);
const result = await runQaCharacterEval({
repoRoot: tempRoot,
outputDir: path.join(tempRoot, "character"),
models: ["codex/gpt-5.4"],
judgeModels: ["openai/gpt-5.4"],
models: ["codex/gpt-5.5"],
judgeModels: ["openai/gpt-5.5"],
runSuite,
runJudge,
});
expect(result.runs[0]).toMatchObject({
model: "codex/gpt-5.4",
model: "codex/gpt-5.5",
status: "fail",
error: "internal harness/meta text leaked into transcript",
});
@@ -485,17 +485,17 @@ describe("runQaCharacterEval", () => {
it("lets explicit candidate thinking override the default panel", async () => {
const runSuite = makeRunSuite();
const runJudge = makeRunJudge([
{ model: "openai/gpt-5.4", rank: 1, score: 8, summary: "ok" },
{ model: "openai/gpt-5.5", rank: 1, score: 8, summary: "ok" },
{ model: "moonshot/kimi-k2.5", rank: 2, score: 7, summary: "ok" },
]);
await runQaCharacterEval({
repoRoot: tempRoot,
outputDir: path.join(tempRoot, "character"),
models: ["openai/gpt-5.4", "moonshot/kimi-k2.5"],
models: ["openai/gpt-5.5", "moonshot/kimi-k2.5"],
candidateThinkingDefault: "medium",
candidateThinkingByModel: { "moonshot/kimi-k2.5": "high" },
judgeModels: ["openai/gpt-5.4"],
judgeModels: ["openai/gpt-5.5"],
runSuite,
runJudge,
});
@@ -508,21 +508,21 @@ describe("runQaCharacterEval", () => {
it("lets model-specific options override candidate and judge defaults", async () => {
const runSuite = makeRunSuite();
const runJudge = makeRunJudge([{ model: "openai/gpt-5.4", rank: 1, score: 8, summary: "ok" }]);
const runJudge = makeRunJudge([{ model: "openai/gpt-5.5", rank: 1, score: 8, summary: "ok" }]);
await runQaCharacterEval({
repoRoot: tempRoot,
outputDir: path.join(tempRoot, "character"),
models: ["openai/gpt-5.4", "moonshot/kimi-k2.5"],
models: ["openai/gpt-5.5", "moonshot/kimi-k2.5"],
candidateFastMode: true,
candidateThinkingDefault: "medium",
candidateModelOptions: {
"openai/gpt-5.4": { thinkingDefault: "xhigh", fastMode: false },
"openai/gpt-5.5": { thinkingDefault: "xhigh", fastMode: false },
},
judgeModels: ["openai/gpt-5.4", "anthropic/claude-opus-4-6"],
judgeModels: ["openai/gpt-5.5", "anthropic/claude-opus-4-6"],
judgeThinkingDefault: "medium",
judgeModelOptions: {
"openai/gpt-5.4": { thinkingDefault: "xhigh", fastMode: true },
"openai/gpt-5.5": { thinkingDefault: "xhigh", fastMode: true },
"anthropic/claude-opus-4-6": { thinkingDefault: "high" },
},
runSuite,
@@ -554,15 +554,15 @@ describe("runQaCharacterEval", () => {
});
const runJudge = vi.fn(async (_params: CharacterRunJudgeParams) =>
JSON.stringify({
rankings: [{ model: "openai/gpt-5.4", rank: 1, score: 8, summary: "ok" }],
rankings: [{ model: "openai/gpt-5.5", rank: 1, score: 8, summary: "ok" }],
}),
);
const result = await runQaCharacterEval({
repoRoot: tempRoot,
outputDir: path.join(tempRoot, "character"),
models: ["openai/gpt-5.4", "codex-cli/test-model"],
judgeModels: ["openai/gpt-5.4"],
models: ["openai/gpt-5.5", "codex-cli/test-model"],
judgeModels: ["openai/gpt-5.5"],
runSuite,
runJudge,
});

View File

@@ -131,7 +131,7 @@ describe("qa cli runtime", () => {
summaryPath: "/tmp/character-summary.json",
});
runQaManualLane.mockResolvedValue({
model: "openai/gpt-5.4",
model: "openai/gpt-5.5",
waited: { status: "ok" },
reply: "done",
watchUrl: "http://127.0.0.1:43124",
@@ -186,7 +186,7 @@ describe("qa cli runtime", () => {
repoRoot: "/tmp/openclaw-repo",
outputDir: ".artifacts/qa/frontier",
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "anthropic/claude-sonnet-4-6",
fastMode: true,
thinking: "medium",
@@ -198,7 +198,7 @@ describe("qa cli runtime", () => {
outputDir: path.resolve("/tmp/openclaw-repo", ".artifacts/qa/frontier"),
transportId: "qa-channel",
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "anthropic/claude-sonnet-4-6",
fastMode: true,
thinkingDefault: "medium",
@@ -211,8 +211,8 @@ describe("qa cli runtime", () => {
repoRoot: "/tmp/openclaw-repo",
outputDir: ".artifacts/qa/telegram",
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
fastMode: true,
scenarioIds: ["telegram-help-command"],
sutAccountId: "sut-live",
@@ -222,8 +222,8 @@ describe("qa cli runtime", () => {
repoRoot: path.resolve("/tmp/openclaw-repo"),
outputDir: path.resolve("/tmp/openclaw-repo", ".artifacts/qa/telegram"),
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
fastMode: true,
allowFailures: undefined,
scenarioIds: ["telegram-help-command"],
@@ -509,7 +509,7 @@ describe("qa cli runtime", () => {
await runQaSuiteCommand({
repoRoot: "/tmp/openclaw-repo",
providerMode: "mock-openai",
primaryModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "anthropic/claude-opus-4-6",
preflight: true,
});
@@ -521,7 +521,7 @@ describe("qa cli runtime", () => {
),
transportId: "qa-channel",
providerMode: "mock-openai",
primaryModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "anthropic/claude-opus-4-6",
scenarioIds: ["approval-turn-tool-followthrough"],
concurrency: 1,
@@ -709,14 +709,14 @@ describe("qa cli runtime", () => {
repoRoot: "/tmp/openclaw-repo",
outputDir: ".artifacts/qa/character",
model: [
"openai/gpt-5.4,thinking=xhigh,fast=false",
"openai/gpt-5.5,thinking=xhigh,fast=false",
"codex-cli/test-model,thinking=high,fast",
],
scenario: "character-vibes-gollum",
fast: true,
thinking: "medium",
modelThinking: ["codex-cli/test-model=medium"],
judgeModel: ["openai/gpt-5.4,thinking=xhigh,fast", "anthropic/claude-opus-4-6,thinking=high"],
judgeModel: ["openai/gpt-5.5,thinking=xhigh,fast", "anthropic/claude-opus-4-6,thinking=high"],
judgeTimeoutMs: 180_000,
blindJudgeModels: true,
concurrency: 4,
@@ -726,18 +726,18 @@ describe("qa cli runtime", () => {
expect(runQaCharacterEval).toHaveBeenCalledWith({
repoRoot: path.resolve("/tmp/openclaw-repo"),
outputDir: path.resolve("/tmp/openclaw-repo", ".artifacts/qa/character"),
models: ["openai/gpt-5.4", "codex-cli/test-model"],
models: ["openai/gpt-5.5", "codex-cli/test-model"],
scenarioId: "character-vibes-gollum",
candidateFastMode: true,
candidateThinkingDefault: "medium",
candidateThinkingByModel: { "codex-cli/test-model": "medium" },
candidateModelOptions: {
"openai/gpt-5.4": { thinkingDefault: "xhigh", fastMode: false },
"openai/gpt-5.5": { thinkingDefault: "xhigh", fastMode: false },
"codex-cli/test-model": { thinkingDefault: "high", fastMode: true },
},
judgeModels: ["openai/gpt-5.4", "anthropic/claude-opus-4-6"],
judgeModels: ["openai/gpt-5.5", "anthropic/claude-opus-4-6"],
judgeModelOptions: {
"openai/gpt-5.4": { thinkingDefault: "xhigh", fastMode: true },
"openai/gpt-5.5": { thinkingDefault: "xhigh", fastMode: true },
"anthropic/claude-opus-4-6": { thinkingDefault: "high" },
},
judgeTimeoutMs: 180_000,
@@ -751,13 +751,13 @@ describe("qa cli runtime", () => {
it("lets character eval auto-select candidate fast mode when --fast is omitted", async () => {
await runQaCharacterEvalCommand({
repoRoot: "/tmp/openclaw-repo",
model: ["openai/gpt-5.4"],
model: ["openai/gpt-5.5"],
});
expect(runQaCharacterEval).toHaveBeenCalledWith({
repoRoot: path.resolve("/tmp/openclaw-repo"),
outputDir: undefined,
models: ["openai/gpt-5.4"],
models: ["openai/gpt-5.5"],
scenarioId: undefined,
candidateFastMode: undefined,
candidateThinkingDefault: undefined,
@@ -777,7 +777,7 @@ describe("qa cli runtime", () => {
await expect(
runQaCharacterEvalCommand({
repoRoot: "/tmp/openclaw-repo",
model: ["openai/gpt-5.4"],
model: ["openai/gpt-5.5"],
thinking: "enormous",
}),
).rejects.toThrow("--thinking must be one of");
@@ -785,22 +785,22 @@ describe("qa cli runtime", () => {
await expect(
runQaCharacterEvalCommand({
repoRoot: "/tmp/openclaw-repo",
model: ["openai/gpt-5.4,thinking=galaxy"],
model: ["openai/gpt-5.5,thinking=galaxy"],
}),
).rejects.toThrow("--model thinking must be one of");
await expect(
runQaCharacterEvalCommand({
repoRoot: "/tmp/openclaw-repo",
model: ["openai/gpt-5.4,warp"],
model: ["openai/gpt-5.5,warp"],
}),
).rejects.toThrow("--model options must be thinking=<level>");
await expect(
runQaCharacterEvalCommand({
repoRoot: "/tmp/openclaw-repo",
model: ["openai/gpt-5.4"],
modelThinking: ["openai/gpt-5.4"],
model: ["openai/gpt-5.5"],
modelThinking: ["openai/gpt-5.5"],
}),
).rejects.toThrow("--model-thinking must use provider/model=level");
});
@@ -809,8 +809,8 @@ describe("qa cli runtime", () => {
await runQaManualLaneCommand({
repoRoot: "/tmp/openclaw-repo",
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
fastMode: true,
message: "read qa kickoff and reply short",
timeoutMs: 45_000,
@@ -820,8 +820,8 @@ describe("qa cli runtime", () => {
repoRoot: path.resolve("/tmp/openclaw-repo"),
transportId: "qa-channel",
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
fastMode: true,
message: "read qa kickoff and reply short",
timeoutMs: 45_000,
@@ -867,8 +867,8 @@ describe("qa cli runtime", () => {
repoRoot: "/tmp/openclaw-repo",
runner: "multipass",
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
fastMode: true,
allowFailures: true,
scenarioIds: ["channel-chat-baseline"],
@@ -879,8 +879,8 @@ describe("qa cli runtime", () => {
repoRoot: path.resolve("/tmp/openclaw-repo"),
transportId: "qa-channel",
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
fastMode: true,
allowFailures: true,
scenarioIds: ["channel-chat-baseline"],
@@ -1052,7 +1052,7 @@ describe("qa cli runtime", () => {
repoRoot: "/tmp/openclaw-repo",
providerMode: "mock-openai",
parityPack: "agentic",
primaryModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "anthropic/claude-opus-4-6",
});
@@ -1061,7 +1061,7 @@ describe("qa cli runtime", () => {
outputDir: undefined,
transportId: "qa-channel",
providerMode: "mock-openai",
primaryModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "anthropic/claude-opus-4-6",
fastMode: undefined,
scenarioIds: [
@@ -1102,8 +1102,8 @@ describe("qa cli runtime", () => {
repoRoot: path.resolve("/tmp/openclaw-repo"),
transportId: "qa-channel",
providerMode: "mock-openai",
primaryModel: "mock-openai/gpt-5.4",
alternateModel: "mock-openai/gpt-5.4-alt",
primaryModel: "mock-openai/gpt-5.5",
alternateModel: "mock-openai/gpt-5.5-alt",
fastMode: undefined,
message: "read qa kickoff and reply short",
timeoutMs: undefined,
@@ -1121,8 +1121,8 @@ describe("qa cli runtime", () => {
repoRoot: path.resolve("/tmp/openclaw-repo"),
transportId: "qa-channel",
providerMode: "aimock",
primaryModel: "aimock/gpt-5.4",
alternateModel: "aimock/gpt-5.4-alt",
primaryModel: "aimock/gpt-5.5",
alternateModel: "aimock/gpt-5.5-alt",
fastMode: undefined,
message: "read qa kickoff and reply short",
timeoutMs: undefined,
@@ -1139,8 +1139,8 @@ describe("qa cli runtime", () => {
repoRoot: path.resolve("/tmp/openclaw-repo"),
transportId: "qa-channel",
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
fastMode: undefined,
message: "read qa kickoff and reply short",
timeoutMs: undefined,
@@ -1170,7 +1170,7 @@ describe("qa cli runtime", () => {
it("defaults manual frontier runs onto Codex OAuth when the runtime resolver prefers it", async () => {
defaultQaRuntimeModelForMode.mockImplementation((mode, options) =>
mode === "live-frontier"
? "openai/gpt-5.4"
? "openai/gpt-5.5"
: defaultQaProviderModelForMode(mode as QaProviderModeInput, options),
);
@@ -1183,8 +1183,8 @@ describe("qa cli runtime", () => {
repoRoot: path.resolve("/tmp/openclaw-repo"),
transportId: "qa-channel",
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
fastMode: undefined,
message: "read qa kickoff and reply short",
timeoutMs: undefined,

View File

@@ -19,7 +19,7 @@ describe("qa-lab server capture helpers", () => {
metaJson: JSON.stringify({
provider: "openai",
api: "responses",
model: "gpt-5.4",
model: "gpt-5.5",
captureOrigin: "shared-fetch",
}),
}),
@@ -29,7 +29,7 @@ describe("qa-lab server capture helpers", () => {
payloadPreview: '{"hello":"world"}',
provider: "openai",
api: "responses",
model: "gpt-5.4",
model: "gpt-5.5",
captureOrigin: "shared-fetch",
}),
);

View File

@@ -508,9 +508,9 @@ describe("qa-lab server", () => {
`fs.writeFileSync(${JSON.stringify(markerPath)}, process.argv.slice(2).join(" "), "utf8");`,
"process.stdout.write(JSON.stringify({",
" models: [{",
' key: "openai/gpt-5.4",',
' name: "GPT-5.4",',
' input: "openai/gpt-5.4",',
' key: "openai/gpt-5.5",',
' name: "GPT-5.5",',
' input: "openai/gpt-5.5",',
" available: true,",
" missing: false,",
" }],",
@@ -726,7 +726,7 @@ describe("qa-lab server", () => {
metaJson: JSON.stringify({
provider: "openai",
api: "responses",
model: "gpt-5.4",
model: "gpt-5.5",
captureOrigin: "shared-fetch",
}),
});
@@ -747,7 +747,7 @@ describe("qa-lab server", () => {
metaJson: JSON.stringify({
provider: "openai",
api: "responses",
model: "gpt-5.4",
model: "gpt-5.5",
captureOrigin: "shared-fetch",
}),
});
@@ -796,7 +796,7 @@ describe("qa-lab server", () => {
expect.objectContaining({
flowId: "flow-1",
provider: "openai",
model: "gpt-5.4",
model: "gpt-5.5",
captureOrigin: "shared-fetch",
}),
expect.objectContaining({
@@ -828,7 +828,7 @@ describe("qa-lab server", () => {
);
expect(coverage.coverage.models).toEqual(
expect.arrayContaining([
expect.objectContaining({ value: "gpt-5.4", count: 2 }),
expect.objectContaining({ value: "gpt-5.5", count: 2 }),
expect.objectContaining({ value: "kimi-k2.5:cloud", count: 1 }),
]),
);

View File

@@ -20,8 +20,8 @@ describe("qa live timeout policy", () => {
resolveQaLiveTurnTimeoutMs(
{
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
},
30_000,
),

View File

@@ -75,8 +75,8 @@ describe("startQaLiveLaneGateway", () => {
transport: createStubTransport(),
transportBaseUrl: "http://127.0.0.1:43123",
providerMode: "mock-openai",
primaryModel: "mock-openai/gpt-5.4",
alternateModel: "mock-openai/gpt-5.4-alt",
primaryModel: "mock-openai/gpt-5.5",
alternateModel: "mock-openai/gpt-5.5-alt",
controlUiEnabled: false,
});
@@ -100,8 +100,8 @@ describe("startQaLiveLaneGateway", () => {
transport: createStubTransport(),
transportBaseUrl: "http://127.0.0.1:43123",
providerMode: "mock-openai",
primaryModel: "mock-openai/gpt-5.4",
alternateModel: "mock-openai/gpt-5.4-alt",
primaryModel: "mock-openai/gpt-5.5",
alternateModel: "mock-openai/gpt-5.5-alt",
controlUiEnabled: false,
});
@@ -116,8 +116,8 @@ describe("startQaLiveLaneGateway", () => {
transport: createStubTransport(),
transportBaseUrl: "http://127.0.0.1:43123",
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
controlUiEnabled: false,
});
@@ -141,8 +141,8 @@ describe("startQaLiveLaneGateway", () => {
transport: createStubTransport(),
transportBaseUrl: "http://127.0.0.1:43123",
providerMode: "mock-openai",
primaryModel: "mock-openai/gpt-5.4",
alternateModel: "mock-openai/gpt-5.4-alt",
primaryModel: "mock-openai/gpt-5.5",
alternateModel: "mock-openai/gpt-5.5-alt",
controlUiEnabled: false,
});
@@ -161,8 +161,8 @@ describe("startQaLiveLaneGateway", () => {
transport: createStubTransport(),
transportBaseUrl: "http://127.0.0.1:43123",
providerMode: "mock-openai",
primaryModel: "mock-openai/gpt-5.4",
alternateModel: "mock-openai/gpt-5.4-alt",
primaryModel: "mock-openai/gpt-5.5",
alternateModel: "mock-openai/gpt-5.5-alt",
controlUiEnabled: false,
});

View File

@@ -82,8 +82,8 @@ describe("runQaManualLane", () => {
const result = await runQaManualLane({
repoRoot: "/tmp/openclaw-repo",
providerMode: "mock-openai",
primaryModel: "mock-openai/gpt-5.4",
alternateModel: "mock-openai/gpt-5.4-alt",
primaryModel: "mock-openai/gpt-5.5",
alternateModel: "mock-openai/gpt-5.5-alt",
message: "check the kickoff file",
timeoutMs: 5_000,
replySettleMs: 0,
@@ -111,8 +111,8 @@ describe("runQaManualLane", () => {
const result = await runQaManualLane({
repoRoot: "/tmp/openclaw-repo",
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
message: "check the kickoff file",
timeoutMs: 5_000,
replySettleMs: 0,

View File

@@ -2,7 +2,7 @@ import { describe, expect, it } from "vitest";
import { selectQaRunnerModelOptions } from "./model-catalog.runtime.js";
describe("qa runner model catalog", () => {
it("filters to available rows and prefers gpt-5.4 first", () => {
it("filters to available rows and prefers gpt-5.5 first", () => {
expect(
selectQaRunnerModelOptions([
{
@@ -13,8 +13,8 @@ describe("qa runner model catalog", () => {
missing: false,
},
{
key: "openai/gpt-5.4",
name: "gpt-5.4",
key: "openai/gpt-5.5",
name: "gpt-5.5",
input: "text,image",
available: true,
missing: false,
@@ -27,6 +27,6 @@ describe("qa runner model catalog", () => {
missing: false,
},
]).map((entry) => entry.key),
).toEqual(["openai/gpt-5.4", "anthropic/claude-sonnet-4-6"]);
).toEqual(["openai/gpt-5.5", "anthropic/claude-sonnet-4-6"]);
});
});
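For context, the behavior this spec pins down is small enough to sketch: keep only rows that are both available and not missing, then float the preferred frontier model (openai/gpt-5.5 after this commit, plausibly keyed off QA_FRONTIER_CATALOG_PRIMARY_MODEL, which this commit also bumps) to the front. A rough TypeScript sketch under those assumptions, not the shipped code:

// Row shape mirrors the fixtures above; selection internals are assumed.
type QaRunnerModelRowSketch = {
  key: string;
  name: string;
  input: string;
  available: boolean;
  missing: boolean;
};

function selectQaRunnerModelOptionsSketch(rows: QaRunnerModelRowSketch[]): QaRunnerModelRowSketch[] {
  // Drop unavailable or missing rows first.
  const usable = rows.filter((row) => row.available && !row.missing);
  // Then order the preferred frontier model ahead of everything else,
  // preserving the incoming order of the remaining rows.
  const preferred = usable.filter((row) => row.key === "openai/gpt-5.5");
  const rest = usable.filter((row) => row.key !== "openai/gpt-5.5");
  return [...preferred, ...rest];
}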

View File

@@ -34,7 +34,7 @@ describe("qa model selection runtime", () => {
resolveEnvApiKey.mockReturnValue({ apiKey: "sk-test" });
expect(resolveQaPreferredLiveModel()).toBeUndefined();
expect(defaultQaRuntimeModelForMode("live-frontier")).toBe("openai/gpt-5.4");
expect(defaultQaRuntimeModelForMode("live-frontier")).toBe("openai/gpt-5.5");
expect(loadAuthProfileStoreForRuntime).not.toHaveBeenCalled();
});
@@ -43,8 +43,8 @@ describe("qa model selection runtime", () => {
provider === "openai-codex" ? ["openai-codex:user@example.com"] : [],
);
expect(resolveQaPreferredLiveModel()).toBe("openai/gpt-5.4");
expect(defaultQaRuntimeModelForMode("live-frontier")).toBe("openai/gpt-5.4");
expect(resolveQaPreferredLiveModel()).toBe("openai/gpt-5.5");
expect(defaultQaRuntimeModelForMode("live-frontier")).toBe("openai/gpt-5.5");
});
it("keeps the OpenAI live default when stored OpenAI profiles are available", () => {
@@ -53,7 +53,7 @@ describe("qa model selection runtime", () => {
);
expect(resolveQaPreferredLiveModel()).toBeUndefined();
expect(defaultQaRuntimeModelForMode("live-frontier")).toBe("openai/gpt-5.4");
expect(defaultQaRuntimeModelForMode("live-frontier")).toBe("openai/gpt-5.5");
});
it("leaves mock defaults unchanged", () => {
@@ -61,11 +61,11 @@ describe("qa model selection runtime", () => {
provider === "openai-codex" ? ["openai-codex:user@example.com"] : [],
);
expect(defaultQaRuntimeModelForMode("mock-openai")).toBe("mock-openai/gpt-5.4");
expect(defaultQaRuntimeModelForMode("mock-openai")).toBe("mock-openai/gpt-5.5");
expect(defaultQaRuntimeModelForMode("mock-openai", { alternate: true })).toBe(
"mock-openai/gpt-5.4-alt",
"mock-openai/gpt-5.5-alt",
);
expect(defaultQaRuntimeModelForMode("aimock")).toBe("aimock/gpt-5.4");
expect(defaultQaRuntimeModelForMode("aimock", { alternate: true })).toBe("aimock/gpt-5.4-alt");
expect(defaultQaRuntimeModelForMode("aimock")).toBe("aimock/gpt-5.5");
expect(defaultQaRuntimeModelForMode("aimock", { alternate: true })).toBe("aimock/gpt-5.5-alt");
});
});

View File

@@ -115,8 +115,8 @@ describe("qa multipass runtime", () => {
repoRoot: process.cwd(),
outputDir: path.join(process.cwd(), ".artifacts", "qa-e2e", "multipass-live-test"),
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
fastMode: true,
scenarioIds: ["channel-chat-baseline"],
});
@@ -128,9 +128,9 @@ describe("qa multipass runtime", () => {
"--provider-mode",
"live-frontier",
"--model",
"openai/gpt-5.4",
"openai/gpt-5.5",
"--alt-model",
"openai/gpt-5.4",
"openai/gpt-5.5",
"--fast",
]),
);

View File

@@ -24,7 +24,7 @@ describe("qa aimock server", () => {
method: "POST",
headers: { "content-type": "application/json" },
body: JSON.stringify({
model: "aimock/gpt-5.4",
model: "aimock/gpt-5.5",
stream: false,
input: [makeResponsesInput("hello aimock")],
}),
@@ -32,7 +32,7 @@ describe("qa aimock server", () => {
expect(response.status).toBe(200);
expect(await response.json()).toMatchObject({
status: "completed",
model: "aimock/gpt-5.4",
model: "aimock/gpt-5.5",
});
const debug = await fetch(`${server.baseUrl}/debug/last-request`);
@@ -40,7 +40,7 @@ describe("qa aimock server", () => {
expect(await debug.json()).toMatchObject({
prompt: "hello aimock",
allInputText: "hello aimock",
model: "aimock/gpt-5.4",
model: "aimock/gpt-5.5",
providerVariant: "openai",
});
} finally {
@@ -58,7 +58,7 @@ describe("qa aimock server", () => {
method: "POST",
headers: { "content-type": "application/json" },
body: JSON.stringify({
model: "aimock/gpt-5.4",
model: "aimock/gpt-5.5",
stream: false,
input: [makeResponsesInput("@openclaw explain the QA lab")],
}),
@@ -90,7 +90,7 @@ describe("qa aimock server", () => {
method: "POST",
headers: { "content-type": "application/json" },
body: JSON.stringify({
model: "openai-codex/gpt-5.4",
model: "openai-codex/gpt-5.5",
stream: false,
input: [makeResponsesInput("hello codex-compatible aimock")],
}),
@@ -100,7 +100,7 @@ describe("qa aimock server", () => {
const debug = await fetch(`${server.baseUrl}/debug/last-request`);
expect(debug.status).toBe(200);
expect(await debug.json()).toMatchObject({
model: "openai-codex/gpt-5.4",
model: "openai-codex/gpt-5.5",
providerVariant: "openai",
});
} finally {

View File

@@ -1,5 +1,5 @@
export const QA_FRONTIER_PROVIDER_IDS = ["anthropic", "google", "openai"] as const;
export const QA_FRONTIER_CATALOG_PRIMARY_MODEL = "openai/gpt-5.4";
export const QA_FRONTIER_CATALOG_PRIMARY_MODEL = "openai/gpt-5.5";
export const QA_FRONTIER_CATALOG_ALTERNATE_MODEL = "anthropic/claude-sonnet-4-6";
export function isPreferredQaLiveFrontierCatalogModel(modelRef: string) {

View File

@@ -6,7 +6,7 @@ type QaFrontierCharacterModelOptions = {
};
export const QA_FRONTIER_CHARACTER_EVAL_MODELS = Object.freeze([
"openai/gpt-5.4",
"openai/gpt-5.5",
"openai/gpt-5.2",
"openai/gpt-5",
"anthropic/claude-opus-4-6",
@@ -18,19 +18,19 @@ export const QA_FRONTIER_CHARACTER_EVAL_MODELS = Object.freeze([
export const QA_FRONTIER_CHARACTER_THINKING_BY_MODEL: Readonly<Record<string, QaThinkingLevel>> =
Object.freeze({
"openai/gpt-5.4": "medium",
"openai/gpt-5.5": "medium",
"openai/gpt-5.2": "xhigh",
"openai/gpt-5": "xhigh",
});
export const QA_FRONTIER_CHARACTER_JUDGE_MODELS = Object.freeze([
"openai/gpt-5.4",
"openai/gpt-5.5",
"anthropic/claude-opus-4-6",
]);
export const QA_FRONTIER_CHARACTER_JUDGE_MODEL_OPTIONS: Readonly<
Record<string, QaFrontierCharacterModelOptions>
> = Object.freeze({
"openai/gpt-5.4": { thinkingDefault: "xhigh", fastMode: true },
"openai/gpt-5.5": { thinkingDefault: "xhigh", fastMode: true },
"anthropic/claude-opus-4-6": { thinkingDefault: "high" },
});

View File

@@ -23,7 +23,7 @@ function isClaudeOpusModel(modelRef: string) {
export const liveFrontierProviderDefinition: QaProviderDefinition = {
mode: "live-frontier",
kind: "live",
defaultModel: (options) => options?.preferredLiveModel ?? "openai/gpt-5.4",
defaultModel: (options) => options?.preferredLiveModel ?? "openai/gpt-5.5",
defaultImageGenerationProviderIds: ["openai"],
defaultImageGenerationModel: ({ modelProviderIds }) =>
modelProviderIds.includes("openai") ? "openai/gpt-image-1" : null,

View File

@@ -4,7 +4,7 @@ import {
} from "openclaw/plugin-sdk/agent-runtime";
import { resolveEnvApiKey } from "openclaw/plugin-sdk/provider-auth";
const QA_CODEX_OAUTH_LIVE_MODEL = "openai/gpt-5.4";
const QA_CODEX_OAUTH_LIVE_MODEL = "openai/gpt-5.5";
export function resolveQaLiveFrontierPreferredModel() {
if (resolveEnvApiKey("openai")?.apiKey) {

View File

@@ -1,2 +1,2 @@
export const QA_FRONTIER_PARITY_CANDIDATE_LABEL = "openai/gpt-5.4";
export const QA_FRONTIER_PARITY_CANDIDATE_LABEL = "openai/gpt-5.5";
export const QA_FRONTIER_PARITY_BASELINE_LABEL = "anthropic/claude-opus-4-6";

View File

@@ -130,7 +130,7 @@ describe("qa mock openai server", () => {
},
body: JSON.stringify({
stream: false,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
makeUserInput(
"Before acting, tell me the single file you would start with in six words or fewer. Do not use tools yet.",
@@ -159,7 +159,7 @@ describe("qa mock openai server", () => {
},
body: JSON.stringify({
stream: true,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
makeUserInput(
"Before acting, tell me the single file you would start with in six words or fewer. Do not use tools yet.",
@@ -178,7 +178,7 @@ describe("qa mock openai server", () => {
const debugResponse = await fetch(`${server.baseUrl}/debug/last-request`);
expect(debugResponse.status).toBe(200);
expect(await debugResponse.json()).toMatchObject({
model: "gpt-5.4",
model: "gpt-5.5",
prompt:
"ok do it. read `QA_KICKOFF_TASK.md` now and reply with the QA mission in one short sentence.",
allInputText: expect.stringContaining("ok do it."),
@@ -285,7 +285,7 @@ describe("qa mock openai server", () => {
},
body: JSON.stringify({
stream: true,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
{
role: "user",
@@ -312,7 +312,7 @@ describe("qa mock openai server", () => {
},
body: JSON.stringify({
stream: false,
model: "gpt-5.4-alt",
model: "gpt-5.5-alt",
input: [
{
role: "user",
@@ -344,8 +344,8 @@ describe("qa mock openai server", () => {
const requests = await fetch(`${server.baseUrl}/debug/requests`);
expect(requests.status).toBe(200);
expect((await requests.json()) as Array<{ model?: string }>).toMatchObject([
{ model: "gpt-5.4" },
{ model: "gpt-5.4-alt" },
{ model: "gpt-5.5" },
{ model: "gpt-5.5-alt" },
]);
});
@@ -365,7 +365,7 @@ describe("qa mock openai server", () => {
},
body: JSON.stringify({
stream: true,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
{
role: "user",
@@ -402,7 +402,7 @@ describe("qa mock openai server", () => {
headers: { "content-type": "application/json" },
body: JSON.stringify({
stream: true,
model: "gpt-5.4",
model: "gpt-5.5",
input: [{ role: "user", content: [{ type: "input_text", text: prompt }] }],
}),
});
@@ -414,7 +414,7 @@ describe("qa mock openai server", () => {
headers: { "content-type": "application/json" },
body: JSON.stringify({
stream: true,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
{ role: "user", content: [{ type: "input_text", text: prompt }] },
{
@@ -433,7 +433,7 @@ describe("qa mock openai server", () => {
headers: { "content-type": "application/json" },
body: JSON.stringify({
stream: true,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
{ role: "user", content: [{ type: "input_text", text: prompt }] },
{
@@ -451,7 +451,7 @@ describe("qa mock openai server", () => {
headers: { "content-type": "application/json" },
body: JSON.stringify({
stream: true,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
{ role: "user", content: [{ type: "input_text", text: prompt }] },
{
@@ -472,7 +472,7 @@ describe("qa mock openai server", () => {
headers: { "content-type": "application/json" },
body: JSON.stringify({
stream: false,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
{ role: "user", content: [{ type: "input_text", text: prompt }] },
{
@@ -508,7 +508,7 @@ describe("qa mock openai server", () => {
},
body: JSON.stringify({
stream: true,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
{
role: "user",
@@ -538,7 +538,7 @@ describe("qa mock openai server", () => {
},
body: JSON.stringify({
stream: false,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
{
role: "user",
@@ -1407,7 +1407,7 @@ describe("qa mock openai server", () => {
headers: { "content-type": "application/json" },
body: JSON.stringify({
stream: false,
model: "mock-openai/gpt-5.4",
model: "mock-openai/gpt-5.5",
input: [
{
role: "user",
@@ -1457,7 +1457,7 @@ describe("qa mock openai server", () => {
headers: { "content-type": "application/json" },
body: JSON.stringify({
stream: false,
model: "mock-openai/gpt-5.4",
model: "mock-openai/gpt-5.5",
input: [
{
role: "user",
@@ -1544,7 +1544,7 @@ describe("qa mock openai server", () => {
},
body: JSON.stringify({
stream: false,
model: "gpt-5.4-alt",
model: "gpt-5.5-alt",
input: [
{
role: "user",
@@ -1630,7 +1630,7 @@ describe("qa mock openai server", () => {
const body = (await response.json()) as { data: Array<{ id: string }> };
const ids = body.data.map((entry) => entry.id);
expect(ids).toContain("claude-opus-4-6");
expect(ids).toContain("gpt-5.4");
expect(ids).toContain("gpt-5.5");
});
it("dispatches an Anthropic /v1/messages read tool call for source discovery prompts", async () => {
@@ -2160,7 +2160,7 @@ describe("qa mock openai server", () => {
const toolPlan = await expectResponsesText(server, {
stream: true,
model: "gpt-5.4",
model: "gpt-5.5",
input: [makeUserInput(QA_REASONING_ONLY_RECOVERY_PROMPT)],
});
expect(toolPlan).toContain('"name":"read"');
@@ -2171,7 +2171,7 @@ describe("qa mock openai server", () => {
output?: Array<{ type?: string; id?: string; summary?: Array<{ text?: string }> }>;
}>(server, {
stream: false,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
makeUserInput(QA_REASONING_ONLY_RECOVERY_PROMPT),
{
@@ -2195,7 +2195,7 @@ describe("qa mock openai server", () => {
output?: Array<{ content?: Array<{ text?: string }> }>;
}>(server, {
stream: false,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
makeUserInput(QA_REASONING_ONLY_RECOVERY_PROMPT),
makeUserInput(QA_REASONING_ONLY_RETRY_INSTRUCTION),
@@ -2222,7 +2222,7 @@ describe("qa mock openai server", () => {
]);
});
it("scripts the GPT-5.4 thinking visibility switch prompts", async () => {
it("scripts the GPT-5.5 thinking visibility switch prompts", async () => {
const server = await startMockServer();
expect(
@@ -2230,7 +2230,7 @@ describe("qa mock openai server", () => {
output?: Array<{ type?: string; content?: Array<{ text?: string }> }>;
}>(server, {
stream: false,
model: "gpt-5.4",
model: "gpt-5.5",
input: [makeUserInput(QA_THINKING_VISIBILITY_OFF_PROMPT)],
}),
).toMatchObject({
@@ -2252,7 +2252,7 @@ describe("qa mock openai server", () => {
}>;
}>(server, {
stream: false,
model: "gpt-5.4",
model: "gpt-5.5",
input: [makeUserInput(QA_THINKING_VISIBILITY_MAX_PROMPT)],
}),
).toMatchObject({
@@ -2275,7 +2275,7 @@ describe("qa mock openai server", () => {
const toolPlan = await expectResponsesText(server, {
stream: true,
model: "gpt-5.4",
model: "gpt-5.5",
input: [makeUserInput(QA_REASONING_ONLY_SIDE_EFFECT_PROMPT)],
});
expect(toolPlan).toContain('"name":"write"');
@@ -2286,7 +2286,7 @@ describe("qa mock openai server", () => {
output?: Array<{ type?: string; id?: string }>;
}>(server, {
stream: false,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
makeUserInput(QA_REASONING_ONLY_SIDE_EFFECT_PROMPT),
{
@@ -2309,7 +2309,7 @@ describe("qa mock openai server", () => {
const toolPlan = await expectResponsesText(server, {
stream: true,
model: "gpt-5.4",
model: "gpt-5.5",
input: [makeUserInput(QA_EMPTY_RESPONSE_RECOVERY_PROMPT)],
});
expect(toolPlan).toContain('"name":"read"');
@@ -2319,7 +2319,7 @@ describe("qa mock openai server", () => {
output?: Array<{ content?: Array<{ type?: string; text?: string }> }>;
}>(server, {
stream: false,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
makeUserInput(QA_EMPTY_RESPONSE_RECOVERY_PROMPT),
{
@@ -2341,7 +2341,7 @@ describe("qa mock openai server", () => {
output?: Array<{ content?: Array<{ text?: string }> }>;
}>(server, {
stream: false,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
makeUserInput(QA_EMPTY_RESPONSE_RECOVERY_PROMPT),
makeUserInput(QA_EMPTY_RESPONSE_RETRY_INSTRUCTION),
@@ -2365,7 +2365,7 @@ describe("qa mock openai server", () => {
await expectResponsesText(server, {
stream: true,
model: "gpt-5.4",
model: "gpt-5.5",
input: [makeUserInput(QA_EMPTY_RESPONSE_EXHAUSTION_PROMPT)],
});
@@ -2373,7 +2373,7 @@ describe("qa mock openai server", () => {
output?: Array<{ content?: Array<{ text?: string }> }>;
}>(server, {
stream: false,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
makeUserInput(QA_EMPTY_RESPONSE_EXHAUSTION_PROMPT),
{
@@ -2388,7 +2388,7 @@ describe("qa mock openai server", () => {
output?: Array<{ content?: Array<{ text?: string }> }>;
}>(server, {
stream: false,
model: "gpt-5.4",
model: "gpt-5.5",
input: [
makeUserInput(QA_EMPTY_RESPONSE_EXHAUSTION_PROMPT),
makeUserInput(QA_EMPTY_RESPONSE_RETRY_INSTRUCTION),
@@ -2404,9 +2404,9 @@ describe("qa mock openai server", () => {
describe("resolveProviderVariant", () => {
it("tags prefix-qualified openai models", () => {
expect(resolveProviderVariant("openai/gpt-5.4")).toBe("openai");
expect(resolveProviderVariant("openai:gpt-5.4")).toBe("openai");
expect(resolveProviderVariant("openai-codex/gpt-5.4")).toBe("openai");
expect(resolveProviderVariant("openai/gpt-5.5")).toBe("openai");
expect(resolveProviderVariant("openai:gpt-5.5")).toBe("openai");
expect(resolveProviderVariant("openai-codex/gpt-5.5")).toBe("openai");
});
it("tags prefix-qualified anthropic models", () => {
@@ -2416,8 +2416,8 @@ describe("resolveProviderVariant", () => {
});
it("tags bare model names by prefix", () => {
expect(resolveProviderVariant("gpt-5.4")).toBe("openai");
expect(resolveProviderVariant("gpt-5.4-alt")).toBe("openai");
expect(resolveProviderVariant("gpt-5.5")).toBe("openai");
expect(resolveProviderVariant("gpt-5.5-alt")).toBe("openai");
expect(resolveProviderVariant("gpt-4.5")).toBe("openai");
expect(resolveProviderVariant("o1-preview")).toBe("openai");
expect(resolveProviderVariant("claude-opus-4-6")).toBe("anthropic");
@@ -2425,7 +2425,7 @@ describe("resolveProviderVariant", () => {
});
it("handles case drift and whitespace", () => {
expect(resolveProviderVariant(" OpenAI/GPT-5.4 ")).toBe("openai");
expect(resolveProviderVariant(" OpenAI/GPT-5.5 ")).toBe("openai");
expect(resolveProviderVariant("ANTHROPIC/CLAUDE-OPUS-4-6")).toBe("anthropic");
});
@@ -2451,7 +2451,7 @@ describe("qa mock openai server provider variant tagging", () => {
method: "POST",
headers: { "content-type": "application/json" },
body: JSON.stringify({
model: "openai/gpt-5.4",
model: "openai/gpt-5.5",
stream: false,
input: [{ role: "user", content: [{ type: "input_text", text: "Heartbeat check" }] }],
}),
@@ -2461,7 +2461,7 @@ describe("qa mock openai server provider variant tagging", () => {
model: string;
providerVariant: string;
};
expect(debug.model).toBe("openai/gpt-5.4");
expect(debug.model).toBe("openai/gpt-5.5");
expect(debug.providerVariant).toBe("openai");
});

View File

@@ -53,7 +53,7 @@ type StreamEvent =
* - Everything else (including empty strings) → `"unknown"`
*
* The `/v1/messages` route always feeds `body.model` straight through,
-* so an Anthropic request with an `openai/gpt-5.4` model string is still
+* so an Anthropic request with an `openai/gpt-5.5` model string is still
* classified as `"openai"`. That matches the parity program's convention
* where the provider label is the source of truth, not the HTTP route.
*/
@@ -78,7 +78,7 @@ export function resolveProviderVariant(model: string | undefined): MockOpenAiPro
return "anthropic";
}
// Fall back to model-name prefix matching for bare model strings like
-// `gpt-5.4` or `claude-opus-4-6`.
+// `gpt-5.5` or `claude-opus-4-6`.
if (/^(?:gpt-|o1-|openai-)/.test(trimmed)) {
return "openai";
}
@@ -1537,7 +1537,7 @@ async function buildResponsesPayload(
// ---------------------------------------------------------------------------
//
// The QA parity gate needs two comparable scenario runs: one against the
// "candidate" (openai/gpt-5.4) and one against the "baseline"
// "candidate" (openai/gpt-5.5) and one against the "baseline"
// (anthropic/claude-opus-4-6). The OpenAI mock above already dispatches all
// the scenario prompt branches we care about. Rather than duplicating that
// machinery, the /v1/messages route below translates Anthropic request
@@ -1926,8 +1926,8 @@ export async function startQaMockOpenAiServer(params?: { host?: string; port?: n
if (req.method === "GET" && url.pathname === "/v1/models") {
writeJson(res, 200, {
data: [
{ id: "gpt-5.4", object: "model" },
{ id: "gpt-5.4-alt", object: "model" },
{ id: "gpt-5.5", object: "model" },
{ id: "gpt-5.5-alt", object: "model" },
{ id: "gpt-image-1", object: "model" },
{ id: "text-embedding-3-small", object: "model" },
{ id: "claude-opus-4-6", object: "model" },

View File

@@ -28,8 +28,8 @@ export function createMockOpenAiResponsesProvider(baseUrl: string): ModelProvide
},
models: [
{
id: "gpt-5.4",
name: "gpt-5.4",
id: "gpt-5.5",
name: "gpt-5.5",
api: "openai-responses",
reasoning: false,
input: ["text", "image"],
@@ -38,8 +38,8 @@ export function createMockOpenAiResponsesProvider(baseUrl: string): ModelProvide
maxTokens: 4096,
},
{
id: "gpt-5.4-alt",
name: "gpt-5.4-alt",
id: "gpt-5.5-alt",
name: "gpt-5.5-alt",
api: "openai-responses",
reasoning: false,
input: ["text", "image"],

View File

@@ -10,7 +10,7 @@ export type MockQaProviderDefinitionParams = {
};
function mockModelRef(providerId: string, alternate?: boolean) {
return `${providerId}/${alternate ? "gpt-5.4-alt" : "gpt-5.4"}`;
return `${providerId}/${alternate ? "gpt-5.5-alt" : "gpt-5.5"}`;
}
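
With the rename applied, the helper resolves, for example (provider id borrowed from the aimock fixtures elsewhere in this diff):

mockModelRef("aimock");       // "aimock/gpt-5.5"
mockModelRef("aimock", true); // "aimock/gpt-5.5-alt"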
export function createMockQaProviderDefinition(

View File

@@ -51,7 +51,7 @@ describe("buildQaGatewayConfig", () => {
...createQaChannelTransportParams(),
});
expect(getPrimaryModel(cfg.agents?.defaults?.model)).toBe("mock-openai/gpt-5.4");
expect(getPrimaryModel(cfg.agents?.defaults?.model)).toBe("mock-openai/gpt-5.5");
expect(cfg.models?.providers?.["mock-openai"]?.baseUrl).toBe("http://127.0.0.1:44080/v1");
expect(cfg.models?.providers?.["mock-openai"]?.request).toEqual({ allowPrivateNetwork: true });
expect(cfg.models?.providers?.openai?.baseUrl).toBe("http://127.0.0.1:44080/v1");
@@ -88,14 +88,14 @@ describe("buildQaGatewayConfig", () => {
providerBaseUrl: "http://127.0.0.1:44080/v1",
workspaceDir: "/tmp/qa-workspace",
providerMode: "mock-openai",
primaryModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "anthropic/claude-opus-4-6",
});
expect(getPrimaryModel(cfg.agents?.defaults?.model)).toBe("openai/gpt-5.4");
expect(getPrimaryModel(cfg.agents?.defaults?.model)).toBe("openai/gpt-5.5");
expect(cfg.models?.providers?.openai?.api).toBe("openai-responses");
expect(cfg.models?.providers?.openai?.request).toEqual({ allowPrivateNetwork: true });
expect(cfg.models?.providers?.openai?.models.map((model) => model.id)).toContain("gpt-5.4");
expect(cfg.models?.providers?.openai?.models.map((model) => model.id)).toContain("gpt-5.5");
expect(cfg.models?.providers?.anthropic?.api).toBe("anthropic-messages");
expect(cfg.models?.providers?.anthropic?.baseUrl).toBe("http://127.0.0.1:44080");
expect(cfg.models?.providers?.anthropic?.request).toEqual({ allowPrivateNetwork: true });
@@ -113,11 +113,11 @@ describe("buildQaGatewayConfig", () => {
providerBaseUrl: "http://127.0.0.1:45080/v1",
workspaceDir: "/tmp/qa-workspace",
providerMode: "aimock",
primaryModel: "aimock/gpt-5.4",
alternateModel: "aimock/gpt-5.4-alt",
primaryModel: "aimock/gpt-5.5",
alternateModel: "aimock/gpt-5.5-alt",
});
expect(getPrimaryModel(cfg.agents?.defaults?.model)).toBe("aimock/gpt-5.4");
expect(getPrimaryModel(cfg.agents?.defaults?.model)).toBe("aimock/gpt-5.5");
expect(cfg.agents?.defaults?.imageGenerationModel).toEqual({
primary: "aimock/gpt-image-1",
});
@@ -167,17 +167,17 @@ describe("buildQaGatewayConfig", () => {
workspaceDir: "/tmp/qa-workspace",
providerMode: "live-frontier",
fastMode: true,
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
...createQaChannelTransportParams(),
});
expect(getPrimaryModel(cfg.agents?.defaults?.model)).toBe("openai/gpt-5.4");
expect(getPrimaryModel(cfg.agents?.list?.[0]?.model)).toBe("openai/gpt-5.4");
expect(getPrimaryModel(cfg.agents?.defaults?.model)).toBe("openai/gpt-5.5");
expect(getPrimaryModel(cfg.agents?.list?.[0]?.model)).toBe("openai/gpt-5.5");
expect(cfg.models).toBeUndefined();
expect(cfg.plugins?.allow).toEqual(["acpx", "memory-core", "openai", "qa-channel"]);
expect(cfg.plugins?.entries?.openai).toEqual({ enabled: true });
expect(cfg.agents?.defaults?.models?.["openai/gpt-5.4"]).toEqual({
expect(cfg.agents?.defaults?.models?.["openai/gpt-5.5"]).toEqual({
params: { transport: "sse", openaiWsWarmup: false, fastMode: true },
});
});
@@ -273,14 +273,14 @@ describe("buildQaGatewayConfig", () => {
gatewayToken: "token",
workspaceDir: "/tmp/qa-workspace",
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
thinkingDefault: "xhigh",
...createQaChannelTransportParams(),
});
expect(cfg.agents?.defaults?.thinkingDefault).toBe("xhigh");
expect(cfg.agents?.defaults?.models?.["openai/gpt-5.4"]?.params).toMatchObject({
expect(cfg.agents?.defaults?.models?.["openai/gpt-5.5"]?.params).toMatchObject({
thinking: "xhigh",
});
});
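
The last test pins down a small fan-out: the suite-level thinking default is kept on `agents.defaults.thinkingDefault` and mirrored into the per-model params for the primary ref. A sketch of just that slice; the helper name is invented and the real builder assembles far more of the config:

// Invented helper for illustration: returns only the thinking-related patch.
function thinkingDefaultPatch(modelRef: string, thinking: string) {
  return {
    agents: {
      defaults: {
        thinkingDefault: thinking,
        models: { [modelRef]: { params: { thinking } } },
      },
    },
  };
}

thinkingDefaultPatch("openai/gpt-5.5", "xhigh");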

View File

@@ -45,8 +45,8 @@ describe("qa run config", () => {
it("creates a live-by-default selection that arms every scenario", () => {
expect(createDefaultQaRunSelection(scenarios)).toEqual({
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
fastMode: true,
scenarioIds: ["dm-chat-baseline", "thread-lifecycle"],
});
@@ -57,7 +57,7 @@ describe("qa run config", () => {
normalizeQaRunSelection(
{
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "",
fastMode: false,
scenarioIds: ["thread-lifecycle", "missing", "thread-lifecycle"],
@@ -66,8 +66,8 @@ describe("qa run config", () => {
),
).toEqual({
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
fastMode: true,
scenarioIds: ["thread-lifecycle"],
});
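
These assertions exercise three observable rules: duplicate scenario ids collapse, unknown ids are dropped, and an empty alternate model falls back to the primary. A sketch of just those rules (the fastMode coercion also visible here is omitted because the fixtures do not pin down what drives it):

// Sketch only: the real normalizeQaRunSelection takes the full selection and
// scenario catalog; this covers the three rules the fixtures above assert.
function normalizeSelectionSketch(
  selection: { primaryModel: string; alternateModel: string; scenarioIds: string[] },
  knownScenarioIds: Set<string>,
) {
  return {
    ...selection,
    alternateModel: selection.alternateModel || selection.primaryModel,
    scenarioIds: [...new Set(selection.scenarioIds)].filter((id) =>
      knownScenarioIds.has(id),
    ),
  };
}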
@@ -99,13 +99,13 @@ describe("qa run config", () => {
});
it("keeps idle snapshots on static defaults so startup does not inspect auth profiles", () => {
defaultQaRuntimeModelForMode.mockReturnValue("openai/gpt-5.4");
defaultQaRuntimeModelForMode.mockReturnValue("openai/gpt-5.5");
defaultQaRuntimeModelForMode.mockClear();
expect(createIdleQaRunnerSnapshot(scenarios).selection).toMatchObject({
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
});
expect(defaultQaRuntimeModelForMode).not.toHaveBeenCalled();
});
@@ -123,8 +123,8 @@ describe("qa run config", () => {
),
).toEqual({
providerMode: "aimock",
primaryModel: "aimock/gpt-5.4",
alternateModel: "aimock/gpt-5.4-alt",
primaryModel: "aimock/gpt-5.5",
alternateModel: "aimock/gpt-5.5-alt",
fastMode: false,
scenarioIds: ["dm-chat-baseline"],
});
@@ -138,14 +138,14 @@ describe("qa run config", () => {
it("prefers the Codex OAuth default when the runtime resolver says it is available", () => {
defaultQaRuntimeModelForMode.mockImplementation((mode, options) =>
mode === "live-frontier"
? "openai/gpt-5.4"
? "openai/gpt-5.5"
: defaultQaProviderModelForMode(mode as QaProviderModeInput, options),
);
expect(createDefaultQaRunSelection(scenarios)).toEqual({
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
fastMode: true,
scenarioIds: ["dm-chat-baseline", "thread-lifecycle"],
});

View File

@@ -123,9 +123,9 @@ describe("qa scenario catalog", () => {
);
});
it("includes the GPT-5.4 thinking visibility switch scenario", () => {
const scenario = readQaScenarioById("gpt54-thinking-visibility-switch");
const config = readQaScenarioExecutionConfig("gpt54-thinking-visibility-switch") as
it("includes the GPT-5.5 thinking visibility switch scenario", () => {
const scenario = readQaScenarioById("gpt55-thinking-visibility-switch");
const config = readQaScenarioExecutionConfig("gpt55-thinking-visibility-switch") as
| {
requiredLiveProvider?: string;
requiredLiveModel?: string;
@@ -135,9 +135,9 @@ describe("qa scenario catalog", () => {
}
| undefined;
expect(scenario.sourcePath).toBe("qa/scenarios/models/gpt54-thinking-visibility-switch.md");
expect(scenario.sourcePath).toBe("qa/scenarios/models/gpt55-thinking-visibility-switch.md");
expect(config?.requiredLiveProvider).toBe("openai");
expect(config?.requiredLiveModel).toBe("gpt-5.4");
expect(config?.requiredLiveModel).toBe("gpt-5.5");
expect(config?.offDirective).toBe("/think off");
expect(config?.maxDirective).toBe("/think medium");
expect(config?.reasoningDirective).toBe("/reasoning on");
@@ -169,10 +169,10 @@ describe("qa scenario catalog", () => {
},
});
expect(config?.requiredProvider).toBe("openai");
expect(config?.requiredModel).toBe("gpt-5.4");
expect(config?.requiredModel).toBe("gpt-5.5");
expect(config?.expectedMarker).toBe("WEB-SEARCH-OK");
expect(scenario.execution.flow?.steps.map((step) => step.name)).toEqual([
"confirms live OpenAI GPT-5.4 web search auto mode",
"confirms live OpenAI GPT-5.5 web search auto mode",
"searches official OpenAI News through the live model",
]);
});
@@ -191,7 +191,7 @@ describe("qa scenario catalog", () => {
expect(scenario.sourcePath).toBe("qa/scenarios/models/thinking-slash-model-remap.md");
expect(config?.requiredProviderMode).toBe("live-frontier");
expect(config?.anthropicModelRef).toBe("anthropic/claude-sonnet-4-6");
expect(config?.openAiXhighModelRef).toBe("openai/gpt-5.4");
expect(config?.openAiXhighModelRef).toBe("openai/gpt-5.5");
expect(config?.noXhighModelRef).toBe("anthropic/claude-sonnet-4-6");
expect(scenario.execution.flow?.steps.map((step) => step.name)).toEqual([
"selects Anthropic and verifies adaptive options",

View File

@@ -158,7 +158,7 @@ describe("qa suite planning helpers", () => {
scenarios,
scenarioIds: ["anthropic-only"],
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
}).map((scenario) => scenario.id),
).toEqual(["anthropic-only"]);
});
@@ -274,7 +274,7 @@ describe("qa suite planning helpers", () => {
const scenarios = [
makeQaSuiteTestScenario("generic"),
makeQaSuiteTestScenario("openai-only", {
config: { requiredProvider: "openai", requiredModel: "gpt-5.4" },
config: { requiredProvider: "openai", requiredModel: "gpt-5.5" },
}),
makeQaSuiteTestScenario("anthropic-only", {
config: { requiredProvider: "anthropic", requiredModel: "claude-opus-4-6" },
@@ -288,7 +288,7 @@ describe("qa suite planning helpers", () => {
selectQaSuiteScenarios({
scenarios,
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
}).map((scenario) => scenario.id),
).toEqual(["generic", "openai-only"]);
@@ -317,7 +317,7 @@ describe("qa suite planning helpers", () => {
selectQaSuiteScenarios({
scenarios,
providerMode: "mock-openai",
primaryModel: "mock-openai/gpt-5.4",
primaryModel: "mock-openai/gpt-5.5",
}).map((scenario) => scenario.id),
).toEqual(["generic", "mock-only"]);
@@ -325,7 +325,7 @@ describe("qa suite planning helpers", () => {
selectQaSuiteScenarios({
scenarios,
providerMode: "live-frontier",
primaryModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
}).map((scenario) => scenario.id),
).toEqual(["generic", "live-only"]);
});
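
Read together, these cases imply a selection rule along the following lines: an explicit scenarioIds list wins outright; otherwise a scenario survives only if its provider, model, and provider-mode requirements match the run. A sketch under that reading (the fixture helpers above surely set more config keys than shown):

// Sketch of the implied filter; config keys mirror the fixtures in this diff.
type SketchScenario = {
  id: string;
  config?: {
    requiredProvider?: string;
    requiredModel?: string;
    requiredProviderMode?: string;
  };
};

function selectScenariosSketch(params: {
  scenarios: SketchScenario[];
  scenarioIds?: string[];
  providerMode: string;
  primaryModel: string;
}): SketchScenario[] {
  if (params.scenarioIds) {
    return params.scenarios.filter((s) => params.scenarioIds!.includes(s.id));
  }
  const [provider, model] = params.primaryModel.split("/");
  return params.scenarios.filter(({ config = {} }) => {
    if (config.requiredProviderMode && config.requiredProviderMode !== params.providerMode) return false;
    if (config.requiredProvider && config.requiredProvider !== provider) return false;
    if (config.requiredModel && config.requiredModel !== model) return false;
    return true;
  });
}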

View File

@@ -58,8 +58,8 @@ describe("qa suite runtime CLI integration", () => {
OPENCLAW_BUNDLED_PLUGINS_DIR: bundledPluginsDir,
},
},
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5",
providerMode: "mock-openai",
} as never,
["memory", "status", "--json"],

View File

@@ -81,8 +81,8 @@ describe("qa suite runtime agent process helpers", () => {
tempRoot: "/tmp/runtime",
runtimeEnv: { PATH: "/usr/bin" },
},
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4-mini",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5-mini",
providerMode: "mock-openai",
} as never,
["qa", "suite"],
@@ -114,8 +114,8 @@ describe("qa suite runtime agent process helpers", () => {
tempRoot: "/tmp/runtime",
runtimeEnv: { PATH: "/usr/bin", OPENCLAW_STATE_DIR: "/tmp/default-state" },
},
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4-mini",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5-mini",
providerMode: "mock-openai",
} as never,
["crestodian", "-m", "overview"],
@@ -156,8 +156,8 @@ describe("qa suite runtime agent process helpers", () => {
tempRoot: "/tmp/runtime",
runtimeEnv: {},
},
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4-mini",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5-mini",
providerMode: "mock-openai",
} as never,
["memory", "search"],
@@ -182,8 +182,8 @@ describe("qa suite runtime agent process helpers", () => {
tempRoot: "/tmp/runtime",
runtimeEnv: {},
},
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4-mini",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5-mini",
providerMode: "mock-openai",
} as never,
["memory", "search", "--json"],
@@ -213,8 +213,8 @@ describe("qa suite runtime agent process helpers", () => {
tempRoot: "/tmp/runtime",
runtimeEnv: {},
},
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4-mini",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5-mini",
providerMode: "mock-openai",
} as never,
["memory", "search", "--json"],

View File

@@ -17,8 +17,8 @@ describe("qa suite runtime agent session helpers", () => {
const gatewayCall = vi.fn();
const env = {
gateway: { call: gatewayCall },
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4-mini",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5-mini",
providerMode: "mock-openai",
} as never;

View File

@@ -185,8 +185,8 @@ describe("qa suite runtime flow", () => {
},
repoRoot: "/repo",
providerMode: "mock-openai",
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4-mini",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5-mini",
mock: null,
cfg: {} as QaSuiteRuntimeEnv["cfg"],
} satisfies Parameters<typeof createQaSuiteScenarioFlowApi>[0]["env"];

View File

@@ -31,7 +31,7 @@ describe("qa suite gateway helpers", () => {
profile: "coding",
},
agents: {
list: [{ id: "qa", model: { primary: "openai/gpt-5.4" } }],
list: [{ id: "qa", model: { primary: "openai/gpt-5.5" } }],
},
};

View File

@@ -13,8 +13,8 @@ describe("buildQaSuiteSummaryJson", () => {
startedAt: new Date("2026-04-11T00:00:00.000Z"),
finishedAt: new Date("2026-04-11T00:05:00.000Z"),
providerMode: "mock-openai" as const,
primaryModel: "openai/gpt-5.4",
alternateModel: "openai/gpt-5.4-alt",
primaryModel: "openai/gpt-5.5",
alternateModel: "openai/gpt-5.5-alt",
fastMode: true,
concurrency: 2,
};
@@ -25,12 +25,12 @@ describe("buildQaSuiteSummaryJson", () => {
startedAt: "2026-04-11T00:00:00.000Z",
finishedAt: "2026-04-11T00:05:00.000Z",
providerMode: "mock-openai",
primaryModel: "openai/gpt-5.4",
primaryModel: "openai/gpt-5.5",
primaryProvider: "openai",
primaryModelName: "gpt-5.4",
alternateModel: "openai/gpt-5.4-alt",
primaryModelName: "gpt-5.5",
alternateModel: "openai/gpt-5.5-alt",
alternateProvider: "openai",
alternateModelName: "gpt-5.4-alt",
alternateModelName: "gpt-5.5-alt",
fastMode: true,
concurrency: 2,
scenarioIds: null,
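
The provider and name fields are evidently derived by splitting the model ref at its first slash. A minimal sketch of that derivation (the no-slash fallback is an assumption these fixtures never exercise):

function splitModelRef(ref: string): { provider: string | null; modelName: string } {
  const slash = ref.indexOf("/");
  if (slash === -1) return { provider: null, modelName: ref }; // assumed fallback
  return { provider: ref.slice(0, slash), modelName: ref.slice(slash + 1) };
}

splitModelRef("openai/gpt-5.5-alt"); // { provider: "openai", modelName: "gpt-5.5-alt" }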

View File

@@ -286,7 +286,7 @@ export type QaSuiteSummaryJsonParams = {
};
/**
* Strongly-typed shape of `qa-suite-summary.json`. The GPT-5.4 parity gate
* Strongly-typed shape of `qa-suite-summary.json`. The GPT-5.5 parity gate
* (agentic-parity-report.ts, #64441) and any future parity wrapper can
* import this type instead of re-declaring the shape, so changes to the
* summary schema propagate through to every consumer at type-check time.
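
A consumer would then import the shared type instead of redeclaring it. The assertion helper below is hypothetical; only the re-export and the summary fields are shown in this diff:

import type { QaSuiteSummaryJson } from "./suite-summary.js";

// Hypothetical parity-gate check: trust the summary's own primaryModel field
// rather than a caller-supplied label.
function assertSummaryProducedBy(summary: QaSuiteSummaryJson, expectedRef: string): void {
  if (summary.primaryModel !== expectedRef) {
    throw new Error(`summary produced by ${summary.primaryModel}, expected ${expectedRef}`);
  }
}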
@@ -294,7 +294,7 @@ export type QaSuiteSummaryJsonParams = {
export type { QaSuiteSummaryJson } from "./suite-summary.js";
/**
* Pure-ish JSON builder for qa-suite-summary.json. Exported so the GPT-5.4
* Pure-ish JSON builder for qa-suite-summary.json. Exported so the GPT-5.5
* parity gate (agentic-parity-report.ts, #64441) and any future parity
* runner can assert-and-trust the provider/model that produced a given
* summary instead of blindly accepting the caller's candidateLabel /