test(live): prefer stable OpenAI cache model

This commit is contained in:
Vincent Koc
2026-05-04 18:40:08 -07:00
parent 46a04099a4
commit 30bb88d80e
2 changed files with 2 additions and 2 deletions

View File

@@ -562,7 +562,7 @@ export async function runLiveCacheRegression(): Promise<LiveCacheRegressionResul
provider: "openai",
api: "openai-responses",
envVar: "OPENCLAW_LIVE_OPENAI_CACHE_MODEL",
-      preferredModelIds: ["gpt-5.5", "gpt-5.4-mini", "gpt-5.4", "gpt-5.2"],
+      preferredModelIds: ["gpt-5.2", "gpt-5.4-mini", "gpt-5.4", "gpt-5.5"],
});
const anthropic = await resolveLiveDirectModel({
provider: "anthropic",

View File

@@ -542,7 +542,7 @@ describe("resolveGatewayLiveSuiteTimeoutMs", () => {
});
it("scales model-capped sweeps for multi-probe retries", () => {
expect(resolveGatewayLiveSuiteTimeoutMs(2)).toBeGreaterThan(GATEWAY_LIVE_DEFAULT_TIMEOUT_MS);
expect(resolveGatewayLiveSuiteTimeoutMs(3)).toBeGreaterThan(GATEWAY_LIVE_DEFAULT_TIMEOUT_MS);
});
it("caps very large model sweeps", () => {