CI: fix live Docker auth mounts (#67812)

* CI: fix live Docker auth mounts

* CI: harden live Docker auth mounts
This commit is contained in:
Onur
2026-04-16 23:00:11 +02:00
committed by GitHub
parent 012b577e84
commit 3ae5d95bfd
7 changed files with 151 additions and 41 deletions

View File

@@ -192,6 +192,10 @@ function isRefreshTokenReused(raw: string): boolean {
return /refresh_token_reused/i.test(raw);
}
/**
 * True when the raw error text reports that an accountId could not be
 * extracted from a token (matched case-insensitively).
 */
function isAccountIdExtractionError(raw: string): boolean {
  // search() returns -1 when the pattern is absent; same regex as before.
  return raw.search(/failed to extract accountid from token/i) !== -1;
}
/**
 * True when the raw error text says instructions are required
 * (matched case-insensitively).
 */
function isInstructionsRequiredError(raw: string): boolean {
  // exec() yields null on no match; equivalent to test() for a non-global regex.
  return /instructions are required/i.exec(raw) !== null;
}
@@ -794,6 +798,15 @@ describeLive("live models (profile keys)", () => {
logProgress(`${progressLabel}: skip (codex refresh token reused)`);
break;
}
if (
allowNotFoundSkip &&
model.provider === "openai-codex" &&
isAccountIdExtractionError(message)
) {
skipped.push({ model: id, reason: message });
logProgress(`${progressLabel}: skip (codex account id extraction)`);
break;
}
if (
allowNotFoundSkip &&
model.provider === "openai-codex" &&

View File

@@ -548,6 +548,10 @@ function isRefreshTokenReused(error: string): boolean {
return /refresh_token_reused/i.test(error);
}
/**
 * True when the error text reports that an accountId could not be
 * extracted from a token (matched case-insensitively).
 */
function isAccountIdExtractionError(error: string): boolean {
  const accountIdFailure = /failed to extract accountid from token/i;
  return accountIdFailure.test(error);
}
function isChatGPTUsageLimitErrorMessage(raw: string): boolean {
const msg = raw.toLowerCase();
return msg.includes("hit your chatgpt usage limit") && msg.includes("try again in");
@@ -675,10 +679,10 @@ describe("getHighSignalLiveModelPriorityIndex", () => {
it("prefers curated Google replacements over big-pickle", () => {
expect(
getHighSignalLiveModelPriorityIndex({ provider: "google", id: "gemini-3.1-pro-preview" }),
).toBe(2);
).toBe(3);
expect(
getHighSignalLiveModelPriorityIndex({ provider: "google", id: "gemini-3-flash-preview" }),
).toBe(3);
).toBe(4);
expect(getHighSignalLiveModelPriorityIndex({ provider: "opencode", id: "big-pickle" })).toBe(
null,
);
@@ -1926,6 +1930,11 @@ async function runGatewayModelSuite(params: GatewayModelSuiteParams) {
logProgress(`${progressLabel}: skip (codex refresh token reused)`);
break;
}
if (model.provider === "openai-codex" && isAccountIdExtractionError(message)) {
skippedCount += 1;
logProgress(`${progressLabel}: skip (codex account id extraction)`);
break;
}
if (model.provider === "openai-codex" && isChatGPTUsageLimitErrorMessage(message)) {
skippedCount += 1;
logProgress(`${progressLabel}: skip (chatgpt usage limit)`);