Reply: surface OAuth reauth failures

This commit is contained in:
Mariano Belinky
2026-04-08 17:12:17 +02:00
parent 45195e3645
commit fd1558eb9e
13 changed files with 446 additions and 13 deletions

View File

@@ -323,6 +323,11 @@ Anthropic setup-token path.
Refresh prompts only appear when running interactively (TTY); `--non-interactive`
skips refresh attempts.
When an OAuth refresh fails permanently (for example `refresh_token_reused`,
`invalid_grant`, or a provider telling you to sign in again), doctor reports
that re-auth is required and prints the exact `openclaw models auth login --provider ...`
command to run.
Doctor also reports auth profiles that are temporarily unusable due to:
- short cooldowns (rate limits/timeouts/auth failures)

View File

@@ -0,0 +1,48 @@
export type OAuthRefreshFailureReason =
  | "refresh_token_reused"
  | "invalid_grant"
  | "sign_in_again"
  | "invalid_refresh_token"
  | "revoked";

// Matches "OAuth token refresh failed for <provider>: ..." and captures the provider id.
const OAUTH_REFRESH_FAILURE_PROVIDER_RE = /OAuth token refresh failed for ([^:]+):/i;

/**
 * Pull the provider id out of an OAuth refresh-failure message.
 *
 * @param message - Raw error message from the gateway/agent run.
 * @returns The trimmed provider id, or null when the message does not carry one.
 */
export function extractOAuthRefreshFailureProvider(message: string): string | null {
  const match = OAUTH_REFRESH_FAILURE_PROVIDER_RE.exec(message);
  const provider = match?.[1]?.trim() ?? "";
  return provider === "" ? null : provider;
}
/**
 * Map a raw refresh-failure message to a coarse failure reason.
 *
 * Matching is case-insensitive and ordered: the first rule whose needle
 * appears in the message wins, so a message mentioning both "invalid_grant"
 * and "revoked" classifies as "invalid_grant".
 *
 * @param message - Raw error message to classify.
 * @returns The matched reason, or null when no known marker is present.
 */
export function classifyOAuthRefreshFailureReason(
  message: string,
): OAuthRefreshFailureReason | null {
  const lower = message.toLowerCase();
  // Ordered [needles, reason] rules; first hit wins.
  // Note: the original "expired or revoked" needle is subsumed by "revoked",
  // so a single needle covers both phrasings.
  const rules: Array<[string[], OAuthRefreshFailureReason]> = [
    [["refresh_token_reused"], "refresh_token_reused"],
    [["invalid_grant"], "invalid_grant"],
    [["signing in again", "sign in again"], "sign_in_again"],
    [["invalid refresh token"], "invalid_refresh_token"],
    [["revoked"], "revoked"],
  ];
  for (const [needles, reason] of rules) {
    if (needles.some((needle) => lower.includes(needle))) {
      return reason;
    }
  }
  return null;
}
/**
 * Classify an OAuth refresh-failure message into provider + reason.
 *
 * @param message - Raw error message.
 * @returns Provider/reason pair (either part may be null if unparseable),
 *   or null when the message is not an OAuth token refresh failure at all.
 */
export function classifyOAuthRefreshFailure(message: string): {
  provider: string | null;
  reason: OAuthRefreshFailureReason | null;
} | null {
  const isRefreshFailure = /oauth token refresh failed/i.test(message);
  if (!isRefreshFailure) {
    return null;
  }
  const provider = extractOAuthRefreshFailureProvider(message);
  const reason = classifyOAuthRefreshFailureReason(message);
  return { provider, reason };
}

View File

@@ -0,0 +1,143 @@
// Regression tests: runReplyAgent must resolve the queued run's execution
// config (runtime snapshot + gateway-resolved secret refs) BEFORE any early
// helpers read followupRun.run.config.
import { beforeEach, describe, expect, it, vi } from "vitest";
import type { TemplateContext } from "../templating.js";
import type { FollowupRun, QueueSettings } from "./queue.js";
import { createMockTypingController } from "./test-helpers.js";
// freshCfg: what resolveQueuedReplyExecutionConfig is mocked to return.
const freshCfg = { runtimeFresh: true };
// staleCfg: config carried by the queued run; still holds a secret ref.
const staleCfg = {
runtimeFresh: false,
skills: {
entries: {
whisper: {
apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" },
},
},
},
};
// Thrown from the preflight-compaction mock to stop the run right after the
// helpers under test have executed.
const sentinelError = new Error("stop-after-preflight");
const resolveQueuedReplyExecutionConfigMock = vi.fn();
const resolveReplyToModeMock = vi.fn();
const createReplyToModeFilterForChannelMock = vi.fn();
const createReplyMediaPathNormalizerMock = vi.fn();
const runPreflightCompactionIfNeededMock = vi.fn();
const runMemoryFlushIfNeededMock = vi.fn();
// NOTE: vi.mock calls are hoisted above the imports by vitest; each factory
// delegates to a vi.fn() so individual tests can reprogram behavior.
vi.mock("./agent-runner-utils.js", () => ({
resolveQueuedReplyExecutionConfig: (...args: unknown[]) =>
resolveQueuedReplyExecutionConfigMock(...args),
}));
vi.mock("./reply-threading.js", () => ({
resolveReplyToMode: (...args: unknown[]) => resolveReplyToModeMock(...args),
createReplyToModeFilterForChannel: (...args: unknown[]) =>
createReplyToModeFilterForChannelMock(...args),
}));
vi.mock("./reply-media-paths.js", () => ({
createReplyMediaPathNormalizer: (...args: unknown[]) =>
createReplyMediaPathNormalizerMock(...args),
}));
vi.mock("./agent-runner-memory.js", () => ({
runPreflightCompactionIfNeeded: (...args: unknown[]) =>
runPreflightCompactionIfNeededMock(...args),
runMemoryFlushIfNeeded: (...args: unknown[]) => runMemoryFlushIfNeededMock(...args),
}));
// Import AFTER the mocks are registered so agent-runner.js binds to them.
const { runReplyAgent } = await import("./agent-runner.js");
describe("runReplyAgent runtime config", () => {
beforeEach(() => {
resolveQueuedReplyExecutionConfigMock.mockReset();
resolveReplyToModeMock.mockReset();
createReplyToModeFilterForChannelMock.mockReset();
createReplyMediaPathNormalizerMock.mockReset();
runPreflightCompactionIfNeededMock.mockReset();
runMemoryFlushIfNeededMock.mockReset();
// Default happy-path wiring: config resolution yields freshCfg, filters are
// pass-through, and preflight compaction aborts the run with the sentinel.
resolveQueuedReplyExecutionConfigMock.mockResolvedValue(freshCfg);
resolveReplyToModeMock.mockReturnValue("default");
createReplyToModeFilterForChannelMock.mockReturnValue((payload: unknown) => payload);
createReplyMediaPathNormalizerMock.mockReturnValue((payload: unknown) => payload);
runPreflightCompactionIfNeededMock.mockRejectedValue(sentinelError);
runMemoryFlushIfNeededMock.mockResolvedValue(undefined);
});
it("resolves direct reply runs before early helpers read config", async () => {
// Minimal queued direct-reply run carrying the stale config.
const followupRun = {
prompt: "hello",
summaryLine: "hello",
enqueuedAt: Date.now(),
run: {
sessionId: "session-1",
sessionKey: "agent:main:telegram:default:direct:test",
messageProvider: "telegram",
sessionFile: "/tmp/session.jsonl",
workspaceDir: "/tmp",
config: staleCfg,
skillsSnapshot: {},
provider: "openai",
model: "gpt-5.4",
thinkLevel: "low",
verboseLevel: "off",
elevatedLevel: "off",
bashElevated: {
enabled: false,
allowed: false,
defaultLevel: "off",
},
timeoutMs: 1_000,
blockReplyBreak: "message_end",
},
} as unknown as FollowupRun;
const resolvedQueue = { mode: "interrupt" } as QueueSettings;
const typing = createMockTypingController();
const sessionCtx = {
Provider: "telegram",
OriginatingChannel: "telegram",
OriginatingTo: "12345",
AccountId: "default",
ChatType: "dm",
MessageSid: "msg-1",
} as unknown as TemplateContext;
// The sentinel rejection proves we got past config resolution and the
// early helpers, and stopped exactly at preflight compaction.
await expect(
runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
defaultModel: "openai/gpt-5.4",
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
}),
).rejects.toBe(sentinelError);
// The run's config must be swapped for the resolved one in place...
expect(followupRun.run.config).toBe(freshCfg);
// ...resolution must have started from the stale config...
expect(resolveQueuedReplyExecutionConfigMock).toHaveBeenCalledWith(staleCfg);
// ...and every early helper must have seen the fresh config, not the stale one.
expect(resolveReplyToModeMock).toHaveBeenCalledWith(freshCfg, "telegram", "default", "dm");
expect(createReplyMediaPathNormalizerMock).toHaveBeenCalledWith({
cfg: freshCfg,
sessionKey: undefined,
workspaceDir: "/tmp",
});
expect(runPreflightCompactionIfNeededMock).toHaveBeenCalledWith(
expect.objectContaining({
cfg: freshCfg,
followupRun,
}),
);
});
});

View File

@@ -960,6 +960,46 @@ describe("runAgentTurnWithFallback", () => {
}
});
// A known OAuth refresh failure from the embedded agent should surface as a
// final payload carrying exact re-auth guidance (provider + login command)
// instead of the generic "something went wrong" text.
it("surfaces gateway reauth guidance for known OAuth refresh failures", async () => {
state.runEmbeddedPiAgentMock.mockRejectedValueOnce(
new Error(
"OAuth token refresh failed for openai-codex: refresh_token_reused. Please try again or re-authenticate.",
),
);
const runAgentTurnWithFallback = await getRunAgentTurnWithFallback();
const result = await runAgentTurnWithFallback({
commandBody: "hello",
followupRun: createFollowupRun(),
sessionCtx: {
Provider: "whatsapp",
MessageSid: "msg",
} as unknown as TemplateContext,
opts: {},
typingSignals: createMockTypingSignaler(),
blockReplyPipeline: null,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
applyReplyToMode: (payload) => payload,
shouldEmitToolResult: () => true,
shouldEmitToolOutput: () => false,
pendingToolTasks: new Set(),
resetSessionAfterCompactionFailure: async () => false,
resetSessionAfterRoleOrderingConflict: async () => false,
isHeartbeat: false,
sessionKey: "main",
getActiveSessionEntry: () => undefined,
resolvedVerboseLevel: "off",
});
// The failure is converted to a final user-visible reply, not rethrown.
expect(result.kind).toBe("final");
if (result.kind === "final") {
expect(result.payload.text).toBe(
"⚠️ Model login expired on the gateway for openai-codex. Re-auth with `openclaw models auth login --provider openai-codex`, then try again.",
);
}
});
it("returns a session reset hint for Bedrock tool mismatch errors on external chat channels", async () => {
state.runEmbeddedPiAgentMock.mockRejectedValueOnce(
new Error(

View File

@@ -4,6 +4,7 @@ import {
hasOutboundReplyContent,
resolveSendableOutboundReplyParts,
} from "openclaw/plugin-sdk/reply-payload";
import { classifyOAuthRefreshFailure } from "../../agents/auth-profiles/oauth-refresh-failure.js";
import { resolveBootstrapWarningSignaturesSeen } from "../../agents/bootstrap-budget.js";
import { runCliAgent } from "../../agents/cli-runner.js";
import { getCliSessionBinding } from "../../agents/cli-session.js";
@@ -308,6 +309,16 @@ function buildExternalRunFailureText(message: string): string {
if (isToolResultTurnMismatchError(message)) {
return "⚠️ Session history got out of sync. Please try again, or use /new to start a fresh session.";
}
const oauthRefreshFailure = classifyOAuthRefreshFailure(message);
if (oauthRefreshFailure) {
const loginCommand = oauthRefreshFailure.provider
? `openclaw models auth login --provider ${oauthRefreshFailure.provider}`
: "openclaw models auth login --provider <provider>";
if (oauthRefreshFailure.reason) {
return `⚠️ Model login expired on the gateway${oauthRefreshFailure.provider ? ` for ${oauthRefreshFailure.provider}` : ""}. Re-auth with \`${loginCommand}\`, then try again.`;
}
return `⚠️ Model login failed on the gateway${oauthRefreshFailure.provider ? ` for ${oauthRefreshFailure.provider}` : ""}. Please try again. If this keeps happening, re-auth with \`${loginCommand}\`.`;
}
return "⚠️ Something went wrong while processing your request. Please try again, or use /new to start a fresh session.";
}

View File

@@ -2,6 +2,8 @@ import { resolveRunModelFallbacksOverride } from "../../agents/agent-scope.js";
import { getChannelPlugin } from "../../channels/plugins/index.js";
import type { ChannelId, ChannelThreadingToolContext } from "../../channels/plugins/types.js";
import { normalizeAnyChannelId, normalizeChannelId } from "../../channels/registry.js";
import { resolveCommandSecretRefsViaGateway } from "../../cli/command-secret-gateway.js";
import { getAgentRuntimeCommandSecretTargetIds } from "../../cli/command-secret-targets.js";
import { getRuntimeConfigSnapshot, type OpenClawConfig } from "../../config/config.js";
import {
normalizeOptionalLowercaseString,
@@ -25,6 +27,18 @@ export function resolveQueuedReplyRuntimeConfig(config: OpenClawConfig): OpenCla
);
}
/**
 * Resolve the effective config for executing a queued reply run.
 *
 * Merges the runtime config snapshot first, then asks the command secret
 * gateway to materialize the secret refs the agent runtime needs.
 *
 * @param config - The config the queued run was enqueued with.
 * @returns The gateway-resolved config, or the runtime-merged config when the
 *   gateway returns no resolved config.
 */
export async function resolveQueuedReplyExecutionConfig(
  config: OpenClawConfig,
): Promise<OpenClawConfig> {
  const baseConfig = resolveQueuedReplyRuntimeConfig(config);
  const gatewayResult = await resolveCommandSecretRefsViaGateway({
    config: baseConfig,
    commandName: "reply",
    targetIds: getAgentRuntimeCommandSecretTargetIds(),
  });
  return gatewayResult.resolvedConfig ?? baseConfig;
}
/**
* Build provider-specific threading context for tool auto-injection.
*/

View File

@@ -48,6 +48,7 @@ import {
hasUnbackedReminderCommitment,
} from "./agent-runner-reminder-guard.js";
import { appendUsageLine, formatResponseUsageLine } from "./agent-runner-usage-line.js";
import { resolveQueuedReplyExecutionConfig } from "./agent-runner-utils.js";
import { createAudioAsVoiceBuffer, createBlockReplyPipeline } from "./block-reply-pipeline.js";
import { resolveEffectiveBlockStreamingConfig } from "./block-streaming.js";
import { createFollowupRunner } from "./followup-runner.js";
@@ -162,6 +163,7 @@ export async function runReplyAgent(params: {
const pendingToolTasks = new Set<Promise<void>>();
const blockReplyTimeoutMs = opts?.blockReplyTimeoutMs ?? BLOCK_REPLY_SEND_TIMEOUT_MS;
followupRun.run.config = await resolveQueuedReplyExecutionConfig(followupRun.run.config);
const replyToChannel = resolveOriginMessageProvider({
originatingChannel: sessionCtx.OriginatingChannel,

View File

@@ -11,6 +11,7 @@ const compactEmbeddedPiSessionMock = vi.fn();
const routeReplyMock = vi.fn();
const isRoutableChannelMock = vi.fn();
const runPreflightCompactionIfNeededMock = vi.fn();
const resolveCommandSecretRefsViaGatewayMock = vi.fn();
let createFollowupRunner: typeof import("./followup-runner.js").createFollowupRunner;
let clearRuntimeConfigSnapshot: typeof import("../../config/config.js").clearRuntimeConfigSnapshot;
let loadSessionStore: typeof import("../../config/sessions/store.js").loadSessionStore;
@@ -275,6 +276,13 @@ async function loadFreshFollowupRunnerModuleForTest() {
isRoutableChannel: (...args: unknown[]) => isRoutableChannelMock(...args),
routeReply: (...args: unknown[]) => routeReplyMock(...args),
}));
vi.doMock("../../cli/command-secret-gateway.js", () => ({
resolveCommandSecretRefsViaGateway: (...args: unknown[]) =>
resolveCommandSecretRefsViaGatewayMock(...args),
}));
vi.doMock("../../cli/command-secret-targets.js", () => ({
getAgentRuntimeCommandSecretTargetIds: () => new Set(["skills.entries."]),
}));
({ createFollowupRunner } = await import("./followup-runner.js"));
({ clearRuntimeConfigSnapshot, setRuntimeConfigSnapshot } =
await import("../../config/config.js"));
@@ -301,9 +309,16 @@ beforeEach(async () => {
runEmbeddedPiAgentMock.mockReset();
compactEmbeddedPiSessionMock.mockReset();
runPreflightCompactionIfNeededMock.mockReset();
resolveCommandSecretRefsViaGatewayMock.mockReset();
runPreflightCompactionIfNeededMock.mockImplementation(
async (params: { sessionEntry?: SessionEntry }) => params.sessionEntry,
);
resolveCommandSecretRefsViaGatewayMock.mockImplementation(async ({ config }) => ({
resolvedConfig: config,
diagnostics: [],
targetStatesByPath: {},
hadUnresolvedTargets: false,
}));
routeReplyMock.mockReset();
routeReplyMock.mockResolvedValue({ ok: true });
isRoutableChannelMock.mockReset();
@@ -432,6 +447,69 @@ describe("createFollowupRunner runtime config", () => {
| undefined;
expect(call?.config).toBe(runtimeConfig);
});
// Queued embedded followups must have their config resolved through the
// secret gateway BEFORE preflight helpers or the embedded agent read it.
it("resolves queued embedded followups before preflight helpers read config", async () => {
// Config as enqueued: still carries an env secret ref for the whisper skill.
const sourceConfig: OpenClawConfig = {
skills: {
entries: {
whisper: {
apiKey: {
source: "env",
provider: "default",
id: "OPENAI_API_KEY",
},
},
},
},
};
// Config after gateway resolution: the secret ref is replaced by a value.
const runtimeConfig: OpenClawConfig = {
skills: {
entries: {
whisper: {
apiKey: "resolved-runtime-key",
},
},
},
};
resolveCommandSecretRefsViaGatewayMock.mockResolvedValueOnce({
resolvedConfig: runtimeConfig,
diagnostics: [],
targetStatesByPath: { "skills.entries.whisper.apiKey": "resolved_local" },
hadUnresolvedTargets: false,
});
runEmbeddedPiAgentMock.mockResolvedValueOnce({
payloads: [],
meta: {},
});
const runner = createFollowupRunner({
typing: createMockTypingController(),
typingMode: "instant",
defaultModel: "openai/gpt-5.4",
});
const queued = createQueuedRun({
run: {
config: sourceConfig,
provider: "openai",
model: "gpt-5.4",
},
});
await runner(queued);
// The queued run's config must be swapped in place for the resolved one...
expect(queued.run.config).toBe(runtimeConfig);
// ...and both preflight compaction and the embedded agent must see it.
expect(runPreflightCompactionIfNeededMock).toHaveBeenCalledWith(
expect.objectContaining({
cfg: runtimeConfig,
}),
);
const call = runEmbeddedPiAgentMock.mock.calls.at(-1)?.[0] as
| {
config?: unknown;
}
| undefined;
expect(call?.config).toBe(runtimeConfig);
});
});
describe("createFollowupRunner compaction", () => {

View File

@@ -21,7 +21,11 @@ import { stripHeartbeatToken } from "../heartbeat.js";
import { isSilentReplyText, SILENT_REPLY_TOKEN } from "../tokens.js";
import type { GetReplyOptions, ReplyPayload } from "../types.js";
import { runPreflightCompactionIfNeeded } from "./agent-runner-memory.js";
import { resolveQueuedReplyRuntimeConfig, resolveRunAuthProfile } from "./agent-runner-utils.js";
import {
resolveQueuedReplyExecutionConfig,
resolveQueuedReplyRuntimeConfig,
resolveRunAuthProfile,
} from "./agent-runner-utils.js";
import { resolveFollowupDeliveryPayloads } from "./followup-delivery.js";
import { resolveOriginMessageProvider } from "./origin-routing.js";
import { refreshQueuedFollowupSession, type FollowupRun } from "./queue.js";
@@ -127,6 +131,7 @@ export function createFollowupRunner(params: {
};
return async (queued: FollowupRun) => {
queued.run.config = await resolveQueuedReplyExecutionConfig(queued.run.config);
const replySessionKey = queued.run.sessionKey ?? sessionKey;
const runtimeConfig = resolveQueuedReplyRuntimeConfig(queued.run.config);
const effectiveQueued =

View File

@@ -1,5 +1,5 @@
import { describe, expect, it } from "vitest";
import { resolveUnusableProfileHint } from "./doctor-auth.js";
import { formatOAuthRefreshFailureDoctorLine, resolveUnusableProfileHint } from "./doctor-auth.js";
describe("resolveUnusableProfileHint", () => {
it("returns billing guidance for disabled billing profiles", () => {
@@ -25,4 +25,30 @@ describe("resolveUnusableProfileHint", () => {
"Wait for cooldown or switch provider.",
);
});
// Permanent failure reasons (e.g. refresh_token_reused) render as a
// "re-auth required" doctor line that includes the exact login command.
it("formats permanent OAuth refresh failures as reauth-required", () => {
expect(
formatOAuthRefreshFailureDoctorLine({
profileId: "openai-codex:default",
provider: "openai-codex",
message:
"OAuth token refresh failed for openai-codex: refresh_token_reused. Please try again or re-authenticate.",
}),
).toBe(
"- openai-codex:default: re-auth required [refresh_token_reused] — Run `openclaw models auth login --provider openai-codex`.",
);
});
// Refresh failures without a classified reason render as retry-first
// guidance, with re-auth suggested only if the problem persists.
it("formats non-permanent OAuth refresh failures as retry-then-reauth guidance", () => {
expect(
formatOAuthRefreshFailureDoctorLine({
profileId: "openai-codex:default",
provider: "openai-codex",
message:
"OAuth token refresh failed for openai-codex: temporary upstream issue. Please try again or re-authenticate.",
}),
).toBe(
"- openai-codex:default: OAuth refresh failed — Try again; if this persists, run `openclaw models auth login --provider openai-codex`.",
);
});
});

View File

@@ -11,13 +11,21 @@ import {
resolveProfileUnusableUntilForDisplay,
} from "../agents/auth-profiles.js";
import { formatAuthDoctorHint } from "../agents/auth-profiles/doctor.js";
import {
classifyOAuthRefreshFailure,
type OAuthRefreshFailureReason,
} from "../agents/auth-profiles/oauth-refresh-failure.js";
import { formatCliCommand } from "../cli/command-format.js";
import type { OpenClawConfig } from "../config/config.js";
import { formatErrorMessage } from "../infra/errors.js";
import { resolvePluginProviders } from "../plugins/providers.runtime.js";
import { note } from "../terminal/note.js";
import { isRecord } from "../utils.js";
import type { DoctorPrompter } from "./doctor-prompter.js";
import { buildProviderAuthRecoveryHint } from "./provider-auth-guidance.js";
import {
buildProviderAuthRecoveryHint,
resolveProviderAuthLoginCommand,
} from "./provider-auth-guidance.js";
const CODEX_PROVIDER_ID = "openai-codex";
const CODEX_OAUTH_WARNING_TITLE = "Codex OAuth";
@@ -167,6 +175,43 @@ export function resolveUnusableProfileHint(params: {
return "Wait for cooldown or switch provider.";
}
/**
 * Human-readable label for a classified refresh-failure reason, used inside
 * the bracketed tag of a doctor report line. Returns "refresh failed" when
 * no specific reason was classified.
 */
function formatOAuthRefreshFailureReason(reason: OAuthRefreshFailureReason | null): string {
  if (reason === null) {
    return "refresh failed";
  }
  // Error-code style reasons keep their underscores; phrase-style reasons
  // are rendered as plain words.
  const labels: Record<OAuthRefreshFailureReason, string> = {
    refresh_token_reused: "refresh_token_reused",
    invalid_grant: "invalid_grant",
    sign_in_again: "sign in again",
    invalid_refresh_token: "invalid refresh token",
    revoked: "revoked",
  };
  return labels[reason] ?? "refresh failed";
}
/**
 * Render a doctor report line for an auth profile whose OAuth refresh failed.
 *
 * @param params.profileId - Profile identifier shown at the start of the line.
 * @param params.provider - Fallback provider id when the message names none.
 * @param params.message - Raw error message from the refresh attempt.
 * @returns A formatted bullet line, or null when the message is not an OAuth
 *   refresh failure (callers then fall back to a generic error line).
 */
export function formatOAuthRefreshFailureDoctorLine(params: {
  profileId: string;
  provider: string;
  message: string;
}): string | null {
  const { profileId, provider: fallbackProvider, message } = params;
  const classified = classifyOAuthRefreshFailure(message);
  if (classified === null) {
    return null;
  }
  // Prefer the provider parsed from the message; fall back to the profile's.
  const provider = classified.provider ?? fallbackProvider;
  // Provider-specific login command when one is registered; otherwise the
  // generic CLI invocation.
  const command =
    resolveProviderAuthLoginCommand({ provider }) ??
    formatCliCommand(`openclaw models auth login --provider ${provider}`);
  return classified.reason
    ? `- ${profileId}: re-auth required [${formatOAuthRefreshFailureReason(classified.reason)}] — Run \`${command}\`.`
    : `- ${profileId}: OAuth refresh failed — Try again; if this persists, run \`${command}\`.`;
}
export async function resolveAuthIssueHint(
issue: AuthIssue,
cfg: OpenClawConfig,
@@ -275,7 +320,14 @@ export async function noteAuthProfileHealth(params: {
profileId: profile.profileId,
});
} catch (err) {
errors.push(`- ${profile.profileId}: ${formatErrorMessage(err)}`);
const message = formatErrorMessage(err);
errors.push(
formatOAuthRefreshFailureDoctorLine({
profileId: profile.profileId,
provider: profile.provider,
message,
}) ?? `- ${profile.profileId}: ${message}`,
);
}
}
if (errors.length > 0) {

View File

@@ -0,0 +1,14 @@
import { describe, expect, it } from "vitest";
import { summarizeLogTail } from "./gateway.js";
describe("summarizeLogTail", () => {
// A 401 token-refresh log entry whose JSON body mentions invalid_grant /
// "signing in again" should be condensed into a single summary line tagged
// "re-auth required" instead of echoing the raw message.
it("marks permanent OAuth refresh failures as reauth-required", () => {
const lines = summarizeLogTail([
"[openai-codex] Token refresh failed: 401 {",
'"error":{"code":"invalid_grant","message":"Session invalidated due to signing in again"}',
"}",
]);
expect(lines).toEqual(["[openai-codex] token refresh 401 invalid_grant · re-auth required"]);
});
});

View File

@@ -1,8 +1,6 @@
import fs from "node:fs/promises";
import {
normalizeLowercaseStringOrEmpty,
normalizeOptionalString,
} from "../../shared/string-coerce.js";
import { classifyOAuthRefreshFailureReason } from "../../agents/auth-profiles/oauth-refresh-failure.js";
import { normalizeOptionalString } from "../../shared/string-coerce.js";
export async function readFileTailLines(filePath: string, maxLines: number): Promise<string[]> {
const raw = await fs.readFile(filePath, "utf8").catch(() => "");
@@ -120,11 +118,8 @@ export function summarizeLogTail(rawLines: string[], opts?: { maxLines?: number
})();
const code = normalizeOptionalString(parsed?.error?.code) ?? null;
const msg = normalizeOptionalString(parsed?.error?.message) ?? null;
const msgShort = msg
? normalizeLowercaseStringOrEmpty(msg).includes("signing in again")
? "re-auth required"
: shorten(msg, 52)
: null;
const refreshReason = classifyOAuthRefreshFailureReason(msg ?? "");
const msgShort = msg ? (refreshReason ? "re-auth required" : shorten(msg, 52)) : null;
const base = `[${tag}] token refresh ${status}${code ? ` ${code}` : ""}${msgShort ? ` · ${msgShort}` : ""}`;
addGroup(`token:${tag}:${status}:${code ?? ""}:${msgShort ?? ""}`, base);
continue;