From 8714badc0c551747976c4ea4e44bca0d919a728d Mon Sep 17 00:00:00 2001 From: Peter Steinberger Date: Thu, 23 Apr 2026 08:03:34 +0100 Subject: [PATCH] fix: show fast mode in status --- CHANGELOG.md | 1 + docs/tools/thinking.md | 1 + src/auto-reply/reply/commands-info.test.ts | 17 +++++++++++++ src/auto-reply/reply/commands-info.ts | 1 + src/auto-reply/reply/commands-types.ts | 1 + src/auto-reply/status.test.ts | 19 +++++++++++++- src/status/status-message.test.ts | 29 ++++++++++++++++++++++ src/status/status-message.ts | 9 ++++++- 8 files changed, 76 insertions(+), 2 deletions(-) create mode 100644 src/status/status-message.test.ts diff --git a/CHANGELOG.md b/CHANGELOG.md index e84868f6a4d..76347bd2f8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ Docs: https://docs.openclaw.ai ### Fixes +- Status: show `Fast` in `/status` when fast mode is enabled, including config/default-derived fast mode, and omit it when disabled. - Models/auth: merge provider-owned default-model additions from `openclaw models auth login` instead of replacing `agents.defaults.models`, so re-authenticating an OAuth provider such as OpenAI Codex no longer wipes other providers' aliases and per-model params. Migrations that must rename keys (Anthropic -> Claude CLI) opt in with `replaceDefaultModels`. Fixes #69414. (#70435) Thanks @neeravmakwana. - Media understanding/audio: prefer configured or key-backed STT providers before auto-detected local Whisper CLIs, so installed local transcription tools no longer shadow API providers such as Groq/OpenAI in `tools.media.audio` auto mode. Fixes #68727. - Providers/OpenAI: lock the auth picker wording for OpenAI API key, Codex browser login, and Codex device pairing so the setup choices no longer imply a mixed Codex/API-key auth path. (#67848) Thanks @tmlxrd. 
diff --git a/docs/tools/thinking.md b/docs/tools/thinking.md index 49977ca1597..07fa758a61f 100644 --- a/docs/tools/thinking.md +++ b/docs/tools/thinking.md @@ -68,6 +68,7 @@ title: "Thinking Levels" - For direct public `anthropic/*` requests, including OAuth-authenticated traffic sent to `api.anthropic.com`, fast mode maps to Anthropic service tiers: `/fast on` sets `service_tier=auto`, `/fast off` sets `service_tier=standard_only`. - For `minimax/*` on the Anthropic-compatible path, `/fast on` (or `params.fastMode: true`) rewrites `MiniMax-M2.7` to `MiniMax-M2.7-highspeed`. - Explicit Anthropic `serviceTier` / `service_tier` model params override the fast-mode default when both are set. OpenClaw still skips Anthropic service-tier injection for non-Anthropic proxy base URLs. +- `/status` shows `Fast` only when fast mode is enabled. ## Verbose directives (/verbose or /v) diff --git a/src/auto-reply/reply/commands-info.test.ts b/src/auto-reply/reply/commands-info.test.ts index 6e31f7bd25d..50e43e2ca05 100644 --- a/src/auto-reply/reply/commands-info.test.ts +++ b/src/auto-reply/reply/commands-info.test.ts @@ -268,6 +268,23 @@ describe("info command handlers", () => { ); }); + it("forwards resolved fast mode to /status", async () => { + const params = buildInfoParams("/status", { + commands: { text: true }, + channels: { whatsapp: { allowFrom: ["*"] } }, + } as OpenClawConfig); + params.resolvedFastMode = true; + + const statusResult = await handleStatusCommand(params, true); + + expect(statusResult?.shouldContinue).toBe(false); + expect(vi.mocked(buildStatusReply)).toHaveBeenCalledWith( + expect.objectContaining({ + resolvedFastMode: true, + }), + ); + }); + it("uses the canonical target session agent when listing /commands", async () => { const { handleCommandsListCommand } = await import("./commands-info.js"); const params = buildInfoParams("/commands", { diff --git a/src/auto-reply/reply/commands-info.ts b/src/auto-reply/reply/commands-info.ts index 
d5544252cfb..ee463f95271 100644 --- a/src/auto-reply/reply/commands-info.ts +++ b/src/auto-reply/reply/commands-info.ts @@ -204,6 +204,7 @@ export const handleStatusCommand: CommandHandler = async (params, allowTextComma model: params.model, contextTokens: params.contextTokens, resolvedThinkLevel: params.resolvedThinkLevel, + resolvedFastMode: params.resolvedFastMode, resolvedVerboseLevel: params.resolvedVerboseLevel, resolvedReasoningLevel: params.resolvedReasoningLevel, resolvedElevatedLevel: params.resolvedElevatedLevel, diff --git a/src/auto-reply/reply/commands-types.ts b/src/auto-reply/reply/commands-types.ts index 52199240360..c4e045c1c9f 100644 --- a/src/auto-reply/reply/commands-types.ts +++ b/src/auto-reply/reply/commands-types.ts @@ -53,6 +53,7 @@ export type HandleCommandsParams = { opts?: GetReplyOptions; defaultGroupActivation: () => "always" | "mention"; resolvedThinkLevel?: ThinkLevel; + resolvedFastMode?: boolean; resolvedVerboseLevel: VerboseLevel; resolvedReasoningLevel: ReasoningLevel; resolvedElevatedLevel?: ElevatedLevel; diff --git a/src/auto-reply/status.test.ts b/src/auto-reply/status.test.ts index d40a0068d4c..332591ce155 100644 --- a/src/auto-reply/status.test.ts +++ b/src/auto-reply/status.test.ts @@ -276,7 +276,24 @@ describe("buildStatusMessage", () => { queue: { mode: "collect", depth: 0 }, }); - expect(normalizeTestText(text)).toContain("Fast: on"); + expect(normalizeTestText(text)).toContain("Fast"); + }); + + it("hides fast mode when disabled", () => { + const text = buildStatusMessage({ + agent: { + model: "anthropic/claude-opus-4-6", + }, + sessionEntry: { + sessionId: "fast-off", + updatedAt: 0, + fastMode: false, + }, + sessionKey: "agent:main:main", + queue: { mode: "collect", depth: 0 }, + }); + + expect(normalizeTestText(text)).not.toContain("Fast"); }); it("shows configured text verbosity for the active model", () => { diff --git a/src/status/status-message.test.ts b/src/status/status-message.test.ts new file mode 100644 
index 00000000000..d3600264f28 --- /dev/null +++ b/src/status/status-message.test.ts @@ -0,0 +1,29 @@ +import { describe, expect, it } from "vitest"; +import { normalizeTestText } from "../../test/helpers/normalize-text.js"; +import { buildStatusMessage } from "./status-message.js"; + +const buildFastStatus = (model: string, fastMode: boolean) => + normalizeTestText( + buildStatusMessage({ + modelAuth: "api-key", + activeModelAuth: "api-key", + agent: { model }, + sessionEntry: { + sessionId: "fast-status", + updatedAt: 0, + fastMode, + }, + sessionKey: "agent:main:main", + queue: { mode: "collect", depth: 0 }, + }), + ); + +describe("buildStatusMessage fast mode labels", () => { + it("shows fast mode when enabled", () => { + expect(buildFastStatus("openai/gpt-5.4", true)).toContain("Fast"); + }); + + it("hides fast mode when disabled", () => { + expect(buildFastStatus("anthropic/claude-opus-4-6", false)).not.toContain("Fast"); + }); +}); diff --git a/src/status/status-message.ts b/src/status/status-message.ts index d4859a621fc..a2f679e4a11 100644 --- a/src/status/status-message.ts +++ b/src/status/status-message.ts @@ -237,6 +237,13 @@ const formatQueueDetails = (queue?: QueueStatus) => { return detailParts.length ? ` (${detailParts.join(" · ")})` : ""; }; +const formatFastModeLabel = (enabled: boolean) => { + if (!enabled) { + return null; + } + return "Fast"; +}; + const readUsageFromSessionLog = ( sessionId?: string, sessionEntry?: SessionEntry, @@ -705,7 +712,7 @@ export function buildStatusMessage(args: StatusArgs): string { const optionParts = [ `Runtime: ${runtime.label}`, `Think: ${thinkLevel}`, - fastMode ? "Fast: on" : null, + formatFastModeLabel(fastMode), textVerbosity ? `Text: ${textVerbosity}` : null, verboseLabel, traceLabel,