From bca67092036273b4f4a660eab40208a111f5235c Mon Sep 17 00:00:00 2001 From: Vincent Koc Date: Tue, 5 May 2026 17:42:41 -0700 Subject: [PATCH] fix(doctor): repair legacy Codex route config Repair legacy openai-codex route config and session pins safely. --- CHANGELOG.md | 2 + docs/cli/doctor.md | 1 + docs/gateway/doctor.md | 18 +- docs/plugins/codex-harness.md | 85 +- src/commands/doctor/repair-sequencing.test.ts | 81 ++ src/commands/doctor/repair-sequencing.ts | 11 + .../shared/codex-route-warnings.test.ts | 394 +++++++- .../doctor/shared/codex-route-warnings.ts | 875 ++++++++++++++++-- src/config/model-refs.ts | 40 + src/flows/doctor-health-contributions.ts | 22 + 10 files changed, 1401 insertions(+), 128 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4366f7d8633..b6536f916f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -113,6 +113,8 @@ Docs: https://docs.openclaw.ai - Hooks/session-memory: add collision suffixes to fallback memory filenames so repeated `/new` or `/reset` captures in the same minute do not overwrite the earlier session archive. Thanks @vincentkoc. - Agents/config: remove the ambiguous legacy `main` agent dir helper from runtime paths; model, auth, gateway, bundled plugin, and test helpers now resolve default/session agent dirs through `agents.list`/agent-scope helpers while plugin SDK keeps a deprecated compatibility export. - CLI/status: show the selected agent runtime/harness in `openclaw status` session rows so terminal status matches the `/status` runtime line. Thanks @vincentkoc. +- CLI/sessions: prune old unreferenced transcript, compaction checkpoint, and trajectory artifacts during normal `sessions cleanup`, so gateway restart or crash orphans do not accumulate indefinitely outside `sessions.json`. Fixes #77608. Thanks @slideshow-dingo. 
+- Doctor/Codex: repair legacy `openai-codex/*` routes in primary models, fallbacks, heartbeat/subagent/compaction overrides, hooks, channel overrides, and stale session pins to canonical `openai/*`, selecting `agentRuntime.id: "codex"` only when the Codex plugin is installed, enabled, contributes the `codex` harness, and has usable OAuth; otherwise select `agentRuntime.id: "pi"`. Thanks @vincentkoc. - Video generation: wait up to 20 minutes for slow fal/MiniMax queue-backed jobs, stop forwarding unsupported Google Veo generated-audio options, and normalize MiniMax `720P` requests to its supported `768P` resolution with the usual override warning/details instead of failing fallback. - Video generation: accept provider-specific aspect-ratio and resolution hints at the tool boundary, normalize `720P` to MiniMax's supported `768P`, and stop sending Google `generateAudio` on Gemini video requests so provider fallback can recover from model-specific parameter differences. Thanks @vincentkoc. - OpenAI/Google Meet: fail realtime voice connection attempts when the socket closes before `session.updated`, avoiding stuck Meet joins waiting on a bridge that never became ready. Thanks @vincentkoc. diff --git a/docs/cli/doctor.md b/docs/cli/doctor.md index f0b34e7fdd2..ff37fd4f0aa 100644 --- a/docs/cli/doctor.md +++ b/docs/cli/doctor.md @@ -46,6 +46,7 @@ Notes: - Doctor also scans `~/.openclaw/cron/jobs.json` (or `cron.store`) for legacy cron job shapes and can rewrite them in place before the scheduler has to auto-normalize them at runtime. - On Linux, doctor warns when the user's crontab still runs legacy `~/.openclaw/bin/ensure-whatsapp.sh`; that script is no longer maintained and can log false WhatsApp gateway outages when cron lacks the systemd user-bus environment. - When WhatsApp is enabled, doctor checks for a degraded Gateway event loop with local `openclaw-tui` clients still running. 
`doctor --fix` stops only verified local TUI clients so WhatsApp replies are not queued behind stale TUI refresh loops. +- Doctor rewrites legacy `openai-codex/*` model refs to canonical `openai/*` refs across primary models, fallbacks, heartbeat/subagent/compaction overrides, hooks, channel model overrides, and stale session route pins. `--fix` selects `agentRuntime.id: "codex"` only when the Codex plugin is installed, enabled, contributes the `codex` harness, and has usable OAuth; otherwise it selects `agentRuntime.id: "pi"` so the route stays on the default OpenClaw runner. - Doctor cleans legacy plugin dependency staging state created by older OpenClaw versions. It also repairs missing downloadable plugins that are referenced by config, such as `plugins.entries`, configured channels, configured provider/search settings, or configured agent runtimes. During package updates, doctor skips package-manager plugin repair until the package swap is complete; rerun `openclaw doctor --fix` afterward if a configured plugin still needs recovery. If the download fails, doctor reports the install error and preserves the configured plugin entry for the next repair attempt. - Doctor repairs stale plugin config by removing missing plugin ids from `plugins.allow`/`plugins.entries`, plus matching dangling channel config, heartbeat targets, and channel model overrides when plugin discovery is healthy. - Doctor quarantines invalid plugin config by disabling the affected `plugins.entries.` entry and removing its invalid `config` payload. Gateway startup already skips only that bad plugin so other plugins and channels can keep running. diff --git a/docs/gateway/doctor.md b/docs/gateway/doctor.md index bb9886d82c8..50751246f48 100644 --- a/docs/gateway/doctor.md +++ b/docs/gateway/doctor.md @@ -108,6 +108,7 @@ cat ~/.openclaw/openclaw.json - Gateway runtime checks (service installed but not running; cached launchd label). - Channel status warnings (probed from the running gateway). 
- WhatsApp responsiveness checks for degraded Gateway event-loop health with local TUI clients still running; `--fix` stops only verified local TUI clients. + - Codex route repair for legacy `openai-codex/*` model refs in primary models, fallbacks, heartbeat/subagent/compaction overrides, hooks, channel model overrides, and session route pins; `--fix` rewrites them to `openai/*` and selects `agentRuntime.id: "codex"` only when the Codex plugin is installed, enabled, contributes the `codex` harness, and has usable OAuth. Otherwise it selects `agentRuntime.id: "pi"`. - Supervisor config audit (launchd/systemd/schtasks) with optional repair. - Embedded proxy environment cleanup for gateway services that captured shell `HTTP_PROXY` / `HTTPS_PROXY` / `NO_PROXY` values during install or update. - Gateway runtime best-practice checks (Node vs Bun, version-manager paths). @@ -260,21 +261,22 @@ That stages grounded durable candidates into the short-term dreaming store while If you previously added legacy OpenAI transport settings under `models.providers.openai-codex`, they can shadow the built-in Codex OAuth provider path that newer releases use automatically. Doctor warns when it sees those old transport settings alongside Codex OAuth so you can remove or rewrite the stale transport override and get the built-in routing/fallback behavior back. Custom proxies and header-only overrides are still supported and do not trigger this warning. - - When the bundled Codex plugin is enabled, doctor also checks whether `openai-codex/*` primary model refs still resolve through the default PI runner. That combination is valid when you want Codex OAuth/subscription auth through PI, but it is easy to confuse with the native Codex app-server harness. Doctor warns and points to the explicit app-server shape: `openai/*` plus `agentRuntime.id: "codex"` or `OPENCLAW_AGENT_RUNTIME=codex`. + + Doctor checks for legacy `openai-codex/*` model refs. 
Native Codex harness routing uses canonical `openai/*` model refs plus `agentRuntime.id: "codex"` so the turn goes through the Codex app-server harness instead of the OpenClaw PI OpenAI path. - Doctor does not repair this automatically because both routes are valid: + In `--fix` / `--repair` mode, doctor rewrites affected default-agent and per-agent refs, including primary models, fallbacks, heartbeat/subagent/compaction overrides, hooks, channel model overrides, and stale persisted session route state: - - `openai-codex/*` + PI means "use Codex OAuth/subscription auth through the normal OpenClaw runner." - - `openai/*` + `agentRuntime.id: "codex"` means "run the embedded turn through native Codex app-server." + - `openai-codex/gpt-*` becomes `openai/gpt-*`. + - The matching agent runtime becomes `agentRuntime.id: "codex"` only when Codex is installed, enabled, contributes the `codex` harness, and has usable OAuth. + - Otherwise the matching agent runtime becomes `agentRuntime.id: "pi"`. + - Existing model fallback lists are preserved with their legacy entries rewritten; copied per-model settings move from the legacy key to the canonical `openai/*` key. + - Persisted session `modelProvider`/`providerOverride`, `model`/`modelOverride`, fallback notices, auth-profile pins, and Codex harness pins are repaired across all discovered agent session stores. - `/codex ...` means "control or bind a native Codex conversation from chat." - `/acp ...` or `runtime: "acp"` means "use the external ACP/acpx adapter." - If the warning appears, choose the route you intended and edit config manually. Keep the warning as-is when PI Codex OAuth is intentional. - - Doctor also scans the active sessions store for stale auto-created route state after you move the configured default/fallback model or runtime away from a plugin-owned route such as Codex. 
+ Doctor also scans discovered agent session stores for stale auto-created route state after you move configured models or runtime away from a plugin-owned route such as Codex. `openclaw doctor --fix` can clear auto-created stale state such as `modelOverrideSource: "auto"` model pins, runtime model metadata, pinned harness ids, CLI session bindings, and auto auth-profile overrides when their owning route is no longer configured. Explicit user or legacy session model choices are reported for manual review and left untouched; switch them with `/model ...`, `/new`, or reset the session when that route is no longer intended. diff --git a/docs/plugins/codex-harness.md b/docs/plugins/codex-harness.md index 5c4d229be79..4777ce499d6 100644 --- a/docs/plugins/codex-harness.md +++ b/docs/plugins/codex-harness.md @@ -87,9 +87,10 @@ If your config uses `plugins.allow`, include `codex` there too: } ``` -Do not use `openai-codex/gpt-*` when you mean native Codex runtime. That prefix -is the explicit "Codex OAuth through PI" route. Config changes apply to new or -reset sessions; existing sessions keep their recorded runtime. +Do not use `openai-codex/gpt-*` in config. That prefix is a legacy route that +`openclaw doctor --fix` rewrites to `openai/gpt-*` across primary models, +fallbacks, heartbeat/subagent/compaction overrides, hooks, channel overrides, +and stale persisted session route pins. ## What this plugin changes @@ -106,7 +107,9 @@ The bundled `codex` plugin contributes several separate capabilities: Enabling the plugin makes those capabilities available. 
It does **not**: - start using Codex for every OpenAI model -- convert `openai-codex/*` model refs into the native runtime +- convert `openai-codex/*` model refs into the native runtime without doctor + verifying that Codex is installed, enabled, contributes the `codex` harness, + and is OAuth-ready - make ACP/acpx the default Codex path - hot-switch existing sessions that already recorded a PI runtime - replace OpenClaw channel delivery, session files, auth-profile storage, or @@ -145,10 +148,10 @@ want native app-server execution. Legacy `codex/*` model refs still auto-select the harness for compatibility, but runtime-backed legacy provider prefixes are not shown as normal model/provider choices. -If the `codex` plugin is enabled but the primary model is still -`openai-codex/*`, `openclaw doctor` warns instead of changing the route. That is -intentional: `openai-codex/*` remains the PI Codex OAuth/subscription path, and -native app-server execution stays an explicit runtime choice. +If any configured model route is still `openai-codex/*`, `openclaw doctor --fix` +rewrites it to `openai/*`. For matching agent routes, it sets the agent runtime +to `codex` only when the Codex plugin is installed, enabled, contributes the +`codex` harness, and has usable OAuth; otherwise it sets the runtime to `pi`. 
## Route map @@ -158,15 +161,18 @@ Use this table before changing config: | ---------------------------------------------------- | -------------------------- | -------------------------------------- | ---------------------------- | ------------------------------ | | ChatGPT/Codex subscription with native Codex runtime | `openai/gpt-*` | `agentRuntime.id: "codex"` | Codex OAuth or Codex account | `Runtime: OpenAI Codex` | | OpenAI API through normal OpenClaw runner | `openai/gpt-*` | omitted or `runtime: "pi"` | OpenAI API key | `Runtime: OpenClaw Pi Default` | -| ChatGPT/Codex subscription through PI | `openai-codex/gpt-*` | omitted or `runtime: "pi"` | OpenAI Codex OAuth provider | `Runtime: OpenClaw Pi Default` | +| Legacy config that needs doctor repair | `openai-codex/gpt-*` | repaired to `codex` or `pi` | Existing configured auth | Recheck after `doctor --fix` | | Mixed providers with conservative auto mode | provider-specific refs | `agentRuntime.id: "auto"` | Per selected provider | Depends on selected runtime | | Explicit Codex ACP adapter session | ACP prompt/model dependent | `sessions_spawn` with `runtime: "acp"` | ACP backend auth | ACP task/session status | The important split is provider versus runtime: -- `openai-codex/*` answers "which provider/auth route should PI use?" -- `agentRuntime.id: "codex"` answers "which loop should execute this - embedded turn?" +- `openai-codex/*` is a legacy route that doctor rewrites. +- `agentRuntime.id: "codex"` requires the Codex harness and fails closed if it + is unavailable. +- `agentRuntime.id: "auto"` lets registered harnesses claim matching provider + routes, but canonical OpenAI refs are still PI-owned unless a harness supports + that provider/model pair. - `/codex ...` answers "which native Codex conversation should this chat bind or control?" - ACP answers "which external harness process should acpx launch?" 
@@ -175,33 +181,30 @@ The important split is provider versus runtime: OpenAI-family routes are prefix-specific. For the common subscription plus native Codex runtime setup, use `openai/*` with `agentRuntime.id: "codex"`. -Use `openai-codex/*` only when you intentionally want Codex OAuth through PI: +Treat `openai-codex/*` as legacy config that doctor should rewrite: | Model ref | Runtime path | Use when | | --------------------------------------------- | -------------------------------------------- | ------------------------------------------------------------------------- | | `openai/gpt-5.4` | OpenAI provider through OpenClaw/PI plumbing | You want current direct OpenAI Platform API access with `OPENAI_API_KEY`. | -| `openai-codex/gpt-5.5` | OpenAI Codex OAuth through OpenClaw/PI | You want ChatGPT/Codex subscription auth with the default PI runner. | +| `openai-codex/gpt-5.5` | Legacy route repaired by doctor | You are on old config; run `openclaw doctor --fix` to rewrite it. | | `openai/gpt-5.5` + `agentRuntime.id: "codex"` | Codex app-server harness | You want ChatGPT/Codex subscription auth with native Codex execution. | GPT-5.5 can appear on both direct OpenAI API-key and Codex subscription routes when your account exposes them. Use `openai/gpt-5.5` with the Codex app-server -harness for native Codex runtime, `openai-codex/gpt-5.5` for PI OAuth, or -`openai/gpt-5.5` without a Codex runtime override for direct API-key traffic. +harness for native Codex runtime, or `openai/gpt-5.5` without a Codex runtime +override for direct API-key traffic. Legacy `codex/gpt-*` refs remain accepted as compatibility aliases. Doctor -compatibility migration rewrites legacy primary runtime refs to canonical model -refs and records the runtime policy separately, while fallback-only legacy refs -are left unchanged because runtime is configured for the whole agent container. 
-New PI Codex OAuth configs should use `openai-codex/gpt-*`; new native -app-server harness configs should use `openai/gpt-*` plus -`agentRuntime.id: "codex"`. +compatibility migration rewrites legacy runtime refs to canonical model refs +and records the runtime policy separately. New native app-server harness configs +should use `openai/gpt-*` plus `agentRuntime.id: "codex"`. `agents.defaults.imageModel` follows the same prefix split. Use -`openai-codex/gpt-*` when image understanding should run through the OpenAI -Codex OAuth provider path. Use `codex/gpt-*` when image understanding should run -through a bounded Codex app-server turn. The Codex app-server model must -advertise image input support; text-only Codex models fail before the media turn -starts. +`openai/gpt-*` for the normal OpenAI route and `codex/gpt-*` when image +understanding should run through a bounded Codex app-server turn. Do not use +`openai-codex/gpt-*`; doctor rewrites that legacy prefix to `openai/gpt-*`. The +Codex app-server model must advertise image input support; text-only Codex +models fail before the media turn starts. Use `/status` to confirm the effective harness for the current session. If the selection is surprising, enable debug logging for the `agents/harness` subsystem @@ -211,22 +214,20 @@ in `auto` mode, each plugin candidate's support result. ### What doctor warnings mean -`openclaw doctor` warns when all of these are true: +`openclaw doctor` warns when configured model refs or persisted session route +state still use `openai-codex/*`. 
`openclaw doctor --fix` rewrites those routes +to: -- the bundled `codex` plugin is enabled or allowed -- an agent's primary model is `openai-codex/*` -- that agent's effective runtime is not `codex` +- `openai/` +- `agentRuntime.id: "codex"` when Codex is installed, enabled, contributes the + `codex` harness, and has usable OAuth +- `agentRuntime.id: "pi"` otherwise -That warning exists because users often expect "Codex plugin enabled" to imply -"native Codex app-server runtime." OpenClaw does not make that leap. The warning -means: - -- **No change is required** if you intended ChatGPT/Codex OAuth through PI. -- Change the model to `openai/` and set - `agentRuntime.id: "codex"` if you intended native app-server - execution. -- Existing sessions still need `/new` or `/reset` after a runtime change, - because session runtime pins are sticky. +The `codex` route forces the native Codex harness. The `pi` route keeps the +agent on the default OpenClaw runner instead of enabling or installing Codex as +a side effect of legacy-route cleanup. +Doctor also repairs stale persisted session pins across discovered agent session +stores so old conversations do not stay wedged on the removed route. Harness selection is not a live session control. When an embedded turn runs, OpenClaw records the selected harness id on that session and keeps using it for @@ -349,7 +350,7 @@ Agents should route user requests by intent, not by the word "Codex" alone: | "File a support report for a bad Codex run" | `/diagnostics [note]` | | "Only send Codex feedback for this attached thread" | `/codex diagnostics [note]` | | "Use my ChatGPT/Codex subscription with Codex runtime" | `openai/*` plus `agentRuntime.id: "codex"` | -| "Use my ChatGPT/Codex subscription through PI" | `openai-codex/*` model refs | +| "Repair old `openai-codex/*` config/session pins" | `openclaw doctor --fix` | | "Run Codex through ACP/acpx" | ACP `sessions_spawn({ runtime: "acp", ... 
})` | | "Start Claude Code/Gemini/OpenCode/Cursor in a thread" | ACP/acpx, not `/codex` and not native sub-agents | diff --git a/src/commands/doctor/repair-sequencing.test.ts b/src/commands/doctor/repair-sequencing.test.ts index 5537507e808..59bfe67bb1e 100644 --- a/src/commands/doctor/repair-sequencing.test.ts +++ b/src/commands/doctor/repair-sequencing.test.ts @@ -4,9 +4,16 @@ import { runDoctorRepairSequence } from "./repair-sequencing.js"; const mocks = vi.hoisted(() => ({ applyPluginAutoEnable: vi.fn(), + ensureAuthProfileStore: vi.fn(), + evaluateStoredCredentialEligibility: vi.fn(), + getInstalledPluginRecord: vi.fn(), + isInstalledPluginEnabled: vi.fn(), + loadInstalledPluginIndex: vi.fn(), maybeRepairStaleManagedNpmBundledPlugins: vi.fn(), maybeRepairStalePluginConfig: vi.fn(), repairMissingConfiguredPluginInstalls: vi.fn(), + resolveAuthProfileOrder: vi.fn(), + resolveProfileUnusableUntilForDisplay: vi.fn(), })); vi.mock("../../config/plugin-auto-enable.js", () => ({ @@ -21,6 +28,23 @@ vi.mock("./shared/missing-configured-plugin-install.js", () => ({ repairMissingConfiguredPluginInstalls: mocks.repairMissingConfiguredPluginInstalls, })); +vi.mock("../../agents/auth-profiles.js", () => ({ + ensureAuthProfileStore: mocks.ensureAuthProfileStore, + resolveAuthProfileOrder: mocks.resolveAuthProfileOrder, + resolveProfileUnusableUntilForDisplay: mocks.resolveProfileUnusableUntilForDisplay, +})); + +vi.mock("../../agents/auth-profiles/credential-state.js", () => ({ + evaluateStoredCredentialEligibility: mocks.evaluateStoredCredentialEligibility, +})); + +vi.mock("../../plugins/installed-plugin-index.js", async (importOriginal) => ({ + ...(await importOriginal()), + getInstalledPluginRecord: mocks.getInstalledPluginRecord, + isInstalledPluginEnabled: mocks.isInstalledPluginEnabled, + loadInstalledPluginIndex: mocks.loadInstalledPluginIndex, +})); + vi.mock("./shared/channel-doctor.js", () => ({ collectChannelDoctorRepairMutations: ({ cfg }: { cfg: OpenClawConfig 
}) => { const allowFrom = cfg.channels?.discord?.allowFrom as unknown[] | undefined; @@ -150,11 +174,24 @@ describe("doctor repair sequencing", () => { config: params.config, changes: [], })); + mocks.ensureAuthProfileStore.mockReturnValue({ + profiles: {}, + usageStats: {}, + }); + mocks.evaluateStoredCredentialEligibility.mockReturnValue({ + eligible: true, + reasonCode: "ok", + }); + mocks.getInstalledPluginRecord.mockReturnValue(undefined); + mocks.isInstalledPluginEnabled.mockReturnValue(false); + mocks.loadInstalledPluginIndex.mockReturnValue({ plugins: [] }); mocks.maybeRepairStaleManagedNpmBundledPlugins.mockReturnValue(false); mocks.repairMissingConfiguredPluginInstalls.mockResolvedValue({ changes: [], warnings: [], }); + mocks.resolveAuthProfileOrder.mockReturnValue([]); + mocks.resolveProfileUnusableUntilForDisplay.mockReturnValue(null); mocks.maybeRepairStalePluginConfig.mockImplementation((cfg: OpenClawConfig) => ({ config: cfg, changes: [], @@ -360,6 +397,50 @@ describe("doctor repair sequencing", () => { ); }); + it("moves legacy Codex routes to PI before missing plugin install repair when Codex is not ready", async () => { + mocks.repairMissingConfiguredPluginInstalls.mockImplementationOnce( + async (params: { cfg: OpenClawConfig }) => { + expect(params.cfg.agents?.defaults?.model).toBe("openai/gpt-5.5"); + expect(params.cfg.agents?.defaults?.agentRuntime).toEqual({ id: "pi" }); + return { + changes: [], + warnings: [], + }; + }, + ); + + const result = await runDoctorRepairSequence({ + state: { + cfg: { + agents: { + defaults: { + model: "openai-codex/gpt-5.5", + }, + }, + } as OpenClawConfig, + candidate: { + agents: { + defaults: { + model: "openai-codex/gpt-5.5", + }, + }, + } as OpenClawConfig, + pendingChanges: false, + fixHints: [], + }, + doctorFixCommand: "openclaw doctor --fix", + env: {}, + }); + + expect(result.state.pendingChanges).toBe(true); + expect(result.state.candidate.agents?.defaults?.model).toBe("openai/gpt-5.5"); + 
expect(result.state.candidate.agents?.defaults?.agentRuntime).toEqual({ id: "pi" }); + expect(result.changeNotes.join("\n")).toContain( + 'agents.defaults.model: openai-codex/gpt-5.5 -> openai/gpt-5.5; set agentRuntime.id to "pi".', + ); + expect(result.changeNotes.join("\n")).not.toContain("Installed missing configured plugin"); + }); + it("does not remove deferred configured plugins during the package update doctor pass", async () => { mocks.repairMissingConfiguredPluginInstalls.mockResolvedValueOnce({ changes: [ diff --git a/src/commands/doctor/repair-sequencing.ts b/src/commands/doctor/repair-sequencing.ts index bc47164e7f4..8ad475a8ebd 100644 --- a/src/commands/doctor/repair-sequencing.ts +++ b/src/commands/doctor/repair-sequencing.ts @@ -7,6 +7,7 @@ import { createChannelDoctorEmptyAllowlistPolicyHooks, collectChannelDoctorRepairMutations, } from "./shared/channel-doctor.js"; +import { maybeRepairCodexRoutes } from "./shared/codex-route-warnings.js"; import { applyDoctorConfigMutation, type DoctorConfigMutationState, @@ -73,6 +74,16 @@ export async function runDoctorRepairSequence(params: { env, prompter: { shouldRepair: true }, }); + const codexRouteRepair = maybeRepairCodexRoutes({ + cfg: state.candidate, + env, + shouldRepair: true, + }); + applyMutation({ + config: codexRouteRepair.cfg, + changes: codexRouteRepair.changes, + warnings: codexRouteRepair.warnings, + }); const missingConfiguredPluginInstallRepair = await repairMissingConfiguredPluginInstalls({ cfg: state.candidate, env, diff --git a/src/commands/doctor/shared/codex-route-warnings.test.ts b/src/commands/doctor/shared/codex-route-warnings.test.ts index b3dbce3b5ee..410f4f50316 100644 --- a/src/commands/doctor/shared/codex-route-warnings.test.ts +++ b/src/commands/doctor/shared/codex-route-warnings.test.ts @@ -1,22 +1,61 @@ -import { describe, expect, it } from "vitest"; +import { beforeEach, describe, expect, it, vi } from "vitest"; +import type { SessionEntry } from 
"../../../config/sessions/types.js"; import type { OpenClawConfig } from "../../../config/types.openclaw.js"; -import { collectCodexRouteWarnings } from "./codex-route-warnings.js"; -function codexPluginConfig(): Pick { - return { - plugins: { - entries: { - codex: { enabled: true }, - }, - }, - } as Pick; -} +const mocks = vi.hoisted(() => ({ + ensureAuthProfileStore: vi.fn(), + evaluateStoredCredentialEligibility: vi.fn(), + getInstalledPluginRecord: vi.fn(), + isInstalledPluginEnabled: vi.fn(), + loadInstalledPluginIndex: vi.fn(), + resolveAuthProfileOrder: vi.fn(), + resolveProfileUnusableUntilForDisplay: vi.fn(), +})); + +vi.mock("../../../agents/auth-profiles.js", () => ({ + ensureAuthProfileStore: mocks.ensureAuthProfileStore, + resolveAuthProfileOrder: mocks.resolveAuthProfileOrder, + resolveProfileUnusableUntilForDisplay: mocks.resolveProfileUnusableUntilForDisplay, +})); + +vi.mock("../../../agents/auth-profiles/credential-state.js", () => ({ + evaluateStoredCredentialEligibility: mocks.evaluateStoredCredentialEligibility, +})); + +vi.mock("../../../plugins/installed-plugin-index.js", async (importOriginal) => ({ + ...(await importOriginal()), + getInstalledPluginRecord: mocks.getInstalledPluginRecord, + isInstalledPluginEnabled: mocks.isInstalledPluginEnabled, + loadInstalledPluginIndex: mocks.loadInstalledPluginIndex, +})); + +import { + collectCodexRouteWarnings, + maybeRepairCodexRoutes, + repairCodexSessionStoreRoutes, +} from "./codex-route-warnings.js"; describe("collectCodexRouteWarnings", () => { - it("warns when the Codex plugin is enabled but openai-codex models still route through PI", () => { + beforeEach(() => { + vi.clearAllMocks(); + mocks.ensureAuthProfileStore.mockReturnValue({ + profiles: {}, + usageStats: {}, + }); + mocks.evaluateStoredCredentialEligibility.mockReturnValue({ + eligible: true, + reasonCode: "ok", + }); + mocks.getInstalledPluginRecord.mockReturnValue(undefined); + mocks.isInstalledPluginEnabled.mockReturnValue(false); 
+ mocks.loadInstalledPluginIndex.mockReturnValue({ plugins: [] }); + mocks.resolveAuthProfileOrder.mockReturnValue([]); + mocks.resolveProfileUnusableUntilForDisplay.mockReturnValue(null); + }); + + it("warns when openai-codex primary models still use the legacy route", () => { const warnings = collectCodexRouteWarnings({ cfg: { - ...codexPluginConfig(), agents: { defaults: { model: "openai-codex/gpt-5.5", @@ -25,16 +64,17 @@ describe("collectCodexRouteWarnings", () => { } as OpenClawConfig, }); - expect(warnings).toEqual([expect.stringContaining("Codex plugin is enabled")]); + expect(warnings).toEqual([expect.stringContaining("Legacy `openai-codex/*`")]); expect(warnings[0]).toContain("agents.defaults.model"); - expect(warnings[0]).toContain('runtime "pi"'); + expect(warnings[0]).toContain("openai/gpt-5.5"); + expect(warnings[0]).toContain('runtime is "pi"'); expect(warnings[0]).toContain('agentRuntime.id: "codex"'); + expect(warnings[0]).toContain("usable OAuth"); }); - it("does not warn when the native Codex runtime is selected", () => { + it("still warns when the native Codex runtime is selected with a legacy model ref", () => { const warnings = collectCodexRouteWarnings({ cfg: { - ...codexPluginConfig(), agents: { defaults: { model: "openai-codex/gpt-5.5", @@ -46,13 +86,13 @@ describe("collectCodexRouteWarnings", () => { } as OpenClawConfig, }); - expect(warnings).toEqual([]); + expect(warnings).toEqual([expect.stringContaining("openai/gpt-5.5")]); + expect(warnings[0]).toContain('runtime is "codex"'); }); - it("does not warn when OPENCLAW_AGENT_RUNTIME selects native Codex", () => { + it("still warns when OPENCLAW_AGENT_RUNTIME selects native Codex with a legacy model ref", () => { const warnings = collectCodexRouteWarnings({ cfg: { - ...codexPluginConfig(), agents: { defaults: { model: "openai-codex/gpt-5.5", @@ -64,15 +104,15 @@ describe("collectCodexRouteWarnings", () => { }, }); - expect(warnings).toEqual([]); + 
expect(warnings).toEqual([expect.stringContaining('runtime is "codex"')]); }); - it("does not warn unless the Codex plugin is explicitly enabled or allowed", () => { + it("does not warn for canonical OpenAI refs", () => { const warnings = collectCodexRouteWarnings({ cfg: { agents: { defaults: { - model: "openai-codex/gpt-5.5", + model: "openai/gpt-5.5", }, }, } as OpenClawConfig, @@ -80,4 +120,312 @@ describe("collectCodexRouteWarnings", () => { expect(warnings).toEqual([]); }); + + it("repairs configured Codex model refs to canonical OpenAI refs with the Codex runtime when ready", () => { + const result = maybeRepairCodexRoutes({ + cfg: { + agents: { + defaults: { + model: { + primary: "openai-codex/gpt-5.5", + fallbacks: ["openai-codex/gpt-5.4", "anthropic/claude-sonnet-4-6"], + }, + heartbeat: { + model: "openai-codex/gpt-5.4-mini", + }, + subagents: { + model: { + primary: "openai-codex/gpt-5.5", + fallbacks: ["openai-codex/gpt-5.4"], + }, + }, + compaction: { + model: "openai-codex/gpt-5.4", + memoryFlush: { + model: "openai-codex/gpt-5.4-mini", + }, + }, + models: { + "openai-codex/gpt-5.5": { alias: "codex" }, + }, + }, + list: [ + { + id: "worker", + model: "openai-codex/gpt-5.4", + agentRuntime: { id: "codex" }, + }, + ], + }, + channels: { + modelByChannel: { + telegram: { + default: "openai-codex/gpt-5.4", + }, + }, + }, + hooks: { + mappings: [ + { + model: "openai-codex/gpt-5.4-mini", + }, + ], + gmail: { + model: "openai-codex/gpt-5.4", + }, + }, + tools: { + subagents: { + model: { + primary: "openai-codex/gpt-5.4", + fallbacks: ["openai-codex/gpt-5.4-mini"], + }, + }, + }, + messages: { + tts: { + summaryModel: "openai-codex/gpt-5.4-mini", + }, + }, + } as OpenClawConfig, + shouldRepair: true, + codexRuntimeReady: true, + }); + + expect(result.warnings).toEqual([]); + expect(result.changes).toEqual([expect.stringContaining("Repaired Codex model routes")]); + expect(result.cfg.agents?.defaults?.model).toEqual({ + primary: "openai/gpt-5.5", + 
fallbacks: ["openai/gpt-5.4", "anthropic/claude-sonnet-4-6"], + }); + expect(result.cfg.agents?.defaults?.heartbeat?.model).toBe("openai/gpt-5.4-mini"); + expect(result.cfg.agents?.defaults?.subagents?.model).toEqual({ + primary: "openai/gpt-5.5", + fallbacks: ["openai/gpt-5.4"], + }); + expect(result.cfg.agents?.defaults?.compaction?.model).toBe("openai/gpt-5.4"); + expect(result.cfg.agents?.defaults?.compaction?.memoryFlush?.model).toBe("openai/gpt-5.4-mini"); + expect(result.cfg.agents?.defaults?.agentRuntime).toEqual({ id: "codex" }); + expect(result.cfg.agents?.defaults?.models).toEqual({ + "openai/gpt-5.5": { alias: "codex" }, + }); + expect(result.cfg.agents?.list?.[0]).toMatchObject({ + id: "worker", + model: "openai/gpt-5.4", + agentRuntime: { id: "codex" }, + }); + expect(result.cfg.channels?.modelByChannel?.telegram?.default).toBe("openai/gpt-5.4"); + expect(result.cfg.hooks?.mappings?.[0]?.model).toBe("openai/gpt-5.4-mini"); + expect(result.cfg.hooks?.gmail?.model).toBe("openai/gpt-5.4"); + expect(result.cfg.tools?.subagents?.model).toEqual({ + primary: "openai/gpt-5.4", + fallbacks: ["openai/gpt-5.4-mini"], + }); + expect(result.cfg.messages?.tts?.summaryModel).toBe("openai/gpt-5.4-mini"); + }); + + it("repairs legacy routes to PI when Codex is not installed, enabled, and OAuth-ready", () => { + const result = maybeRepairCodexRoutes({ + cfg: { + agents: { + defaults: { + model: "openai-codex/gpt-5.5", + }, + }, + } as OpenClawConfig, + shouldRepair: true, + }); + + expect(result.cfg.agents?.defaults?.model).toBe("openai/gpt-5.5"); + expect(result.cfg.agents?.defaults?.agentRuntime).toEqual({ id: "pi" }); + expect(result.changes.join("\n")).toContain('set agentRuntime.id to "pi"'); + }); + + it("repairs persisted session route pins to PI when Codex is not ready", () => { + const store: Record = { + main: { + sessionId: "s1", + updatedAt: 1, + modelProvider: "openai-codex", + model: "gpt-5.5", + providerOverride: "openai-codex", + modelOverride: 
"openai-codex/gpt-5.4", + modelOverrideSource: "auto", + agentHarnessId: "codex", + agentRuntimeOverride: "codex", + authProfileOverride: "openai-codex:default", + authProfileOverrideSource: "auto", + authProfileOverrideCompactionCount: 2, + fallbackNoticeSelectedModel: "openai-codex/gpt-5.5", + fallbackNoticeActiveModel: "openai-codex/gpt-5.4", + fallbackNoticeReason: "rate-limit", + }, + other: { + sessionId: "s2", + updatedAt: 2, + agentHarnessId: "codex", + }, + }; + + const result = repairCodexSessionStoreRoutes({ + store, + runtime: "pi", + now: 123, + }); + + expect(result).toEqual({ changed: true, sessionKeys: ["main", "other"] }); + expect(store.main).toMatchObject({ + updatedAt: 123, + modelProvider: "openai", + model: "gpt-5.5", + providerOverride: "openai", + modelOverride: "gpt-5.4", + modelOverrideSource: "auto", + agentHarnessId: "pi", + agentRuntimeOverride: "pi", + }); + expect(store.main.authProfileOverride).toBeUndefined(); + expect(store.main.authProfileOverrideSource).toBeUndefined(); + expect(store.main.authProfileOverrideCompactionCount).toBeUndefined(); + expect(store.main.fallbackNoticeSelectedModel).toBeUndefined(); + expect(store.main.fallbackNoticeActiveModel).toBeUndefined(); + expect(store.main.fallbackNoticeReason).toBeUndefined(); + expect(store.other).toMatchObject({ + updatedAt: 123, + agentHarnessId: "pi", + agentRuntimeOverride: "pi", + }); + }); + + it("keeps Codex session auth pins when the Codex runtime is ready", () => { + const store: Record = { + main: { + sessionId: "s1", + updatedAt: 1, + providerOverride: "openai-codex", + modelOverride: "gpt-5.5", + agentHarnessId: "codex", + authProfileOverride: "openai-codex:default", + authProfileOverrideSource: "auto", + }, + }; + + const result = repairCodexSessionStoreRoutes({ + store, + runtime: "codex", + now: 123, + }); + + expect(result).toEqual({ changed: true, sessionKeys: ["main"] }); + expect(store.main).toMatchObject({ + updatedAt: 123, + providerOverride: "openai", + 
modelOverride: "gpt-5.5", + agentHarnessId: "codex", + agentRuntimeOverride: "codex", + authProfileOverride: "openai-codex:default", + authProfileOverrideSource: "auto", + }); + }); + + it("selects the Codex runtime only when the plugin is installed, enabled, and has usable OAuth", () => { + const store = { + profiles: { + "openai-codex:default": { + type: "oauth", + provider: "openai-codex", + access: "access-token", + }, + }, + usageStats: {}, + }; + const index = { + plugins: [ + { + pluginId: "codex", + enabled: true, + startup: { + agentHarnesses: ["codex"], + }, + }, + ], + }; + mocks.ensureAuthProfileStore.mockReturnValue(store); + mocks.loadInstalledPluginIndex.mockReturnValue(index); + mocks.getInstalledPluginRecord.mockReturnValue(index.plugins[0]); + mocks.isInstalledPluginEnabled.mockReturnValue(true); + mocks.resolveAuthProfileOrder.mockReturnValue(["openai-codex:default"]); + + const result = maybeRepairCodexRoutes({ + cfg: { + plugins: { + entries: { + codex: { + enabled: true, + }, + }, + }, + agents: { + defaults: { + model: "openai-codex/gpt-5.5", + }, + }, + } as OpenClawConfig, + shouldRepair: true, + }); + + expect(mocks.loadInstalledPluginIndex).toHaveBeenCalled(); + expect(mocks.isInstalledPluginEnabled).toHaveBeenCalledWith(index, "codex", expect.anything()); + expect(mocks.resolveAuthProfileOrder).toHaveBeenCalledWith( + expect.objectContaining({ + provider: "openai-codex", + store, + }), + ); + expect(result.cfg.agents?.defaults?.model).toBe("openai/gpt-5.5"); + expect(result.cfg.agents?.defaults?.agentRuntime).toEqual({ id: "codex" }); + }); + + it("keeps PI when the installed Codex record does not contribute the Codex harness", () => { + const store = { + profiles: { + "openai-codex:default": { + type: "oauth", + provider: "openai-codex", + access: "access-token", + }, + }, + usageStats: {}, + }; + const index = { + plugins: [ + { + pluginId: "codex", + enabled: true, + startup: { + agentHarnesses: [], + }, + }, + ], + }; + 
mocks.ensureAuthProfileStore.mockReturnValue(store); + mocks.loadInstalledPluginIndex.mockReturnValue(index); + mocks.getInstalledPluginRecord.mockReturnValue(index.plugins[0]); + mocks.isInstalledPluginEnabled.mockReturnValue(true); + mocks.resolveAuthProfileOrder.mockReturnValue(["openai-codex:default"]); + + const result = maybeRepairCodexRoutes({ + cfg: { + agents: { + defaults: { + model: "openai-codex/gpt-5.5", + }, + }, + } as OpenClawConfig, + shouldRepair: true, + }); + + expect(result.cfg.agents?.defaults?.model).toBe("openai/gpt-5.5"); + expect(result.cfg.agents?.defaults?.agentRuntime).toEqual({ id: "pi" }); + }); }); diff --git a/src/commands/doctor/shared/codex-route-warnings.ts b/src/commands/doctor/shared/codex-route-warnings.ts index 21879e21b91..5ca0432cad3 100644 --- a/src/commands/doctor/shared/codex-route-warnings.ts +++ b/src/commands/doctor/shared/codex-route-warnings.ts @@ -1,25 +1,51 @@ -import type { - AgentModelConfig, - AgentRuntimePolicyConfig, -} from "../../../config/types.agents-shared.js"; +import fs from "node:fs"; +import { + ensureAuthProfileStore, + resolveAuthProfileOrder, + resolveProfileUnusableUntilForDisplay, +} from "../../../agents/auth-profiles.js"; +import { evaluateStoredCredentialEligibility } from "../../../agents/auth-profiles/credential-state.js"; +import { AGENT_MODEL_CONFIG_KEYS } from "../../../config/model-refs.js"; +import { loadSessionStore, updateSessionStore } from "../../../config/sessions/store.js"; +import { resolveAllAgentSessionStoreTargetsSync } from "../../../config/sessions/targets.js"; +import type { SessionEntry } from "../../../config/sessions/types.js"; +import type { AgentRuntimePolicyConfig } from "../../../config/types.agents-shared.js"; import type { OpenClawConfig } from "../../../config/types.openclaw.js"; +import { + getInstalledPluginRecord, + isInstalledPluginEnabled, + loadInstalledPluginIndex, +} from "../../../plugins/installed-plugin-index.js"; -type CodexPiRouteHit = { +type 
CodexRouteHit = { path: string; model: string; - runtime: string; + canonicalModel: string; + runtime?: string; + setsRuntime?: boolean; +}; + +type CodexRepairRuntime = "codex" | "pi"; +type MutableRecord = Record; +type SessionRouteRepairResult = { + changed: boolean; + sessionKeys: string[]; +}; +type CodexSessionRouteRepairSummary = { + scannedStores: number; + repairedStores: number; + repairedSessions: number; + warnings: string[]; + changes: string[]; }; function normalizeString(value: unknown): string | undefined { return typeof value === "string" && value.trim() ? value.trim().toLowerCase() : undefined; } -function normalizeModelRef(model: AgentModelConfig | undefined): string | undefined { - if (typeof model === "string") { - return model.trim() || undefined; - } - return typeof model?.primary === "string" && model.primary.trim() - ? model.primary.trim() +function asMutableRecord(value: unknown): MutableRecord | undefined { + return value && typeof value === "object" && !Array.isArray(value) + ? (value as MutableRecord) : undefined; } @@ -27,19 +53,20 @@ function isOpenAICodexModelRef(model: string | undefined): model is string { return normalizeString(model)?.startsWith("openai-codex/") === true; } -function isCodexPluginEnabled(cfg: OpenClawConfig): boolean { - const plugins = cfg.plugins; - if (plugins?.enabled === false) { - return false; +function toCanonicalOpenAIModelRef(model: string): string | undefined { + if (!isOpenAICodexModelRef(model)) { + return undefined; } - const allow = plugins?.allow; - if (Array.isArray(allow) && !allow.map((entry) => normalizeString(entry)).includes("codex")) { - return false; + const modelId = model.slice("openai-codex/".length).trim(); + return modelId ? 
`openai/${modelId}` : undefined; +} + +function toOpenAIModelId(model: string): string | undefined { + if (!isOpenAICodexModelRef(model)) { + return undefined; } - return ( - plugins?.entries?.codex?.enabled === true || - (Array.isArray(allow) && allow.map((entry) => normalizeString(entry)).includes("codex")) - ); + const modelId = model.slice("openai-codex/".length).trim(); + return modelId || undefined; } function resolveRuntime(params: { @@ -55,58 +82,796 @@ function resolveRuntime(params: { ); } -function collectOpenAICodexPiRouteHits( - cfg: OpenClawConfig, - env?: NodeJS.ProcessEnv, -): CodexPiRouteHit[] { +function recordCodexModelHit(params: { + hits: CodexRouteHit[]; + path: string; + model: string; + runtime?: string; + setsRuntime?: boolean; +}): string | undefined { + const canonicalModel = toCanonicalOpenAIModelRef(params.model); + if (!canonicalModel) { + return undefined; + } + params.hits.push({ + path: params.path, + model: params.model, + canonicalModel, + ...(params.runtime ? { runtime: params.runtime } : {}), + ...(params.setsRuntime ? 
{ setsRuntime: true } : {}), + }); + return canonicalModel; +} + +function collectStringModelSlot(params: { + hits: CodexRouteHit[]; + path: string; + value: unknown; + runtime?: string; + setsRuntime?: boolean; +}): boolean { + if (typeof params.value !== "string") { + return false; + } + const model = params.value.trim(); + if (!model || !isOpenAICodexModelRef(model)) { + return false; + } + return !!recordCodexModelHit({ + hits: params.hits, + path: params.path, + model, + runtime: params.runtime, + setsRuntime: params.setsRuntime, + }); +} + +function collectModelConfigSlot(params: { + hits: CodexRouteHit[]; + path: string; + value: unknown; + runtime?: string; + setsRuntimeOnPrimary?: boolean; +}): boolean { + if (typeof params.value === "string") { + return collectStringModelSlot({ + hits: params.hits, + path: params.path, + value: params.value, + runtime: params.runtime, + setsRuntime: params.setsRuntimeOnPrimary, + }); + } + const record = asMutableRecord(params.value); + if (!record) { + return false; + } + let rewrotePrimary = false; + if (typeof record.primary === "string") { + rewrotePrimary = collectStringModelSlot({ + hits: params.hits, + path: `${params.path}.primary`, + value: record.primary, + runtime: params.runtime, + setsRuntime: params.setsRuntimeOnPrimary, + }); + } + if (Array.isArray(record.fallbacks)) { + for (const [index, entry] of record.fallbacks.entries()) { + collectStringModelSlot({ + hits: params.hits, + path: `${params.path}.fallbacks.${index}`, + value: entry, + }); + } + } + return rewrotePrimary; +} + +function collectModelsMapRefs(params: { + hits: CodexRouteHit[]; + path: string; + models: unknown; +}): void { + const record = asMutableRecord(params.models); + if (!record) { + return; + } + for (const modelRef of Object.keys(record)) { + if (!isOpenAICodexModelRef(modelRef)) { + continue; + } + recordCodexModelHit({ + hits: params.hits, + path: `${params.path}.${modelRef}`, + model: modelRef, + }); + } +} + +function 
collectAgentModelRefs(params: { + hits: CodexRouteHit[]; + agent: unknown; + path: string; + runtime?: string; + collectModelsMap?: boolean; +}): void { + const agent = asMutableRecord(params.agent); + if (!agent) { + return; + } + for (const key of AGENT_MODEL_CONFIG_KEYS) { + collectModelConfigSlot({ + hits: params.hits, + path: `${params.path}.${key}`, + value: agent[key], + runtime: key === "model" ? params.runtime : undefined, + setsRuntimeOnPrimary: key === "model", + }); + } + collectStringModelSlot({ + hits: params.hits, + path: `${params.path}.heartbeat.model`, + value: asMutableRecord(agent.heartbeat)?.model, + }); + collectModelConfigSlot({ + hits: params.hits, + path: `${params.path}.subagents.model`, + value: asMutableRecord(agent.subagents)?.model, + }); + const compaction = asMutableRecord(agent.compaction); + collectStringModelSlot({ + hits: params.hits, + path: `${params.path}.compaction.model`, + value: compaction?.model, + }); + collectStringModelSlot({ + hits: params.hits, + path: `${params.path}.compaction.memoryFlush.model`, + value: asMutableRecord(compaction?.memoryFlush)?.model, + }); + if (params.collectModelsMap) { + collectModelsMapRefs({ + hits: params.hits, + path: `${params.path}.models`, + models: agent.models, + }); + } +} + +function collectConfigModelRefs(cfg: OpenClawConfig, env?: NodeJS.ProcessEnv): CodexRouteHit[] { + const hits: CodexRouteHit[] = []; const defaults = cfg.agents?.defaults; const defaultsRuntime = defaults?.agentRuntime; - const hits: CodexPiRouteHit[] = []; - const defaultModel = normalizeModelRef(defaults?.model); - const defaultRuntime = resolveRuntime({ env, defaultsRuntime }); - if (isOpenAICodexModelRef(defaultModel) && defaultRuntime !== "codex") { - hits.push({ path: "agents.defaults.model", model: defaultModel, runtime: defaultRuntime }); - } + collectAgentModelRefs({ + hits, + agent: defaults, + path: "agents.defaults", + runtime: resolveRuntime({ env, defaultsRuntime }), + collectModelsMap: true, + 
}); - for (const agent of cfg.agents?.list ?? []) { - const model = normalizeModelRef(agent.model); - if (!isOpenAICodexModelRef(model)) { - continue; - } - const runtime = resolveRuntime({ - env, - agentRuntime: agent.agentRuntime, - defaultsRuntime, + for (const [index, agent] of (cfg.agents?.list ?? []).entries()) { + const id = typeof agent.id === "string" && agent.id.trim() ? agent.id.trim() : String(index); + collectAgentModelRefs({ + hits, + agent, + path: `agents.list.${id}`, + runtime: resolveRuntime({ + env, + agentRuntime: agent.agentRuntime, + defaultsRuntime, + }), }); - if (runtime === "codex") { - continue; - } - const id = typeof agent.id === "string" && agent.id.trim() ? agent.id.trim() : ""; - hits.push({ path: `agents.list.${id}.model`, model, runtime }); } + const channelsModelByChannel = asMutableRecord(cfg.channels?.modelByChannel); + if (channelsModelByChannel) { + for (const [channelId, channelMap] of Object.entries(channelsModelByChannel)) { + const targets = asMutableRecord(channelMap); + if (!targets) { + continue; + } + for (const [targetId, model] of Object.entries(targets)) { + collectStringModelSlot({ + hits, + path: `channels.modelByChannel.${channelId}.${targetId}`, + value: model, + }); + } + } + } + + for (const [index, mapping] of (cfg.hooks?.mappings ?? 
[]).entries()) { + collectStringModelSlot({ + hits, + path: `hooks.mappings.${index}.model`, + value: mapping.model, + }); + } + collectStringModelSlot({ + hits, + path: "hooks.gmail.model", + value: cfg.hooks?.gmail?.model, + }); + collectModelConfigSlot({ + hits, + path: "tools.subagents.model", + value: cfg.tools?.subagents?.model, + }); + collectStringModelSlot({ + hits, + path: "messages.tts.summaryModel", + value: cfg.messages?.tts?.summaryModel, + }); + collectStringModelSlot({ + hits, + path: "channels.discord.voice.model", + value: asMutableRecord(asMutableRecord(cfg.channels?.discord)?.voice)?.model, + }); return hits; } +function rewriteStringModelSlot(params: { + hits: CodexRouteHit[]; + container: MutableRecord | undefined; + key: string; + path: string; + runtime?: string; + setsRuntime?: boolean; +}): boolean { + if (!params.container) { + return false; + } + const value = params.container[params.key]; + const model = typeof value === "string" ? value.trim() : ""; + if (!model || !isOpenAICodexModelRef(model)) { + return false; + } + const canonicalModel = recordCodexModelHit({ + hits: params.hits, + path: params.path, + model, + runtime: params.runtime, + setsRuntime: params.setsRuntime, + }); + if (!canonicalModel) { + return false; + } + params.container[params.key] = canonicalModel; + return true; +} + +function rewriteModelConfigSlot(params: { + hits: CodexRouteHit[]; + container: MutableRecord | undefined; + key: string; + path: string; + runtime?: string; + setsRuntimeOnPrimary?: boolean; +}): boolean { + if (!params.container) { + return false; + } + const value = params.container[params.key]; + if (typeof value === "string") { + return rewriteStringModelSlot({ + hits: params.hits, + container: params.container, + key: params.key, + path: params.path, + runtime: params.runtime, + setsRuntime: params.setsRuntimeOnPrimary, + }); + } + const record = asMutableRecord(value); + if (!record) { + return false; + } + const rewrotePrimary = 
rewriteStringModelSlot({ + hits: params.hits, + container: record, + key: "primary", + path: `${params.path}.primary`, + runtime: params.runtime, + setsRuntime: params.setsRuntimeOnPrimary, + }); + if (Array.isArray(record.fallbacks)) { + record.fallbacks = record.fallbacks.map((entry, index) => { + if (typeof entry !== "string") { + return entry; + } + const model = entry.trim(); + const canonicalModel = recordCodexModelHit({ + hits: params.hits, + path: `${params.path}.fallbacks.${index}`, + model, + }); + return canonicalModel ?? entry; + }); + } + return rewrotePrimary; +} + +function rewriteModelsMap(params: { + hits: CodexRouteHit[]; + models: MutableRecord | undefined; + path: string; +}): void { + if (!params.models) { + return; + } + for (const legacyRef of Object.keys(params.models)) { + const canonicalModel = toCanonicalOpenAIModelRef(legacyRef); + if (!canonicalModel) { + continue; + } + recordCodexModelHit({ + hits: params.hits, + path: `${params.path}.${legacyRef}`, + model: legacyRef, + }); + params.models[canonicalModel] ??= params.models[legacyRef] ?? {}; + delete params.models[legacyRef]; + } +} + +function rewriteAgentModelRefs(params: { + hits: CodexRouteHit[]; + agent: MutableRecord | undefined; + path: string; + runtime: CodexRepairRuntime; + currentRuntime: string; + rewriteModelsMap?: boolean; +}): void { + if (!params.agent) { + return; + } + for (const key of AGENT_MODEL_CONFIG_KEYS) { + const rewrotePrimary = rewriteModelConfigSlot({ + hits: params.hits, + container: params.agent, + key, + path: `${params.path}.${key}`, + runtime: key === "model" ? params.currentRuntime : undefined, + setsRuntimeOnPrimary: key === "model", + }); + if (key === "model" && rewrotePrimary) { + const agentRuntime = asMutableRecord(params.agent.agentRuntime) ?? 
{}; + agentRuntime.id = params.runtime; + params.agent.agentRuntime = agentRuntime; + } + } + rewriteStringModelSlot({ + hits: params.hits, + container: asMutableRecord(params.agent.heartbeat), + key: "model", + path: `${params.path}.heartbeat.model`, + }); + rewriteModelConfigSlot({ + hits: params.hits, + container: asMutableRecord(params.agent.subagents), + key: "model", + path: `${params.path}.subagents.model`, + }); + const compaction = asMutableRecord(params.agent.compaction); + rewriteStringModelSlot({ + hits: params.hits, + container: compaction, + key: "model", + path: `${params.path}.compaction.model`, + }); + rewriteStringModelSlot({ + hits: params.hits, + container: asMutableRecord(compaction?.memoryFlush), + key: "model", + path: `${params.path}.compaction.memoryFlush.model`, + }); + if (params.rewriteModelsMap) { + rewriteModelsMap({ + hits: params.hits, + models: asMutableRecord(params.agent.models), + path: `${params.path}.models`, + }); + } +} + +function rewriteConfigModelRefs(params: { + cfg: OpenClawConfig; + env?: NodeJS.ProcessEnv; + runtime: CodexRepairRuntime; +}): { cfg: OpenClawConfig; changes: CodexRouteHit[] } { + const nextConfig = structuredClone(params.cfg); + const hits: CodexRouteHit[] = []; + const defaultsRuntime = nextConfig.agents?.defaults?.agentRuntime; + rewriteAgentModelRefs({ + hits, + agent: asMutableRecord(nextConfig.agents?.defaults), + path: "agents.defaults", + runtime: params.runtime, + currentRuntime: resolveRuntime({ env: params.env, defaultsRuntime }), + rewriteModelsMap: true, + }); + for (const [index, agent] of (nextConfig.agents?.list ?? []).entries()) { + const id = typeof agent.id === "string" && agent.id.trim() ? 
agent.id.trim() : String(index); + rewriteAgentModelRefs({ + hits, + agent: agent as MutableRecord, + path: `agents.list.${id}`, + runtime: params.runtime, + currentRuntime: resolveRuntime({ + env: params.env, + agentRuntime: agent.agentRuntime, + defaultsRuntime, + }), + }); + } + const channelsModelByChannel = asMutableRecord(nextConfig.channels?.modelByChannel); + if (channelsModelByChannel) { + for (const [channelId, channelMap] of Object.entries(channelsModelByChannel)) { + const targets = asMutableRecord(channelMap); + if (!targets) { + continue; + } + for (const targetId of Object.keys(targets)) { + rewriteStringModelSlot({ + hits, + container: targets, + key: targetId, + path: `channels.modelByChannel.${channelId}.${targetId}`, + }); + } + } + } + for (const [index, mapping] of (nextConfig.hooks?.mappings ?? []).entries()) { + rewriteStringModelSlot({ + hits, + container: mapping as MutableRecord, + key: "model", + path: `hooks.mappings.${index}.model`, + }); + } + rewriteStringModelSlot({ + hits, + container: asMutableRecord(nextConfig.hooks?.gmail), + key: "model", + path: "hooks.gmail.model", + }); + rewriteModelConfigSlot({ + hits, + container: asMutableRecord(nextConfig.tools?.subagents), + key: "model", + path: "tools.subagents.model", + }); + rewriteStringModelSlot({ + hits, + container: asMutableRecord(nextConfig.messages?.tts), + key: "summaryModel", + path: "messages.tts.summaryModel", + }); + rewriteStringModelSlot({ + hits, + container: asMutableRecord(asMutableRecord(nextConfig.channels?.discord)?.voice), + key: "model", + path: "channels.discord.voice.model", + }); + return { + cfg: hits.length > 0 ? 
nextConfig : params.cfg, + changes: hits, + }; +} + +function hasUsableCodexOAuthProfile(cfg: OpenClawConfig): boolean { + try { + const store = ensureAuthProfileStore(undefined, { allowKeychainPrompt: false, config: cfg }); + const now = Date.now(); + return resolveAuthProfileOrder({ cfg, store, provider: "openai-codex" }).some((profileId) => { + const credential = store.profiles[profileId]; + if (!credential || credential.type !== "oauth") { + return false; + } + const unusableUntil = resolveProfileUnusableUntilForDisplay(store, profileId); + if (unusableUntil && now < unusableUntil) { + return false; + } + return evaluateStoredCredentialEligibility({ credential, now }).eligible; + }); + } catch { + return false; + } +} + +function isCodexPluginInstalledAndEnabled(cfg: OpenClawConfig, env?: NodeJS.ProcessEnv): boolean { + const index = loadInstalledPluginIndex({ config: cfg, env }); + const record = getInstalledPluginRecord(index, "codex"); + if (!record || !record.startup.agentHarnesses.includes("codex")) { + return false; + } + return isInstalledPluginEnabled(index, "codex", cfg); +} + +function resolveCodexRepairRuntime(params: { + cfg: OpenClawConfig; + env?: NodeJS.ProcessEnv; + codexRuntimeReady?: boolean; +}): CodexRepairRuntime { + if (params.codexRuntimeReady !== undefined) { + return params.codexRuntimeReady ? "codex" : "pi"; + } + return isCodexPluginInstalledAndEnabled(params.cfg, params.env) && + hasUsableCodexOAuthProfile(params.cfg) + ? "codex" + : "pi"; +} + +function formatCodexRouteChange(hit: CodexRouteHit, runtime: CodexRepairRuntime): string { + const suffix = hit.setsRuntime ? 
`; set agentRuntime.id to "${runtime}"` : ""; + return `${hit.path}: ${hit.model} -> ${hit.canonicalModel}${suffix}.`; +} + export function collectCodexRouteWarnings(params: { cfg: OpenClawConfig; env?: NodeJS.ProcessEnv; }): string[] { - if (!isCodexPluginEnabled(params.cfg)) { - return []; - } - const hits = collectOpenAICodexPiRouteHits(params.cfg, params.env); + const hits = collectConfigModelRefs(params.cfg, params.env); if (hits.length === 0) { return []; } return [ [ - "- Codex plugin is enabled, but `openai-codex/*` model refs still use the OpenClaw PI runner unless `agentRuntime.id` is `codex`.", + "- Legacy `openai-codex/*` model refs should be rewritten to `openai/*`.", ...hits.map( - (hit) => `- ${hit.path}: ${hit.model} currently resolves with runtime "${hit.runtime}".`, + (hit) => + `- ${hit.path}: ${hit.model} should become ${hit.canonicalModel}${ + hit.runtime ? `; current runtime is "${hit.runtime}"` : "" + }.`, ), - '- To use native Codex app-server, set the model to `openai/` and set `agents.defaults.agentRuntime.id: "codex"` (or the agent-level equivalent).', - "- Leave this unchanged if you intentionally want Codex OAuth/subscription auth through PI.", + '- Run `openclaw doctor --fix`: it rewrites configured model refs and stale sessions; primary routes select `agentRuntime.id: "codex"` only when Codex is installed, enabled, and has usable OAuth, otherwise they select OpenClaw PI.', ].join("\n"), ]; } + +export function maybeRepairCodexRoutes(params: { + cfg: OpenClawConfig; + env?: NodeJS.ProcessEnv; + shouldRepair: boolean; + codexRuntimeReady?: boolean; +}): { cfg: OpenClawConfig; warnings: string[]; changes: string[] } { + const hits = collectConfigModelRefs(params.cfg, params.env); + if (hits.length === 0) { + return { cfg: params.cfg, warnings: [], changes: [] }; + } + if (!params.shouldRepair) { + return { + cfg: params.cfg, + warnings: collectCodexRouteWarnings({ cfg: params.cfg, env: params.env }), + changes: [], + }; + } + const 
runtime = resolveCodexRepairRuntime({ + cfg: params.cfg, + env: params.env, + codexRuntimeReady: params.codexRuntimeReady, + }); + const repaired = rewriteConfigModelRefs({ + cfg: params.cfg, + env: params.env, + runtime, + }); + return { + cfg: repaired.cfg, + warnings: [], + changes: [ + `Repaired Codex model routes:\n${repaired.changes + .map((hit) => `- ${formatCodexRouteChange(hit, runtime)}`) + .join("\n")}`, + ], + }; +} + +function rewriteSessionModelPair(params: { + entry: SessionEntry; + providerKey: "modelProvider" | "providerOverride"; + modelKey: "model" | "modelOverride"; +}): boolean { + let changed = false; + const provider = normalizeString(params.entry[params.providerKey]); + const model = + typeof params.entry[params.modelKey] === "string" ? params.entry[params.modelKey] : undefined; + if (provider === "openai-codex") { + params.entry[params.providerKey] = "openai"; + changed = true; + if (model) { + const modelId = toOpenAIModelId(model); + if (modelId) { + params.entry[params.modelKey] = modelId; + } + } + return true; + } + if (model && isOpenAICodexModelRef(model)) { + const canonicalModel = toCanonicalOpenAIModelRef(model); + if (canonicalModel) { + params.entry[params.modelKey] = canonicalModel; + changed = true; + } + } + return changed; +} + +function clearStaleCodexFallbackNotice(entry: SessionEntry): boolean { + if ( + !isOpenAICodexModelRef(entry.fallbackNoticeSelectedModel) && + !isOpenAICodexModelRef(entry.fallbackNoticeActiveModel) + ) { + return false; + } + delete entry.fallbackNoticeSelectedModel; + delete entry.fallbackNoticeActiveModel; + delete entry.fallbackNoticeReason; + return true; +} + +function clearStaleCodexAuthOverride(entry: SessionEntry, runtime: CodexRepairRuntime): boolean { + if (runtime === "codex" || !entry.authProfileOverride?.startsWith("openai-codex:")) { + return false; + } + delete entry.authProfileOverride; + delete entry.authProfileOverrideSource; + delete entry.authProfileOverrideCompactionCount; + 
return true; +} + +export function repairCodexSessionStoreRoutes(params: { + store: Record; + runtime: CodexRepairRuntime; + now?: number; +}): SessionRouteRepairResult { + const now = params.now ?? Date.now(); + const sessionKeys: string[] = []; + for (const [sessionKey, entry] of Object.entries(params.store)) { + if (!entry) { + continue; + } + const changedRuntimeModelRoute = rewriteSessionModelPair({ + entry, + providerKey: "modelProvider", + modelKey: "model", + }); + const changedOverrideModelRoute = rewriteSessionModelPair({ + entry, + providerKey: "providerOverride", + modelKey: "modelOverride", + }); + const changedModelRoute = changedRuntimeModelRoute || changedOverrideModelRoute; + const changedFallbackNotice = clearStaleCodexFallbackNotice(entry); + const changedAuthOverride = clearStaleCodexAuthOverride(entry, params.runtime); + const shouldRepinCodexHarness = entry.agentHarnessId === "codex" && params.runtime !== "codex"; + if ( + !changedModelRoute && + !changedFallbackNotice && + !changedAuthOverride && + !shouldRepinCodexHarness + ) { + continue; + } + if (changedModelRoute || shouldRepinCodexHarness) { + entry.agentHarnessId = params.runtime; + entry.agentRuntimeOverride = params.runtime; + } + entry.updatedAt = now; + sessionKeys.push(sessionKey); + } + return { + changed: sessionKeys.length > 0, + sessionKeys, + }; +} + +function scanCodexSessionStoreRoutes( + store: Record, + runtime: CodexRepairRuntime, +): string[] { + return Object.entries(store).flatMap(([sessionKey, entry]) => { + if (!entry) { + return []; + } + const hasLegacyRoute = + normalizeString(entry.modelProvider) === "openai-codex" || + normalizeString(entry.providerOverride) === "openai-codex" || + isOpenAICodexModelRef(entry.model) || + isOpenAICodexModelRef(entry.modelOverride) || + isOpenAICodexModelRef(entry.fallbackNoticeSelectedModel) || + isOpenAICodexModelRef(entry.fallbackNoticeActiveModel) || + (runtime !== "codex" && 
entry.authProfileOverride?.startsWith("openai-codex:") === true) || + (runtime !== "codex" && entry.agentHarnessId === "codex"); + return hasLegacyRoute ? [sessionKey] : []; + }); +} + +export async function maybeRepairCodexSessionRoutes(params: { + cfg: OpenClawConfig; + env?: NodeJS.ProcessEnv; + shouldRepair: boolean; + codexRuntimeReady?: boolean; +}): Promise { + const targets = resolveAllAgentSessionStoreTargetsSync(params.cfg, { + env: params.env ?? process.env, + }).filter((target) => fs.existsSync(target.storePath)); + if (targets.length === 0) { + return { + scannedStores: 0, + repairedStores: 0, + repairedSessions: 0, + warnings: [], + changes: [], + }; + } + if (!params.shouldRepair) { + const runtime = resolveCodexRepairRuntime({ + cfg: params.cfg, + env: params.env, + codexRuntimeReady: params.codexRuntimeReady, + }); + const stale = targets.flatMap((target) => { + const sessionKeys = scanCodexSessionStoreRoutes(loadSessionStore(target.storePath), runtime); + return sessionKeys.map((sessionKey) => `${target.agentId}:${sessionKey}`); + }); + return { + scannedStores: targets.length, + repairedStores: 0, + repairedSessions: 0, + warnings: + stale.length > 0 + ? 
[ + [ + "- Legacy `openai-codex/*` session route state detected.", + `- Affected sessions: ${stale.length}.`, + "- Run `openclaw doctor --fix` to rewrite stale session model/provider pins across all agent session stores.", + ].join("\n"), + ] + : [], + changes: [], + }; + } + const runtime = resolveCodexRepairRuntime({ + cfg: params.cfg, + env: params.env, + codexRuntimeReady: params.codexRuntimeReady, + }); + let repairedStores = 0; + let repairedSessions = 0; + for (const target of targets) { + const staleSessionKeys = scanCodexSessionStoreRoutes( + loadSessionStore(target.storePath), + runtime, + ); + if (staleSessionKeys.length === 0) { + continue; + } + const result = await updateSessionStore( + target.storePath, + (store) => repairCodexSessionStoreRoutes({ store, runtime }), + { skipMaintenance: true }, + ); + if (!result.changed) { + continue; + } + repairedStores += 1; + repairedSessions += result.sessionKeys.length; + } + return { + scannedStores: targets.length, + repairedStores, + repairedSessions, + warnings: [], + changes: + repairedSessions > 0 + ? [ + `Repaired Codex session routes: moved ${repairedSessions} session${ + repairedSessions === 1 ? "" : "s" + } across ${repairedStores} store${repairedStores === 1 ? "" : "s"} to openai/* with agentRuntime "${runtime}".`, + ] + : [], + }; +} diff --git a/src/config/model-refs.ts b/src/config/model-refs.ts index 8008aa8b911..d4e0c7e266e 100644 --- a/src/config/model-refs.ts +++ b/src/config/model-refs.ts @@ -46,6 +46,21 @@ export function collectConfiguredModelRefs( for (const key of AGENT_MODEL_CONFIG_KEYS) { collectModelConfig(`${path}.${key}`, agent[key]); } + pushModelRef( + `${path}.heartbeat.model`, + isRecord(agent.heartbeat) ? agent.heartbeat.model : undefined, + ); + collectModelConfig( + `${path}.subagents.model`, + isRecord(agent.subagents) ? 
agent.subagents.model : undefined, + ); + if (isRecord(agent.compaction)) { + pushModelRef(`${path}.compaction.model`, agent.compaction.model); + pushModelRef( + `${path}.compaction.memoryFlush.model`, + isRecord(agent.compaction.memoryFlush) ? agent.compaction.memoryFlush.model : undefined, + ); + } if (isRecord(agent.models)) { for (const modelRef of Object.keys(agent.models)) { pushModelRef(`${path}.models.${modelRef}`, modelRef); @@ -73,5 +88,30 @@ export function collectConfiguredModelRefs( } } } + const hooks = isRecord(root.hooks) ? root.hooks : {}; + if (Array.isArray(hooks.mappings)) { + for (const [index, mapping] of hooks.mappings.entries()) { + pushModelRef(`hooks.mappings.${index}.model`, isRecord(mapping) ? mapping.model : undefined); + } + } + pushModelRef("hooks.gmail.model", isRecord(hooks.gmail) ? hooks.gmail.model : undefined); + collectModelConfig( + "tools.subagents.model", + isRecord(root.tools) && isRecord(root.tools.subagents) ? root.tools.subagents.model : undefined, + ); + pushModelRef( + "messages.tts.summaryModel", + isRecord(root.messages) && isRecord(root.messages.tts) + ? root.messages.tts.summaryModel + : undefined, + ); + pushModelRef( + "channels.discord.voice.model", + isRecord(root.channels) && + isRecord(root.channels.discord) && + isRecord(root.channels.discord.voice) + ? root.channels.discord.voice.model + : undefined, + ); return refs; } diff --git a/src/flows/doctor-health-contributions.ts b/src/flows/doctor-health-contributions.ts index 717fa4b7705..392675b1905 100644 --- a/src/flows/doctor-health-contributions.ts +++ b/src/flows/doctor-health-contributions.ts @@ -313,6 +313,23 @@ async function runStateIntegrityHealth(ctx: DoctorHealthFlowContext): Promise { + const { maybeRepairCodexSessionRoutes } = + await import("../commands/doctor/shared/codex-route-warnings.js"); + const { note } = await import("../terminal/note.js"); + const result = await maybeRepairCodexSessionRoutes({ + cfg: ctx.cfg, + env: ctx.env ?? 
process.env,
+    shouldRepair: ctx.prompter.shouldRepair,
+  });
+  if (result.changes.length > 0) {
+    note(result.changes.join("\n"), "Doctor changes");
+  }
+  if (result.warnings.length > 0) {
+    note(result.warnings.join("\n"), "Doctor warnings");
+  }
+}
+
 async function runSessionLocksHealth(ctx: DoctorHealthFlowContext): Promise<void> {
   const { noteSessionLockHealth } = await import("../commands/doctor-session-locks.js");
   await noteSessionLockHealth({ shouldRepair: ctx.prompter.shouldRepair });
@@ -675,6 +692,11 @@ export function resolveDoctorHealthContributions(): DoctorHealthContribution[] {
       label: "State integrity",
       run: runStateIntegrityHealth,
     }),
+    createDoctorHealthContribution({
+      id: "doctor:codex-session-routes",
+      label: "Codex session routes",
+      run: runCodexSessionRouteHealth,
+    }),
     createDoctorHealthContribution({
       id: "doctor:session-locks",
       label: "Session locks",