refactor(cli): remove stale cli provider leftovers

This commit is contained in:
Peter Steinberger
2026-04-05 19:08:55 +01:00
parent b664541158
commit 84e76f7cce
32 changed files with 20 additions and 1189 deletions

View File

@@ -167,7 +167,7 @@ pluggable interface, lifecycle hooks, and configuration.
`/context` prefers the latest **run-built** system prompt report when available:
- `System prompt (run)` = captured from the last embedded (tool-capable) run and persisted in the session store.
- `System prompt (estimate)` = computed on the fly when no run report exists (or when running via a CLI backend that doesn't generate the report).
- `System prompt (estimate)` = computed on the fly when no run report exists yet.
Either way, it reports sizes and top contributors; it does **not** dump the full system prompt or tool schemas.

View File

@@ -1064,7 +1064,6 @@ Z.AI GLM-4.x models automatically enable thinking mode unless you set `--thinkin
Z.AI models enable `tool_stream` by default for tool call streaming. Set `agents.defaults.models["zai/<model>"].params.tool_stream` to `false` to disable it.
Anthropic Claude 4.6 models default to `adaptive` thinking when no explicit thinking level is set.
- CLI backends are text-first; tools are always disabled.
- Sessions supported when `sessionArg` is set.
- Image pass-through supported when `imageArg` accepts file paths.

View File

@@ -30,7 +30,6 @@ native OpenClaw plugin registers against one or more capability types:
| Capability | Registration method | Example plugins |
| ---------------------- | ------------------------------------------------ | ------------------------------------ |
| Text inference | `api.registerProvider(...)` | `openai`, `anthropic` |
| CLI inference backend | `api.registerCliBackend(...)` | `openai`, `anthropic` |
| Speech | `api.registerSpeechProvider(...)` | `elevenlabs`, `microsoft` |
| Realtime transcription | `api.registerRealtimeTranscriptionProvider(...)` | `openai` |
| Realtime voice | `api.registerRealtimeVoiceProvider(...)` | `openai` |

View File

@@ -122,7 +122,6 @@ explicitly promotes one as public.
| `plugin-sdk/provider-entry` | `defineSingleProviderPluginEntry` |
| `plugin-sdk/provider-setup` | Curated local/self-hosted provider setup helpers |
| `plugin-sdk/self-hosted-provider-setup` | Focused OpenAI-compatible self-hosted provider setup helpers |
| `plugin-sdk/cli-backend` | CLI backend defaults + watchdog constants |
| `plugin-sdk/provider-auth-runtime` | Runtime API-key resolution helpers for provider plugins |
| `plugin-sdk/provider-auth-api-key` | API-key onboarding/profile-write helpers |
| `plugin-sdk/provider-auth-result` | Standard OAuth auth-result builder |
@@ -283,7 +282,6 @@ methods:
| Method | What it registers |
| ------------------------------------------------ | -------------------------------- |
| `api.registerProvider(...)` | Text inference (LLM) |
| `api.registerCliBackend(...)` | Local CLI inference backend |
| `api.registerChannel(...)` | Messaging channel |
| `api.registerSpeechProvider(...)` | Text-to-speech / STT synthesis |
| `api.registerRealtimeTranscriptionProvider(...)` | Streaming realtime transcription |

View File

@@ -332,7 +332,7 @@ Notes:
- The default prompt/system prompt include a `NO_REPLY` hint to suppress
delivery.
- The flush runs once per compaction cycle (tracked in `sessions.json`).
- The flush runs only for embedded Pi sessions (CLI backends skip it).
- The flush runs only for embedded Pi sessions.
- The flush is skipped when the session workspace is read-only (`workspaceAccess: "ro"` or `"none"`).
- See [Memory](/concepts/memory) for the workspace file layout and write patterns.

View File

@@ -114,7 +114,6 @@ Important distinction:
For operators, the practical rule is:
- want `/acp spawn`, bindable sessions, runtime controls, or persistent harness work: use ACP
- want simple local text fallback through the raw CLI: use CLI backends
## Bound sessions

View File

@@ -583,7 +583,6 @@ function describeCronSeamKinds(relativePath, source) {
const seamKinds = [];
const importsAgentRunner = hasAnyImportSource(source, [
"../../agents/cli-runner.js",
"../../agents/pi-embedded.js",
"../../agents/model-fallback.js",
"../../agents/subagent-registry.js",
@@ -625,9 +624,7 @@ function describeCronSeamKinds(relativePath, source) {
if (
importsAgentRunner &&
/\brunCliAgent\b|\brunEmbeddedPiAgent\b|\brunWithModelFallback\b|\bregisterAgentRunContext\b/.test(
source,
)
/\brunEmbeddedPiAgent\b|\brunWithModelFallback\b|\bregisterAgentRunContext\b/.test(source)
) {
seamKinds.push("cron-agent-handoff");
}

View File

@@ -908,8 +908,6 @@ if (!inspect.gatewayMethods.includes("demo.marketplace.shortcut.v2")) {
console.log("ok");
NODE
echo "Running bundle MCP CLI-agent e2e..."
pnpm exec vitest run --config vitest.e2e.config.ts src/agents/cli-runner.bundle-mcp.e2e.test.ts
EOF
echo "OK"

View File

@@ -1,72 +0,0 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it } from "vitest";
import type { OpenClawConfig } from "../../config/config.js";
import { loadSessionStore, type SessionEntry } from "../../config/sessions.js";
import type { EmbeddedPiRunResult } from "../pi-embedded.js";
import { updateSessionStoreAfterAgentRun } from "./session-store.js";
// Verifies that a completed agent run's provider/model (reported via
// result.meta.agentMeta) are persisted both in the in-memory session store
// and on disk in sessions.json.
describe("updateSessionStoreAfterAgentRun", () => {
  let tmpDir: string;
  let storePath: string;
  beforeEach(async () => {
    // Fresh temp dir per test so each run writes an isolated sessions.json.
    tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-session-store-"));
    storePath = path.join(tmpDir, "sessions.json");
  });
  afterEach(async () => {
    await fs.rm(tmpDir, { recursive: true, force: true });
  });
  it("persists the runtime provider/model used by the completed run", async () => {
    const cfg = {
      agents: {
        defaults: {
          cliBackends: {
            "codex-cli": { command: "codex" },
          },
        },
      },
    } as OpenClawConfig;
    const sessionKey = "agent:main:explicit:test-codex-cli";
    const sessionId = "test-openclaw-session";
    // Pre-seed a minimal store entry; the run under test should enrich it.
    const sessionStore: Record<string, SessionEntry> = {
      [sessionKey]: {
        sessionId,
        updatedAt: 1,
      },
    };
    await fs.writeFile(storePath, JSON.stringify(sessionStore, null, 2));
    // Simulated run result whose agentMeta reports the provider/model actually used.
    const result: EmbeddedPiRunResult = {
      meta: {
        durationMs: 1,
        agentMeta: {
          sessionId: "cli-session-123",
          provider: "codex-cli",
          model: "gpt-5.4",
        },
      },
    };
    await updateSessionStoreAfterAgentRun({
      cfg,
      sessionId,
      sessionKey,
      storePath,
      sessionStore,
      defaultProvider: "codex-cli",
      defaultModel: "gpt-5.4",
      result,
    });
    // The in-memory entry is updated...
    expect(sessionStore[sessionKey]?.modelProvider).toBe("codex-cli");
    expect(sessionStore[sessionKey]?.model).toBe("gpt-5.4");
    // ...and the on-disk store reflects the same values after reload.
    const persisted = loadSessionStore(storePath);
    expect(persisted[sessionKey]?.modelProvider).toBe("codex-cli");
    expect(persisted[sessionKey]?.model).toBe("gpt-5.4");
  });
});

View File

@@ -1,10 +1,8 @@
import { describe, it, expect, vi } from "vitest";
import type { OpenClawConfig } from "../config/config.js";
import { resetLogger, setLoggerOverride } from "../logging/logger.js";
import {
buildAllowedModelSet,
inferUniqueProviderFromConfiguredModels,
isCliProvider,
parseModelRef,
buildModelAliasIndex,
normalizeModelSelection,
@@ -101,7 +99,7 @@ function createProviderWithModelsConfig(provider: string, models: Array<Record<s
function resolveConfiguredRefForTest(cfg: Partial<OpenClawConfig>) {
return resolveConfiguredModelRef({
cfg: cfg as OpenClawConfig,
cfg: cfg,
defaultProvider: "openai",
defaultModel: "gpt-5.4",
});
@@ -131,12 +129,6 @@ describe("model-selection", () => {
});
});
// Guard test for the compatibility stub: isCliProvider always reports false
// now that CLI providers have been removed.
describe("isCliProvider", () => {
  it("returns false for provider ids", () => {
    expect(isCliProvider("example-cli", {} as OpenClawConfig)).toBe(false);
  });
});
describe("modelKey", () => {
it("keeps canonical OpenRouter native ids without duplicating the provider", () => {
expect(modelKey("openrouter", "openrouter/hunter-alpha")).toBe("openrouter/hunter-alpha");
@@ -462,7 +454,7 @@ describe("model-selection", () => {
};
const index = buildModelAliasIndex({
cfg: cfg as OpenClawConfig,
cfg: cfg,
defaultProvider: "anthropic",
});
@@ -751,7 +743,7 @@ describe("model-selection", () => {
};
const result = resolveConfiguredModelRef({
cfg: cfg as OpenClawConfig,
cfg: cfg,
defaultProvider: "google",
defaultModel: "gemini-pro",
});
@@ -779,7 +771,7 @@ describe("model-selection", () => {
};
const result = resolveConfiguredModelRef({
cfg: cfg as OpenClawConfig,
cfg: cfg,
defaultProvider: "google",
defaultModel: "gemini-pro",
});
@@ -832,7 +824,7 @@ describe("model-selection", () => {
it("should use default provider/model if config is empty", () => {
const cfg: Partial<OpenClawConfig> = {};
const result = resolveConfiguredModelRef({
cfg: cfg as OpenClawConfig,
cfg: cfg,
defaultProvider: "openai",
defaultModel: "gpt-4",
});
@@ -912,7 +904,7 @@ describe("model-selection", () => {
};
const result = resolveConfiguredModelRef({
cfg: cfg as OpenClawConfig,
cfg: cfg,
defaultProvider: "openai",
defaultModel: "gpt-5.4",
});

View File

@@ -86,10 +86,6 @@ export {
normalizeProviderIdForAuth,
};
/**
 * Legacy compatibility stub: CLI inference backends were removed, so no
 * provider id ever qualifies as a CLI provider. Both parameters are accepted
 * but ignored so existing call sites keep compiling.
 */
export function isCliProvider(_provider: string, _cfg?: OpenClawConfig): boolean {
  return false;
}
function normalizeProviderModelId(provider: string, model: string): string {
const staticModelId = normalizeStaticProviderModelId(provider, model);
return (

View File

@@ -27,10 +27,6 @@ vi.mock("../../agents/model-fallback.js", () => ({
Array.isArray((err as { attempts?: unknown[] }).attempts),
}));
vi.mock("../../agents/model-selection.js", () => ({
isCliProvider: () => false,
}));
vi.mock("../../agents/bootstrap-budget.js", () => ({
resolveBootstrapWarningSignaturesSeen: () => [],
}));

View File

@@ -4,7 +4,6 @@ import type { AgentMessage } from "@mariozechner/pi-agent-core";
import { resolveBootstrapWarningSignaturesSeen } from "../../agents/bootstrap-budget.js";
import { estimateMessagesTokens } from "../../agents/compaction.js";
import { runWithModelFallback } from "../../agents/model-fallback.js";
import { isCliProvider } from "../../agents/model-selection.js";
import { compactEmbeddedPiSession, runEmbeddedPiAgent } from "../../agents/pi-embedded.js";
import { resolveSandboxConfigForAgent, resolveSandboxRuntimeStatus } from "../../agents/sandbox.js";
import {
@@ -324,8 +323,7 @@ export async function runPreflightCompactionIfNeeded(params: {
return entry ?? params.sessionEntry;
}
const isCli = isCliProvider(params.followupRun.run.provider, params.cfg);
if (params.isHeartbeat || isCli) {
if (params.isHeartbeat) {
return entry ?? params.sessionEntry;
}
@@ -376,7 +374,7 @@ export async function runPreflightCompactionIfNeeded(params: {
`preflightCompaction check: sessionKey=${params.sessionKey} ` +
`tokenCount=${tokenCountForCompaction ?? freshPersistedTokens ?? "undefined"} ` +
`contextWindow=${contextWindowTokens} threshold=${threshold} ` +
`isHeartbeat=${params.isHeartbeat} isCli=${isCli} ` +
`isHeartbeat=${params.isHeartbeat} ` +
`persistedFresh=${entry?.totalTokensFresh === true} ` +
`transcriptPromptTokens=${transcriptPromptTokens ?? "undefined"} ` +
`promptTokensEst=${promptTokenEstimate ?? "undefined"}`,
@@ -489,8 +487,7 @@ export async function runMemoryFlushIfNeeded(params: {
return sandboxCfg.workspaceAccess === "rw";
})();
const isCli = isCliProvider(params.followupRun.run.provider, params.cfg);
const canAttemptFlush = memoryFlushWritable && !params.isHeartbeat && !isCli;
const canAttemptFlush = memoryFlushWritable && !params.isHeartbeat;
let entry =
params.sessionEntry ??
(params.sessionKey ? params.sessionStore?.[params.sessionKey] : undefined);
@@ -624,7 +621,7 @@ export async function runMemoryFlushIfNeeded(params: {
`memoryFlush check: sessionKey=${params.sessionKey} ` +
`tokenCount=${tokenCountForFlush ?? "undefined"} ` +
`contextWindow=${contextWindowTokens} threshold=${flushThreshold} ` +
`isHeartbeat=${params.isHeartbeat} isCli=${isCli} memoryFlushWritable=${memoryFlushWritable} ` +
`isHeartbeat=${params.isHeartbeat} memoryFlushWritable=${memoryFlushWritable} ` +
`compactionCount=${entry?.compactionCount ?? 0} memoryFlushCompactionCount=${entry?.memoryFlushCompactionCount ?? "undefined"} ` +
`persistedPromptTokens=${persistedPromptTokens ?? "undefined"} persistedFresh=${entry?.totalTokensFresh === true} ` +
`promptTokensEst=${promptTokenEstimate ?? "undefined"} transcriptPromptTokens=${transcriptPromptTokens ?? "undefined"} transcriptOutputTokens=${transcriptOutputTokens ?? "undefined"} ` +
@@ -635,7 +632,6 @@ export async function runMemoryFlushIfNeeded(params: {
const shouldFlushMemory =
(memoryFlushWritable &&
!params.isHeartbeat &&
!isCli &&
shouldRunMemoryFlush({
entry,
tokenCount: tokenCountForFlush,

View File

@@ -1,4 +1,3 @@
import crypto from "node:crypto";
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
@@ -11,7 +10,6 @@ import {
import type { OpenClawConfig } from "../../config/config.js";
import type { SessionEntry } from "../../config/sessions.js";
import { loadSessionStore, saveSessionStore } from "../../config/sessions.js";
import { onAgentEvent } from "../../infra/agent-events.js";
import { peekSystemEvents, resetSystemEventsForTest } from "../../infra/system-events.js";
import {
clearMemoryPluginState,
@@ -26,12 +24,7 @@ import { createMockTypingController } from "./test-helpers.js";
function createCliBackendTestConfig() {
return {
agents: {
defaults: {
cliBackends: {
"codex-cli": {},
"google-gemini-cli": {},
},
},
defaults: {},
},
};
}
@@ -243,8 +236,6 @@ describe("runReplyAgent onAgentRunStart", () => {
const onAgentRunStart = vi.fn();
const result = await createRun({
provider: "codex-cli",
model: "gpt-5.4",
opts: { runId: "run-started", onAgentRunStart },
});
@@ -1506,100 +1497,6 @@ describe("runReplyAgent block streaming", () => {
});
});
// Exercises runReplyAgent's CLI-provider routing: a "codex-cli" provider must
// be handled by the embedded runner and emit start/end lifecycle events.
describe("runReplyAgent cli routing", () => {
  // Builds a minimal FollowupRun + session context and invokes runReplyAgent once.
  function createRun() {
    const typing = createMockTypingController();
    const sessionCtx = {
      Provider: "webchat",
      OriginatingTo: "session:1",
      AccountId: "primary",
      MessageSid: "msg",
    } as unknown as TemplateContext;
    const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings;
    const followupRun = {
      prompt: "hello",
      summaryLine: "hello",
      enqueuedAt: Date.now(),
      run: {
        sessionId: "session",
        sessionKey: "main",
        messageProvider: "webchat",
        sessionFile: "/tmp/session.jsonl",
        workspaceDir: "/tmp",
        config: { agents: { defaults: { cliBackends: { "codex-cli": {} } } } },
        skillsSnapshot: {},
        provider: "codex-cli",
        model: "gpt-5.4",
        thinkLevel: "low",
        verboseLevel: "off",
        elevatedLevel: "off",
        bashElevated: {
          enabled: false,
          allowed: false,
          defaultLevel: "off",
        },
        timeoutMs: 1_000,
        blockReplyBreak: "message_end",
      },
    } as unknown as FollowupRun;
    return runReplyAgent({
      commandBody: "hello",
      followupRun,
      queueKey: "main",
      resolvedQueue,
      shouldSteer: false,
      shouldFollowup: false,
      isActive: false,
      isStreaming: false,
      typing,
      sessionCtx,
      defaultModel: "codex-cli/gpt-5.4",
      resolvedVerboseLevel: "off",
      isNewSession: false,
      blockStreamingEnabled: false,
      resolvedBlockStreamingBreak: "message_end",
      shouldInjectGroupIntro: false,
      typingMode: "instant",
    });
  }
  it("uses the embedded runner for codex-cli providers", async () => {
    // Pin the run id so lifecycle events for exactly this run can be filtered.
    const runId = "00000000-0000-0000-0000-000000000001";
    const randomSpy = vi.spyOn(crypto, "randomUUID").mockReturnValue(runId);
    const lifecyclePhases: string[] = [];
    const unsubscribe = onAgentEvent((evt) => {
      if (evt.runId !== runId) {
        return;
      }
      if (evt.stream !== "lifecycle") {
        return;
      }
      const phase = evt.data?.phase;
      if (typeof phase === "string") {
        lifecyclePhases.push(phase);
      }
    });
    runEmbeddedPiAgentMock.mockResolvedValueOnce({
      payloads: [{ text: "ok" }],
      meta: {
        agentMeta: {
          provider: "codex-cli",
          model: "gpt-5.4",
        },
      },
    });
    const result = await createRun();
    unsubscribe();
    randomSpy.mockRestore();
    expect(runEmbeddedPiAgentMock).toHaveBeenCalledTimes(1);
    expect(lifecyclePhases).toEqual(["start", "end"]);
    expect(result).toMatchObject({ text: "ok" });
  });
});
describe("runReplyAgent messaging tool suppression", () => {
function createRun(
messageProvider = "slack",
@@ -2144,8 +2041,8 @@ describe("runReplyAgent fallback reasoning tags", () => {
return { payloads: [{ text: "ok" }], meta: {} };
});
runWithModelFallbackMock.mockImplementation(async ({ run }: RunWithModelFallbackParams) => ({
result: await run("google-gemini-cli", "gemini-3"),
provider: "google-gemini-cli",
result: await run("google", "gemini-3"),
provider: "google",
model: "gemini-3",
}));

View File

@@ -1753,7 +1753,7 @@ describe("runReplyAgent memory flush", () => {
const baseRun = createBaseRun({
storePath,
sessionEntry,
runOverrides: { provider: "codex-cli" },
runOverrides: { provider: "openai" },
});
await runReplyAgentWithBase({

View File

@@ -6,7 +6,6 @@ import "../cron/isolated-agent.mocks.js";
import { __testing as acpManagerTesting } from "../acp/control-plane/manager.js";
import { resolveAgentDir, resolveSessionAgentId } from "../agents/agent-scope.js";
import * as authProfilesModule from "../agents/auth-profiles.js";
import * as sessionStoreModule from "../agents/command/session-store.js";
import { resolveSession } from "../agents/command/session.js";
import { loadModelCatalog } from "../agents/model-catalog.js";
import * as modelSelectionModule from "../agents/model-selection.js";
@@ -104,7 +103,6 @@ async function loadFreshAgentCommandModulesForTest() {
vi.resetModules();
const runEmbeddedPiAgentMock = vi.fn();
const loadModelCatalogMock = vi.fn();
const isCliProviderMock = vi.fn(() => false);
vi.doMock("../agents/pi-embedded.js", () => ({
abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
runEmbeddedPiAgent: runEmbeddedPiAgentMock,
@@ -113,15 +111,6 @@ async function loadFreshAgentCommandModulesForTest() {
vi.doMock("../agents/model-catalog.js", () => ({
loadModelCatalog: loadModelCatalogMock,
}));
vi.doMock("../agents/model-selection.js", async () => {
const actual = await vi.importActual<typeof import("../agents/model-selection.js")>(
"../agents/model-selection.js",
);
return {
...actual,
isCliProvider: isCliProviderMock,
};
});
const [agentModule, configModuleFresh, commandSecretGatewayModuleFresh] = await Promise.all([
import("./agent.js"),
import("../config/config.js"),
@@ -133,7 +122,6 @@ async function loadFreshAgentCommandModulesForTest() {
commandSecretGatewayModuleFresh,
runEmbeddedPiAgentMock,
loadModelCatalogMock,
isCliProviderMock,
};
}
@@ -336,7 +324,6 @@ beforeEach(() => {
configModule.clearRuntimeConfigSnapshot();
vi.mocked(runEmbeddedPiAgent).mockResolvedValue(createDefaultAgentResult());
vi.mocked(loadModelCatalog).mockResolvedValue([]);
vi.mocked(modelSelectionModule.isCliProvider).mockImplementation(() => false);
readConfigFileSnapshotForWriteSpy.mockResolvedValue({
snapshot: { valid: false, resolved: {} as OpenClawConfig },
writeOptions: {},
@@ -352,7 +339,6 @@ describe("agentCommand", () => {
commandSecretGatewayModuleFresh,
runEmbeddedPiAgentMock,
loadModelCatalogMock,
isCliProviderMock,
} = await loadFreshAgentCommandModulesForTest();
const freshConfigSpy = vi.spyOn(configModuleFresh, "loadConfig");
const freshReadConfigFileSnapshotForWriteSpy = vi.spyOn(
@@ -365,7 +351,6 @@ describe("agentCommand", () => {
);
runEmbeddedPiAgentMock.mockResolvedValue(createDefaultAgentResult());
loadModelCatalogMock.mockResolvedValue([]);
isCliProviderMock.mockImplementation(() => false);
const store = path.join(home, "sessions.json");
const loadedConfig = {
@@ -565,36 +550,6 @@ describe("agentCommand", () => {
});
});
// Explicit --session-id runs (no named session) must be persisted under the
// synthetic "agent:main:explicit:<id>" session key.
it("persists explicit session-id-only runs with the synthetic session key", async () => {
  await withTempHome(async (home) => {
    const store = path.join(home, "sessions.json");
    mockConfig(home, store, {
      model: { primary: "codex-cli/gpt-5.4" },
      models: { "codex-cli/gpt-5.4": {} },
    });
    // The embedded run reports its own CLI session id and provider/model.
    vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
      payloads: [{ text: "ok" }],
      meta: {
        durationMs: 5,
        agentMeta: {
          sessionId: "codex-cli-session-1",
          provider: "codex-cli",
          model: "gpt-5.4",
        },
      },
    });
    await agentCommand({ message: "resume me", sessionId: "explicit-session-123" }, runtime);
    expect(vi.mocked(sessionStoreModule.updateSessionStoreAfterAgentRun)).toHaveBeenCalledWith(
      expect.objectContaining({
        sessionId: "explicit-session-123",
        sessionKey: "agent:main:explicit:explicit-session-123",
      }),
    );
  });
});
it("uses the resumed session agent scope when sessionId resolves to another agent store", async () => {
await withCrossAgentResumeFixture(async ({ sessionId, sessionKey, cfg }) => {
const resolution = resolveSession({ cfg, sessionId });

View File

@@ -3,7 +3,6 @@ import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { describe, expect, it } from "vitest";
import { resolveSession } from "../../agents/command/session.js";
import type { SessionEntry } from "../../config/sessions.js";
import { loadSessionStore } from "../../config/sessions.js";
import { updateSessionStoreAfterAgentRun } from "./session-store.js";
@@ -125,62 +124,4 @@ describe("updateSessionStoreAfterAgentRun", () => {
"once",
);
});
// Round-trip check: an explicit session-id-only run stores the runtime
// provider/model, and a later resolveSession for the same id reloads them.
it("stores and reloads the runtime model for explicit session-id-only runs", async () => {
  const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-session-store-"));
  const storePath = path.join(dir, "sessions.json");
  const cfg = {
    session: {
      store: storePath,
      mainKey: "main",
    },
    agents: {
      defaults: {
        cliBackends: {
          "codex-cli": {},
        },
      },
    },
  } as never;
  const first = resolveSession({
    cfg,
    sessionId: "explicit-session-123",
  });
  expect(first.sessionKey).toBe("agent:main:explicit:explicit-session-123");
  await updateSessionStoreAfterAgentRun({
    cfg,
    sessionId: first.sessionId,
    sessionKey: first.sessionKey!,
    storePath: first.storePath,
    sessionStore: first.sessionStore!,
    defaultProvider: "codex-cli",
    defaultModel: "gpt-5.4",
    result: {
      payloads: [],
      meta: {
        agentMeta: {
          provider: "codex-cli",
          model: "gpt-5.4",
          sessionId: "codex-cli-session-1",
        },
      },
    } as never,
  });
  // Resolving the same explicit id again must hit the same key and see the
  // provider/model captured from the completed run.
  const second = resolveSession({
    cfg,
    sessionId: "explicit-session-123",
  });
  expect(second.sessionKey).toBe(first.sessionKey);
  expect(second.sessionEntry?.modelProvider).toBe("codex-cli");
  expect(second.sessionEntry?.model).toBe("gpt-5.4");
  // skipCache forces a re-read from disk to prove actual persistence.
  const persisted = loadSessionStore(storePath, { skipCache: true })[first.sessionKey!];
  expect(persisted?.modelProvider).toBe("codex-cli");
  expect(persisted?.model).toBe("gpt-5.4");
});
});

View File

@@ -53,10 +53,7 @@ describe("doctor preview warnings", () => {
doctorFixCommand: "openclaw doctor --fix",
});
expect(warnings).toEqual([
expect.stringContaining("Telegram allowFrom contains 1 non-numeric entries"),
expect.stringContaining('channels.signal.allowFrom: set to ["*"]'),
]);
expect(warnings).toEqual([expect.stringContaining('channels.signal.allowFrom: set to ["*"]')]);
});
it("sanitizes empty-allowlist warning paths before returning preview output", async () => {

View File

@@ -18,7 +18,6 @@ import {
import { resolveEnvApiKey } from "../../agents/model-auth.js";
import {
buildModelAliasIndex,
isCliProvider,
normalizeProviderId,
parseModelRef,
resolveConfiguredModelRef,
@@ -191,7 +190,6 @@ export async function modelsStatusCommand(
const providerAuthMap = new Map(providerAuth.map((entry) => [entry.provider, entry]));
const missingProvidersInUse = Array.from(providersInUse)
.filter((provider) => !providerAuthMap.has(provider))
.filter((provider) => !isCliProvider(provider, cfg))
.toSorted((a, b) => a.localeCompare(b));
const probeProfileIds = (() => {

View File

@@ -295,42 +295,6 @@ describe("modelsStatusCommand auth overview", () => {
);
});
// CLI backends must never show up in auth.missingProvidersInUse, even when
// no API key is resolvable from the environment.
it("does not report cli backends as missing auth", async () => {
  const localRuntime = createRuntime();
  // Capture current mock implementations so they can be restored in finally.
  const originalLoadConfig = mocks.loadConfig.getMockImplementation();
  const originalEnvImpl = mocks.resolveEnvApiKey.getMockImplementation();
  mocks.loadConfig.mockReturnValue({
    agents: {
      defaults: {
        model: { primary: "codex-cli/gpt-5.4", fallbacks: [] },
        models: { "codex-cli/gpt-5.4": {} },
        cliBackends: { "codex-cli": {} },
      },
    },
    models: { providers: {} },
    env: { shellEnv: { enabled: true } },
  });
  // Simulate "no API key anywhere".
  mocks.resolveEnvApiKey.mockImplementation(() => null);
  try {
    await modelsStatusCommand({ json: true }, localRuntime as never);
    const payload = JSON.parse(String((localRuntime.log as Mock).mock.calls[0]?.[0]));
    expect(payload.defaultModel).toBe("codex-cli/gpt-5.4");
    expect(payload.auth.missingProvidersInUse).toEqual([]);
  } finally {
    // Restore whatever implementations were active before this test.
    if (originalLoadConfig) {
      mocks.loadConfig.mockImplementation(originalLoadConfig);
    }
    if (originalEnvImpl) {
      mocks.resolveEnvApiKey.mockImplementation(originalEnvImpl);
    } else if (defaultResolveEnvApiKeyImpl) {
      mocks.resolveEnvApiKey.mockImplementation(defaultResolveEnvApiKeyImpl);
    } else {
      mocks.resolveEnvApiKey.mockImplementation(() => null);
    }
  }
});
it("dedupes alias and canonical provider ids in auth provider summaries", async () => {
const localRuntime = createRuntime();
const originalLoadConfig = mocks.loadConfig.getMockImplementation();

View File

@@ -169,7 +169,6 @@ vi.mock("../plugins/manifest-registry.js", () => {
contracts: {
webSearchProviders: ["brave"],
},
cliBackends: [],
skills: [],
hooks: [],
rootDir: "/tmp/plugins/brave",
@@ -195,7 +194,6 @@ vi.mock("../plugins/manifest-registry.js", () => {
contracts: {
webSearchProviders: [id],
},
cliBackends: [],
skills: [],
hooks: [],
rootDir: `/tmp/plugins/${id}`,

View File

@@ -415,41 +415,6 @@ describe("config io write", () => {
});
});
// Writing unrelated config (token auth) must not expand "${VAR}" references
// that live elsewhere in the persisted file.
it("preserves env var references when writing", async () => {
  await withSuiteHome(async (home) => {
    const { configPath, io, snapshot } = await writeConfigAndCreateIo({
      home,
      env: { OPENAI_API_KEY: "sk-secret" } as NodeJS.ProcessEnv,
      initialConfig: {
        agents: {
          defaults: {
            cliBackends: {
              codex: {
                command: "codex",
                env: {
                  OPENAI_API_KEY: "${OPENAI_API_KEY}",
                },
              },
            },
          },
        },
        gateway: { port: 18789 },
      },
    });
    const persisted = (await writeTokenAuthAndReadConfig({ io, snapshot, configPath })) as {
      agents: { defaults: { cliBackends: { codex: { env: { OPENAI_API_KEY: string } } } } };
      gateway: { port: number; auth: { mode: string } };
    };
    // The reference string — not the resolved secret — must be on disk.
    expect(persisted.agents.defaults.cliBackends.codex.env.OPENAI_API_KEY).toBe(
      "${OPENAI_API_KEY}",
    );
    expect(persisted.gateway).toEqual({
      port: 18789,
      auth: { mode: "token" },
    });
  });
});
it("does not leak channel plugin AJV defaults into persisted config (issue #56772)", async () => {
// Regression test for #56772. Mock the BlueBubbles channel metadata so
// read-time AJV validation injects the same default that triggered the
@@ -558,91 +523,6 @@ describe("config io write", () => {
});
});
// Appending to an array containing "${VAR}" entries must keep the raw
// references for existing items while persisting the new item.
it("keeps env refs in arrays when appending entries", async () => {
  await withSuiteHome(async (home) => {
    const configPath = path.join(home, ".openclaw", "openclaw.json");
    await fs.mkdir(path.dirname(configPath), { recursive: true });
    await fs.writeFile(
      configPath,
      JSON.stringify(
        {
          agents: {
            defaults: {
              cliBackends: {
                codex: {
                  command: "codex",
                  args: ["${DISCORD_USER_ID}", "123"],
                },
              },
            },
          },
        },
        null,
        2,
      ),
      "utf-8",
    );
    // Env resolves the variable at read time; write must still keep the ref.
    const io = createConfigIO({
      env: { DISCORD_USER_ID: "999" } as NodeJS.ProcessEnv,
      homedir: () => home,
      logger: silentLogger,
    });
    const snapshot = await io.readConfigFileSnapshot();
    expect(snapshot.valid).toBe(true);
    const next = structuredClone(snapshot.config) as {
      agents?: {
        defaults?: {
          cliBackends?: Record<
            string,
            {
              command?: string;
              args?: string[];
            }
          >;
        };
      };
    };
    const codexBackend = next.agents?.defaults?.cliBackends?.codex;
    const args = Array.isArray(codexBackend?.args) ? codexBackend?.args : [];
    // Rebuild the nested structure immutably, appending one argument.
    next.agents = {
      ...next.agents,
      defaults: {
        ...next.agents?.defaults,
        cliBackends: {
          ...next.agents?.defaults?.cliBackends,
          codex: {
            ...codexBackend,
            command: typeof codexBackend?.command === "string" ? codexBackend.command : "codex",
            args: [...args, "456"],
          },
        },
      },
    };
    await io.writeConfigFile(next as OpenClawConfig);
    const persisted = JSON.parse(await fs.readFile(configPath, "utf-8")) as {
      agents: {
        defaults: {
          cliBackends: {
            codex: {
              args: string[];
            };
          };
        };
      };
    };
    // "${DISCORD_USER_ID}" stays a literal reference; "456" was appended.
    expect(persisted.agents.defaults.cliBackends.codex.args).toEqual([
      "${DISCORD_USER_ID}",
      "123",
      "456",
    ]);
  });
});
it("logs an overwrite audit entry when replacing an existing config file", async () => {
await withSuiteHome(async (home) => {
const warn = vi.fn();
@@ -759,83 +639,4 @@ describe("config io write", () => {
expect(last.watchCommand).toBe("gateway --force");
});
});
// Legacy nested "allow" aliases in channel config must not block unrelated
// writes; the persisted output carries the same values under "enabled" and
// drops the old "allow" key.
it("accepts unrelated writes when the file still contains legacy nested allow aliases", async () => {
  await withSuiteHome(async (home) => {
    const { configPath, io, snapshot } = await writeConfigAndCreateIo({
      home,
      initialConfig: {
        channels: {
          slack: {
            channels: {
              ops: {
                allow: false,
              },
            },
          },
          googlechat: {
            groups: {
              "spaces/aaa": {
                allow: true,
              },
            },
          },
          discord: {
            guilds: {
              "100": {
                channels: {
                  general: {
                    allow: false,
                  },
                },
              },
            },
          },
        },
      },
    });
    // The unrelated write: just toggle gateway auth.
    const next = structuredClone(snapshot.config);
    next.gateway = {
      ...next.gateway,
      auth: { mode: "token" },
    };
    await io.writeConfigFile(next);
    const persisted = JSON.parse(await fs.readFile(configPath, "utf-8")) as {
      channels?: Record<string, unknown>;
      gateway?: Record<string, unknown>;
    };
    expect(persisted.gateway).toEqual({
      auth: { mode: "token" },
    });
    // Each legacy "allow" value survives under "enabled"...
    expect(
      (
        (persisted.channels?.slack as { channels?: Record<string, unknown> } | undefined)
          ?.channels?.ops as Record<string, unknown> | undefined
      )?.enabled,
    ).toBe(false);
    expect(
      (
        (persisted.channels?.googlechat as { groups?: Record<string, unknown> } | undefined)
          ?.groups?.["spaces/aaa"] as Record<string, unknown> | undefined
      )?.enabled,
    ).toBe(true);
    expect(
      (
        (
          (persisted.channels?.discord as { guilds?: Record<string, unknown> } | undefined)
            ?.guilds?.["100"] as { channels?: Record<string, unknown> } | undefined
        )?.channels?.general as Record<string, unknown> | undefined
      )?.enabled,
    ).toBe(false);
    // ...and the old "allow" key itself is gone.
    expect(
      (
        (persisted.channels?.slack as { channels?: Record<string, unknown> } | undefined)
          ?.channels?.ops as Record<string, unknown> | undefined
      )?.allow,
    ).toBeUndefined();
  });
});
});

View File

@@ -15,7 +15,6 @@ function makeRegistry(
channels: [],
providers: [],
modelSupport: plugin.modelSupport,
cliBackends: [],
skills: [],
hooks: [],
origin: "config" as const,

View File

@@ -1 +1 @@
export { isCliProvider, resolveThinkingDefault } from "../../agents/model-selection.js";
export { resolveThinkingDefault } from "../../agents/model-selection.js";

View File

@@ -13,7 +13,6 @@ export { DEFAULT_CONTEXT_TOKENS, DEFAULT_MODEL, DEFAULT_PROVIDER } from "../../a
export { loadModelCatalog } from "../../agents/model-catalog.js";
export {
getModelRefStatus,
isCliProvider,
normalizeModelSelection,
resolveAllowedModelRef,
resolveConfiguredModelRef,

View File

@@ -6,8 +6,6 @@ import {
} from "./run.suite-helpers.js";
import {
buildWorkspaceSkillSnapshotMock,
getCliSessionIdMock,
isCliProviderMock,
lookupContextTokensMock,
loadRunCronIsolatedAgentTurn,
logWarnMock,
@@ -17,7 +15,6 @@ import {
resolveAgentSkillsFilterMock,
resolveAllowedModelRefMock,
resolveCronSessionMock,
runCliAgentMock,
runWithModelFallbackMock,
} from "./run.test-harness.js";
@@ -44,15 +41,6 @@ describe("runCronIsolatedAgentTurn — skill filter", () => {
expect(model?.fallbacks).toEqual(params.fallbacks);
}
// Makes runWithModelFallback actually invoke its run callback once with the
// codex-cli provider/model, so the CLI execution path runs inside the test.
function mockCliFallbackInvocation() {
  runWithModelFallbackMock.mockImplementationOnce(
    async (params: { run: (provider: string, model: string) => Promise<unknown> }) => {
      const result = await params.run("codex-cli", "gpt-5.4");
      return { result, provider: "codex-cli", model: "gpt-5.4", attempts: [] };
    },
  );
}
it("passes agent-level skillFilter to buildWorkspaceSkillSnapshot", async () => {
resolveAgentSkillsFilterMock.mockReturnValue(["meme-factory", "weather"]);
@@ -158,7 +146,7 @@ describe("runCronIsolatedAgentTurn — skill filter", () => {
describe("model fallbacks", () => {
const defaultFallbacks = [
"anthropic/claude-opus-4-6",
"google-gemini-cli/gemini-3-pro-preview",
"google/gemini-3-pro-preview",
"nvidia/deepseek-ai/deepseek-v3.2",
];
@@ -256,74 +244,6 @@ describe("runCronIsolatedAgentTurn — skill filter", () => {
});
});
describe("CLI session handoff (issue #29774)", () => {
// Fresh isolated runs must not resume a previously stored CLI session id.
it("does not pass stored cliSessionId on fresh isolated runs (isNewSession=true)", async () => {
  // Simulate a persisted CLI session ID from a previous run.
  getCliSessionIdMock.mockReturnValue("prev-cli-session-abc");
  isCliProviderMock.mockReturnValue(true);
  runCliAgentMock.mockResolvedValue({
    payloads: [{ text: "output" }],
    meta: { agentMeta: { sessionId: "new-cli-session-xyz", usage: { input: 5, output: 10 } } },
  });
  // Make runWithModelFallback invoke the run callback so the CLI path executes.
  mockCliFallbackInvocation();
  resolveCronSessionMock.mockReturnValue({
    storePath: "/tmp/store.json",
    store: {},
    sessionEntry: {
      sessionId: "test-session-fresh",
      updatedAt: 0,
      systemSent: false,
      skillsSnapshot: undefined,
      // A stored CLI session ID that should NOT be reused on fresh runs.
      cliSessionIds: { "codex-cli": "prev-cli-session-abc" },
    },
    systemSent: false,
    isNewSession: true,
  });
  await runCronIsolatedAgentTurn(makeSkillParams());
  expect(runCliAgentMock).toHaveBeenCalledOnce();
  // Fresh session: cliSessionId must be undefined, not the stored value.
  expect(runCliAgentMock.mock.calls[0][0]).toHaveProperty("cliSessionId", undefined);
});
it("reuses stored cliSessionId on continuation runs (isNewSession=false)", async () => {
getCliSessionIdMock.mockReturnValue("existing-cli-session-def");
isCliProviderMock.mockReturnValue(true);
runCliAgentMock.mockResolvedValue({
payloads: [{ text: "output" }],
meta: {
agentMeta: { sessionId: "existing-cli-session-def", usage: { input: 5, output: 10 } },
},
});
mockCliFallbackInvocation();
resolveCronSessionMock.mockReturnValue({
storePath: "/tmp/store.json",
store: {},
sessionEntry: {
sessionId: "test-session-continuation",
updatedAt: 0,
systemSent: false,
skillsSnapshot: undefined,
cliSessionIds: { "codex-cli": "existing-cli-session-def" },
},
systemSent: false,
isNewSession: false,
});
await runCronIsolatedAgentTurn(makeSkillParams());
expect(runCliAgentMock).toHaveBeenCalledOnce();
// Continuation: cliSessionId should be passed through for session resume.
expect(runCliAgentMock.mock.calls[0][0]).toHaveProperty(
"cliSessionId",
"existing-cli-session-def",
);
});
});
describe("context token fallback", () => {
it("preserves existing session contextTokens when no configured or cached model window is loaded", async () => {
const session = makeCronSession({

View File

@@ -44,16 +44,13 @@ export const resolveEffectiveModelFallbacksMock = createMock();
export const resolveAgentModelFallbacksOverrideMock = createMock();
export const resolveAgentSkillsFilterMock = createMock();
export const getModelRefStatusMock = createMock();
export const isCliProviderMock = createMock();
export const resolveAllowedModelRefMock = createMock();
export const resolveConfiguredModelRefMock = createMock();
export const resolveHooksGmailModelMock = createMock();
export const resolveThinkingDefaultMock = createMock();
export const runWithModelFallbackMock = createMock();
export const runEmbeddedPiAgentMock = createMock();
export const runCliAgentMock = createMock();
export const lookupContextTokensMock = createMock();
export const getCliSessionIdMock = createMock();
export const updateSessionStoreMock = createMock();
export const resolveCronSessionMock = createMock();
export const logWarnMock = createMock();
@@ -103,7 +100,6 @@ vi.mock("./run.runtime.js", () => ({
DEFAULT_PROVIDER: "openai",
loadModelCatalog: loadModelCatalogMock,
getModelRefStatus: getModelRefStatusMock,
isCliProvider: isCliProviderMock,
normalizeModelSelection: normalizeModelSelectionForTest,
resolveAllowedModelRef: resolveAllowedModelRefMock,
resolveConfiguredModelRef: resolveConfiguredModelRefMock,
@@ -133,13 +129,10 @@ vi.mock("./run.runtime.js", () => ({
vi.mock("./run-execution.runtime.js", () => ({
resolveEffectiveModelFallbacks: resolveEffectiveModelFallbacksMock,
resolveBootstrapWarningSignaturesSeen: resolveBootstrapWarningSignaturesSeenMock,
getCliSessionId: getCliSessionIdMock,
runCliAgent: runCliAgentMock,
resolveFastModeState: resolveFastModeStateMock,
resolveNestedAgentLane: resolveNestedAgentLaneMock,
LiveSessionModelSwitchError,
runWithModelFallback: runWithModelFallbackMock,
isCliProvider: isCliProviderMock,
runEmbeddedPiAgent: runEmbeddedPiAgentMock,
countActiveDescendantRuns: countActiveDescendantRunsMock,
listDescendantRunsForRequester: listDescendantRunsForRequesterMock,
@@ -149,10 +142,6 @@ vi.mock("./run-execution.runtime.js", () => ({
logWarn: (...args: unknown[]) => logWarnMock(...args),
}));
vi.mock("../../agents/cli-runner.runtime.js", () => ({
setCliSessionId: vi.fn(),
}));
vi.mock("../../config/sessions/store.runtime.js", () => ({
updateSessionStore: updateSessionStoreMock,
}));
@@ -275,7 +264,6 @@ function resetRunConfigMocks(): void {
}
function resetRunExecutionMocks(): void {
isCliProviderMock.mockReturnValue(false);
resolveBootstrapWarningSignaturesSeenMock.mockReturnValue(new Set());
resolveFastModeStateMock.mockImplementation((params) => resolveFastModeStateImpl(params));
resolveNestedAgentLaneMock.mockReturnValue(undefined);
@@ -286,8 +274,6 @@ function resetRunExecutionMocks(): void {
runWithModelFallbackMock.mockResolvedValue(makeDefaultModelFallbackResult());
runEmbeddedPiAgentMock.mockReset();
runEmbeddedPiAgentMock.mockResolvedValue(makeDefaultEmbeddedResult());
runCliAgentMock.mockReset();
getCliSessionIdMock.mockReturnValue(undefined);
countActiveDescendantRunsMock.mockReset();
countActiveDescendantRunsMock.mockReturnValue(0);
listDescendantRunsForRequesterMock.mockReset();

View File

@@ -1,508 +0,0 @@
import { spawn } from "node:child_process";
import { randomBytes, randomUUID } from "node:crypto";
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { describe, expect, it } from "vitest";
import { isLiveTestEnabled } from "../agents/live-test-helpers.js";
import { parseModelRef } from "../agents/model-selection.js";
import { clearRuntimeConfigSnapshot, loadConfig, type OpenClawConfig } from "../config/config.js";
import { isTruthyEnvValue } from "../infra/env.js";
import { getFreePortBlockWithPermissionFallback } from "../test-utils/ports.js";
import { GATEWAY_CLIENT_NAMES } from "../utils/message-channel.js";
import { GatewayClient } from "./client.js";
import { renderCatNoncePngBase64 } from "./live-image-probe.js";
import { startGatewayServer } from "./server.js";
import { extractPayloadText } from "./test-helpers.agent-results.js";
const LIVE = isLiveTestEnabled();
const CLI_LIVE = isTruthyEnvValue(process.env.OPENCLAW_LIVE_CLI_BACKEND);
const CLI_IMAGE = isTruthyEnvValue(process.env.OPENCLAW_LIVE_CLI_BACKEND_IMAGE_PROBE);
const CLI_RESUME = isTruthyEnvValue(process.env.OPENCLAW_LIVE_CLI_BACKEND_RESUME_PROBE);
const describeLive = LIVE && CLI_LIVE ? describe : describe.skip;
const DEFAULT_MODEL = "codex-cli/gpt-5.4";
const BOOTSTRAP_LIVE_MODEL = process.env.OPENCLAW_LIVE_CLI_BACKEND_MODEL ?? DEFAULT_MODEL;
const describeBootstrapLive =
LIVE && CLI_LIVE && BOOTSTRAP_LIVE_MODEL.startsWith("codex-cli/") ? describe : describe.skip;
const DEFAULT_CODEX_ARGS = [
"exec",
"--json",
"--color",
"never",
"--sandbox",
"read-only",
"--skip-git-repo-check",
];
function randomImageProbeCode(len = 6): string {
// Chosen to avoid common OCR confusions in our 5x7 bitmap font.
// Notably: 0↔8, B↔8, 6↔9, 3↔B, D↔0.
// Must stay within the glyph set in `src/gateway/live-image-probe.ts`.
const alphabet = "24567ACEF";
const bytes = randomBytes(len);
let out = "";
for (let i = 0; i < len; i += 1) {
out += alphabet[bytes[i] % alphabet.length];
}
return out;
}
function editDistance(a: string, b: string): number {
if (a === b) {
return 0;
}
const aLen = a.length;
const bLen = b.length;
if (aLen === 0) {
return bLen;
}
if (bLen === 0) {
return aLen;
}
let prev = Array.from({ length: bLen + 1 }, (_v, idx) => idx);
let curr = Array.from({ length: bLen + 1 }, () => 0);
for (let i = 1; i <= aLen; i += 1) {
curr[0] = i;
const aCh = a.charCodeAt(i - 1);
for (let j = 1; j <= bLen; j += 1) {
const cost = aCh === b.charCodeAt(j - 1) ? 0 : 1;
curr[j] = Math.min(
prev[j] + 1, // delete
curr[j - 1] + 1, // insert
prev[j - 1] + cost, // substitute
);
}
[prev, curr] = [curr, prev];
}
return prev[bLen] ?? Number.POSITIVE_INFINITY;
}
function parseJsonStringArray(name: string, raw?: string): string[] | undefined {
const trimmed = raw?.trim();
if (!trimmed) {
return undefined;
}
const parsed = JSON.parse(trimmed);
if (!Array.isArray(parsed) || !parsed.every((entry) => typeof entry === "string")) {
throw new Error(`${name} must be a JSON array of strings.`);
}
return parsed;
}
function parseImageMode(raw?: string): "list" | "repeat" | undefined {
const trimmed = raw?.trim();
if (!trimmed) {
return undefined;
}
if (trimmed === "list" || trimmed === "repeat") {
return trimmed;
}
throw new Error("OPENCLAW_LIVE_CLI_BACKEND_IMAGE_MODE must be 'list' or 'repeat'.");
}
async function getFreeGatewayPort(): Promise<number> {
return await getFreePortBlockWithPermissionFallback({
offsets: [0, 1, 2, 4],
fallbackBase: 40_000,
});
}
async function connectClient(params: { url: string; token: string }) {
return await new Promise<GatewayClient>((resolve, reject) => {
let done = false;
const finish = (result: { client?: GatewayClient; error?: Error }) => {
if (done) {
return;
}
done = true;
clearTimeout(connectTimeout);
if (result.error) {
reject(result.error);
return;
}
resolve(result.client as GatewayClient);
};
const failWithClose = (code: number, reason: string) =>
finish({ error: new Error(`gateway closed during connect (${code}): ${reason}`) });
const client = new GatewayClient({
url: params.url,
token: params.token,
clientName: GATEWAY_CLIENT_NAMES.TEST,
clientVersion: "dev",
mode: "test",
onHelloOk: () => finish({ client }),
onConnectError: (error) => finish({ error }),
onClose: failWithClose,
});
const connectTimeout = setTimeout(
() => finish({ error: new Error("gateway connect timeout") }),
10_000,
);
connectTimeout.unref();
client.start();
});
}
async function runGatewayCliBootstrapLiveProbe(): Promise<{
ok: boolean;
text: string;
expectedText: string;
systemPromptReport: {
injectedWorkspaceFiles?: Array<{ name?: string }>;
} | null;
}> {
return await new Promise((resolve, reject) => {
const env = { ...process.env };
delete env.VITEST;
const child = spawn(
"pnpm",
["exec", "tsx", path.join("scripts", "gateway-cli-bootstrap-live-probe.ts")],
{
cwd: process.cwd(),
env,
stdio: ["ignore", "pipe", "pipe"],
},
);
let stdout = "";
let stderr = "";
const timeout = setTimeout(() => {
child.kill("SIGTERM");
reject(new Error(`bootstrap probe timed out\nstdout:\n${stdout}\nstderr:\n${stderr}`));
}, 120_000);
timeout.unref();
child.stdout.setEncoding("utf8");
child.stderr.setEncoding("utf8");
child.stdout.on("data", (chunk: string) => {
stdout += chunk;
});
child.stderr.on("data", (chunk: string) => {
stderr += chunk;
});
child.on("error", (error) => {
clearTimeout(timeout);
reject(error);
});
child.on("close", (code) => {
clearTimeout(timeout);
if (code !== 0) {
reject(
new Error(`bootstrap probe exit=${String(code)}\nstdout:\n${stdout}\nstderr:\n${stderr}`),
);
return;
}
const line = stdout
.trim()
.split(/\r?\n/)
.map((entry) => entry.trim())
.findLast((entry) => entry.startsWith("{") && entry.endsWith("}"));
if (!line) {
reject(
new Error(`bootstrap probe missing JSON result\nstdout:\n${stdout}\nstderr:\n${stderr}`),
);
return;
}
resolve(JSON.parse(line) as Awaited<ReturnType<typeof runGatewayCliBootstrapLiveProbe>>);
});
});
}
describeLive("gateway live (cli backend)", () => {
it("runs the agent pipeline against the local CLI backend", async () => {
const preservedEnv = new Set(
parseJsonStringArray(
"OPENCLAW_LIVE_CLI_BACKEND_PRESERVE_ENV",
process.env.OPENCLAW_LIVE_CLI_BACKEND_PRESERVE_ENV,
) ?? [],
);
clearRuntimeConfigSnapshot();
const previous = {
configPath: process.env.OPENCLAW_CONFIG_PATH,
token: process.env.OPENCLAW_GATEWAY_TOKEN,
skipChannels: process.env.OPENCLAW_SKIP_CHANNELS,
skipGmail: process.env.OPENCLAW_SKIP_GMAIL_WATCHER,
skipCron: process.env.OPENCLAW_SKIP_CRON,
skipCanvas: process.env.OPENCLAW_SKIP_CANVAS_HOST,
anthropicApiKey: process.env.ANTHROPIC_API_KEY,
anthropicApiKeyOld: process.env.ANTHROPIC_API_KEY_OLD,
};
process.env.OPENCLAW_SKIP_CHANNELS = "1";
process.env.OPENCLAW_SKIP_GMAIL_WATCHER = "1";
process.env.OPENCLAW_SKIP_CRON = "1";
process.env.OPENCLAW_SKIP_CANVAS_HOST = "1";
if (!preservedEnv.has("ANTHROPIC_API_KEY")) {
delete process.env.ANTHROPIC_API_KEY;
}
if (!preservedEnv.has("ANTHROPIC_API_KEY_OLD")) {
delete process.env.ANTHROPIC_API_KEY_OLD;
}
const token = `test-${randomUUID()}`;
process.env.OPENCLAW_GATEWAY_TOKEN = token;
const rawModel = process.env.OPENCLAW_LIVE_CLI_BACKEND_MODEL ?? DEFAULT_MODEL;
const parsed = parseModelRef(rawModel, "codex-cli");
if (!parsed) {
throw new Error(
`OPENCLAW_LIVE_CLI_BACKEND_MODEL must resolve to a CLI backend model. Got: ${rawModel}`,
);
}
const providerId = parsed.provider;
const modelKey = `${providerId}/${parsed.model}`;
const providerDefaults =
providerId === "codex-cli" ? { command: "codex", args: DEFAULT_CODEX_ARGS } : null;
const cliCommand = process.env.OPENCLAW_LIVE_CLI_BACKEND_COMMAND ?? providerDefaults?.command;
if (!cliCommand) {
throw new Error(
`OPENCLAW_LIVE_CLI_BACKEND_COMMAND is required for provider "${providerId}".`,
);
}
const baseCliArgs =
parseJsonStringArray(
"OPENCLAW_LIVE_CLI_BACKEND_ARGS",
process.env.OPENCLAW_LIVE_CLI_BACKEND_ARGS,
) ?? providerDefaults?.args;
if (!baseCliArgs || baseCliArgs.length === 0) {
throw new Error(`OPENCLAW_LIVE_CLI_BACKEND_ARGS is required for provider "${providerId}".`);
}
const cliClearEnv =
parseJsonStringArray(
"OPENCLAW_LIVE_CLI_BACKEND_CLEAR_ENV",
process.env.OPENCLAW_LIVE_CLI_BACKEND_CLEAR_ENV,
) ?? [];
const filteredCliClearEnv = cliClearEnv.filter((name) => !preservedEnv.has(name));
const preservedCliEnv = Object.fromEntries(
[...preservedEnv]
.map((name) => [name, process.env[name]])
.filter((entry): entry is [string, string] => typeof entry[1] === "string"),
);
const cliImageArg = process.env.OPENCLAW_LIVE_CLI_BACKEND_IMAGE_ARG?.trim() || undefined;
const cliImageMode = parseImageMode(process.env.OPENCLAW_LIVE_CLI_BACKEND_IMAGE_MODE);
if (cliImageMode && !cliImageArg) {
throw new Error(
"OPENCLAW_LIVE_CLI_BACKEND_IMAGE_MODE requires OPENCLAW_LIVE_CLI_BACKEND_IMAGE_ARG.",
);
}
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-live-cli-"));
const cliArgs = baseCliArgs;
const cfg = loadConfig();
const cfgWithCliBackends = cfg as OpenClawConfig & {
agents?: {
defaults?: {
cliBackends?: Record<string, Record<string, unknown>>;
};
};
};
const existingBackends = cfgWithCliBackends.agents?.defaults?.cliBackends ?? {};
const nextCfg = {
...cfg,
agents: {
...cfg.agents,
defaults: {
...cfg.agents?.defaults,
model: { primary: modelKey },
models: {
[modelKey]: {},
},
cliBackends: {
...existingBackends,
[providerId]: {
command: cliCommand,
args: cliArgs,
clearEnv: filteredCliClearEnv.length > 0 ? filteredCliClearEnv : undefined,
env: Object.keys(preservedCliEnv).length > 0 ? preservedCliEnv : undefined,
systemPromptWhen: "never",
...(cliImageArg ? { imageArg: cliImageArg, imageMode: cliImageMode } : {}),
},
},
sandbox: { mode: "off" },
},
},
};
const tempConfigPath = path.join(tempDir, "openclaw.json");
await fs.writeFile(tempConfigPath, `${JSON.stringify(nextCfg, null, 2)}\n`);
process.env.OPENCLAW_CONFIG_PATH = tempConfigPath;
const port = await getFreeGatewayPort();
const server = await startGatewayServer(port, {
bind: "loopback",
auth: { mode: "token", token },
controlUiEnabled: false,
});
const client = await connectClient({
url: `ws://127.0.0.1:${port}`,
token,
});
try {
const sessionKey = "agent:dev:live-cli-backend";
const runId = randomUUID();
const nonce = randomBytes(3).toString("hex").toUpperCase();
const message =
providerId === "codex-cli"
? `Please include the token CLI-BACKEND-${nonce} in your reply.`
: `Reply with exactly: CLI backend OK ${nonce}.`;
const payload = await client.request(
"agent",
{
sessionKey,
idempotencyKey: `idem-${runId}`,
message,
deliver: false,
},
{ expectFinal: true },
);
if (payload?.status !== "ok") {
throw new Error(`agent status=${String(payload?.status)}`);
}
const text = extractPayloadText(payload?.result);
if (providerId === "codex-cli") {
expect(text).toContain(`CLI-BACKEND-${nonce}`);
} else {
expect(text).toContain(`CLI backend OK ${nonce}.`);
}
if (CLI_RESUME) {
const runIdResume = randomUUID();
const resumeNonce = randomBytes(3).toString("hex").toUpperCase();
const resumeMessage =
providerId === "codex-cli"
? `Please include the token CLI-RESUME-${resumeNonce} in your reply.`
: `Reply with exactly: CLI backend RESUME OK ${resumeNonce}.`;
const resumePayload = await client.request(
"agent",
{
sessionKey,
idempotencyKey: `idem-${runIdResume}`,
message: resumeMessage,
deliver: false,
},
{ expectFinal: true },
);
if (resumePayload?.status !== "ok") {
throw new Error(`resume status=${String(resumePayload?.status)}`);
}
const resumeText = extractPayloadText(resumePayload?.result);
if (providerId === "codex-cli") {
expect(resumeText).toContain(`CLI-RESUME-${resumeNonce}`);
} else {
expect(resumeText).toContain(`CLI backend RESUME OK ${resumeNonce}.`);
}
}
if (CLI_IMAGE) {
// Shorter code => less OCR flake across providers, still tests image attachments end-to-end.
const imageCode = randomImageProbeCode();
const imageBase64 = renderCatNoncePngBase64(imageCode);
const runIdImage = randomUUID();
const imageProbe = await client.request(
"agent",
{
sessionKey,
idempotencyKey: `idem-${runIdImage}-image`,
message:
"Look at the attached image. Reply with exactly two tokens separated by a single space: " +
"(1) the animal shown or written in the image, lowercase; " +
"(2) the code printed in the image, uppercase. No extra text.",
attachments: [
{
mimeType: "image/png",
fileName: `probe-${runIdImage}.png`,
content: imageBase64,
},
],
deliver: false,
},
{ expectFinal: true },
);
if (imageProbe?.status !== "ok") {
throw new Error(`image probe failed: status=${String(imageProbe?.status)}`);
}
const imageText = extractPayloadText(imageProbe?.result);
if (!/\bcat\b/i.test(imageText)) {
throw new Error(`image probe missing 'cat': ${imageText}`);
}
const candidates = imageText.toUpperCase().match(/[A-Z0-9]{6,20}/g) ?? [];
const bestDistance = candidates.reduce((best, cand) => {
if (Math.abs(cand.length - imageCode.length) > 2) {
return best;
}
return Math.min(best, editDistance(cand, imageCode));
}, Number.POSITIVE_INFINITY);
if (!(bestDistance <= 5)) {
throw new Error(`image probe missing code (${imageCode}): ${imageText}`);
}
}
} finally {
clearRuntimeConfigSnapshot();
await client.stopAndWait();
await server.close();
await fs.rm(tempDir, { recursive: true, force: true });
if (previous.configPath === undefined) {
delete process.env.OPENCLAW_CONFIG_PATH;
} else {
process.env.OPENCLAW_CONFIG_PATH = previous.configPath;
}
if (previous.token === undefined) {
delete process.env.OPENCLAW_GATEWAY_TOKEN;
} else {
process.env.OPENCLAW_GATEWAY_TOKEN = previous.token;
}
if (previous.skipChannels === undefined) {
delete process.env.OPENCLAW_SKIP_CHANNELS;
} else {
process.env.OPENCLAW_SKIP_CHANNELS = previous.skipChannels;
}
if (previous.skipGmail === undefined) {
delete process.env.OPENCLAW_SKIP_GMAIL_WATCHER;
} else {
process.env.OPENCLAW_SKIP_GMAIL_WATCHER = previous.skipGmail;
}
if (previous.skipCron === undefined) {
delete process.env.OPENCLAW_SKIP_CRON;
} else {
process.env.OPENCLAW_SKIP_CRON = previous.skipCron;
}
if (previous.skipCanvas === undefined) {
delete process.env.OPENCLAW_SKIP_CANVAS_HOST;
} else {
process.env.OPENCLAW_SKIP_CANVAS_HOST = previous.skipCanvas;
}
if (previous.anthropicApiKey === undefined) {
delete process.env.ANTHROPIC_API_KEY;
} else {
process.env.ANTHROPIC_API_KEY = previous.anthropicApiKey;
}
if (previous.anthropicApiKeyOld === undefined) {
delete process.env.ANTHROPIC_API_KEY_OLD;
} else {
process.env.ANTHROPIC_API_KEY_OLD = previous.anthropicApiKeyOld;
}
}
}, 60_000);
});
describeBootstrapLive("gateway live (cli backend bootstrap context)", () => {
it("injects AGENTS, SOUL, IDENTITY, and USER files into the first CLI turn", async () => {
const result = await runGatewayCliBootstrapLiveProbe();
expect(result.ok).toBe(true);
expect(result.text).toBe(result.expectedText);
expect(
result.systemPromptReport?.injectedWorkspaceFiles?.map((entry) => entry.name) ?? [],
).toEqual(expect.arrayContaining(["AGENTS.md", "SOUL.md", "IDENTITY.md", "USER.md"]));
}, 60_000);
});

View File

@@ -23,7 +23,6 @@ function createManifestRegistryFixture() {
origin: "bundled",
enabledByDefault: undefined,
providers: [],
cliBackends: [],
},
{
id: "demo-other-channel",
@@ -31,7 +30,6 @@ function createManifestRegistryFixture() {
origin: "bundled",
enabledByDefault: undefined,
providers: [],
cliBackends: [],
},
{
id: "browser",
@@ -39,7 +37,6 @@ function createManifestRegistryFixture() {
origin: "bundled",
enabledByDefault: true,
providers: [],
cliBackends: [],
},
{
id: "demo-provider-plugin",
@@ -47,7 +44,6 @@ function createManifestRegistryFixture() {
origin: "bundled",
enabledByDefault: undefined,
providers: ["demo-provider"],
cliBackends: ["demo-cli"],
},
{
id: "voice-call",
@@ -55,7 +51,6 @@ function createManifestRegistryFixture() {
origin: "bundled",
enabledByDefault: undefined,
providers: [],
cliBackends: [],
},
{
id: "demo-global-sidecar",
@@ -63,7 +58,6 @@ function createManifestRegistryFixture() {
origin: "global",
enabledByDefault: undefined,
providers: [],
cliBackends: [],
},
],
diagnostics: [],

View File

@@ -274,12 +274,6 @@ describe("resolvePluginProviders", () => {
({ setActivePluginRegistry } = await import("./runtime.js"));
});
it("maps cli backend ids to owning plugin ids via manifests", () => {
setOwningProviderManifestPlugins();
expectOwningPluginIds("codex-cli", ["openai"]);
});
beforeEach(() => {
setActivePluginRegistry(createEmptyPluginRegistry());
resolveRuntimePluginRegistryMock.mockReset();

View File

@@ -662,7 +662,6 @@ describe("plugin status reports", () => {
});
expect(inspect.capabilities).toEqual([]);
});
it("builds compatibility warnings for legacy compatibility paths", () => {
setPluginLoadResult({
plugins: [

View File

@@ -25,7 +25,6 @@ export type PluginStatusReport = PluginRegistry & {
};
export type PluginCapabilityKind =
| "cli-backend"
| "text-inference"
| "speech"
| "realtime-transcription"