mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-06 05:10:44 +00:00
test: reduce core command hotspots
This commit is contained in:
@@ -1,7 +1,3 @@
|
||||
import { getAcpSessionManager } from "../acp/control-plane/manager.js";
|
||||
import { resolveAcpAgentPolicyError, resolveAcpDispatchPolicyError } from "../acp/policy.js";
|
||||
import { toAcpRuntimeError } from "../acp/runtime/errors.js";
|
||||
import { resolveAcpSessionCwd } from "../acp/runtime/session-identifiers.js";
|
||||
import {
|
||||
formatThinkingLevels,
|
||||
formatXHighModelHint,
|
||||
@@ -11,7 +7,7 @@ import {
|
||||
type VerboseLevel,
|
||||
} from "../auto-reply/thinking.js";
|
||||
import { formatCliCommand } from "../cli/command-format.js";
|
||||
import { type CliDeps, createDefaultDeps } from "../cli/deps.js";
|
||||
import type { CliDeps } from "../cli/deps.types.js";
|
||||
import type { SessionEntry } from "../config/sessions/types.js";
|
||||
import {
|
||||
clearAgentRunContext,
|
||||
@@ -20,7 +16,6 @@ import {
|
||||
} from "../infra/agent-events.js";
|
||||
import { formatErrorMessage } from "../infra/errors.js";
|
||||
import { buildOutboundSessionContext } from "../infra/outbound/session-context.js";
|
||||
import { getRemoteSkillEligibility } from "../infra/skills-remote.js";
|
||||
import { createSubsystemLogger } from "../logging/subsystem.js";
|
||||
import { normalizeAgentId } from "../routing/session-key.js";
|
||||
import { resolveAgentIdFromSessionKey } from "../routing/session-key.js";
|
||||
@@ -40,8 +35,8 @@ import {
|
||||
resolveAgentSkillsFilter,
|
||||
resolveAgentWorkspaceDir,
|
||||
} from "./agent-scope.js";
|
||||
import { ensureAuthProfileStore } from "./auth-profiles.js";
|
||||
import { clearSessionAuthProfileOverride } from "./auth-profiles/session-override.js";
|
||||
import { ensureAuthProfileStore } from "./auth-profiles/store.js";
|
||||
import {
|
||||
persistSessionEntry as persistSessionEntryBase,
|
||||
prependInternalEventContext,
|
||||
@@ -50,7 +45,6 @@ import { resolveAgentRunContext } from "./command/run-context.js";
|
||||
import { resolveSession } from "./command/session.js";
|
||||
import type { AgentCommandIngressOpts, AgentCommandOpts } from "./command/types.js";
|
||||
import { DEFAULT_MODEL, DEFAULT_PROVIDER } from "./defaults.js";
|
||||
import { canExecRequestNode } from "./exec-defaults.js";
|
||||
import { AGENT_LANE_SUBAGENT } from "./lanes.js";
|
||||
import { LiveSessionModelSwitchError } from "./live-model-switch.js";
|
||||
import { loadModelCatalog } from "./model-catalog.js";
|
||||
@@ -64,29 +58,66 @@ import {
|
||||
resolveDefaultModelForAgent,
|
||||
resolveThinkingDefault,
|
||||
} from "./model-selection.js";
|
||||
import { buildWorkspaceSkillSnapshot } from "./skills.js";
|
||||
import { matchesSkillFilter } from "./skills/filter.js";
|
||||
import { getSkillsSnapshotVersion, shouldRefreshSnapshotForVersion } from "./skills/refresh.js";
|
||||
import { normalizeSpawnedRunMetadata } from "./spawned-context.js";
|
||||
import { resolveAgentTimeoutMs } from "./timeout.js";
|
||||
import { ensureAgentWorkspace } from "./workspace.js";
|
||||
|
||||
const log = createSubsystemLogger("agents/agent-command");
|
||||
type AttemptExecutionRuntime = typeof import("./command/attempt-execution.runtime.js");
|
||||
type AcpManagerRuntime = typeof import("../acp/control-plane/manager.js");
|
||||
type AcpPolicyRuntime = typeof import("../acp/policy.js");
|
||||
type AcpRuntimeErrorsRuntime = typeof import("../acp/runtime/errors.js");
|
||||
type AcpSessionIdentifiersRuntime = typeof import("../acp/runtime/session-identifiers.js");
|
||||
type DeliveryRuntime = typeof import("./command/delivery.runtime.js");
|
||||
type SessionStoreRuntime = typeof import("./command/session-store.runtime.js");
|
||||
type TranscriptResolveRuntime = typeof import("../config/sessions/transcript-resolve.runtime.js");
|
||||
type CliDepsRuntime = typeof import("../cli/deps.js");
|
||||
type ExecDefaultsRuntime = typeof import("./exec-defaults.js");
|
||||
type SkillsRuntime = typeof import("./skills.js");
|
||||
type SkillsFilterRuntime = typeof import("./skills/filter.js");
|
||||
type SkillsRefreshStateRuntime = typeof import("./skills/refresh-state.js");
|
||||
type SkillsRemoteRuntime = typeof import("../infra/skills-remote.js");
|
||||
|
||||
let attemptExecutionRuntimePromise: Promise<AttemptExecutionRuntime> | undefined;
|
||||
let acpManagerRuntimePromise: Promise<AcpManagerRuntime> | undefined;
|
||||
let acpPolicyRuntimePromise: Promise<AcpPolicyRuntime> | undefined;
|
||||
let acpRuntimeErrorsRuntimePromise: Promise<AcpRuntimeErrorsRuntime> | undefined;
|
||||
let acpSessionIdentifiersRuntimePromise: Promise<AcpSessionIdentifiersRuntime> | undefined;
|
||||
let deliveryRuntimePromise: Promise<DeliveryRuntime> | undefined;
|
||||
let sessionStoreRuntimePromise: Promise<SessionStoreRuntime> | undefined;
|
||||
let transcriptResolveRuntimePromise: Promise<TranscriptResolveRuntime> | undefined;
|
||||
let cliDepsRuntimePromise: Promise<CliDepsRuntime> | undefined;
|
||||
let execDefaultsRuntimePromise: Promise<ExecDefaultsRuntime> | undefined;
|
||||
let skillsRuntimePromise: Promise<SkillsRuntime> | undefined;
|
||||
let skillsFilterRuntimePromise: Promise<SkillsFilterRuntime> | undefined;
|
||||
let skillsRefreshStateRuntimePromise: Promise<SkillsRefreshStateRuntime> | undefined;
|
||||
let skillsRemoteRuntimePromise: Promise<SkillsRemoteRuntime> | undefined;
|
||||
|
||||
function loadAttemptExecutionRuntime(): Promise<AttemptExecutionRuntime> {
|
||||
attemptExecutionRuntimePromise ??= import("./command/attempt-execution.runtime.js");
|
||||
return attemptExecutionRuntimePromise;
|
||||
}
|
||||
|
||||
function loadAcpManagerRuntime(): Promise<AcpManagerRuntime> {
|
||||
acpManagerRuntimePromise ??= import("../acp/control-plane/manager.js");
|
||||
return acpManagerRuntimePromise;
|
||||
}
|
||||
|
||||
function loadAcpPolicyRuntime(): Promise<AcpPolicyRuntime> {
|
||||
acpPolicyRuntimePromise ??= import("../acp/policy.js");
|
||||
return acpPolicyRuntimePromise;
|
||||
}
|
||||
|
||||
function loadAcpRuntimeErrorsRuntime(): Promise<AcpRuntimeErrorsRuntime> {
|
||||
acpRuntimeErrorsRuntimePromise ??= import("../acp/runtime/errors.js");
|
||||
return acpRuntimeErrorsRuntimePromise;
|
||||
}
|
||||
|
||||
function loadAcpSessionIdentifiersRuntime(): Promise<AcpSessionIdentifiersRuntime> {
|
||||
acpSessionIdentifiersRuntimePromise ??= import("../acp/runtime/session-identifiers.js");
|
||||
return acpSessionIdentifiersRuntimePromise;
|
||||
}
|
||||
|
||||
function loadDeliveryRuntime(): Promise<DeliveryRuntime> {
|
||||
deliveryRuntimePromise ??= import("./command/delivery.runtime.js");
|
||||
return deliveryRuntimePromise;
|
||||
@@ -102,6 +133,44 @@ function loadTranscriptResolveRuntime(): Promise<TranscriptResolveRuntime> {
|
||||
return transcriptResolveRuntimePromise;
|
||||
}
|
||||
|
||||
function loadCliDepsRuntime(): Promise<CliDepsRuntime> {
|
||||
cliDepsRuntimePromise ??= import("../cli/deps.js");
|
||||
return cliDepsRuntimePromise;
|
||||
}
|
||||
|
||||
function loadExecDefaultsRuntime(): Promise<ExecDefaultsRuntime> {
|
||||
execDefaultsRuntimePromise ??= import("./exec-defaults.js");
|
||||
return execDefaultsRuntimePromise;
|
||||
}
|
||||
|
||||
function loadSkillsRuntime(): Promise<SkillsRuntime> {
|
||||
skillsRuntimePromise ??= import("./skills.js");
|
||||
return skillsRuntimePromise;
|
||||
}
|
||||
|
||||
function loadSkillsFilterRuntime(): Promise<SkillsFilterRuntime> {
|
||||
skillsFilterRuntimePromise ??= import("./skills/filter.js");
|
||||
return skillsFilterRuntimePromise;
|
||||
}
|
||||
|
||||
function loadSkillsRefreshStateRuntime(): Promise<SkillsRefreshStateRuntime> {
|
||||
skillsRefreshStateRuntimePromise ??= import("./skills/refresh-state.js");
|
||||
return skillsRefreshStateRuntimePromise;
|
||||
}
|
||||
|
||||
function loadSkillsRemoteRuntime(): Promise<SkillsRemoteRuntime> {
|
||||
skillsRemoteRuntimePromise ??= import("../infra/skills-remote.js");
|
||||
return skillsRemoteRuntimePromise;
|
||||
}
|
||||
|
||||
async function resolveAgentCommandDeps(deps: CliDeps | undefined): Promise<CliDeps> {
|
||||
if (deps) {
|
||||
return deps;
|
||||
}
|
||||
const { createDefaultDeps } = await loadCliDepsRuntime();
|
||||
return createDefaultDeps();
|
||||
}
|
||||
|
||||
type PersistSessionEntryParams = {
|
||||
sessionStore: Record<string, SessionEntry>;
|
||||
sessionKey: string;
|
||||
@@ -287,6 +356,7 @@ async function prepareAgentCommandExecution(
|
||||
});
|
||||
const workspaceDir = workspace.dir;
|
||||
const runId = opts.runId?.trim() || sessionId;
|
||||
const { getAcpSessionManager } = await loadAcpManagerRuntime();
|
||||
const acpManager = getAcpSessionManager();
|
||||
const acpResolution = sessionKey
|
||||
? acpManager.resolveSession({
|
||||
@@ -325,8 +395,9 @@ async function prepareAgentCommandExecution(
|
||||
async function agentCommandInternal(
|
||||
opts: AgentCommandOpts & { senderIsOwner: boolean },
|
||||
runtime: RuntimeEnv = defaultRuntime,
|
||||
deps: CliDeps = createDefaultDeps(),
|
||||
deps?: CliDeps,
|
||||
) {
|
||||
const resolvedDeps = await resolveAgentCommandDeps(deps);
|
||||
const prepared = await prepareAgentCommandExecution(opts, runtime);
|
||||
const {
|
||||
body,
|
||||
@@ -383,6 +454,8 @@ async function agentCommandInternal(
|
||||
const visibleTextAccumulator = attemptExecutionRuntime.createAcpVisibleTextAccumulator();
|
||||
let stopReason: string | undefined;
|
||||
try {
|
||||
const { resolveAcpAgentPolicyError, resolveAcpDispatchPolicyError } =
|
||||
await loadAcpPolicyRuntime();
|
||||
const dispatchPolicyError = resolveAcpDispatchPolicyError(cfg);
|
||||
if (dispatchPolicyError) {
|
||||
throw dispatchPolicyError;
|
||||
@@ -428,6 +501,7 @@ async function agentCommandInternal(
|
||||
},
|
||||
});
|
||||
} catch (error) {
|
||||
const { toAcpRuntimeError } = await loadAcpRuntimeErrorsRuntime();
|
||||
const acpError = toAcpRuntimeError({
|
||||
error,
|
||||
fallbackCode: "ACP_TURN_FAILED",
|
||||
@@ -445,6 +519,7 @@ async function agentCommandInternal(
|
||||
const finalTextRaw = visibleTextAccumulator.finalizeRaw();
|
||||
const finalText = visibleTextAccumulator.finalize();
|
||||
try {
|
||||
const { resolveAcpSessionCwd } = await loadAcpSessionIdentifiersRuntime();
|
||||
sessionEntry = await attemptExecutionRuntime.persistAcpTurnTranscript({
|
||||
body,
|
||||
finalText: finalTextRaw,
|
||||
@@ -474,7 +549,7 @@ async function agentCommandInternal(
|
||||
|
||||
return await deliverAgentCommandResult({
|
||||
cfg,
|
||||
deps,
|
||||
deps: resolvedDeps,
|
||||
runtime,
|
||||
opts,
|
||||
outboundSession,
|
||||
@@ -495,6 +570,8 @@ async function agentCommandInternal(
|
||||
});
|
||||
}
|
||||
|
||||
const [{ getSkillsSnapshotVersion, shouldRefreshSnapshotForVersion }, { matchesSkillFilter }] =
|
||||
await Promise.all([loadSkillsRefreshStateRuntime(), loadSkillsFilterRuntime()]);
|
||||
const skillsSnapshotVersion = getSkillsSnapshotVersion(workspaceDir);
|
||||
const skillFilter = resolveAgentSkillsFilter(cfg, sessionAgentId);
|
||||
const currentSkillsSnapshot = sessionEntry?.skillsSnapshot;
|
||||
@@ -504,22 +581,33 @@ async function agentCommandInternal(
|
||||
!matchesSkillFilter(currentSkillsSnapshot.skillFilter, skillFilter);
|
||||
const needsSkillsSnapshot = isNewSession || shouldRefreshSkillsSnapshot;
|
||||
const skillsSnapshot = needsSkillsSnapshot
|
||||
? buildWorkspaceSkillSnapshot(workspaceDir, {
|
||||
config: cfg,
|
||||
eligibility: {
|
||||
remote: getRemoteSkillEligibility({
|
||||
advertiseExecNode: canExecRequestNode({
|
||||
cfg,
|
||||
sessionEntry,
|
||||
sessionKey,
|
||||
agentId: sessionAgentId,
|
||||
? await (async () => {
|
||||
const [
|
||||
{ buildWorkspaceSkillSnapshot },
|
||||
{ getRemoteSkillEligibility },
|
||||
{ canExecRequestNode },
|
||||
] = await Promise.all([
|
||||
loadSkillsRuntime(),
|
||||
loadSkillsRemoteRuntime(),
|
||||
loadExecDefaultsRuntime(),
|
||||
]);
|
||||
return buildWorkspaceSkillSnapshot(workspaceDir, {
|
||||
config: cfg,
|
||||
eligibility: {
|
||||
remote: getRemoteSkillEligibility({
|
||||
advertiseExecNode: canExecRequestNode({
|
||||
cfg,
|
||||
sessionEntry,
|
||||
sessionKey,
|
||||
agentId: sessionAgentId,
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
},
|
||||
snapshotVersion: skillsSnapshotVersion,
|
||||
skillFilter,
|
||||
agentId: sessionAgentId,
|
||||
})
|
||||
},
|
||||
snapshotVersion: skillsSnapshotVersion,
|
||||
skillFilter,
|
||||
agentId: sessionAgentId,
|
||||
});
|
||||
})()
|
||||
: currentSkillsSnapshot;
|
||||
|
||||
if (skillsSnapshot && sessionStore && sessionKey && needsSkillsSnapshot) {
|
||||
@@ -971,7 +1059,7 @@ async function agentCommandInternal(
|
||||
const { deliverAgentCommandResult } = await loadDeliveryRuntime();
|
||||
return await deliverAgentCommandResult({
|
||||
cfg,
|
||||
deps,
|
||||
deps: resolvedDeps,
|
||||
runtime,
|
||||
opts,
|
||||
outboundSession,
|
||||
@@ -987,7 +1075,7 @@ async function agentCommandInternal(
|
||||
export async function agentCommand(
|
||||
opts: AgentCommandOpts,
|
||||
runtime: RuntimeEnv = defaultRuntime,
|
||||
deps: CliDeps = createDefaultDeps(),
|
||||
deps?: CliDeps,
|
||||
) {
|
||||
return await agentCommandInternal(
|
||||
{
|
||||
@@ -1007,7 +1095,7 @@ export async function agentCommand(
|
||||
export async function agentCommandFromIngress(
|
||||
opts: AgentCommandIngressOpts,
|
||||
runtime: RuntimeEnv = defaultRuntime,
|
||||
deps: CliDeps = createDefaultDeps(),
|
||||
deps?: CliDeps,
|
||||
) {
|
||||
if (typeof opts.senderIsOwner !== "boolean") {
|
||||
// HTTP/WS ingress must declare the trust level explicitly at the boundary.
|
||||
|
||||
@@ -1,14 +1,11 @@
|
||||
import type { OpenClawConfig } from "../config/types.openclaw.js";
|
||||
import {
|
||||
type AuthCredentialReasonCode,
|
||||
type AuthProfileCredential,
|
||||
type AuthProfileStore,
|
||||
resolveAuthProfileDisplayLabel,
|
||||
} from "./auth-profiles.js";
|
||||
import {
|
||||
evaluateStoredCredentialEligibility,
|
||||
resolveTokenExpiryState,
|
||||
} from "./auth-profiles/credential-state.js";
|
||||
import { resolveAuthProfileDisplayLabel } from "./auth-profiles/display.js";
|
||||
import type { AuthProfileCredential, AuthProfileStore } from "./auth-profiles/types.js";
|
||||
import { normalizeProviderId } from "./provider-id.js";
|
||||
|
||||
export type AuthProfileSource = "store";
|
||||
|
||||
@@ -299,6 +299,26 @@ describe("resolveApiKeyForProfile token expiry handling", () => {
|
||||
});
|
||||
|
||||
describe("resolveApiKeyForProfile secret refs", () => {
|
||||
it("ignores blank api_key credentials", async () => {
|
||||
const profileId = "openrouter:default";
|
||||
const result = await resolveApiKeyForProfile({
|
||||
cfg: cfgFor(profileId, "openrouter", "api_key"),
|
||||
store: {
|
||||
version: 1,
|
||||
profiles: {
|
||||
[profileId]: {
|
||||
type: "api_key",
|
||||
provider: "openrouter",
|
||||
key: " ",
|
||||
},
|
||||
},
|
||||
},
|
||||
profileId,
|
||||
});
|
||||
|
||||
expect(result).toBeNull();
|
||||
});
|
||||
|
||||
it("resolves api_key keyRef from env", async () => {
|
||||
const profileId = "openai:default";
|
||||
const previous = process.env.OPENAI_API_KEY;
|
||||
|
||||
@@ -467,7 +467,10 @@ export async function executePreparedCliRun(
|
||||
...parsed,
|
||||
rawText,
|
||||
finalPromptText: prompt,
|
||||
text: applyPluginTextReplacements(rawText, context.backendResolved.textTransforms?.output),
|
||||
text: applyPluginTextReplacements(
|
||||
rawText,
|
||||
context.backendResolved.textTransforms?.output,
|
||||
),
|
||||
};
|
||||
} finally {
|
||||
restoreSkillEnv?.();
|
||||
|
||||
@@ -204,4 +204,17 @@ describe("createAcpVisibleTextAccumulator", () => {
|
||||
|
||||
expect(acc.finalize()).toBe("NO_REPLY: explanation");
|
||||
});
|
||||
|
||||
it("buffers chunked NO_REPLY prefixes before emitting visible text", () => {
|
||||
const acc = createAcpVisibleTextAccumulator();
|
||||
|
||||
expect(acc.consume("NO")).toBeNull();
|
||||
expect(acc.consume("NO_")).toBeNull();
|
||||
expect(acc.consume("NO_RE")).toBeNull();
|
||||
expect(acc.consume("NO_REPLY")).toBeNull();
|
||||
expect(acc.consume("Actual answer")).toEqual({
|
||||
text: "Actual answer",
|
||||
delta: "Actual answer",
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,83 +1,273 @@
|
||||
import { randomUUID } from "node:crypto";
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import type { OpenClawConfig } from "../../config/config.js";
|
||||
import { loadSessionStore, type SessionEntry } from "../../config/sessions.js";
|
||||
import type { SessionEntry } from "../../config/sessions.js";
|
||||
import { loadSessionStore } from "../../config/sessions.js";
|
||||
import type { EmbeddedPiRunResult } from "../pi-embedded.js";
|
||||
import { updateSessionStoreAfterAgentRun } from "./session-store.js";
|
||||
import { resolveSession } from "./session.js";
|
||||
|
||||
function acpMeta() {
|
||||
return {
|
||||
backend: "acpx",
|
||||
agent: "codex",
|
||||
runtimeSessionName: "runtime-1",
|
||||
mode: "persistent" as const,
|
||||
state: "idle" as const,
|
||||
lastActivityAt: Date.now(),
|
||||
};
|
||||
}
|
||||
|
||||
async function withTempSessionStore<T>(
|
||||
run: (params: { dir: string; storePath: string }) => Promise<T>,
|
||||
): Promise<T> {
|
||||
const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-session-store-"));
|
||||
try {
|
||||
return await run({ dir, storePath: path.join(dir, "sessions.json") });
|
||||
} finally {
|
||||
await fs.rm(dir, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
describe("updateSessionStoreAfterAgentRun", () => {
|
||||
let tmpDir: string;
|
||||
let storePath: string;
|
||||
|
||||
beforeEach(async () => {
|
||||
tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-session-store-"));
|
||||
storePath = path.join(tmpDir, "sessions.json");
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await fs.rm(tmpDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("persists claude-cli session bindings when the backend is configured", async () => {
|
||||
const cfg = {
|
||||
agents: {
|
||||
defaults: {
|
||||
cliBackends: {
|
||||
"claude-cli": {
|
||||
command: "claude",
|
||||
await withTempSessionStore(async ({ storePath }) => {
|
||||
const cfg = {
|
||||
agents: {
|
||||
defaults: {
|
||||
cliBackends: {
|
||||
"claude-cli": {
|
||||
command: "claude",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} as OpenClawConfig;
|
||||
const sessionKey = "agent:main:explicit:test-claude-cli";
|
||||
const sessionId = "test-openclaw-session";
|
||||
const sessionStore: Record<string, SessionEntry> = {
|
||||
[sessionKey]: {
|
||||
sessionId,
|
||||
updatedAt: 1,
|
||||
},
|
||||
};
|
||||
await fs.writeFile(storePath, JSON.stringify(sessionStore, null, 2));
|
||||
} as OpenClawConfig;
|
||||
const sessionKey = "agent:main:explicit:test-claude-cli";
|
||||
const sessionId = "test-openclaw-session";
|
||||
const sessionStore: Record<string, SessionEntry> = {
|
||||
[sessionKey]: {
|
||||
sessionId,
|
||||
updatedAt: 1,
|
||||
},
|
||||
};
|
||||
await fs.writeFile(storePath, JSON.stringify(sessionStore, null, 2));
|
||||
|
||||
const result: EmbeddedPiRunResult = {
|
||||
meta: {
|
||||
durationMs: 1,
|
||||
agentMeta: {
|
||||
sessionId: "cli-session-123",
|
||||
provider: "claude-cli",
|
||||
model: "claude-sonnet-4-6",
|
||||
cliSessionBinding: {
|
||||
const result: EmbeddedPiRunResult = {
|
||||
meta: {
|
||||
durationMs: 1,
|
||||
agentMeta: {
|
||||
sessionId: "cli-session-123",
|
||||
provider: "claude-cli",
|
||||
model: "claude-sonnet-4-6",
|
||||
cliSessionBinding: {
|
||||
sessionId: "cli-session-123",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
};
|
||||
|
||||
await updateSessionStoreAfterAgentRun({
|
||||
cfg,
|
||||
sessionId,
|
||||
sessionKey,
|
||||
storePath,
|
||||
sessionStore,
|
||||
defaultProvider: "claude-cli",
|
||||
defaultModel: "claude-sonnet-4-6",
|
||||
result,
|
||||
});
|
||||
await updateSessionStoreAfterAgentRun({
|
||||
cfg,
|
||||
sessionId,
|
||||
sessionKey,
|
||||
storePath,
|
||||
sessionStore,
|
||||
defaultProvider: "claude-cli",
|
||||
defaultModel: "claude-sonnet-4-6",
|
||||
result,
|
||||
});
|
||||
|
||||
expect(sessionStore[sessionKey]?.cliSessionBindings?.["claude-cli"]).toEqual({
|
||||
sessionId: "cli-session-123",
|
||||
});
|
||||
expect(sessionStore[sessionKey]?.cliSessionIds?.["claude-cli"]).toBe("cli-session-123");
|
||||
expect(sessionStore[sessionKey]?.claudeCliSessionId).toBe("cli-session-123");
|
||||
expect(sessionStore[sessionKey]?.cliSessionBindings?.["claude-cli"]).toEqual({
|
||||
sessionId: "cli-session-123",
|
||||
});
|
||||
expect(sessionStore[sessionKey]?.cliSessionIds?.["claude-cli"]).toBe("cli-session-123");
|
||||
expect(sessionStore[sessionKey]?.claudeCliSessionId).toBe("cli-session-123");
|
||||
|
||||
const persisted = loadSessionStore(storePath);
|
||||
expect(persisted[sessionKey]?.cliSessionBindings?.["claude-cli"]).toEqual({
|
||||
sessionId: "cli-session-123",
|
||||
const persisted = loadSessionStore(storePath);
|
||||
expect(persisted[sessionKey]?.cliSessionBindings?.["claude-cli"]).toEqual({
|
||||
sessionId: "cli-session-123",
|
||||
});
|
||||
expect(persisted[sessionKey]?.cliSessionIds?.["claude-cli"]).toBe("cli-session-123");
|
||||
expect(persisted[sessionKey]?.claudeCliSessionId).toBe("cli-session-123");
|
||||
});
|
||||
});
|
||||
|
||||
it("preserves ACP metadata when caller has a stale session snapshot", async () => {
|
||||
await withTempSessionStore(async ({ storePath }) => {
|
||||
const sessionKey = `agent:codex:acp:${randomUUID()}`;
|
||||
const sessionId = randomUUID();
|
||||
|
||||
const existing: SessionEntry = {
|
||||
sessionId,
|
||||
updatedAt: Date.now(),
|
||||
acp: acpMeta(),
|
||||
};
|
||||
await fs.writeFile(storePath, JSON.stringify({ [sessionKey]: existing }, null, 2), "utf8");
|
||||
|
||||
const staleInMemory: Record<string, SessionEntry> = {
|
||||
[sessionKey]: {
|
||||
sessionId,
|
||||
updatedAt: Date.now(),
|
||||
},
|
||||
};
|
||||
|
||||
await updateSessionStoreAfterAgentRun({
|
||||
cfg: {} as never,
|
||||
sessionId,
|
||||
sessionKey,
|
||||
storePath,
|
||||
sessionStore: staleInMemory,
|
||||
contextTokensOverride: 200_000,
|
||||
defaultProvider: "openai",
|
||||
defaultModel: "gpt-5.4",
|
||||
result: {
|
||||
payloads: [],
|
||||
meta: {
|
||||
aborted: false,
|
||||
agentMeta: {
|
||||
provider: "openai",
|
||||
model: "gpt-5.4",
|
||||
},
|
||||
},
|
||||
} as never,
|
||||
});
|
||||
|
||||
const persisted = loadSessionStore(storePath, { skipCache: true })[sessionKey];
|
||||
expect(persisted?.acp).toBeDefined();
|
||||
expect(staleInMemory[sessionKey]?.acp).toBeDefined();
|
||||
});
|
||||
});
|
||||
|
||||
it("persists latest systemPromptReport for downstream warning dedupe", async () => {
|
||||
await withTempSessionStore(async ({ storePath }) => {
|
||||
const sessionKey = `agent:codex:report:${randomUUID()}`;
|
||||
const sessionId = randomUUID();
|
||||
|
||||
const sessionStore: Record<string, SessionEntry> = {
|
||||
[sessionKey]: {
|
||||
sessionId,
|
||||
updatedAt: Date.now(),
|
||||
},
|
||||
};
|
||||
await fs.writeFile(storePath, JSON.stringify(sessionStore, null, 2), "utf8");
|
||||
|
||||
const report = {
|
||||
source: "run" as const,
|
||||
generatedAt: Date.now(),
|
||||
bootstrapTruncation: {
|
||||
warningMode: "once" as const,
|
||||
warningSignaturesSeen: ["sig-a", "sig-b"],
|
||||
},
|
||||
systemPrompt: {
|
||||
chars: 1,
|
||||
projectContextChars: 1,
|
||||
nonProjectContextChars: 0,
|
||||
},
|
||||
injectedWorkspaceFiles: [],
|
||||
skills: { promptChars: 0, entries: [] },
|
||||
tools: { listChars: 0, schemaChars: 0, entries: [] },
|
||||
};
|
||||
|
||||
await updateSessionStoreAfterAgentRun({
|
||||
cfg: {} as never,
|
||||
sessionId,
|
||||
sessionKey,
|
||||
storePath,
|
||||
sessionStore,
|
||||
contextTokensOverride: 200_000,
|
||||
defaultProvider: "openai",
|
||||
defaultModel: "gpt-5.4",
|
||||
result: {
|
||||
payloads: [],
|
||||
meta: {
|
||||
agentMeta: {
|
||||
provider: "openai",
|
||||
model: "gpt-5.4",
|
||||
},
|
||||
systemPromptReport: report,
|
||||
},
|
||||
} as never,
|
||||
});
|
||||
|
||||
const persisted = loadSessionStore(storePath, { skipCache: true })[sessionKey];
|
||||
expect(persisted?.systemPromptReport?.bootstrapTruncation?.warningSignaturesSeen).toEqual([
|
||||
"sig-a",
|
||||
"sig-b",
|
||||
]);
|
||||
expect(sessionStore[sessionKey]?.systemPromptReport?.bootstrapTruncation?.warningMode).toBe(
|
||||
"once",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it("stores and reloads the runtime model for explicit session-id-only runs", async () => {
|
||||
await withTempSessionStore(async ({ storePath }) => {
|
||||
const cfg = {
|
||||
session: {
|
||||
store: storePath,
|
||||
mainKey: "main",
|
||||
},
|
||||
agents: {
|
||||
defaults: {
|
||||
cliBackends: {
|
||||
"claude-cli": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
} as never;
|
||||
|
||||
const first = resolveSession({
|
||||
cfg,
|
||||
sessionId: "explicit-session-123",
|
||||
});
|
||||
|
||||
expect(first.sessionKey).toBe("agent:main:explicit:explicit-session-123");
|
||||
|
||||
await updateSessionStoreAfterAgentRun({
|
||||
cfg,
|
||||
sessionId: first.sessionId,
|
||||
sessionKey: first.sessionKey!,
|
||||
storePath: first.storePath,
|
||||
sessionStore: first.sessionStore!,
|
||||
contextTokensOverride: 200_000,
|
||||
defaultProvider: "claude-cli",
|
||||
defaultModel: "claude-sonnet-4-6",
|
||||
result: {
|
||||
payloads: [],
|
||||
meta: {
|
||||
agentMeta: {
|
||||
provider: "claude-cli",
|
||||
model: "claude-sonnet-4-6",
|
||||
sessionId: "claude-cli-session-1",
|
||||
cliSessionBinding: {
|
||||
sessionId: "claude-cli-session-1",
|
||||
authEpoch: "auth-epoch-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
} as never,
|
||||
});
|
||||
|
||||
const second = resolveSession({
|
||||
cfg,
|
||||
sessionId: "explicit-session-123",
|
||||
});
|
||||
|
||||
expect(second.sessionKey).toBe(first.sessionKey);
|
||||
expect(second.sessionEntry?.cliSessionBindings?.["claude-cli"]).toEqual({
|
||||
sessionId: "claude-cli-session-1",
|
||||
authEpoch: "auth-epoch-1",
|
||||
});
|
||||
|
||||
const persisted = loadSessionStore(storePath, { skipCache: true })[first.sessionKey!];
|
||||
expect(persisted?.cliSessionBindings?.["claude-cli"]).toEqual({
|
||||
sessionId: "claude-cli-session-1",
|
||||
authEpoch: "auth-epoch-1",
|
||||
});
|
||||
});
|
||||
expect(persisted[sessionKey]?.cliSessionIds?.["claude-cli"]).toBe("cli-session-123");
|
||||
expect(persisted[sessionKey]?.claudeCliSessionId).toBe("cli-session-123");
|
||||
});
|
||||
});
|
||||
|
||||
@@ -60,16 +60,16 @@ export async function updateSessionStoreAfterAgentRun(params: {
|
||||
const compactionsThisRun = Math.max(0, result.meta.agentMeta?.compactionCount ?? 0);
|
||||
const modelUsed = result.meta.agentMeta?.model ?? fallbackModel ?? defaultModel;
|
||||
const providerUsed = result.meta.agentMeta?.provider ?? fallbackProvider ?? defaultProvider;
|
||||
const { resolveContextTokensForModel } = await getContextModule();
|
||||
const contextTokens =
|
||||
resolveContextTokensForModel({
|
||||
cfg,
|
||||
provider: providerUsed,
|
||||
model: modelUsed,
|
||||
contextTokensOverride: params.contextTokensOverride,
|
||||
fallbackContextTokens: DEFAULT_CONTEXT_TOKENS,
|
||||
allowAsyncLoad: false,
|
||||
}) ?? DEFAULT_CONTEXT_TOKENS;
|
||||
typeof params.contextTokensOverride === "number" && params.contextTokensOverride > 0
|
||||
? params.contextTokensOverride
|
||||
: ((await getContextModule()).resolveContextTokensForModel({
|
||||
cfg,
|
||||
provider: providerUsed,
|
||||
model: modelUsed,
|
||||
fallbackContextTokens: DEFAULT_CONTEXT_TOKENS,
|
||||
allowAsyncLoad: false,
|
||||
}) ?? DEFAULT_CONTEXT_TOKENS);
|
||||
|
||||
const entry = sessionStore[sessionKey] ?? {
|
||||
sessionId,
|
||||
|
||||
@@ -5,6 +5,10 @@ import { normalizeProviderId } from "./model-selection-normalize.js";
|
||||
|
||||
export function isCliProvider(provider: string, cfg?: OpenClawConfig): boolean {
|
||||
const normalized = normalizeProviderId(provider);
|
||||
const backends = cfg?.agents?.defaults?.cliBackends ?? {};
|
||||
if (Object.keys(backends).some((key) => normalizeProviderId(key) === normalized)) {
|
||||
return true;
|
||||
}
|
||||
const cliBackends = resolveRuntimeCliBackends();
|
||||
if (cliBackends.some((backend) => normalizeProviderId(backend.id) === normalized)) {
|
||||
return true;
|
||||
@@ -12,6 +16,5 @@ export function isCliProvider(provider: string, cfg?: OpenClawConfig): boolean {
|
||||
if (resolvePluginSetupCliBackendRuntime({ backend: normalized })) {
|
||||
return true;
|
||||
}
|
||||
const backends = cfg?.agents?.defaults?.cliBackends ?? {};
|
||||
return Object.keys(backends).some((key) => normalizeProviderId(key) === normalized);
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -278,16 +278,11 @@ describe("extractToolResultMediaPaths", () => {
|
||||
});
|
||||
|
||||
it("blocks trusted-media aliases that are not exact registered built-ins", () => {
|
||||
expect(filterToolResultMediaUrls("bash", ["/etc/passwd"], undefined, new Set(["exec"]))).toEqual(
|
||||
[],
|
||||
);
|
||||
expect(
|
||||
filterToolResultMediaUrls(
|
||||
"Web_Search",
|
||||
["/etc/passwd"],
|
||||
undefined,
|
||||
new Set(["web_search"]),
|
||||
),
|
||||
filterToolResultMediaUrls("bash", ["/etc/passwd"], undefined, new Set(["exec"])),
|
||||
).toEqual([]);
|
||||
expect(
|
||||
filterToolResultMediaUrls("Web_Search", ["/etc/passwd"], undefined, new Set(["web_search"])),
|
||||
).toEqual([]);
|
||||
});
|
||||
|
||||
|
||||
@@ -1,8 +1,5 @@
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import {
|
||||
MIN_PROMPT_BUDGET_RATIO,
|
||||
MIN_PROMPT_BUDGET_TOKENS,
|
||||
} from "./pi-compaction-constants.js";
|
||||
import { MIN_PROMPT_BUDGET_RATIO, MIN_PROMPT_BUDGET_TOKENS } from "./pi-compaction-constants.js";
|
||||
import {
|
||||
applyPiCompactionSettingsFromConfig,
|
||||
DEFAULT_PI_COMPACTION_RESERVE_TOKENS_FLOOR,
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
import type { OpenClawConfig } from "../config/types.openclaw.js";
|
||||
import type { ContextEngineInfo } from "../context-engine/types.js";
|
||||
import {
|
||||
MIN_PROMPT_BUDGET_RATIO,
|
||||
MIN_PROMPT_BUDGET_TOKENS,
|
||||
} from "./pi-compaction-constants.js";
|
||||
import { MIN_PROMPT_BUDGET_RATIO, MIN_PROMPT_BUDGET_TOKENS } from "./pi-compaction-constants.js";
|
||||
|
||||
export const DEFAULT_PI_COMPACTION_RESERVE_TOKENS_FLOOR = 20_000;
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { resolveSandboxInputPath } from "./sandbox-paths.js";
|
||||
import { resolveToolPathAgainstWorkspaceRoot } from "./pi-tools.read.js";
|
||||
import { resolveSandboxInputPath } from "./sandbox-paths.js";
|
||||
|
||||
describe("resolveSandboxInputPath (Windows drive paths under POSIX rules)", () => {
|
||||
it("does not join workspace cwd when path looks like a Windows drive path", () => {
|
||||
|
||||
@@ -48,7 +48,9 @@ describe("buildWorkspaceSkillsPrompt — .agents/skills/ directories", () => {
|
||||
await Promise.all(
|
||||
tempDirs
|
||||
.splice(0, tempDirs.length)
|
||||
.map((dir) => fs.rm(dir, { recursive: true, force: true })),
|
||||
.map((dir) =>
|
||||
fs.rm(dir, { recursive: true, force: true, maxRetries: 5, retryDelay: 20 }),
|
||||
),
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -18,6 +18,157 @@ vi.mock("../logging/subsystem.js", () => {
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("../cli/deps.js", () => ({
|
||||
createDefaultDeps: vi.fn(() => ({})),
|
||||
}));
|
||||
|
||||
vi.mock("../acp/control-plane/manager.js", () => ({
|
||||
__testing: {
|
||||
resetAcpSessionManagerForTests: vi.fn(),
|
||||
},
|
||||
getAcpSessionManager: vi.fn(() => ({
|
||||
resolveSession: vi.fn(() => null),
|
||||
})),
|
||||
}));
|
||||
|
||||
vi.mock("../agents/pi-embedded.js", () => ({
|
||||
abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
|
||||
runEmbeddedPiAgent: vi.fn(),
|
||||
resolveEmbeddedSessionLane: (key: string) => `session:${key.trim() || "main"}`,
|
||||
}));
|
||||
|
||||
vi.mock("../agents/model-catalog.js", () => ({
|
||||
loadModelCatalog: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock("../agents/model-selection.js", () => {
|
||||
type ConfigWithModels = {
|
||||
agents?: {
|
||||
defaults?: {
|
||||
model?: string | { primary?: string; fallbacks?: string[] };
|
||||
models?: Record<string, { params?: { thinking?: string } } | undefined>;
|
||||
thinkingDefault?: string;
|
||||
};
|
||||
};
|
||||
};
|
||||
type ModelRef = { provider: string; model: string };
|
||||
type CatalogEntry = { id?: string; model?: string; name?: string; reasoning?: boolean };
|
||||
|
||||
const parseModelRefImpl = (raw: string, defaultProvider = "openai"): ModelRef | null => {
|
||||
const value = raw.trim();
|
||||
if (!value) {
|
||||
return null;
|
||||
}
|
||||
const slash = value.indexOf("/");
|
||||
if (slash >= 0) {
|
||||
return {
|
||||
provider: value.slice(0, slash).trim(),
|
||||
model: value.slice(slash + 1).trim(),
|
||||
};
|
||||
}
|
||||
return { provider: defaultProvider, model: value };
|
||||
};
|
||||
const parseModelRef = vi.fn(parseModelRefImpl);
|
||||
const normalizeModelRef = (provider: string, model: string): ModelRef => ({
|
||||
provider: provider.trim().toLowerCase(),
|
||||
model: model.trim(),
|
||||
});
|
||||
const modelKey = (provider: string, model: string) =>
|
||||
`${provider.trim().toLowerCase()}/${model.trim().toLowerCase()}`;
|
||||
const resolvePrimary = (cfg?: ConfigWithModels): string | undefined => {
|
||||
const primary = cfg?.agents?.defaults?.model;
|
||||
if (typeof primary === "string") {
|
||||
return primary;
|
||||
}
|
||||
return primary?.primary;
|
||||
};
|
||||
const resolveDefaultRef = (cfg?: ConfigWithModels): ModelRef => {
|
||||
const parsed = parseModelRefImpl(resolvePrimary(cfg) ?? "openai/gpt-5.4", "openai");
|
||||
return parsed ?? { provider: "openai", model: "gpt-5.4" };
|
||||
};
|
||||
const resolveModelConfig = (cfg: ConfigWithModels | undefined, ref: ModelRef) => {
|
||||
const models = cfg?.agents?.defaults?.models ?? {};
|
||||
return models[`${ref.provider}/${ref.model}`] ?? models[modelKey(ref.provider, ref.model)];
|
||||
};
|
||||
|
||||
return {
|
||||
buildAllowedModelSet: vi.fn(({ cfg }: { cfg?: ConfigWithModels; catalog?: CatalogEntry[] }) => {
|
||||
const refs = new Set<string>();
|
||||
const modelConfig = cfg?.agents?.defaults?.models ?? {};
|
||||
for (const raw of Object.keys(modelConfig)) {
|
||||
const parsed = parseModelRefImpl(raw, "openai");
|
||||
if (parsed) {
|
||||
refs.add(modelKey(parsed.provider, parsed.model));
|
||||
}
|
||||
}
|
||||
const primary = resolveDefaultRef(cfg);
|
||||
refs.add(modelKey(primary.provider, primary.model));
|
||||
const fallbackRefs =
|
||||
typeof cfg?.agents?.defaults?.model === "object"
|
||||
? (cfg.agents.defaults.model.fallbacks ?? [])
|
||||
: [];
|
||||
for (const fallback of fallbackRefs) {
|
||||
const parsed = parseModelRefImpl(fallback, primary.provider);
|
||||
if (parsed) {
|
||||
refs.add(modelKey(parsed.provider, parsed.model));
|
||||
}
|
||||
}
|
||||
return {
|
||||
allowedKeys: refs,
|
||||
allowedCatalog: [],
|
||||
allowAny: Object.keys(modelConfig).length === 0,
|
||||
};
|
||||
}),
|
||||
isCliProvider: vi.fn(() => false),
|
||||
modelKey,
|
||||
normalizeModelRef,
|
||||
parseModelRef,
|
||||
resolveConfiguredModelRef: vi.fn(
|
||||
({ cfg }: { cfg?: ConfigWithModels; defaultProvider?: string; defaultModel?: string }) =>
|
||||
resolveDefaultRef(cfg),
|
||||
),
|
||||
resolveDefaultModelForAgent: vi.fn(({ cfg }: { cfg?: ConfigWithModels }) =>
|
||||
resolveDefaultRef(cfg),
|
||||
),
|
||||
resolveThinkingDefault: vi.fn(
|
||||
({
|
||||
cfg,
|
||||
provider,
|
||||
model,
|
||||
catalog,
|
||||
}: {
|
||||
cfg?: ConfigWithModels;
|
||||
provider: string;
|
||||
model: string;
|
||||
catalog?: CatalogEntry[];
|
||||
}) => {
|
||||
const ref = normalizeModelRef(provider, model);
|
||||
const modelThinking = resolveModelConfig(cfg, ref)?.params?.thinking;
|
||||
if (modelThinking) {
|
||||
return modelThinking;
|
||||
}
|
||||
const defaultThinking = cfg?.agents?.defaults?.thinkingDefault;
|
||||
if (defaultThinking) {
|
||||
return defaultThinking;
|
||||
}
|
||||
const entry = catalog?.find((item) => item.id === model || item.model === model);
|
||||
if (entry?.reasoning && entry.name?.includes("4.6")) {
|
||||
return "adaptive";
|
||||
}
|
||||
return entry?.reasoning ? "low" : "off";
|
||||
},
|
||||
),
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("../agents/subagent-announce.js", () => ({
|
||||
runSubagentAnnounceFlow: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock("../gateway/call.js", () => ({
|
||||
callGateway: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock("../agents/workspace.js", () => ({
|
||||
DEFAULT_AGENT_WORKSPACE_DIR: "/tmp/openclaw-workspace",
|
||||
DEFAULT_AGENTS_FILENAME: "AGENTS.md",
|
||||
@@ -34,3 +185,29 @@ vi.mock("../agents/skills.js", () => ({
|
||||
vi.mock("../agents/skills/refresh.js", () => ({
|
||||
getSkillsSnapshotVersion: vi.fn(() => 0),
|
||||
}));
|
||||
|
||||
vi.mock("../agents/skills/refresh-state.js", () => ({
|
||||
getSkillsSnapshotVersion: vi.fn(() => 0),
|
||||
shouldRefreshSnapshotForVersion: vi.fn(() => false),
|
||||
}));
|
||||
|
||||
vi.mock("../agents/skills/filter.js", () => ({
|
||||
normalizeSkillFilter: vi.fn((skillFilter?: ReadonlyArray<unknown>) =>
|
||||
skillFilter?.map((entry) => String(entry).trim()).filter(Boolean),
|
||||
),
|
||||
normalizeSkillFilterForComparison: vi.fn((skillFilter?: ReadonlyArray<unknown>) =>
|
||||
skillFilter
|
||||
?.map((entry) => String(entry).trim())
|
||||
.filter(Boolean)
|
||||
.toSorted(),
|
||||
),
|
||||
matchesSkillFilter: vi.fn(() => true),
|
||||
}));
|
||||
|
||||
vi.mock("../agents/exec-defaults.js", () => ({
|
||||
canExecRequestNode: vi.fn(() => false),
|
||||
}));
|
||||
|
||||
vi.mock("../infra/skills-remote.js", () => ({
|
||||
getRemoteSkillEligibility: vi.fn(() => undefined),
|
||||
}));
|
||||
|
||||
@@ -2,16 +2,112 @@ import fs from "node:fs";
|
||||
import path from "node:path";
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
|
||||
import "./agent-command.test-mocks.js";
|
||||
import * as acpManagerModule from "../acp/control-plane/manager.js";
|
||||
import { AcpRuntimeError } from "../acp/runtime/errors.js";
|
||||
import * as embeddedModule from "../agents/pi-embedded.js";
|
||||
import * as configIoModule from "../config/io.js";
|
||||
import type { OpenClawConfig } from "../config/types.openclaw.js";
|
||||
import { readSessionMessages } from "../gateway/session-utils.fs.js";
|
||||
import { onAgentEvent } from "../infra/agent-events.js";
|
||||
import type { RuntimeEnv } from "../runtime.js";
|
||||
import { agentCommand } from "./agent.js";
|
||||
|
||||
const agentEventMocks = vi.hoisted(() => {
|
||||
type AgentEvent = { stream: string; data?: Record<string, unknown>; runId?: string };
|
||||
const handlers = new Set<(event: AgentEvent) => void>();
|
||||
return {
|
||||
clearAgentRunContext: vi.fn(),
|
||||
emitAgentEvent: vi.fn((event: AgentEvent) => {
|
||||
for (const handler of handlers) {
|
||||
handler(event);
|
||||
}
|
||||
}),
|
||||
onAgentEvent: vi.fn((handler: (event: AgentEvent) => void) => {
|
||||
handlers.add(handler);
|
||||
return () => handlers.delete(handler);
|
||||
}),
|
||||
registerAgentRunContext: vi.fn(),
|
||||
};
|
||||
});
|
||||
|
||||
const attemptExecutionMocks = vi.hoisted(() => ({
|
||||
emitAcpLifecycleStart: vi.fn(),
|
||||
emitAcpLifecycleEnd: vi.fn(),
|
||||
emitAcpLifecycleError: vi.fn(),
|
||||
persistAcpTurnTranscript: vi.fn(
|
||||
async ({ sessionEntry }: { sessionEntry?: unknown }) => sessionEntry,
|
||||
),
|
||||
}));
|
||||
|
||||
vi.mock("../infra/agent-events.js", () => agentEventMocks);
|
||||
|
||||
vi.mock("../agents/command/delivery.runtime.js", () => ({
|
||||
deliverAgentCommandResult: vi.fn(
|
||||
async (params: { runtime: RuntimeEnv; payloads?: Array<{ text?: string }> }) => {
|
||||
for (const payload of params.payloads ?? []) {
|
||||
if (payload.text) {
|
||||
params.runtime.log(payload.text);
|
||||
}
|
||||
}
|
||||
},
|
||||
),
|
||||
}));
|
||||
|
||||
vi.mock("../agents/command/attempt-execution.runtime.js", () => {
|
||||
const createAcpVisibleTextAccumulator = () => {
|
||||
let text = "";
|
||||
return {
|
||||
consume(chunk: string) {
|
||||
if (!chunk || chunk === "NO_REPLY") {
|
||||
return null;
|
||||
}
|
||||
text += chunk;
|
||||
return { text, delta: chunk };
|
||||
},
|
||||
finalize: () => text.trim(),
|
||||
finalizeRaw: () => text,
|
||||
};
|
||||
};
|
||||
|
||||
return {
|
||||
createAcpVisibleTextAccumulator,
|
||||
emitAcpLifecycleStart: attemptExecutionMocks.emitAcpLifecycleStart,
|
||||
emitAcpLifecycleEnd: attemptExecutionMocks.emitAcpLifecycleEnd,
|
||||
emitAcpLifecycleError: attemptExecutionMocks.emitAcpLifecycleError,
|
||||
emitAcpAssistantDelta: ({
|
||||
runId,
|
||||
text,
|
||||
delta,
|
||||
}: {
|
||||
runId: string;
|
||||
text: string;
|
||||
delta: string;
|
||||
}) =>
|
||||
agentEventMocks.emitAgentEvent({
|
||||
runId,
|
||||
stream: "assistant",
|
||||
data: { text, delta },
|
||||
}),
|
||||
buildAcpResult: ({
|
||||
payloadText,
|
||||
startedAt,
|
||||
stopReason,
|
||||
abortSignal,
|
||||
}: {
|
||||
payloadText: string;
|
||||
startedAt: number;
|
||||
stopReason?: string;
|
||||
abortSignal?: AbortSignal;
|
||||
}) => ({
|
||||
payloads: payloadText ? [{ text: payloadText }] : [],
|
||||
meta: {
|
||||
durationMs: Date.now() - startedAt,
|
||||
aborted: abortSignal?.aborted === true,
|
||||
stopReason,
|
||||
},
|
||||
}),
|
||||
persistAcpTurnTranscript: attemptExecutionMocks.persistAcpTurnTranscript,
|
||||
};
|
||||
});
|
||||
|
||||
const loadConfigSpy = vi.spyOn(configIoModule, "loadConfig");
|
||||
const runEmbeddedPiAgentSpy = vi.spyOn(embeddedModule, "runEmbeddedPiAgent");
|
||||
const getAcpSessionManagerSpy = vi.spyOn(acpManagerModule, "getAcpSessionManager");
|
||||
@@ -69,24 +165,20 @@ function writeAcpSessionStore(storePath: string, agent = "codex") {
|
||||
fs.mkdirSync(path.dirname(storePath), { recursive: true });
|
||||
fs.writeFileSync(
|
||||
storePath,
|
||||
JSON.stringify(
|
||||
{
|
||||
[sessionKey]: {
|
||||
sessionId: "acp-session-1",
|
||||
updatedAt: Date.now(),
|
||||
acp: {
|
||||
backend: "acpx",
|
||||
agent,
|
||||
runtimeSessionName: sessionKey,
|
||||
mode: "oneshot",
|
||||
state: "idle",
|
||||
lastActivityAt: Date.now(),
|
||||
},
|
||||
JSON.stringify({
|
||||
[sessionKey]: {
|
||||
sessionId: "acp-session-1",
|
||||
updatedAt: Date.now(),
|
||||
acp: {
|
||||
backend: "acpx",
|
||||
agent,
|
||||
runtimeSessionName: sessionKey,
|
||||
mode: "oneshot",
|
||||
state: "idle",
|
||||
lastActivityAt: Date.now(),
|
||||
},
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -159,7 +251,7 @@ function createRunTurnFromTextDeltas(chunks: string[]) {
|
||||
|
||||
function subscribeAssistantEvents() {
|
||||
const assistantEvents: Array<{ text?: string; delta?: string }> = [];
|
||||
const stop = onAgentEvent((evt) => {
|
||||
const stop = agentEventMocks.onAgentEvent((evt) => {
|
||||
if (evt.stream !== "assistant") {
|
||||
return;
|
||||
}
|
||||
@@ -180,6 +272,7 @@ async function runAcpTurnWithAssistantEvents(chunks: string[]) {
|
||||
});
|
||||
|
||||
try {
|
||||
vi.mocked(runtime.log).mockClear();
|
||||
await agentCommand({ message: "ping", sessionKey: "agent:codex:acp:test" }, runtime);
|
||||
} finally {
|
||||
stop();
|
||||
@@ -204,26 +297,13 @@ async function runAcpTurnWithTextDeltas(params: { message?: string; chunks: stri
|
||||
return { runTurn };
|
||||
}
|
||||
|
||||
function expectPersistedAcpTranscript(params: {
|
||||
storePath: string;
|
||||
userContent: string;
|
||||
assistantText: string;
|
||||
}) {
|
||||
const persistedStore = JSON.parse(fs.readFileSync(params.storePath, "utf-8")) as Record<
|
||||
string,
|
||||
{ sessionFile?: string }
|
||||
>;
|
||||
const sessionFile = persistedStore["agent:codex:acp:test"]?.sessionFile;
|
||||
const messages = readSessionMessages("acp-session-1", params.storePath, sessionFile);
|
||||
expect(messages).toHaveLength(2);
|
||||
expect(messages[0]).toMatchObject({
|
||||
role: "user",
|
||||
content: params.userContent,
|
||||
});
|
||||
expect(messages[1]).toMatchObject({
|
||||
role: "assistant",
|
||||
content: [{ type: "text", text: params.assistantText }],
|
||||
});
|
||||
function expectPersistedAcpTranscript(params: { userContent: string; assistantText: string }) {
|
||||
expect(attemptExecutionMocks.persistAcpTurnTranscript).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
body: params.userContent,
|
||||
finalText: params.assistantText,
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
async function runAcpSessionWithPolicyOverrides(params: {
|
||||
@@ -262,14 +342,16 @@ describe("agentCommand ACP runtime routing", () => {
|
||||
} as never);
|
||||
});
|
||||
|
||||
it("routes ACP sessions through AcpSessionManager instead of embedded agent", async () => {
|
||||
await withAcpSessionEnv(async () => {
|
||||
const { runTurn } = await runAcpTurnWithTextDeltas({ chunks: ["ACP_", "OK"] });
|
||||
|
||||
it("routes ACP sessions and preserves exact transcript text", async () => {
|
||||
await withAcpSessionEnvInfo(async () => {
|
||||
const { runTurn } = await runAcpTurnWithTextDeltas({
|
||||
message: " ping\n",
|
||||
chunks: [" ACP_OK\n"],
|
||||
});
|
||||
expect(runTurn).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
sessionKey: "agent:codex:acp:test",
|
||||
text: "ping",
|
||||
text: " ping\n",
|
||||
mode: "prompt",
|
||||
}),
|
||||
);
|
||||
@@ -278,89 +360,22 @@ describe("agentCommand ACP runtime routing", () => {
|
||||
.mocked(runtime.log)
|
||||
.mock.calls.some(([first]) => typeof first === "string" && first.includes("ACP_OK"));
|
||||
expect(hasAckLog).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
it("persists ACP child session history to the transcript store", async () => {
|
||||
await withAcpSessionEnvInfo(async ({ storePath }) => {
|
||||
await runAcpTurnWithTextDeltas({ chunks: ["ACP_", "OK"] });
|
||||
expectPersistedAcpTranscript({
|
||||
storePath,
|
||||
userContent: "ping",
|
||||
assistantText: "ACP_OK",
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it("preserves exact ACP transcript text without trimming whitespace", async () => {
|
||||
await withAcpSessionEnvInfo(async ({ storePath }) => {
|
||||
await runAcpTurnWithTextDeltas({
|
||||
message: " ping\n",
|
||||
chunks: [" ACP_OK\n"],
|
||||
});
|
||||
expectPersistedAcpTranscript({
|
||||
storePath,
|
||||
userContent: " ping\n",
|
||||
assistantText: " ACP_OK\n",
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it("suppresses ACP NO_REPLY lead fragments before emitting assistant text", async () => {
|
||||
it("streams ACP visible text deltas", async () => {
|
||||
await withAcpSessionEnv(async () => {
|
||||
const { assistantEvents, logLines } = await runAcpTurnWithAssistantEvents([
|
||||
"NO",
|
||||
"NO_",
|
||||
"NO_RE",
|
||||
"NO_REPLY",
|
||||
"Actual answer",
|
||||
const repeated = await runAcpTurnWithAssistantEvents(["bo", "ok"]);
|
||||
|
||||
expect(repeated.assistantEvents).toEqual([
|
||||
{ text: "bo", delta: "bo" },
|
||||
{ text: "book", delta: "ok" },
|
||||
]);
|
||||
|
||||
expect(assistantEvents).toEqual([{ text: "Actual answer", delta: "Actual answer" }]);
|
||||
expect(logLines.some((line) => line.includes("NO_REPLY"))).toBe(false);
|
||||
expect(logLines.some((line) => line.includes("Actual answer"))).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
it("keeps silent-only ACP turns out of assistant output", async () => {
|
||||
await withAcpSessionEnv(async () => {
|
||||
const { assistantEvents, logLines } = await runAcpTurnWithAssistantEvents([
|
||||
"NO",
|
||||
"NO_",
|
||||
"NO_RE",
|
||||
"NO_REPLY",
|
||||
]);
|
||||
expect(assistantEvents.map((event) => event.text).filter(Boolean)).toEqual([]);
|
||||
expect(logLines.some((line) => line.includes("NO_REPLY"))).toBe(false);
|
||||
expect(logLines.some((line) => line.includes("No reply from agent."))).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
it("preserves repeated identical ACP delta chunks", async () => {
|
||||
await withAcpSessionEnv(async () => {
|
||||
const { assistantEvents, logLines } = await runAcpTurnWithAssistantEvents([
|
||||
"b",
|
||||
"o",
|
||||
"o",
|
||||
"k",
|
||||
]);
|
||||
|
||||
expect(assistantEvents).toEqual([
|
||||
{ text: "b", delta: "b" },
|
||||
{ text: "bo", delta: "o" },
|
||||
{ text: "boo", delta: "o" },
|
||||
{ text: "book", delta: "k" },
|
||||
]);
|
||||
expect(logLines.some((line) => line.includes("book"))).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
it("re-emits buffered NO prefix when ACP text becomes visible content", async () => {
|
||||
await withAcpSessionEnv(async () => {
|
||||
const { assistantEvents, logLines } = await runAcpTurnWithAssistantEvents(["NO", "W"]);
|
||||
|
||||
expect(assistantEvents).toEqual([{ text: "NOW", delta: "NOW" }]);
|
||||
expect(logLines.some((line) => line.includes("NOW"))).toBe(true);
|
||||
expect(repeated.logLines.some((line) => line.includes("book"))).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -370,16 +385,12 @@ describe("agentCommand ACP runtime routing", () => {
|
||||
fs.mkdirSync(path.dirname(storePath), { recursive: true });
|
||||
fs.writeFileSync(
|
||||
storePath,
|
||||
JSON.stringify(
|
||||
{
|
||||
"agent:codex:acp:stale": {
|
||||
sessionId: "stale-1",
|
||||
updatedAt: Date.now(),
|
||||
},
|
||||
JSON.stringify({
|
||||
"agent:codex:acp:stale": {
|
||||
sessionId: "stale-1",
|
||||
updatedAt: Date.now(),
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
}),
|
||||
);
|
||||
mockConfig(home, storePath);
|
||||
|
||||
@@ -390,10 +401,9 @@ describe("agentCommand ACP runtime routing", () => {
|
||||
return {
|
||||
kind: "stale",
|
||||
sessionKey,
|
||||
error: new AcpRuntimeError(
|
||||
"ACP_SESSION_INIT_FAILED",
|
||||
`ACP metadata is missing for session ${sessionKey}.`,
|
||||
),
|
||||
error: Object.assign(new Error(`ACP metadata is missing for session ${sessionKey}.`), {
|
||||
code: "ACP_SESSION_INIT_FAILED",
|
||||
}),
|
||||
};
|
||||
},
|
||||
});
|
||||
@@ -409,19 +419,13 @@ describe("agentCommand ACP runtime routing", () => {
|
||||
});
|
||||
});
|
||||
|
||||
it.each([
|
||||
{
|
||||
name: "blocks ACP turns when ACP is disabled by policy",
|
||||
acpOverrides: { enabled: false } satisfies Partial<NonNullable<OpenClawConfig["acp"]>>,
|
||||
},
|
||||
{
|
||||
name: "blocks ACP turns when ACP dispatch is disabled by policy",
|
||||
acpOverrides: {
|
||||
dispatch: { enabled: false },
|
||||
} satisfies Partial<NonNullable<OpenClawConfig["acp"]>>,
|
||||
},
|
||||
])("$name", async ({ acpOverrides }) => {
|
||||
await runAcpSessionWithPolicyOverrides({ acpOverrides });
|
||||
it("blocks ACP turns when disabled by policy", async () => {
|
||||
for (const acpOverrides of [
|
||||
{ enabled: false },
|
||||
{ dispatch: { enabled: false } },
|
||||
] satisfies Array<Partial<NonNullable<OpenClawConfig["acp"]>>>) {
|
||||
await runAcpSessionWithPolicyOverrides({ acpOverrides });
|
||||
}
|
||||
});
|
||||
|
||||
it("blocks ACP turns when ACP agent is disallowed by policy", async () => {
|
||||
|
||||
@@ -9,7 +9,10 @@ import type { OpenClawConfig } from "../config/types.openclaw.js";
|
||||
import { buildOutboundSessionContext } from "../infra/outbound/session-context.js";
|
||||
|
||||
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
|
||||
return withTempHomeBase(fn, { prefix: "openclaw-agent-session-" });
|
||||
return withTempHomeBase(fn, {
|
||||
prefix: "openclaw-agent-session-",
|
||||
skipSessionCleanup: true,
|
||||
});
|
||||
}
|
||||
|
||||
function mockConfig(
|
||||
@@ -35,7 +38,7 @@ function writeSessionStoreSeed(
|
||||
sessions: Record<string, Record<string, unknown>>,
|
||||
) {
|
||||
fs.mkdirSync(path.dirname(storePath), { recursive: true });
|
||||
fs.writeFileSync(storePath, JSON.stringify(sessions, null, 2));
|
||||
fs.writeFileSync(storePath, JSON.stringify(sessions));
|
||||
}
|
||||
|
||||
async function withCrossAgentResumeFixture(
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,194 +0,0 @@
|
||||
import { randomUUID } from "node:crypto";
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { updateSessionStoreAfterAgentRun } from "../../agents/command/session-store.js";
|
||||
import { resolveSession } from "../../agents/command/session.js";
|
||||
import type { SessionEntry } from "../../config/sessions.js";
|
||||
import { loadSessionStore } from "../../config/sessions.js";
|
||||
|
||||
function acpMeta() {
|
||||
return {
|
||||
backend: "acpx",
|
||||
agent: "codex",
|
||||
runtimeSessionName: "runtime-1",
|
||||
mode: "persistent" as const,
|
||||
state: "idle" as const,
|
||||
lastActivityAt: Date.now(),
|
||||
};
|
||||
}
|
||||
|
||||
describe("updateSessionStoreAfterAgentRun", () => {
|
||||
it("preserves ACP metadata when caller has a stale session snapshot", async () => {
|
||||
const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-session-store-"));
|
||||
const storePath = path.join(dir, "sessions.json");
|
||||
const sessionKey = `agent:codex:acp:${randomUUID()}`;
|
||||
const sessionId = randomUUID();
|
||||
|
||||
const existing: SessionEntry = {
|
||||
sessionId,
|
||||
updatedAt: Date.now(),
|
||||
acp: acpMeta(),
|
||||
};
|
||||
await fs.writeFile(storePath, JSON.stringify({ [sessionKey]: existing }, null, 2), "utf8");
|
||||
|
||||
const staleInMemory: Record<string, SessionEntry> = {
|
||||
[sessionKey]: {
|
||||
sessionId,
|
||||
updatedAt: Date.now(),
|
||||
},
|
||||
};
|
||||
|
||||
await updateSessionStoreAfterAgentRun({
|
||||
cfg: {} as never,
|
||||
sessionId,
|
||||
sessionKey,
|
||||
storePath,
|
||||
sessionStore: staleInMemory,
|
||||
defaultProvider: "openai",
|
||||
defaultModel: "gpt-5.4",
|
||||
result: {
|
||||
payloads: [],
|
||||
meta: {
|
||||
aborted: false,
|
||||
agentMeta: {
|
||||
provider: "openai",
|
||||
model: "gpt-5.4",
|
||||
},
|
||||
},
|
||||
} as never,
|
||||
});
|
||||
|
||||
const persisted = loadSessionStore(storePath, { skipCache: true })[sessionKey];
|
||||
expect(persisted?.acp).toBeDefined();
|
||||
expect(staleInMemory[sessionKey]?.acp).toBeDefined();
|
||||
});
|
||||
|
||||
it("persists latest systemPromptReport for downstream warning dedupe", async () => {
|
||||
const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-session-store-"));
|
||||
const storePath = path.join(dir, "sessions.json");
|
||||
const sessionKey = `agent:codex:report:${randomUUID()}`;
|
||||
const sessionId = randomUUID();
|
||||
|
||||
const sessionStore: Record<string, SessionEntry> = {
|
||||
[sessionKey]: {
|
||||
sessionId,
|
||||
updatedAt: Date.now(),
|
||||
},
|
||||
};
|
||||
await fs.writeFile(storePath, JSON.stringify(sessionStore, null, 2), "utf8");
|
||||
|
||||
const report = {
|
||||
source: "run" as const,
|
||||
generatedAt: Date.now(),
|
||||
bootstrapTruncation: {
|
||||
warningMode: "once" as const,
|
||||
warningSignaturesSeen: ["sig-a", "sig-b"],
|
||||
},
|
||||
systemPrompt: {
|
||||
chars: 1,
|
||||
projectContextChars: 1,
|
||||
nonProjectContextChars: 0,
|
||||
},
|
||||
injectedWorkspaceFiles: [],
|
||||
skills: { promptChars: 0, entries: [] },
|
||||
tools: { listChars: 0, schemaChars: 0, entries: [] },
|
||||
};
|
||||
|
||||
await updateSessionStoreAfterAgentRun({
|
||||
cfg: {} as never,
|
||||
sessionId,
|
||||
sessionKey,
|
||||
storePath,
|
||||
sessionStore,
|
||||
defaultProvider: "openai",
|
||||
defaultModel: "gpt-5.4",
|
||||
result: {
|
||||
payloads: [],
|
||||
meta: {
|
||||
agentMeta: {
|
||||
provider: "openai",
|
||||
model: "gpt-5.4",
|
||||
},
|
||||
systemPromptReport: report,
|
||||
},
|
||||
} as never,
|
||||
});
|
||||
|
||||
const persisted = loadSessionStore(storePath, { skipCache: true })[sessionKey];
|
||||
expect(persisted?.systemPromptReport?.bootstrapTruncation?.warningSignaturesSeen).toEqual([
|
||||
"sig-a",
|
||||
"sig-b",
|
||||
]);
|
||||
expect(sessionStore[sessionKey]?.systemPromptReport?.bootstrapTruncation?.warningMode).toBe(
|
||||
"once",
|
||||
);
|
||||
});
|
||||
|
||||
it("stores and reloads the runtime model for explicit session-id-only runs", async () => {
|
||||
const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-session-store-"));
|
||||
const storePath = path.join(dir, "sessions.json");
|
||||
const cfg = {
|
||||
session: {
|
||||
store: storePath,
|
||||
mainKey: "main",
|
||||
},
|
||||
agents: {
|
||||
defaults: {
|
||||
cliBackends: {
|
||||
"claude-cli": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
} as never;
|
||||
|
||||
const first = resolveSession({
|
||||
cfg,
|
||||
sessionId: "explicit-session-123",
|
||||
});
|
||||
|
||||
expect(first.sessionKey).toBe("agent:main:explicit:explicit-session-123");
|
||||
|
||||
await updateSessionStoreAfterAgentRun({
|
||||
cfg,
|
||||
sessionId: first.sessionId,
|
||||
sessionKey: first.sessionKey!,
|
||||
storePath: first.storePath,
|
||||
sessionStore: first.sessionStore!,
|
||||
defaultProvider: "claude-cli",
|
||||
defaultModel: "claude-sonnet-4-6",
|
||||
result: {
|
||||
payloads: [],
|
||||
meta: {
|
||||
agentMeta: {
|
||||
provider: "claude-cli",
|
||||
model: "claude-sonnet-4-6",
|
||||
sessionId: "claude-cli-session-1",
|
||||
cliSessionBinding: {
|
||||
sessionId: "claude-cli-session-1",
|
||||
authEpoch: "auth-epoch-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
} as never,
|
||||
});
|
||||
|
||||
const second = resolveSession({
|
||||
cfg,
|
||||
sessionId: "explicit-session-123",
|
||||
});
|
||||
|
||||
expect(second.sessionKey).toBe(first.sessionKey);
|
||||
expect(second.sessionEntry?.cliSessionBindings?.["claude-cli"]).toEqual({
|
||||
sessionId: "claude-cli-session-1",
|
||||
authEpoch: "auth-epoch-1",
|
||||
});
|
||||
|
||||
const persisted = loadSessionStore(storePath, { skipCache: true })[first.sessionKey!];
|
||||
expect(persisted?.cliSessionBindings?.["claude-cli"]).toEqual({
|
||||
sessionId: "claude-cli-session-1",
|
||||
authEpoch: "auth-epoch-1",
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,5 +1,5 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { createBindingResolverTestPlugin } from "../test-utils/channel-plugins.js";
|
||||
import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import type { ChannelId, ChannelPlugin } from "../channels/plugins/types.public.js";
|
||||
import {
|
||||
loadFreshAgentsBindCommandModuleForTest,
|
||||
readConfigFileSnapshotMock,
|
||||
@@ -9,10 +9,56 @@ import {
|
||||
} from "./agents.bind.test-support.js";
|
||||
import { baseConfigSnapshot } from "./test-runtime-config-helpers.js";
|
||||
|
||||
vi.mock("../channels/plugins/index.js", async () => {
|
||||
const actual = await vi.importActual<typeof import("../channels/plugins/index.js")>(
|
||||
"../channels/plugins/index.js",
|
||||
);
|
||||
vi.mock("../agents/agent-scope.js", () => ({
|
||||
listAgentEntries: (
|
||||
cfg: {
|
||||
agents?: { list?: Array<{ id: string; default?: boolean }> };
|
||||
} | null,
|
||||
) => cfg?.agents?.list ?? [],
|
||||
resolveDefaultAgentId: (
|
||||
cfg: {
|
||||
agents?: { list?: Array<{ id: string; default?: boolean }> };
|
||||
} | null,
|
||||
) => cfg?.agents?.list?.find((agent) => agent.default)?.id ?? "main",
|
||||
}));
|
||||
|
||||
vi.mock("../config/bindings.js", () => ({
|
||||
isRouteBinding: (binding: { match?: unknown }) => Boolean(binding.match),
|
||||
listRouteBindings: (cfg: { bindings?: Array<{ match?: unknown }> }) =>
|
||||
(cfg.bindings ?? []).filter((binding) => Boolean(binding.match)),
|
||||
}));
|
||||
|
||||
type BindingResolverTestPlugin = Pick<ChannelPlugin, "id" | "meta" | "capabilities" | "config"> & {
|
||||
setup?: Pick<NonNullable<ChannelPlugin["setup"]>, "resolveBindingAccountId">;
|
||||
};
|
||||
|
||||
function createBindingResolverTestPlugin(params: {
|
||||
id: ChannelId;
|
||||
config: Partial<ChannelPlugin["config"]>;
|
||||
resolveBindingAccountId?: NonNullable<ChannelPlugin["setup"]>["resolveBindingAccountId"];
|
||||
}): BindingResolverTestPlugin {
|
||||
return {
|
||||
id: params.id,
|
||||
meta: {
|
||||
id: params.id,
|
||||
label: params.id,
|
||||
selectionLabel: params.id,
|
||||
docsPath: `/channels/${params.id}`,
|
||||
blurb: "test stub.",
|
||||
},
|
||||
capabilities: { chatTypes: ["direct"] },
|
||||
config: {
|
||||
listAccountIds: () => ["default"],
|
||||
resolveAccount: () => ({}),
|
||||
...params.config,
|
||||
},
|
||||
...(params.resolveBindingAccountId
|
||||
? { setup: { resolveBindingAccountId: params.resolveBindingAccountId } }
|
||||
: {}),
|
||||
};
|
||||
}
|
||||
|
||||
vi.mock("../channels/plugins/index.js", () => {
|
||||
const knownChannels = new Map([
|
||||
[
|
||||
"discord",
|
||||
@@ -32,21 +78,16 @@ vi.mock("../channels/plugins/index.js", async () => {
|
||||
],
|
||||
]);
|
||||
return {
|
||||
...actual,
|
||||
getChannelPlugin: (channel: string) => {
|
||||
const normalized = channel.trim().toLowerCase();
|
||||
const plugin = knownChannels.get(normalized);
|
||||
if (plugin) {
|
||||
return plugin;
|
||||
}
|
||||
return actual.getChannelPlugin(channel);
|
||||
return knownChannels.get(normalized);
|
||||
},
|
||||
normalizeChannelId: (channel: string) => {
|
||||
const normalized = channel.trim().toLowerCase();
|
||||
if (knownChannels.has(normalized)) {
|
||||
return normalized;
|
||||
}
|
||||
return actual.normalizeChannelId(channel);
|
||||
return undefined;
|
||||
},
|
||||
};
|
||||
});
|
||||
@@ -56,9 +97,12 @@ let agentsBindingsCommand: typeof import("./agents.commands.bind.js").agentsBind
|
||||
let agentsUnbindCommand: typeof import("./agents.commands.bind.js").agentsUnbindCommand;
|
||||
|
||||
describe("agents bind/unbind commands", () => {
|
||||
beforeEach(async () => {
|
||||
beforeAll(async () => {
|
||||
({ agentsBindCommand, agentsBindingsCommand, agentsUnbindCommand } =
|
||||
await loadFreshAgentsBindCommandModuleForTest());
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
resetAgentsBindTestHarness();
|
||||
});
|
||||
|
||||
@@ -97,47 +141,6 @@ describe("agents bind/unbind commands", () => {
|
||||
expect(runtime.exit).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("defaults matrix accountId to the target agent id when omitted", async () => {
|
||||
readConfigFileSnapshotMock.mockResolvedValue({
|
||||
...baseConfigSnapshot,
|
||||
config: {},
|
||||
});
|
||||
|
||||
await agentsBindCommand({ agent: "main", bind: ["matrix"] }, runtime);
|
||||
|
||||
expect(writeConfigFileMock).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
bindings: [
|
||||
{
|
||||
type: "route",
|
||||
agentId: "main",
|
||||
match: { channel: "matrix", accountId: "main" },
|
||||
},
|
||||
],
|
||||
}),
|
||||
);
|
||||
expect(runtime.exit).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("upgrades existing channel-only binding when accountId is later provided", async () => {
|
||||
readConfigFileSnapshotMock.mockResolvedValue({
|
||||
...baseConfigSnapshot,
|
||||
config: {
|
||||
bindings: [{ agentId: "main", match: { channel: "telegram" } }],
|
||||
},
|
||||
});
|
||||
|
||||
await agentsBindCommand({ bind: ["telegram:work"] }, runtime);
|
||||
|
||||
expect(writeConfigFileMock).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
bindings: [{ agentId: "main", match: { channel: "telegram", accountId: "work" } }],
|
||||
}),
|
||||
);
|
||||
expect(runtime.log).toHaveBeenCalledWith("Updated bindings:");
|
||||
expect(runtime.exit).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("unbinds all routes for an agent", async () => {
|
||||
readConfigFileSnapshotMock.mockResolvedValue({
|
||||
...baseConfigSnapshot,
|
||||
@@ -175,47 +178,4 @@ describe("agents bind/unbind commands", () => {
|
||||
expect(runtime.error).toHaveBeenCalledWith("Bindings are owned by another agent:");
|
||||
expect(runtime.exit).toHaveBeenCalledWith(1);
|
||||
});
|
||||
|
||||
it("keeps role-based bindings when removing channel-level discord binding", async () => {
|
||||
readConfigFileSnapshotMock.mockResolvedValue({
|
||||
...baseConfigSnapshot,
|
||||
config: {
|
||||
bindings: [
|
||||
{
|
||||
agentId: "main",
|
||||
match: {
|
||||
channel: "discord",
|
||||
accountId: "guild-a",
|
||||
roles: ["111", "222"],
|
||||
},
|
||||
},
|
||||
{
|
||||
agentId: "main",
|
||||
match: {
|
||||
channel: "discord",
|
||||
accountId: "guild-a",
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
});
|
||||
|
||||
await agentsUnbindCommand({ bind: ["discord:guild-a"] }, runtime);
|
||||
|
||||
expect(writeConfigFileMock).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
bindings: [
|
||||
{
|
||||
agentId: "main",
|
||||
match: {
|
||||
channel: "discord",
|
||||
accountId: "guild-a",
|
||||
roles: ["111", "222"],
|
||||
},
|
||||
},
|
||||
],
|
||||
}),
|
||||
);
|
||||
expect(runtime.exit).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import type { Mock } from "vitest";
|
||||
import { vi } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/types.openclaw.js";
|
||||
import { mergeMockedModule } from "../test-utils/vitest-module-mocks.js";
|
||||
import { createTestRuntime } from "./test-runtime-config-helpers.js";
|
||||
|
||||
type ReplaceConfigFileResult = Awaited<
|
||||
@@ -24,17 +23,22 @@ export const replaceConfigFileMock: Mock<(...args: unknown[]) => Promise<unknown
|
||||
},
|
||||
) as Mock<(...args: unknown[]) => Promise<unknown>>;
|
||||
|
||||
vi.mock("../config/config.js", async () => {
|
||||
const actual = await vi.importActual<typeof import("../config/config.js")>("../config/config.js");
|
||||
return await mergeMockedModule(actual, () => ({
|
||||
readConfigFileSnapshot: (...args: Parameters<typeof actual.readConfigFileSnapshot>) =>
|
||||
readConfigFileSnapshotMock(...args) as ReturnType<typeof actual.readConfigFileSnapshot>,
|
||||
writeConfigFile: (...args: Parameters<typeof actual.writeConfigFile>) =>
|
||||
writeConfigFileMock(...args) as ReturnType<typeof actual.writeConfigFile>,
|
||||
replaceConfigFile: (...args: Parameters<typeof actual.replaceConfigFile>) =>
|
||||
replaceConfigFileMock(...args) as ReturnType<typeof actual.replaceConfigFile>,
|
||||
}));
|
||||
});
|
||||
vi.mock("../config/config.js", () => ({
|
||||
readConfigFileSnapshot: (...args: unknown[]) => readConfigFileSnapshotMock(...args),
|
||||
writeConfigFile: (...args: unknown[]) => writeConfigFileMock(...args),
|
||||
replaceConfigFile: (...args: unknown[]) => replaceConfigFileMock(...args),
|
||||
}));
|
||||
|
||||
vi.mock("./agents.command-shared.js", () => ({
|
||||
createQuietRuntime: <T>(runtime: T) => runtime,
|
||||
requireValidConfig: async () => {
|
||||
const snapshot = (await readConfigFileSnapshotMock()) as
|
||||
| { config?: OpenClawConfig; sourceConfig?: OpenClawConfig }
|
||||
| undefined;
|
||||
return snapshot?.sourceConfig ?? snapshot?.config ?? null;
|
||||
},
|
||||
requireValidConfigFileSnapshot: async () => readConfigFileSnapshotMock(),
|
||||
}));
|
||||
|
||||
export const runtime = createTestRuntime();
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { resolveDefaultAgentId } from "../agents/agent-scope.js";
|
||||
import { listAgentEntries, resolveDefaultAgentId } from "../agents/agent-scope.js";
|
||||
import { isRouteBinding, listRouteBindings } from "../config/bindings.js";
|
||||
import { replaceConfigFile } from "../config/config.js";
|
||||
import { logConfigUpdated } from "../config/logging.js";
|
||||
@@ -6,14 +6,7 @@ import type { AgentRouteBinding } from "../config/types.js";
|
||||
import { normalizeAgentId } from "../routing/session-key.js";
|
||||
import { type RuntimeEnv, writeRuntimeJson } from "../runtime.js";
|
||||
import { defaultRuntime } from "../runtime.js";
|
||||
import {
|
||||
applyAgentBindings,
|
||||
describeBinding,
|
||||
parseBindingSpecs,
|
||||
removeAgentBindings,
|
||||
} from "./agents.bindings.js";
|
||||
import { requireValidConfig, requireValidConfigFileSnapshot } from "./agents.command-shared.js";
|
||||
import { buildAgentSummaries } from "./agents.config.js";
|
||||
|
||||
type AgentsBindingsListOptions = {
|
||||
agent?: string;
|
||||
@@ -33,6 +26,24 @@ type AgentsUnbindOptions = {
|
||||
json?: boolean;
|
||||
};
|
||||
|
||||
function describeBinding(binding: AgentRouteBinding): string {
|
||||
const match = binding.match;
|
||||
const parts = [match.channel];
|
||||
if (match.accountId) {
|
||||
parts.push(`accountId=${match.accountId}`);
|
||||
}
|
||||
if (match.peer) {
|
||||
parts.push(`peer=${match.peer.kind}:${match.peer.id}`);
|
||||
}
|
||||
if (match.guildId) {
|
||||
parts.push(`guild=${match.guildId}`);
|
||||
}
|
||||
if (match.teamId) {
|
||||
parts.push(`team=${match.teamId}`);
|
||||
}
|
||||
return parts.join(" ");
|
||||
}
|
||||
|
||||
function resolveAgentId(
|
||||
cfg: Awaited<ReturnType<typeof requireValidConfig>>,
|
||||
agentInput: string | undefined,
|
||||
@@ -54,7 +65,12 @@ function hasAgent(cfg: Awaited<ReturnType<typeof requireValidConfig>>, agentId:
|
||||
if (!cfg) {
|
||||
return false;
|
||||
}
|
||||
return buildAgentSummaries(cfg).some((summary) => summary.id === agentId);
|
||||
const targetAgentId = normalizeAgentId(agentId);
|
||||
const agents = listAgentEntries(cfg);
|
||||
if (agents.length === 0) {
|
||||
return targetAgentId === normalizeAgentId(resolveDefaultAgentId(cfg));
|
||||
}
|
||||
return agents.some((agent) => normalizeAgentId(agent.id) === targetAgentId);
|
||||
}
|
||||
|
||||
function formatBindingOwnerLine(binding: AgentRouteBinding): string {
|
||||
@@ -90,13 +106,16 @@ function formatBindingConflicts(
|
||||
);
|
||||
}
|
||||
|
||||
function resolveParsedBindingsOrExit(params: {
|
||||
async function resolveParsedBindingsOrExit(params: {
|
||||
runtime: RuntimeEnv;
|
||||
cfg: NonNullable<Awaited<ReturnType<typeof requireValidConfig>>>;
|
||||
agentId: string;
|
||||
bindValues: string[] | undefined;
|
||||
emptyMessage: string;
|
||||
}): ReturnType<typeof parseBindingSpecs> | null {
|
||||
}): Promise<{
|
||||
bindings: AgentRouteBinding[];
|
||||
errors: string[];
|
||||
} | null> {
|
||||
const specs = (params.bindValues ?? []).map((value) => value.trim()).filter(Boolean);
|
||||
if (specs.length === 0) {
|
||||
params.runtime.error(params.emptyMessage);
|
||||
@@ -104,6 +123,7 @@ function resolveParsedBindingsOrExit(params: {
|
||||
return null;
|
||||
}
|
||||
|
||||
const { parseBindingSpecs } = await import("./agents.bindings.js");
|
||||
const parsed = parseBindingSpecs({ agentId: params.agentId, specs, config: params.cfg });
|
||||
if (parsed.errors.length > 0) {
|
||||
params.runtime.error(parsed.errors.join("\n"));
|
||||
@@ -217,7 +237,7 @@ export async function agentsBindCommand(
|
||||
}
|
||||
const { cfg, agentId, baseHash } = resolved;
|
||||
|
||||
const parsed = resolveParsedBindingsOrExit({
|
||||
const parsed = await resolveParsedBindingsOrExit({
|
||||
runtime,
|
||||
cfg,
|
||||
agentId,
|
||||
@@ -228,6 +248,7 @@ export async function agentsBindCommand(
|
||||
return;
|
||||
}
|
||||
|
||||
const { applyAgentBindings } = await import("./agents.bindings.js");
|
||||
const result = applyAgentBindings(cfg, parsed.bindings);
|
||||
if (result.added.length > 0 || result.updated.length > 0) {
|
||||
await replaceConfigFile({
|
||||
@@ -336,7 +357,7 @@ export async function agentsUnbindCommand(
|
||||
return;
|
||||
}
|
||||
|
||||
const parsed = resolveParsedBindingsOrExit({
|
||||
const parsed = await resolveParsedBindingsOrExit({
|
||||
runtime,
|
||||
cfg,
|
||||
agentId,
|
||||
@@ -347,6 +368,7 @@ export async function agentsUnbindCommand(
|
||||
return;
|
||||
}
|
||||
|
||||
const { removeAgentBindings } = await import("./agents.bindings.js");
|
||||
const result = removeAgentBindings(cfg, parsed.bindings);
|
||||
if (result.removed.length > 0) {
|
||||
await replaceConfigFile({
|
||||
|
||||
@@ -15,6 +15,11 @@ const resolveManifestProviderAuthChoices = vi.hoisted(() =>
|
||||
const resolveProviderWizardOptions = vi.hoisted(() =>
|
||||
vi.fn<() => ProviderWizardOption[]>(() => []),
|
||||
);
|
||||
const resolveLegacyAuthChoiceAliasesForCli = vi.hoisted(() => vi.fn<() => string[]>(() => []));
|
||||
|
||||
vi.mock("./auth-choice-legacy.js", () => ({
|
||||
resolveLegacyAuthChoiceAliasesForCli,
|
||||
}));
|
||||
|
||||
function includesOnboardingScope(
|
||||
scopes: readonly ("text-inference" | "image-generation")[] | undefined,
|
||||
@@ -90,6 +95,7 @@ describe("buildAuthChoiceOptions", () => {
|
||||
beforeEach(() => {
|
||||
resolveManifestProviderAuthChoices.mockReturnValue([]);
|
||||
resolveProviderWizardOptions.mockReturnValue([]);
|
||||
resolveLegacyAuthChoiceAliasesForCli.mockReturnValue([]);
|
||||
});
|
||||
|
||||
it("includes core and provider-specific auth choices", () => {
|
||||
@@ -316,24 +322,7 @@ describe("buildAuthChoiceOptions", () => {
|
||||
});
|
||||
|
||||
it("can include legacy aliases in cli help choices", () => {
|
||||
resolveManifestProviderAuthChoices.mockReturnValue([
|
||||
{
|
||||
pluginId: "anthropic",
|
||||
providerId: "anthropic",
|
||||
methodId: "cli",
|
||||
choiceId: "anthropic-cli",
|
||||
choiceLabel: "Anthropic Claude CLI",
|
||||
deprecatedChoiceIds: ["claude-cli"],
|
||||
},
|
||||
{
|
||||
pluginId: "openai",
|
||||
providerId: "openai-codex",
|
||||
methodId: "oauth",
|
||||
choiceId: "openai-codex",
|
||||
choiceLabel: "OpenAI Codex (ChatGPT OAuth)",
|
||||
deprecatedChoiceIds: ["codex-cli"],
|
||||
},
|
||||
]);
|
||||
resolveLegacyAuthChoiceAliasesForCli.mockReturnValue(["claude-cli", "codex-cli"]);
|
||||
|
||||
const cliChoices = formatAuthChoiceChoicesForCli({
|
||||
includeLegacyAliases: true,
|
||||
@@ -377,7 +366,7 @@ describe("buildAuthChoiceOptions", () => {
|
||||
expect(cliChoices).toContain("skip");
|
||||
});
|
||||
|
||||
it("shows Chutes in grouped provider selection", () => {
|
||||
it("shows plugin and wizard providers in grouped selection", () => {
|
||||
resolveManifestProviderAuthChoices.mockReturnValue([
|
||||
{
|
||||
pluginId: "chutes",
|
||||
@@ -388,19 +377,6 @@ describe("buildAuthChoiceOptions", () => {
|
||||
groupId: "chutes",
|
||||
groupLabel: "Chutes",
|
||||
},
|
||||
]);
|
||||
const { groups } = buildAuthChoiceGroups({
|
||||
store: EMPTY_STORE,
|
||||
includeSkip: false,
|
||||
});
|
||||
const chutesGroup = groups.find((group) => group.value === "chutes");
|
||||
|
||||
expect(chutesGroup).toBeDefined();
|
||||
expect(chutesGroup?.options.some((opt) => opt.value === "chutes")).toBe(true);
|
||||
});
|
||||
|
||||
it("shows LiteLLM in grouped provider selection", () => {
|
||||
resolveManifestProviderAuthChoices.mockReturnValue([
|
||||
{
|
||||
pluginId: "litellm",
|
||||
providerId: "litellm",
|
||||
@@ -411,14 +387,29 @@ describe("buildAuthChoiceOptions", () => {
|
||||
groupLabel: "LiteLLM",
|
||||
},
|
||||
]);
|
||||
resolveProviderWizardOptions.mockReturnValue([
|
||||
{
|
||||
value: "ollama",
|
||||
label: "Ollama",
|
||||
hint: "Cloud and local open models",
|
||||
groupId: "ollama",
|
||||
groupLabel: "Ollama",
|
||||
},
|
||||
]);
|
||||
const { groups } = buildAuthChoiceGroups({
|
||||
store: EMPTY_STORE,
|
||||
includeSkip: false,
|
||||
});
|
||||
const chutesGroup = groups.find((group) => group.value === "chutes");
|
||||
const litellmGroup = groups.find((group) => group.value === "litellm");
|
||||
const ollamaGroup = groups.find((group) => group.value === "ollama");
|
||||
|
||||
expect(chutesGroup).toBeDefined();
|
||||
expect(chutesGroup?.options.some((opt) => opt.value === "chutes")).toBe(true);
|
||||
expect(litellmGroup).toBeDefined();
|
||||
expect(litellmGroup?.options.some((opt) => opt.value === "litellm-api-key")).toBe(true);
|
||||
expect(ollamaGroup).toBeDefined();
|
||||
expect(ollamaGroup?.options.some((opt) => opt.value === "ollama")).toBe(true);
|
||||
});
|
||||
|
||||
it("prefers Anthropic Claude CLI over API key in grouped selection", () => {
|
||||
@@ -488,27 +479,6 @@ describe("buildAuthChoiceOptions", () => {
|
||||
expect(openCodeGroup?.options.some((opt) => opt.value === "opencode-go")).toBe(true);
|
||||
});
|
||||
|
||||
it("shows Ollama in grouped provider selection", () => {
|
||||
resolveManifestProviderAuthChoices.mockReturnValue([]);
|
||||
resolveProviderWizardOptions.mockReturnValue([
|
||||
{
|
||||
value: "ollama",
|
||||
label: "Ollama",
|
||||
hint: "Cloud and local open models",
|
||||
groupId: "ollama",
|
||||
groupLabel: "Ollama",
|
||||
},
|
||||
]);
|
||||
const { groups } = buildAuthChoiceGroups({
|
||||
store: EMPTY_STORE,
|
||||
includeSkip: false,
|
||||
});
|
||||
const ollamaGroup = groups.find((group) => group.value === "ollama");
|
||||
|
||||
expect(ollamaGroup).toBeDefined();
|
||||
expect(ollamaGroup?.options.some((opt) => opt.value === "ollama")).toBe(true);
|
||||
});
|
||||
|
||||
it("hides image-generation-only providers from the interactive auth picker", () => {
|
||||
resolveManifestProviderAuthChoices.mockReturnValue([
|
||||
{
|
||||
|
||||
@@ -1,440 +0,0 @@
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import type { WizardPrompter } from "../wizard/prompts.js";
|
||||
import {
|
||||
ensureApiKeyFromOptionEnvOrPrompt,
|
||||
ensureApiKeyFromEnvOrPrompt,
|
||||
maybeApplyApiKeyFromOption,
|
||||
normalizeTokenProviderInput,
|
||||
} from "./auth-choice.apply-helpers.js";
|
||||
|
||||
const ORIGINAL_MINIMAX_API_KEY = process.env.MINIMAX_API_KEY;
|
||||
const ORIGINAL_MINIMAX_OAUTH_TOKEN = process.env.MINIMAX_OAUTH_TOKEN;
|
||||
|
||||
function restoreMinimaxEnv(): void {
|
||||
if (ORIGINAL_MINIMAX_API_KEY === undefined) {
|
||||
delete process.env.MINIMAX_API_KEY;
|
||||
} else {
|
||||
process.env.MINIMAX_API_KEY = ORIGINAL_MINIMAX_API_KEY;
|
||||
}
|
||||
if (ORIGINAL_MINIMAX_OAUTH_TOKEN === undefined) {
|
||||
delete process.env.MINIMAX_OAUTH_TOKEN;
|
||||
} else {
|
||||
process.env.MINIMAX_OAUTH_TOKEN = ORIGINAL_MINIMAX_OAUTH_TOKEN;
|
||||
}
|
||||
}
|
||||
|
||||
function createPrompter(params?: {
|
||||
confirm?: WizardPrompter["confirm"];
|
||||
note?: WizardPrompter["note"];
|
||||
select?: WizardPrompter["select"];
|
||||
text?: WizardPrompter["text"];
|
||||
}): WizardPrompter {
|
||||
return {
|
||||
confirm: params?.confirm ?? (vi.fn(async () => true) as WizardPrompter["confirm"]),
|
||||
note: params?.note ?? (vi.fn(async () => undefined) as WizardPrompter["note"]),
|
||||
...(params?.select ? { select: params.select } : {}),
|
||||
text: params?.text ?? (vi.fn(async () => "prompt-key") as WizardPrompter["text"]),
|
||||
} as unknown as WizardPrompter;
|
||||
}
|
||||
|
||||
function createPromptSpies(params?: { confirmResult?: boolean; textResult?: string }) {
|
||||
const confirm = vi.fn(async () => params?.confirmResult ?? true);
|
||||
const note = vi.fn(async () => undefined);
|
||||
const text = vi.fn(async () => params?.textResult ?? "prompt-key");
|
||||
return { confirm, note, text };
|
||||
}
|
||||
|
||||
function createPromptAndCredentialSpies(params?: { confirmResult?: boolean; textResult?: string }) {
|
||||
return {
|
||||
...createPromptSpies(params),
|
||||
setCredential: vi.fn(async () => undefined),
|
||||
};
|
||||
}
|
||||
|
||||
function setMinimaxEnv(params: { apiKey?: string; oauthToken?: string } = {}) {
|
||||
if (params.apiKey === undefined) {
|
||||
delete process.env.MINIMAX_API_KEY;
|
||||
} else {
|
||||
process.env.MINIMAX_API_KEY = params.apiKey; // pragma: allowlist secret
|
||||
}
|
||||
if (params.oauthToken === undefined) {
|
||||
delete process.env.MINIMAX_OAUTH_TOKEN;
|
||||
} else {
|
||||
process.env.MINIMAX_OAUTH_TOKEN = params.oauthToken; // pragma: allowlist secret
|
||||
}
|
||||
}
|
||||
|
||||
async function ensureMinimaxApiKey(params: {
|
||||
config?: Parameters<typeof ensureApiKeyFromEnvOrPrompt>[0]["config"];
|
||||
env?: Parameters<typeof ensureApiKeyFromEnvOrPrompt>[0]["env"];
|
||||
confirm: WizardPrompter["confirm"];
|
||||
note?: WizardPrompter["note"];
|
||||
select?: WizardPrompter["select"];
|
||||
text: WizardPrompter["text"];
|
||||
setCredential: Parameters<typeof ensureApiKeyFromEnvOrPrompt>[0]["setCredential"];
|
||||
secretInputMode?: Parameters<typeof ensureApiKeyFromEnvOrPrompt>[0]["secretInputMode"];
|
||||
}) {
|
||||
return await ensureMinimaxApiKeyInternal({
|
||||
config: params.config,
|
||||
env: params.env,
|
||||
prompter: createPrompter({
|
||||
confirm: params.confirm,
|
||||
note: params.note,
|
||||
select: params.select,
|
||||
text: params.text,
|
||||
}),
|
||||
secretInputMode: params.secretInputMode,
|
||||
setCredential: params.setCredential,
|
||||
});
|
||||
}
|
||||
|
||||
async function ensureMinimaxApiKeyInternal(params: {
|
||||
config?: Parameters<typeof ensureApiKeyFromEnvOrPrompt>[0]["config"];
|
||||
env?: Parameters<typeof ensureApiKeyFromEnvOrPrompt>[0]["env"];
|
||||
prompter: WizardPrompter;
|
||||
secretInputMode?: Parameters<typeof ensureApiKeyFromEnvOrPrompt>[0]["secretInputMode"];
|
||||
setCredential: Parameters<typeof ensureApiKeyFromEnvOrPrompt>[0]["setCredential"];
|
||||
}) {
|
||||
return await ensureApiKeyFromEnvOrPrompt({
|
||||
config: params.config ?? {},
|
||||
env: params.env,
|
||||
provider: "minimax",
|
||||
envLabel: "MINIMAX_API_KEY",
|
||||
promptMessage: "Enter key",
|
||||
normalize: (value) => value.trim(),
|
||||
validate: () => undefined,
|
||||
prompter: params.prompter,
|
||||
secretInputMode: params.secretInputMode,
|
||||
setCredential: params.setCredential,
|
||||
});
|
||||
}
|
||||
|
||||
async function ensureMinimaxApiKeyWithEnvRefPrompter(params: {
|
||||
config?: Parameters<typeof ensureApiKeyFromEnvOrPrompt>[0]["config"];
|
||||
env?: Parameters<typeof ensureApiKeyFromEnvOrPrompt>[0]["env"];
|
||||
note: WizardPrompter["note"];
|
||||
select: WizardPrompter["select"];
|
||||
setCredential: Parameters<typeof ensureApiKeyFromEnvOrPrompt>[0]["setCredential"];
|
||||
text: WizardPrompter["text"];
|
||||
}) {
|
||||
return await ensureMinimaxApiKeyInternal({
|
||||
config: params.config,
|
||||
env: params.env,
|
||||
prompter: createPrompter({ select: params.select, text: params.text, note: params.note }),
|
||||
secretInputMode: "ref", // pragma: allowlist secret
|
||||
setCredential: params.setCredential,
|
||||
});
|
||||
}
|
||||
|
||||
async function runEnsureMinimaxApiKeyFlow(params: { confirmResult: boolean; textResult: string }) {
|
||||
setMinimaxEnv({ apiKey: "env-key" });
|
||||
|
||||
const { confirm, text } = createPromptSpies({
|
||||
confirmResult: params.confirmResult,
|
||||
textResult: params.textResult,
|
||||
});
|
||||
const setCredential = vi.fn(async () => undefined);
|
||||
const result = await ensureMinimaxApiKey({
|
||||
confirm,
|
||||
text,
|
||||
setCredential,
|
||||
});
|
||||
|
||||
return { result, setCredential, confirm, text };
|
||||
}
|
||||
|
||||
async function runMaybeApplyDemoToken(tokenProvider: string) {
|
||||
const setCredential = vi.fn(async () => undefined);
|
||||
const result = await maybeApplyApiKeyFromOption({
|
||||
token: " opt-key ",
|
||||
tokenProvider,
|
||||
expectedProviders: ["demo-provider"],
|
||||
normalize: (value) => value.trim(),
|
||||
setCredential,
|
||||
});
|
||||
return { result, setCredential };
|
||||
}
|
||||
|
||||
function expectMinimaxEnvRefCredentialStored(setCredential: ReturnType<typeof vi.fn>) {
|
||||
expect(setCredential).toHaveBeenCalledWith(
|
||||
{ source: "env", provider: "default", id: "MINIMAX_API_KEY" },
|
||||
"ref",
|
||||
);
|
||||
}
|
||||
|
||||
async function ensureWithOptionEnvOrPrompt(params: {
|
||||
token: string;
|
||||
tokenProvider: string;
|
||||
expectedProviders: string[];
|
||||
provider: string;
|
||||
envLabel: string;
|
||||
confirm: WizardPrompter["confirm"];
|
||||
note: WizardPrompter["note"];
|
||||
noteMessage: string;
|
||||
noteTitle: string;
|
||||
setCredential: Parameters<typeof ensureApiKeyFromOptionEnvOrPrompt>[0]["setCredential"];
|
||||
text: WizardPrompter["text"];
|
||||
}) {
|
||||
return await ensureApiKeyFromOptionEnvOrPrompt({
|
||||
token: params.token,
|
||||
tokenProvider: params.tokenProvider,
|
||||
config: {},
|
||||
expectedProviders: params.expectedProviders,
|
||||
provider: params.provider,
|
||||
envLabel: params.envLabel,
|
||||
promptMessage: "Enter key",
|
||||
normalize: (value) => value.trim(),
|
||||
validate: () => undefined,
|
||||
prompter: createPrompter({ confirm: params.confirm, note: params.note, text: params.text }),
|
||||
setCredential: params.setCredential,
|
||||
noteMessage: params.noteMessage,
|
||||
noteTitle: params.noteTitle,
|
||||
});
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
restoreMinimaxEnv();
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
describe("normalizeTokenProviderInput", () => {
|
||||
it("trims and lowercases non-empty values", () => {
|
||||
expect(normalizeTokenProviderInput(" DeMo-PrOvIdEr ")).toBe("demo-provider");
|
||||
expect(normalizeTokenProviderInput("")).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("maybeApplyApiKeyFromOption", () => {
|
||||
it.each(["demo-provider", " DeMo-PrOvIdEr "])(
|
||||
"stores normalized token when provider %p matches",
|
||||
async (tokenProvider) => {
|
||||
const { result, setCredential } = await runMaybeApplyDemoToken(tokenProvider);
|
||||
|
||||
expect(result).toBe("opt-key");
|
||||
expect(setCredential).toHaveBeenCalledWith("opt-key", undefined);
|
||||
},
|
||||
);
|
||||
|
||||
it("skips when provider does not match", async () => {
|
||||
const setCredential = vi.fn(async () => undefined);
|
||||
|
||||
const result = await maybeApplyApiKeyFromOption({
|
||||
token: "opt-key",
|
||||
tokenProvider: "other-provider",
|
||||
expectedProviders: ["demo-provider"],
|
||||
normalize: (value) => value.trim(),
|
||||
setCredential,
|
||||
});
|
||||
|
||||
expect(result).toBeUndefined();
|
||||
expect(setCredential).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe("ensureApiKeyFromEnvOrPrompt", () => {
|
||||
it("uses env credential when user confirms", async () => {
|
||||
const { result, setCredential, text } = await runEnsureMinimaxApiKeyFlow({
|
||||
confirmResult: true,
|
||||
textResult: "prompt-key",
|
||||
});
|
||||
|
||||
expect(result).toBe("env-key");
|
||||
expect(setCredential).toHaveBeenCalledWith("env-key", "plaintext");
|
||||
expect(text).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("falls back to prompt when env is declined", async () => {
|
||||
const { result, setCredential, text } = await runEnsureMinimaxApiKeyFlow({
|
||||
confirmResult: false,
|
||||
textResult: " prompted-key ",
|
||||
});
|
||||
|
||||
expect(result).toBe("prompted-key");
|
||||
expect(setCredential).toHaveBeenCalledWith("prompted-key", "plaintext");
|
||||
expect(text).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
message: "Enter key",
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("uses explicit inline env ref when secret-input-mode=ref selects existing env key", async () => {
|
||||
setMinimaxEnv({ apiKey: "env-key" });
|
||||
|
||||
const { confirm, text, setCredential } = createPromptAndCredentialSpies({
|
||||
confirmResult: true,
|
||||
textResult: "prompt-key",
|
||||
});
|
||||
|
||||
const result = await ensureMinimaxApiKey({
|
||||
confirm,
|
||||
text,
|
||||
secretInputMode: "ref", // pragma: allowlist secret
|
||||
setCredential,
|
||||
});
|
||||
|
||||
expect(result).toBe("env-key");
|
||||
expectMinimaxEnvRefCredentialStored(setCredential);
|
||||
expect(text).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("fails ref mode without select when fallback env var is missing", async () => {
|
||||
setMinimaxEnv();
|
||||
|
||||
const { confirm, text, setCredential } = createPromptAndCredentialSpies({
|
||||
confirmResult: true,
|
||||
textResult: "prompt-key",
|
||||
});
|
||||
|
||||
await expect(
|
||||
ensureMinimaxApiKey({
|
||||
confirm,
|
||||
text,
|
||||
secretInputMode: "ref", // pragma: allowlist secret
|
||||
setCredential,
|
||||
}),
|
||||
).rejects.toThrow(
|
||||
'Environment variable "MINIMAX_API_KEY" is required for --secret-input-mode ref in non-interactive setup.',
|
||||
);
|
||||
expect(setCredential).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("uses explicit env for ref fallback instead of host process env", async () => {
|
||||
setMinimaxEnv({ apiKey: "host-key" });
|
||||
const env = { MINIMAX_API_KEY: "explicit-key" } as NodeJS.ProcessEnv;
|
||||
|
||||
const { confirm, text, setCredential } = createPromptAndCredentialSpies({
|
||||
confirmResult: true,
|
||||
textResult: "prompt-key",
|
||||
});
|
||||
|
||||
const result = await ensureMinimaxApiKey({
|
||||
confirm,
|
||||
text,
|
||||
env,
|
||||
secretInputMode: "ref", // pragma: allowlist secret
|
||||
setCredential,
|
||||
});
|
||||
|
||||
expect(result).toBe("explicit-key");
|
||||
expectMinimaxEnvRefCredentialStored(setCredential);
|
||||
});
|
||||
|
||||
it("re-prompts after provider ref validation failure and succeeds with env ref", async () => {
|
||||
setMinimaxEnv({ apiKey: "env-key" });
|
||||
|
||||
const selectValues: Array<"provider" | "env" | "filemain"> = ["provider", "filemain", "env"];
|
||||
const select = vi.fn(async () => selectValues.shift() ?? "env") as WizardPrompter["select"];
|
||||
const text = vi
|
||||
.fn<WizardPrompter["text"]>()
|
||||
.mockResolvedValueOnce("/providers/minimax/apiKey")
|
||||
.mockResolvedValueOnce("MINIMAX_API_KEY");
|
||||
const note = vi.fn(async () => undefined);
|
||||
const setCredential = vi.fn(async () => undefined);
|
||||
|
||||
const result = await ensureMinimaxApiKeyWithEnvRefPrompter({
|
||||
config: {
|
||||
secrets: {
|
||||
providers: {
|
||||
filemain: {
|
||||
source: "file",
|
||||
path: "/tmp/does-not-exist-secrets.json",
|
||||
mode: "json",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
select,
|
||||
text,
|
||||
note,
|
||||
setCredential,
|
||||
});
|
||||
|
||||
expect(result).toBe("env-key");
|
||||
expectMinimaxEnvRefCredentialStored(setCredential);
|
||||
expect(note).toHaveBeenCalledWith(
|
||||
expect.stringContaining("Could not validate provider reference"),
|
||||
"Reference check failed",
|
||||
);
|
||||
});
|
||||
|
||||
it("never includes resolved env secret values in reference validation notes", async () => {
|
||||
setMinimaxEnv({ apiKey: "sk-minimax-redacted-value" });
|
||||
|
||||
const select = vi.fn(async () => "env") as WizardPrompter["select"];
|
||||
const text = vi.fn<WizardPrompter["text"]>().mockResolvedValue("MINIMAX_API_KEY");
|
||||
const note = vi.fn(async () => undefined);
|
||||
const setCredential = vi.fn(async () => undefined);
|
||||
|
||||
const result = await ensureMinimaxApiKeyWithEnvRefPrompter({
|
||||
config: {},
|
||||
select,
|
||||
text,
|
||||
note,
|
||||
setCredential,
|
||||
});
|
||||
|
||||
expect(result).toBe("sk-minimax-redacted-value");
|
||||
const noteMessages = note.mock.calls.map((call) => call.at(0) ?? "").join("\n");
|
||||
expect(noteMessages).toContain("Validated environment variable MINIMAX_API_KEY.");
|
||||
expect(noteMessages).not.toContain("sk-minimax-redacted-value");
|
||||
});
|
||||
});
|
||||
|
||||
describe("ensureApiKeyFromOptionEnvOrPrompt", () => {
|
||||
it("uses opts token and skips note/env/prompt", async () => {
|
||||
const { confirm, note, text, setCredential } = createPromptAndCredentialSpies({
|
||||
confirmResult: true,
|
||||
textResult: "prompt-key",
|
||||
});
|
||||
|
||||
const result = await ensureWithOptionEnvOrPrompt({
|
||||
token: " opts-key ",
|
||||
tokenProvider: " DEMO-PROVIDER ",
|
||||
expectedProviders: ["demo-provider"],
|
||||
provider: "demo-provider",
|
||||
envLabel: "DEMO_TOKEN",
|
||||
confirm,
|
||||
note,
|
||||
noteMessage: "Demo note",
|
||||
noteTitle: "Demo",
|
||||
setCredential,
|
||||
text,
|
||||
});
|
||||
|
||||
expect(result).toBe("opts-key");
|
||||
expect(setCredential).toHaveBeenCalledWith("opts-key", undefined);
|
||||
expect(note).not.toHaveBeenCalled();
|
||||
expect(confirm).not.toHaveBeenCalled();
|
||||
expect(text).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("falls back to env flow and shows note when opts provider does not match", async () => {
|
||||
setMinimaxEnv({ apiKey: "env-key" });
|
||||
|
||||
const { confirm, note, text, setCredential } = createPromptAndCredentialSpies({
|
||||
confirmResult: true,
|
||||
textResult: "prompt-key",
|
||||
});
|
||||
|
||||
const result = await ensureWithOptionEnvOrPrompt({
|
||||
token: "opts-key",
|
||||
tokenProvider: "other-provider",
|
||||
expectedProviders: ["minimax"],
|
||||
provider: "minimax",
|
||||
envLabel: "MINIMAX_API_KEY",
|
||||
confirm,
|
||||
note,
|
||||
noteMessage: "Demo provider note",
|
||||
noteTitle: "Demo provider",
|
||||
setCredential,
|
||||
text,
|
||||
});
|
||||
|
||||
expect(result).toBe("env-key");
|
||||
expect(note).toHaveBeenCalledWith("Demo provider note", "Demo provider");
|
||||
expect(confirm).toHaveBeenCalled();
|
||||
expect(text).not.toHaveBeenCalled();
|
||||
expect(setCredential).toHaveBeenCalledWith("env-key", "plaintext");
|
||||
});
|
||||
});
|
||||
@@ -127,6 +127,89 @@ describe("applyAuthChoiceLoadedPluginProvider", () => {
|
||||
expect(runProviderModelSelectedHook).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("keeps provider config patches when default model application is deferred", async () => {
|
||||
const provider: ProviderPlugin = {
|
||||
id: "moonshot",
|
||||
label: "Moonshot",
|
||||
auth: [
|
||||
{
|
||||
id: "api-key-cn",
|
||||
label: "Moonshot API key (.cn)",
|
||||
kind: "api_key",
|
||||
run: async () => ({
|
||||
profiles: [
|
||||
{
|
||||
profileId: "moonshot:default",
|
||||
credential: {
|
||||
type: "api_key",
|
||||
provider: "moonshot",
|
||||
key: "sk-moonshot-cn-test",
|
||||
},
|
||||
},
|
||||
],
|
||||
configPatch: {
|
||||
models: {
|
||||
providers: {
|
||||
moonshot: {
|
||||
api: "openai-completions",
|
||||
baseUrl: "https://api.moonshot.cn/v1",
|
||||
models: [
|
||||
{
|
||||
id: "kimi-k2.5",
|
||||
name: "kimi-k2.5",
|
||||
input: ["text", "image"],
|
||||
reasoning: true,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 128_000,
|
||||
maxTokens: 8192,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
defaultModel: "moonshot/kimi-k2.5",
|
||||
}),
|
||||
},
|
||||
],
|
||||
};
|
||||
resolvePluginProviders.mockReturnValue([provider]);
|
||||
resolveProviderPluginChoice.mockReturnValue({
|
||||
provider,
|
||||
method: provider.auth[0],
|
||||
});
|
||||
|
||||
const result = await applyAuthChoiceLoadedPluginProvider(
|
||||
buildParams({
|
||||
config: {
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "anthropic/claude-opus-4-6" },
|
||||
},
|
||||
},
|
||||
},
|
||||
setDefaultModel: false,
|
||||
}),
|
||||
);
|
||||
|
||||
expect(result?.agentModelOverride).toBe("moonshot/kimi-k2.5");
|
||||
expect(result?.config.agents?.defaults?.model).toEqual({
|
||||
primary: "anthropic/claude-opus-4-6",
|
||||
});
|
||||
expect(result?.config.models?.providers?.moonshot?.baseUrl).toBe("https://api.moonshot.cn/v1");
|
||||
expect(result?.config.models?.providers?.moonshot?.models?.[0]?.input).toContain("image");
|
||||
expect(upsertAuthProfile).toHaveBeenCalledWith({
|
||||
profileId: "moonshot:default",
|
||||
credential: {
|
||||
type: "api_key",
|
||||
provider: "moonshot",
|
||||
key: "sk-moonshot-cn-test",
|
||||
},
|
||||
agentDir: "/tmp/agent",
|
||||
});
|
||||
expect(runProviderModelSelectedHook).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("applies the default model and runs provider post-setup hooks", async () => {
|
||||
const provider = buildProvider();
|
||||
resolvePluginProviders.mockReturnValue([provider]);
|
||||
|
||||
@@ -1,41 +1,66 @@
|
||||
import { applyAuthChoiceLoadedPluginProvider } from "../plugins/provider-auth-choice.js";
|
||||
import { normalizeLegacyOnboardAuthChoice } from "./auth-choice-legacy.js";
|
||||
import { applyAuthChoiceApiProviders } from "./auth-choice.apply.api-providers.js";
|
||||
import { normalizeApiKeyTokenProviderAuthChoice } from "./auth-choice.apply.api-providers.js";
|
||||
import { applyAuthChoiceOAuth } from "./auth-choice.apply.oauth.js";
|
||||
import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.types.js";
|
||||
import type { AuthChoice } from "./onboard-types.js";
|
||||
|
||||
export type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.types.js";
|
||||
|
||||
async function normalizeLegacyChoice(
|
||||
authChoice: AuthChoice | undefined,
|
||||
params: Pick<ApplyAuthChoiceParams, "config" | "env">,
|
||||
): Promise<AuthChoice | undefined> {
|
||||
if (authChoice === "oauth") {
|
||||
return "setup-token";
|
||||
}
|
||||
if (typeof authChoice !== "string" || !authChoice.endsWith("-cli")) {
|
||||
return authChoice;
|
||||
}
|
||||
const { normalizeLegacyOnboardAuthChoice } = await import("./auth-choice-legacy.js");
|
||||
return normalizeLegacyOnboardAuthChoice(authChoice, params);
|
||||
}
|
||||
|
||||
async function normalizeTokenProviderChoice(params: {
|
||||
authChoice: AuthChoice;
|
||||
source: ApplyAuthChoiceParams;
|
||||
}): Promise<AuthChoice> {
|
||||
if (!params.source.opts?.tokenProvider) {
|
||||
return params.authChoice;
|
||||
}
|
||||
if (
|
||||
params.authChoice !== "apiKey" &&
|
||||
params.authChoice !== "token" &&
|
||||
params.authChoice !== "setup-token"
|
||||
) {
|
||||
return params.authChoice;
|
||||
}
|
||||
const { normalizeApiKeyTokenProviderAuthChoice } =
|
||||
await import("./auth-choice.apply.api-providers.js");
|
||||
return normalizeApiKeyTokenProviderAuthChoice({
|
||||
authChoice: params.authChoice,
|
||||
tokenProvider: params.source.opts.tokenProvider,
|
||||
config: params.source.config,
|
||||
env: params.source.env,
|
||||
});
|
||||
}
|
||||
|
||||
export async function applyAuthChoice(
|
||||
params: ApplyAuthChoiceParams,
|
||||
): Promise<ApplyAuthChoiceResult> {
|
||||
const normalizedAuthChoice =
|
||||
normalizeLegacyOnboardAuthChoice(params.authChoice, {
|
||||
(await normalizeLegacyChoice(params.authChoice, {
|
||||
config: params.config,
|
||||
env: params.env,
|
||||
}) ?? params.authChoice;
|
||||
const normalizedProviderAuthChoice = normalizeApiKeyTokenProviderAuthChoice({
|
||||
})) ?? params.authChoice;
|
||||
const normalizedProviderAuthChoice = await normalizeTokenProviderChoice({
|
||||
authChoice: normalizedAuthChoice,
|
||||
tokenProvider: params.opts?.tokenProvider,
|
||||
config: params.config,
|
||||
env: params.env,
|
||||
source: params,
|
||||
});
|
||||
const normalizedParams =
|
||||
normalizedProviderAuthChoice === params.authChoice
|
||||
? params
|
||||
: { ...params, authChoice: normalizedProviderAuthChoice };
|
||||
const handlers: Array<(p: ApplyAuthChoiceParams) => Promise<ApplyAuthChoiceResult | null>> = [
|
||||
applyAuthChoiceLoadedPluginProvider,
|
||||
applyAuthChoiceOAuth,
|
||||
applyAuthChoiceApiProviders,
|
||||
];
|
||||
|
||||
for (const handler of handlers) {
|
||||
const result = await handler(normalizedParams);
|
||||
if (result) {
|
||||
return result;
|
||||
}
|
||||
const result = await applyAuthChoiceLoadedPluginProvider(normalizedParams);
|
||||
if (result) {
|
||||
return result;
|
||||
}
|
||||
|
||||
if (normalizedParams.authChoice === "token" || normalizedParams.authChoice === "setup-token") {
|
||||
|
||||
@@ -1,177 +0,0 @@
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import { resolveAgentModelPrimaryValue } from "../config/model-input.js";
|
||||
import { __testing as providerAuthChoiceTesting } from "../plugins/provider-auth-choice.js";
|
||||
import type { ProviderAuthContext, ProviderPlugin } from "../plugins/types.js";
|
||||
import type { WizardPrompter } from "../wizard/prompts.js";
|
||||
import { applyAuthChoice } from "./auth-choice.js";
|
||||
import {
|
||||
createAuthTestLifecycle,
|
||||
createExitThrowingRuntime,
|
||||
createWizardPrompter,
|
||||
readAuthProfilesForAgent,
|
||||
requireOpenClawAgentDir,
|
||||
setupAuthTestEnv,
|
||||
} from "./test-wizard-helpers.js";
|
||||
|
||||
function createPrompter(overrides: Partial<WizardPrompter>): WizardPrompter {
|
||||
return createWizardPrompter(overrides, { defaultSelect: "" });
|
||||
}
|
||||
|
||||
describe("applyAuthChoice (moonshot)", () => {
|
||||
const lifecycle = createAuthTestLifecycle([
|
||||
"OPENCLAW_STATE_DIR",
|
||||
"OPENCLAW_AGENT_DIR",
|
||||
"PI_CODING_AGENT_DIR",
|
||||
"MOONSHOT_API_KEY",
|
||||
]);
|
||||
|
||||
async function setupTempState() {
|
||||
const env = await setupAuthTestEnv("openclaw-auth-");
|
||||
lifecycle.setStateDir(env.stateDir);
|
||||
delete process.env.MOONSHOT_API_KEY;
|
||||
providerAuthChoiceTesting.setDepsForTest({
|
||||
loadPluginProviderRuntime: async () =>
|
||||
({
|
||||
resolvePluginProviders: () =>
|
||||
[
|
||||
{
|
||||
id: "moonshot",
|
||||
label: "Moonshot",
|
||||
auth: [
|
||||
{
|
||||
id: "api-key-cn",
|
||||
label: "Moonshot API key (.cn)",
|
||||
kind: "api_key",
|
||||
run: async ({ prompter }: ProviderAuthContext) => {
|
||||
const key = await prompter.text({
|
||||
message: "Enter Moonshot API key (.cn)",
|
||||
});
|
||||
return {
|
||||
profiles: [
|
||||
{
|
||||
profileId: "moonshot:default",
|
||||
credential: {
|
||||
type: "api_key",
|
||||
provider: "moonshot",
|
||||
key,
|
||||
},
|
||||
},
|
||||
],
|
||||
configPatch: {
|
||||
models: {
|
||||
providers: {
|
||||
moonshot: {
|
||||
api: "openai-completions",
|
||||
baseUrl: "https://api.moonshot.cn/v1",
|
||||
models: [
|
||||
{
|
||||
id: "kimi-k2.5",
|
||||
name: "kimi-k2.5",
|
||||
input: ["text", "image"],
|
||||
reasoning: true,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 128_000,
|
||||
maxTokens: 8192,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
defaultModel: "moonshot/kimi-k2.5",
|
||||
};
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
] as ProviderPlugin[],
|
||||
resolveProviderPluginChoice: ({
|
||||
choice,
|
||||
providers,
|
||||
}: {
|
||||
choice: string;
|
||||
providers: ProviderPlugin[];
|
||||
}) =>
|
||||
choice === "moonshot-api-key-cn"
|
||||
? { provider: providers[0], method: providers[0]?.auth[0] }
|
||||
: null,
|
||||
runProviderModelSelectedHook: async () => {},
|
||||
}) as never,
|
||||
});
|
||||
}
|
||||
|
||||
async function readAuthProfiles() {
|
||||
return await readAuthProfilesForAgent<{
|
||||
profiles?: Record<string, { key?: string }>;
|
||||
}>(requireOpenClawAgentDir());
|
||||
}
|
||||
|
||||
async function runMoonshotCnFlow(params: {
|
||||
config: Record<string, unknown>;
|
||||
setDefaultModel: boolean;
|
||||
}) {
|
||||
const text = vi.fn().mockResolvedValue("sk-moonshot-cn-test");
|
||||
const prompter = createPrompter({ text: text as unknown as WizardPrompter["text"] });
|
||||
const runtime = createExitThrowingRuntime();
|
||||
const result = await applyAuthChoice({
|
||||
authChoice: "moonshot-api-key-cn",
|
||||
config: params.config,
|
||||
prompter,
|
||||
runtime,
|
||||
setDefaultModel: params.setDefaultModel,
|
||||
});
|
||||
return { result, text };
|
||||
}
|
||||
|
||||
afterEach(async () => {
|
||||
providerAuthChoiceTesting.resetDepsForTest();
|
||||
await lifecycle.cleanup();
|
||||
});
|
||||
|
||||
it("keeps the .cn baseUrl when setDefaultModel is false", async () => {
|
||||
await setupTempState();
|
||||
|
||||
const { result, text } = await runMoonshotCnFlow({
|
||||
config: {
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "anthropic/claude-opus-4-6" },
|
||||
},
|
||||
},
|
||||
},
|
||||
setDefaultModel: false,
|
||||
});
|
||||
|
||||
expect(text).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ message: "Enter Moonshot API key (.cn)" }),
|
||||
);
|
||||
expect(resolveAgentModelPrimaryValue(result.config.agents?.defaults?.model)).toBe(
|
||||
"anthropic/claude-opus-4-6",
|
||||
);
|
||||
expect(result.config.models?.providers?.moonshot?.baseUrl).toBe("https://api.moonshot.cn/v1");
|
||||
expect(result.config.models?.providers?.moonshot?.models?.[0]?.input).toContain("image");
|
||||
expect(result.agentModelOverride).toBe("moonshot/kimi-k2.5");
|
||||
|
||||
const parsed = await readAuthProfiles();
|
||||
expect(parsed.profiles?.["moonshot:default"]?.key).toBe("sk-moonshot-cn-test");
|
||||
});
|
||||
|
||||
it("sets the default model when setDefaultModel is true", async () => {
|
||||
await setupTempState();
|
||||
|
||||
const { result } = await runMoonshotCnFlow({
|
||||
config: {},
|
||||
setDefaultModel: true,
|
||||
});
|
||||
|
||||
expect(resolveAgentModelPrimaryValue(result.config.agents?.defaults?.model)).toBe(
|
||||
"moonshot/kimi-k2.5",
|
||||
);
|
||||
expect(result.config.models?.providers?.moonshot?.baseUrl).toBe("https://api.moonshot.cn/v1");
|
||||
expect(result.config.models?.providers?.moonshot?.models?.[0]?.input).toContain("image");
|
||||
expect(result.agentModelOverride).toBeUndefined();
|
||||
|
||||
const parsed = await readAuthProfiles();
|
||||
expect(parsed.profiles?.["moonshot:default"]?.key).toBe("sk-moonshot-cn-test");
|
||||
});
|
||||
});
|
||||
File diff suppressed because it is too large
Load Diff
@@ -196,38 +196,33 @@ describe("backupVerifyCommand", () => {
|
||||
}
|
||||
});
|
||||
|
||||
it("fails when archive paths contain traversal segments", async () => {
|
||||
const traversalPath = `${TEST_ARCHIVE_ROOT}/payload/../escaped.txt`;
|
||||
await withBrokenArchiveFixture(
|
||||
it("rejects unsafe archive paths", async () => {
|
||||
for (const { tempPrefix, archivePath, error } of [
|
||||
{
|
||||
tempPrefix: "openclaw-backup-traversal-",
|
||||
manifestAssetArchivePath: traversalPath,
|
||||
payloads: [{ fileName: "payload.txt", contents: "payload\n", archivePath: traversalPath }],
|
||||
archivePath: `${TEST_ARCHIVE_ROOT}/payload/../escaped.txt`,
|
||||
error: /path traversal segments/i,
|
||||
},
|
||||
async (archivePath) => {
|
||||
const runtime = createBackupVerifyRuntime();
|
||||
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
|
||||
/path traversal segments/i,
|
||||
);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it("fails when archive paths contain backslashes", async () => {
|
||||
const invalidPath = `${TEST_ARCHIVE_ROOT}/payload\\..\\escaped.txt`;
|
||||
await withBrokenArchiveFixture(
|
||||
{
|
||||
tempPrefix: "openclaw-backup-backslash-",
|
||||
manifestAssetArchivePath: invalidPath,
|
||||
payloads: [{ fileName: "payload.txt", contents: "payload\n", archivePath: invalidPath }],
|
||||
archivePath: `${TEST_ARCHIVE_ROOT}/payload\\..\\escaped.txt`,
|
||||
error: /forward slashes/i,
|
||||
},
|
||||
async (archivePath) => {
|
||||
const runtime = createBackupVerifyRuntime();
|
||||
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
|
||||
/forward slashes/i,
|
||||
);
|
||||
},
|
||||
);
|
||||
]) {
|
||||
await withBrokenArchiveFixture(
|
||||
{
|
||||
tempPrefix,
|
||||
manifestAssetArchivePath: archivePath,
|
||||
payloads: [{ fileName: "payload.txt", contents: "payload\n", archivePath }],
|
||||
},
|
||||
async (brokenArchivePath) => {
|
||||
const runtime = createBackupVerifyRuntime();
|
||||
await expect(
|
||||
backupVerifyCommand(runtime, { archive: brokenArchivePath }),
|
||||
).rejects.toThrow(error);
|
||||
},
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
it("ignores payload manifest.json files when locating the backup manifest", async () => {
|
||||
@@ -302,45 +297,44 @@ describe("backupVerifyCommand", () => {
|
||||
}
|
||||
});
|
||||
|
||||
it("fails when the archive contains duplicate root manifest entries", async () => {
|
||||
it("rejects duplicate manifest and payload entries", async () => {
|
||||
const payloadArchivePath = `${TEST_ARCHIVE_ROOT}/payload/posix/tmp/.openclaw/payload.txt`;
|
||||
await withBrokenArchiveFixture(
|
||||
for (const options of [
|
||||
{
|
||||
tempPrefix: "openclaw-backup-duplicate-manifest-",
|
||||
manifestAssetArchivePath: payloadArchivePath,
|
||||
payloads: [{ fileName: "payload.txt", contents: "payload\n" }],
|
||||
buildTarEntries: ({ manifestPath, payloadPaths }) => [
|
||||
buildTarEntries: ({
|
||||
manifestPath,
|
||||
manifestPath,
|
||||
...payloadPaths,
|
||||
],
|
||||
payloadPaths,
|
||||
}: {
|
||||
manifestPath: string;
|
||||
payloadPaths: string[];
|
||||
}) => [manifestPath, manifestPath, ...payloadPaths],
|
||||
error: /expected exactly one backup manifest entry, found 2/i,
|
||||
},
|
||||
async (archivePath) => {
|
||||
const runtime = createBackupVerifyRuntime();
|
||||
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
|
||||
/expected exactly one backup manifest entry, found 2/i,
|
||||
);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it("fails when the archive contains duplicate payload entries", async () => {
|
||||
const payloadArchivePath = `${TEST_ARCHIVE_ROOT}/payload/posix/tmp/.openclaw/payload.txt`;
|
||||
await withBrokenArchiveFixture(
|
||||
{
|
||||
tempPrefix: "openclaw-backup-duplicate-payload-",
|
||||
manifestAssetArchivePath: payloadArchivePath,
|
||||
payloads: [
|
||||
{ fileName: "payload-a.txt", contents: "payload-a\n", archivePath: payloadArchivePath },
|
||||
{ fileName: "payload-b.txt", contents: "payload-b\n", archivePath: payloadArchivePath },
|
||||
],
|
||||
error: /duplicate entry path/i,
|
||||
},
|
||||
async (archivePath) => {
|
||||
const runtime = createBackupVerifyRuntime();
|
||||
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
|
||||
/duplicate entry path/i,
|
||||
);
|
||||
},
|
||||
);
|
||||
]) {
|
||||
await withBrokenArchiveFixture(
|
||||
{
|
||||
tempPrefix: options.tempPrefix,
|
||||
manifestAssetArchivePath: payloadArchivePath,
|
||||
payloads: options.payloads,
|
||||
buildTarEntries: options.buildTarEntries,
|
||||
},
|
||||
async (archivePath) => {
|
||||
const runtime = createBackupVerifyRuntime();
|
||||
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
|
||||
options.error,
|
||||
);
|
||||
},
|
||||
);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
@@ -321,47 +321,37 @@ describe("backup commands", () => {
|
||||
path.join(tempHome.home, `${buildBackupArchiveRoot(nowMs)}.tar.gz`),
|
||||
);
|
||||
await fs.rm(result.archivePath, { force: true });
|
||||
});
|
||||
|
||||
it("falls back to the home directory when cwd is a symlink into a backed-up source tree", async () => {
|
||||
if (process.platform === "win32") {
|
||||
return;
|
||||
}
|
||||
if (process.platform !== "win32") {
|
||||
const linkParent = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-cwd-link-"));
|
||||
const workspaceLink = path.join(linkParent, "workspace-link");
|
||||
try {
|
||||
await fs.symlink(workspaceDir, workspaceLink);
|
||||
vi.mocked(process.cwd).mockReturnValue(workspaceLink);
|
||||
vi.spyOn(backupShared, "resolveBackupPlanFromDisk").mockResolvedValue(
|
||||
await resolveBackupPlanFromPaths({
|
||||
stateDir,
|
||||
configPath: path.join(stateDir, "openclaw.json"),
|
||||
oauthDir: path.join(stateDir, "credentials"),
|
||||
workspaceDirs: [workspaceDir],
|
||||
includeWorkspace: true,
|
||||
configInsideState: true,
|
||||
oauthInsideState: true,
|
||||
nowMs: Date.UTC(2026, 2, 9, 1, 3, 4),
|
||||
}),
|
||||
);
|
||||
|
||||
const stateDir = path.join(tempHome.home, ".openclaw");
|
||||
const workspaceDir = path.join(stateDir, "workspace");
|
||||
const linkParent = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-cwd-link-"));
|
||||
const workspaceLink = path.join(linkParent, "workspace-link");
|
||||
try {
|
||||
await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8");
|
||||
await fs.mkdir(workspaceDir, { recursive: true });
|
||||
await fs.writeFile(path.join(workspaceDir, "SOUL.md"), "# soul\n", "utf8");
|
||||
await fs.symlink(workspaceDir, workspaceLink);
|
||||
vi.spyOn(process, "cwd").mockReturnValue(workspaceLink);
|
||||
vi.spyOn(backupShared, "resolveBackupPlanFromDisk").mockResolvedValue(
|
||||
await resolveBackupPlanFromPaths({
|
||||
stateDir,
|
||||
configPath: path.join(stateDir, "openclaw.json"),
|
||||
oauthDir: path.join(stateDir, "credentials"),
|
||||
workspaceDirs: [workspaceDir],
|
||||
includeWorkspace: true,
|
||||
configInsideState: true,
|
||||
oauthInsideState: true,
|
||||
nowMs: Date.UTC(2026, 2, 9, 1, 3, 4),
|
||||
}),
|
||||
);
|
||||
|
||||
const runtime = createBackupTestRuntime();
|
||||
|
||||
const nowMs = Date.UTC(2026, 2, 9, 1, 3, 4);
|
||||
const result = await backupCreateCommand(runtime, { nowMs });
|
||||
|
||||
expect(result.archivePath).toBe(
|
||||
path.join(tempHome.home, `${buildBackupArchiveRoot(nowMs)}.tar.gz`),
|
||||
);
|
||||
await fs.rm(result.archivePath, { force: true });
|
||||
} finally {
|
||||
await fs.rm(linkParent, { recursive: true, force: true });
|
||||
const symlinkNowMs = Date.UTC(2026, 2, 9, 1, 3, 4);
|
||||
const symlinkResult = await backupCreateCommand(createBackupTestRuntime(), {
|
||||
nowMs: symlinkNowMs,
|
||||
});
|
||||
expect(symlinkResult.archivePath).toBe(
|
||||
path.join(tempHome.home, `${buildBackupArchiveRoot(symlinkNowMs)}.tar.gz`),
|
||||
);
|
||||
await fs.rm(symlinkResult.archivePath, { force: true });
|
||||
} finally {
|
||||
await fs.rm(linkParent, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
@@ -395,16 +385,12 @@ describe("backup commands", () => {
|
||||
expect(await fs.readFile(existingArchive, "utf8")).toBe("already here");
|
||||
});
|
||||
|
||||
it("fails fast when config is invalid and workspace backup is enabled", async () => {
|
||||
it("handles invalid config according to backup scope", async () => {
|
||||
await withInvalidWorkspaceBackupConfig(async (runtime) => {
|
||||
await expect(backupCreateCommand(runtime, { dryRun: true })).rejects.toThrow(
|
||||
/--no-include-workspace/i,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it("allows explicit partial backups when config is invalid", async () => {
|
||||
await withInvalidWorkspaceBackupConfig(async (runtime) => {
|
||||
const result = await backupCreateCommand(runtime, {
|
||||
dryRun: true,
|
||||
includeWorkspace: false,
|
||||
@@ -412,6 +398,13 @@ describe("backup commands", () => {
|
||||
|
||||
expect(result.includeWorkspace).toBe(false);
|
||||
expect(result.assets.some((asset) => asset.kind === "workspace")).toBe(false);
|
||||
|
||||
const configOnly = await backupCreateCommand(runtime, {
|
||||
dryRun: true,
|
||||
onlyConfig: true,
|
||||
});
|
||||
expect(configOnly.assets).toHaveLength(1);
|
||||
expect(configOnly.assets[0]?.kind).toBe("config");
|
||||
});
|
||||
});
|
||||
|
||||
@@ -447,24 +440,4 @@ describe("backup commands", () => {
|
||||
expect(result.assets).toHaveLength(1);
|
||||
expect(result.assets[0]?.kind).toBe("config");
|
||||
});
|
||||
|
||||
it("allows config-only backups even when the config file is invalid", async () => {
|
||||
const configPath = path.join(tempHome.home, "custom-config.json");
|
||||
process.env.OPENCLAW_CONFIG_PATH = configPath;
|
||||
await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8");
|
||||
|
||||
const runtime = createBackupTestRuntime();
|
||||
|
||||
try {
|
||||
const result = await backupCreateCommand(runtime, {
|
||||
dryRun: true,
|
||||
onlyConfig: true,
|
||||
});
|
||||
|
||||
expect(result.assets).toHaveLength(1);
|
||||
expect(result.assets[0]?.kind).toBe("config");
|
||||
} finally {
|
||||
delete process.env.OPENCLAW_CONFIG_PATH;
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
@@ -5,15 +5,24 @@ import {
|
||||
type BackupCreateResult,
|
||||
} from "../infra/backup-create.js";
|
||||
import { type RuntimeEnv, writeRuntimeJson } from "../runtime.js";
|
||||
import { backupVerifyCommand } from "./backup-verify.js";
|
||||
export type { BackupCreateOptions, BackupCreateResult } from "../infra/backup-create.js";
|
||||
|
||||
type BackupVerifyRuntime = typeof import("./backup-verify.js");
|
||||
|
||||
let backupVerifyRuntimePromise: Promise<BackupVerifyRuntime> | undefined;
|
||||
|
||||
function loadBackupVerifyRuntime(): Promise<BackupVerifyRuntime> {
|
||||
backupVerifyRuntimePromise ??= import("./backup-verify.js");
|
||||
return backupVerifyRuntimePromise;
|
||||
}
|
||||
|
||||
export async function backupCreateCommand(
|
||||
runtime: RuntimeEnv,
|
||||
opts: BackupCreateOptions = {},
|
||||
): Promise<BackupCreateResult> {
|
||||
const result = await createBackupArchive(opts);
|
||||
if (opts.verify && !opts.dryRun) {
|
||||
const { backupVerifyCommand } = await loadBackupVerifyRuntime();
|
||||
await backupVerifyCommand(
|
||||
{
|
||||
...runtime,
|
||||
|
||||
@@ -3,6 +3,10 @@ import type { ChannelPlugin } from "../channels/plugins/types.js";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import { resolveDefaultChannelAccountContext } from "./channel-account-context.js";
|
||||
|
||||
vi.mock("../channels/read-only-account-inspect.js", () => ({
|
||||
inspectReadOnlyChannelAccount: vi.fn(async () => null),
|
||||
}));
|
||||
|
||||
describe("resolveDefaultChannelAccountContext", () => {
|
||||
it("uses enabled/configured defaults when hooks are missing", async () => {
|
||||
const account = { token: "x" };
|
||||
@@ -49,7 +53,7 @@ describe("resolveDefaultChannelAccountContext", () => {
|
||||
expect(result.degraded).toBe(false);
|
||||
});
|
||||
|
||||
it("keeps strict mode fail-closed when resolveAccount throws", async () => {
|
||||
it("keeps strict mode fail-closed and degrades read_only mode when resolveAccount throws", async () => {
|
||||
const plugin = {
|
||||
id: "demo",
|
||||
config: {
|
||||
@@ -63,18 +67,6 @@ describe("resolveDefaultChannelAccountContext", () => {
|
||||
await expect(resolveDefaultChannelAccountContext(plugin, {} as OpenClawConfig)).rejects.toThrow(
|
||||
/missing secret/i,
|
||||
);
|
||||
});
|
||||
|
||||
it("degrades safely in read_only mode when resolveAccount throws", async () => {
|
||||
const plugin = {
|
||||
id: "demo",
|
||||
config: {
|
||||
listAccountIds: () => ["acc-err"],
|
||||
resolveAccount: () => {
|
||||
throw new Error("missing secret");
|
||||
},
|
||||
},
|
||||
} as unknown as ChannelPlugin;
|
||||
|
||||
const result = await resolveDefaultChannelAccountContext(plugin, {} as OpenClawConfig, {
|
||||
mode: "read_only",
|
||||
|
||||
@@ -85,6 +85,24 @@ function isTrustedWorkspaceChannelCatalogEntry(
|
||||
if (!entry.pluginId) {
|
||||
return false;
|
||||
}
|
||||
const plugins = cfg.plugins;
|
||||
if (plugins?.enabled === false) {
|
||||
return false;
|
||||
}
|
||||
const pluginEntry = plugins?.entries?.[entry.pluginId];
|
||||
if (pluginEntry?.enabled === false) {
|
||||
return false;
|
||||
}
|
||||
if (plugins?.deny?.length) {
|
||||
return resolveEnableState(entry.pluginId, "workspace", normalizePluginsConfig(cfg.plugins))
|
||||
.enabled;
|
||||
}
|
||||
if (plugins?.allow?.includes(entry.pluginId)) {
|
||||
return true;
|
||||
}
|
||||
if (pluginEntry?.enabled === true && !plugins?.allow?.length) {
|
||||
return true;
|
||||
}
|
||||
return resolveEnableState(entry.pluginId, "workspace", normalizePluginsConfig(cfg.plugins))
|
||||
.enabled;
|
||||
}
|
||||
|
||||
@@ -30,13 +30,12 @@ vi.mock("../../config/plugin-auto-enable.js", () => ({
|
||||
|
||||
const resolveBundledPluginSources = vi.fn();
|
||||
const getChannelPluginCatalogEntry = vi.fn();
|
||||
vi.mock("../../channels/plugins/catalog.js", async () => {
|
||||
const actual = await vi.importActual<typeof import("../../channels/plugins/catalog.js")>(
|
||||
"../../channels/plugins/catalog.js",
|
||||
);
|
||||
const listChannelPluginCatalogEntries = vi.fn(() => []);
|
||||
vi.mock("../../channels/plugins/catalog.js", () => {
|
||||
return {
|
||||
...actual,
|
||||
getChannelPluginCatalogEntry: (...args: unknown[]) => getChannelPluginCatalogEntry(...args),
|
||||
listChannelPluginCatalogEntries: (...args: unknown[]) =>
|
||||
listChannelPluginCatalogEntries(...args),
|
||||
};
|
||||
});
|
||||
|
||||
@@ -125,6 +124,7 @@ beforeEach(() => {
|
||||
}));
|
||||
resolveBundledPluginSources.mockReturnValue(new Map());
|
||||
getChannelPluginCatalogEntry.mockReturnValue(undefined);
|
||||
listChannelPluginCatalogEntries.mockReturnValue([]);
|
||||
loadPluginManifestRegistry.mockReturnValue({ plugins: [], diagnostics: [] });
|
||||
setActivePluginRegistry(createEmptyPluginRegistry());
|
||||
});
|
||||
|
||||
@@ -15,62 +15,34 @@ import {
|
||||
} from "./channels.plugin-install.test-helpers.js";
|
||||
import { baseConfigSnapshot, createTestRuntime } from "./test-runtime-config-helpers.js";
|
||||
|
||||
let channelsAddCommand: typeof import("./channels.js").channelsAddCommand;
|
||||
let channelsAddCommand: typeof import("./channels/add.js").channelsAddCommand;
|
||||
|
||||
const catalogMocks = vi.hoisted(() => ({
|
||||
listChannelPluginCatalogEntries: vi.fn((): ChannelPluginCatalogEntry[] => []),
|
||||
}));
|
||||
|
||||
const manifestRegistryMocks = vi.hoisted(() => ({
|
||||
loadPluginManifestRegistry: vi.fn(() => ({ plugins: [], diagnostics: [] })),
|
||||
}));
|
||||
|
||||
const discoveryMocks = vi.hoisted(() => ({
|
||||
isCatalogChannelInstalled: vi.fn(() => false),
|
||||
}));
|
||||
|
||||
vi.mock("../channels/plugins/catalog.js", async () => {
|
||||
const actual = await vi.importActual<typeof import("../channels/plugins/catalog.js")>(
|
||||
"../channels/plugins/catalog.js",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
listChannelPluginCatalogEntries: catalogMocks.listChannelPluginCatalogEntries,
|
||||
};
|
||||
});
|
||||
const pluginInstallMocks = vi.hoisted(() => ({
|
||||
ensureChannelSetupPluginInstalled: vi.fn(),
|
||||
loadChannelSetupPluginRegistrySnapshotForChannel: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock("../plugins/manifest-registry.js", async () => {
|
||||
const actual = await vi.importActual<typeof import("../plugins/manifest-registry.js")>(
|
||||
"../plugins/manifest-registry.js",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
loadPluginManifestRegistry: manifestRegistryMocks.loadPluginManifestRegistry,
|
||||
};
|
||||
});
|
||||
vi.mock("../channels/plugins/catalog.js", () => ({
|
||||
listChannelPluginCatalogEntries: catalogMocks.listChannelPluginCatalogEntries,
|
||||
}));
|
||||
|
||||
vi.mock("./channel-setup/discovery.js", () => ({
|
||||
isCatalogChannelInstalled: discoveryMocks.isCatalogChannelInstalled,
|
||||
}));
|
||||
|
||||
vi.mock("../channels/plugins/bundled.js", async () => {
|
||||
const actual = await vi.importActual<typeof import("../channels/plugins/bundled.js")>(
|
||||
"../channels/plugins/bundled.js",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
getBundledChannelPlugin: vi.fn(() => undefined),
|
||||
};
|
||||
});
|
||||
vi.mock("../channels/plugins/bundled.js", () => ({
|
||||
getBundledChannelPlugin: vi.fn(() => undefined),
|
||||
}));
|
||||
|
||||
vi.mock("./channel-setup/plugin-install.js", async () => {
|
||||
const actual = await vi.importActual<typeof import("./channel-setup/plugin-install.js")>(
|
||||
"./channel-setup/plugin-install.js",
|
||||
);
|
||||
const { createMockChannelSetupPluginInstallModule } =
|
||||
await import("./channels.plugin-install.test-helpers.js");
|
||||
return createMockChannelSetupPluginInstallModule(actual);
|
||||
});
|
||||
vi.mock("./channel-setup/plugin-install.js", () => pluginInstallMocks);
|
||||
|
||||
const runtime = createTestRuntime();
|
||||
|
||||
@@ -245,7 +217,7 @@ async function runSignalAddCommand(afterAccountConfigWritten: SignalAfterAccount
|
||||
|
||||
describe("channelsAddCommand", () => {
|
||||
beforeAll(async () => {
|
||||
({ channelsAddCommand } = await import("./channels.js"));
|
||||
({ channelsAddCommand } = await import("./channels/add.js"));
|
||||
});
|
||||
|
||||
beforeEach(async () => {
|
||||
@@ -263,26 +235,21 @@ describe("channelsAddCommand", () => {
|
||||
runtime.exit.mockClear();
|
||||
catalogMocks.listChannelPluginCatalogEntries.mockClear();
|
||||
catalogMocks.listChannelPluginCatalogEntries.mockReturnValue([]);
|
||||
manifestRegistryMocks.loadPluginManifestRegistry.mockClear();
|
||||
manifestRegistryMocks.loadPluginManifestRegistry.mockReturnValue({
|
||||
plugins: [],
|
||||
diagnostics: [],
|
||||
});
|
||||
discoveryMocks.isCatalogChannelInstalled.mockClear();
|
||||
discoveryMocks.isCatalogChannelInstalled.mockReturnValue(false);
|
||||
vi.mocked(ensureChannelSetupPluginInstalled).mockClear();
|
||||
vi.mocked(ensureChannelSetupPluginInstalled).mockReset();
|
||||
vi.mocked(ensureChannelSetupPluginInstalled).mockImplementation(async ({ cfg }) => ({
|
||||
cfg,
|
||||
installed: true,
|
||||
}));
|
||||
vi.mocked(loadChannelSetupPluginRegistrySnapshotForChannel).mockClear();
|
||||
vi.mocked(loadChannelSetupPluginRegistrySnapshotForChannel).mockReset();
|
||||
vi.mocked(loadChannelSetupPluginRegistrySnapshotForChannel).mockReturnValue(
|
||||
createTestRegistry(),
|
||||
);
|
||||
setMinimalChannelsAddRegistryForTests();
|
||||
});
|
||||
|
||||
it("clears telegram update offsets when the token changes", async () => {
|
||||
it("clears telegram update offsets only when the token changes", async () => {
|
||||
configMocks.readConfigFileSnapshot.mockResolvedValue({
|
||||
...baseConfigSnapshot,
|
||||
config: {
|
||||
@@ -300,9 +267,8 @@ describe("channelsAddCommand", () => {
|
||||
|
||||
expect(offsetMocks.deleteTelegramUpdateOffset).toHaveBeenCalledTimes(1);
|
||||
expect(offsetMocks.deleteTelegramUpdateOffset).toHaveBeenCalledWith({ accountId: "default" });
|
||||
});
|
||||
|
||||
it("does not clear telegram update offsets when the token is unchanged", async () => {
|
||||
offsetMocks.deleteTelegramUpdateOffset.mockClear();
|
||||
configMocks.readConfigFileSnapshot.mockResolvedValue({
|
||||
...baseConfigSnapshot,
|
||||
config: {
|
||||
@@ -321,7 +287,7 @@ describe("channelsAddCommand", () => {
|
||||
expect(offsetMocks.deleteTelegramUpdateOffset).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("falls back to a scoped snapshot after installing an external channel plugin", async () => {
|
||||
it("loads external channel setup snapshots for newly installed and existing plugins", async () => {
|
||||
configMocks.readConfigFileSnapshot.mockResolvedValue({ ...baseConfigSnapshot });
|
||||
setActivePluginRegistry(createTestRegistry());
|
||||
const catalogEntry = createMSTeamsCatalogEntry();
|
||||
@@ -353,24 +319,11 @@ describe("channelsAddCommand", () => {
|
||||
);
|
||||
expect(runtime.error).not.toHaveBeenCalled();
|
||||
expect(runtime.exit).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("uses the installed external channel snapshot without reinstalling", async () => {
|
||||
configMocks.readConfigFileSnapshot.mockResolvedValue({ ...baseConfigSnapshot });
|
||||
setActivePluginRegistry(createTestRegistry());
|
||||
const catalogEntry = createMSTeamsCatalogEntry();
|
||||
catalogMocks.listChannelPluginCatalogEntries.mockReturnValue([catalogEntry]);
|
||||
manifestRegistryMocks.loadPluginManifestRegistry.mockReturnValue({
|
||||
plugins: [
|
||||
{
|
||||
id: "@openclaw/msteams-plugin",
|
||||
channels: ["msteams"],
|
||||
} as never,
|
||||
],
|
||||
diagnostics: [],
|
||||
});
|
||||
vi.mocked(ensureChannelSetupPluginInstalled).mockClear();
|
||||
vi.mocked(loadChannelSetupPluginRegistrySnapshotForChannel).mockClear();
|
||||
configMocks.writeConfigFile.mockClear();
|
||||
discoveryMocks.isCatalogChannelInstalled.mockReturnValue(true);
|
||||
registerMSTeamsSetupPlugin("msteams");
|
||||
|
||||
await channelsAddCommand(
|
||||
{
|
||||
@@ -470,7 +423,7 @@ describe("channelsAddCommand", () => {
|
||||
expect(runtime.exit).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("runs post-setup hooks after writing config", async () => {
|
||||
it("runs post-setup hooks after writing config and keeps saved config on hook failure", async () => {
|
||||
const afterAccountConfigWritten = vi.fn().mockResolvedValue(undefined);
|
||||
await runSignalAddCommand(afterAccountConfigWritten);
|
||||
|
||||
@@ -499,11 +452,12 @@ describe("channelsAddCommand", () => {
|
||||
}),
|
||||
runtime,
|
||||
});
|
||||
});
|
||||
|
||||
it("keeps the saved config when a post-setup hook fails", async () => {
|
||||
const afterAccountConfigWritten = vi.fn().mockRejectedValue(new Error("hook failed"));
|
||||
await runSignalAddCommand(afterAccountConfigWritten);
|
||||
configMocks.writeConfigFile.mockClear();
|
||||
runtime.error.mockClear();
|
||||
runtime.exit.mockClear();
|
||||
const failingHook = vi.fn().mockRejectedValue(new Error("hook failed"));
|
||||
await runSignalAddCommand(failingHook);
|
||||
|
||||
expect(configMocks.writeConfigFile).toHaveBeenCalledTimes(1);
|
||||
expect(runtime.exit).not.toHaveBeenCalled();
|
||||
|
||||
@@ -13,7 +13,12 @@ import { formatGatewayChannelsStatusLines } from "./channels/status.js";
|
||||
import { baseConfigSnapshot, createTestRuntime } from "./test-runtime-config-helpers.js";
|
||||
|
||||
const runtime = createTestRuntime();
|
||||
let clackPrompterModule: typeof import("../wizard/clack-prompter.js");
|
||||
let minimalChannelsCommandRegistry: ReturnType<typeof createTestRegistry>;
|
||||
const createClackPrompterMock = vi.hoisted(() => vi.fn());
|
||||
|
||||
vi.mock("../wizard/clack-prompter.js", () => ({
|
||||
createClackPrompter: createClackPrompterMock,
|
||||
}));
|
||||
|
||||
type ChannelSectionConfig = {
|
||||
enabled?: boolean;
|
||||
@@ -192,106 +197,108 @@ function createTelegramCommandTestPlugin(): ChannelPlugin {
|
||||
});
|
||||
}
|
||||
|
||||
function setMinimalChannelsCommandRegistryForTests(): void {
|
||||
setActivePluginRegistry(
|
||||
createTestRegistry([
|
||||
{
|
||||
pluginId: "telegram",
|
||||
plugin: createTelegramCommandTestPlugin(),
|
||||
source: "test",
|
||||
},
|
||||
{
|
||||
pluginId: "whatsapp",
|
||||
plugin: createScopedCommandTestPlugin({
|
||||
id: "whatsapp",
|
||||
label: "WhatsApp",
|
||||
buildPatch: () => ({}),
|
||||
clearBaseFields: ["name"],
|
||||
}),
|
||||
source: "test",
|
||||
},
|
||||
{
|
||||
pluginId: "discord",
|
||||
plugin: createScopedCommandTestPlugin({
|
||||
id: "discord",
|
||||
label: "Discord",
|
||||
buildPatch: ({ token }) => (token ? { token } : {}),
|
||||
clearBaseFields: ["token", "name"],
|
||||
collectStatusIssues: (accounts) =>
|
||||
accounts.flatMap((account) => {
|
||||
if (account.enabled !== true || account.configured !== true) {
|
||||
return [];
|
||||
}
|
||||
const issues: ChannelStatusIssue[] = [];
|
||||
const issueAccountId = account.accountId ?? DEFAULT_ACCOUNT_ID;
|
||||
const messageContent = (
|
||||
account.application as { intents?: { messageContent?: string } } | undefined
|
||||
)?.intents?.messageContent;
|
||||
if (messageContent === "disabled") {
|
||||
issues.push({
|
||||
channel: "discord",
|
||||
accountId: issueAccountId,
|
||||
kind: "intent",
|
||||
message:
|
||||
"Message Content Intent is disabled. Bot may not see normal channel messages.",
|
||||
});
|
||||
}
|
||||
const audit = account.audit as
|
||||
| {
|
||||
channels?: Array<{
|
||||
channelId?: string;
|
||||
ok?: boolean;
|
||||
missing?: string[];
|
||||
error?: string;
|
||||
}>;
|
||||
}
|
||||
| undefined;
|
||||
for (const channel of audit?.channels ?? []) {
|
||||
if (channel.ok === true || !channel.channelId) {
|
||||
continue;
|
||||
function createMinimalChannelsCommandRegistryForTests(): ReturnType<typeof createTestRegistry> {
|
||||
return createTestRegistry([
|
||||
{
|
||||
pluginId: "telegram",
|
||||
plugin: createTelegramCommandTestPlugin(),
|
||||
source: "test",
|
||||
},
|
||||
{
|
||||
pluginId: "whatsapp",
|
||||
plugin: createScopedCommandTestPlugin({
|
||||
id: "whatsapp",
|
||||
label: "WhatsApp",
|
||||
buildPatch: () => ({}),
|
||||
clearBaseFields: ["name"],
|
||||
}),
|
||||
source: "test",
|
||||
},
|
||||
{
|
||||
pluginId: "discord",
|
||||
plugin: createScopedCommandTestPlugin({
|
||||
id: "discord",
|
||||
label: "Discord",
|
||||
buildPatch: ({ token }) => (token ? { token } : {}),
|
||||
clearBaseFields: ["token", "name"],
|
||||
collectStatusIssues: (accounts) =>
|
||||
accounts.flatMap((account) => {
|
||||
if (account.enabled !== true || account.configured !== true) {
|
||||
return [];
|
||||
}
|
||||
const issues: ChannelStatusIssue[] = [];
|
||||
const issueAccountId = account.accountId ?? DEFAULT_ACCOUNT_ID;
|
||||
const messageContent = (
|
||||
account.application as { intents?: { messageContent?: string } } | undefined
|
||||
)?.intents?.messageContent;
|
||||
if (messageContent === "disabled") {
|
||||
issues.push({
|
||||
channel: "discord",
|
||||
accountId: issueAccountId,
|
||||
kind: "intent",
|
||||
message:
|
||||
"Message Content Intent is disabled. Bot may not see normal channel messages.",
|
||||
});
|
||||
}
|
||||
const audit = account.audit as
|
||||
| {
|
||||
channels?: Array<{
|
||||
channelId?: string;
|
||||
ok?: boolean;
|
||||
missing?: string[];
|
||||
error?: string;
|
||||
}>;
|
||||
}
|
||||
issues.push({
|
||||
channel: "discord",
|
||||
accountId: issueAccountId,
|
||||
kind: "permissions",
|
||||
message: `Channel ${channel.channelId} permission audit failed.${channel.missing?.length ? ` missing ${channel.missing.join(", ")}` : ""}${channel.error ? `: ${channel.error}` : ""}`,
|
||||
});
|
||||
| undefined;
|
||||
for (const channel of audit?.channels ?? []) {
|
||||
if (channel.ok === true || !channel.channelId) {
|
||||
continue;
|
||||
}
|
||||
return issues;
|
||||
}),
|
||||
}),
|
||||
source: "test",
|
||||
},
|
||||
{
|
||||
pluginId: "slack",
|
||||
plugin: createScopedCommandTestPlugin({
|
||||
id: "slack",
|
||||
label: "Slack",
|
||||
buildPatch: ({ botToken, appToken }) => ({
|
||||
...(botToken ? { botToken } : {}),
|
||||
...(appToken ? { appToken } : {}),
|
||||
issues.push({
|
||||
channel: "discord",
|
||||
accountId: issueAccountId,
|
||||
kind: "permissions",
|
||||
message: `Channel ${channel.channelId} permission audit failed.${channel.missing?.length ? ` missing ${channel.missing.join(", ")}` : ""}${channel.error ? `: ${channel.error}` : ""}`,
|
||||
});
|
||||
}
|
||||
return issues;
|
||||
}),
|
||||
clearBaseFields: ["botToken", "appToken", "name"],
|
||||
}),
|
||||
source: "test",
|
||||
},
|
||||
{
|
||||
pluginId: "slack",
|
||||
plugin: createScopedCommandTestPlugin({
|
||||
id: "slack",
|
||||
label: "Slack",
|
||||
buildPatch: ({ botToken, appToken }) => ({
|
||||
...(botToken ? { botToken } : {}),
|
||||
...(appToken ? { appToken } : {}),
|
||||
}),
|
||||
source: "test",
|
||||
},
|
||||
{
|
||||
pluginId: "signal",
|
||||
plugin: createScopedCommandTestPlugin({
|
||||
id: "signal",
|
||||
label: "Signal",
|
||||
buildPatch: ({ signalNumber }) => (signalNumber ? { account: signalNumber } : {}),
|
||||
clearBaseFields: ["account", "name"],
|
||||
}),
|
||||
source: "test",
|
||||
},
|
||||
]),
|
||||
);
|
||||
clearBaseFields: ["botToken", "appToken", "name"],
|
||||
}),
|
||||
source: "test",
|
||||
},
|
||||
{
|
||||
pluginId: "signal",
|
||||
plugin: createScopedCommandTestPlugin({
|
||||
id: "signal",
|
||||
label: "Signal",
|
||||
buildPatch: ({ signalNumber }) => (signalNumber ? { account: signalNumber } : {}),
|
||||
clearBaseFields: ["account", "name"],
|
||||
}),
|
||||
source: "test",
|
||||
},
|
||||
]);
|
||||
}
|
||||
|
||||
function setMinimalChannelsCommandRegistryForTests(): void {
|
||||
setActivePluginRegistry(minimalChannelsCommandRegistry);
|
||||
}
|
||||
|
||||
describe("channels command", () => {
|
||||
beforeAll(async () => {
|
||||
clackPrompterModule = await import("../wizard/clack-prompter.js");
|
||||
beforeAll(() => {
|
||||
minimalChannelsCommandRegistry = createMinimalChannelsCommandRegistryForTests();
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
@@ -299,6 +306,7 @@ describe("channels command", () => {
|
||||
configMocks.writeConfigFile.mockClear();
|
||||
secretMocks.resolveCommandConfigWithSecrets.mockClear();
|
||||
offsetMocks.deleteTelegramUpdateOffset.mockClear();
|
||||
createClackPrompterMock.mockReset();
|
||||
runtime.log.mockClear();
|
||||
runtime.error.mockClear();
|
||||
runtime.exit.mockClear();
|
||||
@@ -314,14 +322,8 @@ describe("channels command", () => {
|
||||
args: Parameters<typeof channelsRemoveCommand>[0],
|
||||
): Promise<void> {
|
||||
const prompt = { confirm: vi.fn().mockResolvedValue(true) };
|
||||
const promptSpy = vi
|
||||
.spyOn(clackPrompterModule, "createClackPrompter")
|
||||
.mockReturnValue(prompt as never);
|
||||
try {
|
||||
await channelsRemoveCommand(args, runtime, { hasFlags: true });
|
||||
} finally {
|
||||
promptSpy.mockRestore();
|
||||
}
|
||||
createClackPrompterMock.mockReturnValue(prompt);
|
||||
await channelsRemoveCommand(args, runtime, { hasFlags: true });
|
||||
}
|
||||
|
||||
async function addTelegramAccount(account: string, token: string): Promise<void> {
|
||||
|
||||
@@ -1,20 +1,68 @@
|
||||
import { afterEach, describe, expect, it } from "vitest";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { ChannelPlugin } from "../channels/plugins/types.js";
|
||||
import { setActivePluginRegistry } from "../plugins/runtime.js";
|
||||
import { makeDirectPlugin } from "../test-utils/channel-plugin-test-fixtures.js";
|
||||
import { createTestRegistry } from "../test-utils/channel-plugins.js";
|
||||
import { formatConfigChannelsStatusLines } from "./channels/status.js";
|
||||
import { formatConfigChannelsStatusLines } from "./channels/status-config-format.js";
|
||||
|
||||
function registerSingleTestPlugin(pluginId: string, plugin: ChannelPlugin) {
|
||||
setActivePluginRegistry(
|
||||
createTestRegistry([
|
||||
{
|
||||
pluginId,
|
||||
source: "test",
|
||||
plugin,
|
||||
},
|
||||
]),
|
||||
);
|
||||
const activeChannelPlugins = vi.hoisted(() => [] as ChannelPlugin[]);
|
||||
|
||||
vi.mock("../channels/plugins/index.js", () => ({
|
||||
listChannelPlugins: () => activeChannelPlugins,
|
||||
getChannelPlugin: (id: string) => activeChannelPlugins.find((plugin) => plugin.id === id),
|
||||
}));
|
||||
|
||||
vi.mock("../channels/plugins/status.js", () => ({
|
||||
buildReadOnlySourceChannelAccountSnapshot: async ({
|
||||
accountId,
|
||||
cfg,
|
||||
plugin,
|
||||
}: {
|
||||
accountId: string;
|
||||
cfg: unknown;
|
||||
plugin: ChannelPlugin;
|
||||
}) => {
|
||||
const account = await plugin.config.inspectAccount?.(cfg as never, accountId);
|
||||
return account ? { accountId, ...(account as Record<string, unknown>) } : null;
|
||||
},
|
||||
buildChannelAccountSnapshot: async ({
|
||||
accountId,
|
||||
cfg,
|
||||
plugin,
|
||||
}: {
|
||||
accountId: string;
|
||||
cfg: unknown;
|
||||
plugin: ChannelPlugin;
|
||||
}) => {
|
||||
const account =
|
||||
(await plugin.config.inspectAccount?.(cfg as never, accountId)) ??
|
||||
plugin.config.resolveAccount(cfg as never, accountId);
|
||||
return { accountId, ...(account as Record<string, unknown>) };
|
||||
},
|
||||
}));
|
||||
|
||||
function registerSingleTestPlugin(_pluginId: string, plugin: ChannelPlugin) {
|
||||
activeChannelPlugins.splice(0, activeChannelPlugins.length, plugin);
|
||||
}
|
||||
|
||||
function makeTestPlugin(params: {
|
||||
id: string;
|
||||
label: string;
|
||||
docsPath: string;
|
||||
config: ChannelPlugin["config"];
|
||||
}): ChannelPlugin {
|
||||
return {
|
||||
id: params.id,
|
||||
meta: {
|
||||
id: params.id,
|
||||
label: params.label,
|
||||
selectionLabel: params.label,
|
||||
docsPath: params.docsPath,
|
||||
blurb: "test",
|
||||
},
|
||||
capabilities: { chatTypes: ["direct"] },
|
||||
config: params.config,
|
||||
actions: {
|
||||
describeMessageTool: () => ({ actions: ["send"] }),
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async function formatLocalStatusSummary(
|
||||
@@ -52,7 +100,7 @@ function tokenOnlyPluginConfig() {
|
||||
}
|
||||
|
||||
function makeUnavailableTokenPlugin(): ChannelPlugin {
|
||||
return makeDirectPlugin({
|
||||
return makeTestPlugin({
|
||||
id: "token-only",
|
||||
label: "TokenOnly",
|
||||
docsPath: "/channels/token-only",
|
||||
@@ -64,7 +112,7 @@ function makeUnavailableTokenPlugin(): ChannelPlugin {
|
||||
}
|
||||
|
||||
function makeResolvedTokenPlugin(): ChannelPlugin {
|
||||
return makeDirectPlugin({
|
||||
return makeTestPlugin({
|
||||
id: "token-only",
|
||||
label: "TokenOnly",
|
||||
docsPath: "/channels/token-only",
|
||||
@@ -124,7 +172,7 @@ function makeResolvedTokenPluginWithoutInspectAccount(): ChannelPlugin {
|
||||
}
|
||||
|
||||
function makeUnavailableHttpSlackPlugin(): ChannelPlugin {
|
||||
return makeDirectPlugin({
|
||||
return makeTestPlugin({
|
||||
id: "slack",
|
||||
label: "Slack",
|
||||
docsPath: "/channels/slack",
|
||||
@@ -169,10 +217,6 @@ function expectResolvedTokenStatusSummary(
|
||||
}
|
||||
|
||||
describe("config-only channels status output", () => {
|
||||
afterEach(() => {
|
||||
setActivePluginRegistry(createTestRegistry([]));
|
||||
});
|
||||
|
||||
it("shows configured-but-unavailable credentials distinctly from not configured", async () => {
|
||||
registerSingleTestPlugin("token-only", makeUnavailableTokenPlugin());
|
||||
|
||||
|
||||
@@ -49,12 +49,6 @@ vi.mock("../cli/command-secret-targets.js", () => ({
|
||||
getChannelsCommandSecretTargetIds: () => new Set<string>(),
|
||||
}));
|
||||
|
||||
vi.mock(buildBundledPluginModuleId("telegram", "update-offset-runtime-api.js"), async () => {
|
||||
const actual: Record<string, unknown> = await vi.importActual(
|
||||
buildBundledPluginModuleId("telegram", "update-offset-runtime-api.js"),
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
deleteTelegramUpdateOffset: offsetMocks.deleteTelegramUpdateOffset,
|
||||
};
|
||||
});
|
||||
vi.mock(buildBundledPluginModuleId("telegram", "update-offset-runtime-api.js"), () => ({
|
||||
deleteTelegramUpdateOffset: offsetMocks.deleteTelegramUpdateOffset,
|
||||
}));
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import { resolveAgentWorkspaceDir, resolveDefaultAgentId } from "../../agents/agent-scope.js";
|
||||
import { listChannelPluginCatalogEntries } from "../../channels/plugins/catalog.js";
|
||||
import { parseOptionalDelimitedEntries } from "../../channels/plugins/helpers.js";
|
||||
import { getChannelPlugin, normalizeChannelId } from "../../channels/plugins/index.js";
|
||||
import { moveSingleAccountChannelSectionToDefaultAccount } from "../../channels/plugins/setup-helpers.js";
|
||||
@@ -12,11 +11,6 @@ import { defaultRuntime, type RuntimeEnv } from "../../runtime.js";
|
||||
import { normalizeOptionalLowercaseString } from "../../shared/string-coerce.js";
|
||||
import { createClackPrompter } from "../../wizard/clack-prompter.js";
|
||||
import { applyAgentBindings, describeBinding } from "../agents.bindings.js";
|
||||
import { isCatalogChannelInstalled } from "../channel-setup/discovery.js";
|
||||
import {
|
||||
createChannelOnboardingPostWriteHookCollector,
|
||||
runCollectedChannelOnboardingPostWriteHooks,
|
||||
} from "../onboard-channels.js";
|
||||
import type { ChannelChoice } from "../onboard-types.js";
|
||||
import { applyAccountName, applyChannelAccountConfig } from "./add-mutators.js";
|
||||
import { channelLabel, requireValidConfigFileSnapshot, shouldUseWizard } from "./shared.js";
|
||||
@@ -29,11 +23,12 @@ export type ChannelsAddOptions = {
|
||||
dmAllowlist?: string;
|
||||
} & Omit<ChannelSetupInput, "groupChannels" | "dmAllowlist" | "initialSyncLimit">;
|
||||
|
||||
function resolveCatalogChannelEntry(raw: string, cfg: OpenClawConfig | null) {
|
||||
async function resolveCatalogChannelEntry(raw: string, cfg: OpenClawConfig | null) {
|
||||
const trimmed = normalizeOptionalLowercaseString(raw);
|
||||
if (!trimmed) {
|
||||
return undefined;
|
||||
}
|
||||
const { listChannelPluginCatalogEntries } = await import("../../channels/plugins/catalog.js");
|
||||
const workspaceDir = cfg ? resolveAgentWorkspaceDir(cfg, resolveDefaultAgentId(cfg)) : undefined;
|
||||
return listChannelPluginCatalogEntries({ workspaceDir }).find((entry) => {
|
||||
if (normalizeOptionalLowercaseString(entry.id) === trimmed) {
|
||||
@@ -60,17 +55,17 @@ export async function channelsAddCommand(
|
||||
|
||||
const useWizard = shouldUseWizard(params);
|
||||
if (useWizard) {
|
||||
const [{ buildAgentSummaries }, { setupChannels }] = await Promise.all([
|
||||
const [{ buildAgentSummaries }, onboardChannels] = await Promise.all([
|
||||
import("../agents.config.js"),
|
||||
import("../onboard-channels.js"),
|
||||
]);
|
||||
const prompter = createClackPrompter();
|
||||
const postWriteHooks = createChannelOnboardingPostWriteHookCollector();
|
||||
const postWriteHooks = onboardChannels.createChannelOnboardingPostWriteHookCollector();
|
||||
let selection: ChannelChoice[] = [];
|
||||
const accountIds: Partial<Record<ChannelChoice, string>> = {};
|
||||
const resolvedPlugins = new Map<ChannelChoice, ChannelSetupPlugin>();
|
||||
await prompter.intro("Channel setup");
|
||||
let nextConfig = await setupChannels(cfg, runtime, prompter, {
|
||||
let nextConfig = await onboardChannels.setupChannels(cfg, runtime, prompter, {
|
||||
allowDisable: false,
|
||||
allowSignalInstall: true,
|
||||
onPostWriteHook: (hook) => {
|
||||
@@ -187,7 +182,7 @@ export async function channelsAddCommand(
|
||||
nextConfig,
|
||||
...(baseHash !== undefined ? { baseHash } : {}),
|
||||
});
|
||||
await runCollectedChannelOnboardingPostWriteHooks({
|
||||
await onboardChannels.runCollectedChannelOnboardingPostWriteHooks({
|
||||
hooks: postWriteHooks.drain(),
|
||||
cfg: nextConfig,
|
||||
runtime,
|
||||
@@ -198,7 +193,7 @@ export async function channelsAddCommand(
|
||||
|
||||
const rawChannel = opts.channel ?? "";
|
||||
let channel = normalizeChannelId(rawChannel);
|
||||
let catalogEntry = channel ? undefined : resolveCatalogChannelEntry(rawChannel, nextConfig);
|
||||
let catalogEntry = channel ? undefined : await resolveCatalogChannelEntry(rawChannel, nextConfig);
|
||||
const resolveWorkspaceDir = () =>
|
||||
resolveAgentWorkspaceDir(nextConfig, resolveDefaultAgentId(nextConfig));
|
||||
// May trigger loadOpenClawPlugins on cache miss (disk scan + jiti import)
|
||||
@@ -227,6 +222,7 @@ export async function channelsAddCommand(
|
||||
|
||||
if (!channel && catalogEntry) {
|
||||
const workspaceDir = resolveWorkspaceDir();
|
||||
const { isCatalogChannelInstalled } = await import("../channel-setup/discovery.js");
|
||||
if (
|
||||
!isCatalogChannelInstalled({
|
||||
cfg: nextConfig,
|
||||
@@ -364,6 +360,7 @@ export async function channelsAddCommand(
|
||||
runtime.log(`Added ${channelLabel(channel)} account "${accountId}".`);
|
||||
const afterAccountConfigWritten = plugin.setup?.afterAccountConfigWritten;
|
||||
if (afterAccountConfigWritten) {
|
||||
const { runCollectedChannelOnboardingPostWriteHooks } = await import("../onboard-channels.js");
|
||||
await runCollectedChannelOnboardingPostWriteHooks({
|
||||
hooks: [
|
||||
{
|
||||
|
||||
@@ -9,7 +9,6 @@ import { DEFAULT_ACCOUNT_ID, normalizeAccountId } from "../../routing/session-ke
|
||||
import { defaultRuntime, type RuntimeEnv } from "../../runtime.js";
|
||||
import { normalizeOptionalString } from "../../shared/string-coerce.js";
|
||||
import { createClackPrompter } from "../../wizard/clack-prompter.js";
|
||||
import { resolveInstallableChannelPlugin } from "../channel-setup/channel-plugin-resolution.js";
|
||||
import {
|
||||
type ChatChannel,
|
||||
channelLabel,
|
||||
@@ -101,15 +100,20 @@ export async function channelsRemoveCommand(
|
||||
}
|
||||
}
|
||||
|
||||
const resolvedPluginState =
|
||||
!useWizard && rawChannel
|
||||
? await resolveInstallableChannelPlugin({
|
||||
const shouldResolveInstallablePlugin =
|
||||
!useWizard && rawChannel && (!channel || !getChannelPlugin(channel));
|
||||
const resolvedPluginState = shouldResolveInstallablePlugin
|
||||
? await (async () => {
|
||||
const { resolveInstallableChannelPlugin } =
|
||||
await import("../channel-setup/channel-plugin-resolution.js");
|
||||
return await resolveInstallableChannelPlugin({
|
||||
cfg,
|
||||
runtime,
|
||||
rawChannel,
|
||||
allowInstall: true,
|
||||
})
|
||||
: null;
|
||||
});
|
||||
})()
|
||||
: null;
|
||||
if (resolvedPluginState?.configChanged) {
|
||||
cfg = resolvedPluginState.cfg;
|
||||
}
|
||||
|
||||
168
src/commands/channels/status-config-format.ts
Normal file
168
src/commands/channels/status-config-format.ts
Normal file
@@ -0,0 +1,168 @@
|
||||
import {
|
||||
hasConfiguredUnavailableCredentialStatus,
|
||||
hasResolvedCredentialValue,
|
||||
} from "../../channels/account-snapshot-fields.js";
|
||||
import { listChannelPlugins } from "../../channels/plugins/index.js";
|
||||
import {
|
||||
buildChannelAccountSnapshot,
|
||||
buildReadOnlySourceChannelAccountSnapshot,
|
||||
} from "../../channels/plugins/status.js";
|
||||
import type { ChannelAccountSnapshot } from "../../channels/plugins/types.public.js";
|
||||
import type { OpenClawConfig } from "../../config/config.js";
|
||||
import { normalizeOptionalString } from "../../shared/string-coerce.js";
|
||||
import { formatDocsLink } from "../../terminal/links.js";
|
||||
import { theme } from "../../terminal/theme.js";
|
||||
|
||||
type ChatChannel = string;
|
||||
|
||||
function formatAccountLabel(params: { accountId: string; name?: string }) {
|
||||
const base = params.accountId || "default";
|
||||
if (params.name?.trim()) {
|
||||
return `${base} (${params.name.trim()})`;
|
||||
}
|
||||
return base;
|
||||
}
|
||||
|
||||
function formatChannelAccountLabel(params: {
|
||||
channel: ChatChannel;
|
||||
accountId: string;
|
||||
name?: string;
|
||||
}): string {
|
||||
const channelText =
|
||||
listChannelPlugins().find((plugin) => plugin.id === params.channel)?.meta.label ??
|
||||
params.channel;
|
||||
return `${channelText} ${formatAccountLabel({
|
||||
accountId: params.accountId,
|
||||
name: params.name,
|
||||
})}`;
|
||||
}
|
||||
|
||||
function appendEnabledConfiguredLinkedBits(bits: string[], account: Record<string, unknown>) {
|
||||
if (typeof account.enabled === "boolean") {
|
||||
bits.push(account.enabled ? "enabled" : "disabled");
|
||||
}
|
||||
if (typeof account.configured === "boolean") {
|
||||
if (account.configured) {
|
||||
bits.push("configured");
|
||||
if (hasConfiguredUnavailableCredentialStatus(account)) {
|
||||
bits.push("secret unavailable in this command path");
|
||||
}
|
||||
} else {
|
||||
bits.push("not configured");
|
||||
}
|
||||
}
|
||||
if (typeof account.linked === "boolean") {
|
||||
bits.push(account.linked ? "linked" : "not linked");
|
||||
}
|
||||
}
|
||||
|
||||
function appendModeBit(bits: string[], account: Record<string, unknown>) {
|
||||
if (typeof account.mode === "string" && account.mode.length > 0) {
|
||||
bits.push(`mode:${account.mode}`);
|
||||
}
|
||||
}
|
||||
|
||||
function appendTokenSourceBits(bits: string[], account: Record<string, unknown>) {
|
||||
const appendSourceBit = (label: string, sourceKey: string, statusKey: string) => {
|
||||
const source = account[sourceKey];
|
||||
if (typeof source !== "string" || !source || source === "none") {
|
||||
return;
|
||||
}
|
||||
const status = account[statusKey];
|
||||
const unavailable = status === "configured_unavailable" ? " (unavailable)" : "";
|
||||
bits.push(`${label}:${source}${unavailable}`);
|
||||
};
|
||||
|
||||
appendSourceBit("token", "tokenSource", "tokenStatus");
|
||||
appendSourceBit("bot", "botTokenSource", "botTokenStatus");
|
||||
appendSourceBit("app", "appTokenSource", "appTokenStatus");
|
||||
appendSourceBit("signing", "signingSecretSource", "signingSecretStatus");
|
||||
}
|
||||
|
||||
function appendBaseUrlBit(bits: string[], account: Record<string, unknown>) {
|
||||
if (typeof account.baseUrl === "string" && account.baseUrl) {
|
||||
bits.push(`url:${account.baseUrl}`);
|
||||
}
|
||||
}
|
||||
|
||||
function buildChannelAccountLine(
|
||||
provider: ChatChannel,
|
||||
account: Record<string, unknown>,
|
||||
bits: string[],
|
||||
): string {
|
||||
const accountId = typeof account.accountId === "string" ? account.accountId : "default";
|
||||
const name = normalizeOptionalString(account.name) ?? "";
|
||||
const labelText = formatChannelAccountLabel({
|
||||
channel: provider,
|
||||
accountId,
|
||||
name: name || undefined,
|
||||
});
|
||||
return `- ${labelText}: ${bits.join(", ")}`;
|
||||
}
|
||||
|
||||
export async function formatConfigChannelsStatusLines(
|
||||
cfg: OpenClawConfig,
|
||||
meta: { path?: string; mode?: "local" | "remote" },
|
||||
opts?: { sourceConfig?: OpenClawConfig },
|
||||
): Promise<string[]> {
|
||||
const lines: string[] = [];
|
||||
lines.push(theme.warn("Gateway not reachable; showing config-only status."));
|
||||
if (meta.path) {
|
||||
lines.push(`Config: ${meta.path}`);
|
||||
}
|
||||
if (meta.mode) {
|
||||
lines.push(`Mode: ${meta.mode}`);
|
||||
}
|
||||
if (meta.path || meta.mode) {
|
||||
lines.push("");
|
||||
}
|
||||
|
||||
const accountLines = (provider: ChatChannel, accounts: Array<Record<string, unknown>>) =>
|
||||
accounts.map((account) => {
|
||||
const bits: string[] = [];
|
||||
appendEnabledConfiguredLinkedBits(bits, account);
|
||||
appendModeBit(bits, account);
|
||||
appendTokenSourceBits(bits, account);
|
||||
appendBaseUrlBit(bits, account);
|
||||
return buildChannelAccountLine(provider, account, bits);
|
||||
});
|
||||
|
||||
const plugins = listChannelPlugins();
|
||||
const sourceConfig = opts?.sourceConfig ?? cfg;
|
||||
for (const plugin of plugins) {
|
||||
const accountIds = plugin.config.listAccountIds(cfg);
|
||||
if (!accountIds.length) {
|
||||
continue;
|
||||
}
|
||||
const snapshots: ChannelAccountSnapshot[] = [];
|
||||
for (const accountId of accountIds) {
|
||||
const sourceSnapshot = await buildReadOnlySourceChannelAccountSnapshot({
|
||||
plugin,
|
||||
cfg: sourceConfig,
|
||||
accountId,
|
||||
});
|
||||
const resolvedSnapshot = await buildChannelAccountSnapshot({
|
||||
plugin,
|
||||
cfg,
|
||||
accountId,
|
||||
});
|
||||
snapshots.push(
|
||||
sourceSnapshot &&
|
||||
hasConfiguredUnavailableCredentialStatus(sourceSnapshot) &&
|
||||
(!hasResolvedCredentialValue(resolvedSnapshot) ||
|
||||
(sourceSnapshot.configured === true && resolvedSnapshot.configured === false))
|
||||
? sourceSnapshot
|
||||
: resolvedSnapshot,
|
||||
);
|
||||
}
|
||||
if (snapshots.length > 0) {
|
||||
lines.push(...accountLines(plugin.id, snapshots));
|
||||
}
|
||||
}
|
||||
|
||||
lines.push("");
|
||||
lines.push(
|
||||
`Tip: ${formatDocsLink("/cli#status", "status --deep")} adds gateway health probes to status output (requires a reachable gateway).`,
|
||||
);
|
||||
return lines;
|
||||
}
|
||||
@@ -1,18 +1,10 @@
|
||||
import {
|
||||
hasConfiguredUnavailableCredentialStatus,
|
||||
hasResolvedCredentialValue,
|
||||
} from "../../channels/account-snapshot-fields.js";
|
||||
import { hasConfiguredUnavailableCredentialStatus } from "../../channels/account-snapshot-fields.js";
|
||||
import { listChannelPlugins } from "../../channels/plugins/index.js";
|
||||
import {
|
||||
buildChannelAccountSnapshot,
|
||||
buildReadOnlySourceChannelAccountSnapshot,
|
||||
} from "../../channels/plugins/status.js";
|
||||
import type { ChannelAccountSnapshot } from "../../channels/plugins/types.public.js";
|
||||
import { resolveCommandConfigWithSecrets } from "../../cli/command-config-resolution.js";
|
||||
import { formatCliCommand } from "../../cli/command-format.js";
|
||||
import { getChannelsCommandSecretTargetIds } from "../../cli/command-secret-targets.js";
|
||||
import { withProgress } from "../../cli/progress.js";
|
||||
import { type OpenClawConfig, readConfigFileSnapshot } from "../../config/config.js";
|
||||
import { readConfigFileSnapshot } from "../../config/config.js";
|
||||
import { callGateway } from "../../gateway/call.js";
|
||||
import { collectChannelStatusIssues } from "../../infra/channels-status-issues.js";
|
||||
import { formatTimeAgo } from "../../infra/format-time/format-relative.ts";
|
||||
@@ -25,6 +17,9 @@ import {
|
||||
formatChannelAccountLabel,
|
||||
requireValidConfigSnapshot,
|
||||
} from "./shared.js";
|
||||
import { formatConfigChannelsStatusLines } from "./status-config-format.js";
|
||||
|
||||
export { formatConfigChannelsStatusLines } from "./status-config-format.js";
|
||||
|
||||
export type ChannelsStatusOptions = {
|
||||
json?: boolean;
|
||||
@@ -210,73 +205,6 @@ export function formatGatewayChannelsStatusLines(payload: Record<string, unknown
|
||||
return lines;
|
||||
}
|
||||
|
||||
export async function formatConfigChannelsStatusLines(
|
||||
cfg: OpenClawConfig,
|
||||
meta: { path?: string; mode?: "local" | "remote" },
|
||||
opts?: { sourceConfig?: OpenClawConfig },
|
||||
): Promise<string[]> {
|
||||
const lines: string[] = [];
|
||||
lines.push(theme.warn("Gateway not reachable; showing config-only status."));
|
||||
if (meta.path) {
|
||||
lines.push(`Config: ${meta.path}`);
|
||||
}
|
||||
if (meta.mode) {
|
||||
lines.push(`Mode: ${meta.mode}`);
|
||||
}
|
||||
if (meta.path || meta.mode) {
|
||||
lines.push("");
|
||||
}
|
||||
|
||||
const accountLines = (provider: ChatChannel, accounts: Array<Record<string, unknown>>) =>
|
||||
accounts.map((account) => {
|
||||
const bits: string[] = [];
|
||||
appendEnabledConfiguredLinkedBits(bits, account);
|
||||
appendModeBit(bits, account);
|
||||
appendTokenSourceBits(bits, account);
|
||||
appendBaseUrlBit(bits, account);
|
||||
return buildChannelAccountLine(provider, account, bits);
|
||||
});
|
||||
|
||||
const plugins = listChannelPlugins();
|
||||
const sourceConfig = opts?.sourceConfig ?? cfg;
|
||||
for (const plugin of plugins) {
|
||||
const accountIds = plugin.config.listAccountIds(cfg);
|
||||
if (!accountIds.length) {
|
||||
continue;
|
||||
}
|
||||
const snapshots: ChannelAccountSnapshot[] = [];
|
||||
for (const accountId of accountIds) {
|
||||
const sourceSnapshot = await buildReadOnlySourceChannelAccountSnapshot({
|
||||
plugin,
|
||||
cfg: sourceConfig,
|
||||
accountId,
|
||||
});
|
||||
const resolvedSnapshot = await buildChannelAccountSnapshot({
|
||||
plugin,
|
||||
cfg,
|
||||
accountId,
|
||||
});
|
||||
snapshots.push(
|
||||
sourceSnapshot &&
|
||||
hasConfiguredUnavailableCredentialStatus(sourceSnapshot) &&
|
||||
(!hasResolvedCredentialValue(resolvedSnapshot) ||
|
||||
(sourceSnapshot.configured === true && resolvedSnapshot.configured === false))
|
||||
? sourceSnapshot
|
||||
: resolvedSnapshot,
|
||||
);
|
||||
}
|
||||
if (snapshots.length > 0) {
|
||||
lines.push(...accountLines(plugin.id, snapshots));
|
||||
}
|
||||
}
|
||||
|
||||
lines.push("");
|
||||
lines.push(
|
||||
`Tip: ${formatDocsLink("/cli#status", "status --deep")} adds gateway health probes to status output (requires a reachable gateway).`,
|
||||
);
|
||||
return lines;
|
||||
}
|
||||
|
||||
export async function channelsStatusCommand(
|
||||
opts: ChannelsStatusOptions,
|
||||
runtime: RuntimeEnv = defaultRuntime,
|
||||
|
||||
@@ -25,6 +25,9 @@ const mocks = vi.hoisted(() => {
|
||||
waitForGatewayReachable: vi.fn(),
|
||||
resolveControlUiLinks: vi.fn(),
|
||||
summarizeExistingConfig: vi.fn(),
|
||||
isCodexNativeWebSearchRelevant: vi.fn(({ config }: { config: OpenClawConfig }) =>
|
||||
Boolean(config.auth?.profiles?.["openai-codex:default"]),
|
||||
),
|
||||
};
|
||||
});
|
||||
|
||||
@@ -109,6 +112,10 @@ vi.mock("./onboard-search.js", () => ({
|
||||
setupSearch: mocks.setupSearch,
|
||||
}));
|
||||
|
||||
vi.mock("../agents/codex-native-web-search.js", () => ({
|
||||
isCodexNativeWebSearchRelevant: mocks.isCodexNativeWebSearchRelevant,
|
||||
}));
|
||||
|
||||
vi.mock("../config/mutate.js", async () => {
|
||||
const actual = await vi.importActual<typeof import("../config/mutate.js")>("../config/mutate.js");
|
||||
return {
|
||||
@@ -259,6 +266,13 @@ describe("runConfigureWizard", () => {
|
||||
|
||||
await runWebConfigureWizard();
|
||||
|
||||
expect(mocks.setupSearch).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
gateway: expect.objectContaining({ mode: "local" }),
|
||||
}),
|
||||
expect.anything(),
|
||||
expect.anything(),
|
||||
);
|
||||
expect(mocks.writeConfigFile).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
tools: expect.objectContaining({
|
||||
@@ -284,40 +298,6 @@ describe("runConfigureWizard", () => {
|
||||
expect(mocks.setupSearch).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it("delegates provider selection to the shared search setup flow", async () => {
|
||||
setupBaseWizardState();
|
||||
mocks.setupSearch.mockImplementation(async (cfg: OpenClawConfig) =>
|
||||
createEnabledWebSearchConfig("firecrawl", {
|
||||
enabled: true,
|
||||
})(cfg),
|
||||
);
|
||||
queueWizardPrompts({
|
||||
select: ["local"],
|
||||
confirm: [true, false],
|
||||
});
|
||||
|
||||
await runWebConfigureWizard();
|
||||
|
||||
expect(mocks.setupSearch).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
gateway: expect.objectContaining({ mode: "local" }),
|
||||
}),
|
||||
expect.anything(),
|
||||
expect.anything(),
|
||||
);
|
||||
expect(mocks.writeConfigFile).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
plugins: expect.objectContaining({
|
||||
entries: expect.objectContaining({
|
||||
firecrawl: expect.objectContaining({
|
||||
enabled: true,
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("does not crash when web search providers are unavailable under plugin policy", async () => {
|
||||
setupBaseWizardState();
|
||||
mocks.resolveSearchProviderOptions.mockReturnValue([]);
|
||||
|
||||
@@ -166,105 +166,7 @@ describe("buildGatewayInstallPlan", () => {
|
||||
expect(mocks.resolvePreferredNodePath).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("merges config env vars into the environment", async () => {
|
||||
mockNodeGatewayPlanFixture({
|
||||
serviceEnvironment: {
|
||||
OPENCLAW_PORT: "3000",
|
||||
HOME: "/Users/me",
|
||||
},
|
||||
});
|
||||
|
||||
const plan = await buildGatewayInstallPlan({
|
||||
env: isolatedPlanEnv(),
|
||||
port: 3000,
|
||||
runtime: "node",
|
||||
config: {
|
||||
env: {
|
||||
vars: {
|
||||
GOOGLE_API_KEY: "test-key", // pragma: allowlist secret
|
||||
},
|
||||
CUSTOM_VAR: "custom-value",
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
// Config env vars should be present
|
||||
expect(plan.environment.GOOGLE_API_KEY).toBe("test-key");
|
||||
expect(plan.environment.CUSTOM_VAR).toBe("custom-value");
|
||||
expect(plan.environment.OPENCLAW_SERVICE_MANAGED_ENV_KEYS).toBe("CUSTOM_VAR,GOOGLE_API_KEY");
|
||||
// Service environment vars should take precedence
|
||||
expect(plan.environment.OPENCLAW_PORT).toBe("3000");
|
||||
expect(plan.environment.HOME).toBe("/Users/me");
|
||||
});
|
||||
|
||||
it("drops dangerous config env vars before service merge", async () => {
|
||||
mockNodeGatewayPlanFixture({
|
||||
serviceEnvironment: {
|
||||
OPENCLAW_PORT: "3000",
|
||||
},
|
||||
});
|
||||
|
||||
const plan = await buildGatewayInstallPlan({
|
||||
env: isolatedPlanEnv(),
|
||||
port: 3000,
|
||||
runtime: "node",
|
||||
config: {
|
||||
env: {
|
||||
vars: {
|
||||
NODE_OPTIONS: "--require /tmp/evil.js",
|
||||
SAFE_KEY: "safe-value",
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
expect(plan.environment.NODE_OPTIONS).toBeUndefined();
|
||||
expect(plan.environment.SAFE_KEY).toBe("safe-value");
|
||||
});
|
||||
|
||||
it("does not include empty config env values", async () => {
|
||||
mockNodeGatewayPlanFixture();
|
||||
|
||||
const plan = await buildGatewayInstallPlan({
|
||||
env: isolatedPlanEnv(),
|
||||
port: 3000,
|
||||
runtime: "node",
|
||||
config: {
|
||||
env: {
|
||||
vars: {
|
||||
VALID_KEY: "valid",
|
||||
EMPTY_KEY: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
expect(plan.environment.VALID_KEY).toBe("valid");
|
||||
expect(plan.environment.EMPTY_KEY).toBeUndefined();
|
||||
});
|
||||
|
||||
it("drops whitespace-only config env values", async () => {
|
||||
mockNodeGatewayPlanFixture({ serviceEnvironment: {} });
|
||||
|
||||
const plan = await buildGatewayInstallPlan({
|
||||
env: isolatedPlanEnv(),
|
||||
port: 3000,
|
||||
runtime: "node",
|
||||
config: {
|
||||
env: {
|
||||
vars: {
|
||||
VALID_KEY: "valid",
|
||||
},
|
||||
TRIMMED_KEY: " ",
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
expect(plan.environment.VALID_KEY).toBe("valid");
|
||||
expect(plan.environment.TRIMMED_KEY).toBeUndefined();
|
||||
});
|
||||
|
||||
it("keeps service env values over config env vars", async () => {
|
||||
it("merges safe config env while dropping unsafe values and keeping service precedence", async () => {
|
||||
mockNodeGatewayPlanFixture({
|
||||
serviceEnvironment: {
|
||||
HOME: "/Users/service",
|
||||
@@ -279,15 +181,30 @@ describe("buildGatewayInstallPlan", () => {
|
||||
config: {
|
||||
env: {
|
||||
HOME: "/Users/config",
|
||||
CUSTOM_VAR: "custom-value",
|
||||
EMPTY_KEY: "",
|
||||
TRIMMED_KEY: " ",
|
||||
vars: {
|
||||
GOOGLE_API_KEY: "test-key", // pragma: allowlist secret
|
||||
OPENCLAW_PORT: "9999",
|
||||
NODE_OPTIONS: "--require /tmp/evil.js",
|
||||
SAFE_KEY: "safe-value",
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
expect(plan.environment.GOOGLE_API_KEY).toBe("test-key");
|
||||
expect(plan.environment.CUSTOM_VAR).toBe("custom-value");
|
||||
expect(plan.environment.SAFE_KEY).toBe("safe-value");
|
||||
expect(plan.environment.NODE_OPTIONS).toBeUndefined();
|
||||
expect(plan.environment.EMPTY_KEY).toBeUndefined();
|
||||
expect(plan.environment.TRIMMED_KEY).toBeUndefined();
|
||||
expect(plan.environment.HOME).toBe("/Users/service");
|
||||
expect(plan.environment.OPENCLAW_PORT).toBe("3000");
|
||||
expect(plan.environment.OPENCLAW_SERVICE_MANAGED_ENV_KEYS).toBe(
|
||||
"CUSTOM_VAR,GOOGLE_API_KEY,OPENCLAW_PORT,SAFE_KEY",
|
||||
);
|
||||
});
|
||||
|
||||
it("skips auth-profile store load when no auth-profile source exists", async () => {
|
||||
@@ -338,42 +255,7 @@ describe("buildGatewayInstallPlan", () => {
|
||||
expect(mocks.loadAuthProfileStoreForSecretsRuntime).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("merges env-backed auth-profile refs into the service environment", async () => {
|
||||
mockNodeGatewayPlanFixture({
|
||||
serviceEnvironment: {
|
||||
OPENCLAW_PORT: "3000",
|
||||
},
|
||||
});
|
||||
mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({
|
||||
version: 1,
|
||||
profiles: {
|
||||
"openai:default": {
|
||||
type: "api_key",
|
||||
provider: "openai",
|
||||
keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" },
|
||||
},
|
||||
"anthropic:default": {
|
||||
type: "token",
|
||||
provider: "anthropic",
|
||||
tokenRef: { source: "env", provider: "default", id: "ANTHROPIC_TOKEN" },
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const plan = await buildGatewayInstallPlan({
|
||||
env: isolatedPlanEnv({
|
||||
OPENAI_API_KEY: "sk-openai-test", // pragma: allowlist secret
|
||||
ANTHROPIC_TOKEN: "ant-test-token",
|
||||
}),
|
||||
port: 3000,
|
||||
runtime: "node",
|
||||
});
|
||||
|
||||
expect(plan.environment.OPENAI_API_KEY).toBe("sk-openai-test");
|
||||
expect(plan.environment.ANTHROPIC_TOKEN).toBe("ant-test-token");
|
||||
});
|
||||
|
||||
it("blocks dangerous auth-profile env refs from the service environment", async () => {
|
||||
it("merges only portable auth-profile env refs into the service environment", async () => {
|
||||
mockNodeGatewayPlanFixture({
|
||||
serviceEnvironment: {
|
||||
OPENCLAW_PORT: "3000",
|
||||
@@ -392,11 +274,26 @@ describe("buildGatewayInstallPlan", () => {
|
||||
provider: "git",
|
||||
tokenRef: { source: "env", provider: "default", id: "GIT_ASKPASS" },
|
||||
},
|
||||
"broken:default": {
|
||||
type: "token",
|
||||
provider: "broken",
|
||||
tokenRef: { source: "env", provider: "default", id: "BAD KEY" },
|
||||
},
|
||||
"openai:default": {
|
||||
type: "api_key",
|
||||
provider: "openai",
|
||||
keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" },
|
||||
},
|
||||
"anthropic:default": {
|
||||
type: "token",
|
||||
provider: "anthropic",
|
||||
tokenRef: { source: "env", provider: "default", id: "ANTHROPIC_TOKEN" },
|
||||
},
|
||||
"missing:default": {
|
||||
type: "token",
|
||||
provider: "missing",
|
||||
tokenRef: { source: "env", provider: "default", id: "MISSING_TOKEN" },
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
@@ -406,6 +303,7 @@ describe("buildGatewayInstallPlan", () => {
|
||||
NODE_OPTIONS: "--require ./pwn.js",
|
||||
GIT_ASKPASS: "/tmp/askpass.sh",
|
||||
OPENAI_API_KEY: "sk-openai-test", // pragma: allowlist secret
|
||||
ANTHROPIC_TOKEN: "ant-test-token",
|
||||
}),
|
||||
port: 3000,
|
||||
runtime: "node",
|
||||
@@ -414,64 +312,13 @@ describe("buildGatewayInstallPlan", () => {
|
||||
|
||||
expect(plan.environment.NODE_OPTIONS).toBeUndefined();
|
||||
expect(plan.environment.GIT_ASKPASS).toBeUndefined();
|
||||
expect(plan.environment["BAD KEY"]).toBeUndefined();
|
||||
expect(plan.environment.MISSING_TOKEN).toBeUndefined();
|
||||
expect(plan.environment.OPENAI_API_KEY).toBe("sk-openai-test");
|
||||
expect(plan.environment.ANTHROPIC_TOKEN).toBe("ant-test-token");
|
||||
expect(warn).toHaveBeenCalledWith(expect.stringContaining("NODE_OPTIONS"), "Auth profile");
|
||||
expect(warn).toHaveBeenCalledWith(expect.stringContaining("GIT_ASKPASS"), "Auth profile");
|
||||
});
|
||||
|
||||
it("skips non-portable auth-profile env ref keys", async () => {
|
||||
mockNodeGatewayPlanFixture({
|
||||
serviceEnvironment: {
|
||||
OPENCLAW_PORT: "3000",
|
||||
},
|
||||
});
|
||||
mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({
|
||||
version: 1,
|
||||
profiles: {
|
||||
"broken:default": {
|
||||
type: "token",
|
||||
provider: "broken",
|
||||
tokenRef: { source: "env", provider: "default", id: "BAD KEY" },
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const plan = await buildGatewayInstallPlan({
|
||||
env: isolatedPlanEnv({
|
||||
"BAD KEY": "should-not-pass",
|
||||
}),
|
||||
port: 3000,
|
||||
runtime: "node",
|
||||
});
|
||||
|
||||
expect(plan.environment["BAD KEY"]).toBeUndefined();
|
||||
});
|
||||
|
||||
it("skips unresolved auth-profile env refs", async () => {
|
||||
mockNodeGatewayPlanFixture({
|
||||
serviceEnvironment: {
|
||||
OPENCLAW_PORT: "3000",
|
||||
},
|
||||
});
|
||||
mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({
|
||||
version: 1,
|
||||
profiles: {
|
||||
"openai:default": {
|
||||
type: "api_key",
|
||||
provider: "openai",
|
||||
keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" },
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const plan = await buildGatewayInstallPlan({
|
||||
env: isolatedPlanEnv(),
|
||||
port: 3000,
|
||||
runtime: "node",
|
||||
});
|
||||
|
||||
expect(plan.environment.OPENAI_API_KEY).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("buildGatewayInstallPlan — dotenv merge", () => {
|
||||
@@ -485,28 +332,19 @@ describe("buildGatewayInstallPlan — dotenv merge", () => {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("merges .env file vars into the install plan", async () => {
|
||||
await writeStateDirDotEnv("BRAVE_API_KEY=BSA-from-env\nOPENROUTER_API_KEY=or-key\n", {
|
||||
stateDir: path.join(tmpDir, ".openclaw"),
|
||||
it("merges .env vars with config and service precedence", async () => {
|
||||
await writeStateDirDotEnv(
|
||||
"BRAVE_API_KEY=BSA-from-env\nOPENROUTER_API_KEY=or-key\nMY_KEY=from-dotenv\nHOME=/from-dotenv\n",
|
||||
{
|
||||
stateDir: path.join(tmpDir, ".openclaw"),
|
||||
},
|
||||
);
|
||||
mockNodeGatewayPlanFixture({
|
||||
serviceEnvironment: {
|
||||
HOME: "/from-service",
|
||||
OPENCLAW_PORT: "3000",
|
||||
},
|
||||
});
|
||||
mockNodeGatewayPlanFixture({ serviceEnvironment: { OPENCLAW_PORT: "3000" } });
|
||||
|
||||
const plan = await buildGatewayInstallPlan({
|
||||
env: { HOME: tmpDir },
|
||||
port: 3000,
|
||||
runtime: "node",
|
||||
});
|
||||
|
||||
expect(plan.environment.BRAVE_API_KEY).toBe("BSA-from-env");
|
||||
expect(plan.environment.OPENROUTER_API_KEY).toBe("or-key");
|
||||
expect(plan.environment.OPENCLAW_PORT).toBe("3000");
|
||||
});
|
||||
|
||||
it("config env vars override .env file vars", async () => {
|
||||
await writeStateDirDotEnv("MY_KEY=from-dotenv\n", {
|
||||
stateDir: path.join(tmpDir, ".openclaw"),
|
||||
});
|
||||
mockNodeGatewayPlanFixture({ serviceEnvironment: {} });
|
||||
|
||||
const plan = await buildGatewayInstallPlan({
|
||||
env: { HOME: tmpDir },
|
||||
@@ -521,16 +359,15 @@ describe("buildGatewayInstallPlan — dotenv merge", () => {
|
||||
},
|
||||
});
|
||||
|
||||
expect(plan.environment.BRAVE_API_KEY).toBe("BSA-from-env");
|
||||
expect(plan.environment.OPENROUTER_API_KEY).toBe("or-key");
|
||||
expect(plan.environment.MY_KEY).toBe("from-config");
|
||||
expect(plan.environment.HOME).toBe("/from-service");
|
||||
expect(plan.environment.OPENCLAW_PORT).toBe("3000");
|
||||
});
|
||||
|
||||
it("service env overrides .env file vars", async () => {
|
||||
await writeStateDirDotEnv("HOME=/from-dotenv\n", {
|
||||
stateDir: path.join(tmpDir, ".openclaw"),
|
||||
});
|
||||
mockNodeGatewayPlanFixture({
|
||||
serviceEnvironment: { HOME: "/from-service" },
|
||||
});
|
||||
it("works when .env file does not exist", async () => {
|
||||
mockNodeGatewayPlanFixture({ serviceEnvironment: { OPENCLAW_PORT: "3000" } });
|
||||
|
||||
const plan = await buildGatewayInstallPlan({
|
||||
env: { HOME: tmpDir },
|
||||
@@ -538,41 +375,10 @@ describe("buildGatewayInstallPlan — dotenv merge", () => {
|
||||
runtime: "node",
|
||||
});
|
||||
|
||||
expect(plan.environment.HOME).toBe("/from-service");
|
||||
expect(plan.environment.OPENCLAW_PORT).toBe("3000");
|
||||
});
|
||||
|
||||
it("preserves safe custom vars from an existing service env and merges PATH", async () => {
|
||||
mockNodeGatewayPlanFixture({
|
||||
serviceEnvironment: {
|
||||
HOME: "/from-service",
|
||||
OPENCLAW_PORT: "3000",
|
||||
PATH: "/managed/bin:/usr/bin",
|
||||
},
|
||||
});
|
||||
|
||||
const plan = await buildGatewayInstallPlan({
|
||||
env: { HOME: tmpDir },
|
||||
port: 3000,
|
||||
runtime: "node",
|
||||
existingEnvironment: {
|
||||
PATH: "/custom/go/bin:/usr/bin",
|
||||
GOBIN: "/Users/test/.local/gopath/bin",
|
||||
BLOGWATCHER_HOME: "/Users/test/.blogwatcher",
|
||||
NODE_OPTIONS: "--require /tmp/evil.js",
|
||||
GOPATH: "/Users/test/.local/gopath",
|
||||
OPENCLAW_SERVICE_MARKER: "openclaw",
|
||||
},
|
||||
});
|
||||
|
||||
expect(plan.environment.PATH).toBe("/managed/bin:/usr/bin:/custom/go/bin");
|
||||
expect(plan.environment.GOBIN).toBe("/Users/test/.local/gopath/bin");
|
||||
expect(plan.environment.BLOGWATCHER_HOME).toBe("/Users/test/.blogwatcher");
|
||||
expect(plan.environment.NODE_OPTIONS).toBeUndefined();
|
||||
expect(plan.environment.GOPATH).toBeUndefined();
|
||||
expect(plan.environment.OPENCLAW_SERVICE_MARKER).toBeUndefined();
|
||||
});
|
||||
|
||||
it("drops non-absolute and temp PATH entries from an existing service env", async () => {
|
||||
mockNodeGatewayPlanFixture({
|
||||
serviceEnvironment: {
|
||||
HOME: "/from-service",
|
||||
@@ -588,10 +394,20 @@ describe("buildGatewayInstallPlan — dotenv merge", () => {
|
||||
runtime: "node",
|
||||
existingEnvironment: {
|
||||
PATH: ".:/tmp/evil:/custom/go/bin:/usr/bin",
|
||||
GOBIN: "/Users/test/.local/gopath/bin",
|
||||
BLOGWATCHER_HOME: "/Users/test/.blogwatcher",
|
||||
NODE_OPTIONS: "--require /tmp/evil.js",
|
||||
GOPATH: "/Users/test/.local/gopath",
|
||||
OPENCLAW_SERVICE_MARKER: "openclaw",
|
||||
},
|
||||
});
|
||||
|
||||
expect(plan.environment.PATH).toBe("/managed/bin:/usr/bin:/custom/go/bin");
|
||||
expect(plan.environment.GOBIN).toBe("/Users/test/.local/gopath/bin");
|
||||
expect(plan.environment.BLOGWATCHER_HOME).toBe("/Users/test/.blogwatcher");
|
||||
expect(plan.environment.NODE_OPTIONS).toBeUndefined();
|
||||
expect(plan.environment.GOPATH).toBeUndefined();
|
||||
expect(plan.environment.OPENCLAW_SERVICE_MARKER).toBeUndefined();
|
||||
});
|
||||
|
||||
it("drops keys that were previously tracked as managed service env", async () => {
|
||||
@@ -622,18 +438,6 @@ describe("buildGatewayInstallPlan — dotenv merge", () => {
|
||||
expect(plan.environment.GOPATH).toBeUndefined();
|
||||
expect(plan.environment.OPENCLAW_SERVICE_MANAGED_ENV_KEYS).toBeUndefined();
|
||||
});
|
||||
|
||||
it("works when .env file does not exist", async () => {
|
||||
mockNodeGatewayPlanFixture({ serviceEnvironment: { OPENCLAW_PORT: "3000" } });
|
||||
|
||||
const plan = await buildGatewayInstallPlan({
|
||||
env: { HOME: tmpDir },
|
||||
port: 3000,
|
||||
runtime: "node",
|
||||
});
|
||||
|
||||
expect(plan.environment.OPENCLAW_PORT).toBe("3000");
|
||||
});
|
||||
});
|
||||
|
||||
describe("gatewayInstallErrorHint", () => {
|
||||
|
||||
51
src/commands/doctor-auth-legacy-oauth.ts
Normal file
51
src/commands/doctor-auth-legacy-oauth.ts
Normal file
@@ -0,0 +1,51 @@
|
||||
import { repairOAuthProfileIdMismatch } from "../agents/auth-profiles/repair.js";
|
||||
import { ensureAuthProfileStore } from "../agents/auth-profiles/store.js";
|
||||
import type { OpenClawConfig } from "../config/types.openclaw.js";
|
||||
import type { DoctorPrompter } from "./doctor-prompter.js";
|
||||
|
||||
async function loadProviderRuntime() {
|
||||
return import("../plugins/providers.runtime.js");
|
||||
}
|
||||
|
||||
async function loadNoteRuntime() {
|
||||
return import("../terminal/note.js");
|
||||
}
|
||||
|
||||
export async function maybeRepairLegacyOAuthProfileIds(
|
||||
cfg: OpenClawConfig,
|
||||
prompter: DoctorPrompter,
|
||||
): Promise<OpenClawConfig> {
|
||||
const store = ensureAuthProfileStore();
|
||||
let nextCfg = cfg;
|
||||
const { resolvePluginProviders } = await loadProviderRuntime();
|
||||
const providers = resolvePluginProviders({
|
||||
config: cfg,
|
||||
env: process.env,
|
||||
mode: "setup",
|
||||
});
|
||||
for (const provider of providers) {
|
||||
for (const repairSpec of provider.oauthProfileIdRepairs ?? []) {
|
||||
const repair = repairOAuthProfileIdMismatch({
|
||||
cfg: nextCfg,
|
||||
store,
|
||||
provider: provider.id,
|
||||
legacyProfileId: repairSpec.legacyProfileId,
|
||||
});
|
||||
if (!repair.migrated || repair.changes.length === 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const { note } = await loadNoteRuntime();
|
||||
note(repair.changes.map((c) => `- ${c}`).join("\n"), "Auth profiles");
|
||||
const apply = await prompter.confirm({
|
||||
message: `Update ${repairSpec.promptLabel ?? provider.label} OAuth profile id in config now?`,
|
||||
initialValue: true,
|
||||
});
|
||||
if (!apply) {
|
||||
continue;
|
||||
}
|
||||
nextCfg = repair.config;
|
||||
}
|
||||
}
|
||||
return nextCfg;
|
||||
}
|
||||
@@ -1,24 +1,34 @@
|
||||
import fs from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import type { AuthProfileStore } from "../agents/auth-profiles/types.js";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import type { ProviderPlugin } from "../plugins/types.js";
|
||||
import { captureEnv } from "../test-utils/env.js";
|
||||
import { maybeRepairLegacyOAuthProfileIds } from "./doctor-auth.js";
|
||||
import { maybeRepairLegacyOAuthProfileIds } from "./doctor-auth-legacy-oauth.js";
|
||||
import type { DoctorPrompter } from "./doctor-prompter.js";
|
||||
import type { DoctorRepairMode } from "./doctor-repair-mode.js";
|
||||
|
||||
const resolvePluginProvidersMock = vi.fn<() => ProviderPlugin[]>(() => []);
|
||||
const isPluginProvidersLoadInFlightMock = vi.fn(() => false);
|
||||
const authProfileStoreMock = vi.hoisted(() => ({
|
||||
store: { version: 1, profiles: {} } as AuthProfileStore,
|
||||
}));
|
||||
const repairMocks = vi.hoisted(() => ({
|
||||
repairOAuthProfileIdMismatch: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock("../plugins/providers.runtime.js", () => ({
|
||||
isPluginProvidersLoadInFlight: () => isPluginProvidersLoadInFlightMock(),
|
||||
resolvePluginProviders: () => resolvePluginProvidersMock(),
|
||||
}));
|
||||
|
||||
let envSnapshot: ReturnType<typeof captureEnv>;
|
||||
let tempAgentDir: string | undefined;
|
||||
vi.mock("../agents/auth-profiles/repair.js", () => ({
|
||||
repairOAuthProfileIdMismatch: repairMocks.repairOAuthProfileIdMismatch,
|
||||
}));
|
||||
|
||||
vi.mock("../agents/auth-profiles/store.js", () => ({
|
||||
ensureAuthProfileStore: () => authProfileStoreMock.store,
|
||||
}));
|
||||
|
||||
vi.mock("../terminal/note.js", () => ({
|
||||
note: vi.fn(),
|
||||
}));
|
||||
|
||||
function makePrompter(confirmValue: boolean): DoctorPrompter {
|
||||
const repairMode: DoctorRepairMode = {
|
||||
@@ -41,54 +51,35 @@ function makePrompter(confirmValue: boolean): DoctorPrompter {
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
envSnapshot = captureEnv(["OPENCLAW_AGENT_DIR", "PI_CODING_AGENT_DIR"]);
|
||||
tempAgentDir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-auth-"));
|
||||
process.env.OPENCLAW_AGENT_DIR = tempAgentDir;
|
||||
process.env.PI_CODING_AGENT_DIR = tempAgentDir;
|
||||
resolvePluginProvidersMock.mockReset();
|
||||
resolvePluginProvidersMock.mockReturnValue([]);
|
||||
isPluginProvidersLoadInFlightMock.mockReset();
|
||||
isPluginProvidersLoadInFlightMock.mockReturnValue(false);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
envSnapshot.restore();
|
||||
if (tempAgentDir) {
|
||||
fs.rmSync(tempAgentDir, { recursive: true, force: true });
|
||||
tempAgentDir = undefined;
|
||||
}
|
||||
authProfileStoreMock.store = { version: 1, profiles: {} };
|
||||
repairMocks.repairOAuthProfileIdMismatch.mockReset();
|
||||
repairMocks.repairOAuthProfileIdMismatch.mockReturnValue({
|
||||
config: {},
|
||||
changes: [],
|
||||
migrated: false,
|
||||
});
|
||||
});
|
||||
|
||||
describe("maybeRepairLegacyOAuthProfileIds", () => {
|
||||
it("repairs provider-owned legacy OAuth profile ids", async () => {
|
||||
if (!tempAgentDir) {
|
||||
throw new Error("Missing temp agent dir");
|
||||
}
|
||||
const authPath = path.join(tempAgentDir, "auth-profiles.json");
|
||||
fs.writeFileSync(
|
||||
authPath,
|
||||
`${JSON.stringify(
|
||||
{
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:user@example.com": {
|
||||
type: "oauth",
|
||||
provider: "anthropic",
|
||||
access: "token-a",
|
||||
refresh: "token-r",
|
||||
expires: Date.now() + 60_000,
|
||||
email: "user@example.com",
|
||||
},
|
||||
},
|
||||
lastGood: {
|
||||
anthropic: "anthropic:user@example.com",
|
||||
},
|
||||
authProfileStoreMock.store = {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:user@example.com": {
|
||||
type: "oauth",
|
||||
provider: "anthropic",
|
||||
access: "token-a",
|
||||
refresh: "token-r",
|
||||
expires: Date.now() + 60_000,
|
||||
email: "user@example.com",
|
||||
},
|
||||
null,
|
||||
2,
|
||||
)}\n`,
|
||||
"utf8",
|
||||
);
|
||||
},
|
||||
lastGood: {
|
||||
anthropic: "anthropic:user@example.com",
|
||||
},
|
||||
};
|
||||
|
||||
resolvePluginProvidersMock.mockReturnValue([
|
||||
{
|
||||
@@ -98,6 +89,24 @@ describe("maybeRepairLegacyOAuthProfileIds", () => {
|
||||
oauthProfileIdRepairs: [{ legacyProfileId: "anthropic:default" }],
|
||||
},
|
||||
]);
|
||||
repairMocks.repairOAuthProfileIdMismatch.mockReturnValue({
|
||||
migrated: true,
|
||||
changes: ["Auth: migrate anthropic:default → anthropic:user@example.com"],
|
||||
config: {
|
||||
auth: {
|
||||
profiles: {
|
||||
"anthropic:user@example.com": {
|
||||
provider: "anthropic",
|
||||
mode: "oauth",
|
||||
email: "user@example.com",
|
||||
},
|
||||
},
|
||||
order: {
|
||||
anthropic: ["anthropic:user@example.com"],
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const next = await maybeRepairLegacyOAuthProfileIds(
|
||||
{
|
||||
@@ -113,6 +122,18 @@ describe("maybeRepairLegacyOAuthProfileIds", () => {
|
||||
makePrompter(true),
|
||||
);
|
||||
|
||||
expect(repairMocks.repairOAuthProfileIdMismatch).toHaveBeenCalledWith({
|
||||
cfg: expect.objectContaining({
|
||||
auth: expect.objectContaining({
|
||||
profiles: expect.objectContaining({
|
||||
"anthropic:default": { provider: "anthropic", mode: "oauth" },
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
store: authProfileStoreMock.store,
|
||||
provider: "anthropic",
|
||||
legacyProfileId: "anthropic:default",
|
||||
});
|
||||
expect(next.auth?.profiles?.["anthropic:default"]).toBeUndefined();
|
||||
expect(next.auth?.profiles?.["anthropic:user@example.com"]).toMatchObject({
|
||||
provider: "anthropic",
|
||||
|
||||
@@ -6,7 +6,6 @@ import {
|
||||
import {
|
||||
type AuthCredentialReasonCode,
|
||||
ensureAuthProfileStore,
|
||||
repairOAuthProfileIdMismatch,
|
||||
resolveApiKeyForProfile,
|
||||
resolveProfileUnusableUntilForDisplay,
|
||||
} from "../agents/auth-profiles.js";
|
||||
@@ -18,54 +17,17 @@ import {
|
||||
} from "../agents/auth-profiles/oauth-refresh-failure.js";
|
||||
import type { OpenClawConfig } from "../config/types.openclaw.js";
|
||||
import { formatErrorMessage } from "../infra/errors.js";
|
||||
import { resolvePluginProviders } from "../plugins/providers.runtime.js";
|
||||
import { note } from "../terminal/note.js";
|
||||
import { isRecord } from "../utils.js";
|
||||
import type { DoctorPrompter } from "./doctor-prompter.js";
|
||||
import { buildProviderAuthRecoveryHint } from "./provider-auth-guidance.js";
|
||||
export { maybeRepairLegacyOAuthProfileIds } from "./doctor-auth-legacy-oauth.js";
|
||||
|
||||
const CODEX_PROVIDER_ID = "openai-codex";
|
||||
const CODEX_OAUTH_WARNING_TITLE = "Codex OAuth";
|
||||
const OPENAI_BASE_URL = "https://api.openai.com/v1";
|
||||
const LEGACY_CODEX_APIS = new Set(["openai-responses", "openai-completions"]);
|
||||
|
||||
export async function maybeRepairLegacyOAuthProfileIds(
|
||||
cfg: OpenClawConfig,
|
||||
prompter: DoctorPrompter,
|
||||
): Promise<OpenClawConfig> {
|
||||
const store = ensureAuthProfileStore();
|
||||
let nextCfg = cfg;
|
||||
const providers = resolvePluginProviders({
|
||||
config: cfg,
|
||||
env: process.env,
|
||||
mode: "setup",
|
||||
});
|
||||
for (const provider of providers) {
|
||||
for (const repairSpec of provider.oauthProfileIdRepairs ?? []) {
|
||||
const repair = repairOAuthProfileIdMismatch({
|
||||
cfg: nextCfg,
|
||||
store,
|
||||
provider: provider.id,
|
||||
legacyProfileId: repairSpec.legacyProfileId,
|
||||
});
|
||||
if (!repair.migrated || repair.changes.length === 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
note(repair.changes.map((c) => `- ${c}`).join("\n"), "Auth profiles");
|
||||
const apply = await prompter.confirm({
|
||||
message: `Update ${repairSpec.promptLabel ?? provider.label} OAuth profile id in config now?`,
|
||||
initialValue: true,
|
||||
});
|
||||
if (!apply) {
|
||||
continue;
|
||||
}
|
||||
nextCfg = repair.config;
|
||||
}
|
||||
}
|
||||
return nextCfg;
|
||||
}
|
||||
|
||||
function hasConfiguredCodexOAuthProfile(cfg: OpenClawConfig): boolean {
|
||||
return Object.values(cfg.auth?.profiles ?? {}).some(
|
||||
(profile) => profile.provider === CODEX_PROVIDER_ID && profile.mode === "oauth",
|
||||
|
||||
@@ -11,6 +11,194 @@ import {
|
||||
type TerminalNote = (message: string, title?: string) => void;
|
||||
|
||||
const terminalNoteMock = vi.hoisted(() => vi.fn<TerminalNote>());
|
||||
const legacyConfigMigrationForTest = vi.hoisted(() => {
|
||||
function asRecord(value: unknown): Record<string, unknown> | null {
|
||||
return value && typeof value === "object" && !Array.isArray(value)
|
||||
? (value as Record<string, unknown>)
|
||||
: null;
|
||||
}
|
||||
|
||||
function ensureRecord(parent: Record<string, unknown>, key: string): Record<string, unknown> {
|
||||
const current = asRecord(parent[key]);
|
||||
if (current) {
|
||||
return current;
|
||||
}
|
||||
const next: Record<string, unknown> = {};
|
||||
parent[key] = next;
|
||||
return next;
|
||||
}
|
||||
|
||||
function migrateThreadBinding(value: unknown, changes: string[], pathLabel: string): void {
|
||||
const record = asRecord(value);
|
||||
const bindings = asRecord(record?.threadBindings);
|
||||
if (!bindings || !("ttlHours" in bindings)) {
|
||||
return;
|
||||
}
|
||||
if (!("idleHours" in bindings)) {
|
||||
bindings.idleHours = bindings.ttlHours;
|
||||
}
|
||||
delete bindings.ttlHours;
|
||||
changes.push(`Moved ${pathLabel}.threadBindings.ttlHours to idleHours.`);
|
||||
}
|
||||
|
||||
function migrateStreamingAlias(channel: Record<string, unknown>, channelId: string): boolean {
|
||||
if (
|
||||
!("streamMode" in channel) &&
|
||||
typeof channel.streaming !== "boolean" &&
|
||||
typeof channel.streaming !== "string"
|
||||
) {
|
||||
return false;
|
||||
}
|
||||
if (channelId === "googlechat") {
|
||||
delete channel.streamMode;
|
||||
return true;
|
||||
}
|
||||
const streaming = asRecord(channel.streaming) ?? {};
|
||||
if (!("mode" in streaming)) {
|
||||
streaming.mode =
|
||||
channel.streamMode === "block"
|
||||
? "partial"
|
||||
: channel.streaming === false
|
||||
? "off"
|
||||
: "partial";
|
||||
}
|
||||
delete channel.streamMode;
|
||||
channel.streaming = streaming;
|
||||
return true;
|
||||
}
|
||||
|
||||
function migrateNestedAllowAliases(channel: Record<string, unknown>, channelId: string): boolean {
|
||||
let changed = false;
|
||||
if (channelId === "slack") {
|
||||
for (const room of Object.values(asRecord(channel.channels) ?? {})) {
|
||||
const roomRecord = asRecord(room);
|
||||
if (roomRecord && "allow" in roomRecord) {
|
||||
roomRecord.enabled = roomRecord.allow;
|
||||
delete roomRecord.allow;
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (channelId === "googlechat") {
|
||||
for (const group of Object.values(asRecord(channel.groups) ?? {})) {
|
||||
const groupRecord = asRecord(group);
|
||||
if (groupRecord && "allow" in groupRecord) {
|
||||
groupRecord.enabled = groupRecord.allow;
|
||||
delete groupRecord.allow;
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (channelId === "discord") {
|
||||
for (const guild of Object.values(asRecord(channel.guilds) ?? {})) {
|
||||
for (const room of Object.values(asRecord(asRecord(guild)?.channels) ?? {})) {
|
||||
const roomRecord = asRecord(room);
|
||||
if (roomRecord && "allow" in roomRecord) {
|
||||
roomRecord.enabled = roomRecord.allow;
|
||||
delete roomRecord.allow;
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return changed;
|
||||
}
|
||||
|
||||
function migrate(raw: unknown): { next: Record<string, unknown> | null; changes: string[] } {
|
||||
const root = asRecord(raw);
|
||||
if (!root) {
|
||||
return { next: null, changes: [] };
|
||||
}
|
||||
const next = structuredClone(root);
|
||||
const changes: string[] = [];
|
||||
|
||||
const heartbeat = asRecord(next.heartbeat);
|
||||
if (heartbeat) {
|
||||
const agents = ensureRecord(next, "agents");
|
||||
const agentDefaults = ensureRecord(agents, "defaults");
|
||||
const channels = ensureRecord(next, "channels");
|
||||
const channelDefaults = ensureRecord(channels, "defaults");
|
||||
const agentHeartbeat: Record<string, unknown> = {};
|
||||
const channelHeartbeat: Record<string, unknown> = {};
|
||||
for (const key of ["model", "every"]) {
|
||||
if (key in heartbeat) {
|
||||
agentHeartbeat[key] = heartbeat[key];
|
||||
}
|
||||
}
|
||||
for (const key of ["showOk", "showAlerts", "useIndicator"]) {
|
||||
if (key in heartbeat) {
|
||||
channelHeartbeat[key] = heartbeat[key];
|
||||
}
|
||||
}
|
||||
if (Object.keys(agentHeartbeat).length > 0) {
|
||||
agentDefaults.heartbeat = {
|
||||
...asRecord(agentDefaults.heartbeat),
|
||||
...agentHeartbeat,
|
||||
};
|
||||
}
|
||||
if (Object.keys(channelHeartbeat).length > 0) {
|
||||
channelDefaults.heartbeat = {
|
||||
...asRecord(channelDefaults.heartbeat),
|
||||
...channelHeartbeat,
|
||||
};
|
||||
}
|
||||
delete next.heartbeat;
|
||||
changes.push("Moved heartbeat to agents.defaults.heartbeat and channels.defaults.heartbeat.");
|
||||
}
|
||||
|
||||
const gateway = asRecord(next.gateway);
|
||||
if (gateway?.bind === "0.0.0.0") {
|
||||
gateway.bind = "lan";
|
||||
changes.push("Normalized gateway.bind host alias.");
|
||||
} else if (gateway?.bind === "localhost" || gateway?.bind === "127.0.0.1") {
|
||||
gateway.bind = "loopback";
|
||||
changes.push("Normalized gateway.bind host alias.");
|
||||
}
|
||||
|
||||
migrateThreadBinding(next.session, changes, "session");
|
||||
const channels = asRecord(next.channels);
|
||||
for (const [channelId, channelRaw] of Object.entries(channels ?? {})) {
|
||||
if (channelId === "defaults") {
|
||||
continue;
|
||||
}
|
||||
const channel = asRecord(channelRaw);
|
||||
if (!channel) {
|
||||
continue;
|
||||
}
|
||||
migrateThreadBinding(channel, changes, `channels.${channelId}`);
|
||||
if (migrateStreamingAlias(channel, channelId)) {
|
||||
changes.push(`Normalized channels.${channelId} streaming aliases.`);
|
||||
}
|
||||
if (migrateNestedAllowAliases(channel, channelId)) {
|
||||
changes.push(`Normalized channels.${channelId} nested allow aliases.`);
|
||||
}
|
||||
for (const [accountId, accountRaw] of Object.entries(asRecord(channel.accounts) ?? {})) {
|
||||
const account = asRecord(accountRaw);
|
||||
migrateThreadBinding(account, changes, `channels.${channelId}.accounts.${accountId}`);
|
||||
if (account && migrateStreamingAlias(account, channelId)) {
|
||||
changes.push(`Normalized channels.${channelId}.accounts.${accountId} streaming aliases.`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const sandbox = asRecord(asRecord(asRecord(next.agents)?.defaults)?.sandbox);
|
||||
if (sandbox && "perSession" in sandbox) {
|
||||
sandbox.scope = sandbox.perSession === true ? "session" : "workspace";
|
||||
delete sandbox.perSession;
|
||||
changes.push("Moved agents.defaults.sandbox.perSession to scope.");
|
||||
}
|
||||
|
||||
return changes.length > 0 ? { next, changes } : { next: null, changes: [] };
|
||||
}
|
||||
|
||||
return {
|
||||
migrate,
|
||||
migrateLegacyConfig: (raw: unknown) => {
|
||||
const { next, changes } = migrate(raw);
|
||||
return { config: next, changes };
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("../terminal/note.js", () => ({
|
||||
note: terminalNoteMock,
|
||||
@@ -59,6 +247,202 @@ vi.mock("../config/validation.js", () => ({
|
||||
validateConfigObjectWithPlugins: vi.fn((config: unknown) => ({ ok: true, config })),
|
||||
}));
|
||||
|
||||
vi.mock("../config/legacy.js", () => {
|
||||
type LegacyRule = {
|
||||
path: string[];
|
||||
message: string;
|
||||
match?: (value: unknown, root: Record<string, unknown>) => boolean;
|
||||
requireSourceLiteral?: boolean;
|
||||
};
|
||||
|
||||
function asRecord(value: unknown): Record<string, unknown> | null {
|
||||
return value && typeof value === "object" && !Array.isArray(value)
|
||||
? (value as Record<string, unknown>)
|
||||
: null;
|
||||
}
|
||||
|
||||
function getPathValue(root: Record<string, unknown>, pathParts: readonly string[]): unknown {
|
||||
let cursor: unknown = root;
|
||||
for (const part of pathParts) {
|
||||
const record = asRecord(cursor);
|
||||
if (!record) {
|
||||
return undefined;
|
||||
}
|
||||
cursor = record[part];
|
||||
}
|
||||
return cursor;
|
||||
}
|
||||
|
||||
function addIssue(
|
||||
issues: Array<{ path: string; message: string }>,
|
||||
pathParts: readonly string[],
|
||||
message: string,
|
||||
) {
|
||||
issues.push({ path: pathParts.join("."), message });
|
||||
}
|
||||
|
||||
function hasLegacyStreamingAlias(channel: Record<string, unknown>): boolean {
|
||||
return (
|
||||
"streamMode" in channel ||
|
||||
"chunkMode" in channel ||
|
||||
"blockStreaming" in channel ||
|
||||
"draftChunk" in channel ||
|
||||
"blockStreamingCoalesce" in channel ||
|
||||
"nativeStreaming" in channel ||
|
||||
typeof channel.streaming === "boolean" ||
|
||||
typeof channel.streaming === "string"
|
||||
);
|
||||
}
|
||||
|
||||
return {
|
||||
findLegacyConfigIssues: (raw: unknown, sourceRaw?: unknown, extraRules: LegacyRule[] = []) => {
|
||||
const root = asRecord(raw);
|
||||
if (!root) {
|
||||
return [];
|
||||
}
|
||||
const sourceRoot = asRecord(sourceRaw) ?? root;
|
||||
const issues: Array<{ path: string; message: string }> = [];
|
||||
|
||||
if ("heartbeat" in root) {
|
||||
addIssue(
|
||||
issues,
|
||||
["heartbeat"],
|
||||
'heartbeat is legacy; use agents.defaults.heartbeat and channels.defaults.heartbeat. Run "openclaw doctor --fix".',
|
||||
);
|
||||
}
|
||||
if ("memorySearch" in root) {
|
||||
addIssue(
|
||||
issues,
|
||||
["memorySearch"],
|
||||
'memorySearch is legacy; use agents.defaults.memorySearch. Run "openclaw doctor --fix".',
|
||||
);
|
||||
}
|
||||
const gateway = asRecord(root.gateway);
|
||||
if (gateway && "bind" in gateway) {
|
||||
addIssue(
|
||||
issues,
|
||||
["gateway", "bind"],
|
||||
'gateway.bind host aliases are legacy; use the canonical bind mode. Run "openclaw doctor --fix".',
|
||||
);
|
||||
}
|
||||
const sessionThreadBindings = asRecord(asRecord(root.session)?.threadBindings);
|
||||
if (sessionThreadBindings && "ttlHours" in sessionThreadBindings) {
|
||||
addIssue(
|
||||
issues,
|
||||
["session", "threadBindings", "ttlHours"],
|
||||
'session.threadBindings.ttlHours is legacy; use session.threadBindings.idleHours. Run "openclaw doctor --fix".',
|
||||
);
|
||||
}
|
||||
const xSearch = asRecord(asRecord(asRecord(root.tools)?.web)?.x_search);
|
||||
if (xSearch && "apiKey" in xSearch) {
|
||||
addIssue(
|
||||
issues,
|
||||
["tools", "web", "x_search", "apiKey"],
|
||||
'tools.web.x_search.apiKey is legacy; use plugins.entries.xai.config.webSearch.apiKey. Run "openclaw doctor --fix".',
|
||||
);
|
||||
}
|
||||
const sandbox = asRecord(asRecord(asRecord(root.agents)?.defaults)?.sandbox);
|
||||
if (sandbox && "perSession" in sandbox) {
|
||||
addIssue(
|
||||
issues,
|
||||
["agents", "defaults", "sandbox"],
|
||||
'agents.defaults.sandbox.perSession is legacy; use agents.defaults.sandbox.scope. Run "openclaw doctor --fix".',
|
||||
);
|
||||
}
|
||||
|
||||
const channels = asRecord(root.channels);
|
||||
for (const [channelId, channelRaw] of Object.entries(channels ?? {})) {
|
||||
if (channelId === "defaults") {
|
||||
continue;
|
||||
}
|
||||
const channel = asRecord(channelRaw);
|
||||
if (!channel) {
|
||||
continue;
|
||||
}
|
||||
if (hasLegacyStreamingAlias(channel)) {
|
||||
addIssue(
|
||||
issues,
|
||||
["channels", channelId],
|
||||
channelId === "googlechat"
|
||||
? `channels.${channelId}.streamMode is legacy and no longer used. Run "openclaw doctor --fix".`
|
||||
: `channels.${channelId}.streamMode, channels.${channelId}.streaming aliases are legacy. Run "openclaw doctor --fix".`,
|
||||
);
|
||||
}
|
||||
const threadBindings = asRecord(channel.threadBindings);
|
||||
if (threadBindings && "ttlHours" in threadBindings) {
|
||||
addIssue(
|
||||
issues,
|
||||
["channels", channelId, "threadBindings", "ttlHours"],
|
||||
'channels.<id>.threadBindings.ttlHours is legacy; use channels.<id>.threadBindings.idleHours. Run "openclaw doctor --fix".',
|
||||
);
|
||||
}
|
||||
if (channelId === "slack") {
|
||||
for (const roomRaw of Object.values(asRecord(channel.channels) ?? {})) {
|
||||
if ("allow" in (asRecord(roomRaw) ?? {})) {
|
||||
addIssue(
|
||||
issues,
|
||||
["channels", "slack"],
|
||||
'channels.slack.channels.<id>.allow is legacy; use enabled. Run "openclaw doctor --fix".',
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (channelId === "googlechat") {
|
||||
for (const spaceRaw of Object.values(asRecord(channel.groups) ?? {})) {
|
||||
if ("allow" in (asRecord(spaceRaw) ?? {})) {
|
||||
addIssue(
|
||||
issues,
|
||||
["channels", "googlechat"],
|
||||
'channels.googlechat.groups.<id>.allow is legacy; use enabled. Run "openclaw doctor --fix".',
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (channelId === "discord") {
|
||||
for (const guildRaw of Object.values(asRecord(channel.guilds) ?? {})) {
|
||||
const guild = asRecord(guildRaw);
|
||||
for (const roomRaw of Object.values(asRecord(guild?.channels) ?? {})) {
|
||||
if ("allow" in (asRecord(roomRaw) ?? {})) {
|
||||
addIssue(
|
||||
issues,
|
||||
["channels", "discord"],
|
||||
'channels.discord.guilds.<id>.channels.<id>.allow is legacy; use enabled. Run "openclaw doctor --fix".',
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for (const [accountId, accountRaw] of Object.entries(asRecord(channel.accounts) ?? {})) {
|
||||
const account = asRecord(accountRaw);
|
||||
const accountThreadBindings = asRecord(account?.threadBindings);
|
||||
if (accountThreadBindings && "ttlHours" in accountThreadBindings) {
|
||||
addIssue(
|
||||
issues,
|
||||
["channels", channelId, "accounts", accountId, "threadBindings", "ttlHours"],
|
||||
'channels.<id>.threadBindings.ttlHours is legacy; use channels.<id>.threadBindings.idleHours. Run "openclaw doctor --fix".',
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (const rule of extraRules) {
|
||||
const value = getPathValue(root, rule.path);
|
||||
if (value === undefined || (rule.match && !rule.match(value, root))) {
|
||||
continue;
|
||||
}
|
||||
if (rule.requireSourceLiteral) {
|
||||
const sourceValue = getPathValue(sourceRoot, rule.path);
|
||||
if (sourceValue === undefined || (rule.match && !rule.match(sourceValue, sourceRoot))) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
addIssue(issues, rule.path, rule.message);
|
||||
}
|
||||
return issues;
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("../channels/plugins/bootstrap-registry.js", () => ({
|
||||
getBootstrapChannelPlugin: vi.fn((channelId: string) => {
|
||||
if (channelId !== "discord") {
|
||||
@@ -197,6 +581,32 @@ vi.mock("./doctor/shared/channel-legacy-config-migrate.js", () => ({
|
||||
}),
|
||||
}));
|
||||
|
||||
vi.mock("./doctor/shared/legacy-config-migrate.js", () => ({
|
||||
migrateLegacyConfig: legacyConfigMigrationForTest.migrateLegacyConfig,
|
||||
}));
|
||||
|
||||
vi.mock("./doctor/shared/bundled-plugin-load-paths.js", () => ({
|
||||
maybeRepairBundledPluginLoadPaths: vi.fn((cfg: Record<string, unknown>) => ({
|
||||
config: cfg,
|
||||
changes: [],
|
||||
})),
|
||||
}));
|
||||
|
||||
vi.mock("./doctor/shared/exec-safe-bins.js", () => ({
|
||||
maybeRepairExecSafeBinProfiles: vi.fn((cfg: Record<string, unknown>) => ({
|
||||
config: cfg,
|
||||
changes: [],
|
||||
warnings: [],
|
||||
})),
|
||||
}));
|
||||
|
||||
vi.mock("./doctor/shared/stale-plugin-config.js", () => ({
|
||||
maybeRepairStalePluginConfig: vi.fn((cfg: Record<string, unknown>) => ({
|
||||
config: cfg,
|
||||
changes: [],
|
||||
})),
|
||||
}));
|
||||
|
||||
vi.mock("./doctor/channel-capabilities.js", () => {
|
||||
const byChannel = {
|
||||
googlechat: {
|
||||
@@ -698,10 +1108,6 @@ vi.mock("./doctor-config-preflight.js", async () => {
|
||||
await import("../plugins/doctor-contract-registry.js");
|
||||
const { findLegacyConfigIssues }: typeof import("../config/legacy.js") =
|
||||
await import("../config/legacy.js");
|
||||
const {
|
||||
applyRuntimeLegacyConfigMigrations,
|
||||
}: typeof import("./doctor/shared/runtime-compat-api.js") =
|
||||
await import("./doctor/shared/runtime-compat-api.js");
|
||||
|
||||
function resolveConfigPath() {
|
||||
const stateDir =
|
||||
@@ -807,7 +1213,7 @@ vi.mock("./doctor-config-preflight.js", async () => {
|
||||
pluginIds: collectRelevantDoctorPluginIds(parsed),
|
||||
}),
|
||||
);
|
||||
const compat = applyRuntimeLegacyConfigMigrations(parsed);
|
||||
const compat = legacyConfigMigrationForTest.migrate(parsed);
|
||||
const effectiveConfig = normalizeDiscordStreamingCompat(compat.next ?? parsed);
|
||||
return {
|
||||
snapshot: {
|
||||
|
||||
@@ -12,6 +12,41 @@ vi.mock("../plugins/setup-registry.js", () => ({
|
||||
}),
|
||||
}));
|
||||
|
||||
vi.mock("../plugins/manifest-registry.js", () => ({
|
||||
loadPluginManifestRegistry: () => ({
|
||||
plugins: [
|
||||
{
|
||||
id: "brave",
|
||||
origin: "bundled",
|
||||
contracts: { webSearchProviders: ["brave"] },
|
||||
},
|
||||
{
|
||||
id: "google",
|
||||
origin: "bundled",
|
||||
contracts: { webSearchProviders: ["gemini"] },
|
||||
},
|
||||
{
|
||||
id: "firecrawl",
|
||||
origin: "bundled",
|
||||
contracts: { webSearchProviders: ["firecrawl"] },
|
||||
},
|
||||
],
|
||||
}),
|
||||
resolveManifestContractOwnerPluginId: ({ value }: { value: string }): string | undefined => {
|
||||
if (value === "gemini") {
|
||||
return "google";
|
||||
}
|
||||
return value === "brave" || value === "firecrawl" ? value : undefined;
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock("./doctor/shared/channel-legacy-config-migrate.js", () => ({
|
||||
applyChannelDoctorCompatibilityMigrations: (cfg: OpenClawConfig) => ({
|
||||
next: cfg,
|
||||
changes: [],
|
||||
}),
|
||||
}));
|
||||
|
||||
describe("normalizeCompatibilityConfigValues", () => {
|
||||
let previousOauthDir: string | undefined;
|
||||
let tempOauthDir = "";
|
||||
|
||||
@@ -3,7 +3,10 @@ import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import { resolveStorePath, resolveSessionTranscriptsDirForAgent } from "../config/sessions.js";
|
||||
import {
|
||||
resolveStorePath,
|
||||
resolveSessionTranscriptsDirForAgent,
|
||||
} from "../config/sessions/paths.js";
|
||||
import { noteStateIntegrity } from "./doctor-state-integrity.js";
|
||||
|
||||
vi.mock("../channels/plugins/bundled-ids.js", () => ({
|
||||
|
||||
@@ -9,13 +9,15 @@ import { resolveOAuthDir, resolveStateDir } from "../config/paths.js";
|
||||
import {
|
||||
formatSessionArchiveTimestamp,
|
||||
isPrimarySessionTranscriptFileName,
|
||||
loadSessionStore,
|
||||
resolveMainSessionKey,
|
||||
} from "../config/sessions/artifacts.js";
|
||||
import { resolveMainSessionKey } from "../config/sessions/main-session.js";
|
||||
import {
|
||||
resolveSessionFilePath,
|
||||
resolveSessionFilePathOptions,
|
||||
resolveSessionTranscriptsDirForAgent,
|
||||
resolveStorePath,
|
||||
} from "../config/sessions.js";
|
||||
} from "../config/sessions/paths.js";
|
||||
import { loadSessionStore } from "../config/sessions/store-load.js";
|
||||
import type { OpenClawConfig } from "../config/types.openclaw.js";
|
||||
import { resolveRequiredHomeDir } from "../infra/home-dir.js";
|
||||
import { resolveMemoryBackendConfig } from "../memory-host-sdk/engine-storage.js";
|
||||
|
||||
@@ -1,12 +1,8 @@
|
||||
import fs from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import {
|
||||
resetSessionStoreLockRuntimeForTests,
|
||||
setSessionWriteLockAcquirerForTests,
|
||||
} from "../config/sessions/store.js";
|
||||
import {
|
||||
autoMigrateLegacyStateDir,
|
||||
autoMigrateLegacyState,
|
||||
@@ -16,7 +12,7 @@ import {
|
||||
runLegacyStateMigrations,
|
||||
} from "./doctor-state-migrations.js";
|
||||
|
||||
let tempRoot: string | null = null;
|
||||
let tempRoots: string[] = [];
|
||||
|
||||
vi.mock("../channels/plugins/bundled.js", () => {
|
||||
function fileExists(filePath: string): boolean {
|
||||
@@ -135,6 +131,13 @@ vi.mock("../channels/plugins/bundled.js", () => {
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("../config/sessions.js", () => ({
|
||||
saveSessionStore: async (storePath: string, store: Record<string, unknown>) => {
|
||||
await fs.promises.mkdir(path.dirname(storePath), { recursive: true });
|
||||
await fs.promises.writeFile(storePath, `${JSON.stringify(store, null, 2)}\n`, "utf-8");
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock("../infra/json-files.js", async () => {
|
||||
const actual =
|
||||
await vi.importActual<typeof import("../infra/json-files.js")>("../infra/json-files.js");
|
||||
@@ -161,7 +164,7 @@ vi.mock("../infra/json-files.js", async () => {
|
||||
|
||||
async function makeTempRoot() {
|
||||
const root = await fs.promises.mkdtemp(path.join(os.tmpdir(), "openclaw-doctor-"));
|
||||
tempRoot = root;
|
||||
tempRoots.push(root);
|
||||
return root;
|
||||
}
|
||||
|
||||
@@ -197,21 +200,13 @@ async function runTelegramAllowFromMigration(params: { root: string; cfg: OpenCl
|
||||
return { oauthDir, detected, result };
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
setSessionWriteLockAcquirerForTests(async () => ({
|
||||
release: async () => undefined,
|
||||
}));
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
resetAutoMigrateLegacyStateForTest();
|
||||
resetAutoMigrateLegacyStateDirForTest();
|
||||
resetSessionStoreLockRuntimeForTests();
|
||||
if (!tempRoot) {
|
||||
return;
|
||||
}
|
||||
await fs.promises.rm(tempRoot, { recursive: true, force: true });
|
||||
tempRoot = null;
|
||||
await Promise.all(
|
||||
tempRoots.map((root) => fs.promises.rm(root, { recursive: true, force: true })),
|
||||
);
|
||||
tempRoots = [];
|
||||
});
|
||||
|
||||
function writeJson5(filePath: string, value: unknown) {
|
||||
@@ -291,6 +286,11 @@ async function runStateDirMigration(root: string, env = {} as NodeJS.ProcessEnv)
|
||||
});
|
||||
}
|
||||
|
||||
async function runFreshStateDirMigration(root: string, env = {} as NodeJS.ProcessEnv) {
|
||||
resetAutoMigrateLegacyStateDirForTest();
|
||||
return runStateDirMigration(root, env);
|
||||
}
|
||||
|
||||
async function runAutoMigrateLegacyStateWithLog(params: {
|
||||
root: string;
|
||||
cfg: OpenClawConfig;
|
||||
@@ -712,93 +712,74 @@ describe("doctor legacy state migrations", () => {
|
||||
expect(result.migrated).toBe(false);
|
||||
});
|
||||
|
||||
it("does not warn when legacy state dir is an already-migrated symlink mirror", async () => {
|
||||
const root = await makeTempRoot();
|
||||
const { targetDir, legacyDir } = ensureLegacyAndTargetStateDirs(root);
|
||||
fs.mkdirSync(path.join(targetDir, "sessions"), { recursive: true });
|
||||
fs.mkdirSync(path.join(targetDir, "agent"), { recursive: true });
|
||||
|
||||
it("classifies already-migrated symlink mirrors without warnings", async () => {
|
||||
const flatRoot = await makeTempRoot();
|
||||
const flat = ensureLegacyAndTargetStateDirs(flatRoot);
|
||||
fs.mkdirSync(path.join(flat.targetDir, "sessions"), { recursive: true });
|
||||
fs.mkdirSync(path.join(flat.targetDir, "agent"), { recursive: true });
|
||||
fs.symlinkSync(
|
||||
path.join(targetDir, "sessions"),
|
||||
path.join(legacyDir, "sessions"),
|
||||
path.join(flat.targetDir, "sessions"),
|
||||
path.join(flat.legacyDir, "sessions"),
|
||||
DIR_LINK_TYPE,
|
||||
);
|
||||
fs.symlinkSync(path.join(targetDir, "agent"), path.join(legacyDir, "agent"), DIR_LINK_TYPE);
|
||||
|
||||
const result = await runStateDirMigration(root);
|
||||
expectUnmigratedWithoutWarnings(result);
|
||||
});
|
||||
|
||||
it("warns when legacy state dir is empty and target already exists", async () => {
|
||||
const root = await makeTempRoot();
|
||||
const { targetDir } = ensureLegacyAndTargetStateDirs(root);
|
||||
|
||||
const result = await runStateDirMigration(root);
|
||||
expectTargetAlreadyExistsWarning(result, targetDir);
|
||||
});
|
||||
|
||||
it("warns when legacy state dir contains non-symlink entries and target already exists", async () => {
|
||||
const root = await makeTempRoot();
|
||||
const { targetDir, legacyDir } = ensureLegacyAndTargetStateDirs(root);
|
||||
fs.writeFileSync(path.join(legacyDir, "sessions.json"), "{}", "utf-8");
|
||||
|
||||
const result = await runStateDirMigration(root);
|
||||
expectTargetAlreadyExistsWarning(result, targetDir);
|
||||
});
|
||||
|
||||
it("does not warn when legacy state dir contains nested symlink mirrors", async () => {
|
||||
const root = await makeTempRoot();
|
||||
const { targetDir, legacyDir } = ensureLegacyAndTargetStateDirs(root);
|
||||
fs.mkdirSync(path.join(targetDir, "agents", "main"), { recursive: true });
|
||||
fs.mkdirSync(path.join(legacyDir, "agents"), { recursive: true });
|
||||
|
||||
fs.symlinkSync(
|
||||
path.join(targetDir, "agents", "main"),
|
||||
path.join(legacyDir, "agents", "main"),
|
||||
path.join(flat.targetDir, "agent"),
|
||||
path.join(flat.legacyDir, "agent"),
|
||||
DIR_LINK_TYPE,
|
||||
);
|
||||
expectUnmigratedWithoutWarnings(await runFreshStateDirMigration(flatRoot));
|
||||
|
||||
const result = await runStateDirMigration(root);
|
||||
expectUnmigratedWithoutWarnings(result);
|
||||
const nestedRoot = await makeTempRoot();
|
||||
const nested = ensureLegacyAndTargetStateDirs(nestedRoot);
|
||||
fs.mkdirSync(path.join(nested.targetDir, "agents", "main"), { recursive: true });
|
||||
fs.mkdirSync(path.join(nested.legacyDir, "agents"), { recursive: true });
|
||||
fs.symlinkSync(
|
||||
path.join(nested.targetDir, "agents", "main"),
|
||||
path.join(nested.legacyDir, "agents", "main"),
|
||||
DIR_LINK_TYPE,
|
||||
);
|
||||
expectUnmigratedWithoutWarnings(await runFreshStateDirMigration(nestedRoot));
|
||||
});
|
||||
|
||||
it("warns when legacy state dir symlink points outside the target tree", async () => {
|
||||
const root = await makeTempRoot();
|
||||
const { targetDir, legacyDir } = ensureLegacyAndTargetStateDirs(root);
|
||||
const outsideDir = path.join(root, ".outside-state");
|
||||
fs.mkdirSync(path.join(targetDir, "sessions"), { recursive: true });
|
||||
it("warns when target exists and legacy state is not a safe mirror", async () => {
|
||||
const emptyRoot = await makeTempRoot();
|
||||
const empty = ensureLegacyAndTargetStateDirs(emptyRoot);
|
||||
expectTargetAlreadyExistsWarning(await runFreshStateDirMigration(emptyRoot), empty.targetDir);
|
||||
|
||||
const fileRoot = await makeTempRoot();
|
||||
const file = ensureLegacyAndTargetStateDirs(fileRoot);
|
||||
fs.writeFileSync(path.join(file.legacyDir, "sessions.json"), "{}", "utf-8");
|
||||
expectTargetAlreadyExistsWarning(await runFreshStateDirMigration(fileRoot), file.targetDir);
|
||||
|
||||
const outsideRoot = await makeTempRoot();
|
||||
const outside = ensureLegacyAndTargetStateDirs(outsideRoot);
|
||||
const outsideDir = path.join(outsideRoot, ".outside-state");
|
||||
fs.mkdirSync(path.join(outside.targetDir, "sessions"), { recursive: true });
|
||||
fs.mkdirSync(outsideDir, { recursive: true });
|
||||
fs.symlinkSync(outsideDir, path.join(outside.legacyDir, "sessions"), DIR_LINK_TYPE);
|
||||
expectTargetAlreadyExistsWarning(
|
||||
await runFreshStateDirMigration(outsideRoot),
|
||||
outside.targetDir,
|
||||
);
|
||||
|
||||
fs.symlinkSync(path.join(outsideDir), path.join(legacyDir, "sessions"), DIR_LINK_TYPE);
|
||||
|
||||
const result = await runStateDirMigration(root);
|
||||
expectTargetAlreadyExistsWarning(result, targetDir);
|
||||
});
|
||||
|
||||
it("warns when legacy state dir contains a broken symlink target", async () => {
|
||||
const root = await makeTempRoot();
|
||||
const { targetDir, legacyDir } = ensureLegacyAndTargetStateDirs(root);
|
||||
fs.mkdirSync(path.join(targetDir, "sessions"), { recursive: true });
|
||||
|
||||
const targetSessionDir = path.join(targetDir, "sessions");
|
||||
fs.symlinkSync(targetSessionDir, path.join(legacyDir, "sessions"), DIR_LINK_TYPE);
|
||||
const brokenRoot = await makeTempRoot();
|
||||
const broken = ensureLegacyAndTargetStateDirs(brokenRoot);
|
||||
const targetSessionDir = path.join(broken.targetDir, "sessions");
|
||||
fs.mkdirSync(targetSessionDir, { recursive: true });
|
||||
fs.symlinkSync(targetSessionDir, path.join(broken.legacyDir, "sessions"), DIR_LINK_TYPE);
|
||||
fs.rmSync(targetSessionDir, { recursive: true, force: true });
|
||||
expectTargetAlreadyExistsWarning(await runFreshStateDirMigration(brokenRoot), broken.targetDir);
|
||||
|
||||
const result = await runStateDirMigration(root);
|
||||
expectTargetAlreadyExistsWarning(result, targetDir);
|
||||
});
|
||||
|
||||
it("warns when legacy symlink escapes target tree through second-hop symlink", async () => {
|
||||
const root = await makeTempRoot();
|
||||
const { targetDir, legacyDir } = ensureLegacyAndTargetStateDirs(root);
|
||||
const outsideDir = path.join(root, ".outside-state");
|
||||
fs.mkdirSync(outsideDir, { recursive: true });
|
||||
|
||||
const targetHop = path.join(targetDir, "hop");
|
||||
fs.symlinkSync(outsideDir, targetHop, DIR_LINK_TYPE);
|
||||
fs.symlinkSync(targetHop, path.join(legacyDir, "sessions"), DIR_LINK_TYPE);
|
||||
|
||||
const result = await runStateDirMigration(root);
|
||||
expectTargetAlreadyExistsWarning(result, targetDir);
|
||||
const secondHopRoot = await makeTempRoot();
|
||||
const secondHop = ensureLegacyAndTargetStateDirs(secondHopRoot);
|
||||
const secondHopOutsideDir = path.join(secondHopRoot, ".outside-state");
|
||||
fs.mkdirSync(secondHopOutsideDir, { recursive: true });
|
||||
const targetHop = path.join(secondHop.targetDir, "hop");
|
||||
fs.symlinkSync(secondHopOutsideDir, targetHop, DIR_LINK_TYPE);
|
||||
fs.symlinkSync(targetHop, path.join(secondHop.legacyDir, "sessions"), DIR_LINK_TYPE);
|
||||
expectTargetAlreadyExistsWarning(
|
||||
await runFreshStateDirMigration(secondHopRoot),
|
||||
secondHop.targetDir,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -4,6 +4,14 @@ import { describe, expect, it } from "vitest";
|
||||
|
||||
const REPO_ROOT = path.resolve(import.meta.dirname, "../../../..");
|
||||
const SRC_ROOT = path.join(REPO_ROOT, "src");
|
||||
const DOCTOR_ROOT = path.join(SRC_ROOT, "commands", "doctor");
|
||||
const LEGACY_REPAIR_FLAG = "migrateLegacyConfig";
|
||||
const LEGACY_MIGRATION_MODULE = "legacy-config-migrate";
|
||||
const LEGACY_REPAIR_FLAG_BYTES = Buffer.from(LEGACY_REPAIR_FLAG);
|
||||
const LEGACY_MIGRATION_MODULE_BYTES = Buffer.from(LEGACY_MIGRATION_MODULE);
|
||||
const LEGACY_REPAIR_FLAG_RE = /migrateLegacyConfig\s*:\s*true/;
|
||||
const LEGACY_MIGRATION_MODULE_RE =
|
||||
/legacy-config-migrate(?:\.js)?|legacy-config-migrations(?:\.[\w-]+)?(?:\.js)?/;
|
||||
|
||||
function collectSourceFiles(dir: string, acc: string[] = []): string[] {
|
||||
for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
|
||||
@@ -12,6 +20,9 @@ function collectSourceFiles(dir: string, acc: string[] = []): string[] {
|
||||
}
|
||||
const fullPath = path.join(dir, entry.name);
|
||||
if (entry.isDirectory()) {
|
||||
if (fullPath === DOCTOR_ROOT) {
|
||||
continue;
|
||||
}
|
||||
collectSourceFiles(fullPath, acc);
|
||||
continue;
|
||||
}
|
||||
@@ -23,27 +34,33 @@ function collectSourceFiles(dir: string, acc: string[] = []): string[] {
|
||||
return acc;
|
||||
}
|
||||
|
||||
function collectViolations(files: string[]): string[] {
|
||||
const violations: string[] = [];
|
||||
for (const file of files) {
|
||||
const rel = path.relative(REPO_ROOT, file).replaceAll(path.sep, "/");
|
||||
const sourceBytes = fs.readFileSync(file);
|
||||
const hasRepairFlag = sourceBytes.includes(LEGACY_REPAIR_FLAG_BYTES);
|
||||
const hasMigrationModule = sourceBytes.includes(LEGACY_MIGRATION_MODULE_BYTES);
|
||||
if (!hasRepairFlag && !hasMigrationModule) {
|
||||
continue;
|
||||
}
|
||||
const source = sourceBytes.toString("utf8");
|
||||
|
||||
if (hasRepairFlag && LEGACY_REPAIR_FLAG_RE.test(source)) {
|
||||
violations.push(`${rel}: migrateLegacyConfig:true outside doctor`);
|
||||
}
|
||||
|
||||
if (hasMigrationModule && LEGACY_MIGRATION_MODULE_RE.test(source)) {
|
||||
violations.push(`${rel}: doctor legacy migration module referenced outside doctor`);
|
||||
}
|
||||
}
|
||||
return violations;
|
||||
}
|
||||
|
||||
describe("legacy config write ownership", () => {
|
||||
it("keeps legacy config repair flags and migration modules under doctor", () => {
|
||||
const files = collectSourceFiles(SRC_ROOT);
|
||||
const violations: string[] = [];
|
||||
|
||||
for (const file of files) {
|
||||
const rel = path.relative(REPO_ROOT, file).replaceAll(path.sep, "/");
|
||||
const source = fs.readFileSync(file, "utf8");
|
||||
const isDoctorFile = rel.startsWith("src/commands/doctor/");
|
||||
|
||||
if (!isDoctorFile && /migrateLegacyConfig\s*:\s*true/.test(source)) {
|
||||
violations.push(`${rel}: migrateLegacyConfig:true outside doctor`);
|
||||
}
|
||||
|
||||
if (
|
||||
!isDoctorFile &&
|
||||
/legacy-config-migrate(?:\.js)?|legacy-config-migrations(?:\.[\w-]+)?(?:\.js)?/.test(source)
|
||||
) {
|
||||
violations.push(`${rel}: doctor legacy migration module referenced outside doctor`);
|
||||
}
|
||||
}
|
||||
const violations = collectViolations(files);
|
||||
|
||||
expect(violations).toEqual([]);
|
||||
});
|
||||
|
||||
@@ -1,10 +1,23 @@
|
||||
import path from "node:path";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import * as bundledSources from "../../../plugins/bundled-sources.js";
|
||||
import type { PluginManifestRecord } from "../../../plugins/manifest-registry.js";
|
||||
import * as manifestRegistry from "../../../plugins/manifest-registry.js";
|
||||
import { collectDoctorPreviewWarnings } from "./preview-warnings.js";
|
||||
|
||||
type TestManifestRecord = {
|
||||
id: string;
|
||||
channels: string[];
|
||||
};
|
||||
|
||||
const manifestState = vi.hoisted(
|
||||
() =>
|
||||
({
|
||||
plugins: [] as TestManifestRecord[],
|
||||
diagnostics: [] as Array<{ level: string; message: string; source: string }>,
|
||||
}) satisfies {
|
||||
plugins: TestManifestRecord[];
|
||||
diagnostics: Array<{ level: string; message: string; source: string }>;
|
||||
},
|
||||
);
|
||||
|
||||
vi.mock("../channel-capabilities.js", () => {
|
||||
const fallback = {
|
||||
dmAllowFromMode: "topOnly",
|
||||
@@ -40,22 +53,98 @@ vi.mock("./channel-doctor.js", () => ({
|
||||
shouldSkipChannelDoctorDefaultEmptyGroupAllowlistWarning: vi.fn(() => false),
|
||||
}));
|
||||
|
||||
function manifest(id: string): PluginManifestRecord {
|
||||
vi.mock("./channel-plugin-blockers.js", () => ({
|
||||
scanConfiguredChannelPluginBlockers: (cfg: {
|
||||
channels?: Record<string, unknown>;
|
||||
plugins?: { enabled?: boolean; entries?: Record<string, { enabled?: boolean }> };
|
||||
}) => {
|
||||
const configuredChannels = new Set(Object.keys(cfg.channels ?? {}));
|
||||
return manifestState.plugins.flatMap((plugin) => {
|
||||
const disabledByEntry = cfg.plugins?.entries?.[plugin.id]?.enabled === false;
|
||||
const pluginsDisabled = cfg.plugins?.enabled === false;
|
||||
if (!disabledByEntry && !pluginsDisabled) {
|
||||
return [];
|
||||
}
|
||||
return plugin.channels
|
||||
.filter((channelId) => configuredChannels.has(channelId))
|
||||
.map((channelId) => ({
|
||||
channelId,
|
||||
pluginId: plugin.id,
|
||||
reason: disabledByEntry ? "disabled in config" : "plugins disabled",
|
||||
}));
|
||||
});
|
||||
},
|
||||
collectConfiguredChannelPluginBlockerWarnings: (
|
||||
hits: Array<{ channelId: string; pluginId: string; reason: string }>,
|
||||
) =>
|
||||
hits.map((hit) => {
|
||||
const reason =
|
||||
hit.reason === "disabled in config"
|
||||
? `plugin "${hit.pluginId}" is disabled by plugins.entries.${hit.pluginId}.enabled=false.`
|
||||
: "plugins.enabled=false blocks channel plugins globally.";
|
||||
return `- channels.${hit.channelId}: channel is configured, but ${reason}`;
|
||||
}),
|
||||
isWarningBlockedByChannelPlugin: (warning: string, hits: Array<{ channelId: string }>) =>
|
||||
hits.some(
|
||||
(hit) =>
|
||||
warning.includes(`channels.${hit.channelId}:`) ||
|
||||
warning.includes(`channels.${hit.channelId}.`),
|
||||
),
|
||||
}));
|
||||
|
||||
vi.mock("./stale-plugin-config.js", () => ({
|
||||
scanStalePluginConfig: (cfg: {
|
||||
plugins?: { allow?: string[]; entries?: Record<string, unknown> };
|
||||
}) => {
|
||||
const knownIds = new Set(manifestState.plugins.map((plugin) => plugin.id));
|
||||
const ids = [...(cfg.plugins?.allow ?? []), ...Object.keys(cfg.plugins?.entries ?? {})];
|
||||
return [...new Set(ids)].filter((id) => !knownIds.has(id)).map((id) => ({ id }));
|
||||
},
|
||||
isStalePluginAutoRepairBlocked: () =>
|
||||
manifestState.diagnostics.some((diagnostic) => diagnostic.level === "error"),
|
||||
collectStalePluginConfigWarnings: ({
|
||||
autoRepairBlocked,
|
||||
doctorFixCommand,
|
||||
hits,
|
||||
}: {
|
||||
autoRepairBlocked: boolean;
|
||||
doctorFixCommand: string;
|
||||
hits: Array<{ id: string }>;
|
||||
}) =>
|
||||
hits.map(
|
||||
(hit) =>
|
||||
`plugins.allow: stale plugin reference "${hit.id}". plugins.entries.${hit.id} is unused. ${
|
||||
autoRepairBlocked
|
||||
? `Auto-removal is paused; rerun "${doctorFixCommand}".`
|
||||
: `Run "${doctorFixCommand}".`
|
||||
}`,
|
||||
),
|
||||
}));
|
||||
|
||||
vi.mock("./bundled-plugin-load-paths.js", () => ({
|
||||
scanBundledPluginLoadPathMigrations: (cfg: { plugins?: { load?: { paths?: string[] } } }) =>
|
||||
(cfg.plugins?.load?.paths ?? []).map((legacyPath) => ({ legacyPath })),
|
||||
collectBundledPluginLoadPathWarnings: ({
|
||||
doctorFixCommand,
|
||||
hits,
|
||||
}: {
|
||||
doctorFixCommand: string;
|
||||
hits: Array<{ legacyPath: string }>;
|
||||
}) =>
|
||||
hits.map(
|
||||
(hit) =>
|
||||
`plugins.load.paths: legacy bundled plugin path "${hit.legacyPath}". Run "${doctorFixCommand}".`,
|
||||
),
|
||||
}));
|
||||
|
||||
function manifest(id: string): TestManifestRecord {
|
||||
return {
|
||||
id,
|
||||
channels: [],
|
||||
providers: [],
|
||||
cliBackends: [],
|
||||
skills: [],
|
||||
hooks: [],
|
||||
origin: "bundled",
|
||||
rootDir: `/plugins/${id}`,
|
||||
source: `/plugins/${id}`,
|
||||
manifestPath: `/plugins/${id}/openclaw.plugin.json`,
|
||||
};
|
||||
}
|
||||
|
||||
function channelManifest(id: string, channelId: string): PluginManifestRecord {
|
||||
function channelManifest(id: string, channelId: string): TestManifestRecord {
|
||||
return {
|
||||
...manifest(id),
|
||||
channels: [channelId],
|
||||
@@ -64,10 +153,8 @@ function channelManifest(id: string, channelId: string): PluginManifestRecord {
|
||||
|
||||
describe("doctor preview warnings", () => {
|
||||
beforeEach(() => {
|
||||
vi.spyOn(manifestRegistry, "loadPluginManifestRegistry").mockReturnValue({
|
||||
plugins: [manifest("discord")],
|
||||
diagnostics: [],
|
||||
});
|
||||
manifestState.plugins = [manifest("discord")];
|
||||
manifestState.diagnostics = [];
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
@@ -147,23 +234,7 @@ describe("doctor preview warnings", () => {
|
||||
it("includes bundled plugin load path migration warnings", async () => {
|
||||
const packageRoot = path.resolve("app-node-modules", "openclaw");
|
||||
const legacyPath = path.join(packageRoot, "extensions", "feishu");
|
||||
const bundledPath = path.join(packageRoot, "dist", "extensions", "feishu");
|
||||
vi.spyOn(manifestRegistry, "loadPluginManifestRegistry").mockReturnValue({
|
||||
plugins: [manifest("feishu")],
|
||||
diagnostics: [],
|
||||
});
|
||||
vi.spyOn(bundledSources, "resolveBundledPluginSources").mockReturnValue(
|
||||
new Map([
|
||||
[
|
||||
"feishu",
|
||||
{
|
||||
pluginId: "feishu",
|
||||
localPath: bundledPath,
|
||||
npmSpec: "@openclaw/feishu",
|
||||
},
|
||||
],
|
||||
]),
|
||||
);
|
||||
manifestState.plugins = [manifest("feishu")];
|
||||
|
||||
const warnings = await collectDoctorPreviewWarnings({
|
||||
cfg: {
|
||||
@@ -183,12 +254,10 @@ describe("doctor preview warnings", () => {
|
||||
});
|
||||
|
||||
it("warns but skips auto-removal when plugin discovery has errors", async () => {
|
||||
vi.spyOn(manifestRegistry, "loadPluginManifestRegistry").mockReturnValue({
|
||||
plugins: [],
|
||||
diagnostics: [
|
||||
{ level: "error", message: "plugin path not found: /missing", source: "/missing" },
|
||||
],
|
||||
});
|
||||
manifestState.plugins = [];
|
||||
manifestState.diagnostics = [
|
||||
{ level: "error", message: "plugin path not found: /missing", source: "/missing" },
|
||||
];
|
||||
|
||||
const warnings = await collectDoctorPreviewWarnings({
|
||||
cfg: {
|
||||
@@ -210,10 +279,7 @@ describe("doctor preview warnings", () => {
|
||||
});
|
||||
|
||||
it("warns when a configured channel plugin is disabled explicitly", async () => {
|
||||
vi.spyOn(manifestRegistry, "loadPluginManifestRegistry").mockReturnValue({
|
||||
plugins: [channelManifest("telegram", "telegram")],
|
||||
diagnostics: [],
|
||||
});
|
||||
manifestState.plugins = [channelManifest("telegram", "telegram")];
|
||||
|
||||
const warnings = await collectDoctorPreviewWarnings({
|
||||
cfg: {
|
||||
@@ -243,10 +309,7 @@ describe("doctor preview warnings", () => {
|
||||
});
|
||||
|
||||
it("warns when channel plugins are blocked globally", async () => {
|
||||
vi.spyOn(manifestRegistry, "loadPluginManifestRegistry").mockReturnValue({
|
||||
plugins: [channelManifest("telegram", "telegram")],
|
||||
diagnostics: [],
|
||||
});
|
||||
manifestState.plugins = [channelManifest("telegram", "telegram")];
|
||||
|
||||
const warnings = await collectDoctorPreviewWarnings({
|
||||
cfg: {
|
||||
|
||||
@@ -1,122 +1,204 @@
|
||||
import type { OpenClawConfig } from "../../../config/types.openclaw.js";
|
||||
import { sanitizeForLog } from "../../../terminal/ansi.js";
|
||||
import {
|
||||
collectBundledPluginLoadPathWarnings,
|
||||
scanBundledPluginLoadPathMigrations,
|
||||
} from "./bundled-plugin-load-paths.js";
|
||||
import {
|
||||
collectChannelDoctorEmptyAllowlistExtraWarnings,
|
||||
collectChannelDoctorPreviewWarnings,
|
||||
} from "./channel-doctor.js";
|
||||
import {
|
||||
collectConfiguredChannelPluginBlockerWarnings,
|
||||
isWarningBlockedByChannelPlugin,
|
||||
scanConfiguredChannelPluginBlockers,
|
||||
} from "./channel-plugin-blockers.js";
|
||||
import { scanEmptyAllowlistPolicyWarnings } from "./empty-allowlist-scan.js";
|
||||
import {
|
||||
collectExecSafeBinCoverageWarnings,
|
||||
collectExecSafeBinTrustedDirHintWarnings,
|
||||
scanExecSafeBinCoverage,
|
||||
scanExecSafeBinTrustedDirHints,
|
||||
} from "./exec-safe-bins.js";
|
||||
import {
|
||||
collectLegacyToolsBySenderWarnings,
|
||||
scanLegacyToolsBySenderKeys,
|
||||
} from "./legacy-tools-by-sender.js";
|
||||
import {
|
||||
collectOpenPolicyAllowFromWarnings,
|
||||
maybeRepairOpenPolicyAllowFrom,
|
||||
} from "./open-policy-allowfrom.js";
|
||||
import {
|
||||
collectStalePluginConfigWarnings,
|
||||
isStalePluginAutoRepairBlocked,
|
||||
scanStalePluginConfig,
|
||||
} from "./stale-plugin-config.js";
|
||||
|
||||
function hasRecord(value: unknown): value is Record<string, unknown> {
|
||||
return Boolean(value && typeof value === "object" && !Array.isArray(value));
|
||||
}
|
||||
|
||||
function hasChannels(cfg: OpenClawConfig): boolean {
|
||||
return hasRecord(cfg.channels);
|
||||
}
|
||||
|
||||
function hasPlugins(cfg: OpenClawConfig): boolean {
|
||||
return hasRecord(cfg.plugins);
|
||||
}
|
||||
|
||||
function hasPluginLoadPaths(cfg: OpenClawConfig): boolean {
|
||||
const plugins = cfg.plugins;
|
||||
if (!hasRecord(plugins)) {
|
||||
return false;
|
||||
}
|
||||
const load = plugins.load;
|
||||
return hasRecord(load) && Array.isArray(load.paths) && load.paths.length > 0;
|
||||
}
|
||||
|
||||
function hasExplicitChannelPluginBlockerConfig(cfg: OpenClawConfig): boolean {
|
||||
if (cfg.plugins?.enabled === false) {
|
||||
return true;
|
||||
}
|
||||
const entries = cfg.plugins?.entries;
|
||||
if (!hasRecord(entries)) {
|
||||
return false;
|
||||
}
|
||||
return Object.values(entries).some(
|
||||
(entry) => hasRecord(entry) && "enabled" in entry && entry.enabled === false,
|
||||
);
|
||||
}
|
||||
|
||||
function hasToolsBySenderKey(value: unknown): boolean {
|
||||
if (Array.isArray(value)) {
|
||||
return value.some(hasToolsBySenderKey);
|
||||
}
|
||||
if (!hasRecord(value)) {
|
||||
return false;
|
||||
}
|
||||
if (hasRecord(value.toolsBySender)) {
|
||||
return true;
|
||||
}
|
||||
return Object.entries(value).some(
|
||||
([key, nested]) => key !== "toolsBySender" && hasToolsBySenderKey(nested),
|
||||
);
|
||||
}
|
||||
|
||||
function hasConfiguredSafeBins(cfg: OpenClawConfig): boolean {
|
||||
const globalExec = cfg.tools?.exec;
|
||||
if (
|
||||
hasRecord(globalExec) &&
|
||||
Array.isArray(globalExec.safeBins) &&
|
||||
globalExec.safeBins.length > 0
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
return (cfg.agents?.list ?? []).some((agent) => {
|
||||
const agentExec = hasRecord(agent) && hasRecord(agent.tools) ? agent.tools.exec : undefined;
|
||||
return (
|
||||
hasRecord(agentExec) && Array.isArray(agentExec.safeBins) && agentExec.safeBins.length > 0
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
export async function collectDoctorPreviewWarnings(params: {
|
||||
cfg: OpenClawConfig;
|
||||
doctorFixCommand: string;
|
||||
}): Promise<string[]> {
|
||||
const warnings: string[] = [];
|
||||
const hasChannelConfig = hasChannels(params.cfg);
|
||||
const hasPluginConfig = hasPlugins(params.cfg);
|
||||
|
||||
const channelPluginBlockerHits = scanConfiguredChannelPluginBlockers(params.cfg, process.env);
|
||||
if (channelPluginBlockerHits.length > 0) {
|
||||
const channelPluginRuntime =
|
||||
hasChannelConfig && hasExplicitChannelPluginBlockerConfig(params.cfg)
|
||||
? await import("./channel-plugin-blockers.js")
|
||||
: undefined;
|
||||
const channelPluginBlockerHits =
|
||||
channelPluginRuntime?.scanConfiguredChannelPluginBlockers(params.cfg, process.env) ?? [];
|
||||
if (channelPluginRuntime && channelPluginBlockerHits.length > 0) {
|
||||
warnings.push(
|
||||
collectConfiguredChannelPluginBlockerWarnings(channelPluginBlockerHits).join("\n"),
|
||||
channelPluginRuntime
|
||||
.collectConfiguredChannelPluginBlockerWarnings(channelPluginBlockerHits)
|
||||
.join("\n"),
|
||||
);
|
||||
}
|
||||
|
||||
const channelDoctorWarnings = await collectChannelDoctorPreviewWarnings({
|
||||
cfg: params.cfg,
|
||||
doctorFixCommand: params.doctorFixCommand,
|
||||
});
|
||||
if (channelDoctorWarnings.length > 0) {
|
||||
warnings.push(...channelDoctorWarnings);
|
||||
if (hasChannelConfig) {
|
||||
const { collectChannelDoctorPreviewWarnings } = await import("./channel-doctor.js");
|
||||
const channelDoctorWarnings = await collectChannelDoctorPreviewWarnings({
|
||||
cfg: params.cfg,
|
||||
doctorFixCommand: params.doctorFixCommand,
|
||||
});
|
||||
if (channelDoctorWarnings.length > 0) {
|
||||
warnings.push(...channelDoctorWarnings);
|
||||
}
|
||||
|
||||
const { collectOpenPolicyAllowFromWarnings, maybeRepairOpenPolicyAllowFrom } =
|
||||
await import("./open-policy-allowfrom.js");
|
||||
const allowFromScan = maybeRepairOpenPolicyAllowFrom(params.cfg);
|
||||
if (allowFromScan.changes.length > 0) {
|
||||
warnings.push(
|
||||
collectOpenPolicyAllowFromWarnings({
|
||||
changes: allowFromScan.changes,
|
||||
doctorFixCommand: params.doctorFixCommand,
|
||||
}).join("\n"),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
const allowFromScan = maybeRepairOpenPolicyAllowFrom(params.cfg);
|
||||
if (allowFromScan.changes.length > 0) {
|
||||
warnings.push(
|
||||
collectOpenPolicyAllowFromWarnings({
|
||||
changes: allowFromScan.changes,
|
||||
doctorFixCommand: params.doctorFixCommand,
|
||||
}).join("\n"),
|
||||
if (hasPluginConfig) {
|
||||
const {
|
||||
collectStalePluginConfigWarnings,
|
||||
isStalePluginAutoRepairBlocked,
|
||||
scanStalePluginConfig,
|
||||
} = await import("./stale-plugin-config.js");
|
||||
const stalePluginHits = scanStalePluginConfig(params.cfg, process.env);
|
||||
if (stalePluginHits.length > 0) {
|
||||
warnings.push(
|
||||
collectStalePluginConfigWarnings({
|
||||
hits: stalePluginHits,
|
||||
doctorFixCommand: params.doctorFixCommand,
|
||||
autoRepairBlocked: isStalePluginAutoRepairBlocked(params.cfg, process.env),
|
||||
}).join("\n"),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if (hasPluginLoadPaths(params.cfg)) {
|
||||
const { collectBundledPluginLoadPathWarnings, scanBundledPluginLoadPathMigrations } =
|
||||
await import("./bundled-plugin-load-paths.js");
|
||||
const bundledPluginLoadPathHits = scanBundledPluginLoadPathMigrations(params.cfg, process.env);
|
||||
if (bundledPluginLoadPathHits.length > 0) {
|
||||
warnings.push(
|
||||
collectBundledPluginLoadPathWarnings({
|
||||
hits: bundledPluginLoadPathHits,
|
||||
doctorFixCommand: params.doctorFixCommand,
|
||||
}).join("\n"),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if (hasChannelConfig) {
|
||||
const { collectChannelDoctorEmptyAllowlistExtraWarnings } = await import("./channel-doctor.js");
|
||||
const { scanEmptyAllowlistPolicyWarnings } = await import("./empty-allowlist-scan.js");
|
||||
const emptyAllowlistWarnings = scanEmptyAllowlistPolicyWarnings(params.cfg, {
|
||||
doctorFixCommand: params.doctorFixCommand,
|
||||
extraWarningsForAccount: collectChannelDoctorEmptyAllowlistExtraWarnings,
|
||||
}).filter(
|
||||
(warning) =>
|
||||
!(
|
||||
channelPluginRuntime?.isWarningBlockedByChannelPlugin(
|
||||
warning,
|
||||
channelPluginBlockerHits,
|
||||
) ?? false
|
||||
),
|
||||
);
|
||||
if (emptyAllowlistWarnings.length > 0) {
|
||||
const { sanitizeForLog } = await import("../../../terminal/ansi.js");
|
||||
warnings.push(emptyAllowlistWarnings.map((line) => sanitizeForLog(line)).join("\n"));
|
||||
}
|
||||
}
|
||||
|
||||
const stalePluginHits = scanStalePluginConfig(params.cfg, process.env);
|
||||
if (stalePluginHits.length > 0) {
|
||||
warnings.push(
|
||||
collectStalePluginConfigWarnings({
|
||||
hits: stalePluginHits,
|
||||
doctorFixCommand: params.doctorFixCommand,
|
||||
autoRepairBlocked: isStalePluginAutoRepairBlocked(params.cfg, process.env),
|
||||
}).join("\n"),
|
||||
);
|
||||
if (hasToolsBySenderKey(params.cfg)) {
|
||||
const { collectLegacyToolsBySenderWarnings, scanLegacyToolsBySenderKeys } =
|
||||
await import("./legacy-tools-by-sender.js");
|
||||
const toolsBySenderHits = scanLegacyToolsBySenderKeys(params.cfg);
|
||||
if (toolsBySenderHits.length > 0) {
|
||||
warnings.push(
|
||||
collectLegacyToolsBySenderWarnings({
|
||||
hits: toolsBySenderHits,
|
||||
doctorFixCommand: params.doctorFixCommand,
|
||||
}).join("\n"),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
const bundledPluginLoadPathHits = scanBundledPluginLoadPathMigrations(params.cfg, process.env);
|
||||
if (bundledPluginLoadPathHits.length > 0) {
|
||||
warnings.push(
|
||||
collectBundledPluginLoadPathWarnings({
|
||||
hits: bundledPluginLoadPathHits,
|
||||
doctorFixCommand: params.doctorFixCommand,
|
||||
}).join("\n"),
|
||||
);
|
||||
}
|
||||
if (hasConfiguredSafeBins(params.cfg)) {
|
||||
const {
|
||||
collectExecSafeBinCoverageWarnings,
|
||||
collectExecSafeBinTrustedDirHintWarnings,
|
||||
scanExecSafeBinCoverage,
|
||||
scanExecSafeBinTrustedDirHints,
|
||||
} = await import("./exec-safe-bins.js");
|
||||
const safeBinCoverage = scanExecSafeBinCoverage(params.cfg);
|
||||
if (safeBinCoverage.length > 0) {
|
||||
warnings.push(
|
||||
collectExecSafeBinCoverageWarnings({
|
||||
hits: safeBinCoverage,
|
||||
doctorFixCommand: params.doctorFixCommand,
|
||||
}).join("\n"),
|
||||
);
|
||||
}
|
||||
|
||||
const emptyAllowlistWarnings = scanEmptyAllowlistPolicyWarnings(params.cfg, {
|
||||
doctorFixCommand: params.doctorFixCommand,
|
||||
extraWarningsForAccount: collectChannelDoctorEmptyAllowlistExtraWarnings,
|
||||
}).filter((warning) => !isWarningBlockedByChannelPlugin(warning, channelPluginBlockerHits));
|
||||
if (emptyAllowlistWarnings.length > 0) {
|
||||
warnings.push(emptyAllowlistWarnings.map((line) => sanitizeForLog(line)).join("\n"));
|
||||
}
|
||||
|
||||
const toolsBySenderHits = scanLegacyToolsBySenderKeys(params.cfg);
|
||||
if (toolsBySenderHits.length > 0) {
|
||||
warnings.push(
|
||||
collectLegacyToolsBySenderWarnings({
|
||||
hits: toolsBySenderHits,
|
||||
doctorFixCommand: params.doctorFixCommand,
|
||||
}).join("\n"),
|
||||
);
|
||||
}
|
||||
|
||||
const safeBinCoverage = scanExecSafeBinCoverage(params.cfg);
|
||||
if (safeBinCoverage.length > 0) {
|
||||
warnings.push(
|
||||
collectExecSafeBinCoverageWarnings({
|
||||
hits: safeBinCoverage,
|
||||
doctorFixCommand: params.doctorFixCommand,
|
||||
}).join("\n"),
|
||||
);
|
||||
}
|
||||
|
||||
const safeBinTrustedDirHints = scanExecSafeBinTrustedDirHints(params.cfg);
|
||||
if (safeBinTrustedDirHints.length > 0) {
|
||||
warnings.push(collectExecSafeBinTrustedDirHintWarnings(safeBinTrustedDirHints).join("\n"));
|
||||
const safeBinTrustedDirHints = scanExecSafeBinTrustedDirHints(params.cfg);
|
||||
if (safeBinTrustedDirHints.length > 0) {
|
||||
warnings.push(collectExecSafeBinTrustedDirHintWarnings(safeBinTrustedDirHints).join("\n"));
|
||||
}
|
||||
}
|
||||
|
||||
return warnings;
|
||||
|
||||
@@ -142,28 +142,51 @@ vi.mock("../config/config.js", () => ({
|
||||
resolveGatewayPort: mocks.resolveGatewayPort,
|
||||
}));
|
||||
|
||||
vi.mock("../infra/bonjour-discovery.js", async () => {
|
||||
const actual = await vi.importActual<typeof import("../infra/bonjour-discovery.js")>(
|
||||
"../infra/bonjour-discovery.js",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
discoverGatewayBeacons: mocks.discoverGatewayBeacons,
|
||||
};
|
||||
});
|
||||
vi.mock("../infra/bonjour-discovery.js", () => ({
|
||||
discoverGatewayBeacons: mocks.discoverGatewayBeacons,
|
||||
resolveGatewayDiscoveryEndpoint: (beacon: GatewayBonjourBeacon) => {
|
||||
const host = beacon.host?.trim();
|
||||
const port = beacon.port;
|
||||
if (!host || typeof port !== "number" || !Number.isFinite(port) || port <= 0) {
|
||||
return null;
|
||||
}
|
||||
const scheme = beacon.gatewayTls === true ? "wss" : "ws";
|
||||
return {
|
||||
host,
|
||||
port,
|
||||
gatewayTls: beacon.gatewayTls === true,
|
||||
gatewayTlsFingerprintSha256: beacon.gatewayTlsFingerprintSha256,
|
||||
scheme,
|
||||
wsUrl: `${scheme}://${host}:${port}`,
|
||||
};
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock("../infra/tailnet.js", () => ({
|
||||
pickPrimaryTailnetIPv4: mocks.pickPrimaryTailnetIPv4,
|
||||
}));
|
||||
|
||||
vi.mock("../infra/ssh-tunnel.js", async () => {
|
||||
const actual =
|
||||
await vi.importActual<typeof import("../infra/ssh-tunnel.js")>("../infra/ssh-tunnel.js");
|
||||
return {
|
||||
...actual,
|
||||
startSshPortForward: mocks.startSshPortForward,
|
||||
};
|
||||
});
|
||||
vi.mock("../infra/ssh-tunnel.js", () => ({
|
||||
parseSshTarget: (rawTarget: string) => {
|
||||
const trimmed = rawTarget.trim();
|
||||
if (!trimmed || trimmed.startsWith("-")) {
|
||||
return null;
|
||||
}
|
||||
const [userHost, rawPort] = trimmed.split(":");
|
||||
const [maybeUser, maybeHost] = userHost.includes("@")
|
||||
? userHost.split("@", 2)
|
||||
: [undefined, userHost];
|
||||
if (!maybeHost) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
user: maybeUser,
|
||||
host: maybeHost,
|
||||
port: rawPort ? Number(rawPort) : 22,
|
||||
};
|
||||
},
|
||||
startSshPortForward: mocks.startSshPortForward,
|
||||
}));
|
||||
|
||||
vi.mock("../infra/ssh-config.js", () => ({
|
||||
resolveSshConfig: mocks.resolveSshConfig,
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
import { getChannelPlugin } from "../channels/plugins/index.js";
|
||||
import { asNullableRecord } from "../shared/record-coerce.js";
|
||||
import { colorize, isRich, theme } from "../terminal/theme.js";
|
||||
import type { ChannelAccountHealthSummary, HealthSummary } from "./health.types.js";
|
||||
|
||||
const formatKv = (line: string, rich: boolean) => {
|
||||
const idx = line.indexOf(": ");
|
||||
@@ -47,3 +50,176 @@ export function formatHealthCheckFailure(err: unknown, opts: { rich?: boolean }
|
||||
}
|
||||
return out.join("\n");
|
||||
}
|
||||
|
||||
const formatProbeLine = (probe: unknown, opts: { botUsernames?: string[] } = {}): string | null => {
|
||||
const record = asNullableRecord(probe);
|
||||
if (!record) {
|
||||
return null;
|
||||
}
|
||||
const ok = typeof record.ok === "boolean" ? record.ok : undefined;
|
||||
if (ok === undefined) {
|
||||
return null;
|
||||
}
|
||||
const elapsedMs = typeof record.elapsedMs === "number" ? record.elapsedMs : null;
|
||||
const status = typeof record.status === "number" ? record.status : null;
|
||||
const error = typeof record.error === "string" ? record.error : null;
|
||||
const bot = asNullableRecord(record.bot);
|
||||
const botUsername = bot && typeof bot.username === "string" ? bot.username : null;
|
||||
const webhook = asNullableRecord(record.webhook);
|
||||
const webhookUrl = webhook && typeof webhook.url === "string" ? webhook.url : null;
|
||||
|
||||
const usernames = new Set<string>();
|
||||
if (botUsername) {
|
||||
usernames.add(botUsername);
|
||||
}
|
||||
for (const extra of opts.botUsernames ?? []) {
|
||||
if (extra) {
|
||||
usernames.add(extra);
|
||||
}
|
||||
}
|
||||
|
||||
if (ok) {
|
||||
let label = "ok";
|
||||
if (usernames.size > 0) {
|
||||
label += ` (@${Array.from(usernames).join(", @")})`;
|
||||
}
|
||||
if (elapsedMs != null) {
|
||||
label += ` (${elapsedMs}ms)`;
|
||||
}
|
||||
if (webhookUrl) {
|
||||
label += ` - webhook ${webhookUrl}`;
|
||||
}
|
||||
return label;
|
||||
}
|
||||
let label = `failed (${status ?? "unknown"})`;
|
||||
if (error) {
|
||||
label += ` - ${error}`;
|
||||
}
|
||||
return label;
|
||||
};
|
||||
|
||||
const formatAccountProbeTiming = (summary: ChannelAccountHealthSummary): string | null => {
|
||||
const probe = asNullableRecord(summary.probe);
|
||||
if (!probe) {
|
||||
return null;
|
||||
}
|
||||
const elapsedMs = typeof probe.elapsedMs === "number" ? Math.round(probe.elapsedMs) : null;
|
||||
const ok = typeof probe.ok === "boolean" ? probe.ok : null;
|
||||
if (elapsedMs == null && ok !== true) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const accountId = summary.accountId || "default";
|
||||
const botRecord = asNullableRecord(probe.bot);
|
||||
const botUsername =
|
||||
botRecord && typeof botRecord.username === "string" ? botRecord.username : null;
|
||||
const handle = botUsername ? `@${botUsername}` : accountId;
|
||||
const timing = elapsedMs != null ? `${elapsedMs}ms` : "ok";
|
||||
|
||||
return `${handle}:${accountId}:${timing}`;
|
||||
};
|
||||
|
||||
const isProbeFailure = (summary: ChannelAccountHealthSummary): boolean => {
|
||||
const probe = asNullableRecord(summary.probe);
|
||||
if (!probe) {
|
||||
return false;
|
||||
}
|
||||
const ok = typeof probe.ok === "boolean" ? probe.ok : null;
|
||||
return ok === false;
|
||||
};
|
||||
|
||||
/**
 * Builds one human-readable status line per channel from a health summary.
 *
 * Channels are emitted in `summary.channelOrder` when present, otherwise in
 * `Object.keys` order of `summary.channels`. For each channel the first
 * matching state wins, in this precedence: linked / not linked →
 * not configured → probe failure → per-account timings (accountMode "all") →
 * single probe line → configured → unknown.
 *
 * @param summary Health snapshot containing per-channel account summaries.
 * @param opts.accountMode "all" lists every account's timing; "default"
 *   (the default) reports only the base/filtered account.
 * @param opts.accountIdsByChannel Optional per-channel allowlist of account
 *   ids to report on.
 * @returns One formatted line per channel, e.g. `WhatsApp: linked (auth age 5m)`.
 */
export const formatHealthChannelLines = (
  summary: HealthSummary,
  opts: {
    accountMode?: "default" | "all";
    accountIdsByChannel?: Record<string, string[] | undefined>;
  } = {},
): string[] => {
  const channels = summary.channels ?? {};
  // NOTE(review): `channelOrder?.length > 0` compares `number | undefined`;
  // at runtime `undefined > 0` is false, so a missing order falls back to key
  // order — confirm this compiles under the project's strictness settings.
  const channelOrder =
    summary.channelOrder?.length > 0 ? summary.channelOrder : Object.keys(channels);
  const accountMode = opts.accountMode ?? "default";

  const lines: string[] = [];
  for (const channelId of channelOrder) {
    const channelSummary = channels[channelId];
    if (!channelSummary) {
      continue;
    }
    // Prefer an explicit label from the summary, then the plugin's label,
    // then fall back to the raw channel id.
    const plugin = getChannelPlugin(channelId as never);
    const label = summary.channelLabels?.[channelId] ?? plugin?.meta.label ?? channelId;
    const accountSummaries = channelSummary.accounts ?? {};
    const accountIds = opts.accountIdsByChannel?.[channelId];
    // Restrict to the caller-requested account ids, dropping unknown ids.
    const filteredSummaries =
      accountIds && accountIds.length > 0
        ? accountIds
            .map((accountId) => accountSummaries[accountId])
            .filter((entry): entry is ChannelAccountHealthSummary => Boolean(entry))
        : undefined;
    // "all" mode reports every account; otherwise the filtered set (or all
    // accounts when the channel has an accounts map but no filter applied).
    const listSummaries =
      accountMode === "all"
        ? Object.values(accountSummaries)
        : (filteredSummaries ?? (channelSummary.accounts ? Object.values(accountSummaries) : []));
    // The summary whose linked/configured/probe fields drive the line.
    const baseSummary =
      filteredSummaries && filteredSummaries.length > 0 ? filteredSummaries[0] : channelSummary;
    // Bot usernames gathered across the listed accounts, for probe labels.
    const botUsernames = listSummaries
      ? listSummaries
          .map((account) => {
            const probeRecord = asNullableRecord(account.probe);
            const bot = probeRecord ? asNullableRecord(probeRecord.bot) : null;
            return bot && typeof bot.username === "string" ? bot.username : null;
          })
          .filter((value): value is string => Boolean(value))
      : [];
    // Link-style channels: report linked/not-linked and stop here.
    const linked = typeof baseSummary.linked === "boolean" ? baseSummary.linked : null;
    if (linked !== null) {
      if (linked) {
        const authAgeMs = typeof baseSummary.authAgeMs === "number" ? baseSummary.authAgeMs : null;
        // Auth age is shown in whole minutes.
        const authLabel = authAgeMs != null ? ` (auth age ${Math.round(authAgeMs / 60000)}m)` : "";
        lines.push(`${label}: linked${authLabel}`);
      } else {
        lines.push(`${label}: not linked`);
      }
      continue;
    }

    const configured = typeof baseSummary.configured === "boolean" ? baseSummary.configured : null;
    if (configured === false) {
      lines.push(`${label}: not configured`);
      continue;
    }

    // Per-account timings only apply in "all" mode.
    const accountTimings =
      accountMode === "all"
        ? listSummaries
            .map((account) => formatAccountProbeTiming(account))
            .filter((value): value is string => Boolean(value))
        : [];
    // Any explicit probe failure takes precedence over success timings.
    const failedSummary = listSummaries.find((summary) => isProbeFailure(summary));
    if (failedSummary) {
      const failureLine = formatProbeLine(failedSummary.probe, { botUsernames });
      if (failureLine) {
        lines.push(`${label}: ${failureLine}`);
        continue;
      }
    }

    if (accountTimings.length > 0) {
      lines.push(`${label}: ok (${accountTimings.join(", ")})`);
      continue;
    }

    const probeLine = formatProbeLine(baseSummary.probe, { botUsernames });
    if (probeLine) {
      lines.push(`${label}: ${probeLine}`);
      continue;
    }

    if (configured === true) {
      lines.push(`${label}: configured`);
      continue;
    }
    lines.push(`${label}: unknown`);
  }
  return lines;
};
|
||||
|
||||
@@ -67,7 +67,7 @@ describe("healthCommand (coverage)", () => {
|
||||
});
|
||||
});
|
||||
|
||||
it("prints the rich text summary when linked and configured", async () => {
|
||||
it("prints the rich text summary and verbose gateway details", async () => {
|
||||
const recent = createRecentSessionRows();
|
||||
callGatewayMock.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
@@ -128,40 +128,17 @@ describe("healthCommand (coverage)", () => {
|
||||
},
|
||||
} satisfies HealthSummary);
|
||||
|
||||
await healthCommand({ json: false, timeoutMs: 1000 }, runtime as never);
|
||||
|
||||
expect(runtime.exit).not.toHaveBeenCalled();
|
||||
expect(stripAnsi(runtime.log.mock.calls.map((c) => String(c[0])).join("\n"))).toMatch(
|
||||
/WhatsApp: linked/i,
|
||||
);
|
||||
expect(logWebSelfIdMock).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("prints gateway connection details in verbose mode", async () => {
|
||||
callGatewayMock.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
ts: Date.now(),
|
||||
durationMs: 5,
|
||||
channels: {},
|
||||
channelOrder: [],
|
||||
channelLabels: {},
|
||||
heartbeatSeconds: 60,
|
||||
defaultAgentId: "main",
|
||||
agents: [],
|
||||
sessions: {
|
||||
path: "/tmp/sessions.json",
|
||||
count: 0,
|
||||
recent: [],
|
||||
},
|
||||
} satisfies HealthSummary);
|
||||
|
||||
await healthCommand({ json: false, verbose: true, timeoutMs: 1000 }, runtime as never);
|
||||
|
||||
expect(runtime.exit).not.toHaveBeenCalled();
|
||||
const output = stripAnsi(runtime.log.mock.calls.map((c) => String(c[0])).join("\n"));
|
||||
expect(output).toMatch(/WhatsApp: linked/i);
|
||||
expect(runtime.log.mock.calls.slice(0, 3)).toEqual([
|
||||
["Gateway connection:"],
|
||||
[" Gateway mode: local"],
|
||||
[" Gateway target: ws://127.0.0.1:18789"],
|
||||
]);
|
||||
expect(buildGatewayConnectionDetailsMock).toHaveBeenCalled();
|
||||
expect(logWebSelfIdMock).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -353,24 +353,24 @@ describe("getHealthSnapshot", () => {
|
||||
expect(telegram.probe?.webhook?.url).toMatch(/^https:/);
|
||||
expect(calls.some((c) => c.includes("/getMe"))).toBe(true);
|
||||
expect(calls.some((c) => c.includes("/getWebhookInfo"))).toBe(true);
|
||||
});
|
||||
|
||||
it("treats telegram.tokenFile as configured", async () => {
|
||||
const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-health-"));
|
||||
const tokenFile = path.join(tmpDir, "telegram-token");
|
||||
fs.writeFileSync(tokenFile, "t-file\n", "utf-8");
|
||||
const { calls, telegram } = await runSuccessfulTelegramProbe(
|
||||
{ channels: { telegram: { tokenFile } } },
|
||||
{ clearTokenEnv: true },
|
||||
);
|
||||
expect(telegram.configured).toBe(true);
|
||||
expect(telegram.probe?.ok).toBe(true);
|
||||
expect(calls.some((c) => c.includes("bott-file/getMe"))).toBe(true);
|
||||
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
try {
|
||||
fs.writeFileSync(tokenFile, "t-file\n", "utf-8");
|
||||
const tokenFileProbe = await runSuccessfulTelegramProbe(
|
||||
{ channels: { telegram: { tokenFile } } },
|
||||
{ clearTokenEnv: true },
|
||||
);
|
||||
expect(tokenFileProbe.telegram.configured).toBe(true);
|
||||
expect(tokenFileProbe.telegram.probe?.ok).toBe(true);
|
||||
expect(tokenFileProbe.calls.some((c) => c.includes("bott-file/getMe"))).toBe(true);
|
||||
} finally {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("returns a structured telegram probe error when getMe fails", async () => {
|
||||
it("returns structured telegram probe errors", async () => {
|
||||
testConfig = { channels: { telegram: { botToken: "bad-token" } } };
|
||||
testStore = {};
|
||||
vi.stubEnv("DISCORD_BOT_TOKEN", "");
|
||||
@@ -398,9 +398,7 @@ describe("getHealthSnapshot", () => {
|
||||
expect(telegram.probe?.ok).toBe(false);
|
||||
expect(telegram.probe?.status).toBe(401);
|
||||
expect(telegram.probe?.error).toMatch(/unauthorized/i);
|
||||
});
|
||||
|
||||
it("captures unexpected probe exceptions as errors", async () => {
|
||||
testConfig = { channels: { telegram: { botToken: "t-err" } } };
|
||||
testStore = {};
|
||||
vi.stubEnv("DISCORD_BOT_TOKEN", "");
|
||||
@@ -412,14 +410,14 @@ describe("getHealthSnapshot", () => {
|
||||
}),
|
||||
);
|
||||
|
||||
const snap = await getHealthSnapshot({ timeoutMs: 25 });
|
||||
const telegram = snap.channels.telegram as {
|
||||
const exceptionSnap = await getHealthSnapshot({ timeoutMs: 25 });
|
||||
const exceptionTelegram = exceptionSnap.channels.telegram as {
|
||||
configured?: boolean;
|
||||
probe?: { ok?: boolean; error?: string };
|
||||
};
|
||||
expect(telegram.configured).toBe(true);
|
||||
expect(telegram.probe?.ok).toBe(false);
|
||||
expect(telegram.probe?.error).toMatch(/network down/i);
|
||||
expect(exceptionTelegram.configured).toBe(true);
|
||||
expect(exceptionTelegram.probe?.ok).toBe(false);
|
||||
expect(exceptionTelegram.probe?.error).toMatch(/network down/i);
|
||||
});
|
||||
|
||||
it("disables heartbeat for agents without heartbeat blocks", async () => {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { resolveDefaultAgentId } from "../agents/agent-scope.js";
|
||||
import { resolveChannelDefaultAccountId } from "../channels/plugins/helpers.js";
|
||||
import { getChannelPlugin, listChannelPlugins } from "../channels/plugins/index.js";
|
||||
import { listChannelPlugins } from "../channels/plugins/index.js";
|
||||
import type { ChannelPlugin } from "../channels/plugins/types.plugin.js";
|
||||
import type { ChannelAccountSnapshot } from "../channels/plugins/types.public.js";
|
||||
import { inspectReadOnlyChannelAccount } from "../channels/read-only-account-inspect.js";
|
||||
@@ -18,6 +18,7 @@ import { type RuntimeEnv, writeRuntimeJson } from "../runtime.js";
|
||||
import { asNullableRecord } from "../shared/record-coerce.js";
|
||||
import { styleHealthChannelLine } from "../terminal/health-style.js";
|
||||
import { isRich } from "../terminal/theme.js";
|
||||
import { formatHealthChannelLines } from "./health-format.js";
|
||||
import type {
|
||||
AgentHealthSummary,
|
||||
ChannelAccountHealthSummary,
|
||||
@@ -25,6 +26,7 @@ import type {
|
||||
HealthSummary,
|
||||
} from "./health.types.js";
|
||||
import { logGatewayConnectionDetails } from "./status.gateway-connection.js";
|
||||
export { formatHealthChannelLines } from "./health-format.js";
|
||||
export type {
|
||||
AgentHealthSummary,
|
||||
ChannelAccountHealthSummary,
|
||||
@@ -201,179 +203,6 @@ async function resolveHealthAccountContext(params: {
|
||||
return { account, enabled, configured, diagnostics };
|
||||
}
|
||||
|
||||
const formatProbeLine = (probe: unknown, opts: { botUsernames?: string[] } = {}): string | null => {
|
||||
const record = asNullableRecord(probe);
|
||||
if (!record) {
|
||||
return null;
|
||||
}
|
||||
const ok = typeof record.ok === "boolean" ? record.ok : undefined;
|
||||
if (ok === undefined) {
|
||||
return null;
|
||||
}
|
||||
const elapsedMs = typeof record.elapsedMs === "number" ? record.elapsedMs : null;
|
||||
const status = typeof record.status === "number" ? record.status : null;
|
||||
const error = typeof record.error === "string" ? record.error : null;
|
||||
const bot = asNullableRecord(record.bot);
|
||||
const botUsername = bot && typeof bot.username === "string" ? bot.username : null;
|
||||
const webhook = asNullableRecord(record.webhook);
|
||||
const webhookUrl = webhook && typeof webhook.url === "string" ? webhook.url : null;
|
||||
|
||||
const usernames = new Set<string>();
|
||||
if (botUsername) {
|
||||
usernames.add(botUsername);
|
||||
}
|
||||
for (const extra of opts.botUsernames ?? []) {
|
||||
if (extra) {
|
||||
usernames.add(extra);
|
||||
}
|
||||
}
|
||||
|
||||
if (ok) {
|
||||
let label = "ok";
|
||||
if (usernames.size > 0) {
|
||||
label += ` (@${Array.from(usernames).join(", @")})`;
|
||||
}
|
||||
if (elapsedMs != null) {
|
||||
label += ` (${elapsedMs}ms)`;
|
||||
}
|
||||
if (webhookUrl) {
|
||||
label += ` - webhook ${webhookUrl}`;
|
||||
}
|
||||
return label;
|
||||
}
|
||||
let label = `failed (${status ?? "unknown"})`;
|
||||
if (error) {
|
||||
label += ` - ${error}`;
|
||||
}
|
||||
return label;
|
||||
};
|
||||
|
||||
const formatAccountProbeTiming = (summary: ChannelAccountHealthSummary): string | null => {
|
||||
const probe = asNullableRecord(summary.probe);
|
||||
if (!probe) {
|
||||
return null;
|
||||
}
|
||||
const elapsedMs = typeof probe.elapsedMs === "number" ? Math.round(probe.elapsedMs) : null;
|
||||
const ok = typeof probe.ok === "boolean" ? probe.ok : null;
|
||||
if (elapsedMs == null && ok !== true) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const accountId = summary.accountId || "default";
|
||||
const botRecord = asNullableRecord(probe.bot);
|
||||
const botUsername =
|
||||
botRecord && typeof botRecord.username === "string" ? botRecord.username : null;
|
||||
const handle = botUsername ? `@${botUsername}` : accountId;
|
||||
const timing = elapsedMs != null ? `${elapsedMs}ms` : "ok";
|
||||
|
||||
return `${handle}:${accountId}:${timing}`;
|
||||
};
|
||||
|
||||
const isProbeFailure = (summary: ChannelAccountHealthSummary): boolean => {
|
||||
const probe = asNullableRecord(summary.probe);
|
||||
if (!probe) {
|
||||
return false;
|
||||
}
|
||||
const ok = typeof probe.ok === "boolean" ? probe.ok : null;
|
||||
return ok === false;
|
||||
};
|
||||
|
||||
export const formatHealthChannelLines = (
|
||||
summary: HealthSummary,
|
||||
opts: {
|
||||
accountMode?: "default" | "all";
|
||||
accountIdsByChannel?: Record<string, string[] | undefined>;
|
||||
} = {},
|
||||
): string[] => {
|
||||
const channels = summary.channels ?? {};
|
||||
const channelOrder =
|
||||
summary.channelOrder?.length > 0 ? summary.channelOrder : Object.keys(channels);
|
||||
const accountMode = opts.accountMode ?? "default";
|
||||
|
||||
const lines: string[] = [];
|
||||
for (const channelId of channelOrder) {
|
||||
const channelSummary = channels[channelId];
|
||||
if (!channelSummary) {
|
||||
continue;
|
||||
}
|
||||
const plugin = getChannelPlugin(channelId as never);
|
||||
const label = summary.channelLabels?.[channelId] ?? plugin?.meta.label ?? channelId;
|
||||
const accountSummaries = channelSummary.accounts ?? {};
|
||||
const accountIds = opts.accountIdsByChannel?.[channelId];
|
||||
const filteredSummaries =
|
||||
accountIds && accountIds.length > 0
|
||||
? accountIds
|
||||
.map((accountId) => accountSummaries[accountId])
|
||||
.filter((entry): entry is ChannelAccountHealthSummary => Boolean(entry))
|
||||
: undefined;
|
||||
const listSummaries =
|
||||
accountMode === "all"
|
||||
? Object.values(accountSummaries)
|
||||
: (filteredSummaries ?? (channelSummary.accounts ? Object.values(accountSummaries) : []));
|
||||
const baseSummary =
|
||||
filteredSummaries && filteredSummaries.length > 0 ? filteredSummaries[0] : channelSummary;
|
||||
const botUsernames = listSummaries
|
||||
? listSummaries
|
||||
.map((account) => {
|
||||
const probeRecord = asNullableRecord(account.probe);
|
||||
const bot = probeRecord ? asNullableRecord(probeRecord.bot) : null;
|
||||
return bot && typeof bot.username === "string" ? bot.username : null;
|
||||
})
|
||||
.filter((value): value is string => Boolean(value))
|
||||
: [];
|
||||
const linked = typeof baseSummary.linked === "boolean" ? baseSummary.linked : null;
|
||||
if (linked !== null) {
|
||||
if (linked) {
|
||||
const authAgeMs = typeof baseSummary.authAgeMs === "number" ? baseSummary.authAgeMs : null;
|
||||
const authLabel = authAgeMs != null ? ` (auth age ${Math.round(authAgeMs / 60000)}m)` : "";
|
||||
lines.push(`${label}: linked${authLabel}`);
|
||||
} else {
|
||||
lines.push(`${label}: not linked`);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
const configured = typeof baseSummary.configured === "boolean" ? baseSummary.configured : null;
|
||||
if (configured === false) {
|
||||
lines.push(`${label}: not configured`);
|
||||
continue;
|
||||
}
|
||||
|
||||
const accountTimings =
|
||||
accountMode === "all"
|
||||
? listSummaries
|
||||
.map((account) => formatAccountProbeTiming(account))
|
||||
.filter((value): value is string => Boolean(value))
|
||||
: [];
|
||||
const failedSummary = listSummaries.find((summary) => isProbeFailure(summary));
|
||||
if (failedSummary) {
|
||||
const failureLine = formatProbeLine(failedSummary.probe, { botUsernames });
|
||||
if (failureLine) {
|
||||
lines.push(`${label}: ${failureLine}`);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (accountTimings.length > 0) {
|
||||
lines.push(`${label}: ok (${accountTimings.join(", ")})`);
|
||||
continue;
|
||||
}
|
||||
|
||||
const probeLine = formatProbeLine(baseSummary.probe, { botUsernames });
|
||||
if (probeLine) {
|
||||
lines.push(`${label}: ${probeLine}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (configured === true) {
|
||||
lines.push(`${label}: configured`);
|
||||
continue;
|
||||
}
|
||||
lines.push(`${label}: unknown`);
|
||||
}
|
||||
return lines;
|
||||
};
|
||||
|
||||
export async function getHealthSnapshot(params?: {
|
||||
timeoutMs?: number;
|
||||
probe?: boolean;
|
||||
|
||||
@@ -1,138 +0,0 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
type AuthProfileStore,
|
||||
resolveApiKeyForProfile,
|
||||
saveAuthProfileStore,
|
||||
} from "../agents/auth-profiles.js";
|
||||
import { clearConfigCache, clearRuntimeConfigSnapshot } from "../config/config.js";
|
||||
import { withEnvAsync } from "../test-utils/env.js";
|
||||
import { toModelRow } from "./models/list.registry.js";
|
||||
|
||||
const OPENROUTER_MODEL = {
|
||||
provider: "openrouter",
|
||||
id: "openai/gpt-5.4",
|
||||
name: "GPT-5.4 via OpenRouter",
|
||||
api: "openai-chat-completions",
|
||||
baseUrl: "https://openrouter.ai/api/v1",
|
||||
input: ["text"],
|
||||
contextWindow: 1_000_000,
|
||||
maxTokens: 128_000,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
} as const;
|
||||
|
||||
async function pathExists(pathname: string): Promise<boolean> {
|
||||
try {
|
||||
await fs.stat(pathname);
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
type AuthSyncFixture = {
|
||||
root: string;
|
||||
stateDir: string;
|
||||
agentDir: string;
|
||||
configPath: string;
|
||||
authPath: string;
|
||||
};
|
||||
|
||||
async function withAuthSyncFixture(run: (fixture: AuthSyncFixture) => Promise<void>) {
|
||||
const root = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-models-list-auth-sync-"));
|
||||
try {
|
||||
const stateDir = path.join(root, "state");
|
||||
const agentDir = path.join(stateDir, "agents", "main", "agent");
|
||||
const configPath = path.join(stateDir, "openclaw.json");
|
||||
const authPath = path.join(agentDir, "auth.json");
|
||||
|
||||
await fs.mkdir(agentDir, { recursive: true });
|
||||
await fs.writeFile(configPath, "{}\n", "utf8");
|
||||
|
||||
await withEnvAsync(
|
||||
{
|
||||
OPENCLAW_STATE_DIR: stateDir,
|
||||
OPENCLAW_AGENT_DIR: agentDir,
|
||||
PI_CODING_AGENT_DIR: agentDir,
|
||||
OPENCLAW_CONFIG_PATH: configPath,
|
||||
OPENROUTER_API_KEY: undefined,
|
||||
},
|
||||
async () => {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
await run({ root, stateDir, agentDir, configPath, authPath });
|
||||
},
|
||||
);
|
||||
} finally {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
await fs.rm(root, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
describe("models list auth-profile sync", () => {
|
||||
it("marks models available when auth exists only in auth-profiles.json", async () => {
|
||||
await withAuthSyncFixture(async ({ agentDir, authPath }) => {
|
||||
const authStore: AuthProfileStore = {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"openrouter:default": {
|
||||
type: "api_key",
|
||||
provider: "openrouter",
|
||||
key: "sk-or-v1-regression-test",
|
||||
},
|
||||
},
|
||||
};
|
||||
saveAuthProfileStore(authStore, agentDir);
|
||||
|
||||
expect(await pathExists(authPath)).toBe(false);
|
||||
|
||||
const row = toModelRow({
|
||||
model: OPENROUTER_MODEL as never,
|
||||
key: "openrouter/openai/gpt-5.4",
|
||||
tags: [],
|
||||
cfg: {},
|
||||
authStore,
|
||||
});
|
||||
expect(row.available).toBe(true);
|
||||
expect(await pathExists(authPath)).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
it("does not persist blank auth-profile credentials", async () => {
|
||||
await withAuthSyncFixture(async ({ agentDir, authPath }) => {
|
||||
const authStore: AuthProfileStore = {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"openrouter:default": {
|
||||
type: "api_key",
|
||||
provider: "openrouter",
|
||||
key: " ",
|
||||
},
|
||||
},
|
||||
};
|
||||
saveAuthProfileStore(authStore, agentDir);
|
||||
|
||||
await expect(
|
||||
resolveApiKeyForProfile({
|
||||
cfg: {},
|
||||
store: authStore,
|
||||
profileId: "openrouter:default",
|
||||
agentDir,
|
||||
}),
|
||||
).resolves.toBeNull();
|
||||
if (await pathExists(authPath)) {
|
||||
const parsed = JSON.parse(await fs.readFile(authPath, "utf8")) as Record<
|
||||
string,
|
||||
{ type?: string; key?: string }
|
||||
>;
|
||||
const openrouterKey = parsed.openrouter?.key;
|
||||
if (openrouterKey !== undefined) {
|
||||
expect(openrouterKey.trim().length).toBeGreaterThan(0);
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
10
src/commands/models/alias-name.ts
Normal file
10
src/commands/models/alias-name.ts
Normal file
@@ -0,0 +1,10 @@
|
||||
export function normalizeAlias(alias: string): string {
|
||||
const trimmed = alias.trim();
|
||||
if (!trimmed) {
|
||||
throw new Error("Alias cannot be empty.");
|
||||
}
|
||||
if (!/^[A-Za-z0-9_.:-]+$/.test(trimmed)) {
|
||||
throw new Error("Alias must use letters, numbers, dots, underscores, colons, or dashes.");
|
||||
}
|
||||
return trimmed;
|
||||
}
|
||||
@@ -26,13 +26,46 @@ const mocks = vi.hoisted(() => ({
|
||||
clearAuthProfileCooldown: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock("../../agents/auth-profiles.js", () => ({
|
||||
loadAuthProfileStoreForRuntime: mocks.loadAuthProfileStoreForRuntime,
|
||||
vi.mock("../../agents/auth-profiles/profiles.js", () => ({
|
||||
listProfilesForProvider: mocks.listProfilesForProvider,
|
||||
clearAuthProfileCooldown: mocks.clearAuthProfileCooldown,
|
||||
upsertAuthProfile: mocks.upsertAuthProfile,
|
||||
}));
|
||||
|
||||
vi.mock("../../agents/auth-profiles/store.js", () => ({
|
||||
loadAuthProfileStoreForRuntime: mocks.loadAuthProfileStoreForRuntime,
|
||||
}));
|
||||
|
||||
vi.mock("../../agents/auth-profiles/usage.js", () => ({
|
||||
clearAuthProfileCooldown: mocks.clearAuthProfileCooldown,
|
||||
}));
|
||||
|
||||
vi.mock("../../plugins/provider-auth-helpers.js", () => ({
|
||||
applyAuthProfileConfig: (
|
||||
cfg: OpenClawConfig,
|
||||
params: {
|
||||
profileId: string;
|
||||
provider: string;
|
||||
mode: "api_key" | "oauth" | "token";
|
||||
email?: string;
|
||||
displayName?: string;
|
||||
},
|
||||
): OpenClawConfig => ({
|
||||
...cfg,
|
||||
auth: {
|
||||
...cfg.auth,
|
||||
profiles: {
|
||||
...cfg.auth?.profiles,
|
||||
[params.profileId]: {
|
||||
provider: params.provider,
|
||||
mode: params.mode,
|
||||
...(params.email ? { email: params.email } : {}),
|
||||
...(params.displayName ? { displayName: params.displayName } : {}),
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
}));
|
||||
|
||||
vi.mock("@clack/prompts", () => ({
|
||||
cancel: mocks.clackCancel,
|
||||
confirm: mocks.clackConfirm,
|
||||
@@ -59,14 +92,10 @@ vi.mock("../../wizard/clack-prompter.js", () => ({
|
||||
createClackPrompter: mocks.createClackPrompter,
|
||||
}));
|
||||
|
||||
vi.mock("./shared.js", async (importActual) => {
|
||||
const actual = await importActual<typeof import("./shared.js")>();
|
||||
return {
|
||||
...actual,
|
||||
loadValidConfigOrThrow: mocks.loadValidConfigOrThrow,
|
||||
updateConfig: mocks.updateConfig,
|
||||
};
|
||||
});
|
||||
vi.mock("./shared.js", () => ({
|
||||
loadValidConfigOrThrow: mocks.loadValidConfigOrThrow,
|
||||
updateConfig: mocks.updateConfig,
|
||||
}));
|
||||
|
||||
vi.mock("../../config/logging.js", () => ({
|
||||
logConfigUpdated: mocks.logConfigUpdated,
|
||||
@@ -80,6 +109,91 @@ vi.mock("../oauth-env.js", () => ({
|
||||
isRemoteEnvironment: mocks.isRemoteEnvironment,
|
||||
}));
|
||||
|
||||
vi.mock("../oauth-flow.js", () => ({
|
||||
createVpsAwareOAuthHandlers: vi.fn(() => ({
|
||||
onAuth: vi.fn(),
|
||||
onPrompt: vi.fn(),
|
||||
})),
|
||||
}));
|
||||
|
||||
vi.mock("../auth-token.js", () => ({
|
||||
validateAnthropicSetupToken: vi.fn(() => undefined),
|
||||
}));
|
||||
|
||||
vi.mock("../provider-auth-helpers.js", () => {
|
||||
const normalize = (value: string | undefined) => value?.trim().toLowerCase() ?? "";
|
||||
const isRecord = (value: unknown): value is Record<string, unknown> =>
|
||||
Boolean(value && typeof value === "object" && !Array.isArray(value));
|
||||
const mergePatch = <T>(base: T, patch: unknown): T => {
|
||||
if (!isRecord(base) || !isRecord(patch)) {
|
||||
return patch as T;
|
||||
}
|
||||
const next: Record<string, unknown> = { ...base };
|
||||
for (const [key, value] of Object.entries(patch)) {
|
||||
next[key] = mergePatch(next[key], value);
|
||||
}
|
||||
return next as T;
|
||||
};
|
||||
|
||||
return {
|
||||
resolveProviderMatch: vi.fn((providers: ProviderPlugin[], rawProvider?: string) => {
|
||||
const requested = normalize(rawProvider);
|
||||
return (
|
||||
providers.find((provider) => normalize(provider.id) === requested) ??
|
||||
providers.find((provider) =>
|
||||
provider.aliases?.some((alias) => normalize(alias) === requested),
|
||||
) ??
|
||||
null
|
||||
);
|
||||
}),
|
||||
pickAuthMethod: vi.fn((provider: ProviderPlugin, rawMethod?: string) => {
|
||||
const requested = normalize(rawMethod);
|
||||
return (
|
||||
provider.auth.find((method) => normalize(method.id) === requested) ??
|
||||
provider.auth.find((method) => normalize(method.label) === requested) ??
|
||||
null
|
||||
);
|
||||
}),
|
||||
applyProviderAuthConfigPatch: vi.fn((cfg: OpenClawConfig, patch: unknown) => {
|
||||
const merged = mergePatch(cfg, patch);
|
||||
const patchModels = (patch as { agents?: { defaults?: { models?: unknown } } })?.agents
|
||||
?.defaults?.models;
|
||||
return isRecord(patchModels)
|
||||
? {
|
||||
...merged,
|
||||
agents: {
|
||||
...merged.agents,
|
||||
defaults: {
|
||||
...merged.agents?.defaults,
|
||||
models: patchModels,
|
||||
},
|
||||
},
|
||||
}
|
||||
: merged;
|
||||
}),
|
||||
applyDefaultModel: vi.fn((cfg: OpenClawConfig, model: string) => ({
|
||||
...cfg,
|
||||
agents: {
|
||||
...cfg.agents,
|
||||
defaults: {
|
||||
...cfg.agents?.defaults,
|
||||
models: {
|
||||
...cfg.agents?.defaults?.models,
|
||||
[model]: cfg.agents?.defaults?.models?.[model] ?? {},
|
||||
},
|
||||
model: {
|
||||
...(typeof cfg.agents?.defaults?.model === "object" &&
|
||||
"fallbacks" in cfg.agents.defaults.model
|
||||
? { fallbacks: cfg.agents.defaults.model.fallbacks }
|
||||
: undefined),
|
||||
primary: model,
|
||||
},
|
||||
},
|
||||
},
|
||||
})),
|
||||
};
|
||||
});
|
||||
|
||||
const { modelsAuthLoginCommand, modelsAuthPasteTokenCommand, modelsAuthSetupTokenCommand } =
|
||||
await import("./auth.js");
|
||||
|
||||
@@ -200,9 +314,35 @@ describe("modelsAuthLoginCommand", () => {
|
||||
|
||||
it("runs plugin-owned openai-codex login", async () => {
|
||||
const runtime = createRuntime();
|
||||
const fakeStore = {
|
||||
profiles: {
|
||||
"openai-codex:user@example.com": {
|
||||
type: "oauth",
|
||||
provider: "openai-codex",
|
||||
},
|
||||
},
|
||||
usageStats: {
|
||||
"openai-codex:user@example.com": {
|
||||
disabledUntil: Date.now() + 3_600_000,
|
||||
disabledReason: "auth_permanent",
|
||||
errorCount: 3,
|
||||
},
|
||||
},
|
||||
};
|
||||
mocks.loadAuthProfileStoreForRuntime.mockReturnValue(fakeStore);
|
||||
mocks.listProfilesForProvider.mockReturnValue(["openai-codex:user@example.com"]);
|
||||
|
||||
await modelsAuthLoginCommand({ provider: "openai-codex" }, runtime);
|
||||
|
||||
expect(mocks.loadAuthProfileStoreForRuntime).toHaveBeenCalledWith("/tmp/openclaw/agents/main");
|
||||
expect(mocks.clearAuthProfileCooldown).toHaveBeenCalledWith({
|
||||
store: fakeStore,
|
||||
profileId: "openai-codex:user@example.com",
|
||||
agentDir: "/tmp/openclaw/agents/main",
|
||||
});
|
||||
expect(mocks.clearAuthProfileCooldown.mock.invocationCallOrder[0]).toBeLessThan(
|
||||
runProviderAuth.mock.invocationCallOrder[0],
|
||||
);
|
||||
expect(runProviderAuth).toHaveBeenCalledOnce();
|
||||
expect(mocks.upsertAuthProfile).toHaveBeenCalledWith({
|
||||
profileId: "openai-codex:user@example.com",
|
||||
@@ -227,63 +367,6 @@ describe("modelsAuthLoginCommand", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("applies openai-codex default model when --set-default is used", async () => {
|
||||
const runtime = createRuntime();
|
||||
|
||||
await modelsAuthLoginCommand({ provider: "openai-codex", setDefault: true }, runtime);
|
||||
|
||||
expect(lastUpdatedConfig?.agents?.defaults?.model).toEqual({
|
||||
primary: "openai-codex/gpt-5.4",
|
||||
});
|
||||
expect(runtime.log).toHaveBeenCalledWith("Default model set to openai-codex/gpt-5.4");
|
||||
});
|
||||
|
||||
it("supports provider-owned Claude CLI migration without writing auth profiles", async () => {
|
||||
const runtime = createRuntime();
|
||||
const runClaudeCliMigration = vi.fn().mockResolvedValue({
|
||||
profiles: [],
|
||||
defaultModel: "claude-cli/claude-sonnet-4-6",
|
||||
configPatch: {
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"claude-cli/claude-sonnet-4-6": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
mocks.resolvePluginProviders.mockReturnValue([
|
||||
{
|
||||
id: "anthropic",
|
||||
label: "Anthropic",
|
||||
auth: [
|
||||
{
|
||||
id: "cli",
|
||||
label: "Claude CLI",
|
||||
kind: "custom",
|
||||
run: runClaudeCliMigration,
|
||||
},
|
||||
],
|
||||
},
|
||||
]);
|
||||
|
||||
await modelsAuthLoginCommand(
|
||||
{ provider: "anthropic", method: "cli", setDefault: true },
|
||||
runtime,
|
||||
);
|
||||
|
||||
expect(runClaudeCliMigration).toHaveBeenCalledOnce();
|
||||
expect(mocks.upsertAuthProfile).not.toHaveBeenCalled();
|
||||
expect(lastUpdatedConfig?.agents?.defaults?.model).toEqual({
|
||||
primary: "claude-cli/claude-sonnet-4-6",
|
||||
});
|
||||
expect(lastUpdatedConfig?.agents?.defaults?.models).toEqual({
|
||||
"claude-cli/claude-sonnet-4-6": {},
|
||||
});
|
||||
expect(runtime.log).toHaveBeenCalledWith("Default model set to claude-cli/claude-sonnet-4-6");
|
||||
});
|
||||
|
||||
it("loads the owning plugin for an explicit provider even in a clean config", async () => {
|
||||
const runtime = createRuntime();
|
||||
const runClaudeCliMigration = vi.fn().mockResolvedValue({
|
||||
@@ -335,6 +418,14 @@ describe("modelsAuthLoginCommand", () => {
|
||||
}),
|
||||
);
|
||||
expect(runClaudeCliMigration).toHaveBeenCalledOnce();
|
||||
expect(mocks.upsertAuthProfile).not.toHaveBeenCalled();
|
||||
expect(lastUpdatedConfig?.agents?.defaults?.model).toEqual({
|
||||
primary: "claude-cli/claude-sonnet-4-6",
|
||||
});
|
||||
expect(lastUpdatedConfig?.agents?.defaults?.models).toEqual({
|
||||
"claude-cli/claude-sonnet-4-6": {},
|
||||
});
|
||||
expect(runtime.log).toHaveBeenCalledWith("Default model set to claude-cli/claude-sonnet-4-6");
|
||||
});
|
||||
|
||||
it("runs the requested anthropic cli auth method with the full login context", async () => {
|
||||
@@ -482,39 +573,6 @@ describe("modelsAuthLoginCommand", () => {
|
||||
expect(runtime.log).toHaveBeenCalledWith("Default model set to claude-cli/claude-sonnet-4-6");
|
||||
});
|
||||
|
||||
it("clears stale auth lockouts before attempting openai-codex login", async () => {
|
||||
const runtime = createRuntime();
|
||||
const fakeStore = {
|
||||
profiles: {
|
||||
"openai-codex:user@example.com": {
|
||||
type: "oauth",
|
||||
provider: "openai-codex",
|
||||
},
|
||||
},
|
||||
usageStats: {
|
||||
"openai-codex:user@example.com": {
|
||||
disabledUntil: Date.now() + 3_600_000,
|
||||
disabledReason: "auth_permanent",
|
||||
errorCount: 3,
|
||||
},
|
||||
},
|
||||
};
|
||||
mocks.loadAuthProfileStoreForRuntime.mockReturnValue(fakeStore);
|
||||
mocks.listProfilesForProvider.mockReturnValue(["openai-codex:user@example.com"]);
|
||||
|
||||
await modelsAuthLoginCommand({ provider: "openai-codex" }, runtime);
|
||||
|
||||
expect(mocks.clearAuthProfileCooldown).toHaveBeenCalledWith({
|
||||
store: fakeStore,
|
||||
profileId: "openai-codex:user@example.com",
|
||||
agentDir: "/tmp/openclaw/agents/main",
|
||||
});
|
||||
// Verify clearing happens before login attempt
|
||||
const clearOrder = mocks.clearAuthProfileCooldown.mock.invocationCallOrder[0];
|
||||
const loginOrder = runProviderAuth.mock.invocationCallOrder[0];
|
||||
expect(clearOrder).toBeLessThan(loginOrder);
|
||||
});
|
||||
|
||||
it("survives lockout clearing failure without blocking login", async () => {
|
||||
const runtime = createRuntime();
|
||||
mocks.loadAuthProfileStoreForRuntime.mockImplementation(() => {
|
||||
@@ -526,16 +584,6 @@ describe("modelsAuthLoginCommand", () => {
|
||||
expect(runProviderAuth).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it("loads lockout state from the agent-scoped store", async () => {
|
||||
const runtime = createRuntime();
|
||||
mocks.loadAuthProfileStoreForRuntime.mockReturnValue({ profiles: {}, usageStats: {} });
|
||||
mocks.listProfilesForProvider.mockReturnValue([]);
|
||||
|
||||
await modelsAuthLoginCommand({ provider: "openai-codex" }, runtime);
|
||||
|
||||
expect(mocks.loadAuthProfileStoreForRuntime).toHaveBeenCalledWith("/tmp/openclaw/agents/main");
|
||||
});
|
||||
|
||||
it("reports loaded plugin providers when requested provider is unavailable", async () => {
|
||||
const runtime = createRuntime();
|
||||
|
||||
@@ -568,23 +616,6 @@ describe("modelsAuthLoginCommand", () => {
|
||||
}
|
||||
});
|
||||
|
||||
it("writes pasted tokens to the resolved agent store", async () => {
|
||||
const runtime = createRuntime();
|
||||
mocks.clackText.mockResolvedValue("tok-fresh");
|
||||
|
||||
await modelsAuthPasteTokenCommand({ provider: "openai" }, runtime);
|
||||
|
||||
expect(mocks.upsertAuthProfile).toHaveBeenCalledWith({
|
||||
profileId: "openai:manual",
|
||||
credential: {
|
||||
type: "token",
|
||||
provider: "openai",
|
||||
token: "tok-fresh",
|
||||
},
|
||||
agentDir: "/tmp/openclaw/agents/main",
|
||||
});
|
||||
});
|
||||
|
||||
it("writes pasted Anthropic setup-tokens and logs the preference note", async () => {
|
||||
const runtime = createRuntime();
|
||||
mocks.clackText.mockResolvedValue(`sk-ant-oat01-${"a".repeat(80)}`);
|
||||
@@ -653,48 +684,4 @@ describe("modelsAuthLoginCommand", () => {
|
||||
agentDir: "/tmp/openclaw/agents/main",
|
||||
});
|
||||
});
|
||||
|
||||
it("runs setup-token for Anthropic when the provider exposes the method", async () => {
|
||||
const runtime = createRuntime();
|
||||
const runTokenAuth = vi.fn().mockResolvedValue({
|
||||
profiles: [
|
||||
{
|
||||
profileId: "anthropic:default",
|
||||
credential: {
|
||||
type: "token",
|
||||
provider: "anthropic",
|
||||
token: `sk-ant-oat01-${"b".repeat(80)}`,
|
||||
},
|
||||
},
|
||||
],
|
||||
defaultModel: "anthropic/claude-sonnet-4-6",
|
||||
});
|
||||
mocks.resolvePluginProviders.mockReturnValue([
|
||||
{
|
||||
id: "anthropic",
|
||||
label: "Anthropic",
|
||||
auth: [
|
||||
{
|
||||
id: "setup-token",
|
||||
label: "setup-token",
|
||||
kind: "token",
|
||||
run: runTokenAuth,
|
||||
},
|
||||
],
|
||||
},
|
||||
]);
|
||||
|
||||
await modelsAuthSetupTokenCommand({ provider: "anthropic", yes: true }, runtime);
|
||||
|
||||
expect(runTokenAuth).toHaveBeenCalledOnce();
|
||||
expect(mocks.upsertAuthProfile).toHaveBeenCalledWith({
|
||||
profileId: "anthropic:default",
|
||||
credential: {
|
||||
type: "token",
|
||||
provider: "anthropic",
|
||||
token: `sk-ant-oat01-${"b".repeat(80)}`,
|
||||
},
|
||||
agentDir: "/tmp/openclaw/agents/main",
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -10,14 +10,11 @@ import {
|
||||
resolveAgentWorkspaceDir,
|
||||
resolveDefaultAgentId,
|
||||
} from "../../agents/agent-scope.js";
|
||||
import {
|
||||
clearAuthProfileCooldown,
|
||||
listProfilesForProvider,
|
||||
loadAuthProfileStoreForRuntime,
|
||||
upsertAuthProfile,
|
||||
} from "../../agents/auth-profiles.js";
|
||||
import { listProfilesForProvider, upsertAuthProfile } from "../../agents/auth-profiles/profiles.js";
|
||||
import { loadAuthProfileStoreForRuntime } from "../../agents/auth-profiles/store.js";
|
||||
import type { AuthProfileCredential } from "../../agents/auth-profiles/types.js";
|
||||
import { normalizeProviderId } from "../../agents/model-selection.js";
|
||||
import { clearAuthProfileCooldown } from "../../agents/auth-profiles/usage.js";
|
||||
import { normalizeProviderId } from "../../agents/model-selection-normalize.js";
|
||||
import { resolveDefaultAgentWorkspaceDir } from "../../agents/workspace.js";
|
||||
import { formatCliCommand } from "../../cli/command-format.js";
|
||||
import { parseDurationMs } from "../../cli/parse-duration.js";
|
||||
@@ -40,7 +37,6 @@ import { createClackPrompter } from "../../wizard/clack-prompter.js";
|
||||
import { validateAnthropicSetupToken } from "../auth-token.js";
|
||||
import { isRemoteEnvironment } from "../oauth-env.js";
|
||||
import { createVpsAwareOAuthHandlers } from "../oauth-flow.js";
|
||||
import { openUrl } from "../onboard-helpers.js";
|
||||
import {
|
||||
applyProviderAuthConfigPatch,
|
||||
applyDefaultModel,
|
||||
@@ -131,6 +127,11 @@ async function resolveModelsAuthContext(params?: {
|
||||
};
|
||||
}
|
||||
|
||||
async function resolveModelsAuthAgentDir(): Promise<string> {
|
||||
const config = await loadValidConfigOrThrow();
|
||||
return resolveAgentDir(config, resolveDefaultAgentId(config));
|
||||
}
|
||||
|
||||
function resolveRequestedProviderOrThrow(
|
||||
providers: ProviderPlugin[],
|
||||
rawProvider?: string,
|
||||
@@ -300,6 +301,7 @@ async function runProviderAuthMethod(params: {
|
||||
allowSecretRefPrompt: false,
|
||||
isRemote: isRemoteEnvironment(),
|
||||
openUrl: async (url) => {
|
||||
const { openUrl } = await import("../onboard-helpers.js");
|
||||
await openUrl(url);
|
||||
},
|
||||
oauth: {
|
||||
@@ -375,7 +377,7 @@ export async function modelsAuthPasteTokenCommand(
|
||||
},
|
||||
runtime: RuntimeEnv,
|
||||
) {
|
||||
const { agentDir } = await resolveModelsAuthContext();
|
||||
const agentDir = await resolveModelsAuthAgentDir();
|
||||
const rawProvider = normalizeOptionalString(opts.provider);
|
||||
if (!rawProvider) {
|
||||
throw new Error("Missing --provider.");
|
||||
|
||||
@@ -1,8 +1,66 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import { NON_ENV_SECRETREF_MARKER } from "../../agents/model-auth-markers.js";
|
||||
import { withEnv } from "../../test-utils/env.js";
|
||||
import { resolveProviderAuthOverview } from "./list.auth-overview.js";
|
||||
|
||||
vi.mock("../../agents/auth-profiles/display.js", () => ({
|
||||
resolveAuthProfileDisplayLabel: vi.fn(({ profileId }: { profileId: string }) => profileId),
|
||||
}));
|
||||
|
||||
vi.mock("../../agents/auth-profiles/paths.js", () => ({
|
||||
resolveAuthStorePathForDisplay: vi.fn(() => "/tmp/auth-profiles.json"),
|
||||
}));
|
||||
|
||||
vi.mock("../../agents/auth-profiles/profiles.js", () => ({
|
||||
listProfilesForProvider: vi.fn(
|
||||
(store: { profiles?: Record<string, { provider?: string }> }, provider: string) =>
|
||||
Object.keys(store.profiles ?? {}).filter(
|
||||
(profileId) => store.profiles?.[profileId]?.provider === provider,
|
||||
),
|
||||
),
|
||||
}));
|
||||
|
||||
vi.mock("../../agents/auth-profiles/usage.js", () => ({
|
||||
resolveProfileUnusableUntilForDisplay: vi.fn(() => undefined),
|
||||
}));
|
||||
|
||||
vi.mock("../../agents/model-auth.js", () => {
|
||||
const resolveConfigKey = (
|
||||
cfg: { models?: { providers?: Record<string, { apiKey?: string }> } } | undefined,
|
||||
provider: string,
|
||||
) => cfg?.models?.providers?.[provider]?.apiKey;
|
||||
|
||||
return {
|
||||
getCustomProviderApiKey: vi.fn(resolveConfigKey),
|
||||
resolveEnvApiKey: vi.fn((provider: string) => {
|
||||
if (provider !== "openai" || !process.env.OPENAI_API_KEY?.trim()) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
apiKey: process.env.OPENAI_API_KEY,
|
||||
source: "env: OPENAI_API_KEY",
|
||||
};
|
||||
}),
|
||||
resolveUsableCustomProviderApiKey: vi.fn(
|
||||
(params: {
|
||||
cfg?: { models?: { providers?: Record<string, { apiKey?: string }> } };
|
||||
provider: string;
|
||||
}) => {
|
||||
const apiKey = resolveConfigKey(params.cfg, params.provider);
|
||||
if (!apiKey || apiKey === "secretref-managed") {
|
||||
return null;
|
||||
}
|
||||
if (apiKey === "OPENAI_API_KEY") {
|
||||
return process.env.OPENAI_API_KEY?.trim()
|
||||
? { apiKey: process.env.OPENAI_API_KEY, source: "env: OPENAI_API_KEY" }
|
||||
: null;
|
||||
}
|
||||
return { apiKey, source: "models.json" };
|
||||
},
|
||||
),
|
||||
};
|
||||
});
|
||||
|
||||
function resolveOpenAiOverview(apiKey: string) {
|
||||
return resolveProviderAuthOverview({
|
||||
provider: "openai",
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
import { formatRemainingShort } from "../../agents/auth-health.js";
|
||||
import {
|
||||
type AuthProfileStore,
|
||||
listProfilesForProvider,
|
||||
resolveAuthProfileDisplayLabel,
|
||||
resolveAuthStorePathForDisplay,
|
||||
resolveProfileUnusableUntilForDisplay,
|
||||
} from "../../agents/auth-profiles.js";
|
||||
import { resolveAuthProfileDisplayLabel } from "../../agents/auth-profiles/display.js";
|
||||
import { resolveAuthStorePathForDisplay } from "../../agents/auth-profiles/paths.js";
|
||||
import { listProfilesForProvider } from "../../agents/auth-profiles/profiles.js";
|
||||
import type { AuthProfileStore } from "../../agents/auth-profiles/types.js";
|
||||
import { resolveProfileUnusableUntilForDisplay } from "../../agents/auth-profiles/usage.js";
|
||||
import { isNonSecretApiKeyMarker } from "../../agents/model-auth-markers.js";
|
||||
import {
|
||||
getCustomProviderApiKey,
|
||||
|
||||
17
src/commands/models/list.local-url.ts
Normal file
17
src/commands/models/list.local-url.ts
Normal file
@@ -0,0 +1,17 @@
|
||||
import { normalizeLowercaseStringOrEmpty } from "../../shared/string-coerce.js";
|
||||
|
||||
export const isLocalBaseUrl = (baseUrl: string) => {
|
||||
try {
|
||||
const url = new URL(baseUrl);
|
||||
const host = normalizeLowercaseStringOrEmpty(url.hostname);
|
||||
return (
|
||||
host === "localhost" ||
|
||||
host === "127.0.0.1" ||
|
||||
host === "0.0.0.0" ||
|
||||
host === "::1" ||
|
||||
host.endsWith(".local")
|
||||
);
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
40
src/commands/models/list.model-row.test.ts
Normal file
40
src/commands/models/list.model-row.test.ts
Normal file
@@ -0,0 +1,40 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import type { AuthProfileStore } from "../../agents/auth-profiles/types.js";
|
||||
import { toModelRow } from "./list.model-row.js";
|
||||
|
||||
const OPENROUTER_MODEL = {
|
||||
provider: "openrouter",
|
||||
id: "openai/gpt-5.4",
|
||||
name: "GPT-5.4 via OpenRouter",
|
||||
api: "openai-chat-completions",
|
||||
baseUrl: "https://openrouter.ai/api/v1",
|
||||
input: ["text"],
|
||||
contextWindow: 1_000_000,
|
||||
maxTokens: 128_000,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
} as const;
|
||||
|
||||
describe("toModelRow", () => {
|
||||
it("marks models available from auth profiles without loading model discovery", () => {
|
||||
const authStore: AuthProfileStore = {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"openrouter:default": {
|
||||
type: "api_key",
|
||||
provider: "openrouter",
|
||||
key: "sk-or-v1-regression-test",
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const row = toModelRow({
|
||||
model: OPENROUTER_MODEL as never,
|
||||
key: "openrouter/openai/gpt-5.4",
|
||||
tags: [],
|
||||
cfg: {},
|
||||
authStore,
|
||||
});
|
||||
|
||||
expect(row.available).toBe(true);
|
||||
});
|
||||
});
|
||||
97
src/commands/models/list.model-row.ts
Normal file
97
src/commands/models/list.model-row.ts
Normal file
@@ -0,0 +1,97 @@
|
||||
import type { Api, Model } from "@mariozechner/pi-ai";
|
||||
import type { AuthProfileStore } from "../../agents/auth-profiles/types.js";
|
||||
import { modelKey } from "../../agents/model-ref-shared.js";
|
||||
import type { OpenClawConfig } from "../../config/types.openclaw.js";
|
||||
import { isLocalBaseUrl } from "./list.local-url.js";
|
||||
import type { ModelRow } from "./list.types.js";
|
||||
|
||||
export type ModelAuthAvailabilityResolver = (params: {
|
||||
provider: string;
|
||||
cfg: OpenClawConfig;
|
||||
authStore: AuthProfileStore;
|
||||
}) => boolean;
|
||||
|
||||
function authStoreHasProviderProfile(authStore: AuthProfileStore, provider: string): boolean {
|
||||
return Object.values(authStore.profiles ?? {}).some(
|
||||
(credential) => credential.provider === provider,
|
||||
);
|
||||
}
|
||||
|
||||
export function toModelRow(params: {
|
||||
model?: Model<Api>;
|
||||
key: string;
|
||||
tags: string[];
|
||||
aliases?: string[];
|
||||
availableKeys?: Set<string>;
|
||||
cfg?: OpenClawConfig;
|
||||
authStore?: AuthProfileStore;
|
||||
allowProviderAvailabilityFallback?: boolean;
|
||||
hasAuthForProvider?: ModelAuthAvailabilityResolver;
|
||||
}): ModelRow {
|
||||
const {
|
||||
model,
|
||||
key,
|
||||
tags,
|
||||
aliases = [],
|
||||
availableKeys,
|
||||
cfg,
|
||||
authStore,
|
||||
allowProviderAvailabilityFallback = false,
|
||||
} = params;
|
||||
if (!model) {
|
||||
return {
|
||||
key,
|
||||
name: key,
|
||||
input: "-",
|
||||
contextWindow: null,
|
||||
local: null,
|
||||
available: null,
|
||||
tags: [...tags, "missing"],
|
||||
missing: true,
|
||||
};
|
||||
}
|
||||
|
||||
const input = model.input.join("+") || "text";
|
||||
const local = isLocalBaseUrl(model.baseUrl);
|
||||
const modelIsAvailable = availableKeys?.has(modelKey(model.provider, model.id)) ?? false;
|
||||
// Prefer model-level registry availability when present.
|
||||
// Fall back to provider-level auth heuristics only if registry availability isn't available,
|
||||
// or if the caller marks this as a synthetic/forward-compat model that won't appear in getAvailable().
|
||||
const available =
|
||||
availableKeys !== undefined && !allowProviderAvailabilityFallback
|
||||
? modelIsAvailable
|
||||
: modelIsAvailable ||
|
||||
(cfg && authStore
|
||||
? (
|
||||
params.hasAuthForProvider ??
|
||||
((input) => authStoreHasProviderProfile(input.authStore, input.provider))
|
||||
)({
|
||||
provider: model.provider,
|
||||
cfg,
|
||||
authStore,
|
||||
})
|
||||
: false);
|
||||
const aliasTags = aliases.length > 0 ? [`alias:${aliases.join(",")}`] : [];
|
||||
const mergedTags = new Set(tags);
|
||||
if (aliasTags.length > 0) {
|
||||
for (const tag of mergedTags) {
|
||||
if (tag === "alias" || tag.startsWith("alias:")) {
|
||||
mergedTags.delete(tag);
|
||||
}
|
||||
}
|
||||
for (const tag of aliasTags) {
|
||||
mergedTags.add(tag);
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
key,
|
||||
name: model.name || model.id,
|
||||
input,
|
||||
contextWindow: model.contextWindow ?? null,
|
||||
local,
|
||||
available,
|
||||
tags: Array.from(mergedTags),
|
||||
missing: false,
|
||||
};
|
||||
}
|
||||
@@ -1,6 +1,5 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import type { AuthProfileStore } from "../../agents/auth-profiles.js";
|
||||
import { OLLAMA_LOCAL_AUTH_MARKER } from "../../agents/model-auth-markers.js";
|
||||
import type { ModelCatalogEntry } from "../../agents/model-catalog.js";
|
||||
import type { OpenClawConfig } from "../../config/config.js";
|
||||
|
||||
@@ -18,29 +17,62 @@ const resolveSecretRefStringMock = vi.fn(async () => "resolved-secret");
|
||||
vi.mock("../../agents/model-catalog.js", () => ({
|
||||
loadModelCatalog: loadModelCatalogMock,
|
||||
}));
|
||||
vi.mock("../../agents/model-auth.js", () => ({
|
||||
hasUsableCustomProviderApiKey: (cfg: OpenClawConfig, provider: string) => {
|
||||
const raw = cfg.models?.providers?.[provider]?.apiKey;
|
||||
return typeof raw === "string" && raw.trim().length > 0 && raw !== "ollama-local";
|
||||
},
|
||||
resolveEnvApiKey: (provider: string) => {
|
||||
const keys =
|
||||
provider === "anthropic"
|
||||
? ["ANTHROPIC_API_KEY", "ANTHROPIC_OAUTH_TOKEN"]
|
||||
: provider === "zai"
|
||||
? ["ZAI_API_KEY", "Z_AI_API_KEY"]
|
||||
: [];
|
||||
const source = keys.find((key) => process.env[key]?.trim());
|
||||
return source ? { source, value: process.env[source] } : null;
|
||||
},
|
||||
}));
|
||||
vi.mock("../../agents/model-selection.js", () => {
|
||||
const normalizeProviderId = (value: string) =>
|
||||
value.trim().toLowerCase() === "z.ai" || value.trim().toLowerCase() === "z-ai"
|
||||
? "zai"
|
||||
: value.trim().toLowerCase();
|
||||
return {
|
||||
normalizeProviderId,
|
||||
findNormalizedProviderValue: (record: Record<string, unknown> | undefined, provider: string) =>
|
||||
Object.entries(record ?? {}).find(([key]) => normalizeProviderId(key) === provider)?.[1],
|
||||
parseModelRef: (raw: string, defaultProvider: string) => {
|
||||
const [provider, ...modelParts] = raw.includes("/") ? raw.split("/") : [defaultProvider, raw];
|
||||
const model = modelParts.join("/");
|
||||
return provider && model ? { provider: normalizeProviderId(provider), model } : null;
|
||||
},
|
||||
};
|
||||
});
|
||||
vi.mock("../../secrets/resolve.js", () => ({
|
||||
resolveSecretRefString: resolveSecretRefStringMock,
|
||||
}));
|
||||
vi.mock("../status-all/format.js", () => ({
|
||||
redactSecrets: (value: string) => value,
|
||||
}));
|
||||
vi.mock("./shared.js", () => ({
|
||||
DEFAULT_PROVIDER: "openai",
|
||||
formatMs: (ms: number) => `${ms}ms`,
|
||||
}));
|
||||
|
||||
vi.mock("../../agents/auth-profiles.js", async () => {
|
||||
const actual = await vi.importActual<typeof import("../../agents/auth-profiles.js")>(
|
||||
"../../agents/auth-profiles.js",
|
||||
);
|
||||
return {
|
||||
...actual,
|
||||
ensureAuthProfileStore: () => mockStore,
|
||||
listProfilesForProvider: (_store: AuthProfileStore, provider: string) =>
|
||||
Object.entries(mockStore.profiles)
|
||||
.filter(
|
||||
([, profile]) =>
|
||||
typeof profile.provider === "string" && profile.provider.toLowerCase() === provider,
|
||||
)
|
||||
.map(([profileId]) => profileId),
|
||||
resolveAuthProfileDisplayLabel: ({ profileId }: { profileId: string }) => profileId,
|
||||
resolveAuthProfileOrder: resolveAuthProfileOrderMock,
|
||||
resolveAuthProfileEligibility: resolveAuthProfileEligibilityMock,
|
||||
};
|
||||
});
|
||||
vi.mock("../../agents/auth-profiles.js", () => ({
|
||||
ensureAuthProfileStore: () => mockStore,
|
||||
listProfilesForProvider: (_store: AuthProfileStore, provider: string) =>
|
||||
Object.entries(mockStore.profiles)
|
||||
.filter(
|
||||
([, profile]) =>
|
||||
typeof profile.provider === "string" && profile.provider.toLowerCase() === provider,
|
||||
)
|
||||
.map(([profileId]) => profileId),
|
||||
resolveAuthProfileDisplayLabel: ({ profileId }: { profileId: string }) => profileId,
|
||||
resolveAuthProfileOrder: resolveAuthProfileOrderMock,
|
||||
resolveAuthProfileEligibility: resolveAuthProfileEligibilityMock,
|
||||
}));
|
||||
|
||||
const { buildProbeTargets } = await import("./list.probe.js");
|
||||
|
||||
@@ -219,7 +251,7 @@ describe("buildProbeTargets reason codes", () => {
|
||||
order: {},
|
||||
};
|
||||
await withClearedAnthropicEnv(async () => {
|
||||
const plan = await buildAnthropicPlanFromModelsJsonApiKey(OLLAMA_LOCAL_AUTH_MARKER);
|
||||
const plan = await buildAnthropicPlanFromModelsJsonApiKey("ollama-local");
|
||||
expect(plan.targets).toEqual([]);
|
||||
expect(plan.results).toEqual([]);
|
||||
});
|
||||
|
||||
@@ -1,14 +1,15 @@
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import { beforeAll, describe, expect, it, vi } from "vitest";
|
||||
import { importFreshModule } from "../../../test/helpers/import-fresh.js";
|
||||
import { mapFailoverReasonToProbeStatus } from "./list.probe.js";
|
||||
|
||||
let probeModule: typeof import("./list.probe.js");
|
||||
|
||||
describe("mapFailoverReasonToProbeStatus", () => {
|
||||
it("does not import the embedded runner on module load", async () => {
|
||||
beforeAll(async () => {
|
||||
vi.doMock("../../agents/pi-embedded.js", () => {
|
||||
throw new Error("pi-embedded should stay lazy for probe imports");
|
||||
});
|
||||
try {
|
||||
await importFreshModule<typeof import("./list.probe.js")>(
|
||||
probeModule = await importFreshModule<typeof import("./list.probe.js")>(
|
||||
import.meta.url,
|
||||
`./list.probe.js?scope=${Math.random().toString(36).slice(2)}`,
|
||||
);
|
||||
@@ -17,11 +18,13 @@ describe("mapFailoverReasonToProbeStatus", () => {
|
||||
}
|
||||
});
|
||||
|
||||
it("maps auth_permanent to auth", () => {
|
||||
expect(mapFailoverReasonToProbeStatus("auth_permanent")).toBe("auth");
|
||||
it("does not import the embedded runner on module load", async () => {
|
||||
expect(probeModule.mapFailoverReasonToProbeStatus).toBeTypeOf("function");
|
||||
});
|
||||
|
||||
it("keeps existing failover reason mappings", () => {
|
||||
it("maps failover reasons to probe statuses", () => {
|
||||
const { mapFailoverReasonToProbeStatus } = probeModule;
|
||||
expect(mapFailoverReasonToProbeStatus("auth_permanent")).toBe("auth");
|
||||
expect(mapFailoverReasonToProbeStatus("auth")).toBe("auth");
|
||||
expect(mapFailoverReasonToProbeStatus("rate_limit")).toBe("rate_limit");
|
||||
expect(mapFailoverReasonToProbeStatus("overloaded")).toBe("rate_limit");
|
||||
@@ -29,9 +32,7 @@ describe("mapFailoverReasonToProbeStatus", () => {
|
||||
expect(mapFailoverReasonToProbeStatus("timeout")).toBe("timeout");
|
||||
expect(mapFailoverReasonToProbeStatus("model_not_found")).toBe("format");
|
||||
expect(mapFailoverReasonToProbeStatus("format")).toBe("format");
|
||||
});
|
||||
|
||||
it("falls back to unknown for unrecognized values", () => {
|
||||
expect(mapFailoverReasonToProbeStatus(undefined)).toBe("unknown");
|
||||
expect(mapFailoverReasonToProbeStatus(null)).toBe("unknown");
|
||||
expect(mapFailoverReasonToProbeStatus("something_else")).toBe("unknown");
|
||||
|
||||
@@ -8,6 +8,7 @@ import {
|
||||
MODEL_AVAILABILITY_UNAVAILABLE_CODE,
|
||||
shouldFallbackToAuthHeuristics,
|
||||
} from "./list.errors.js";
|
||||
import { toModelRow as toModelRowBase } from "./list.model-row.js";
|
||||
import {
|
||||
discoverAuthStorage,
|
||||
discoverModels,
|
||||
@@ -18,7 +19,7 @@ import {
|
||||
resolveOpenClawAgentDir,
|
||||
} from "./list.runtime.js";
|
||||
import type { ModelRow } from "./list.types.js";
|
||||
import { isLocalBaseUrl, modelKey } from "./shared.js";
|
||||
import { modelKey } from "./shared.js";
|
||||
|
||||
const hasAuthForProvider = (
|
||||
provider: string,
|
||||
@@ -140,71 +141,10 @@ export async function loadModelRegistry(
|
||||
return { registry, models, availableKeys, availabilityErrorMessage };
|
||||
}
|
||||
|
||||
export function toModelRow(params: {
|
||||
model?: Model<Api>;
|
||||
key: string;
|
||||
tags: string[];
|
||||
aliases?: string[];
|
||||
availableKeys?: Set<string>;
|
||||
cfg?: OpenClawConfig;
|
||||
authStore?: AuthProfileStore;
|
||||
allowProviderAvailabilityFallback?: boolean;
|
||||
}): ModelRow {
|
||||
const {
|
||||
model,
|
||||
key,
|
||||
tags,
|
||||
aliases = [],
|
||||
availableKeys,
|
||||
cfg,
|
||||
authStore,
|
||||
allowProviderAvailabilityFallback = false,
|
||||
} = params;
|
||||
if (!model) {
|
||||
return {
|
||||
key,
|
||||
name: key,
|
||||
input: "-",
|
||||
contextWindow: null,
|
||||
local: null,
|
||||
available: null,
|
||||
tags: [...tags, "missing"],
|
||||
missing: true,
|
||||
};
|
||||
}
|
||||
|
||||
const input = model.input.join("+") || "text";
|
||||
const local = isLocalBaseUrl(model.baseUrl);
|
||||
const modelIsAvailable = availableKeys?.has(modelKey(model.provider, model.id)) ?? false;
|
||||
// Prefer model-level registry availability when present.
|
||||
// Fall back to provider-level auth heuristics only if registry availability isn't available,
|
||||
// or if the caller marks this as a synthetic/forward-compat model that won't appear in getAvailable().
|
||||
const available =
|
||||
availableKeys !== undefined && !allowProviderAvailabilityFallback
|
||||
? modelIsAvailable
|
||||
: modelIsAvailable ||
|
||||
(cfg && authStore ? hasAuthForProvider(model.provider, cfg, authStore) : false);
|
||||
const aliasTags = aliases.length > 0 ? [`alias:${aliases.join(",")}`] : [];
|
||||
const mergedTags = new Set(tags);
|
||||
if (aliasTags.length > 0) {
|
||||
for (const tag of mergedTags) {
|
||||
if (tag === "alias" || tag.startsWith("alias:")) {
|
||||
mergedTags.delete(tag);
|
||||
}
|
||||
}
|
||||
for (const tag of aliasTags) {
|
||||
mergedTags.add(tag);
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
key,
|
||||
name: model.name || model.id,
|
||||
input,
|
||||
contextWindow: model.contextWindow ?? null,
|
||||
local,
|
||||
available,
|
||||
tags: Array.from(mergedTags),
|
||||
missing: false,
|
||||
};
|
||||
export function toModelRow(params: Parameters<typeof toModelRowBase>[0]): ModelRow {
|
||||
return toModelRowBase({
|
||||
...params,
|
||||
hasAuthForProvider: ({ provider, cfg, authStore }) =>
|
||||
hasAuthForProvider(provider, cfg, authStore),
|
||||
});
|
||||
}
|
||||
|
||||
@@ -10,11 +10,9 @@ import {
|
||||
DEFAULT_OAUTH_WARN_MS,
|
||||
formatRemainingShort,
|
||||
} from "../../agents/auth-health.js";
|
||||
import {
|
||||
ensureAuthProfileStore,
|
||||
resolveAuthStorePathForDisplay,
|
||||
resolveProfileUnusableUntilForDisplay,
|
||||
} from "../../agents/auth-profiles.js";
|
||||
import { resolveAuthStorePathForDisplay } from "../../agents/auth-profiles/paths.js";
|
||||
import { ensureAuthProfileStore } from "../../agents/auth-profiles/store.js";
|
||||
import { resolveProfileUnusableUntilForDisplay } from "../../agents/auth-profiles/usage.js";
|
||||
import { resolveProviderEnvApiKeyCandidates } from "../../agents/model-auth-env-vars.js";
|
||||
import { resolveEnvApiKey } from "../../agents/model-auth.js";
|
||||
import {
|
||||
@@ -26,34 +24,19 @@ import {
|
||||
resolveDefaultModelForAgent,
|
||||
resolveModelRefFromString,
|
||||
} from "../../agents/model-selection.js";
|
||||
import { withProgressTotals } from "../../cli/progress.js";
|
||||
import { createConfigIO } from "../../config/config.js";
|
||||
import {
|
||||
resolveAgentModelFallbackValues,
|
||||
resolveAgentModelPrimaryValue,
|
||||
} from "../../config/model-input.js";
|
||||
import {
|
||||
formatUsageWindowSummary,
|
||||
loadProviderUsageSummary,
|
||||
resolveUsageProviderId,
|
||||
type UsageProviderId,
|
||||
} from "../../infra/provider-usage.js";
|
||||
import { getShellEnvAppliedKeys, shouldEnableShellEnvFallback } from "../../infra/shell-env.js";
|
||||
import { type RuntimeEnv, writeRuntimeJson } from "../../runtime.js";
|
||||
import { normalizeOptionalString } from "../../shared/string-coerce.js";
|
||||
import { getTerminalTableWidth, renderTable } from "../../terminal/table.js";
|
||||
import { colorize, theme } from "../../terminal/theme.js";
|
||||
import { shortenHomePath } from "../../utils.js";
|
||||
import { buildProviderAuthRecoveryHint } from "../provider-auth-guidance.js";
|
||||
import { resolveProviderAuthOverview } from "./list.auth-overview.js";
|
||||
import { isRich } from "./list.format.js";
|
||||
import {
|
||||
describeProbeSummary,
|
||||
formatProbeLatency,
|
||||
runAuthProbes,
|
||||
sortProbeResults,
|
||||
type AuthProbeSummary,
|
||||
} from "./list.probe.js";
|
||||
import { type AuthProbeSummary } from "./list.probe.js";
|
||||
import { loadModelsConfig } from "./load-config.js";
|
||||
import {
|
||||
DEFAULT_MODEL,
|
||||
@@ -62,6 +45,15 @@ import {
|
||||
resolveKnownAgentId,
|
||||
} from "./shared.js";
|
||||
|
||||
type ProviderUsageRuntime = typeof import("../../infra/provider-usage.js");
|
||||
|
||||
let providerUsageRuntimePromise: Promise<ProviderUsageRuntime> | undefined;
|
||||
|
||||
function loadProviderUsageRuntime(): Promise<ProviderUsageRuntime> {
|
||||
providerUsageRuntimePromise ??= import("../../infra/provider-usage.js");
|
||||
return providerUsageRuntimePromise;
|
||||
}
|
||||
|
||||
export async function modelsStatusCommand(
|
||||
opts: {
|
||||
json?: boolean;
|
||||
@@ -227,6 +219,10 @@ export async function modelsStatusCommand(
|
||||
|
||||
let probeSummary: AuthProbeSummary | undefined;
|
||||
if (opts.probe) {
|
||||
const [{ withProgressTotals }, { runAuthProbes }] = await Promise.all([
|
||||
import("../../cli/progress.js"),
|
||||
import("./list.probe.js"),
|
||||
]);
|
||||
probeSummary = await withProgressTotals(
|
||||
{ label: "Probing auth profiles…", total: 1 },
|
||||
async (update) => {
|
||||
@@ -517,6 +513,7 @@ export async function modelsStatusCommand(
|
||||
}
|
||||
|
||||
if (missingProvidersInUse.length > 0) {
|
||||
const { buildProviderAuthRecoveryHint } = await import("../provider-auth-guidance.js");
|
||||
runtime.log("");
|
||||
runtime.log(colorize(rich, theme.heading, "Missing auth"));
|
||||
for (const provider of missingProvidersInUse) {
|
||||
@@ -534,12 +531,14 @@ export async function modelsStatusCommand(
|
||||
if (oauthProfiles.length === 0) {
|
||||
runtime.log(colorize(rich, theme.muted, "- none"));
|
||||
} else {
|
||||
const { formatUsageWindowSummary, loadProviderUsageSummary, resolveUsageProviderId } =
|
||||
await loadProviderUsageRuntime();
|
||||
const usageByProvider = new Map<string, string>();
|
||||
const usageProviders = Array.from(
|
||||
new Set(
|
||||
oauthProfiles
|
||||
.map((profile) => resolveUsageProviderId(profile.provider))
|
||||
.filter((provider): provider is UsageProviderId => Boolean(provider)),
|
||||
.filter((provider): provider is NonNullable<typeof provider> => Boolean(provider)),
|
||||
),
|
||||
);
|
||||
if (usageProviders.length > 0) {
|
||||
@@ -611,6 +610,10 @@ export async function modelsStatusCommand(
|
||||
}
|
||||
|
||||
if (probeSummary) {
|
||||
const [
|
||||
{ getTerminalTableWidth, renderTable },
|
||||
{ describeProbeSummary, formatProbeLatency, sortProbeResults },
|
||||
] = await Promise.all([import("../../terminal/table.js"), import("./list.probe.js")]);
|
||||
runtime.log("");
|
||||
runtime.log(colorize(rich, theme.heading, "Auth probes"));
|
||||
if (probeSummary.results.length === 0) {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { afterAll, beforeAll, describe, expect, it, type Mock, vi } from "vitest";
|
||||
import { describe, expect, it, type Mock, vi } from "vitest";
|
||||
|
||||
const mocks = vi.hoisted(() => {
|
||||
type MockAuthProfile = { provider: string; [key: string]: unknown };
|
||||
@@ -25,6 +25,11 @@ const mocks = vi.hoisted(() => {
|
||||
refresh: "oai-refresh-1234567890",
|
||||
expires: Date.now() + 60_000,
|
||||
},
|
||||
"openai:default": {
|
||||
type: "api_key",
|
||||
provider: "openai",
|
||||
key: "abc123", // pragma: allowlist secret
|
||||
},
|
||||
} as Record<string, MockAuthProfile>,
|
||||
};
|
||||
|
||||
@@ -61,6 +66,18 @@ const mocks = vi.hoisted(() => {
|
||||
source: "env: ANTHROPIC_OAUTH_TOKEN",
|
||||
};
|
||||
}
|
||||
if (provider === "minimax") {
|
||||
return {
|
||||
apiKey: "sk-minimax-0123456789abcdefghijklmnopqrstuvwxyz", // pragma: allowlist secret
|
||||
source: "env: MINIMAX_API_KEY",
|
||||
};
|
||||
}
|
||||
if (provider === "fal") {
|
||||
return {
|
||||
apiKey: "fal_test_0123456789abcdefghijklmnopqrstuvwxyz", // pragma: allowlist secret
|
||||
source: "env: FAL_KEY",
|
||||
};
|
||||
}
|
||||
return null;
|
||||
}),
|
||||
resolveProviderEnvApiKeyCandidates: vi.fn().mockReturnValue({
|
||||
@@ -106,61 +123,91 @@ const mocks = vi.hoisted(() => {
|
||||
};
|
||||
});
|
||||
|
||||
let modelsStatusCommand: typeof import("./list.status-command.js").modelsStatusCommand;
|
||||
vi.mock("../../agents/agent-paths.js", () => ({
|
||||
resolveOpenClawAgentDir: mocks.resolveOpenClawAgentDir,
|
||||
}));
|
||||
vi.mock("../../agents/agent-scope.js", () => ({
|
||||
resolveAgentDir: mocks.resolveAgentDir,
|
||||
resolveAgentWorkspaceDir: mocks.resolveAgentWorkspaceDir,
|
||||
resolveAgentExplicitModelPrimary: mocks.resolveAgentExplicitModelPrimary,
|
||||
resolveAgentEffectiveModelPrimary: mocks.resolveAgentEffectiveModelPrimary,
|
||||
resolveAgentModelFallbacksOverride: mocks.resolveAgentModelFallbacksOverride,
|
||||
listAgentIds: mocks.listAgentIds,
|
||||
}));
|
||||
vi.mock("../../agents/auth-profiles/display.js", () => ({
|
||||
resolveAuthProfileDisplayLabel: mocks.resolveAuthProfileDisplayLabel,
|
||||
}));
|
||||
vi.mock("../../agents/auth-profiles/paths.js", () => ({
|
||||
resolveAuthStorePathForDisplay: mocks.resolveAuthStorePathForDisplay,
|
||||
}));
|
||||
vi.mock("../../agents/auth-profiles/profiles.js", () => ({
|
||||
listProfilesForProvider: mocks.listProfilesForProvider,
|
||||
}));
|
||||
vi.mock("../../agents/auth-profiles/store.js", () => ({
|
||||
ensureAuthProfileStore: mocks.ensureAuthProfileStore,
|
||||
}));
|
||||
vi.mock("../../agents/auth-profiles/usage.js", () => ({
|
||||
resolveProfileUnusableUntilForDisplay: mocks.resolveProfileUnusableUntilForDisplay,
|
||||
}));
|
||||
vi.mock("../../agents/auth-health.js", () => ({
|
||||
DEFAULT_OAUTH_WARN_MS: 86_400_000,
|
||||
buildAuthHealthSummary: vi.fn(
|
||||
({ store, warnAfterMs }: { store: typeof mocks.store; warnAfterMs: number }) => {
|
||||
const profiles = Object.entries(store.profiles).map(([profileId, profile]) => ({
|
||||
profileId,
|
||||
provider: profile.provider,
|
||||
type: profile.type ?? "api_key",
|
||||
status: profile.type === "api_key" ? "static" : "ok",
|
||||
source: "store",
|
||||
label: profileId,
|
||||
}));
|
||||
return {
|
||||
now: Date.now(),
|
||||
warnAfterMs,
|
||||
profiles,
|
||||
providers: profiles.map((profile) => ({
|
||||
provider: profile.provider,
|
||||
status: profile.status,
|
||||
profiles: [profile],
|
||||
})),
|
||||
};
|
||||
},
|
||||
),
|
||||
formatRemainingShort: vi.fn(() => "1h"),
|
||||
}));
|
||||
vi.mock("../../agents/model-auth.js", () => ({
|
||||
resolveEnvApiKey: mocks.resolveEnvApiKey,
|
||||
hasUsableCustomProviderApiKey: mocks.hasUsableCustomProviderApiKey,
|
||||
resolveUsableCustomProviderApiKey: mocks.resolveUsableCustomProviderApiKey,
|
||||
getCustomProviderApiKey: mocks.getCustomProviderApiKey,
|
||||
}));
|
||||
vi.mock("../../agents/model-auth-env-vars.js", () => ({
|
||||
resolveProviderEnvApiKeyCandidates: mocks.resolveProviderEnvApiKeyCandidates,
|
||||
listKnownProviderEnvApiKeyNames: mocks.listKnownProviderEnvApiKeyNames,
|
||||
}));
|
||||
vi.mock("../../agents/model-selection-cli.js", () => ({
|
||||
isCliProvider: vi.fn(
|
||||
(provider: string, cfg?: { agents?: { defaults?: { cliBackends?: object } } }) =>
|
||||
Object.prototype.hasOwnProperty.call(cfg?.agents?.defaults?.cliBackends ?? {}, provider),
|
||||
),
|
||||
}));
|
||||
vi.mock("../../infra/shell-env.js", () => ({
|
||||
getShellEnvAppliedKeys: mocks.getShellEnvAppliedKeys,
|
||||
shouldEnableShellEnvFallback: mocks.shouldEnableShellEnvFallback,
|
||||
}));
|
||||
vi.mock("../../config/config.js", () => ({
|
||||
createConfigIO: mocks.createConfigIO,
|
||||
}));
|
||||
vi.mock("./load-config.js", () => ({
|
||||
loadModelsConfig: vi.fn(async () => mocks.loadConfig()),
|
||||
}));
|
||||
vi.mock("../../infra/provider-usage.js", () => ({
|
||||
formatUsageWindowSummary: vi.fn().mockReturnValue("-"),
|
||||
loadProviderUsageSummary: mocks.loadProviderUsageSummary,
|
||||
resolveUsageProviderId: vi.fn((providerId: string) => providerId),
|
||||
}));
|
||||
|
||||
async function loadFreshModelsStatusCommandModuleForTest() {
|
||||
vi.resetModules();
|
||||
vi.doMock("../../agents/agent-paths.js", () => ({
|
||||
resolveOpenClawAgentDir: mocks.resolveOpenClawAgentDir,
|
||||
}));
|
||||
vi.doMock("../../agents/agent-scope.js", () => ({
|
||||
resolveAgentDir: mocks.resolveAgentDir,
|
||||
resolveAgentWorkspaceDir: mocks.resolveAgentWorkspaceDir,
|
||||
resolveAgentExplicitModelPrimary: mocks.resolveAgentExplicitModelPrimary,
|
||||
resolveAgentEffectiveModelPrimary: mocks.resolveAgentEffectiveModelPrimary,
|
||||
resolveAgentModelFallbacksOverride: mocks.resolveAgentModelFallbacksOverride,
|
||||
listAgentIds: mocks.listAgentIds,
|
||||
}));
|
||||
vi.doMock("../../agents/auth-profiles.js", () => ({
|
||||
ensureAuthProfileStore: mocks.ensureAuthProfileStore,
|
||||
listProfilesForProvider: mocks.listProfilesForProvider,
|
||||
resolveAuthProfileDisplayLabel: mocks.resolveAuthProfileDisplayLabel,
|
||||
resolveAuthStorePathForDisplay: mocks.resolveAuthStorePathForDisplay,
|
||||
resolveProfileUnusableUntilForDisplay: mocks.resolveProfileUnusableUntilForDisplay,
|
||||
}));
|
||||
vi.doMock("../../agents/model-auth.js", () => ({
|
||||
resolveEnvApiKey: mocks.resolveEnvApiKey,
|
||||
hasUsableCustomProviderApiKey: mocks.hasUsableCustomProviderApiKey,
|
||||
resolveUsableCustomProviderApiKey: mocks.resolveUsableCustomProviderApiKey,
|
||||
getCustomProviderApiKey: mocks.getCustomProviderApiKey,
|
||||
}));
|
||||
vi.doMock("../../agents/model-auth-env-vars.js", () => ({
|
||||
resolveProviderEnvApiKeyCandidates: mocks.resolveProviderEnvApiKeyCandidates,
|
||||
listKnownProviderEnvApiKeyNames: mocks.listKnownProviderEnvApiKeyNames,
|
||||
}));
|
||||
vi.doMock("../../infra/shell-env.js", () => ({
|
||||
getShellEnvAppliedKeys: mocks.getShellEnvAppliedKeys,
|
||||
shouldEnableShellEnvFallback: mocks.shouldEnableShellEnvFallback,
|
||||
}));
|
||||
vi.doMock("../../config/config.js", async () => {
|
||||
const actual =
|
||||
await vi.importActual<typeof import("../../config/config.js")>("../../config/config.js");
|
||||
return {
|
||||
...actual,
|
||||
createConfigIO: mocks.createConfigIO,
|
||||
loadConfig: mocks.loadConfig,
|
||||
};
|
||||
});
|
||||
vi.doMock("./load-config.js", () => ({
|
||||
loadModelsConfig: vi.fn(async () => mocks.loadConfig()),
|
||||
}));
|
||||
vi.doMock("../../infra/provider-usage.js", () => ({
|
||||
formatUsageWindowSummary: vi.fn().mockReturnValue("-"),
|
||||
loadProviderUsageSummary: mocks.loadProviderUsageSummary,
|
||||
resolveUsageProviderId: vi.fn((providerId: string) => providerId),
|
||||
}));
|
||||
({ modelsStatusCommand } = await import("./list.status-command.js"));
|
||||
}
|
||||
import { modelsStatusCommand } from "./list.status-command.js";
|
||||
|
||||
const defaultResolveEnvApiKeyImpl:
|
||||
| ((provider: string) => { apiKey: string; source: string } | null)
|
||||
@@ -227,23 +274,6 @@ async function withAgentScopeOverrides<T>(
|
||||
}
|
||||
|
||||
describe("modelsStatusCommand auth overview", () => {
|
||||
beforeAll(async () => {
|
||||
await loadFreshModelsStatusCommandModuleForTest();
|
||||
});
|
||||
|
||||
afterAll(() => {
|
||||
vi.doUnmock("../../agents/agent-paths.js");
|
||||
vi.doUnmock("../../agents/agent-scope.js");
|
||||
vi.doUnmock("../../agents/auth-profiles.js");
|
||||
vi.doUnmock("../../agents/model-auth.js");
|
||||
vi.doUnmock("../../agents/model-auth-env-vars.js");
|
||||
vi.doUnmock("../../infra/shell-env.js");
|
||||
vi.doUnmock("../../config/config.js");
|
||||
vi.doUnmock("./load-config.js");
|
||||
vi.doUnmock("../../infra/provider-usage.js");
|
||||
vi.resetModules();
|
||||
});
|
||||
|
||||
it("includes masked auth sources in JSON output", async () => {
|
||||
await modelsStatusCommand({ json: true }, runtime as never);
|
||||
const payload = JSON.parse(String((runtime.log as Mock).mock.calls[0]?.[0]));
|
||||
@@ -271,11 +301,25 @@ describe("modelsStatusCommand auth overview", () => {
|
||||
const openai = providers.find((p) => p.provider === "openai");
|
||||
expect(openai?.env?.source).toContain("OPENAI_API_KEY");
|
||||
expect(openai?.env?.value).toContain("...");
|
||||
expect(openai?.profiles.labels.join(" ")).toContain("...");
|
||||
expect(openai?.profiles.labels.join(" ")).not.toContain("abc123");
|
||||
expect(
|
||||
(payload.auth.oauth.providers as Array<{ provider: string }>).some(
|
||||
(provider) => provider.provider === "openai",
|
||||
(payload.auth.providersWithOAuth as string[]).some((provider) =>
|
||||
provider.startsWith("openai "),
|
||||
),
|
||||
).toBe(false);
|
||||
expect(providers).toEqual(
|
||||
expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
provider: "minimax",
|
||||
effective: expect.objectContaining({ kind: "env" }),
|
||||
}),
|
||||
expect.objectContaining({
|
||||
provider: "fal",
|
||||
effective: expect.objectContaining({ kind: "env" }),
|
||||
}),
|
||||
]),
|
||||
);
|
||||
|
||||
expect(
|
||||
(payload.auth.providersWithOAuth as string[]).some((e) => e.startsWith("anthropic")),
|
||||
@@ -285,97 +329,6 @@ describe("modelsStatusCommand auth overview", () => {
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it("does not emit raw short api-key values in JSON labels", async () => {
|
||||
const localRuntime = createRuntime();
|
||||
const shortSecret = "abc123"; // pragma: allowlist secret
|
||||
const originalProfiles = { ...mocks.store.profiles };
|
||||
mocks.store.profiles = {
|
||||
...mocks.store.profiles,
|
||||
"openai:default": {
|
||||
type: "api_key",
|
||||
provider: "openai",
|
||||
key: shortSecret,
|
||||
},
|
||||
};
|
||||
|
||||
try {
|
||||
await modelsStatusCommand({ json: true }, localRuntime as never);
|
||||
const payload = JSON.parse(String((localRuntime.log as Mock).mock.calls[0]?.[0]));
|
||||
const providers = payload.auth.providers as Array<{
|
||||
provider: string;
|
||||
profiles: { labels: string[] };
|
||||
}>;
|
||||
const openai = providers.find((p) => p.provider === "openai");
|
||||
const labels = openai?.profiles.labels ?? [];
|
||||
expect(labels.join(" ")).toContain("...");
|
||||
expect(labels.join(" ")).not.toContain(shortSecret);
|
||||
} finally {
|
||||
mocks.store.profiles = originalProfiles;
|
||||
}
|
||||
});
|
||||
|
||||
it("includes env-backed image-generation providers in effective auth output", async () => {
|
||||
const localRuntime = createRuntime();
|
||||
const originalEnvImpl = mocks.resolveEnvApiKey.getMockImplementation();
|
||||
|
||||
mocks.resolveEnvApiKey.mockImplementation((provider: string) => {
|
||||
if (provider === "openai") {
|
||||
return {
|
||||
apiKey: "sk-openai-0123456789abcdefghijklmnopqrstuvwxyz", // pragma: allowlist secret
|
||||
source: "shell env: OPENAI_API_KEY",
|
||||
};
|
||||
}
|
||||
if (provider === "anthropic") {
|
||||
return {
|
||||
apiKey: "sk-ant-oat01-ACCESS-TOKEN-1234567890", // pragma: allowlist secret
|
||||
source: "env: ANTHROPIC_OAUTH_TOKEN",
|
||||
};
|
||||
}
|
||||
if (provider === "minimax") {
|
||||
return {
|
||||
apiKey: "sk-minimax-0123456789abcdefghijklmnopqrstuvwxyz", // pragma: allowlist secret
|
||||
source: "env: MINIMAX_API_KEY",
|
||||
};
|
||||
}
|
||||
if (provider === "fal") {
|
||||
return {
|
||||
apiKey: "fal_test_0123456789abcdefghijklmnopqrstuvwxyz", // pragma: allowlist secret
|
||||
source: "env: FAL_KEY",
|
||||
};
|
||||
}
|
||||
return null;
|
||||
});
|
||||
|
||||
try {
|
||||
await modelsStatusCommand({ json: true }, localRuntime as never);
|
||||
const payload = JSON.parse(String((localRuntime.log as Mock).mock.calls[0]?.[0]));
|
||||
const providers = payload.auth.providers as Array<{
|
||||
provider: string;
|
||||
effective: { kind: string };
|
||||
}>;
|
||||
expect(providers).toEqual(
|
||||
expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
provider: "minimax",
|
||||
effective: expect.objectContaining({ kind: "env" }),
|
||||
}),
|
||||
expect.objectContaining({
|
||||
provider: "fal",
|
||||
effective: expect.objectContaining({ kind: "env" }),
|
||||
}),
|
||||
]),
|
||||
);
|
||||
} finally {
|
||||
if (originalEnvImpl) {
|
||||
mocks.resolveEnvApiKey.mockImplementation(originalEnvImpl);
|
||||
} else if (defaultResolveEnvApiKeyImpl) {
|
||||
mocks.resolveEnvApiKey.mockImplementation(defaultResolveEnvApiKeyImpl);
|
||||
} else {
|
||||
mocks.resolveEnvApiKey.mockImplementation(() => null);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("uses agent overrides and reports sources", async () => {
|
||||
const localRuntime = createRuntime();
|
||||
await withAgentScopeOverrides(
|
||||
@@ -400,7 +353,7 @@ describe("modelsStatusCommand auth overview", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("does not report cli backends as missing auth", async () => {
|
||||
it("handles cli backend and aliased provider auth summaries", async () => {
|
||||
const localRuntime = createRuntime();
|
||||
const originalLoadConfig = mocks.loadConfig.getMockImplementation();
|
||||
const originalEnvImpl = mocks.resolveEnvApiKey.getMockImplementation();
|
||||
@@ -422,6 +375,32 @@ describe("modelsStatusCommand auth overview", () => {
|
||||
const payload = JSON.parse(String((localRuntime.log as Mock).mock.calls[0]?.[0]));
|
||||
expect(payload.defaultModel).toBe("claude-cli/claude-sonnet-4-6");
|
||||
expect(payload.auth.missingProvidersInUse).toEqual([]);
|
||||
|
||||
const aliasRuntime = createRuntime();
|
||||
mocks.loadConfig.mockReturnValue({
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "z.ai/glm-4.7", fallbacks: [] },
|
||||
models: { "z.ai/glm-4.7": {} },
|
||||
},
|
||||
},
|
||||
models: { providers: { "z.ai": {} } },
|
||||
env: { shellEnv: { enabled: true } },
|
||||
});
|
||||
mocks.resolveEnvApiKey.mockImplementation((provider: string) => {
|
||||
if (provider === "zai" || provider === "z.ai" || provider === "z-ai") {
|
||||
return {
|
||||
apiKey: "sk-zai-0123456789abcdefghijklmnopqrstuvwxyz", // pragma: allowlist secret
|
||||
source: "shell env: ZAI_API_KEY",
|
||||
};
|
||||
}
|
||||
return null;
|
||||
});
|
||||
await modelsStatusCommand({ json: true }, aliasRuntime as never);
|
||||
const aliasPayload = JSON.parse(String((aliasRuntime.log as Mock).mock.calls[0]?.[0]));
|
||||
const providers = aliasPayload.auth.providers as Array<{ provider: string }>;
|
||||
expect(providers.filter((provider) => provider.provider === "zai")).toHaveLength(1);
|
||||
expect(providers.some((provider) => provider.provider === "z.ai")).toBe(false);
|
||||
} finally {
|
||||
if (originalLoadConfig) {
|
||||
mocks.loadConfig.mockImplementation(originalLoadConfig);
|
||||
@@ -436,79 +415,24 @@ describe("modelsStatusCommand auth overview", () => {
|
||||
}
|
||||
});
|
||||
|
||||
it("dedupes alias and canonical provider ids in auth provider summaries", async () => {
|
||||
const localRuntime = createRuntime();
|
||||
const originalLoadConfig = mocks.loadConfig.getMockImplementation();
|
||||
const originalResolveEnvApiKey = mocks.resolveEnvApiKey.getMockImplementation();
|
||||
|
||||
mocks.loadConfig.mockReturnValue({
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "z.ai/glm-4.7", fallbacks: [] },
|
||||
models: { "z.ai/glm-4.7": {} },
|
||||
},
|
||||
},
|
||||
models: { providers: { "z.ai": {} } },
|
||||
env: { shellEnv: { enabled: true } },
|
||||
});
|
||||
mocks.resolveEnvApiKey.mockImplementation((provider: string) => {
|
||||
if (provider === "zai" || provider === "z.ai" || provider === "z-ai") {
|
||||
return {
|
||||
apiKey: "sk-zai-0123456789abcdefghijklmnopqrstuvwxyz", // pragma: allowlist secret
|
||||
source: "shell env: ZAI_API_KEY",
|
||||
};
|
||||
}
|
||||
return null;
|
||||
});
|
||||
|
||||
try {
|
||||
await modelsStatusCommand({ json: true }, localRuntime as never);
|
||||
const payload = JSON.parse(String((localRuntime.log as Mock).mock.calls[0]?.[0]));
|
||||
const providers = payload.auth.providers as Array<{ provider: string }>;
|
||||
expect(providers.filter((provider) => provider.provider === "zai")).toHaveLength(1);
|
||||
expect(providers.some((provider) => provider.provider === "z.ai")).toBe(false);
|
||||
} finally {
|
||||
if (originalLoadConfig) {
|
||||
mocks.loadConfig.mockImplementation(originalLoadConfig);
|
||||
}
|
||||
if (originalResolveEnvApiKey) {
|
||||
mocks.resolveEnvApiKey.mockImplementation(originalResolveEnvApiKey);
|
||||
} else if (defaultResolveEnvApiKeyImpl) {
|
||||
mocks.resolveEnvApiKey.mockImplementation(defaultResolveEnvApiKeyImpl);
|
||||
} else {
|
||||
mocks.resolveEnvApiKey.mockImplementation(() => null);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("labels defaults when --agent has no overrides", async () => {
|
||||
const localRuntime = createRuntime();
|
||||
it("reports defaults source when --agent has no overrides", async () => {
|
||||
await withAgentScopeOverrides(
|
||||
{
|
||||
primary: undefined,
|
||||
fallbacks: undefined,
|
||||
},
|
||||
async () => {
|
||||
await modelsStatusCommand({ agent: "main" }, localRuntime as never);
|
||||
const output = (localRuntime.log as Mock).mock.calls
|
||||
const textRuntime = createRuntime();
|
||||
await modelsStatusCommand({ agent: "main" }, textRuntime as never);
|
||||
const output = (textRuntime.log as Mock).mock.calls
|
||||
.map((call: unknown[]) => String(call[0]))
|
||||
.join("\n");
|
||||
expect(output).toContain("Default (defaults)");
|
||||
expect(output).toContain("Fallbacks (0) (defaults)");
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it("reports defaults source in JSON when --agent has no overrides", async () => {
|
||||
const localRuntime = createRuntime();
|
||||
await withAgentScopeOverrides(
|
||||
{
|
||||
primary: undefined,
|
||||
fallbacks: undefined,
|
||||
},
|
||||
async () => {
|
||||
await modelsStatusCommand({ json: true, agent: "main" }, localRuntime as never);
|
||||
const payload = JSON.parse(String((localRuntime.log as Mock).mock.calls[0]?.[0]));
|
||||
const jsonRuntime = createRuntime();
|
||||
await modelsStatusCommand({ json: true, agent: "main" }, jsonRuntime as never);
|
||||
const payload = JSON.parse(String((jsonRuntime.log as Mock).mock.calls[0]?.[0]));
|
||||
expect(payload.modelConfig).toEqual({
|
||||
defaultSource: "defaults",
|
||||
fallbacksSource: "defaults",
|
||||
|
||||
@@ -18,7 +18,8 @@ import { toAgentModelListLike } from "../../config/model-input.js";
|
||||
import type { AgentModelEntryConfig } from "../../config/types.agent-defaults.js";
|
||||
import type { AgentModelConfig } from "../../config/types.agents-shared.js";
|
||||
import { normalizeAgentId } from "../../routing/session-key.js";
|
||||
import { normalizeLowercaseStringOrEmpty } from "../../shared/string-coerce.js";
|
||||
export { normalizeAlias } from "./alias-name.js";
|
||||
export { isLocalBaseUrl } from "./list.local-url.js";
|
||||
|
||||
export const ensureFlagCompatibility = (opts: { json?: boolean; plain?: boolean }) => {
|
||||
if (opts.json && opts.plain) {
|
||||
@@ -49,22 +50,6 @@ export const formatMs = (value?: number | null) => {
|
||||
return `${Math.round(value / 100) / 10}s`;
|
||||
};
|
||||
|
||||
export const isLocalBaseUrl = (baseUrl: string) => {
|
||||
try {
|
||||
const url = new URL(baseUrl);
|
||||
const host = normalizeLowercaseStringOrEmpty(url.hostname);
|
||||
return (
|
||||
host === "localhost" ||
|
||||
host === "127.0.0.1" ||
|
||||
host === "0.0.0.0" ||
|
||||
host === "::1" ||
|
||||
host.endsWith(".local")
|
||||
);
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
export async function loadValidConfigOrThrow(): Promise<OpenClawConfig> {
|
||||
const snapshot = await readConfigFileSnapshot();
|
||||
if (!snapshot.valid) {
|
||||
@@ -142,17 +127,6 @@ export function buildAllowlistSet(cfg: OpenClawConfig): Set<string> {
|
||||
return allowed;
|
||||
}
|
||||
|
||||
export function normalizeAlias(alias: string): string {
|
||||
const trimmed = alias.trim();
|
||||
if (!trimmed) {
|
||||
throw new Error("Alias cannot be empty.");
|
||||
}
|
||||
if (!/^[A-Za-z0-9_.:-]+$/.test(trimmed)) {
|
||||
throw new Error("Alias must use letters, numbers, dots, underscores, colons, or dashes.");
|
||||
}
|
||||
return trimmed;
|
||||
}
|
||||
|
||||
export function resolveKnownAgentId(params: {
|
||||
cfg: OpenClawConfig;
|
||||
rawAgentId?: string | null;
|
||||
|
||||
@@ -1,288 +0,0 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import { upsertApiKeyProfile } from "../plugins/provider-auth-helpers.js";
|
||||
import { captureEnv } from "../test-utils/env.js";
|
||||
|
||||
const providerEnvVarsById: Record<string, readonly string[]> = {
|
||||
"cloudflare-ai-gateway": ["CLOUDFLARE_AI_GATEWAY_API_KEY"],
|
||||
byteplus: ["BYTEPLUS_API_KEY"],
|
||||
moonshot: ["MOONSHOT_API_KEY"],
|
||||
openai: ["OPENAI_API_KEY"],
|
||||
opencode: ["OPENCODE_API_KEY"],
|
||||
"opencode-go": ["OPENCODE_API_KEY"],
|
||||
volcengine: ["VOLCANO_ENGINE_API_KEY"],
|
||||
};
|
||||
|
||||
vi.mock("../secrets/provider-env-vars.js", () => ({
|
||||
getProviderEnvVars: vi.fn((provider: string) => providerEnvVarsById[provider] ?? []),
|
||||
}));
|
||||
|
||||
type AuthTestEnv = {
|
||||
stateDir: string;
|
||||
agentDir: string;
|
||||
};
|
||||
|
||||
async function setupAuthTestEnv(prefix: string): Promise<AuthTestEnv> {
|
||||
const stateDir = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
|
||||
const agentDir = path.join(stateDir, "agent");
|
||||
process.env.OPENCLAW_STATE_DIR = stateDir;
|
||||
process.env.OPENCLAW_AGENT_DIR = agentDir;
|
||||
process.env.PI_CODING_AGENT_DIR = agentDir;
|
||||
await fs.mkdir(agentDir, { recursive: true });
|
||||
return { stateDir, agentDir };
|
||||
}
|
||||
|
||||
function createAuthTestLifecycle(envKeys: string[]) {
|
||||
const envSnapshot = captureEnv(envKeys);
|
||||
let stateDir: string | null = null;
|
||||
return {
|
||||
setStateDir(nextStateDir: string) {
|
||||
stateDir = nextStateDir;
|
||||
},
|
||||
async cleanup() {
|
||||
if (stateDir) {
|
||||
await fs.rm(stateDir, { recursive: true, force: true });
|
||||
stateDir = null;
|
||||
}
|
||||
envSnapshot.restore();
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async function readAuthProfilesForAgent<T>(agentDir: string): Promise<T> {
|
||||
const raw = await fs.readFile(path.join(agentDir, "auth-profiles.json"), "utf8");
|
||||
return JSON.parse(raw) as T;
|
||||
}
|
||||
|
||||
describe("onboard auth credentials secret refs", () => {
|
||||
const lifecycle = createAuthTestLifecycle([
|
||||
"OPENCLAW_STATE_DIR",
|
||||
"OPENCLAW_AGENT_DIR",
|
||||
"PI_CODING_AGENT_DIR",
|
||||
"MOONSHOT_API_KEY",
|
||||
"OPENAI_API_KEY",
|
||||
"CLOUDFLARE_AI_GATEWAY_API_KEY",
|
||||
"VOLCANO_ENGINE_API_KEY",
|
||||
"BYTEPLUS_API_KEY",
|
||||
"OPENCODE_API_KEY",
|
||||
]);
|
||||
|
||||
afterEach(async () => {
|
||||
await lifecycle.cleanup();
|
||||
});
|
||||
|
||||
type AuthProfileEntry = { key?: string; keyRef?: unknown; metadata?: unknown };
|
||||
|
||||
async function withAuthEnv(
|
||||
prefix: string,
|
||||
run: (env: Awaited<ReturnType<typeof setupAuthTestEnv>>) => Promise<void>,
|
||||
) {
|
||||
const env = await setupAuthTestEnv(prefix);
|
||||
lifecycle.setStateDir(env.stateDir);
|
||||
await run(env);
|
||||
}
|
||||
|
||||
async function readProfile(
|
||||
agentDir: string,
|
||||
profileId: string,
|
||||
): Promise<AuthProfileEntry | undefined> {
|
||||
const parsed = await readAuthProfilesForAgent<{
|
||||
profiles?: Record<string, AuthProfileEntry>;
|
||||
}>(agentDir);
|
||||
return parsed.profiles?.[profileId];
|
||||
}
|
||||
|
||||
async function expectStoredAuthKey(params: {
|
||||
prefix: string;
|
||||
envVar?: string;
|
||||
envValue?: string;
|
||||
profileId: string;
|
||||
apply: (agentDir: string) => Promise<void>;
|
||||
expected: AuthProfileEntry;
|
||||
absent?: Array<keyof AuthProfileEntry>;
|
||||
}) {
|
||||
await withAuthEnv(params.prefix, async (env) => {
|
||||
if (params.envVar && params.envValue !== undefined) {
|
||||
process.env[params.envVar] = params.envValue;
|
||||
}
|
||||
await params.apply(env.agentDir);
|
||||
const profile = await readProfile(env.agentDir, params.profileId);
|
||||
expect(profile).toMatchObject(params.expected);
|
||||
for (const key of params.absent ?? []) {
|
||||
expect(profile?.[key]).toBeUndefined();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
it("keeps env-backed provider keys as plaintext by default", async () => {
|
||||
await withAuthEnv("openclaw-onboard-auth-credentials-", async (env) => {
|
||||
process.env.MOONSHOT_API_KEY = "sk-moonshot-env";
|
||||
process.env.OPENAI_API_KEY = "sk-openai-env";
|
||||
|
||||
upsertApiKeyProfile({
|
||||
provider: "moonshot",
|
||||
input: "sk-moonshot-env",
|
||||
agentDir: env.agentDir,
|
||||
});
|
||||
upsertApiKeyProfile({ provider: "openai", input: "sk-openai-env", agentDir: env.agentDir });
|
||||
|
||||
const parsed = await readAuthProfilesForAgent<{
|
||||
profiles?: Record<string, AuthProfileEntry>;
|
||||
}>(env.agentDir);
|
||||
expect(parsed.profiles?.["moonshot:default"]).toMatchObject({ key: "sk-moonshot-env" });
|
||||
expect(parsed.profiles?.["moonshot:default"]?.keyRef).toBeUndefined();
|
||||
expect(parsed.profiles?.["openai:default"]).toMatchObject({ key: "sk-openai-env" });
|
||||
expect(parsed.profiles?.["openai:default"]?.keyRef).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
it("stores env-backed provider keys as keyRef in ref mode", async () => {
|
||||
await withAuthEnv("openclaw-onboard-auth-credentials-ref-", async (env) => {
|
||||
process.env.MOONSHOT_API_KEY = "sk-moonshot-env";
|
||||
process.env.OPENAI_API_KEY = "sk-openai-env";
|
||||
|
||||
upsertApiKeyProfile({
|
||||
provider: "moonshot",
|
||||
input: "sk-moonshot-env",
|
||||
agentDir: env.agentDir,
|
||||
options: { secretInputMode: "ref" }, // pragma: allowlist secret
|
||||
});
|
||||
upsertApiKeyProfile({
|
||||
provider: "openai",
|
||||
input: "sk-openai-env",
|
||||
agentDir: env.agentDir,
|
||||
options: { secretInputMode: "ref" }, // pragma: allowlist secret
|
||||
});
|
||||
|
||||
const parsed = await readAuthProfilesForAgent<{
|
||||
profiles?: Record<string, AuthProfileEntry>;
|
||||
}>(env.agentDir);
|
||||
expect(parsed.profiles?.["moonshot:default"]).toMatchObject({
|
||||
keyRef: { source: "env", provider: "default", id: "MOONSHOT_API_KEY" },
|
||||
});
|
||||
expect(parsed.profiles?.["moonshot:default"]?.key).toBeUndefined();
|
||||
expect(parsed.profiles?.["openai:default"]).toMatchObject({
|
||||
keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" },
|
||||
});
|
||||
expect(parsed.profiles?.["openai:default"]?.key).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
it("stores ${ENV} moonshot input as keyRef even when env value is unset", async () => {
|
||||
await expectStoredAuthKey({
|
||||
prefix: "openclaw-onboard-auth-credentials-inline-ref-",
|
||||
profileId: "moonshot:default",
|
||||
apply: async () => {
|
||||
upsertApiKeyProfile({ provider: "moonshot", input: "${MOONSHOT_API_KEY}" });
|
||||
},
|
||||
expected: {
|
||||
keyRef: { source: "env", provider: "default", id: "MOONSHOT_API_KEY" },
|
||||
},
|
||||
absent: ["key"],
|
||||
});
|
||||
});
|
||||
|
||||
it("keeps plaintext moonshot key when no env ref applies", async () => {
|
||||
await expectStoredAuthKey({
|
||||
prefix: "openclaw-onboard-auth-credentials-plaintext-",
|
||||
envVar: "MOONSHOT_API_KEY",
|
||||
envValue: "sk-moonshot-other",
|
||||
profileId: "moonshot:default",
|
||||
apply: async () => {
|
||||
upsertApiKeyProfile({ provider: "moonshot", input: "sk-moonshot-plaintext" });
|
||||
},
|
||||
expected: {
|
||||
key: "sk-moonshot-plaintext",
|
||||
},
|
||||
absent: ["keyRef"],
|
||||
});
|
||||
});
|
||||
|
||||
it("preserves cloudflare metadata when storing keyRef", async () => {
|
||||
const env = await setupAuthTestEnv("openclaw-onboard-auth-credentials-cloudflare-");
|
||||
lifecycle.setStateDir(env.stateDir);
|
||||
process.env.CLOUDFLARE_AI_GATEWAY_API_KEY = "cf-secret"; // pragma: allowlist secret
|
||||
|
||||
upsertApiKeyProfile({
|
||||
provider: "cloudflare-ai-gateway",
|
||||
input: "cf-secret",
|
||||
agentDir: env.agentDir,
|
||||
options: { secretInputMode: "ref" }, // pragma: allowlist secret
|
||||
metadata: {
|
||||
accountId: "account-1",
|
||||
gatewayId: "gateway-1",
|
||||
},
|
||||
});
|
||||
|
||||
const parsed = await readAuthProfilesForAgent<{
|
||||
profiles?: Record<string, { key?: string; keyRef?: unknown; metadata?: unknown }>;
|
||||
}>(env.agentDir);
|
||||
expect(parsed.profiles?.["cloudflare-ai-gateway:default"]).toMatchObject({
|
||||
keyRef: { source: "env", provider: "default", id: "CLOUDFLARE_AI_GATEWAY_API_KEY" },
|
||||
metadata: { accountId: "account-1", gatewayId: "gateway-1" },
|
||||
});
|
||||
expect(parsed.profiles?.["cloudflare-ai-gateway:default"]?.key).toBeUndefined();
|
||||
});
|
||||
|
||||
it("stores env-backed volcengine and byteplus keys as keyRef in ref mode", async () => {
|
||||
const env = await setupAuthTestEnv("openclaw-onboard-auth-credentials-volc-byte-");
|
||||
lifecycle.setStateDir(env.stateDir);
|
||||
process.env.VOLCANO_ENGINE_API_KEY = "volcengine-secret"; // pragma: allowlist secret
|
||||
process.env.BYTEPLUS_API_KEY = "byteplus-secret"; // pragma: allowlist secret
|
||||
|
||||
upsertApiKeyProfile({
|
||||
provider: "volcengine",
|
||||
input: "volcengine-secret",
|
||||
agentDir: env.agentDir,
|
||||
options: { secretInputMode: "ref" }, // pragma: allowlist secret
|
||||
});
|
||||
upsertApiKeyProfile({
|
||||
provider: "byteplus",
|
||||
input: "byteplus-secret",
|
||||
agentDir: env.agentDir,
|
||||
options: { secretInputMode: "ref" }, // pragma: allowlist secret
|
||||
});
|
||||
|
||||
const parsed = await readAuthProfilesForAgent<{
|
||||
profiles?: Record<string, { key?: string; keyRef?: unknown }>;
|
||||
}>(env.agentDir);
|
||||
|
||||
expect(parsed.profiles?.["volcengine:default"]).toMatchObject({
|
||||
keyRef: { source: "env", provider: "default", id: "VOLCANO_ENGINE_API_KEY" },
|
||||
});
|
||||
expect(parsed.profiles?.["volcengine:default"]?.key).toBeUndefined();
|
||||
|
||||
expect(parsed.profiles?.["byteplus:default"]).toMatchObject({
|
||||
keyRef: { source: "env", provider: "default", id: "BYTEPLUS_API_KEY" },
|
||||
});
|
||||
expect(parsed.profiles?.["byteplus:default"]?.key).toBeUndefined();
|
||||
});
|
||||
|
||||
it("stores shared OpenCode credentials for both runtime providers", async () => {
|
||||
const env = await setupAuthTestEnv("openclaw-onboard-auth-credentials-opencode-");
|
||||
lifecycle.setStateDir(env.stateDir);
|
||||
process.env.OPENCODE_API_KEY = "sk-opencode-env"; // pragma: allowlist secret
|
||||
|
||||
for (const provider of ["opencode", "opencode-go"] as const) {
|
||||
upsertApiKeyProfile({
|
||||
provider,
|
||||
input: "sk-opencode-env",
|
||||
agentDir: env.agentDir,
|
||||
options: { secretInputMode: "ref" }, // pragma: allowlist secret
|
||||
});
|
||||
}
|
||||
|
||||
const parsed = await readAuthProfilesForAgent<{
|
||||
profiles?: Record<string, { key?: string; keyRef?: unknown }>;
|
||||
}>(env.agentDir);
|
||||
|
||||
expect(parsed.profiles?.["opencode:default"]).toMatchObject({
|
||||
keyRef: { source: "env", provider: "default", id: "OPENCODE_API_KEY" },
|
||||
});
|
||||
expect(parsed.profiles?.["opencode-go:default"]).toMatchObject({
|
||||
keyRef: { source: "env", provider: "default", id: "OPENCODE_API_KEY" },
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -14,6 +14,62 @@ import {
|
||||
setupAuthTestEnv,
|
||||
} from "./test-wizard-helpers.js";
|
||||
|
||||
const providerEnvVarsById = vi.hoisted(
|
||||
(): Record<string, readonly string[]> => ({
|
||||
"cloudflare-ai-gateway": ["CLOUDFLARE_AI_GATEWAY_API_KEY"],
|
||||
byteplus: ["BYTEPLUS_API_KEY"],
|
||||
moonshot: ["MOONSHOT_API_KEY"],
|
||||
openai: ["OPENAI_API_KEY"],
|
||||
opencode: ["OPENCODE_API_KEY"],
|
||||
"opencode-go": ["OPENCODE_API_KEY"],
|
||||
volcengine: ["VOLCANO_ENGINE_API_KEY"],
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mock("../agents/agent-paths.js", () => ({
|
||||
resolveOpenClawAgentDir: () => process.env.OPENCLAW_AGENT_DIR ?? "/tmp/openclaw-agent",
|
||||
}));
|
||||
|
||||
vi.mock("../config/paths.js", () => ({
|
||||
resolveStateDir: () => process.env.OPENCLAW_STATE_DIR ?? "/tmp/openclaw-state",
|
||||
}));
|
||||
|
||||
vi.mock("../agents/auth-profiles/profiles.js", async () => {
|
||||
const fs = await import("node:fs");
|
||||
const path = await import("node:path");
|
||||
return {
|
||||
upsertAuthProfile: (params: { profileId: string; credential: unknown; agentDir?: string }) => {
|
||||
const agentDir = params.agentDir ?? process.env.OPENCLAW_AGENT_DIR ?? "/tmp/openclaw-agent";
|
||||
const file = path.join(agentDir, "auth-profiles.json");
|
||||
fs.mkdirSync(agentDir, { recursive: true });
|
||||
const existing = (() => {
|
||||
try {
|
||||
return JSON.parse(fs.readFileSync(file, "utf8")) as {
|
||||
version?: number;
|
||||
profiles?: Record<string, unknown>;
|
||||
};
|
||||
} catch {
|
||||
return { version: 1, profiles: {} };
|
||||
}
|
||||
})();
|
||||
fs.writeFileSync(
|
||||
file,
|
||||
`${JSON.stringify(
|
||||
{
|
||||
version: existing.version ?? 1,
|
||||
profiles: {
|
||||
...existing.profiles,
|
||||
[params.profileId]: params.credential,
|
||||
},
|
||||
},
|
||||
null,
|
||||
2,
|
||||
)}\n`,
|
||||
);
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock("../agents/provider-auth-aliases.js", () => ({
|
||||
resolveProviderIdForAuth: (provider: string) => {
|
||||
const normalized = provider.trim().toLowerCase();
|
||||
@@ -24,6 +80,10 @@ vi.mock("../agents/provider-auth-aliases.js", () => ({
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock("../secrets/provider-env-vars.js", () => ({
|
||||
getProviderEnvVars: vi.fn((provider: string) => providerEnvVarsById[provider] ?? []),
|
||||
}));
|
||||
|
||||
describe("writeOAuthCredentials", () => {
|
||||
const lifecycle = createAuthTestLifecycle([
|
||||
"OPENCLAW_STATE_DIR",
|
||||
@@ -176,6 +236,152 @@ describe("writeOAuthCredentials", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("upsertApiKeyProfile secret refs", () => {
|
||||
const lifecycle = createAuthTestLifecycle([
|
||||
"OPENCLAW_STATE_DIR",
|
||||
"OPENCLAW_AGENT_DIR",
|
||||
"PI_CODING_AGENT_DIR",
|
||||
"MOONSHOT_API_KEY",
|
||||
"OPENAI_API_KEY",
|
||||
"CLOUDFLARE_AI_GATEWAY_API_KEY",
|
||||
"VOLCANO_ENGINE_API_KEY",
|
||||
"BYTEPLUS_API_KEY",
|
||||
"OPENCODE_API_KEY",
|
||||
]);
|
||||
|
||||
type AuthProfileEntry = { key?: string; keyRef?: unknown; metadata?: unknown };
|
||||
|
||||
afterEach(async () => {
|
||||
await lifecycle.cleanup();
|
||||
});
|
||||
|
||||
async function readProfile(
|
||||
agentDir: string,
|
||||
profileId: string,
|
||||
): Promise<AuthProfileEntry | undefined> {
|
||||
const parsed = await readAuthProfilesForAgent<{
|
||||
profiles?: Record<string, AuthProfileEntry>;
|
||||
}>(agentDir);
|
||||
return parsed.profiles?.[profileId];
|
||||
}
|
||||
|
||||
it("handles plaintext, ref mode, and inline env-ref provider keys", async () => {
|
||||
const env = await setupAuthTestEnv("openclaw-onboard-auth-credentials-");
|
||||
lifecycle.setStateDir(env.stateDir);
|
||||
process.env.MOONSHOT_API_KEY = "sk-moonshot-env"; // pragma: allowlist secret
|
||||
process.env.OPENAI_API_KEY = "sk-openai-env"; // pragma: allowlist secret
|
||||
|
||||
upsertApiKeyProfile({
|
||||
provider: "moonshot",
|
||||
input: "sk-moonshot-env",
|
||||
agentDir: env.agentDir,
|
||||
});
|
||||
upsertApiKeyProfile({ provider: "openai", input: "sk-openai-env", agentDir: env.agentDir });
|
||||
|
||||
expect(await readProfile(env.agentDir, "moonshot:default")).toMatchObject({
|
||||
key: "sk-moonshot-env",
|
||||
});
|
||||
expect((await readProfile(env.agentDir, "moonshot:default"))?.keyRef).toBeUndefined();
|
||||
expect(await readProfile(env.agentDir, "openai:default")).toMatchObject({
|
||||
key: "sk-openai-env",
|
||||
});
|
||||
expect((await readProfile(env.agentDir, "openai:default"))?.keyRef).toBeUndefined();
|
||||
|
||||
upsertApiKeyProfile({
|
||||
provider: "moonshot",
|
||||
input: "sk-moonshot-env",
|
||||
agentDir: env.agentDir,
|
||||
options: { secretInputMode: "ref" }, // pragma: allowlist secret
|
||||
});
|
||||
upsertApiKeyProfile({
|
||||
provider: "openai",
|
||||
input: "sk-openai-env",
|
||||
agentDir: env.agentDir,
|
||||
options: { secretInputMode: "ref" }, // pragma: allowlist secret
|
||||
});
|
||||
upsertApiKeyProfile({
|
||||
provider: "moonshot",
|
||||
input: "${MOONSHOT_API_KEY}",
|
||||
agentDir: env.agentDir,
|
||||
profileId: "moonshot:inline",
|
||||
});
|
||||
process.env.MOONSHOT_API_KEY = "sk-moonshot-other"; // pragma: allowlist secret
|
||||
upsertApiKeyProfile({
|
||||
provider: "moonshot",
|
||||
input: "sk-moonshot-plaintext",
|
||||
agentDir: env.agentDir,
|
||||
profileId: "moonshot:plain",
|
||||
});
|
||||
|
||||
expect(await readProfile(env.agentDir, "moonshot:default")).toMatchObject({
|
||||
keyRef: { source: "env", provider: "default", id: "MOONSHOT_API_KEY" },
|
||||
});
|
||||
expect((await readProfile(env.agentDir, "moonshot:default"))?.key).toBeUndefined();
|
||||
expect(await readProfile(env.agentDir, "openai:default")).toMatchObject({
|
||||
keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" },
|
||||
});
|
||||
expect((await readProfile(env.agentDir, "openai:default"))?.key).toBeUndefined();
|
||||
expect(await readProfile(env.agentDir, "moonshot:inline")).toMatchObject({
|
||||
keyRef: { source: "env", provider: "default", id: "MOONSHOT_API_KEY" },
|
||||
});
|
||||
expect(await readProfile(env.agentDir, "moonshot:plain")).toMatchObject({
|
||||
key: "sk-moonshot-plaintext",
|
||||
});
|
||||
expect((await readProfile(env.agentDir, "moonshot:plain"))?.keyRef).toBeUndefined();
|
||||
});
|
||||
|
||||
it("stores provider-specific env refs and metadata in ref mode", async () => {
|
||||
const env = await setupAuthTestEnv("openclaw-onboard-auth-credentials-provider-ref-");
|
||||
lifecycle.setStateDir(env.stateDir);
|
||||
process.env.CLOUDFLARE_AI_GATEWAY_API_KEY = "cf-secret"; // pragma: allowlist secret
|
||||
process.env.VOLCANO_ENGINE_API_KEY = "volcengine-secret"; // pragma: allowlist secret
|
||||
process.env.BYTEPLUS_API_KEY = "byteplus-secret"; // pragma: allowlist secret
|
||||
process.env.OPENCODE_API_KEY = "sk-opencode-env"; // pragma: allowlist secret
|
||||
|
||||
upsertApiKeyProfile({
|
||||
provider: "cloudflare-ai-gateway",
|
||||
input: "cf-secret",
|
||||
agentDir: env.agentDir,
|
||||
options: { secretInputMode: "ref" }, // pragma: allowlist secret
|
||||
metadata: {
|
||||
accountId: "account-1",
|
||||
gatewayId: "gateway-1",
|
||||
},
|
||||
});
|
||||
for (const [provider, input] of [
|
||||
["volcengine", "volcengine-secret"],
|
||||
["byteplus", "byteplus-secret"],
|
||||
["opencode", "sk-opencode-env"],
|
||||
["opencode-go", "sk-opencode-env"],
|
||||
] as const) {
|
||||
upsertApiKeyProfile({
|
||||
provider,
|
||||
input,
|
||||
agentDir: env.agentDir,
|
||||
options: { secretInputMode: "ref" }, // pragma: allowlist secret
|
||||
});
|
||||
}
|
||||
|
||||
expect(await readProfile(env.agentDir, "cloudflare-ai-gateway:default")).toMatchObject({
|
||||
keyRef: { source: "env", provider: "default", id: "CLOUDFLARE_AI_GATEWAY_API_KEY" },
|
||||
metadata: { accountId: "account-1", gatewayId: "gateway-1" },
|
||||
});
|
||||
expect((await readProfile(env.agentDir, "cloudflare-ai-gateway:default"))?.key).toBeUndefined();
|
||||
expect(await readProfile(env.agentDir, "volcengine:default")).toMatchObject({
|
||||
keyRef: { source: "env", provider: "default", id: "VOLCANO_ENGINE_API_KEY" },
|
||||
});
|
||||
expect(await readProfile(env.agentDir, "byteplus:default")).toMatchObject({
|
||||
keyRef: { source: "env", provider: "default", id: "BYTEPLUS_API_KEY" },
|
||||
});
|
||||
expect(await readProfile(env.agentDir, "opencode:default")).toMatchObject({
|
||||
keyRef: { source: "env", provider: "default", id: "OPENCODE_API_KEY" },
|
||||
});
|
||||
expect(await readProfile(env.agentDir, "opencode-go:default")).toMatchObject({
|
||||
keyRef: { source: "env", provider: "default", id: "OPENCODE_API_KEY" },
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("upsertApiKeyProfile", () => {
|
||||
const lifecycle = createAuthTestLifecycle([
|
||||
"OPENCLAW_STATE_DIR",
|
||||
|
||||
421
src/commands/onboard-custom-config.test.ts
Normal file
421
src/commands/onboard-custom-config.test.ts
Normal file
@@ -0,0 +1,421 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { CONTEXT_WINDOW_HARD_MIN_TOKENS } from "../agents/context-window-guard.js";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import {
|
||||
applyCustomApiConfig,
|
||||
buildAnthropicVerificationProbeRequest,
|
||||
buildOpenAiVerificationProbeRequest,
|
||||
parseNonInteractiveCustomApiFlags,
|
||||
} from "./onboard-custom-config.js";
|
||||
|
||||
// Builds a minimal OpenClawConfig fixture for tests. With no contextWindow it
// returns an empty config; otherwise the config contains a single "custom"
// provider exposing one "foo-large" model with the given contextWindow.
function buildCustomProviderConfig(contextWindow?: number) {
  if (contextWindow === undefined) {
    return {} as OpenClawConfig;
  }
  return {
    models: {
      providers: {
        custom: {
          api: "openai-completions" as const,
          baseUrl: "https://llm.example.com/v1",
          models: [
            {
              id: "foo-large",
              name: "foo-large",
              contextWindow,
              // Larger maxTokens only when the window clears the hard minimum.
              maxTokens: contextWindow > CONTEXT_WINDOW_HARD_MIN_TOKENS ? 4096 : 1024,
              input: ["text"],
              cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
              reasoning: false,
            },
          ],
        },
      },
    },
  } as OpenClawConfig;
}
|
||||
|
||||
// Convenience wrapper: runs applyCustomApiConfig against the fixture produced
// by buildCustomProviderConfig for the given contextWindow.
function applyCustomModelConfigWithContextWindow(contextWindow?: number) {
  return applyCustomApiConfig({
    config: buildCustomProviderConfig(contextWindow),
    baseUrl: "https://llm.example.com/v1",
    modelId: "foo-large",
    compatibility: "openai",
    providerId: "custom",
  });
}
|
||||
|
||||
// NOTE(review): these `it` blocks sit at module top level (outside a describe);
// they cover the verification probe request builders.
it("uses expanded max_tokens for openai verification probes", async () => {
  const request = buildOpenAiVerificationProbeRequest({
    baseUrl: "https://example.com/v1",
    apiKey: "test-key",
    modelId: "detected-model",
  });

  expect(request.body).toMatchObject({ max_tokens: 16 });
});
it("uses azure responses-specific headers and body for openai verification probes", () => {
  const request = buildOpenAiVerificationProbeRequest({
    baseUrl: "https://my-resource.openai.azure.com",
    apiKey: "azure-test-key",
    modelId: "gpt-4.1",
  });

  // Azure OpenAI resources are probed through the Responses API with the
  // `api-key` header instead of a bearer Authorization header.
  expect(request.endpoint).toBe("https://my-resource.openai.azure.com/openai/v1/responses");
  expect(request.headers["api-key"]).toBe("azure-test-key");
  expect(request.headers.Authorization).toBeUndefined();
  expect(request.body).toEqual({
    model: "gpt-4.1",
    input: "Hi",
    max_output_tokens: 16,
    stream: false,
  });
});
it("uses Azure Foundry chat-completions probes for services.ai URLs", () => {
  const request = buildOpenAiVerificationProbeRequest({
    baseUrl: "https://my-resource.services.ai.azure.com",
    apiKey: "azure-test-key",
    modelId: "deepseek-v3-0324",
  });

  // Foundry hosts keep chat/completions but add the deployment path and api-version.
  expect(request.endpoint).toBe(
    "https://my-resource.services.ai.azure.com/openai/deployments/deepseek-v3-0324/chat/completions?api-version=2024-10-21",
  );
  expect(request.headers["api-key"]).toBe("azure-test-key");
  expect(request.headers.Authorization).toBeUndefined();
  expect(request.body).toEqual({
    model: "deepseek-v3-0324",
    messages: [{ role: "user", content: "Hi" }],
    max_tokens: 16,
    stream: false,
  });
});
it("uses expanded max_tokens for anthropic verification probes", () => {
  const request = buildAnthropicVerificationProbeRequest({
    baseUrl: "https://example.com",
    apiKey: "test-key",
    modelId: "detected-model",
  });

  // /v1 is injected for the raw probe even though config stores the bare base URL.
  expect(request.endpoint).toBe("https://example.com/v1/messages");
  expect(request.body).toMatchObject({ max_tokens: 1 });
});
|
||||
|
||||
describe("applyCustomApiConfig", () => {
  // Context-window clamping behavior for custom provider models.
  it.each([
    {
      name: "uses hard-min context window for newly added custom models",
      existingContextWindow: undefined,
      expectedContextWindow: CONTEXT_WINDOW_HARD_MIN_TOKENS,
    },
    {
      name: "upgrades existing custom model context window when below hard minimum",
      existingContextWindow: 4096,
      expectedContextWindow: CONTEXT_WINDOW_HARD_MIN_TOKENS,
    },
    {
      name: "preserves existing custom model context window when already above minimum",
      existingContextWindow: 131072,
      expectedContextWindow: 131072,
    },
  ])("$name", ({ existingContextWindow, expectedContextWindow }) => {
    const result = applyCustomModelConfigWithContextWindow(existingContextWindow);
    const model = result.config.models?.providers?.custom?.models?.find(
      (entry) => entry.id === "foo-large",
    );
    expect(model?.contextWindow).toBe(expectedContextWindow);
  });

  // Validation failures surface as thrown CustomApiError messages.
  it.each([
    {
      name: "invalid compatibility values at runtime",
      params: {
        config: {},
        baseUrl: "https://llm.example.com/v1",
        modelId: "foo-large",
        compatibility: "invalid" as unknown as "openai",
      },
      expectedMessage: 'Custom provider compatibility must be "openai" or "anthropic".',
    },
    {
      name: "explicit provider ids that normalize to empty",
      params: {
        config: {},
        baseUrl: "https://llm.example.com/v1",
        modelId: "foo-large",
        compatibility: "openai" as const,
        providerId: "!!!",
      },
      expectedMessage: "Custom provider ID must include letters, numbers, or hyphens.",
    },
  ])("rejects $name", ({ params, expectedMessage }) => {
    expect(() => applyCustomApiConfig(params)).toThrow(expectedMessage);
  });

  it("produces azure-specific config for Azure OpenAI URLs with reasoning model", () => {
    const result = applyCustomApiConfig({
      config: {},
      baseUrl: "https://user123-resource.openai.azure.com",
      modelId: "o4-mini",
      compatibility: "openai",
      apiKey: "abcd1234",
    });
    const providerId = result.providerId!;
    const provider = result.config.models?.providers?.[providerId];

    expect(provider?.baseUrl).toBe("https://user123-resource.openai.azure.com/openai/v1");
    expect(provider?.api).toBe("azure-openai-responses");
    expect(provider?.authHeader).toBe(false);
    expect(provider?.headers).toEqual({ "api-key": "abcd1234" });

    const model = provider?.models?.find((m) => m.id === "o4-mini");
    expect(model?.input).toEqual(["text", "image"]);
    expect(model?.reasoning).toBe(true);
    expect(model?.compat).toEqual({ supportsStore: false });

    // Reasoning models get a default thinking level wired into agent defaults.
    const modelRef = `${providerId}/${result.modelId}`;
    expect(result.config.agents?.defaults?.models?.[modelRef]?.params?.thinking).toBe("medium");
  });

  it("keeps selected compatibility for Azure AI Foundry URLs", () => {
    const result = applyCustomApiConfig({
      config: {},
      baseUrl: "https://my-resource.services.ai.azure.com",
      modelId: "gpt-4.1",
      compatibility: "openai",
      apiKey: "key123",
    });
    const providerId = result.providerId!;
    const provider = result.config.models?.providers?.[providerId];

    // Foundry hosts keep the chosen API; only headers/baseUrl get the azure shape.
    expect(provider?.baseUrl).toBe("https://my-resource.services.ai.azure.com/openai/v1");
    expect(provider?.api).toBe("openai-completions");
    expect(provider?.authHeader).toBe(false);
    expect(provider?.headers).toEqual({ "api-key": "key123" });

    const model = provider?.models?.find((m) => m.id === "gpt-4.1");
    expect(model?.reasoning).toBe(false);
    expect(model?.input).toEqual(["text"]);
    expect(model?.compat).toEqual({ supportsStore: false });

    const modelRef = `${providerId}/gpt-4.1`;
    expect(result.config.agents?.defaults?.models?.[modelRef]?.params?.thinking).toBeUndefined();
  });

  it("strips pre-existing deployment path from Azure URL in stored config", () => {
    const result = applyCustomApiConfig({
      config: {},
      baseUrl: "https://my-resource.openai.azure.com/openai/deployments/gpt-4",
      modelId: "gpt-4",
      compatibility: "openai",
      apiKey: "key456",
    });
    const providerId = result.providerId!;
    const provider = result.config.models?.providers?.[providerId];

    expect(provider?.baseUrl).toBe("https://my-resource.openai.azure.com/openai/v1");
  });

  it("re-onboard updates existing Azure provider instead of creating a duplicate", () => {
    const oldProviderId = "custom-my-resource-openai-azure-com";
    const result = applyCustomApiConfig({
      config: {
        models: {
          providers: {
            [oldProviderId]: {
              baseUrl: "https://my-resource.openai.azure.com/openai/deployments/gpt-4",
              api: "openai-completions",
              models: [
                {
                  id: "gpt-4",
                  name: "gpt-4",
                  contextWindow: 1,
                  maxTokens: 1,
                  input: ["text"],
                  cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
                  reasoning: false,
                },
              ],
            },
          },
        },
      },
      baseUrl: "https://my-resource.openai.azure.com",
      modelId: "gpt-4",
      compatibility: "openai",
      apiKey: "key789",
    });

    // Same host ⇒ the existing provider entry is reused, not suffixed.
    expect(result.providerId).toBe(oldProviderId);
    expect(result.providerIdRenamedFrom).toBeUndefined();
    const provider = result.config.models?.providers?.[oldProviderId];
    expect(provider?.baseUrl).toBe("https://my-resource.openai.azure.com/openai/v1");
    expect(provider?.api).toBe("azure-openai-responses");
    expect(provider?.authHeader).toBe(false);
    expect(provider?.headers).toEqual({ "api-key": "key789" });
  });

  it("renames provider id when a non-azure baseUrl differs", () => {
    const result = applyCustomApiConfig({
      config: {
        models: {
          providers: {
            custom: {
              baseUrl: "http://old.example.com/v1",
              api: "openai-completions",
              models: [
                {
                  id: "old-model",
                  name: "Old",
                  contextWindow: 1,
                  maxTokens: 1,
                  input: ["text"],
                  cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
                  reasoning: false,
                },
              ],
            },
          },
        },
      },
      baseUrl: "http://localhost:11434/v1",
      modelId: "llama3",
      compatibility: "openai",
      providerId: "custom",
    });

    // Conflicting baseUrl ⇒ a "-2" suffixed id is allocated; the old entry stays.
    expect(result.providerId).toBe("custom-2");
    expect(result.config.models?.providers?.custom).toBeDefined();
    expect(result.config.models?.providers?.["custom-2"]).toBeDefined();
  });

  it("does not add azure fields for non-azure URLs", () => {
    const result = applyCustomApiConfig({
      config: {},
      baseUrl: "https://llm.example.com/v1",
      modelId: "foo-large",
      compatibility: "openai",
      apiKey: "key123",
      providerId: "custom",
    });
    const provider = result.config.models?.providers?.custom;

    expect(provider?.api).toBe("openai-completions");
    expect(provider?.authHeader).toBeUndefined();
    expect(provider?.headers).toBeUndefined();
    expect(provider?.models?.[0]?.reasoning).toBe(false);
    expect(provider?.models?.[0]?.input).toEqual(["text"]);
    expect(provider?.models?.[0]?.compat).toBeUndefined();
    expect(
      result.config.agents?.defaults?.models?.["custom/foo-large"]?.params?.thinking,
    ).toBeUndefined();
  });

  it("re-onboard preserves user-customized fields for non-azure models", () => {
    const result = applyCustomApiConfig({
      config: {
        models: {
          providers: {
            custom: {
              baseUrl: "https://llm.example.com/v1",
              api: "openai-completions",
              models: [
                {
                  id: "foo-large",
                  name: "My Custom Model",
                  reasoning: true,
                  input: ["text", "image"],
                  cost: { input: 1, output: 2, cacheRead: 0, cacheWrite: 0 },
                  contextWindow: 131072,
                  maxTokens: 16384,
                },
              ],
            },
          },
        },
      } as OpenClawConfig,
      baseUrl: "https://llm.example.com/v1",
      modelId: "foo-large",
      compatibility: "openai",
      apiKey: "key",
      providerId: "custom",
    });
    const model = result.config.models?.providers?.custom?.models?.find(
      (m) => m.id === "foo-large",
    );
    // User edits to name/reasoning/input/cost/limits survive re-onboarding.
    expect(model?.name).toBe("My Custom Model");
    expect(model?.reasoning).toBe(true);
    expect(model?.input).toEqual(["text", "image"]);
    expect(model?.cost).toEqual({ input: 1, output: 2, cacheRead: 0, cacheWrite: 0 });
    expect(model?.maxTokens).toBe(16384);
    expect(model?.contextWindow).toBe(131072);
  });

  it("preserves existing per-model thinking when already set for azure reasoning model", () => {
    const providerId = "custom-my-resource-openai-azure-com";
    const modelRef = `${providerId}/o3-mini`;
    const result = applyCustomApiConfig({
      config: {
        agents: {
          defaults: {
            models: {
              [modelRef]: { params: { thinking: "high" } },
            },
          },
        },
      } as OpenClawConfig,
      baseUrl: "https://my-resource.openai.azure.com",
      modelId: "o3-mini",
      compatibility: "openai",
      apiKey: "key",
    });
    expect(result.config.agents?.defaults?.models?.[modelRef]?.params?.thinking).toBe("high");
  });
});
|
||||
|
||||
describe("parseNonInteractiveCustomApiFlags", () => {
  it("parses required flags and defaults compatibility to openai", () => {
    const result = parseNonInteractiveCustomApiFlags({
      baseUrl: " https://llm.example.com/v1 ",
      modelId: " foo-large ",
      apiKey: " custom-test-key ",
      providerId: " my-custom ",
    });

    // All values are trimmed; compatibility falls back to "openai".
    expect(result).toEqual({
      baseUrl: "https://llm.example.com/v1",
      modelId: "foo-large",
      compatibility: "openai",
      apiKey: "custom-test-key", // pragma: allowlist secret
      providerId: "my-custom",
    });
  });

  it.each([
    {
      name: "missing required flags",
      flags: { baseUrl: "https://llm.example.com/v1" },
      expectedMessage: 'Auth choice "custom-api-key" requires a base URL and model ID.',
    },
    {
      name: "invalid compatibility values",
      flags: {
        baseUrl: "https://llm.example.com/v1",
        modelId: "foo-large",
        compatibility: "xmlrpc",
      },
      expectedMessage: 'Invalid --custom-compatibility (use "openai" or "anthropic").',
    },
    {
      name: "invalid explicit provider ids",
      flags: {
        baseUrl: "https://llm.example.com/v1",
        modelId: "foo-large",
        providerId: "!!!",
      },
      expectedMessage: "Custom provider ID must include letters, numbers, or hyphens.",
    },
  ])("rejects $name", ({ flags, expectedMessage }) => {
    expect(() => parseNonInteractiveCustomApiFlags(flags)).toThrow(expectedMessage);
  });
});
|
||||
608
src/commands/onboard-custom-config.ts
Normal file
608
src/commands/onboard-custom-config.ts
Normal file
@@ -0,0 +1,608 @@
|
||||
import { CONTEXT_WINDOW_HARD_MIN_TOKENS } from "../agents/context-window-guard.js";
|
||||
import { DEFAULT_PROVIDER } from "../agents/defaults.js";
|
||||
import { buildModelAliasIndex, modelKey } from "../agents/model-selection.js";
|
||||
import type { ModelProviderConfig } from "../config/types.models.js";
|
||||
import type { OpenClawConfig } from "../config/types.openclaw.js";
|
||||
import { isSecretRef, type SecretInput } from "../config/types.secrets.js";
|
||||
import { applyPrimaryModel } from "../plugins/provider-model-primary.js";
|
||||
import {
|
||||
normalizeLowercaseStringOrEmpty,
|
||||
normalizeOptionalLowercaseString,
|
||||
normalizeOptionalString,
|
||||
} from "../shared/string-coerce.js";
|
||||
import { normalizeOptionalSecretInput } from "../utils/normalize-secret-input.js";
|
||||
import { normalizeAlias } from "./models/alias-name.js";
|
||||
|
||||
// Fallback model limits applied to non-Azure custom providers.
const DEFAULT_CONTEXT_WINDOW = CONTEXT_WINDOW_HARD_MIN_TOKENS;
const DEFAULT_MAX_TOKENS = 4096;
// Azure OpenAI uses the Responses API which supports larger defaults
const AZURE_DEFAULT_CONTEXT_WINDOW = 400_000;
const AZURE_DEFAULT_MAX_TOKENS = 16_384;
||||
|
||||
function normalizeContextWindowForCustomModel(value: unknown): number {
|
||||
const parsed = typeof value === "number" && Number.isFinite(value) ? Math.floor(value) : 0;
|
||||
return parsed >= CONTEXT_WINDOW_HARD_MIN_TOKENS ? parsed : CONTEXT_WINDOW_HARD_MIN_TOKENS;
|
||||
}
|
||||
|
||||
function isAzureFoundryUrl(baseUrl: string): boolean {
|
||||
try {
|
||||
const url = new URL(baseUrl);
|
||||
const host = normalizeLowercaseStringOrEmpty(url.hostname);
|
||||
return host.endsWith(".services.ai.azure.com");
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function isAzureOpenAiUrl(baseUrl: string): boolean {
|
||||
try {
|
||||
const url = new URL(baseUrl);
|
||||
const host = normalizeLowercaseStringOrEmpty(url.hostname);
|
||||
return host.endsWith(".openai.azure.com");
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function isAzureUrl(baseUrl: string): boolean {
|
||||
return isAzureFoundryUrl(baseUrl) || isAzureOpenAiUrl(baseUrl);
|
||||
}
|
||||
|
||||
/**
|
||||
* Transforms an Azure AI Foundry/OpenAI URL to include the deployment path.
|
||||
* Azure requires: https://host/openai/deployments/<model-id>/chat/completions?api-version=2024-xx-xx-preview
|
||||
* But we can't add query params here, so we just add the path prefix.
|
||||
* The api-version will be handled by the Azure OpenAI client or as a query param.
|
||||
*
|
||||
* Example:
|
||||
* https://my-resource.services.ai.azure.com + gpt-5.4-nano
|
||||
* => https://my-resource.services.ai.azure.com/openai/deployments/gpt-5.4-nano
|
||||
*/
|
||||
function transformAzureUrl(baseUrl: string, modelId: string): string {
|
||||
const normalizedUrl = baseUrl.endsWith("/") ? baseUrl.slice(0, -1) : baseUrl;
|
||||
// Check if the URL already includes the deployment path
|
||||
if (normalizedUrl.includes("/openai/deployments/")) {
|
||||
return normalizedUrl;
|
||||
}
|
||||
return `${normalizedUrl}/openai/deployments/${modelId}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Transforms an Azure URL into the base URL stored in config.
|
||||
*
|
||||
* Example:
|
||||
* https://my-resource.openai.azure.com
|
||||
* => https://my-resource.openai.azure.com/openai/v1
|
||||
*/
|
||||
function transformAzureConfigUrl(baseUrl: string): string {
|
||||
const normalizedUrl = baseUrl.endsWith("/") ? baseUrl.slice(0, -1) : baseUrl;
|
||||
if (normalizedUrl.endsWith("/openai/v1")) {
|
||||
return normalizedUrl;
|
||||
}
|
||||
// Strip a full deployment path back to the base origin
|
||||
const deploymentIdx = normalizedUrl.indexOf("/openai/deployments/");
|
||||
const base = deploymentIdx !== -1 ? normalizedUrl.slice(0, deploymentIdx) : normalizedUrl;
|
||||
return `${base}/openai/v1`;
|
||||
}
|
||||
|
||||
function hasSameHost(a: string, b: string): boolean {
|
||||
try {
|
||||
return (
|
||||
normalizeLowercaseStringOrEmpty(new URL(a).hostname) ===
|
||||
normalizeLowercaseStringOrEmpty(new URL(b).hostname)
|
||||
);
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/** Wire compatibility of a custom provider endpoint. */
export type CustomApiCompatibility = "openai" | "anthropic";
/** Result of applying a custom API configuration to a config object. */
export type CustomApiResult = {
  config: OpenClawConfig;
  providerId?: string;
  modelId?: string;
  // Set when the requested provider id collided and a suffixed id was used instead.
  providerIdRenamedFrom?: string;
};

/** Inputs for applyCustomApiConfig; providerId and alias are optional overrides. */
export type ApplyCustomApiConfigParams = {
  config: OpenClawConfig;
  baseUrl: string;
  modelId: string;
  compatibility: CustomApiCompatibility;
  apiKey?: SecretInput;
  providerId?: string;
  alias?: string;
};

/** Raw CLI flag values before validation/trimming. */
export type ParseNonInteractiveCustomApiFlagsParams = {
  baseUrl?: string;
  modelId?: string;
  compatibility?: string;
  apiKey?: string;
  providerId?: string;
};

/** Validated, trimmed CLI flags for non-interactive custom-API onboarding. */
export type ParsedNonInteractiveCustomApiFlags = {
  baseUrl: string;
  modelId: string;
  compatibility: CustomApiCompatibility;
  apiKey?: string;
  providerId?: string;
};

/** Machine-readable reasons a CustomApiError may carry. */
export type CustomApiErrorCode =
  | "missing_required"
  | "invalid_compatibility"
  | "invalid_base_url"
  | "invalid_model_id"
  | "invalid_provider_id"
  | "invalid_alias";
|
||||
|
||||
export class CustomApiError extends Error {
|
||||
readonly code: CustomApiErrorCode;
|
||||
|
||||
constructor(code: CustomApiErrorCode, message: string) {
|
||||
super(message);
|
||||
this.name = "CustomApiError";
|
||||
this.code = code;
|
||||
}
|
||||
}
|
||||
|
||||
/** Inputs for resolving a stable provider id for a custom endpoint. */
export type ResolveCustomProviderIdParams = {
  config: OpenClawConfig;
  baseUrl: string;
  providerId?: string;
};

/** Resolved provider id; providerIdRenamedFrom is present when a suffix was needed. */
export type ResolvedCustomProviderId = {
  providerId: string;
  providerIdRenamedFrom?: string;
};
|
||||
|
||||
export function normalizeEndpointId(raw: string): string {
|
||||
const trimmed = normalizeOptionalLowercaseString(raw);
|
||||
if (!trimmed) {
|
||||
return "";
|
||||
}
|
||||
return trimmed.replace(/[^a-z0-9-]+/g, "-").replace(/^-+|-+$/g, "");
|
||||
}
|
||||
|
||||
export function buildEndpointIdFromUrl(baseUrl: string): string {
|
||||
try {
|
||||
const url = new URL(baseUrl);
|
||||
const host = normalizeLowercaseStringOrEmpty(url.hostname.replace(/[^a-z0-9]+/gi, "-"));
|
||||
const port = url.port ? `-${url.port}` : "";
|
||||
const candidate = `custom-${host}${port}`;
|
||||
return normalizeEndpointId(candidate) || "custom";
|
||||
} catch {
|
||||
return "custom";
|
||||
}
|
||||
}
|
||||
|
||||
function resolveUniqueEndpointId(params: {
|
||||
requestedId: string;
|
||||
baseUrl: string;
|
||||
providers: Record<string, ModelProviderConfig | undefined>;
|
||||
}) {
|
||||
const normalized = normalizeEndpointId(params.requestedId) || "custom";
|
||||
const existing = params.providers[normalized];
|
||||
if (
|
||||
!existing?.baseUrl ||
|
||||
existing.baseUrl === params.baseUrl ||
|
||||
(isAzureUrl(params.baseUrl) && hasSameHost(existing.baseUrl, params.baseUrl))
|
||||
) {
|
||||
return { providerId: normalized, renamed: false };
|
||||
}
|
||||
let suffix = 2;
|
||||
let candidate = `${normalized}-${suffix}`;
|
||||
while (params.providers[candidate]) {
|
||||
suffix += 1;
|
||||
candidate = `${normalized}-${suffix}`;
|
||||
}
|
||||
return { providerId: candidate, renamed: true };
|
||||
}
|
||||
|
||||
export function resolveCustomModelAliasError(params: {
|
||||
raw: string;
|
||||
cfg: OpenClawConfig;
|
||||
modelRef: string;
|
||||
}): string | undefined {
|
||||
const trimmed = params.raw.trim();
|
||||
if (!trimmed) {
|
||||
return undefined;
|
||||
}
|
||||
let normalized: string;
|
||||
try {
|
||||
normalized = normalizeAlias(trimmed);
|
||||
} catch (err) {
|
||||
return err instanceof Error ? err.message : "Alias is invalid.";
|
||||
}
|
||||
const aliasIndex = buildModelAliasIndex({
|
||||
cfg: params.cfg,
|
||||
defaultProvider: DEFAULT_PROVIDER,
|
||||
});
|
||||
const aliasKey = normalizeLowercaseStringOrEmpty(normalized);
|
||||
const existing = aliasIndex.byAlias.get(aliasKey);
|
||||
if (!existing) {
|
||||
return undefined;
|
||||
}
|
||||
const existingKey = modelKey(existing.ref.provider, existing.ref.model);
|
||||
if (existingKey === params.modelRef) {
|
||||
return undefined;
|
||||
}
|
||||
return `Alias ${normalized} already points to ${existingKey}.`;
|
||||
}
|
||||
|
||||
function buildAzureOpenAiHeaders(apiKey: string) {
|
||||
const headers: Record<string, string> = {};
|
||||
if (apiKey) {
|
||||
headers["api-key"] = apiKey;
|
||||
}
|
||||
return headers;
|
||||
}
|
||||
|
||||
function buildOpenAiHeaders(apiKey: string) {
|
||||
const headers: Record<string, string> = {};
|
||||
if (apiKey) {
|
||||
headers.Authorization = `Bearer ${apiKey}`;
|
||||
}
|
||||
return headers;
|
||||
}
|
||||
|
||||
function buildAnthropicHeaders(apiKey: string) {
|
||||
const headers: Record<string, string> = {
|
||||
"anthropic-version": "2023-06-01",
|
||||
};
|
||||
if (apiKey) {
|
||||
headers["x-api-key"] = apiKey;
|
||||
}
|
||||
return headers;
|
||||
}
|
||||
|
||||
/** A raw HTTP probe used to verify a custom endpoint + key combination. */
export type VerificationRequest = {
  endpoint: string;
  headers: Record<string, string>;
  body: Record<string, unknown>;
};
|
||||
|
||||
export function normalizeOptionalProviderApiKey(value: unknown): SecretInput | undefined {
|
||||
if (isSecretRef(value)) {
|
||||
return value;
|
||||
}
|
||||
return normalizeOptionalSecretInput(value);
|
||||
}
|
||||
|
||||
function resolveVerificationEndpoint(params: {
|
||||
baseUrl: string;
|
||||
modelId: string;
|
||||
endpointPath: "chat/completions" | "messages";
|
||||
}) {
|
||||
const resolvedUrl = isAzureUrl(params.baseUrl)
|
||||
? transformAzureUrl(params.baseUrl, params.modelId)
|
||||
: params.baseUrl;
|
||||
const endpointUrl = new URL(
|
||||
params.endpointPath,
|
||||
resolvedUrl.endsWith("/") ? resolvedUrl : `${resolvedUrl}/`,
|
||||
);
|
||||
if (isAzureUrl(params.baseUrl)) {
|
||||
endpointUrl.searchParams.set("api-version", "2024-10-21");
|
||||
}
|
||||
return endpointUrl.href;
|
||||
}
|
||||
|
||||
export function buildOpenAiVerificationProbeRequest(params: {
|
||||
baseUrl: string;
|
||||
apiKey: string;
|
||||
modelId: string;
|
||||
}): VerificationRequest {
|
||||
const isBaseUrlAzureUrl = isAzureUrl(params.baseUrl);
|
||||
const headers = isBaseUrlAzureUrl
|
||||
? buildAzureOpenAiHeaders(params.apiKey)
|
||||
: buildOpenAiHeaders(params.apiKey);
|
||||
if (isAzureOpenAiUrl(params.baseUrl)) {
|
||||
const endpoint = new URL(
|
||||
"responses",
|
||||
transformAzureConfigUrl(params.baseUrl).replace(/\/?$/, "/"),
|
||||
).href;
|
||||
return {
|
||||
endpoint,
|
||||
headers,
|
||||
body: {
|
||||
model: params.modelId,
|
||||
input: "Hi",
|
||||
max_output_tokens: 16,
|
||||
stream: false,
|
||||
},
|
||||
};
|
||||
}
|
||||
const endpoint = resolveVerificationEndpoint({
|
||||
baseUrl: params.baseUrl,
|
||||
modelId: params.modelId,
|
||||
endpointPath: "chat/completions",
|
||||
});
|
||||
return {
|
||||
endpoint,
|
||||
headers,
|
||||
body: {
|
||||
model: params.modelId,
|
||||
messages: [{ role: "user", content: "Hi" }],
|
||||
// Recent OpenAI-family endpoints reject probes below 16 tokens.
|
||||
max_tokens: 16,
|
||||
stream: false,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export function buildAnthropicVerificationProbeRequest(params: {
|
||||
baseUrl: string;
|
||||
apiKey: string;
|
||||
modelId: string;
|
||||
}): VerificationRequest {
|
||||
// Use a base URL with /v1 injected for this raw fetch only. The rest of the app uses the
|
||||
// Anthropic client, which appends /v1 itself; config should store the base URL
|
||||
// without /v1 to avoid /v1/v1/messages at runtime. See docs/gateway/configuration-reference.md.
|
||||
const baseUrlForRequest = /\/v1\/?$/.test(params.baseUrl.trim())
|
||||
? params.baseUrl.trim()
|
||||
: params.baseUrl.trim().replace(/\/?$/, "") + "/v1";
|
||||
const endpoint = resolveVerificationEndpoint({
|
||||
baseUrl: baseUrlForRequest,
|
||||
modelId: params.modelId,
|
||||
endpointPath: "messages",
|
||||
});
|
||||
return {
|
||||
endpoint,
|
||||
headers: buildAnthropicHeaders(params.apiKey),
|
||||
body: {
|
||||
model: params.modelId,
|
||||
max_tokens: 1,
|
||||
messages: [{ role: "user", content: "Hi" }],
|
||||
stream: false,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function resolveProviderApi(
|
||||
compatibility: CustomApiCompatibility,
|
||||
): "openai-completions" | "anthropic-messages" {
|
||||
return compatibility === "anthropic" ? "anthropic-messages" : "openai-completions";
|
||||
}
|
||||
|
||||
function parseCustomApiCompatibility(raw?: string): CustomApiCompatibility {
|
||||
const compatibilityRaw = normalizeOptionalLowercaseString(raw);
|
||||
if (!compatibilityRaw) {
|
||||
return "openai";
|
||||
}
|
||||
if (compatibilityRaw !== "openai" && compatibilityRaw !== "anthropic") {
|
||||
throw new CustomApiError(
|
||||
"invalid_compatibility",
|
||||
'Invalid --custom-compatibility (use "openai" or "anthropic").',
|
||||
);
|
||||
}
|
||||
return compatibilityRaw;
|
||||
}
|
||||
|
||||
export function resolveCustomProviderId(
|
||||
params: ResolveCustomProviderIdParams,
|
||||
): ResolvedCustomProviderId {
|
||||
const providers = params.config.models?.providers ?? {};
|
||||
const baseUrl = params.baseUrl.trim();
|
||||
const explicitProviderId = params.providerId?.trim();
|
||||
if (explicitProviderId && !normalizeEndpointId(explicitProviderId)) {
|
||||
throw new CustomApiError(
|
||||
"invalid_provider_id",
|
||||
"Custom provider ID must include letters, numbers, or hyphens.",
|
||||
);
|
||||
}
|
||||
const requestedProviderId = explicitProviderId || buildEndpointIdFromUrl(baseUrl);
|
||||
const providerIdResult = resolveUniqueEndpointId({
|
||||
requestedId: requestedProviderId,
|
||||
baseUrl,
|
||||
providers,
|
||||
});
|
||||
|
||||
return {
|
||||
providerId: providerIdResult.providerId,
|
||||
...(providerIdResult.renamed
|
||||
? {
|
||||
providerIdRenamedFrom: normalizeEndpointId(requestedProviderId) || "custom",
|
||||
}
|
||||
: {}),
|
||||
};
|
||||
}
|
||||
|
||||
export function parseNonInteractiveCustomApiFlags(
|
||||
params: ParseNonInteractiveCustomApiFlagsParams,
|
||||
): ParsedNonInteractiveCustomApiFlags {
|
||||
const baseUrl = normalizeOptionalString(params.baseUrl) ?? "";
|
||||
const modelId = normalizeOptionalString(params.modelId) ?? "";
|
||||
if (!baseUrl || !modelId) {
|
||||
throw new CustomApiError(
|
||||
"missing_required",
|
||||
[
|
||||
'Auth choice "custom-api-key" requires a base URL and model ID.',
|
||||
"Use --custom-base-url and --custom-model-id.",
|
||||
].join("\n"),
|
||||
);
|
||||
}
|
||||
|
||||
const apiKey = normalizeOptionalString(params.apiKey);
|
||||
const providerId = normalizeOptionalString(params.providerId);
|
||||
if (providerId && !normalizeEndpointId(providerId)) {
|
||||
throw new CustomApiError(
|
||||
"invalid_provider_id",
|
||||
"Custom provider ID must include letters, numbers, or hyphens.",
|
||||
);
|
||||
}
|
||||
return {
|
||||
baseUrl,
|
||||
modelId,
|
||||
compatibility: parseCustomApiCompatibility(params.compatibility),
|
||||
...(apiKey ? { apiKey } : {}),
|
||||
...(providerId ? { providerId } : {}),
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Validates custom-provider onboarding input and merges the provider + model
 * into the config, returning the updated config and the resolved ids.
 *
 * Azure hosts get special handling: the stored base URL is rewritten to
 * ".../openai/v1", auth moves to an "api-key" header (Authorization disabled),
 * and *.openai.azure.com hosts are pinned to the Responses API.
 *
 * @throws CustomApiError when the base URL, compatibility, model ID,
 *   provider ID, or alias is invalid.
 */
export function applyCustomApiConfig(params: ApplyCustomApiConfigParams): CustomApiResult {
  const baseUrl = normalizeOptionalString(params.baseUrl) ?? "";
  if (!URL.canParse(baseUrl)) {
    throw new CustomApiError("invalid_base_url", "Custom provider base URL must be a valid URL.");
  }

  // Runtime guard: callers may pass an unvalidated string despite the type.
  if (params.compatibility !== "openai" && params.compatibility !== "anthropic") {
    throw new CustomApiError(
      "invalid_compatibility",
      'Custom provider compatibility must be "openai" or "anthropic".',
    );
  }

  const modelId = normalizeOptionalString(params.modelId) ?? "";
  if (!modelId) {
    throw new CustomApiError("invalid_model_id", "Custom provider model ID is required.");
  }

  const isAzure = isAzureUrl(baseUrl);
  const isAzureOpenAi = isAzureOpenAiUrl(baseUrl);
  // Azure URLs are normalized (deployment paths stripped, "/openai/v1" appended)
  // before being stored in config; other URLs are stored as entered.
  const resolvedBaseUrl = isAzure ? transformAzureConfigUrl(baseUrl) : baseUrl;

  // May rename the provider id when an explicit id collides with an existing
  // provider pointing at a different base URL; may also throw on invalid ids.
  const providerIdResult = resolveCustomProviderId({
    config: params.config,
    baseUrl: resolvedBaseUrl,
    providerId: params.providerId,
  });
  const providerId = providerIdResult.providerId;
  const providers = params.config.models?.providers ?? {};

  const modelRef = modelKey(providerId, modelId);
  const alias = normalizeOptionalString(params.alias) ?? "";
  const aliasError = resolveCustomModelAliasError({
    raw: alias,
    cfg: params.config,
    modelRef,
  });
  if (aliasError) {
    throw new CustomApiError("invalid_alias", aliasError);
  }

  const existingProvider = providers[providerId];
  const existingModels = Array.isArray(existingProvider?.models) ? existingProvider.models : [];
  const hasModel = existingModels.some((model) => model.id === modelId);
  // Heuristic: o1/o3/o4 families or gpt-5+ names are treated as reasoning
  // models on Azure (enables image input + "thinking" defaults below).
  const isLikelyReasoningModel = isAzure && /\b(o[134]|gpt-([5-9]|\d{2,}))\b/i.test(modelId);
  // Azure gets larger default limits (Responses API) and store disabled;
  // non-Azure models get conservative defaults.
  const nextModel = isAzure
    ? {
        id: modelId,
        name: `${modelId} (Custom Provider)`,
        contextWindow: AZURE_DEFAULT_CONTEXT_WINDOW,
        maxTokens: AZURE_DEFAULT_MAX_TOKENS,
        input: isLikelyReasoningModel
          ? (["text", "image"] as Array<"text" | "image">)
          : (["text"] as ["text"]),
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
        reasoning: isLikelyReasoningModel,
        compat: { supportsStore: false },
      }
    : {
        id: modelId,
        name: `${modelId} (Custom Provider)`,
        contextWindow: DEFAULT_CONTEXT_WINDOW,
        maxTokens: DEFAULT_MAX_TOKENS,
        input: ["text"] as ["text"],
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
        reasoning: false,
      };
  // Re-onboarding an existing model: Azure overwrites with fresh defaults,
  // non-Azure preserves user customizations (name, cost, maxTokens, ...);
  // the context window is always clamped up to the hard minimum.
  const mergedModels = hasModel
    ? existingModels.map((model) =>
        model.id === modelId
          ? {
              ...model,
              ...(isAzure ? nextModel : {}),
              name: model.name ?? nextModel.name,
              cost: model.cost ?? nextModel.cost,
              contextWindow: normalizeContextWindowForCustomModel(model.contextWindow),
              maxTokens: model.maxTokens ?? nextModel.maxTokens,
            }
          : model,
      )
    : [...existingModels, nextModel];
  // Pull the key out of the existing provider so a freshly supplied key can
  // replace it; fall back to the existing key when none was provided.
  const { apiKey: existingApiKey, ...existingProviderRest } = existingProvider ?? {};
  const normalizedApiKey =
    normalizeOptionalProviderApiKey(params.apiKey) ??
    normalizeOptionalProviderApiKey(existingApiKey);

  // Azure OpenAI hosts always use the Responses API regardless of the
  // selected compatibility; everything else follows the compatibility choice.
  const providerApi = isAzureOpenAi
    ? ("azure-openai-responses" as const)
    : resolveProviderApi(params.compatibility);
  const azureHeaders = isAzure && normalizedApiKey ? { "api-key": normalizedApiKey } : undefined;

  let config: OpenClawConfig = {
    ...params.config,
    models: {
      ...params.config.models,
      mode: params.config.models?.mode ?? "merge",
      providers: {
        ...providers,
        [providerId]: {
          ...existingProviderRest,
          baseUrl: resolvedBaseUrl,
          api: providerApi,
          ...(normalizedApiKey ? { apiKey: normalizedApiKey } : {}),
          // Azure auth: disable the Authorization header and send "api-key".
          ...(isAzure ? { authHeader: false } : {}),
          ...(azureHeaders ? { headers: azureHeaders } : {}),
          // mergedModels is always non-empty here; the fallback is defensive.
          models: mergedModels.length > 0 ? mergedModels : [nextModel],
        },
      },
    },
  };

  config = applyPrimaryModel(config, modelRef);
  // Default "thinking" to medium for Azure reasoning models, but only when
  // the user has not already configured a per-model thinking level.
  if (isAzure && isLikelyReasoningModel) {
    const existingPerModelThinking = config.agents?.defaults?.models?.[modelRef]?.params?.thinking;
    if (!existingPerModelThinking) {
      config = {
        ...config,
        agents: {
          ...config.agents,
          defaults: {
            ...config.agents?.defaults,
            models: {
              ...config.agents?.defaults?.models,
              [modelRef]: {
                ...config.agents?.defaults?.models?.[modelRef],
                params: {
                  ...config.agents?.defaults?.models?.[modelRef]?.params,
                  thinking: "medium",
                },
              },
            },
          },
        },
      };
    }
  }
  // Record the validated alias on the per-model defaults entry.
  if (alias) {
    config = {
      ...config,
      agents: {
        ...config.agents,
        defaults: {
          ...config.agents?.defaults,
          models: {
            ...config.agents?.defaults?.models,
            [modelRef]: {
              ...config.agents?.defaults?.models?.[modelRef],
              alias,
            },
          },
        },
      },
    };
  }

  return {
    config,
    providerId,
    modelId,
    // Surface the rename so callers can tell the user about the new id.
    ...(providerIdResult.providerIdRenamedFrom
      ? { providerIdRenamedFrom: providerIdResult.providerIdRenamedFrom }
      : {}),
  };
}
|
||||
@@ -1,18 +1,22 @@
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import { CONTEXT_WINDOW_HARD_MIN_TOKENS } from "../agents/context-window-guard.js";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import { defaultRuntime } from "../runtime.js";
|
||||
import {
|
||||
applyCustomApiConfig,
|
||||
parseNonInteractiveCustomApiFlags,
|
||||
promptCustomApiConfig,
|
||||
} from "./onboard-custom.js";
|
||||
import type { ensureApiKeyFromEnvOrPrompt } from "../plugins/provider-auth-input.js";
|
||||
import { promptCustomApiConfig } from "./onboard-custom.js";
|
||||
|
||||
const OLLAMA_DEFAULT_BASE_URL_FOR_TEST = "http://127.0.0.1:11434";
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock("./model-picker.js", () => ({
|
||||
applyPrimaryModel: vi.fn((cfg) => cfg),
|
||||
vi.mock("../plugins/provider-auth-input.js", () => ({
|
||||
ensureApiKeyFromEnvOrPrompt: vi.fn(
|
||||
async (params: Parameters<typeof ensureApiKeyFromEnvOrPrompt>[0]) => {
|
||||
await params.prompter.select({ message: "Secret input mode", options: [] });
|
||||
const input = await params.prompter.text({
|
||||
message: params.promptMessage,
|
||||
validate: params.validate,
|
||||
});
|
||||
const apiKey = params.normalize(input ?? "");
|
||||
await params.setCredential(apiKey);
|
||||
return apiKey;
|
||||
},
|
||||
),
|
||||
}));
|
||||
|
||||
function createTestPrompter(params: { text: string[]; select?: string[] }): {
|
||||
@@ -63,7 +67,7 @@ async function runPromptCustomApi(
|
||||
) {
|
||||
return promptCustomApiConfig({
|
||||
prompter: prompter as unknown as Parameters<typeof promptCustomApiConfig>[0]["prompter"],
|
||||
runtime: { ...defaultRuntime, log: vi.fn() },
|
||||
runtime: { log: vi.fn() } as unknown as Parameters<typeof promptCustomApiConfig>[0]["runtime"],
|
||||
config,
|
||||
});
|
||||
}
|
||||
@@ -79,59 +83,6 @@ function expectOpenAiCompatResult(params: {
|
||||
expect(params.result.config.models?.providers?.custom?.api).toBe("openai-completions");
|
||||
}
|
||||
|
||||
function getFirstFetchVerificationCall(fetchMock: ReturnType<typeof vi.fn>) {
|
||||
const firstCall = fetchMock.mock.calls[0];
|
||||
const firstUrl = firstCall?.[0];
|
||||
const firstInit = firstCall?.[1] as
|
||||
| { body?: string; headers?: Record<string, string> }
|
||||
| undefined;
|
||||
if (typeof firstUrl !== "string") {
|
||||
throw new Error("Expected first verification call URL");
|
||||
}
|
||||
return {
|
||||
url: firstUrl,
|
||||
init: firstInit,
|
||||
body: JSON.parse(firstInit?.body ?? "{}"),
|
||||
};
|
||||
}
|
||||
|
||||
function buildCustomProviderConfig(contextWindow?: number) {
|
||||
if (contextWindow === undefined) {
|
||||
return {} as OpenClawConfig;
|
||||
}
|
||||
return {
|
||||
models: {
|
||||
providers: {
|
||||
custom: {
|
||||
api: "openai-completions" as const,
|
||||
baseUrl: "https://llm.example.com/v1",
|
||||
models: [
|
||||
{
|
||||
id: "foo-large",
|
||||
name: "foo-large",
|
||||
contextWindow,
|
||||
maxTokens: contextWindow > CONTEXT_WINDOW_HARD_MIN_TOKENS ? 4096 : 1024,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
reasoning: false,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
} as OpenClawConfig;
|
||||
}
|
||||
|
||||
function applyCustomModelConfigWithContextWindow(contextWindow?: number) {
|
||||
return applyCustomApiConfig({
|
||||
config: buildCustomProviderConfig(contextWindow),
|
||||
baseUrl: "https://llm.example.com/v1",
|
||||
modelId: "foo-large",
|
||||
compatibility: "openai",
|
||||
providerId: "custom",
|
||||
});
|
||||
}
|
||||
|
||||
describe("promptCustomApiConfig", () => {
|
||||
afterEach(() => {
|
||||
vi.unstubAllGlobals();
|
||||
@@ -191,94 +142,6 @@ describe("promptCustomApiConfig", () => {
|
||||
expectOpenAiCompatResult({ prompter, textCalls: 5, selectCalls: 2, result });
|
||||
});
|
||||
|
||||
it("uses expanded max_tokens for openai verification probes", async () => {
|
||||
const prompter = createTestPrompter({
|
||||
text: ["https://example.com/v1", "test-key", "detected-model", "custom", "alias"],
|
||||
select: ["plaintext", "openai"],
|
||||
});
|
||||
const fetchMock = stubFetchSequence([{ ok: true }]);
|
||||
|
||||
await runPromptCustomApi(prompter);
|
||||
|
||||
const firstCall = fetchMock.mock.calls[0]?.[1] as { body?: string } | undefined;
|
||||
expect(firstCall?.body).toBeDefined();
|
||||
expect(JSON.parse(firstCall?.body ?? "{}")).toMatchObject({ max_tokens: 16 });
|
||||
});
|
||||
|
||||
it("uses azure responses-specific headers and body for openai verification probes", async () => {
|
||||
const prompter = createTestPrompter({
|
||||
text: [
|
||||
"https://my-resource.openai.azure.com",
|
||||
"azure-test-key",
|
||||
"gpt-4.1",
|
||||
"custom",
|
||||
"alias",
|
||||
],
|
||||
select: ["plaintext", "openai"],
|
||||
});
|
||||
const fetchMock = stubFetchSequence([{ ok: true }]);
|
||||
|
||||
await runPromptCustomApi(prompter);
|
||||
|
||||
const { url, init, body } = getFirstFetchVerificationCall(fetchMock);
|
||||
|
||||
expect(url).toBe("https://my-resource.openai.azure.com/openai/v1/responses");
|
||||
expect(init?.headers?.["api-key"]).toBe("azure-test-key");
|
||||
expect(init?.headers?.Authorization).toBeUndefined();
|
||||
expect(init?.body).toBeDefined();
|
||||
expect(body).toEqual({
|
||||
model: "gpt-4.1",
|
||||
input: "Hi",
|
||||
max_output_tokens: 16,
|
||||
stream: false,
|
||||
});
|
||||
});
|
||||
|
||||
it("uses Azure Foundry chat-completions probes for services.ai URLs", async () => {
|
||||
const prompter = createTestPrompter({
|
||||
text: [
|
||||
"https://my-resource.services.ai.azure.com",
|
||||
"azure-test-key",
|
||||
"deepseek-v3-0324",
|
||||
"custom",
|
||||
"alias",
|
||||
],
|
||||
select: ["plaintext", "openai"],
|
||||
});
|
||||
const fetchMock = stubFetchSequence([{ ok: true }]);
|
||||
|
||||
await runPromptCustomApi(prompter);
|
||||
|
||||
const { url, init, body } = getFirstFetchVerificationCall(fetchMock);
|
||||
|
||||
expect(url).toBe(
|
||||
"https://my-resource.services.ai.azure.com/openai/deployments/deepseek-v3-0324/chat/completions?api-version=2024-10-21",
|
||||
);
|
||||
expect(init?.headers?.["api-key"]).toBe("azure-test-key");
|
||||
expect(init?.headers?.Authorization).toBeUndefined();
|
||||
expect(body).toEqual({
|
||||
model: "deepseek-v3-0324",
|
||||
messages: [{ role: "user", content: "Hi" }],
|
||||
max_tokens: 16,
|
||||
stream: false,
|
||||
});
|
||||
});
|
||||
|
||||
it("uses expanded max_tokens for anthropic verification probes", async () => {
|
||||
const prompter = createTestPrompter({
|
||||
text: ["https://example.com", "test-key", "detected-model", "custom", "alias"],
|
||||
select: ["plaintext", "unknown"],
|
||||
});
|
||||
const fetchMock = stubFetchSequence([{ ok: false, status: 404 }, { ok: true }]);
|
||||
|
||||
await runPromptCustomApi(prompter);
|
||||
|
||||
expect(fetchMock).toHaveBeenCalledTimes(2);
|
||||
const secondCall = fetchMock.mock.calls[1]?.[1] as { body?: string } | undefined;
|
||||
expect(secondCall?.body).toBeDefined();
|
||||
expect(JSON.parse(secondCall?.body ?? "{}")).toMatchObject({ max_tokens: 1 });
|
||||
});
|
||||
|
||||
it("re-prompts base url when unknown detection fails", async () => {
|
||||
const prompter = createTestPrompter({
|
||||
text: [
|
||||
@@ -301,39 +164,6 @@ describe("promptCustomApiConfig", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("renames provider id when baseUrl differs", async () => {
|
||||
const prompter = createTestPrompter({
|
||||
text: ["http://localhost:11434/v1", "", "llama3", "custom", ""],
|
||||
select: ["plaintext", "openai"],
|
||||
});
|
||||
stubFetchSequence([{ ok: true }]);
|
||||
const result = await runPromptCustomApi(prompter, {
|
||||
models: {
|
||||
providers: {
|
||||
custom: {
|
||||
baseUrl: "http://old.example.com/v1",
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "old-model",
|
||||
name: "Old",
|
||||
contextWindow: 1,
|
||||
maxTokens: 1,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
reasoning: false,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
expect(result.providerId).toBe("custom-2");
|
||||
expect(result.config.models?.providers?.custom).toBeDefined();
|
||||
expect(result.config.models?.providers?.["custom-2"]).toBeDefined();
|
||||
});
|
||||
|
||||
it("aborts verification after timeout", async () => {
|
||||
vi.useFakeTimers();
|
||||
const prompter = createTestPrompter({
|
||||
@@ -358,348 +188,4 @@ describe("promptCustomApiConfig", () => {
|
||||
|
||||
expect(prompter.text).toHaveBeenCalledTimes(6);
|
||||
});
|
||||
|
||||
it("stores env SecretRef for custom provider when selected", async () => {
|
||||
vi.stubEnv("CUSTOM_PROVIDER_API_KEY", "test-env-key");
|
||||
const prompter = createTestPrompter({
|
||||
text: ["https://example.com/v1", "CUSTOM_PROVIDER_API_KEY", "detected-model", "custom", ""],
|
||||
select: ["ref", "env", "openai"],
|
||||
});
|
||||
const fetchMock = stubFetchSequence([{ ok: true }]);
|
||||
|
||||
const result = await runPromptCustomApi(prompter);
|
||||
|
||||
expect(result.config.models?.providers?.custom?.apiKey).toEqual({
|
||||
source: "env",
|
||||
provider: "default",
|
||||
id: "CUSTOM_PROVIDER_API_KEY",
|
||||
});
|
||||
const firstCall = fetchMock.mock.calls[0]?.[1] as
|
||||
| { headers?: Record<string, string> }
|
||||
| undefined;
|
||||
expect(firstCall?.headers?.Authorization).toBe("Bearer test-env-key");
|
||||
});
|
||||
|
||||
it("re-prompts source after provider ref preflight fails and succeeds with env ref", async () => {
|
||||
vi.stubEnv("CUSTOM_PROVIDER_API_KEY", "test-env-key");
|
||||
const prompter = createTestPrompter({
|
||||
text: [
|
||||
"https://example.com/v1",
|
||||
"/providers/custom/apiKey",
|
||||
"CUSTOM_PROVIDER_API_KEY",
|
||||
"detected-model",
|
||||
"custom",
|
||||
"",
|
||||
],
|
||||
select: ["ref", "provider", "filemain", "env", "openai"],
|
||||
});
|
||||
stubFetchSequence([{ ok: true }]);
|
||||
|
||||
const result = await runPromptCustomApi(prompter, {
|
||||
secrets: {
|
||||
providers: {
|
||||
filemain: {
|
||||
source: "file",
|
||||
path: "/tmp/openclaw-missing-provider.json",
|
||||
mode: "json",
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
expect(prompter.note).toHaveBeenCalledWith(
|
||||
expect.stringContaining("Could not validate provider reference"),
|
||||
"Reference check failed",
|
||||
);
|
||||
expect(result.config.models?.providers?.custom?.apiKey).toEqual({
|
||||
source: "env",
|
||||
provider: "default",
|
||||
id: "CUSTOM_PROVIDER_API_KEY",
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("applyCustomApiConfig", () => {
|
||||
it.each([
|
||||
{
|
||||
name: "uses hard-min context window for newly added custom models",
|
||||
existingContextWindow: undefined,
|
||||
expectedContextWindow: CONTEXT_WINDOW_HARD_MIN_TOKENS,
|
||||
},
|
||||
{
|
||||
name: "upgrades existing custom model context window when below hard minimum",
|
||||
existingContextWindow: 4096,
|
||||
expectedContextWindow: CONTEXT_WINDOW_HARD_MIN_TOKENS,
|
||||
},
|
||||
{
|
||||
name: "preserves existing custom model context window when already above minimum",
|
||||
existingContextWindow: 131072,
|
||||
expectedContextWindow: 131072,
|
||||
},
|
||||
])("$name", ({ existingContextWindow, expectedContextWindow }) => {
|
||||
const result = applyCustomModelConfigWithContextWindow(existingContextWindow);
|
||||
const model = result.config.models?.providers?.custom?.models?.find(
|
||||
(entry) => entry.id === "foo-large",
|
||||
);
|
||||
expect(model?.contextWindow).toBe(expectedContextWindow);
|
||||
});
|
||||
|
||||
it.each([
|
||||
{
|
||||
name: "invalid compatibility values at runtime",
|
||||
params: {
|
||||
config: {},
|
||||
baseUrl: "https://llm.example.com/v1",
|
||||
modelId: "foo-large",
|
||||
compatibility: "invalid" as unknown as "openai",
|
||||
},
|
||||
expectedMessage: 'Custom provider compatibility must be "openai" or "anthropic".',
|
||||
},
|
||||
{
|
||||
name: "explicit provider ids that normalize to empty",
|
||||
params: {
|
||||
config: {},
|
||||
baseUrl: "https://llm.example.com/v1",
|
||||
modelId: "foo-large",
|
||||
compatibility: "openai" as const,
|
||||
providerId: "!!!",
|
||||
},
|
||||
expectedMessage: "Custom provider ID must include letters, numbers, or hyphens.",
|
||||
},
|
||||
])("rejects $name", ({ params, expectedMessage }) => {
|
||||
expect(() => applyCustomApiConfig(params)).toThrow(expectedMessage);
|
||||
});
|
||||
|
||||
it("produces azure-specific config for Azure OpenAI URLs with reasoning model", () => {
|
||||
const result = applyCustomApiConfig({
|
||||
config: {},
|
||||
baseUrl: "https://user123-resource.openai.azure.com",
|
||||
modelId: "o4-mini",
|
||||
compatibility: "openai",
|
||||
apiKey: "abcd1234",
|
||||
});
|
||||
const providerId = result.providerId!;
|
||||
const provider = result.config.models?.providers?.[providerId];
|
||||
|
||||
expect(provider?.baseUrl).toBe("https://user123-resource.openai.azure.com/openai/v1");
|
||||
expect(provider?.api).toBe("azure-openai-responses");
|
||||
expect(provider?.authHeader).toBe(false);
|
||||
expect(provider?.headers).toEqual({ "api-key": "abcd1234" });
|
||||
|
||||
const model = provider?.models?.find((m) => m.id === "o4-mini");
|
||||
expect(model?.input).toEqual(["text", "image"]);
|
||||
expect(model?.reasoning).toBe(true);
|
||||
expect(model?.compat).toEqual({ supportsStore: false });
|
||||
|
||||
const modelRef = `${providerId}/${result.modelId}`;
|
||||
expect(result.config.agents?.defaults?.models?.[modelRef]?.params?.thinking).toBe("medium");
|
||||
});
|
||||
|
||||
it("keeps selected compatibility for Azure AI Foundry URLs", () => {
|
||||
const result = applyCustomApiConfig({
|
||||
config: {},
|
||||
baseUrl: "https://my-resource.services.ai.azure.com",
|
||||
modelId: "gpt-4.1",
|
||||
compatibility: "openai",
|
||||
apiKey: "key123",
|
||||
});
|
||||
const providerId = result.providerId!;
|
||||
const provider = result.config.models?.providers?.[providerId];
|
||||
|
||||
expect(provider?.baseUrl).toBe("https://my-resource.services.ai.azure.com/openai/v1");
|
||||
expect(provider?.api).toBe("openai-completions");
|
||||
expect(provider?.authHeader).toBe(false);
|
||||
expect(provider?.headers).toEqual({ "api-key": "key123" });
|
||||
|
||||
const model = provider?.models?.find((m) => m.id === "gpt-4.1");
|
||||
expect(model?.reasoning).toBe(false);
|
||||
expect(model?.input).toEqual(["text"]);
|
||||
expect(model?.compat).toEqual({ supportsStore: false });
|
||||
|
||||
const modelRef = `${providerId}/gpt-4.1`;
|
||||
expect(result.config.agents?.defaults?.models?.[modelRef]?.params?.thinking).toBeUndefined();
|
||||
});
|
||||
|
||||
it("strips pre-existing deployment path from Azure URL in stored config", () => {
|
||||
const result = applyCustomApiConfig({
|
||||
config: {},
|
||||
baseUrl: "https://my-resource.openai.azure.com/openai/deployments/gpt-4",
|
||||
modelId: "gpt-4",
|
||||
compatibility: "openai",
|
||||
apiKey: "key456",
|
||||
});
|
||||
const providerId = result.providerId!;
|
||||
const provider = result.config.models?.providers?.[providerId];
|
||||
|
||||
expect(provider?.baseUrl).toBe("https://my-resource.openai.azure.com/openai/v1");
|
||||
});
|
||||
|
||||
it("re-onboard updates existing Azure provider instead of creating a duplicate", () => {
|
||||
const oldProviderId = "custom-my-resource-openai-azure-com";
|
||||
const result = applyCustomApiConfig({
|
||||
config: {
|
||||
models: {
|
||||
providers: {
|
||||
[oldProviderId]: {
|
||||
baseUrl: "https://my-resource.openai.azure.com/openai/deployments/gpt-4",
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "gpt-4",
|
||||
name: "gpt-4",
|
||||
contextWindow: 1,
|
||||
maxTokens: 1,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
reasoning: false,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
baseUrl: "https://my-resource.openai.azure.com",
|
||||
modelId: "gpt-4",
|
||||
compatibility: "openai",
|
||||
apiKey: "key789",
|
||||
});
|
||||
|
||||
expect(result.providerId).toBe(oldProviderId);
|
||||
expect(result.providerIdRenamedFrom).toBeUndefined();
|
||||
const provider = result.config.models?.providers?.[oldProviderId];
|
||||
expect(provider?.baseUrl).toBe("https://my-resource.openai.azure.com/openai/v1");
|
||||
expect(provider?.api).toBe("azure-openai-responses");
|
||||
expect(provider?.authHeader).toBe(false);
|
||||
expect(provider?.headers).toEqual({ "api-key": "key789" });
|
||||
});
|
||||
|
||||
it("does not add azure fields for non-azure URLs", () => {
|
||||
const result = applyCustomApiConfig({
|
||||
config: {},
|
||||
baseUrl: "https://llm.example.com/v1",
|
||||
modelId: "foo-large",
|
||||
compatibility: "openai",
|
||||
apiKey: "key123",
|
||||
providerId: "custom",
|
||||
});
|
||||
const provider = result.config.models?.providers?.custom;
|
||||
|
||||
expect(provider?.api).toBe("openai-completions");
|
||||
expect(provider?.authHeader).toBeUndefined();
|
||||
expect(provider?.headers).toBeUndefined();
|
||||
expect(provider?.models?.[0]?.reasoning).toBe(false);
|
||||
expect(provider?.models?.[0]?.input).toEqual(["text"]);
|
||||
expect(provider?.models?.[0]?.compat).toBeUndefined();
|
||||
expect(
|
||||
result.config.agents?.defaults?.models?.["custom/foo-large"]?.params?.thinking,
|
||||
).toBeUndefined();
|
||||
});
|
||||
|
||||
it("re-onboard preserves user-customized fields for non-azure models", () => {
|
||||
const result = applyCustomApiConfig({
|
||||
config: {
|
||||
models: {
|
||||
providers: {
|
||||
custom: {
|
||||
baseUrl: "https://llm.example.com/v1",
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "foo-large",
|
||||
name: "My Custom Model",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 1, output: 2, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 131072,
|
||||
maxTokens: 16384,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
} as OpenClawConfig,
|
||||
baseUrl: "https://llm.example.com/v1",
|
||||
modelId: "foo-large",
|
||||
compatibility: "openai",
|
||||
apiKey: "key",
|
||||
providerId: "custom",
|
||||
});
|
||||
const model = result.config.models?.providers?.custom?.models?.find(
|
||||
(m) => m.id === "foo-large",
|
||||
);
|
||||
expect(model?.name).toBe("My Custom Model");
|
||||
expect(model?.reasoning).toBe(true);
|
||||
expect(model?.input).toEqual(["text", "image"]);
|
||||
expect(model?.cost).toEqual({ input: 1, output: 2, cacheRead: 0, cacheWrite: 0 });
|
||||
expect(model?.maxTokens).toBe(16384);
|
||||
expect(model?.contextWindow).toBe(131072);
|
||||
});
|
||||
|
||||
it("preserves existing per-model thinking when already set for azure reasoning model", () => {
|
||||
const providerId = "custom-my-resource-openai-azure-com";
|
||||
const modelRef = `${providerId}/o3-mini`;
|
||||
const result = applyCustomApiConfig({
|
||||
config: {
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
[modelRef]: { params: { thinking: "high" } },
|
||||
},
|
||||
},
|
||||
},
|
||||
} as OpenClawConfig,
|
||||
baseUrl: "https://my-resource.openai.azure.com",
|
||||
modelId: "o3-mini",
|
||||
compatibility: "openai",
|
||||
apiKey: "key",
|
||||
});
|
||||
expect(result.config.agents?.defaults?.models?.[modelRef]?.params?.thinking).toBe("high");
|
||||
});
|
||||
});
|
||||
|
||||
describe("parseNonInteractiveCustomApiFlags", () => {
|
||||
it("parses required flags and defaults compatibility to openai", () => {
|
||||
const result = parseNonInteractiveCustomApiFlags({
|
||||
baseUrl: " https://llm.example.com/v1 ",
|
||||
modelId: " foo-large ",
|
||||
apiKey: " custom-test-key ",
|
||||
providerId: " my-custom ",
|
||||
});
|
||||
|
||||
expect(result).toEqual({
|
||||
baseUrl: "https://llm.example.com/v1",
|
||||
modelId: "foo-large",
|
||||
compatibility: "openai",
|
||||
apiKey: "custom-test-key", // pragma: allowlist secret
|
||||
providerId: "my-custom",
|
||||
});
|
||||
});
|
||||
|
||||
it.each([
|
||||
{
|
||||
name: "missing required flags",
|
||||
flags: { baseUrl: "https://llm.example.com/v1" },
|
||||
expectedMessage: 'Auth choice "custom-api-key" requires a base URL and model ID.',
|
||||
},
|
||||
{
|
||||
name: "invalid compatibility values",
|
||||
flags: {
|
||||
baseUrl: "https://llm.example.com/v1",
|
||||
modelId: "foo-large",
|
||||
compatibility: "xmlrpc",
|
||||
},
|
||||
expectedMessage: 'Invalid --custom-compatibility (use "openai" or "anthropic").',
|
||||
},
|
||||
{
|
||||
name: "invalid explicit provider ids",
|
||||
flags: {
|
||||
baseUrl: "https://llm.example.com/v1",
|
||||
modelId: "foo-large",
|
||||
providerId: "!!!",
|
||||
},
|
||||
expectedMessage: "Custom provider ID must include letters, numbers, or hyphens.",
|
||||
},
|
||||
])("rejects $name", ({ flags, expectedMessage }) => {
|
||||
expect(() => parseNonInteractiveCustomApiFlags(flags)).toThrow(expectedMessage);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,174 +1,44 @@
|
||||
import { CONTEXT_WINDOW_HARD_MIN_TOKENS } from "../agents/context-window-guard.js";
|
||||
import { DEFAULT_PROVIDER } from "../agents/defaults.js";
|
||||
import { buildModelAliasIndex, modelKey } from "../agents/model-selection.js";
|
||||
import type { ModelProviderConfig } from "../config/types.models.js";
|
||||
import { modelKey } from "../agents/model-selection.js";
|
||||
import type { OpenClawConfig } from "../config/types.openclaw.js";
|
||||
import { isSecretRef, type SecretInput } from "../config/types.secrets.js";
|
||||
import type { SecretInput } from "../config/types.secrets.js";
|
||||
import { ensureApiKeyFromEnvOrPrompt } from "../plugins/provider-auth-input.js";
|
||||
import { OLLAMA_DEFAULT_BASE_URL } from "../plugins/provider-model-defaults.js";
|
||||
import type { RuntimeEnv } from "../runtime.js";
|
||||
import {
|
||||
normalizeLowercaseStringOrEmpty,
|
||||
normalizeOptionalLowercaseString,
|
||||
normalizeOptionalString,
|
||||
} from "../shared/string-coerce.js";
|
||||
import { fetchWithTimeout } from "../utils/fetch-timeout.js";
|
||||
import {
|
||||
normalizeSecretInput,
|
||||
normalizeOptionalSecretInput,
|
||||
} from "../utils/normalize-secret-input.js";
|
||||
import { normalizeSecretInput } from "../utils/normalize-secret-input.js";
|
||||
import type { WizardPrompter } from "../wizard/prompts.js";
|
||||
import { ensureApiKeyFromEnvOrPrompt } from "./auth-choice.apply-helpers.js";
|
||||
import { applyPrimaryModel } from "./model-picker.js";
|
||||
import { normalizeAlias } from "./models/shared.js";
|
||||
import {
|
||||
applyCustomApiConfig,
|
||||
buildAnthropicVerificationProbeRequest,
|
||||
buildEndpointIdFromUrl,
|
||||
buildOpenAiVerificationProbeRequest,
|
||||
normalizeEndpointId,
|
||||
normalizeOptionalProviderApiKey,
|
||||
resolveCustomModelAliasError,
|
||||
resolveCustomProviderId,
|
||||
type CustomApiCompatibility,
|
||||
type CustomApiResult,
|
||||
} from "./onboard-custom-config.js";
|
||||
export {
|
||||
applyCustomApiConfig,
|
||||
buildAnthropicVerificationProbeRequest,
|
||||
buildOpenAiVerificationProbeRequest,
|
||||
CustomApiError,
|
||||
parseNonInteractiveCustomApiFlags,
|
||||
resolveCustomProviderId,
|
||||
type ApplyCustomApiConfigParams,
|
||||
type CustomApiCompatibility,
|
||||
type CustomApiErrorCode,
|
||||
type CustomApiResult,
|
||||
type ParseNonInteractiveCustomApiFlagsParams,
|
||||
type ParsedNonInteractiveCustomApiFlags,
|
||||
type ResolveCustomProviderIdParams,
|
||||
type ResolvedCustomProviderId,
|
||||
} from "./onboard-custom-config.js";
|
||||
import type { SecretInputMode } from "./onboard-types.js";
|
||||
|
||||
const DEFAULT_CONTEXT_WINDOW = CONTEXT_WINDOW_HARD_MIN_TOKENS;
|
||||
const DEFAULT_MAX_TOKENS = 4096;
|
||||
// Azure OpenAI uses the Responses API which supports larger defaults
|
||||
const AZURE_DEFAULT_CONTEXT_WINDOW = 400_000;
|
||||
const AZURE_DEFAULT_MAX_TOKENS = 16_384;
|
||||
const VERIFY_TIMEOUT_MS = 30_000;
|
||||
|
||||
function normalizeContextWindowForCustomModel(value: unknown): number {
|
||||
const parsed = typeof value === "number" && Number.isFinite(value) ? Math.floor(value) : 0;
|
||||
return parsed >= CONTEXT_WINDOW_HARD_MIN_TOKENS ? parsed : CONTEXT_WINDOW_HARD_MIN_TOKENS;
|
||||
}
|
||||
|
||||
function isAzureFoundryUrl(baseUrl: string): boolean {
|
||||
try {
|
||||
const url = new URL(baseUrl);
|
||||
const host = normalizeLowercaseStringOrEmpty(url.hostname);
|
||||
return host.endsWith(".services.ai.azure.com");
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function isAzureOpenAiUrl(baseUrl: string): boolean {
|
||||
try {
|
||||
const url = new URL(baseUrl);
|
||||
const host = normalizeLowercaseStringOrEmpty(url.hostname);
|
||||
return host.endsWith(".openai.azure.com");
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function isAzureUrl(baseUrl: string): boolean {
|
||||
return isAzureFoundryUrl(baseUrl) || isAzureOpenAiUrl(baseUrl);
|
||||
}
|
||||
|
||||
/**
|
||||
* Transforms an Azure AI Foundry/OpenAI URL to include the deployment path.
|
||||
* Azure requires: https://host/openai/deployments/<model-id>/chat/completions?api-version=2024-xx-xx-preview
|
||||
* But we can't add query params here, so we just add the path prefix.
|
||||
* The api-version will be handled by the Azure OpenAI client or as a query param.
|
||||
*
|
||||
* Example:
|
||||
* https://my-resource.services.ai.azure.com + gpt-5.4-nano
|
||||
* => https://my-resource.services.ai.azure.com/openai/deployments/gpt-5.4-nano
|
||||
*/
|
||||
function transformAzureUrl(baseUrl: string, modelId: string): string {
|
||||
const normalizedUrl = baseUrl.endsWith("/") ? baseUrl.slice(0, -1) : baseUrl;
|
||||
// Check if the URL already includes the deployment path
|
||||
if (normalizedUrl.includes("/openai/deployments/")) {
|
||||
return normalizedUrl;
|
||||
}
|
||||
return `${normalizedUrl}/openai/deployments/${modelId}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Transforms an Azure URL into the base URL stored in config.
|
||||
*
|
||||
* Example:
|
||||
* https://my-resource.openai.azure.com
|
||||
* => https://my-resource.openai.azure.com/openai/v1
|
||||
*/
|
||||
function transformAzureConfigUrl(baseUrl: string): string {
|
||||
const normalizedUrl = baseUrl.endsWith("/") ? baseUrl.slice(0, -1) : baseUrl;
|
||||
if (normalizedUrl.endsWith("/openai/v1")) {
|
||||
return normalizedUrl;
|
||||
}
|
||||
// Strip a full deployment path back to the base origin
|
||||
const deploymentIdx = normalizedUrl.indexOf("/openai/deployments/");
|
||||
const base = deploymentIdx !== -1 ? normalizedUrl.slice(0, deploymentIdx) : normalizedUrl;
|
||||
return `${base}/openai/v1`;
|
||||
}
|
||||
|
||||
function hasSameHost(a: string, b: string): boolean {
|
||||
try {
|
||||
return (
|
||||
normalizeLowercaseStringOrEmpty(new URL(a).hostname) ===
|
||||
normalizeLowercaseStringOrEmpty(new URL(b).hostname)
|
||||
);
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/** Wire-compatibility families a custom provider can emulate. */
export type CustomApiCompatibility = "openai" | "anthropic";
/** Prompt-time choice; "unknown" defers to detection probes — see prompt flow. */
type CustomApiCompatibilityChoice = CustomApiCompatibility | "unknown";
/** Outcome of custom-provider onboarding. */
export type CustomApiResult = {
  /** Config with the provider/model merged in. */
  config: OpenClawConfig;
  // providerId/modelId are optional; presumably omitted when the flow is
  // cancelled before a provider is resolved — TODO confirm against callers.
  providerId?: string;
  modelId?: string;
  /** Set when an explicit provider id collided and was renamed. */
  providerIdRenamedFrom?: string;
};
|
||||
|
||||
/** Input to applyCustomApiConfig. */
export type ApplyCustomApiConfigParams = {
  /** Existing config the provider/model is merged into. */
  config: OpenClawConfig;
  /** Provider base URL; must be parseable by URL. */
  baseUrl: string;
  /** Model identifier; must be non-empty after trimming. */
  modelId: string;
  compatibility: CustomApiCompatibility;
  /** Optional API key; falls back to an existing provider key when omitted. */
  apiKey?: SecretInput;
  /** Explicit provider id; derived from the base URL when omitted. */
  providerId?: string;
  /** Optional model alias recorded under agents.defaults.models. */
  alias?: string;
};
|
||||
|
||||
/** Raw CLI flag values for non-interactive custom-provider onboarding. */
export type ParseNonInteractiveCustomApiFlagsParams = {
  baseUrl?: string;
  modelId?: string;
  /** Free-form flag value; validated against CustomApiCompatibility. */
  compatibility?: string;
  apiKey?: string;
  providerId?: string;
};

/** Validated/normalized flags (trimmed; compatibility defaulted to "openai"). */
export type ParsedNonInteractiveCustomApiFlags = {
  baseUrl: string;
  modelId: string;
  compatibility: CustomApiCompatibility;
  apiKey?: string;
  providerId?: string;
};
|
||||
|
||||
export type CustomApiErrorCode =
|
||||
| "missing_required"
|
||||
| "invalid_compatibility"
|
||||
| "invalid_base_url"
|
||||
| "invalid_model_id"
|
||||
| "invalid_provider_id"
|
||||
| "invalid_alias";
|
||||
|
||||
export class CustomApiError extends Error {
|
||||
readonly code: CustomApiErrorCode;
|
||||
|
||||
constructor(code: CustomApiErrorCode, message: string) {
|
||||
super(message);
|
||||
this.name = "CustomApiError";
|
||||
this.code = code;
|
||||
}
|
||||
}
|
||||
|
||||
/** Inputs for resolveCustomProviderId. */
export type ResolveCustomProviderIdParams = {
  config: OpenClawConfig;
  /** Base URL used both to derive an ID and to detect same-endpoint collisions. */
  baseUrl: string;
  /** Explicit provider ID; derived from the URL when omitted. */
  providerId?: string;
};

/** Provider ID chosen for a custom endpoint, plus rename bookkeeping. */
export type ResolvedCustomProviderId = {
  providerId: string;
  /** Set when a collision forced a numeric-suffix rename. */
  providerIdRenamedFrom?: string;
};
const COMPATIBILITY_OPTIONS: Array<{
|
||||
value: CustomApiCompatibilityChoice;
|
||||
@@ -192,106 +62,6 @@ const COMPATIBILITY_OPTIONS: Array<{
|
||||
},
|
||||
];
|
||||
|
||||
function normalizeEndpointId(raw: string): string {
|
||||
const trimmed = normalizeOptionalLowercaseString(raw);
|
||||
if (!trimmed) {
|
||||
return "";
|
||||
}
|
||||
return trimmed.replace(/[^a-z0-9-]+/g, "-").replace(/^-+|-+$/g, "");
|
||||
}
|
||||
|
||||
function buildEndpointIdFromUrl(baseUrl: string): string {
|
||||
try {
|
||||
const url = new URL(baseUrl);
|
||||
const host = normalizeLowercaseStringOrEmpty(url.hostname.replace(/[^a-z0-9]+/gi, "-"));
|
||||
const port = url.port ? `-${url.port}` : "";
|
||||
const candidate = `custom-${host}${port}`;
|
||||
return normalizeEndpointId(candidate) || "custom";
|
||||
} catch {
|
||||
return "custom";
|
||||
}
|
||||
}
|
||||
|
||||
function resolveUniqueEndpointId(params: {
|
||||
requestedId: string;
|
||||
baseUrl: string;
|
||||
providers: Record<string, ModelProviderConfig | undefined>;
|
||||
}) {
|
||||
const normalized = normalizeEndpointId(params.requestedId) || "custom";
|
||||
const existing = params.providers[normalized];
|
||||
if (
|
||||
!existing?.baseUrl ||
|
||||
existing.baseUrl === params.baseUrl ||
|
||||
(isAzureUrl(params.baseUrl) && hasSameHost(existing.baseUrl, params.baseUrl))
|
||||
) {
|
||||
return { providerId: normalized, renamed: false };
|
||||
}
|
||||
let suffix = 2;
|
||||
let candidate = `${normalized}-${suffix}`;
|
||||
while (params.providers[candidate]) {
|
||||
suffix += 1;
|
||||
candidate = `${normalized}-${suffix}`;
|
||||
}
|
||||
return { providerId: candidate, renamed: true };
|
||||
}
|
||||
|
||||
function resolveAliasError(params: {
|
||||
raw: string;
|
||||
cfg: OpenClawConfig;
|
||||
modelRef: string;
|
||||
}): string | undefined {
|
||||
const trimmed = params.raw.trim();
|
||||
if (!trimmed) {
|
||||
return undefined;
|
||||
}
|
||||
let normalized: string;
|
||||
try {
|
||||
normalized = normalizeAlias(trimmed);
|
||||
} catch (err) {
|
||||
return err instanceof Error ? err.message : "Alias is invalid.";
|
||||
}
|
||||
const aliasIndex = buildModelAliasIndex({
|
||||
cfg: params.cfg,
|
||||
defaultProvider: DEFAULT_PROVIDER,
|
||||
});
|
||||
const aliasKey = normalizeLowercaseStringOrEmpty(normalized);
|
||||
const existing = aliasIndex.byAlias.get(aliasKey);
|
||||
if (!existing) {
|
||||
return undefined;
|
||||
}
|
||||
const existingKey = modelKey(existing.ref.provider, existing.ref.model);
|
||||
if (existingKey === params.modelRef) {
|
||||
return undefined;
|
||||
}
|
||||
return `Alias ${normalized} already points to ${existingKey}.`;
|
||||
}
|
||||
|
||||
function buildAzureOpenAiHeaders(apiKey: string) {
|
||||
const headers: Record<string, string> = {};
|
||||
if (apiKey) {
|
||||
headers["api-key"] = apiKey;
|
||||
}
|
||||
return headers;
|
||||
}
|
||||
|
||||
function buildOpenAiHeaders(apiKey: string) {
|
||||
const headers: Record<string, string> = {};
|
||||
if (apiKey) {
|
||||
headers.Authorization = `Bearer ${apiKey}`;
|
||||
}
|
||||
return headers;
|
||||
}
|
||||
|
||||
function buildAnthropicHeaders(apiKey: string) {
|
||||
const headers: Record<string, string> = {
|
||||
"anthropic-version": "2023-06-01",
|
||||
};
|
||||
if (apiKey) {
|
||||
headers["x-api-key"] = apiKey;
|
||||
}
|
||||
return headers;
|
||||
}
|
||||
|
||||
function formatVerificationError(error: unknown): string {
|
||||
if (!error) {
|
||||
return "unknown error";
|
||||
@@ -315,31 +85,6 @@ type VerificationResult = {
|
||||
error?: unknown;
|
||||
};
|
||||
|
||||
function normalizeOptionalProviderApiKey(value: unknown): SecretInput | undefined {
|
||||
if (isSecretRef(value)) {
|
||||
return value;
|
||||
}
|
||||
return normalizeOptionalSecretInput(value);
|
||||
}
|
||||
|
||||
function resolveVerificationEndpoint(params: {
|
||||
baseUrl: string;
|
||||
modelId: string;
|
||||
endpointPath: "chat/completions" | "messages";
|
||||
}) {
|
||||
const resolvedUrl = isAzureUrl(params.baseUrl)
|
||||
? transformAzureUrl(params.baseUrl, params.modelId)
|
||||
: params.baseUrl;
|
||||
const endpointUrl = new URL(
|
||||
params.endpointPath,
|
||||
resolvedUrl.endsWith("/") ? resolvedUrl : `${resolvedUrl}/`,
|
||||
);
|
||||
if (isAzureUrl(params.baseUrl)) {
|
||||
endpointUrl.searchParams.set("api-version", "2024-10-21");
|
||||
}
|
||||
return endpointUrl.href;
|
||||
}
|
||||
|
||||
async function requestVerification(params: {
|
||||
endpoint: string;
|
||||
headers: Record<string, string>;
|
||||
@@ -369,43 +114,7 @@ async function requestOpenAiVerification(params: {
|
||||
apiKey: string;
|
||||
modelId: string;
|
||||
}): Promise<VerificationResult> {
|
||||
const isBaseUrlAzureUrl = isAzureUrl(params.baseUrl);
|
||||
const headers = isBaseUrlAzureUrl
|
||||
? buildAzureOpenAiHeaders(params.apiKey)
|
||||
: buildOpenAiHeaders(params.apiKey);
|
||||
if (isAzureOpenAiUrl(params.baseUrl)) {
|
||||
const endpoint = new URL(
|
||||
"responses",
|
||||
transformAzureConfigUrl(params.baseUrl).replace(/\/?$/, "/"),
|
||||
).href;
|
||||
return await requestVerification({
|
||||
endpoint,
|
||||
headers,
|
||||
body: {
|
||||
model: params.modelId,
|
||||
input: "Hi",
|
||||
max_output_tokens: 16,
|
||||
stream: false,
|
||||
},
|
||||
});
|
||||
} else {
|
||||
const endpoint = resolveVerificationEndpoint({
|
||||
baseUrl: params.baseUrl,
|
||||
modelId: params.modelId,
|
||||
endpointPath: "chat/completions",
|
||||
});
|
||||
return await requestVerification({
|
||||
endpoint,
|
||||
headers,
|
||||
body: {
|
||||
model: params.modelId,
|
||||
messages: [{ role: "user", content: "Hi" }],
|
||||
// Recent OpenAI-family endpoints reject probes below 16 tokens.
|
||||
max_tokens: 16,
|
||||
stream: false,
|
||||
},
|
||||
});
|
||||
}
|
||||
return await requestVerification(buildOpenAiVerificationProbeRequest(params));
|
||||
}
|
||||
|
||||
async function requestAnthropicVerification(params: {
|
||||
@@ -413,27 +122,7 @@ async function requestAnthropicVerification(params: {
|
||||
apiKey: string;
|
||||
modelId: string;
|
||||
}): Promise<VerificationResult> {
|
||||
// Use a base URL with /v1 injected for this raw fetch only. The rest of the app uses the
|
||||
// Anthropic client, which appends /v1 itself; config should store the base URL
|
||||
// without /v1 to avoid /v1/v1/messages at runtime. See docs/gateway/configuration-reference.md.
|
||||
const baseUrlForRequest = /\/v1\/?$/.test(params.baseUrl.trim())
|
||||
? params.baseUrl.trim()
|
||||
: params.baseUrl.trim().replace(/\/?$/, "") + "/v1";
|
||||
const endpoint = resolveVerificationEndpoint({
|
||||
baseUrl: baseUrlForRequest,
|
||||
modelId: params.modelId,
|
||||
endpointPath: "messages",
|
||||
});
|
||||
return await requestVerification({
|
||||
endpoint,
|
||||
headers: buildAnthropicHeaders(params.apiKey),
|
||||
body: {
|
||||
model: params.modelId,
|
||||
max_tokens: 1,
|
||||
messages: [{ role: "user", content: "Hi" }],
|
||||
stream: false,
|
||||
},
|
||||
});
|
||||
return await requestVerification(buildAnthropicVerificationProbeRequest(params));
|
||||
}
|
||||
|
||||
async function promptBaseUrlAndKey(params: {
|
||||
@@ -521,252 +210,6 @@ async function applyCustomApiRetryChoice(params: {
|
||||
return { baseUrl, apiKey, resolvedApiKey, modelId };
|
||||
}
|
||||
|
||||
function resolveProviderApi(
|
||||
compatibility: CustomApiCompatibility,
|
||||
): "openai-completions" | "anthropic-messages" {
|
||||
return compatibility === "anthropic" ? "anthropic-messages" : "openai-completions";
|
||||
}
|
||||
|
||||
function parseCustomApiCompatibility(raw?: string): CustomApiCompatibility {
|
||||
const compatibilityRaw = normalizeOptionalLowercaseString(raw);
|
||||
if (!compatibilityRaw) {
|
||||
return "openai";
|
||||
}
|
||||
if (compatibilityRaw !== "openai" && compatibilityRaw !== "anthropic") {
|
||||
throw new CustomApiError(
|
||||
"invalid_compatibility",
|
||||
'Invalid --custom-compatibility (use "openai" or "anthropic").',
|
||||
);
|
||||
}
|
||||
return compatibilityRaw;
|
||||
}
|
||||
|
||||
export function resolveCustomProviderId(
|
||||
params: ResolveCustomProviderIdParams,
|
||||
): ResolvedCustomProviderId {
|
||||
const providers = params.config.models?.providers ?? {};
|
||||
const baseUrl = params.baseUrl.trim();
|
||||
const explicitProviderId = params.providerId?.trim();
|
||||
if (explicitProviderId && !normalizeEndpointId(explicitProviderId)) {
|
||||
throw new CustomApiError(
|
||||
"invalid_provider_id",
|
||||
"Custom provider ID must include letters, numbers, or hyphens.",
|
||||
);
|
||||
}
|
||||
const requestedProviderId = explicitProviderId || buildEndpointIdFromUrl(baseUrl);
|
||||
const providerIdResult = resolveUniqueEndpointId({
|
||||
requestedId: requestedProviderId,
|
||||
baseUrl,
|
||||
providers,
|
||||
});
|
||||
|
||||
return {
|
||||
providerId: providerIdResult.providerId,
|
||||
...(providerIdResult.renamed
|
||||
? {
|
||||
providerIdRenamedFrom: normalizeEndpointId(requestedProviderId) || "custom",
|
||||
}
|
||||
: {}),
|
||||
};
|
||||
}
|
||||
|
||||
export function parseNonInteractiveCustomApiFlags(
|
||||
params: ParseNonInteractiveCustomApiFlagsParams,
|
||||
): ParsedNonInteractiveCustomApiFlags {
|
||||
const baseUrl = normalizeOptionalString(params.baseUrl) ?? "";
|
||||
const modelId = normalizeOptionalString(params.modelId) ?? "";
|
||||
if (!baseUrl || !modelId) {
|
||||
throw new CustomApiError(
|
||||
"missing_required",
|
||||
[
|
||||
'Auth choice "custom-api-key" requires a base URL and model ID.',
|
||||
"Use --custom-base-url and --custom-model-id.",
|
||||
].join("\n"),
|
||||
);
|
||||
}
|
||||
|
||||
const apiKey = normalizeOptionalString(params.apiKey);
|
||||
const providerId = normalizeOptionalString(params.providerId);
|
||||
if (providerId && !normalizeEndpointId(providerId)) {
|
||||
throw new CustomApiError(
|
||||
"invalid_provider_id",
|
||||
"Custom provider ID must include letters, numbers, or hyphens.",
|
||||
);
|
||||
}
|
||||
return {
|
||||
baseUrl,
|
||||
modelId,
|
||||
compatibility: parseCustomApiCompatibility(params.compatibility),
|
||||
...(apiKey ? { apiKey } : {}),
|
||||
...(providerId ? { providerId } : {}),
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Registers a custom API endpoint (provider + model) in the config.
 *
 * Validates inputs, normalizes Azure URLs, resolves a unique provider ID,
 * merges the model into any existing provider entry, makes the new model the
 * primary model, and optionally records an alias and a default thinking level.
 * Throws CustomApiError when any input is invalid.
 */
export function applyCustomApiConfig(params: ApplyCustomApiConfigParams): CustomApiResult {
  const baseUrl = normalizeOptionalString(params.baseUrl) ?? "";
  if (!URL.canParse(baseUrl)) {
    throw new CustomApiError("invalid_base_url", "Custom provider base URL must be a valid URL.");
  }

  if (params.compatibility !== "openai" && params.compatibility !== "anthropic") {
    throw new CustomApiError(
      "invalid_compatibility",
      'Custom provider compatibility must be "openai" or "anthropic".',
    );
  }

  const modelId = normalizeOptionalString(params.modelId) ?? "";
  if (!modelId) {
    throw new CustomApiError("invalid_model_id", "Custom provider model ID is required.");
  }

  const isAzure = isAzureUrl(baseUrl);
  const isAzureOpenAi = isAzureOpenAiUrl(baseUrl);
  // Azure URLs are stored in their ".../openai/v1" base form.
  const resolvedBaseUrl = isAzure ? transformAzureConfigUrl(baseUrl) : baseUrl;

  const providerIdResult = resolveCustomProviderId({
    config: params.config,
    baseUrl: resolvedBaseUrl,
    providerId: params.providerId,
  });
  const providerId = providerIdResult.providerId;
  const providers = params.config.models?.providers ?? {};

  const modelRef = modelKey(providerId, modelId);
  const alias = normalizeOptionalString(params.alias) ?? "";
  // Reject aliases already bound to a different model.
  const aliasError = resolveAliasError({
    raw: alias,
    cfg: params.config,
    modelRef,
  });
  if (aliasError) {
    throw new CustomApiError("invalid_alias", aliasError);
  }

  const existingProvider = providers[providerId];
  const existingModels = Array.isArray(existingProvider?.models) ? existingProvider.models : [];
  const hasModel = existingModels.some((model) => model.id === modelId);
  // Heuristic: Azure IDs matching o1/o3/o4 or gpt-5 and above are treated as reasoning models.
  const isLikelyReasoningModel = isAzure && /\b(o[134]|gpt-([5-9]|\d{2,}))\b/i.test(modelId);
  const nextModel = isAzure
    ? {
        id: modelId,
        name: `${modelId} (Custom Provider)`,
        contextWindow: AZURE_DEFAULT_CONTEXT_WINDOW,
        maxTokens: AZURE_DEFAULT_MAX_TOKENS,
        input: isLikelyReasoningModel
          ? (["text", "image"] as Array<"text" | "image">)
          : (["text"] as ["text"]),
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
        reasoning: isLikelyReasoningModel,
        compat: { supportsStore: false },
      }
    : {
        id: modelId,
        name: `${modelId} (Custom Provider)`,
        contextWindow: DEFAULT_CONTEXT_WINDOW,
        maxTokens: DEFAULT_MAX_TOKENS,
        input: ["text"] as ["text"],
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
        reasoning: false,
      };
  // When the model already exists, refresh selected fields in place; otherwise append it.
  const mergedModels = hasModel
    ? existingModels.map((model) =>
        model.id === modelId
          ? {
              ...model,
              // Azure defaults overwrite the existing entry wholesale.
              ...(isAzure ? nextModel : {}),
              name: model.name ?? nextModel.name,
              cost: model.cost ?? nextModel.cost,
              contextWindow: normalizeContextWindowForCustomModel(model.contextWindow),
              maxTokens: model.maxTokens ?? nextModel.maxTokens,
            }
          : model,
      )
    : [...existingModels, nextModel];
  const { apiKey: existingApiKey, ...existingProviderRest } = existingProvider ?? {};
  // Prefer the key passed in; fall back to any key already stored on the provider.
  const normalizedApiKey =
    normalizeOptionalProviderApiKey(params.apiKey) ??
    normalizeOptionalProviderApiKey(existingApiKey);

  const providerApi = isAzureOpenAi
    ? ("azure-openai-responses" as const)
    : resolveProviderApi(params.compatibility);
  // Azure authenticates via an "api-key" header rather than a bearer token.
  const azureHeaders = isAzure && normalizedApiKey ? { "api-key": normalizedApiKey } : undefined;

  let config: OpenClawConfig = {
    ...params.config,
    models: {
      ...params.config.models,
      mode: params.config.models?.mode ?? "merge",
      providers: {
        ...providers,
        [providerId]: {
          ...existingProviderRest,
          baseUrl: resolvedBaseUrl,
          api: providerApi,
          ...(normalizedApiKey ? { apiKey: normalizedApiKey } : {}),
          ...(isAzure ? { authHeader: false } : {}),
          ...(azureHeaders ? { headers: azureHeaders } : {}),
          models: mergedModels.length > 0 ? mergedModels : [nextModel],
        },
      },
    },
  };

  config = applyPrimaryModel(config, modelRef);
  if (isAzure && isLikelyReasoningModel) {
    // Default Azure reasoning models to "medium" thinking unless already configured.
    const existingPerModelThinking = config.agents?.defaults?.models?.[modelRef]?.params?.thinking;
    if (!existingPerModelThinking) {
      config = {
        ...config,
        agents: {
          ...config.agents,
          defaults: {
            ...config.agents?.defaults,
            models: {
              ...config.agents?.defaults?.models,
              [modelRef]: {
                ...config.agents?.defaults?.models?.[modelRef],
                params: {
                  ...config.agents?.defaults?.models?.[modelRef]?.params,
                  thinking: "medium",
                },
              },
            },
          },
        },
      };
    }
  }
  if (alias) {
    // Record the validated alias on the per-model agent defaults.
    config = {
      ...config,
      agents: {
        ...config.agents,
        defaults: {
          ...config.agents?.defaults,
          models: {
            ...config.agents?.defaults?.models,
            [modelRef]: {
              ...config.agents?.defaults?.models?.[modelRef],
              alias,
            },
          },
        },
      },
    };
  }

  return {
    config,
    providerId,
    modelId,
    ...(providerIdResult.providerIdRenamedFrom
      ? { providerIdRenamedFrom: providerIdResult.providerIdRenamedFrom }
      : {}),
  };
}
export async function promptCustomApiConfig(params: {
|
||||
prompter: WizardPrompter;
|
||||
runtime: RuntimeEnv;
|
||||
@@ -871,7 +314,6 @@ export async function promptCustomApiConfig(params: {
|
||||
}
|
||||
}
|
||||
|
||||
const providers = config.models?.providers ?? {};
|
||||
const suggestedId = buildEndpointIdFromUrl(baseUrl);
|
||||
const providerIdInput = await prompter.text({
|
||||
message: "Endpoint ID",
|
||||
@@ -890,14 +332,13 @@ export async function promptCustomApiConfig(params: {
|
||||
placeholder: "e.g. local, ollama",
|
||||
initialValue: "",
|
||||
validate: (value) => {
|
||||
const requestedId = normalizeEndpointId(providerIdInput) || "custom";
|
||||
const providerIdResult = resolveUniqueEndpointId({
|
||||
requestedId,
|
||||
const resolvedProvider = resolveCustomProviderId({
|
||||
config,
|
||||
baseUrl,
|
||||
providers,
|
||||
providerId: providerIdInput,
|
||||
});
|
||||
const modelRef = modelKey(providerIdResult.providerId, modelId);
|
||||
return resolveAliasError({ raw: value, cfg: config, modelRef });
|
||||
const modelRef = modelKey(resolvedProvider.providerId, modelId);
|
||||
return resolveCustomModelAliasError({ raw: value, cfg: config, modelRef });
|
||||
},
|
||||
});
|
||||
const resolvedCompatibility = compatibility ?? "openai";
|
||||
|
||||
@@ -1,22 +1,15 @@
|
||||
import nodeFs from "node:fs";
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import { afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { afterAll, afterEach, beforeAll, describe, expect, it, vi } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/types.openclaw.js";
|
||||
import type { RuntimeEnv } from "../runtime.js";
|
||||
import { makeTempWorkspace } from "../test-helpers/workspace.js";
|
||||
import { captureEnv } from "../test-utils/env.js";
|
||||
import { createThrowingRuntime, readJsonFile } from "./onboard-non-interactive.test-helpers.js";
|
||||
import { createThrowingRuntime } from "./onboard-non-interactive.test-helpers.js";
|
||||
import type { installGatewayDaemonNonInteractive } from "./onboard-non-interactive/local/daemon-install.js";
|
||||
|
||||
const gatewayClientCalls: Array<{
|
||||
url?: string;
|
||||
token?: string;
|
||||
password?: string;
|
||||
onHelloOk?: (hello: { features?: { methods?: string[] } }) => void;
|
||||
onClose?: (code: number, reason: string) => void;
|
||||
}> = [];
|
||||
const ensureWorkspaceAndSessionsMock = vi.fn(async (..._args: unknown[]) => {});
|
||||
const testConfigStore = new Map<string, OpenClawConfig>();
|
||||
type InstallGatewayDaemonResult = Awaited<ReturnType<typeof installGatewayDaemonNonInteractive>>;
|
||||
const installGatewayDaemonNonInteractiveMock = vi.hoisted(() =>
|
||||
vi.fn(async (): Promise<InstallGatewayDaemonResult> => ({ installed: true })),
|
||||
@@ -60,25 +53,20 @@ function resolveTestConfigPath() {
|
||||
return path.join(stateDir, "openclaw.json");
|
||||
}
|
||||
|
||||
function readTestConfig<T = OpenClawConfig>(): T {
|
||||
return (testConfigStore.get(resolveTestConfigPath()) ?? {}) as T;
|
||||
}
|
||||
|
||||
vi.mock("../config/io.js", () => ({
|
||||
createConfigIO: () => ({
|
||||
configPath: resolveTestConfigPath(),
|
||||
}),
|
||||
loadConfig: () => {
|
||||
try {
|
||||
return JSON.parse(nodeFs.readFileSync(resolveTestConfigPath(), "utf-8"));
|
||||
} catch (err) {
|
||||
if ((err as NodeJS.ErrnoException).code === "ENOENT") {
|
||||
return {};
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
},
|
||||
loadConfig: () => testConfigStore.get(resolveTestConfigPath()) ?? {},
|
||||
readConfigFileSnapshot: async () => {
|
||||
const configPath = resolveTestConfigPath();
|
||||
try {
|
||||
const raw = await fs.readFile(configPath, "utf-8");
|
||||
const config = JSON.parse(raw);
|
||||
const config = testConfigStore.get(configPath);
|
||||
if (config) {
|
||||
const raw = `${JSON.stringify(config, null, 2)}\n`;
|
||||
return {
|
||||
exists: true,
|
||||
valid: true,
|
||||
@@ -87,58 +75,25 @@ vi.mock("../config/io.js", () => ({
|
||||
raw,
|
||||
hash: "test-config-hash",
|
||||
};
|
||||
} catch (err) {
|
||||
if ((err as NodeJS.ErrnoException).code !== "ENOENT") {
|
||||
throw err;
|
||||
}
|
||||
return {
|
||||
exists: false,
|
||||
valid: true,
|
||||
config: {},
|
||||
sourceConfig: {},
|
||||
raw: null,
|
||||
hash: undefined,
|
||||
};
|
||||
}
|
||||
return {
|
||||
exists: false,
|
||||
valid: true,
|
||||
config: {},
|
||||
sourceConfig: {},
|
||||
raw: null,
|
||||
hash: undefined,
|
||||
};
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock("../config/config.js", () => ({
|
||||
replaceConfigFile: async ({ nextConfig }: { nextConfig: OpenClawConfig }) => {
|
||||
const configPath = resolveTestConfigPath();
|
||||
await fs.mkdir(path.dirname(configPath), { recursive: true });
|
||||
await fs.writeFile(configPath, `${JSON.stringify(nextConfig, null, 2)}\n`, "utf-8");
|
||||
testConfigStore.set(resolveTestConfigPath(), nextConfig);
|
||||
},
|
||||
resolveGatewayPort: (cfg: OpenClawConfig) => cfg.gateway?.port ?? 18789,
|
||||
}));
|
||||
|
||||
vi.mock("../gateway/client.js", () => ({
|
||||
GatewayClient: class {
|
||||
params: {
|
||||
url?: string;
|
||||
token?: string;
|
||||
password?: string;
|
||||
onHelloOk?: (hello: { features?: { methods?: string[] } }) => void;
|
||||
};
|
||||
constructor(params: {
|
||||
url?: string;
|
||||
token?: string;
|
||||
password?: string;
|
||||
onHelloOk?: (hello: { features?: { methods?: string[] } }) => void;
|
||||
}) {
|
||||
this.params = params;
|
||||
gatewayClientCalls.push(params);
|
||||
}
|
||||
async request() {
|
||||
return { ok: true };
|
||||
}
|
||||
start() {
|
||||
queueMicrotask(() => this.params.onHelloOk?.({ features: { methods: ["health"] } }));
|
||||
}
|
||||
stop() {}
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock("./onboard-helpers.js", () => {
|
||||
const normalizeGatewayTokenInput = (value: unknown): string => {
|
||||
if (typeof value !== "string") {
|
||||
@@ -184,18 +139,13 @@ vi.mock("../daemon/diagnostics.js", () => ({
|
||||
}));
|
||||
|
||||
let runNonInteractiveSetup: typeof import("./onboard-non-interactive.js").runNonInteractiveSetup;
|
||||
let resolveStateConfigPath: typeof import("../config/paths.js").resolveConfigPath;
|
||||
let callGateway: typeof import("../gateway/call.js").callGateway | undefined;
|
||||
let resolveInstallDaemonGatewayHealthTiming: typeof import("./onboard-non-interactive/local.js").resolveInstallDaemonGatewayHealthTiming;
|
||||
|
||||
async function loadGatewayOnboardModules(): Promise<void> {
|
||||
vi.resetModules();
|
||||
({ runNonInteractiveSetup } = await import("./onboard-non-interactive.js"));
|
||||
({ resolveConfigPath: resolveStateConfigPath } = await import("../config/paths.js"));
|
||||
}
|
||||
|
||||
async function loadCallGateway(): Promise<typeof import("../gateway/call.js").callGateway> {
|
||||
callGateway ??= (await import("../gateway/call.js")).callGateway;
|
||||
return callGateway;
|
||||
({ resolveInstallDaemonGatewayHealthTiming } =
|
||||
await import("./onboard-non-interactive/local.js"));
|
||||
}
|
||||
|
||||
function getPseudoPort(base: number): number {
|
||||
@@ -273,15 +223,6 @@ async function runLocalDaemonSetup(stateDir: string, runtimeEnv: RuntimeEnv = ru
|
||||
await runNonInteractiveSetup(createLocalDaemonSetupOptions(stateDir), runtimeEnv);
|
||||
}
|
||||
|
||||
async function withMockedPlatform<T>(platform: NodeJS.Platform, run: () => Promise<T>): Promise<T> {
|
||||
const platformSpy = vi.spyOn(process, "platform", "get").mockReturnValue(platform);
|
||||
try {
|
||||
return await run();
|
||||
} finally {
|
||||
platformSpy.mockRestore();
|
||||
}
|
||||
}
|
||||
|
||||
function mockGatewayReachableWithCapturedTimeouts() {
|
||||
let capturedDeadlineMs: number | undefined;
|
||||
let capturedProbeTimeoutMs: number | undefined;
|
||||
@@ -359,10 +300,6 @@ describe("onboard (non-interactive): gateway and remote auth", () => {
|
||||
await loadGatewayOnboardModules();
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
gatewayClientCalls.length = 0;
|
||||
});
|
||||
|
||||
afterAll(async () => {
|
||||
if (tempHome) {
|
||||
await fs.rm(tempHome, { recursive: true, force: true });
|
||||
@@ -372,12 +309,12 @@ describe("onboard (non-interactive): gateway and remote auth", () => {
|
||||
|
||||
afterEach(() => {
|
||||
waitForGatewayReachableMock = undefined;
|
||||
testConfigStore.clear();
|
||||
installGatewayDaemonNonInteractiveMock.mockClear();
|
||||
healthCommandMock.mockClear();
|
||||
gatewayServiceMock.isLoaded.mockClear();
|
||||
gatewayServiceMock.readRuntime.mockClear();
|
||||
readLastGatewayErrorLineMock.mockClear();
|
||||
gatewayClientCalls.length = 0;
|
||||
});
|
||||
|
||||
it("writes gateway token auth into config", async () => {
|
||||
@@ -401,12 +338,11 @@ describe("onboard (non-interactive): gateway and remote auth", () => {
|
||||
runtime,
|
||||
);
|
||||
|
||||
const configPath = resolveStateConfigPath(process.env, stateDir);
|
||||
const cfg = await readJsonFile<{
|
||||
const cfg = readTestConfig<{
|
||||
gateway?: { mode?: string; auth?: { mode?: string; token?: string } };
|
||||
agents?: { defaults?: { workspace?: string } };
|
||||
tools?: { profile?: string };
|
||||
}>(configPath);
|
||||
}>();
|
||||
|
||||
expect(cfg?.agents?.defaults?.workspace).toBe(workspace);
|
||||
expect(cfg?.gateway?.mode).toBe("local");
|
||||
@@ -416,37 +352,8 @@ describe("onboard (non-interactive): gateway and remote auth", () => {
|
||||
});
|
||||
}, 60_000);
|
||||
|
||||
it("keeps gateway.mode=local on the install-daemon onboarding path", async () => {
|
||||
await withStateDir("state-install-daemon-local-mode-", async (stateDir) => {
|
||||
const workspace = path.join(stateDir, "openclaw");
|
||||
|
||||
await runNonInteractiveSetup(
|
||||
{
|
||||
nonInteractive: true,
|
||||
mode: "local",
|
||||
workspace,
|
||||
authChoice: "skip",
|
||||
skipSkills: true,
|
||||
skipHealth: true,
|
||||
installDaemon: true,
|
||||
gatewayBind: "loopback",
|
||||
},
|
||||
runtime,
|
||||
);
|
||||
|
||||
const configPath = resolveStateConfigPath(process.env, stateDir);
|
||||
const cfg = await readJsonFile<{
|
||||
gateway?: { mode?: string; bind?: string };
|
||||
}>(configPath);
|
||||
|
||||
expect(cfg?.gateway?.mode).toBe("local");
|
||||
expect(cfg?.gateway?.bind).toBe("loopback");
|
||||
expect(installGatewayDaemonNonInteractiveMock).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
}, 60_000);
|
||||
|
||||
it("writes gateway.remote url/token and callGateway uses them", async () => {
|
||||
await withStateDir("state-remote-", async (stateDir) => {
|
||||
it("writes gateway.remote url/token", async () => {
|
||||
await withStateDir("state-remote-", async (_stateDir) => {
|
||||
const port = getPseudoPort(30_000);
|
||||
const token = "tok_remote_123";
|
||||
await runNonInteractiveSetup(
|
||||
@@ -461,20 +368,13 @@ describe("onboard (non-interactive): gateway and remote auth", () => {
|
||||
runtime,
|
||||
);
|
||||
|
||||
const cfg = await readJsonFile<{
|
||||
const cfg = readTestConfig<{
|
||||
gateway?: { mode?: string; remote?: { url?: string; token?: string } };
|
||||
}>(resolveStateConfigPath(process.env, stateDir));
|
||||
}>();
|
||||
|
||||
expect(cfg.gateway?.mode).toBe("remote");
|
||||
expect(cfg.gateway?.remote?.url).toBe(`ws://127.0.0.1:${port}`);
|
||||
expect(cfg.gateway?.remote?.token).toBe(token);
|
||||
|
||||
gatewayClientCalls.length = 0;
|
||||
const health = await (await loadCallGateway())({ method: "health" });
|
||||
expect(health?.ok).toBe(true);
|
||||
const lastCall = gatewayClientCalls[gatewayClientCalls.length - 1];
|
||||
expect(lastCall?.url).toBe(`ws://127.0.0.1:${port}`);
|
||||
expect(lastCall?.token).toBe(token);
|
||||
});
|
||||
}, 60_000);
|
||||
|
||||
@@ -511,33 +411,25 @@ describe("onboard (non-interactive): gateway and remote auth", () => {
|
||||
|
||||
await runLocalDaemonSetup(stateDir);
|
||||
|
||||
const cfg = readTestConfig<{
|
||||
gateway?: { mode?: string; bind?: string };
|
||||
}>();
|
||||
|
||||
expect(cfg?.gateway?.mode).toBe("local");
|
||||
expect(cfg?.gateway?.bind).toBe("loopback");
|
||||
expect(installGatewayDaemonNonInteractiveMock).toHaveBeenCalledTimes(1);
|
||||
expect(captured.deadlineMs).toBe(45_000);
|
||||
expect(captured.probeTimeoutMs).toBe(10_000);
|
||||
});
|
||||
}, 60_000);
|
||||
|
||||
it("uses longer Windows health timeouts when daemon install was requested", async () => {
|
||||
await withStateDir("state-local-daemon-health-win-", async (stateDir) => {
|
||||
const captured = mockGatewayReachableWithCapturedTimeouts();
|
||||
|
||||
await withMockedPlatform("win32", async () => {
|
||||
await runLocalDaemonSetup(stateDir);
|
||||
});
|
||||
|
||||
expect(installGatewayDaemonNonInteractiveMock).toHaveBeenCalledTimes(1);
|
||||
expect(captured.deadlineMs).toBe(90_000);
|
||||
expect(captured.probeTimeoutMs).toBe(15_000);
|
||||
expect(healthCommandMock).toHaveBeenCalledTimes(1);
|
||||
expect(healthCommandMock).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
json: false,
|
||||
timeoutMs: 90_000,
|
||||
}),
|
||||
runtime,
|
||||
);
|
||||
it("uses longer Windows health timings for daemon install probes", () => {
|
||||
expect(resolveInstallDaemonGatewayHealthTiming("win32")).toEqual({
|
||||
deadlineMs: 90_000,
|
||||
probeTimeoutMs: 15_000,
|
||||
healthCommandTimeoutMs: 90_000,
|
||||
});
|
||||
}, 60_000);
|
||||
});
|
||||
|
||||
it("emits a daemon-install failure when Linux user systemd is unavailable", async () => {
|
||||
await withStateDir("state-local-daemon-install-json-fail-", async (stateDir) => {
|
||||
@@ -654,14 +546,13 @@ describe("onboard (non-interactive): gateway and remote auth", () => {
|
||||
runtime,
|
||||
);
|
||||
|
||||
const configPath = resolveStateConfigPath(process.env, stateDir);
|
||||
const cfg = await readJsonFile<{
|
||||
const cfg = readTestConfig<{
|
||||
gateway?: {
|
||||
bind?: string;
|
||||
port?: number;
|
||||
auth?: { mode?: string; token?: string };
|
||||
};
|
||||
}>(configPath);
|
||||
}>();
|
||||
|
||||
expect(cfg.gateway?.bind).toBe("lan");
|
||||
expect(cfg.gateway?.port).toBe(port);
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -14,7 +14,6 @@ import {
|
||||
waitForGatewayReachable,
|
||||
} from "../onboard-helpers.js";
|
||||
import type { OnboardOptions } from "../onboard-types.js";
|
||||
import { inferAuthChoiceFromFlags } from "./local/auth-choice-inference.js";
|
||||
import { applyNonInteractiveGatewayConfig } from "./local/gateway-config.js";
|
||||
import {
|
||||
type GatewayHealthFailureDiagnostics,
|
||||
@@ -32,12 +31,14 @@ const WINDOWS_INSTALL_DAEMON_HEALTH_PROBE_TIMEOUT_MS = 15_000;
|
||||
const INSTALL_DAEMON_HEALTH_COMMAND_TIMEOUT_MS = 10_000;
|
||||
const WINDOWS_INSTALL_DAEMON_HEALTH_COMMAND_TIMEOUT_MS = 90_000;
|
||||
|
||||
function resolveInstallDaemonGatewayHealthTiming(): {
|
||||
export function resolveInstallDaemonGatewayHealthTiming(
|
||||
platform: NodeJS.Platform = process.platform,
|
||||
): {
|
||||
deadlineMs: number;
|
||||
probeTimeoutMs: number;
|
||||
healthCommandTimeoutMs: number;
|
||||
} {
|
||||
if (process.platform === "win32") {
|
||||
if (platform === "win32") {
|
||||
return {
|
||||
deadlineMs: WINDOWS_INSTALL_DAEMON_HEALTH_DEADLINE_MS,
|
||||
probeTimeoutMs: WINDOWS_INSTALL_DAEMON_HEALTH_PROBE_TIMEOUT_MS,
|
||||
@@ -136,12 +137,14 @@ export async function runNonInteractiveLocalSetup(params: {
|
||||
|
||||
let nextConfig: OpenClawConfig = applyLocalSetupWorkspaceConfig(baseConfig, workspaceDir);
|
||||
|
||||
const inferredAuthChoice = inferAuthChoiceFromFlags(opts, {
|
||||
config: nextConfig,
|
||||
workspaceDir,
|
||||
env: process.env,
|
||||
});
|
||||
if (!opts.authChoice && inferredAuthChoice.matches.length > 1) {
|
||||
const inferredAuthChoice = opts.authChoice
|
||||
? undefined
|
||||
: (await import("./local/auth-choice-inference.js")).inferAuthChoiceFromFlags(opts, {
|
||||
config: nextConfig,
|
||||
workspaceDir,
|
||||
env: process.env,
|
||||
});
|
||||
if (!opts.authChoice && inferredAuthChoice && inferredAuthChoice.matches.length > 1) {
|
||||
runtime.error(
|
||||
[
|
||||
"Multiple API key flags were provided for non-interactive setup.",
|
||||
@@ -152,7 +155,7 @@ export async function runNonInteractiveLocalSetup(params: {
|
||||
runtime.exit(1);
|
||||
return;
|
||||
}
|
||||
const authChoice = opts.authChoice ?? inferredAuthChoice.choice ?? "skip";
|
||||
const authChoice = opts.authChoice ?? inferredAuthChoice?.choice ?? "skip";
|
||||
if (authChoice !== "skip") {
|
||||
const { applyNonInteractiveAuthChoice } = await import("./local/auth-choice.js");
|
||||
const nextConfigAfterAuth = await applyNonInteractiveAuthChoice({
|
||||
|
||||
@@ -16,7 +16,7 @@ import {
|
||||
CustomApiError,
|
||||
parseNonInteractiveCustomApiFlags,
|
||||
resolveCustomProviderId,
|
||||
} from "../../onboard-custom.js";
|
||||
} from "../../onboard-custom-config.js";
|
||||
import type { AuthChoice, OnboardOptions } from "../../onboard-types.js";
|
||||
import { resolveNonInteractiveApiKey } from "../api-keys.js";
|
||||
import { applyNonInteractivePluginProviderChoice } from "./auth-choice.plugin-providers.js";
|
||||
|
||||
@@ -295,121 +295,68 @@ describe("setupSearch", () => {
|
||||
expect(result).toBe(cfg);
|
||||
});
|
||||
|
||||
it("sets provider and key for perplexity", async () => {
|
||||
const cfg: OpenClawConfig = {};
|
||||
const { prompter } = createPrompter({
|
||||
selectValue: "perplexity",
|
||||
textValue: "pplx-test-key",
|
||||
});
|
||||
const result = await setupSearch(cfg, runtime, prompter);
|
||||
expect(result.tools?.web?.search?.provider).toBe("perplexity");
|
||||
expect(pluginWebSearchApiKey(result, "perplexity")).toBe("pplx-test-key");
|
||||
expect(result.tools?.web?.search?.enabled).toBe(true);
|
||||
expect(result.plugins?.entries?.perplexity?.enabled).toBe(true);
|
||||
});
|
||||
it("sets provider keys and enables plugin entries", async () => {
|
||||
const cases = [
|
||||
{ provider: "perplexity", pluginId: "perplexity", key: "pplx-test-key" },
|
||||
{ provider: "brave", pluginId: "brave", key: "BSA-test-key" },
|
||||
{ provider: "firecrawl", pluginId: "firecrawl", key: "fc-test-key" },
|
||||
{ provider: "grok", pluginId: "xai", key: "xai-test" },
|
||||
{ provider: "tavily", pluginId: "tavily", key: "tvly-test-key" },
|
||||
{
|
||||
provider: "gemini",
|
||||
pluginId: "google",
|
||||
key: "AIza-test",
|
||||
textMessage: "Google Gemini API key",
|
||||
},
|
||||
];
|
||||
|
||||
it("sets provider and key for brave", async () => {
|
||||
const cfg: OpenClawConfig = {};
|
||||
const { prompter } = createPrompter({
|
||||
selectValue: "brave",
|
||||
textValue: "BSA-test-key",
|
||||
});
|
||||
const result = await setupSearch(cfg, runtime, prompter);
|
||||
expect(result.tools?.web?.search?.provider).toBe("brave");
|
||||
expect(result.tools?.web?.search?.enabled).toBe(true);
|
||||
expect(pluginWebSearchApiKey(result, "brave")).toBe("BSA-test-key");
|
||||
expect(result.plugins?.entries?.brave?.enabled).toBe(true);
|
||||
});
|
||||
for (const entry of cases) {
|
||||
const cfg: OpenClawConfig = {};
|
||||
const { prompter } = createPrompter({
|
||||
selectValue: entry.provider,
|
||||
textValue: entry.key,
|
||||
});
|
||||
const result = await setupSearch(cfg, runtime, prompter);
|
||||
expect(result.tools?.web?.search?.provider).toBe(entry.provider);
|
||||
expect(result.tools?.web?.search?.enabled).toBe(true);
|
||||
expect(pluginWebSearchApiKey(result, entry.pluginId)).toBe(entry.key);
|
||||
expect(result.plugins?.entries?.[entry.pluginId]?.enabled).toBe(true);
|
||||
if (entry.textMessage) {
|
||||
expect(prompter.text).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ message: entry.textMessage }),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
it("sets provider and key for gemini", async () => {
|
||||
const cfg: OpenClawConfig = {};
|
||||
const { prompter } = createPrompter({
|
||||
selectValue: "gemini",
|
||||
textValue: "AIza-test",
|
||||
});
|
||||
const result = await setupSearch(cfg, runtime, prompter);
|
||||
expect(result.tools?.web?.search?.provider).toBe("gemini");
|
||||
expect(result.tools?.web?.search?.enabled).toBe(true);
|
||||
expect(pluginWebSearchApiKey(result, "google")).toBe("AIza-test");
|
||||
expect(result.plugins?.entries?.google?.enabled).toBe(true);
|
||||
expect(prompter.text).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
message: "Google Gemini API key",
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("sets provider and key for firecrawl and enables the plugin", async () => {
|
||||
const cfg: OpenClawConfig = {};
|
||||
const { prompter } = createPrompter({
|
||||
selectValue: "firecrawl",
|
||||
textValue: "fc-test-key",
|
||||
});
|
||||
const result = await setupSearch(cfg, runtime, prompter);
|
||||
expect(result.tools?.web?.search?.provider).toBe("firecrawl");
|
||||
expect(result.tools?.web?.search?.enabled).toBe(true);
|
||||
expect(pluginWebSearchApiKey(result, "firecrawl")).toBe("fc-test-key");
|
||||
expect(result.plugins?.entries?.firecrawl?.enabled).toBe(true);
|
||||
});
|
||||
|
||||
it("re-enables firecrawl and persists its plugin config when selected from disabled state", async () => {
|
||||
const cfg = createDisabledFirecrawlConfig();
|
||||
const { prompter } = createPrompter({
|
||||
selectValue: "firecrawl",
|
||||
textValue: "fc-disabled-key",
|
||||
});
|
||||
const result = await setupSearch(cfg, runtime, prompter);
|
||||
expect(result.tools?.web?.search?.provider).toBe("firecrawl");
|
||||
expect(result.tools?.web?.search?.enabled).toBe(true);
|
||||
expect(result.plugins?.entries?.firecrawl?.enabled).toBe(true);
|
||||
expect(readFirecrawlPluginApiKey(result)).toBe("fc-disabled-key");
|
||||
});
|
||||
|
||||
it("sets provider and key for grok", async () => {
|
||||
const cfg: OpenClawConfig = {};
|
||||
const { prompter } = createPrompter({
|
||||
selectValue: "grok",
|
||||
textValue: "xai-test",
|
||||
});
|
||||
const result = await setupSearch(cfg, runtime, prompter);
|
||||
expect(result.tools?.web?.search?.provider).toBe("grok");
|
||||
expect(result.tools?.web?.search?.enabled).toBe(true);
|
||||
expect(pluginWebSearchApiKey(result, "xai")).toBe("xai-test");
|
||||
expect(result.plugins?.entries?.xai?.enabled).toBe(true);
|
||||
});
|
||||
|
||||
it("sets provider and key for kimi", async () => {
|
||||
const cfg: OpenClawConfig = {};
|
||||
const { prompter } = createPrompter({
|
||||
const kimiCfg: OpenClawConfig = {};
|
||||
const { prompter: kimiPrompter } = createPrompter({
|
||||
selectValues: ["kimi", "https://api.moonshot.ai/v1", "__keep__"],
|
||||
textValue: "sk-moonshot",
|
||||
});
|
||||
const result = await setupSearch(cfg, runtime, prompter);
|
||||
const kimiWebSearchConfig = result.plugins?.entries?.moonshot?.config?.webSearch as
|
||||
const kimiResult = await setupSearch(kimiCfg, runtime, kimiPrompter);
|
||||
const kimiWebSearchConfig = kimiResult.plugins?.entries?.moonshot?.config?.webSearch as
|
||||
| {
|
||||
baseUrl?: string;
|
||||
model?: string;
|
||||
}
|
||||
| undefined;
|
||||
expect(result.tools?.web?.search?.provider).toBe("kimi");
|
||||
expect(result.tools?.web?.search?.enabled).toBe(true);
|
||||
expect(pluginWebSearchApiKey(result, "moonshot")).toBe("sk-moonshot");
|
||||
expect(result.plugins?.entries?.moonshot?.enabled).toBe(true);
|
||||
expect(kimiResult.tools?.web?.search?.provider).toBe("kimi");
|
||||
expect(kimiResult.tools?.web?.search?.enabled).toBe(true);
|
||||
expect(pluginWebSearchApiKey(kimiResult, "moonshot")).toBe("sk-moonshot");
|
||||
expect(kimiResult.plugins?.entries?.moonshot?.enabled).toBe(true);
|
||||
expect(kimiWebSearchConfig?.baseUrl).toBe("https://api.moonshot.ai/v1");
|
||||
expect(kimiWebSearchConfig?.model).toBe("kimi-k2.5");
|
||||
});
|
||||
|
||||
it("sets provider and key for tavily and enables the plugin", async () => {
|
||||
const cfg: OpenClawConfig = {};
|
||||
const { prompter } = createPrompter({
|
||||
selectValue: "tavily",
|
||||
textValue: "tvly-test-key",
|
||||
const disabledCfg = createDisabledFirecrawlConfig();
|
||||
const { prompter: disabledPrompter } = createPrompter({
|
||||
selectValue: "firecrawl",
|
||||
textValue: "fc-disabled-key",
|
||||
});
|
||||
const result = await setupSearch(cfg, runtime, prompter);
|
||||
expect(result.tools?.web?.search?.provider).toBe("tavily");
|
||||
expect(result.tools?.web?.search?.enabled).toBe(true);
|
||||
expect(pluginWebSearchApiKey(result, "tavily")).toBe("tvly-test-key");
|
||||
expect(result.plugins?.entries?.tavily?.enabled).toBe(true);
|
||||
const disabledResult = await setupSearch(disabledCfg, runtime, disabledPrompter);
|
||||
expect(disabledResult.tools?.web?.search?.provider).toBe("firecrawl");
|
||||
expect(disabledResult.tools?.web?.search?.enabled).toBe(true);
|
||||
expect(disabledResult.plugins?.entries?.firecrawl?.enabled).toBe(true);
|
||||
expect(readFirecrawlPluginApiKey(disabledResult)).toBe("fc-disabled-key");
|
||||
});
|
||||
|
||||
it("shows missing-key note when no key is provided and no env var", async () => {
|
||||
@@ -441,15 +388,13 @@ describe("setupSearch", () => {
|
||||
);
|
||||
expect(pluginWebSearchApiKey(result, "perplexity")).toBe("existing-key");
|
||||
expect(result.tools?.web?.search?.enabled).toBe(true);
|
||||
});
|
||||
|
||||
it("advanced preserves enabled:false when keeping existing key", async () => {
|
||||
const result = await runBlankPerplexityKeyEntry(
|
||||
const disabledResult = await runBlankPerplexityKeyEntry(
|
||||
"existing-key", // pragma: allowlist secret
|
||||
false,
|
||||
);
|
||||
expect(pluginWebSearchApiKey(result, "perplexity")).toBe("existing-key");
|
||||
expect(result.tools?.web?.search?.enabled).toBe(false);
|
||||
expect(pluginWebSearchApiKey(disabledResult, "perplexity")).toBe("existing-key");
|
||||
expect(disabledResult.tools?.web?.search?.enabled).toBe(false);
|
||||
});
|
||||
|
||||
it("quickstart skips key prompt when config key exists", async () => {
|
||||
@@ -460,17 +405,16 @@ describe("setupSearch", () => {
|
||||
expect(pluginWebSearchApiKey(result, "perplexity")).toBe("stored-pplx-key");
|
||||
expect(result.tools?.web?.search?.enabled).toBe(true);
|
||||
expect(prompter.text).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("quickstart preserves enabled:false when search was intentionally disabled", async () => {
|
||||
const { result, prompter } = await runQuickstartPerplexitySetup(
|
||||
"stored-pplx-key", // pragma: allowlist secret
|
||||
false,
|
||||
);
|
||||
expect(result.tools?.web?.search?.provider).toBe("perplexity");
|
||||
expect(pluginWebSearchApiKey(result, "perplexity")).toBe("stored-pplx-key");
|
||||
expect(result.tools?.web?.search?.enabled).toBe(false);
|
||||
expect(prompter.text).not.toHaveBeenCalled();
|
||||
const { result: disabledResult, prompter: disabledPrompter } =
|
||||
await runQuickstartPerplexitySetup(
|
||||
"stored-pplx-key", // pragma: allowlist secret
|
||||
false,
|
||||
);
|
||||
expect(disabledResult.tools?.web?.search?.provider).toBe("perplexity");
|
||||
expect(pluginWebSearchApiKey(disabledResult, "perplexity")).toBe("stored-pplx-key");
|
||||
expect(disabledResult.tools?.web?.search?.enabled).toBe(false);
|
||||
expect(disabledPrompter.text).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("quickstart skips key prompt when canonical plugin config key exists", async () => {
|
||||
@@ -614,15 +558,14 @@ describe("setupSearch", () => {
|
||||
}
|
||||
});
|
||||
|
||||
it("stores env-backed SecretRef when secretInputMode=ref for perplexity", async () => {
|
||||
it("stores env-backed SecretRef for perplexity ref mode", async () => {
|
||||
const originalPerplexity = process.env.PERPLEXITY_API_KEY;
|
||||
const originalOpenRouter = process.env.OPENROUTER_API_KEY;
|
||||
delete process.env.PERPLEXITY_API_KEY;
|
||||
delete process.env.OPENROUTER_API_KEY;
|
||||
const cfg: OpenClawConfig = {};
|
||||
try {
|
||||
const { prompter } = createPrompter({ selectValue: "perplexity" });
|
||||
const result = await setupSearch(cfg, runtime, prompter, {
|
||||
const result = await setupSearch({}, runtime, prompter, {
|
||||
secretInputMode: "ref", // pragma: allowlist secret
|
||||
});
|
||||
expect(result.tools?.web?.search?.provider).toBe("perplexity");
|
||||
@@ -632,37 +575,18 @@ describe("setupSearch", () => {
|
||||
id: "PERPLEXITY_API_KEY", // pragma: allowlist secret
|
||||
});
|
||||
expect(prompter.text).not.toHaveBeenCalled();
|
||||
} finally {
|
||||
if (originalPerplexity === undefined) {
|
||||
delete process.env.PERPLEXITY_API_KEY;
|
||||
} else {
|
||||
process.env.PERPLEXITY_API_KEY = originalPerplexity;
|
||||
}
|
||||
if (originalOpenRouter === undefined) {
|
||||
delete process.env.OPENROUTER_API_KEY;
|
||||
} else {
|
||||
process.env.OPENROUTER_API_KEY = originalOpenRouter;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("prefers detected OPENROUTER_API_KEY SecretRef for perplexity ref mode", async () => {
|
||||
const originalPerplexity = process.env.PERPLEXITY_API_KEY;
|
||||
const originalOpenRouter = process.env.OPENROUTER_API_KEY;
|
||||
delete process.env.PERPLEXITY_API_KEY;
|
||||
process.env.OPENROUTER_API_KEY = "sk-or-test";
|
||||
const cfg: OpenClawConfig = {};
|
||||
try {
|
||||
const { prompter } = createPrompter({ selectValue: "perplexity" });
|
||||
const result = await setupSearch(cfg, runtime, prompter, {
|
||||
process.env.OPENROUTER_API_KEY = "sk-or-test";
|
||||
const { prompter: openRouterPrompter } = createPrompter({ selectValue: "perplexity" });
|
||||
const openRouterResult = await setupSearch({}, runtime, openRouterPrompter, {
|
||||
secretInputMode: "ref", // pragma: allowlist secret
|
||||
});
|
||||
expect(pluginWebSearchApiKey(result, "perplexity")).toEqual({
|
||||
expect(pluginWebSearchApiKey(openRouterResult, "perplexity")).toEqual({
|
||||
source: "env",
|
||||
provider: "default",
|
||||
id: "OPENROUTER_API_KEY", // pragma: allowlist secret
|
||||
});
|
||||
expect(prompter.text).not.toHaveBeenCalled();
|
||||
expect(openRouterPrompter.text).not.toHaveBeenCalled();
|
||||
} finally {
|
||||
if (originalPerplexity === undefined) {
|
||||
delete process.env.PERPLEXITY_API_KEY;
|
||||
@@ -677,39 +601,27 @@ describe("setupSearch", () => {
|
||||
}
|
||||
});
|
||||
|
||||
it("stores env-backed SecretRef when secretInputMode=ref for brave", async () => {
|
||||
const cfg: OpenClawConfig = {};
|
||||
const { prompter } = createPrompter({ selectValue: "brave" });
|
||||
const result = await setupSearch(cfg, runtime, prompter, {
|
||||
secretInputMode: "ref", // pragma: allowlist secret
|
||||
});
|
||||
expect(result.tools?.web?.search?.provider).toBe("brave");
|
||||
expect(pluginWebSearchApiKey(result, "brave")).toEqual({
|
||||
source: "env",
|
||||
provider: "default",
|
||||
id: "BRAVE_API_KEY",
|
||||
});
|
||||
expect(result.plugins?.entries?.brave?.enabled).toBe(true);
|
||||
expect(prompter.text).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("stores env-backed SecretRef when secretInputMode=ref for tavily", async () => {
|
||||
it("stores env-backed SecretRefs for simple providers", async () => {
|
||||
const original = process.env.TAVILY_API_KEY;
|
||||
delete process.env.TAVILY_API_KEY;
|
||||
const cfg: OpenClawConfig = {};
|
||||
try {
|
||||
const { prompter } = createPrompter({ selectValue: "tavily" });
|
||||
const result = await setupSearch(cfg, runtime, prompter, {
|
||||
secretInputMode: "ref", // pragma: allowlist secret
|
||||
});
|
||||
expect(result.tools?.web?.search?.provider).toBe("tavily");
|
||||
expect(pluginWebSearchApiKey(result, "tavily")).toEqual({
|
||||
source: "env",
|
||||
provider: "default",
|
||||
id: "TAVILY_API_KEY",
|
||||
});
|
||||
expect(result.plugins?.entries?.tavily?.enabled).toBe(true);
|
||||
expect(prompter.text).not.toHaveBeenCalled();
|
||||
for (const entry of [
|
||||
{ provider: "brave", pluginId: "brave", env: "BRAVE_API_KEY" },
|
||||
{ provider: "tavily", pluginId: "tavily", env: "TAVILY_API_KEY" },
|
||||
]) {
|
||||
const { prompter } = createPrompter({ selectValue: entry.provider });
|
||||
const result = await setupSearch({}, runtime, prompter, {
|
||||
secretInputMode: "ref", // pragma: allowlist secret
|
||||
});
|
||||
expect(result.tools?.web?.search?.provider).toBe(entry.provider);
|
||||
expect(pluginWebSearchApiKey(result, entry.pluginId)).toEqual({
|
||||
source: "env",
|
||||
provider: "default",
|
||||
id: entry.env,
|
||||
});
|
||||
expect(result.plugins?.entries?.[entry.pluginId]?.enabled).toBe(true);
|
||||
expect(prompter.text).not.toHaveBeenCalled();
|
||||
}
|
||||
} finally {
|
||||
if (original === undefined) {
|
||||
delete process.env.TAVILY_API_KEY;
|
||||
|
||||
@@ -13,7 +13,7 @@ export {
|
||||
} from "../plugins/status.js";
|
||||
export { getTerminalTableWidth, renderTable } from "../terminal/table.js";
|
||||
export { theme } from "../terminal/theme.js";
|
||||
export { formatHealthChannelLines } from "./health.js";
|
||||
export { formatHealthChannelLines } from "./health-format.js";
|
||||
export { groupChannelIssuesByChannel } from "./status-all/channel-issues.js";
|
||||
export {
|
||||
buildStatusChannelsTableRows,
|
||||
|
||||
@@ -56,7 +56,7 @@ function loadStatusNodeModeModule() {
|
||||
return statusNodeModeModulePromise;
|
||||
}
|
||||
|
||||
function resolvePairingRecoveryContext(params: {
|
||||
export function resolvePairingRecoveryContext(params: {
|
||||
error?: string | null;
|
||||
closeReason?: string | null;
|
||||
}): { requestId: string | null } | null {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import {
|
||||
applyStatusScanDefaults,
|
||||
createStatusMemorySearchConfig,
|
||||
@@ -19,8 +19,7 @@ let originalForceStderr: boolean;
|
||||
let loggingStateRef: typeof import("../logging/state.js").loggingState;
|
||||
let scanStatusJsonFast: typeof import("./status.scan.fast-json.js").scanStatusJsonFast;
|
||||
|
||||
beforeEach(async () => {
|
||||
vi.clearAllMocks();
|
||||
function configureFastJsonStatus() {
|
||||
applyStatusScanDefaults(mocks, {
|
||||
sourceConfig: createStatusMemorySearchConfig(),
|
||||
resolvedConfig: createStatusMemorySearchConfig(),
|
||||
@@ -31,8 +30,17 @@ beforeEach(async () => {
|
||||
mocks.resolveMemorySearchConfig.mockReturnValue({
|
||||
store: { path: "/tmp/main.sqlite" },
|
||||
});
|
||||
}
|
||||
|
||||
beforeAll(async () => {
|
||||
configureFastJsonStatus();
|
||||
({ scanStatusJsonFast } = await loadStatusScanModuleForTest(mocks, { fastJson: true }));
|
||||
({ loggingState: loggingStateRef } = await import("../logging/state.js"));
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
configureFastJsonStatus();
|
||||
originalForceStderr = loggingStateRef.forceConsoleToStderr;
|
||||
loggingStateRef.forceConsoleToStderr = false;
|
||||
});
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import {
|
||||
applyStatusScanDefaults,
|
||||
createStatusMemorySearchConfig,
|
||||
@@ -20,11 +20,15 @@ let originalForceStderr: boolean;
|
||||
let loggingStateRef: typeof import("../logging/state.js").loggingState;
|
||||
let scanStatus: typeof import("./status.scan.js").scanStatus;
|
||||
|
||||
beforeEach(async () => {
|
||||
vi.clearAllMocks();
|
||||
beforeAll(async () => {
|
||||
configureScanStatus();
|
||||
({ scanStatus } = await loadStatusScanModuleForTest(mocks));
|
||||
({ loggingState: loggingStateRef } = await import("../logging/state.js"));
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
configureScanStatus();
|
||||
originalForceStderr = loggingStateRef.forceConsoleToStderr;
|
||||
loggingStateRef.forceConsoleToStderr = false;
|
||||
});
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import type { Mock } from "vitest";
|
||||
import { afterAll, afterEach, beforeAll, describe, expect, it, vi } from "vitest";
|
||||
import { GatewaySecretRefUnavailableError } from "../gateway/credentials.js";
|
||||
import type { PluginCompatibilityNotice } from "../plugins/status.js";
|
||||
import { createCompatibilityNotice } from "../plugins/status.test-helpers.js";
|
||||
import { captureEnv } from "../test-utils/env.js";
|
||||
@@ -77,15 +76,8 @@ function createErrorChannelPlugin(params: { id: string; label: string; docsPath:
|
||||
}
|
||||
|
||||
async function withUnknownUsageStore(run: () => Promise<void>) {
|
||||
const originalLoadSessionStore = mocks.loadSessionStore.getMockImplementation();
|
||||
mocks.loadSessionStore.mockReturnValue(createUnknownUsageSessionStore());
|
||||
try {
|
||||
await run();
|
||||
} finally {
|
||||
if (originalLoadSessionStore) {
|
||||
mocks.loadSessionStore.mockImplementation(originalLoadSessionStore);
|
||||
}
|
||||
}
|
||||
await run();
|
||||
}
|
||||
|
||||
function getRuntimeLogs() {
|
||||
@@ -640,7 +632,10 @@ vi.mock("../gateway/call.js", () => ({
|
||||
}) => {
|
||||
const token = params.config?.gateway?.auth?.token;
|
||||
if (token && typeof token === "object" && "source" in token) {
|
||||
throw new GatewaySecretRefUnavailableError("gateway.auth.token");
|
||||
throw Object.assign(new Error("gateway.auth.token unavailable"), {
|
||||
name: "GatewaySecretRefUnavailableError",
|
||||
path: "gateway.auth.token",
|
||||
});
|
||||
}
|
||||
const envToken = process.env.OPENCLAW_GATEWAY_TOKEN?.trim();
|
||||
return envToken ? { token: envToken } : {};
|
||||
@@ -776,7 +771,7 @@ vi.mock("./status-runtime-shared.ts", () => ({
|
||||
),
|
||||
}));
|
||||
|
||||
import { statusCommand } from "./status.command.js";
|
||||
import { resolvePairingRecoveryContext, statusCommand } from "./status.command.js";
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
@@ -917,8 +912,6 @@ describe("statusCommand", () => {
|
||||
inconsistent_timestamps: 0,
|
||||
},
|
||||
});
|
||||
mocks.hasPotentialConfiguredChannels.mockReset();
|
||||
mocks.hasPotentialConfiguredChannels.mockReturnValue(true);
|
||||
mocks.runSecurityAudit.mockReset();
|
||||
mocks.runSecurityAudit.mockResolvedValue(createDefaultSecurityAuditResult());
|
||||
mocks.resolveGatewayService.mockReset();
|
||||
@@ -959,7 +952,7 @@ describe("statusCommand", () => {
|
||||
(runtime.error as Mock<(...args: unknown[]) => void>).mockClear();
|
||||
});
|
||||
|
||||
it("prints JSON when requested", async () => {
|
||||
it("prints JSON and includes security audit only when all is requested", async () => {
|
||||
mocks.hasPotentialConfiguredChannels.mockReturnValue(false);
|
||||
mocks.buildPluginCompatibilityNotices.mockReturnValue([
|
||||
createCompatibilityNotice({ pluginId: "legacy-plugin", code: "legacy-before-agent-start" }),
|
||||
@@ -995,16 +988,13 @@ describe("statusCommand", () => {
|
||||
}),
|
||||
);
|
||||
expect(mocks.runSecurityAudit).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("includes security audit in JSON when all is requested", async () => {
|
||||
mocks.hasPotentialConfiguredChannels.mockReturnValue(false);
|
||||
|
||||
runtimeLogMock.mockClear();
|
||||
await statusCommand({ json: true, all: true }, runtime as never);
|
||||
|
||||
const payload = JSON.parse(String(runtimeLogMock.mock.calls[0]?.[0]));
|
||||
expect(payload.securityAudit.summary.critical).toBe(1);
|
||||
expect(payload.securityAudit.summary.warn).toBe(1);
|
||||
const allPayload = JSON.parse(String(runtimeLogMock.mock.calls[0]?.[0]));
|
||||
expect(allPayload.securityAudit.summary.critical).toBe(1);
|
||||
expect(allPayload.securityAudit.summary.warn).toBe(1);
|
||||
expect(mocks.runSecurityAudit).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
includeFilesystem: true,
|
||||
@@ -1025,18 +1015,11 @@ describe("statusCommand", () => {
|
||||
});
|
||||
});
|
||||
|
||||
it("prints unknown usage in formatted output when totalTokens is missing", async () => {
|
||||
await withUnknownUsageStore(async () => {
|
||||
const logs = await runStatusAndGetLogs();
|
||||
expect(logs.some((line) => line.includes("unknown/") && line.includes("(?%)"))).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
it("prints formatted lines otherwise", async () => {
|
||||
it("prints formatted lines with verbose cache details", async () => {
|
||||
mocks.buildPluginCompatibilityNotices.mockReturnValue([
|
||||
createCompatibilityNotice({ pluginId: "legacy-plugin", code: "legacy-before-agent-start" }),
|
||||
]);
|
||||
const logs = await runStatusAndGetLogs();
|
||||
const logs = await runStatusAndGetLogs({ verbose: true });
|
||||
for (const token of [
|
||||
"OpenClaw status",
|
||||
"Overview",
|
||||
@@ -1072,10 +1055,6 @@ describe("statusCommand", () => {
|
||||
line.includes("openclaw --profile isolated status --all"),
|
||||
),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
it("shows explicit cache details in verbose session output", async () => {
|
||||
const logs = await runStatusAndGetLogs({ verbose: true });
|
||||
expect(logs.some((line) => line.includes("Cache"))).toBe(true);
|
||||
expect(logs.some((line) => line.includes("40% hit"))).toBe(true);
|
||||
expect(logs.some((line) => line.includes("read 2.0k"))).toBe(true);
|
||||
@@ -1122,8 +1101,7 @@ describe("statusCommand", () => {
|
||||
expect(joined).toContain("tasks maintenance --apply");
|
||||
});
|
||||
|
||||
it("caps cached percentage at the prompt-token denominator for legacy session totals", async () => {
|
||||
const originalLoadSessionStore = mocks.loadSessionStore.getMockImplementation();
|
||||
it("uses prompt-side denominator for cached percentages", async () => {
|
||||
mocks.loadSessionStore.mockReturnValue({
|
||||
"+1000": {
|
||||
...createDefaultSessionStoreEntry(),
|
||||
@@ -1133,19 +1111,10 @@ describe("statusCommand", () => {
|
||||
totalTokens: 1_000,
|
||||
},
|
||||
});
|
||||
try {
|
||||
const logs = await runStatusAndGetLogs();
|
||||
expect(logs.some((line) => line.includes("100% cached"))).toBe(true);
|
||||
expect(logs.some((line) => line.includes("120% cached"))).toBe(false);
|
||||
} finally {
|
||||
if (originalLoadSessionStore) {
|
||||
mocks.loadSessionStore.mockImplementation(originalLoadSessionStore);
|
||||
}
|
||||
}
|
||||
});
|
||||
const logs = await runStatusAndGetLogs();
|
||||
expect(logs.some((line) => line.includes("100% cached"))).toBe(true);
|
||||
expect(logs.some((line) => line.includes("120% cached"))).toBe(false);
|
||||
|
||||
it("uses prompt-side tokens for cached percentage when they differ from totalTokens", async () => {
|
||||
const originalLoadSessionStore = mocks.loadSessionStore.getMockImplementation();
|
||||
mocks.loadSessionStore.mockReturnValue({
|
||||
"+1000": {
|
||||
...createDefaultSessionStoreEntry(),
|
||||
@@ -1155,15 +1124,9 @@ describe("statusCommand", () => {
|
||||
totalTokens: 5_000,
|
||||
},
|
||||
});
|
||||
try {
|
||||
const logs = await runStatusAndGetLogs();
|
||||
expect(logs.some((line) => line.includes("67% cached"))).toBe(true);
|
||||
expect(logs.some((line) => line.includes("40% cached"))).toBe(false);
|
||||
} finally {
|
||||
if (originalLoadSessionStore) {
|
||||
mocks.loadSessionStore.mockImplementation(originalLoadSessionStore);
|
||||
}
|
||||
}
|
||||
const promptSideLogs = await runStatusAndGetLogs();
|
||||
expect(promptSideLogs.some((line) => line.includes("67% cached"))).toBe(true);
|
||||
expect(promptSideLogs.some((line) => line.includes("40% cached"))).toBe(false);
|
||||
});
|
||||
|
||||
it("shows node-only gateway info when no local gateway service is installed", async () => {
|
||||
@@ -1283,60 +1246,45 @@ describe("statusCommand", () => {
|
||||
expect(joined).toMatch(/WARN/);
|
||||
});
|
||||
|
||||
it.each([
|
||||
{
|
||||
name: "prints requestId-aware recovery guidance when gateway pairing is required",
|
||||
error: "connect failed: pairing required (requestId: req-123)",
|
||||
closeReason: "pairing required (requestId: req-123)",
|
||||
includes: ["devices approve req-123"],
|
||||
excludes: [],
|
||||
},
|
||||
{
|
||||
name: "prints fallback recovery guidance when pairing requestId is unavailable",
|
||||
error: "connect failed: pairing required",
|
||||
closeReason: "connect failed",
|
||||
includes: [],
|
||||
excludes: ["devices approve req-"],
|
||||
},
|
||||
{
|
||||
name: "does not render unsafe requestId content into approval command hints",
|
||||
error: "connect failed: pairing required (requestId: req-123;rm -rf /)",
|
||||
closeReason: "pairing required (requestId: req-123;rm -rf /)",
|
||||
includes: [],
|
||||
excludes: ["devices approve req-123;rm -rf /"],
|
||||
},
|
||||
])("$name", async ({ error, closeReason, includes, excludes }) => {
|
||||
it("prints safe gateway pairing recovery guidance", async () => {
|
||||
expect(
|
||||
resolvePairingRecoveryContext({
|
||||
error: "connect failed: pairing required (requestId: req-123)",
|
||||
closeReason: "pairing required (requestId: req-123)",
|
||||
}),
|
||||
).toEqual({ requestId: "req-123" });
|
||||
expect(
|
||||
resolvePairingRecoveryContext({
|
||||
error: "connect failed: pairing required",
|
||||
closeReason: "connect failed",
|
||||
}),
|
||||
).toEqual({ requestId: null });
|
||||
expect(
|
||||
resolvePairingRecoveryContext({
|
||||
error: "connect failed: pairing required (requestId: req-123;rm -rf /)",
|
||||
closeReason: "pairing required (requestId: req-123;rm -rf /)",
|
||||
}),
|
||||
).toEqual({ requestId: null });
|
||||
expect(
|
||||
resolvePairingRecoveryContext({
|
||||
error: "connect failed: pairing required",
|
||||
closeReason: "pairing required (requestId: req-close-456)",
|
||||
}),
|
||||
).toEqual({ requestId: "req-close-456" });
|
||||
|
||||
mocks.loadConfig.mockReturnValue({
|
||||
session: {},
|
||||
channels: { whatsapp: { allowFrom: ["*"] } },
|
||||
});
|
||||
mockProbeGatewayResult({
|
||||
error,
|
||||
close: { code: 1008, reason: closeReason },
|
||||
error: "connect failed: pairing required (requestId: req-123)",
|
||||
close: { code: 1008, reason: "pairing required (requestId: req-123)" },
|
||||
});
|
||||
const joined = await runStatusAndGetJoinedLogs();
|
||||
expect(joined).toContain("Gateway pairing approval required.");
|
||||
expect(joined).toContain("devices approve req-123");
|
||||
expect(joined).toContain("devices approve --latest");
|
||||
expect(joined).toContain("devices list");
|
||||
for (const expected of includes) {
|
||||
expect(joined).toContain(expected);
|
||||
}
|
||||
for (const blocked of excludes) {
|
||||
expect(joined).not.toContain(blocked);
|
||||
}
|
||||
});
|
||||
|
||||
it("extracts requestId from close reason when error text omits it", async () => {
|
||||
mocks.loadConfig.mockReturnValue({
|
||||
session: {},
|
||||
channels: { whatsapp: { allowFrom: ["*"] } },
|
||||
});
|
||||
mockProbeGatewayResult({
|
||||
error: "connect failed: pairing required",
|
||||
close: { code: 1008, reason: "pairing required (requestId: req-close-456)" },
|
||||
});
|
||||
const joined = await runStatusAndGetJoinedLogs();
|
||||
expect(joined).toContain("devices approve req-close-456");
|
||||
});
|
||||
|
||||
it("includes sessions across agents in JSON output", async () => {
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import type { RuntimeEnv } from "../runtime.js";
|
||||
import { createRunningTaskRun } from "../tasks/task-executor.js";
|
||||
import {
|
||||
createManagedTaskFlow,
|
||||
resetTaskFlowRegistryForTests,
|
||||
} from "../tasks/task-flow-registry.js";
|
||||
import {
|
||||
createTaskRecord,
|
||||
resetTaskRegistryDeliveryRuntimeForTests,
|
||||
resetTaskRegistryForTests,
|
||||
} from "../tasks/task-registry.js";
|
||||
@@ -55,16 +55,17 @@ describe("tasks commands", () => {
|
||||
resetTaskFlowRegistryForTests({ persist: false });
|
||||
});
|
||||
|
||||
it("keeps tasks audit JSON stable while adding TaskFlow summary fields", async () => {
|
||||
it("keeps audit JSON stable and sorts combined findings before limiting", async () => {
|
||||
await withTaskCommandStateDir(async () => {
|
||||
const now = Date.now();
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(now - 40 * 60_000);
|
||||
createRunningTaskRun({
|
||||
createTaskRecord({
|
||||
runtime: "cli",
|
||||
ownerKey: "agent:main:main",
|
||||
scopeKind: "session",
|
||||
runId: "task-stale-queued",
|
||||
status: "running",
|
||||
task: "Inspect issue backlog",
|
||||
});
|
||||
vi.setSystemTime(now);
|
||||
@@ -95,22 +96,7 @@ describe("tasks commands", () => {
|
||||
expect(payload.summary.taskFlows.byCode.stale_waiting).toBe(1);
|
||||
expect(payload.summary.taskFlows.byCode.missing_linked_tasks).toBe(1);
|
||||
expect(payload.summary.combined.total).toBe(3);
|
||||
});
|
||||
});
|
||||
|
||||
it("sorts combined audit findings before applying the limit", async () => {
|
||||
await withTaskCommandStateDir(async () => {
|
||||
const now = Date.now();
|
||||
vi.useFakeTimers();
|
||||
vi.setSystemTime(now - 40 * 60_000);
|
||||
createRunningTaskRun({
|
||||
runtime: "cli",
|
||||
ownerKey: "agent:main:main",
|
||||
scopeKind: "session",
|
||||
runId: "task-stale-queued",
|
||||
task: "Queue audit",
|
||||
});
|
||||
vi.setSystemTime(now);
|
||||
const runningFlow = createManagedTaskFlow({
|
||||
ownerKey: "agent:main:main",
|
||||
controllerId: "tests/tasks-command",
|
||||
@@ -120,15 +106,17 @@ describe("tasks commands", () => {
|
||||
updatedAt: now - 45 * 60_000,
|
||||
});
|
||||
|
||||
const runtime = createRuntime();
|
||||
await tasksAuditCommand({ json: true, limit: 1 }, runtime);
|
||||
const limitedRuntime = createRuntime();
|
||||
await tasksAuditCommand({ json: true, limit: 1 }, limitedRuntime);
|
||||
|
||||
const payload = JSON.parse(String(vi.mocked(runtime.log).mock.calls[0]?.[0])) as {
|
||||
const limitedPayload = JSON.parse(
|
||||
String(vi.mocked(limitedRuntime.log).mock.calls[0]?.[0]),
|
||||
) as {
|
||||
findings: Array<{ kind: string; code: string; token?: string }>;
|
||||
};
|
||||
|
||||
expect(payload.findings).toHaveLength(1);
|
||||
expect(payload.findings[0]).toMatchObject({
|
||||
expect(limitedPayload.findings).toHaveLength(1);
|
||||
expect(limitedPayload.findings[0]).toMatchObject({
|
||||
kind: "task_flow",
|
||||
code: "stale_running",
|
||||
token: runningFlow.flowId,
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import { loadConfig } from "../config/config.js";
|
||||
import { info } from "../globals.js";
|
||||
import type { RuntimeEnv } from "../runtime.js";
|
||||
import { normalizeOptionalString } from "../shared/string-coerce.js";
|
||||
import {
|
||||
@@ -45,6 +43,13 @@ const DELIVERY_PAD = 14;
|
||||
const ID_PAD = 10;
|
||||
const RUN_PAD = 10;
|
||||
|
||||
const info = theme.info;
|
||||
|
||||
async function loadTaskCancelConfig() {
|
||||
const { loadConfig } = await import("../config/config.js");
|
||||
return loadConfig();
|
||||
}
|
||||
|
||||
function truncate(value: string, maxChars: number) {
|
||||
if (value.length <= maxChars) {
|
||||
return value;
|
||||
@@ -387,7 +392,7 @@ export async function tasksCancelCommand(opts: { lookup: string }, runtime: Runt
|
||||
return;
|
||||
}
|
||||
const result = await cancelTaskById({
|
||||
cfg: loadConfig(),
|
||||
cfg: await loadTaskCancelConfig(),
|
||||
taskId: task.taskId,
|
||||
});
|
||||
if (!result.found) {
|
||||
|
||||
Reference in New Issue
Block a user