mirror of
https://github.com/openclaw/openclaw.git
synced 2026-04-29 10:02:04 +00:00
Merge branch 'main' into vincentkoc-code/slack-block-kit-interactions
This commit is contained in:
@@ -7,7 +7,52 @@ import { createInMemorySessionStore } from "./session.js";
|
||||
import { AcpGatewayAgent } from "./translator.js";
|
||||
import { createAcpConnection, createAcpGateway } from "./translator.test-helpers.js";
|
||||
|
||||
const TEST_SESSION_ID = "session-1";
|
||||
const TEST_SESSION_KEY = "agent:main:main";
|
||||
const TEST_PROMPT = {
|
||||
sessionId: TEST_SESSION_ID,
|
||||
prompt: [{ type: "text", text: "hello" }],
|
||||
_meta: {},
|
||||
} as unknown as PromptRequest;
|
||||
|
||||
describe("acp prompt cwd prefix", () => {
|
||||
const createStopAfterSendSpy = () =>
|
||||
vi.fn(async (method: string) => {
|
||||
if (method === "chat.send") {
|
||||
throw new Error("stop-after-send");
|
||||
}
|
||||
return {};
|
||||
});
|
||||
|
||||
async function runPromptAndCaptureRequest(
|
||||
options: {
|
||||
cwd?: string;
|
||||
prefixCwd?: boolean;
|
||||
provenanceMode?: "meta" | "meta+receipt";
|
||||
} = {},
|
||||
) {
|
||||
const sessionStore = createInMemorySessionStore();
|
||||
sessionStore.createSession({
|
||||
sessionId: TEST_SESSION_ID,
|
||||
sessionKey: TEST_SESSION_KEY,
|
||||
cwd: options.cwd ?? path.join(os.homedir(), "openclaw-test"),
|
||||
});
|
||||
|
||||
const requestSpy = createStopAfterSendSpy();
|
||||
const agent = new AcpGatewayAgent(
|
||||
createAcpConnection(),
|
||||
createAcpGateway(requestSpy as unknown as GatewayClient["request"]),
|
||||
{
|
||||
sessionStore,
|
||||
prefixCwd: options.prefixCwd,
|
||||
provenanceMode: options.provenanceMode,
|
||||
},
|
||||
);
|
||||
|
||||
await expect(agent.prompt(TEST_PROMPT)).rejects.toThrow("stop-after-send");
|
||||
return requestSpy;
|
||||
}
|
||||
|
||||
async function runPromptWithCwd(cwd: string) {
|
||||
const pinnedHome = os.homedir();
|
||||
const previousOpenClawHome = process.env.OPENCLAW_HOME;
|
||||
@@ -15,37 +60,8 @@ describe("acp prompt cwd prefix", () => {
|
||||
delete process.env.OPENCLAW_HOME;
|
||||
process.env.HOME = pinnedHome;
|
||||
|
||||
const sessionStore = createInMemorySessionStore();
|
||||
sessionStore.createSession({
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:main",
|
||||
cwd,
|
||||
});
|
||||
|
||||
const requestSpy = vi.fn(async (method: string) => {
|
||||
if (method === "chat.send") {
|
||||
throw new Error("stop-after-send");
|
||||
}
|
||||
return {};
|
||||
});
|
||||
const agent = new AcpGatewayAgent(
|
||||
createAcpConnection(),
|
||||
createAcpGateway(requestSpy as unknown as GatewayClient["request"]),
|
||||
{
|
||||
sessionStore,
|
||||
prefixCwd: true,
|
||||
},
|
||||
);
|
||||
|
||||
try {
|
||||
await expect(
|
||||
agent.prompt({
|
||||
sessionId: "session-1",
|
||||
prompt: [{ type: "text", text: "hello" }],
|
||||
_meta: {},
|
||||
} as unknown as PromptRequest),
|
||||
).rejects.toThrow("stop-after-send");
|
||||
return requestSpy;
|
||||
return await runPromptAndCaptureRequest({ cwd, prefixCwd: true });
|
||||
} finally {
|
||||
if (previousOpenClawHome === undefined) {
|
||||
delete process.env.OPENCLAW_HOME;
|
||||
@@ -83,42 +99,13 @@ describe("acp prompt cwd prefix", () => {
|
||||
});
|
||||
|
||||
it("injects system provenance metadata when enabled", async () => {
|
||||
const sessionStore = createInMemorySessionStore();
|
||||
sessionStore.createSession({
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:main",
|
||||
cwd: path.join(os.homedir(), "openclaw-test"),
|
||||
});
|
||||
|
||||
const requestSpy = vi.fn(async (method: string) => {
|
||||
if (method === "chat.send") {
|
||||
throw new Error("stop-after-send");
|
||||
}
|
||||
return {};
|
||||
});
|
||||
const agent = new AcpGatewayAgent(
|
||||
createAcpConnection(),
|
||||
createAcpGateway(requestSpy as unknown as GatewayClient["request"]),
|
||||
{
|
||||
sessionStore,
|
||||
provenanceMode: "meta",
|
||||
},
|
||||
);
|
||||
|
||||
await expect(
|
||||
agent.prompt({
|
||||
sessionId: "session-1",
|
||||
prompt: [{ type: "text", text: "hello" }],
|
||||
_meta: {},
|
||||
} as unknown as PromptRequest),
|
||||
).rejects.toThrow("stop-after-send");
|
||||
|
||||
const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta" });
|
||||
expect(requestSpy).toHaveBeenCalledWith(
|
||||
"chat.send",
|
||||
expect.objectContaining({
|
||||
systemInputProvenance: {
|
||||
kind: "external_user",
|
||||
originSessionId: "session-1",
|
||||
originSessionId: TEST_SESSION_ID,
|
||||
sourceChannel: "acp",
|
||||
sourceTool: "openclaw_acp",
|
||||
},
|
||||
@@ -129,42 +116,13 @@ describe("acp prompt cwd prefix", () => {
|
||||
});
|
||||
|
||||
it("injects a system provenance receipt when requested", async () => {
|
||||
const sessionStore = createInMemorySessionStore();
|
||||
sessionStore.createSession({
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:main",
|
||||
cwd: path.join(os.homedir(), "openclaw-test"),
|
||||
});
|
||||
|
||||
const requestSpy = vi.fn(async (method: string) => {
|
||||
if (method === "chat.send") {
|
||||
throw new Error("stop-after-send");
|
||||
}
|
||||
return {};
|
||||
});
|
||||
const agent = new AcpGatewayAgent(
|
||||
createAcpConnection(),
|
||||
createAcpGateway(requestSpy as unknown as GatewayClient["request"]),
|
||||
{
|
||||
sessionStore,
|
||||
provenanceMode: "meta+receipt",
|
||||
},
|
||||
);
|
||||
|
||||
await expect(
|
||||
agent.prompt({
|
||||
sessionId: "session-1",
|
||||
prompt: [{ type: "text", text: "hello" }],
|
||||
_meta: {},
|
||||
} as unknown as PromptRequest),
|
||||
).rejects.toThrow("stop-after-send");
|
||||
|
||||
const requestSpy = await runPromptAndCaptureRequest({ provenanceMode: "meta+receipt" });
|
||||
expect(requestSpy).toHaveBeenCalledWith(
|
||||
"chat.send",
|
||||
expect.objectContaining({
|
||||
systemInputProvenance: {
|
||||
kind: "external_user",
|
||||
originSessionId: "session-1",
|
||||
originSessionId: TEST_SESSION_ID,
|
||||
sourceChannel: "acp",
|
||||
sourceTool: "openclaw_acp",
|
||||
},
|
||||
@@ -182,14 +140,14 @@ describe("acp prompt cwd prefix", () => {
|
||||
expect(requestSpy).toHaveBeenCalledWith(
|
||||
"chat.send",
|
||||
expect.objectContaining({
|
||||
systemProvenanceReceipt: expect.stringContaining("originSessionId=session-1"),
|
||||
systemProvenanceReceipt: expect.stringContaining(`originSessionId=${TEST_SESSION_ID}`),
|
||||
}),
|
||||
{ expectFinal: true },
|
||||
);
|
||||
expect(requestSpy).toHaveBeenCalledWith(
|
||||
"chat.send",
|
||||
expect.objectContaining({
|
||||
systemProvenanceReceipt: expect.stringContaining("targetSession=agent:main:main"),
|
||||
systemProvenanceReceipt: expect.stringContaining(`targetSession=${TEST_SESSION_KEY}`),
|
||||
}),
|
||||
{ expectFinal: true },
|
||||
);
|
||||
|
||||
@@ -1,10 +1,5 @@
|
||||
import type { AgentToolResult } from "@mariozechner/pi-agent-core";
|
||||
import { loadConfig } from "../config/config.js";
|
||||
import { buildExecApprovalUnavailableReplyPayload } from "../infra/exec-approval-reply.js";
|
||||
import {
|
||||
hasConfiguredExecApprovalDmRoute,
|
||||
resolveExecApprovalInitiatingSurfaceState,
|
||||
} from "../infra/exec-approval-surface.js";
|
||||
import {
|
||||
addAllowlistEntry,
|
||||
type ExecAsk,
|
||||
@@ -26,7 +21,7 @@ import {
|
||||
registerExecApprovalRequestForHostOrThrow,
|
||||
} from "./bash-tools.exec-approval-request.js";
|
||||
import {
|
||||
createDefaultExecApprovalRequestContext,
|
||||
createAndRegisterDefaultExecApprovalRequest,
|
||||
resolveBaseExecApprovalDecision,
|
||||
resolveApprovalDecisionOrUndefined,
|
||||
resolveExecHostApprovalContext,
|
||||
@@ -149,52 +144,36 @@ export async function processGatewayAllowlist(
|
||||
approvalId,
|
||||
approvalSlug,
|
||||
warningText,
|
||||
expiresAtMs: defaultExpiresAtMs,
|
||||
preResolvedDecision: defaultPreResolvedDecision,
|
||||
} = createDefaultExecApprovalRequestContext({
|
||||
expiresAtMs,
|
||||
preResolvedDecision,
|
||||
initiatingSurface,
|
||||
sentApproverDms,
|
||||
unavailableReason,
|
||||
} = await createAndRegisterDefaultExecApprovalRequest({
|
||||
warnings: params.warnings,
|
||||
approvalRunningNoticeMs: params.approvalRunningNoticeMs,
|
||||
createApprovalSlug,
|
||||
turnSourceChannel: params.turnSourceChannel,
|
||||
turnSourceAccountId: params.turnSourceAccountId,
|
||||
register: async (approvalId) =>
|
||||
await registerExecApprovalRequestForHostOrThrow({
|
||||
approvalId,
|
||||
command: params.command,
|
||||
workdir: params.workdir,
|
||||
host: "gateway",
|
||||
security: hostSecurity,
|
||||
ask: hostAsk,
|
||||
...buildExecApprovalRequesterContext({
|
||||
agentId: params.agentId,
|
||||
sessionKey: params.sessionKey,
|
||||
}),
|
||||
resolvedPath: allowlistEval.segments[0]?.resolution?.resolvedPath,
|
||||
...buildExecApprovalTurnSourceContext(params),
|
||||
}),
|
||||
});
|
||||
const resolvedPath = allowlistEval.segments[0]?.resolution?.resolvedPath;
|
||||
const effectiveTimeout =
|
||||
typeof params.timeoutSec === "number" ? params.timeoutSec : params.defaultTimeoutSec;
|
||||
let expiresAtMs = defaultExpiresAtMs;
|
||||
let preResolvedDecision = defaultPreResolvedDecision;
|
||||
|
||||
// Register first so the returned approval ID is actionable immediately.
|
||||
const registration = await registerExecApprovalRequestForHostOrThrow({
|
||||
approvalId,
|
||||
command: params.command,
|
||||
workdir: params.workdir,
|
||||
host: "gateway",
|
||||
security: hostSecurity,
|
||||
ask: hostAsk,
|
||||
...buildExecApprovalRequesterContext({
|
||||
agentId: params.agentId,
|
||||
sessionKey: params.sessionKey,
|
||||
}),
|
||||
resolvedPath,
|
||||
...buildExecApprovalTurnSourceContext(params),
|
||||
});
|
||||
expiresAtMs = registration.expiresAtMs;
|
||||
preResolvedDecision = registration.finalDecision;
|
||||
const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({
|
||||
channel: params.turnSourceChannel,
|
||||
accountId: params.turnSourceAccountId,
|
||||
});
|
||||
const cfg = loadConfig();
|
||||
const sentApproverDms =
|
||||
(initiatingSurface.kind === "disabled" || initiatingSurface.kind === "unsupported") &&
|
||||
hasConfiguredExecApprovalDmRoute(cfg);
|
||||
const unavailableReason =
|
||||
preResolvedDecision === null
|
||||
? "no-approval-route"
|
||||
: initiatingSurface.kind === "disabled"
|
||||
? "initiating-platform-disabled"
|
||||
: initiatingSurface.kind === "unsupported"
|
||||
? "initiating-platform-unsupported"
|
||||
: null;
|
||||
|
||||
void (async () => {
|
||||
const decision = await resolveApprovalDecisionOrUndefined({
|
||||
|
||||
@@ -1,11 +1,6 @@
|
||||
import crypto from "node:crypto";
|
||||
import type { AgentToolResult } from "@mariozechner/pi-agent-core";
|
||||
import { loadConfig } from "../config/config.js";
|
||||
import { buildExecApprovalUnavailableReplyPayload } from "../infra/exec-approval-reply.js";
|
||||
import {
|
||||
hasConfiguredExecApprovalDmRoute,
|
||||
resolveExecApprovalInitiatingSurfaceState,
|
||||
} from "../infra/exec-approval-surface.js";
|
||||
import {
|
||||
type ExecApprovalsFile,
|
||||
type ExecAsk,
|
||||
@@ -25,7 +20,7 @@ import {
|
||||
registerExecApprovalRequestForHostOrThrow,
|
||||
} from "./bash-tools.exec-approval-request.js";
|
||||
import {
|
||||
createDefaultExecApprovalRequestContext,
|
||||
createAndRegisterDefaultExecApprovalRequest,
|
||||
resolveBaseExecApprovalDecision,
|
||||
resolveApprovalDecisionOrUndefined,
|
||||
resolveExecHostApprovalContext,
|
||||
@@ -225,50 +220,34 @@ export async function executeNodeHostCommand(
|
||||
approvalId,
|
||||
approvalSlug,
|
||||
warningText,
|
||||
expiresAtMs: defaultExpiresAtMs,
|
||||
preResolvedDecision: defaultPreResolvedDecision,
|
||||
} = createDefaultExecApprovalRequestContext({
|
||||
expiresAtMs,
|
||||
preResolvedDecision,
|
||||
initiatingSurface,
|
||||
sentApproverDms,
|
||||
unavailableReason,
|
||||
} = await createAndRegisterDefaultExecApprovalRequest({
|
||||
warnings: params.warnings,
|
||||
approvalRunningNoticeMs: params.approvalRunningNoticeMs,
|
||||
createApprovalSlug,
|
||||
turnSourceChannel: params.turnSourceChannel,
|
||||
turnSourceAccountId: params.turnSourceAccountId,
|
||||
register: async (approvalId) =>
|
||||
await registerExecApprovalRequestForHostOrThrow({
|
||||
approvalId,
|
||||
systemRunPlan: prepared.plan,
|
||||
env: nodeEnv,
|
||||
workdir: runCwd,
|
||||
host: "node",
|
||||
nodeId,
|
||||
security: hostSecurity,
|
||||
ask: hostAsk,
|
||||
...buildExecApprovalRequesterContext({
|
||||
agentId: runAgentId,
|
||||
sessionKey: runSessionKey,
|
||||
}),
|
||||
...buildExecApprovalTurnSourceContext(params),
|
||||
}),
|
||||
});
|
||||
let expiresAtMs = defaultExpiresAtMs;
|
||||
let preResolvedDecision = defaultPreResolvedDecision;
|
||||
|
||||
// Register first so the returned approval ID is actionable immediately.
|
||||
const registration = await registerExecApprovalRequestForHostOrThrow({
|
||||
approvalId,
|
||||
systemRunPlan: prepared.plan,
|
||||
env: nodeEnv,
|
||||
workdir: runCwd,
|
||||
host: "node",
|
||||
nodeId,
|
||||
security: hostSecurity,
|
||||
ask: hostAsk,
|
||||
...buildExecApprovalRequesterContext({
|
||||
agentId: runAgentId,
|
||||
sessionKey: runSessionKey,
|
||||
}),
|
||||
...buildExecApprovalTurnSourceContext(params),
|
||||
});
|
||||
expiresAtMs = registration.expiresAtMs;
|
||||
preResolvedDecision = registration.finalDecision;
|
||||
const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({
|
||||
channel: params.turnSourceChannel,
|
||||
accountId: params.turnSourceAccountId,
|
||||
});
|
||||
const cfg = loadConfig();
|
||||
const sentApproverDms =
|
||||
(initiatingSurface.kind === "disabled" || initiatingSurface.kind === "unsupported") &&
|
||||
hasConfiguredExecApprovalDmRoute(cfg);
|
||||
const unavailableReason =
|
||||
preResolvedDecision === null
|
||||
? "no-approval-route"
|
||||
: initiatingSurface.kind === "disabled"
|
||||
? "initiating-platform-disabled"
|
||||
: initiatingSurface.kind === "unsupported"
|
||||
? "initiating-platform-unsupported"
|
||||
: null;
|
||||
|
||||
void (async () => {
|
||||
const decision = await resolveApprovalDecisionOrUndefined({
|
||||
|
||||
@@ -1,4 +1,10 @@
|
||||
import crypto from "node:crypto";
|
||||
import { loadConfig } from "../config/config.js";
|
||||
import {
|
||||
hasConfiguredExecApprovalDmRoute,
|
||||
type ExecApprovalInitiatingSurfaceState,
|
||||
resolveExecApprovalInitiatingSurfaceState,
|
||||
} from "../infra/exec-approval-surface.js";
|
||||
import {
|
||||
maxAsk,
|
||||
minSecurity,
|
||||
@@ -6,7 +12,10 @@ import {
|
||||
type ExecAsk,
|
||||
type ExecSecurity,
|
||||
} from "../infra/exec-approvals.js";
|
||||
import { resolveRegisteredExecApprovalDecision } from "./bash-tools.exec-approval-request.js";
|
||||
import {
|
||||
type ExecApprovalRegistration,
|
||||
resolveRegisteredExecApprovalDecision,
|
||||
} from "./bash-tools.exec-approval-request.js";
|
||||
import { DEFAULT_APPROVAL_TIMEOUT_MS } from "./bash-tools.exec-runtime.js";
|
||||
|
||||
type ResolvedExecApprovals = ReturnType<typeof resolveExecApprovals>;
|
||||
@@ -28,6 +37,22 @@ export type ExecApprovalRequestState = ExecApprovalPendingState & {
|
||||
noticeSeconds: number;
|
||||
};
|
||||
|
||||
export type ExecApprovalUnavailableReason =
|
||||
| "no-approval-route"
|
||||
| "initiating-platform-disabled"
|
||||
| "initiating-platform-unsupported";
|
||||
|
||||
export type RegisteredExecApprovalRequestContext = {
|
||||
approvalId: string;
|
||||
approvalSlug: string;
|
||||
warningText: string;
|
||||
expiresAtMs: number;
|
||||
preResolvedDecision: string | null | undefined;
|
||||
initiatingSurface: ExecApprovalInitiatingSurfaceState;
|
||||
sentApproverDms: boolean;
|
||||
unavailableReason: ExecApprovalUnavailableReason | null;
|
||||
};
|
||||
|
||||
export function createExecApprovalPendingState(params: {
|
||||
warnings: string[];
|
||||
timeoutMs: number;
|
||||
@@ -158,3 +183,77 @@ export async function resolveApprovalDecisionOrUndefined(params: {
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
export function resolveExecApprovalUnavailableState(params: {
|
||||
turnSourceChannel?: string;
|
||||
turnSourceAccountId?: string;
|
||||
preResolvedDecision: string | null | undefined;
|
||||
}): {
|
||||
initiatingSurface: ExecApprovalInitiatingSurfaceState;
|
||||
sentApproverDms: boolean;
|
||||
unavailableReason: ExecApprovalUnavailableReason | null;
|
||||
} {
|
||||
const initiatingSurface = resolveExecApprovalInitiatingSurfaceState({
|
||||
channel: params.turnSourceChannel,
|
||||
accountId: params.turnSourceAccountId,
|
||||
});
|
||||
const sentApproverDms =
|
||||
(initiatingSurface.kind === "disabled" || initiatingSurface.kind === "unsupported") &&
|
||||
hasConfiguredExecApprovalDmRoute(loadConfig());
|
||||
const unavailableReason =
|
||||
params.preResolvedDecision === null
|
||||
? "no-approval-route"
|
||||
: initiatingSurface.kind === "disabled"
|
||||
? "initiating-platform-disabled"
|
||||
: initiatingSurface.kind === "unsupported"
|
||||
? "initiating-platform-unsupported"
|
||||
: null;
|
||||
return {
|
||||
initiatingSurface,
|
||||
sentApproverDms,
|
||||
unavailableReason,
|
||||
};
|
||||
}
|
||||
|
||||
export async function createAndRegisterDefaultExecApprovalRequest(params: {
|
||||
warnings: string[];
|
||||
approvalRunningNoticeMs: number;
|
||||
createApprovalSlug: (approvalId: string) => string;
|
||||
turnSourceChannel?: string;
|
||||
turnSourceAccountId?: string;
|
||||
register: (approvalId: string) => Promise<ExecApprovalRegistration>;
|
||||
}): Promise<RegisteredExecApprovalRequestContext> {
|
||||
const {
|
||||
approvalId,
|
||||
approvalSlug,
|
||||
warningText,
|
||||
expiresAtMs: defaultExpiresAtMs,
|
||||
preResolvedDecision: defaultPreResolvedDecision,
|
||||
} = createDefaultExecApprovalRequestContext({
|
||||
warnings: params.warnings,
|
||||
approvalRunningNoticeMs: params.approvalRunningNoticeMs,
|
||||
createApprovalSlug: params.createApprovalSlug,
|
||||
});
|
||||
const registration = await params.register(approvalId);
|
||||
const preResolvedDecision = registration.finalDecision;
|
||||
const { initiatingSurface, sentApproverDms, unavailableReason } =
|
||||
resolveExecApprovalUnavailableState({
|
||||
turnSourceChannel: params.turnSourceChannel,
|
||||
turnSourceAccountId: params.turnSourceAccountId,
|
||||
preResolvedDecision,
|
||||
});
|
||||
|
||||
return {
|
||||
approvalId,
|
||||
approvalSlug,
|
||||
warningText,
|
||||
expiresAtMs: registration.expiresAtMs ?? defaultExpiresAtMs,
|
||||
preResolvedDecision:
|
||||
registration.finalDecision === undefined
|
||||
? defaultPreResolvedDecision
|
||||
: registration.finalDecision,
|
||||
initiatingSurface,
|
||||
sentApproverDms,
|
||||
unavailableReason,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -43,6 +43,162 @@ function buildPreparedSystemRunPayload(rawInvokeParams: unknown) {
|
||||
return buildSystemRunPreparePayload(params);
|
||||
}
|
||||
|
||||
function getTestConfigPath() {
|
||||
return path.join(process.env.HOME ?? "", ".openclaw", "openclaw.json");
|
||||
}
|
||||
|
||||
async function writeOpenClawConfig(config: Record<string, unknown>, pretty = false) {
|
||||
const configPath = getTestConfigPath();
|
||||
await fs.mkdir(path.dirname(configPath), { recursive: true });
|
||||
await fs.writeFile(configPath, JSON.stringify(config, null, pretty ? 2 : undefined));
|
||||
}
|
||||
|
||||
async function writeExecApprovalsConfig(config: Record<string, unknown>) {
|
||||
const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json");
|
||||
await fs.mkdir(path.dirname(approvalsPath), { recursive: true });
|
||||
await fs.writeFile(approvalsPath, JSON.stringify(config, null, 2));
|
||||
}
|
||||
|
||||
function acceptedApprovalResponse(params: unknown) {
|
||||
return { status: "accepted", id: (params as { id?: string })?.id };
|
||||
}
|
||||
|
||||
function getResultText(result: { content: Array<{ type?: string; text?: string }> }) {
|
||||
return result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
}
|
||||
|
||||
function expectPendingApprovalText(
|
||||
result: {
|
||||
details: { status?: string };
|
||||
content: Array<{ type?: string; text?: string }>;
|
||||
},
|
||||
options: {
|
||||
command: string;
|
||||
host: "gateway" | "node";
|
||||
nodeId?: string;
|
||||
interactive?: boolean;
|
||||
},
|
||||
) {
|
||||
expect(result.details.status).toBe("approval-pending");
|
||||
const details = result.details as { approvalId: string; approvalSlug: string };
|
||||
const pendingText = getResultText(result);
|
||||
expect(pendingText).toContain(
|
||||
`Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`,
|
||||
);
|
||||
expect(pendingText).toContain(`full ${details.approvalId}`);
|
||||
expect(pendingText).toContain(`Host: ${options.host}`);
|
||||
if (options.nodeId) {
|
||||
expect(pendingText).toContain(`Node: ${options.nodeId}`);
|
||||
}
|
||||
expect(pendingText).toContain(`CWD: ${process.cwd()}`);
|
||||
expect(pendingText).toContain("Command:\n```sh\n");
|
||||
expect(pendingText).toContain(options.command);
|
||||
if (options.interactive) {
|
||||
expect(pendingText).toContain("Mode: foreground (interactive approvals available).");
|
||||
expect(pendingText).toContain("Background mode requires pre-approved policy");
|
||||
}
|
||||
return details;
|
||||
}
|
||||
|
||||
function expectPendingCommandText(
|
||||
result: {
|
||||
details: { status?: string };
|
||||
content: Array<{ type?: string; text?: string }>;
|
||||
},
|
||||
command: string,
|
||||
) {
|
||||
expect(result.details.status).toBe("approval-pending");
|
||||
const text = getResultText(result);
|
||||
expect(text).toContain("Command:\n```sh\n");
|
||||
expect(text).toContain(command);
|
||||
}
|
||||
|
||||
function mockGatewayOkCalls(calls: string[]) {
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
|
||||
calls.push(method);
|
||||
return { ok: true };
|
||||
});
|
||||
}
|
||||
|
||||
function createElevatedAllowlistExecTool() {
|
||||
return createExecTool({
|
||||
ask: "on-miss",
|
||||
security: "allowlist",
|
||||
approvalRunningNoticeMs: 0,
|
||||
elevated: { enabled: true, allowed: true, defaultLevel: "ask" },
|
||||
});
|
||||
}
|
||||
|
||||
async function expectGatewayExecWithoutApproval(options: {
|
||||
config: Record<string, unknown>;
|
||||
command: string;
|
||||
ask?: "always" | "on-miss" | "off";
|
||||
}) {
|
||||
await writeExecApprovalsConfig(options.config);
|
||||
const calls: string[] = [];
|
||||
mockGatewayOkCalls(calls);
|
||||
|
||||
const tool = createExecTool({
|
||||
host: "gateway",
|
||||
ask: options.ask,
|
||||
security: "full",
|
||||
approvalRunningNoticeMs: 0,
|
||||
});
|
||||
|
||||
const result = await tool.execute("call-no-approval", { command: options.command });
|
||||
expect(result.details.status).toBe("completed");
|
||||
expect(calls).not.toContain("exec.approval.request");
|
||||
expect(calls).not.toContain("exec.approval.waitDecision");
|
||||
}
|
||||
|
||||
function mockAcceptedApprovalFlow(options: {
|
||||
onAgent?: (params: Record<string, unknown>) => void;
|
||||
onNodeInvoke?: (params: unknown) => unknown;
|
||||
}) {
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
|
||||
if (method === "exec.approval.request") {
|
||||
return acceptedApprovalResponse(params);
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: "allow-once" };
|
||||
}
|
||||
if (method === "agent" && options.onAgent) {
|
||||
options.onAgent(params as Record<string, unknown>);
|
||||
return { status: "ok" };
|
||||
}
|
||||
if (method === "node.invoke" && options.onNodeInvoke) {
|
||||
return await options.onNodeInvoke(params);
|
||||
}
|
||||
return { ok: true };
|
||||
});
|
||||
}
|
||||
|
||||
function mockPendingApprovalRegistration() {
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
|
||||
if (method === "exec.approval.request") {
|
||||
return { status: "accepted", id: "approval-id" };
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: null };
|
||||
}
|
||||
return { ok: true };
|
||||
});
|
||||
}
|
||||
|
||||
function expectApprovalUnavailableText(result: {
|
||||
details: { status?: string };
|
||||
content: Array<{ type?: string; text?: string }>;
|
||||
}) {
|
||||
expect(result.details.status).toBe("approval-unavailable");
|
||||
const text = result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
expect(text).not.toContain("/approve");
|
||||
expect(text).not.toContain("npm view diver name version description");
|
||||
expect(text).not.toContain("Pending command:");
|
||||
expect(text).not.toContain("Host:");
|
||||
expect(text).not.toContain("CWD:");
|
||||
return text;
|
||||
}
|
||||
|
||||
describe("exec approvals", () => {
|
||||
let previousHome: string | undefined;
|
||||
let previousUserProfile: string | undefined;
|
||||
@@ -81,18 +237,11 @@ describe("exec approvals", () => {
|
||||
let invokeParams: unknown;
|
||||
let agentParams: unknown;
|
||||
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
|
||||
if (method === "exec.approval.request") {
|
||||
return { status: "accepted", id: (params as { id?: string })?.id };
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: "allow-once" };
|
||||
}
|
||||
if (method === "agent") {
|
||||
mockAcceptedApprovalFlow({
|
||||
onAgent: (params) => {
|
||||
agentParams = params;
|
||||
return { status: "ok" };
|
||||
}
|
||||
if (method === "node.invoke") {
|
||||
},
|
||||
onNodeInvoke: (params) => {
|
||||
const invoke = params as { command?: string };
|
||||
if (invoke.command === "system.run.prepare") {
|
||||
return buildPreparedSystemRunPayload(params);
|
||||
@@ -101,8 +250,7 @@ describe("exec approvals", () => {
|
||||
invokeParams = params;
|
||||
return { payload: { success: true, stdout: "ok" } };
|
||||
}
|
||||
}
|
||||
return { ok: true };
|
||||
},
|
||||
});
|
||||
|
||||
const tool = createExecTool({
|
||||
@@ -113,19 +261,12 @@ describe("exec approvals", () => {
|
||||
});
|
||||
|
||||
const result = await tool.execute("call1", { command: "ls -la" });
|
||||
expect(result.details.status).toBe("approval-pending");
|
||||
const details = result.details as { approvalId: string; approvalSlug: string };
|
||||
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
expect(pendingText).toContain(
|
||||
`Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`,
|
||||
);
|
||||
expect(pendingText).toContain(`full ${details.approvalId}`);
|
||||
expect(pendingText).toContain("Host: node");
|
||||
expect(pendingText).toContain("Node: node-1");
|
||||
expect(pendingText).toContain(`CWD: ${process.cwd()}`);
|
||||
expect(pendingText).toContain("Command:\n```sh\nls -la\n```");
|
||||
expect(pendingText).toContain("Mode: foreground (interactive approvals available).");
|
||||
expect(pendingText).toContain("Background mode requires pre-approved policy");
|
||||
const details = expectPendingApprovalText(result, {
|
||||
command: "ls -la",
|
||||
host: "node",
|
||||
nodeId: "node-1",
|
||||
interactive: true,
|
||||
});
|
||||
const approvalId = details.approvalId;
|
||||
|
||||
await expect
|
||||
@@ -214,74 +355,28 @@ describe("exec approvals", () => {
|
||||
});
|
||||
|
||||
it("uses exec-approvals ask=off to suppress gateway prompts", async () => {
|
||||
const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json");
|
||||
await fs.mkdir(path.dirname(approvalsPath), { recursive: true });
|
||||
await fs.writeFile(
|
||||
approvalsPath,
|
||||
JSON.stringify(
|
||||
{
|
||||
version: 1,
|
||||
defaults: { security: "full", ask: "off", askFallback: "full" },
|
||||
agents: {
|
||||
main: { security: "full", ask: "off", askFallback: "full" },
|
||||
},
|
||||
await expectGatewayExecWithoutApproval({
|
||||
config: {
|
||||
version: 1,
|
||||
defaults: { security: "full", ask: "off", askFallback: "full" },
|
||||
agents: {
|
||||
main: { security: "full", ask: "off", askFallback: "full" },
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
);
|
||||
|
||||
const calls: string[] = [];
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
|
||||
calls.push(method);
|
||||
return { ok: true };
|
||||
});
|
||||
|
||||
const tool = createExecTool({
|
||||
host: "gateway",
|
||||
},
|
||||
command: "echo ok",
|
||||
ask: "on-miss",
|
||||
security: "full",
|
||||
approvalRunningNoticeMs: 0,
|
||||
});
|
||||
|
||||
const result = await tool.execute("call3b", { command: "echo ok" });
|
||||
expect(result.details.status).toBe("completed");
|
||||
expect(calls).not.toContain("exec.approval.request");
|
||||
expect(calls).not.toContain("exec.approval.waitDecision");
|
||||
});
|
||||
|
||||
it("inherits ask=off from exec-approvals defaults when tool ask is unset", async () => {
|
||||
const approvalsPath = path.join(process.env.HOME ?? "", ".openclaw", "exec-approvals.json");
|
||||
await fs.mkdir(path.dirname(approvalsPath), { recursive: true });
|
||||
await fs.writeFile(
|
||||
approvalsPath,
|
||||
JSON.stringify(
|
||||
{
|
||||
version: 1,
|
||||
defaults: { security: "full", ask: "off", askFallback: "full" },
|
||||
agents: {},
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
);
|
||||
|
||||
const calls: string[] = [];
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
|
||||
calls.push(method);
|
||||
return { ok: true };
|
||||
await expectGatewayExecWithoutApproval({
|
||||
config: {
|
||||
version: 1,
|
||||
defaults: { security: "full", ask: "off", askFallback: "full" },
|
||||
agents: {},
|
||||
},
|
||||
command: "echo ok",
|
||||
});
|
||||
|
||||
const tool = createExecTool({
|
||||
host: "gateway",
|
||||
security: "full",
|
||||
approvalRunningNoticeMs: 0,
|
||||
});
|
||||
|
||||
const result = await tool.execute("call3c", { command: "echo ok" });
|
||||
expect(result.details.status).toBe("completed");
|
||||
expect(calls).not.toContain("exec.approval.request");
|
||||
expect(calls).not.toContain("exec.approval.waitDecision");
|
||||
});
|
||||
|
||||
it("requires approval for elevated ask when allowlist misses", async () => {
|
||||
@@ -296,7 +391,7 @@ describe("exec approvals", () => {
|
||||
if (method === "exec.approval.request") {
|
||||
resolveApproval?.();
|
||||
// Return registration confirmation
|
||||
return { status: "accepted", id: (params as { id?: string })?.id };
|
||||
return acceptedApprovalResponse(params);
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: "deny" };
|
||||
@@ -304,24 +399,10 @@ describe("exec approvals", () => {
|
||||
return { ok: true };
|
||||
});
|
||||
|
||||
const tool = createExecTool({
|
||||
ask: "on-miss",
|
||||
security: "allowlist",
|
||||
approvalRunningNoticeMs: 0,
|
||||
elevated: { enabled: true, allowed: true, defaultLevel: "ask" },
|
||||
});
|
||||
const tool = createElevatedAllowlistExecTool();
|
||||
|
||||
const result = await tool.execute("call4", { command: "echo ok", elevated: true });
|
||||
expect(result.details.status).toBe("approval-pending");
|
||||
const details = result.details as { approvalId: string; approvalSlug: string };
|
||||
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
expect(pendingText).toContain(
|
||||
`Reply with: /approve ${details.approvalSlug} allow-once|allow-always|deny`,
|
||||
);
|
||||
expect(pendingText).toContain(`full ${details.approvalId}`);
|
||||
expect(pendingText).toContain("Host: gateway");
|
||||
expect(pendingText).toContain(`CWD: ${process.cwd()}`);
|
||||
expect(pendingText).toContain("Command:\n```sh\necho ok\n```");
|
||||
expectPendingApprovalText(result, { command: "echo ok", host: "gateway" });
|
||||
await approvalSeen;
|
||||
expect(calls).toContain("exec.approval.request");
|
||||
expect(calls).toContain("exec.approval.waitDecision");
|
||||
@@ -330,18 +411,10 @@ describe("exec approvals", () => {
|
||||
it("starts a direct agent follow-up after approved gateway exec completes", async () => {
|
||||
const agentCalls: Array<Record<string, unknown>> = [];
|
||||
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
|
||||
if (method === "exec.approval.request") {
|
||||
return { status: "accepted", id: (params as { id?: string })?.id };
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: "allow-once" };
|
||||
}
|
||||
if (method === "agent") {
|
||||
agentCalls.push(params as Record<string, unknown>);
|
||||
return { status: "ok" };
|
||||
}
|
||||
return { ok: true };
|
||||
mockAcceptedApprovalFlow({
|
||||
onAgent: (params) => {
|
||||
agentCalls.push(params);
|
||||
},
|
||||
});
|
||||
|
||||
const tool = createExecTool({
|
||||
@@ -388,7 +461,7 @@ describe("exec approvals", () => {
|
||||
if (typeof request.id === "string") {
|
||||
requestIds.push(request.id);
|
||||
}
|
||||
return { status: "accepted", id: request.id };
|
||||
return acceptedApprovalResponse(request);
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
const wait = params as { id?: string };
|
||||
@@ -400,12 +473,7 @@ describe("exec approvals", () => {
|
||||
return { ok: true };
|
||||
});
|
||||
|
||||
const tool = createExecTool({
|
||||
ask: "on-miss",
|
||||
security: "allowlist",
|
||||
approvalRunningNoticeMs: 0,
|
||||
elevated: { enabled: true, allowed: true, defaultLevel: "ask" },
|
||||
});
|
||||
const tool = createElevatedAllowlistExecTool();
|
||||
|
||||
const first = await tool.execute("call-seq-1", {
|
||||
command: "npm view diver --json",
|
||||
@@ -429,7 +497,7 @@ describe("exec approvals", () => {
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method, _opts, params) => {
|
||||
calls.push(method);
|
||||
if (method === "exec.approval.request") {
|
||||
return { status: "accepted", id: (params as { id?: string })?.id };
|
||||
return acceptedApprovalResponse(params);
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: "deny" };
|
||||
@@ -448,11 +516,7 @@ describe("exec approvals", () => {
|
||||
command: "npm view diver --json | jq .name && brew outdated",
|
||||
});
|
||||
|
||||
expect(result.details.status).toBe("approval-pending");
|
||||
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
expect(pendingText).toContain(
|
||||
"Command:\n```sh\nnpm view diver --json | jq .name && brew outdated\n```",
|
||||
);
|
||||
expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated");
|
||||
expect(calls).toContain("exec.approval.request");
|
||||
});
|
||||
|
||||
@@ -480,11 +544,7 @@ describe("exec approvals", () => {
|
||||
command: "npm view diver --json | jq .name && brew outdated",
|
||||
});
|
||||
|
||||
expect(result.details.status).toBe("approval-pending");
|
||||
const pendingText = result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
expect(pendingText).toContain(
|
||||
"Command:\n```sh\nnpm view diver --json | jq .name && brew outdated\n```",
|
||||
);
|
||||
expectPendingCommandText(result, "npm view diver --json | jq .name && brew outdated");
|
||||
expect(calls).toContain("exec.approval.request");
|
||||
});
|
||||
|
||||
@@ -551,30 +611,17 @@ describe("exec approvals", () => {
|
||||
});
|
||||
|
||||
it("returns an unavailable approval message instead of a local /approve prompt when discord exec approvals are disabled", async () => {
|
||||
const configPath = path.join(process.env.HOME ?? "", ".openclaw", "openclaw.json");
|
||||
await fs.mkdir(path.dirname(configPath), { recursive: true });
|
||||
await fs.writeFile(
|
||||
configPath,
|
||||
JSON.stringify({
|
||||
channels: {
|
||||
discord: {
|
||||
enabled: true,
|
||||
execApprovals: { enabled: false },
|
||||
},
|
||||
await writeOpenClawConfig({
|
||||
channels: {
|
||||
discord: {
|
||||
enabled: true,
|
||||
execApprovals: { enabled: false },
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
|
||||
if (method === "exec.approval.request") {
|
||||
return { status: "accepted", id: "approval-id" };
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: null };
|
||||
}
|
||||
return { ok: true };
|
||||
},
|
||||
});
|
||||
|
||||
mockPendingApprovalRegistration();
|
||||
|
||||
const tool = createExecTool({
|
||||
host: "gateway",
|
||||
ask: "always",
|
||||
@@ -588,49 +635,29 @@ describe("exec approvals", () => {
|
||||
command: "npm view diver name version description",
|
||||
});
|
||||
|
||||
expect(result.details.status).toBe("approval-unavailable");
|
||||
const text = result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
const text = expectApprovalUnavailableText(result);
|
||||
expect(text).toContain("chat exec approvals are not enabled on Discord");
|
||||
expect(text).toContain("Web UI or terminal UI");
|
||||
expect(text).not.toContain("/approve");
|
||||
expect(text).not.toContain("npm view diver name version description");
|
||||
expect(text).not.toContain("Pending command:");
|
||||
expect(text).not.toContain("Host:");
|
||||
expect(text).not.toContain("CWD:");
|
||||
});
|
||||
|
||||
it("tells Telegram users that allowed approvers were DMed when Telegram approvals are disabled but Discord DM approvals are enabled", async () => {
|
||||
const configPath = path.join(process.env.HOME ?? "", ".openclaw", "openclaw.json");
|
||||
await fs.mkdir(path.dirname(configPath), { recursive: true });
|
||||
await fs.writeFile(
|
||||
configPath,
|
||||
JSON.stringify(
|
||||
{
|
||||
channels: {
|
||||
telegram: {
|
||||
enabled: true,
|
||||
execApprovals: { enabled: false },
|
||||
},
|
||||
discord: {
|
||||
enabled: true,
|
||||
execApprovals: { enabled: true, approvers: ["123"], target: "dm" },
|
||||
},
|
||||
await writeOpenClawConfig(
|
||||
{
|
||||
channels: {
|
||||
telegram: {
|
||||
enabled: true,
|
||||
execApprovals: { enabled: false },
|
||||
},
|
||||
discord: {
|
||||
enabled: true,
|
||||
execApprovals: { enabled: true, approvers: ["123"], target: "dm" },
|
||||
},
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
},
|
||||
true,
|
||||
);
|
||||
|
||||
vi.mocked(callGatewayTool).mockImplementation(async (method) => {
|
||||
if (method === "exec.approval.request") {
|
||||
return { status: "accepted", id: "approval-id" };
|
||||
}
|
||||
if (method === "exec.approval.waitDecision") {
|
||||
return { decision: null };
|
||||
}
|
||||
return { ok: true };
|
||||
});
|
||||
mockPendingApprovalRegistration();
|
||||
|
||||
const tool = createExecTool({
|
||||
host: "gateway",
|
||||
@@ -645,14 +672,8 @@ describe("exec approvals", () => {
|
||||
command: "npm view diver name version description",
|
||||
});
|
||||
|
||||
expect(result.details.status).toBe("approval-unavailable");
|
||||
const text = result.content.find((part) => part.type === "text")?.text ?? "";
|
||||
const text = expectApprovalUnavailableText(result);
|
||||
expect(text).toContain("Approval required. I sent the allowed approvers DMs.");
|
||||
expect(text).not.toContain("/approve");
|
||||
expect(text).not.toContain("npm view diver name version description");
|
||||
expect(text).not.toContain("Pending command:");
|
||||
expect(text).not.toContain("Host:");
|
||||
expect(text).not.toContain("CWD:");
|
||||
});
|
||||
|
||||
it("denies node obfuscated command when approval request times out", async () => {
|
||||
|
||||
@@ -46,6 +46,20 @@ function expectFallbackUsed(
|
||||
expect(result.attempts[0]?.reason).toBe("rate_limit");
|
||||
}
|
||||
|
||||
function expectPrimarySkippedForReason(
|
||||
result: { result: unknown; attempts: Array<{ reason?: string }> },
|
||||
run: {
|
||||
(...args: unknown[]): unknown;
|
||||
mock: { calls: unknown[][] };
|
||||
},
|
||||
reason: string,
|
||||
) {
|
||||
expect(result.result).toBe("ok");
|
||||
expect(run).toHaveBeenCalledTimes(1);
|
||||
expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5");
|
||||
expect(result.attempts[0]?.reason).toBe(reason);
|
||||
}
|
||||
|
||||
function expectPrimaryProbeSuccess(
|
||||
result: { result: unknown },
|
||||
run: {
|
||||
@@ -183,11 +197,7 @@ describe("runWithModelFallback – probe logic", () => {
|
||||
const run = vi.fn().mockResolvedValue("ok");
|
||||
|
||||
const result = await runPrimaryCandidate(cfg, run);
|
||||
|
||||
expect(result.result).toBe("ok");
|
||||
expect(run).toHaveBeenCalledTimes(1);
|
||||
expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5");
|
||||
expect(result.attempts[0]?.reason).toBe("billing");
|
||||
expectPrimarySkippedForReason(result, run, "billing");
|
||||
});
|
||||
|
||||
it("probes primary model when within 2-min margin of cooldown expiry", async () => {
|
||||
@@ -540,10 +550,6 @@ describe("runWithModelFallback – probe logic", () => {
|
||||
const run = vi.fn().mockResolvedValue("ok");
|
||||
|
||||
const result = await runPrimaryCandidate(cfg, run);
|
||||
|
||||
expect(result.result).toBe("ok");
|
||||
expect(run).toHaveBeenCalledTimes(1);
|
||||
expect(run).toHaveBeenCalledWith("anthropic", "claude-haiku-3-5");
|
||||
expect(result.attempts[0]?.reason).toBe("billing");
|
||||
expectPrimarySkippedForReason(result, run, "billing");
|
||||
});
|
||||
});
|
||||
|
||||
@@ -113,6 +113,92 @@ function createMoonshotConfig(overrides: {
|
||||
};
|
||||
}
|
||||
|
||||
function createOpenAiConfigWithResolvedApiKey(mergeMode = false): OpenClawConfig {
|
||||
return {
|
||||
models: {
|
||||
...(mergeMode ? { mode: "merge" as const } : {}),
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; simulates resolved ${OPENAI_API_KEY}
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "gpt-4.1",
|
||||
name: "GPT-4.1",
|
||||
input: ["text"],
|
||||
reasoning: false,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 128000,
|
||||
maxTokens: 16384,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async function expectOpenAiEnvMarkerApiKey(options?: { seedMergedProvider?: boolean }) {
|
||||
await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => {
|
||||
await withTempHome(async () => {
|
||||
if (options?.seedMergedProvider) {
|
||||
await writeAgentModelsJson({
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret
|
||||
api: "openai-completions",
|
||||
models: [{ id: "gpt-4.1", name: "GPT-4.1", input: ["text"] }],
|
||||
},
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
await ensureOpenClawModelsJson(
|
||||
createOpenAiConfigWithResolvedApiKey(options?.seedMergedProvider),
|
||||
);
|
||||
const result = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
async function expectMoonshotTokenLimits(params: {
|
||||
contextWindow: number;
|
||||
maxTokens: number;
|
||||
expectedContextWindow: number;
|
||||
expectedMaxTokens: number;
|
||||
}) {
|
||||
await withTempHome(async () => {
|
||||
await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => {
|
||||
await ensureOpenClawModelsJson(
|
||||
createMoonshotConfig({
|
||||
contextWindow: params.contextWindow,
|
||||
maxTokens: params.maxTokens,
|
||||
}),
|
||||
);
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<
|
||||
string,
|
||||
{
|
||||
models?: Array<{
|
||||
id: string;
|
||||
contextWindow?: number;
|
||||
maxTokens?: number;
|
||||
}>;
|
||||
}
|
||||
>;
|
||||
}>();
|
||||
const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5");
|
||||
expect(kimi?.contextWindow).toBe(params.expectedContextWindow);
|
||||
expect(kimi?.maxTokens).toBe(params.expectedMaxTokens);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
describe("models-config", () => {
|
||||
it("keeps anthropic api defaults when model entries omit api", async () => {
|
||||
await withTempHome(async () => {
|
||||
@@ -444,131 +530,28 @@ describe("models-config", () => {
|
||||
});
|
||||
|
||||
it("does not persist resolved env var value as plaintext in models.json", async () => {
|
||||
await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => {
|
||||
await withTempHome(async () => {
|
||||
const cfg: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; already resolved by loadConfig
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "gpt-4.1",
|
||||
name: "GPT-4.1",
|
||||
input: ["text"],
|
||||
reasoning: false,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 128000,
|
||||
maxTokens: 16384,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
await ensureOpenClawModelsJson(cfg);
|
||||
const result = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY");
|
||||
});
|
||||
});
|
||||
await expectOpenAiEnvMarkerApiKey();
|
||||
});
|
||||
|
||||
it("replaces stale merged apiKey when config key normalizes to a known env marker", async () => {
|
||||
await withEnvVar("OPENAI_API_KEY", "sk-plaintext-should-not-appear", async () => {
|
||||
await withTempHome(async () => {
|
||||
await writeAgentModelsJson({
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "STALE_AGENT_KEY", // pragma: allowlist secret
|
||||
api: "openai-completions",
|
||||
models: [{ id: "gpt-4.1", name: "GPT-4.1", input: ["text"] }],
|
||||
},
|
||||
},
|
||||
});
|
||||
const cfg: OpenClawConfig = {
|
||||
models: {
|
||||
mode: "merge",
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-plaintext-should-not-appear", // pragma: allowlist secret; simulates resolved ${OPENAI_API_KEY}
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "gpt-4.1",
|
||||
name: "GPT-4.1",
|
||||
input: ["text"],
|
||||
reasoning: false,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 128000,
|
||||
maxTokens: 16384,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
await ensureOpenClawModelsJson(cfg);
|
||||
const result = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(result.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
|
||||
});
|
||||
});
|
||||
await expectOpenAiEnvMarkerApiKey({ seedMergedProvider: true });
|
||||
});
|
||||
|
||||
it("preserves explicit larger token limits when they exceed implicit catalog defaults", async () => {
|
||||
await withTempHome(async () => {
|
||||
await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => {
|
||||
const cfg = createMoonshotConfig({ contextWindow: 350000, maxTokens: 16384 });
|
||||
|
||||
await ensureOpenClawModelsJson(cfg);
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<
|
||||
string,
|
||||
{
|
||||
models?: Array<{
|
||||
id: string;
|
||||
contextWindow?: number;
|
||||
maxTokens?: number;
|
||||
}>;
|
||||
}
|
||||
>;
|
||||
}>();
|
||||
const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5");
|
||||
expect(kimi?.contextWindow).toBe(350000);
|
||||
expect(kimi?.maxTokens).toBe(16384);
|
||||
});
|
||||
await expectMoonshotTokenLimits({
|
||||
contextWindow: 350000,
|
||||
maxTokens: 16384,
|
||||
expectedContextWindow: 350000,
|
||||
expectedMaxTokens: 16384,
|
||||
});
|
||||
});
|
||||
|
||||
it("falls back to implicit token limits when explicit values are invalid", async () => {
|
||||
await withTempHome(async () => {
|
||||
await withEnvVar("MOONSHOT_API_KEY", "sk-moonshot-test", async () => {
|
||||
const cfg = createMoonshotConfig({ contextWindow: 0, maxTokens: -1 });
|
||||
|
||||
await ensureOpenClawModelsJson(cfg);
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<
|
||||
string,
|
||||
{
|
||||
models?: Array<{
|
||||
id: string;
|
||||
contextWindow?: number;
|
||||
maxTokens?: number;
|
||||
}>;
|
||||
}
|
||||
>;
|
||||
}>();
|
||||
const kimi = parsed.providers.moonshot?.models?.find((model) => model.id === "kimi-k2.5");
|
||||
expect(kimi?.contextWindow).toBe(256000);
|
||||
expect(kimi?.maxTokens).toBe(8192);
|
||||
});
|
||||
await expectMoonshotTokenLimits({
|
||||
contextWindow: 0,
|
||||
maxTokens: -1,
|
||||
expectedContextWindow: 256000,
|
||||
expectedMaxTokens: 8192,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,91 +1,82 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import type { ModelDefinitionConfig } from "../config/types.models.js";
|
||||
import { installModelsConfigTestHooks, withModelsTempHome } from "./models-config.e2e-harness.js";
|
||||
import { ensureOpenClawModelsJson } from "./models-config.js";
|
||||
import { readGeneratedModelsJson } from "./models-config.test-utils.js";
|
||||
|
||||
function createGoogleModelsConfig(models: ModelDefinitionConfig[]): OpenClawConfig {
|
||||
return {
|
||||
models: {
|
||||
providers: {
|
||||
google: {
|
||||
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
|
||||
apiKey: "GEMINI_KEY", // pragma: allowlist secret
|
||||
api: "google-generative-ai",
|
||||
models,
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async function expectGeneratedGoogleModelIds(ids: string[]) {
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { models: Array<{ id: string }> }>;
|
||||
}>();
|
||||
expect(parsed.providers.google?.models?.map((model) => model.id)).toEqual(ids);
|
||||
}
|
||||
|
||||
describe("models-config", () => {
|
||||
installModelsConfigTestHooks();
|
||||
|
||||
it("normalizes gemini 3 ids to preview for google providers", async () => {
|
||||
await withModelsTempHome(async () => {
|
||||
const cfg: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
google: {
|
||||
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
|
||||
apiKey: "GEMINI_KEY", // pragma: allowlist secret
|
||||
api: "google-generative-ai",
|
||||
models: [
|
||||
{
|
||||
id: "gemini-3-pro",
|
||||
name: "Gemini 3 Pro",
|
||||
api: "google-generative-ai",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
{
|
||||
id: "gemini-3-flash",
|
||||
name: "Gemini 3 Flash",
|
||||
api: "google-generative-ai",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
const cfg = createGoogleModelsConfig([
|
||||
{
|
||||
id: "gemini-3-pro",
|
||||
name: "Gemini 3 Pro",
|
||||
api: "google-generative-ai",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
};
|
||||
{
|
||||
id: "gemini-3-flash",
|
||||
name: "Gemini 3 Flash",
|
||||
api: "google-generative-ai",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
]);
|
||||
|
||||
await ensureOpenClawModelsJson(cfg);
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { models: Array<{ id: string }> }>;
|
||||
}>();
|
||||
const ids = parsed.providers.google?.models?.map((model) => model.id);
|
||||
expect(ids).toEqual(["gemini-3-pro-preview", "gemini-3-flash-preview"]);
|
||||
await expectGeneratedGoogleModelIds(["gemini-3-pro-preview", "gemini-3-flash-preview"]);
|
||||
});
|
||||
});
|
||||
|
||||
it("normalizes the deprecated google flash preview id to the working preview id", async () => {
|
||||
await withModelsTempHome(async () => {
|
||||
const cfg: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
google: {
|
||||
baseUrl: "https://generativelanguage.googleapis.com/v1beta",
|
||||
apiKey: "GEMINI_KEY", // pragma: allowlist secret
|
||||
api: "google-generative-ai",
|
||||
models: [
|
||||
{
|
||||
id: "gemini-3.1-flash-preview",
|
||||
name: "Gemini 3.1 Flash Preview",
|
||||
api: "google-generative-ai",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
const cfg = createGoogleModelsConfig([
|
||||
{
|
||||
id: "gemini-3.1-flash-preview",
|
||||
name: "Gemini 3.1 Flash Preview",
|
||||
api: "google-generative-ai",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
};
|
||||
]);
|
||||
|
||||
await ensureOpenClawModelsJson(cfg);
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { models: Array<{ id: string }> }>;
|
||||
}>();
|
||||
const ids = parsed.providers.google?.models?.map((model) => model.id);
|
||||
expect(ids).toEqual(["gemini-3-flash-preview"]);
|
||||
await expectGeneratedGoogleModelIds(["gemini-3-flash-preview"]);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -16,47 +16,137 @@ import { readGeneratedModelsJson } from "./models-config.test-utils.js";
|
||||
|
||||
installModelsConfigTestHooks();
|
||||
|
||||
function createOpenAiApiKeySourceConfig(): OpenClawConfig {
|
||||
return {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function createOpenAiApiKeyRuntimeConfig(): OpenClawConfig {
|
||||
return {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function createOpenAiHeaderSourceConfig(): OpenClawConfig {
|
||||
return {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: {
|
||||
source: "env",
|
||||
provider: "default",
|
||||
id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret
|
||||
},
|
||||
"X-Tenant-Token": {
|
||||
source: "file",
|
||||
provider: "vault",
|
||||
id: "/providers/openai/tenantToken",
|
||||
},
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function createOpenAiHeaderRuntimeConfig(): OpenClawConfig {
|
||||
return {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: "Bearer runtime-openai-token",
|
||||
"X-Tenant-Token": "runtime-tenant-token",
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function withGatewayTokenMode(config: OpenClawConfig): OpenClawConfig {
|
||||
return {
|
||||
...config,
|
||||
gateway: {
|
||||
auth: {
|
||||
mode: "token",
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async function withGeneratedModelsFromRuntimeSource(
|
||||
params: {
|
||||
sourceConfig: OpenClawConfig;
|
||||
runtimeConfig: OpenClawConfig;
|
||||
candidateConfig?: OpenClawConfig;
|
||||
},
|
||||
runAssertions: () => Promise<void>,
|
||||
) {
|
||||
await withTempHome(async () => {
|
||||
try {
|
||||
setRuntimeConfigSnapshot(params.runtimeConfig, params.sourceConfig);
|
||||
await ensureOpenClawModelsJson(params.candidateConfig ?? loadConfig());
|
||||
await runAssertions();
|
||||
} finally {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async function expectGeneratedProviderApiKey(providerId: string, expected: string) {
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(parsed.providers[providerId]?.apiKey).toBe(expected);
|
||||
}
|
||||
|
||||
async function expectGeneratedOpenAiHeaderMarkers() {
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { headers?: Record<string, string> }>;
|
||||
}>();
|
||||
expect(parsed.providers.openai?.headers?.Authorization).toBe(
|
||||
"secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret
|
||||
);
|
||||
expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER);
|
||||
}
|
||||
|
||||
describe("models-config runtime source snapshot", () => {
|
||||
it("uses runtime source snapshot markers when passed the active runtime config", async () => {
|
||||
await withTempHome(async () => {
|
||||
const sourceConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
const runtimeConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
try {
|
||||
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
|
||||
await ensureOpenClawModelsJson(loadConfig());
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
|
||||
} finally {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
}
|
||||
});
|
||||
await withGeneratedModelsFromRuntimeSource(
|
||||
{
|
||||
sourceConfig: createOpenAiApiKeySourceConfig(),
|
||||
runtimeConfig: createOpenAiApiKeyRuntimeConfig(),
|
||||
},
|
||||
async () => expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"), // pragma: allowlist secret
|
||||
);
|
||||
});
|
||||
|
||||
it("uses non-env marker from runtime source snapshot for file refs", async () => {
|
||||
@@ -103,30 +193,8 @@ describe("models-config runtime source snapshot", () => {
|
||||
|
||||
it("projects cloned runtime configs onto source snapshot when preserving provider auth", async () => {
|
||||
await withTempHome(async () => {
|
||||
const sourceConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
const runtimeConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
const sourceConfig = createOpenAiApiKeySourceConfig();
|
||||
const runtimeConfig = createOpenAiApiKeyRuntimeConfig();
|
||||
const clonedRuntimeConfig: OpenClawConfig = {
|
||||
...runtimeConfig,
|
||||
agents: {
|
||||
@@ -139,11 +207,7 @@ describe("models-config runtime source snapshot", () => {
|
||||
try {
|
||||
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
|
||||
await ensureOpenClawModelsJson(clonedRuntimeConfig);
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
|
||||
await expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"); // pragma: allowlist secret
|
||||
} finally {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
@@ -152,121 +216,27 @@ describe("models-config runtime source snapshot", () => {
|
||||
});
|
||||
|
||||
it("uses header markers from runtime source snapshot instead of resolved runtime values", async () => {
|
||||
await withTempHome(async () => {
|
||||
const sourceConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: {
|
||||
source: "env",
|
||||
provider: "default",
|
||||
id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret
|
||||
},
|
||||
"X-Tenant-Token": {
|
||||
source: "file",
|
||||
provider: "vault",
|
||||
id: "/providers/openai/tenantToken",
|
||||
},
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
const runtimeConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: "Bearer runtime-openai-token",
|
||||
"X-Tenant-Token": "runtime-tenant-token",
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
try {
|
||||
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
|
||||
await ensureOpenClawModelsJson(loadConfig());
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { headers?: Record<string, string> }>;
|
||||
}>();
|
||||
expect(parsed.providers.openai?.headers?.Authorization).toBe(
|
||||
"secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret
|
||||
);
|
||||
expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER);
|
||||
} finally {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
}
|
||||
});
|
||||
await withGeneratedModelsFromRuntimeSource(
|
||||
{
|
||||
sourceConfig: createOpenAiHeaderSourceConfig(),
|
||||
runtimeConfig: createOpenAiHeaderRuntimeConfig(),
|
||||
},
|
||||
expectGeneratedOpenAiHeaderMarkers,
|
||||
);
|
||||
});
|
||||
|
||||
it("keeps source markers when runtime projection is skipped for incompatible top-level shape", async () => {
|
||||
await withTempHome(async () => {
|
||||
const sourceConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: { source: "env", provider: "default", id: "OPENAI_API_KEY" }, // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
gateway: {
|
||||
auth: {
|
||||
mode: "token",
|
||||
},
|
||||
},
|
||||
};
|
||||
const runtimeConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
gateway: {
|
||||
auth: {
|
||||
mode: "token",
|
||||
},
|
||||
},
|
||||
};
|
||||
const sourceConfig = withGatewayTokenMode(createOpenAiApiKeySourceConfig());
|
||||
const runtimeConfig = withGatewayTokenMode(createOpenAiApiKeyRuntimeConfig());
|
||||
const incompatibleCandidate: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
apiKey: "sk-runtime-resolved", // pragma: allowlist secret
|
||||
api: "openai-completions" as const,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
...createOpenAiApiKeyRuntimeConfig(),
|
||||
};
|
||||
|
||||
try {
|
||||
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
|
||||
await ensureOpenClawModelsJson(incompatibleCandidate);
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { apiKey?: string }>;
|
||||
}>();
|
||||
expect(parsed.providers.openai?.apiKey).toBe("OPENAI_API_KEY"); // pragma: allowlist secret
|
||||
await expectGeneratedProviderApiKey("openai", "OPENAI_API_KEY"); // pragma: allowlist secret
|
||||
} finally {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
@@ -276,81 +246,16 @@ describe("models-config runtime source snapshot", () => {
|
||||
|
||||
it("keeps source header markers when runtime projection is skipped for incompatible top-level shape", async () => {
|
||||
await withTempHome(async () => {
|
||||
const sourceConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: {
|
||||
source: "env",
|
||||
provider: "default",
|
||||
id: "OPENAI_HEADER_TOKEN", // pragma: allowlist secret
|
||||
},
|
||||
"X-Tenant-Token": {
|
||||
source: "file",
|
||||
provider: "vault",
|
||||
id: "/providers/openai/tenantToken",
|
||||
},
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
gateway: {
|
||||
auth: {
|
||||
mode: "token",
|
||||
},
|
||||
},
|
||||
};
|
||||
const runtimeConfig: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: "Bearer runtime-openai-token",
|
||||
"X-Tenant-Token": "runtime-tenant-token",
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
gateway: {
|
||||
auth: {
|
||||
mode: "token",
|
||||
},
|
||||
},
|
||||
};
|
||||
const sourceConfig = withGatewayTokenMode(createOpenAiHeaderSourceConfig());
|
||||
const runtimeConfig = withGatewayTokenMode(createOpenAiHeaderRuntimeConfig());
|
||||
const incompatibleCandidate: OpenClawConfig = {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions" as const,
|
||||
headers: {
|
||||
Authorization: "Bearer runtime-openai-token",
|
||||
"X-Tenant-Token": "runtime-tenant-token",
|
||||
},
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
...createOpenAiHeaderRuntimeConfig(),
|
||||
};
|
||||
|
||||
try {
|
||||
setRuntimeConfigSnapshot(runtimeConfig, sourceConfig);
|
||||
await ensureOpenClawModelsJson(incompatibleCandidate);
|
||||
|
||||
const parsed = await readGeneratedModelsJson<{
|
||||
providers: Record<string, { headers?: Record<string, string> }>;
|
||||
}>();
|
||||
expect(parsed.providers.openai?.headers?.Authorization).toBe(
|
||||
"secretref-env:OPENAI_HEADER_TOKEN", // pragma: allowlist secret
|
||||
);
|
||||
expect(parsed.providers.openai?.headers?.["X-Tenant-Token"]).toBe(NON_ENV_SECRETREF_MARKER);
|
||||
await expectGeneratedOpenAiHeaderMarkers();
|
||||
} finally {
|
||||
clearRuntimeConfigSnapshot();
|
||||
clearConfigCache();
|
||||
|
||||
@@ -1,31 +1,11 @@
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import { jsonResponse, requestBodyText, requestUrl } from "../test-helpers/http.js";
|
||||
import {
|
||||
enrichOllamaModelsWithContext,
|
||||
resolveOllamaApiBase,
|
||||
type OllamaTagModel,
|
||||
} from "./ollama-models.js";
|
||||
|
||||
function jsonResponse(body: unknown, status = 200): Response {
|
||||
return new Response(JSON.stringify(body), {
|
||||
status,
|
||||
headers: { "Content-Type": "application/json" },
|
||||
});
|
||||
}
|
||||
|
||||
function requestUrl(input: string | URL | Request): string {
|
||||
if (typeof input === "string") {
|
||||
return input;
|
||||
}
|
||||
if (input instanceof URL) {
|
||||
return input.toString();
|
||||
}
|
||||
return input.url;
|
||||
}
|
||||
|
||||
function requestBody(body: BodyInit | null | undefined): string {
|
||||
return typeof body === "string" ? body : "{}";
|
||||
}
|
||||
|
||||
describe("ollama-models", () => {
|
||||
afterEach(() => {
|
||||
vi.unstubAllGlobals();
|
||||
@@ -43,7 +23,7 @@ describe("ollama-models", () => {
|
||||
if (!url.endsWith("/api/show")) {
|
||||
throw new Error(`Unexpected fetch: ${url}`);
|
||||
}
|
||||
const body = JSON.parse(requestBody(init?.body)) as { name?: string };
|
||||
const body = JSON.parse(requestBodyText(init?.body)) as { name?: string };
|
||||
if (body.name === "llama3:8b") {
|
||||
return jsonResponse({ model_info: { "llama.context_length": 65536 } });
|
||||
}
|
||||
|
||||
@@ -203,6 +203,20 @@ function mockNdjsonReader(lines: string[]): ReadableStreamDefaultReader<Uint8Arr
|
||||
} as unknown as ReadableStreamDefaultReader<Uint8Array>;
|
||||
}
|
||||
|
||||
async function expectDoneEventContent(lines: string[], expectedContent: unknown) {
|
||||
await withMockNdjsonFetch(lines, async () => {
|
||||
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
|
||||
const events = await collectStreamEvents(stream);
|
||||
|
||||
const doneEvent = events.at(-1);
|
||||
if (!doneEvent || doneEvent.type !== "done") {
|
||||
throw new Error("Expected done event");
|
||||
}
|
||||
|
||||
expect(doneEvent.message.content).toEqual(expectedContent);
|
||||
});
|
||||
}
|
||||
|
||||
describe("parseNdjsonStream", () => {
|
||||
it("parses text-only streaming chunks", async () => {
|
||||
const reader = mockNdjsonReader([
|
||||
@@ -486,88 +500,48 @@ describe("createOllamaStreamFn", () => {
|
||||
});
|
||||
|
||||
it("drops thinking chunks when no final content is emitted", async () => {
|
||||
await withMockNdjsonFetch(
|
||||
await expectDoneEventContent(
|
||||
[
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"reasoned"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":" output"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
|
||||
],
|
||||
async () => {
|
||||
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
|
||||
const events = await collectStreamEvents(stream);
|
||||
|
||||
const doneEvent = events.at(-1);
|
||||
if (!doneEvent || doneEvent.type !== "done") {
|
||||
throw new Error("Expected done event");
|
||||
}
|
||||
|
||||
expect(doneEvent.message.content).toEqual([]);
|
||||
},
|
||||
[],
|
||||
);
|
||||
});
|
||||
|
||||
it("prefers streamed content over earlier thinking chunks", async () => {
|
||||
await withMockNdjsonFetch(
|
||||
await expectDoneEventContent(
|
||||
[
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","thinking":"internal"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
|
||||
],
|
||||
async () => {
|
||||
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
|
||||
const events = await collectStreamEvents(stream);
|
||||
|
||||
const doneEvent = events.at(-1);
|
||||
if (!doneEvent || doneEvent.type !== "done") {
|
||||
throw new Error("Expected done event");
|
||||
}
|
||||
|
||||
expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]);
|
||||
},
|
||||
[{ type: "text", text: "final answer" }],
|
||||
);
|
||||
});
|
||||
|
||||
it("drops reasoning chunks when no final content is emitted", async () => {
|
||||
await withMockNdjsonFetch(
|
||||
await expectDoneEventContent(
|
||||
[
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"reasoned"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":" output"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
|
||||
],
|
||||
async () => {
|
||||
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
|
||||
const events = await collectStreamEvents(stream);
|
||||
|
||||
const doneEvent = events.at(-1);
|
||||
if (!doneEvent || doneEvent.type !== "done") {
|
||||
throw new Error("Expected done event");
|
||||
}
|
||||
|
||||
expect(doneEvent.message.content).toEqual([]);
|
||||
},
|
||||
[],
|
||||
);
|
||||
});
|
||||
|
||||
it("prefers streamed content over earlier reasoning chunks", async () => {
|
||||
await withMockNdjsonFetch(
|
||||
await expectDoneEventContent(
|
||||
[
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"","reasoning":"internal"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"final"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":" answer"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":2}',
|
||||
],
|
||||
async () => {
|
||||
const stream = await createOllamaTestStream({ baseUrl: "http://ollama-host:11434" });
|
||||
const events = await collectStreamEvents(stream);
|
||||
|
||||
const doneEvent = events.at(-1);
|
||||
if (!doneEvent || doneEvent.type !== "done") {
|
||||
throw new Error("Expected done event");
|
||||
}
|
||||
|
||||
expect(doneEvent.message.content).toEqual([{ type: "text", text: "final answer" }]);
|
||||
},
|
||||
[{ type: "text", text: "final answer" }],
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -115,6 +115,50 @@ function resetSessionStore(store: Record<string, unknown>) {
|
||||
mockConfig = createMockConfig();
|
||||
}
|
||||
|
||||
function installSandboxedSessionStatusConfig() {
|
||||
mockConfig = {
|
||||
session: { mainKey: "main", scope: "per-sender" },
|
||||
tools: {
|
||||
sessions: { visibility: "all" },
|
||||
agentToAgent: { enabled: true, allow: ["*"] },
|
||||
},
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "anthropic/claude-opus-4-5" },
|
||||
models: {},
|
||||
sandbox: { sessionToolsVisibility: "spawned" },
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function mockSpawnedSessionList(
|
||||
resolveSessions: (spawnedBy: string | undefined) => Array<Record<string, unknown>>,
|
||||
) {
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: Record<string, unknown> };
|
||||
if (request.method === "sessions.list") {
|
||||
return { sessions: resolveSessions(request.params?.spawnedBy as string | undefined) };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
}
|
||||
|
||||
function expectSpawnedSessionLookupCalls(spawnedBy: string) {
|
||||
const expectedCall = {
|
||||
method: "sessions.list",
|
||||
params: {
|
||||
includeGlobal: false,
|
||||
includeUnknown: false,
|
||||
limit: 500,
|
||||
spawnedBy,
|
||||
},
|
||||
};
|
||||
expect(callGatewayMock).toHaveBeenCalledTimes(2);
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(1, expectedCall);
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(2, expectedCall);
|
||||
}
|
||||
|
||||
function getSessionStatusTool(agentSessionKey = "main", options?: { sandboxed?: boolean }) {
|
||||
const tool = createOpenClawTools({
|
||||
agentSessionKey,
|
||||
@@ -242,27 +286,8 @@ describe("session_status tool", () => {
|
||||
updatedAt: 10,
|
||||
},
|
||||
});
|
||||
mockConfig = {
|
||||
session: { mainKey: "main", scope: "per-sender" },
|
||||
tools: {
|
||||
sessions: { visibility: "all" },
|
||||
agentToAgent: { enabled: true, allow: ["*"] },
|
||||
},
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "anthropic/claude-opus-4-5" },
|
||||
models: {},
|
||||
sandbox: { sessionToolsVisibility: "spawned" },
|
||||
},
|
||||
},
|
||||
};
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: Record<string, unknown> };
|
||||
if (request.method === "sessions.list") {
|
||||
return { sessions: [] };
|
||||
}
|
||||
return {};
|
||||
});
|
||||
installSandboxedSessionStatusConfig();
|
||||
mockSpawnedSessionList(() => []);
|
||||
|
||||
const tool = getSessionStatusTool("agent:main:subagent:child", {
|
||||
sandboxed: true,
|
||||
@@ -284,25 +309,7 @@ describe("session_status tool", () => {
|
||||
|
||||
expect(loadSessionStoreMock).not.toHaveBeenCalled();
|
||||
expect(updateSessionStoreMock).not.toHaveBeenCalled();
|
||||
expect(callGatewayMock).toHaveBeenCalledTimes(2);
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(1, {
|
||||
method: "sessions.list",
|
||||
params: {
|
||||
includeGlobal: false,
|
||||
includeUnknown: false,
|
||||
limit: 500,
|
||||
spawnedBy: "agent:main:subagent:child",
|
||||
},
|
||||
});
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(2, {
|
||||
method: "sessions.list",
|
||||
params: {
|
||||
includeGlobal: false,
|
||||
includeUnknown: false,
|
||||
limit: 500,
|
||||
spawnedBy: "agent:main:subagent:child",
|
||||
},
|
||||
});
|
||||
expectSpawnedSessionLookupCalls("agent:main:subagent:child");
|
||||
});
|
||||
|
||||
it("keeps legacy main requester keys for sandboxed session tree checks", async () => {
|
||||
@@ -316,30 +323,10 @@ describe("session_status tool", () => {
|
||||
updatedAt: 20,
|
||||
},
|
||||
});
|
||||
mockConfig = {
|
||||
session: { mainKey: "main", scope: "per-sender" },
|
||||
tools: {
|
||||
sessions: { visibility: "all" },
|
||||
agentToAgent: { enabled: true, allow: ["*"] },
|
||||
},
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "anthropic/claude-opus-4-5" },
|
||||
models: {},
|
||||
sandbox: { sessionToolsVisibility: "spawned" },
|
||||
},
|
||||
},
|
||||
};
|
||||
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
||||
const request = opts as { method?: string; params?: Record<string, unknown> };
|
||||
if (request.method === "sessions.list") {
|
||||
return {
|
||||
sessions:
|
||||
request.params?.spawnedBy === "main" ? [{ key: "agent:main:subagent:child" }] : [],
|
||||
};
|
||||
}
|
||||
return {};
|
||||
});
|
||||
installSandboxedSessionStatusConfig();
|
||||
mockSpawnedSessionList((spawnedBy) =>
|
||||
spawnedBy === "main" ? [{ key: "agent:main:subagent:child" }] : [],
|
||||
);
|
||||
|
||||
const tool = getSessionStatusTool("main", {
|
||||
sandboxed: true,
|
||||
@@ -357,25 +344,7 @@ describe("session_status tool", () => {
|
||||
expect(childDetails.ok).toBe(true);
|
||||
expect(childDetails.sessionKey).toBe("agent:main:subagent:child");
|
||||
|
||||
expect(callGatewayMock).toHaveBeenCalledTimes(2);
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(1, {
|
||||
method: "sessions.list",
|
||||
params: {
|
||||
includeGlobal: false,
|
||||
includeUnknown: false,
|
||||
limit: 500,
|
||||
spawnedBy: "main",
|
||||
},
|
||||
});
|
||||
expect(callGatewayMock).toHaveBeenNthCalledWith(2, {
|
||||
method: "sessions.list",
|
||||
params: {
|
||||
includeGlobal: false,
|
||||
includeUnknown: false,
|
||||
limit: 500,
|
||||
spawnedBy: "main",
|
||||
},
|
||||
});
|
||||
expectSpawnedSessionLookupCalls("main");
|
||||
});
|
||||
|
||||
it("scopes bare session keys to the requester agent", async () => {
|
||||
|
||||
@@ -17,6 +17,63 @@ function writeStore(storePath: string, store: Record<string, unknown>) {
|
||||
fs.writeFileSync(storePath, JSON.stringify(store, null, 2), "utf-8");
|
||||
}
|
||||
|
||||
function seedLeafOwnedChildSession(storePath: string, leafKey = "agent:main:subagent:leaf") {
|
||||
const childKey = `${leafKey}:subagent:child`;
|
||||
writeStore(storePath, {
|
||||
[leafKey]: {
|
||||
sessionId: "leaf-session",
|
||||
updatedAt: Date.now(),
|
||||
spawnedBy: "agent:main:main",
|
||||
subagentRole: "leaf",
|
||||
subagentControlScope: "none",
|
||||
},
|
||||
[childKey]: {
|
||||
sessionId: "child-session",
|
||||
updatedAt: Date.now(),
|
||||
spawnedBy: leafKey,
|
||||
subagentRole: "leaf",
|
||||
subagentControlScope: "none",
|
||||
},
|
||||
});
|
||||
|
||||
addSubagentRunForTests({
|
||||
runId: "run-child",
|
||||
childSessionKey: childKey,
|
||||
controllerSessionKey: leafKey,
|
||||
requesterSessionKey: leafKey,
|
||||
requesterDisplayKey: leafKey,
|
||||
task: "impossible child",
|
||||
cleanup: "keep",
|
||||
createdAt: Date.now() - 30_000,
|
||||
startedAt: Date.now() - 30_000,
|
||||
});
|
||||
|
||||
return {
|
||||
childKey,
|
||||
tool: createSubagentsTool({ agentSessionKey: leafKey }),
|
||||
};
|
||||
}
|
||||
|
||||
async function expectLeafSubagentControlForbidden(params: {
|
||||
storePath: string;
|
||||
action: "kill" | "steer";
|
||||
callId: string;
|
||||
message?: string;
|
||||
}) {
|
||||
const { childKey, tool } = seedLeafOwnedChildSession(params.storePath);
|
||||
const result = await tool.execute(params.callId, {
|
||||
action: params.action,
|
||||
target: childKey,
|
||||
...(params.message ? { message: params.message } : {}),
|
||||
});
|
||||
|
||||
expect(result.details).toMatchObject({
|
||||
status: "forbidden",
|
||||
error: "Leaf subagents cannot control other sessions.",
|
||||
});
|
||||
expect(callGatewayMock).not.toHaveBeenCalled();
|
||||
}
|
||||
|
||||
describe("openclaw-tools: subagents scope isolation", () => {
|
||||
let storePath = "";
|
||||
|
||||
@@ -151,95 +208,19 @@ describe("openclaw-tools: subagents scope isolation", () => {
|
||||
});
|
||||
|
||||
it("leaf subagents cannot kill even explicitly-owned child sessions", async () => {
|
||||
const leafKey = "agent:main:subagent:leaf";
|
||||
const childKey = `${leafKey}:subagent:child`;
|
||||
|
||||
writeStore(storePath, {
|
||||
[leafKey]: {
|
||||
sessionId: "leaf-session",
|
||||
updatedAt: Date.now(),
|
||||
spawnedBy: "agent:main:main",
|
||||
subagentRole: "leaf",
|
||||
subagentControlScope: "none",
|
||||
},
|
||||
[childKey]: {
|
||||
sessionId: "child-session",
|
||||
updatedAt: Date.now(),
|
||||
spawnedBy: leafKey,
|
||||
subagentRole: "leaf",
|
||||
subagentControlScope: "none",
|
||||
},
|
||||
});
|
||||
|
||||
addSubagentRunForTests({
|
||||
runId: "run-child",
|
||||
childSessionKey: childKey,
|
||||
controllerSessionKey: leafKey,
|
||||
requesterSessionKey: leafKey,
|
||||
requesterDisplayKey: leafKey,
|
||||
task: "impossible child",
|
||||
cleanup: "keep",
|
||||
createdAt: Date.now() - 30_000,
|
||||
startedAt: Date.now() - 30_000,
|
||||
});
|
||||
|
||||
const tool = createSubagentsTool({ agentSessionKey: leafKey });
|
||||
const result = await tool.execute("call-leaf-kill", {
|
||||
await expectLeafSubagentControlForbidden({
|
||||
storePath,
|
||||
action: "kill",
|
||||
target: childKey,
|
||||
callId: "call-leaf-kill",
|
||||
});
|
||||
|
||||
expect(result.details).toMatchObject({
|
||||
status: "forbidden",
|
||||
error: "Leaf subagents cannot control other sessions.",
|
||||
});
|
||||
expect(callGatewayMock).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("leaf subagents cannot steer even explicitly-owned child sessions", async () => {
|
||||
const leafKey = "agent:main:subagent:leaf";
|
||||
const childKey = `${leafKey}:subagent:child`;
|
||||
|
||||
writeStore(storePath, {
|
||||
[leafKey]: {
|
||||
sessionId: "leaf-session",
|
||||
updatedAt: Date.now(),
|
||||
spawnedBy: "agent:main:main",
|
||||
subagentRole: "leaf",
|
||||
subagentControlScope: "none",
|
||||
},
|
||||
[childKey]: {
|
||||
sessionId: "child-session",
|
||||
updatedAt: Date.now(),
|
||||
spawnedBy: leafKey,
|
||||
subagentRole: "leaf",
|
||||
subagentControlScope: "none",
|
||||
},
|
||||
});
|
||||
|
||||
addSubagentRunForTests({
|
||||
runId: "run-child",
|
||||
childSessionKey: childKey,
|
||||
controllerSessionKey: leafKey,
|
||||
requesterSessionKey: leafKey,
|
||||
requesterDisplayKey: leafKey,
|
||||
task: "impossible child",
|
||||
cleanup: "keep",
|
||||
createdAt: Date.now() - 30_000,
|
||||
startedAt: Date.now() - 30_000,
|
||||
});
|
||||
|
||||
const tool = createSubagentsTool({ agentSessionKey: leafKey });
|
||||
const result = await tool.execute("call-leaf-steer", {
|
||||
await expectLeafSubagentControlForbidden({
|
||||
storePath,
|
||||
action: "steer",
|
||||
target: childKey,
|
||||
callId: "call-leaf-steer",
|
||||
message: "continue",
|
||||
});
|
||||
|
||||
expect(result.details).toMatchObject({
|
||||
status: "forbidden",
|
||||
error: "Leaf subagents cannot control other sessions.",
|
||||
});
|
||||
expect(callGatewayMock).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -174,15 +174,18 @@ export function createOpenClawTools(
|
||||
createSessionsListTool({
|
||||
agentSessionKey: options?.agentSessionKey,
|
||||
sandboxed: options?.sandboxed,
|
||||
config: options?.config,
|
||||
}),
|
||||
createSessionsHistoryTool({
|
||||
agentSessionKey: options?.agentSessionKey,
|
||||
sandboxed: options?.sandboxed,
|
||||
config: options?.config,
|
||||
}),
|
||||
createSessionsSendTool({
|
||||
agentSessionKey: options?.agentSessionKey,
|
||||
agentChannel: options?.agentChannel,
|
||||
sandboxed: options?.sandboxed,
|
||||
config: options?.config,
|
||||
}),
|
||||
createSessionsYieldTool({
|
||||
sessionId: options?.sessionId,
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import "./test-helpers/fast-coding-tools.js";
|
||||
import { afterAll, beforeAll, describe, expect, it, vi } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import {
|
||||
cleanupEmbeddedPiRunnerTestWorkspace,
|
||||
createEmbeddedPiRunnerOpenAiConfig,
|
||||
createEmbeddedPiRunnerTestWorkspace,
|
||||
type EmbeddedPiRunnerTestWorkspace,
|
||||
immediateEnqueue,
|
||||
} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js";
|
||||
|
||||
function createMockUsage(input: number, output: number) {
|
||||
return {
|
||||
@@ -88,7 +93,7 @@ vi.mock("@mariozechner/pi-ai", async () => {
|
||||
|
||||
let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent;
|
||||
let SessionManager: typeof import("@mariozechner/pi-coding-agent").SessionManager;
|
||||
let tempRoot: string | undefined;
|
||||
let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined;
|
||||
let agentDir: string;
|
||||
let workspaceDir: string;
|
||||
let sessionCounter = 0;
|
||||
@@ -98,50 +103,21 @@ beforeAll(async () => {
|
||||
vi.useRealTimers();
|
||||
({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js"));
|
||||
({ SessionManager } = await import("@mariozechner/pi-coding-agent"));
|
||||
tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-embedded-agent-"));
|
||||
agentDir = path.join(tempRoot, "agent");
|
||||
workspaceDir = path.join(tempRoot, "workspace");
|
||||
await fs.mkdir(agentDir, { recursive: true });
|
||||
await fs.mkdir(workspaceDir, { recursive: true });
|
||||
e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-embedded-agent-");
|
||||
({ agentDir, workspaceDir } = e2eWorkspace);
|
||||
}, 180_000);
|
||||
|
||||
afterAll(async () => {
|
||||
if (!tempRoot) {
|
||||
return;
|
||||
}
|
||||
await fs.rm(tempRoot, { recursive: true, force: true });
|
||||
tempRoot = undefined;
|
||||
await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace);
|
||||
e2eWorkspace = undefined;
|
||||
});
|
||||
|
||||
const makeOpenAiConfig = (modelIds: string[]) =>
|
||||
({
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
}) satisfies OpenClawConfig;
|
||||
|
||||
const nextSessionFile = () => {
|
||||
sessionCounter += 1;
|
||||
return path.join(workspaceDir, `session-${sessionCounter}.jsonl`);
|
||||
};
|
||||
const nextRunId = (prefix = "run-embedded-test") => `${prefix}-${++runCounter}`;
|
||||
const nextSessionKey = () => `agent:test:embedded:${nextRunId("session-key")}`;
|
||||
const immediateEnqueue = async <T>(task: () => Promise<T>) => task();
|
||||
|
||||
const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string) => {
|
||||
const sessionFile = nextSessionFile();
|
||||
@@ -152,7 +128,7 @@ const runWithOrphanedSingleUserMessage = async (text: string, sessionKey: string
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
|
||||
const cfg = makeOpenAiConfig(["mock-1"]);
|
||||
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-1"]);
|
||||
return await runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
sessionKey,
|
||||
@@ -197,7 +173,7 @@ const readSessionMessages = async (sessionFile: string) => {
|
||||
};
|
||||
|
||||
const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessionKey: string) => {
|
||||
const cfg = makeOpenAiConfig(["mock-error"]);
|
||||
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]);
|
||||
await runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
sessionKey,
|
||||
@@ -217,7 +193,7 @@ const runDefaultEmbeddedTurn = async (sessionFile: string, prompt: string, sessi
|
||||
describe("runEmbeddedPiAgent", () => {
|
||||
it("handles prompt error paths without dropping user state", async () => {
|
||||
const sessionFile = nextSessionFile();
|
||||
const cfg = makeOpenAiConfig(["mock-error"]);
|
||||
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-error"]);
|
||||
const sessionKey = nextSessionKey();
|
||||
const result = await runEmbeddedPiAgent({
|
||||
sessionId: "session:test",
|
||||
|
||||
@@ -8,12 +8,17 @@
|
||||
* Follows the same pattern as pi-embedded-runner.e2e.test.ts.
|
||||
*/
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import "./test-helpers/fast-coding-tools.js";
|
||||
import { afterAll, beforeAll, describe, expect, it, vi } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import { isEmbeddedPiRunActive, queueEmbeddedPiMessage } from "./pi-embedded-runner/runs.js";
|
||||
import {
|
||||
cleanupEmbeddedPiRunnerTestWorkspace,
|
||||
createEmbeddedPiRunnerOpenAiConfig,
|
||||
createEmbeddedPiRunnerTestWorkspace,
|
||||
type EmbeddedPiRunnerTestWorkspace,
|
||||
immediateEnqueue,
|
||||
} from "./test-helpers/pi-embedded-runner-e2e-fixtures.js";
|
||||
|
||||
function createMockUsage(input: number, output: number) {
|
||||
return {
|
||||
@@ -126,7 +131,7 @@ vi.mock("@mariozechner/pi-ai", async () => {
|
||||
});
|
||||
|
||||
let runEmbeddedPiAgent: typeof import("./pi-embedded-runner/run.js").runEmbeddedPiAgent;
|
||||
let tempRoot: string | undefined;
|
||||
let e2eWorkspace: EmbeddedPiRunnerTestWorkspace | undefined;
|
||||
let agentDir: string;
|
||||
let workspaceDir: string;
|
||||
|
||||
@@ -136,45 +141,15 @@ beforeAll(async () => {
|
||||
responsePlan = [];
|
||||
observedContexts = [];
|
||||
({ runEmbeddedPiAgent } = await import("./pi-embedded-runner/run.js"));
|
||||
tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-yield-e2e-"));
|
||||
agentDir = path.join(tempRoot, "agent");
|
||||
workspaceDir = path.join(tempRoot, "workspace");
|
||||
await fs.mkdir(agentDir, { recursive: true });
|
||||
await fs.mkdir(workspaceDir, { recursive: true });
|
||||
e2eWorkspace = await createEmbeddedPiRunnerTestWorkspace("openclaw-yield-e2e-");
|
||||
({ agentDir, workspaceDir } = e2eWorkspace);
|
||||
}, 180_000);
|
||||
|
||||
afterAll(async () => {
|
||||
if (!tempRoot) {
|
||||
return;
|
||||
}
|
||||
await fs.rm(tempRoot, { recursive: true, force: true });
|
||||
tempRoot = undefined;
|
||||
await cleanupEmbeddedPiRunnerTestWorkspace(e2eWorkspace);
|
||||
e2eWorkspace = undefined;
|
||||
});
|
||||
|
||||
const makeConfig = (modelIds: string[]) =>
|
||||
({
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
}) satisfies OpenClawConfig;
|
||||
|
||||
const immediateEnqueue = async <T>(task: () => Promise<T>) => task();
|
||||
|
||||
const readSessionMessages = async (sessionFile: string) => {
|
||||
const raw = await fs.readFile(sessionFile, "utf-8");
|
||||
return raw
|
||||
@@ -205,7 +180,7 @@ describe("sessions_yield e2e", () => {
|
||||
|
||||
const sessionId = "yield-e2e-parent";
|
||||
const sessionFile = path.join(workspaceDir, "session-yield-e2e.jsonl");
|
||||
const cfg = makeConfig(["mock-yield"]);
|
||||
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield"]);
|
||||
|
||||
const result = await runEmbeddedPiAgent({
|
||||
sessionId,
|
||||
@@ -304,7 +279,7 @@ describe("sessions_yield e2e", () => {
|
||||
|
||||
const sessionId = "yield-e2e-abort";
|
||||
const sessionFile = path.join(workspaceDir, "session-yield-abort.jsonl");
|
||||
const cfg = makeConfig(["mock-yield-abort"]);
|
||||
const cfg = createEmbeddedPiRunnerOpenAiConfig(["mock-yield-abort"]);
|
||||
|
||||
const result = await runEmbeddedPiAgent({
|
||||
sessionId,
|
||||
|
||||
@@ -7,6 +7,7 @@ import {
|
||||
usesOpenAiStringModeAnthropicToolChoice,
|
||||
} from "../provider-capabilities.js";
|
||||
import { log } from "./logger.js";
|
||||
import { streamWithPayloadPatch } from "./stream-payload-utils.js";
|
||||
|
||||
const ANTHROPIC_CONTEXT_1M_BETA = "context-1m-2025-08-07";
|
||||
const ANTHROPIC_1M_MODEL_PREFIXES = ["claude-opus-4", "claude-sonnet-4"] as const;
|
||||
@@ -341,18 +342,10 @@ export function createAnthropicFastModeWrapper(
|
||||
return underlying(model, context, options);
|
||||
}
|
||||
|
||||
const originalOnPayload = options?.onPayload;
|
||||
return underlying(model, context, {
|
||||
...options,
|
||||
onPayload: (payload) => {
|
||||
if (payload && typeof payload === "object") {
|
||||
const payloadObj = payload as Record<string, unknown>;
|
||||
if (payloadObj.service_tier === undefined) {
|
||||
payloadObj.service_tier = serviceTier;
|
||||
}
|
||||
}
|
||||
return originalOnPayload?.(payload, model);
|
||||
},
|
||||
return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => {
|
||||
if (payloadObj.service_tier === undefined) {
|
||||
payloadObj.service_tier = serviceTier;
|
||||
}
|
||||
});
|
||||
};
|
||||
}
|
||||
|
||||
@@ -278,6 +278,7 @@ vi.mock("../../config/channel-capabilities.js", () => ({
|
||||
}));
|
||||
|
||||
vi.mock("../../utils/message-channel.js", () => ({
|
||||
INTERNAL_MESSAGE_CHANNEL: "webchat",
|
||||
normalizeMessageChannel: vi.fn(() => undefined),
|
||||
}));
|
||||
|
||||
@@ -375,6 +376,16 @@ describe("compactEmbeddedPiSessionDirect hooks", () => {
|
||||
unregisterApiProviders(getCustomApiRegistrySourceId("ollama"));
|
||||
});
|
||||
|
||||
async function runDirectCompaction(customInstructions = "focus on decisions") {
|
||||
return await compactEmbeddedPiSessionDirect({
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:session-1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: "/tmp",
|
||||
customInstructions,
|
||||
});
|
||||
}
|
||||
|
||||
it("bootstraps runtime plugins with the resolved workspace", async () => {
|
||||
await compactEmbeddedPiSessionDirect({
|
||||
sessionId: "session-1",
|
||||
@@ -472,13 +483,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => {
|
||||
hookRunner.hasHooks.mockReturnValue(true);
|
||||
sanitizeSessionHistoryMock.mockResolvedValue([]);
|
||||
|
||||
const result = await compactEmbeddedPiSessionDirect({
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:session-1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: "/tmp",
|
||||
customInstructions: "focus on decisions",
|
||||
});
|
||||
const result = await runDirectCompaction();
|
||||
|
||||
expect(result.ok).toBe(true);
|
||||
const beforeContext = sessionHook("compact:before")?.context;
|
||||
@@ -528,13 +533,7 @@ describe("compactEmbeddedPiSessionDirect hooks", () => {
|
||||
details: { ok: true },
|
||||
});
|
||||
|
||||
const result = await compactEmbeddedPiSessionDirect({
|
||||
sessionId: "session-1",
|
||||
sessionKey: "agent:main:session-1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: "/tmp",
|
||||
customInstructions: "focus on decisions",
|
||||
});
|
||||
const result = await runDirectCompaction();
|
||||
|
||||
expect(result).toMatchObject({
|
||||
ok: true,
|
||||
|
||||
@@ -2,6 +2,7 @@ import type { StreamFn } from "@mariozechner/pi-agent-core";
|
||||
import type { SimpleStreamOptions } from "@mariozechner/pi-ai";
|
||||
import { streamSimple } from "@mariozechner/pi-ai";
|
||||
import { log } from "./logger.js";
|
||||
import { streamWithPayloadPatch } from "./stream-payload-utils.js";
|
||||
|
||||
type OpenAIServiceTier = "auto" | "default" | "flex" | "priority";
|
||||
type OpenAIReasoningEffort = "low" | "medium" | "high";
|
||||
@@ -325,18 +326,10 @@ export function createOpenAIServiceTierWrapper(
|
||||
) {
|
||||
return underlying(model, context, options);
|
||||
}
|
||||
const originalOnPayload = options?.onPayload;
|
||||
return underlying(model, context, {
|
||||
...options,
|
||||
onPayload: (payload) => {
|
||||
if (payload && typeof payload === "object") {
|
||||
const payloadObj = payload as Record<string, unknown>;
|
||||
if (payloadObj.service_tier === undefined) {
|
||||
payloadObj.service_tier = serviceTier;
|
||||
}
|
||||
}
|
||||
return originalOnPayload?.(payload, model);
|
||||
},
|
||||
return streamWithPayloadPatch(underlying, model, context, options, (payloadObj) => {
|
||||
if (payloadObj.service_tier === undefined) {
|
||||
payloadObj.service_tier = serviceTier;
|
||||
}
|
||||
});
|
||||
};
|
||||
}
|
||||
|
||||
@@ -249,6 +249,72 @@ function createSubscriptionMock() {
|
||||
};
|
||||
}
|
||||
|
||||
function resetEmbeddedAttemptHarness(
|
||||
params: {
|
||||
includeSpawnSubagent?: boolean;
|
||||
subscribeImpl?: () => ReturnType<typeof createSubscriptionMock>;
|
||||
sessionMessages?: AgentMessage[];
|
||||
} = {},
|
||||
) {
|
||||
if (params.includeSpawnSubagent) {
|
||||
hoisted.spawnSubagentDirectMock.mockReset().mockResolvedValue({
|
||||
status: "accepted",
|
||||
childSessionKey: "agent:main:subagent:child",
|
||||
runId: "run-child",
|
||||
});
|
||||
}
|
||||
hoisted.createAgentSessionMock.mockReset();
|
||||
hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager);
|
||||
hoisted.resolveSandboxContextMock.mockReset();
|
||||
hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({
|
||||
release: async () => {},
|
||||
});
|
||||
hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null);
|
||||
hoisted.sessionManager.branch.mockReset();
|
||||
hoisted.sessionManager.resetLeaf.mockReset();
|
||||
hoisted.sessionManager.buildSessionContext
|
||||
.mockReset()
|
||||
.mockReturnValue({ messages: params.sessionMessages ?? [] });
|
||||
hoisted.sessionManager.appendCustomEntry.mockReset();
|
||||
if (params.subscribeImpl) {
|
||||
hoisted.subscribeEmbeddedPiSessionMock.mockReset().mockImplementation(params.subscribeImpl);
|
||||
}
|
||||
}
|
||||
|
||||
async function cleanupTempPaths(tempPaths: string[]) {
|
||||
while (tempPaths.length > 0) {
|
||||
const target = tempPaths.pop();
|
||||
if (target) {
|
||||
await fs.rm(target, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function createDefaultEmbeddedSession(): MutableSession {
|
||||
const session: MutableSession = {
|
||||
sessionId: "embedded-session",
|
||||
messages: [],
|
||||
isCompacting: false,
|
||||
isStreaming: false,
|
||||
agent: {
|
||||
replaceMessages: (messages: unknown[]) => {
|
||||
session.messages = [...messages];
|
||||
},
|
||||
},
|
||||
prompt: async () => {
|
||||
session.messages = [
|
||||
...session.messages,
|
||||
{ role: "assistant", content: "done", timestamp: 2 },
|
||||
];
|
||||
},
|
||||
abort: async () => {},
|
||||
dispose: () => {},
|
||||
steer: async () => {},
|
||||
};
|
||||
|
||||
return session;
|
||||
}
|
||||
|
||||
const testModel = {
|
||||
api: "openai-completions",
|
||||
provider: "openai",
|
||||
@@ -269,32 +335,14 @@ describe("runEmbeddedAttempt sessions_spawn workspace inheritance", () => {
|
||||
const tempPaths: string[] = [];
|
||||
|
||||
beforeEach(() => {
|
||||
hoisted.spawnSubagentDirectMock.mockReset().mockResolvedValue({
|
||||
status: "accepted",
|
||||
childSessionKey: "agent:main:subagent:child",
|
||||
runId: "run-child",
|
||||
resetEmbeddedAttemptHarness({
|
||||
includeSpawnSubagent: true,
|
||||
subscribeImpl: createSubscriptionMock,
|
||||
});
|
||||
hoisted.createAgentSessionMock.mockReset();
|
||||
hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager);
|
||||
hoisted.resolveSandboxContextMock.mockReset();
|
||||
hoisted.subscribeEmbeddedPiSessionMock.mockReset().mockImplementation(createSubscriptionMock);
|
||||
hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({
|
||||
release: async () => {},
|
||||
});
|
||||
hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null);
|
||||
hoisted.sessionManager.branch.mockReset();
|
||||
hoisted.sessionManager.resetLeaf.mockReset();
|
||||
hoisted.sessionManager.buildSessionContext.mockReset().mockReturnValue({ messages: [] });
|
||||
hoisted.sessionManager.appendCustomEntry.mockReset();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
while (tempPaths.length > 0) {
|
||||
const target = tempPaths.pop();
|
||||
if (target) {
|
||||
await fs.rm(target, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
await cleanupTempPaths(tempPaths);
|
||||
});
|
||||
|
||||
it("passes the real workspace to sessions_spawn when workspaceAccess is ro", async () => {
|
||||
@@ -394,26 +442,11 @@ describe("runEmbeddedAttempt cache-ttl tracking after compaction", () => {
|
||||
const tempPaths: string[] = [];
|
||||
|
||||
beforeEach(() => {
|
||||
hoisted.createAgentSessionMock.mockReset();
|
||||
hoisted.sessionManagerOpenMock.mockReset().mockReturnValue(hoisted.sessionManager);
|
||||
hoisted.resolveSandboxContextMock.mockReset();
|
||||
hoisted.acquireSessionWriteLockMock.mockReset().mockResolvedValue({
|
||||
release: async () => {},
|
||||
});
|
||||
hoisted.sessionManager.getLeafEntry.mockReset().mockReturnValue(null);
|
||||
hoisted.sessionManager.branch.mockReset();
|
||||
hoisted.sessionManager.resetLeaf.mockReset();
|
||||
hoisted.sessionManager.buildSessionContext.mockReset().mockReturnValue({ messages: [] });
|
||||
hoisted.sessionManager.appendCustomEntry.mockReset();
|
||||
resetEmbeddedAttemptHarness();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
while (tempPaths.length > 0) {
|
||||
const target = tempPaths.pop();
|
||||
if (target) {
|
||||
await fs.rm(target, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
await cleanupTempPaths(tempPaths);
|
||||
});
|
||||
|
||||
async function runAttemptWithCacheTtl(compactionCount: number) {
|
||||
@@ -428,30 +461,9 @@ describe("runEmbeddedAttempt cache-ttl tracking after compaction", () => {
|
||||
getCompactionCount: () => compactionCount,
|
||||
}));
|
||||
|
||||
hoisted.createAgentSessionMock.mockImplementation(async () => {
|
||||
const session: MutableSession = {
|
||||
sessionId: "embedded-session",
|
||||
messages: [],
|
||||
isCompacting: false,
|
||||
isStreaming: false,
|
||||
agent: {
|
||||
replaceMessages: (messages: unknown[]) => {
|
||||
session.messages = [...messages];
|
||||
},
|
||||
},
|
||||
prompt: async () => {
|
||||
session.messages = [
|
||||
...session.messages,
|
||||
{ role: "assistant", content: "done", timestamp: 2 },
|
||||
];
|
||||
},
|
||||
abort: async () => {},
|
||||
dispose: () => {},
|
||||
steer: async () => {},
|
||||
};
|
||||
|
||||
return { session };
|
||||
});
|
||||
hoisted.createAgentSessionMock.mockImplementation(async () => ({
|
||||
session: createDefaultEmbeddedSession(),
|
||||
}));
|
||||
|
||||
return await runEmbeddedAttempt({
|
||||
sessionId: "embedded-session",
|
||||
@@ -591,30 +603,9 @@ describe("runEmbeddedAttempt context engine sessionKey forwarding", () => {
|
||||
.mockReset()
|
||||
.mockReturnValue({ messages: seedMessages });
|
||||
|
||||
hoisted.createAgentSessionMock.mockImplementation(async () => {
|
||||
const session: MutableSession = {
|
||||
sessionId: "embedded-session",
|
||||
messages: [],
|
||||
isCompacting: false,
|
||||
isStreaming: false,
|
||||
agent: {
|
||||
replaceMessages: (messages: unknown[]) => {
|
||||
session.messages = [...messages];
|
||||
},
|
||||
},
|
||||
prompt: async () => {
|
||||
session.messages = [
|
||||
...session.messages,
|
||||
{ role: "assistant", content: "done", timestamp: 2 },
|
||||
];
|
||||
},
|
||||
abort: async () => {},
|
||||
dispose: () => {},
|
||||
steer: async () => {},
|
||||
};
|
||||
|
||||
return { session };
|
||||
});
|
||||
hoisted.createAgentSessionMock.mockImplementation(async () => ({
|
||||
session: createDefaultEmbeddedSession(),
|
||||
}));
|
||||
|
||||
return await runEmbeddedAttempt({
|
||||
sessionId: "embedded-session",
|
||||
|
||||
20
src/agents/pi-embedded-runner/stream-payload-utils.ts
Normal file
20
src/agents/pi-embedded-runner/stream-payload-utils.ts
Normal file
@@ -0,0 +1,20 @@
|
||||
import type { StreamFn } from "@mariozechner/pi-agent-core";
|
||||
|
||||
export function streamWithPayloadPatch(
|
||||
underlying: StreamFn,
|
||||
model: Parameters<StreamFn>[0],
|
||||
context: Parameters<StreamFn>[1],
|
||||
options: Parameters<StreamFn>[2],
|
||||
patchPayload: (payload: Record<string, unknown>) => void,
|
||||
) {
|
||||
const originalOnPayload = options?.onPayload;
|
||||
return underlying(model, context, {
|
||||
...options,
|
||||
onPayload: (payload) => {
|
||||
if (payload && typeof payload === "object") {
|
||||
patchPayload(payload as Record<string, unknown>);
|
||||
}
|
||||
return originalOnPayload?.(payload, model);
|
||||
},
|
||||
});
|
||||
}
|
||||
@@ -1,22 +1,13 @@
|
||||
import { spawnSync } from "node:child_process";
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { withTempDir } from "../../test-helpers/temp-dir.js";
|
||||
import {
|
||||
buildPinnedWritePlan,
|
||||
SANDBOX_PINNED_MUTATION_PYTHON,
|
||||
} from "./fs-bridge-mutation-helper.js";
|
||||
|
||||
async function withTempRoot<T>(prefix: string, run: (root: string) => Promise<T>): Promise<T> {
|
||||
const root = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
|
||||
try {
|
||||
return await run(root);
|
||||
} finally {
|
||||
await fs.rm(root, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
function runMutation(args: string[], input?: string) {
|
||||
return spawnSync("python3", ["-c", SANDBOX_PINNED_MUTATION_PYTHON, ...args], {
|
||||
input,
|
||||
@@ -56,7 +47,7 @@ function runWritePlan(args: string[], input?: string) {
|
||||
|
||||
describe("sandbox pinned mutation helper", () => {
|
||||
it("writes through a pinned directory fd", async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
|
||||
const workspace = path.join(root, "workspace");
|
||||
await fs.mkdir(workspace, { recursive: true });
|
||||
|
||||
@@ -72,7 +63,7 @@ describe("sandbox pinned mutation helper", () => {
|
||||
it.runIf(process.platform !== "win32")(
|
||||
"preserves stdin payload bytes when the pinned write plan runs through sh",
|
||||
async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
|
||||
const workspace = path.join(root, "workspace");
|
||||
await fs.mkdir(workspace, { recursive: true });
|
||||
|
||||
@@ -92,7 +83,7 @@ describe("sandbox pinned mutation helper", () => {
|
||||
it.runIf(process.platform !== "win32")(
|
||||
"rejects symlink-parent writes instead of materializing a temp file outside the mount",
|
||||
async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
|
||||
const workspace = path.join(root, "workspace");
|
||||
const outside = path.join(root, "outside");
|
||||
await fs.mkdir(workspace, { recursive: true });
|
||||
@@ -108,7 +99,7 @@ describe("sandbox pinned mutation helper", () => {
|
||||
);
|
||||
|
||||
it.runIf(process.platform !== "win32")("rejects symlink segments during mkdirp", async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
|
||||
const workspace = path.join(root, "workspace");
|
||||
const outside = path.join(root, "outside");
|
||||
await fs.mkdir(workspace, { recursive: true });
|
||||
@@ -123,7 +114,7 @@ describe("sandbox pinned mutation helper", () => {
|
||||
});
|
||||
|
||||
it.runIf(process.platform !== "win32")("remove unlinks the symlink itself", async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
|
||||
const workspace = path.join(root, "workspace");
|
||||
const outside = path.join(root, "outside");
|
||||
await fs.mkdir(workspace, { recursive: true });
|
||||
@@ -144,7 +135,7 @@ describe("sandbox pinned mutation helper", () => {
|
||||
it.runIf(process.platform !== "win32")(
|
||||
"rejects symlink destination parents during rename",
|
||||
async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
|
||||
const workspace = path.join(root, "workspace");
|
||||
const outside = path.join(root, "outside");
|
||||
await fs.mkdir(workspace, { recursive: true });
|
||||
@@ -175,7 +166,7 @@ describe("sandbox pinned mutation helper", () => {
|
||||
it.runIf(process.platform !== "win32")(
|
||||
"copies directories across different mount roots during rename fallback",
|
||||
async () => {
|
||||
await withTempRoot("openclaw-mutation-helper-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-mutation-helper-" }, async (root) => {
|
||||
const sourceRoot = path.join(root, "source");
|
||||
const destRoot = path.join(root, "dest");
|
||||
await fs.mkdir(path.join(sourceRoot, "dir", "nested"), { recursive: true });
|
||||
|
||||
57
src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts
Normal file
57
src/agents/test-helpers/pi-embedded-runner-e2e-fixtures.ts
Normal file
@@ -0,0 +1,57 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import type { OpenClawConfig } from "../../config/config.js";
|
||||
|
||||
export type EmbeddedPiRunnerTestWorkspace = {
|
||||
tempRoot: string;
|
||||
agentDir: string;
|
||||
workspaceDir: string;
|
||||
};
|
||||
|
||||
export async function createEmbeddedPiRunnerTestWorkspace(
|
||||
prefix: string,
|
||||
): Promise<EmbeddedPiRunnerTestWorkspace> {
|
||||
const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
|
||||
const agentDir = path.join(tempRoot, "agent");
|
||||
const workspaceDir = path.join(tempRoot, "workspace");
|
||||
await fs.mkdir(agentDir, { recursive: true });
|
||||
await fs.mkdir(workspaceDir, { recursive: true });
|
||||
return { tempRoot, agentDir, workspaceDir };
|
||||
}
|
||||
|
||||
export async function cleanupEmbeddedPiRunnerTestWorkspace(
|
||||
workspace: EmbeddedPiRunnerTestWorkspace | undefined,
|
||||
): Promise<void> {
|
||||
if (!workspace) {
|
||||
return;
|
||||
}
|
||||
await fs.rm(workspace.tempRoot, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
export function createEmbeddedPiRunnerOpenAiConfig(modelIds: string[]): OpenClawConfig {
|
||||
return {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
api: "openai-responses",
|
||||
apiKey: "sk-test",
|
||||
baseUrl: "https://example.com",
|
||||
models: modelIds.map((id) => ({
|
||||
id,
|
||||
name: `Mock ${id}`,
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 16_000,
|
||||
maxTokens: 2048,
|
||||
})),
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export async function immediateEnqueue<T>(task: () => Promise<T>): Promise<T> {
|
||||
return await task();
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
import { Type } from "@sinclair/typebox";
|
||||
import { loadConfig } from "../../config/config.js";
|
||||
import { type OpenClawConfig, loadConfig } from "../../config/config.js";
|
||||
import { callGateway } from "../../gateway/call.js";
|
||||
import { capArrayByJsonBytes } from "../../gateway/session-utils.fs.js";
|
||||
import { jsonUtf8Bytes } from "../../infra/json-utf8-bytes.js";
|
||||
@@ -169,6 +169,7 @@ function enforceSessionsHistoryHardCap(params: {
|
||||
export function createSessionsHistoryTool(opts?: {
|
||||
agentSessionKey?: string;
|
||||
sandboxed?: boolean;
|
||||
config?: OpenClawConfig;
|
||||
}): AnyAgentTool {
|
||||
return {
|
||||
label: "Session History",
|
||||
@@ -180,7 +181,7 @@ export function createSessionsHistoryTool(opts?: {
|
||||
const sessionKeyParam = readStringParam(params, "sessionKey", {
|
||||
required: true,
|
||||
});
|
||||
const cfg = loadConfig();
|
||||
const cfg = opts?.config ?? loadConfig();
|
||||
const { mainKey, alias, effectiveRequesterKey, restrictToSpawned } =
|
||||
resolveSandboxedSessionToolContext({
|
||||
cfg,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import path from "node:path";
|
||||
import { Type } from "@sinclair/typebox";
|
||||
import { loadConfig } from "../../config/config.js";
|
||||
import { type OpenClawConfig, loadConfig } from "../../config/config.js";
|
||||
import {
|
||||
resolveSessionFilePath,
|
||||
resolveSessionFilePathOptions,
|
||||
@@ -33,6 +33,7 @@ const SessionsListToolSchema = Type.Object({
|
||||
export function createSessionsListTool(opts?: {
|
||||
agentSessionKey?: string;
|
||||
sandboxed?: boolean;
|
||||
config?: OpenClawConfig;
|
||||
}): AnyAgentTool {
|
||||
return {
|
||||
label: "Sessions",
|
||||
@@ -41,7 +42,7 @@ export function createSessionsListTool(opts?: {
|
||||
parameters: SessionsListToolSchema,
|
||||
execute: async (_toolCallId, args) => {
|
||||
const params = args as Record<string, unknown>;
|
||||
const cfg = loadConfig();
|
||||
const cfg = opts?.config ?? loadConfig();
|
||||
const { mainKey, alias, requesterInternalKey, restrictToSpawned } =
|
||||
resolveSandboxedSessionToolContext({
|
||||
cfg,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import crypto from "node:crypto";
|
||||
import { Type } from "@sinclair/typebox";
|
||||
import { loadConfig } from "../../config/config.js";
|
||||
import { type OpenClawConfig, loadConfig } from "../../config/config.js";
|
||||
import { callGateway } from "../../gateway/call.js";
|
||||
import { normalizeAgentId, resolveAgentIdFromSessionKey } from "../../routing/session-key.js";
|
||||
import { SESSION_LABEL_MAX_LENGTH } from "../../sessions/session-label.js";
|
||||
@@ -36,6 +36,7 @@ export function createSessionsSendTool(opts?: {
|
||||
agentSessionKey?: string;
|
||||
agentChannel?: GatewayMessageChannel;
|
||||
sandboxed?: boolean;
|
||||
config?: OpenClawConfig;
|
||||
}): AnyAgentTool {
|
||||
return {
|
||||
label: "Session Send",
|
||||
@@ -46,7 +47,7 @@ export function createSessionsSendTool(opts?: {
|
||||
execute: async (_toolCallId, args) => {
|
||||
const params = args as Record<string, unknown>;
|
||||
const message = readStringParam(params, "message", { required: true });
|
||||
const cfg = loadConfig();
|
||||
const cfg = opts?.config ?? loadConfig();
|
||||
const { mainKey, alias, effectiveRequesterKey, restrictToSpawned } =
|
||||
resolveSandboxedSessionToolContext({
|
||||
cfg,
|
||||
|
||||
68
src/browser/chrome-mcp.snapshot.test.ts
Normal file
68
src/browser/chrome-mcp.snapshot.test.ts
Normal file
@@ -0,0 +1,68 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
buildAiSnapshotFromChromeMcpSnapshot,
|
||||
flattenChromeMcpSnapshotToAriaNodes,
|
||||
} from "./chrome-mcp.snapshot.js";
|
||||
|
||||
const snapshot = {
|
||||
id: "root",
|
||||
role: "document",
|
||||
name: "Example",
|
||||
children: [
|
||||
{
|
||||
id: "btn-1",
|
||||
role: "button",
|
||||
name: "Continue",
|
||||
},
|
||||
{
|
||||
id: "txt-1",
|
||||
role: "textbox",
|
||||
name: "Email",
|
||||
value: "peter@example.com",
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
describe("chrome MCP snapshot conversion", () => {
|
||||
it("flattens structured snapshots into aria-style nodes", () => {
|
||||
const nodes = flattenChromeMcpSnapshotToAriaNodes(snapshot, 10);
|
||||
expect(nodes).toEqual([
|
||||
{
|
||||
ref: "root",
|
||||
role: "document",
|
||||
name: "Example",
|
||||
value: undefined,
|
||||
description: undefined,
|
||||
depth: 0,
|
||||
},
|
||||
{
|
||||
ref: "btn-1",
|
||||
role: "button",
|
||||
name: "Continue",
|
||||
value: undefined,
|
||||
description: undefined,
|
||||
depth: 1,
|
||||
},
|
||||
{
|
||||
ref: "txt-1",
|
||||
role: "textbox",
|
||||
name: "Email",
|
||||
value: "peter@example.com",
|
||||
description: undefined,
|
||||
depth: 1,
|
||||
},
|
||||
]);
|
||||
});
|
||||
|
||||
it("builds AI snapshots that preserve Chrome MCP uids as refs", () => {
|
||||
const result = buildAiSnapshotFromChromeMcpSnapshot({ root: snapshot });
|
||||
|
||||
expect(result.snapshot).toContain('- button "Continue" [ref=btn-1]');
|
||||
expect(result.snapshot).toContain('- textbox "Email" [ref=txt-1] value="peter@example.com"');
|
||||
expect(result.refs).toEqual({
|
||||
"btn-1": { role: "button", name: "Continue" },
|
||||
"txt-1": { role: "textbox", name: "Email" },
|
||||
});
|
||||
expect(result.stats.refs).toBe(2);
|
||||
});
|
||||
});
|
||||
246
src/browser/chrome-mcp.snapshot.ts
Normal file
246
src/browser/chrome-mcp.snapshot.ts
Normal file
@@ -0,0 +1,246 @@
|
||||
import type { SnapshotAriaNode } from "./client.js";
|
||||
import {
|
||||
getRoleSnapshotStats,
|
||||
type RoleRefMap,
|
||||
type RoleSnapshotOptions,
|
||||
} from "./pw-role-snapshot.js";
|
||||
|
||||
export type ChromeMcpSnapshotNode = {
|
||||
id?: string;
|
||||
role?: string;
|
||||
name?: string;
|
||||
value?: string | number | boolean;
|
||||
description?: string;
|
||||
children?: ChromeMcpSnapshotNode[];
|
||||
};
|
||||
|
||||
const INTERACTIVE_ROLES = new Set([
|
||||
"button",
|
||||
"checkbox",
|
||||
"combobox",
|
||||
"link",
|
||||
"listbox",
|
||||
"menuitem",
|
||||
"menuitemcheckbox",
|
||||
"menuitemradio",
|
||||
"option",
|
||||
"radio",
|
||||
"searchbox",
|
||||
"slider",
|
||||
"spinbutton",
|
||||
"switch",
|
||||
"tab",
|
||||
"textbox",
|
||||
"treeitem",
|
||||
]);
|
||||
|
||||
const CONTENT_ROLES = new Set([
|
||||
"article",
|
||||
"cell",
|
||||
"columnheader",
|
||||
"gridcell",
|
||||
"heading",
|
||||
"listitem",
|
||||
"main",
|
||||
"navigation",
|
||||
"region",
|
||||
"rowheader",
|
||||
]);
|
||||
|
||||
const STRUCTURAL_ROLES = new Set([
|
||||
"application",
|
||||
"directory",
|
||||
"document",
|
||||
"generic",
|
||||
"group",
|
||||
"ignored",
|
||||
"list",
|
||||
"menu",
|
||||
"menubar",
|
||||
"none",
|
||||
"presentation",
|
||||
"row",
|
||||
"rowgroup",
|
||||
"tablist",
|
||||
"table",
|
||||
"toolbar",
|
||||
"tree",
|
||||
"treegrid",
|
||||
]);
|
||||
|
||||
function normalizeRole(node: ChromeMcpSnapshotNode): string {
|
||||
const role = typeof node.role === "string" ? node.role.trim().toLowerCase() : "";
|
||||
return role || "generic";
|
||||
}
|
||||
|
||||
function normalizeString(value: unknown): string | undefined {
|
||||
if (typeof value === "string") {
|
||||
const trimmed = value.trim();
|
||||
return trimmed || undefined;
|
||||
}
|
||||
if (typeof value === "number" || typeof value === "boolean") {
|
||||
return String(value);
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
function escapeQuoted(value: string): string {
|
||||
return value.replaceAll("\\", "\\\\").replaceAll('"', '\\"');
|
||||
}
|
||||
|
||||
function shouldIncludeNode(params: {
|
||||
role: string;
|
||||
name?: string;
|
||||
options?: RoleSnapshotOptions;
|
||||
}): boolean {
|
||||
if (params.options?.interactive && !INTERACTIVE_ROLES.has(params.role)) {
|
||||
return false;
|
||||
}
|
||||
if (params.options?.compact && STRUCTURAL_ROLES.has(params.role) && !params.name) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function shouldCreateRef(role: string, name?: string): boolean {
|
||||
return INTERACTIVE_ROLES.has(role) || (CONTENT_ROLES.has(role) && Boolean(name));
|
||||
}
|
||||
|
||||
type DuplicateTracker = {
|
||||
counts: Map<string, number>;
|
||||
keysByRef: Map<string, string>;
|
||||
duplicates: Set<string>;
|
||||
};
|
||||
|
||||
function createDuplicateTracker(): DuplicateTracker {
|
||||
return {
|
||||
counts: new Map(),
|
||||
keysByRef: new Map(),
|
||||
duplicates: new Set(),
|
||||
};
|
||||
}
|
||||
|
||||
function registerRef(
|
||||
tracker: DuplicateTracker,
|
||||
ref: string,
|
||||
role: string,
|
||||
name?: string,
|
||||
): number | undefined {
|
||||
const key = `${role}:${name ?? ""}`;
|
||||
const count = tracker.counts.get(key) ?? 0;
|
||||
tracker.counts.set(key, count + 1);
|
||||
tracker.keysByRef.set(ref, key);
|
||||
if (count > 0) {
|
||||
tracker.duplicates.add(key);
|
||||
return count;
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
export function flattenChromeMcpSnapshotToAriaNodes(
|
||||
root: ChromeMcpSnapshotNode,
|
||||
limit = 500,
|
||||
): SnapshotAriaNode[] {
|
||||
const boundedLimit = Math.max(1, Math.min(2000, Math.floor(limit)));
|
||||
const out: SnapshotAriaNode[] = [];
|
||||
|
||||
const visit = (node: ChromeMcpSnapshotNode, depth: number) => {
|
||||
if (out.length >= boundedLimit) {
|
||||
return;
|
||||
}
|
||||
const ref = normalizeString(node.id);
|
||||
if (ref) {
|
||||
out.push({
|
||||
ref,
|
||||
role: normalizeRole(node),
|
||||
name: normalizeString(node.name) ?? "",
|
||||
value: normalizeString(node.value),
|
||||
description: normalizeString(node.description),
|
||||
depth,
|
||||
});
|
||||
}
|
||||
for (const child of node.children ?? []) {
|
||||
visit(child, depth + 1);
|
||||
if (out.length >= boundedLimit) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
visit(root, 0);
|
||||
return out;
|
||||
}
|
||||
|
||||
export function buildAiSnapshotFromChromeMcpSnapshot(params: {
|
||||
root: ChromeMcpSnapshotNode;
|
||||
options?: RoleSnapshotOptions;
|
||||
maxChars?: number;
|
||||
}): {
|
||||
snapshot: string;
|
||||
truncated?: boolean;
|
||||
refs: RoleRefMap;
|
||||
stats: { lines: number; chars: number; refs: number; interactive: number };
|
||||
} {
|
||||
const refs: RoleRefMap = {};
|
||||
const tracker = createDuplicateTracker();
|
||||
const lines: string[] = [];
|
||||
|
||||
const visit = (node: ChromeMcpSnapshotNode, depth: number) => {
|
||||
const role = normalizeRole(node);
|
||||
const name = normalizeString(node.name);
|
||||
const value = normalizeString(node.value);
|
||||
const description = normalizeString(node.description);
|
||||
const maxDepth = params.options?.maxDepth;
|
||||
if (maxDepth !== undefined && depth > maxDepth) {
|
||||
return;
|
||||
}
|
||||
|
||||
const includeNode = shouldIncludeNode({ role, name, options: params.options });
|
||||
if (includeNode) {
|
||||
let line = `${" ".repeat(depth)}- ${role}`;
|
||||
if (name) {
|
||||
line += ` "${escapeQuoted(name)}"`;
|
||||
}
|
||||
const ref = normalizeString(node.id);
|
||||
if (ref && shouldCreateRef(role, name)) {
|
||||
const nth = registerRef(tracker, ref, role, name);
|
||||
refs[ref] = nth === undefined ? { role, name } : { role, name, nth };
|
||||
line += ` [ref=${ref}]`;
|
||||
}
|
||||
if (value) {
|
||||
line += ` value="${escapeQuoted(value)}"`;
|
||||
}
|
||||
if (description) {
|
||||
line += ` description="${escapeQuoted(description)}"`;
|
||||
}
|
||||
lines.push(line);
|
||||
}
|
||||
|
||||
for (const child of node.children ?? []) {
|
||||
visit(child, depth + 1);
|
||||
}
|
||||
};
|
||||
|
||||
visit(params.root, 0);
|
||||
|
||||
for (const [ref, data] of Object.entries(refs)) {
|
||||
const key = tracker.keysByRef.get(ref);
|
||||
if (key && !tracker.duplicates.has(key)) {
|
||||
delete data.nth;
|
||||
}
|
||||
}
|
||||
|
||||
let snapshot = lines.join("\n");
|
||||
let truncated = false;
|
||||
const maxChars =
|
||||
typeof params.maxChars === "number" && Number.isFinite(params.maxChars) && params.maxChars > 0
|
||||
? Math.floor(params.maxChars)
|
||||
: undefined;
|
||||
if (maxChars && snapshot.length > maxChars) {
|
||||
snapshot = `${snapshot.slice(0, maxChars)}\n\n[...TRUNCATED - page too large]`;
|
||||
truncated = true;
|
||||
}
|
||||
|
||||
const stats = getRoleSnapshotStats(snapshot, refs);
|
||||
return truncated ? { snapshot, truncated, refs, stats } : { snapshot, refs, stats };
|
||||
}
|
||||
108
src/browser/chrome-mcp.test.ts
Normal file
108
src/browser/chrome-mcp.test.ts
Normal file
@@ -0,0 +1,108 @@
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import {
|
||||
listChromeMcpTabs,
|
||||
openChromeMcpTab,
|
||||
resetChromeMcpSessionsForTest,
|
||||
setChromeMcpSessionFactoryForTest,
|
||||
} from "./chrome-mcp.js";
|
||||
|
||||
type ToolCall = {
|
||||
name: string;
|
||||
arguments?: Record<string, unknown>;
|
||||
};
|
||||
|
||||
type ChromeMcpSessionFactory = Exclude<
|
||||
Parameters<typeof setChromeMcpSessionFactoryForTest>[0],
|
||||
null
|
||||
>;
|
||||
type ChromeMcpSession = Awaited<ReturnType<ChromeMcpSessionFactory>>;
|
||||
|
||||
// Builds a fake Chrome MCP session whose callTool returns canned *text*
// responses (no structuredContent), exercising the text-parsing fallback in
// chrome-mcp.ts. `list_pages` reports two tabs; `new_page` reports three with
// the new one marked [selected].
function createFakeSession(): ChromeMcpSession {
  const callTool = vi.fn(async ({ name }: ToolCall) => {
    if (name === "list_pages") {
      return {
        content: [
          {
            type: "text",
            text: [
              "## Pages",
              "1: https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session [selected]",
              "2: https://github.com/openclaw/openclaw/pull/45318",
            ].join("\n"),
          },
        ],
      };
    }
    if (name === "new_page") {
      return {
        content: [
          {
            type: "text",
            text: [
              "## Pages",
              "1: https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session",
              "2: https://github.com/openclaw/openclaw/pull/45318",
              "3: https://example.com/ [selected]",
            ].join("\n"),
          },
        ],
      };
    }
    // Any other tool indicates the code under test called something unexpected.
    throw new Error(`unexpected tool ${name}`);
  });

  return {
    client: {
      callTool,
      // getSession() verifies list_pages is advertised before using a session.
      listTools: vi.fn().mockResolvedValue({ tools: [{ name: "list_pages" }] }),
      close: vi.fn().mockResolvedValue(undefined),
      connect: vi.fn().mockResolvedValue(undefined),
    },
    transport: {
      // Non-null pid so the session cache treats this session as alive.
      pid: 123,
    },
    ready: Promise.resolve(),
  } as unknown as ChromeMcpSession;
}
|
||||
|
||||
// Covers the text-parsing fallback used when chrome-devtools-mcp returns a
// human-readable "## Pages" listing instead of structuredContent.
describe("chrome MCP page parsing", () => {
  beforeEach(async () => {
    // Clear the module-level session cache and test factory between tests.
    await resetChromeMcpSessionsForTest();
  });

  it("parses list_pages text responses when structuredContent is missing", async () => {
    const factory: ChromeMcpSessionFactory = async () => createFakeSession();
    setChromeMcpSessionFactoryForTest(factory);

    const tabs = await listChromeMcpTabs("chrome-live");

    // The "[selected]" marker must be stripped from the parsed URL.
    expect(tabs).toEqual([
      {
        targetId: "1",
        title: "",
        url: "https://developer.chrome.com/blog/chrome-devtools-mcp-debug-your-browser-session",
        type: "page",
      },
      {
        targetId: "2",
        title: "",
        url: "https://github.com/openclaw/openclaw/pull/45318",
        type: "page",
      },
    ]);
  });

  it("parses new_page text responses and returns the created tab", async () => {
    const factory: ChromeMcpSessionFactory = async () => createFakeSession();
    setChromeMcpSessionFactoryForTest(factory);

    const tab = await openChromeMcpTab("chrome-live", "https://example.com/");

    // The created tab is the one flagged [selected] in the fake response.
    expect(tab).toEqual({
      targetId: "3",
      title: "",
      url: "https://example.com/",
      type: "page",
    });
  });
});
|
||||
488
src/browser/chrome-mcp.ts
Normal file
488
src/browser/chrome-mcp.ts
Normal file
@@ -0,0 +1,488 @@
|
||||
import { randomUUID } from "node:crypto";
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
|
||||
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
|
||||
import type { ChromeMcpSnapshotNode } from "./chrome-mcp.snapshot.js";
|
||||
import type { BrowserTab } from "./client.js";
|
||||
import { BrowserProfileUnavailableError, BrowserTabNotFoundError } from "./errors.js";
|
||||
|
||||
// Minimal structural view of one page entry reported by chrome-devtools-mcp.
type ChromeMcpStructuredPage = {
  id: number;
  url?: string;
  selected?: boolean;
};

// Loosely-typed MCP tool-call result; fields are validated at each use site.
type ChromeMcpToolResult = {
  structuredContent?: Record<string, unknown>;
  content?: Array<Record<string, unknown>>;
  isError?: boolean;
};

// A live connection to one chrome-devtools-mcp process. `ready` resolves once
// the client has connected and verified the expected tools (createRealSession).
type ChromeMcpSession = {
  client: Client;
  transport: StdioClientTransport;
  ready: Promise<void>;
};

type ChromeMcpSessionFactory = (profileName: string) => Promise<ChromeMcpSession>;

// Launch command for the MCP server. --autoConnect attaches to an
// already-running Chrome (see the "existing-session" error text below).
const DEFAULT_CHROME_MCP_COMMAND = "npx";
const DEFAULT_CHROME_MCP_ARGS = [
  "-y",
  "chrome-devtools-mcp@latest",
  "--autoConnect",
  "--experimental-page-id-routing",
];

// One cached session per profile name; tests may swap the factory via
// setChromeMcpSessionFactoryForTest.
const sessions = new Map<string, ChromeMcpSession>();
let sessionFactory: ChromeMcpSessionFactory | null = null;
|
||||
|
||||
function asRecord(value: unknown): Record<string, unknown> | null {
|
||||
return value && typeof value === "object" && !Array.isArray(value)
|
||||
? (value as Record<string, unknown>)
|
||||
: null;
|
||||
}
|
||||
|
||||
function asPages(value: unknown): ChromeMcpStructuredPage[] {
|
||||
if (!Array.isArray(value)) {
|
||||
return [];
|
||||
}
|
||||
const out: ChromeMcpStructuredPage[] = [];
|
||||
for (const entry of value) {
|
||||
const record = asRecord(entry);
|
||||
if (!record || typeof record.id !== "number") {
|
||||
continue;
|
||||
}
|
||||
out.push({
|
||||
id: record.id,
|
||||
url: typeof record.url === "string" ? record.url : undefined,
|
||||
selected: record.selected === true,
|
||||
});
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
function parsePageId(targetId: string): number {
|
||||
const parsed = Number.parseInt(targetId.trim(), 10);
|
||||
if (!Number.isFinite(parsed)) {
|
||||
throw new BrowserTabNotFoundError();
|
||||
}
|
||||
return parsed;
|
||||
}
|
||||
|
||||
function toBrowserTabs(pages: ChromeMcpStructuredPage[]): BrowserTab[] {
|
||||
return pages.map((page) => ({
|
||||
targetId: String(page.id),
|
||||
title: "",
|
||||
url: page.url ?? "",
|
||||
type: "page",
|
||||
}));
|
||||
}
|
||||
|
||||
function extractStructuredContent(result: ChromeMcpToolResult): Record<string, unknown> {
|
||||
return asRecord(result.structuredContent) ?? {};
|
||||
}
|
||||
|
||||
function extractTextContent(result: ChromeMcpToolResult): string[] {
|
||||
const content = Array.isArray(result.content) ? result.content : [];
|
||||
return content
|
||||
.map((entry) => {
|
||||
const record = asRecord(entry);
|
||||
return record && typeof record.text === "string" ? record.text : "";
|
||||
})
|
||||
.filter(Boolean);
|
||||
}
|
||||
|
||||
function extractTextPages(result: ChromeMcpToolResult): ChromeMcpStructuredPage[] {
|
||||
const pages: ChromeMcpStructuredPage[] = [];
|
||||
for (const block of extractTextContent(result)) {
|
||||
for (const line of block.split(/\r?\n/)) {
|
||||
const match = line.match(/^\s*(\d+):\s+(.+?)(?:\s+\[(selected)\])?\s*$/i);
|
||||
if (!match) {
|
||||
continue;
|
||||
}
|
||||
pages.push({
|
||||
id: Number.parseInt(match[1] ?? "", 10),
|
||||
url: match[2]?.trim() || undefined,
|
||||
selected: Boolean(match[3]),
|
||||
});
|
||||
}
|
||||
}
|
||||
return pages;
|
||||
}
|
||||
|
||||
// Prefer the machine-readable structuredContent.pages; fall back to parsing
// the text listing when the server returned none.
function extractStructuredPages(result: ChromeMcpToolResult): ChromeMcpStructuredPage[] {
  const structured = asPages(extractStructuredContent(result).pages);
  return structured.length > 0 ? structured : extractTextPages(result);
}
|
||||
|
||||
// Pulls the structured snapshot node out of a take_snapshot result; throws
// when the server returned no structured snapshot data.
function extractSnapshot(result: ChromeMcpToolResult): ChromeMcpSnapshotNode {
  const structured = extractStructuredContent(result);
  const snapshot = asRecord(structured.snapshot);
  if (!snapshot) {
    throw new Error("Chrome MCP snapshot response was missing structured snapshot data.");
  }
  // Trust boundary with the MCP server: only record-ness is checked here.
  return snapshot as unknown as ChromeMcpSnapshotNode;
}
|
||||
|
||||
function extractJsonBlock(text: string): unknown {
|
||||
const match = text.match(/```json\s*([\s\S]*?)\s*```/i);
|
||||
const raw = match?.[1]?.trim() || text.trim();
|
||||
return raw ? JSON.parse(raw) : null;
|
||||
}
|
||||
|
||||
/**
 * Spawns a chrome-devtools-mcp server over stdio and wraps it in a session.
 * The session object is returned immediately; `ready` resolves once the client
 * has connected and verified that the expected tools are advertised. Callers
 * must await `ready` before use (getSession does this).
 */
async function createRealSession(profileName: string): Promise<ChromeMcpSession> {
  const transport = new StdioClientTransport({
    command: DEFAULT_CHROME_MCP_COMMAND,
    args: DEFAULT_CHROME_MCP_ARGS,
    stderr: "pipe",
  });
  const client = new Client(
    {
      name: "openclaw-browser",
      version: "0.0.0",
    },
    {},
  );

  const ready = (async () => {
    try {
      await client.connect(transport);
      // Sanity-check the server actually exposes the navigation tools we rely on.
      const tools = await client.listTools();
      if (!tools.tools.some((tool) => tool.name === "list_pages")) {
        throw new Error("Chrome MCP server did not expose the expected navigation tools.");
      }
    } catch (err) {
      // Best-effort cleanup, then surface a profile-level error with guidance.
      await client.close().catch(() => {});
      throw new BrowserProfileUnavailableError(
        `Chrome MCP existing-session attach failed for profile "${profileName}". ` +
          `Make sure Chrome is running, enable chrome://inspect/#remote-debugging, and approve the connection. ` +
          `Details: ${String(err)}`,
      );
    }
  })();

  return {
    client,
    transport,
    ready,
  };
}
|
||||
|
||||
/**
 * Returns the cached session for `profileName`, creating one if needed.
 * A cached session whose child process has exited (transport.pid === null) is
 * evicted and replaced. If startup (`ready`) fails, the cache entry is removed
 * — but only when it still refers to this same transport, so a concurrently
 * created replacement is not clobbered.
 */
async function getSession(profileName: string): Promise<ChromeMcpSession> {
  let session = sessions.get(profileName);
  if (session && session.transport.pid === null) {
    sessions.delete(profileName);
    session = undefined;
  }
  if (!session) {
    // Tests may inject a factory; production uses the real stdio session.
    session = await (sessionFactory ?? createRealSession)(profileName);
    sessions.set(profileName, session);
  }
  try {
    await session.ready;
    return session;
  } catch (err) {
    const current = sessions.get(profileName);
    if (current?.transport === session.transport) {
      sessions.delete(profileName);
    }
    throw err;
  }
}
|
||||
|
||||
/**
 * Invokes an MCP tool against the profile's session.
 * On failure the session is evicted from the cache and closed (best effort) so
 * the next call starts from a fresh session; the original error is rethrown.
 */
async function callTool(
  profileName: string,
  name: string,
  args: Record<string, unknown> = {},
): Promise<ChromeMcpToolResult> {
  const session = await getSession(profileName);
  try {
    return (await session.client.callTool({
      name,
      arguments: args,
    })) as ChromeMcpToolResult;
  } catch (err) {
    // Evict before closing so concurrent callers don't grab the dying session.
    sessions.delete(profileName);
    await session.client.close().catch(() => {});
    throw err;
  }
}
|
||||
|
||||
async function withTempFile<T>(fn: (filePath: string) => Promise<T>): Promise<T> {
|
||||
const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-chrome-mcp-"));
|
||||
const filePath = path.join(dir, randomUUID());
|
||||
try {
|
||||
return await fn(filePath);
|
||||
} finally {
|
||||
await fs.rm(dir, { recursive: true, force: true }).catch(() => {});
|
||||
}
|
||||
}
|
||||
|
||||
async function findPageById(profileName: string, pageId: number): Promise<ChromeMcpStructuredPage> {
|
||||
const pages = await listChromeMcpPages(profileName);
|
||||
const page = pages.find((entry) => entry.id === pageId);
|
||||
if (!page) {
|
||||
throw new BrowserTabNotFoundError();
|
||||
}
|
||||
return page;
|
||||
}
|
||||
|
||||
// Forces session creation and readiness for the profile; throws
// BrowserProfileUnavailableError (via getSession) when attach fails.
export async function ensureChromeMcpAvailable(profileName: string): Promise<void> {
  await getSession(profileName);
}
|
||||
|
||||
// PID of the profile's MCP server process, or null when no live session exists.
export function getChromeMcpPid(profileName: string): number | null {
  return sessions.get(profileName)?.transport.pid ?? null;
}
|
||||
|
||||
export async function closeChromeMcpSession(profileName: string): Promise<boolean> {
|
||||
const session = sessions.get(profileName);
|
||||
if (!session) {
|
||||
return false;
|
||||
}
|
||||
sessions.delete(profileName);
|
||||
await session.client.close().catch(() => {});
|
||||
return true;
|
||||
}
|
||||
|
||||
export async function stopAllChromeMcpSessions(): Promise<void> {
|
||||
const names = [...sessions.keys()];
|
||||
for (const name of names) {
|
||||
await closeChromeMcpSession(name).catch(() => {});
|
||||
}
|
||||
}
|
||||
|
||||
// Lists the profile's pages, preferring structuredContent and falling back to
// parsing the text "## Pages" listing.
export async function listChromeMcpPages(profileName: string): Promise<ChromeMcpStructuredPage[]> {
  const result = await callTool(profileName, "list_pages");
  return extractStructuredPages(result);
}
|
||||
|
||||
// Same as listChromeMcpPages but adapted to the generic BrowserTab shape.
export async function listChromeMcpTabs(profileName: string): Promise<BrowserTab[]> {
  return toBrowserTabs(await listChromeMcpPages(profileName));
}
|
||||
|
||||
export async function openChromeMcpTab(profileName: string, url: string): Promise<BrowserTab> {
|
||||
const result = await callTool(profileName, "new_page", { url });
|
||||
const pages = extractStructuredPages(result);
|
||||
const chosen = pages.find((page) => page.selected) ?? pages.at(-1);
|
||||
if (!chosen) {
|
||||
throw new Error("Chrome MCP did not return the created page.");
|
||||
}
|
||||
return {
|
||||
targetId: String(chosen.id),
|
||||
title: "",
|
||||
url: chosen.url ?? url,
|
||||
type: "page",
|
||||
};
|
||||
}
|
||||
|
||||
export async function focusChromeMcpTab(profileName: string, targetId: string): Promise<void> {
|
||||
await callTool(profileName, "select_page", {
|
||||
pageId: parsePageId(targetId),
|
||||
bringToFront: true,
|
||||
});
|
||||
}
|
||||
|
||||
export async function closeChromeMcpTab(profileName: string, targetId: string): Promise<void> {
|
||||
await callTool(profileName, "close_page", { pageId: parsePageId(targetId) });
|
||||
}
|
||||
|
||||
export async function navigateChromeMcpPage(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
url: string;
|
||||
timeoutMs?: number;
|
||||
}): Promise<{ url: string }> {
|
||||
await callTool(params.profileName, "navigate_page", {
|
||||
pageId: parsePageId(params.targetId),
|
||||
type: "url",
|
||||
url: params.url,
|
||||
...(typeof params.timeoutMs === "number" ? { timeout: params.timeoutMs } : {}),
|
||||
});
|
||||
const page = await findPageById(params.profileName, parsePageId(params.targetId));
|
||||
return { url: page.url ?? params.url };
|
||||
}
|
||||
|
||||
export async function takeChromeMcpSnapshot(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
}): Promise<ChromeMcpSnapshotNode> {
|
||||
const result = await callTool(params.profileName, "take_snapshot", {
|
||||
pageId: parsePageId(params.targetId),
|
||||
});
|
||||
return extractSnapshot(result);
|
||||
}
|
||||
|
||||
export async function takeChromeMcpScreenshot(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
uid?: string;
|
||||
fullPage?: boolean;
|
||||
format?: "png" | "jpeg";
|
||||
}): Promise<Buffer> {
|
||||
return await withTempFile(async (filePath) => {
|
||||
await callTool(params.profileName, "take_screenshot", {
|
||||
pageId: parsePageId(params.targetId),
|
||||
filePath,
|
||||
format: params.format ?? "png",
|
||||
...(params.uid ? { uid: params.uid } : {}),
|
||||
...(params.fullPage ? { fullPage: true } : {}),
|
||||
});
|
||||
return await fs.readFile(filePath);
|
||||
});
|
||||
}
|
||||
|
||||
export async function clickChromeMcpElement(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
uid: string;
|
||||
doubleClick?: boolean;
|
||||
}): Promise<void> {
|
||||
await callTool(params.profileName, "click", {
|
||||
pageId: parsePageId(params.targetId),
|
||||
uid: params.uid,
|
||||
...(params.doubleClick ? { dblClick: true } : {}),
|
||||
});
|
||||
}
|
||||
|
||||
export async function fillChromeMcpElement(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
uid: string;
|
||||
value: string;
|
||||
}): Promise<void> {
|
||||
await callTool(params.profileName, "fill", {
|
||||
pageId: parsePageId(params.targetId),
|
||||
uid: params.uid,
|
||||
value: params.value,
|
||||
});
|
||||
}
|
||||
|
||||
export async function fillChromeMcpForm(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
elements: Array<{ uid: string; value: string }>;
|
||||
}): Promise<void> {
|
||||
await callTool(params.profileName, "fill_form", {
|
||||
pageId: parsePageId(params.targetId),
|
||||
elements: params.elements,
|
||||
});
|
||||
}
|
||||
|
||||
export async function hoverChromeMcpElement(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
uid: string;
|
||||
}): Promise<void> {
|
||||
await callTool(params.profileName, "hover", {
|
||||
pageId: parsePageId(params.targetId),
|
||||
uid: params.uid,
|
||||
});
|
||||
}
|
||||
|
||||
export async function dragChromeMcpElement(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
fromUid: string;
|
||||
toUid: string;
|
||||
}): Promise<void> {
|
||||
await callTool(params.profileName, "drag", {
|
||||
pageId: parsePageId(params.targetId),
|
||||
from_uid: params.fromUid,
|
||||
to_uid: params.toUid,
|
||||
});
|
||||
}
|
||||
|
||||
export async function uploadChromeMcpFile(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
uid: string;
|
||||
filePath: string;
|
||||
}): Promise<void> {
|
||||
await callTool(params.profileName, "upload_file", {
|
||||
pageId: parsePageId(params.targetId),
|
||||
uid: params.uid,
|
||||
filePath: params.filePath,
|
||||
});
|
||||
}
|
||||
|
||||
export async function pressChromeMcpKey(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
key: string;
|
||||
}): Promise<void> {
|
||||
await callTool(params.profileName, "press_key", {
|
||||
pageId: parsePageId(params.targetId),
|
||||
key: params.key,
|
||||
});
|
||||
}
|
||||
|
||||
export async function resizeChromeMcpPage(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
width: number;
|
||||
height: number;
|
||||
}): Promise<void> {
|
||||
await callTool(params.profileName, "resize_page", {
|
||||
pageId: parsePageId(params.targetId),
|
||||
width: params.width,
|
||||
height: params.height,
|
||||
});
|
||||
}
|
||||
|
||||
export async function handleChromeMcpDialog(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
action: "accept" | "dismiss";
|
||||
promptText?: string;
|
||||
}): Promise<void> {
|
||||
await callTool(params.profileName, "handle_dialog", {
|
||||
pageId: parsePageId(params.targetId),
|
||||
action: params.action,
|
||||
...(params.promptText ? { promptText: params.promptText } : {}),
|
||||
});
|
||||
}
|
||||
|
||||
export async function evaluateChromeMcpScript(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
fn: string;
|
||||
args?: string[];
|
||||
}): Promise<unknown> {
|
||||
const result = await callTool(params.profileName, "evaluate_script", {
|
||||
pageId: parsePageId(params.targetId),
|
||||
function: params.fn,
|
||||
...(params.args?.length ? { args: params.args } : {}),
|
||||
});
|
||||
const message = extractStructuredContent(result).message;
|
||||
const text = typeof message === "string" ? message : "";
|
||||
if (!text.trim()) {
|
||||
return null;
|
||||
}
|
||||
return extractJsonBlock(text);
|
||||
}
|
||||
|
||||
export async function waitForChromeMcpText(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
text: string[];
|
||||
timeoutMs?: number;
|
||||
}): Promise<void> {
|
||||
await callTool(params.profileName, "wait_for", {
|
||||
pageId: parsePageId(params.targetId),
|
||||
text: params.text,
|
||||
...(typeof params.timeoutMs === "number" ? { timeout: params.timeoutMs } : {}),
|
||||
});
|
||||
}
|
||||
|
||||
// Test hook: overrides how sessions are created (null restores the real factory).
export function setChromeMcpSessionFactoryForTest(factory: ChromeMcpSessionFactory | null): void {
  sessionFactory = factory;
}
|
||||
|
||||
// Test hook: restores the real session factory and closes all cached sessions.
export async function resetChromeMcpSessionsForTest(): Promise<void> {
  sessionFactory = null;
  await stopAllChromeMcpSessions();
}
|
||||
@@ -3,6 +3,7 @@ import { fetchBrowserJson } from "./client-fetch.js";
|
||||
export type BrowserStatus = {
|
||||
enabled: boolean;
|
||||
profile?: string;
|
||||
driver?: "openclaw" | "extension" | "existing-session";
|
||||
running: boolean;
|
||||
cdpReady?: boolean;
|
||||
cdpHttp?: boolean;
|
||||
@@ -26,6 +27,7 @@ export type ProfileStatus = {
|
||||
cdpPort: number;
|
||||
cdpUrl: string;
|
||||
color: string;
|
||||
driver: "openclaw" | "extension" | "existing-session";
|
||||
running: boolean;
|
||||
tabCount: number;
|
||||
isDefault: boolean;
|
||||
@@ -165,7 +167,7 @@ export async function browserCreateProfile(
|
||||
name: string;
|
||||
color?: string;
|
||||
cdpUrl?: string;
|
||||
driver?: "openclaw" | "extension";
|
||||
driver?: "openclaw" | "extension" | "existing-session";
|
||||
},
|
||||
): Promise<BrowserCreateProfileResult> {
|
||||
return await fetchBrowserJson<BrowserCreateProfileResult>(
|
||||
|
||||
@@ -46,7 +46,7 @@ export type ResolvedBrowserProfile = {
|
||||
cdpHost: string;
|
||||
cdpIsLoopback: boolean;
|
||||
color: string;
|
||||
driver: "openclaw" | "extension";
|
||||
driver: "openclaw" | "extension" | "existing-session";
|
||||
attachOnly: boolean;
|
||||
};
|
||||
|
||||
@@ -335,7 +335,12 @@ export function resolveProfile(
|
||||
let cdpHost = resolved.cdpHost;
|
||||
let cdpPort = profile.cdpPort ?? 0;
|
||||
let cdpUrl = "";
|
||||
const driver = profile.driver === "extension" ? "extension" : "openclaw";
|
||||
const driver =
|
||||
profile.driver === "extension"
|
||||
? "extension"
|
||||
: profile.driver === "existing-session"
|
||||
? "existing-session"
|
||||
: "openclaw";
|
||||
|
||||
if (rawProfileUrl) {
|
||||
const parsed = parseHttpUrl(rawProfileUrl, `browser.profiles.${profileName}.cdpUrl`);
|
||||
@@ -356,7 +361,7 @@ export function resolveProfile(
|
||||
cdpIsLoopback: isLoopbackHost(cdpHost),
|
||||
color: profile.color,
|
||||
driver,
|
||||
attachOnly: profile.attachOnly ?? resolved.attachOnly,
|
||||
attachOnly: driver === "existing-session" ? true : (profile.attachOnly ?? resolved.attachOnly),
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
import type { ResolvedBrowserProfile } from "./config.js";
|
||||
|
||||
export type BrowserProfileMode = "local-managed" | "local-extension-relay" | "remote-cdp";
|
||||
export type BrowserProfileMode =
|
||||
| "local-managed"
|
||||
| "local-extension-relay"
|
||||
| "local-existing-session"
|
||||
| "remote-cdp";
|
||||
|
||||
export type BrowserProfileCapabilities = {
|
||||
mode: BrowserProfileMode;
|
||||
@@ -31,6 +35,20 @@ export function getBrowserProfileCapabilities(
|
||||
};
|
||||
}
|
||||
|
||||
if (profile.driver === "existing-session") {
|
||||
return {
|
||||
mode: "local-existing-session",
|
||||
isRemote: false,
|
||||
requiresRelay: false,
|
||||
requiresAttachedTab: false,
|
||||
usesPersistentPlaywright: false,
|
||||
supportsPerTabWs: false,
|
||||
supportsJsonTabEndpoints: false,
|
||||
supportsReset: false,
|
||||
supportsManagedTabLimit: false,
|
||||
};
|
||||
}
|
||||
|
||||
if (!profile.cdpIsLoopback) {
|
||||
return {
|
||||
mode: "remote-cdp",
|
||||
@@ -75,6 +93,9 @@ export function resolveDefaultSnapshotFormat(params: {
|
||||
if (capabilities.mode === "local-extension-relay") {
|
||||
return "aria";
|
||||
}
|
||||
if (capabilities.mode === "local-existing-session") {
|
||||
return "ai";
|
||||
}
|
||||
|
||||
return params.hasPlaywright ? "ai" : "aria";
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import fs from "node:fs";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { resolveBrowserConfig } from "./config.js";
|
||||
import { createBrowserProfilesService } from "./profiles-service.js";
|
||||
import type { BrowserRouteContext, BrowserServerState } from "./server-context.js";
|
||||
@@ -57,6 +57,10 @@ async function createWorkProfileWithConfig(params: {
|
||||
}
|
||||
|
||||
describe("BrowserProfilesService", () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
it("allocates next local port for new profiles", async () => {
|
||||
const { result, state } = await createWorkProfileWithConfig({
|
||||
resolved: resolveBrowserConfig({}),
|
||||
@@ -163,6 +167,56 @@ describe("BrowserProfilesService", () => {
|
||||
).rejects.toThrow(/requires an explicit loopback cdpUrl/i);
|
||||
});
|
||||
|
||||
it("creates existing-session profiles as attach-only local entries", async () => {
|
||||
const resolved = resolveBrowserConfig({});
|
||||
const { ctx, state } = createCtx(resolved);
|
||||
vi.mocked(loadConfig).mockReturnValue({ browser: { profiles: {} } });
|
||||
|
||||
const service = createBrowserProfilesService(ctx);
|
||||
const result = await service.createProfile({
|
||||
name: "chrome-live",
|
||||
driver: "existing-session",
|
||||
});
|
||||
|
||||
expect(result.cdpPort).toBe(18801);
|
||||
expect(result.isRemote).toBe(false);
|
||||
expect(state.resolved.profiles["chrome-live"]).toEqual({
|
||||
cdpPort: 18801,
|
||||
driver: "existing-session",
|
||||
attachOnly: true,
|
||||
color: expect.any(String),
|
||||
});
|
||||
expect(writeConfigFile).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
browser: expect.objectContaining({
|
||||
profiles: expect.objectContaining({
|
||||
"chrome-live": expect.objectContaining({
|
||||
cdpPort: 18801,
|
||||
driver: "existing-session",
|
||||
attachOnly: true,
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it("rejects driver=existing-session when cdpUrl is provided", async () => {
|
||||
const resolved = resolveBrowserConfig({});
|
||||
const { ctx } = createCtx(resolved);
|
||||
vi.mocked(loadConfig).mockReturnValue({ browser: { profiles: {} } });
|
||||
|
||||
const service = createBrowserProfilesService(ctx);
|
||||
|
||||
await expect(
|
||||
service.createProfile({
|
||||
name: "chrome-live",
|
||||
driver: "existing-session",
|
||||
cdpUrl: "http://127.0.0.1:9222",
|
||||
}),
|
||||
).rejects.toThrow(/does not accept cdpUrl/i);
|
||||
});
|
||||
|
||||
it("deletes remote profiles without stopping or removing local data", async () => {
|
||||
const resolved = resolveBrowserConfig({
|
||||
profiles: {
|
||||
@@ -218,4 +272,40 @@ describe("BrowserProfilesService", () => {
|
||||
expect(result.deleted).toBe(true);
|
||||
expect(movePathToTrash).toHaveBeenCalledWith(path.dirname(userDataDir));
|
||||
});
|
||||
|
||||
it("deletes existing-session profiles without touching local browser data", async () => {
|
||||
const resolved = resolveBrowserConfig({
|
||||
profiles: {
|
||||
"chrome-live": {
|
||||
cdpPort: 18801,
|
||||
color: "#0066CC",
|
||||
driver: "existing-session",
|
||||
attachOnly: true,
|
||||
},
|
||||
},
|
||||
});
|
||||
const { ctx } = createCtx(resolved);
|
||||
|
||||
vi.mocked(loadConfig).mockReturnValue({
|
||||
browser: {
|
||||
defaultProfile: "openclaw",
|
||||
profiles: {
|
||||
openclaw: { cdpPort: 18800, color: "#FF4500" },
|
||||
"chrome-live": {
|
||||
cdpPort: 18801,
|
||||
color: "#0066CC",
|
||||
driver: "existing-session",
|
||||
attachOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const service = createBrowserProfilesService(ctx);
|
||||
const result = await service.deleteProfile("chrome-live");
|
||||
|
||||
expect(result.deleted).toBe(false);
|
||||
expect(ctx.forProfile).not.toHaveBeenCalled();
|
||||
expect(movePathToTrash).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -27,7 +27,7 @@ export type CreateProfileParams = {
|
||||
name: string;
|
||||
color?: string;
|
||||
cdpUrl?: string;
|
||||
driver?: "openclaw" | "extension";
|
||||
driver?: "openclaw" | "extension" | "existing-session";
|
||||
};
|
||||
|
||||
export type CreateProfileResult = {
|
||||
@@ -79,7 +79,12 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) {
|
||||
const createProfile = async (params: CreateProfileParams): Promise<CreateProfileResult> => {
|
||||
const name = params.name.trim();
|
||||
const rawCdpUrl = params.cdpUrl?.trim() || undefined;
|
||||
const driver = params.driver === "extension" ? "extension" : undefined;
|
||||
const driver =
|
||||
params.driver === "extension"
|
||||
? "extension"
|
||||
: params.driver === "existing-session"
|
||||
? "existing-session"
|
||||
: undefined;
|
||||
|
||||
if (!isValidProfileName(name)) {
|
||||
throw new BrowserValidationError(
|
||||
@@ -118,6 +123,11 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) {
|
||||
);
|
||||
}
|
||||
}
|
||||
if (driver === "existing-session") {
|
||||
throw new BrowserValidationError(
|
||||
"driver=existing-session does not accept cdpUrl; it attaches via the Chrome MCP auto-connect flow",
|
||||
);
|
||||
}
|
||||
profileConfig = {
|
||||
cdpUrl: parsed.normalized,
|
||||
...(driver ? { driver } : {}),
|
||||
@@ -136,6 +146,7 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) {
|
||||
profileConfig = {
|
||||
cdpPort,
|
||||
...(driver ? { driver } : {}),
|
||||
...(driver === "existing-session" ? { attachOnly: true } : {}),
|
||||
color: profileColor,
|
||||
};
|
||||
}
|
||||
@@ -195,7 +206,7 @@ export function createBrowserProfilesService(ctx: BrowserRouteContext) {
|
||||
const state = ctx.state();
|
||||
const resolved = resolveProfile(state.resolved, name);
|
||||
|
||||
if (resolved?.cdpIsLoopback) {
|
||||
if (resolved?.cdpIsLoopback && resolved.driver === "openclaw") {
|
||||
try {
|
||||
await ctx.forProfile(name).stopRunningBrowser();
|
||||
} catch {
|
||||
|
||||
@@ -12,40 +12,49 @@ afterEach(async () => {
|
||||
await closePlaywrightBrowserConnection().catch(() => {});
|
||||
});
|
||||
|
||||
function createExtensionFallbackBrowserHarness(options?: {
|
||||
urls?: string[];
|
||||
newCDPSessionError?: string;
|
||||
}) {
|
||||
const pageOn = vi.fn();
|
||||
const contextOn = vi.fn();
|
||||
const browserOn = vi.fn();
|
||||
const browserClose = vi.fn(async () => {});
|
||||
const newCDPSession = vi.fn(async () => {
|
||||
throw new Error(options?.newCDPSessionError ?? "Not allowed");
|
||||
});
|
||||
|
||||
const context = {
|
||||
pages: () => [],
|
||||
on: contextOn,
|
||||
newCDPSession,
|
||||
} as unknown as import("playwright-core").BrowserContext;
|
||||
|
||||
const pages = (options?.urls ?? [undefined]).map(
|
||||
(url) =>
|
||||
({
|
||||
on: pageOn,
|
||||
context: () => context,
|
||||
...(url ? { url: () => url } : {}),
|
||||
}) as unknown as import("playwright-core").Page,
|
||||
);
|
||||
(context as unknown as { pages: () => unknown[] }).pages = () => pages;
|
||||
|
||||
const browser = {
|
||||
contexts: () => [context],
|
||||
on: browserOn,
|
||||
close: browserClose,
|
||||
} as unknown as import("playwright-core").Browser;
|
||||
|
||||
connectOverCdpSpy.mockResolvedValue(browser);
|
||||
getChromeWebSocketUrlSpy.mockResolvedValue(null);
|
||||
return { browserClose, newCDPSession, pages };
|
||||
}
|
||||
|
||||
describe("pw-session getPageForTargetId", () => {
|
||||
it("falls back to the only page when CDP session attachment is blocked (extension relays)", async () => {
|
||||
connectOverCdpSpy.mockClear();
|
||||
getChromeWebSocketUrlSpy.mockClear();
|
||||
|
||||
const pageOn = vi.fn();
|
||||
const contextOn = vi.fn();
|
||||
const browserOn = vi.fn();
|
||||
const browserClose = vi.fn(async () => {});
|
||||
|
||||
const context = {
|
||||
pages: () => [],
|
||||
on: contextOn,
|
||||
newCDPSession: vi.fn(async () => {
|
||||
throw new Error("Not allowed");
|
||||
}),
|
||||
} as unknown as import("playwright-core").BrowserContext;
|
||||
|
||||
const page = {
|
||||
on: pageOn,
|
||||
context: () => context,
|
||||
} as unknown as import("playwright-core").Page;
|
||||
|
||||
// Fill pages() after page exists.
|
||||
(context as unknown as { pages: () => unknown[] }).pages = () => [page];
|
||||
|
||||
const browser = {
|
||||
contexts: () => [context],
|
||||
on: browserOn,
|
||||
close: browserClose,
|
||||
} as unknown as import("playwright-core").Browser;
|
||||
|
||||
connectOverCdpSpy.mockResolvedValue(browser);
|
||||
getChromeWebSocketUrlSpy.mockResolvedValue(null);
|
||||
const { browserClose, pages } = createExtensionFallbackBrowserHarness();
|
||||
const [page] = pages;
|
||||
|
||||
const resolved = await getPageForTargetId({
|
||||
cdpUrl: "http://127.0.0.1:18792",
|
||||
@@ -58,40 +67,9 @@ describe("pw-session getPageForTargetId", () => {
|
||||
});
|
||||
|
||||
it("uses the shared HTTP-base normalization when falling back to /json/list for direct WebSocket CDP URLs", async () => {
|
||||
const pageOn = vi.fn();
|
||||
const contextOn = vi.fn();
|
||||
const browserOn = vi.fn();
|
||||
const browserClose = vi.fn(async () => {});
|
||||
|
||||
const context = {
|
||||
pages: () => [],
|
||||
on: contextOn,
|
||||
newCDPSession: vi.fn(async () => {
|
||||
throw new Error("Not allowed");
|
||||
}),
|
||||
} as unknown as import("playwright-core").BrowserContext;
|
||||
|
||||
const pageA = {
|
||||
on: pageOn,
|
||||
context: () => context,
|
||||
url: () => "https://alpha.example",
|
||||
} as unknown as import("playwright-core").Page;
|
||||
const pageB = {
|
||||
on: pageOn,
|
||||
context: () => context,
|
||||
url: () => "https://beta.example",
|
||||
} as unknown as import("playwright-core").Page;
|
||||
|
||||
(context as unknown as { pages: () => unknown[] }).pages = () => [pageA, pageB];
|
||||
|
||||
const browser = {
|
||||
contexts: () => [context],
|
||||
on: browserOn,
|
||||
close: browserClose,
|
||||
} as unknown as import("playwright-core").Browser;
|
||||
|
||||
connectOverCdpSpy.mockResolvedValue(browser);
|
||||
getChromeWebSocketUrlSpy.mockResolvedValue(null);
|
||||
const [, pageB] = createExtensionFallbackBrowserHarness({
|
||||
urls: ["https://alpha.example", "https://beta.example"],
|
||||
}).pages;
|
||||
|
||||
const fetchSpy = vi.spyOn(globalThis, "fetch").mockResolvedValue({
|
||||
ok: true,
|
||||
@@ -117,41 +95,11 @@ describe("pw-session getPageForTargetId", () => {
|
||||
});
|
||||
|
||||
it("resolves extension-relay pages from /json/list without probing page CDP sessions first", async () => {
|
||||
const pageOn = vi.fn();
|
||||
const contextOn = vi.fn();
|
||||
const browserOn = vi.fn();
|
||||
const browserClose = vi.fn(async () => {});
|
||||
const newCDPSession = vi.fn(async () => {
|
||||
throw new Error("Target.attachToBrowserTarget: Not allowed");
|
||||
const { newCDPSession, pages } = createExtensionFallbackBrowserHarness({
|
||||
urls: ["https://alpha.example", "https://beta.example"],
|
||||
newCDPSessionError: "Target.attachToBrowserTarget: Not allowed",
|
||||
});
|
||||
|
||||
const context = {
|
||||
pages: () => [],
|
||||
on: contextOn,
|
||||
newCDPSession,
|
||||
} as unknown as import("playwright-core").BrowserContext;
|
||||
|
||||
const pageA = {
|
||||
on: pageOn,
|
||||
context: () => context,
|
||||
url: () => "https://alpha.example",
|
||||
} as unknown as import("playwright-core").Page;
|
||||
const pageB = {
|
||||
on: pageOn,
|
||||
context: () => context,
|
||||
url: () => "https://beta.example",
|
||||
} as unknown as import("playwright-core").Page;
|
||||
|
||||
(context as unknown as { pages: () => unknown[] }).pages = () => [pageA, pageB];
|
||||
|
||||
const browser = {
|
||||
contexts: () => [context],
|
||||
on: browserOn,
|
||||
close: browserClose,
|
||||
} as unknown as import("playwright-core").Browser;
|
||||
|
||||
connectOverCdpSpy.mockResolvedValue(browser);
|
||||
getChromeWebSocketUrlSpy.mockResolvedValue(null);
|
||||
const [, pageB] = pages;
|
||||
|
||||
const fetchSpy = vi.spyOn(globalThis, "fetch");
|
||||
fetchSpy
|
||||
|
||||
@@ -1,5 +1,10 @@
|
||||
import type { BrowserRouteContext } from "../server-context.js";
|
||||
import { readBody, resolveTargetIdFromBody, withPlaywrightRouteContext } from "./agent.shared.js";
|
||||
import {
|
||||
readBody,
|
||||
requirePwAi,
|
||||
resolveTargetIdFromBody,
|
||||
withRouteTabContext,
|
||||
} from "./agent.shared.js";
|
||||
import { ensureOutputRootDir, resolveWritableOutputPathOrRespond } from "./output-paths.js";
|
||||
import { DEFAULT_DOWNLOAD_DIR } from "./path-output.js";
|
||||
import type { BrowserRouteRegistrar } from "./types.js";
|
||||
@@ -23,13 +28,23 @@ export function registerBrowserAgentActDownloadRoutes(
|
||||
const out = toStringOrEmpty(body.path) || "";
|
||||
const timeoutMs = toNumber(body.timeoutMs);
|
||||
|
||||
await withPlaywrightRouteContext({
|
||||
await withRouteTabContext({
|
||||
req,
|
||||
res,
|
||||
ctx,
|
||||
targetId,
|
||||
feature: "wait for download",
|
||||
run: async ({ cdpUrl, tab, pw }) => {
|
||||
run: async ({ profileCtx, cdpUrl, tab }) => {
|
||||
if (profileCtx.profile.driver === "existing-session") {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"download waiting is not supported for existing-session profiles yet.",
|
||||
);
|
||||
}
|
||||
const pw = await requirePwAi(res, "wait for download");
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
await ensureOutputRootDir(DEFAULT_DOWNLOAD_DIR);
|
||||
let downloadPath: string | undefined;
|
||||
if (out.trim()) {
|
||||
@@ -67,13 +82,23 @@ export function registerBrowserAgentActDownloadRoutes(
|
||||
return jsonError(res, 400, "path is required");
|
||||
}
|
||||
|
||||
await withPlaywrightRouteContext({
|
||||
await withRouteTabContext({
|
||||
req,
|
||||
res,
|
||||
ctx,
|
||||
targetId,
|
||||
feature: "download",
|
||||
run: async ({ cdpUrl, tab, pw }) => {
|
||||
run: async ({ profileCtx, cdpUrl, tab }) => {
|
||||
if (profileCtx.profile.driver === "existing-session") {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"downloads are not supported for existing-session profiles yet.",
|
||||
);
|
||||
}
|
||||
const pw = await requirePwAi(res, "download");
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
await ensureOutputRootDir(DEFAULT_DOWNLOAD_DIR);
|
||||
const downloadPath = await resolveWritableOutputPathOrRespond({
|
||||
res,
|
||||
|
||||
@@ -1,5 +1,11 @@
|
||||
import { evaluateChromeMcpScript, uploadChromeMcpFile } from "../chrome-mcp.js";
|
||||
import type { BrowserRouteContext } from "../server-context.js";
|
||||
import { readBody, resolveTargetIdFromBody, withPlaywrightRouteContext } from "./agent.shared.js";
|
||||
import {
|
||||
readBody,
|
||||
requirePwAi,
|
||||
resolveTargetIdFromBody,
|
||||
withRouteTabContext,
|
||||
} from "./agent.shared.js";
|
||||
import { DEFAULT_UPLOAD_DIR, resolveExistingPathsWithinRoot } from "./path-output.js";
|
||||
import type { BrowserRouteRegistrar } from "./types.js";
|
||||
import { jsonError, toBoolean, toNumber, toStringArray, toStringOrEmpty } from "./utils.js";
|
||||
@@ -20,13 +26,12 @@ export function registerBrowserAgentActHookRoutes(
|
||||
return jsonError(res, 400, "paths are required");
|
||||
}
|
||||
|
||||
await withPlaywrightRouteContext({
|
||||
await withRouteTabContext({
|
||||
req,
|
||||
res,
|
||||
ctx,
|
||||
targetId,
|
||||
feature: "file chooser hook",
|
||||
run: async ({ cdpUrl, tab, pw }) => {
|
||||
run: async ({ profileCtx, cdpUrl, tab }) => {
|
||||
const uploadPathsResult = await resolveExistingPathsWithinRoot({
|
||||
rootDir: DEFAULT_UPLOAD_DIR,
|
||||
requestedPaths: paths,
|
||||
@@ -38,6 +43,39 @@ export function registerBrowserAgentActHookRoutes(
|
||||
}
|
||||
const resolvedPaths = uploadPathsResult.paths;
|
||||
|
||||
if (profileCtx.profile.driver === "existing-session") {
|
||||
if (element) {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"existing-session file uploads do not support element selectors; use ref/inputRef.",
|
||||
);
|
||||
}
|
||||
if (resolvedPaths.length !== 1) {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"existing-session file uploads currently support one file at a time.",
|
||||
);
|
||||
}
|
||||
const uid = inputRef || ref;
|
||||
if (!uid) {
|
||||
return jsonError(res, 501, "existing-session file uploads require ref or inputRef.");
|
||||
}
|
||||
await uploadChromeMcpFile({
|
||||
profileName: profileCtx.profile.name,
|
||||
targetId: tab.targetId,
|
||||
uid,
|
||||
filePath: resolvedPaths[0] ?? "",
|
||||
});
|
||||
return res.json({ ok: true });
|
||||
}
|
||||
|
||||
const pw = await requirePwAi(res, "file chooser hook");
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (inputRef || element) {
|
||||
if (ref) {
|
||||
return jsonError(res, 400, "ref cannot be combined with inputRef/element");
|
||||
@@ -79,13 +117,69 @@ export function registerBrowserAgentActHookRoutes(
|
||||
return jsonError(res, 400, "accept is required");
|
||||
}
|
||||
|
||||
await withPlaywrightRouteContext({
|
||||
await withRouteTabContext({
|
||||
req,
|
||||
res,
|
||||
ctx,
|
||||
targetId,
|
||||
feature: "dialog hook",
|
||||
run: async ({ cdpUrl, tab, pw }) => {
|
||||
run: async ({ profileCtx, cdpUrl, tab }) => {
|
||||
if (profileCtx.profile.driver === "existing-session") {
|
||||
if (timeoutMs) {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"existing-session dialog handling does not support timeoutMs.",
|
||||
);
|
||||
}
|
||||
await evaluateChromeMcpScript({
|
||||
profileName: profileCtx.profile.name,
|
||||
targetId: tab.targetId,
|
||||
fn: `() => {
|
||||
const state = (window.__openclawDialogHook ??= {});
|
||||
if (!state.originals) {
|
||||
state.originals = {
|
||||
alert: window.alert.bind(window),
|
||||
confirm: window.confirm.bind(window),
|
||||
prompt: window.prompt.bind(window),
|
||||
};
|
||||
}
|
||||
const originals = state.originals;
|
||||
const restore = () => {
|
||||
window.alert = originals.alert;
|
||||
window.confirm = originals.confirm;
|
||||
window.prompt = originals.prompt;
|
||||
delete window.__openclawDialogHook;
|
||||
};
|
||||
window.alert = (...args) => {
|
||||
try {
|
||||
return undefined;
|
||||
} finally {
|
||||
restore();
|
||||
}
|
||||
};
|
||||
window.confirm = (...args) => {
|
||||
try {
|
||||
return ${accept ? "true" : "false"};
|
||||
} finally {
|
||||
restore();
|
||||
}
|
||||
};
|
||||
window.prompt = (...args) => {
|
||||
try {
|
||||
return ${accept ? JSON.stringify(promptText ?? "") : "null"};
|
||||
} finally {
|
||||
restore();
|
||||
}
|
||||
};
|
||||
return true;
|
||||
}`,
|
||||
});
|
||||
return res.json({ ok: true });
|
||||
}
|
||||
const pw = await requirePwAi(res, "dialog hook");
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
await pw.armDialogViaPlaywright({
|
||||
cdpUrl,
|
||||
targetId: tab.targetId,
|
||||
|
||||
@@ -1,3 +1,14 @@
|
||||
import {
|
||||
clickChromeMcpElement,
|
||||
closeChromeMcpTab,
|
||||
dragChromeMcpElement,
|
||||
evaluateChromeMcpScript,
|
||||
fillChromeMcpElement,
|
||||
fillChromeMcpForm,
|
||||
hoverChromeMcpElement,
|
||||
pressChromeMcpKey,
|
||||
resizeChromeMcpPage,
|
||||
} from "../chrome-mcp.js";
|
||||
import type { BrowserFormField } from "../client-actions-core.js";
|
||||
import { normalizeBrowserFormField } from "../form-fields.js";
|
||||
import type { BrowserRouteContext } from "../server-context.js";
|
||||
@@ -11,13 +22,88 @@ import {
|
||||
} from "./agent.act.shared.js";
|
||||
import {
|
||||
readBody,
|
||||
requirePwAi,
|
||||
resolveTargetIdFromBody,
|
||||
withPlaywrightRouteContext,
|
||||
withRouteTabContext,
|
||||
SELECTOR_UNSUPPORTED_MESSAGE,
|
||||
} from "./agent.shared.js";
|
||||
import type { BrowserRouteRegistrar } from "./types.js";
|
||||
import { jsonError, toBoolean, toNumber, toStringArray, toStringOrEmpty } from "./utils.js";
|
||||
|
||||
function sleep(ms: number): Promise<void> {
|
||||
return new Promise((resolve) => setTimeout(resolve, ms));
|
||||
}
|
||||
|
||||
function buildExistingSessionWaitPredicate(params: {
|
||||
text?: string;
|
||||
textGone?: string;
|
||||
selector?: string;
|
||||
url?: string;
|
||||
loadState?: "load" | "domcontentloaded" | "networkidle";
|
||||
fn?: string;
|
||||
}): string | null {
|
||||
const checks: string[] = [];
|
||||
if (params.text) {
|
||||
checks.push(`Boolean(document.body?.innerText?.includes(${JSON.stringify(params.text)}))`);
|
||||
}
|
||||
if (params.textGone) {
|
||||
checks.push(`!document.body?.innerText?.includes(${JSON.stringify(params.textGone)})`);
|
||||
}
|
||||
if (params.selector) {
|
||||
checks.push(`Boolean(document.querySelector(${JSON.stringify(params.selector)}))`);
|
||||
}
|
||||
if (params.url) {
|
||||
checks.push(`window.location.href === ${JSON.stringify(params.url)}`);
|
||||
}
|
||||
if (params.loadState === "domcontentloaded") {
|
||||
checks.push(`document.readyState === "interactive" || document.readyState === "complete"`);
|
||||
} else if (params.loadState === "load" || params.loadState === "networkidle") {
|
||||
checks.push(`document.readyState === "complete"`);
|
||||
}
|
||||
if (params.fn) {
|
||||
checks.push(`Boolean(await (${params.fn})())`);
|
||||
}
|
||||
if (checks.length === 0) {
|
||||
return null;
|
||||
}
|
||||
return checks.length === 1 ? checks[0] : checks.map((check) => `(${check})`).join(" && ");
|
||||
}
|
||||
|
||||
async function waitForExistingSessionCondition(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
timeMs?: number;
|
||||
text?: string;
|
||||
textGone?: string;
|
||||
selector?: string;
|
||||
url?: string;
|
||||
loadState?: "load" | "domcontentloaded" | "networkidle";
|
||||
fn?: string;
|
||||
timeoutMs?: number;
|
||||
}): Promise<void> {
|
||||
if (params.timeMs && params.timeMs > 0) {
|
||||
await sleep(params.timeMs);
|
||||
}
|
||||
const predicate = buildExistingSessionWaitPredicate(params);
|
||||
if (!predicate) {
|
||||
return;
|
||||
}
|
||||
const timeoutMs = Math.max(250, params.timeoutMs ?? 10_000);
|
||||
const deadline = Date.now() + timeoutMs;
|
||||
while (Date.now() < deadline) {
|
||||
const ready = await evaluateChromeMcpScript({
|
||||
profileName: params.profileName,
|
||||
targetId: params.targetId,
|
||||
fn: `async () => ${predicate}`,
|
||||
});
|
||||
if (ready) {
|
||||
return;
|
||||
}
|
||||
await sleep(250);
|
||||
}
|
||||
throw new Error("Timed out waiting for condition");
|
||||
}
|
||||
|
||||
export function registerBrowserAgentActRoutes(
|
||||
app: BrowserRouteRegistrar,
|
||||
ctx: BrowserRouteContext,
|
||||
@@ -34,14 +120,15 @@ export function registerBrowserAgentActRoutes(
|
||||
return jsonError(res, 400, SELECTOR_UNSUPPORTED_MESSAGE);
|
||||
}
|
||||
|
||||
await withPlaywrightRouteContext({
|
||||
await withRouteTabContext({
|
||||
req,
|
||||
res,
|
||||
ctx,
|
||||
targetId,
|
||||
feature: `act:${kind}`,
|
||||
run: async ({ cdpUrl, tab, pw }) => {
|
||||
run: async ({ profileCtx, cdpUrl, tab }) => {
|
||||
const evaluateEnabled = ctx.state().resolved.evaluateEnabled;
|
||||
const isExistingSession = profileCtx.profile.driver === "existing-session";
|
||||
const profileName = profileCtx.profile.name;
|
||||
|
||||
switch (kind) {
|
||||
case "click": {
|
||||
@@ -63,6 +150,26 @@ export function registerBrowserAgentActRoutes(
|
||||
return jsonError(res, 400, parsedModifiers.error);
|
||||
}
|
||||
const modifiers = parsedModifiers.modifiers;
|
||||
if (isExistingSession) {
|
||||
if ((button && button !== "left") || (modifiers && modifiers.length > 0)) {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"existing-session click currently supports left-click only (no button overrides/modifiers).",
|
||||
);
|
||||
}
|
||||
await clickChromeMcpElement({
|
||||
profileName,
|
||||
targetId: tab.targetId,
|
||||
uid: ref,
|
||||
doubleClick,
|
||||
});
|
||||
return res.json({ ok: true, targetId: tab.targetId, url: tab.url });
|
||||
}
|
||||
const pw = await requirePwAi(res, `act:${kind}`);
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
const clickRequest: Parameters<typeof pw.clickViaPlaywright>[0] = {
|
||||
cdpUrl,
|
||||
targetId: tab.targetId,
|
||||
@@ -93,6 +200,33 @@ export function registerBrowserAgentActRoutes(
|
||||
const submit = toBoolean(body.submit) ?? false;
|
||||
const slowly = toBoolean(body.slowly) ?? false;
|
||||
const timeoutMs = toNumber(body.timeoutMs);
|
||||
if (isExistingSession) {
|
||||
if (slowly) {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"existing-session type does not support slowly=true; use fill/press instead.",
|
||||
);
|
||||
}
|
||||
await fillChromeMcpElement({
|
||||
profileName,
|
||||
targetId: tab.targetId,
|
||||
uid: ref,
|
||||
value: text,
|
||||
});
|
||||
if (submit) {
|
||||
await pressChromeMcpKey({
|
||||
profileName,
|
||||
targetId: tab.targetId,
|
||||
key: "Enter",
|
||||
});
|
||||
}
|
||||
return res.json({ ok: true, targetId: tab.targetId });
|
||||
}
|
||||
const pw = await requirePwAi(res, `act:${kind}`);
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
const typeRequest: Parameters<typeof pw.typeViaPlaywright>[0] = {
|
||||
cdpUrl,
|
||||
targetId: tab.targetId,
|
||||
@@ -113,6 +247,17 @@ export function registerBrowserAgentActRoutes(
|
||||
return jsonError(res, 400, "key is required");
|
||||
}
|
||||
const delayMs = toNumber(body.delayMs);
|
||||
if (isExistingSession) {
|
||||
if (delayMs) {
|
||||
return jsonError(res, 501, "existing-session press does not support delayMs.");
|
||||
}
|
||||
await pressChromeMcpKey({ profileName, targetId: tab.targetId, key });
|
||||
return res.json({ ok: true, targetId: tab.targetId });
|
||||
}
|
||||
const pw = await requirePwAi(res, `act:${kind}`);
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
await pw.pressKeyViaPlaywright({
|
||||
cdpUrl,
|
||||
targetId: tab.targetId,
|
||||
@@ -127,6 +272,21 @@ export function registerBrowserAgentActRoutes(
|
||||
return jsonError(res, 400, "ref is required");
|
||||
}
|
||||
const timeoutMs = toNumber(body.timeoutMs);
|
||||
if (isExistingSession) {
|
||||
if (timeoutMs) {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"existing-session hover does not support timeoutMs overrides.",
|
||||
);
|
||||
}
|
||||
await hoverChromeMcpElement({ profileName, targetId: tab.targetId, uid: ref });
|
||||
return res.json({ ok: true, targetId: tab.targetId });
|
||||
}
|
||||
const pw = await requirePwAi(res, `act:${kind}`);
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
await pw.hoverViaPlaywright({
|
||||
cdpUrl,
|
||||
targetId: tab.targetId,
|
||||
@@ -141,6 +301,26 @@ export function registerBrowserAgentActRoutes(
|
||||
return jsonError(res, 400, "ref is required");
|
||||
}
|
||||
const timeoutMs = toNumber(body.timeoutMs);
|
||||
if (isExistingSession) {
|
||||
if (timeoutMs) {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"existing-session scrollIntoView does not support timeoutMs overrides.",
|
||||
);
|
||||
}
|
||||
await evaluateChromeMcpScript({
|
||||
profileName,
|
||||
targetId: tab.targetId,
|
||||
fn: `(el) => { el.scrollIntoView({ block: "center", inline: "center" }); return true; }`,
|
||||
args: [ref],
|
||||
});
|
||||
return res.json({ ok: true, targetId: tab.targetId });
|
||||
}
|
||||
const pw = await requirePwAi(res, `act:${kind}`);
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
const scrollRequest: Parameters<typeof pw.scrollIntoViewViaPlaywright>[0] = {
|
||||
cdpUrl,
|
||||
targetId: tab.targetId,
|
||||
@@ -159,6 +339,26 @@ export function registerBrowserAgentActRoutes(
|
||||
return jsonError(res, 400, "startRef and endRef are required");
|
||||
}
|
||||
const timeoutMs = toNumber(body.timeoutMs);
|
||||
if (isExistingSession) {
|
||||
if (timeoutMs) {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"existing-session drag does not support timeoutMs overrides.",
|
||||
);
|
||||
}
|
||||
await dragChromeMcpElement({
|
||||
profileName,
|
||||
targetId: tab.targetId,
|
||||
fromUid: startRef,
|
||||
toUid: endRef,
|
||||
});
|
||||
return res.json({ ok: true, targetId: tab.targetId });
|
||||
}
|
||||
const pw = await requirePwAi(res, `act:${kind}`);
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
await pw.dragViaPlaywright({
|
||||
cdpUrl,
|
||||
targetId: tab.targetId,
|
||||
@@ -175,6 +375,33 @@ export function registerBrowserAgentActRoutes(
|
||||
return jsonError(res, 400, "ref and values are required");
|
||||
}
|
||||
const timeoutMs = toNumber(body.timeoutMs);
|
||||
if (isExistingSession) {
|
||||
if (values.length !== 1) {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"existing-session select currently supports a single value only.",
|
||||
);
|
||||
}
|
||||
if (timeoutMs) {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"existing-session select does not support timeoutMs overrides.",
|
||||
);
|
||||
}
|
||||
await fillChromeMcpElement({
|
||||
profileName,
|
||||
targetId: tab.targetId,
|
||||
uid: ref,
|
||||
value: values[0] ?? "",
|
||||
});
|
||||
return res.json({ ok: true, targetId: tab.targetId });
|
||||
}
|
||||
const pw = await requirePwAi(res, `act:${kind}`);
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
await pw.selectOptionViaPlaywright({
|
||||
cdpUrl,
|
||||
targetId: tab.targetId,
|
||||
@@ -198,6 +425,28 @@ export function registerBrowserAgentActRoutes(
|
||||
return jsonError(res, 400, "fields are required");
|
||||
}
|
||||
const timeoutMs = toNumber(body.timeoutMs);
|
||||
if (isExistingSession) {
|
||||
if (timeoutMs) {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"existing-session fill does not support timeoutMs overrides.",
|
||||
);
|
||||
}
|
||||
await fillChromeMcpForm({
|
||||
profileName,
|
||||
targetId: tab.targetId,
|
||||
elements: fields.map((field) => ({
|
||||
uid: field.ref,
|
||||
value: String(field.value ?? ""),
|
||||
})),
|
||||
});
|
||||
return res.json({ ok: true, targetId: tab.targetId });
|
||||
}
|
||||
const pw = await requirePwAi(res, `act:${kind}`);
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
await pw.fillFormViaPlaywright({
|
||||
cdpUrl,
|
||||
targetId: tab.targetId,
|
||||
@@ -212,6 +461,19 @@ export function registerBrowserAgentActRoutes(
|
||||
if (!width || !height) {
|
||||
return jsonError(res, 400, "width and height are required");
|
||||
}
|
||||
if (isExistingSession) {
|
||||
await resizeChromeMcpPage({
|
||||
profileName,
|
||||
targetId: tab.targetId,
|
||||
width,
|
||||
height,
|
||||
});
|
||||
return res.json({ ok: true, targetId: tab.targetId, url: tab.url });
|
||||
}
|
||||
const pw = await requirePwAi(res, `act:${kind}`);
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
await pw.resizeViewportViaPlaywright({
|
||||
cdpUrl,
|
||||
targetId: tab.targetId,
|
||||
@@ -260,6 +522,25 @@ export function registerBrowserAgentActRoutes(
|
||||
"wait requires at least one of: timeMs, text, textGone, selector, url, loadState, fn",
|
||||
);
|
||||
}
|
||||
if (isExistingSession) {
|
||||
await waitForExistingSessionCondition({
|
||||
profileName,
|
||||
targetId: tab.targetId,
|
||||
timeMs,
|
||||
text,
|
||||
textGone,
|
||||
selector,
|
||||
url,
|
||||
loadState,
|
||||
fn,
|
||||
timeoutMs,
|
||||
});
|
||||
return res.json({ ok: true, targetId: tab.targetId });
|
||||
}
|
||||
const pw = await requirePwAi(res, `act:${kind}`);
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
await pw.waitForViaPlaywright({
|
||||
cdpUrl,
|
||||
targetId: tab.targetId,
|
||||
@@ -291,6 +572,31 @@ export function registerBrowserAgentActRoutes(
|
||||
}
|
||||
const ref = toStringOrEmpty(body.ref) || undefined;
|
||||
const evalTimeoutMs = toNumber(body.timeoutMs);
|
||||
if (isExistingSession) {
|
||||
if (evalTimeoutMs !== undefined) {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"existing-session evaluate does not support timeoutMs overrides.",
|
||||
);
|
||||
}
|
||||
const result = await evaluateChromeMcpScript({
|
||||
profileName,
|
||||
targetId: tab.targetId,
|
||||
fn,
|
||||
args: ref ? [ref] : undefined,
|
||||
});
|
||||
return res.json({
|
||||
ok: true,
|
||||
targetId: tab.targetId,
|
||||
url: tab.url,
|
||||
result,
|
||||
});
|
||||
}
|
||||
const pw = await requirePwAi(res, `act:${kind}`);
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
const evalRequest: Parameters<typeof pw.evaluateViaPlaywright>[0] = {
|
||||
cdpUrl,
|
||||
targetId: tab.targetId,
|
||||
@@ -310,6 +616,14 @@ export function registerBrowserAgentActRoutes(
|
||||
});
|
||||
}
|
||||
case "close": {
|
||||
if (isExistingSession) {
|
||||
await closeChromeMcpTab(profileName, tab.targetId);
|
||||
return res.json({ ok: true, targetId: tab.targetId });
|
||||
}
|
||||
const pw = await requirePwAi(res, `act:${kind}`);
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
await pw.closePageViaPlaywright({ cdpUrl, targetId: tab.targetId });
|
||||
return res.json({ ok: true, targetId: tab.targetId });
|
||||
}
|
||||
@@ -334,13 +648,23 @@ export function registerBrowserAgentActRoutes(
|
||||
return jsonError(res, 400, "url is required");
|
||||
}
|
||||
|
||||
await withPlaywrightRouteContext({
|
||||
await withRouteTabContext({
|
||||
req,
|
||||
res,
|
||||
ctx,
|
||||
targetId,
|
||||
feature: "response body",
|
||||
run: async ({ cdpUrl, tab, pw }) => {
|
||||
run: async ({ profileCtx, cdpUrl, tab }) => {
|
||||
if (profileCtx.profile.driver === "existing-session") {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"response body is not supported for existing-session profiles yet.",
|
||||
);
|
||||
}
|
||||
const pw = await requirePwAi(res, "response body");
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
const result = await pw.responseBodyViaPlaywright({
|
||||
cdpUrl,
|
||||
targetId: tab.targetId,
|
||||
@@ -361,13 +685,39 @@ export function registerBrowserAgentActRoutes(
|
||||
return jsonError(res, 400, "ref is required");
|
||||
}
|
||||
|
||||
await withPlaywrightRouteContext({
|
||||
await withRouteTabContext({
|
||||
req,
|
||||
res,
|
||||
ctx,
|
||||
targetId,
|
||||
feature: "highlight",
|
||||
run: async ({ cdpUrl, tab, pw }) => {
|
||||
run: async ({ profileCtx, cdpUrl, tab }) => {
|
||||
if (profileCtx.profile.driver === "existing-session") {
|
||||
await evaluateChromeMcpScript({
|
||||
profileName: profileCtx.profile.name,
|
||||
targetId: tab.targetId,
|
||||
args: [ref],
|
||||
fn: `(el) => {
|
||||
if (!(el instanceof Element)) {
|
||||
return false;
|
||||
}
|
||||
el.scrollIntoView({ block: "center", inline: "center" });
|
||||
const previousOutline = el.style.outline;
|
||||
const previousOffset = el.style.outlineOffset;
|
||||
el.style.outline = "3px solid #FF4500";
|
||||
el.style.outlineOffset = "2px";
|
||||
setTimeout(() => {
|
||||
el.style.outline = previousOutline;
|
||||
el.style.outlineOffset = previousOffset;
|
||||
}, 2000);
|
||||
return true;
|
||||
}`,
|
||||
});
|
||||
return res.json({ ok: true, targetId: tab.targetId });
|
||||
}
|
||||
const pw = await requirePwAi(res, "highlight");
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
await pw.highlightViaPlaywright({
|
||||
cdpUrl,
|
||||
targetId: tab.targetId,
|
||||
|
||||
@@ -1,6 +1,20 @@
|
||||
import path from "node:path";
|
||||
import { ensureMediaDir, saveMediaBuffer } from "../../media/store.js";
|
||||
import { captureScreenshot, snapshotAria } from "../cdp.js";
|
||||
import {
|
||||
evaluateChromeMcpScript,
|
||||
navigateChromeMcpPage,
|
||||
takeChromeMcpScreenshot,
|
||||
takeChromeMcpSnapshot,
|
||||
} from "../chrome-mcp.js";
|
||||
import {
|
||||
buildAiSnapshotFromChromeMcpSnapshot,
|
||||
flattenChromeMcpSnapshotToAriaNodes,
|
||||
} from "../chrome-mcp.snapshot.js";
|
||||
import {
|
||||
assertBrowserNavigationAllowed,
|
||||
assertBrowserNavigationResultAllowed,
|
||||
} from "../navigation-guard.js";
|
||||
import { withBrowserNavigationPolicy } from "../navigation-guard.js";
|
||||
import {
|
||||
DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES,
|
||||
@@ -25,6 +39,89 @@ import {
|
||||
import type { BrowserResponse, BrowserRouteRegistrar } from "./types.js";
|
||||
import { jsonError, toBoolean, toStringOrEmpty } from "./utils.js";
|
||||
|
||||
const CHROME_MCP_OVERLAY_ATTR = "data-openclaw-mcp-overlay";
|
||||
|
||||
async function clearChromeMcpOverlay(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
}): Promise<void> {
|
||||
await evaluateChromeMcpScript({
|
||||
profileName: params.profileName,
|
||||
targetId: params.targetId,
|
||||
fn: `() => {
|
||||
document.querySelectorAll("[${CHROME_MCP_OVERLAY_ATTR}]").forEach((node) => node.remove());
|
||||
return true;
|
||||
}`,
|
||||
}).catch(() => {});
|
||||
}
|
||||
|
||||
async function renderChromeMcpLabels(params: {
|
||||
profileName: string;
|
||||
targetId: string;
|
||||
refs: string[];
|
||||
}): Promise<{ labels: number; skipped: number }> {
|
||||
const refList = JSON.stringify(params.refs);
|
||||
const result = await evaluateChromeMcpScript({
|
||||
profileName: params.profileName,
|
||||
targetId: params.targetId,
|
||||
args: params.refs,
|
||||
fn: `(...elements) => {
|
||||
const refs = ${refList};
|
||||
document.querySelectorAll("[${CHROME_MCP_OVERLAY_ATTR}]").forEach((node) => node.remove());
|
||||
const root = document.createElement("div");
|
||||
root.setAttribute("${CHROME_MCP_OVERLAY_ATTR}", "labels");
|
||||
root.style.position = "fixed";
|
||||
root.style.inset = "0";
|
||||
root.style.pointerEvents = "none";
|
||||
root.style.zIndex = "2147483647";
|
||||
let labels = 0;
|
||||
let skipped = 0;
|
||||
elements.forEach((el, index) => {
|
||||
if (!(el instanceof Element)) {
|
||||
skipped += 1;
|
||||
return;
|
||||
}
|
||||
const rect = el.getBoundingClientRect();
|
||||
if (rect.width <= 0 && rect.height <= 0) {
|
||||
skipped += 1;
|
||||
return;
|
||||
}
|
||||
labels += 1;
|
||||
const badge = document.createElement("div");
|
||||
badge.setAttribute("${CHROME_MCP_OVERLAY_ATTR}", "label");
|
||||
badge.textContent = refs[index] || String(labels);
|
||||
badge.style.position = "fixed";
|
||||
badge.style.left = \`\${Math.max(0, rect.left)}px\`;
|
||||
badge.style.top = \`\${Math.max(0, rect.top)}px\`;
|
||||
badge.style.transform = "translateY(-100%)";
|
||||
badge.style.padding = "2px 6px";
|
||||
badge.style.borderRadius = "999px";
|
||||
badge.style.background = "#FF4500";
|
||||
badge.style.color = "#fff";
|
||||
badge.style.font = "600 12px ui-monospace, SFMono-Regular, Menlo, monospace";
|
||||
badge.style.boxShadow = "0 2px 6px rgba(0,0,0,0.35)";
|
||||
badge.style.whiteSpace = "nowrap";
|
||||
root.appendChild(badge);
|
||||
});
|
||||
document.documentElement.appendChild(root);
|
||||
return { labels, skipped };
|
||||
}`,
|
||||
});
|
||||
const labels =
|
||||
result &&
|
||||
typeof result === "object" &&
|
||||
typeof (result as { labels?: unknown }).labels === "number"
|
||||
? (result as { labels: number }).labels
|
||||
: 0;
|
||||
const skipped =
|
||||
result &&
|
||||
typeof result === "object" &&
|
||||
typeof (result as { skipped?: unknown }).skipped === "number"
|
||||
? (result as { skipped: number }).skipped
|
||||
: 0;
|
||||
return { labels, skipped };
|
||||
}
|
||||
|
||||
async function saveBrowserMediaResponse(params: {
|
||||
res: BrowserResponse;
|
||||
buffer: Buffer;
|
||||
@@ -96,13 +193,27 @@ export function registerBrowserAgentSnapshotRoutes(
|
||||
if (!url) {
|
||||
return jsonError(res, 400, "url is required");
|
||||
}
|
||||
await withPlaywrightRouteContext({
|
||||
await withRouteTabContext({
|
||||
req,
|
||||
res,
|
||||
ctx,
|
||||
targetId,
|
||||
feature: "navigate",
|
||||
run: async ({ cdpUrl, tab, pw, profileCtx }) => {
|
||||
run: async ({ profileCtx, tab, cdpUrl }) => {
|
||||
if (profileCtx.profile.driver === "existing-session") {
|
||||
const ssrfPolicyOpts = withBrowserNavigationPolicy(ctx.state().resolved.ssrfPolicy);
|
||||
await assertBrowserNavigationAllowed({ url, ...ssrfPolicyOpts });
|
||||
const result = await navigateChromeMcpPage({
|
||||
profileName: profileCtx.profile.name,
|
||||
targetId: tab.targetId,
|
||||
url,
|
||||
});
|
||||
await assertBrowserNavigationResultAllowed({ url: result.url, ...ssrfPolicyOpts });
|
||||
return res.json({ ok: true, targetId: tab.targetId, ...result });
|
||||
}
|
||||
const pw = await requirePwAi(res, "navigate");
|
||||
if (!pw) {
|
||||
return;
|
||||
}
|
||||
const result = await pw.navigateViaPlaywright({
|
||||
cdpUrl,
|
||||
targetId: tab.targetId,
|
||||
@@ -122,6 +233,17 @@ export function registerBrowserAgentSnapshotRoutes(
|
||||
app.post("/pdf", async (req, res) => {
|
||||
const body = readBody(req);
|
||||
const targetId = toStringOrEmpty(body.targetId) || undefined;
|
||||
const profileCtx = resolveProfileContext(req, res, ctx);
|
||||
if (!profileCtx) {
|
||||
return;
|
||||
}
|
||||
if (profileCtx.profile.driver === "existing-session") {
|
||||
return jsonError(
|
||||
res,
|
||||
501,
|
||||
"pdf is not supported for existing-session profiles yet; use screenshot/snapshot instead.",
|
||||
);
|
||||
}
|
||||
await withPlaywrightRouteContext({
|
||||
req,
|
||||
res,
|
||||
@@ -163,6 +285,36 @@ export function registerBrowserAgentSnapshotRoutes(
|
||||
ctx,
|
||||
targetId,
|
||||
run: async ({ profileCtx, tab, cdpUrl }) => {
|
||||
if (profileCtx.profile.driver === "existing-session") {
|
||||
if (element) {
|
||||
return jsonError(
|
||||
res,
|
||||
400,
|
||||
"element screenshots are not supported for existing-session profiles; use ref from snapshot.",
|
||||
);
|
||||
}
|
||||
const buffer = await takeChromeMcpScreenshot({
|
||||
profileName: profileCtx.profile.name,
|
||||
targetId: tab.targetId,
|
||||
uid: ref,
|
||||
fullPage,
|
||||
format: type,
|
||||
});
|
||||
const normalized = await normalizeBrowserScreenshot(buffer, {
|
||||
maxSide: DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE,
|
||||
maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES,
|
||||
});
|
||||
await saveBrowserMediaResponse({
|
||||
res,
|
||||
buffer: normalized.buffer,
|
||||
contentType: normalized.contentType ?? `image/${type}`,
|
||||
maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES,
|
||||
targetId: tab.targetId,
|
||||
url: tab.url,
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
let buffer: Buffer;
|
||||
const shouldUsePlaywright = shouldUsePlaywrightForScreenshot({
|
||||
profile: profileCtx.profile,
|
||||
@@ -227,6 +379,90 @@ export function registerBrowserAgentSnapshotRoutes(
|
||||
if ((plan.labels || plan.mode === "efficient") && plan.format === "aria") {
|
||||
return jsonError(res, 400, "labels/mode=efficient require format=ai");
|
||||
}
|
||||
if (profileCtx.profile.driver === "existing-session") {
|
||||
if (plan.labels) {
|
||||
return jsonError(res, 501, "labels are not supported for existing-session profiles yet.");
|
||||
}
|
||||
if (plan.selectorValue || plan.frameSelectorValue) {
|
||||
return jsonError(
|
||||
res,
|
||||
400,
|
||||
"selector/frame snapshots are not supported for existing-session profiles; snapshot the whole page and use refs.",
|
||||
);
|
||||
}
|
||||
const snapshot = await takeChromeMcpSnapshot({
|
||||
profileName: profileCtx.profile.name,
|
||||
targetId: tab.targetId,
|
||||
});
|
||||
if (plan.format === "aria") {
|
||||
return res.json({
|
||||
ok: true,
|
||||
format: "aria",
|
||||
targetId: tab.targetId,
|
||||
url: tab.url,
|
||||
nodes: flattenChromeMcpSnapshotToAriaNodes(snapshot, plan.limit),
|
||||
});
|
||||
}
|
||||
const built = buildAiSnapshotFromChromeMcpSnapshot({
|
||||
root: snapshot,
|
||||
options: {
|
||||
interactive: plan.interactive ?? undefined,
|
||||
compact: plan.compact ?? undefined,
|
||||
maxDepth: plan.depth ?? undefined,
|
||||
},
|
||||
maxChars: plan.resolvedMaxChars,
|
||||
});
|
||||
if (plan.labels) {
|
||||
const refs = Object.keys(built.refs);
|
||||
const labelResult = await renderChromeMcpLabels({
|
||||
profileName: profileCtx.profile.name,
|
||||
targetId: tab.targetId,
|
||||
refs,
|
||||
});
|
||||
try {
|
||||
const labeled = await takeChromeMcpScreenshot({
|
||||
profileName: profileCtx.profile.name,
|
||||
targetId: tab.targetId,
|
||||
format: "png",
|
||||
});
|
||||
const normalized = await normalizeBrowserScreenshot(labeled, {
|
||||
maxSide: DEFAULT_BROWSER_SCREENSHOT_MAX_SIDE,
|
||||
maxBytes: DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES,
|
||||
});
|
||||
await ensureMediaDir();
|
||||
const saved = await saveMediaBuffer(
|
||||
normalized.buffer,
|
||||
normalized.contentType ?? "image/png",
|
||||
"browser",
|
||||
DEFAULT_BROWSER_SCREENSHOT_MAX_BYTES,
|
||||
);
|
||||
return res.json({
|
||||
ok: true,
|
||||
format: "ai",
|
||||
targetId: tab.targetId,
|
||||
url: tab.url,
|
||||
labels: true,
|
||||
labelsCount: labelResult.labels,
|
||||
labelsSkipped: labelResult.skipped,
|
||||
imagePath: path.resolve(saved.path),
|
||||
imageType: normalized.contentType?.includes("jpeg") ? "jpeg" : "png",
|
||||
...built,
|
||||
});
|
||||
} finally {
|
||||
await clearChromeMcpOverlay({
|
||||
profileName: profileCtx.profile.name,
|
||||
targetId: tab.targetId,
|
||||
});
|
||||
}
|
||||
}
|
||||
return res.json({
|
||||
ok: true,
|
||||
format: "ai",
|
||||
targetId: tab.targetId,
|
||||
url: tab.url,
|
||||
...built,
|
||||
});
|
||||
}
|
||||
if (plan.format === "ai") {
|
||||
const pw = await requirePwAi(res, "ai snapshot");
|
||||
if (!pw) {
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import { getChromeMcpPid } from "../chrome-mcp.js";
|
||||
import { resolveBrowserExecutableForPlatform } from "../chrome.executables.js";
|
||||
import { toBrowserErrorResponse } from "../errors.js";
|
||||
import { createBrowserProfilesService } from "../profiles-service.js";
|
||||
@@ -76,10 +77,14 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow
|
||||
res.json({
|
||||
enabled: current.resolved.enabled,
|
||||
profile: profileCtx.profile.name,
|
||||
driver: profileCtx.profile.driver,
|
||||
running: cdpReady,
|
||||
cdpReady,
|
||||
cdpHttp,
|
||||
pid: profileState?.running?.pid ?? null,
|
||||
pid:
|
||||
profileCtx.profile.driver === "existing-session"
|
||||
? getChromeMcpPid(profileCtx.profile.name)
|
||||
: (profileState?.running?.pid ?? null),
|
||||
cdpPort: profileCtx.profile.cdpPort,
|
||||
cdpUrl: profileCtx.profile.cdpUrl,
|
||||
chosenBrowser: profileState?.running?.exe.kind ?? null,
|
||||
@@ -146,6 +151,7 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow
|
||||
const driver = toStringOrEmpty((req.body as { driver?: unknown })?.driver) as
|
||||
| "openclaw"
|
||||
| "extension"
|
||||
| "existing-session"
|
||||
| "";
|
||||
|
||||
if (!name) {
|
||||
@@ -158,7 +164,12 @@ export function registerBrowserBasicRoutes(app: BrowserRouteRegistrar, ctx: Brow
|
||||
name,
|
||||
color: color || undefined,
|
||||
cdpUrl: cdpUrl || undefined,
|
||||
driver: driver === "extension" ? "extension" : undefined,
|
||||
driver:
|
||||
driver === "extension"
|
||||
? "extension"
|
||||
: driver === "existing-session"
|
||||
? "existing-session"
|
||||
: undefined,
|
||||
});
|
||||
res.json(result);
|
||||
} catch (err) {
|
||||
|
||||
@@ -3,6 +3,11 @@ import {
|
||||
PROFILE_POST_RESTART_WS_TIMEOUT_MS,
|
||||
resolveCdpReachabilityTimeouts,
|
||||
} from "./cdp-timeouts.js";
|
||||
import {
|
||||
closeChromeMcpSession,
|
||||
ensureChromeMcpAvailable,
|
||||
listChromeMcpTabs,
|
||||
} from "./chrome-mcp.js";
|
||||
import {
|
||||
isChromeCdpReady,
|
||||
isChromeReachable,
|
||||
@@ -60,11 +65,19 @@ export function createProfileAvailability({
|
||||
});
|
||||
|
||||
const isReachable = async (timeoutMs?: number) => {
|
||||
if (profile.driver === "existing-session") {
|
||||
await ensureChromeMcpAvailable(profile.name);
|
||||
await listChromeMcpTabs(profile.name);
|
||||
return true;
|
||||
}
|
||||
const { httpTimeoutMs, wsTimeoutMs } = resolveTimeouts(timeoutMs);
|
||||
return await isChromeCdpReady(profile.cdpUrl, httpTimeoutMs, wsTimeoutMs);
|
||||
};
|
||||
|
||||
const isHttpReachable = async (timeoutMs?: number) => {
|
||||
if (profile.driver === "existing-session") {
|
||||
return await isReachable(timeoutMs);
|
||||
}
|
||||
const { httpTimeoutMs } = resolveTimeouts(timeoutMs);
|
||||
return await isChromeReachable(profile.cdpUrl, httpTimeoutMs);
|
||||
};
|
||||
@@ -109,6 +122,9 @@ export function createProfileAvailability({
|
||||
if (previousProfile.driver === "extension") {
|
||||
await stopChromeExtensionRelayServer({ cdpUrl: previousProfile.cdpUrl }).catch(() => false);
|
||||
}
|
||||
if (previousProfile.driver === "existing-session") {
|
||||
await closeChromeMcpSession(previousProfile.name).catch(() => false);
|
||||
}
|
||||
await closePlaywrightBrowserConnectionForProfile(previousProfile.cdpUrl);
|
||||
if (previousProfile.cdpUrl !== profile.cdpUrl) {
|
||||
await closePlaywrightBrowserConnectionForProfile(profile.cdpUrl);
|
||||
@@ -138,6 +154,10 @@ export function createProfileAvailability({
|
||||
|
||||
const ensureBrowserAvailable = async (): Promise<void> => {
|
||||
await reconcileProfileRuntime();
|
||||
if (profile.driver === "existing-session") {
|
||||
await ensureChromeMcpAvailable(profile.name);
|
||||
return;
|
||||
}
|
||||
const current = state();
|
||||
const remoteCdp = capabilities.isRemote;
|
||||
const attachOnly = profile.attachOnly;
|
||||
@@ -238,6 +258,10 @@ export function createProfileAvailability({
|
||||
|
||||
const stopRunningBrowser = async (): Promise<{ stopped: boolean }> => {
|
||||
await reconcileProfileRuntime();
|
||||
if (profile.driver === "existing-session") {
|
||||
const stopped = await closeChromeMcpSession(profile.name);
|
||||
return { stopped };
|
||||
}
|
||||
if (capabilities.requiresRelay) {
|
||||
const stopped = await stopChromeExtensionRelayServer({
|
||||
cdpUrl: profile.cdpUrl,
|
||||
|
||||
102
src/browser/server-context.existing-session.test.ts
Normal file
102
src/browser/server-context.existing-session.test.ts
Normal file
@@ -0,0 +1,102 @@
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import { createBrowserRouteContext } from "./server-context.js";
|
||||
import type { BrowserServerState } from "./server-context.js";
|
||||
|
||||
vi.mock("./chrome-mcp.js", () => ({
|
||||
closeChromeMcpSession: vi.fn(async () => true),
|
||||
ensureChromeMcpAvailable: vi.fn(async () => {}),
|
||||
focusChromeMcpTab: vi.fn(async () => {}),
|
||||
listChromeMcpTabs: vi.fn(async () => [
|
||||
{ targetId: "7", title: "", url: "https://example.com", type: "page" },
|
||||
]),
|
||||
openChromeMcpTab: vi.fn(async () => ({
|
||||
targetId: "8",
|
||||
title: "",
|
||||
url: "https://openclaw.ai",
|
||||
type: "page",
|
||||
})),
|
||||
closeChromeMcpTab: vi.fn(async () => {}),
|
||||
getChromeMcpPid: vi.fn(() => 4321),
|
||||
}));
|
||||
|
||||
import * as chromeMcp from "./chrome-mcp.js";
|
||||
|
||||
function makeState(): BrowserServerState {
|
||||
return {
|
||||
server: null,
|
||||
port: 0,
|
||||
resolved: {
|
||||
enabled: true,
|
||||
evaluateEnabled: true,
|
||||
controlPort: 18791,
|
||||
cdpPortRangeStart: 18800,
|
||||
cdpPortRangeEnd: 18899,
|
||||
cdpProtocol: "http",
|
||||
cdpHost: "127.0.0.1",
|
||||
cdpIsLoopback: true,
|
||||
remoteCdpTimeoutMs: 1500,
|
||||
remoteCdpHandshakeTimeoutMs: 3000,
|
||||
color: "#FF4500",
|
||||
headless: false,
|
||||
noSandbox: false,
|
||||
attachOnly: false,
|
||||
defaultProfile: "chrome-live",
|
||||
profiles: {
|
||||
"chrome-live": {
|
||||
cdpPort: 18801,
|
||||
color: "#0066CC",
|
||||
driver: "existing-session",
|
||||
attachOnly: true,
|
||||
},
|
||||
},
|
||||
extraArgs: [],
|
||||
ssrfPolicy: { dangerouslyAllowPrivateNetwork: true },
|
||||
},
|
||||
profiles: new Map(),
|
||||
};
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
describe("browser server-context existing-session profile", () => {
|
||||
it("routes tab operations through the Chrome MCP backend", async () => {
|
||||
const state = makeState();
|
||||
const ctx = createBrowserRouteContext({ getState: () => state });
|
||||
const live = ctx.forProfile("chrome-live");
|
||||
|
||||
vi.mocked(chromeMcp.listChromeMcpTabs)
|
||||
.mockResolvedValueOnce([
|
||||
{ targetId: "7", title: "", url: "https://example.com", type: "page" },
|
||||
])
|
||||
.mockResolvedValueOnce([
|
||||
{ targetId: "8", title: "", url: "https://openclaw.ai", type: "page" },
|
||||
])
|
||||
.mockResolvedValueOnce([
|
||||
{ targetId: "8", title: "", url: "https://openclaw.ai", type: "page" },
|
||||
])
|
||||
.mockResolvedValueOnce([
|
||||
{ targetId: "7", title: "", url: "https://example.com", type: "page" },
|
||||
]);
|
||||
|
||||
await live.ensureBrowserAvailable();
|
||||
const tabs = await live.listTabs();
|
||||
expect(tabs.map((tab) => tab.targetId)).toEqual(["7"]);
|
||||
|
||||
const opened = await live.openTab("https://openclaw.ai");
|
||||
expect(opened.targetId).toBe("8");
|
||||
|
||||
const selected = await live.ensureTabAvailable();
|
||||
expect(selected.targetId).toBe("8");
|
||||
|
||||
await live.focusTab("7");
|
||||
await live.stopRunningBrowser();
|
||||
|
||||
expect(chromeMcp.ensureChromeMcpAvailable).toHaveBeenCalledWith("chrome-live");
|
||||
expect(chromeMcp.listChromeMcpTabs).toHaveBeenCalledWith("chrome-live");
|
||||
expect(chromeMcp.openChromeMcpTab).toHaveBeenCalledWith("chrome-live", "https://openclaw.ai");
|
||||
expect(chromeMcp.focusChromeMcpTab).toHaveBeenCalledWith("chrome-live", "7");
|
||||
expect(chromeMcp.closeChromeMcpSession).toHaveBeenCalledWith("chrome-live");
|
||||
});
|
||||
});
|
||||
@@ -1,5 +1,6 @@
|
||||
import { fetchOk, normalizeCdpHttpBaseForJsonEndpoints } from "./cdp.helpers.js";
|
||||
import { appendCdpPath } from "./cdp.js";
|
||||
import { closeChromeMcpTab, focusChromeMcpTab } from "./chrome-mcp.js";
|
||||
import type { ResolvedBrowserProfile } from "./config.js";
|
||||
import { BrowserTabNotFoundError, BrowserTargetAmbiguousError } from "./errors.js";
|
||||
import { getBrowserProfileCapabilities } from "./profile-capabilities.js";
|
||||
@@ -111,6 +112,13 @@ export function createProfileSelectionOps({
|
||||
const focusTab = async (targetId: string): Promise<void> => {
|
||||
const resolvedTargetId = await resolveTargetIdOrThrow(targetId);
|
||||
|
||||
if (profile.driver === "existing-session") {
|
||||
await focusChromeMcpTab(profile.name, resolvedTargetId);
|
||||
const profileState = getProfileState();
|
||||
profileState.lastTargetId = resolvedTargetId;
|
||||
return;
|
||||
}
|
||||
|
||||
if (capabilities.usesPersistentPlaywright) {
|
||||
const mod = await getPwAiModule({ mode: "strict" });
|
||||
const focusPageByTargetIdViaPlaywright = (mod as Partial<PwAiModule> | null)
|
||||
@@ -134,6 +142,11 @@ export function createProfileSelectionOps({
|
||||
const closeTab = async (targetId: string): Promise<void> => {
|
||||
const resolvedTargetId = await resolveTargetIdOrThrow(targetId);
|
||||
|
||||
if (profile.driver === "existing-session") {
|
||||
await closeChromeMcpTab(profile.name, resolvedTargetId);
|
||||
return;
|
||||
}
|
||||
|
||||
// For remote profiles, use Playwright's persistent connection to close tabs
|
||||
if (capabilities.usesPersistentPlaywright) {
|
||||
const mod = await getPwAiModule({ mode: "strict" });
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { CDP_JSON_NEW_TIMEOUT_MS } from "./cdp-timeouts.js";
|
||||
import { fetchJson, fetchOk, normalizeCdpHttpBaseForJsonEndpoints } from "./cdp.helpers.js";
|
||||
import { appendCdpPath, createTargetViaCdp, normalizeCdpWsUrl } from "./cdp.js";
|
||||
import { listChromeMcpTabs, openChromeMcpTab } from "./chrome-mcp.js";
|
||||
import type { ResolvedBrowserProfile } from "./config.js";
|
||||
import {
|
||||
assertBrowserNavigationAllowed,
|
||||
@@ -65,6 +66,10 @@ export function createProfileTabOps({
|
||||
const capabilities = getBrowserProfileCapabilities(profile);
|
||||
|
||||
const listTabs = async (): Promise<BrowserTab[]> => {
|
||||
if (profile.driver === "existing-session") {
|
||||
return await listChromeMcpTabs(profile.name);
|
||||
}
|
||||
|
||||
if (capabilities.usesPersistentPlaywright) {
|
||||
const mod = await getPwAiModule({ mode: "strict" });
|
||||
const listPagesViaPlaywright = (mod as Partial<PwAiModule> | null)?.listPagesViaPlaywright;
|
||||
@@ -134,6 +139,15 @@ export function createProfileTabOps({
|
||||
const openTab = async (url: string): Promise<BrowserTab> => {
|
||||
const ssrfPolicyOpts = withBrowserNavigationPolicy(state().resolved.ssrfPolicy);
|
||||
|
||||
if (profile.driver === "existing-session") {
|
||||
await assertBrowserNavigationAllowed({ url, ...ssrfPolicyOpts });
|
||||
const page = await openChromeMcpTab(profile.name, url);
|
||||
const profileState = getProfileState();
|
||||
profileState.lastTargetId = page.targetId;
|
||||
await assertBrowserNavigationResultAllowed({ url: page.url, ...ssrfPolicyOpts });
|
||||
return page;
|
||||
}
|
||||
|
||||
if (capabilities.usesPersistentPlaywright) {
|
||||
const mod = await getPwAiModule({ mode: "strict" });
|
||||
const createPageViaPlaywright = (mod as Partial<PwAiModule> | null)?.createPageViaPlaywright;
|
||||
|
||||
@@ -162,12 +162,22 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon
|
||||
|
||||
let tabCount = 0;
|
||||
let running = false;
|
||||
const profileCtx = createProfileContext(opts, profile);
|
||||
|
||||
if (profileState?.running) {
|
||||
if (profile.driver === "existing-session") {
|
||||
try {
|
||||
running = await profileCtx.isReachable(300);
|
||||
if (running) {
|
||||
const tabs = await profileCtx.listTabs();
|
||||
tabCount = tabs.filter((t) => t.type === "page").length;
|
||||
}
|
||||
} catch {
|
||||
// Chrome MCP not available
|
||||
}
|
||||
} else if (profileState?.running) {
|
||||
running = true;
|
||||
try {
|
||||
const ctx = createProfileContext(opts, profile);
|
||||
const tabs = await ctx.listTabs();
|
||||
const tabs = await profileCtx.listTabs();
|
||||
tabCount = tabs.filter((t) => t.type === "page").length;
|
||||
} catch {
|
||||
// Browser might not be responsive
|
||||
@@ -178,8 +188,7 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon
|
||||
const reachable = await isChromeReachable(profile.cdpUrl, 200);
|
||||
if (reachable) {
|
||||
running = true;
|
||||
const ctx = createProfileContext(opts, profile);
|
||||
const tabs = await ctx.listTabs().catch(() => []);
|
||||
const tabs = await profileCtx.listTabs().catch(() => []);
|
||||
tabCount = tabs.filter((t) => t.type === "page").length;
|
||||
}
|
||||
} catch {
|
||||
@@ -192,6 +201,7 @@ export function createBrowserRouteContext(opts: ContextOptions): BrowserRouteCon
|
||||
cdpPort: profile.cdpPort,
|
||||
cdpUrl: profile.cdpUrl,
|
||||
color: profile.color,
|
||||
driver: profile.driver,
|
||||
running,
|
||||
tabCount,
|
||||
isDefault: name === current.resolved.defaultProfile,
|
||||
|
||||
@@ -56,6 +56,7 @@ export type ProfileStatus = {
|
||||
cdpPort: number;
|
||||
cdpUrl: string;
|
||||
color: string;
|
||||
driver: ResolvedBrowserProfile["driver"];
|
||||
running: boolean;
|
||||
tabCount: number;
|
||||
isDefault: boolean;
|
||||
|
||||
@@ -407,7 +407,8 @@ export function registerBrowserManageCommands(
|
||||
const def = p.isDefault ? " [default]" : "";
|
||||
const loc = p.isRemote ? `cdpUrl: ${p.cdpUrl}` : `port: ${p.cdpPort}`;
|
||||
const remote = p.isRemote ? " [remote]" : "";
|
||||
return `${p.name}: ${status}${tabs}${def}${remote}\n ${loc}, color: ${p.color}`;
|
||||
const driver = p.driver !== "openclaw" ? ` [${p.driver}]` : "";
|
||||
return `${p.name}: ${status}${tabs}${def}${remote}${driver}\n ${loc}, color: ${p.color}`;
|
||||
})
|
||||
.join("\n"),
|
||||
);
|
||||
@@ -420,7 +421,10 @@ export function registerBrowserManageCommands(
|
||||
.requiredOption("--name <name>", "Profile name (lowercase, numbers, hyphens)")
|
||||
.option("--color <hex>", "Profile color (hex format, e.g. #0066CC)")
|
||||
.option("--cdp-url <url>", "CDP URL for remote Chrome (http/https)")
|
||||
.option("--driver <driver>", "Profile driver (openclaw|extension). Default: openclaw")
|
||||
.option(
|
||||
"--driver <driver>",
|
||||
"Profile driver (openclaw|extension|existing-session). Default: openclaw",
|
||||
)
|
||||
.action(
|
||||
async (opts: { name: string; color?: string; cdpUrl?: string; driver?: string }, cmd) => {
|
||||
const parent = parentOpts(cmd);
|
||||
@@ -434,7 +438,12 @@ export function registerBrowserManageCommands(
|
||||
name: opts.name,
|
||||
color: opts.color,
|
||||
cdpUrl: opts.cdpUrl,
|
||||
driver: opts.driver === "extension" ? "extension" : undefined,
|
||||
driver:
|
||||
opts.driver === "extension"
|
||||
? "extension"
|
||||
: opts.driver === "existing-session"
|
||||
? "existing-session"
|
||||
: undefined,
|
||||
},
|
||||
},
|
||||
{ timeoutMs: 10_000 },
|
||||
@@ -446,7 +455,11 @@ export function registerBrowserManageCommands(
|
||||
defaultRuntime.log(
|
||||
info(
|
||||
`🦞 Created profile "${result.profile}"\n${loc}\n color: ${result.color}${
|
||||
opts.driver === "extension" ? "\n driver: extension" : ""
|
||||
opts.driver === "extension"
|
||||
? "\n driver: extension"
|
||||
: opts.driver === "existing-session"
|
||||
? "\n driver: existing-session"
|
||||
: ""
|
||||
}`,
|
||||
),
|
||||
);
|
||||
|
||||
@@ -64,6 +64,17 @@ describe("resolveCommandSecretRefsViaGateway", () => {
|
||||
});
|
||||
}
|
||||
|
||||
function expectGatewayUnavailableLocalFallbackDiagnostics(
|
||||
result: Awaited<ReturnType<typeof resolveCommandSecretRefsViaGateway>>,
|
||||
) {
|
||||
expect(
|
||||
result.diagnostics.some((entry) => entry.includes("gateway secrets.resolve unavailable")),
|
||||
).toBe(true);
|
||||
expect(
|
||||
result.diagnostics.some((entry) => entry.includes("resolved command secrets locally")),
|
||||
).toBe(true);
|
||||
}
|
||||
|
||||
it("returns config unchanged when no target SecretRefs are configured", async () => {
|
||||
const config = {
|
||||
talk: {
|
||||
@@ -208,11 +219,8 @@ describe("resolveCommandSecretRefsViaGateway", () => {
|
||||
|
||||
it("falls back to local resolution for web search SecretRefs when gateway is unavailable", async () => {
|
||||
const envKey = "WEB_SEARCH_GEMINI_API_KEY_LOCAL_FALLBACK";
|
||||
const priorValue = process.env[envKey];
|
||||
process.env[envKey] = "gemini-local-fallback-key";
|
||||
callGateway.mockRejectedValueOnce(new Error("gateway closed"));
|
||||
|
||||
try {
|
||||
await withEnvValue(envKey, "gemini-local-fallback-key", async () => {
|
||||
callGateway.mockRejectedValueOnce(new Error("gateway closed"));
|
||||
const result = await resolveCommandSecretRefsViaGateway({
|
||||
config: {
|
||||
tools: {
|
||||
@@ -234,28 +242,14 @@ describe("resolveCommandSecretRefsViaGateway", () => {
|
||||
"gemini-local-fallback-key",
|
||||
);
|
||||
expect(result.targetStatesByPath["tools.web.search.gemini.apiKey"]).toBe("resolved_local");
|
||||
expect(
|
||||
result.diagnostics.some((entry) => entry.includes("gateway secrets.resolve unavailable")),
|
||||
).toBe(true);
|
||||
expect(
|
||||
result.diagnostics.some((entry) => entry.includes("resolved command secrets locally")),
|
||||
).toBe(true);
|
||||
} finally {
|
||||
if (priorValue === undefined) {
|
||||
delete process.env[envKey];
|
||||
} else {
|
||||
process.env[envKey] = priorValue;
|
||||
}
|
||||
}
|
||||
expectGatewayUnavailableLocalFallbackDiagnostics(result);
|
||||
});
|
||||
});
|
||||
|
||||
it("falls back to local resolution for Firecrawl SecretRefs when gateway is unavailable", async () => {
|
||||
const envKey = "WEB_FETCH_FIRECRAWL_API_KEY_LOCAL_FALLBACK";
|
||||
const priorValue = process.env[envKey];
|
||||
process.env[envKey] = "firecrawl-local-fallback-key";
|
||||
callGateway.mockRejectedValueOnce(new Error("gateway closed"));
|
||||
|
||||
try {
|
||||
await withEnvValue(envKey, "firecrawl-local-fallback-key", async () => {
|
||||
callGateway.mockRejectedValueOnce(new Error("gateway closed"));
|
||||
const result = await resolveCommandSecretRefsViaGateway({
|
||||
config: {
|
||||
tools: {
|
||||
@@ -276,19 +270,8 @@ describe("resolveCommandSecretRefsViaGateway", () => {
|
||||
"firecrawl-local-fallback-key",
|
||||
);
|
||||
expect(result.targetStatesByPath["tools.web.fetch.firecrawl.apiKey"]).toBe("resolved_local");
|
||||
expect(
|
||||
result.diagnostics.some((entry) => entry.includes("gateway secrets.resolve unavailable")),
|
||||
).toBe(true);
|
||||
expect(
|
||||
result.diagnostics.some((entry) => entry.includes("resolved command secrets locally")),
|
||||
).toBe(true);
|
||||
} finally {
|
||||
if (priorValue === undefined) {
|
||||
delete process.env[envKey];
|
||||
} else {
|
||||
process.env[envKey] = priorValue;
|
||||
}
|
||||
}
|
||||
expectGatewayUnavailableLocalFallbackDiagnostics(result);
|
||||
});
|
||||
});
|
||||
|
||||
it("marks web SecretRefs inactive when the web surface is disabled during local fallback", async () => {
|
||||
|
||||
@@ -1,30 +1,15 @@
|
||||
import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import {
|
||||
defaultRuntime,
|
||||
resetLifecycleRuntimeLogs,
|
||||
resetLifecycleServiceMocks,
|
||||
service,
|
||||
stubEmptyGatewayEnv,
|
||||
} from "./test-helpers/lifecycle-core-harness.js";
|
||||
|
||||
const readConfigFileSnapshotMock = vi.fn();
|
||||
const loadConfig = vi.fn(() => ({}));
|
||||
|
||||
const runtimeLogs: string[] = [];
|
||||
const defaultRuntime = {
|
||||
log: (message: string) => runtimeLogs.push(message),
|
||||
error: vi.fn(),
|
||||
exit: (code: number) => {
|
||||
throw new Error(`__exit__:${code}`);
|
||||
},
|
||||
};
|
||||
|
||||
const service = {
|
||||
label: "TestService",
|
||||
loadedText: "loaded",
|
||||
notLoadedText: "not loaded",
|
||||
install: vi.fn(),
|
||||
uninstall: vi.fn(),
|
||||
stop: vi.fn(),
|
||||
isLoaded: vi.fn(),
|
||||
readCommand: vi.fn(),
|
||||
readRuntime: vi.fn(),
|
||||
restart: vi.fn(),
|
||||
};
|
||||
|
||||
vi.mock("../../config/config.js", () => ({
|
||||
loadConfig: () => loadConfig(),
|
||||
readConfigFileSnapshot: () => readConfigFileSnapshotMock(),
|
||||
@@ -50,7 +35,7 @@ describe("runServiceRestart config pre-flight (#35862)", () => {
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
runtimeLogs.length = 0;
|
||||
resetLifecycleRuntimeLogs();
|
||||
readConfigFileSnapshotMock.mockReset();
|
||||
readConfigFileSnapshotMock.mockResolvedValue({
|
||||
exists: true,
|
||||
@@ -60,15 +45,8 @@ describe("runServiceRestart config pre-flight (#35862)", () => {
|
||||
});
|
||||
loadConfig.mockReset();
|
||||
loadConfig.mockReturnValue({});
|
||||
service.isLoaded.mockClear();
|
||||
service.readCommand.mockClear();
|
||||
service.restart.mockClear();
|
||||
service.isLoaded.mockResolvedValue(true);
|
||||
service.readCommand.mockResolvedValue({ environment: {} });
|
||||
service.restart.mockResolvedValue({ outcome: "completed" });
|
||||
vi.unstubAllEnvs();
|
||||
vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", "");
|
||||
vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", "");
|
||||
resetLifecycleServiceMocks();
|
||||
stubEmptyGatewayEnv();
|
||||
});
|
||||
|
||||
it("aborts restart when config is invalid", async () => {
|
||||
@@ -152,7 +130,7 @@ describe("runServiceStart config pre-flight (#35862)", () => {
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
runtimeLogs.length = 0;
|
||||
resetLifecycleRuntimeLogs();
|
||||
readConfigFileSnapshotMock.mockReset();
|
||||
readConfigFileSnapshotMock.mockResolvedValue({
|
||||
exists: true,
|
||||
@@ -160,10 +138,7 @@ describe("runServiceStart config pre-flight (#35862)", () => {
|
||||
config: {},
|
||||
issues: [],
|
||||
});
|
||||
service.isLoaded.mockClear();
|
||||
service.restart.mockClear();
|
||||
service.isLoaded.mockResolvedValue(true);
|
||||
service.restart.mockResolvedValue({ outcome: "completed" });
|
||||
resetLifecycleServiceMocks();
|
||||
});
|
||||
|
||||
it("aborts start when config is invalid", async () => {
|
||||
|
||||
@@ -1,4 +1,12 @@
|
||||
import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import {
|
||||
defaultRuntime,
|
||||
resetLifecycleRuntimeLogs,
|
||||
resetLifecycleServiceMocks,
|
||||
runtimeLogs,
|
||||
service,
|
||||
stubEmptyGatewayEnv,
|
||||
} from "./test-helpers/lifecycle-core-harness.js";
|
||||
|
||||
const loadConfig = vi.fn(() => ({
|
||||
gateway: {
|
||||
@@ -8,28 +16,6 @@ const loadConfig = vi.fn(() => ({
|
||||
},
|
||||
}));
|
||||
|
||||
const runtimeLogs: string[] = [];
|
||||
const defaultRuntime = {
|
||||
log: (message: string) => runtimeLogs.push(message),
|
||||
error: vi.fn(),
|
||||
exit: (code: number) => {
|
||||
throw new Error(`__exit__:${code}`);
|
||||
},
|
||||
};
|
||||
|
||||
const service = {
|
||||
label: "TestService",
|
||||
loadedText: "loaded",
|
||||
notLoadedText: "not loaded",
|
||||
install: vi.fn(),
|
||||
uninstall: vi.fn(),
|
||||
stop: vi.fn(),
|
||||
isLoaded: vi.fn(),
|
||||
readCommand: vi.fn(),
|
||||
readRuntime: vi.fn(),
|
||||
restart: vi.fn(),
|
||||
};
|
||||
|
||||
vi.mock("../../config/config.js", () => ({
|
||||
loadConfig: () => loadConfig(),
|
||||
readBestEffortConfig: async () => loadConfig(),
|
||||
@@ -49,7 +35,7 @@ describe("runServiceRestart token drift", () => {
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
runtimeLogs.length = 0;
|
||||
resetLifecycleRuntimeLogs();
|
||||
loadConfig.mockReset();
|
||||
loadConfig.mockReturnValue({
|
||||
gateway: {
|
||||
@@ -58,19 +44,11 @@ describe("runServiceRestart token drift", () => {
|
||||
},
|
||||
},
|
||||
});
|
||||
service.isLoaded.mockClear();
|
||||
service.readCommand.mockClear();
|
||||
service.restart.mockClear();
|
||||
service.isLoaded.mockResolvedValue(true);
|
||||
resetLifecycleServiceMocks();
|
||||
service.readCommand.mockResolvedValue({
|
||||
environment: { OPENCLAW_GATEWAY_TOKEN: "service-token" },
|
||||
});
|
||||
service.restart.mockResolvedValue({ outcome: "completed" });
|
||||
vi.unstubAllEnvs();
|
||||
vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", "");
|
||||
vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", "");
|
||||
vi.stubEnv("OPENCLAW_GATEWAY_URL", "");
|
||||
vi.stubEnv("CLAWDBOT_GATEWAY_URL", "");
|
||||
stubEmptyGatewayEnv();
|
||||
});
|
||||
|
||||
it("emits drift warning when enabled", async () => {
|
||||
|
||||
@@ -1,8 +1,5 @@
|
||||
import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
const mockReadFileSync = vi.hoisted(() => vi.fn());
|
||||
const mockSpawnSync = vi.hoisted(() => vi.fn());
|
||||
|
||||
type RestartHealthSnapshot = {
|
||||
healthy: boolean;
|
||||
staleGatewayPids: number[];
|
||||
@@ -35,7 +32,9 @@ const terminateStaleGatewayPids = vi.fn();
|
||||
const renderGatewayPortHealthDiagnostics = vi.fn(() => ["diag: unhealthy port"]);
|
||||
const renderRestartDiagnostics = vi.fn(() => ["diag: unhealthy runtime"]);
|
||||
const resolveGatewayPort = vi.fn(() => 18789);
|
||||
const findGatewayPidsOnPortSync = vi.fn<(port: number) => number[]>(() => []);
|
||||
const findVerifiedGatewayListenerPidsOnPortSync = vi.fn<(port: number) => number[]>(() => []);
|
||||
const signalVerifiedGatewayPidSync = vi.fn<(pid: number, signal: "SIGTERM" | "SIGUSR1") => void>();
|
||||
const formatGatewayPidList = vi.fn<(pids: number[]) => string>((pids) => pids.join(", "));
|
||||
const probeGateway = vi.fn<
|
||||
(opts: {
|
||||
url: string;
|
||||
@@ -49,24 +48,18 @@ const probeGateway = vi.fn<
|
||||
const isRestartEnabled = vi.fn<(config?: { commands?: unknown }) => boolean>(() => true);
|
||||
const loadConfig = vi.fn(() => ({}));
|
||||
|
||||
vi.mock("node:fs", () => ({
|
||||
default: {
|
||||
readFileSync: (...args: unknown[]) => mockReadFileSync(...args),
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock("node:child_process", () => ({
|
||||
spawnSync: (...args: unknown[]) => mockSpawnSync(...args),
|
||||
}));
|
||||
|
||||
vi.mock("../../config/config.js", () => ({
|
||||
loadConfig: () => loadConfig(),
|
||||
readBestEffortConfig: async () => loadConfig(),
|
||||
resolveGatewayPort,
|
||||
}));
|
||||
|
||||
vi.mock("../../infra/restart.js", () => ({
|
||||
findGatewayPidsOnPortSync: (port: number) => findGatewayPidsOnPortSync(port),
|
||||
vi.mock("../../infra/gateway-processes.js", () => ({
|
||||
findVerifiedGatewayListenerPidsOnPortSync: (port: number) =>
|
||||
findVerifiedGatewayListenerPidsOnPortSync(port),
|
||||
signalVerifiedGatewayPidSync: (pid: number, signal: "SIGTERM" | "SIGUSR1") =>
|
||||
signalVerifiedGatewayPidSync(pid, signal),
|
||||
formatGatewayPidList: (pids: number[]) => formatGatewayPidList(pids),
|
||||
}));
|
||||
|
||||
vi.mock("../../gateway/probe.js", () => ({
|
||||
@@ -121,12 +114,12 @@ describe("runDaemonRestart health checks", () => {
|
||||
renderGatewayPortHealthDiagnostics.mockReset();
|
||||
renderRestartDiagnostics.mockReset();
|
||||
resolveGatewayPort.mockReset();
|
||||
findGatewayPidsOnPortSync.mockReset();
|
||||
findVerifiedGatewayListenerPidsOnPortSync.mockReset();
|
||||
signalVerifiedGatewayPidSync.mockReset();
|
||||
formatGatewayPidList.mockReset();
|
||||
probeGateway.mockReset();
|
||||
isRestartEnabled.mockReset();
|
||||
loadConfig.mockReset();
|
||||
mockReadFileSync.mockReset();
|
||||
mockSpawnSync.mockReset();
|
||||
|
||||
service.readCommand.mockResolvedValue({
|
||||
programArguments: ["openclaw", "gateway", "--port", "18789"],
|
||||
@@ -158,23 +151,8 @@ describe("runDaemonRestart health checks", () => {
|
||||
configSnapshot: { commands: { restart: true } },
|
||||
});
|
||||
isRestartEnabled.mockReturnValue(true);
|
||||
mockReadFileSync.mockImplementation((path: string) => {
|
||||
const match = path.match(/\/proc\/(\d+)\/cmdline$/);
|
||||
if (!match) {
|
||||
throw new Error(`unexpected path ${path}`);
|
||||
}
|
||||
const pid = Number.parseInt(match[1] ?? "", 10);
|
||||
if ([4200, 4300].includes(pid)) {
|
||||
return ["openclaw", "gateway", "--port", "18789", ""].join("\0");
|
||||
}
|
||||
throw new Error(`unknown pid ${pid}`);
|
||||
});
|
||||
mockSpawnSync.mockReturnValue({
|
||||
error: null,
|
||||
status: 0,
|
||||
stdout: "openclaw gateway --port 18789",
|
||||
stderr: "",
|
||||
});
|
||||
signalVerifiedGatewayPidSync.mockImplementation(() => {});
|
||||
formatGatewayPidList.mockImplementation((pids) => pids.join(", "));
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
@@ -242,38 +220,20 @@ describe("runDaemonRestart health checks", () => {
|
||||
});
|
||||
|
||||
it("signals an unmanaged gateway process on stop", async () => {
|
||||
vi.spyOn(process, "platform", "get").mockReturnValue("win32");
|
||||
const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true);
|
||||
findGatewayPidsOnPortSync.mockReturnValue([4200, 4200, 4300]);
|
||||
mockSpawnSync.mockReturnValue({
|
||||
error: null,
|
||||
status: 0,
|
||||
stdout:
|
||||
'CommandLine="C:\\\\Program Files\\\\OpenClaw\\\\openclaw.exe" gateway --port 18789\r\n',
|
||||
stderr: "",
|
||||
});
|
||||
findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200, 4200, 4300]);
|
||||
runServiceStop.mockImplementation(async (params: { onNotLoaded?: () => Promise<unknown> }) => {
|
||||
await params.onNotLoaded?.();
|
||||
});
|
||||
|
||||
await runDaemonStop({ json: true });
|
||||
|
||||
expect(findGatewayPidsOnPortSync).toHaveBeenCalledWith(18789);
|
||||
expect(killSpy).toHaveBeenCalledWith(4200, "SIGTERM");
|
||||
expect(killSpy).toHaveBeenCalledWith(4300, "SIGTERM");
|
||||
expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(18789);
|
||||
expect(signalVerifiedGatewayPidSync).toHaveBeenCalledWith(4200, "SIGTERM");
|
||||
expect(signalVerifiedGatewayPidSync).toHaveBeenCalledWith(4300, "SIGTERM");
|
||||
});
|
||||
|
||||
it("signals a single unmanaged gateway process on restart", async () => {
|
||||
vi.spyOn(process, "platform", "get").mockReturnValue("win32");
|
||||
const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true);
|
||||
findGatewayPidsOnPortSync.mockReturnValue([4200]);
|
||||
mockSpawnSync.mockReturnValue({
|
||||
error: null,
|
||||
status: 0,
|
||||
stdout:
|
||||
'CommandLine="C:\\\\Program Files\\\\OpenClaw\\\\openclaw.exe" gateway --port 18789\r\n',
|
||||
stderr: "",
|
||||
});
|
||||
findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200]);
|
||||
runServiceRestart.mockImplementation(
|
||||
async (params: RestartParams & { onNotLoaded?: () => Promise<unknown> }) => {
|
||||
await params.onNotLoaded?.();
|
||||
@@ -291,8 +251,8 @@ describe("runDaemonRestart health checks", () => {
|
||||
|
||||
await runDaemonRestart({ json: true });
|
||||
|
||||
expect(findGatewayPidsOnPortSync).toHaveBeenCalledWith(18789);
|
||||
expect(killSpy).toHaveBeenCalledWith(4200, "SIGUSR1");
|
||||
expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(18789);
|
||||
expect(signalVerifiedGatewayPidSync).toHaveBeenCalledWith(4200, "SIGUSR1");
|
||||
expect(probeGateway).toHaveBeenCalledTimes(1);
|
||||
expect(waitForGatewayHealthyListener).toHaveBeenCalledTimes(1);
|
||||
expect(waitForGatewayHealthyRestart).not.toHaveBeenCalled();
|
||||
@@ -301,15 +261,7 @@ describe("runDaemonRestart health checks", () => {
|
||||
});
|
||||
|
||||
it("fails unmanaged restart when multiple gateway listeners are present", async () => {
|
||||
vi.spyOn(process, "platform", "get").mockReturnValue("win32");
|
||||
findGatewayPidsOnPortSync.mockReturnValue([4200, 4300]);
|
||||
mockSpawnSync.mockReturnValue({
|
||||
error: null,
|
||||
status: 0,
|
||||
stdout:
|
||||
'CommandLine="C:\\\\Program Files\\\\OpenClaw\\\\openclaw.exe" gateway --port 18789\r\n',
|
||||
stderr: "",
|
||||
});
|
||||
findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200, 4300]);
|
||||
runServiceRestart.mockImplementation(
|
||||
async (params: RestartParams & { onNotLoaded?: () => Promise<unknown> }) => {
|
||||
await params.onNotLoaded?.();
|
||||
@@ -323,7 +275,7 @@ describe("runDaemonRestart health checks", () => {
|
||||
});
|
||||
|
||||
it("fails unmanaged restart when the running gateway has commands.restart disabled", async () => {
|
||||
findGatewayPidsOnPortSync.mockReturnValue([4200]);
|
||||
findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4200]);
|
||||
probeGateway.mockResolvedValue({
|
||||
ok: true,
|
||||
configSnapshot: { commands: { restart: false } },
|
||||
@@ -342,21 +294,13 @@ describe("runDaemonRestart health checks", () => {
|
||||
});
|
||||
|
||||
it("skips unmanaged signaling for pids that are not live gateway processes", async () => {
|
||||
const killSpy = vi.spyOn(process, "kill").mockImplementation(() => true);
|
||||
findGatewayPidsOnPortSync.mockReturnValue([4200]);
|
||||
mockReadFileSync.mockReturnValue(["python", "-m", "http.server", ""].join("\0"));
|
||||
mockSpawnSync.mockReturnValue({
|
||||
error: null,
|
||||
status: 0,
|
||||
stdout: "python -m http.server",
|
||||
stderr: "",
|
||||
});
|
||||
findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([]);
|
||||
runServiceStop.mockImplementation(async (params: { onNotLoaded?: () => Promise<unknown> }) => {
|
||||
await params.onNotLoaded?.();
|
||||
});
|
||||
|
||||
await runDaemonStop({ json: true });
|
||||
|
||||
expect(killSpy).not.toHaveBeenCalled();
|
||||
expect(signalVerifiedGatewayPidSync).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
import { spawnSync } from "node:child_process";
|
||||
import fsSync from "node:fs";
|
||||
import { isRestartEnabled } from "../../config/commands.js";
|
||||
import { readBestEffortConfig, resolveGatewayPort } from "../../config/config.js";
|
||||
import { parseCmdScriptCommandLine } from "../../daemon/cmd-argv.js";
|
||||
import { resolveGatewayService } from "../../daemon/service.js";
|
||||
import { probeGateway } from "../../gateway/probe.js";
|
||||
import { isGatewayArgv, parseProcCmdline } from "../../infra/gateway-process-argv.js";
|
||||
import { findGatewayPidsOnPortSync } from "../../infra/restart.js";
|
||||
import {
|
||||
findVerifiedGatewayListenerPidsOnPortSync,
|
||||
formatGatewayPidList,
|
||||
signalVerifiedGatewayPidSync,
|
||||
} from "../../infra/gateway-processes.js";
|
||||
import { defaultRuntime } from "../../runtime.js";
|
||||
import { theme } from "../../terminal/theme.js";
|
||||
import { formatCliCommand } from "../command-format.js";
|
||||
@@ -43,85 +43,12 @@ async function resolveGatewayLifecyclePort(service = resolveGatewayService()) {
|
||||
return portFromArgs ?? resolveGatewayPort(await readBestEffortConfig(), mergedEnv);
|
||||
}
|
||||
|
||||
function extractWindowsCommandLine(raw: string): string | null {
|
||||
const lines = raw
|
||||
.split(/\r?\n/)
|
||||
.map((line) => line.trim())
|
||||
.filter(Boolean);
|
||||
for (const line of lines) {
|
||||
if (!line.toLowerCase().startsWith("commandline=")) {
|
||||
continue;
|
||||
}
|
||||
const value = line.slice("commandline=".length).trim();
|
||||
return value || null;
|
||||
}
|
||||
return lines.find((line) => line.toLowerCase() !== "commandline") ?? null;
|
||||
}
|
||||
|
||||
function readGatewayProcessArgsSync(pid: number): string[] | null {
|
||||
if (process.platform === "linux") {
|
||||
try {
|
||||
return parseProcCmdline(fsSync.readFileSync(`/proc/${pid}/cmdline`, "utf8"));
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
if (process.platform === "darwin") {
|
||||
const ps = spawnSync("ps", ["-o", "command=", "-p", String(pid)], {
|
||||
encoding: "utf8",
|
||||
timeout: 1000,
|
||||
});
|
||||
if (ps.error || ps.status !== 0) {
|
||||
return null;
|
||||
}
|
||||
const command = ps.stdout.trim();
|
||||
return command ? command.split(/\s+/) : null;
|
||||
}
|
||||
if (process.platform === "win32") {
|
||||
const wmic = spawnSync(
|
||||
"wmic",
|
||||
["process", "where", `ProcessId=${pid}`, "get", "CommandLine", "/value"],
|
||||
{
|
||||
encoding: "utf8",
|
||||
timeout: 1000,
|
||||
},
|
||||
);
|
||||
if (wmic.error || wmic.status !== 0) {
|
||||
return null;
|
||||
}
|
||||
const command = extractWindowsCommandLine(wmic.stdout);
|
||||
return command ? parseCmdScriptCommandLine(command) : null;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
function resolveGatewayListenerPids(port: number): number[] {
|
||||
return Array.from(new Set(findGatewayPidsOnPortSync(port)))
|
||||
.filter((pid): pid is number => Number.isFinite(pid) && pid > 0)
|
||||
.filter((pid) => {
|
||||
const args = readGatewayProcessArgsSync(pid);
|
||||
return args != null && isGatewayArgv(args, { allowGatewayBinary: true });
|
||||
});
|
||||
}
|
||||
|
||||
function resolveGatewayPortFallback(): Promise<number> {
|
||||
return readBestEffortConfig()
|
||||
.then((cfg) => resolveGatewayPort(cfg, process.env))
|
||||
.catch(() => resolveGatewayPort(undefined, process.env));
|
||||
}
|
||||
|
||||
function signalGatewayPid(pid: number, signal: "SIGTERM" | "SIGUSR1") {
|
||||
const args = readGatewayProcessArgsSync(pid);
|
||||
if (!args || !isGatewayArgv(args, { allowGatewayBinary: true })) {
|
||||
throw new Error(`refusing to signal non-gateway process pid ${pid}`);
|
||||
}
|
||||
process.kill(pid, signal);
|
||||
}
|
||||
|
||||
function formatGatewayPidList(pids: number[]): string {
|
||||
return pids.join(", ");
|
||||
}
|
||||
|
||||
async function assertUnmanagedGatewayRestartEnabled(port: number): Promise<void> {
|
||||
const probe = await probeGateway({
|
||||
url: `ws://127.0.0.1:${port}`,
|
||||
@@ -143,7 +70,7 @@ async function assertUnmanagedGatewayRestartEnabled(port: number): Promise<void>
|
||||
}
|
||||
|
||||
function resolveVerifiedGatewayListenerPids(port: number): number[] {
|
||||
return resolveGatewayListenerPids(port).filter(
|
||||
return findVerifiedGatewayListenerPidsOnPortSync(port).filter(
|
||||
(pid): pid is number => Number.isFinite(pid) && pid > 0,
|
||||
);
|
||||
}
|
||||
@@ -154,7 +81,7 @@ async function stopGatewayWithoutServiceManager(port: number) {
|
||||
return null;
|
||||
}
|
||||
for (const pid of pids) {
|
||||
signalGatewayPid(pid, "SIGTERM");
|
||||
signalVerifiedGatewayPidSync(pid, "SIGTERM");
|
||||
}
|
||||
return {
|
||||
result: "stopped" as const,
|
||||
@@ -173,7 +100,7 @@ async function restartGatewayWithoutServiceManager(port: number) {
|
||||
`multiple gateway processes are listening on port ${port}: ${formatGatewayPidList(pids)}; use "openclaw gateway status --deep" before retrying restart`,
|
||||
);
|
||||
}
|
||||
signalGatewayPid(pids[0], "SIGUSR1");
|
||||
signalVerifiedGatewayPidSync(pids[0], "SIGUSR1");
|
||||
return {
|
||||
result: "restarted" as const,
|
||||
message: `Gateway restart signal sent to unmanaged process on port ${port}: ${pids[0]}.`,
|
||||
|
||||
@@ -190,6 +190,32 @@ describe("inspectGatewayRestart", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("treats a busy port as healthy when runtime status lags but the probe succeeds", async () => {
|
||||
Object.defineProperty(process, "platform", { value: "win32", configurable: true });
|
||||
|
||||
const service = {
|
||||
readRuntime: vi.fn(async () => ({ status: "stopped" })),
|
||||
} as unknown as GatewayService;
|
||||
|
||||
inspectPortUsage.mockResolvedValue({
|
||||
port: 18789,
|
||||
status: "busy",
|
||||
listeners: [{ pid: 9100, commandLine: "openclaw-gateway" }],
|
||||
hints: [],
|
||||
});
|
||||
classifyPortListener.mockReturnValue("gateway");
|
||||
probeGateway.mockResolvedValue({
|
||||
ok: true,
|
||||
close: null,
|
||||
});
|
||||
|
||||
const { inspectGatewayRestart } = await import("./restart-health.js");
|
||||
const snapshot = await inspectGatewayRestart({ service, port: 18789 });
|
||||
|
||||
expect(snapshot.healthy).toBe(true);
|
||||
expect(snapshot.staleGatewayPids).toEqual([]);
|
||||
});
|
||||
|
||||
it("treats auth-closed probe as healthy gateway reachability", async () => {
|
||||
const snapshot = await inspectAmbiguousOwnershipWithProbe({
|
||||
ok: false,
|
||||
|
||||
@@ -65,7 +65,8 @@ async function confirmGatewayReachable(port: number): Promise<boolean> {
|
||||
const probe = await probeGateway({
|
||||
url: `ws://127.0.0.1:${port}`,
|
||||
auth: token || password ? { token, password } : undefined,
|
||||
timeoutMs: 1_000,
|
||||
timeoutMs: 3_000,
|
||||
includeDetails: false,
|
||||
});
|
||||
return probe.ok || looksLikeAuthClose(probe.close?.code, probe.close?.reason);
|
||||
}
|
||||
@@ -123,6 +124,22 @@ export async function inspectGatewayRestart(params: {
|
||||
};
|
||||
}
|
||||
|
||||
if (portUsage.status === "busy" && runtime.status !== "running") {
|
||||
try {
|
||||
const reachable = await confirmGatewayReachable(params.port);
|
||||
if (reachable) {
|
||||
return {
|
||||
runtime,
|
||||
portUsage,
|
||||
healthy: true,
|
||||
staleGatewayPids: [],
|
||||
};
|
||||
}
|
||||
} catch {
|
||||
// Probe is best-effort; keep the ownership-based diagnostics.
|
||||
}
|
||||
}
|
||||
|
||||
const gatewayListeners =
|
||||
portUsage.status === "busy"
|
||||
? portUsage.listeners.filter(
|
||||
|
||||
@@ -18,7 +18,12 @@ const readLastGatewayErrorLine = vi.fn(async (_env?: NodeJS.ProcessEnv) => null)
|
||||
const auditGatewayServiceConfig = vi.fn(async (_opts?: unknown) => undefined);
|
||||
const serviceIsLoaded = vi.fn(async (_opts?: unknown) => true);
|
||||
const serviceReadRuntime = vi.fn(async (_env?: NodeJS.ProcessEnv) => ({ status: "running" }));
|
||||
const serviceReadCommand = vi.fn(async (_env?: NodeJS.ProcessEnv) => ({
|
||||
const serviceReadCommand = vi.fn<
|
||||
(env?: NodeJS.ProcessEnv) => Promise<{
|
||||
programArguments: string[];
|
||||
environment?: Record<string, string>;
|
||||
}>
|
||||
>(async (_env?: NodeJS.ProcessEnv) => ({
|
||||
programArguments: ["/bin/node", "cli", "gateway", "--port", "19001"],
|
||||
environment: {
|
||||
OPENCLAW_STATE_DIR: "/tmp/openclaw-daemon",
|
||||
@@ -190,6 +195,37 @@ describe("gatherDaemonStatus", () => {
|
||||
expect(status.rpc?.url).toBe("wss://override.example:18790");
|
||||
});
|
||||
|
||||
it("reuses command environment when reading runtime status", async () => {
|
||||
serviceReadCommand.mockResolvedValueOnce({
|
||||
programArguments: ["/bin/node", "cli", "gateway", "--port", "19001"],
|
||||
environment: {
|
||||
OPENCLAW_GATEWAY_PORT: "19001",
|
||||
OPENCLAW_CONFIG_PATH: "/tmp/openclaw-daemon/openclaw.json",
|
||||
OPENCLAW_STATE_DIR: "/tmp/openclaw-daemon",
|
||||
} as Record<string, string>,
|
||||
});
|
||||
serviceReadRuntime.mockImplementationOnce(async (env?: NodeJS.ProcessEnv) => ({
|
||||
status: env?.OPENCLAW_GATEWAY_PORT === "19001" ? "running" : "unknown",
|
||||
detail: env?.OPENCLAW_GATEWAY_PORT ?? "missing-port",
|
||||
}));
|
||||
|
||||
const status = await gatherDaemonStatus({
|
||||
rpc: {},
|
||||
probe: false,
|
||||
deep: false,
|
||||
});
|
||||
|
||||
expect(serviceReadRuntime).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
OPENCLAW_GATEWAY_PORT: "19001",
|
||||
}),
|
||||
);
|
||||
expect(status.service.runtime).toMatchObject({
|
||||
status: "running",
|
||||
detail: "19001",
|
||||
});
|
||||
});
|
||||
|
||||
it("resolves daemon gateway auth password SecretRef values before probing", async () => {
|
||||
daemonLoadedConfig = {
|
||||
gateway: {
|
||||
|
||||
@@ -258,17 +258,21 @@ export async function gatherDaemonStatus(
|
||||
} & FindExtraGatewayServicesOptions,
|
||||
): Promise<DaemonStatus> {
|
||||
const service = resolveGatewayService();
|
||||
const [loaded, command, runtime] = await Promise.all([
|
||||
service.isLoaded({ env: process.env }).catch(() => false),
|
||||
service.readCommand(process.env).catch(() => null),
|
||||
service.readRuntime(process.env).catch((err) => ({ status: "unknown", detail: String(err) })),
|
||||
const command = await service.readCommand(process.env).catch(() => null);
|
||||
const serviceEnv = command?.environment
|
||||
? ({
|
||||
...process.env,
|
||||
...command.environment,
|
||||
} satisfies NodeJS.ProcessEnv)
|
||||
: process.env;
|
||||
const [loaded, runtime] = await Promise.all([
|
||||
service.isLoaded({ env: serviceEnv }).catch(() => false),
|
||||
service.readRuntime(serviceEnv).catch((err) => ({ status: "unknown", detail: String(err) })),
|
||||
]);
|
||||
const configAudit = await auditGatewayServiceConfig({
|
||||
env: process.env,
|
||||
command,
|
||||
});
|
||||
|
||||
const serviceEnv = command?.environment ?? undefined;
|
||||
const {
|
||||
mergedDaemonEnv,
|
||||
cliCfg,
|
||||
@@ -276,7 +280,7 @@ export async function gatherDaemonStatus(
|
||||
cliConfigSummary,
|
||||
daemonConfigSummary,
|
||||
configMismatch,
|
||||
} = await loadDaemonConfigContext(serviceEnv);
|
||||
} = await loadDaemonConfigContext(command?.environment);
|
||||
const { gateway, daemonPort, cliPort, probeUrlOverride } = await resolveGatewayStatusSummary({
|
||||
cliCfg,
|
||||
daemonCfg,
|
||||
|
||||
45
src/cli/daemon-cli/test-helpers/lifecycle-core-harness.ts
Normal file
45
src/cli/daemon-cli/test-helpers/lifecycle-core-harness.ts
Normal file
@@ -0,0 +1,45 @@
|
||||
import { vi } from "vitest";
|
||||
|
||||
export const runtimeLogs: string[] = [];
|
||||
|
||||
export const defaultRuntime = {
|
||||
log: (message: string) => runtimeLogs.push(message),
|
||||
error: vi.fn(),
|
||||
exit: (code: number) => {
|
||||
throw new Error(`__exit__:${code}`);
|
||||
},
|
||||
};
|
||||
|
||||
export const service = {
|
||||
label: "TestService",
|
||||
loadedText: "loaded",
|
||||
notLoadedText: "not loaded",
|
||||
install: vi.fn(),
|
||||
uninstall: vi.fn(),
|
||||
stop: vi.fn(),
|
||||
isLoaded: vi.fn(),
|
||||
readCommand: vi.fn(),
|
||||
readRuntime: vi.fn(),
|
||||
restart: vi.fn(),
|
||||
};
|
||||
|
||||
export function resetLifecycleRuntimeLogs() {
|
||||
runtimeLogs.length = 0;
|
||||
}
|
||||
|
||||
export function resetLifecycleServiceMocks() {
|
||||
service.isLoaded.mockClear();
|
||||
service.readCommand.mockClear();
|
||||
service.restart.mockClear();
|
||||
service.isLoaded.mockResolvedValue(true);
|
||||
service.readCommand.mockResolvedValue({ environment: {} });
|
||||
service.restart.mockResolvedValue({ outcome: "completed" });
|
||||
}
|
||||
|
||||
export function stubEmptyGatewayEnv() {
|
||||
vi.unstubAllEnvs();
|
||||
vi.stubEnv("OPENCLAW_GATEWAY_TOKEN", "");
|
||||
vi.stubEnv("CLAWDBOT_GATEWAY_TOKEN", "");
|
||||
vi.stubEnv("OPENCLAW_GATEWAY_URL", "");
|
||||
vi.stubEnv("CLAWDBOT_GATEWAY_URL", "");
|
||||
}
|
||||
@@ -287,6 +287,7 @@ describe("restart-helper", () => {
|
||||
expect(spawn).toHaveBeenCalledWith("/bin/sh", [scriptPath], {
|
||||
detached: true,
|
||||
stdio: "ignore",
|
||||
windowsHide: true,
|
||||
});
|
||||
expect(mockChild.unref).toHaveBeenCalled();
|
||||
});
|
||||
@@ -302,6 +303,7 @@ describe("restart-helper", () => {
|
||||
expect(spawn).toHaveBeenCalledWith("cmd.exe", ["/d", "/s", "/c", scriptPath], {
|
||||
detached: true,
|
||||
stdio: "ignore",
|
||||
windowsHide: true,
|
||||
});
|
||||
expect(mockChild.unref).toHaveBeenCalled();
|
||||
});
|
||||
@@ -317,6 +319,7 @@ describe("restart-helper", () => {
|
||||
expect(spawn).toHaveBeenCalledWith("cmd.exe", ["/d", "/s", "/c", `"${scriptPath}"`], {
|
||||
detached: true,
|
||||
stdio: "ignore",
|
||||
windowsHide: true,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -169,6 +169,7 @@ export async function runRestartScript(scriptPath: string): Promise<void> {
|
||||
const child = spawn(file, args, {
|
||||
detached: true,
|
||||
stdio: "ignore",
|
||||
windowsHide: true,
|
||||
});
|
||||
child.unref();
|
||||
}
|
||||
|
||||
@@ -8,6 +8,92 @@ import { buildBackupArchiveRoot } from "./backup-shared.js";
|
||||
import { backupVerifyCommand } from "./backup-verify.js";
|
||||
import { backupCreateCommand } from "./backup.js";
|
||||
|
||||
const TEST_ARCHIVE_ROOT = "2026-03-09T00-00-00.000Z-openclaw-backup";
|
||||
|
||||
const createBackupVerifyRuntime = () => ({
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
});
|
||||
|
||||
function createBackupManifest(assetArchivePath: string) {
|
||||
return {
|
||||
schemaVersion: 1,
|
||||
createdAt: "2026-03-09T00:00:00.000Z",
|
||||
archiveRoot: TEST_ARCHIVE_ROOT,
|
||||
runtimeVersion: "test",
|
||||
platform: process.platform,
|
||||
nodeVersion: process.version,
|
||||
assets: [
|
||||
{
|
||||
kind: "state",
|
||||
sourcePath: "/tmp/.openclaw",
|
||||
archivePath: assetArchivePath,
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
async function withBrokenArchiveFixture(
|
||||
options: {
|
||||
tempPrefix: string;
|
||||
manifestAssetArchivePath: string;
|
||||
payloads: Array<{ fileName: string; contents: string; archivePath?: string }>;
|
||||
buildTarEntries?: (paths: { manifestPath: string; payloadPaths: string[] }) => string[];
|
||||
},
|
||||
run: (archivePath: string) => Promise<void>,
|
||||
) {
|
||||
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), options.tempPrefix));
|
||||
const archivePath = path.join(tempDir, "broken.tar.gz");
|
||||
const manifestPath = path.join(tempDir, "manifest.json");
|
||||
const payloadSpecs = await Promise.all(
|
||||
options.payloads.map(async (payload) => {
|
||||
const payloadPath = path.join(tempDir, payload.fileName);
|
||||
await fs.writeFile(payloadPath, payload.contents, "utf8");
|
||||
return {
|
||||
path: payloadPath,
|
||||
archivePath: payload.archivePath ?? options.manifestAssetArchivePath,
|
||||
};
|
||||
}),
|
||||
);
|
||||
const payloadEntryPathBySource = new Map(
|
||||
payloadSpecs.map((payload) => [payload.path, payload.archivePath]),
|
||||
);
|
||||
|
||||
try {
|
||||
await fs.writeFile(
|
||||
manifestPath,
|
||||
`${JSON.stringify(createBackupManifest(options.manifestAssetArchivePath), null, 2)}\n`,
|
||||
"utf8",
|
||||
);
|
||||
await tar.c(
|
||||
{
|
||||
file: archivePath,
|
||||
gzip: true,
|
||||
portable: true,
|
||||
preservePaths: true,
|
||||
onWriteEntry: (entry) => {
|
||||
if (entry.path === manifestPath) {
|
||||
entry.path = `${TEST_ARCHIVE_ROOT}/manifest.json`;
|
||||
return;
|
||||
}
|
||||
const payloadEntryPath = payloadEntryPathBySource.get(entry.path);
|
||||
if (payloadEntryPath) {
|
||||
entry.path = payloadEntryPath;
|
||||
}
|
||||
},
|
||||
},
|
||||
options.buildTarEntries?.({
|
||||
manifestPath,
|
||||
payloadPaths: payloadSpecs.map((payload) => payload.path),
|
||||
}) ?? [manifestPath, ...payloadSpecs.map((payload) => payload.path)],
|
||||
);
|
||||
await run(archivePath);
|
||||
} finally {
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
describe("backupVerifyCommand", () => {
|
||||
let tempHome: TempHomeEnv;
|
||||
|
||||
@@ -26,12 +112,7 @@ describe("backupVerifyCommand", () => {
|
||||
await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8");
|
||||
await fs.writeFile(path.join(stateDir, "state.txt"), "hello\n", "utf8");
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
|
||||
const runtime = createBackupVerifyRuntime();
|
||||
const nowMs = Date.UTC(2026, 2, 9, 0, 0, 0);
|
||||
const created = await backupCreateCommand(runtime, { output: archiveDir, nowMs });
|
||||
const verified = await backupVerifyCommand(runtime, { archive: created.archivePath });
|
||||
@@ -53,12 +134,7 @@ describe("backupVerifyCommand", () => {
|
||||
await fs.writeFile(path.join(root, "payload", "data.txt"), "x\n", "utf8");
|
||||
await tar.c({ file: archivePath, gzip: true, cwd: tempDir }, ["root"]);
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
|
||||
const runtime = createBackupVerifyRuntime();
|
||||
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
|
||||
/expected exactly one backup manifest entry/i,
|
||||
);
|
||||
@@ -95,12 +171,7 @@ describe("backupVerifyCommand", () => {
|
||||
);
|
||||
await tar.c({ file: archivePath, gzip: true, cwd: tempDir }, [rootName]);
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
|
||||
const runtime = createBackupVerifyRuntime();
|
||||
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
|
||||
/missing payload for manifest asset/i,
|
||||
);
|
||||
@@ -110,119 +181,37 @@ describe("backupVerifyCommand", () => {
|
||||
});
|
||||
|
||||
it("fails when archive paths contain traversal segments", async () => {
|
||||
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-traversal-"));
|
||||
const archivePath = path.join(tempDir, "broken.tar.gz");
|
||||
const manifestPath = path.join(tempDir, "manifest.json");
|
||||
const payloadPath = path.join(tempDir, "payload.txt");
|
||||
try {
|
||||
const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup";
|
||||
const traversalPath = `${rootName}/payload/../escaped.txt`;
|
||||
const manifest = {
|
||||
schemaVersion: 1,
|
||||
createdAt: "2026-03-09T00:00:00.000Z",
|
||||
archiveRoot: rootName,
|
||||
runtimeVersion: "test",
|
||||
platform: process.platform,
|
||||
nodeVersion: process.version,
|
||||
assets: [
|
||||
{
|
||||
kind: "state",
|
||||
sourcePath: "/tmp/.openclaw",
|
||||
archivePath: traversalPath,
|
||||
},
|
||||
],
|
||||
};
|
||||
await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8");
|
||||
await fs.writeFile(payloadPath, "payload\n", "utf8");
|
||||
await tar.c(
|
||||
{
|
||||
file: archivePath,
|
||||
gzip: true,
|
||||
portable: true,
|
||||
preservePaths: true,
|
||||
onWriteEntry: (entry) => {
|
||||
if (entry.path === manifestPath) {
|
||||
entry.path = `${rootName}/manifest.json`;
|
||||
return;
|
||||
}
|
||||
if (entry.path === payloadPath) {
|
||||
entry.path = traversalPath;
|
||||
}
|
||||
},
|
||||
},
|
||||
[manifestPath, payloadPath],
|
||||
);
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
|
||||
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
|
||||
/path traversal segments/i,
|
||||
);
|
||||
} finally {
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
const traversalPath = `${TEST_ARCHIVE_ROOT}/payload/../escaped.txt`;
|
||||
await withBrokenArchiveFixture(
|
||||
{
|
||||
tempPrefix: "openclaw-backup-traversal-",
|
||||
manifestAssetArchivePath: traversalPath,
|
||||
payloads: [{ fileName: "payload.txt", contents: "payload\n", archivePath: traversalPath }],
|
||||
},
|
||||
async (archivePath) => {
|
||||
const runtime = createBackupVerifyRuntime();
|
||||
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
|
||||
/path traversal segments/i,
|
||||
);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it("fails when archive paths contain backslashes", async () => {
|
||||
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-backslash-"));
|
||||
const archivePath = path.join(tempDir, "broken.tar.gz");
|
||||
const manifestPath = path.join(tempDir, "manifest.json");
|
||||
const payloadPath = path.join(tempDir, "payload.txt");
|
||||
try {
|
||||
const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup";
|
||||
const invalidPath = `${rootName}/payload\\..\\escaped.txt`;
|
||||
const manifest = {
|
||||
schemaVersion: 1,
|
||||
createdAt: "2026-03-09T00:00:00.000Z",
|
||||
archiveRoot: rootName,
|
||||
runtimeVersion: "test",
|
||||
platform: process.platform,
|
||||
nodeVersion: process.version,
|
||||
assets: [
|
||||
{
|
||||
kind: "state",
|
||||
sourcePath: "/tmp/.openclaw",
|
||||
archivePath: invalidPath,
|
||||
},
|
||||
],
|
||||
};
|
||||
await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8");
|
||||
await fs.writeFile(payloadPath, "payload\n", "utf8");
|
||||
await tar.c(
|
||||
{
|
||||
file: archivePath,
|
||||
gzip: true,
|
||||
portable: true,
|
||||
preservePaths: true,
|
||||
onWriteEntry: (entry) => {
|
||||
if (entry.path === manifestPath) {
|
||||
entry.path = `${rootName}/manifest.json`;
|
||||
return;
|
||||
}
|
||||
if (entry.path === payloadPath) {
|
||||
entry.path = invalidPath;
|
||||
}
|
||||
},
|
||||
},
|
||||
[manifestPath, payloadPath],
|
||||
);
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
|
||||
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
|
||||
/forward slashes/i,
|
||||
);
|
||||
} finally {
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
const invalidPath = `${TEST_ARCHIVE_ROOT}/payload\\..\\escaped.txt`;
|
||||
await withBrokenArchiveFixture(
|
||||
{
|
||||
tempPrefix: "openclaw-backup-backslash-",
|
||||
manifestAssetArchivePath: invalidPath,
|
||||
payloads: [{ fileName: "payload.txt", contents: "payload\n", archivePath: invalidPath }],
|
||||
},
|
||||
async (archivePath) => {
|
||||
const runtime = createBackupVerifyRuntime();
|
||||
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
|
||||
/forward slashes/i,
|
||||
);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it("ignores payload manifest.json files when locating the backup manifest", async () => {
|
||||
@@ -251,12 +240,7 @@ describe("backupVerifyCommand", () => {
|
||||
"utf8",
|
||||
);
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
|
||||
const runtime = createBackupVerifyRuntime();
|
||||
const created = await backupCreateCommand(runtime, {
|
||||
output: archiveDir,
|
||||
includeWorkspace: true,
|
||||
@@ -274,119 +258,44 @@ describe("backupVerifyCommand", () => {
|
||||
});
|
||||
|
||||
it("fails when the archive contains duplicate root manifest entries", async () => {
|
||||
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-duplicate-manifest-"));
|
||||
const archivePath = path.join(tempDir, "broken.tar.gz");
|
||||
const manifestPath = path.join(tempDir, "manifest.json");
|
||||
const payloadPath = path.join(tempDir, "payload.txt");
|
||||
try {
|
||||
const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup";
|
||||
const manifest = {
|
||||
schemaVersion: 1,
|
||||
createdAt: "2026-03-09T00:00:00.000Z",
|
||||
archiveRoot: rootName,
|
||||
runtimeVersion: "test",
|
||||
platform: process.platform,
|
||||
nodeVersion: process.version,
|
||||
assets: [
|
||||
{
|
||||
kind: "state",
|
||||
sourcePath: "/tmp/.openclaw",
|
||||
archivePath: `${rootName}/payload/posix/tmp/.openclaw/payload.txt`,
|
||||
},
|
||||
const payloadArchivePath = `${TEST_ARCHIVE_ROOT}/payload/posix/tmp/.openclaw/payload.txt`;
|
||||
await withBrokenArchiveFixture(
|
||||
{
|
||||
tempPrefix: "openclaw-backup-duplicate-manifest-",
|
||||
manifestAssetArchivePath: payloadArchivePath,
|
||||
payloads: [{ fileName: "payload.txt", contents: "payload\n" }],
|
||||
buildTarEntries: ({ manifestPath, payloadPaths }) => [
|
||||
manifestPath,
|
||||
manifestPath,
|
||||
...payloadPaths,
|
||||
],
|
||||
};
|
||||
await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8");
|
||||
await fs.writeFile(payloadPath, "payload\n", "utf8");
|
||||
await tar.c(
|
||||
{
|
||||
file: archivePath,
|
||||
gzip: true,
|
||||
portable: true,
|
||||
preservePaths: true,
|
||||
onWriteEntry: (entry) => {
|
||||
if (entry.path === manifestPath) {
|
||||
entry.path = `${rootName}/manifest.json`;
|
||||
return;
|
||||
}
|
||||
if (entry.path === payloadPath) {
|
||||
entry.path = `${rootName}/payload/posix/tmp/.openclaw/payload.txt`;
|
||||
}
|
||||
},
|
||||
},
|
||||
[manifestPath, manifestPath, payloadPath],
|
||||
);
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
|
||||
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
|
||||
/expected exactly one backup manifest entry, found 2/i,
|
||||
);
|
||||
} finally {
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
},
|
||||
async (archivePath) => {
|
||||
const runtime = createBackupVerifyRuntime();
|
||||
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
|
||||
/expected exactly one backup manifest entry, found 2/i,
|
||||
);
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it("fails when the archive contains duplicate payload entries", async () => {
|
||||
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-backup-duplicate-payload-"));
|
||||
const archivePath = path.join(tempDir, "broken.tar.gz");
|
||||
const manifestPath = path.join(tempDir, "manifest.json");
|
||||
const payloadPathA = path.join(tempDir, "payload-a.txt");
|
||||
const payloadPathB = path.join(tempDir, "payload-b.txt");
|
||||
try {
|
||||
const rootName = "2026-03-09T00-00-00.000Z-openclaw-backup";
|
||||
const payloadArchivePath = `${rootName}/payload/posix/tmp/.openclaw/payload.txt`;
|
||||
const manifest = {
|
||||
schemaVersion: 1,
|
||||
createdAt: "2026-03-09T00:00:00.000Z",
|
||||
archiveRoot: rootName,
|
||||
runtimeVersion: "test",
|
||||
platform: process.platform,
|
||||
nodeVersion: process.version,
|
||||
assets: [
|
||||
{
|
||||
kind: "state",
|
||||
sourcePath: "/tmp/.openclaw",
|
||||
archivePath: payloadArchivePath,
|
||||
},
|
||||
const payloadArchivePath = `${TEST_ARCHIVE_ROOT}/payload/posix/tmp/.openclaw/payload.txt`;
|
||||
await withBrokenArchiveFixture(
|
||||
{
|
||||
tempPrefix: "openclaw-backup-duplicate-payload-",
|
||||
manifestAssetArchivePath: payloadArchivePath,
|
||||
payloads: [
|
||||
{ fileName: "payload-a.txt", contents: "payload-a\n", archivePath: payloadArchivePath },
|
||||
{ fileName: "payload-b.txt", contents: "payload-b\n", archivePath: payloadArchivePath },
|
||||
],
|
||||
};
|
||||
await fs.writeFile(manifestPath, `${JSON.stringify(manifest, null, 2)}\n`, "utf8");
|
||||
await fs.writeFile(payloadPathA, "payload-a\n", "utf8");
|
||||
await fs.writeFile(payloadPathB, "payload-b\n", "utf8");
|
||||
await tar.c(
|
||||
{
|
||||
file: archivePath,
|
||||
gzip: true,
|
||||
portable: true,
|
||||
preservePaths: true,
|
||||
onWriteEntry: (entry) => {
|
||||
if (entry.path === manifestPath) {
|
||||
entry.path = `${rootName}/manifest.json`;
|
||||
return;
|
||||
}
|
||||
if (entry.path === payloadPathA || entry.path === payloadPathB) {
|
||||
entry.path = payloadArchivePath;
|
||||
}
|
||||
},
|
||||
},
|
||||
[manifestPath, payloadPathA, payloadPathB],
|
||||
);
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
|
||||
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
|
||||
/duplicate entry path/i,
|
||||
);
|
||||
} finally {
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
},
|
||||
async (archivePath) => {
|
||||
const runtime = createBackupVerifyRuntime();
|
||||
await expect(backupVerifyCommand(runtime, { archive: archivePath })).rejects.toThrow(
|
||||
/duplicate entry path/i,
|
||||
);
|
||||
},
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -3,6 +3,7 @@ import os from "node:os";
|
||||
import path from "node:path";
|
||||
import * as tar from "tar";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import type { RuntimeEnv } from "../runtime.js";
|
||||
import { createTempHomeEnv, type TempHomeEnv } from "../test-utils/temp-home.js";
|
||||
import {
|
||||
buildBackupArchiveRoot,
|
||||
@@ -41,6 +42,39 @@ describe("backup commands", () => {
|
||||
await tempHome.restore();
|
||||
});
|
||||
|
||||
function createRuntime(): RuntimeEnv {
|
||||
return {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
} satisfies RuntimeEnv;
|
||||
}
|
||||
|
||||
async function withInvalidWorkspaceBackupConfig<T>(fn: (runtime: RuntimeEnv) => Promise<T>) {
|
||||
const stateDir = path.join(tempHome.home, ".openclaw");
|
||||
const configPath = path.join(tempHome.home, "custom-config.json");
|
||||
process.env.OPENCLAW_CONFIG_PATH = configPath;
|
||||
await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8");
|
||||
await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8");
|
||||
const runtime = createRuntime();
|
||||
|
||||
try {
|
||||
return await fn(runtime);
|
||||
} finally {
|
||||
delete process.env.OPENCLAW_CONFIG_PATH;
|
||||
}
|
||||
}
|
||||
|
||||
function expectWorkspaceCoveredByState(
|
||||
plan: Awaited<ReturnType<typeof resolveBackupPlanFromDisk>>,
|
||||
) {
|
||||
expect(plan.included).toHaveLength(1);
|
||||
expect(plan.included[0]?.kind).toBe("state");
|
||||
expect(plan.skipped).toEqual(
|
||||
expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]),
|
||||
);
|
||||
}
|
||||
|
||||
it("collapses default config, credentials, and workspace into the state backup root", async () => {
|
||||
const stateDir = path.join(tempHome.home, ".openclaw");
|
||||
await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8");
|
||||
@@ -50,12 +84,7 @@ describe("backup commands", () => {
|
||||
await fs.writeFile(path.join(stateDir, "workspace", "SOUL.md"), "# soul\n", "utf8");
|
||||
|
||||
const plan = await resolveBackupPlanFromDisk({ includeWorkspace: true, nowMs: 123 });
|
||||
|
||||
expect(plan.included).toHaveLength(1);
|
||||
expect(plan.included[0]?.kind).toBe("state");
|
||||
expect(plan.skipped).toEqual(
|
||||
expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]),
|
||||
);
|
||||
expectWorkspaceCoveredByState(plan);
|
||||
});
|
||||
|
||||
it("orders coverage checks by canonical path so symlinked workspaces do not duplicate state", async () => {
|
||||
@@ -84,12 +113,7 @@ describe("backup commands", () => {
|
||||
);
|
||||
|
||||
const plan = await resolveBackupPlanFromDisk({ includeWorkspace: true, nowMs: 123 });
|
||||
|
||||
expect(plan.included).toHaveLength(1);
|
||||
expect(plan.included[0]?.kind).toBe("state");
|
||||
expect(plan.skipped).toEqual(
|
||||
expect.arrayContaining([expect.objectContaining({ kind: "workspace", reason: "covered" })]),
|
||||
);
|
||||
expectWorkspaceCoveredByState(plan);
|
||||
} finally {
|
||||
await fs.rm(symlinkDir, { recursive: true, force: true });
|
||||
}
|
||||
@@ -116,11 +140,7 @@ describe("backup commands", () => {
|
||||
await fs.writeFile(path.join(stateDir, "state.txt"), "state\n", "utf8");
|
||||
await fs.writeFile(path.join(externalWorkspace, "SOUL.md"), "# external\n", "utf8");
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
const runtime = createRuntime();
|
||||
|
||||
const nowMs = Date.UTC(2026, 2, 9, 0, 0, 0);
|
||||
const result = await backupCreateCommand(runtime, {
|
||||
@@ -189,11 +209,7 @@ describe("backup commands", () => {
|
||||
await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8");
|
||||
await fs.writeFile(path.join(stateDir, "state.txt"), "state\n", "utf8");
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
const runtime = createRuntime();
|
||||
|
||||
const result = await backupCreateCommand(runtime, {
|
||||
output: archiveDir,
|
||||
@@ -214,11 +230,7 @@ describe("backup commands", () => {
|
||||
const stateDir = path.join(tempHome.home, ".openclaw");
|
||||
await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8");
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
const runtime = createRuntime();
|
||||
|
||||
await expect(
|
||||
backupCreateCommand(runtime, {
|
||||
@@ -239,11 +251,7 @@ describe("backup commands", () => {
|
||||
await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8");
|
||||
await fs.symlink(stateDir, symlinkPath);
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
const runtime = createRuntime();
|
||||
|
||||
await expect(
|
||||
backupCreateCommand(runtime, {
|
||||
@@ -263,11 +271,7 @@ describe("backup commands", () => {
|
||||
await fs.writeFile(path.join(workspaceDir, "SOUL.md"), "# soul\n", "utf8");
|
||||
process.chdir(workspaceDir);
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
const runtime = createRuntime();
|
||||
|
||||
const nowMs = Date.UTC(2026, 2, 9, 1, 2, 3);
|
||||
const result = await backupCreateCommand(runtime, { nowMs });
|
||||
@@ -294,11 +298,7 @@ describe("backup commands", () => {
|
||||
await fs.symlink(workspaceDir, workspaceLink);
|
||||
process.chdir(workspaceLink);
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
const runtime = createRuntime();
|
||||
|
||||
const nowMs = Date.UTC(2026, 2, 9, 1, 3, 4);
|
||||
const result = await backupCreateCommand(runtime, { nowMs });
|
||||
@@ -318,11 +318,7 @@ describe("backup commands", () => {
|
||||
await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8");
|
||||
await fs.writeFile(existingArchive, "already here", "utf8");
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
const runtime = createRuntime();
|
||||
|
||||
const result = await backupCreateCommand(runtime, {
|
||||
output: existingArchive,
|
||||
@@ -336,41 +332,15 @@ describe("backup commands", () => {
|
||||
});
|
||||
|
||||
it("fails fast when config is invalid and workspace backup is enabled", async () => {
|
||||
const stateDir = path.join(tempHome.home, ".openclaw");
|
||||
const configPath = path.join(tempHome.home, "custom-config.json");
|
||||
process.env.OPENCLAW_CONFIG_PATH = configPath;
|
||||
await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8");
|
||||
await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8");
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
|
||||
try {
|
||||
await withInvalidWorkspaceBackupConfig(async (runtime) => {
|
||||
await expect(backupCreateCommand(runtime, { dryRun: true })).rejects.toThrow(
|
||||
/--no-include-workspace/i,
|
||||
);
|
||||
} finally {
|
||||
delete process.env.OPENCLAW_CONFIG_PATH;
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it("allows explicit partial backups when config is invalid", async () => {
|
||||
const stateDir = path.join(tempHome.home, ".openclaw");
|
||||
const configPath = path.join(tempHome.home, "custom-config.json");
|
||||
process.env.OPENCLAW_CONFIG_PATH = configPath;
|
||||
await fs.writeFile(path.join(stateDir, "openclaw.json"), JSON.stringify({}), "utf8");
|
||||
await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8");
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
|
||||
try {
|
||||
await withInvalidWorkspaceBackupConfig(async (runtime) => {
|
||||
const result = await backupCreateCommand(runtime, {
|
||||
dryRun: true,
|
||||
includeWorkspace: false,
|
||||
@@ -378,9 +348,7 @@ describe("backup commands", () => {
|
||||
|
||||
expect(result.includeWorkspace).toBe(false);
|
||||
expect(result.assets.some((asset) => asset.kind === "workspace")).toBe(false);
|
||||
} finally {
|
||||
delete process.env.OPENCLAW_CONFIG_PATH;
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it("backs up only the active config file when --only-config is requested", async () => {
|
||||
@@ -391,11 +359,7 @@ describe("backup commands", () => {
|
||||
await fs.writeFile(path.join(stateDir, "state.txt"), "state\n", "utf8");
|
||||
await fs.writeFile(path.join(stateDir, "credentials", "oauth.json"), "{}", "utf8");
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
const runtime = createRuntime();
|
||||
|
||||
const result = await backupCreateCommand(runtime, {
|
||||
dryRun: true,
|
||||
@@ -413,11 +377,7 @@ describe("backup commands", () => {
|
||||
process.env.OPENCLAW_CONFIG_PATH = configPath;
|
||||
await fs.writeFile(configPath, '{"agents": { defaults: { workspace: ', "utf8");
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
};
|
||||
const runtime = createRuntime();
|
||||
|
||||
try {
|
||||
const result = await backupCreateCommand(runtime, {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
const mocks = vi.hoisted(() => ({
|
||||
loadAuthProfileStoreForSecretsRuntime: vi.fn(),
|
||||
resolvePreferredNodePath: vi.fn(),
|
||||
resolveGatewayProgramArguments: vi.fn(),
|
||||
resolveSystemNodeInfo: vi.fn(),
|
||||
@@ -8,6 +9,10 @@ const mocks = vi.hoisted(() => ({
|
||||
buildServiceEnvironment: vi.fn(),
|
||||
}));
|
||||
|
||||
vi.mock("../agents/auth-profiles.js", () => ({
|
||||
loadAuthProfileStoreForSecretsRuntime: mocks.loadAuthProfileStoreForSecretsRuntime,
|
||||
}));
|
||||
|
||||
vi.mock("../daemon/runtime-paths.js", () => ({
|
||||
resolvePreferredNodePath: mocks.resolvePreferredNodePath,
|
||||
resolveSystemNodeInfo: mocks.resolveSystemNodeInfo,
|
||||
@@ -63,6 +68,10 @@ function mockNodeGatewayPlanFixture(
|
||||
programArguments: ["node", "gateway"],
|
||||
workingDirectory,
|
||||
});
|
||||
mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({
|
||||
version: 1,
|
||||
profiles: {},
|
||||
});
|
||||
mocks.resolveSystemNodeInfo.mockResolvedValue({
|
||||
path: "/opt/node",
|
||||
version,
|
||||
@@ -232,6 +241,67 @@ describe("buildGatewayInstallPlan", () => {
|
||||
expect(plan.environment.HOME).toBe("/Users/service");
|
||||
expect(plan.environment.OPENCLAW_PORT).toBe("3000");
|
||||
});
|
||||
|
||||
it("merges env-backed auth-profile refs into the service environment", async () => {
|
||||
mockNodeGatewayPlanFixture({
|
||||
serviceEnvironment: {
|
||||
OPENCLAW_PORT: "3000",
|
||||
},
|
||||
});
|
||||
mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({
|
||||
version: 1,
|
||||
profiles: {
|
||||
"openai:default": {
|
||||
type: "api_key",
|
||||
provider: "openai",
|
||||
keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" },
|
||||
},
|
||||
"anthropic:default": {
|
||||
type: "token",
|
||||
provider: "anthropic",
|
||||
tokenRef: { source: "env", provider: "default", id: "ANTHROPIC_TOKEN" },
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const plan = await buildGatewayInstallPlan({
|
||||
env: {
|
||||
OPENAI_API_KEY: "sk-openai-test", // pragma: allowlist secret
|
||||
ANTHROPIC_TOKEN: "ant-test-token",
|
||||
},
|
||||
port: 3000,
|
||||
runtime: "node",
|
||||
});
|
||||
|
||||
expect(plan.environment.OPENAI_API_KEY).toBe("sk-openai-test");
|
||||
expect(plan.environment.ANTHROPIC_TOKEN).toBe("ant-test-token");
|
||||
});
|
||||
|
||||
it("skips unresolved auth-profile env refs", async () => {
|
||||
mockNodeGatewayPlanFixture({
|
||||
serviceEnvironment: {
|
||||
OPENCLAW_PORT: "3000",
|
||||
},
|
||||
});
|
||||
mocks.loadAuthProfileStoreForSecretsRuntime.mockReturnValue({
|
||||
version: 1,
|
||||
profiles: {
|
||||
"openai:default": {
|
||||
type: "api_key",
|
||||
provider: "openai",
|
||||
keyRef: { source: "env", provider: "default", id: "OPENAI_API_KEY" },
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const plan = await buildGatewayInstallPlan({
|
||||
env: {},
|
||||
port: 3000,
|
||||
runtime: "node",
|
||||
});
|
||||
|
||||
expect(plan.environment.OPENAI_API_KEY).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("gatewayInstallErrorHint", () => {
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
import {
|
||||
loadAuthProfileStoreForSecretsRuntime,
|
||||
type AuthProfileStore,
|
||||
} from "../agents/auth-profiles.js";
|
||||
import { formatCliCommand } from "../cli/command-format.js";
|
||||
import { collectConfigServiceEnvVars } from "../config/env-vars.js";
|
||||
import type { OpenClawConfig } from "../config/types.js";
|
||||
@@ -19,6 +23,33 @@ export type GatewayInstallPlan = {
|
||||
environment: Record<string, string | undefined>;
|
||||
};
|
||||
|
||||
function collectAuthProfileServiceEnvVars(params: {
|
||||
env: Record<string, string | undefined>;
|
||||
authStore?: AuthProfileStore;
|
||||
}): Record<string, string> {
|
||||
const authStore = params.authStore ?? loadAuthProfileStoreForSecretsRuntime();
|
||||
const entries: Record<string, string> = {};
|
||||
|
||||
for (const credential of Object.values(authStore.profiles)) {
|
||||
const ref =
|
||||
credential.type === "api_key"
|
||||
? credential.keyRef
|
||||
: credential.type === "token"
|
||||
? credential.tokenRef
|
||||
: undefined;
|
||||
if (!ref || ref.source !== "env") {
|
||||
continue;
|
||||
}
|
||||
const value = params.env[ref.id]?.trim();
|
||||
if (!value) {
|
||||
continue;
|
||||
}
|
||||
entries[ref.id] = value;
|
||||
}
|
||||
|
||||
return entries;
|
||||
}
|
||||
|
||||
export async function buildGatewayInstallPlan(params: {
|
||||
env: Record<string, string | undefined>;
|
||||
port: number;
|
||||
@@ -28,6 +59,7 @@ export async function buildGatewayInstallPlan(params: {
|
||||
warn?: DaemonInstallWarnFn;
|
||||
/** Full config to extract env vars from (env vars + inline env keys). */
|
||||
config?: OpenClawConfig;
|
||||
authStore?: AuthProfileStore;
|
||||
}): Promise<GatewayInstallPlan> {
|
||||
const { devMode, nodePath } = await resolveDaemonInstallRuntimeInputs({
|
||||
env: params.env,
|
||||
@@ -61,6 +93,10 @@ export async function buildGatewayInstallPlan(params: {
|
||||
// Config env vars are added first so service-specific vars take precedence.
|
||||
const environment: Record<string, string | undefined> = {
|
||||
...collectConfigServiceEnvVars(params.config),
|
||||
...collectAuthProfileServiceEnvVars({
|
||||
env: params.env,
|
||||
authStore: params.authStore,
|
||||
}),
|
||||
};
|
||||
Object.assign(environment, serviceEnvironment);
|
||||
|
||||
|
||||
@@ -27,44 +27,55 @@ function makePrompter(confirmResult = true) {
|
||||
};
|
||||
}
|
||||
|
||||
function createCronConfig(storePath: string): OpenClawConfig {
|
||||
return {
|
||||
cron: {
|
||||
store: storePath,
|
||||
webhook: "https://example.invalid/cron-finished",
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function createLegacyCronJob(overrides: Record<string, unknown> = {}) {
|
||||
return {
|
||||
jobId: "legacy-job",
|
||||
name: "Legacy job",
|
||||
notify: true,
|
||||
createdAtMs: Date.parse("2026-02-01T00:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2026-02-02T00:00:00.000Z"),
|
||||
schedule: { kind: "cron", cron: "0 7 * * *", tz: "UTC" },
|
||||
payload: {
|
||||
kind: "systemEvent",
|
||||
text: "Morning brief",
|
||||
},
|
||||
state: {},
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
async function writeCronStore(storePath: string, jobs: Array<Record<string, unknown>>) {
|
||||
await fs.mkdir(path.dirname(storePath), { recursive: true });
|
||||
await fs.writeFile(
|
||||
storePath,
|
||||
JSON.stringify(
|
||||
{
|
||||
version: 1,
|
||||
jobs,
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
"utf-8",
|
||||
);
|
||||
}
|
||||
|
||||
describe("maybeRepairLegacyCronStore", () => {
|
||||
it("repairs legacy cron store fields and migrates notify fallback to webhook delivery", async () => {
|
||||
const storePath = await makeTempStorePath();
|
||||
await fs.mkdir(path.dirname(storePath), { recursive: true });
|
||||
await fs.writeFile(
|
||||
storePath,
|
||||
JSON.stringify(
|
||||
{
|
||||
version: 1,
|
||||
jobs: [
|
||||
{
|
||||
jobId: "legacy-job",
|
||||
name: "Legacy job",
|
||||
notify: true,
|
||||
createdAtMs: Date.parse("2026-02-01T00:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2026-02-02T00:00:00.000Z"),
|
||||
schedule: { kind: "cron", cron: "0 7 * * *", tz: "UTC" },
|
||||
payload: {
|
||||
kind: "systemEvent",
|
||||
text: "Morning brief",
|
||||
},
|
||||
state: {},
|
||||
},
|
||||
],
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
"utf-8",
|
||||
);
|
||||
await writeCronStore(storePath, [createLegacyCronJob()]);
|
||||
|
||||
const noteSpy = vi.spyOn(noteModule, "note").mockImplementation(() => {});
|
||||
const cfg: OpenClawConfig = {
|
||||
cron: {
|
||||
store: storePath,
|
||||
webhook: "https://example.invalid/cron-finished",
|
||||
},
|
||||
};
|
||||
const cfg = createCronConfig(storePath);
|
||||
|
||||
await maybeRepairLegacyCronStore({
|
||||
cfg,
|
||||
@@ -158,44 +169,13 @@ describe("maybeRepairLegacyCronStore", () => {
|
||||
|
||||
it("does not auto-repair in non-interactive mode without explicit repair approval", async () => {
|
||||
const storePath = await makeTempStorePath();
|
||||
await fs.mkdir(path.dirname(storePath), { recursive: true });
|
||||
await fs.writeFile(
|
||||
storePath,
|
||||
JSON.stringify(
|
||||
{
|
||||
version: 1,
|
||||
jobs: [
|
||||
{
|
||||
jobId: "legacy-job",
|
||||
name: "Legacy job",
|
||||
notify: true,
|
||||
createdAtMs: Date.parse("2026-02-01T00:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2026-02-02T00:00:00.000Z"),
|
||||
schedule: { kind: "cron", cron: "0 7 * * *", tz: "UTC" },
|
||||
payload: {
|
||||
kind: "systemEvent",
|
||||
text: "Morning brief",
|
||||
},
|
||||
state: {},
|
||||
},
|
||||
],
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
"utf-8",
|
||||
);
|
||||
await writeCronStore(storePath, [createLegacyCronJob()]);
|
||||
|
||||
const noteSpy = vi.spyOn(noteModule, "note").mockImplementation(() => {});
|
||||
const prompter = makePrompter(false);
|
||||
|
||||
await maybeRepairLegacyCronStore({
|
||||
cfg: {
|
||||
cron: {
|
||||
store: storePath,
|
||||
webhook: "https://example.invalid/cron-finished",
|
||||
},
|
||||
},
|
||||
cfg: createCronConfig(storePath),
|
||||
options: { nonInteractive: true },
|
||||
prompter,
|
||||
});
|
||||
|
||||
@@ -26,6 +26,32 @@ async function makeRootWithEmptyCfg() {
|
||||
return { root, cfg };
|
||||
}
|
||||
|
||||
function writeLegacyTelegramAllowFromStore(oauthDir: string) {
|
||||
fs.writeFileSync(
|
||||
path.join(oauthDir, "telegram-allowFrom.json"),
|
||||
JSON.stringify(
|
||||
{
|
||||
version: 1,
|
||||
allowFrom: ["123456"],
|
||||
},
|
||||
null,
|
||||
2,
|
||||
) + "\n",
|
||||
"utf-8",
|
||||
);
|
||||
}
|
||||
|
||||
async function runTelegramAllowFromMigration(params: { root: string; cfg: OpenClawConfig }) {
|
||||
const oauthDir = ensureCredentialsDir(params.root);
|
||||
writeLegacyTelegramAllowFromStore(oauthDir);
|
||||
const detected = await detectLegacyStateMigrations({
|
||||
cfg: params.cfg,
|
||||
env: { OPENCLAW_STATE_DIR: params.root } as NodeJS.ProcessEnv,
|
||||
});
|
||||
const result = await runLegacyStateMigrations({ detected, now: () => 123 });
|
||||
return { oauthDir, detected, result };
|
||||
}
|
||||
|
||||
afterEach(async () => {
|
||||
resetAutoMigrateLegacyStateForTest();
|
||||
resetAutoMigrateLegacyStateDirForTest();
|
||||
@@ -277,30 +303,11 @@ describe("doctor legacy state migrations", () => {
|
||||
|
||||
it("migrates legacy Telegram pairing allowFrom store to account-scoped default file", async () => {
|
||||
const { root, cfg } = await makeRootWithEmptyCfg();
|
||||
const oauthDir = ensureCredentialsDir(root);
|
||||
fs.writeFileSync(
|
||||
path.join(oauthDir, "telegram-allowFrom.json"),
|
||||
JSON.stringify(
|
||||
{
|
||||
version: 1,
|
||||
allowFrom: ["123456"],
|
||||
},
|
||||
null,
|
||||
2,
|
||||
) + "\n",
|
||||
"utf-8",
|
||||
);
|
||||
|
||||
const detected = await detectLegacyStateMigrations({
|
||||
cfg,
|
||||
env: { OPENCLAW_STATE_DIR: root } as NodeJS.ProcessEnv,
|
||||
});
|
||||
const { oauthDir, detected, result } = await runTelegramAllowFromMigration({ root, cfg });
|
||||
expect(detected.pairingAllowFrom.hasLegacyTelegram).toBe(true);
|
||||
expect(
|
||||
detected.pairingAllowFrom.copyPlans.map((plan) => path.basename(plan.targetPath)),
|
||||
).toEqual(["telegram-default-allowFrom.json"]);
|
||||
|
||||
const result = await runLegacyStateMigrations({ detected, now: () => 123 });
|
||||
expect(result.warnings).toEqual([]);
|
||||
|
||||
const target = path.join(oauthDir, "telegram-default-allowFrom.json");
|
||||
@@ -323,30 +330,11 @@ describe("doctor legacy state migrations", () => {
|
||||
},
|
||||
},
|
||||
};
|
||||
const oauthDir = ensureCredentialsDir(root);
|
||||
fs.writeFileSync(
|
||||
path.join(oauthDir, "telegram-allowFrom.json"),
|
||||
JSON.stringify(
|
||||
{
|
||||
version: 1,
|
||||
allowFrom: ["123456"],
|
||||
},
|
||||
null,
|
||||
2,
|
||||
) + "\n",
|
||||
"utf-8",
|
||||
);
|
||||
|
||||
const detected = await detectLegacyStateMigrations({
|
||||
cfg,
|
||||
env: { OPENCLAW_STATE_DIR: root } as NodeJS.ProcessEnv,
|
||||
});
|
||||
const { oauthDir, detected, result } = await runTelegramAllowFromMigration({ root, cfg });
|
||||
expect(detected.pairingAllowFrom.hasLegacyTelegram).toBe(true);
|
||||
expect(
|
||||
detected.pairingAllowFrom.copyPlans.map((plan) => path.basename(plan.targetPath)).toSorted(),
|
||||
).toEqual(["telegram-bot1-allowFrom.json", "telegram-bot2-allowFrom.json"]);
|
||||
|
||||
const result = await runLegacyStateMigrations({ detected, now: () => 123 });
|
||||
expect(result.warnings).toEqual([]);
|
||||
|
||||
const bot1Target = path.join(oauthDir, "telegram-bot1-allowFrom.json");
|
||||
|
||||
@@ -1,7 +1,28 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { NON_ENV_SECRETREF_MARKER } from "../../agents/model-auth-markers.js";
|
||||
import { withEnv } from "../../test-utils/env.js";
|
||||
import { resolveProviderAuthOverview } from "./list.auth-overview.js";
|
||||
|
||||
function resolveOpenAiOverview(apiKey: string) {
|
||||
return resolveProviderAuthOverview({
|
||||
provider: "openai",
|
||||
cfg: {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions",
|
||||
apiKey,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
} as never,
|
||||
store: { version: 1, profiles: {} } as never,
|
||||
modelsPath: "/tmp/models.json",
|
||||
});
|
||||
}
|
||||
|
||||
describe("resolveProviderAuthOverview", () => {
|
||||
it("does not throw when token profile only has tokenRef", () => {
|
||||
const overview = resolveProviderAuthOverview({
|
||||
@@ -24,23 +45,9 @@ describe("resolveProviderAuthOverview", () => {
|
||||
});
|
||||
|
||||
it("renders marker-backed models.json auth as marker detail", () => {
|
||||
const overview = resolveProviderAuthOverview({
|
||||
provider: "openai",
|
||||
cfg: {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions",
|
||||
apiKey: NON_ENV_SECRETREF_MARKER,
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
} as never,
|
||||
store: { version: 1, profiles: {} } as never,
|
||||
modelsPath: "/tmp/models.json",
|
||||
});
|
||||
const overview = withEnv({ OPENAI_API_KEY: undefined }, () =>
|
||||
resolveOpenAiOverview(NON_ENV_SECRETREF_MARKER),
|
||||
);
|
||||
|
||||
expect(overview.effective.kind).toBe("missing");
|
||||
expect(overview.effective.detail).toBe("missing");
|
||||
@@ -48,23 +55,9 @@ describe("resolveProviderAuthOverview", () => {
|
||||
});
|
||||
|
||||
it("keeps env-var-shaped models.json values masked to avoid accidental plaintext exposure", () => {
|
||||
const overview = resolveProviderAuthOverview({
|
||||
provider: "openai",
|
||||
cfg: {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions",
|
||||
apiKey: "OPENAI_API_KEY", // pragma: allowlist secret
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
} as never,
|
||||
store: { version: 1, profiles: {} } as never,
|
||||
modelsPath: "/tmp/models.json",
|
||||
});
|
||||
const overview = withEnv({ OPENAI_API_KEY: undefined }, () =>
|
||||
resolveOpenAiOverview("OPENAI_API_KEY"),
|
||||
);
|
||||
|
||||
expect(overview.effective.kind).toBe("missing");
|
||||
expect(overview.effective.detail).toBe("missing");
|
||||
@@ -76,23 +69,7 @@ describe("resolveProviderAuthOverview", () => {
|
||||
const prior = process.env.OPENAI_API_KEY;
|
||||
process.env.OPENAI_API_KEY = "sk-openai-from-env"; // pragma: allowlist secret
|
||||
try {
|
||||
const overview = resolveProviderAuthOverview({
|
||||
provider: "openai",
|
||||
cfg: {
|
||||
models: {
|
||||
providers: {
|
||||
openai: {
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
api: "openai-completions",
|
||||
apiKey: "OPENAI_API_KEY", // pragma: allowlist secret
|
||||
models: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
} as never,
|
||||
store: { version: 1, profiles: {} } as never,
|
||||
modelsPath: "/tmp/models.json",
|
||||
});
|
||||
const overview = resolveOpenAiOverview("OPENAI_API_KEY");
|
||||
expect(overview.effective.kind).toBe("env");
|
||||
expect(overview.effective.detail).not.toContain("OPENAI_API_KEY");
|
||||
} finally {
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import type { RuntimeEnv } from "../runtime.js";
|
||||
import { jsonResponse, requestBodyText, requestUrl } from "../test-helpers/http.js";
|
||||
import type { WizardPrompter } from "../wizard/prompts.js";
|
||||
import {
|
||||
configureOllamaNonInteractive,
|
||||
@@ -23,27 +24,6 @@ vi.mock("./oauth-env.js", () => ({
|
||||
isRemoteEnvironment: isRemoteEnvironmentMock,
|
||||
}));
|
||||
|
||||
function jsonResponse(body: unknown, status = 200): Response {
|
||||
return new Response(JSON.stringify(body), {
|
||||
status,
|
||||
headers: { "Content-Type": "application/json" },
|
||||
});
|
||||
}
|
||||
|
||||
function requestUrl(input: string | URL | Request): string {
|
||||
if (typeof input === "string") {
|
||||
return input;
|
||||
}
|
||||
if (input instanceof URL) {
|
||||
return input.toString();
|
||||
}
|
||||
return input.url;
|
||||
}
|
||||
|
||||
function requestBody(body: BodyInit | null | undefined): string {
|
||||
return typeof body === "string" ? body : "{}";
|
||||
}
|
||||
|
||||
function createOllamaFetchMock(params: {
|
||||
tags?: string[];
|
||||
show?: Record<string, number | undefined>;
|
||||
@@ -61,7 +41,7 @@ function createOllamaFetchMock(params: {
|
||||
return jsonResponse({ models: (params.tags ?? []).map((name) => ({ name })) });
|
||||
}
|
||||
if (url.endsWith("/api/show")) {
|
||||
const body = JSON.parse(requestBody(init?.body)) as { name?: string };
|
||||
const body = JSON.parse(requestBodyText(init?.body)) as { name?: string };
|
||||
const contextWindow = body.name ? params.show?.[body.name] : undefined;
|
||||
return contextWindow
|
||||
? jsonResponse({ model_info: { "llama.context_length": contextWindow } })
|
||||
@@ -77,6 +57,45 @@ function createOllamaFetchMock(params: {
|
||||
});
|
||||
}
|
||||
|
||||
function createModePrompter(
|
||||
mode: "local" | "remote",
|
||||
params?: { confirm?: boolean },
|
||||
): WizardPrompter {
|
||||
return {
|
||||
text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"),
|
||||
select: vi.fn().mockResolvedValueOnce(mode),
|
||||
...(params?.confirm !== undefined
|
||||
? { confirm: vi.fn().mockResolvedValueOnce(params.confirm) }
|
||||
: {}),
|
||||
note: vi.fn(async () => undefined),
|
||||
} as unknown as WizardPrompter;
|
||||
}
|
||||
|
||||
function createSignedOutRemoteFetchMock() {
|
||||
return createOllamaFetchMock({
|
||||
tags: ["llama3:8b"],
|
||||
meResponses: [
|
||||
jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401),
|
||||
jsonResponse({ username: "testuser" }),
|
||||
],
|
||||
});
|
||||
}
|
||||
|
||||
function createDefaultOllamaConfig(primary: string) {
|
||||
return {
|
||||
agents: { defaults: { model: { primary } } },
|
||||
models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } },
|
||||
};
|
||||
}
|
||||
|
||||
function createRuntime() {
|
||||
return {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
} as unknown as RuntimeEnv;
|
||||
}
|
||||
|
||||
describe("ollama setup", () => {
|
||||
afterEach(() => {
|
||||
vi.unstubAllGlobals();
|
||||
@@ -86,11 +105,7 @@ describe("ollama setup", () => {
|
||||
});
|
||||
|
||||
it("returns suggested default model for local mode", async () => {
|
||||
const prompter = {
|
||||
text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"),
|
||||
select: vi.fn().mockResolvedValueOnce("local"),
|
||||
note: vi.fn(async () => undefined),
|
||||
} as unknown as WizardPrompter;
|
||||
const prompter = createModePrompter("local");
|
||||
|
||||
const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] });
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
@@ -101,11 +116,7 @@ describe("ollama setup", () => {
|
||||
});
|
||||
|
||||
it("returns suggested default model for remote mode", async () => {
|
||||
const prompter = {
|
||||
text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"),
|
||||
select: vi.fn().mockResolvedValueOnce("remote"),
|
||||
note: vi.fn(async () => undefined),
|
||||
} as unknown as WizardPrompter;
|
||||
const prompter = createModePrompter("remote");
|
||||
|
||||
const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] });
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
@@ -116,11 +127,7 @@ describe("ollama setup", () => {
|
||||
});
|
||||
|
||||
it("mode selection affects model ordering (local)", async () => {
|
||||
const prompter = {
|
||||
text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"),
|
||||
select: vi.fn().mockResolvedValueOnce("local"),
|
||||
note: vi.fn(async () => undefined),
|
||||
} as unknown as WizardPrompter;
|
||||
const prompter = createModePrompter("local");
|
||||
|
||||
const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b", "glm-4.7-flash"] });
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
@@ -134,20 +141,8 @@ describe("ollama setup", () => {
|
||||
});
|
||||
|
||||
it("cloud+local mode triggers /api/me check and opens sign-in URL", async () => {
|
||||
const prompter = {
|
||||
text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"),
|
||||
select: vi.fn().mockResolvedValueOnce("remote"),
|
||||
confirm: vi.fn().mockResolvedValueOnce(true),
|
||||
note: vi.fn(async () => undefined),
|
||||
} as unknown as WizardPrompter;
|
||||
|
||||
const fetchMock = createOllamaFetchMock({
|
||||
tags: ["llama3:8b"],
|
||||
meResponses: [
|
||||
jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401),
|
||||
jsonResponse({ username: "testuser" }),
|
||||
],
|
||||
});
|
||||
const prompter = createModePrompter("remote", { confirm: true });
|
||||
const fetchMock = createSignedOutRemoteFetchMock();
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
await promptAndConfigureOllama({ cfg: {}, prompter });
|
||||
@@ -158,20 +153,8 @@ describe("ollama setup", () => {
|
||||
|
||||
it("cloud+local mode does not open browser in remote environment", async () => {
|
||||
isRemoteEnvironmentMock.mockReturnValue(true);
|
||||
const prompter = {
|
||||
text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"),
|
||||
select: vi.fn().mockResolvedValueOnce("remote"),
|
||||
confirm: vi.fn().mockResolvedValueOnce(true),
|
||||
note: vi.fn(async () => undefined),
|
||||
} as unknown as WizardPrompter;
|
||||
|
||||
const fetchMock = createOllamaFetchMock({
|
||||
tags: ["llama3:8b"],
|
||||
meResponses: [
|
||||
jsonResponse({ error: "not signed in", signin_url: "https://ollama.com/signin" }, 401),
|
||||
jsonResponse({ username: "testuser" }),
|
||||
],
|
||||
});
|
||||
const prompter = createModePrompter("remote", { confirm: true });
|
||||
const fetchMock = createSignedOutRemoteFetchMock();
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
await promptAndConfigureOllama({ cfg: {}, prompter });
|
||||
@@ -180,11 +163,7 @@ describe("ollama setup", () => {
|
||||
});
|
||||
|
||||
it("local mode does not trigger cloud auth", async () => {
|
||||
const prompter = {
|
||||
text: vi.fn().mockResolvedValueOnce("http://127.0.0.1:11434"),
|
||||
select: vi.fn().mockResolvedValueOnce("local"),
|
||||
note: vi.fn(async () => undefined),
|
||||
} as unknown as WizardPrompter;
|
||||
const prompter = createModePrompter("local");
|
||||
|
||||
const fetchMock = createOllamaFetchMock({ tags: ["llama3:8b"] });
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
@@ -258,10 +237,7 @@ describe("ollama setup", () => {
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
await ensureOllamaModelPulled({
|
||||
config: {
|
||||
agents: { defaults: { model: { primary: "ollama/glm-4.7-flash" } } },
|
||||
models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } },
|
||||
},
|
||||
config: createDefaultOllamaConfig("ollama/glm-4.7-flash"),
|
||||
prompter,
|
||||
});
|
||||
|
||||
@@ -276,10 +252,7 @@ describe("ollama setup", () => {
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
await ensureOllamaModelPulled({
|
||||
config: {
|
||||
agents: { defaults: { model: { primary: "ollama/glm-4.7-flash" } } },
|
||||
models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } },
|
||||
},
|
||||
config: createDefaultOllamaConfig("ollama/glm-4.7-flash"),
|
||||
prompter,
|
||||
});
|
||||
|
||||
@@ -292,10 +265,7 @@ describe("ollama setup", () => {
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
await ensureOllamaModelPulled({
|
||||
config: {
|
||||
agents: { defaults: { model: { primary: "ollama/kimi-k2.5:cloud" } } },
|
||||
models: { providers: { ollama: { baseUrl: "http://127.0.0.1:11434", models: [] } } },
|
||||
},
|
||||
config: createDefaultOllamaConfig("ollama/kimi-k2.5:cloud"),
|
||||
prompter,
|
||||
});
|
||||
|
||||
@@ -324,12 +294,7 @@ describe("ollama setup", () => {
|
||||
pullResponse: new Response('{"error":"disk full"}\n', { status: 200 }),
|
||||
});
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
} as unknown as RuntimeEnv;
|
||||
const runtime = createRuntime();
|
||||
|
||||
const result = await configureOllamaNonInteractive({
|
||||
nextConfig: {
|
||||
@@ -362,12 +327,7 @@ describe("ollama setup", () => {
|
||||
pullResponse: new Response('{"status":"success"}\n', { status: 200 }),
|
||||
});
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
} as unknown as RuntimeEnv;
|
||||
const runtime = createRuntime();
|
||||
|
||||
const result = await configureOllamaNonInteractive({
|
||||
nextConfig: {},
|
||||
@@ -379,7 +339,7 @@ describe("ollama setup", () => {
|
||||
});
|
||||
|
||||
const pullRequest = fetchMock.mock.calls[1]?.[1];
|
||||
expect(JSON.parse(requestBody(pullRequest?.body))).toEqual({ name: "llama3.2:latest" });
|
||||
expect(JSON.parse(requestBodyText(pullRequest?.body))).toEqual({ name: "llama3.2:latest" });
|
||||
expect(result.agents?.defaults?.model).toEqual(
|
||||
expect.objectContaining({ primary: "ollama/llama3.2:latest" }),
|
||||
);
|
||||
@@ -388,12 +348,7 @@ describe("ollama setup", () => {
|
||||
it("accepts cloud models in non-interactive mode without pulling", async () => {
|
||||
const fetchMock = createOllamaFetchMock({ tags: [] });
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
const runtime = {
|
||||
log: vi.fn(),
|
||||
error: vi.fn(),
|
||||
exit: vi.fn(),
|
||||
} as unknown as RuntimeEnv;
|
||||
const runtime = createRuntime();
|
||||
|
||||
const result = await configureOllamaNonInteractive({
|
||||
nextConfig: {},
|
||||
|
||||
@@ -85,6 +85,29 @@ import {
|
||||
MODELSTUDIO_DEFAULT_MODEL_REF,
|
||||
} from "./onboard-auth.models.js";
|
||||
|
||||
function mergeProviderModels<T extends { id: string }>(
|
||||
existingProvider: Record<string, unknown> | undefined,
|
||||
defaultModels: T[],
|
||||
): T[] {
|
||||
const existingModels = Array.isArray(existingProvider?.models)
|
||||
? (existingProvider.models as T[])
|
||||
: [];
|
||||
const mergedModels = [...existingModels];
|
||||
const seen = new Set(existingModels.map((model) => model.id));
|
||||
for (const model of defaultModels) {
|
||||
if (!seen.has(model.id)) {
|
||||
mergedModels.push(model);
|
||||
seen.add(model.id);
|
||||
}
|
||||
}
|
||||
return mergedModels;
|
||||
}
|
||||
|
||||
function getNormalizedProviderApiKey(existingProvider: Record<string, unknown> | undefined) {
|
||||
const { apiKey } = (existingProvider ?? {}) as { apiKey?: string };
|
||||
return typeof apiKey === "string" ? apiKey.trim() || undefined : undefined;
|
||||
}
|
||||
|
||||
export function applyZaiProviderConfig(
|
||||
cfg: OpenClawConfig,
|
||||
params?: { endpoint?: string; modelId?: string },
|
||||
@@ -100,7 +123,6 @@ export function applyZaiProviderConfig(
|
||||
|
||||
const providers = { ...cfg.models?.providers };
|
||||
const existingProvider = providers.zai;
|
||||
const existingModels = Array.isArray(existingProvider?.models) ? existingProvider.models : [];
|
||||
|
||||
const defaultModels = [
|
||||
buildZaiModelDefinition({ id: "glm-5" }),
|
||||
@@ -109,21 +131,13 @@ export function applyZaiProviderConfig(
|
||||
buildZaiModelDefinition({ id: "glm-4.7-flashx" }),
|
||||
];
|
||||
|
||||
const mergedModels = [...existingModels];
|
||||
const seen = new Set(existingModels.map((m) => m.id));
|
||||
for (const model of defaultModels) {
|
||||
if (!seen.has(model.id)) {
|
||||
mergedModels.push(model);
|
||||
seen.add(model.id);
|
||||
}
|
||||
}
|
||||
const mergedModels = mergeProviderModels(existingProvider, defaultModels);
|
||||
|
||||
const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record<
|
||||
const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record<
|
||||
string,
|
||||
unknown
|
||||
> as { apiKey?: string };
|
||||
const resolvedApiKey = typeof existingApiKey === "string" ? existingApiKey : undefined;
|
||||
const normalizedApiKey = resolvedApiKey?.trim();
|
||||
const normalizedApiKey = getNormalizedProviderApiKey(existingProvider);
|
||||
|
||||
const baseUrl = params?.endpoint
|
||||
? resolveZaiBaseUrl(params.endpoint)
|
||||
@@ -256,12 +270,11 @@ export function applySyntheticProviderConfig(cfg: OpenClawConfig): OpenClawConfi
|
||||
(model) => !existingModels.some((existing) => existing.id === model.id),
|
||||
),
|
||||
];
|
||||
const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record<
|
||||
const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record<
|
||||
string,
|
||||
unknown
|
||||
> as { apiKey?: string };
|
||||
const resolvedApiKey = typeof existingApiKey === "string" ? existingApiKey : undefined;
|
||||
const normalizedApiKey = resolvedApiKey?.trim();
|
||||
const normalizedApiKey = getNormalizedProviderApiKey(existingProvider);
|
||||
providers.synthetic = {
|
||||
...existingProviderRest,
|
||||
baseUrl: SYNTHETIC_BASE_URL,
|
||||
@@ -609,7 +622,6 @@ function applyModelStudioProviderConfigWithBaseUrl(
|
||||
|
||||
const providers = { ...cfg.models?.providers };
|
||||
const existingProvider = providers.modelstudio;
|
||||
const existingModels = Array.isArray(existingProvider?.models) ? existingProvider.models : [];
|
||||
|
||||
const defaultModels = [
|
||||
buildModelStudioModelDefinition({ id: "qwen3.5-plus" }),
|
||||
@@ -622,21 +634,13 @@ function applyModelStudioProviderConfigWithBaseUrl(
|
||||
buildModelStudioModelDefinition({ id: "kimi-k2.5" }),
|
||||
];
|
||||
|
||||
const mergedModels = [...existingModels];
|
||||
const seen = new Set(existingModels.map((m) => m.id));
|
||||
for (const model of defaultModels) {
|
||||
if (!seen.has(model.id)) {
|
||||
mergedModels.push(model);
|
||||
seen.add(model.id);
|
||||
}
|
||||
}
|
||||
const mergedModels = mergeProviderModels(existingProvider, defaultModels);
|
||||
|
||||
const { apiKey: existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record<
|
||||
const { apiKey: _existingApiKey, ...existingProviderRest } = (existingProvider ?? {}) as Record<
|
||||
string,
|
||||
unknown
|
||||
> as { apiKey?: string };
|
||||
const resolvedApiKey = typeof existingApiKey === "string" ? existingApiKey : undefined;
|
||||
const normalizedApiKey = resolvedApiKey?.trim();
|
||||
const normalizedApiKey = getNormalizedProviderApiKey(existingProvider);
|
||||
|
||||
providers.modelstudio = {
|
||||
...existingProviderRest,
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { GatewayService } from "../daemon/service.js";
|
||||
import type { GatewayServiceEnvArgs } from "../daemon/service.js";
|
||||
import { readServiceStatusSummary } from "./status.service-summary.js";
|
||||
|
||||
function createService(overrides: Partial<GatewayService>): GatewayService {
|
||||
@@ -57,4 +58,41 @@ describe("readServiceStatusSummary", () => {
|
||||
expect(summary.externallyManaged).toBe(false);
|
||||
expect(summary.loadedText).toBe("disabled");
|
||||
});
|
||||
|
||||
it("passes command environment to runtime and loaded checks", async () => {
|
||||
const isLoaded = vi.fn(async ({ env }: GatewayServiceEnvArgs) => {
|
||||
return env?.OPENCLAW_GATEWAY_PORT === "18789";
|
||||
});
|
||||
const readRuntime = vi.fn(async (env?: NodeJS.ProcessEnv) => ({
|
||||
status: env?.OPENCLAW_GATEWAY_PORT === "18789" ? ("running" as const) : ("unknown" as const),
|
||||
}));
|
||||
|
||||
const summary = await readServiceStatusSummary(
|
||||
createService({
|
||||
isLoaded,
|
||||
readCommand: vi.fn(async () => ({
|
||||
programArguments: ["openclaw", "gateway", "run", "--port", "18789"],
|
||||
environment: { OPENCLAW_GATEWAY_PORT: "18789" },
|
||||
})),
|
||||
readRuntime,
|
||||
}),
|
||||
"Daemon",
|
||||
);
|
||||
|
||||
expect(isLoaded).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
env: expect.objectContaining({
|
||||
OPENCLAW_GATEWAY_PORT: "18789",
|
||||
}),
|
||||
}),
|
||||
);
|
||||
expect(readRuntime).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
OPENCLAW_GATEWAY_PORT: "18789",
|
||||
}),
|
||||
);
|
||||
expect(summary.installed).toBe(true);
|
||||
expect(summary.loaded).toBe(true);
|
||||
expect(summary.runtime).toMatchObject({ status: "running" });
|
||||
});
|
||||
});
|
||||
|
||||
@@ -16,10 +16,16 @@ export async function readServiceStatusSummary(
|
||||
fallbackLabel: string,
|
||||
): Promise<ServiceStatusSummary> {
|
||||
try {
|
||||
const [loaded, runtime, command] = await Promise.all([
|
||||
service.isLoaded({ env: process.env }).catch(() => false),
|
||||
service.readRuntime(process.env).catch(() => undefined),
|
||||
service.readCommand(process.env).catch(() => null),
|
||||
const command = await service.readCommand(process.env).catch(() => null);
|
||||
const serviceEnv = command?.environment
|
||||
? ({
|
||||
...process.env,
|
||||
...command.environment,
|
||||
} satisfies NodeJS.ProcessEnv)
|
||||
: process.env;
|
||||
const [loaded, runtime] = await Promise.all([
|
||||
service.isLoaded({ env: serviceEnv }).catch(() => false),
|
||||
service.readRuntime(serviceEnv).catch(() => undefined),
|
||||
]);
|
||||
const managedByOpenClaw = command != null;
|
||||
const externallyManaged = !managedByOpenClaw && runtime?.status === "running";
|
||||
|
||||
@@ -8,38 +8,42 @@ describe("talk config validation fail-closed behavior", () => {
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
async function expectInvalidTalkConfig(config: unknown, messagePattern: RegExp) {
|
||||
await withTempHomeConfig(config, async () => {
|
||||
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
|
||||
|
||||
let thrown: unknown;
|
||||
try {
|
||||
loadConfig();
|
||||
} catch (error) {
|
||||
thrown = error;
|
||||
}
|
||||
|
||||
expect(thrown).toBeInstanceOf(Error);
|
||||
expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG");
|
||||
expect((thrown as Error).message).toMatch(messagePattern);
|
||||
expect(consoleSpy).toHaveBeenCalled();
|
||||
});
|
||||
}
|
||||
|
||||
it.each([
|
||||
["boolean", true],
|
||||
["string", "1500"],
|
||||
["float", 1500.5],
|
||||
])("rejects %s talk.silenceTimeoutMs during config load", async (_label, value) => {
|
||||
await withTempHomeConfig(
|
||||
await expectInvalidTalkConfig(
|
||||
{
|
||||
agents: { list: [{ id: "main" }] },
|
||||
talk: {
|
||||
silenceTimeoutMs: value,
|
||||
},
|
||||
},
|
||||
async () => {
|
||||
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
|
||||
|
||||
let thrown: unknown;
|
||||
try {
|
||||
loadConfig();
|
||||
} catch (error) {
|
||||
thrown = error;
|
||||
}
|
||||
|
||||
expect(thrown).toBeInstanceOf(Error);
|
||||
expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG");
|
||||
expect((thrown as Error).message).toMatch(/silenceTimeoutMs|talk/i);
|
||||
expect(consoleSpy).toHaveBeenCalled();
|
||||
},
|
||||
/silenceTimeoutMs|talk/i,
|
||||
);
|
||||
});
|
||||
|
||||
it("rejects talk.provider when it does not match talk.providers during config load", async () => {
|
||||
await withTempHomeConfig(
|
||||
await expectInvalidTalkConfig(
|
||||
{
|
||||
agents: { list: [{ id: "main" }] },
|
||||
talk: {
|
||||
@@ -51,26 +55,12 @@ describe("talk config validation fail-closed behavior", () => {
|
||||
},
|
||||
},
|
||||
},
|
||||
async () => {
|
||||
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
|
||||
|
||||
let thrown: unknown;
|
||||
try {
|
||||
loadConfig();
|
||||
} catch (error) {
|
||||
thrown = error;
|
||||
}
|
||||
|
||||
expect(thrown).toBeInstanceOf(Error);
|
||||
expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG");
|
||||
expect((thrown as Error).message).toMatch(/talk\.provider|talk\.providers|acme/i);
|
||||
expect(consoleSpy).toHaveBeenCalled();
|
||||
},
|
||||
/talk\.provider|talk\.providers|acme/i,
|
||||
);
|
||||
});
|
||||
|
||||
it("rejects multi-provider talk config without talk.provider during config load", async () => {
|
||||
await withTempHomeConfig(
|
||||
await expectInvalidTalkConfig(
|
||||
{
|
||||
agents: { list: [{ id: "main" }] },
|
||||
talk: {
|
||||
@@ -84,21 +74,7 @@ describe("talk config validation fail-closed behavior", () => {
|
||||
},
|
||||
},
|
||||
},
|
||||
async () => {
|
||||
const consoleSpy = vi.spyOn(console, "error").mockImplementation(() => {});
|
||||
|
||||
let thrown: unknown;
|
||||
try {
|
||||
loadConfig();
|
||||
} catch (error) {
|
||||
thrown = error;
|
||||
}
|
||||
|
||||
expect(thrown).toBeInstanceOf(Error);
|
||||
expect((thrown as { code?: string } | undefined)?.code).toBe("INVALID_CONFIG");
|
||||
expect((thrown as Error).message).toMatch(/talk\.provider|required/i);
|
||||
expect(consoleSpy).toHaveBeenCalled();
|
||||
},
|
||||
/talk\.provider|required/i,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { withTempDir } from "../test-helpers/temp-dir.js";
|
||||
import {
|
||||
resolveDefaultConfigCandidates,
|
||||
resolveConfigPathCandidate,
|
||||
@@ -37,15 +37,6 @@ describe("oauth paths", () => {
|
||||
});
|
||||
|
||||
describe("state + config path candidates", () => {
|
||||
async function withTempRoot(prefix: string, run: (root: string) => Promise<void>): Promise<void> {
|
||||
const root = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
|
||||
try {
|
||||
await run(root);
|
||||
} finally {
|
||||
await fs.rm(root, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
function expectOpenClawHomeDefaults(env: NodeJS.ProcessEnv): void {
|
||||
const configuredHome = env.OPENCLAW_HOME;
|
||||
if (!configuredHome) {
|
||||
@@ -107,7 +98,7 @@ describe("state + config path candidates", () => {
|
||||
});
|
||||
|
||||
it("prefers ~/.openclaw when it exists and legacy dir is missing", async () => {
|
||||
await withTempRoot("openclaw-state-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-state-" }, async (root) => {
|
||||
const newDir = path.join(root, ".openclaw");
|
||||
await fs.mkdir(newDir, { recursive: true });
|
||||
const resolved = resolveStateDir({} as NodeJS.ProcessEnv, () => root);
|
||||
@@ -116,7 +107,7 @@ describe("state + config path candidates", () => {
|
||||
});
|
||||
|
||||
it("falls back to existing legacy state dir when ~/.openclaw is missing", async () => {
|
||||
await withTempRoot("openclaw-state-legacy-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-state-legacy-" }, async (root) => {
|
||||
const legacyDir = path.join(root, ".clawdbot");
|
||||
await fs.mkdir(legacyDir, { recursive: true });
|
||||
const resolved = resolveStateDir({} as NodeJS.ProcessEnv, () => root);
|
||||
@@ -125,7 +116,7 @@ describe("state + config path candidates", () => {
|
||||
});
|
||||
|
||||
it("CONFIG_PATH prefers existing config when present", async () => {
|
||||
await withTempRoot("openclaw-config-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-config-" }, async (root) => {
|
||||
const legacyDir = path.join(root, ".openclaw");
|
||||
await fs.mkdir(legacyDir, { recursive: true });
|
||||
const legacyPath = path.join(legacyDir, "openclaw.json");
|
||||
@@ -137,7 +128,7 @@ describe("state + config path candidates", () => {
|
||||
});
|
||||
|
||||
it("respects state dir overrides when config is missing", async () => {
|
||||
await withTempRoot("openclaw-config-override-", async (root) => {
|
||||
await withTempDir({ prefix: "openclaw-config-override-" }, async (root) => {
|
||||
const legacyDir = path.join(root, ".openclaw");
|
||||
await fs.mkdir(legacyDir, { recursive: true });
|
||||
const legacyConfig = path.join(legacyDir, "openclaw.json");
|
||||
|
||||
@@ -4,7 +4,7 @@ export type BrowserProfileConfig = {
|
||||
/** CDP URL for this profile (use for remote Chrome). */
|
||||
cdpUrl?: string;
|
||||
/** Profile driver (default: openclaw). */
|
||||
driver?: "openclaw" | "clawd" | "extension";
|
||||
driver?: "openclaw" | "clawd" | "extension" | "existing-session";
|
||||
/** If true, never launch a browser for this profile; only attach. Falls back to browser.attachOnly. */
|
||||
attachOnly?: boolean;
|
||||
/** Profile color (hex). Auto-assigned at creation. */
|
||||
|
||||
@@ -360,7 +360,12 @@ export const OpenClawSchema = z
|
||||
cdpPort: z.number().int().min(1).max(65535).optional(),
|
||||
cdpUrl: z.string().optional(),
|
||||
driver: z
|
||||
.union([z.literal("openclaw"), z.literal("clawd"), z.literal("extension")])
|
||||
.union([
|
||||
z.literal("openclaw"),
|
||||
z.literal("clawd"),
|
||||
z.literal("extension"),
|
||||
z.literal("existing-session"),
|
||||
])
|
||||
.optional(),
|
||||
attachOnly: z.boolean().optional(),
|
||||
color: HexColorSchema,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import "./isolated-agent.mocks.js";
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
|
||||
import { createCliDeps, mockAgentPayloads } from "./isolated-agent.delivery.test-helpers.js";
|
||||
import { runCronIsolatedAgentTurn } from "./isolated-agent.js";
|
||||
import {
|
||||
makeCfg,
|
||||
@@ -9,27 +10,6 @@ import {
|
||||
writeSessionStoreEntries,
|
||||
} from "./isolated-agent.test-harness.js";
|
||||
|
||||
function makeDeps() {
|
||||
return {
|
||||
sendMessageSlack: vi.fn(),
|
||||
sendMessageWhatsApp: vi.fn(),
|
||||
sendMessageTelegram: vi.fn(),
|
||||
sendMessageDiscord: vi.fn(),
|
||||
sendMessageSignal: vi.fn(),
|
||||
sendMessageIMessage: vi.fn(),
|
||||
};
|
||||
}
|
||||
|
||||
function mockEmbeddedOk() {
|
||||
vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
|
||||
payloads: [{ text: "ok" }],
|
||||
meta: {
|
||||
durationMs: 5,
|
||||
agentMeta: { sessionId: "s", provider: "p", model: "m" },
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
function lastEmbeddedLane(): string | undefined {
|
||||
const calls = vi.mocked(runEmbeddedPiAgent).mock.calls;
|
||||
expect(calls.length).toBeGreaterThan(0);
|
||||
@@ -45,11 +25,11 @@ async function runLaneCase(home: string, lane?: string) {
|
||||
lastTo: "",
|
||||
},
|
||||
});
|
||||
mockEmbeddedOk();
|
||||
mockAgentPayloads([{ text: "ok" }]);
|
||||
|
||||
await runCronIsolatedAgentTurn({
|
||||
cfg: makeCfg(home, storePath),
|
||||
deps: makeDeps(),
|
||||
deps: createCliDeps(),
|
||||
job: makeJob({ kind: "agentTurn", message: "do it", deliver: false }),
|
||||
message: "do it",
|
||||
sessionKey: "cron:job-1",
|
||||
|
||||
@@ -2,6 +2,7 @@ import "./isolated-agent.mocks.js";
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { loadModelCatalog } from "../agents/model-catalog.js";
|
||||
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
|
||||
import { createCliDeps, mockAgentPayloads } from "./isolated-agent.delivery.test-helpers.js";
|
||||
import { runCronIsolatedAgentTurn } from "./isolated-agent.js";
|
||||
import {
|
||||
makeCfg,
|
||||
@@ -13,27 +14,6 @@ import type { CronJob } from "./types.js";
|
||||
|
||||
const withTempHome = withTempCronHome;
|
||||
|
||||
function makeDeps() {
|
||||
return {
|
||||
sendMessageSlack: vi.fn(),
|
||||
sendMessageWhatsApp: vi.fn(),
|
||||
sendMessageTelegram: vi.fn(),
|
||||
sendMessageDiscord: vi.fn(),
|
||||
sendMessageSignal: vi.fn(),
|
||||
sendMessageIMessage: vi.fn(),
|
||||
};
|
||||
}
|
||||
|
||||
function mockEmbeddedOk() {
|
||||
vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
|
||||
payloads: [{ text: "ok" }],
|
||||
meta: {
|
||||
durationMs: 5,
|
||||
agentMeta: { sessionId: "s", provider: "p", model: "m" },
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract the provider and model from the last runEmbeddedPiAgent call.
|
||||
*/
|
||||
@@ -62,7 +42,7 @@ async function runTurnCore(home: string, options: TurnOptions = {}) {
|
||||
},
|
||||
...options.storeEntries,
|
||||
});
|
||||
mockEmbeddedOk();
|
||||
mockAgentPayloads([{ text: "ok" }]);
|
||||
|
||||
const jobPayload = options.jobPayload ?? {
|
||||
kind: "agentTurn" as const,
|
||||
@@ -72,7 +52,7 @@ async function runTurnCore(home: string, options: TurnOptions = {}) {
|
||||
|
||||
const res = await runCronIsolatedAgentTurn({
|
||||
cfg: makeCfg(home, storePath, options.cfgOverrides),
|
||||
deps: makeDeps(),
|
||||
deps: createCliDeps(),
|
||||
job: makeJob(jobPayload),
|
||||
message: DEFAULT_MESSAGE,
|
||||
sessionKey: options.sessionKey ?? "cron:job-1",
|
||||
@@ -310,7 +290,7 @@ describe("cron model formatting and precedence edge cases", () => {
|
||||
|
||||
// Step 2: No job model, session store says openai
|
||||
vi.mocked(runEmbeddedPiAgent).mockClear();
|
||||
mockEmbeddedOk();
|
||||
mockAgentPayloads([{ text: "ok" }]);
|
||||
const step2 = await runTurn(home, {
|
||||
jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false },
|
||||
storeEntries: {
|
||||
@@ -327,7 +307,7 @@ describe("cron model formatting and precedence edge cases", () => {
|
||||
|
||||
// Step 3: Job payload says anthropic, session store still says openai
|
||||
vi.mocked(runEmbeddedPiAgent).mockClear();
|
||||
mockEmbeddedOk();
|
||||
mockAgentPayloads([{ text: "ok" }]);
|
||||
const step3 = await runTurn(home, {
|
||||
jobPayload: {
|
||||
kind: "agentTurn",
|
||||
@@ -365,7 +345,7 @@ describe("cron model formatting and precedence edge cases", () => {
|
||||
|
||||
// Run 2: no override — must revert to default anthropic
|
||||
vi.mocked(runEmbeddedPiAgent).mockClear();
|
||||
mockEmbeddedOk();
|
||||
mockAgentPayloads([{ text: "ok" }]);
|
||||
const r2 = await runTurn(home, {
|
||||
jobPayload: { kind: "agentTurn", message: DEFAULT_MESSAGE, deliver: false },
|
||||
});
|
||||
|
||||
@@ -133,6 +133,16 @@ async function runTelegramDeliveryResult(bestEffort: boolean) {
|
||||
return outcome;
|
||||
}
|
||||
|
||||
function expectSuccessfulTelegramTextDelivery(params: {
|
||||
res: Awaited<ReturnType<typeof runCronIsolatedAgentTurn>>;
|
||||
deps: CliDeps;
|
||||
}): void {
|
||||
expect(params.res.status).toBe("ok");
|
||||
expect(params.res.delivered).toBe(true);
|
||||
expect(params.res.deliveryAttempted).toBe(true);
|
||||
expect(runSubagentAnnounceFlow).not.toHaveBeenCalled();
|
||||
}
|
||||
|
||||
async function runSignalDeliveryResult(bestEffort: boolean) {
|
||||
let outcome:
|
||||
| {
|
||||
@@ -379,31 +389,11 @@ describe("runCronIsolatedAgentTurn", () => {
|
||||
});
|
||||
|
||||
it("delivers text directly when best-effort is disabled", async () => {
|
||||
await withTempHome(async (home) => {
|
||||
const storePath = await writeSessionStore(home, { lastProvider: "webchat", lastTo: "" });
|
||||
const deps = createCliDeps();
|
||||
mockAgentPayloads([{ text: "hello from cron" }]);
|
||||
|
||||
const res = await runTelegramAnnounceTurn({
|
||||
home,
|
||||
storePath,
|
||||
deps,
|
||||
delivery: {
|
||||
mode: "announce",
|
||||
channel: "telegram",
|
||||
to: "123",
|
||||
bestEffort: false,
|
||||
},
|
||||
});
|
||||
|
||||
expect(res.status).toBe("ok");
|
||||
expect(res.delivered).toBe(true);
|
||||
expect(res.deliveryAttempted).toBe(true);
|
||||
expect(runSubagentAnnounceFlow).not.toHaveBeenCalled();
|
||||
expectDirectTelegramDelivery(deps, {
|
||||
chatId: "123",
|
||||
text: "hello from cron",
|
||||
});
|
||||
const { res, deps } = await runTelegramDeliveryResult(false);
|
||||
expectSuccessfulTelegramTextDelivery({ res, deps });
|
||||
expectDirectTelegramDelivery(deps, {
|
||||
chatId: "123",
|
||||
text: "hello from cron",
|
||||
});
|
||||
});
|
||||
|
||||
@@ -459,10 +449,7 @@ describe("runCronIsolatedAgentTurn", () => {
|
||||
},
|
||||
});
|
||||
|
||||
expect(res.status).toBe("ok");
|
||||
expect(res.delivered).toBe(true);
|
||||
expect(res.deliveryAttempted).toBe(true);
|
||||
expect(runSubagentAnnounceFlow).not.toHaveBeenCalled();
|
||||
expectSuccessfulTelegramTextDelivery({ res, deps });
|
||||
expect(deps.sendMessageTelegram).toHaveBeenCalledTimes(2);
|
||||
expect(deps.sendMessageTelegram).toHaveBeenLastCalledWith(
|
||||
"123",
|
||||
@@ -490,10 +477,7 @@ describe("runCronIsolatedAgentTurn", () => {
|
||||
|
||||
it("delivers text directly when best-effort is enabled", async () => {
|
||||
const { res, deps } = await runTelegramDeliveryResult(true);
|
||||
expect(res.status).toBe("ok");
|
||||
expect(res.delivered).toBe(true);
|
||||
expect(res.deliveryAttempted).toBe(true);
|
||||
expect(runSubagentAnnounceFlow).not.toHaveBeenCalled();
|
||||
expectSuccessfulTelegramTextDelivery({ res, deps });
|
||||
expectDirectTelegramDelivery(deps, {
|
||||
chatId: "123",
|
||||
text: "hello from cron",
|
||||
|
||||
@@ -14,169 +14,102 @@ import {
|
||||
|
||||
const runCronIsolatedAgentTurn = await loadRunCronIsolatedAgentTurn();
|
||||
|
||||
const OPENAI_GPT4_MODEL = "openai/gpt-4";
|
||||
|
||||
function mockSuccessfulModelFallback() {
|
||||
runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => {
|
||||
await run(provider, model);
|
||||
return {
|
||||
result: {
|
||||
payloads: [{ text: "ok" }],
|
||||
meta: { agentMeta: { usage: { input: 10, output: 20 } } },
|
||||
},
|
||||
provider,
|
||||
model,
|
||||
attempts: [],
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
async function runFastModeCase(params: {
|
||||
configFastMode: boolean;
|
||||
expectedFastMode: boolean;
|
||||
message: string;
|
||||
sessionFastMode?: boolean;
|
||||
}) {
|
||||
const baseSession = makeCronSession();
|
||||
resolveCronSessionMock.mockReturnValue(
|
||||
params.sessionFastMode === undefined
|
||||
? baseSession
|
||||
: makeCronSession({
|
||||
sessionEntry: {
|
||||
...baseSession.sessionEntry,
|
||||
fastMode: params.sessionFastMode,
|
||||
},
|
||||
}),
|
||||
);
|
||||
mockSuccessfulModelFallback();
|
||||
|
||||
const result = await runCronIsolatedAgentTurn(
|
||||
makeIsolatedAgentTurnParams({
|
||||
cfg: {
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
[OPENAI_GPT4_MODEL]: {
|
||||
params: {
|
||||
fastMode: params.configFastMode,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
job: makeIsolatedAgentTurnJob({
|
||||
payload: {
|
||||
kind: "agentTurn",
|
||||
message: params.message,
|
||||
model: OPENAI_GPT4_MODEL,
|
||||
},
|
||||
}),
|
||||
}),
|
||||
);
|
||||
|
||||
expect(result.status).toBe("ok");
|
||||
expect(runEmbeddedPiAgentMock).toHaveBeenCalledOnce();
|
||||
expect(runEmbeddedPiAgentMock.mock.calls[0][0]).toMatchObject({
|
||||
provider: "openai",
|
||||
model: "gpt-4",
|
||||
fastMode: params.expectedFastMode,
|
||||
});
|
||||
}
|
||||
|
||||
describe("runCronIsolatedAgentTurn — fast mode", () => {
|
||||
setupRunCronIsolatedAgentTurnSuite();
|
||||
|
||||
it("passes config-driven fast mode into embedded cron runs", async () => {
|
||||
const cronSession = makeCronSession();
|
||||
resolveCronSessionMock.mockReturnValue(cronSession);
|
||||
|
||||
runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => {
|
||||
await run(provider, model);
|
||||
return {
|
||||
result: {
|
||||
payloads: [{ text: "ok" }],
|
||||
meta: { agentMeta: { usage: { input: 10, output: 20 } } },
|
||||
},
|
||||
provider,
|
||||
model,
|
||||
attempts: [],
|
||||
};
|
||||
});
|
||||
|
||||
const result = await runCronIsolatedAgentTurn(
|
||||
makeIsolatedAgentTurnParams({
|
||||
cfg: {
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"openai/gpt-4": {
|
||||
params: {
|
||||
fastMode: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
job: makeIsolatedAgentTurnJob({
|
||||
payload: {
|
||||
kind: "agentTurn",
|
||||
message: "test fast mode",
|
||||
model: "openai/gpt-4",
|
||||
},
|
||||
}),
|
||||
}),
|
||||
);
|
||||
|
||||
expect(result.status).toBe("ok");
|
||||
expect(runEmbeddedPiAgentMock).toHaveBeenCalledOnce();
|
||||
expect(runEmbeddedPiAgentMock.mock.calls[0][0]).toMatchObject({
|
||||
provider: "openai",
|
||||
model: "gpt-4",
|
||||
fastMode: true,
|
||||
await runFastModeCase({
|
||||
configFastMode: true,
|
||||
expectedFastMode: true,
|
||||
message: "test fast mode",
|
||||
});
|
||||
});
|
||||
|
||||
it("honors session fastMode=false over config fastMode=true", async () => {
|
||||
const cronSession = makeCronSession({
|
||||
sessionEntry: {
|
||||
...makeCronSession().sessionEntry,
|
||||
fastMode: false,
|
||||
},
|
||||
});
|
||||
resolveCronSessionMock.mockReturnValue(cronSession);
|
||||
|
||||
runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => {
|
||||
await run(provider, model);
|
||||
return {
|
||||
result: {
|
||||
payloads: [{ text: "ok" }],
|
||||
meta: { agentMeta: { usage: { input: 10, output: 20 } } },
|
||||
},
|
||||
provider,
|
||||
model,
|
||||
attempts: [],
|
||||
};
|
||||
});
|
||||
|
||||
const result = await runCronIsolatedAgentTurn(
|
||||
makeIsolatedAgentTurnParams({
|
||||
cfg: {
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"openai/gpt-4": {
|
||||
params: {
|
||||
fastMode: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
job: makeIsolatedAgentTurnJob({
|
||||
payload: {
|
||||
kind: "agentTurn",
|
||||
message: "test fast mode override",
|
||||
model: "openai/gpt-4",
|
||||
},
|
||||
}),
|
||||
}),
|
||||
);
|
||||
|
||||
expect(result.status).toBe("ok");
|
||||
expect(runEmbeddedPiAgentMock).toHaveBeenCalledOnce();
|
||||
expect(runEmbeddedPiAgentMock.mock.calls[0][0]).toMatchObject({
|
||||
provider: "openai",
|
||||
model: "gpt-4",
|
||||
fastMode: false,
|
||||
await runFastModeCase({
|
||||
configFastMode: true,
|
||||
expectedFastMode: false,
|
||||
message: "test fast mode override",
|
||||
sessionFastMode: false,
|
||||
});
|
||||
});
|
||||
|
||||
it("honors session fastMode=true over config fastMode=false", async () => {
|
||||
const cronSession = makeCronSession({
|
||||
sessionEntry: {
|
||||
...makeCronSession().sessionEntry,
|
||||
fastMode: true,
|
||||
},
|
||||
});
|
||||
resolveCronSessionMock.mockReturnValue(cronSession);
|
||||
|
||||
runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => {
|
||||
await run(provider, model);
|
||||
return {
|
||||
result: {
|
||||
payloads: [{ text: "ok" }],
|
||||
meta: { agentMeta: { usage: { input: 10, output: 20 } } },
|
||||
},
|
||||
provider,
|
||||
model,
|
||||
attempts: [],
|
||||
};
|
||||
});
|
||||
|
||||
const result = await runCronIsolatedAgentTurn(
|
||||
makeIsolatedAgentTurnParams({
|
||||
cfg: {
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"openai/gpt-4": {
|
||||
params: {
|
||||
fastMode: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
job: makeIsolatedAgentTurnJob({
|
||||
payload: {
|
||||
kind: "agentTurn",
|
||||
message: "test fast mode session override",
|
||||
model: "openai/gpt-4",
|
||||
},
|
||||
}),
|
||||
}),
|
||||
);
|
||||
|
||||
expect(result.status).toBe("ok");
|
||||
expect(runEmbeddedPiAgentMock).toHaveBeenCalledOnce();
|
||||
expect(runEmbeddedPiAgentMock.mock.calls[0][0]).toMatchObject({
|
||||
provider: "openai",
|
||||
model: "gpt-4",
|
||||
fastMode: true,
|
||||
await runFastModeCase({
|
||||
configFastMode: false,
|
||||
expectedFastMode: true,
|
||||
message: "test fast mode session override",
|
||||
sessionFastMode: true,
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -47,326 +47,274 @@ describe("CronService restart catch-up", () => {
|
||||
};
|
||||
}
|
||||
|
||||
it("executes an overdue recurring job immediately on start", async () => {
|
||||
async function withRestartedCron(
|
||||
jobs: unknown[],
|
||||
run: (params: {
|
||||
cron: CronService;
|
||||
enqueueSystemEvent: ReturnType<typeof vi.fn>;
|
||||
requestHeartbeatNow: ReturnType<typeof vi.fn>;
|
||||
}) => Promise<void>,
|
||||
) {
|
||||
const store = await makeStorePath();
|
||||
const enqueueSystemEvent = vi.fn();
|
||||
const requestHeartbeatNow = vi.fn();
|
||||
|
||||
await writeStoreJobs(store.storePath, jobs);
|
||||
|
||||
const cron = createRestartCronService({
|
||||
storePath: store.storePath,
|
||||
enqueueSystemEvent,
|
||||
requestHeartbeatNow,
|
||||
});
|
||||
|
||||
try {
|
||||
await cron.start();
|
||||
await run({ cron, enqueueSystemEvent, requestHeartbeatNow });
|
||||
} finally {
|
||||
cron.stop();
|
||||
await store.cleanup();
|
||||
}
|
||||
}
|
||||
|
||||
it("executes an overdue recurring job immediately on start", async () => {
|
||||
const dueAt = Date.parse("2025-12-13T15:00:00.000Z");
|
||||
const lastRunAt = Date.parse("2025-12-12T15:00:00.000Z");
|
||||
|
||||
await writeStoreJobs(store.storePath, [
|
||||
{
|
||||
id: "restart-overdue-job",
|
||||
name: "daily digest",
|
||||
enabled: true,
|
||||
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2025-12-12T15:00:00.000Z"),
|
||||
schedule: { kind: "cron", expr: "0 15 * * *", tz: "UTC" },
|
||||
sessionTarget: "main",
|
||||
wakeMode: "next-heartbeat",
|
||||
payload: { kind: "systemEvent", text: "digest now" },
|
||||
state: {
|
||||
nextRunAtMs: dueAt,
|
||||
lastRunAtMs: lastRunAt,
|
||||
lastStatus: "ok",
|
||||
await withRestartedCron(
|
||||
[
|
||||
{
|
||||
id: "restart-overdue-job",
|
||||
name: "daily digest",
|
||||
enabled: true,
|
||||
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2025-12-12T15:00:00.000Z"),
|
||||
schedule: { kind: "cron", expr: "0 15 * * *", tz: "UTC" },
|
||||
sessionTarget: "main",
|
||||
wakeMode: "next-heartbeat",
|
||||
payload: { kind: "systemEvent", text: "digest now" },
|
||||
state: {
|
||||
nextRunAtMs: dueAt,
|
||||
lastRunAtMs: lastRunAt,
|
||||
lastStatus: "ok",
|
||||
},
|
||||
},
|
||||
],
|
||||
async ({ cron, enqueueSystemEvent, requestHeartbeatNow }) => {
|
||||
expect(enqueueSystemEvent).toHaveBeenCalledWith(
|
||||
"digest now",
|
||||
expect.objectContaining({ agentId: undefined }),
|
||||
);
|
||||
expect(requestHeartbeatNow).toHaveBeenCalled();
|
||||
|
||||
const listedJobs = await cron.list({ includeDisabled: true });
|
||||
const updated = listedJobs.find((job) => job.id === "restart-overdue-job");
|
||||
expect(updated?.state.lastStatus).toBe("ok");
|
||||
expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T17:00:00.000Z"));
|
||||
expect(updated?.state.nextRunAtMs).toBeGreaterThan(Date.parse("2025-12-13T17:00:00.000Z"));
|
||||
},
|
||||
]);
|
||||
|
||||
const cron = createRestartCronService({
|
||||
storePath: store.storePath,
|
||||
enqueueSystemEvent,
|
||||
requestHeartbeatNow,
|
||||
});
|
||||
|
||||
await cron.start();
|
||||
|
||||
expect(enqueueSystemEvent).toHaveBeenCalledWith(
|
||||
"digest now",
|
||||
expect.objectContaining({ agentId: undefined }),
|
||||
);
|
||||
expect(requestHeartbeatNow).toHaveBeenCalled();
|
||||
|
||||
const jobs = await cron.list({ includeDisabled: true });
|
||||
const updated = jobs.find((job) => job.id === "restart-overdue-job");
|
||||
expect(updated?.state.lastStatus).toBe("ok");
|
||||
expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T17:00:00.000Z"));
|
||||
expect(updated?.state.nextRunAtMs).toBeGreaterThan(Date.parse("2025-12-13T17:00:00.000Z"));
|
||||
|
||||
cron.stop();
|
||||
await store.cleanup();
|
||||
});
|
||||
|
||||
it("clears stale running markers without replaying interrupted startup jobs", async () => {
|
||||
const store = await makeStorePath();
|
||||
const enqueueSystemEvent = vi.fn();
|
||||
const requestHeartbeatNow = vi.fn();
|
||||
|
||||
const dueAt = Date.parse("2025-12-13T16:00:00.000Z");
|
||||
const staleRunningAt = Date.parse("2025-12-13T16:30:00.000Z");
|
||||
|
||||
await writeStoreJobs(store.storePath, [
|
||||
{
|
||||
id: "restart-stale-running",
|
||||
name: "daily stale marker",
|
||||
enabled: true,
|
||||
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"),
|
||||
schedule: { kind: "cron", expr: "0 16 * * *", tz: "UTC" },
|
||||
sessionTarget: "main",
|
||||
wakeMode: "next-heartbeat",
|
||||
payload: { kind: "systemEvent", text: "resume stale marker" },
|
||||
state: {
|
||||
nextRunAtMs: dueAt,
|
||||
runningAtMs: staleRunningAt,
|
||||
await withRestartedCron(
|
||||
[
|
||||
{
|
||||
id: "restart-stale-running",
|
||||
name: "daily stale marker",
|
||||
enabled: true,
|
||||
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"),
|
||||
schedule: { kind: "cron", expr: "0 16 * * *", tz: "UTC" },
|
||||
sessionTarget: "main",
|
||||
wakeMode: "next-heartbeat",
|
||||
payload: { kind: "systemEvent", text: "resume stale marker" },
|
||||
state: {
|
||||
nextRunAtMs: dueAt,
|
||||
runningAtMs: staleRunningAt,
|
||||
},
|
||||
},
|
||||
],
|
||||
async ({ cron, enqueueSystemEvent }) => {
|
||||
expect(enqueueSystemEvent).not.toHaveBeenCalled();
|
||||
expect(noopLogger.warn).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ jobId: "restart-stale-running" }),
|
||||
"cron: clearing stale running marker on startup",
|
||||
);
|
||||
|
||||
const listedJobs = await cron.list({ includeDisabled: true });
|
||||
const updated = listedJobs.find((job) => job.id === "restart-stale-running");
|
||||
expect(updated?.state.runningAtMs).toBeUndefined();
|
||||
expect(updated?.state.lastStatus).toBeUndefined();
|
||||
expect(updated?.state.lastRunAtMs).toBeUndefined();
|
||||
expect((updated?.state.nextRunAtMs ?? 0) > Date.parse("2025-12-13T17:00:00.000Z")).toBe(
|
||||
true,
|
||||
);
|
||||
},
|
||||
]);
|
||||
|
||||
const cron = createRestartCronService({
|
||||
storePath: store.storePath,
|
||||
enqueueSystemEvent,
|
||||
requestHeartbeatNow,
|
||||
});
|
||||
|
||||
await cron.start();
|
||||
|
||||
expect(enqueueSystemEvent).not.toHaveBeenCalled();
|
||||
expect(noopLogger.warn).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ jobId: "restart-stale-running" }),
|
||||
"cron: clearing stale running marker on startup",
|
||||
);
|
||||
|
||||
const jobs = await cron.list({ includeDisabled: true });
|
||||
const updated = jobs.find((job) => job.id === "restart-stale-running");
|
||||
expect(updated?.state.runningAtMs).toBeUndefined();
|
||||
expect(updated?.state.lastStatus).toBeUndefined();
|
||||
expect(updated?.state.lastRunAtMs).toBeUndefined();
|
||||
expect((updated?.state.nextRunAtMs ?? 0) > Date.parse("2025-12-13T17:00:00.000Z")).toBe(true);
|
||||
|
||||
cron.stop();
|
||||
await store.cleanup();
|
||||
});
|
||||
it("replays the most recent missed cron slot after restart when nextRunAtMs already advanced", async () => {
|
||||
vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z"));
|
||||
const store = await makeStorePath();
|
||||
const enqueueSystemEvent = vi.fn();
|
||||
const requestHeartbeatNow = vi.fn();
|
||||
|
||||
await writeStoreJobs(store.storePath, [
|
||||
{
|
||||
id: "restart-missed-slot",
|
||||
name: "every ten minutes +1",
|
||||
enabled: true,
|
||||
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
|
||||
schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" },
|
||||
sessionTarget: "main",
|
||||
wakeMode: "next-heartbeat",
|
||||
payload: { kind: "systemEvent", text: "catch missed slot" },
|
||||
state: {
|
||||
// Persisted state may already be recomputed from restart time and
|
||||
// point to the future slot, even though 04:01 was missed.
|
||||
nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"),
|
||||
lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"),
|
||||
lastStatus: "ok",
|
||||
await withRestartedCron(
|
||||
[
|
||||
{
|
||||
id: "restart-missed-slot",
|
||||
name: "every ten minutes +1",
|
||||
enabled: true,
|
||||
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
|
||||
schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" },
|
||||
sessionTarget: "main",
|
||||
wakeMode: "next-heartbeat",
|
||||
payload: { kind: "systemEvent", text: "catch missed slot" },
|
||||
state: {
|
||||
// Persisted state may already be recomputed from restart time and
|
||||
// point to the future slot, even though 04:01 was missed.
|
||||
nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"),
|
||||
lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"),
|
||||
lastStatus: "ok",
|
||||
},
|
||||
},
|
||||
],
|
||||
async ({ cron, enqueueSystemEvent, requestHeartbeatNow }) => {
|
||||
expect(enqueueSystemEvent).toHaveBeenCalledWith(
|
||||
"catch missed slot",
|
||||
expect.objectContaining({ agentId: undefined }),
|
||||
);
|
||||
expect(requestHeartbeatNow).toHaveBeenCalled();
|
||||
|
||||
const listedJobs = await cron.list({ includeDisabled: true });
|
||||
const updated = listedJobs.find((job) => job.id === "restart-missed-slot");
|
||||
expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T04:02:00.000Z"));
|
||||
},
|
||||
]);
|
||||
|
||||
const cron = createRestartCronService({
|
||||
storePath: store.storePath,
|
||||
enqueueSystemEvent,
|
||||
requestHeartbeatNow,
|
||||
});
|
||||
|
||||
await cron.start();
|
||||
|
||||
expect(enqueueSystemEvent).toHaveBeenCalledWith(
|
||||
"catch missed slot",
|
||||
expect.objectContaining({ agentId: undefined }),
|
||||
);
|
||||
expect(requestHeartbeatNow).toHaveBeenCalled();
|
||||
|
||||
const jobs = await cron.list({ includeDisabled: true });
|
||||
const updated = jobs.find((job) => job.id === "restart-missed-slot");
|
||||
expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T04:02:00.000Z"));
|
||||
|
||||
cron.stop();
|
||||
await store.cleanup();
|
||||
});
|
||||
|
||||
it("does not replay interrupted one-shot jobs on startup", async () => {
|
||||
const store = await makeStorePath();
|
||||
const enqueueSystemEvent = vi.fn();
|
||||
const requestHeartbeatNow = vi.fn();
|
||||
|
||||
const dueAt = Date.parse("2025-12-13T16:00:00.000Z");
|
||||
const staleRunningAt = Date.parse("2025-12-13T16:30:00.000Z");
|
||||
|
||||
await writeStoreJobs(store.storePath, [
|
||||
{
|
||||
id: "restart-stale-one-shot",
|
||||
name: "one shot stale marker",
|
||||
enabled: true,
|
||||
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"),
|
||||
schedule: { kind: "at", at: "2025-12-13T16:00:00.000Z" },
|
||||
sessionTarget: "main",
|
||||
wakeMode: "next-heartbeat",
|
||||
payload: { kind: "systemEvent", text: "one-shot stale marker" },
|
||||
state: {
|
||||
nextRunAtMs: dueAt,
|
||||
runningAtMs: staleRunningAt,
|
||||
await withRestartedCron(
|
||||
[
|
||||
{
|
||||
id: "restart-stale-one-shot",
|
||||
name: "one shot stale marker",
|
||||
enabled: true,
|
||||
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2025-12-13T16:30:00.000Z"),
|
||||
schedule: { kind: "at", at: "2025-12-13T16:00:00.000Z" },
|
||||
sessionTarget: "main",
|
||||
wakeMode: "next-heartbeat",
|
||||
payload: { kind: "systemEvent", text: "one-shot stale marker" },
|
||||
state: {
|
||||
nextRunAtMs: dueAt,
|
||||
runningAtMs: staleRunningAt,
|
||||
},
|
||||
},
|
||||
],
|
||||
async ({ cron, enqueueSystemEvent, requestHeartbeatNow }) => {
|
||||
expect(enqueueSystemEvent).not.toHaveBeenCalled();
|
||||
expect(requestHeartbeatNow).not.toHaveBeenCalled();
|
||||
|
||||
const listedJobs = await cron.list({ includeDisabled: true });
|
||||
const updated = listedJobs.find((job) => job.id === "restart-stale-one-shot");
|
||||
expect(updated?.state.runningAtMs).toBeUndefined();
|
||||
},
|
||||
]);
|
||||
|
||||
const cron = createRestartCronService({
|
||||
storePath: store.storePath,
|
||||
enqueueSystemEvent,
|
||||
requestHeartbeatNow,
|
||||
});
|
||||
|
||||
await cron.start();
|
||||
|
||||
expect(enqueueSystemEvent).not.toHaveBeenCalled();
|
||||
expect(requestHeartbeatNow).not.toHaveBeenCalled();
|
||||
|
||||
const jobs = await cron.list({ includeDisabled: true });
|
||||
const updated = jobs.find((job) => job.id === "restart-stale-one-shot");
|
||||
expect(updated?.state.runningAtMs).toBeUndefined();
|
||||
|
||||
cron.stop();
|
||||
await store.cleanup();
|
||||
);
|
||||
});
|
||||
|
||||
it("does not replay cron slot when the latest slot already ran before restart", async () => {
|
||||
vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z"));
|
||||
const store = await makeStorePath();
|
||||
const enqueueSystemEvent = vi.fn();
|
||||
const requestHeartbeatNow = vi.fn();
|
||||
|
||||
await writeStoreJobs(store.storePath, [
|
||||
{
|
||||
id: "restart-no-duplicate-slot",
|
||||
name: "every ten minutes +1 no duplicate",
|
||||
enabled: true,
|
||||
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
|
||||
schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" },
|
||||
sessionTarget: "main",
|
||||
wakeMode: "next-heartbeat",
|
||||
payload: { kind: "systemEvent", text: "already ran" },
|
||||
state: {
|
||||
nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"),
|
||||
lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
|
||||
lastStatus: "ok",
|
||||
await withRestartedCron(
|
||||
[
|
||||
{
|
||||
id: "restart-no-duplicate-slot",
|
||||
name: "every ten minutes +1 no duplicate",
|
||||
enabled: true,
|
||||
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
|
||||
schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" },
|
||||
sessionTarget: "main",
|
||||
wakeMode: "next-heartbeat",
|
||||
payload: { kind: "systemEvent", text: "already ran" },
|
||||
state: {
|
||||
nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"),
|
||||
lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
|
||||
lastStatus: "ok",
|
||||
},
|
||||
},
|
||||
],
|
||||
async ({ enqueueSystemEvent, requestHeartbeatNow }) => {
|
||||
expect(enqueueSystemEvent).not.toHaveBeenCalled();
|
||||
expect(requestHeartbeatNow).not.toHaveBeenCalled();
|
||||
},
|
||||
]);
|
||||
|
||||
const cron = createRestartCronService({
|
||||
storePath: store.storePath,
|
||||
enqueueSystemEvent,
|
||||
requestHeartbeatNow,
|
||||
});
|
||||
|
||||
await cron.start();
|
||||
|
||||
expect(enqueueSystemEvent).not.toHaveBeenCalled();
|
||||
expect(requestHeartbeatNow).not.toHaveBeenCalled();
|
||||
cron.stop();
|
||||
await store.cleanup();
|
||||
);
|
||||
});
|
||||
|
||||
it("does not replay missed cron slots while error backoff is pending after restart", async () => {
|
||||
vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z"));
|
||||
const store = await makeStorePath();
|
||||
const enqueueSystemEvent = vi.fn();
|
||||
const requestHeartbeatNow = vi.fn();
|
||||
|
||||
await writeStoreJobs(store.storePath, [
|
||||
{
|
||||
id: "restart-backoff-pending",
|
||||
name: "backoff pending",
|
||||
enabled: true,
|
||||
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"),
|
||||
schedule: { kind: "cron", expr: "* * * * *", tz: "UTC" },
|
||||
sessionTarget: "main",
|
||||
wakeMode: "next-heartbeat",
|
||||
payload: { kind: "systemEvent", text: "do not run during backoff" },
|
||||
state: {
|
||||
// Next retry is intentionally delayed by backoff despite a newer cron slot.
|
||||
nextRunAtMs: Date.parse("2025-12-13T04:10:00.000Z"),
|
||||
lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
|
||||
lastStatus: "error",
|
||||
consecutiveErrors: 4,
|
||||
await withRestartedCron(
|
||||
[
|
||||
{
|
||||
id: "restart-backoff-pending",
|
||||
name: "backoff pending",
|
||||
enabled: true,
|
||||
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"),
|
||||
schedule: { kind: "cron", expr: "* * * * *", tz: "UTC" },
|
||||
sessionTarget: "main",
|
||||
wakeMode: "next-heartbeat",
|
||||
payload: { kind: "systemEvent", text: "do not run during backoff" },
|
||||
state: {
|
||||
// Next retry is intentionally delayed by backoff despite a newer cron slot.
|
||||
nextRunAtMs: Date.parse("2025-12-13T04:10:00.000Z"),
|
||||
lastRunAtMs: Date.parse("2025-12-13T04:01:00.000Z"),
|
||||
lastStatus: "error",
|
||||
consecutiveErrors: 4,
|
||||
},
|
||||
},
|
||||
],
|
||||
async ({ enqueueSystemEvent, requestHeartbeatNow }) => {
|
||||
expect(enqueueSystemEvent).not.toHaveBeenCalled();
|
||||
expect(requestHeartbeatNow).not.toHaveBeenCalled();
|
||||
},
|
||||
]);
|
||||
|
||||
const cron = createRestartCronService({
|
||||
storePath: store.storePath,
|
||||
enqueueSystemEvent,
|
||||
requestHeartbeatNow,
|
||||
});
|
||||
|
||||
await cron.start();
|
||||
|
||||
expect(enqueueSystemEvent).not.toHaveBeenCalled();
|
||||
expect(requestHeartbeatNow).not.toHaveBeenCalled();
|
||||
|
||||
cron.stop();
|
||||
await store.cleanup();
|
||||
);
|
||||
});
|
||||
|
||||
it("replays missed cron slot after restart when error backoff has already elapsed", async () => {
|
||||
vi.setSystemTime(new Date("2025-12-13T04:02:00.000Z"));
|
||||
const store = await makeStorePath();
|
||||
const enqueueSystemEvent = vi.fn();
|
||||
const requestHeartbeatNow = vi.fn();
|
||||
|
||||
await writeStoreJobs(store.storePath, [
|
||||
{
|
||||
id: "restart-backoff-elapsed-replay",
|
||||
name: "backoff elapsed replay",
|
||||
enabled: true,
|
||||
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"),
|
||||
schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" },
|
||||
sessionTarget: "main",
|
||||
wakeMode: "next-heartbeat",
|
||||
payload: { kind: "systemEvent", text: "replay after backoff elapsed" },
|
||||
state: {
|
||||
// Startup maintenance may already point to a future slot (04:11) even
|
||||
// though 04:01 was missed and the 30s error backoff has elapsed.
|
||||
nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"),
|
||||
lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"),
|
||||
lastStatus: "error",
|
||||
consecutiveErrors: 1,
|
||||
await withRestartedCron(
|
||||
[
|
||||
{
|
||||
id: "restart-backoff-elapsed-replay",
|
||||
name: "backoff elapsed replay",
|
||||
enabled: true,
|
||||
createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"),
|
||||
updatedAtMs: Date.parse("2025-12-13T04:01:10.000Z"),
|
||||
schedule: { kind: "cron", expr: "1,11,21,31,41,51 4-20 * * *", tz: "UTC" },
|
||||
sessionTarget: "main",
|
||||
wakeMode: "next-heartbeat",
|
||||
payload: { kind: "systemEvent", text: "replay after backoff elapsed" },
|
||||
state: {
|
||||
// Startup maintenance may already point to a future slot (04:11) even
|
||||
// though 04:01 was missed and the 30s error backoff has elapsed.
|
||||
nextRunAtMs: Date.parse("2025-12-13T04:11:00.000Z"),
|
||||
lastRunAtMs: Date.parse("2025-12-13T03:51:00.000Z"),
|
||||
lastStatus: "error",
|
||||
consecutiveErrors: 1,
|
||||
},
|
||||
},
|
||||
],
|
||||
async ({ enqueueSystemEvent, requestHeartbeatNow }) => {
|
||||
expect(enqueueSystemEvent).toHaveBeenCalledWith(
|
||||
"replay after backoff elapsed",
|
||||
expect.objectContaining({ agentId: undefined }),
|
||||
);
|
||||
expect(requestHeartbeatNow).toHaveBeenCalled();
|
||||
},
|
||||
]);
|
||||
|
||||
const cron = createRestartCronService({
|
||||
storePath: store.storePath,
|
||||
enqueueSystemEvent,
|
||||
requestHeartbeatNow,
|
||||
});
|
||||
|
||||
await cron.start();
|
||||
|
||||
expect(enqueueSystemEvent).toHaveBeenCalledWith(
|
||||
"replay after backoff elapsed",
|
||||
expect.objectContaining({ agentId: undefined }),
|
||||
);
|
||||
expect(requestHeartbeatNow).toHaveBeenCalled();
|
||||
|
||||
cron.stop();
|
||||
await store.cleanup();
|
||||
});
|
||||
|
||||
it("reschedules deferred missed jobs from the post-catchup clock so they stay in the future", async () => {
|
||||
|
||||
@@ -31,6 +31,25 @@ const launchdRestartHandoffState = vi.hoisted(() => ({
|
||||
}));
|
||||
const defaultProgramArguments = ["node", "-e", "process.exit(0)"];
|
||||
|
||||
function expectLaunchctlEnableBootstrapOrder(env: Record<string, string | undefined>) {
|
||||
const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501";
|
||||
const label = "ai.openclaw.gateway";
|
||||
const plistPath = resolveLaunchAgentPlistPath(env);
|
||||
const serviceId = `${domain}/${label}`;
|
||||
const enableIndex = state.launchctlCalls.findIndex(
|
||||
(c) => c[0] === "enable" && c[1] === serviceId,
|
||||
);
|
||||
const bootstrapIndex = state.launchctlCalls.findIndex(
|
||||
(c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath,
|
||||
);
|
||||
|
||||
expect(enableIndex).toBeGreaterThanOrEqual(0);
|
||||
expect(bootstrapIndex).toBeGreaterThanOrEqual(0);
|
||||
expect(enableIndex).toBeLessThan(bootstrapIndex);
|
||||
|
||||
return { domain, label, serviceId, bootstrapIndex };
|
||||
}
|
||||
|
||||
function normalizeLaunchctlArgs(file: string, args: string[]): string[] {
|
||||
if (file === "launchctl") {
|
||||
return args;
|
||||
@@ -219,25 +238,12 @@ describe("launchd bootstrap repair", () => {
|
||||
const repair = await repairLaunchAgentBootstrap({ env });
|
||||
expect(repair.ok).toBe(true);
|
||||
|
||||
const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501";
|
||||
const label = "ai.openclaw.gateway";
|
||||
const plistPath = resolveLaunchAgentPlistPath(env);
|
||||
const serviceId = `${domain}/${label}`;
|
||||
|
||||
const enableIndex = state.launchctlCalls.findIndex(
|
||||
(c) => c[0] === "enable" && c[1] === serviceId,
|
||||
);
|
||||
const bootstrapIndex = state.launchctlCalls.findIndex(
|
||||
(c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath,
|
||||
);
|
||||
const { serviceId, bootstrapIndex } = expectLaunchctlEnableBootstrapOrder(env);
|
||||
const kickstartIndex = state.launchctlCalls.findIndex(
|
||||
(c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId,
|
||||
);
|
||||
|
||||
expect(enableIndex).toBeGreaterThanOrEqual(0);
|
||||
expect(bootstrapIndex).toBeGreaterThanOrEqual(0);
|
||||
expect(kickstartIndex).toBeGreaterThanOrEqual(0);
|
||||
expect(enableIndex).toBeLessThan(bootstrapIndex);
|
||||
expect(bootstrapIndex).toBeLessThan(kickstartIndex);
|
||||
});
|
||||
});
|
||||
@@ -258,23 +264,10 @@ describe("launchd install", () => {
|
||||
programArguments: defaultProgramArguments,
|
||||
});
|
||||
|
||||
const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501";
|
||||
const label = "ai.openclaw.gateway";
|
||||
const plistPath = resolveLaunchAgentPlistPath(env);
|
||||
const serviceId = `${domain}/${label}`;
|
||||
|
||||
const enableIndex = state.launchctlCalls.findIndex(
|
||||
(c) => c[0] === "enable" && c[1] === serviceId,
|
||||
);
|
||||
const bootstrapIndex = state.launchctlCalls.findIndex(
|
||||
(c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath,
|
||||
);
|
||||
const { serviceId } = expectLaunchctlEnableBootstrapOrder(env);
|
||||
const installKickstartIndex = state.launchctlCalls.findIndex(
|
||||
(c) => c[0] === "kickstart" && c[2] === serviceId,
|
||||
);
|
||||
expect(enableIndex).toBeGreaterThanOrEqual(0);
|
||||
expect(bootstrapIndex).toBeGreaterThanOrEqual(0);
|
||||
expect(enableIndex).toBeLessThan(bootstrapIndex);
|
||||
expect(installKickstartIndex).toBe(-1);
|
||||
});
|
||||
|
||||
@@ -360,24 +353,13 @@ describe("launchd install", () => {
|
||||
stdout: new PassThrough(),
|
||||
});
|
||||
|
||||
const domain = typeof process.getuid === "function" ? `gui/${process.getuid()}` : "gui/501";
|
||||
const label = "ai.openclaw.gateway";
|
||||
const plistPath = resolveLaunchAgentPlistPath(env);
|
||||
const serviceId = `${domain}/${label}`;
|
||||
const { serviceId } = expectLaunchctlEnableBootstrapOrder(env);
|
||||
const kickstartCalls = state.launchctlCalls.filter(
|
||||
(c) => c[0] === "kickstart" && c[1] === "-k" && c[2] === serviceId,
|
||||
);
|
||||
const enableIndex = state.launchctlCalls.findIndex(
|
||||
(c) => c[0] === "enable" && c[1] === serviceId,
|
||||
);
|
||||
const bootstrapIndex = state.launchctlCalls.findIndex(
|
||||
(c) => c[0] === "bootstrap" && c[1] === domain && c[2] === plistPath,
|
||||
);
|
||||
|
||||
expect(result).toEqual({ outcome: "completed" });
|
||||
expect(kickstartCalls).toHaveLength(2);
|
||||
expect(enableIndex).toBeGreaterThanOrEqual(0);
|
||||
expect(bootstrapIndex).toBeGreaterThanOrEqual(0);
|
||||
expect(state.launchctlCalls.some((call) => call[0] === "bootout")).toBe(false);
|
||||
});
|
||||
|
||||
|
||||
@@ -1,37 +1,26 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { PassThrough } from "node:stream";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { quoteCmdScriptArg } from "./cmd-argv.js";
|
||||
|
||||
const schtasksResponses = vi.hoisted(
|
||||
() => [] as Array<{ code: number; stdout: string; stderr: string }>,
|
||||
);
|
||||
const schtasksCalls = vi.hoisted(() => [] as string[][]);
|
||||
const inspectPortUsage = vi.hoisted(() => vi.fn());
|
||||
const killProcessTree = vi.hoisted(() => vi.fn());
|
||||
import "./test-helpers/schtasks-base-mocks.js";
|
||||
import {
|
||||
inspectPortUsage,
|
||||
killProcessTree,
|
||||
resetSchtasksBaseMocks,
|
||||
schtasksResponses,
|
||||
withWindowsEnv,
|
||||
} from "./test-helpers/schtasks-fixtures.js";
|
||||
const childUnref = vi.hoisted(() => vi.fn());
|
||||
const spawn = vi.hoisted(() => vi.fn(() => ({ unref: childUnref })));
|
||||
|
||||
vi.mock("./schtasks-exec.js", () => ({
|
||||
execSchtasks: async (argv: string[]) => {
|
||||
schtasksCalls.push(argv);
|
||||
return schtasksResponses.shift() ?? { code: 0, stdout: "", stderr: "" };
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock("../infra/ports.js", () => ({
|
||||
inspectPortUsage: (...args: unknown[]) => inspectPortUsage(...args),
|
||||
}));
|
||||
|
||||
vi.mock("../process/kill-tree.js", () => ({
|
||||
killProcessTree: (...args: unknown[]) => killProcessTree(...args),
|
||||
}));
|
||||
|
||||
vi.mock("node:child_process", () => ({
|
||||
spawn,
|
||||
}));
|
||||
vi.mock("node:child_process", async (importOriginal) => {
|
||||
const actual = await importOriginal<typeof import("node:child_process")>();
|
||||
return {
|
||||
...actual,
|
||||
spawn,
|
||||
};
|
||||
});
|
||||
|
||||
const {
|
||||
installScheduledTask,
|
||||
@@ -39,6 +28,7 @@ const {
|
||||
readScheduledTaskRuntime,
|
||||
restartScheduledTask,
|
||||
resolveTaskScriptPath,
|
||||
stopScheduledTask,
|
||||
} = await import("./schtasks.js");
|
||||
|
||||
function resolveStartupEntryPath(env: Record<string, string>) {
|
||||
@@ -53,28 +43,22 @@ function resolveStartupEntryPath(env: Record<string, string>) {
|
||||
);
|
||||
}
|
||||
|
||||
async function withWindowsEnv(
|
||||
run: (params: { tmpDir: string; env: Record<string, string> }) => Promise<void>,
|
||||
) {
|
||||
const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-win-startup-"));
|
||||
const env = {
|
||||
USERPROFILE: tmpDir,
|
||||
APPDATA: path.join(tmpDir, "AppData", "Roaming"),
|
||||
OPENCLAW_PROFILE: "default",
|
||||
OPENCLAW_GATEWAY_PORT: "18789",
|
||||
};
|
||||
try {
|
||||
await run({ tmpDir, env });
|
||||
} finally {
|
||||
await fs.rm(tmpDir, { recursive: true, force: true });
|
||||
}
|
||||
async function writeGatewayScript(env: Record<string, string>, port = 18789) {
|
||||
const scriptPath = resolveTaskScriptPath(env);
|
||||
await fs.mkdir(path.dirname(scriptPath), { recursive: true });
|
||||
await fs.writeFile(
|
||||
scriptPath,
|
||||
[
|
||||
"@echo off",
|
||||
`set "OPENCLAW_GATEWAY_PORT=${port}"`,
|
||||
`"C:\\Program Files\\nodejs\\node.exe" "C:\\Users\\steipete\\AppData\\Roaming\\npm\\node_modules\\openclaw\\dist\\index.js" gateway --port ${port}`,
|
||||
"",
|
||||
].join("\r\n"),
|
||||
"utf8",
|
||||
);
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
schtasksResponses.length = 0;
|
||||
schtasksCalls.length = 0;
|
||||
inspectPortUsage.mockReset();
|
||||
killProcessTree.mockReset();
|
||||
resetSchtasksBaseMocks();
|
||||
spawn.mockClear();
|
||||
childUnref.mockClear();
|
||||
});
|
||||
@@ -85,7 +69,7 @@ afterEach(() => {
|
||||
|
||||
describe("Windows startup fallback", () => {
|
||||
it("falls back to a Startup-folder launcher when schtasks create is denied", async () => {
|
||||
await withWindowsEnv(async ({ env }) => {
|
||||
await withWindowsEnv("openclaw-win-startup-", async ({ env }) => {
|
||||
schtasksResponses.push(
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
{ code: 5, stdout: "", stderr: "ERROR: Access is denied." },
|
||||
@@ -120,7 +104,7 @@ describe("Windows startup fallback", () => {
|
||||
});
|
||||
|
||||
it("falls back to a Startup-folder launcher when schtasks create hangs", async () => {
|
||||
await withWindowsEnv(async ({ env }) => {
|
||||
await withWindowsEnv("openclaw-win-startup-", async ({ env }) => {
|
||||
schtasksResponses.push(
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
{ code: 124, stdout: "", stderr: "schtasks timed out after 15000ms" },
|
||||
@@ -144,7 +128,7 @@ describe("Windows startup fallback", () => {
|
||||
});
|
||||
|
||||
it("treats an installed Startup-folder launcher as loaded", async () => {
|
||||
await withWindowsEnv(async ({ env }) => {
|
||||
await withWindowsEnv("openclaw-win-startup-", async ({ env }) => {
|
||||
schtasksResponses.push(
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
{ code: 1, stdout: "", stderr: "not found" },
|
||||
@@ -157,7 +141,7 @@ describe("Windows startup fallback", () => {
|
||||
});
|
||||
|
||||
it("reports runtime from the gateway listener when using the Startup fallback", async () => {
|
||||
await withWindowsEnv(async ({ env }) => {
|
||||
await withWindowsEnv("openclaw-win-startup-", async ({ env }) => {
|
||||
schtasksResponses.push(
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
{ code: 1, stdout: "", stderr: "not found" },
|
||||
@@ -179,7 +163,7 @@ describe("Windows startup fallback", () => {
|
||||
});
|
||||
|
||||
it("restarts the Startup fallback by killing the current pid and relaunching the entry", async () => {
|
||||
await withWindowsEnv(async ({ env }) => {
|
||||
await withWindowsEnv("openclaw-win-startup-", async ({ env }) => {
|
||||
schtasksResponses.push(
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
{ code: 1, stdout: "", stderr: "not found" },
|
||||
@@ -207,4 +191,39 @@ describe("Windows startup fallback", () => {
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it("kills the Startup fallback runtime even when the CLI env omits the gateway port", async () => {
|
||||
await withWindowsEnv("openclaw-win-startup-", async ({ env }) => {
|
||||
schtasksResponses.push({ code: 0, stdout: "", stderr: "" });
|
||||
await writeGatewayScript(env);
|
||||
await fs.mkdir(path.dirname(resolveStartupEntryPath(env)), { recursive: true });
|
||||
await fs.writeFile(resolveStartupEntryPath(env), "@echo off\r\n", "utf8");
|
||||
inspectPortUsage
|
||||
.mockResolvedValueOnce({
|
||||
port: 18789,
|
||||
status: "busy",
|
||||
listeners: [{ pid: 5151, command: "node.exe" }],
|
||||
hints: [],
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
port: 18789,
|
||||
status: "busy",
|
||||
listeners: [{ pid: 5151, command: "node.exe" }],
|
||||
hints: [],
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
port: 18789,
|
||||
status: "free",
|
||||
listeners: [],
|
||||
hints: [],
|
||||
});
|
||||
|
||||
const stdout = new PassThrough();
|
||||
const envWithoutPort = { ...env };
|
||||
delete envWithoutPort.OPENCLAW_GATEWAY_PORT;
|
||||
await stopScheduledTask({ env: envWithoutPort, stdout });
|
||||
|
||||
expect(killProcessTree).toHaveBeenCalledWith(5151, { graceMs: 300 });
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
209
src/daemon/schtasks.stop.test.ts
Normal file
209
src/daemon/schtasks.stop.test.ts
Normal file
@@ -0,0 +1,209 @@
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import { PassThrough } from "node:stream";
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import "./test-helpers/schtasks-base-mocks.js";
|
||||
import {
|
||||
inspectPortUsage,
|
||||
killProcessTree,
|
||||
resetSchtasksBaseMocks,
|
||||
schtasksCalls,
|
||||
schtasksResponses,
|
||||
withWindowsEnv,
|
||||
} from "./test-helpers/schtasks-fixtures.js";
|
||||
const findVerifiedGatewayListenerPidsOnPortSync = vi.hoisted(() =>
|
||||
vi.fn<(port: number) => number[]>(() => []),
|
||||
);
|
||||
|
||||
vi.mock("../infra/gateway-processes.js", () => ({
|
||||
findVerifiedGatewayListenerPidsOnPortSync: (port: number) =>
|
||||
findVerifiedGatewayListenerPidsOnPortSync(port),
|
||||
}));
|
||||
|
||||
const { restartScheduledTask, resolveTaskScriptPath, stopScheduledTask } =
|
||||
await import("./schtasks.js");
|
||||
|
||||
async function writeGatewayScript(env: Record<string, string>, port = 18789) {
|
||||
const scriptPath = resolveTaskScriptPath(env);
|
||||
await fs.mkdir(path.dirname(scriptPath), { recursive: true });
|
||||
await fs.writeFile(
|
||||
scriptPath,
|
||||
[
|
||||
"@echo off",
|
||||
`set "OPENCLAW_GATEWAY_PORT=${port}"`,
|
||||
`"C:\\Program Files\\nodejs\\node.exe" "C:\\Users\\steipete\\AppData\\Roaming\\npm\\node_modules\\openclaw\\dist\\index.js" gateway --port ${port}`,
|
||||
"",
|
||||
].join("\r\n"),
|
||||
"utf8",
|
||||
);
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
resetSchtasksBaseMocks();
|
||||
findVerifiedGatewayListenerPidsOnPortSync.mockReset();
|
||||
findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([]);
|
||||
inspectPortUsage.mockResolvedValue({
|
||||
port: 18789,
|
||||
status: "free",
|
||||
listeners: [],
|
||||
hints: [],
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
describe("Scheduled Task stop/restart cleanup", () => {
|
||||
it("kills lingering verified gateway listeners after schtasks stop", async () => {
|
||||
await withWindowsEnv("openclaw-win-stop-", async ({ env }) => {
|
||||
await writeGatewayScript(env);
|
||||
schtasksResponses.push(
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
);
|
||||
findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4242]);
|
||||
inspectPortUsage
|
||||
.mockResolvedValueOnce({
|
||||
port: 18789,
|
||||
status: "busy",
|
||||
listeners: [{ pid: 4242, command: "node.exe" }],
|
||||
hints: [],
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
port: 18789,
|
||||
status: "free",
|
||||
listeners: [],
|
||||
hints: [],
|
||||
});
|
||||
|
||||
const stdout = new PassThrough();
|
||||
await stopScheduledTask({ env, stdout });
|
||||
|
||||
expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(18789);
|
||||
expect(killProcessTree).toHaveBeenCalledWith(4242, { graceMs: 300 });
|
||||
expect(inspectPortUsage).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
});
|
||||
|
||||
it("force-kills remaining busy port listeners when the first stop pass does not free the port", async () => {
|
||||
await withWindowsEnv("openclaw-win-stop-", async ({ env }) => {
|
||||
await writeGatewayScript(env);
|
||||
schtasksResponses.push(
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
);
|
||||
findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([4242]);
|
||||
inspectPortUsage.mockResolvedValueOnce({
|
||||
port: 18789,
|
||||
status: "busy",
|
||||
listeners: [{ pid: 4242, command: "node.exe" }],
|
||||
hints: [],
|
||||
});
|
||||
for (let i = 0; i < 20; i += 1) {
|
||||
inspectPortUsage.mockResolvedValueOnce({
|
||||
port: 18789,
|
||||
status: "busy",
|
||||
listeners: [{ pid: 4242, command: "node.exe" }],
|
||||
hints: [],
|
||||
});
|
||||
}
|
||||
inspectPortUsage
|
||||
.mockResolvedValueOnce({
|
||||
port: 18789,
|
||||
status: "busy",
|
||||
listeners: [{ pid: 5252, command: "node.exe" }],
|
||||
hints: [],
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
port: 18789,
|
||||
status: "free",
|
||||
listeners: [],
|
||||
hints: [],
|
||||
});
|
||||
|
||||
const stdout = new PassThrough();
|
||||
await stopScheduledTask({ env, stdout });
|
||||
|
||||
expect(killProcessTree).toHaveBeenNthCalledWith(1, 4242, { graceMs: 300 });
|
||||
expect(killProcessTree).toHaveBeenNthCalledWith(2, expect.any(Number), { graceMs: 300 });
|
||||
expect(inspectPortUsage.mock.calls.length).toBeGreaterThanOrEqual(22);
|
||||
});
|
||||
});
|
||||
|
||||
it("falls back to inspected gateway listeners when sync verification misses on Windows", async () => {
|
||||
await withWindowsEnv("openclaw-win-stop-", async ({ env }) => {
|
||||
await writeGatewayScript(env);
|
||||
schtasksResponses.push(
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
);
|
||||
findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([]);
|
||||
inspectPortUsage
|
||||
.mockResolvedValueOnce({
|
||||
port: 18789,
|
||||
status: "busy",
|
||||
listeners: [
|
||||
{
|
||||
pid: 6262,
|
||||
command: "node.exe",
|
||||
commandLine:
|
||||
'"C:\\Program Files\\nodejs\\node.exe" "C:\\Users\\steipete\\AppData\\Roaming\\npm\\node_modules\\openclaw\\dist\\index.js" gateway --port 18789',
|
||||
},
|
||||
],
|
||||
hints: [],
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
port: 18789,
|
||||
status: "free",
|
||||
listeners: [],
|
||||
hints: [],
|
||||
});
|
||||
|
||||
const stdout = new PassThrough();
|
||||
await stopScheduledTask({ env, stdout });
|
||||
|
||||
expect(killProcessTree).toHaveBeenCalledWith(6262, { graceMs: 300 });
|
||||
expect(inspectPortUsage).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
});
|
||||
|
||||
it("kills lingering verified gateway listeners and waits for port release before restart", async () => {
|
||||
await withWindowsEnv("openclaw-win-stop-", async ({ env }) => {
|
||||
await writeGatewayScript(env);
|
||||
schtasksResponses.push(
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
{ code: 0, stdout: "", stderr: "" },
|
||||
);
|
||||
findVerifiedGatewayListenerPidsOnPortSync.mockReturnValue([5151]);
|
||||
inspectPortUsage
|
||||
.mockResolvedValueOnce({
|
||||
port: 18789,
|
||||
status: "busy",
|
||||
listeners: [{ pid: 5151, command: "node.exe" }],
|
||||
hints: [],
|
||||
})
|
||||
.mockResolvedValueOnce({
|
||||
port: 18789,
|
||||
status: "free",
|
||||
listeners: [],
|
||||
hints: [],
|
||||
});
|
||||
|
||||
const stdout = new PassThrough();
|
||||
await expect(restartScheduledTask({ env, stdout })).resolves.toEqual({
|
||||
outcome: "completed",
|
||||
});
|
||||
|
||||
expect(findVerifiedGatewayListenerPidsOnPortSync).toHaveBeenCalledWith(18789);
|
||||
expect(killProcessTree).toHaveBeenCalledWith(5151, { graceMs: 300 });
|
||||
expect(inspectPortUsage).toHaveBeenCalledTimes(2);
|
||||
expect(schtasksCalls.at(-1)).toEqual(["/Run", "/TN", "OpenClaw Gateway"]);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -1,8 +1,11 @@
|
||||
import { spawn } from "node:child_process";
|
||||
import { spawn, spawnSync } from "node:child_process";
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import { isGatewayArgv } from "../infra/gateway-process-argv.js";
|
||||
import { findVerifiedGatewayListenerPidsOnPortSync } from "../infra/gateway-processes.js";
|
||||
import { inspectPortUsage } from "../infra/ports.js";
|
||||
import { killProcessTree } from "../process/kill-tree.js";
|
||||
import { sleep } from "../utils.js";
|
||||
import { parseCmdScriptCommandLine, quoteCmdScriptArg } from "./cmd-argv.js";
|
||||
import { assertNoCmdLineBreak, parseCmdSetAssignment, renderCmdSetAssignment } from "./cmd-set.js";
|
||||
import { resolveGatewayServiceDescription, resolveGatewayWindowsTaskName } from "./constants.js";
|
||||
@@ -158,6 +161,12 @@ export type ScheduledTaskInfo = {
|
||||
lastRunResult?: string;
|
||||
};
|
||||
|
||||
function hasListenerPid<T extends { pid?: number | null }>(
|
||||
listener: T,
|
||||
): listener is T & { pid: number } {
|
||||
return typeof listener.pid === "number";
|
||||
}
|
||||
|
||||
export function parseSchtasksQuery(output: string): ScheduledTaskInfo {
|
||||
const entries = parseKeyValueOutput(output, ":");
|
||||
const info: ScheduledTaskInfo = {};
|
||||
@@ -311,8 +320,175 @@ function resolveConfiguredGatewayPort(env: GatewayServiceEnv): number | null {
|
||||
return Number.isFinite(parsed) && parsed > 0 ? parsed : null;
|
||||
}
|
||||
|
||||
function parsePositivePort(raw: string | undefined): number | null {
|
||||
const value = raw?.trim();
|
||||
if (!value) {
|
||||
return null;
|
||||
}
|
||||
if (!/^\d+$/.test(value)) {
|
||||
return null;
|
||||
}
|
||||
const parsed = Number.parseInt(value, 10);
|
||||
return Number.isFinite(parsed) && parsed > 0 && parsed <= 65535 ? parsed : null;
|
||||
}
|
||||
|
||||
function parsePortFromProgramArguments(programArguments?: string[]): number | null {
|
||||
if (!programArguments?.length) {
|
||||
return null;
|
||||
}
|
||||
for (let i = 0; i < programArguments.length; i += 1) {
|
||||
const arg = programArguments[i];
|
||||
if (!arg) {
|
||||
continue;
|
||||
}
|
||||
const inlineMatch = arg.match(/^--port=(\d+)$/);
|
||||
if (inlineMatch) {
|
||||
return parsePositivePort(inlineMatch[1]);
|
||||
}
|
||||
if (arg === "--port") {
|
||||
return parsePositivePort(programArguments[i + 1]);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
async function resolveScheduledTaskPort(env: GatewayServiceEnv): Promise<number | null> {
|
||||
const command = await readScheduledTaskCommand(env).catch(() => null);
|
||||
return (
|
||||
parsePortFromProgramArguments(command?.programArguments) ??
|
||||
parsePositivePort(command?.environment?.OPENCLAW_GATEWAY_PORT) ??
|
||||
resolveConfiguredGatewayPort(env)
|
||||
);
|
||||
}
|
||||
|
||||
async function resolveScheduledTaskGatewayListenerPids(port: number): Promise<number[]> {
|
||||
const verified = findVerifiedGatewayListenerPidsOnPortSync(port);
|
||||
if (verified.length > 0) {
|
||||
return verified;
|
||||
}
|
||||
|
||||
const diagnostics = await inspectPortUsage(port).catch(() => null);
|
||||
if (diagnostics?.status !== "busy") {
|
||||
return [];
|
||||
}
|
||||
|
||||
const matchedGatewayPids = Array.from(
|
||||
new Set(
|
||||
diagnostics.listeners
|
||||
.filter(
|
||||
(listener) =>
|
||||
typeof listener.pid === "number" &&
|
||||
listener.commandLine &&
|
||||
isGatewayArgv(parseCmdScriptCommandLine(listener.commandLine), {
|
||||
allowGatewayBinary: true,
|
||||
}),
|
||||
)
|
||||
.map((listener) => listener.pid as number),
|
||||
),
|
||||
);
|
||||
if (matchedGatewayPids.length > 0) {
|
||||
return matchedGatewayPids;
|
||||
}
|
||||
|
||||
return Array.from(
|
||||
new Set(
|
||||
diagnostics.listeners
|
||||
.map((listener) => listener.pid)
|
||||
.filter((pid): pid is number => typeof pid === "number" && Number.isFinite(pid) && pid > 0),
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
async function terminateScheduledTaskGatewayListeners(env: GatewayServiceEnv): Promise<number[]> {
|
||||
const port = await resolveScheduledTaskPort(env);
|
||||
if (!port) {
|
||||
return [];
|
||||
}
|
||||
const pids = await resolveScheduledTaskGatewayListenerPids(port);
|
||||
for (const pid of pids) {
|
||||
await terminateGatewayProcessTree(pid, 300);
|
||||
}
|
||||
return pids;
|
||||
}
|
||||
|
||||
function isProcessAlive(pid: number): boolean {
|
||||
try {
|
||||
process.kill(pid, 0);
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
async function waitForProcessExit(pid: number, timeoutMs: number): Promise<boolean> {
|
||||
const deadline = Date.now() + timeoutMs;
|
||||
while (Date.now() < deadline) {
|
||||
if (!isProcessAlive(pid)) {
|
||||
return true;
|
||||
}
|
||||
await sleep(100);
|
||||
}
|
||||
return !isProcessAlive(pid);
|
||||
}
|
||||
|
||||
async function terminateGatewayProcessTree(pid: number, graceMs: number): Promise<void> {
|
||||
if (process.platform !== "win32") {
|
||||
killProcessTree(pid, { graceMs });
|
||||
return;
|
||||
}
|
||||
const taskkillPath = path.join(
|
||||
process.env.SystemRoot ?? "C:\\Windows",
|
||||
"System32",
|
||||
"taskkill.exe",
|
||||
);
|
||||
spawnSync(taskkillPath, ["/T", "/PID", String(pid)], {
|
||||
stdio: "ignore",
|
||||
timeout: 5_000,
|
||||
windowsHide: true,
|
||||
});
|
||||
if (await waitForProcessExit(pid, graceMs)) {
|
||||
return;
|
||||
}
|
||||
spawnSync(taskkillPath, ["/F", "/T", "/PID", String(pid)], {
|
||||
stdio: "ignore",
|
||||
timeout: 5_000,
|
||||
windowsHide: true,
|
||||
});
|
||||
await waitForProcessExit(pid, 5_000);
|
||||
}
|
||||
|
||||
async function waitForGatewayPortRelease(port: number, timeoutMs = 5_000): Promise<boolean> {
|
||||
const deadline = Date.now() + timeoutMs;
|
||||
while (Date.now() < deadline) {
|
||||
const diagnostics = await inspectPortUsage(port).catch(() => null);
|
||||
if (diagnostics?.status === "free") {
|
||||
return true;
|
||||
}
|
||||
await sleep(250);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
async function terminateBusyPortListeners(port: number): Promise<number[]> {
|
||||
const diagnostics = await inspectPortUsage(port).catch(() => null);
|
||||
if (diagnostics?.status !== "busy") {
|
||||
return [];
|
||||
}
|
||||
const pids = Array.from(
|
||||
new Set(
|
||||
diagnostics.listeners
|
||||
.map((listener) => listener.pid)
|
||||
.filter((pid): pid is number => typeof pid === "number" && Number.isFinite(pid) && pid > 0),
|
||||
),
|
||||
);
|
||||
for (const pid of pids) {
|
||||
await terminateGatewayProcessTree(pid, 300);
|
||||
}
|
||||
return pids;
|
||||
}
|
||||
|
||||
async function resolveFallbackRuntime(env: GatewayServiceEnv): Promise<GatewayServiceRuntime> {
|
||||
const port = resolveConfiguredGatewayPort(env);
|
||||
const port = (await resolveScheduledTaskPort(env)) ?? resolveConfiguredGatewayPort(env);
|
||||
if (!port) {
|
||||
return {
|
||||
status: "unknown",
|
||||
@@ -326,7 +502,7 @@ async function resolveFallbackRuntime(env: GatewayServiceEnv): Promise<GatewaySe
|
||||
detail: `Startup-folder login item installed; could not inspect port ${port}.`,
|
||||
};
|
||||
}
|
||||
const listener = diagnostics.listeners.find((item) => typeof item.pid === "number");
|
||||
const listener = diagnostics.listeners.find(hasListenerPid);
|
||||
return {
|
||||
status: diagnostics.status === "busy" ? "running" : "stopped",
|
||||
...(listener?.pid ? { pid: listener.pid } : {}),
|
||||
@@ -343,18 +519,28 @@ async function stopStartupEntry(
|
||||
): Promise<void> {
|
||||
const runtime = await resolveFallbackRuntime(env);
|
||||
if (typeof runtime.pid === "number" && runtime.pid > 0) {
|
||||
killProcessTree(runtime.pid, { graceMs: 300 });
|
||||
await terminateGatewayProcessTree(runtime.pid, 300);
|
||||
}
|
||||
stdout.write(`${formatLine("Stopped Windows login item", resolveTaskName(env))}\n`);
|
||||
}
|
||||
|
||||
async function terminateInstalledStartupRuntime(env: GatewayServiceEnv): Promise<void> {
|
||||
if (!(await isStartupEntryInstalled(env))) {
|
||||
return;
|
||||
}
|
||||
const runtime = await resolveFallbackRuntime(env);
|
||||
if (typeof runtime.pid === "number" && runtime.pid > 0) {
|
||||
await terminateGatewayProcessTree(runtime.pid, 300);
|
||||
}
|
||||
}
|
||||
|
||||
async function restartStartupEntry(
|
||||
env: GatewayServiceEnv,
|
||||
stdout: NodeJS.WritableStream,
|
||||
): Promise<GatewayServiceRestartResult> {
|
||||
const runtime = await resolveFallbackRuntime(env);
|
||||
if (typeof runtime.pid === "number" && runtime.pid > 0) {
|
||||
killProcessTree(runtime.pid, { graceMs: 300 });
|
||||
await terminateGatewayProcessTree(runtime.pid, 300);
|
||||
}
|
||||
launchFallbackTaskScript(resolveTaskScriptPath(env));
|
||||
stdout.write(`${formatLine("Restarted Windows login item", resolveTaskName(env))}\n`);
|
||||
@@ -489,6 +675,19 @@ export async function stopScheduledTask({ stdout, env }: GatewayServiceControlAr
|
||||
if (res.code !== 0 && !isTaskNotRunning(res)) {
|
||||
throw new Error(`schtasks end failed: ${res.stderr || res.stdout}`.trim());
|
||||
}
|
||||
const stopPort = await resolveScheduledTaskPort(effectiveEnv);
|
||||
await terminateScheduledTaskGatewayListeners(effectiveEnv);
|
||||
await terminateInstalledStartupRuntime(effectiveEnv);
|
||||
if (stopPort) {
|
||||
const released = await waitForGatewayPortRelease(stopPort);
|
||||
if (!released) {
|
||||
await terminateBusyPortListeners(stopPort);
|
||||
const releasedAfterForce = await waitForGatewayPortRelease(stopPort, 2_000);
|
||||
if (!releasedAfterForce) {
|
||||
throw new Error(`gateway port ${stopPort} is still busy after stop`);
|
||||
}
|
||||
}
|
||||
}
|
||||
stdout.write(`${formatLine("Stopped Scheduled Task", taskName)}\n`);
|
||||
}
|
||||
|
||||
@@ -512,6 +711,19 @@ export async function restartScheduledTask({
|
||||
}
|
||||
const taskName = resolveTaskName(effectiveEnv);
|
||||
await execSchtasks(["/End", "/TN", taskName]);
|
||||
const restartPort = await resolveScheduledTaskPort(effectiveEnv);
|
||||
await terminateScheduledTaskGatewayListeners(effectiveEnv);
|
||||
await terminateInstalledStartupRuntime(effectiveEnv);
|
||||
if (restartPort) {
|
||||
const released = await waitForGatewayPortRelease(restartPort);
|
||||
if (!released) {
|
||||
await terminateBusyPortListeners(restartPort);
|
||||
const releasedAfterForce = await waitForGatewayPortRelease(restartPort, 2_000);
|
||||
if (!releasedAfterForce) {
|
||||
throw new Error(`gateway port ${restartPort} is still busy before restart`);
|
||||
}
|
||||
}
|
||||
}
|
||||
const res = await execSchtasks(["/Run", "/TN", taskName]);
|
||||
if (res.code !== 0) {
|
||||
throw new Error(`schtasks run failed: ${res.stderr || res.stdout}`.trim());
|
||||
|
||||
22
src/daemon/test-helpers/schtasks-base-mocks.ts
Normal file
22
src/daemon/test-helpers/schtasks-base-mocks.ts
Normal file
@@ -0,0 +1,22 @@
|
||||
import { vi } from "vitest";
|
||||
import {
|
||||
inspectPortUsage,
|
||||
killProcessTree,
|
||||
schtasksCalls,
|
||||
schtasksResponses,
|
||||
} from "./schtasks-fixtures.js";
|
||||
|
||||
vi.mock("../schtasks-exec.js", () => ({
|
||||
execSchtasks: async (argv: string[]) => {
|
||||
schtasksCalls.push(argv);
|
||||
return schtasksResponses.shift() ?? { code: 0, stdout: "", stderr: "" };
|
||||
},
|
||||
}));
|
||||
|
||||
vi.mock("../../infra/ports.js", () => ({
|
||||
inspectPortUsage: (...args: unknown[]) => inspectPortUsage(...args),
|
||||
}));
|
||||
|
||||
vi.mock("../../process/kill-tree.js", () => ({
|
||||
killProcessTree: (...args: unknown[]) => killProcessTree(...args),
|
||||
}));
|
||||
34
src/daemon/test-helpers/schtasks-fixtures.ts
Normal file
34
src/daemon/test-helpers/schtasks-fixtures.ts
Normal file
@@ -0,0 +1,34 @@
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { vi } from "vitest";
|
||||
|
||||
export const schtasksResponses: Array<{ code: number; stdout: string; stderr: string }> = [];
|
||||
export const schtasksCalls: string[][] = [];
|
||||
export const inspectPortUsage = vi.fn();
|
||||
export const killProcessTree = vi.fn();
|
||||
|
||||
export async function withWindowsEnv(
|
||||
prefix: string,
|
||||
run: (params: { tmpDir: string; env: Record<string, string> }) => Promise<void>,
|
||||
) {
|
||||
const tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
|
||||
const env = {
|
||||
USERPROFILE: tmpDir,
|
||||
APPDATA: path.join(tmpDir, "AppData", "Roaming"),
|
||||
OPENCLAW_PROFILE: "default",
|
||||
OPENCLAW_GATEWAY_PORT: "18789",
|
||||
};
|
||||
try {
|
||||
await run({ tmpDir, env });
|
||||
} finally {
|
||||
await fs.rm(tmpDir, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
export function resetSchtasksBaseMocks() {
|
||||
schtasksResponses.length = 0;
|
||||
schtasksCalls.length = 0;
|
||||
inspectPortUsage.mockReset();
|
||||
killProcessTree.mockReset();
|
||||
}
|
||||
@@ -103,6 +103,21 @@ export function normalizeDiscordSlug(value: string) {
|
||||
.replace(/^-+|-+$/g, "");
|
||||
}
|
||||
|
||||
function resolveDiscordAllowListNameMatch(
|
||||
list: DiscordAllowList,
|
||||
candidate: { name?: string; tag?: string },
|
||||
): { matchKey: string; matchSource: "name" | "tag" } | null {
|
||||
const nameSlug = candidate.name ? normalizeDiscordSlug(candidate.name) : "";
|
||||
if (nameSlug && list.names.has(nameSlug)) {
|
||||
return { matchKey: nameSlug, matchSource: "name" };
|
||||
}
|
||||
const tagSlug = candidate.tag ? normalizeDiscordSlug(candidate.tag) : "";
|
||||
if (tagSlug && list.names.has(tagSlug)) {
|
||||
return { matchKey: tagSlug, matchSource: "tag" };
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
export function allowListMatches(
|
||||
list: DiscordAllowList,
|
||||
candidate: { id?: string; name?: string; tag?: string },
|
||||
@@ -115,11 +130,7 @@ export function allowListMatches(
|
||||
return true;
|
||||
}
|
||||
if (params?.allowNameMatching === true) {
|
||||
const slug = candidate.name ? normalizeDiscordSlug(candidate.name) : "";
|
||||
if (slug && list.names.has(slug)) {
|
||||
return true;
|
||||
}
|
||||
if (candidate.tag && list.names.has(normalizeDiscordSlug(candidate.tag))) {
|
||||
if (resolveDiscordAllowListNameMatch(list, candidate)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -139,13 +150,9 @@ export function resolveDiscordAllowListMatch(params: {
|
||||
return { allowed: true, matchKey: candidate.id, matchSource: "id" };
|
||||
}
|
||||
if (params.allowNameMatching === true) {
|
||||
const nameSlug = candidate.name ? normalizeDiscordSlug(candidate.name) : "";
|
||||
if (nameSlug && allowList.names.has(nameSlug)) {
|
||||
return { allowed: true, matchKey: nameSlug, matchSource: "name" };
|
||||
}
|
||||
const tagSlug = candidate.tag ? normalizeDiscordSlug(candidate.tag) : "";
|
||||
if (tagSlug && allowList.names.has(tagSlug)) {
|
||||
return { allowed: true, matchKey: tagSlug, matchSource: "tag" };
|
||||
const namedMatch = resolveDiscordAllowListNameMatch(allowList, candidate);
|
||||
if (namedMatch) {
|
||||
return { allowed: true, ...namedMatch };
|
||||
}
|
||||
}
|
||||
return { allowed: false };
|
||||
|
||||
@@ -123,6 +123,30 @@ describe("createDiscordGatewayPlugin", () => {
|
||||
};
|
||||
}
|
||||
|
||||
async function registerGatewayClient(plugin: unknown) {
|
||||
await (
|
||||
plugin as {
|
||||
registerClient: (client: { options: { token: string } }) => Promise<void>;
|
||||
}
|
||||
).registerClient({
|
||||
options: { token: "token-123" },
|
||||
});
|
||||
}
|
||||
|
||||
async function expectGatewayRegisterFetchFailure(response: Response) {
|
||||
const runtime = createRuntime();
|
||||
globalFetchMock.mockResolvedValue(response);
|
||||
const plugin = createDiscordGatewayPlugin({
|
||||
discordConfig: {},
|
||||
runtime,
|
||||
});
|
||||
|
||||
await expect(registerGatewayClient(plugin)).rejects.toThrow(
|
||||
"Failed to get gateway information from Discord: fetch failed",
|
||||
);
|
||||
expect(baseRegisterClientSpy).not.toHaveBeenCalled();
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
vi.stubGlobal("fetch", globalFetchMock);
|
||||
baseRegisterClientSpy.mockClear();
|
||||
@@ -165,28 +189,12 @@ describe("createDiscordGatewayPlugin", () => {
|
||||
});
|
||||
|
||||
it("maps plain-text Discord 503 responses to fetch failed", async () => {
|
||||
const runtime = createRuntime();
|
||||
globalFetchMock.mockResolvedValue({
|
||||
await expectGatewayRegisterFetchFailure({
|
||||
ok: false,
|
||||
status: 503,
|
||||
text: async () =>
|
||||
"upstream connect error or disconnect/reset before headers. reset reason: overflow",
|
||||
} as Response);
|
||||
const plugin = createDiscordGatewayPlugin({
|
||||
discordConfig: {},
|
||||
runtime,
|
||||
});
|
||||
|
||||
await expect(
|
||||
(
|
||||
plugin as unknown as {
|
||||
registerClient: (client: { options: { token: string } }) => Promise<void>;
|
||||
}
|
||||
).registerClient({
|
||||
options: { token: "token-123" },
|
||||
}),
|
||||
).rejects.toThrow("Failed to get gateway information from Discord: fetch failed");
|
||||
expect(baseRegisterClientSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("uses proxy agent for gateway WebSocket when configured", async () => {
|
||||
@@ -257,28 +265,12 @@ describe("createDiscordGatewayPlugin", () => {
|
||||
});
|
||||
|
||||
it("maps body read failures to fetch failed", async () => {
|
||||
const runtime = createRuntime();
|
||||
globalFetchMock.mockResolvedValue({
|
||||
await expectGatewayRegisterFetchFailure({
|
||||
ok: true,
|
||||
status: 200,
|
||||
text: async () => {
|
||||
throw new Error("body stream closed");
|
||||
},
|
||||
} as unknown as Response);
|
||||
const plugin = createDiscordGatewayPlugin({
|
||||
discordConfig: {},
|
||||
runtime,
|
||||
});
|
||||
|
||||
await expect(
|
||||
(
|
||||
plugin as unknown as {
|
||||
registerClient: (client: { options: { token: string } }) => Promise<void>;
|
||||
}
|
||||
).registerClient({
|
||||
options: { token: "token-123" },
|
||||
}),
|
||||
).rejects.toThrow("Failed to get gateway information from Discord: fetch failed");
|
||||
expect(baseRegisterClientSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import {
|
||||
AUTH_RATE_LIMIT_SCOPE_DEVICE_TOKEN,
|
||||
AUTH_RATE_LIMIT_SCOPE_HOOK_AUTH,
|
||||
AUTH_RATE_LIMIT_SCOPE_SHARED_SECRET,
|
||||
createAuthRateLimiter,
|
||||
type AuthRateLimiter,
|
||||
@@ -8,6 +9,23 @@ import {
|
||||
|
||||
describe("auth rate limiter", () => {
|
||||
let limiter: AuthRateLimiter;
|
||||
const baseConfig = { maxAttempts: 2, windowMs: 60_000, lockoutMs: 60_000 };
|
||||
|
||||
function createLimiter(
|
||||
overrides?: Partial<{
|
||||
maxAttempts: number;
|
||||
windowMs: number;
|
||||
lockoutMs: number;
|
||||
exemptLoopback: boolean;
|
||||
pruneIntervalMs: number;
|
||||
}>,
|
||||
) {
|
||||
limiter = createAuthRateLimiter({
|
||||
...baseConfig,
|
||||
...overrides,
|
||||
});
|
||||
return limiter;
|
||||
}
|
||||
|
||||
afterEach(() => {
|
||||
limiter?.dispose();
|
||||
@@ -32,7 +50,7 @@ describe("auth rate limiter", () => {
|
||||
});
|
||||
|
||||
it("blocks the IP once maxAttempts is reached", () => {
|
||||
limiter = createAuthRateLimiter({ maxAttempts: 2, windowMs: 60_000, lockoutMs: 10_000 });
|
||||
createLimiter({ lockoutMs: 10_000 });
|
||||
limiter.recordFailure("10.0.0.2");
|
||||
limiter.recordFailure("10.0.0.2");
|
||||
const result = limiter.check("10.0.0.2");
|
||||
@@ -42,12 +60,20 @@ describe("auth rate limiter", () => {
|
||||
expect(result.retryAfterMs).toBeLessThanOrEqual(10_000);
|
||||
});
|
||||
|
||||
it("treats blank scopes as the default scope", () => {
|
||||
createLimiter();
|
||||
limiter.recordFailure("10.0.0.8", " ");
|
||||
limiter.recordFailure("10.0.0.8");
|
||||
expect(limiter.check("10.0.0.8").allowed).toBe(false);
|
||||
expect(limiter.check("10.0.0.8", " \t ").allowed).toBe(false);
|
||||
});
|
||||
|
||||
// ---------- lockout expiry ----------
|
||||
|
||||
it("unblocks after the lockout period expires", () => {
|
||||
vi.useFakeTimers();
|
||||
try {
|
||||
limiter = createAuthRateLimiter({ maxAttempts: 2, windowMs: 60_000, lockoutMs: 5_000 });
|
||||
createLimiter({ lockoutMs: 5_000 });
|
||||
limiter.recordFailure("10.0.0.3");
|
||||
limiter.recordFailure("10.0.0.3");
|
||||
expect(limiter.check("10.0.0.3").allowed).toBe(false);
|
||||
@@ -62,6 +88,25 @@ describe("auth rate limiter", () => {
|
||||
}
|
||||
});
|
||||
|
||||
it("does not extend lockout when failures are recorded while already locked", () => {
|
||||
vi.useFakeTimers();
|
||||
try {
|
||||
createLimiter({ lockoutMs: 5_000 });
|
||||
limiter.recordFailure("10.0.0.33");
|
||||
limiter.recordFailure("10.0.0.33");
|
||||
const locked = limiter.check("10.0.0.33");
|
||||
expect(locked.allowed).toBe(false);
|
||||
const initialRetryAfter = locked.retryAfterMs;
|
||||
|
||||
vi.advanceTimersByTime(1_000);
|
||||
limiter.recordFailure("10.0.0.33");
|
||||
const afterExtraFailure = limiter.check("10.0.0.33");
|
||||
expect(afterExtraFailure.retryAfterMs).toBeLessThanOrEqual(initialRetryAfter - 1_000);
|
||||
} finally {
|
||||
vi.useRealTimers();
|
||||
}
|
||||
});
|
||||
|
||||
// ---------- sliding window expiry ----------
|
||||
|
||||
it("expires old failures outside the window", () => {
|
||||
@@ -83,7 +128,7 @@ describe("auth rate limiter", () => {
|
||||
// ---------- per-IP isolation ----------
|
||||
|
||||
it("tracks IPs independently", () => {
|
||||
limiter = createAuthRateLimiter({ maxAttempts: 2, windowMs: 60_000, lockoutMs: 60_000 });
|
||||
createLimiter();
|
||||
limiter.recordFailure("10.0.0.10");
|
||||
limiter.recordFailure("10.0.0.10");
|
||||
expect(limiter.check("10.0.0.10").allowed).toBe(false);
|
||||
@@ -99,26 +144,22 @@ describe("auth rate limiter", () => {
|
||||
expect(limiter.check("::ffff:1.2.3.4").allowed).toBe(false);
|
||||
});
|
||||
|
||||
it("tracks scopes independently for the same IP", () => {
|
||||
limiter = createAuthRateLimiter({ maxAttempts: 1, windowMs: 60_000, lockoutMs: 60_000 });
|
||||
limiter.recordFailure("10.0.0.12", AUTH_RATE_LIMIT_SCOPE_SHARED_SECRET);
|
||||
expect(limiter.check("10.0.0.12", AUTH_RATE_LIMIT_SCOPE_SHARED_SECRET).allowed).toBe(false);
|
||||
expect(limiter.check("10.0.0.12", AUTH_RATE_LIMIT_SCOPE_DEVICE_TOKEN).allowed).toBe(true);
|
||||
});
|
||||
it.each([AUTH_RATE_LIMIT_SCOPE_DEVICE_TOKEN, AUTH_RATE_LIMIT_SCOPE_HOOK_AUTH])(
|
||||
"tracks %s independently from shared-secret for the same IP",
|
||||
(otherScope) => {
|
||||
limiter = createAuthRateLimiter({ maxAttempts: 1, windowMs: 60_000, lockoutMs: 60_000 });
|
||||
limiter.recordFailure("10.0.0.12", AUTH_RATE_LIMIT_SCOPE_SHARED_SECRET);
|
||||
expect(limiter.check("10.0.0.12", AUTH_RATE_LIMIT_SCOPE_SHARED_SECRET).allowed).toBe(false);
|
||||
expect(limiter.check("10.0.0.12", otherScope).allowed).toBe(true);
|
||||
},
|
||||
);
|
||||
|
||||
// ---------- loopback exemption ----------
|
||||
|
||||
it("exempts loopback addresses by default", () => {
|
||||
it.each(["127.0.0.1", "::1"])("exempts loopback address %s by default", (ip) => {
|
||||
limiter = createAuthRateLimiter({ maxAttempts: 1, windowMs: 60_000, lockoutMs: 60_000 });
|
||||
limiter.recordFailure("127.0.0.1");
|
||||
// Should still be allowed even though maxAttempts is 1.
|
||||
expect(limiter.check("127.0.0.1").allowed).toBe(true);
|
||||
});
|
||||
|
||||
it("exempts IPv6 loopback by default", () => {
|
||||
limiter = createAuthRateLimiter({ maxAttempts: 1, windowMs: 60_000, lockoutMs: 60_000 });
|
||||
limiter.recordFailure("::1");
|
||||
expect(limiter.check("::1").allowed).toBe(true);
|
||||
limiter.recordFailure(ip);
|
||||
expect(limiter.check(ip).allowed).toBe(true);
|
||||
});
|
||||
|
||||
it("rate-limits loopback when exemptLoopback is false", () => {
|
||||
@@ -135,7 +176,7 @@ describe("auth rate limiter", () => {
|
||||
// ---------- reset ----------
|
||||
|
||||
it("clears tracking state when reset is called", () => {
|
||||
limiter = createAuthRateLimiter({ maxAttempts: 2, windowMs: 60_000, lockoutMs: 60_000 });
|
||||
createLimiter();
|
||||
limiter.recordFailure("10.0.0.20");
|
||||
limiter.recordFailure("10.0.0.20");
|
||||
expect(limiter.check("10.0.0.20").allowed).toBe(false);
|
||||
@@ -193,7 +234,7 @@ describe("auth rate limiter", () => {
|
||||
// ---------- undefined / empty IP ----------
|
||||
|
||||
it("normalizes undefined IP to 'unknown'", () => {
|
||||
limiter = createAuthRateLimiter({ maxAttempts: 2, windowMs: 60_000, lockoutMs: 60_000 });
|
||||
createLimiter();
|
||||
limiter.recordFailure(undefined);
|
||||
limiter.recordFailure(undefined);
|
||||
expect(limiter.check(undefined).allowed).toBe(false);
|
||||
@@ -201,7 +242,7 @@ describe("auth rate limiter", () => {
|
||||
});
|
||||
|
||||
it("normalizes empty-string IP to 'unknown'", () => {
|
||||
limiter = createAuthRateLimiter({ maxAttempts: 2, windowMs: 60_000, lockoutMs: 60_000 });
|
||||
createLimiter();
|
||||
limiter.recordFailure("");
|
||||
limiter.recordFailure("");
|
||||
expect(limiter.check("").allowed).toBe(false);
|
||||
|
||||
@@ -101,6 +101,7 @@ vi.mock("../logger.js", async (importOriginal) => {
|
||||
});
|
||||
|
||||
const { GatewayClient } = await import("./client.js");
|
||||
type GatewayClientInstance = InstanceType<typeof GatewayClient>;
|
||||
|
||||
function getLatestWs(): MockWebSocket {
|
||||
const ws = wsInstances.at(-1);
|
||||
@@ -344,6 +345,20 @@ describe("GatewayClient connect auth payload", () => {
|
||||
return parsed.params?.auth ?? {};
|
||||
}
|
||||
|
||||
function connectRequestFrom(ws: MockWebSocket) {
|
||||
const raw = ws.sent.find((frame) => frame.includes('"method":"connect"'));
|
||||
expect(raw).toBeTruthy();
|
||||
return JSON.parse(raw ?? "{}") as {
|
||||
id?: string;
|
||||
params?: {
|
||||
auth?: {
|
||||
token?: string;
|
||||
deviceToken?: string;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
function emitConnectChallenge(ws: MockWebSocket, nonce = "nonce-1") {
|
||||
ws.emitMessage(
|
||||
JSON.stringify({
|
||||
@@ -354,6 +369,63 @@ describe("GatewayClient connect auth payload", () => {
|
||||
);
|
||||
}
|
||||
|
||||
function startClientAndConnect(params: { client: GatewayClientInstance; nonce?: string }) {
|
||||
params.client.start();
|
||||
const ws = getLatestWs();
|
||||
ws.emitOpen();
|
||||
emitConnectChallenge(ws, params.nonce);
|
||||
return { ws, connect: connectRequestFrom(ws) };
|
||||
}
|
||||
|
||||
function emitConnectFailure(
|
||||
ws: MockWebSocket,
|
||||
connectId: string | undefined,
|
||||
details: Record<string, unknown>,
|
||||
) {
|
||||
ws.emitMessage(
|
||||
JSON.stringify({
|
||||
type: "res",
|
||||
id: connectId,
|
||||
ok: false,
|
||||
error: {
|
||||
code: "INVALID_REQUEST",
|
||||
message: "unauthorized",
|
||||
details,
|
||||
},
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
async function expectRetriedConnectAuth(params: {
|
||||
firstWs: MockWebSocket;
|
||||
connectId: string | undefined;
|
||||
failureDetails: Record<string, unknown>;
|
||||
}) {
|
||||
emitConnectFailure(params.firstWs, params.connectId, params.failureDetails);
|
||||
await vi.waitFor(() => expect(wsInstances.length).toBeGreaterThan(1), { timeout: 3_000 });
|
||||
const ws = getLatestWs();
|
||||
ws.emitOpen();
|
||||
emitConnectChallenge(ws, "nonce-2");
|
||||
return connectFrameFrom(ws);
|
||||
}
|
||||
|
||||
async function expectNoReconnectAfterConnectFailure(params: {
|
||||
client: GatewayClientInstance;
|
||||
firstWs: MockWebSocket;
|
||||
connectId: string | undefined;
|
||||
failureDetails: Record<string, unknown>;
|
||||
}) {
|
||||
vi.useFakeTimers();
|
||||
try {
|
||||
emitConnectFailure(params.firstWs, params.connectId, params.failureDetails);
|
||||
await vi.advanceTimersByTimeAsync(30_000);
|
||||
expect(wsInstances).toHaveLength(1);
|
||||
} finally {
|
||||
params.client.stop();
|
||||
vi.useRealTimers();
|
||||
}
|
||||
}
|
||||
|
||||
it("uses explicit shared token and does not inject stored device token", () => {
|
||||
loadDeviceAuthTokenMock.mockReturnValue({ token: "stored-device-token" });
|
||||
const client = new GatewayClient({
|
||||
@@ -457,37 +529,16 @@ describe("GatewayClient connect auth payload", () => {
|
||||
token: "shared-token",
|
||||
});
|
||||
|
||||
client.start();
|
||||
const ws1 = getLatestWs();
|
||||
ws1.emitOpen();
|
||||
emitConnectChallenge(ws1);
|
||||
const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"'));
|
||||
expect(firstConnectRaw).toBeTruthy();
|
||||
const firstConnect = JSON.parse(firstConnectRaw ?? "{}") as {
|
||||
id?: string;
|
||||
params?: { auth?: { token?: string; deviceToken?: string } };
|
||||
};
|
||||
const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client });
|
||||
expect(firstConnect.params?.auth?.token).toBe("shared-token");
|
||||
expect(firstConnect.params?.auth?.deviceToken).toBeUndefined();
|
||||
|
||||
ws1.emitMessage(
|
||||
JSON.stringify({
|
||||
type: "res",
|
||||
id: firstConnect.id,
|
||||
ok: false,
|
||||
error: {
|
||||
code: "INVALID_REQUEST",
|
||||
message: "unauthorized",
|
||||
details: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true },
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
await vi.waitFor(() => expect(wsInstances.length).toBeGreaterThan(1), { timeout: 3_000 });
|
||||
const ws2 = getLatestWs();
|
||||
ws2.emitOpen();
|
||||
emitConnectChallenge(ws2, "nonce-2");
|
||||
expect(connectFrameFrom(ws2)).toMatchObject({
|
||||
const retriedAuth = await expectRetriedConnectAuth({
|
||||
firstWs: ws1,
|
||||
connectId: firstConnect.id,
|
||||
failureDetails: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true },
|
||||
});
|
||||
expect(retriedAuth).toMatchObject({
|
||||
token: "shared-token",
|
||||
deviceToken: "stored-device-token",
|
||||
});
|
||||
@@ -501,32 +552,13 @@ describe("GatewayClient connect auth payload", () => {
|
||||
token: "shared-token",
|
||||
});
|
||||
|
||||
client.start();
|
||||
const ws1 = getLatestWs();
|
||||
ws1.emitOpen();
|
||||
emitConnectChallenge(ws1);
|
||||
const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"'));
|
||||
expect(firstConnectRaw).toBeTruthy();
|
||||
const firstConnect = JSON.parse(firstConnectRaw ?? "{}") as { id?: string };
|
||||
|
||||
ws1.emitMessage(
|
||||
JSON.stringify({
|
||||
type: "res",
|
||||
id: firstConnect.id,
|
||||
ok: false,
|
||||
error: {
|
||||
code: "INVALID_REQUEST",
|
||||
message: "unauthorized",
|
||||
details: { code: "AUTH_UNAUTHORIZED", recommendedNextStep: "retry_with_device_token" },
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
await vi.waitFor(() => expect(wsInstances.length).toBeGreaterThan(1), { timeout: 3_000 });
|
||||
const ws2 = getLatestWs();
|
||||
ws2.emitOpen();
|
||||
emitConnectChallenge(ws2, "nonce-2");
|
||||
expect(connectFrameFrom(ws2)).toMatchObject({
|
||||
const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client });
|
||||
const retriedAuth = await expectRetriedConnectAuth({
|
||||
firstWs: ws1,
|
||||
connectId: firstConnect.id,
|
||||
failureDetails: { code: "AUTH_UNAUTHORIZED", recommendedNextStep: "retry_with_device_token" },
|
||||
});
|
||||
expect(retriedAuth).toMatchObject({
|
||||
token: "shared-token",
|
||||
deviceToken: "stored-device-token",
|
||||
});
|
||||
@@ -534,71 +566,33 @@ describe("GatewayClient connect auth payload", () => {
|
||||
});
|
||||
|
||||
it("does not auto-reconnect on AUTH_TOKEN_MISSING connect failures", async () => {
|
||||
vi.useFakeTimers();
|
||||
const client = new GatewayClient({
|
||||
url: "ws://127.0.0.1:18789",
|
||||
token: "shared-token",
|
||||
});
|
||||
|
||||
client.start();
|
||||
const ws1 = getLatestWs();
|
||||
ws1.emitOpen();
|
||||
emitConnectChallenge(ws1);
|
||||
const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"'));
|
||||
expect(firstConnectRaw).toBeTruthy();
|
||||
const firstConnect = JSON.parse(firstConnectRaw ?? "{}") as { id?: string };
|
||||
|
||||
ws1.emitMessage(
|
||||
JSON.stringify({
|
||||
type: "res",
|
||||
id: firstConnect.id,
|
||||
ok: false,
|
||||
error: {
|
||||
code: "INVALID_REQUEST",
|
||||
message: "unauthorized",
|
||||
details: { code: "AUTH_TOKEN_MISSING" },
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
await vi.advanceTimersByTimeAsync(30_000);
|
||||
expect(wsInstances).toHaveLength(1);
|
||||
client.stop();
|
||||
vi.useRealTimers();
|
||||
const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client });
|
||||
await expectNoReconnectAfterConnectFailure({
|
||||
client,
|
||||
firstWs: ws1,
|
||||
connectId: firstConnect.id,
|
||||
failureDetails: { code: "AUTH_TOKEN_MISSING" },
|
||||
});
|
||||
});
|
||||
|
||||
it("does not auto-reconnect on token mismatch when retry is not trusted", async () => {
|
||||
vi.useFakeTimers();
|
||||
loadDeviceAuthTokenMock.mockReturnValue({ token: "stored-device-token" });
|
||||
const client = new GatewayClient({
|
||||
url: "wss://gateway.example.com:18789",
|
||||
token: "shared-token",
|
||||
});
|
||||
|
||||
client.start();
|
||||
const ws1 = getLatestWs();
|
||||
ws1.emitOpen();
|
||||
emitConnectChallenge(ws1);
|
||||
const firstConnectRaw = ws1.sent.find((frame) => frame.includes('"method":"connect"'));
|
||||
expect(firstConnectRaw).toBeTruthy();
|
||||
const firstConnect = JSON.parse(firstConnectRaw ?? "{}") as { id?: string };
|
||||
|
||||
ws1.emitMessage(
|
||||
JSON.stringify({
|
||||
type: "res",
|
||||
id: firstConnect.id,
|
||||
ok: false,
|
||||
error: {
|
||||
code: "INVALID_REQUEST",
|
||||
message: "unauthorized",
|
||||
details: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true },
|
||||
},
|
||||
}),
|
||||
);
|
||||
|
||||
await vi.advanceTimersByTimeAsync(30_000);
|
||||
expect(wsInstances).toHaveLength(1);
|
||||
client.stop();
|
||||
vi.useRealTimers();
|
||||
const { ws: ws1, connect: firstConnect } = startClientAndConnect({ client });
|
||||
await expectNoReconnectAfterConnectFailure({
|
||||
client,
|
||||
firstWs: ws1,
|
||||
connectId: firstConnect.id,
|
||||
failureDetails: { code: "AUTH_TOKEN_MISMATCH", canRetryWithDeviceToken: true },
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -95,7 +95,7 @@ export type GatewayClientOptions = {
|
||||
commands?: string[];
|
||||
permissions?: Record<string, boolean>;
|
||||
pathEnv?: string;
|
||||
deviceIdentity?: DeviceIdentity;
|
||||
deviceIdentity?: DeviceIdentity | null;
|
||||
minProtocol?: number;
|
||||
maxProtocol?: number;
|
||||
tlsFingerprint?: string;
|
||||
@@ -138,7 +138,10 @@ export class GatewayClient {
|
||||
constructor(opts: GatewayClientOptions) {
|
||||
this.opts = {
|
||||
...opts,
|
||||
deviceIdentity: opts.deviceIdentity ?? loadOrCreateDeviceIdentity(),
|
||||
deviceIdentity:
|
||||
opts.deviceIdentity === null
|
||||
? undefined
|
||||
: (opts.deviceIdentity ?? loadOrCreateDeviceIdentity()),
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@@ -40,25 +40,6 @@ describe("handleControlUiHttpRequest", () => {
|
||||
expect(params.end).toHaveBeenCalledWith("Not Found");
|
||||
}
|
||||
|
||||
function expectUnhandledRoutes(params: {
|
||||
urls: string[];
|
||||
method: "GET" | "POST";
|
||||
rootPath: string;
|
||||
basePath?: string;
|
||||
expectationLabel: string;
|
||||
}) {
|
||||
for (const url of params.urls) {
|
||||
const { handled, end } = runControlUiRequest({
|
||||
url,
|
||||
method: params.method,
|
||||
rootPath: params.rootPath,
|
||||
...(params.basePath ? { basePath: params.basePath } : {}),
|
||||
});
|
||||
expect(handled, `${params.expectationLabel}: ${url}`).toBe(false);
|
||||
expect(end, `${params.expectationLabel}: ${url}`).not.toHaveBeenCalled();
|
||||
}
|
||||
}
|
||||
|
||||
function runControlUiRequest(params: {
|
||||
url: string;
|
||||
method: "GET" | "HEAD" | "POST";
|
||||
@@ -104,6 +85,13 @@ describe("handleControlUiHttpRequest", () => {
|
||||
return { assetsDir, filePath };
|
||||
}
|
||||
|
||||
async function createHardlinkedAssetFile(rootPath: string) {
|
||||
const { filePath } = await writeAssetFile(rootPath, "app.js", "console.log('hi');");
|
||||
const hardlinkPath = path.join(path.dirname(filePath), "app.hl.js");
|
||||
await fs.link(filePath, hardlinkPath);
|
||||
return hardlinkPath;
|
||||
}
|
||||
|
||||
async function withBasePathRootFixture<T>(params: {
|
||||
siblingDir: string;
|
||||
fn: (paths: { root: string; sibling: string }) => Promise<T>;
|
||||
@@ -166,80 +154,53 @@ describe("handleControlUiHttpRequest", () => {
|
||||
});
|
||||
});
|
||||
|
||||
it.each([
|
||||
{
|
||||
name: "at root",
|
||||
url: CONTROL_UI_BOOTSTRAP_CONFIG_PATH,
|
||||
expectedBasePath: "",
|
||||
assistantName: "</script><script>alert(1)//",
|
||||
assistantAvatar: "</script>.png",
|
||||
expectedAvatarUrl: "/avatar/main",
|
||||
},
|
||||
{
|
||||
name: "under basePath",
|
||||
url: `/openclaw${CONTROL_UI_BOOTSTRAP_CONFIG_PATH}`,
|
||||
basePath: "/openclaw",
|
||||
expectedBasePath: "/openclaw",
|
||||
assistantName: "Ops",
|
||||
assistantAvatar: "ops.png",
|
||||
expectedAvatarUrl: "/openclaw/avatar/main",
|
||||
},
|
||||
])("serves bootstrap config JSON $name", async (testCase) => {
|
||||
it("serves bootstrap config JSON", async () => {
|
||||
await withControlUiRoot({
|
||||
fn: async (tmp) => {
|
||||
const { res, end } = makeMockHttpResponse();
|
||||
const handled = handleControlUiHttpRequest(
|
||||
{ url: testCase.url, method: "GET" } as IncomingMessage,
|
||||
{ url: CONTROL_UI_BOOTSTRAP_CONFIG_PATH, method: "GET" } as IncomingMessage,
|
||||
res,
|
||||
{
|
||||
...(testCase.basePath ? { basePath: testCase.basePath } : {}),
|
||||
root: { kind: "resolved", path: tmp },
|
||||
config: {
|
||||
agents: { defaults: { workspace: tmp } },
|
||||
ui: {
|
||||
assistant: {
|
||||
name: testCase.assistantName,
|
||||
avatar: testCase.assistantAvatar,
|
||||
},
|
||||
},
|
||||
ui: { assistant: { name: "</script><script>alert(1)//", avatar: "</script>.png" } },
|
||||
},
|
||||
},
|
||||
);
|
||||
expect(handled).toBe(true);
|
||||
const parsed = parseBootstrapPayload(end);
|
||||
expect(parsed.basePath).toBe(testCase.expectedBasePath);
|
||||
expect(parsed.assistantName).toBe(testCase.assistantName);
|
||||
expect(parsed.assistantAvatar).toBe(testCase.expectedAvatarUrl);
|
||||
expect(parsed.basePath).toBe("");
|
||||
expect(parsed.assistantName).toBe("</script><script>alert(1)//");
|
||||
expect(parsed.assistantAvatar).toBe("/avatar/main");
|
||||
expect(parsed.assistantAgentId).toBe("main");
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it.each([
|
||||
{
|
||||
name: "at root",
|
||||
url: CONTROL_UI_BOOTSTRAP_CONFIG_PATH,
|
||||
},
|
||||
{
|
||||
name: "under basePath",
|
||||
url: `/openclaw${CONTROL_UI_BOOTSTRAP_CONFIG_PATH}`,
|
||||
basePath: "/openclaw",
|
||||
},
|
||||
])("serves bootstrap config HEAD $name without writing a body", async (testCase) => {
|
||||
it("serves bootstrap config JSON under basePath", async () => {
|
||||
await withControlUiRoot({
|
||||
fn: async (tmp) => {
|
||||
const { res, end } = makeMockHttpResponse();
|
||||
const handled = handleControlUiHttpRequest(
|
||||
{ url: testCase.url, method: "HEAD" } as IncomingMessage,
|
||||
{ url: `/openclaw${CONTROL_UI_BOOTSTRAP_CONFIG_PATH}`, method: "GET" } as IncomingMessage,
|
||||
res,
|
||||
{
|
||||
...(testCase.basePath ? { basePath: testCase.basePath } : {}),
|
||||
basePath: "/openclaw",
|
||||
root: { kind: "resolved", path: tmp },
|
||||
config: {
|
||||
agents: { defaults: { workspace: tmp } },
|
||||
ui: { assistant: { name: "Ops", avatar: "ops.png" } },
|
||||
},
|
||||
},
|
||||
);
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(200);
|
||||
expect(end.mock.calls[0]?.length ?? -1).toBe(0);
|
||||
const parsed = parseBootstrapPayload(end);
|
||||
expect(parsed.basePath).toBe("/openclaw");
|
||||
expect(parsed.assistantName).toBe("Ops");
|
||||
expect(parsed.assistantAvatar).toBe("/openclaw/avatar/main");
|
||||
expect(parsed.assistantAgentId).toBe("main");
|
||||
},
|
||||
});
|
||||
});
|
||||
@@ -396,37 +357,39 @@ describe("handleControlUiHttpRequest", () => {
|
||||
});
|
||||
});
|
||||
|
||||
it.each([
|
||||
{
|
||||
name: "rejects hardlinked asset files for custom/resolved roots",
|
||||
rootKind: "resolved" as const,
|
||||
expectedStatus: 404,
|
||||
expectedBody: "Not Found",
|
||||
},
|
||||
{
|
||||
name: "serves hardlinked asset files for bundled roots",
|
||||
rootKind: "bundled" as const,
|
||||
expectedStatus: 200,
|
||||
expectedBody: "console.log('hi');",
|
||||
},
|
||||
])("$name", async (testCase) => {
|
||||
it("rejects hardlinked asset files for custom/resolved roots (security boundary)", async () => {
|
||||
await withControlUiRoot({
|
||||
fn: async (tmp) => {
|
||||
const assetsDir = path.join(tmp, "assets");
|
||||
await fs.mkdir(assetsDir, { recursive: true });
|
||||
await fs.writeFile(path.join(assetsDir, "app.js"), "console.log('hi');");
|
||||
await fs.link(path.join(assetsDir, "app.js"), path.join(assetsDir, "app.hl.js"));
|
||||
await createHardlinkedAssetFile(tmp);
|
||||
|
||||
const { res, end, handled } = runControlUiRequest({
|
||||
url: "/assets/app.hl.js",
|
||||
method: "GET",
|
||||
rootPath: tmp,
|
||||
rootKind: testCase.rootKind,
|
||||
});
|
||||
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(testCase.expectedStatus);
|
||||
expect(String(end.mock.calls[0]?.[0] ?? "")).toBe(testCase.expectedBody);
|
||||
expect(res.statusCode).toBe(404);
|
||||
expect(end).toHaveBeenCalledWith("Not Found");
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it("serves hardlinked asset files for bundled roots (pnpm global install)", async () => {
|
||||
await withControlUiRoot({
|
||||
fn: async (tmp) => {
|
||||
await createHardlinkedAssetFile(tmp);
|
||||
|
||||
const { res, end, handled } = runControlUiRequest({
|
||||
url: "/assets/app.hl.js",
|
||||
method: "GET",
|
||||
rootPath: tmp,
|
||||
rootKind: "bundled",
|
||||
});
|
||||
|
||||
expect(handled).toBe(true);
|
||||
expect(res.statusCode).toBe(200);
|
||||
expect(String(end.mock.calls[0]?.[0] ?? "")).toBe("console.log('hi');");
|
||||
},
|
||||
});
|
||||
});
|
||||
@@ -434,12 +397,17 @@ describe("handleControlUiHttpRequest", () => {
|
||||
it("does not handle POST to root-mounted paths (plugin webhook passthrough)", async () => {
|
||||
await withControlUiRoot({
|
||||
fn: async (tmp) => {
|
||||
expectUnhandledRoutes({
|
||||
urls: ["/bluebubbles-webhook", "/custom-webhook", "/callback"],
|
||||
method: "POST",
|
||||
rootPath: tmp,
|
||||
expectationLabel: "POST should pass through to plugin handlers",
|
||||
});
|
||||
for (const webhookPath of ["/bluebubbles-webhook", "/custom-webhook", "/callback"]) {
|
||||
const { res } = makeMockHttpResponse();
|
||||
const handled = handleControlUiHttpRequest(
|
||||
{ url: webhookPath, method: "POST" } as IncomingMessage,
|
||||
res,
|
||||
{ root: { kind: "resolved", path: tmp } },
|
||||
);
|
||||
expect(handled, `POST to ${webhookPath} should pass through to plugin handlers`).toBe(
|
||||
false,
|
||||
);
|
||||
}
|
||||
},
|
||||
});
|
||||
});
|
||||
@@ -447,35 +415,43 @@ describe("handleControlUiHttpRequest", () => {
|
||||
it("does not handle POST to paths outside basePath", async () => {
|
||||
await withControlUiRoot({
|
||||
fn: async (tmp) => {
|
||||
expectUnhandledRoutes({
|
||||
urls: ["/bluebubbles-webhook"],
|
||||
method: "POST",
|
||||
rootPath: tmp,
|
||||
basePath: "/openclaw",
|
||||
expectationLabel: "POST outside basePath should pass through",
|
||||
});
|
||||
const { res } = makeMockHttpResponse();
|
||||
const handled = handleControlUiHttpRequest(
|
||||
{ url: "/bluebubbles-webhook", method: "POST" } as IncomingMessage,
|
||||
res,
|
||||
{ basePath: "/openclaw", root: { kind: "resolved", path: tmp } },
|
||||
);
|
||||
expect(handled).toBe(false);
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it.each([
|
||||
{
|
||||
name: "does not handle /api paths when basePath is empty",
|
||||
urls: ["/api", "/api/sessions", "/api/channels/nostr"],
|
||||
},
|
||||
{
|
||||
name: "does not handle /plugins paths when basePath is empty",
|
||||
urls: ["/plugins", "/plugins/diffs/view/abc/def"],
|
||||
},
|
||||
])("$name", async (testCase) => {
|
||||
it("does not handle /api paths when basePath is empty", async () => {
|
||||
await withControlUiRoot({
|
||||
fn: async (tmp) => {
|
||||
expectUnhandledRoutes({
|
||||
urls: testCase.urls,
|
||||
method: "GET",
|
||||
rootPath: tmp,
|
||||
expectationLabel: "expected route to not be handled",
|
||||
});
|
||||
for (const apiPath of ["/api", "/api/sessions", "/api/channels/nostr"]) {
|
||||
const { handled } = runControlUiRequest({
|
||||
url: apiPath,
|
||||
method: "GET",
|
||||
rootPath: tmp,
|
||||
});
|
||||
expect(handled, `expected ${apiPath} to not be handled`).toBe(false);
|
||||
}
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it("does not handle /plugins paths when basePath is empty", async () => {
|
||||
await withControlUiRoot({
|
||||
fn: async (tmp) => {
|
||||
for (const pluginPath of ["/plugins", "/plugins/diffs/view/abc/def"]) {
|
||||
const { handled } = runControlUiRequest({
|
||||
url: pluginPath,
|
||||
method: "GET",
|
||||
rootPath: tmp,
|
||||
});
|
||||
expect(handled, `expected ${pluginPath} to not be handled`).toBe(false);
|
||||
}
|
||||
},
|
||||
});
|
||||
});
|
||||
@@ -483,12 +459,13 @@ describe("handleControlUiHttpRequest", () => {
|
||||
it("falls through POST requests when basePath is empty", async () => {
|
||||
await withControlUiRoot({
|
||||
fn: async (tmp) => {
|
||||
expectUnhandledRoutes({
|
||||
urls: ["/webhook/bluebubbles"],
|
||||
const { handled, end } = runControlUiRequest({
|
||||
url: "/webhook/bluebubbles",
|
||||
method: "POST",
|
||||
rootPath: tmp,
|
||||
expectationLabel: "POST webhook should fall through",
|
||||
});
|
||||
expect(handled).toBe(false);
|
||||
expect(end).not.toHaveBeenCalled();
|
||||
},
|
||||
});
|
||||
});
|
||||
@@ -496,13 +473,16 @@ describe("handleControlUiHttpRequest", () => {
|
||||
it("falls through POST requests under configured basePath (plugin webhook passthrough)", async () => {
|
||||
await withControlUiRoot({
|
||||
fn: async (tmp) => {
|
||||
expectUnhandledRoutes({
|
||||
urls: ["/openclaw", "/openclaw/", "/openclaw/some-page"],
|
||||
method: "POST",
|
||||
rootPath: tmp,
|
||||
basePath: "/openclaw",
|
||||
expectationLabel: "POST under basePath should pass through to plugin handlers",
|
||||
});
|
||||
for (const route of ["/openclaw", "/openclaw/", "/openclaw/some-page"]) {
|
||||
const { handled, end } = runControlUiRequest({
|
||||
url: route,
|
||||
method: "POST",
|
||||
rootPath: tmp,
|
||||
basePath: "/openclaw",
|
||||
});
|
||||
expect(handled, `POST to ${route} should pass through to plugin handlers`).toBe(false);
|
||||
expect(end, `POST to ${route} should not write a response`).not.toHaveBeenCalled();
|
||||
}
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
@@ -71,6 +71,43 @@ function resolveLocalModeWithUnresolvedPassword(mode: "none" | "trusted-proxy")
|
||||
});
|
||||
}
|
||||
|
||||
function expectUnresolvedLocalAuthSecretRefFailure(params: {
|
||||
authMode: "token" | "password";
|
||||
secretId: string;
|
||||
errorPath: "gateway.auth.token" | "gateway.auth.password";
|
||||
remote?: { token?: string; password?: string };
|
||||
}) {
|
||||
const localAuth =
|
||||
params.authMode === "token"
|
||||
? {
|
||||
mode: "token" as const,
|
||||
token: { source: "env", provider: "default", id: params.secretId },
|
||||
}
|
||||
: {
|
||||
mode: "password" as const,
|
||||
password: { source: "env", provider: "default", id: params.secretId },
|
||||
};
|
||||
|
||||
expect(() =>
|
||||
resolveGatewayCredentialsFromConfig({
|
||||
cfg: {
|
||||
gateway: {
|
||||
mode: "local",
|
||||
auth: localAuth,
|
||||
remote: params.remote,
|
||||
},
|
||||
secrets: {
|
||||
providers: {
|
||||
default: { source: "env" },
|
||||
},
|
||||
},
|
||||
} as unknown as OpenClawConfig,
|
||||
env: {} as NodeJS.ProcessEnv,
|
||||
includeLegacyEnv: false,
|
||||
}),
|
||||
).toThrow(params.errorPath);
|
||||
}
|
||||
|
||||
describe("resolveGatewayCredentialsFromConfig", () => {
|
||||
it("prefers explicit credentials over config and environment", () => {
|
||||
const resolved = resolveGatewayCredentialsFor(
|
||||
@@ -159,78 +196,29 @@ describe("resolveGatewayCredentialsFromConfig", () => {
|
||||
});
|
||||
|
||||
it("fails closed when local token SecretRef is unresolved and remote token fallback exists", () => {
|
||||
expect(() =>
|
||||
resolveGatewayCredentialsFromConfig({
|
||||
cfg: {
|
||||
gateway: {
|
||||
mode: "local",
|
||||
auth: {
|
||||
mode: "token",
|
||||
token: { source: "env", provider: "default", id: "MISSING_LOCAL_TOKEN" },
|
||||
},
|
||||
remote: {
|
||||
token: "remote-token",
|
||||
},
|
||||
},
|
||||
secrets: {
|
||||
providers: {
|
||||
default: { source: "env" },
|
||||
},
|
||||
},
|
||||
} as unknown as OpenClawConfig,
|
||||
env: {} as NodeJS.ProcessEnv,
|
||||
includeLegacyEnv: false,
|
||||
}),
|
||||
).toThrow("gateway.auth.token");
|
||||
expectUnresolvedLocalAuthSecretRefFailure({
|
||||
authMode: "token",
|
||||
secretId: "MISSING_LOCAL_TOKEN",
|
||||
errorPath: "gateway.auth.token",
|
||||
remote: { token: "remote-token" },
|
||||
});
|
||||
});
|
||||
|
||||
it("fails closed when local password SecretRef is unresolved and remote password fallback exists", () => {
|
||||
expect(() =>
|
||||
resolveGatewayCredentialsFromConfig({
|
||||
cfg: {
|
||||
gateway: {
|
||||
mode: "local",
|
||||
auth: {
|
||||
mode: "password",
|
||||
password: { source: "env", provider: "default", id: "MISSING_LOCAL_PASSWORD" },
|
||||
},
|
||||
remote: {
|
||||
password: "remote-password", // pragma: allowlist secret
|
||||
},
|
||||
},
|
||||
secrets: {
|
||||
providers: {
|
||||
default: { source: "env" },
|
||||
},
|
||||
},
|
||||
} as unknown as OpenClawConfig,
|
||||
env: {} as NodeJS.ProcessEnv,
|
||||
includeLegacyEnv: false,
|
||||
}),
|
||||
).toThrow("gateway.auth.password");
|
||||
expectUnresolvedLocalAuthSecretRefFailure({
|
||||
authMode: "password",
|
||||
secretId: "MISSING_LOCAL_PASSWORD",
|
||||
errorPath: "gateway.auth.password",
|
||||
remote: { password: "remote-password" }, // pragma: allowlist secret
|
||||
});
|
||||
});
|
||||
|
||||
it("throws when local password auth relies on an unresolved SecretRef", () => {
|
||||
expect(() =>
|
||||
resolveGatewayCredentialsFromConfig({
|
||||
cfg: {
|
||||
gateway: {
|
||||
mode: "local",
|
||||
auth: {
|
||||
mode: "password",
|
||||
password: { source: "env", provider: "default", id: "MISSING_GATEWAY_PASSWORD" },
|
||||
},
|
||||
},
|
||||
secrets: {
|
||||
providers: {
|
||||
default: { source: "env" },
|
||||
},
|
||||
},
|
||||
} as unknown as OpenClawConfig,
|
||||
env: {} as NodeJS.ProcessEnv,
|
||||
includeLegacyEnv: false,
|
||||
}),
|
||||
).toThrow("gateway.auth.password");
|
||||
expectUnresolvedLocalAuthSecretRefFailure({
|
||||
authMode: "password",
|
||||
secretId: "MISSING_GATEWAY_PASSWORD",
|
||||
errorPath: "gateway.auth.password",
|
||||
});
|
||||
});
|
||||
|
||||
it("treats env-template local tokens as SecretRefs instead of plaintext", () => {
|
||||
@@ -275,55 +263,21 @@ describe("resolveGatewayCredentialsFromConfig", () => {
|
||||
});
|
||||
|
||||
it("throws when unresolved local token SecretRef would otherwise fall back to remote token", () => {
|
||||
expect(() =>
|
||||
resolveGatewayCredentialsFromConfig({
|
||||
cfg: {
|
||||
gateway: {
|
||||
mode: "local",
|
||||
auth: {
|
||||
mode: "token",
|
||||
token: { source: "env", provider: "default", id: "MISSING_LOCAL_TOKEN" },
|
||||
},
|
||||
remote: {
|
||||
token: "remote-token",
|
||||
},
|
||||
},
|
||||
secrets: {
|
||||
providers: {
|
||||
default: { source: "env" },
|
||||
},
|
||||
},
|
||||
} as unknown as OpenClawConfig,
|
||||
env: {} as NodeJS.ProcessEnv,
|
||||
includeLegacyEnv: false,
|
||||
}),
|
||||
).toThrow("gateway.auth.token");
|
||||
expectUnresolvedLocalAuthSecretRefFailure({
|
||||
authMode: "token",
|
||||
secretId: "MISSING_LOCAL_TOKEN",
|
||||
errorPath: "gateway.auth.token",
|
||||
remote: { token: "remote-token" },
|
||||
});
|
||||
});
|
||||
|
||||
it("throws when unresolved local password SecretRef would otherwise fall back to remote password", () => {
|
||||
expect(() =>
|
||||
resolveGatewayCredentialsFromConfig({
|
||||
cfg: {
|
||||
gateway: {
|
||||
mode: "local",
|
||||
auth: {
|
||||
mode: "password",
|
||||
password: { source: "env", provider: "default", id: "MISSING_LOCAL_PASSWORD" },
|
||||
},
|
||||
remote: {
|
||||
password: "remote-password", // pragma: allowlist secret
|
||||
},
|
||||
},
|
||||
secrets: {
|
||||
providers: {
|
||||
default: { source: "env" },
|
||||
},
|
||||
},
|
||||
} as unknown as OpenClawConfig,
|
||||
env: {} as NodeJS.ProcessEnv,
|
||||
includeLegacyEnv: false,
|
||||
}),
|
||||
).toThrow("gateway.auth.password");
|
||||
expectUnresolvedLocalAuthSecretRefFailure({
|
||||
authMode: "password",
|
||||
secretId: "MISSING_LOCAL_PASSWORD",
|
||||
errorPath: "gateway.auth.password",
|
||||
remote: { password: "remote-password" }, // pragma: allowlist secret
|
||||
});
|
||||
});
|
||||
|
||||
it("ignores unresolved local password ref when local auth mode is none", () => {
|
||||
|
||||
@@ -8,14 +8,15 @@ import { listGatewayMethods } from "./server-methods-list.js";
|
||||
import { coreGatewayHandlers } from "./server-methods.js";
|
||||
|
||||
describe("method scope resolution", () => {
|
||||
it("classifies sessions.resolve + config.schema.lookup as read and poll as write", () => {
|
||||
expect(resolveLeastPrivilegeOperatorScopesForMethod("sessions.resolve")).toEqual([
|
||||
"operator.read",
|
||||
]);
|
||||
expect(resolveLeastPrivilegeOperatorScopesForMethod("config.schema.lookup")).toEqual([
|
||||
"operator.read",
|
||||
]);
|
||||
expect(resolveLeastPrivilegeOperatorScopesForMethod("poll")).toEqual(["operator.write"]);
|
||||
it.each([
|
||||
["sessions.resolve", ["operator.read"]],
|
||||
["config.schema.lookup", ["operator.read"]],
|
||||
["poll", ["operator.write"]],
|
||||
["config.patch", ["operator.admin"]],
|
||||
["wizard.start", ["operator.admin"]],
|
||||
["update.run", ["operator.admin"]],
|
||||
])("resolves least-privilege scopes for %s", (method, expected) => {
|
||||
expect(resolveLeastPrivilegeOperatorScopesForMethod(method)).toEqual(expected);
|
||||
});
|
||||
|
||||
it("leaves node-only pending drain outside operator scopes", () => {
|
||||
@@ -28,16 +29,13 @@ describe("method scope resolution", () => {
|
||||
});
|
||||
|
||||
describe("operator scope authorization", () => {
|
||||
it("allows read methods with operator.read or operator.write", () => {
|
||||
expect(authorizeOperatorScopesForMethod("health", ["operator.read"])).toEqual({
|
||||
allowed: true,
|
||||
});
|
||||
expect(authorizeOperatorScopesForMethod("health", ["operator.write"])).toEqual({
|
||||
allowed: true,
|
||||
});
|
||||
expect(authorizeOperatorScopesForMethod("config.schema.lookup", ["operator.read"])).toEqual({
|
||||
allowed: true,
|
||||
});
|
||||
it.each([
|
||||
["health", ["operator.read"], { allowed: true }],
|
||||
["health", ["operator.write"], { allowed: true }],
|
||||
["config.schema.lookup", ["operator.read"], { allowed: true }],
|
||||
["config.patch", ["operator.admin"], { allowed: true }],
|
||||
])("authorizes %s for scopes %j", (method, scopes, expected) => {
|
||||
expect(authorizeOperatorScopesForMethod(method, scopes)).toEqual(expected);
|
||||
});
|
||||
|
||||
it("requires operator.write for write methods", () => {
|
||||
@@ -63,6 +61,11 @@ describe("operator scope authorization", () => {
|
||||
});
|
||||
|
||||
describe("core gateway method classification", () => {
|
||||
it("treats node-role methods as classified even without operator scopes", () => {
|
||||
expect(isGatewayMethodClassified("node.pending.drain")).toBe(true);
|
||||
expect(isGatewayMethodClassified("node.pending.pull")).toBe(true);
|
||||
});
|
||||
|
||||
it("classifies every exposed core gateway handler method", () => {
|
||||
const unclassified = Object.keys(coreGatewayHandlers).filter(
|
||||
(method) => !isGatewayMethodClassified(method),
|
||||
|
||||
@@ -5,10 +5,21 @@ import {
|
||||
resolveGatewayProbeAuthWithSecretInputs,
|
||||
} from "./probe-auth.js";
|
||||
|
||||
function expectUnresolvedProbeTokenWarning(cfg: OpenClawConfig) {
|
||||
const result = resolveGatewayProbeAuthSafe({
|
||||
cfg,
|
||||
mode: "local",
|
||||
env: {} as NodeJS.ProcessEnv,
|
||||
});
|
||||
|
||||
expect(result.auth).toEqual({});
|
||||
expect(result.warning).toContain("gateway.auth.token");
|
||||
expect(result.warning).toContain("unresolved");
|
||||
}
|
||||
|
||||
describe("resolveGatewayProbeAuthSafe", () => {
|
||||
it.each([
|
||||
{
|
||||
name: "returns probe auth credentials when available",
|
||||
it("returns probe auth credentials when available", () => {
|
||||
const result = resolveGatewayProbeAuthSafe({
|
||||
cfg: {
|
||||
gateway: {
|
||||
auth: {
|
||||
@@ -16,65 +27,56 @@ describe("resolveGatewayProbeAuthSafe", () => {
|
||||
},
|
||||
},
|
||||
} as OpenClawConfig,
|
||||
mode: "local" as const,
|
||||
mode: "local",
|
||||
env: {} as NodeJS.ProcessEnv,
|
||||
expected: {
|
||||
});
|
||||
|
||||
expect(result).toEqual({
|
||||
auth: {
|
||||
token: "token-value",
|
||||
password: undefined,
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it("returns warning and empty auth when token SecretRef is unresolved", () => {
|
||||
expectUnresolvedProbeTokenWarning({
|
||||
gateway: {
|
||||
auth: {
|
||||
token: "token-value",
|
||||
password: undefined,
|
||||
mode: "token",
|
||||
token: { source: "env", provider: "default", id: "MISSING_GATEWAY_TOKEN" },
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "returns warning and empty auth when a local token SecretRef is unresolved",
|
||||
cfg: {
|
||||
gateway: {
|
||||
auth: {
|
||||
mode: "token",
|
||||
token: { source: "env", provider: "default", id: "MISSING_GATEWAY_TOKEN" },
|
||||
},
|
||||
secrets: {
|
||||
providers: {
|
||||
default: { source: "env" },
|
||||
},
|
||||
secrets: {
|
||||
providers: {
|
||||
default: { source: "env" },
|
||||
},
|
||||
},
|
||||
} as OpenClawConfig,
|
||||
mode: "local" as const,
|
||||
env: {} as NodeJS.ProcessEnv,
|
||||
expected: {
|
||||
auth: {},
|
||||
warningIncludes: ["gateway.auth.token", "unresolved"],
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "does not fall through to remote token when the local SecretRef is unresolved",
|
||||
cfg: {
|
||||
gateway: {
|
||||
mode: "local",
|
||||
auth: {
|
||||
mode: "token",
|
||||
token: { source: "env", provider: "default", id: "MISSING_GATEWAY_TOKEN" },
|
||||
},
|
||||
remote: {
|
||||
token: "remote-token",
|
||||
},
|
||||
} as OpenClawConfig);
|
||||
});
|
||||
|
||||
it("does not fall through to remote token when local token SecretRef is unresolved", () => {
|
||||
expectUnresolvedProbeTokenWarning({
|
||||
gateway: {
|
||||
mode: "local",
|
||||
auth: {
|
||||
mode: "token",
|
||||
token: { source: "env", provider: "default", id: "MISSING_GATEWAY_TOKEN" },
|
||||
},
|
||||
secrets: {
|
||||
providers: {
|
||||
default: { source: "env" },
|
||||
},
|
||||
remote: {
|
||||
token: "remote-token",
|
||||
},
|
||||
} as OpenClawConfig,
|
||||
mode: "local" as const,
|
||||
env: {} as NodeJS.ProcessEnv,
|
||||
expected: {
|
||||
auth: {},
|
||||
warningIncludes: ["gateway.auth.token", "unresolved"],
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "ignores unresolved local token SecretRefs in remote mode",
|
||||
secrets: {
|
||||
providers: {
|
||||
default: { source: "env" },
|
||||
},
|
||||
},
|
||||
} as OpenClawConfig);
|
||||
});
|
||||
|
||||
it("ignores unresolved local token SecretRef in remote mode when remote-only auth is requested", () => {
|
||||
const result = resolveGatewayProbeAuthSafe({
|
||||
cfg: {
|
||||
gateway: {
|
||||
mode: "remote",
|
||||
@@ -92,22 +94,16 @@ describe("resolveGatewayProbeAuthSafe", () => {
|
||||
},
|
||||
},
|
||||
} as OpenClawConfig,
|
||||
mode: "remote" as const,
|
||||
mode: "remote",
|
||||
env: {} as NodeJS.ProcessEnv,
|
||||
expected: {
|
||||
auth: {
|
||||
token: undefined,
|
||||
password: undefined,
|
||||
},
|
||||
},
|
||||
},
|
||||
])("$name", ({ cfg, mode, env, expected }) => {
|
||||
const result = resolveGatewayProbeAuthSafe({ cfg, mode, env });
|
||||
});
|
||||
|
||||
expect(result.auth).toEqual(expected.auth);
|
||||
for (const fragment of expected.warningIncludes ?? []) {
|
||||
expect(result.warning).toContain(fragment);
|
||||
}
|
||||
expect(result).toEqual({
|
||||
auth: {
|
||||
token: undefined,
|
||||
password: undefined,
|
||||
},
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ import { describe, expect, it, vi } from "vitest";
|
||||
|
||||
const gatewayClientState = vi.hoisted(() => ({
|
||||
options: null as Record<string, unknown> | null,
|
||||
requests: [] as string[],
|
||||
}));
|
||||
|
||||
class MockGatewayClient {
|
||||
@@ -10,6 +11,7 @@ class MockGatewayClient {
|
||||
constructor(opts: Record<string, unknown>) {
|
||||
this.opts = opts;
|
||||
gatewayClientState.options = opts;
|
||||
gatewayClientState.requests = [];
|
||||
}
|
||||
|
||||
start(): void {
|
||||
@@ -26,6 +28,7 @@ class MockGatewayClient {
|
||||
stop(): void {}
|
||||
|
||||
async request(method: string): Promise<unknown> {
|
||||
gatewayClientState.requests.push(method);
|
||||
if (method === "system-presence") {
|
||||
return [];
|
||||
}
|
||||
@@ -48,6 +51,34 @@ describe("probeGateway", () => {
|
||||
});
|
||||
|
||||
expect(gatewayClientState.options?.scopes).toEqual(["operator.read"]);
|
||||
expect(gatewayClientState.options?.deviceIdentity).toBeNull();
|
||||
expect(gatewayClientState.requests).toEqual([
|
||||
"health",
|
||||
"status",
|
||||
"system-presence",
|
||||
"config.get",
|
||||
]);
|
||||
expect(result.ok).toBe(true);
|
||||
});
|
||||
|
||||
it("keeps device identity enabled for remote probes", async () => {
|
||||
await probeGateway({
|
||||
url: "wss://gateway.example/ws",
|
||||
auth: { token: "secret" },
|
||||
timeoutMs: 1_000,
|
||||
});
|
||||
|
||||
expect(gatewayClientState.options?.deviceIdentity).toBeUndefined();
|
||||
});
|
||||
|
||||
it("skips detail RPCs for lightweight reachability probes", async () => {
|
||||
const result = await probeGateway({
|
||||
url: "ws://127.0.0.1:18789",
|
||||
timeoutMs: 1_000,
|
||||
includeDetails: false,
|
||||
});
|
||||
|
||||
expect(result.ok).toBe(true);
|
||||
expect(gatewayClientState.requests).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -4,6 +4,7 @@ import type { SystemPresence } from "../infra/system-presence.js";
|
||||
import { GATEWAY_CLIENT_MODES, GATEWAY_CLIENT_NAMES } from "../utils/message-channel.js";
|
||||
import { GatewayClient } from "./client.js";
|
||||
import { READ_SCOPE } from "./method-scopes.js";
|
||||
import { isLoopbackHost } from "./net.js";
|
||||
|
||||
export type GatewayProbeAuth = {
|
||||
token?: string;
|
||||
@@ -32,6 +33,7 @@ export async function probeGateway(opts: {
|
||||
url: string;
|
||||
auth?: GatewayProbeAuth;
|
||||
timeoutMs: number;
|
||||
includeDetails?: boolean;
|
||||
}): Promise<GatewayProbeResult> {
|
||||
const startedAt = Date.now();
|
||||
const instanceId = randomUUID();
|
||||
@@ -39,6 +41,14 @@ export async function probeGateway(opts: {
|
||||
let connectError: string | null = null;
|
||||
let close: GatewayProbeClose | null = null;
|
||||
|
||||
const disableDeviceIdentity = (() => {
|
||||
try {
|
||||
return isLoopbackHost(new URL(opts.url).hostname);
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
})();
|
||||
|
||||
return await new Promise<GatewayProbeResult>((resolve) => {
|
||||
let settled = false;
|
||||
const settle = (result: Omit<GatewayProbeResult, "url">) => {
|
||||
@@ -60,6 +70,7 @@ export async function probeGateway(opts: {
|
||||
clientVersion: "dev",
|
||||
mode: GATEWAY_CLIENT_MODES.PROBE,
|
||||
instanceId,
|
||||
deviceIdentity: disableDeviceIdentity ? null : undefined,
|
||||
onConnectError: (err) => {
|
||||
connectError = formatErrorMessage(err);
|
||||
},
|
||||
@@ -68,6 +79,19 @@ export async function probeGateway(opts: {
|
||||
},
|
||||
onHelloOk: async () => {
|
||||
connectLatencyMs = Date.now() - startedAt;
|
||||
if (opts.includeDetails === false) {
|
||||
settle({
|
||||
ok: true,
|
||||
connectLatencyMs,
|
||||
error: null,
|
||||
close,
|
||||
health: null,
|
||||
status: null,
|
||||
presence: null,
|
||||
configSnapshot: null,
|
||||
});
|
||||
return;
|
||||
}
|
||||
try {
|
||||
const [health, status, presence, configSnapshot] = await Promise.all([
|
||||
client.request("health"),
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import type { DedupeEntry } from "../server-shared.js";
|
||||
import {
|
||||
__testing,
|
||||
readTerminalSnapshotFromGatewayDedupe,
|
||||
@@ -7,6 +8,25 @@ import {
|
||||
} from "./agent-wait-dedupe.js";
|
||||
|
||||
describe("agent wait dedupe helper", () => {
|
||||
function setRunEntry(params: {
|
||||
dedupe: Map<string, DedupeEntry>;
|
||||
kind: "agent" | "chat";
|
||||
runId: string;
|
||||
ts?: number;
|
||||
ok?: boolean;
|
||||
payload: Record<string, unknown>;
|
||||
}) {
|
||||
setGatewayDedupeEntry({
|
||||
dedupe: params.dedupe,
|
||||
key: `${params.kind}:${params.runId}`,
|
||||
entry: {
|
||||
ts: params.ts ?? Date.now(),
|
||||
ok: params.ok ?? true,
|
||||
payload: params.payload,
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
__testing.resetWaiters();
|
||||
vi.useFakeTimers();
|
||||
@@ -29,18 +49,15 @@ describe("agent wait dedupe helper", () => {
|
||||
await Promise.resolve();
|
||||
expect(__testing.getWaiterCount(runId)).toBe(1);
|
||||
|
||||
setGatewayDedupeEntry({
|
||||
setRunEntry({
|
||||
dedupe,
|
||||
key: `chat:${runId}`,
|
||||
entry: {
|
||||
ts: Date.now(),
|
||||
ok: true,
|
||||
payload: {
|
||||
runId,
|
||||
status: "ok",
|
||||
startedAt: 100,
|
||||
endedAt: 200,
|
||||
},
|
||||
kind: "chat",
|
||||
runId,
|
||||
payload: {
|
||||
runId,
|
||||
status: "ok",
|
||||
startedAt: 100,
|
||||
endedAt: 200,
|
||||
},
|
||||
});
|
||||
|
||||
@@ -56,28 +73,22 @@ describe("agent wait dedupe helper", () => {
|
||||
it("keeps stale chat dedupe blocked while agent dedupe is in-flight", async () => {
|
||||
const dedupe = new Map();
|
||||
const runId = "run-stale-chat";
|
||||
setGatewayDedupeEntry({
|
||||
setRunEntry({
|
||||
dedupe,
|
||||
key: `chat:${runId}`,
|
||||
entry: {
|
||||
ts: Date.now(),
|
||||
ok: true,
|
||||
payload: {
|
||||
runId,
|
||||
status: "ok",
|
||||
},
|
||||
kind: "chat",
|
||||
runId,
|
||||
payload: {
|
||||
runId,
|
||||
status: "ok",
|
||||
},
|
||||
});
|
||||
setGatewayDedupeEntry({
|
||||
setRunEntry({
|
||||
dedupe,
|
||||
key: `agent:${runId}`,
|
||||
entry: {
|
||||
ts: Date.now(),
|
||||
ok: true,
|
||||
payload: {
|
||||
runId,
|
||||
status: "accepted",
|
||||
},
|
||||
kind: "agent",
|
||||
runId,
|
||||
payload: {
|
||||
runId,
|
||||
status: "accepted",
|
||||
},
|
||||
});
|
||||
|
||||
@@ -100,30 +111,26 @@ describe("agent wait dedupe helper", () => {
|
||||
it("uses newer terminal chat snapshot when agent entry is non-terminal", () => {
|
||||
const dedupe = new Map();
|
||||
const runId = "run-nonterminal-agent-with-newer-chat";
|
||||
setGatewayDedupeEntry({
|
||||
setRunEntry({
|
||||
dedupe,
|
||||
key: `agent:${runId}`,
|
||||
entry: {
|
||||
ts: 100,
|
||||
ok: true,
|
||||
payload: {
|
||||
runId,
|
||||
status: "accepted",
|
||||
},
|
||||
kind: "agent",
|
||||
runId,
|
||||
ts: 100,
|
||||
payload: {
|
||||
runId,
|
||||
status: "accepted",
|
||||
},
|
||||
});
|
||||
setGatewayDedupeEntry({
|
||||
setRunEntry({
|
||||
dedupe,
|
||||
key: `chat:${runId}`,
|
||||
entry: {
|
||||
ts: 200,
|
||||
ok: true,
|
||||
payload: {
|
||||
runId,
|
||||
status: "ok",
|
||||
startedAt: 1,
|
||||
endedAt: 2,
|
||||
},
|
||||
kind: "chat",
|
||||
runId,
|
||||
ts: 200,
|
||||
payload: {
|
||||
runId,
|
||||
status: "ok",
|
||||
startedAt: 1,
|
||||
endedAt: 2,
|
||||
},
|
||||
});
|
||||
|
||||
@@ -143,16 +150,13 @@ describe("agent wait dedupe helper", () => {
|
||||
it("ignores stale agent snapshots when waiting for an active chat run", async () => {
|
||||
const dedupe = new Map();
|
||||
const runId = "run-chat-active-ignore-agent";
|
||||
setGatewayDedupeEntry({
|
||||
setRunEntry({
|
||||
dedupe,
|
||||
key: `agent:${runId}`,
|
||||
entry: {
|
||||
ts: Date.now(),
|
||||
ok: true,
|
||||
payload: {
|
||||
runId,
|
||||
status: "ok",
|
||||
},
|
||||
kind: "agent",
|
||||
runId,
|
||||
payload: {
|
||||
runId,
|
||||
status: "ok",
|
||||
},
|
||||
});
|
||||
|
||||
@@ -173,18 +177,15 @@ describe("agent wait dedupe helper", () => {
|
||||
await Promise.resolve();
|
||||
expect(__testing.getWaiterCount(runId)).toBe(1);
|
||||
|
||||
setGatewayDedupeEntry({
|
||||
setRunEntry({
|
||||
dedupe,
|
||||
key: `chat:${runId}`,
|
||||
entry: {
|
||||
ts: Date.now(),
|
||||
ok: true,
|
||||
payload: {
|
||||
runId,
|
||||
status: "ok",
|
||||
startedAt: 123,
|
||||
endedAt: 456,
|
||||
},
|
||||
kind: "chat",
|
||||
runId,
|
||||
payload: {
|
||||
runId,
|
||||
status: "ok",
|
||||
startedAt: 123,
|
||||
endedAt: 456,
|
||||
},
|
||||
});
|
||||
|
||||
@@ -200,23 +201,20 @@ describe("agent wait dedupe helper", () => {
|
||||
const runId = "run-collision";
|
||||
const dedupe = new Map();
|
||||
|
||||
setGatewayDedupeEntry({
|
||||
setRunEntry({
|
||||
dedupe,
|
||||
key: `agent:${runId}`,
|
||||
entry: {
|
||||
ts: 100,
|
||||
ok: true,
|
||||
payload: { runId, status: "ok", startedAt: 10, endedAt: 20 },
|
||||
},
|
||||
kind: "agent",
|
||||
runId,
|
||||
ts: 100,
|
||||
payload: { runId, status: "ok", startedAt: 10, endedAt: 20 },
|
||||
});
|
||||
setGatewayDedupeEntry({
|
||||
setRunEntry({
|
||||
dedupe,
|
||||
key: `chat:${runId}`,
|
||||
entry: {
|
||||
ts: 200,
|
||||
ok: false,
|
||||
payload: { runId, status: "error", startedAt: 30, endedAt: 40, error: "chat failed" },
|
||||
},
|
||||
kind: "chat",
|
||||
runId,
|
||||
ts: 200,
|
||||
ok: false,
|
||||
payload: { runId, status: "error", startedAt: 30, endedAt: 40, error: "chat failed" },
|
||||
});
|
||||
|
||||
expect(
|
||||
@@ -232,23 +230,19 @@ describe("agent wait dedupe helper", () => {
|
||||
});
|
||||
|
||||
const dedupeReverse = new Map();
|
||||
setGatewayDedupeEntry({
|
||||
setRunEntry({
|
||||
dedupe: dedupeReverse,
|
||||
key: `chat:${runId}`,
|
||||
entry: {
|
||||
ts: 100,
|
||||
ok: true,
|
||||
payload: { runId, status: "ok", startedAt: 1, endedAt: 2 },
|
||||
},
|
||||
kind: "chat",
|
||||
runId,
|
||||
ts: 100,
|
||||
payload: { runId, status: "ok", startedAt: 1, endedAt: 2 },
|
||||
});
|
||||
setGatewayDedupeEntry({
|
||||
setRunEntry({
|
||||
dedupe: dedupeReverse,
|
||||
key: `agent:${runId}`,
|
||||
entry: {
|
||||
ts: 200,
|
||||
ok: true,
|
||||
payload: { runId, status: "timeout", startedAt: 3, endedAt: 4, error: "still running" },
|
||||
},
|
||||
kind: "agent",
|
||||
runId,
|
||||
ts: 200,
|
||||
payload: { runId, status: "timeout", startedAt: 3, endedAt: 4, error: "still running" },
|
||||
});
|
||||
|
||||
expect(
|
||||
@@ -281,14 +275,11 @@ describe("agent wait dedupe helper", () => {
|
||||
await Promise.resolve();
|
||||
expect(__testing.getWaiterCount(runId)).toBe(2);
|
||||
|
||||
setGatewayDedupeEntry({
|
||||
setRunEntry({
|
||||
dedupe,
|
||||
key: `chat:${runId}`,
|
||||
entry: {
|
||||
ts: Date.now(),
|
||||
ok: true,
|
||||
payload: { runId, status: "ok" },
|
||||
},
|
||||
kind: "chat",
|
||||
runId,
|
||||
payload: { runId, status: "ok" },
|
||||
});
|
||||
|
||||
await expect(first).resolves.toEqual(
|
||||
|
||||
@@ -50,8 +50,7 @@ import { performGatewaySessionReset } from "../session-reset-service.js";
|
||||
import {
|
||||
canonicalizeSpawnedByForAgent,
|
||||
loadSessionEntry,
|
||||
pruneLegacyStoreKeys,
|
||||
resolveGatewaySessionStoreTarget,
|
||||
migrateAndPruneGatewaySessionStoreKey,
|
||||
} from "../session-utils.js";
|
||||
import { formatForLog } from "../ws-log.js";
|
||||
import { waitForAgentJob } from "./agent-job.js";
|
||||
@@ -425,18 +424,13 @@ export const agentHandlers: GatewayRequestHandlers = {
|
||||
const mainSessionKey = resolveAgentMainSessionKey({ cfg, agentId });
|
||||
if (storePath) {
|
||||
const persisted = await updateSessionStore(storePath, (store) => {
|
||||
const target = resolveGatewaySessionStoreTarget({
|
||||
const { primaryKey } = migrateAndPruneGatewaySessionStoreKey({
|
||||
cfg,
|
||||
key: requestedSessionKey,
|
||||
store,
|
||||
});
|
||||
pruneLegacyStoreKeys({
|
||||
store,
|
||||
canonicalKey: target.canonicalKey,
|
||||
candidates: target.storeKeys,
|
||||
});
|
||||
const merged = mergeSessionEntry(store[canonicalSessionKey], nextEntryPatch);
|
||||
store[canonicalSessionKey] = merged;
|
||||
const merged = mergeSessionEntry(store[primaryKey], nextEntryPatch);
|
||||
store[primaryKey] = merged;
|
||||
return merged;
|
||||
});
|
||||
sessionEntry = persisted;
|
||||
|
||||
@@ -6,6 +6,30 @@ import {
|
||||
} from "./chat.abort.test-helpers.js";
|
||||
import { chatHandlers } from "./chat.js";
|
||||
|
||||
async function invokeSingleRunAbort({
|
||||
context,
|
||||
runId = "run-1",
|
||||
connId,
|
||||
deviceId,
|
||||
scopes,
|
||||
}: {
|
||||
context: ReturnType<typeof createChatAbortContext>;
|
||||
runId?: string;
|
||||
connId: string;
|
||||
deviceId: string;
|
||||
scopes: string[];
|
||||
}) {
|
||||
return await invokeChatAbortHandler({
|
||||
handler: chatHandlers["chat.abort"],
|
||||
context,
|
||||
request: { sessionKey: "main", runId },
|
||||
client: {
|
||||
connId,
|
||||
connect: { device: { id: deviceId }, scopes },
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
describe("chat.abort authorization", () => {
|
||||
it("rejects explicit run aborts from other clients", async () => {
|
||||
const context = createChatAbortContext({
|
||||
@@ -17,14 +41,11 @@ describe("chat.abort authorization", () => {
|
||||
]),
|
||||
});
|
||||
|
||||
const respond = await invokeChatAbortHandler({
|
||||
handler: chatHandlers["chat.abort"],
|
||||
const respond = await invokeSingleRunAbort({
|
||||
context,
|
||||
request: { sessionKey: "main", runId: "run-1" },
|
||||
client: {
|
||||
connId: "conn-other",
|
||||
connect: { device: { id: "dev-other" }, scopes: ["operator.write"] },
|
||||
},
|
||||
connId: "conn-other",
|
||||
deviceId: "dev-other",
|
||||
scopes: ["operator.write"],
|
||||
});
|
||||
|
||||
const [ok, payload, error] = respond.mock.calls.at(-1) ?? [];
|
||||
@@ -92,14 +113,11 @@ describe("chat.abort authorization", () => {
|
||||
]),
|
||||
});
|
||||
|
||||
const respond = await invokeChatAbortHandler({
|
||||
handler: chatHandlers["chat.abort"],
|
||||
const respond = await invokeSingleRunAbort({
|
||||
context,
|
||||
request: { sessionKey: "main", runId: "run-1" },
|
||||
client: {
|
||||
connId: "conn-admin",
|
||||
connect: { device: { id: "dev-admin" }, scopes: ["operator.admin"] },
|
||||
},
|
||||
connId: "conn-admin",
|
||||
deviceId: "dev-admin",
|
||||
scopes: ["operator.admin"],
|
||||
});
|
||||
|
||||
const [ok, payload] = respond.mock.calls.at(-1) ?? [];
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user