refactor(src): split oversized modules

This commit is contained in:
Peter Steinberger
2026-01-14 01:08:15 +00:00
parent b2179de839
commit bcbfb357be
675 changed files with 91476 additions and 73453 deletions

BIN
src/auto-reply/.DS_Store vendored Normal file

Binary file not shown.

View File

@@ -0,0 +1,282 @@
import { listChannelDocks } from "../channels/dock.js";
import type {
ChatCommandDefinition,
CommandScope,
} from "./commands-registry.types.js";
type DefineChatCommandInput = {
  key: string;
  nativeName?: string;
  description: string;
  acceptsArgs?: boolean;
  textAlias?: string;
  textAliases?: string[];
  scope?: CommandScope;
};

/**
 * Normalize a declarative command spec into a full ChatCommandDefinition.
 *
 * Aliases come from `textAliases` or the single `textAlias`; each is
 * trimmed and blank entries are dropped. When no explicit scope is given
 * it is derived: native name + aliases => "both", native name only =>
 * "native", otherwise "text".
 */
function defineChatCommand(
  command: DefineChatCommandInput,
): ChatCommandDefinition {
  const rawAliases =
    command.textAliases ?? (command.textAlias ? [command.textAlias] : []);
  const textAliases: string[] = [];
  for (const rawAlias of rawAliases) {
    const trimmed = rawAlias.trim();
    if (trimmed) textAliases.push(trimmed);
  }
  let scope = command.scope;
  if (!scope) {
    if (command.nativeName) {
      scope = textAliases.length > 0 ? "both" : "native";
    } else {
      scope = "text";
    }
  }
  return {
    key: command.key,
    nativeName: command.nativeName,
    description: command.description,
    acceptsArgs: command.acceptsArgs,
    textAliases,
    scope,
  };
}
/**
 * Append extra text aliases to an already-registered command.
 *
 * Blank aliases are ignored and duplicates are skipped case-insensitively,
 * comparing against both the command's existing aliases and aliases added
 * earlier in the same call.
 * @throws Error when no command with the given key exists.
 */
function registerAlias(
  commands: ChatCommandDefinition[],
  key: string,
  ...aliases: string[]
): void {
  const command = commands.find((entry) => entry.key === key);
  if (!command) {
    throw new Error(`registerAlias: unknown command key: ${key}`);
  }
  const seen = new Set<string>(
    command.textAliases.map((alias) => alias.trim().toLowerCase()),
  );
  for (const raw of aliases) {
    const alias = raw.trim();
    if (!alias) continue;
    const folded = alias.toLowerCase();
    if (seen.has(folded)) continue;
    seen.add(folded);
    command.textAliases.push(alias);
  }
}
/**
 * Validate registry invariants, throwing on the first violation:
 * - command keys are unique;
 * - text-only commands have no native name but at least one alias;
 * - non-text commands have a native name, unique case-insensitively;
 * - native-only commands carry no text aliases;
 * - every alias starts with "/" and is unique case-insensitively.
 *
 * NOTE(review): a scope of "both" with zero text aliases passes this
 * validation — confirm that is intended.
 */
function assertCommandRegistry(commands: ChatCommandDefinition[]): void {
  const seenKeys = new Set<string>();
  const seenNative = new Set<string>();
  const seenAliases = new Set<string>();
  for (const command of commands) {
    if (seenKeys.has(command.key)) {
      throw new Error(`Duplicate command key: ${command.key}`);
    }
    seenKeys.add(command.key);
    const nativeName = command.nativeName?.trim();
    if (command.scope === "text") {
      if (nativeName) {
        throw new Error(`Text-only command has native name: ${command.key}`);
      }
      if (command.textAliases.length === 0) {
        throw new Error(`Text-only command missing text alias: ${command.key}`);
      }
    } else if (!nativeName) {
      throw new Error(`Native command missing native name: ${command.key}`);
    } else {
      const foldedNative = nativeName.toLowerCase();
      if (seenNative.has(foldedNative)) {
        throw new Error(`Duplicate native command: ${nativeName}`);
      }
      seenNative.add(foldedNative);
    }
    if (command.scope === "native" && command.textAliases.length > 0) {
      throw new Error(`Native-only command has text aliases: ${command.key}`);
    }
    for (const alias of command.textAliases) {
      if (!alias.startsWith("/")) {
        throw new Error(`Command alias missing leading '/': ${alias}`);
      }
      const foldedAlias = alias.toLowerCase();
      if (seenAliases.has(foldedAlias)) {
        throw new Error(`Duplicate command alias: ${alias}`);
      }
      seenAliases.add(foldedAlias);
    }
  }
}
export const CHAT_COMMANDS: ChatCommandDefinition[] = (() => {
const commands: ChatCommandDefinition[] = [
defineChatCommand({
key: "help",
nativeName: "help",
description: "Show available commands.",
textAlias: "/help",
}),
defineChatCommand({
key: "commands",
nativeName: "commands",
description: "List all slash commands.",
textAlias: "/commands",
}),
defineChatCommand({
key: "status",
nativeName: "status",
description: "Show current status.",
textAlias: "/status",
}),
defineChatCommand({
key: "whoami",
nativeName: "whoami",
description: "Show your sender id.",
textAlias: "/whoami",
}),
defineChatCommand({
key: "config",
nativeName: "config",
description: "Show or set config values.",
textAlias: "/config",
acceptsArgs: true,
}),
defineChatCommand({
key: "debug",
nativeName: "debug",
description: "Set runtime debug overrides.",
textAlias: "/debug",
acceptsArgs: true,
}),
defineChatCommand({
key: "cost",
nativeName: "cost",
description: "Toggle per-response usage line.",
textAlias: "/cost",
acceptsArgs: true,
}),
defineChatCommand({
key: "stop",
nativeName: "stop",
description: "Stop the current run.",
textAlias: "/stop",
}),
defineChatCommand({
key: "restart",
nativeName: "restart",
description: "Restart Clawdbot.",
textAlias: "/restart",
}),
defineChatCommand({
key: "activation",
nativeName: "activation",
description: "Set group activation mode.",
textAlias: "/activation",
acceptsArgs: true,
}),
defineChatCommand({
key: "send",
nativeName: "send",
description: "Set send policy.",
textAlias: "/send",
acceptsArgs: true,
}),
defineChatCommand({
key: "reset",
nativeName: "reset",
description: "Reset the current session.",
textAlias: "/reset",
}),
defineChatCommand({
key: "new",
nativeName: "new",
description: "Start a new session.",
textAlias: "/new",
}),
defineChatCommand({
key: "compact",
description: "Compact the session context.",
textAlias: "/compact",
scope: "text",
acceptsArgs: true,
}),
defineChatCommand({
key: "think",
nativeName: "think",
description: "Set thinking level.",
textAlias: "/think",
acceptsArgs: true,
}),
defineChatCommand({
key: "verbose",
nativeName: "verbose",
description: "Toggle verbose mode.",
textAlias: "/verbose",
acceptsArgs: true,
}),
defineChatCommand({
key: "reasoning",
nativeName: "reasoning",
description: "Toggle reasoning visibility.",
textAlias: "/reasoning",
acceptsArgs: true,
}),
defineChatCommand({
key: "elevated",
nativeName: "elevated",
description: "Toggle elevated mode.",
textAlias: "/elevated",
acceptsArgs: true,
}),
defineChatCommand({
key: "model",
nativeName: "model",
description: "Show or set the model.",
textAlias: "/model",
acceptsArgs: true,
}),
defineChatCommand({
key: "queue",
nativeName: "queue",
description: "Adjust queue settings.",
textAlias: "/queue",
acceptsArgs: true,
}),
defineChatCommand({
key: "bash",
description: "Run host shell commands (host-only).",
textAlias: "/bash",
scope: "text",
acceptsArgs: true,
}),
...listChannelDocks()
.filter((dock) => dock.capabilities.nativeCommands)
.map((dock) =>
defineChatCommand({
key: `dock:${dock.id}`,
nativeName: `dock-${dock.id}`,
description: `Switch to ${dock.id} for replies.`,
textAlias: `/dock-${dock.id}`,
acceptsArgs: false,
}),
),
];
registerAlias(commands, "status", "/usage");
registerAlias(commands, "whoami", "/id");
registerAlias(commands, "think", "/thinking", "/t");
registerAlias(commands, "verbose", "/v");
registerAlias(commands, "reasoning", "/reason");
registerAlias(commands, "elevated", "/elev");
registerAlias(commands, "model", "/models");
assertCommandRegistry(commands);
return commands;
})();
// Lazily computed set of dock ids that support native slash commands.
let cachedNativeCommandSurfaces: Set<string> | null = null;

/**
 * Return the set of channel-dock ids whose docks advertise native
 * command support. Computed on first call and cached for the rest of
 * the process lifetime.
 */
export const getNativeCommandSurfaces = (): Set<string> => {
  if (cachedNativeCommandSurfaces === null) {
    const surfaces: string[] = [];
    for (const dock of listChannelDocks()) {
      if (dock.capabilities.nativeCommands) surfaces.push(dock.id);
    }
    cachedNativeCommandSurfaces = new Set(surfaces);
  }
  return cachedNativeCommandSurfaces;
};

View File

@@ -1,311 +1,52 @@
import { listChannelDocks } from "../channels/dock.js";
import type { ClawdbotConfig } from "../config/types.js";
import {
CHAT_COMMANDS,
getNativeCommandSurfaces,
} from "./commands-registry.data.js";
import type {
ChatCommandDefinition,
CommandDetection,
CommandNormalizeOptions,
NativeCommandSpec,
ShouldHandleTextCommandsParams,
} from "./commands-registry.types.js";
export type CommandScope = "text" | "native" | "both";
export type ChatCommandDefinition = {
key: string;
nativeName?: string;
description: string;
textAliases: string[];
acceptsArgs?: boolean;
scope: CommandScope;
};
export type NativeCommandSpec = {
name: string;
description: string;
acceptsArgs: boolean;
};
export { CHAT_COMMANDS } from "./commands-registry.data.js";
export type {
ChatCommandDefinition,
CommandDetection,
CommandNormalizeOptions,
CommandScope,
NativeCommandSpec,
ShouldHandleTextCommandsParams,
} from "./commands-registry.types.js";
type TextAliasSpec = {
key: string;
canonical: string;
acceptsArgs: boolean;
};
/**
 * Normalize a declarative command spec into a full ChatCommandDefinition.
 *
 * Aliases come from `textAliases` or the single `textAlias`; each is
 * trimmed and blank entries are dropped. When no explicit scope is given
 * it is derived: native name + aliases => "both", native name only =>
 * "native", otherwise "text".
 */
function defineChatCommand(command: {
  key: string;
  nativeName?: string;
  description: string;
  acceptsArgs?: boolean;
  textAlias?: string;
  textAliases?: string[];
  scope?: CommandScope;
}): ChatCommandDefinition {
  const rawAliases =
    command.textAliases ?? (command.textAlias ? [command.textAlias] : []);
  const textAliases: string[] = [];
  for (const rawAlias of rawAliases) {
    const trimmed = rawAlias.trim();
    if (trimmed) textAliases.push(trimmed);
  }
  let scope = command.scope;
  if (!scope) {
    if (command.nativeName) {
      scope = textAliases.length > 0 ? "both" : "native";
    } else {
      scope = "text";
    }
  }
  return {
    key: command.key,
    nativeName: command.nativeName,
    description: command.description,
    acceptsArgs: command.acceptsArgs,
    textAliases,
    scope,
  };
}
/**
 * Append extra text aliases to an already-registered command.
 *
 * Blank aliases are ignored and duplicates are skipped case-insensitively,
 * comparing against both the command's existing aliases and aliases added
 * earlier in the same call.
 * @throws Error when no command with the given key exists.
 */
function registerAlias(
  commands: ChatCommandDefinition[],
  key: string,
  ...aliases: string[]
): void {
  const command = commands.find((entry) => entry.key === key);
  if (!command) {
    throw new Error(`registerAlias: unknown command key: ${key}`);
  }
  const seen = new Set<string>(
    command.textAliases.map((alias) => alias.trim().toLowerCase()),
  );
  for (const raw of aliases) {
    const alias = raw.trim();
    if (!alias) continue;
    const folded = alias.toLowerCase();
    if (seen.has(folded)) continue;
    seen.add(folded);
    command.textAliases.push(alias);
  }
}
/**
 * Validate registry invariants, throwing on the first violation:
 * unique keys; text-only commands have aliases but no native name;
 * non-text commands have a case-insensitively unique native name;
 * native-only commands carry no text aliases; every alias starts with
 * "/" and is unique case-insensitively.
 *
 * NOTE(review): a scope of "both" with zero text aliases passes this
 * validation — confirm that is intended.
 */
function assertCommandRegistry(commands: ChatCommandDefinition[]): void {
  const seenKeys = new Set<string>();
  const seenNative = new Set<string>();
  const seenAliases = new Set<string>();
  for (const command of commands) {
    if (seenKeys.has(command.key)) {
      throw new Error(`Duplicate command key: ${command.key}`);
    }
    seenKeys.add(command.key);
    const nativeName = command.nativeName?.trim();
    if (command.scope === "text") {
      if (nativeName) {
        throw new Error(`Text-only command has native name: ${command.key}`);
      }
      if (command.textAliases.length === 0) {
        throw new Error(`Text-only command missing text alias: ${command.key}`);
      }
    } else if (!nativeName) {
      throw new Error(`Native command missing native name: ${command.key}`);
    } else {
      const foldedNative = nativeName.toLowerCase();
      if (seenNative.has(foldedNative)) {
        throw new Error(`Duplicate native command: ${nativeName}`);
      }
      seenNative.add(foldedNative);
    }
    if (command.scope === "native" && command.textAliases.length > 0) {
      throw new Error(`Native-only command has text aliases: ${command.key}`);
    }
    for (const alias of command.textAliases) {
      if (!alias.startsWith("/")) {
        throw new Error(`Command alias missing leading '/': ${alias}`);
      }
      const foldedAlias = alias.toLowerCase();
      if (seenAliases.has(foldedAlias)) {
        throw new Error(`Duplicate command alias: ${alias}`);
      }
      seenAliases.add(foldedAlias);
    }
  }
}
/**
 * Canonical chat command registry, built once at module load:
 * static command specs, then extra text aliases, all validated by
 * assertCommandRegistry before being frozen in place.
 */
export const CHAT_COMMANDS: ChatCommandDefinition[] = (() => {
  const specs: Parameters<typeof defineChatCommand>[0][] = [
    { key: "help", nativeName: "help", description: "Show available commands.", textAlias: "/help" },
    { key: "commands", nativeName: "commands", description: "List all slash commands.", textAlias: "/commands" },
    { key: "status", nativeName: "status", description: "Show current status.", textAlias: "/status" },
    { key: "whoami", nativeName: "whoami", description: "Show your sender id.", textAlias: "/whoami" },
    { key: "config", nativeName: "config", description: "Show or set config values.", textAlias: "/config", acceptsArgs: true },
    { key: "debug", nativeName: "debug", description: "Set runtime debug overrides.", textAlias: "/debug", acceptsArgs: true },
    { key: "cost", nativeName: "cost", description: "Toggle per-response usage line.", textAlias: "/cost", acceptsArgs: true },
    { key: "stop", nativeName: "stop", description: "Stop the current run.", textAlias: "/stop" },
    { key: "restart", nativeName: "restart", description: "Restart Clawdbot.", textAlias: "/restart" },
    { key: "activation", nativeName: "activation", description: "Set group activation mode.", textAlias: "/activation", acceptsArgs: true },
    { key: "send", nativeName: "send", description: "Set send policy.", textAlias: "/send", acceptsArgs: true },
    { key: "reset", nativeName: "reset", description: "Reset the current session.", textAlias: "/reset" },
    { key: "new", nativeName: "new", description: "Start a new session.", textAlias: "/new" },
    { key: "compact", description: "Compact the session context.", textAlias: "/compact", scope: "text", acceptsArgs: true },
    { key: "think", nativeName: "think", description: "Set thinking level.", textAlias: "/think", acceptsArgs: true },
    { key: "verbose", nativeName: "verbose", description: "Toggle verbose mode.", textAlias: "/verbose", acceptsArgs: true },
    { key: "reasoning", nativeName: "reasoning", description: "Toggle reasoning visibility.", textAlias: "/reasoning", acceptsArgs: true },
    { key: "elevated", nativeName: "elevated", description: "Toggle elevated mode.", textAlias: "/elevated", acceptsArgs: true },
    { key: "model", nativeName: "model", description: "Show or set the model.", textAlias: "/model", acceptsArgs: true },
    { key: "queue", nativeName: "queue", description: "Adjust queue settings.", textAlias: "/queue", acceptsArgs: true },
    { key: "bash", description: "Run host shell commands (host-only).", textAlias: "/bash", scope: "text", acceptsArgs: true },
  ];
  const commands = specs.map((spec) => defineChatCommand(spec));
  // Secondary text spellings for a handful of commands.
  const extraAliases: [string, ...string[]][] = [
    ["status", "/usage"],
    ["whoami", "/id"],
    ["think", "/thinking", "/t"],
    ["verbose", "/v"],
    ["reasoning", "/reason"],
    ["elevated", "/elev"],
    ["model", "/models"],
  ];
  for (const [key, ...aliases] of extraAliases) {
    registerAlias(commands, key, ...aliases);
  }
  assertCommandRegistry(commands);
  return commands;
})();
// Lazily computed set of dock ids that support native slash commands.
let cachedNativeCommandSurfaces: Set<string> | null = null;

/**
 * Return the set of channel-dock ids whose docks advertise native
 * command support. Computed on first call and cached for the rest of
 * the process lifetime.
 */
const getNativeCommandSurfaces = (): Set<string> => {
  if (cachedNativeCommandSurfaces === null) {
    const surfaces: string[] = [];
    for (const dock of listChannelDocks()) {
      if (dock.capabilities.nativeCommands) surfaces.push(dock.id);
    }
    cachedNativeCommandSurfaces = new Set(surfaces);
  }
  return cachedNativeCommandSurfaces;
};
const TEXT_ALIAS_MAP: Map<string, TextAliasSpec> = (() => {
const map = new Map<string, TextAliasSpec>();
for (const command of CHAT_COMMANDS) {
const canonical = `/${command.key}`;
// Canonicalize to the *primary* text alias, not `/${key}`. Some command keys are
// internal identifiers (e.g. `dock:telegram`) while the public text command is
// the alias (e.g. `/dock-telegram`).
const canonical = command.textAliases[0]?.trim() || `/${command.key}`;
const acceptsArgs = Boolean(command.acceptsArgs);
for (const alias of command.textAliases) {
const normalized = alias.trim().toLowerCase();
if (!normalized) continue;
if (!map.has(normalized)) {
map.set(normalized, { canonical, acceptsArgs });
map.set(normalized, { key: command.key, canonical, acceptsArgs });
}
}
}
return map;
})();
let cachedDetection:
| {
exact: Set<string>;
regex: RegExp;
}
| undefined;
let cachedDetection: CommandDetection | undefined;
function escapeRegExp(value: string) {
return value.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
@@ -369,10 +110,6 @@ export function buildCommandText(commandName: string, args?: string): string {
return trimmedArgs ? `/${commandName} ${trimmedArgs}` : `/${commandName}`;
}
export type CommandNormalizeOptions = {
botUsername?: string;
};
export function normalizeCommandBody(
raw: string,
options?: CommandNormalizeOptions,
@@ -424,10 +161,7 @@ export function isCommandMessage(raw: string): boolean {
return trimmed.startsWith("/");
}
export function getCommandDetection(_cfg?: ClawdbotConfig): {
exact: Set<string>;
regex: RegExp;
} {
export function getCommandDetection(_cfg?: ClawdbotConfig): CommandDetection {
if (cachedDetection) return cachedDetection;
const exact = new Set<string>();
const patterns: string[] = [];
@@ -479,9 +213,7 @@ export function resolveTextCommand(
if (!alias) return null;
const spec = TEXT_ALIAS_MAP.get(alias);
if (!spec) return null;
const command = CHAT_COMMANDS.find(
(entry) => `/${entry.key}` === spec.canonical,
);
const command = CHAT_COMMANDS.find((entry) => entry.key === spec.key);
if (!command) return null;
if (!spec.acceptsArgs) return { command };
const args = trimmed.slice(alias.length).trim();
@@ -493,11 +225,9 @@ export function isNativeCommandSurface(surface?: string): boolean {
return getNativeCommandSurfaces().has(surface.toLowerCase());
}
export function shouldHandleTextCommands(params: {
cfg: ClawdbotConfig;
surface: string;
commandSource?: "text" | "native";
}): boolean {
export function shouldHandleTextCommands(
params: ShouldHandleTextCommandsParams,
): boolean {
if (params.commandSource === "native") return true;
if (params.cfg.commands?.text !== false) return true;
return !isNativeCommandSurface(params.surface);

View File

@@ -0,0 +1,33 @@
import type { ClawdbotConfig } from "../config/types.js";
export type CommandScope = "text" | "native" | "both";
export type ChatCommandDefinition = {
key: string;
nativeName?: string;
description: string;
textAliases: string[];
acceptsArgs?: boolean;
scope: CommandScope;
};
export type NativeCommandSpec = {
name: string;
description: string;
acceptsArgs: boolean;
};
export type CommandNormalizeOptions = {
botUsername?: string;
};
export type CommandDetection = {
exact: Set<string>;
regex: RegExp;
};
export type ShouldHandleTextCommandsParams = {
cfg: ClawdbotConfig;
surface: string;
commandSource?: "text" | "native";
};

View File

@@ -0,0 +1,281 @@
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
import { loadModelCatalog } from "../agents/model-catalog.js";
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
import { loadSessionStore } from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
const MAIN_SESSION_KEY = "agent:main:main";
vi.mock("../agents/pi-embedded.js", () => ({
abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
runEmbeddedPiAgent: vi.fn(),
queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
resolveEmbeddedSessionLane: (key: string) =>
`session:${key.trim() || "main"}`,
isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
vi.mock("../agents/model-catalog.js", () => ({
loadModelCatalog: vi.fn(),
}));
// Temp-home wrapper preconfigured for reply tests: both agent-dir env
// vars point at <home>/.clawdbot/agent and temp dirs use the
// "clawdbot-reply-" prefix.
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const agentDir = (home: string) => path.join(home, ".clawdbot", "agent");
  return await withTempHomeBase(async (home) => await fn(home), {
    env: {
      CLAWDBOT_AGENT_DIR: agentDir,
      PI_CODING_AGENT_DIR: agentDir,
    },
    prefix: "clawdbot-reply-",
  });
}
// Assert that the main session entry exists and that its model/provider
// overrides match the expected selection (both undefined when no
// selection is given). Currently unused in this file (underscore-prefixed).
function _assertModelSelection(
  storePath: string,
  selection: { model?: string; provider?: string } = {},
) {
  const entry = loadSessionStore(storePath)[MAIN_SESSION_KEY];
  expect(entry).toBeDefined();
  expect(entry?.modelOverride).toBe(selection.model);
  expect(entry?.providerOverride).toBe(selection.provider);
}
// Covers slash-command directive handling in getReplyFromConfig:
// /thinking level validation, reserved model-alias protection, and
// /queue argument parsing and display. The embedded agent is mocked, so
// directive-only messages must never reach it.
describe("directive behavior", () => {
  beforeEach(() => {
    vi.mocked(runEmbeddedPiAgent).mockReset();
    // Fixed model catalog so directive checks see a known set of models.
    vi.mocked(loadModelCatalog).mockResolvedValue([
      { id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
      { id: "claude-sonnet-4-1", name: "Sonnet 4.1", provider: "anthropic" },
      { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
    ]);
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  // "xhigh" is accepted when the default model is a codex model.
  it("accepts /thinking xhigh for codex models", async () => {
    await withTempHome(async (home) => {
      const storePath = path.join(home, "sessions.json");
      const res = await getReplyFromConfig(
        {
          Body: "/thinking xhigh",
          From: "+1004",
          To: "+2000",
        },
        {},
        {
          agents: {
            defaults: {
              model: "openai-codex/gpt-5.2-codex",
              workspace: path.join(home, "clawd"),
            },
          },
          whatsapp: { allowFrom: ["*"] },
          session: { store: storePath },
        },
      );
      // Replies may be a single entry or an array; collect all texts.
      const texts = (Array.isArray(res) ? res : [res])
        .map((entry) => entry?.text)
        .filter(Boolean);
      expect(texts).toContain("Thinking level set to xhigh.");
    });
  });
  // "xhigh" is also accepted for the plain openai gpt-5.2 model.
  it("accepts /thinking xhigh for openai gpt-5.2", async () => {
    await withTempHome(async (home) => {
      const storePath = path.join(home, "sessions.json");
      const res = await getReplyFromConfig(
        {
          Body: "/thinking xhigh",
          From: "+1004",
          To: "+2000",
        },
        {},
        {
          agents: {
            defaults: {
              model: "openai/gpt-5.2",
              workspace: path.join(home, "clawd"),
            },
          },
          whatsapp: { allowFrom: ["*"] },
          session: { store: storePath },
        },
      );
      const texts = (Array.isArray(res) ? res : [res])
        .map((entry) => entry?.text)
        .filter(Boolean);
      expect(texts).toContain("Thinking level set to xhigh.");
    });
  });
  // Any other model must get a rejection naming the supported models.
  it("rejects /thinking xhigh for non-codex models", async () => {
    await withTempHome(async (home) => {
      const storePath = path.join(home, "sessions.json");
      const res = await getReplyFromConfig(
        {
          Body: "/thinking xhigh",
          From: "+1004",
          To: "+2000",
        },
        {},
        {
          agents: {
            defaults: {
              model: "openai/gpt-4.1-mini",
              workspace: path.join(home, "clawd"),
            },
          },
          whatsapp: { allowFrom: ["*"] },
          session: { store: storePath },
        },
      );
      const texts = (Array.isArray(res) ? res : [res])
        .map((entry) => entry?.text)
        .filter(Boolean);
      expect(texts).toContain(
        'Thinking level "xhigh" is only supported for openai/gpt-5.2, openai-codex/gpt-5.2-codex or openai-codex/gpt-5.1-codex.',
      );
    });
  });
  // A model alias of " help " (whitespace-padded) must not shadow the
  // built-in /help command once trimmed.
  it("keeps reserved command aliases from matching after trimming", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const res = await getReplyFromConfig(
        {
          Body: "/help",
          From: "+1222",
          To: "+1222",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
              models: {
                "anthropic/claude-opus-4-5": { alias: " help " },
              },
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Help");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // Bad /queue option values should produce one error line per option.
  it("errors on invalid queue options", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const res = await getReplyFromConfig(
        {
          Body: "/queue collect debounce:bogus cap:zero drop:maybe",
          From: "+1222",
          To: "+1222",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Invalid debounce");
      expect(text).toContain("Invalid cap");
      expect(text).toContain("Invalid drop policy");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // Bare /queue should echo the configured settings and the usage help.
  it("shows current queue settings when /queue has no arguments", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const res = await getReplyFromConfig(
        {
          Body: "/queue",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          messages: {
            queue: {
              mode: "collect",
              debounceMs: 1500,
              cap: 9,
              drop: "summarize",
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain(
        "Current queue settings: mode=collect, debounce=1500ms, cap=9, drop=summarize.",
      );
      expect(text).toContain(
        "Options: modes steer, followup, collect, steer+backlog, interrupt; debounce:<ms|s|m>, cap:<n>, drop:old|new|summarize.",
      );
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // Bare /think should echo the configured default thinking level.
  it("shows current think level when /think has no argument", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const res = await getReplyFromConfig(
        { Body: "/think", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
              thinkingDefault: "high",
            },
          },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Current thinking level: high");
      expect(text).toContain("Options: off, minimal, low, medium, high.");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
});

View File

@@ -0,0 +1,278 @@
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
import { loadModelCatalog } from "../agents/model-catalog.js";
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
import { loadSessionStore } from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
const MAIN_SESSION_KEY = "agent:main:main";
vi.mock("../agents/pi-embedded.js", () => ({
abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
runEmbeddedPiAgent: vi.fn(),
queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
resolveEmbeddedSessionLane: (key: string) =>
`session:${key.trim() || "main"}`,
isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
vi.mock("../agents/model-catalog.js", () => ({
loadModelCatalog: vi.fn(),
}));
// Temp-home wrapper preconfigured for reply tests: both agent-dir env
// vars point at <home>/.clawdbot/agent and temp dirs use the
// "clawdbot-reply-" prefix.
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const agentDir = (home: string) => path.join(home, ".clawdbot", "agent");
  return await withTempHomeBase(async (home) => await fn(home), {
    env: {
      CLAWDBOT_AGENT_DIR: agentDir,
      PI_CODING_AGENT_DIR: agentDir,
    },
    prefix: "clawdbot-reply-",
  });
}
// Assert that the main session entry exists and that its model/provider
// overrides match the expected selection (both undefined when no
// selection is given).
function assertModelSelection(
  storePath: string,
  selection: { model?: string; provider?: string } = {},
) {
  const entry = loadSessionStore(storePath)[MAIN_SESSION_KEY];
  expect(entry).toBeDefined();
  expect(entry?.modelOverride).toBe(selection.model);
  expect(entry?.providerOverride).toBe(selection.provider);
}
// Covers fuzzy model resolution on the /model directive: substring and
// exact-id matches with and without a provider prefix, plus ambiguity
// handling. The embedded agent is mocked; /model must never invoke it.
describe("directive behavior", () => {
  beforeEach(() => {
    vi.mocked(runEmbeddedPiAgent).mockReset();
    // Fixed model catalog so fuzzy matching sees a known baseline set.
    vi.mocked(loadModelCatalog).mockResolvedValue([
      { id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
      { id: "claude-sonnet-4-1", name: "Sonnet 4.1", provider: "anthropic" },
      { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
    ]);
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  // "/model kimi" should resolve to the only matching configured model.
  it("supports fuzzy model matches on /model directive", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      await getReplyFromConfig(
        { Body: "/model kimi", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "anthropic/claude-opus-4-5" },
              workspace: path.join(home, "clawd"),
              models: {
                "anthropic/claude-opus-4-5": {},
                "moonshot/kimi-k2-0905-preview": {},
              },
            },
          },
          models: {
            mode: "merge",
            providers: {
              moonshot: {
                baseUrl: "https://api.moonshot.ai/v1",
                apiKey: "sk-test",
                api: "openai-completions",
                models: [{ id: "kimi-k2-0905-preview", name: "Kimi K2" }],
              },
            },
          },
          session: { store: storePath },
        },
      );
      assertModelSelection(storePath, {
        model: "kimi-k2-0905-preview",
        provider: "moonshot",
      });
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // An exact model id without a provider prefix resolves when only one
  // provider offers that id.
  it("resolves provider-less exact model ids via fuzzy matching when unambiguous", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      await getReplyFromConfig(
        { Body: "/model kimi-k2-0905-preview", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "anthropic/claude-opus-4-5" },
              workspace: path.join(home, "clawd"),
              models: {
                "anthropic/claude-opus-4-5": {},
                "moonshot/kimi-k2-0905-preview": {},
              },
            },
          },
          models: {
            mode: "merge",
            providers: {
              moonshot: {
                baseUrl: "https://api.moonshot.ai/v1",
                apiKey: "sk-test",
                api: "openai-completions",
                models: [{ id: "kimi-k2-0905-preview", name: "Kimi K2" }],
              },
            },
          },
          session: { store: storePath },
        },
      );
      assertModelSelection(storePath, {
        model: "kimi-k2-0905-preview",
        provider: "moonshot",
      });
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // "provider/fragment" restricts fuzzy matching to that provider.
  it("supports fuzzy matches within a provider on /model provider/model", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      await getReplyFromConfig(
        { Body: "/model moonshot/kimi", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "anthropic/claude-opus-4-5" },
              workspace: path.join(home, "clawd"),
              models: {
                "anthropic/claude-opus-4-5": {},
                "moonshot/kimi-k2-0905-preview": {},
              },
            },
          },
          models: {
            mode: "merge",
            providers: {
              moonshot: {
                baseUrl: "https://api.moonshot.ai/v1",
                apiKey: "sk-test",
                api: "openai-completions",
                models: [{ id: "kimi-k2-0905-preview", name: "Kimi K2" }],
              },
            },
          },
          session: { store: storePath },
        },
      );
      assertModelSelection(storePath, {
        model: "kimi-k2-0905-preview",
        provider: "moonshot",
      });
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // Ambiguous matches across providers leave the session overrides unset
  // (assertModelSelection with no selection expects undefined overrides).
  it("picks the best fuzzy match when multiple models match", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      await getReplyFromConfig(
        { Body: "/model minimax", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "minimax/MiniMax-M2.1" },
              workspace: path.join(home, "clawd"),
              models: {
                "minimax/MiniMax-M2.1": {},
                "minimax/MiniMax-M2.1-lightning": {},
                "lmstudio/minimax-m2.1-gs32": {},
              },
            },
          },
          models: {
            mode: "merge",
            providers: {
              minimax: {
                baseUrl: "https://api.minimax.io/anthropic",
                apiKey: "sk-test",
                api: "anthropic-messages",
                models: [{ id: "MiniMax-M2.1", name: "MiniMax M2.1" }],
              },
              lmstudio: {
                baseUrl: "http://127.0.0.1:1234/v1",
                apiKey: "lmstudio",
                api: "openai-responses",
                models: [
                  { id: "minimax-m2.1-gs32", name: "MiniMax M2.1 GS32" },
                ],
              },
            },
          },
          session: { store: storePath },
        },
      );
      assertModelSelection(storePath);
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // Same ambiguity handling when the fragment is scoped to one provider.
  it("picks the best fuzzy match within a provider", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      await getReplyFromConfig(
        { Body: "/model minimax/m2.1", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "minimax/MiniMax-M2.1" },
              workspace: path.join(home, "clawd"),
              models: {
                "minimax/MiniMax-M2.1": {},
                "minimax/MiniMax-M2.1-lightning": {},
              },
            },
          },
          models: {
            mode: "merge",
            providers: {
              minimax: {
                baseUrl: "https://api.minimax.io/anthropic",
                apiKey: "sk-test",
                api: "anthropic-messages",
                models: [
                  { id: "MiniMax-M2.1", name: "MiniMax M2.1" },
                  {
                    id: "MiniMax-M2.1-lightning",
                    name: "MiniMax M2.1 Lightning",
                  },
                ],
              },
            },
          },
          session: { store: storePath },
        },
      );
      assertModelSelection(storePath);
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
});

View File

@@ -0,0 +1,256 @@
import fs from "node:fs/promises";
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
import { loadModelCatalog } from "../agents/model-catalog.js";
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
import { loadSessionStore } from "../config/sessions.js";
import { drainSystemEvents } from "../infra/system-events.js";
import { getReplyFromConfig } from "./reply.js";
const MAIN_SESSION_KEY = "agent:main:main";
vi.mock("../agents/pi-embedded.js", () => ({
abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
runEmbeddedPiAgent: vi.fn(),
queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
resolveEmbeddedSessionLane: (key: string) =>
`session:${key.trim() || "main"}`,
isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
vi.mock("../agents/model-catalog.js", () => ({
loadModelCatalog: vi.fn(),
}));
/**
 * Runs `fn` with HOME pointed at a fresh temporary directory and both agent
 * state env vars redirected beneath it, so tests never touch real user state.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const agentDir = (home: string) => path.join(home, ".clawdbot", "agent");
  return await withTempHomeBase((home) => fn(home), {
    env: {
      CLAWDBOT_AGENT_DIR: agentDir,
      PI_CODING_AGENT_DIR: agentDir,
    },
    prefix: "clawdbot-reply-",
  });
}
/**
 * Asserts the persisted main-session entry exists and carries exactly the
 * expected model/provider overrides (absent fields assert `undefined`).
 */
function assertModelSelection(
  storePath: string,
  selection: { model?: string; provider?: string } = {},
) {
  const { model, provider } = selection;
  const entry = loadSessionStore(storePath)[MAIN_SESSION_KEY];
  expect(entry).toBeDefined();
  expect(entry?.modelOverride).toBe(model);
  expect(entry?.providerOverride).toBe(provider);
}
// Directive handling around /model selection, auth profiles, and the system
// events queued when session-level settings change.
describe("directive behavior", () => {
  beforeEach(() => {
    // Fresh agent mock plus a small deterministic model catalog per test.
    vi.mocked(runEmbeddedPiAgent).mockReset();
    vi.mocked(loadModelCatalog).mockResolvedValue([
      { id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
      { id: "claude-sonnet-4-1", name: "Sonnet 4.1", provider: "anthropic" },
      { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
    ]);
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  // Two providers expose the same model id; the configured alias ("Kimi")
  // must break the tie in favor of the moonshot entry.
  it("prefers alias matches when fuzzy selection is ambiguous", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      await getReplyFromConfig(
        { Body: "/model ki", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "anthropic/claude-opus-4-5" },
              workspace: path.join(home, "clawd"),
              models: {
                "anthropic/claude-opus-4-5": {},
                "moonshot/kimi-k2-0905-preview": { alias: "Kimi" },
                "lmstudio/kimi-k2-0905-preview": {},
              },
            },
          },
          models: {
            mode: "merge",
            providers: {
              moonshot: {
                baseUrl: "https://api.moonshot.ai/v1",
                apiKey: "sk-test",
                api: "openai-completions",
                models: [{ id: "kimi-k2-0905-preview", name: "Kimi K2" }],
              },
              lmstudio: {
                baseUrl: "http://127.0.0.1:1234/v1",
                apiKey: "lmstudio",
                api: "openai-responses",
                models: [
                  { id: "kimi-k2-0905-preview", name: "Kimi K2 (Local)" },
                ],
              },
            },
          },
          session: { store: storePath },
        },
      );
      assertModelSelection(storePath, {
        model: "kimi-k2-0905-preview",
        provider: "moonshot",
      });
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // "/model Name@provider:profile" must persist the auth profile override on
  // the session entry, given a matching on-disk auth-profiles.json.
  it("stores auth profile overrides on /model directive", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      const authDir = path.join(home, ".clawdbot", "agents", "main", "agent");
      await fs.mkdir(authDir, { recursive: true, mode: 0o700 });
      await fs.writeFile(
        path.join(authDir, "auth-profiles.json"),
        JSON.stringify(
          {
            version: 1,
            profiles: {
              "anthropic:work": {
                type: "api_key",
                provider: "anthropic",
                key: "sk-test-1234567890",
              },
            },
          },
          null,
          2,
        ),
      );
      const res = await getReplyFromConfig(
        { Body: "/model Opus@anthropic:work", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "openai/gpt-4.1-mini" },
              workspace: path.join(home, "clawd"),
              models: {
                "openai/gpt-4.1-mini": {},
                "anthropic/claude-opus-4-5": { alias: "Opus" },
              },
            },
          },
          session: { store: storePath },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Auth profile set to anthropic:work");
      const store = loadSessionStore(storePath);
      // Use the shared key constant (was a duplicated string literal) and
      // optional chaining, consistent with the other store assertions.
      const entry = store[MAIN_SESSION_KEY];
      expect(entry?.authProfileOverride).toBe("anthropic:work");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // Switching models must enqueue a system event on the main session lane.
  it("queues a system event when switching models", async () => {
    await withTempHome(async (home) => {
      drainSystemEvents(MAIN_SESSION_KEY);
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      await getReplyFromConfig(
        { Body: "/model Opus", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "openai/gpt-4.1-mini" },
              workspace: path.join(home, "clawd"),
              models: {
                "openai/gpt-4.1-mini": {},
                "anthropic/claude-opus-4-5": { alias: "Opus" },
              },
            },
          },
          session: { store: storePath },
        },
      );
      const events = drainSystemEvents(MAIN_SESSION_KEY);
      expect(events).toContain(
        "Model switched to Opus (anthropic/claude-opus-4-5).",
      );
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // "/elevated on" from an allowed sender should emit an "Elevated ON" event.
  it("queues a system event when toggling elevated", async () => {
    await withTempHome(async (home) => {
      drainSystemEvents(MAIN_SESSION_KEY);
      const storePath = path.join(home, "sessions.json");
      await getReplyFromConfig(
        {
          Body: "/elevated on",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
        },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "openai/gpt-4.1-mini" },
              workspace: path.join(home, "clawd"),
            },
          },
          tools: { elevated: { allowFrom: { whatsapp: ["*"] } } },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: storePath },
        },
      );
      const events = drainSystemEvents(MAIN_SESSION_KEY);
      expect(events.some((e) => e.includes("Elevated ON"))).toBe(true);
    });
  });
  // "/reasoning stream" should emit a "Reasoning STREAM" system event.
  it("queues a system event when toggling reasoning", async () => {
    await withTempHome(async (home) => {
      drainSystemEvents(MAIN_SESSION_KEY);
      const storePath = path.join(home, "sessions.json");
      await getReplyFromConfig(
        {
          Body: "/reasoning stream",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
        },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "openai/gpt-4.1-mini" },
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: storePath },
        },
      );
      const events = drainSystemEvents(MAIN_SESSION_KEY);
      expect(events.some((e) => e.includes("Reasoning STREAM"))).toBe(true);
    });
  });
});

View File

@@ -0,0 +1,197 @@
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
import { loadModelCatalog } from "../agents/model-catalog.js";
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
import { loadSessionStore } from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
// Session-store key the reply pipeline uses for the default agent session.
const MAIN_SESSION_KEY = "agent:main:main";
// Stub the embedded agent runtime so tests can assert whether (and with what
// arguments) the agent would be invoked, without actually running it.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  // Mirrors the real lane naming: empty keys fall back to "main".
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// Catalog contents are supplied per-test via mockResolvedValue (see beforeEach).
vi.mock("../agents/model-catalog.js", () => ({
  loadModelCatalog: vi.fn(),
}));
/**
 * Runs `fn` with HOME pointed at a fresh temporary directory and both agent
 * state env vars redirected beneath it, so tests never touch real user state.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const agentDir = (home: string) => path.join(home, ".clawdbot", "agent");
  return await withTempHomeBase((home) => fn(home), {
    env: {
      CLAWDBOT_AGENT_DIR: agentDir,
      PI_CODING_AGENT_DIR: agentDir,
    },
    prefix: "clawdbot-reply-",
  });
}
// Store-assertion helper shared by the split reply test files. The underscore
// prefix marks it as currently unused in this file; it appears to be kept for
// parity with sibling files that do call it.
function _assertModelSelection(
  storePath: string,
  selection: { model?: string; provider?: string } = {},
) {
  // Load the on-disk session store and inspect the default session entry.
  const store = loadSessionStore(storePath);
  const entry = store[MAIN_SESSION_KEY];
  expect(entry).toBeDefined();
  // Absent selection fields assert that no override was persisted (undefined).
  expect(entry?.modelOverride).toBe(selection.model);
  expect(entry?.providerOverride).toBe(selection.provider);
}
// Directive handling around inline /model text, thinking defaults, and
// elevated permissions passed through to the embedded agent.
describe("directive behavior", () => {
  beforeEach(() => {
    // Fresh agent mock plus a small deterministic model catalog per test.
    vi.mocked(runEmbeddedPiAgent).mockReset();
    vi.mocked(loadModelCatalog).mockResolvedValue([
      { id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
      { id: "claude-sonnet-4-1", name: "Sonnet 4.1", provider: "anthropic" },
      { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
    ]);
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  // A /model token embedded mid-sentence is not a directive: the run must use
  // the configured primary model, and the agent still executes.
  it("ignores inline /model and uses the default model", async () => {
    await withTempHome(async (home) => {
      const storePath = path.join(home, "sessions.json");
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "done" }],
        meta: {
          durationMs: 5,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const res = await getReplyFromConfig(
        {
          Body: "please sync /model openai/gpt-4.1-mini now",
          From: "+1004",
          To: "+2000",
        },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "anthropic/claude-opus-4-5" },
              workspace: path.join(home, "clawd"),
              models: {
                "anthropic/claude-opus-4-5": {},
                "openai/gpt-4.1-mini": {},
              },
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: storePath },
        },
      );
      const texts = (Array.isArray(res) ? res : [res])
        .map((entry) => entry?.text)
        .filter(Boolean);
      expect(texts).toContain("done");
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
      // Inspect the single agent invocation: still the configured default.
      const call = vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0];
      expect(call?.provider).toBe("anthropic");
      expect(call?.model).toBe("claude-opus-4-5");
    });
  });
  // Catalog marks the model as reasoning-capable, so with no explicit default
  // the run is expected to carry thinkLevel "low".
  it("defaults thinking to low for reasoning-capable models", async () => {
    await withTempHome(async (home) => {
      const storePath = path.join(home, "sessions.json");
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "done" }],
        meta: {
          durationMs: 5,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      // Override the catalog for this test only: one reasoning-capable model.
      vi.mocked(loadModelCatalog).mockResolvedValueOnce([
        {
          id: "claude-opus-4-5",
          name: "Opus 4.5",
          provider: "anthropic",
          reasoning: true,
        },
      ]);
      await getReplyFromConfig(
        {
          Body: "hello",
          From: "+1004",
          To: "+2000",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: storePath },
        },
      );
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
      const call = vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0];
      expect(call?.thinkLevel).toBe("low");
    });
  });
  // Sender is on the elevated allow-list, so the run should receive bash
  // elevation enabled/allowed with the default level "on".
  it("passes elevated defaults when sender is approved", async () => {
    await withTempHome(async (home) => {
      const storePath = path.join(home, "sessions.json");
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "done" }],
        meta: {
          durationMs: 5,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      await getReplyFromConfig(
        {
          Body: "hello",
          From: "+1004",
          To: "+2000",
          Provider: "whatsapp",
          SenderE164: "+1004",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          tools: {
            elevated: {
              allowFrom: { whatsapp: ["+1004"] },
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: storePath },
        },
      );
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
      const call = vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0];
      expect(call?.bashElevated).toEqual({
        enabled: true,
        allowed: true,
        defaultLevel: "on",
      });
    });
  });
});

View File

@@ -0,0 +1,273 @@
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
import { loadModelCatalog } from "../agents/model-catalog.js";
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
import { loadSessionStore } from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
// Session-store key the reply pipeline uses for the default agent session.
const MAIN_SESSION_KEY = "agent:main:main";
// Stub the embedded agent runtime so tests can assert whether (and with what
// arguments) the agent would be invoked, without actually running it.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  // Mirrors the real lane naming: empty keys fall back to "main".
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// Catalog contents are supplied per-test via mockResolvedValue (see beforeEach).
vi.mock("../agents/model-catalog.js", () => ({
  loadModelCatalog: vi.fn(),
}));
/**
 * Runs `fn` with HOME pointed at a fresh temporary directory and both agent
 * state env vars redirected beneath it, so tests never touch real user state.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const agentDir = (home: string) => path.join(home, ".clawdbot", "agent");
  return await withTempHomeBase((home) => fn(home), {
    env: {
      CLAWDBOT_AGENT_DIR: agentDir,
      PI_CODING_AGENT_DIR: agentDir,
    },
    prefix: "clawdbot-reply-",
  });
}
// Store-assertion helper shared by the split reply test files. The underscore
// prefix marks it as currently unused in this file; it appears to be kept for
// parity with sibling files that do call it.
function _assertModelSelection(
  storePath: string,
  selection: { model?: string; provider?: string } = {},
) {
  // Load the on-disk session store and inspect the default session entry.
  const store = loadSessionStore(storePath);
  const entry = store[MAIN_SESSION_KEY];
  expect(entry).toBeDefined();
  // Absent selection fields assert that no override was persisted (undefined).
  expect(entry?.modelOverride).toBe(selection.model);
  expect(entry?.providerOverride).toBe(selection.provider);
}
// Directive handling around /think defaults, [[reply_to...]] tag stripping,
// and inline directives mixed into ordinary message text.
describe("directive behavior", () => {
  beforeEach(() => {
    // Fresh agent mock plus a small deterministic model catalog per test.
    vi.mocked(runEmbeddedPiAgent).mockReset();
    vi.mocked(loadModelCatalog).mockResolvedValue([
      { id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
      { id: "claude-sonnet-4-1", name: "Sonnet 4.1", provider: "anthropic" },
      { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
    ]);
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  // Reasoning-capable model + no configured default => /think reports "low".
  it("defaults /think to low for reasoning-capable models when no default set", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      vi.mocked(loadModelCatalog).mockResolvedValueOnce([
        {
          id: "claude-opus-4-5",
          name: "Opus 4.5",
          provider: "anthropic",
          reasoning: true,
        },
      ]);
      const res = await getReplyFromConfig(
        { Body: "/think", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Current thinking level: low");
      expect(text).toContain("Options: off, minimal, low, medium, high.");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // Non-reasoning model => bare /think reports "off".
  it("shows off when /think has no argument and model lacks reasoning", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      vi.mocked(loadModelCatalog).mockResolvedValueOnce([
        {
          id: "claude-opus-4-5",
          name: "Opus 4.5",
          provider: "anthropic",
          reasoning: false,
        },
      ]);
      const res = await getReplyFromConfig(
        { Body: "/think", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Current thinking level: off");
      expect(text).toContain("Options: off, minimal, low, medium, high.");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // The [[reply_to_current]] tag is stripped from the payload text and the
  // inbound MessageSid becomes the reply target.
  it("strips reply tags and maps reply_to_current to MessageSid", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "hello [[reply_to_current]]" }],
        meta: {
          durationMs: 5,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const res = await getReplyFromConfig(
        {
          Body: "ping",
          From: "+1004",
          To: "+2000",
          MessageSid: "msg-123",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const payload = Array.isArray(res) ? res[0] : res;
      expect(payload?.text).toBe("hello");
      expect(payload?.replyToId).toBe("msg-123");
    });
  });
  // Same as above, but the tag contains internal whitespace.
  it("strips reply tags with whitespace and maps reply_to_current to MessageSid", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "hello [[ reply_to_current ]]" }],
        meta: {
          durationMs: 5,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const res = await getReplyFromConfig(
        {
          Body: "ping",
          From: "+1004",
          To: "+2000",
          MessageSid: "msg-123",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const payload = Array.isArray(res) ? res[0] : res;
      expect(payload?.text).toBe("hello");
      expect(payload?.replyToId).toBe("msg-123");
    });
  });
  // When both tags are present, an explicit [[reply_to:id]] wins over
  // [[reply_to_current]].
  it("prefers explicit reply_to id over reply_to_current", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [
          {
            text: "hi [[reply_to_current]] [[reply_to:abc-456]]",
          },
        ],
        meta: {
          durationMs: 5,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const res = await getReplyFromConfig(
        {
          Body: "ping",
          From: "+1004",
          To: "+2000",
          MessageSid: "msg-123",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const payload = Array.isArray(res) ? res[0] : res;
      expect(payload?.text).toBe("hi");
      expect(payload?.replyToId).toBe("abc-456");
    });
  });
  // An inline /think:high directive must not suppress the agent run: the
  // remaining message content still executes.
  it("applies inline think and still runs agent content", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "done" }],
        meta: {
          durationMs: 5,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const res = await getReplyFromConfig(
        {
          Body: "please sync /think:high now",
          From: "+1004",
          To: "+2000",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const texts = (Array.isArray(res) ? res : [res])
        .map((entry) => entry?.text)
        .filter(Boolean);
      expect(texts).toContain("done");
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
    });
  });
});

View File

@@ -0,0 +1,273 @@
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
import { loadModelCatalog } from "../agents/model-catalog.js";
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
import { loadSessionStore } from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
// Session-store key the reply pipeline uses for the default agent session.
const MAIN_SESSION_KEY = "agent:main:main";
// Stub the embedded agent runtime so tests can assert whether (and with what
// arguments) the agent would be invoked, without actually running it.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  // Mirrors the real lane naming: empty keys fall back to "main".
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// Catalog contents are supplied per-test via mockResolvedValue (see beforeEach).
vi.mock("../agents/model-catalog.js", () => ({
  loadModelCatalog: vi.fn(),
}));
/**
 * Runs `fn` with HOME pointed at a fresh temporary directory and both agent
 * state env vars redirected beneath it, so tests never touch real user state.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const agentDir = (home: string) => path.join(home, ".clawdbot", "agent");
  return await withTempHomeBase((home) => fn(home), {
    env: {
      CLAWDBOT_AGENT_DIR: agentDir,
      PI_CODING_AGENT_DIR: agentDir,
    },
    prefix: "clawdbot-reply-",
  });
}
// Store-assertion helper shared by the split reply test files. The underscore
// prefix marks it as currently unused in this file; it appears to be kept for
// parity with sibling files that do call it.
function _assertModelSelection(
  storePath: string,
  selection: { model?: string; provider?: string } = {},
) {
  // Load the on-disk session store and inspect the default session entry.
  const store = loadSessionStore(storePath);
  const entry = store[MAIN_SESSION_KEY];
  expect(entry).toBeDefined();
  // Absent selection fields assert that no override was persisted (undefined).
  expect(entry?.modelOverride).toBe(selection.model);
  expect(entry?.providerOverride).toBe(selection.provider);
}
// Directive handling around mixed-message /reasoning directives, /verbose
// acknowledgement and persistence, and bare /think status output.
describe("directive behavior", () => {
  beforeEach(() => {
    // Fresh agent mock plus a small deterministic model catalog per test.
    vi.mocked(runEmbeddedPiAgent).mockReset();
    vi.mocked(loadModelCatalog).mockResolvedValue([
      { id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
      { id: "claude-sonnet-4-1", name: "Sonnet 4.1", provider: "anthropic" },
      { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
    ]);
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  // A /reasoning directive on its own line of a mixed message applies, and
  // the remaining content still runs the agent.
  it("applies inline reasoning in mixed messages and acks immediately", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "done" }],
        meta: {
          durationMs: 5,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const blockReplies: string[] = [];
      const storePath = path.join(home, "sessions.json");
      const res = await getReplyFromConfig(
        {
          Body: "please reply\n/reasoning on",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
        },
        {
          // Collects streamed block replies (acks) emitted before the result.
          onBlockReply: (payload) => {
            if (payload.text) blockReplies.push(payload.text);
          },
        },
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: storePath },
        },
      );
      const texts = (Array.isArray(res) ? res : [res])
        .map((entry) => entry?.text)
        .filter(Boolean);
      expect(texts).toContain("done");
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
    });
  });
  // Two back-to-back mixed messages: both agent runs happen and no extra
  // block-reply acks are emitted.
  it("keeps reasoning acks for rapid mixed directives", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 5,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const blockReplies: string[] = [];
      const storePath = path.join(home, "sessions.json");
      await getReplyFromConfig(
        {
          Body: "do it\n/reasoning on",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
        },
        {
          onBlockReply: (payload) => {
            if (payload.text) blockReplies.push(payload.text);
          },
        },
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: storePath },
        },
      );
      await getReplyFromConfig(
        {
          Body: "again\n/reasoning on",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
        },
        {
          onBlockReply: (payload) => {
            if (payload.text) blockReplies.push(payload.text);
          },
        },
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: storePath },
        },
      );
      expect(runEmbeddedPiAgent).toHaveBeenCalledTimes(2);
      expect(blockReplies.length).toBe(0);
    });
  });
  // Standalone /verbose on replies immediately with the ⚙️ system marker and
  // never reaches the agent.
  it("acks verbose directive immediately with system marker", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const res = await getReplyFromConfig(
        { Body: "/verbose on", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toMatch(/^⚙️ Verbose logging enabled\./);
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // Standalone /verbose off persists verboseLevel "off" in the session store.
  it("persists verbose off when directive is standalone", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      const res = await getReplyFromConfig(
        { Body: "/verbose off", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          session: { store: storePath },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toMatch(/Verbose logging disabled\./);
      const store = loadSessionStore(storePath);
      const entry = Object.values(store)[0];
      expect(entry?.verboseLevel).toBe("off");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // Bare /think echoes the configured default ("high" here).
  it("shows current think level when /think has no argument", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const res = await getReplyFromConfig(
        { Body: "/think", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
              thinkingDefault: "high",
            },
          },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Current thinking level: high");
      expect(text).toContain("Options: off, minimal, low, medium, high.");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // Bare /think with no default configured reports "off".
  it("shows off when /think has no argument and no default set", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const res = await getReplyFromConfig(
        { Body: "/think", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Current thinking level: off");
      expect(text).toContain("Options: off, minimal, low, medium, high.");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
});

View File

@@ -0,0 +1,243 @@
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
import { loadModelCatalog } from "../agents/model-catalog.js";
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
import { loadSessionStore } from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
// Session-store key the reply pipeline uses for the default agent session.
const MAIN_SESSION_KEY = "agent:main:main";
// Stub the embedded agent runtime so tests can assert whether (and with what
// arguments) the agent would be invoked, without actually running it.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  // Mirrors the real lane naming: empty keys fall back to "main".
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// Catalog contents are supplied per-test via mockResolvedValue (see beforeEach).
vi.mock("../agents/model-catalog.js", () => ({
  loadModelCatalog: vi.fn(),
}));
/**
 * Runs `fn` with HOME pointed at a fresh temporary directory and both agent
 * state env vars redirected beneath it, so tests never touch real user state.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const agentDir = (home: string) => path.join(home, ".clawdbot", "agent");
  return await withTempHomeBase((home) => fn(home), {
    env: {
      CLAWDBOT_AGENT_DIR: agentDir,
      PI_CODING_AGENT_DIR: agentDir,
    },
    prefix: "clawdbot-reply-",
  });
}
// Store-assertion helper shared by the split reply test files. The underscore
// prefix marks it as currently unused in this file; it appears to be kept for
// parity with sibling files that do call it.
function _assertModelSelection(
  storePath: string,
  selection: { model?: string; provider?: string } = {},
) {
  // Load the on-disk session store and inspect the default session entry.
  const store = loadSessionStore(storePath);
  const entry = store[MAIN_SESSION_KEY];
  expect(entry).toBeDefined();
  // Absent selection fields assert that no override was persisted (undefined).
  expect(entry?.modelOverride).toBe(selection.model);
  expect(entry?.providerOverride).toBe(selection.provider);
}
describe("directive behavior", () => {
beforeEach(() => {
vi.mocked(runEmbeddedPiAgent).mockReset();
vi.mocked(loadModelCatalog).mockResolvedValue([
{ id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
{ id: "claude-sonnet-4-1", name: "Sonnet 4.1", provider: "anthropic" },
{ id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
]);
});
afterEach(() => {
vi.restoreAllMocks();
});
it("shows current verbose level when /verbose has no argument", async () => {
await withTempHome(async (home) => {
vi.mocked(runEmbeddedPiAgent).mockReset();
const res = await getReplyFromConfig(
{ Body: "/verbose", From: "+1222", To: "+1222" },
{},
{
agents: {
defaults: {
model: "anthropic/claude-opus-4-5",
workspace: path.join(home, "clawd"),
verboseDefault: "on",
},
},
session: { store: path.join(home, "sessions.json") },
},
);
const text = Array.isArray(res) ? res[0]?.text : res?.text;
expect(text).toContain("Current verbose level: on");
expect(text).toContain("Options: on, off.");
expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
});
});
it("shows current reasoning level when /reasoning has no argument", async () => {
await withTempHome(async (home) => {
vi.mocked(runEmbeddedPiAgent).mockReset();
const res = await getReplyFromConfig(
{ Body: "/reasoning", From: "+1222", To: "+1222" },
{},
{
agents: {
defaults: {
model: "anthropic/claude-opus-4-5",
workspace: path.join(home, "clawd"),
},
},
session: { store: path.join(home, "sessions.json") },
},
);
const text = Array.isArray(res) ? res[0]?.text : res?.text;
expect(text).toContain("Current reasoning level: off");
expect(text).toContain("Options: on, off, stream.");
expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
});
});
it("shows current elevated level when /elevated has no argument", async () => {
await withTempHome(async (home) => {
vi.mocked(runEmbeddedPiAgent).mockReset();
const res = await getReplyFromConfig(
{
Body: "/elevated",
From: "+1222",
To: "+1222",
Provider: "whatsapp",
SenderE164: "+1222",
},
{},
{
agents: {
defaults: {
model: "anthropic/claude-opus-4-5",
workspace: path.join(home, "clawd"),
elevatedDefault: "on",
},
},
tools: {
elevated: {
allowFrom: { whatsapp: ["+1222"] },
},
},
channels: { whatsapp: { allowFrom: ["+1222"] } },
session: { store: path.join(home, "sessions.json") },
},
);
const text = Array.isArray(res) ? res[0]?.text : res?.text;
expect(text).toContain("Current elevated level: on");
expect(text).toContain("Options: on, off.");
expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
});
});
it("persists elevated off and reflects it in /status (even when default is on)", async () => {
await withTempHome(async (home) => {
vi.mocked(runEmbeddedPiAgent).mockReset();
const storePath = path.join(home, "sessions.json");
const res = await getReplyFromConfig(
{
Body: "/elevated off\n/status",
From: "+1222",
To: "+1222",
Provider: "whatsapp",
SenderE164: "+1222",
},
{},
{
agents: {
defaults: {
model: "anthropic/claude-opus-4-5",
workspace: path.join(home, "clawd"),
elevatedDefault: "on",
},
},
tools: {
elevated: {
allowFrom: { whatsapp: ["+1222"] },
},
},
channels: { whatsapp: { allowFrom: ["+1222"] } },
session: { store: storePath },
},
);
const text = Array.isArray(res) ? res[0]?.text : res?.text;
expect(text).toContain("Elevated mode disabled.");
const optionsLine = text
?.split("\n")
.find((line) => line.trim().startsWith("⚙️"));
expect(optionsLine).toBeTruthy();
expect(optionsLine).not.toContain("elevated");
const store = loadSessionStore(storePath);
expect(store["agent:main:main"]?.elevatedLevel).toBe("off");
expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
});
});
it("strips inline elevated directives from the user text (does not persist session override)", async () => {
await withTempHome(async (home) => {
vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
payloads: [{ text: "ok" }],
meta: {
durationMs: 1,
agentMeta: { sessionId: "s", provider: "p", model: "m" },
},
});
const storePath = path.join(home, "sessions.json");
await getReplyFromConfig(
{
Body: "hello there /elevated off",
From: "+1222",
To: "+1222",
Provider: "whatsapp",
SenderE164: "+1222",
},
{},
{
agents: {
defaults: {
model: "anthropic/claude-opus-4-5",
workspace: path.join(home, "clawd"),
elevatedDefault: "on",
},
},
tools: {
elevated: {
allowFrom: { whatsapp: ["+1222"] },
},
},
channels: { whatsapp: { allowFrom: ["+1222"] } },
session: { store: storePath },
},
);
const store = loadSessionStore(storePath);
expect(store["agent:main:main"]?.elevatedLevel).toBeUndefined();
const calls = vi.mocked(runEmbeddedPiAgent).mock.calls;
expect(calls.length).toBeGreaterThan(0);
const call = calls[0]?.[0];
expect(call?.prompt).toContain("hello there");
expect(call?.prompt).not.toContain("/elevated");
});
});
});

View File

@@ -0,0 +1,241 @@
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
import { loadModelCatalog } from "../agents/model-catalog.js";
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
import { loadSessionStore } from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
// Session-store key the reply pipeline uses for the default agent session.
const MAIN_SESSION_KEY = "agent:main:main";
// Stub the embedded agent runtime so tests can assert whether (and with what
// arguments) the agent would be invoked, without actually running it.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  // Mirrors the real lane naming: empty keys fall back to "main".
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// Catalog contents are supplied per-test via mockResolvedValue (see beforeEach).
vi.mock("../agents/model-catalog.js", () => ({
  loadModelCatalog: vi.fn(),
}));
/**
 * Runs `fn` with HOME pointed at a fresh temporary directory and both agent
 * state env vars redirected beneath it, so tests never touch real user state.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const agentDir = (home: string) => path.join(home, ".clawdbot", "agent");
  return await withTempHomeBase((home) => fn(home), {
    env: {
      CLAWDBOT_AGENT_DIR: agentDir,
      PI_CODING_AGENT_DIR: agentDir,
    },
    prefix: "clawdbot-reply-",
  });
}
// Store-assertion helper shared by the split reply test files. The underscore
// prefix marks it as currently unused in this file; it appears to be kept for
// parity with sibling files that do call it.
function _assertModelSelection(
  storePath: string,
  selection: { model?: string; provider?: string } = {},
) {
  // Load the on-disk session store and inspect the default session entry.
  const store = loadSessionStore(storePath);
  const entry = store[MAIN_SESSION_KEY];
  expect(entry).toBeDefined();
  // Absent selection fields assert that no override was persisted (undefined).
  expect(entry?.modelOverride).toBe(selection.model);
  expect(entry?.providerOverride).toBe(selection.provider);
}
// /elevated directive handling: toggling off/on, status echo, and
// per-agent disablement. The embedded agent is mocked, so each test also
// asserts the directive never triggered a model run.
describe("directive behavior", () => {
  beforeEach(() => {
    vi.mocked(runEmbeddedPiAgent).mockReset();
    vi.mocked(loadModelCatalog).mockResolvedValue([
      { id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
      { id: "claude-sonnet-4-1", name: "Sonnet 4.1", provider: "anthropic" },
      { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
    ]);
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  it("shows current elevated level as off after toggling it off", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      // First call persists the "off" override into the session store.
      await getReplyFromConfig(
        {
          Body: "/elevated off",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
          SenderE164: "+1222",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
              elevatedDefault: "on",
            },
          },
          tools: {
            elevated: {
              allowFrom: { whatsapp: ["+1222"] },
            },
          },
          channels: { whatsapp: { allowFrom: ["+1222"] } },
          session: { store: storePath },
        },
      );
      // Bare "/elevated" should report the persisted level, not the default.
      const res = await getReplyFromConfig(
        {
          Body: "/elevated",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
          SenderE164: "+1222",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
              elevatedDefault: "on",
            },
          },
          tools: {
            elevated: {
              allowFrom: { whatsapp: ["+1222"] },
            },
          },
          channels: { whatsapp: { allowFrom: ["+1222"] } },
          session: { store: storePath },
        },
      );
      // Replies may be a single payload or an array; inspect the first text.
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Current elevated level: off");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("can toggle elevated off then back on (status reflects on)", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      const cfg = {
        agents: {
          defaults: {
            model: "anthropic/claude-opus-4-5",
            workspace: path.join(home, "clawd"),
            elevatedDefault: "on",
          },
        },
        tools: {
          elevated: {
            allowFrom: { whatsapp: ["+1222"] },
          },
        },
        channels: { whatsapp: { allowFrom: ["+1222"] } },
        session: { store: storePath },
      } as const;
      await getReplyFromConfig(
        {
          Body: "/elevated off",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
          SenderE164: "+1222",
        },
        {},
        cfg,
      );
      await getReplyFromConfig(
        {
          Body: "/elevated on",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
          SenderE164: "+1222",
        },
        {},
        cfg,
      );
      const res = await getReplyFromConfig(
        {
          Body: "/status",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
          SenderE164: "+1222",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      // The status reply's options line is prefixed with the gear emoji.
      const optionsLine = text
        ?.split("\n")
        .find((line) => line.trim().startsWith("⚙️"));
      expect(optionsLine).toBeTruthy();
      expect(optionsLine).toContain("elevated");
      const store = loadSessionStore(storePath);
      expect(store["agent:main:main"]?.elevatedLevel).toBe("on");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("rejects per-agent elevated when disabled", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      // Agent "restricted" disables elevated even though the sender is on
      // the global allowlist; the reply should name the config knob.
      const res = await getReplyFromConfig(
        {
          Body: "/elevated on",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
          SenderE164: "+1222",
          SessionKey: "agent:restricted:main",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
            list: [
              {
                id: "restricted",
                tools: {
                  elevated: { enabled: false },
                },
              },
            ],
          },
          tools: {
            elevated: {
              allowFrom: { whatsapp: ["+1222"] },
            },
          },
          channels: { whatsapp: { allowFrom: ["+1222"] } },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("agents.list[].tools.elevated.enabled");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
});

View File

@@ -0,0 +1,264 @@
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
import { loadModelCatalog } from "../agents/model-catalog.js";
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
import { loadSessionStore } from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
const MAIN_SESSION_KEY = "agent:main:main";
// Stub the embedded Pi agent module so no real agent run ever starts;
// tests below assert on whether runEmbeddedPiAgent was (not) invoked.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// Model catalog is stubbed; each test seeds it via vi.mocked(loadModelCatalog).
vi.mock("../agents/model-catalog.js", () => ({
  loadModelCatalog: vi.fn(),
}));
// Runs `fn` inside an isolated temp HOME so session/agent state never
// leaks between tests; both agent-dir env vars point into the temp home.
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const agentDir = (home: string) => path.join(home, ".clawdbot", "agent");
  return withTempHomeBase((home) => fn(home), {
    env: {
      CLAWDBOT_AGENT_DIR: agentDir,
      PI_CODING_AGENT_DIR: agentDir,
    },
    prefix: "clawdbot-reply-",
  });
}
// Asserts that the main session entry persisted the expected
// model/provider overrides (both undefined when nothing was selected).
function _assertModelSelection(
  storePath: string,
  selection: { model?: string; provider?: string } = {},
) {
  const entry = loadSessionStore(storePath)[MAIN_SESSION_KEY];
  expect(entry).toBeDefined();
  expect(entry?.modelOverride).toBe(selection.model);
  expect(entry?.providerOverride).toBe(selection.provider);
}
// /elevated directive handling: allowlist layering (global + per-agent),
// direct-runtime warnings, invalid levels, and multi-directive messages.
// The embedded agent is mocked, so each test also asserts no model run.
describe("directive behavior", () => {
  beforeEach(() => {
    vi.mocked(runEmbeddedPiAgent).mockReset();
    vi.mocked(loadModelCatalog).mockResolvedValue([
      { id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
      { id: "claude-sonnet-4-1", name: "Sonnet 4.1", provider: "anthropic" },
      { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
    ]);
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  it("requires per-agent allowlist in addition to global", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      // Sender +1222 is globally allowlisted but agent "work" only allows
      // +1333, so the request must be rejected naming the per-agent knob.
      const res = await getReplyFromConfig(
        {
          Body: "/elevated on",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
          SenderE164: "+1222",
          SessionKey: "agent:work:main",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
            list: [
              {
                id: "work",
                tools: {
                  elevated: {
                    allowFrom: { whatsapp: ["+1333"] },
                  },
                },
              },
            ],
          },
          tools: {
            elevated: {
              allowFrom: { whatsapp: ["+1222", "+1333"] },
            },
          },
          channels: { whatsapp: { allowFrom: ["+1222", "+1333"] } },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      // Replies may be a single payload or an array; inspect the first text.
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("agents.list[].tools.elevated.allowFrom.whatsapp");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("allows elevated when both global and per-agent allowlists match", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const res = await getReplyFromConfig(
        {
          Body: "/elevated on",
          From: "+1333",
          To: "+1333",
          Provider: "whatsapp",
          SenderE164: "+1333",
          SessionKey: "agent:work:main",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
            list: [
              {
                id: "work",
                tools: {
                  elevated: {
                    allowFrom: { whatsapp: ["+1333"] },
                  },
                },
              },
            ],
          },
          tools: {
            elevated: {
              allowFrom: { whatsapp: ["+1222", "+1333"] },
            },
          },
          channels: { whatsapp: { allowFrom: ["+1222", "+1333"] } },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Elevated mode enabled");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("warns when elevated is used in direct runtime", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      // sandbox.mode "off" means a direct runtime: the ack should include
      // the note that sandboxing does not apply.
      const res = await getReplyFromConfig(
        {
          Body: "/elevated off",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
          SenderE164: "+1222",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
              sandbox: { mode: "off" },
            },
          },
          tools: {
            elevated: {
              allowFrom: { whatsapp: ["+1222"] },
            },
          },
          channels: { whatsapp: { allowFrom: ["+1222"] } },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Elevated mode disabled.");
      expect(text).toContain("Runtime is direct; sandboxing does not apply.");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("rejects invalid elevated level", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const res = await getReplyFromConfig(
        {
          Body: "/elevated maybe",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
          SenderE164: "+1222",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          tools: {
            elevated: {
              allowFrom: { whatsapp: ["+1222"] },
            },
          },
          channels: { whatsapp: { allowFrom: ["+1222"] } },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Unrecognized elevated level");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("handles multiple directives in a single message", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      // Two directives on separate lines: both acks must appear in one reply.
      const res = await getReplyFromConfig(
        {
          Body: "/elevated off\n/verbose on",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
          SenderE164: "+1222",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          tools: {
            elevated: {
              allowFrom: { whatsapp: ["+1222"] },
            },
          },
          channels: { whatsapp: { allowFrom: ["+1222"] } },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Elevated mode disabled.");
      expect(text).toContain("Verbose logging enabled.");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
});

View File

@@ -0,0 +1,265 @@
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
import { loadModelCatalog } from "../agents/model-catalog.js";
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
import { loadSessionStore } from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
const MAIN_SESSION_KEY = "agent:main:main";
// Stub the embedded Pi agent module so no real agent run ever starts;
// tests below assert on whether runEmbeddedPiAgent was (not) invoked.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// Model catalog is stubbed; each test seeds it via vi.mocked(loadModelCatalog).
vi.mock("../agents/model-catalog.js", () => ({
  loadModelCatalog: vi.fn(),
}));
// Runs `fn` inside an isolated temp HOME so session/agent state never
// leaks between tests; both agent-dir env vars point into the temp home.
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const agentDir = (home: string) => path.join(home, ".clawdbot", "agent");
  return withTempHomeBase((home) => fn(home), {
    env: {
      CLAWDBOT_AGENT_DIR: agentDir,
      PI_CODING_AGENT_DIR: agentDir,
    },
    prefix: "clawdbot-reply-",
  });
}
// Asserts that the main session entry persisted the expected
// model/provider overrides (both undefined when nothing was selected).
function _assertModelSelection(
  storePath: string,
  selection: { model?: string; provider?: string } = {},
) {
  const entry = loadSessionStore(storePath)[MAIN_SESSION_KEY];
  expect(entry).toBeDefined();
  expect(entry?.modelOverride).toBe(selection.model);
  expect(entry?.providerOverride).toBe(selection.provider);
}
// /status alongside directive acks, and /queue directive persistence
// (mode, debounce, cap, drop, reset). Embedded agent is mocked, so each
// test also asserts no model run was triggered.
describe("directive behavior", () => {
  beforeEach(() => {
    vi.mocked(runEmbeddedPiAgent).mockReset();
    vi.mocked(loadModelCatalog).mockResolvedValue([
      { id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
      { id: "claude-sonnet-4-1", name: "Sonnet 4.1", provider: "anthropic" },
      { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
    ]);
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  it("returns status alongside directive-only acks", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      const res = await getReplyFromConfig(
        {
          Body: "/elevated off\n/status",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
          SenderE164: "+1222",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          tools: {
            elevated: {
              allowFrom: { whatsapp: ["+1222"] },
            },
          },
          channels: { whatsapp: { allowFrom: ["+1222"] } },
          session: { store: storePath },
        },
      );
      // Replies may be a single payload or an array; inspect the first text.
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Elevated mode disabled.");
      expect(text).toContain("Session: agent:main:main");
      // Options line is prefixed with the gear emoji; elevated was just
      // turned off, so it must not be listed there.
      const optionsLine = text
        ?.split("\n")
        .find((line) => line.trim().startsWith("⚙️"));
      expect(optionsLine).toBeTruthy();
      expect(optionsLine).not.toContain("elevated");
      const store = loadSessionStore(storePath);
      expect(store["agent:main:main"]?.elevatedLevel).toBe("off");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("shows elevated off in status when per-agent elevated is disabled", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const res = await getReplyFromConfig(
        {
          Body: "/status",
          From: "+1222",
          To: "+1222",
          Provider: "whatsapp",
          SenderE164: "+1222",
          SessionKey: "agent:restricted:main",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
            list: [
              {
                id: "restricted",
                tools: {
                  elevated: { enabled: false },
                },
              },
            ],
          },
          tools: {
            elevated: {
              allowFrom: { whatsapp: ["+1222"] },
            },
          },
          channels: { whatsapp: { allowFrom: ["+1222"] } },
          session: { store: path.join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      // When elevated is disabled for the agent, status omits it entirely.
      expect(text).not.toContain("elevated");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("acks queue directive and persists override", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      const res = await getReplyFromConfig(
        { Body: "/queue interrupt", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: storePath },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toMatch(/^⚙️ Queue mode set to interrupt\./);
      const store = loadSessionStore(storePath);
      const entry = Object.values(store)[0];
      expect(entry?.queueMode).toBe("interrupt");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("persists queue options when directive is standalone", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      // All three queue options ride along with the mode on one line.
      const res = await getReplyFromConfig(
        {
          Body: "/queue collect debounce:2s cap:5 drop:old",
          From: "+1222",
          To: "+1222",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: storePath },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toMatch(/^⚙️ Queue mode set to collect\./);
      expect(text).toMatch(/Queue debounce set to 2000ms/);
      expect(text).toMatch(/Queue cap set to 5/);
      expect(text).toMatch(/Queue drop set to old/);
      const store = loadSessionStore(storePath);
      const entry = Object.values(store)[0];
      expect(entry?.queueMode).toBe("collect");
      expect(entry?.queueDebounceMs).toBe(2000);
      expect(entry?.queueCap).toBe(5);
      expect(entry?.queueDrop).toBe("old");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("resets queue mode to default", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      await getReplyFromConfig(
        { Body: "/queue interrupt", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: storePath },
        },
      );
      const res = await getReplyFromConfig(
        { Body: "/queue reset", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: storePath },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toMatch(/^⚙️ Queue mode reset to default\./);
      // Reset must clear every queue override, not just the mode.
      const store = loadSessionStore(storePath);
      const entry = Object.values(store)[0];
      expect(entry?.queueMode).toBeUndefined();
      expect(entry?.queueDebounceMs).toBeUndefined();
      expect(entry?.queueCap).toBeUndefined();
      expect(entry?.queueDrop).toBeUndefined();
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
});

View File

@@ -0,0 +1,261 @@
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
import { loadModelCatalog } from "../agents/model-catalog.js";
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
import {
loadSessionStore,
resolveSessionKey,
saveSessionStore,
} from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
const MAIN_SESSION_KEY = "agent:main:main";
// Stub the embedded Pi agent module so no real agent run ever starts;
// tests below assert on whether runEmbeddedPiAgent was (not) invoked.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// Model catalog is stubbed; each test seeds it via vi.mocked(loadModelCatalog).
vi.mock("../agents/model-catalog.js", () => ({
  loadModelCatalog: vi.fn(),
}));
// Runs `fn` inside an isolated temp HOME so session/agent state never
// leaks between tests; both agent-dir env vars point into the temp home.
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const agentDir = (home: string) => path.join(home, ".clawdbot", "agent");
  return withTempHomeBase((home) => fn(home), {
    env: {
      CLAWDBOT_AGENT_DIR: agentDir,
      PI_CODING_AGENT_DIR: agentDir,
    },
    prefix: "clawdbot-reply-",
  });
}
// Asserts that the main session entry persisted the expected
// model/provider overrides (both undefined when nothing was selected).
function _assertModelSelection(
  storePath: string,
  selection: { model?: string; provider?: string } = {},
) {
  const entry = loadSessionStore(storePath)[MAIN_SESSION_KEY];
  expect(entry).toBeDefined();
  expect(entry?.modelOverride).toBe(selection.model);
  expect(entry?.providerOverride).toBe(selection.provider);
}
// In-flight verbose toggles (observed via shouldEmitToolResult) and
// /model listing. The agent mock mutates the session store mid-run to
// simulate a concurrent directive.
describe("directive behavior", () => {
  beforeEach(() => {
    vi.mocked(runEmbeddedPiAgent).mockReset();
    vi.mocked(loadModelCatalog).mockResolvedValue([
      { id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
      { id: "claude-sonnet-4-1", name: "Sonnet 4.1", provider: "anthropic" },
      { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
    ]);
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  it("updates tool verbose during an in-flight run (toggle on)", async () => {
    await withTempHome(async (home) => {
      const storePath = path.join(home, "sessions.json");
      const ctx = { Body: "please do the thing", From: "+1004", To: "+2000" };
      const sessionKey = resolveSessionKey(
        "per-sender",
        { From: ctx.From, To: ctx.To, Body: ctx.Body },
        "main",
      );
      // The mocked run flips verboseLevel to "on" mid-flight and expects
      // shouldEmitToolResult to observe the change immediately.
      vi.mocked(runEmbeddedPiAgent).mockImplementation(async (params) => {
        const shouldEmit = params.shouldEmitToolResult;
        expect(shouldEmit?.()).toBe(false);
        const store = loadSessionStore(storePath);
        const entry = store[sessionKey] ?? {
          sessionId: "s",
          updatedAt: Date.now(),
        };
        store[sessionKey] = {
          ...entry,
          verboseLevel: "on",
          updatedAt: Date.now(),
        };
        await saveSessionStore(storePath, store);
        expect(shouldEmit?.()).toBe(true);
        return {
          payloads: [{ text: "done" }],
          meta: {
            durationMs: 5,
            agentMeta: { sessionId: "s", provider: "p", model: "m" },
          },
        };
      });
      const res = await getReplyFromConfig(
        ctx,
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: storePath },
        },
      );
      const texts = (Array.isArray(res) ? res : [res])
        .map((entry) => entry?.text)
        .filter(Boolean);
      expect(texts).toContain("done");
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
    });
  });
  it("updates tool verbose during an in-flight run (toggle off)", async () => {
    await withTempHome(async (home) => {
      const storePath = path.join(home, "sessions.json");
      const ctx = {
        Body: "please do the thing",
        From: "+1004",
        To: "+2000",
      };
      const sessionKey = resolveSessionKey(
        "per-sender",
        { From: ctx.From, To: ctx.To, Body: ctx.Body },
        "main",
      );
      // Mirror image of the previous test: starts verbose (set via the
      // /verbose on directive below), then flips off mid-run.
      vi.mocked(runEmbeddedPiAgent).mockImplementation(async (params) => {
        const shouldEmit = params.shouldEmitToolResult;
        expect(shouldEmit?.()).toBe(true);
        const store = loadSessionStore(storePath);
        const entry = store[sessionKey] ?? {
          sessionId: "s",
          updatedAt: Date.now(),
        };
        store[sessionKey] = {
          ...entry,
          verboseLevel: "off",
          updatedAt: Date.now(),
        };
        await saveSessionStore(storePath, store);
        expect(shouldEmit?.()).toBe(false);
        return {
          payloads: [{ text: "done" }],
          meta: {
            durationMs: 5,
            agentMeta: { sessionId: "s", provider: "p", model: "m" },
          },
        };
      });
      await getReplyFromConfig(
        { Body: "/verbose on", From: ctx.From, To: ctx.To },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: storePath },
        },
      );
      const res = await getReplyFromConfig(
        ctx,
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: path.join(home, "clawd"),
            },
          },
          channels: { whatsapp: { allowFrom: ["*"] } },
          session: { store: storePath },
        },
      );
      const texts = (Array.isArray(res) ? res : [res])
        .map((entry) => entry?.text)
        .filter(Boolean);
      expect(texts).toContain("done");
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
    });
  });
  it("lists allowlisted models on /model", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      const res = await getReplyFromConfig(
        { Body: "/model", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "anthropic/claude-opus-4-5" },
              workspace: path.join(home, "clawd"),
              models: {
                "anthropic/claude-opus-4-5": {},
                "openai/gpt-4.1-mini": {},
              },
            },
          },
          session: { store: storePath },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("anthropic/claude-opus-4-5");
      expect(text).toContain("Pick: /model <#> or /model <provider/model>");
      expect(text).toContain("gpt-4.1-mini — openai");
      // Catalog has claude-sonnet-4-1, but it is not allowlisted in config.
      expect(text).not.toContain("claude-sonnet-4-1");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("lists allowlisted models on /model status", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      const res = await getReplyFromConfig(
        { Body: "/model status", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "anthropic/claude-opus-4-5" },
              workspace: path.join(home, "clawd"),
              models: {
                "anthropic/claude-opus-4-5": {},
                "openai/gpt-4.1-mini": {},
              },
            },
          },
          session: { store: storePath },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("anthropic/claude-opus-4-5");
      expect(text).toContain("openai/gpt-4.1-mini");
      expect(text).not.toContain("claude-sonnet-4-1");
      expect(text).toContain("auth:");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
});

View File

@@ -0,0 +1,263 @@
import path from "node:path";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
import { loadModelCatalog } from "../agents/model-catalog.js";
import { runEmbeddedPiAgent } from "../agents/pi-embedded.js";
import { loadSessionStore } from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
const MAIN_SESSION_KEY = "agent:main:main";
// Stub the embedded Pi agent module so no real agent run ever starts;
// tests below assert on whether runEmbeddedPiAgent was (not) invoked.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// Model catalog is stubbed; each test seeds it via vi.mocked(loadModelCatalog).
vi.mock("../agents/model-catalog.js", () => ({
  loadModelCatalog: vi.fn(),
}));
// Runs `fn` inside an isolated temp HOME so session/agent state never
// leaks between tests; both agent-dir env vars point into the temp home.
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const agentDir = (home: string) => path.join(home, ".clawdbot", "agent");
  return withTempHomeBase((home) => fn(home), {
    env: {
      CLAWDBOT_AGENT_DIR: agentDir,
      PI_CODING_AGENT_DIR: agentDir,
    },
    prefix: "clawdbot-reply-",
  });
}
// Asserts that the main session entry persisted the expected
// model/provider overrides (both undefined when nothing was selected).
function assertModelSelection(
  storePath: string,
  selection: { model?: string; provider?: string } = {},
) {
  const entry = loadSessionStore(storePath)[MAIN_SESSION_KEY];
  expect(entry).toBeDefined();
  expect(entry?.modelOverride).toBe(selection.model);
  expect(entry?.providerOverride).toBe(selection.provider);
}
// /model listing and selection: catalog fallback, config-allowlist merge,
// auth-label formatting, setting overrides, and alias resolution.
describe("directive behavior", () => {
  beforeEach(() => {
    vi.mocked(runEmbeddedPiAgent).mockReset();
    vi.mocked(loadModelCatalog).mockResolvedValue([
      { id: "claude-opus-4-5", name: "Opus 4.5", provider: "anthropic" },
      { id: "claude-sonnet-4-1", name: "Sonnet 4.1", provider: "anthropic" },
      { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai" },
    ]);
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  it("lists allowlisted models on /model list", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      const res = await getReplyFromConfig(
        { Body: "/model list", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "anthropic/claude-opus-4-5" },
              workspace: path.join(home, "clawd"),
              models: {
                "anthropic/claude-opus-4-5": {},
                "openai/gpt-4.1-mini": {},
              },
            },
          },
          session: { store: storePath },
        },
      );
      // Replies may be a single payload or an array; inspect the first text.
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Pick: /model <#> or /model <provider/model>");
      expect(text).toContain("claude-opus-4-5 — anthropic");
      expect(text).toContain("gpt-4.1-mini — openai");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("falls back to configured models when catalog is unavailable", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      // Empty catalog: the listing must still come from config allowlist.
      vi.mocked(loadModelCatalog).mockResolvedValueOnce([]);
      const storePath = path.join(home, "sessions.json");
      const res = await getReplyFromConfig(
        { Body: "/model", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "anthropic/claude-opus-4-5" },
              workspace: path.join(home, "clawd"),
              models: {
                "anthropic/claude-opus-4-5": {},
                "openai/gpt-4.1-mini": {},
              },
            },
          },
          session: { store: storePath },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Pick: /model <#> or /model <provider/model>");
      expect(text).toContain("claude-opus-4-5 — anthropic");
      expect(text).toContain("gpt-4.1-mini — openai");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("merges config allowlist models even when catalog is present", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      // Catalog present but missing custom providers: /model should still include
      // allowlisted provider/model keys from config.
      vi.mocked(loadModelCatalog).mockResolvedValueOnce([
        {
          provider: "anthropic",
          id: "claude-opus-4-5",
          name: "Claude Opus 4.5",
        },
        { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
      ]);
      const storePath = path.join(home, "sessions.json");
      const res = await getReplyFromConfig(
        { Body: "/model list", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "anthropic/claude-opus-4-5" },
              workspace: path.join(home, "clawd"),
              models: {
                "anthropic/claude-opus-4-5": {},
                "openai/gpt-4.1-mini": {},
                "minimax/MiniMax-M2.1": { alias: "minimax" },
              },
            },
          },
          models: {
            mode: "merge",
            providers: {
              minimax: {
                baseUrl: "https://api.minimax.io/anthropic",
                api: "anthropic-messages",
                models: [{ id: "MiniMax-M2.1", name: "MiniMax M2.1" }],
              },
            },
          },
          session: { store: storePath },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("claude-opus-4-5 — anthropic");
      expect(text).toContain("gpt-4.1-mini — openai");
      expect(text).toContain("MiniMax-M2.1 — minimax");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("does not repeat missing auth labels on /model list", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      const res = await getReplyFromConfig(
        { Body: "/model list", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "anthropic/claude-opus-4-5" },
              workspace: path.join(home, "clawd"),
              models: {
                "anthropic/claude-opus-4-5": {},
              },
            },
          },
          session: { store: storePath },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      // Guards against the doubled "missing (missing)" formatting bug.
      expect(text).not.toContain("missing (missing)");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("sets model override on /model directive", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      await getReplyFromConfig(
        { Body: "/model openai/gpt-4.1-mini", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "anthropic/claude-opus-4-5" },
              workspace: path.join(home, "clawd"),
              models: {
                "anthropic/claude-opus-4-5": {},
                "openai/gpt-4.1-mini": {},
              },
            },
          },
          session: { store: storePath },
        },
      );
      assertModelSelection(storePath, {
        model: "gpt-4.1-mini",
        provider: "openai",
      });
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("supports model aliases on /model directive", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockReset();
      const storePath = path.join(home, "sessions.json");
      // Alias lookup should be usable in place of provider/model.
      await getReplyFromConfig(
        { Body: "/model Opus", From: "+1222", To: "+1222" },
        {},
        {
          agents: {
            defaults: {
              model: { primary: "openai/gpt-4.1-mini" },
              workspace: path.join(home, "clawd"),
              models: {
                "openai/gpt-4.1-mini": {},
                "anthropic/claude-opus-4-5": { alias: "Opus" },
              },
            },
          },
          session: { store: storePath },
        },
      );
      assertModelSelection(storePath, {
        model: "claude-opus-4-5",
        provider: "anthropic",
      });
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
});

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,200 @@
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Stub the embedded Pi agent module so no real agent run ever starts.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted lets these mock objects be referenced from the hoisted
// vi.mock factories below.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Static catalog fixture covering the providers/models the tests exercise.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
import {
abortEmbeddedPiRun,
runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { getReplyFromConfig } from "./reply.js";
const _MAIN_SESSION_KEY = "agent:main:main";
const webMocks = vi.hoisted(() => ({
webAuthExists: vi.fn().mockResolvedValue(true),
getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/**
 * Runs `fn` inside a temporary HOME directory, clearing the embedded-agent
 * spies first so each test starts from a clean call history.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const wrapped = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return fn(home);
  };
  return withTempHomeBase(wrapped, { prefix: "clawdbot-triggers-" });
}
/** Builds a minimal config rooted at `home`: one default model, open WhatsApp allowlist, JSON session store. */
function makeCfg(home: string) {
  const workspace = join(home, "clawd");
  const store = join(home, "sessions.json");
  return {
    agents: { defaults: { model: "anthropic/claude-opus-4-5", workspace } },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { store },
  };
}
afterEach(() => {
  vi.restoreAllMocks();
});
// Verifies the group-chat extra system prompt labels the correct surface
// (Discord / WhatsApp / Telegram) and includes subject, members, activation
// mode and the shared participation note.
describe("group intro prompts", () => {
  // Etiquette note appended verbatim to every group system prompt.
  const groupParticipationNote =
    "Be a good group participant: mostly lurk and follow the conversation; reply only when directly addressed or you can add clear value. Emoji reactions are welcome when available. Write like a human. Avoid Markdown tables. Don't type literal \\n sequences; use real line breaks sparingly.";
  it("labels Discord groups using the surface metadata", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      await getReplyFromConfig(
        {
          Body: "status update",
          From: "group:dev",
          To: "+1888",
          ChatType: "group",
          GroupSubject: "Release Squad",
          GroupMembers: "Alice, Bob",
          Provider: "discord",
        },
        {},
        makeCfg(home),
      );
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
      // Inspect the extra system prompt passed to the (mocked) agent run.
      const extraSystemPrompt =
        vi.mocked(runEmbeddedPiAgent).mock.calls.at(-1)?.[0]
          ?.extraSystemPrompt ?? "";
      expect(extraSystemPrompt).toBe(
        `You are replying inside the Discord group "Release Squad". Group members: Alice, Bob. Activation: trigger-only (you are invoked only when explicitly mentioned; recent context may be included). ${groupParticipationNote} Address the specific sender noted in the message context.`,
      );
    });
  });
  it("keeps WhatsApp labeling for WhatsApp group chats", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      await getReplyFromConfig(
        {
          // "@g.us" sender marks a WhatsApp group JID.
          Body: "ping",
          From: "123@g.us",
          To: "+1999",
          ChatType: "group",
          GroupSubject: "Ops",
          Provider: "whatsapp",
        },
        {},
        makeCfg(home),
      );
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
      const extraSystemPrompt =
        vi.mocked(runEmbeddedPiAgent).mock.calls.at(-1)?.[0]
          ?.extraSystemPrompt ?? "";
      // WhatsApp groups additionally explain JID/message-id semantics.
      expect(extraSystemPrompt).toBe(
        `You are replying inside the WhatsApp group "Ops". Activation: trigger-only (you are invoked only when explicitly mentioned; recent context may be included). WhatsApp IDs: SenderId is the participant JID; [message_id: ...] is the message id for reactions (use SenderId as participant). ${groupParticipationNote} Address the specific sender noted in the message context.`,
      );
    });
  });
  it("labels Telegram groups using their own surface", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      await getReplyFromConfig(
        {
          Body: "ping",
          From: "group:tg",
          To: "+1777",
          ChatType: "group",
          GroupSubject: "Dev Chat",
          Provider: "telegram",
        },
        {},
        makeCfg(home),
      );
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
      const extraSystemPrompt =
        vi.mocked(runEmbeddedPiAgent).mock.calls.at(-1)?.[0]
          ?.extraSystemPrompt ?? "";
      expect(extraSystemPrompt).toBe(
        `You are replying inside the Telegram group "Dev Chat". Activation: trigger-only (you are invoked only when explicitly mentioned; recent context may be included). ${groupParticipationNote} Address the specific sender noted in the message context.`,
      );
    });
  });
});

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,239 @@
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { normalizeTestText } from "../../test/helpers/normalize-text.js";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Replace the embedded Pi agent with spies so tests can inspect prompts and
// calls without launching a real agent run.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted so these mock fns exist before the hoisted vi.mock factory runs.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Static model catalog covering every provider/model these tests reference.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
// Imported after the vi.mock declarations so the mocked modules are used.
import {
  abortEmbeddedPiRun,
  runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { getReplyFromConfig } from "./reply.js";
// Currently unused in this file (underscore-prefixed); kept for parity with
// sibling split-out test files.
const _MAIN_SESSION_KEY = "agent:main:main";
// Pretend a WhatsApp Web session is already authenticated.
const webMocks = vi.hoisted(() => ({
  webAuthExists: vi.fn().mockResolvedValue(true),
  getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
  readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/**
 * Runs `fn` inside a temporary HOME directory, clearing the embedded-agent
 * spies first so each test starts from a clean call history.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const wrapped = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return fn(home);
  };
  return withTempHomeBase(wrapped, { prefix: "clawdbot-triggers-" });
}
/** Builds a minimal config rooted at `home`: one default model, open WhatsApp allowlist, JSON session store. */
function makeCfg(home: string) {
  const workspace = join(home, "clawd");
  const store = join(home, "sessions.json");
  return {
    agents: { defaults: { model: "anthropic/claude-opus-4-5", workspace } },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { store },
  };
}
afterEach(() => {
  vi.restoreAllMocks();
});
// Covers /status and /usage command handling plus abort behavior (/stop and
// plain "stop"), all without a real agent.
describe("trigger handling", () => {
  it("filters usage summary to the current model provider", async () => {
    await withTempHome(async (home) => {
      usageMocks.loadProviderUsageSummary.mockClear();
      const res = await getReplyFromConfig(
        {
          Body: "/status",
          From: "+1000",
          To: "+2000",
          Provider: "whatsapp",
          SenderE164: "+1000",
        },
        {},
        makeCfg(home),
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(normalizeTestText(text ?? "")).toContain("Usage: Claude 80% left");
      // Only the active model's provider ("anthropic") should be queried.
      expect(usageMocks.loadProviderUsageSummary).toHaveBeenCalledWith(
        expect.objectContaining({ providers: ["anthropic"] }),
      );
    });
  });
  it("emits /status once (no duplicate inline + final)", async () => {
    await withTempHome(async (home) => {
      const blockReplies: Array<{ text?: string }> = [];
      const res = await getReplyFromConfig(
        {
          Body: "/status",
          From: "+1000",
          To: "+2000",
          Provider: "whatsapp",
          SenderE164: "+1000",
        },
        {
          // Capture any inline (streamed block) replies separately.
          onBlockReply: async (payload) => {
            blockReplies.push(payload);
          },
        },
        makeCfg(home),
      );
      const replies = res ? (Array.isArray(res) ? res : [res]) : [];
      // A bare /status should only produce the single final reply.
      expect(blockReplies.length).toBe(0);
      expect(replies.length).toBe(1);
      expect(String(replies[0]?.text ?? "")).toContain("Model:");
    });
  });
  it("emits /usage once (alias of /status)", async () => {
    await withTempHome(async (home) => {
      const blockReplies: Array<{ text?: string }> = [];
      const res = await getReplyFromConfig(
        {
          Body: "/usage",
          From: "+1000",
          To: "+2000",
          Provider: "whatsapp",
          SenderE164: "+1000",
        },
        {
          onBlockReply: async (payload) => {
            blockReplies.push(payload);
          },
        },
        makeCfg(home),
      );
      const replies = res ? (Array.isArray(res) ? res : [res]) : [];
      expect(blockReplies.length).toBe(0);
      expect(replies.length).toBe(1);
      expect(String(replies[0]?.text ?? "")).toContain("Model:");
    });
  });
  it("sends one inline status and still returns agent reply for mixed text", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "agent says hi" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const blockReplies: Array<{ text?: string }> = [];
      const res = await getReplyFromConfig(
        {
          // /status embedded inside normal text: inline status + agent reply.
          Body: "here we go /status now",
          From: "+1002",
          To: "+2000",
          Provider: "whatsapp",
          SenderE164: "+1002",
        },
        {
          onBlockReply: async (payload) => {
            blockReplies.push(payload);
          },
        },
        makeCfg(home),
      );
      const replies = res ? (Array.isArray(res) ? res : [res]) : [];
      expect(blockReplies.length).toBe(1);
      expect(String(blockReplies[0]?.text ?? "")).toContain("Model:");
      expect(replies.length).toBe(1);
      expect(replies[0]?.text).toBe("agent says hi");
      // The /status token must be stripped from the prompt sent to the agent.
      const prompt =
        vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0]?.prompt ?? "";
      expect(prompt).not.toContain("/status");
    });
  });
  it("aborts even with timestamp prefix", async () => {
    await withTempHome(async (home) => {
      const res = await getReplyFromConfig(
        {
          // Leading "[Dec 5 10:00]" must not defeat "stop" detection.
          Body: "[Dec 5 10:00] stop",
          From: "+1000",
          To: "+2000",
        },
        {},
        makeCfg(home),
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toBe("⚙️ Agent was aborted.");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("handles /stop without invoking the agent", async () => {
    await withTempHome(async (home) => {
      const res = await getReplyFromConfig(
        {
          Body: "/stop",
          From: "+1003",
          To: "+2000",
        },
        {},
        makeCfg(home),
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toBe("⚙️ Agent was aborted.");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
});

View File

@@ -0,0 +1,239 @@
import fs from "node:fs/promises";
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Replace the embedded Pi agent with spies so tests can inspect prompts and
// calls without launching a real agent run.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted so these mock fns exist before the hoisted vi.mock factory runs.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Static model catalog covering every provider/model these tests reference.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
// Imported after the vi.mock declarations so the mocked modules are used.
import {
  abortEmbeddedPiRun,
  runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { getReplyFromConfig } from "./reply.js";
// Session-store key used to assert persisted state for the main agent/session.
const MAIN_SESSION_KEY = "agent:main:main";
// Pretend a WhatsApp Web session is already authenticated.
const webMocks = vi.hoisted(() => ({
  webAuthExists: vi.fn().mockResolvedValue(true),
  getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
  readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/**
 * Runs `fn` inside a temporary HOME directory, clearing the embedded-agent
 * spies first so each test starts from a clean call history.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const wrapped = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return fn(home);
  };
  return withTempHomeBase(wrapped, { prefix: "clawdbot-triggers-" });
}
/** Builds a minimal config rooted at `home`: one default model, open WhatsApp allowlist, JSON session store. */
function makeCfg(home: string) {
  const workspace = join(home, "clawd");
  const store = join(home, "sessions.json");
  return {
    agents: { defaults: { model: "anthropic/claude-opus-4-5", workspace } },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { store },
  };
}
afterEach(() => {
  vi.restoreAllMocks();
});
// Covers /elevated allowlist enforcement per channel and the context-overflow
// fallback message when the agent throws.
describe("trigger handling", () => {
  it("ignores inline elevated directive for unapproved sender", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const cfg = {
        agents: {
          defaults: {
            model: "anthropic/claude-opus-4-5",
            workspace: join(home, "clawd"),
          },
        },
        tools: {
          elevated: {
            // Only +1000 may use elevated mode; the test sends from +2000.
            allowFrom: { whatsapp: ["+1000"] },
          },
        },
        channels: {
          whatsapp: {
            allowFrom: ["+1000"],
          },
        },
        session: { store: join(home, "sessions.json") },
      };
      const res = await getReplyFromConfig(
        {
          Body: "please /elevated on now",
          From: "+2000",
          To: "+2000",
          Provider: "whatsapp",
          SenderE164: "+2000",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      // The inline directive is ignored (no error), and the message still
      // reaches the agent.
      expect(text).not.toContain("elevated is not available right now");
      expect(runEmbeddedPiAgent).toHaveBeenCalled();
    });
  });
  it("uses tools.elevated.allowFrom.discord for elevated approval", async () => {
    await withTempHome(async (home) => {
      const cfg = {
        agents: {
          defaults: {
            model: "anthropic/claude-opus-4-5",
            workspace: join(home, "clawd"),
          },
        },
        tools: { elevated: { allowFrom: { discord: ["steipete"] } } },
        session: { store: join(home, "sessions.json") },
      };
      const res = await getReplyFromConfig(
        {
          Body: "/elevated on",
          From: "discord:123",
          To: "user:123",
          Provider: "discord",
          SenderName: "Peter Steinberger",
          SenderUsername: "steipete",
          SenderTag: "steipete",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Elevated mode enabled");
      // The elevated level should be persisted in the session store.
      const storeRaw = await fs.readFile(cfg.session.store, "utf-8");
      const store = JSON.parse(storeRaw) as Record<
        string,
        { elevatedLevel?: string }
      >;
      expect(store[MAIN_SESSION_KEY]?.elevatedLevel).toBe("on");
    });
  });
  it("treats explicit discord elevated allowlist as override", async () => {
    await withTempHome(async (home) => {
      const cfg = {
        agents: {
          defaults: {
            model: "anthropic/claude-opus-4-5",
            workspace: join(home, "clawd"),
          },
        },
        tools: {
          elevated: {
            // Explicit empty list means: nobody on Discord may elevate.
            allowFrom: { discord: [] },
          },
        },
        session: { store: join(home, "sessions.json") },
      };
      const res = await getReplyFromConfig(
        {
          Body: "/elevated on",
          From: "discord:123",
          To: "user:123",
          Provider: "discord",
          SenderName: "steipete",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      // The rejection message points at the relevant config key.
      expect(text).toContain("tools.elevated.allowFrom.discord");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("returns a context overflow fallback when the embedded agent throws", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockRejectedValue(
        new Error("Context window exceeded"),
      );
      const res = await getReplyFromConfig(
        {
          Body: "hello",
          From: "+1002",
          To: "+2000",
        },
        {},
        makeCfg(home),
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toBe(
        "⚠️ Context overflow — prompt too large for this model. Try a shorter message or a larger-context model.",
      );
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
    });
  });
});

View File

@@ -0,0 +1,233 @@
import fs from "node:fs/promises";
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Replace the embedded Pi agent with spies so tests can inspect prompts and
// calls without launching a real agent run.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted so these mock fns exist before the hoisted vi.mock factory runs.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Static model catalog covering every provider/model these tests reference.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
// Imported after the vi.mock declarations so the mocked modules are used.
import {
  abortEmbeddedPiRun,
  runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { getReplyFromConfig } from "./reply.js";
import { HEARTBEAT_TOKEN } from "./tokens.js";
// Currently unused in this file (underscore-prefixed); kept for parity with
// sibling split-out test files.
const _MAIN_SESSION_KEY = "agent:main:main";
// Pretend a WhatsApp Web session is already authenticated.
const webMocks = vi.hoisted(() => ({
  webAuthExists: vi.fn().mockResolvedValue(true),
  getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
  readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/**
 * Runs `fn` inside a temporary HOME directory, clearing the embedded-agent
 * spies first so each test starts from a clean call history.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const wrapped = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return fn(home);
  };
  return withTempHomeBase(wrapped, { prefix: "clawdbot-triggers-" });
}
/** Builds a minimal config rooted at `home`: one default model, open WhatsApp allowlist, JSON session store. */
function makeCfg(home: string) {
  const workspace = join(home, "clawd");
  const store = join(home, "sessions.json");
  return {
    agents: { defaults: { model: "anthropic/claude-opus-4-5", workspace } },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { store },
  };
}
afterEach(() => {
  vi.restoreAllMocks();
});
// Covers agent-failure surfacing, heartbeat model overrides, HEARTBEAT_OK
// token suppression/stripping, and /activation persistence for group chats.
describe("trigger handling", () => {
  it("includes the error cause when the embedded agent throws", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockRejectedValue(
        new Error("sandbox is not defined"),
      );
      const res = await getReplyFromConfig(
        {
          Body: "hello",
          From: "+1002",
          To: "+2000",
        },
        {},
        makeCfg(home),
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      // The error message from the agent is surfaced to the user verbatim.
      expect(text).toBe(
        "⚠️ Agent failed before reply: sandbox is not defined. Check gateway logs for details.",
      );
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
    });
  });
  it("uses heartbeat model override for heartbeat runs", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const cfg = makeCfg(home);
      // Configure a cheaper model specifically for heartbeat runs.
      cfg.agents = {
        ...cfg.agents,
        defaults: {
          ...cfg.agents?.defaults,
          heartbeat: { model: "anthropic/claude-haiku-4-5-20251001" },
        },
      };
      await getReplyFromConfig(
        {
          Body: "hello",
          From: "+1002",
          To: "+2000",
        },
        { isHeartbeat: true },
        cfg,
      );
      const call = vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0];
      expect(call?.provider).toBe("anthropic");
      expect(call?.model).toBe("claude-haiku-4-5-20251001");
    });
  });
  it("suppresses HEARTBEAT_OK replies outside heartbeat runs", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: HEARTBEAT_TOKEN }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const res = await getReplyFromConfig(
        {
          Body: "hello",
          From: "+1002",
          To: "+2000",
        },
        {},
        makeCfg(home),
      );
      // A token-only payload yields no reply in a normal (non-heartbeat) run.
      expect(res).toBeUndefined();
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
    });
  });
  it("strips HEARTBEAT_OK at edges outside heartbeat runs", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: `${HEARTBEAT_TOKEN} hello` }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const res = await getReplyFromConfig(
        {
          Body: "hello",
          From: "+1002",
          To: "+2000",
        },
        {},
        makeCfg(home),
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      // Leading token is removed; the remaining text is delivered.
      expect(text).toBe("hello");
    });
  });
  it("updates group activation when the owner sends /activation", async () => {
    await withTempHome(async (home) => {
      const cfg = makeCfg(home);
      const res = await getReplyFromConfig(
        {
          Body: "/activation always",
          From: "123@g.us",
          To: "+2000",
          ChatType: "group",
          Provider: "whatsapp",
          SenderE164: "+2000",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Group activation set to always");
      // The activation level is persisted under the group's session key.
      const store = JSON.parse(
        await fs.readFile(cfg.session.store, "utf-8"),
      ) as Record<string, { groupActivation?: string }>;
      expect(store["agent:main:whatsapp:group:123@g.us"]?.groupActivation).toBe(
        "always",
      );
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
});

View File

@@ -0,0 +1,215 @@
import { tmpdir } from "node:os";
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Replace the embedded Pi agent with spies so tests can inspect prompts and
// calls without launching a real agent run.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted so these mock fns exist before the hoisted vi.mock factory runs.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Static model catalog covering every provider/model these tests reference.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
// Imported after the vi.mock declarations so the mocked modules are used.
import {
  abortEmbeddedPiRun,
  runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { getReplyFromConfig } from "./reply.js";
// Currently unused in this file (underscore-prefixed); kept for parity with
// sibling split-out test files.
const _MAIN_SESSION_KEY = "agent:main:main";
// Pretend a WhatsApp Web session is already authenticated.
const webMocks = vi.hoisted(() => ({
  webAuthExists: vi.fn().mockResolvedValue(true),
  getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
  readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/**
 * Runs `fn` inside a temporary HOME directory, clearing the embedded-agent
 * spies first so each test starts from a clean call history.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const wrapped = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return fn(home);
  };
  return withTempHomeBase(wrapped, { prefix: "clawdbot-triggers-" });
}
/** Builds a minimal config rooted at `home`: one default model, open WhatsApp allowlist, JSON session store. */
function makeCfg(home: string) {
  const workspace = join(home, "clawd");
  const store = join(home, "sessions.json");
  return {
    agents: { defaults: { model: "anthropic/claude-opus-4-5", workspace } },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { store },
  };
}
afterEach(() => {
  vi.restoreAllMocks();
});
// Covers /activation permissions in groups, activation context in the system
// prompt, and the /new greeting flow.
describe("trigger handling", () => {
  it("allows /activation from allowFrom in groups", async () => {
    await withTempHome(async (home) => {
      const cfg = makeCfg(home);
      const res = await getReplyFromConfig(
        {
          Body: "/activation mention",
          From: "123@g.us",
          To: "+2000",
          ChatType: "group",
          Provider: "whatsapp",
          // Sender is not the owner but matches the "*" allowFrom.
          SenderE164: "+999",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toBe("⚙️ Group activation set to mention.");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("injects group activation context into the system prompt", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const res = await getReplyFromConfig(
        {
          Body: "hello group",
          From: "123@g.us",
          To: "+2000",
          ChatType: "group",
          Provider: "whatsapp",
          SenderE164: "+2000",
          GroupSubject: "Test Group",
          GroupMembers: "Alice (+1), Bob (+2)",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: join(home, "clawd"),
            },
          },
          channels: {
            whatsapp: {
              allowFrom: ["*"],
              // requireMention: false => "always-on" activation for any group.
              groups: { "*": { requireMention: false } },
            },
          },
          messages: {
            groupChat: {},
          },
          session: { store: join(home, "sessions.json") },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toBe("ok");
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
      // The group subject and activation mode must appear in the extra prompt.
      const extra =
        vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0]?.extraSystemPrompt ??
        "";
      expect(extra).toContain("Test Group");
      expect(extra).toContain("Activation: always-on");
    });
  });
  it("runs a greeting prompt for a bare /new", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "hello" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const res = await getReplyFromConfig(
        {
          Body: "/new",
          From: "+1003",
          To: "+2000",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: join(home, "clawd"),
            },
          },
          channels: {
            whatsapp: {
              allowFrom: ["*"],
            },
          },
          session: {
            store: join(tmpdir(), `clawdbot-session-test-${Date.now()}.json`),
          },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toBe("hello");
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
      // A bare /new triggers a fresh-session greeting prompt for the agent.
      const prompt =
        vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0]?.prompt ?? "";
      expect(prompt).toContain("A new session was started via /new or /reset");
    });
  });
});

View File

@@ -0,0 +1,204 @@
import { tmpdir } from "node:os";
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Replace the embedded Pi agent with spies so tests can inspect prompts and
// calls without launching a real agent run.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted so these mock fns exist before the hoisted vi.mock factory runs.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Static model catalog covering every provider/model these tests reference.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
// Imported after the vi.mock declarations so the mocked modules are used.
import {
  abortEmbeddedPiRun,
  runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { getReplyFromConfig } from "./reply.js";
// Currently unused in this file (underscore-prefixed); kept for parity with
// sibling split-out test files.
const _MAIN_SESSION_KEY = "agent:main:main";
// Pretend a WhatsApp Web session is already authenticated.
const webMocks = vi.hoisted(() => ({
  webAuthExists: vi.fn().mockResolvedValue(true),
  getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
  readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/**
 * Runs `fn` inside a temporary HOME directory, clearing the embedded-agent
 * spies first so each test starts from a clean call history.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const wrapped = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return fn(home);
  };
  return withTempHomeBase(wrapped, { prefix: "clawdbot-triggers-" });
}
/**
 * Builds a minimal config rooted at `home` (unused in this file; kept for
 * parity with the sibling split-out test files).
 */
function _makeCfg(home: string) {
  const workspace = join(home, "clawd");
  const store = join(home, "sessions.json");
  return {
    agents: { defaults: { model: "anthropic/claude-opus-4-5", workspace } },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { store },
  };
}
afterEach(() => {
  vi.restoreAllMocks();
});
// Covers /reset: greeting flow for authorized senders and rejection for
// unauthorized / non-owner senders.
describe("trigger handling", () => {
  it("runs a greeting prompt for a bare /reset", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "hello" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const res = await getReplyFromConfig(
        {
          Body: "/reset",
          From: "+1003",
          To: "+2000",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: join(home, "clawd"),
            },
          },
          channels: {
            whatsapp: {
              allowFrom: ["*"],
            },
          },
          session: {
            store: join(tmpdir(), `clawdbot-session-test-${Date.now()}.json`),
          },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toBe("hello");
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
      // A bare /reset triggers the same fresh-session greeting prompt as /new.
      const prompt =
        vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0]?.prompt ?? "";
      expect(prompt).toContain("A new session was started via /new or /reset");
    });
  });
  it("does not reset for unauthorized /reset", async () => {
    await withTempHome(async (home) => {
      const res = await getReplyFromConfig(
        {
          Body: "/reset",
          From: "+1003",
          To: "+2000",
          // Explicitly mark the command as unauthorized.
          CommandAuthorized: false,
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: join(home, "clawd"),
            },
          },
          channels: {
            whatsapp: {
              allowFrom: ["+1999"],
            },
          },
          session: {
            store: join(tmpdir(), `clawdbot-session-test-${Date.now()}.json`),
          },
        },
      );
      // No reply and no agent run for unauthorized commands.
      expect(res).toBeUndefined();
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("blocks /reset for non-owner senders", async () => {
    await withTempHome(async (home) => {
      const res = await getReplyFromConfig(
        {
          // Sender +1003 is not in the "+1999" allowlist below.
          Body: "/reset",
          From: "+1003",
          To: "+2000",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: join(home, "clawd"),
            },
          },
          channels: {
            whatsapp: {
              allowFrom: ["+1999"],
            },
          },
          session: {
            store: join(tmpdir(), `clawdbot-session-test-${Date.now()}.json`),
          },
        },
      );
      expect(res).toBeUndefined();
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
});

View File

@@ -0,0 +1,218 @@
import { tmpdir } from "node:os";
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Replace the embedded Pi agent module with spies so no real agent runs;
// lane resolution stays deterministic for session-keyed queueing.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted: these values must exist before the hoisted vi.mock factories run.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Fixed model catalog so /model-related behavior is deterministic offline.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
import {
  abortEmbeddedPiRun,
  compactEmbeddedPiSession,
  runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { loadSessionStore, resolveSessionKey } from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
// Unused here (underscore-prefixed); kept for parity with sibling test files.
const _MAIN_SESSION_KEY = "agent:main:main";
// Pretend a web session already exists and is recent.
const webMocks = vi.hoisted(() => ({
  webAuthExists: vi.fn().mockResolvedValue(true),
  getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
  readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/** Runs `body` in a temp HOME dir, clearing agent-run mocks beforehand. */
async function withTempHome<T>(body: (home: string) => Promise<T>): Promise<T> {
  const runWithClearedMocks = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return body(home);
  };
  return withTempHomeBase(runWithClearedMocks, {
    prefix: "clawdbot-triggers-",
  });
}
/**
 * Builds the minimal test config: Opus model, a workspace under `home`,
 * an open WhatsApp allowlist, and a session store inside `home`.
 */
function makeCfg(home: string) {
  const defaults = {
    model: "anthropic/claude-opus-4-5",
    workspace: join(home, "clawd"),
  };
  return {
    agents: { defaults },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { store: join(home, "sessions.json") },
  };
}
// Restore all spies/mocks after each case so tests stay independent.
afterEach(() => {
  vi.restoreAllMocks();
});
// /compact command gating and /think directive filtering.
describe("trigger handling", () => {
  // /compact must call compactEmbeddedPiSession directly (no agent run)
  // and bump the session's compactionCount in the store.
  it("runs /compact as a gated command", async () => {
    await withTempHome(async (home) => {
      const storePath = join(
        tmpdir(),
        `clawdbot-session-test-${Date.now()}.json`,
      );
      vi.mocked(compactEmbeddedPiSession).mockResolvedValue({
        ok: true,
        compacted: true,
        result: {
          summary: "summary",
          firstKeptEntryId: "x",
          tokensBefore: 12000,
        },
      });
      const res = await getReplyFromConfig(
        {
          Body: "/compact focus on decisions",
          From: "+1003",
          To: "+2000",
        },
        {},
        {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: join(home, "clawd"),
            },
          },
          channels: {
            whatsapp: {
              allowFrom: ["*"],
            },
          },
          session: {
            store: storePath,
          },
        },
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text?.startsWith("⚙️ Compacted")).toBe(true);
      expect(compactEmbeddedPiSession).toHaveBeenCalledOnce();
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
      const store = loadSessionStore(storePath);
      const sessionKey = resolveSessionKey("per-sender", {
        Body: "/compact focus on decisions",
        From: "+1003",
        To: "+2000",
      });
      expect(store[sessionKey]?.compactionCount).toBe(1);
    });
  });
  // A /thinking directive quoted inside the context wrapper must not be
  // treated as a directive for the current message.
  it("ignores think directives that only appear in the context wrapper", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const res = await getReplyFromConfig(
        {
          Body: [
            "[Chat messages since your last reply - for context]",
            "Peter: /thinking high [2025-12-05T21:45:00.000Z]",
            "",
            "[Current message - respond to this]",
            "Give me the status",
          ].join("\n"),
          From: "+1002",
          To: "+2000",
        },
        {},
        makeCfg(home),
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toBe("ok");
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
      const prompt =
        vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0]?.prompt ?? "";
      expect(prompt).toContain("Give me the status");
      expect(prompt).not.toContain("/thinking high");
      expect(prompt).not.toContain("/think high");
    });
  });
  // Heartbeat messages with /think must not surface a directive ack.
  it("does not emit directive acks for heartbeats with /think", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const res = await getReplyFromConfig(
        {
          Body: "HEARTBEAT /think:high",
          From: "+1003",
          To: "+1003",
        },
        { isHeartbeat: true },
        makeCfg(home),
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toBe("ok");
      expect(text).not.toMatch(/Thinking level set/i);
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
    });
  });
});

View File

@@ -0,0 +1,193 @@
import fs from "node:fs/promises";
import { basename, join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Replace the embedded Pi agent module with spies so no real agent runs;
// lane resolution stays deterministic for session-keyed queueing.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted: these values must exist before the hoisted vi.mock factories run.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Fixed model catalog so /model-related behavior is deterministic offline.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
import { resolveAgentWorkspaceDir } from "../agents/agent-scope.js";
import {
  abortEmbeddedPiRun,
  runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { ensureSandboxWorkspaceForSession } from "../agents/sandbox.js";
import {
  resolveAgentIdFromSessionKey,
  resolveSessionKey,
} from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
// Unused here (underscore-prefixed); kept for parity with sibling test files.
const _MAIN_SESSION_KEY = "agent:main:main";
// Pretend a web session already exists and is recent.
const webMocks = vi.hoisted(() => ({
  webAuthExists: vi.fn().mockResolvedValue(true),
  getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
  readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/** Runs `body` in a temp HOME dir, clearing agent-run mocks beforehand. */
async function withTempHome<T>(body: (home: string) => Promise<T>): Promise<T> {
  const runWithClearedMocks = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return body(home);
  };
  return withTempHomeBase(runWithClearedMocks, {
    prefix: "clawdbot-triggers-",
  });
}
/**
 * Builds the minimal test config: Opus model, a workspace under `home`,
 * an open WhatsApp allowlist, and a session store inside `home`.
 * (Underscore-prefixed: currently unused in this file; kept for parity
 * with the sibling trigger test files.)
 */
function _makeCfg(home: string) {
  const defaults = {
    model: "anthropic/claude-opus-4-5",
    workspace: join(home, "clawd"),
  };
  return {
    agents: { defaults },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { store: join(home, "sessions.json") },
  };
}
// Restore all spies/mocks after each case so tests stay independent.
afterEach(() => {
  vi.restoreAllMocks();
});
// Inbound media must be copied into the sandbox workspace and the prompt
// must reference the staged relative path, not the host path.
describe("trigger handling", () => {
  it(
    "stages inbound media into the sandbox workspace",
    { timeout: 15_000 },
    async () => {
      await withTempHome(async (home) => {
        // Create a fake inbound media file under the host media dir.
        const inboundDir = join(home, ".clawdbot", "media", "inbound");
        await fs.mkdir(inboundDir, { recursive: true });
        const mediaPath = join(inboundDir, "photo.jpg");
        await fs.writeFile(mediaPath, "test");
        vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
          payloads: [{ text: "ok" }],
          meta: {
            durationMs: 1,
            agentMeta: { sessionId: "s", provider: "p", model: "m" },
          },
        });
        // Sandbox mode "non-main": group sessions get their own workspace.
        const cfg = {
          agents: {
            defaults: {
              model: "anthropic/claude-opus-4-5",
              workspace: join(home, "clawd"),
              sandbox: {
                mode: "non-main" as const,
                workspaceRoot: join(home, "sandboxes"),
              },
            },
          },
          channels: {
            whatsapp: {
              allowFrom: ["*"],
            },
          },
          session: {
            store: join(home, "sessions.json"),
          },
        };
        const ctx = {
          Body: "hi",
          From: "group:whatsapp:demo",
          To: "+2000",
          ChatType: "group" as const,
          Provider: "whatsapp" as const,
          MediaPath: mediaPath,
          MediaType: "image/jpeg",
          MediaUrl: mediaPath,
        };
        const res = await getReplyFromConfig(ctx, {}, cfg);
        const text = Array.isArray(res) ? res[0]?.text : res?.text;
        expect(text).toBe("ok");
        expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
        const prompt =
          vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0]?.prompt ?? "";
        // Prompt references the sandbox-relative staged path only.
        const stagedPath = `media/inbound/${basename(mediaPath)}`;
        expect(prompt).toContain(stagedPath);
        expect(prompt).not.toContain(mediaPath);
        const sessionKey = resolveSessionKey(
          cfg.session?.scope ?? "per-sender",
          ctx,
          cfg.session?.mainKey,
        );
        const agentId = resolveAgentIdFromSessionKey(sessionKey);
        const sandbox = await ensureSandboxWorkspaceForSession({
          config: cfg,
          sessionKey,
          workspaceDir: resolveAgentWorkspaceDir(cfg, agentId),
        });
        expect(sandbox).not.toBeNull();
        if (!sandbox) {
          throw new Error("Expected sandbox to be set");
        }
        // The staged copy must physically exist in the sandbox workspace.
        const stagedFullPath = join(
          sandbox.workspaceDir,
          "media",
          "inbound",
          basename(mediaPath),
        );
        await expect(fs.stat(stagedFullPath)).resolves.toBeTruthy();
      });
    },
  );
});

View File

@@ -0,0 +1,223 @@
import fs from "node:fs/promises";
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Replace the embedded Pi agent module with spies so no real agent runs;
// lane resolution stays deterministic for session-keyed queueing.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted: these values must exist before the hoisted vi.mock factories run.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Fixed model catalog so /model-related behavior is deterministic offline.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
import {
  abortEmbeddedPiRun,
  runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { loadSessionStore } from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
// Default session key used as a native-command target in these tests.
const MAIN_SESSION_KEY = "agent:main:main";
// Pretend a web session already exists and is recent.
const webMocks = vi.hoisted(() => ({
  webAuthExists: vi.fn().mockResolvedValue(true),
  getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
  readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/** Runs `body` in a temp HOME dir, clearing agent-run mocks beforehand. */
async function withTempHome<T>(body: (home: string) => Promise<T>): Promise<T> {
  const runWithClearedMocks = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return body(home);
  };
  return withTempHomeBase(runWithClearedMocks, {
    prefix: "clawdbot-triggers-",
  });
}
/**
 * Builds the minimal test config: Opus model, a workspace under `home`,
 * an open WhatsApp allowlist, and a session store inside `home`.
 */
function makeCfg(home: string) {
  const defaults = {
    model: "anthropic/claude-opus-4-5",
    workspace: join(home, "clawd"),
  };
  return {
    agents: { defaults },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { store: join(home, "sessions.json") },
  };
}
// Restore all spies/mocks after each case so tests stay independent.
afterEach(() => {
  vi.restoreAllMocks();
});
// Native (slash-menu) commands must act on CommandTargetSessionKey, not
// on the transient slash session that carried the command.
describe("trigger handling", () => {
  // /stop aborts the run bound to the target session's sessionId and
  // records abortedLastRun on that session.
  it("targets the active session for native /stop", async () => {
    await withTempHome(async (home) => {
      const cfg = makeCfg(home);
      const targetSessionKey = "agent:main:telegram:group:123";
      const targetSessionId = "session-target";
      await fs.writeFile(
        cfg.session.store,
        JSON.stringify(
          {
            [targetSessionKey]: {
              sessionId: targetSessionId,
              updatedAt: Date.now(),
            },
          },
          null,
          2,
        ),
      );
      const res = await getReplyFromConfig(
        {
          Body: "/stop",
          From: "telegram:111",
          To: "telegram:111",
          ChatType: "direct",
          Provider: "telegram",
          Surface: "telegram",
          SessionKey: "telegram:slash:111",
          CommandSource: "native",
          CommandTargetSessionKey: targetSessionKey,
          CommandAuthorized: true,
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toBe("⚙️ Agent was aborted.");
      expect(vi.mocked(abortEmbeddedPiRun)).toHaveBeenCalledWith(
        targetSessionId,
      );
      const store = loadSessionStore(cfg.session.store);
      expect(store[targetSessionKey]?.abortedLastRun).toBe(true);
    });
  });
  // /model writes provider/model overrides onto the target session (not
  // the slash session), and subsequent runs pick those overrides up.
  it("applies native /model to the target session", async () => {
    await withTempHome(async (home) => {
      const cfg = makeCfg(home);
      const slashSessionKey = "telegram:slash:111";
      const targetSessionKey = MAIN_SESSION_KEY;
      // Seed the target session to ensure the native command mutates it.
      await fs.writeFile(
        cfg.session.store,
        JSON.stringify(
          {
            [targetSessionKey]: {
              sessionId: "session-target",
              updatedAt: Date.now(),
            },
          },
          null,
          2,
        ),
      );
      const res = await getReplyFromConfig(
        {
          Body: "/model openai/gpt-4.1-mini",
          From: "telegram:111",
          To: "telegram:111",
          ChatType: "direct",
          Provider: "telegram",
          Surface: "telegram",
          SessionKey: slashSessionKey,
          CommandSource: "native",
          CommandTargetSessionKey: targetSessionKey,
          CommandAuthorized: true,
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Model set to openai/gpt-4.1-mini");
      const store = loadSessionStore(cfg.session.store);
      expect(store[targetSessionKey]?.providerOverride).toBe("openai");
      expect(store[targetSessionKey]?.modelOverride).toBe("gpt-4.1-mini");
      expect(store[slashSessionKey]).toBeUndefined();
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 5,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      // A follow-up message should run with the overridden provider/model.
      await getReplyFromConfig(
        {
          Body: "hi",
          From: "telegram:111",
          To: "telegram:111",
          ChatType: "direct",
          Provider: "telegram",
          Surface: "telegram",
        },
        {},
        cfg,
      );
      expect(runEmbeddedPiAgent).toHaveBeenCalledOnce();
      expect(vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0]).toEqual(
        expect.objectContaining({
          provider: "openai",
          model: "gpt-4.1-mini",
        }),
      );
    });
  });
});

View File

@@ -0,0 +1,239 @@
import fs from "node:fs/promises";
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { normalizeTestText } from "../../test/helpers/normalize-text.js";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Replace the embedded Pi agent module with spies so no real agent runs;
// lane resolution stays deterministic for session-keyed queueing.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted: these values must exist before the hoisted vi.mock factories run.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Fixed model catalog: the /model picker output asserted below depends on
// exactly these entries and their order.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
import {
  abortEmbeddedPiRun,
  runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { loadSessionStore } from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
// Unused here (underscore-prefixed); kept for parity with sibling test files.
const _MAIN_SESSION_KEY = "agent:main:main";
// Pretend a web session already exists and is recent.
const webMocks = vi.hoisted(() => ({
  webAuthExists: vi.fn().mockResolvedValue(true),
  getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
  readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/** Runs `body` in a temp HOME dir, clearing agent-run mocks beforehand. */
async function withTempHome<T>(body: (home: string) => Promise<T>): Promise<T> {
  const runWithClearedMocks = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return body(home);
  };
  return withTempHomeBase(runWithClearedMocks, {
    prefix: "clawdbot-triggers-",
  });
}
/**
 * Builds the minimal test config: Opus model, a workspace under `home`,
 * an open WhatsApp allowlist, and a session store inside `home`.
 */
function makeCfg(home: string) {
  const defaults = {
    model: "anthropic/claude-opus-4-5",
    workspace: join(home, "clawd"),
  };
  return {
    agents: { defaults },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { store: join(home, "sessions.json") },
  };
}
// Restore all spies/mocks after each case so tests stay independent.
afterEach(() => {
  vi.restoreAllMocks();
});
// /model picker listing and numeric selection semantics.
describe("trigger handling", () => {
  // A bare /model lists models grouped by id, joining providers per row.
  it("shows a quick /model picker grouped by model with providers", async () => {
    await withTempHome(async (home) => {
      const cfg = makeCfg(home);
      const res = await getReplyFromConfig(
        {
          Body: "/model",
          From: "telegram:111",
          To: "telegram:111",
          ChatType: "direct",
          Provider: "telegram",
          Surface: "telegram",
          SessionKey: "telegram:slash:111",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      const normalized = normalizeTestText(text ?? "");
      expect(normalized).toContain(
        "Pick: /model <#> or /model <provider/model>",
      );
      expect(normalized).toContain(
        "1) claude-opus-4-5 — anthropic, openrouter",
      );
      expect(normalized).toContain("3) gpt-5.2 — openai, openai-codex");
      expect(normalized).toContain("More: /model status");
      // Picker stays terse: no capability columns.
      expect(normalized).not.toContain("reasoning");
      expect(normalized).not.toContain("image");
    });
  });
  // Out-of-range numeric picks are rejected and leave the session untouched.
  it("rejects invalid /model <#> selections", async () => {
    await withTempHome(async (home) => {
      const cfg = makeCfg(home);
      const sessionKey = "telegram:slash:111";
      const res = await getReplyFromConfig(
        {
          Body: "/model 99",
          From: "telegram:111",
          To: "telegram:111",
          ChatType: "direct",
          Provider: "telegram",
          Surface: "telegram",
          SessionKey: sessionKey,
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(normalizeTestText(text ?? "")).toContain(
        'Invalid model selection "99". Use /model to list.',
      );
      const store = loadSessionStore(cfg.session.store);
      expect(store[sessionKey]?.providerOverride).toBeUndefined();
      expect(store[sessionKey]?.modelOverride).toBeUndefined();
    });
  });
  // When a model is offered by several providers, the session's current
  // provider override wins on a numeric pick.
  it("prefers the current provider when selecting /model <#>", async () => {
    await withTempHome(async (home) => {
      const cfg = makeCfg(home);
      const sessionKey = "telegram:slash:111";
      await fs.writeFile(
        cfg.session.store,
        JSON.stringify(
          {
            [sessionKey]: {
              sessionId: "session-openrouter",
              updatedAt: Date.now(),
              providerOverride: "openrouter",
              modelOverride: "anthropic/claude-opus-4-5",
            },
          },
          null,
          2,
        ),
      );
      const res = await getReplyFromConfig(
        {
          Body: "/model 1",
          From: "telegram:111",
          To: "telegram:111",
          ChatType: "direct",
          Provider: "telegram",
          Surface: "telegram",
          SessionKey: sessionKey,
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(normalizeTestText(text ?? "")).toContain(
        "Model set to openrouter/anthropic/claude-opus-4-5",
      );
      const store = loadSessionStore(cfg.session.store);
      expect(store[sessionKey]?.providerOverride).toBe("openrouter");
      expect(store[sessionKey]?.modelOverride).toBe(
        "anthropic/claude-opus-4-5",
      );
    });
  });
  // A plain numeric pick maps to the picker's row numbering.
  it("selects a model by index via /model <#>", async () => {
    await withTempHome(async (home) => {
      const cfg = makeCfg(home);
      const sessionKey = "telegram:slash:111";
      const res = await getReplyFromConfig(
        {
          Body: "/model 3",
          From: "telegram:111",
          To: "telegram:111",
          ChatType: "direct",
          Provider: "telegram",
          Surface: "telegram",
          SessionKey: sessionKey,
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(normalizeTestText(text ?? "")).toContain(
        "Model set to openai/gpt-5.2",
      );
      const store = loadSessionStore(cfg.session.store);
      expect(store[sessionKey]?.providerOverride).toBe("openai");
      expect(store[sessionKey]?.modelOverride).toBe("gpt-5.2");
    });
  });
});

View File

@@ -0,0 +1,224 @@
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { normalizeTestText } from "../../test/helpers/normalize-text.js";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Replace the embedded Pi agent module with spies so no real agent runs;
// lane resolution stays deterministic for session-keyed queueing.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted: these values must exist before the hoisted vi.mock factories run.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Fixed model catalog so /model-related behavior is deterministic offline.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
import {
  abortEmbeddedPiRun,
  runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { getReplyFromConfig } from "./reply.js";
// Unused here (underscore-prefixed); kept for parity with sibling test files.
const _MAIN_SESSION_KEY = "agent:main:main";
// Pretend a web session already exists and is recent.
const webMocks = vi.hoisted(() => ({
  webAuthExists: vi.fn().mockResolvedValue(true),
  getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
  readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/** Runs `body` in a temp HOME dir, clearing agent-run mocks beforehand. */
async function withTempHome<T>(body: (home: string) => Promise<T>): Promise<T> {
  const runWithClearedMocks = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return body(home);
  };
  return withTempHomeBase(runWithClearedMocks, {
    prefix: "clawdbot-triggers-",
  });
}
/**
 * Builds the minimal test config: Opus model, a workspace under `home`,
 * an open WhatsApp allowlist, and a session store inside `home`.
 */
function makeCfg(home: string) {
  const defaults = {
    model: "anthropic/claude-opus-4-5",
    workspace: join(home, "clawd"),
  };
  return {
    agents: { defaults },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { store: join(home, "sessions.json") },
  };
}
// Restore all spies/mocks after each case so tests stay independent.
afterEach(() => {
  vi.restoreAllMocks();
});
// /model status output, /restart gating, and /status-/usage short-circuits.
describe("trigger handling", () => {
  // Without a providers endpoint config, status shows "endpoint: default".
  it("shows endpoint default in /model status when not configured", async () => {
    await withTempHome(async (home) => {
      const cfg = makeCfg(home);
      const res = await getReplyFromConfig(
        {
          Body: "/model status",
          From: "telegram:111",
          To: "telegram:111",
          ChatType: "direct",
          Provider: "telegram",
          Surface: "telegram",
          SessionKey: "telegram:slash:111",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(normalizeTestText(text ?? "")).toContain("endpoint: default");
    });
  });
  // A configured provider endpoint (baseUrl + api) is echoed in status.
  it("includes endpoint details in /model status when configured", async () => {
    await withTempHome(async (home) => {
      const cfg = {
        ...makeCfg(home),
        models: {
          providers: {
            minimax: {
              baseUrl: "https://api.minimax.io/anthropic",
              api: "anthropic-messages",
            },
          },
        },
      };
      const res = await getReplyFromConfig(
        {
          Body: "/model status",
          From: "telegram:111",
          To: "telegram:111",
          ChatType: "direct",
          Provider: "telegram",
          Surface: "telegram",
          SessionKey: "telegram:slash:111",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      const normalized = normalizeTestText(text ?? "");
      expect(normalized).toContain(
        "[minimax] endpoint: https://api.minimax.io/anthropic api: anthropic-messages auth:",
      );
    });
  });
  // /restart is off unless commands.restart is enabled; leading noise
  // (timestamp prefix) must not bypass the gate.
  it("rejects /restart by default", async () => {
    await withTempHome(async (home) => {
      const res = await getReplyFromConfig(
        {
          Body: " [Dec 5] /restart",
          From: "+1001",
          To: "+2000",
        },
        {},
        makeCfg(home),
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("/restart is disabled");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // With commands.restart enabled, /restart attempts a restart (either
  // outcome message is acceptable) and never runs the agent.
  it("restarts when enabled", async () => {
    await withTempHome(async (home) => {
      const cfg = { ...makeCfg(home), commands: { restart: true } };
      const res = await getReplyFromConfig(
        {
          Body: "/restart",
          From: "+1001",
          To: "+2000",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(
        text?.startsWith("⚙️ Restarting") ||
          text?.startsWith("⚠️ Restart failed"),
      ).toBe(true);
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // /status is answered locally, without an agent run.
  it("reports status without invoking the agent", async () => {
    await withTempHome(async (home) => {
      const res = await getReplyFromConfig(
        {
          Body: "/status",
          From: "+1002",
          To: "+2000",
        },
        {},
        makeCfg(home),
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Clawdbot");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // /usage takes the same local path as /status.
  it("reports status via /usage without invoking the agent", async () => {
    await withTempHome(async (home) => {
      const res = await getReplyFromConfig(
        {
          Body: "/usage",
          From: "+1002",
          To: "+2000",
        },
        {},
        makeCfg(home),
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Clawdbot");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
});

View File

@@ -0,0 +1,234 @@
import fs from "node:fs/promises";
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Replace the embedded Pi agent module with spies so no real agent runs;
// lane resolution stays deterministic for session-keyed queueing.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted: these values must exist before the hoisted vi.mock factories run.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Fixed model catalog so /model-related behavior is deterministic offline.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
import {
  abortEmbeddedPiRun,
  runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { resolveSessionKey } from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
// Unused here (underscore-prefixed); kept for parity with sibling test files.
const _MAIN_SESSION_KEY = "agent:main:main";
// Pretend a web session already exists and is recent.
const webMocks = vi.hoisted(() => ({
  webAuthExists: vi.fn().mockResolvedValue(true),
  getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
  readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/** Runs `body` in a temp HOME dir, clearing agent-run mocks beforehand. */
async function withTempHome<T>(body: (home: string) => Promise<T>): Promise<T> {
  const runWithClearedMocks = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return body(home);
  };
  return withTempHomeBase(runWithClearedMocks, {
    prefix: "clawdbot-triggers-",
  });
}
/**
 * Builds the minimal test config: Opus model, a workspace under `home`,
 * an open WhatsApp allowlist, and a session store inside `home`.
 */
function makeCfg(home: string) {
  const defaults = {
    model: "anthropic/claude-opus-4-5",
    workspace: join(home, "clawd"),
  };
  return {
    agents: { defaults },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { store: join(home, "sessions.json") },
  };
}
// Restore all spies/mocks after each case so tests stay independent.
afterEach(() => {
  vi.restoreAllMocks();
});
// Auth-profile reporting in /status, and inline command stripping.
describe("trigger handling", () => {
  // Status must show the auth type, a redacted key snippet, and the
  // active profile name when the session carries an override.
  it("reports active auth profile and key snippet in status", async () => {
    await withTempHome(async (home) => {
      const cfg = makeCfg(home);
      // Seed an API-key auth profile on disk for the main agent.
      const agentDir = join(home, ".clawdbot", "agents", "main", "agent");
      await fs.mkdir(agentDir, { recursive: true });
      await fs.writeFile(
        join(agentDir, "auth-profiles.json"),
        JSON.stringify(
          {
            version: 1,
            profiles: {
              "anthropic:work": {
                type: "api_key",
                provider: "anthropic",
                key: "sk-test-1234567890abcdef",
              },
            },
            lastGood: { anthropic: "anthropic:work" },
          },
          null,
          2,
        ),
      );
      const sessionKey = resolveSessionKey("per-sender", {
        From: "+1002",
        To: "+2000",
        Provider: "whatsapp",
      } as Parameters<typeof resolveSessionKey>[1]);
      // Point the sender's session at that profile via an override.
      await fs.writeFile(
        cfg.session.store,
        JSON.stringify(
          {
            [sessionKey]: {
              sessionId: "session-auth",
              updatedAt: Date.now(),
              authProfileOverride: "anthropic:work",
            },
          },
          null,
          2,
        ),
      );
      const res = await getReplyFromConfig(
        {
          Body: "/status",
          From: "+1002",
          To: "+2000",
          Provider: "whatsapp",
          SenderE164: "+1002",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("api-key");
      // Key is redacted with an ellipsis (either unicode or "...").
      expect(text).toMatch(/…|\.{3}/);
      expect(text).toContain("(anthropic:work)");
      expect(text).not.toContain("mixed");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  // Inline /status is answered immediately via onBlockReply and removed
  // from the prompt; the remaining text still reaches the agent.
  it("strips inline /status and still runs the agent", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const blockReplies: Array<{ text?: string }> = [];
      await getReplyFromConfig(
        {
          Body: "please /status now",
          From: "+1002",
          To: "+2000",
          Provider: "whatsapp",
          Surface: "whatsapp",
          SenderE164: "+1002",
        },
        {
          onBlockReply: async (payload) => {
            blockReplies.push(payload);
          },
        },
        makeCfg(home),
      );
      expect(runEmbeddedPiAgent).toHaveBeenCalled();
      // Allowlisted senders: inline /status runs immediately (like /help) and is
      // stripped from the prompt; the remaining text continues through the agent.
      expect(blockReplies.length).toBe(1);
      expect(String(blockReplies[0]?.text ?? "").length).toBeGreaterThan(0);
      const prompt =
        vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0]?.prompt ?? "";
      expect(prompt).not.toContain("/status");
    });
  });
  // Inline /help behaves the same: block reply with Help text, command
  // stripped from the prompt, agent still runs.
  it("handles inline /help and strips it before the agent", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const blockReplies: Array<{ text?: string }> = [];
      const res = await getReplyFromConfig(
        {
          Body: "please /help now",
          From: "+1002",
          To: "+2000",
        },
        {
          onBlockReply: async (payload) => {
            blockReplies.push(payload);
          },
        },
        makeCfg(home),
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(blockReplies.length).toBe(1);
      expect(blockReplies[0]?.text).toContain("Help");
      expect(runEmbeddedPiAgent).toHaveBeenCalled();
      const prompt =
        vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0]?.prompt ?? "";
      expect(prompt).not.toContain("/help");
      expect(text).toBe("ok");
    });
  });
});

View File

@@ -0,0 +1,229 @@
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Mock the embedded agent module so calls are recordable and no real run starts.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted: these factories must exist before the hoisted vi.mock calls run.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Static model catalog so model resolution never hits the network.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
// Imported after vi.mock calls so these resolve to the mocked implementations.
import {
  abortEmbeddedPiRun,
  runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { getReplyFromConfig } from "./reply.js";
const _MAIN_SESSION_KEY = "agent:main:main";
// Pretend a linked WhatsApp Web session exists for status/identity checks.
const webMocks = vi.hoisted(() => ({
  webAuthExists: vi.fn().mockResolvedValue(true),
  getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
  readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/**
 * Runs `fn` inside a fresh temp home (prefix "clawdbot-triggers-"), clearing
 * the embedded-agent mocks first so call counts never leak between tests.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const runWithFreshMocks = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return await fn(home);
  };
  return withTempHomeBase(runWithFreshMocks, { prefix: "clawdbot-triggers-" });
}
/**
 * Minimal config for these tests: default agent model/workspace, open
 * WhatsApp allowlist, and a session store inside the temp home.
 */
function makeCfg(home: string) {
  const workspace = join(home, "clawd");
  const sessionStore = join(home, "sessions.json");
  return {
    agents: {
      defaults: { model: "anthropic/claude-opus-4-5", workspace },
    },
    channels: {
      whatsapp: { allowFrom: ["*"] },
    },
    session: { store: sessionStore },
  };
}
// Restore every spy/stub after each test so mock state cannot leak across cases.
afterEach(() => {
  vi.restoreAllMocks();
});
// Inline-command stripping and allowlist gating against the mocked agent.
describe("trigger handling", () => {
  it("handles inline /commands and strips it before the agent", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const blockReplies: Array<{ text?: string }> = [];
      const res = await getReplyFromConfig(
        {
          Body: "please /commands now",
          From: "+1002",
          To: "+2000",
        },
        {
          onBlockReply: async (payload) => {
            blockReplies.push(payload);
          },
        },
        makeCfg(home),
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      // Command answer arrives as a block reply...
      expect(blockReplies.length).toBe(1);
      expect(blockReplies[0]?.text).toContain("Slash commands");
      // ...while the remaining text still reaches the agent, minus the command.
      expect(runEmbeddedPiAgent).toHaveBeenCalled();
      const prompt =
        vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0]?.prompt ?? "";
      expect(prompt).not.toContain("/commands");
      expect(text).toBe("ok");
    });
  });
  it("handles inline /whoami and strips it before the agent", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const blockReplies: Array<{ text?: string }> = [];
      const res = await getReplyFromConfig(
        {
          Body: "please /whoami now",
          From: "+1002",
          To: "+2000",
          SenderId: "12345",
        },
        {
          onBlockReply: async (payload) => {
            blockReplies.push(payload);
          },
        },
        makeCfg(home),
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(blockReplies.length).toBe(1);
      expect(blockReplies[0]?.text).toContain("Identity");
      expect(runEmbeddedPiAgent).toHaveBeenCalled();
      const prompt =
        vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0]?.prompt ?? "";
      expect(prompt).not.toContain("/whoami");
      expect(text).toBe("ok");
    });
  });
  it("drops /status for unauthorized senders", async () => {
    await withTempHome(async (home) => {
      // Allowlist only +1000 so the sending number +2001 is unauthorized.
      const cfg = {
        agents: {
          defaults: {
            model: "anthropic/claude-opus-4-5",
            workspace: join(home, "clawd"),
          },
        },
        channels: {
          whatsapp: {
            allowFrom: ["+1000"],
          },
        },
        session: { store: join(home, "sessions.json") },
      };
      const res = await getReplyFromConfig(
        {
          Body: "/status",
          From: "+2001",
          To: "+2000",
          Provider: "whatsapp",
          SenderE164: "+2001",
        },
        {},
        cfg,
      );
      // Bare commands from unauthorized senders are dropped entirely.
      expect(res).toBeUndefined();
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("drops /whoami for unauthorized senders", async () => {
    await withTempHome(async (home) => {
      const cfg = {
        agents: {
          defaults: {
            model: "anthropic/claude-opus-4-5",
            workspace: join(home, "clawd"),
          },
        },
        channels: {
          whatsapp: {
            allowFrom: ["+1000"],
          },
        },
        session: { store: join(home, "sessions.json") },
      };
      const res = await getReplyFromConfig(
        {
          Body: "/whoami",
          From: "+2001",
          To: "+2000",
          Provider: "whatsapp",
          SenderE164: "+2001",
        },
        {},
        cfg,
      );
      expect(res).toBeUndefined();
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
});

View File

@@ -0,0 +1,242 @@
import fs from "node:fs/promises";
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Mock the embedded agent module so calls are recordable and no real run starts.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted: these factories must exist before the hoisted vi.mock calls run.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Static model catalog so model resolution never hits the network.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
// Imported after vi.mock calls so these resolve to the mocked implementations.
import {
  abortEmbeddedPiRun,
  runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { getReplyFromConfig } from "./reply.js";
// Session key of the default (direct-chat) agent session.
const MAIN_SESSION_KEY = "agent:main:main";
// Pretend a linked WhatsApp Web session exists for status checks.
const webMocks = vi.hoisted(() => ({
  webAuthExists: vi.fn().mockResolvedValue(true),
  getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
  readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/**
 * Runs `fn` inside a fresh temp home (prefix "clawdbot-triggers-"), clearing
 * the embedded-agent mocks first so call counts never leak between tests.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const runWithFreshMocks = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return await fn(home);
  };
  return withTempHomeBase(runWithFreshMocks, { prefix: "clawdbot-triggers-" });
}
/**
 * Minimal config for these tests: default agent model/workspace, open
 * WhatsApp allowlist, and a session store inside the temp home.
 */
function makeCfg(home: string) {
  const workspace = join(home, "clawd");
  const sessionStore = join(home, "sessions.json");
  return {
    agents: {
      defaults: { model: "anthropic/claude-opus-4-5", workspace },
    },
    channels: {
      whatsapp: { allowFrom: ["*"] },
    },
    session: { store: sessionStore },
  };
}
// Restore every spy/stub after each test so mock state cannot leak across cases.
afterEach(() => {
  vi.restoreAllMocks();
});
// Unauthorized-sender passthrough of inline commands, plus /help and /send.
describe("trigger handling", () => {
  it("keeps inline /status for unauthorized senders", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      // Allowlist only +1000 so the sender +2001 is unauthorized.
      const cfg = {
        agents: {
          defaults: {
            model: "anthropic/claude-opus-4-5",
            workspace: join(home, "clawd"),
          },
        },
        channels: {
          whatsapp: {
            allowFrom: ["+1000"],
          },
        },
        session: { store: join(home, "sessions.json") },
      };
      const res = await getReplyFromConfig(
        {
          Body: "please /status now",
          From: "+2001",
          To: "+2000",
          Provider: "whatsapp",
          SenderE164: "+2001",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toBe("ok");
      expect(runEmbeddedPiAgent).toHaveBeenCalled();
      const prompt =
        vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0]?.prompt ?? "";
      // Not allowlisted: inline /status is treated as plain text and is not stripped.
      expect(prompt).toContain("/status");
    });
  });
  it("keeps inline /help for unauthorized senders", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const cfg = {
        agents: {
          defaults: {
            model: "anthropic/claude-opus-4-5",
            workspace: join(home, "clawd"),
          },
        },
        channels: {
          whatsapp: {
            allowFrom: ["+1000"],
          },
        },
        session: { store: join(home, "sessions.json") },
      };
      const res = await getReplyFromConfig(
        {
          Body: "please /help now",
          From: "+2001",
          To: "+2000",
          Provider: "whatsapp",
          SenderE164: "+2001",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toBe("ok");
      expect(runEmbeddedPiAgent).toHaveBeenCalled();
      const prompt =
        vi.mocked(runEmbeddedPiAgent).mock.calls[0]?.[0]?.prompt ?? "";
      // Same as /status above: inline /help survives in the prompt.
      expect(prompt).toContain("/help");
    });
  });
  it("returns help without invoking the agent", async () => {
    await withTempHome(async (home) => {
      const res = await getReplyFromConfig(
        {
          Body: "/help",
          From: "+1002",
          To: "+2000",
        },
        {},
        makeCfg(home),
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      // A bare /help from an allowlisted sender is answered directly.
      expect(text).toContain("Help");
      expect(text).toContain("Shortcuts");
      expect(runEmbeddedPiAgent).not.toHaveBeenCalled();
    });
  });
  it("allows owner to set send policy", async () => {
    await withTempHome(async (home) => {
      const cfg = {
        agents: {
          defaults: {
            model: "anthropic/claude-opus-4-5",
            workspace: join(home, "clawd"),
          },
        },
        channels: {
          whatsapp: {
            allowFrom: ["+1000"],
          },
        },
        session: { store: join(home, "sessions.json") },
      };
      const res = await getReplyFromConfig(
        {
          Body: "/send off",
          From: "+1000",
          To: "+2000",
          Provider: "whatsapp",
          SenderE164: "+1000",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Send policy set to off");
      // The policy is persisted in the session store ("off" maps to "deny").
      const storeRaw = await fs.readFile(cfg.session.store, "utf-8");
      const store = JSON.parse(storeRaw) as Record<
        string,
        { sendPolicy?: string }
      >;
      expect(store[MAIN_SESSION_KEY]?.sendPolicy).toBe("deny");
    });
  });
});

View File

@@ -0,0 +1,238 @@
import fs from "node:fs/promises";
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Mock the embedded agent module so calls are recordable and no real run starts.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted: these factories must exist before the hoisted vi.mock calls run.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Static model catalog so model resolution never hits the network.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
// Imported after vi.mock calls so these resolve to the mocked implementations.
import {
  abortEmbeddedPiRun,
  runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { getReplyFromConfig } from "./reply.js";
// Session key of the default (direct-chat) agent session.
const MAIN_SESSION_KEY = "agent:main:main";
// Pretend a linked WhatsApp Web session exists for status checks.
const webMocks = vi.hoisted(() => ({
  webAuthExists: vi.fn().mockResolvedValue(true),
  getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
  readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/**
 * Runs `fn` inside a fresh temp home (prefix "clawdbot-triggers-"), clearing
 * the embedded-agent mocks first so call counts never leak between tests.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const runWithFreshMocks = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return await fn(home);
  };
  return withTempHomeBase(runWithFreshMocks, { prefix: "clawdbot-triggers-" });
}
/**
 * Minimal open-allowlist config; currently unused in this split file (the
 * underscore prefix silences the unused-symbol lint).
 */
function _makeCfg(home: string) {
  const workspace = join(home, "clawd");
  const sessionStore = join(home, "sessions.json");
  return {
    agents: {
      defaults: { model: "anthropic/claude-opus-4-5", workspace },
    },
    channels: {
      whatsapp: { allowFrom: ["*"] },
    },
    session: { store: sessionStore },
  };
}
// Restore every spy/stub after each test so mock state cannot leak across cases.
afterEach(() => {
  vi.restoreAllMocks();
});
// Elevated-mode toggling: allowlisted owner, disabled feature, and group gating.
describe("trigger handling", () => {
  it("allows approved sender to toggle elevated mode", async () => {
    await withTempHome(async (home) => {
      const cfg = {
        agents: {
          defaults: {
            model: "anthropic/claude-opus-4-5",
            workspace: join(home, "clawd"),
          },
        },
        tools: {
          elevated: {
            allowFrom: { whatsapp: ["+1000"] },
          },
        },
        channels: {
          whatsapp: {
            allowFrom: ["+1000"],
          },
        },
        session: { store: join(home, "sessions.json") },
      };
      const res = await getReplyFromConfig(
        {
          Body: "/elevated on",
          From: "+1000",
          To: "+2000",
          Provider: "whatsapp",
          SenderE164: "+1000",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Elevated mode enabled");
      // The toggle is persisted per-session in the store file.
      const storeRaw = await fs.readFile(cfg.session.store, "utf-8");
      const store = JSON.parse(storeRaw) as Record<
        string,
        { elevatedLevel?: string }
      >;
      expect(store[MAIN_SESSION_KEY]?.elevatedLevel).toBe("on");
    });
  });
  it("rejects elevated toggles when disabled", async () => {
    await withTempHome(async (home) => {
      const cfg = {
        agents: {
          defaults: {
            model: "anthropic/claude-opus-4-5",
            workspace: join(home, "clawd"),
          },
        },
        tools: {
          elevated: {
            enabled: false,
            allowFrom: { whatsapp: ["+1000"] },
          },
        },
        channels: {
          whatsapp: {
            allowFrom: ["+1000"],
          },
        },
        session: { store: join(home, "sessions.json") },
      };
      const res = await getReplyFromConfig(
        {
          Body: "/elevated on",
          From: "+1000",
          To: "+2000",
          Provider: "whatsapp",
          SenderE164: "+1000",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      // The refusal message names the config flag that gates the feature.
      expect(text).toContain("tools.elevated.enabled");
      const storeRaw = await fs.readFile(cfg.session.store, "utf-8");
      const store = JSON.parse(storeRaw) as Record<
        string,
        { elevatedLevel?: string }
      >;
      expect(store[MAIN_SESSION_KEY]?.elevatedLevel).toBeUndefined();
    });
  });
  it("ignores elevated directive in groups when not mentioned", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const cfg = {
        agents: {
          defaults: {
            model: "anthropic/claude-opus-4-5",
            workspace: join(home, "clawd"),
          },
        },
        tools: {
          elevated: {
            allowFrom: { whatsapp: ["+1000"] },
          },
        },
        channels: {
          whatsapp: {
            allowFrom: ["+1000"],
            groups: { "*": { requireMention: false } },
          },
        },
        session: { store: join(home, "sessions.json") },
      };
      const res = await getReplyFromConfig(
        {
          Body: "/elevated on",
          From: "group:123@g.us",
          To: "whatsapp:+2000",
          Provider: "whatsapp",
          SenderE164: "+1000",
          ChatType: "group",
          WasMentioned: false,
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      // Without a mention, the directive is ignored and the agent answers.
      expect(text).toBe("ok");
      expect(text).not.toContain("Elevated mode enabled");
    });
  });
});

View File

@@ -0,0 +1,247 @@
import fs from "node:fs/promises";
import { join } from "node:path";
import { afterEach, describe, expect, it, vi } from "vitest";
import { withTempHome as withTempHomeBase } from "../../test/helpers/temp-home.js";
// Mock the embedded agent module so calls are recordable and no real run starts.
vi.mock("../agents/pi-embedded.js", () => ({
  abortEmbeddedPiRun: vi.fn().mockReturnValue(false),
  compactEmbeddedPiSession: vi.fn(),
  runEmbeddedPiAgent: vi.fn(),
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  resolveEmbeddedSessionLane: (key: string) =>
    `session:${key.trim() || "main"}`,
  isEmbeddedPiRunActive: vi.fn().mockReturnValue(false),
  isEmbeddedPiRunStreaming: vi.fn().mockReturnValue(false),
}));
// vi.hoisted: these factories must exist before the hoisted vi.mock calls run.
const usageMocks = vi.hoisted(() => ({
  loadProviderUsageSummary: vi.fn().mockResolvedValue({
    updatedAt: 0,
    providers: [],
  }),
  formatUsageSummaryLine: vi.fn().mockReturnValue("📊 Usage: Claude 80% left"),
  resolveUsageProviderId: vi.fn((provider: string) => provider.split("/")[0]),
}));
vi.mock("../infra/provider-usage.js", () => usageMocks);
// Static model catalog so model resolution never hits the network.
const modelCatalogMocks = vi.hoisted(() => ({
  loadModelCatalog: vi.fn().mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-opus-4-5",
      name: "Claude Opus 4.5",
      contextWindow: 200000,
    },
    {
      provider: "openrouter",
      id: "anthropic/claude-opus-4-5",
      name: "Claude Opus 4.5 (OpenRouter)",
      contextWindow: 200000,
    },
    { provider: "openai", id: "gpt-4.1-mini", name: "GPT-4.1 mini" },
    { provider: "openai", id: "gpt-5.2", name: "GPT-5.2" },
    { provider: "openai-codex", id: "gpt-5.2", name: "GPT-5.2 (Codex)" },
    { provider: "minimax", id: "MiniMax-M2.1", name: "MiniMax M2.1" },
  ]),
  resetModelCatalogCacheForTest: vi.fn(),
}));
vi.mock("../agents/model-catalog.js", () => modelCatalogMocks);
// Imported after vi.mock calls so these resolve to the mocked implementations.
import {
  abortEmbeddedPiRun,
  runEmbeddedPiAgent,
} from "../agents/pi-embedded.js";
import { loadSessionStore } from "../config/sessions.js";
import { getReplyFromConfig } from "./reply.js";
// Session key of the default (direct-chat) agent session.
const MAIN_SESSION_KEY = "agent:main:main";
// Pretend a linked WhatsApp Web session exists for status checks.
const webMocks = vi.hoisted(() => ({
  webAuthExists: vi.fn().mockResolvedValue(true),
  getWebAuthAgeMs: vi.fn().mockReturnValue(120_000),
  readWebSelfId: vi.fn().mockReturnValue({ e164: "+1999" }),
}));
vi.mock("../web/session.js", () => webMocks);
/**
 * Runs `fn` inside a fresh temp home (prefix "clawdbot-triggers-"), clearing
 * the embedded-agent mocks first so call counts never leak between tests.
 */
async function withTempHome<T>(fn: (home: string) => Promise<T>): Promise<T> {
  const runWithFreshMocks = async (home: string): Promise<T> => {
    vi.mocked(runEmbeddedPiAgent).mockClear();
    vi.mocked(abortEmbeddedPiRun).mockClear();
    return await fn(home);
  };
  return withTempHomeBase(runWithFreshMocks, { prefix: "clawdbot-triggers-" });
}
/**
 * Minimal open-allowlist config; currently unused in this split file (the
 * underscore prefix silences the unused-symbol lint).
 */
function _makeCfg(home: string) {
  const workspace = join(home, "clawd");
  const sessionStore = join(home, "sessions.json");
  return {
    agents: {
      defaults: { model: "anthropic/claude-opus-4-5", workspace },
    },
    channels: {
      whatsapp: { allowFrom: ["*"] },
    },
    session: { store: sessionStore },
  };
}
// Restore every spy/stub after each test so mock state cannot leak across cases.
afterEach(() => {
  vi.restoreAllMocks();
});
// Elevated-mode directives in group chats (mention-gated) and direct chats.
describe("trigger handling", () => {
  it("allows elevated off in groups without mention", async () => {
    await withTempHome(async (home) => {
      vi.mocked(runEmbeddedPiAgent).mockResolvedValue({
        payloads: [{ text: "ok" }],
        meta: {
          durationMs: 1,
          agentMeta: { sessionId: "s", provider: "p", model: "m" },
        },
      });
      const cfg = {
        agents: {
          defaults: {
            model: "anthropic/claude-opus-4-5",
            workspace: join(home, "clawd"),
          },
        },
        tools: {
          elevated: {
            allowFrom: { whatsapp: ["+1000"] },
          },
        },
        channels: {
          whatsapp: {
            allowFrom: ["+1000"],
            groups: { "*": { requireMention: false } },
          },
        },
        session: { store: join(home, "sessions.json") },
      };
      const res = await getReplyFromConfig(
        {
          Body: "/elevated off",
          From: "group:123@g.us",
          To: "whatsapp:+2000",
          Provider: "whatsapp",
          SenderE164: "+1000",
          ChatType: "group",
          WasMentioned: false,
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      // Turning elevated OFF is honored even without a mention.
      expect(text).toContain("Elevated mode disabled.");
      const store = loadSessionStore(cfg.session.store);
      expect(store["agent:main:whatsapp:group:123@g.us"]?.elevatedLevel).toBe(
        "off",
      );
    });
  });
  it("allows elevated directive in groups when mentioned", async () => {
    await withTempHome(async (home) => {
      const cfg = {
        agents: {
          defaults: {
            model: "anthropic/claude-opus-4-5",
            workspace: join(home, "clawd"),
          },
        },
        tools: {
          elevated: {
            allowFrom: { whatsapp: ["+1000"] },
          },
        },
        channels: {
          whatsapp: {
            allowFrom: ["+1000"],
            groups: { "*": { requireMention: true } },
          },
        },
        session: { store: join(home, "sessions.json") },
      };
      const res = await getReplyFromConfig(
        {
          Body: "/elevated on",
          From: "group:123@g.us",
          To: "whatsapp:+2000",
          Provider: "whatsapp",
          SenderE164: "+1000",
          ChatType: "group",
          WasMentioned: true,
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Elevated mode enabled");
      // Persisted under the group-scoped session key, not the main session.
      const storeRaw = await fs.readFile(cfg.session.store, "utf-8");
      const store = JSON.parse(storeRaw) as Record<
        string,
        { elevatedLevel?: string }
      >;
      expect(store["agent:main:whatsapp:group:123@g.us"]?.elevatedLevel).toBe(
        "on",
      );
    });
  });
  it("allows elevated directive in direct chats without mentions", async () => {
    await withTempHome(async (home) => {
      const cfg = {
        agents: {
          defaults: {
            model: "anthropic/claude-opus-4-5",
            workspace: join(home, "clawd"),
          },
        },
        tools: {
          elevated: {
            allowFrom: { whatsapp: ["+1000"] },
          },
        },
        channels: {
          whatsapp: {
            allowFrom: ["+1000"],
          },
        },
        session: { store: join(home, "sessions.json") },
      };
      const res = await getReplyFromConfig(
        {
          Body: "/elevated on",
          From: "+1000",
          To: "+2000",
          Provider: "whatsapp",
          SenderE164: "+1000",
        },
        {},
        cfg,
      );
      const text = Array.isArray(res) ? res[0]?.text : res?.text;
      expect(text).toContain("Elevated mode enabled");
      const storeRaw = await fs.readFile(cfg.session.store, "utf-8");
      const store = JSON.parse(storeRaw) as Record<
        string,
        { elevatedLevel?: string }
      >;
      expect(store[MAIN_SESSION_KEY]?.elevatedLevel).toBe("on");
    });
  });
});

View File

@@ -0,0 +1,247 @@
import { describe, expect, it, vi } from "vitest";
import type { SessionEntry } from "../../config/sessions.js";
import type { TypingMode } from "../../config/types.js";
import type { TemplateContext } from "../templating.js";
import type { GetReplyOptions } from "../types.js";
import type { FollowupRun, QueueSettings } from "./queue.js";
import { createMockTypingController } from "./test-helpers.js";
// Captures every embedded-agent invocation made through the mock below.
const runEmbeddedPiAgentMock = vi.fn();
// Model fallback passes straight through to the primary provider/model.
vi.mock("../../agents/model-fallback.js", () => ({
  runWithModelFallback: async ({
    provider,
    model,
    run,
  }: {
    provider: string;
    model: string;
    run: (provider: string, model: string) => Promise<unknown>;
  }) => ({
    result: await run(provider, model),
    provider,
    model,
  }),
}));
vi.mock("../../agents/pi-embedded.js", () => ({
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: (params: unknown) => runEmbeddedPiAgentMock(params),
}));
// Keep the real queue module but stub its scheduling side effects.
vi.mock("./queue.js", async () => {
  const actual =
    await vi.importActual<typeof import("./queue.js")>("./queue.js");
  return {
    ...actual,
    enqueueFollowupRun: vi.fn(),
    scheduleFollowupDrain: vi.fn(),
  };
});
import { runReplyAgent } from "./agent-runner.js";
// Builds a minimal runReplyAgent invocation plus a mock typing controller.
// The `as unknown as` casts are deliberate fixture shortcuts: only the fields
// the agent runner actually reads are populated — NOTE(review): keep in sync
// with the real FollowupRun/QueueSettings shapes.
function createMinimalRun(params?: {
  opts?: GetReplyOptions;
  resolvedVerboseLevel?: "off" | "on";
  sessionStore?: Record<string, SessionEntry>;
  sessionEntry?: SessionEntry;
  sessionKey?: string;
  storePath?: string;
  typingMode?: TypingMode;
  blockStreamingEnabled?: boolean;
}) {
  const typing = createMockTypingController();
  const opts = params?.opts;
  const sessionCtx = {
    Provider: "whatsapp",
    MessageSid: "msg",
  } as unknown as TemplateContext;
  const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings;
  const sessionKey = params?.sessionKey ?? "main";
  // Pre-resolved run description handed to runReplyAgent.
  const followupRun = {
    prompt: "hello",
    summaryLine: "hello",
    enqueuedAt: Date.now(),
    run: {
      sessionId: "session",
      sessionKey,
      messageProvider: "whatsapp",
      sessionFile: "/tmp/session.jsonl",
      workspaceDir: "/tmp",
      config: {},
      skillsSnapshot: {},
      provider: "anthropic",
      model: "claude",
      thinkLevel: "low",
      verboseLevel: params?.resolvedVerboseLevel ?? "off",
      elevatedLevel: "off",
      bashElevated: {
        enabled: false,
        allowed: false,
        defaultLevel: "off",
      },
      timeoutMs: 1_000,
      blockReplyBreak: "message_end",
    },
  } as unknown as FollowupRun;
  return {
    typing,
    opts,
    // Lazily invokes the runner so each test triggers it explicitly.
    run: () =>
      runReplyAgent({
        commandBody: "hello",
        followupRun,
        queueKey: "main",
        resolvedQueue,
        shouldSteer: false,
        shouldFollowup: false,
        isActive: false,
        isStreaming: false,
        opts,
        typing,
        sessionEntry: params?.sessionEntry,
        sessionStore: params?.sessionStore,
        sessionKey,
        storePath: params?.storePath,
        sessionCtx,
        defaultModel: "anthropic/claude-opus-4-5",
        resolvedVerboseLevel: params?.resolvedVerboseLevel ?? "off",
        isNewSession: false,
        blockStreamingEnabled: params?.blockStreamingEnabled ?? false,
        resolvedBlockStreamingBreak: "message_end",
        shouldInjectGroupIntro: false,
        typingMode: params?.typingMode ?? "instant",
      }),
  };
}
// Typing-indicator behavior of runReplyAgent across typing modes.
// FIX: the original annotated mock params as `EmbeddedPiAgentParams`, a type
// this file never imports or declares (a compile error under strict TS). The
// callbacks are now typed structurally, matching the last two tests below.
describe("runReplyAgent typing (heartbeat)", () => {
  it("signals typing for normal runs", async () => {
    const onPartialReply = vi.fn();
    runEmbeddedPiAgentMock.mockImplementationOnce(
      async (params: {
        onPartialReply?: (payload: { text?: string }) => Promise<void> | void;
      }) => {
        await params.onPartialReply?.({ text: "hi" });
        return { payloads: [{ text: "final" }], meta: {} };
      },
    );
    const { run, typing } = createMinimalRun({
      opts: { isHeartbeat: false, onPartialReply },
    });
    await run();
    expect(onPartialReply).toHaveBeenCalled();
    expect(typing.startTypingOnText).toHaveBeenCalledWith("hi");
    expect(typing.startTypingLoop).toHaveBeenCalled();
  });
  it("signals typing even without consumer partial handler", async () => {
    runEmbeddedPiAgentMock.mockImplementationOnce(
      async (params: {
        onPartialReply?: (payload: { text?: string }) => Promise<void> | void;
      }) => {
        await params.onPartialReply?.({ text: "hi" });
        return { payloads: [{ text: "final" }], meta: {} };
      },
    );
    const { run, typing } = createMinimalRun({
      typingMode: "message",
    });
    await run();
    expect(typing.startTypingOnText).toHaveBeenCalledWith("hi");
    expect(typing.startTypingLoop).not.toHaveBeenCalled();
  });
  it("never signals typing for heartbeat runs", async () => {
    const onPartialReply = vi.fn();
    runEmbeddedPiAgentMock.mockImplementationOnce(
      async (params: {
        onPartialReply?: (payload: { text?: string }) => Promise<void> | void;
      }) => {
        await params.onPartialReply?.({ text: "hi" });
        return { payloads: [{ text: "final" }], meta: {} };
      },
    );
    const { run, typing } = createMinimalRun({
      opts: { isHeartbeat: true, onPartialReply },
    });
    await run();
    // Partial replies still flow to the consumer; only typing is suppressed.
    expect(onPartialReply).toHaveBeenCalled();
    expect(typing.startTypingOnText).not.toHaveBeenCalled();
    expect(typing.startTypingLoop).not.toHaveBeenCalled();
  });
  it("suppresses partial streaming for NO_REPLY", async () => {
    const onPartialReply = vi.fn();
    runEmbeddedPiAgentMock.mockImplementationOnce(
      async (params: {
        onPartialReply?: (payload: { text?: string }) => Promise<void> | void;
      }) => {
        await params.onPartialReply?.({ text: "NO_REPLY" });
        return { payloads: [{ text: "NO_REPLY" }], meta: {} };
      },
    );
    const { run, typing } = createMinimalRun({
      opts: { isHeartbeat: false, onPartialReply },
    });
    await run();
    // NO_REPLY sentinel: neither the consumer nor typing should observe it.
    expect(onPartialReply).not.toHaveBeenCalled();
    expect(typing.startTypingOnText).not.toHaveBeenCalled();
    expect(typing.startTypingLoop).not.toHaveBeenCalled();
  });
  it("starts typing on assistant message start in message mode", async () => {
    runEmbeddedPiAgentMock.mockImplementationOnce(
      async (params: {
        onAssistantMessageStart?: () => Promise<void> | void;
      }) => {
        await params.onAssistantMessageStart?.();
        return { payloads: [{ text: "final" }], meta: {} };
      },
    );
    const { run, typing } = createMinimalRun({
      typingMode: "message",
    });
    await run();
    expect(typing.startTypingLoop).toHaveBeenCalled();
    expect(typing.startTypingOnText).not.toHaveBeenCalled();
  });
  it("starts typing from reasoning stream in thinking mode", async () => {
    runEmbeddedPiAgentMock.mockImplementationOnce(
      async (params: {
        onPartialReply?: (payload: { text?: string }) => Promise<void> | void;
        onReasoningStream?: (payload: {
          text?: string;
        }) => Promise<void> | void;
      }) => {
        await params.onReasoningStream?.({ text: "Reasoning:\n_step_" });
        await params.onPartialReply?.({ text: "hi" });
        return { payloads: [{ text: "final" }], meta: {} };
      },
    );
    const { run, typing } = createMinimalRun({
      typingMode: "thinking",
    });
    await run();
    expect(typing.startTypingLoop).toHaveBeenCalled();
    expect(typing.startTypingOnText).not.toHaveBeenCalled();
  });
  it("suppresses typing in never mode", async () => {
    runEmbeddedPiAgentMock.mockImplementationOnce(
      async (params: {
        onPartialReply?: (payload: { text?: string }) => void;
      }) => {
        params.onPartialReply?.({ text: "hi" });
        return { payloads: [{ text: "final" }], meta: {} };
      },
    );
    const { run, typing } = createMinimalRun({
      typingMode: "never",
    });
    await run();
    expect(typing.startTypingOnText).not.toHaveBeenCalled();
    expect(typing.startTypingLoop).not.toHaveBeenCalled();
  });
});

View File

@@ -0,0 +1,225 @@
import fs from "node:fs/promises";
import { tmpdir } from "node:os";
import path from "node:path";
import { describe, expect, it, vi } from "vitest";
import type { SessionEntry } from "../../config/sessions.js";
import type { TypingMode } from "../../config/types.js";
import type { TemplateContext } from "../templating.js";
import type { GetReplyOptions } from "../types.js";
import type { FollowupRun, QueueSettings } from "./queue.js";
import { createMockTypingController } from "./test-helpers.js";
// Captures every embedded-agent invocation made through the mock below.
const runEmbeddedPiAgentMock = vi.fn();
// Model fallback passes straight through to the primary provider/model.
vi.mock("../../agents/model-fallback.js", () => ({
  runWithModelFallback: async ({
    provider,
    model,
    run,
  }: {
    provider: string;
    model: string;
    run: (provider: string, model: string) => Promise<unknown>;
  }) => ({
    result: await run(provider, model),
    provider,
    model,
  }),
}));
vi.mock("../../agents/pi-embedded.js", () => ({
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: (params: unknown) => runEmbeddedPiAgentMock(params),
}));
// Keep the real queue module but stub its scheduling side effects.
vi.mock("./queue.js", async () => {
  const actual =
    await vi.importActual<typeof import("./queue.js")>("./queue.js");
  return {
    ...actual,
    enqueueFollowupRun: vi.fn(),
    scheduleFollowupDrain: vi.fn(),
  };
});
import { runReplyAgent } from "./agent-runner.js";
function createMinimalRun(params?: {
opts?: GetReplyOptions;
resolvedVerboseLevel?: "off" | "on";
sessionStore?: Record<string, SessionEntry>;
sessionEntry?: SessionEntry;
sessionKey?: string;
storePath?: string;
typingMode?: TypingMode;
blockStreamingEnabled?: boolean;
}) {
const typing = createMockTypingController();
const opts = params?.opts;
const sessionCtx = {
Provider: "whatsapp",
MessageSid: "msg",
} as unknown as TemplateContext;
const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings;
const sessionKey = params?.sessionKey ?? "main";
const followupRun = {
prompt: "hello",
summaryLine: "hello",
enqueuedAt: Date.now(),
run: {
sessionId: "session",
sessionKey,
messageProvider: "whatsapp",
sessionFile: "/tmp/session.jsonl",
workspaceDir: "/tmp",
config: {},
skillsSnapshot: {},
provider: "anthropic",
model: "claude",
thinkLevel: "low",
verboseLevel: params?.resolvedVerboseLevel ?? "off",
elevatedLevel: "off",
bashElevated: {
enabled: false,
allowed: false,
defaultLevel: "off",
},
timeoutMs: 1_000,
blockReplyBreak: "message_end",
},
} as unknown as FollowupRun;
return {
typing,
opts,
run: () =>
runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
opts,
typing,
sessionEntry: params?.sessionEntry,
sessionStore: params?.sessionStore,
sessionKey,
storePath: params?.storePath,
sessionCtx,
defaultModel: "anthropic/claude-opus-4-5",
resolvedVerboseLevel: params?.resolvedVerboseLevel ?? "off",
isNewSession: false,
blockStreamingEnabled: params?.blockStreamingEnabled ?? false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: params?.typingMode ?? "instant",
}),
};
}
describe("runReplyAgent typing (heartbeat)", () => {
  // Shape of the callbacks runReplyAgent wires into the embedded agent.
  // This alias was defined in the pre-split test file and was not carried
  // over when the suite was split, leaving the `params` annotations below
  // unresolved; redeclared here (only the callbacks this file uses) so the
  // file compiles.
  type EmbeddedPiAgentParams = {
    onBlockReply?: (payload: {
      text?: string;
      mediaUrls?: string[];
    }) => Promise<void> | void;
    onToolResult?: (payload: {
      text?: string;
      mediaUrls?: string[];
    }) => Promise<void> | void;
  };
  // A streamed block reply must reach the consumer callback (with abort/
  // timeout plumbing) and drive the typing indicator with its text.
  it("signals typing on block replies", async () => {
    const onBlockReply = vi.fn();
    runEmbeddedPiAgentMock.mockImplementationOnce(
      async (params: EmbeddedPiAgentParams) => {
        await params.onBlockReply?.({ text: "chunk", mediaUrls: [] });
        return { payloads: [{ text: "final" }], meta: {} };
      },
    );
    const { run, typing } = createMinimalRun({
      typingMode: "message",
      blockStreamingEnabled: true,
      opts: { onBlockReply },
    });
    await run();
    expect(typing.startTypingOnText).toHaveBeenCalledWith("chunk");
    expect(onBlockReply).toHaveBeenCalled();
    const [blockPayload, blockOpts] = onBlockReply.mock.calls[0] ?? [];
    expect(blockPayload).toMatchObject({ text: "chunk", audioAsVoice: false });
    expect(blockOpts).toMatchObject({
      abortSignal: expect.any(AbortSignal),
      timeoutMs: expect.any(Number),
    });
  });
  // Tool results with visible text are forwarded and trigger typing.
  it("signals typing on tool results", async () => {
    const onToolResult = vi.fn();
    runEmbeddedPiAgentMock.mockImplementationOnce(
      async (params: EmbeddedPiAgentParams) => {
        await params.onToolResult?.({ text: "tooling", mediaUrls: [] });
        return { payloads: [{ text: "final" }], meta: {} };
      },
    );
    const { run, typing } = createMinimalRun({
      typingMode: "message",
      opts: { onToolResult },
    });
    await run();
    expect(typing.startTypingOnText).toHaveBeenCalledWith("tooling");
    expect(onToolResult).toHaveBeenCalledWith({
      text: "tooling",
      mediaUrls: [],
    });
  });
  // The NO_REPLY sentinel suppresses both forwarding and typing.
  it("skips typing for silent tool results", async () => {
    const onToolResult = vi.fn();
    runEmbeddedPiAgentMock.mockImplementationOnce(
      async (params: EmbeddedPiAgentParams) => {
        await params.onToolResult?.({ text: "NO_REPLY", mediaUrls: [] });
        return { payloads: [{ text: "final" }], meta: {} };
      },
    );
    const { run, typing } = createMinimalRun({
      typingMode: "message",
      opts: { onToolResult },
    });
    await run();
    expect(typing.startTypingOnText).not.toHaveBeenCalled();
    expect(onToolResult).not.toHaveBeenCalled();
  });
  // A compaction "end" event in verbose mode should prepend an announcement
  // payload and bump the persisted per-session compaction counter.
  it("announces auto-compaction in verbose mode and tracks count", async () => {
    const storePath = path.join(
      await fs.mkdtemp(path.join(tmpdir(), "clawdbot-compaction-")),
      "sessions.json",
    );
    const sessionEntry = { sessionId: "session", updatedAt: Date.now() };
    const sessionStore = { main: sessionEntry };
    runEmbeddedPiAgentMock.mockImplementationOnce(
      async (params: {
        onAgentEvent?: (evt: {
          stream: string;
          data: Record<string, unknown>;
        }) => void;
      }) => {
        params.onAgentEvent?.({
          stream: "compaction",
          data: { phase: "end", willRetry: false },
        });
        return { payloads: [{ text: "final" }], meta: {} };
      },
    );
    const { run } = createMinimalRun({
      resolvedVerboseLevel: "on",
      sessionEntry,
      sessionStore,
      sessionKey: "main",
      storePath,
    });
    const res = await run();
    expect(Array.isArray(res)).toBe(true);
    const payloads = res as { text?: string }[];
    expect(payloads[0]?.text).toContain("Auto-compaction complete");
    expect(payloads[0]?.text).toContain("count 1");
    expect(sessionStore.main.compactionCount).toBe(1);
  });
});

View File

@@ -0,0 +1,221 @@
import fs from "node:fs/promises";
import { tmpdir } from "node:os";
import path from "node:path";
import { describe, expect, it, vi } from "vitest";
import type { SessionEntry } from "../../config/sessions.js";
import * as sessions from "../../config/sessions.js";
import type { TypingMode } from "../../config/types.js";
import type { TemplateContext } from "../templating.js";
import type { GetReplyOptions } from "../types.js";
import type { FollowupRun, QueueSettings } from "./queue.js";
import { createMockTypingController } from "./test-helpers.js";
// Captures every call the runner makes into the embedded Pi agent so each
// test can script its behavior via mockImplementationOnce.
const runEmbeddedPiAgentMock = vi.fn();
// Pass-through fallback: invoke the primary provider/model directly so tests
// exercise runReplyAgent without real fallback logic.
vi.mock("../../agents/model-fallback.js", () => ({
  runWithModelFallback: async ({
    provider,
    model,
    run,
  }: {
    provider: string;
    model: string;
    run: (provider: string, model: string) => Promise<unknown>;
  }) => ({
    result: await run(provider, model),
    provider,
    model,
  }),
}));
// Route embedded-agent calls into the controllable mock above; report that no
// message is already queued so each run starts fresh.
vi.mock("../../agents/pi-embedded.js", () => ({
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: (params: unknown) => runEmbeddedPiAgentMock(params),
}));
// Keep the real queue module but stub out the followup-scheduling side
// effects so runs stay synchronous and assertable.
vi.mock("./queue.js", async () => {
  const actual =
    await vi.importActual<typeof import("./queue.js")>("./queue.js");
  return {
    ...actual,
    enqueueFollowupRun: vi.fn(),
    scheduleFollowupDrain: vi.fn(),
  };
});
// Imported after the vi.mock declarations (vitest hoists them) so the runner
// resolves the mocked modules.
import { runReplyAgent } from "./agent-runner.js";
/**
 * Builds a minimal, fully-stubbed runReplyAgent invocation.
 *
 * Returns the mock typing controller (for typing assertions), the opts that
 * were passed through, and a `run` thunk that executes runReplyAgent with a
 * canned WhatsApp session context and followup run. Every field not supplied
 * via `params` falls back to an inert default.
 */
function createMinimalRun(params?: {
  opts?: GetReplyOptions;
  resolvedVerboseLevel?: "off" | "on";
  sessionStore?: Record<string, SessionEntry>;
  sessionEntry?: SessionEntry;
  sessionKey?: string;
  storePath?: string;
  typingMode?: TypingMode;
  blockStreamingEnabled?: boolean;
}) {
  const typing = createMockTypingController();
  const opts = params?.opts;
  // Minimal template/queue stand-ins; casts are deliberate — only the fields
  // runReplyAgent actually reads are provided.
  const sessionCtx = {
    Provider: "whatsapp",
    MessageSid: "msg",
  } as unknown as TemplateContext;
  const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings;
  const sessionKey = params?.sessionKey ?? "main";
  // Canned followup run describing one "hello" prompt against a fake
  // anthropic/claude session.
  const followupRun = {
    prompt: "hello",
    summaryLine: "hello",
    enqueuedAt: Date.now(),
    run: {
      sessionId: "session",
      sessionKey,
      messageProvider: "whatsapp",
      sessionFile: "/tmp/session.jsonl",
      workspaceDir: "/tmp",
      config: {},
      skillsSnapshot: {},
      provider: "anthropic",
      model: "claude",
      thinkLevel: "low",
      verboseLevel: params?.resolvedVerboseLevel ?? "off",
      elevatedLevel: "off",
      bashElevated: {
        enabled: false,
        allowed: false,
        defaultLevel: "off",
      },
      timeoutMs: 1_000,
      blockReplyBreak: "message_end",
    },
  } as unknown as FollowupRun;
  return {
    typing,
    opts,
    run: () =>
      runReplyAgent({
        commandBody: "hello",
        followupRun,
        queueKey: "main",
        resolvedQueue,
        shouldSteer: false,
        shouldFollowup: false,
        isActive: false,
        isStreaming: false,
        opts,
        typing,
        sessionEntry: params?.sessionEntry,
        sessionStore: params?.sessionStore,
        sessionKey,
        storePath: params?.storePath,
        sessionCtx,
        defaultModel: "anthropic/claude-opus-4-5",
        resolvedVerboseLevel: params?.resolvedVerboseLevel ?? "off",
        isNewSession: false,
        blockStreamingEnabled: params?.blockStreamingEnabled ?? false,
        resolvedBlockStreamingBreak: "message_end",
        shouldInjectGroupIntro: false,
        typingMode: params?.typingMode ?? "instant",
      }),
  };
}
describe("runReplyAgent typing (heartbeat)", () => {
  // NOTE(review): title inherited from the pre-split suite; these tests cover
  // session reset/recovery, not typing — consider renaming.
  it("resets corrupted Gemini sessions and deletes transcripts", async () => {
    // Point the app at a throwaway state dir so session files can be freely
    // created and destroyed; restored in the finally block.
    const prevStateDir = process.env.CLAWDBOT_STATE_DIR;
    const stateDir = await fs.mkdtemp(
      path.join(tmpdir(), "clawdbot-session-reset-"),
    );
    process.env.CLAWDBOT_STATE_DIR = stateDir;
    try {
      const sessionId = "session-corrupt";
      const storePath = path.join(stateDir, "sessions", "sessions.json");
      const sessionEntry = { sessionId, updatedAt: Date.now() };
      const sessionStore = { main: sessionEntry };
      await fs.mkdir(path.dirname(storePath), { recursive: true });
      await fs.writeFile(storePath, JSON.stringify(sessionStore), "utf-8");
      // Seed a transcript file the runner is expected to delete on reset.
      const transcriptPath = sessions.resolveSessionTranscriptPath(sessionId);
      await fs.mkdir(path.dirname(transcriptPath), { recursive: true });
      await fs.writeFile(transcriptPath, "bad", "utf-8");
      // This specific error message is the Gemini corrupted-history signature.
      runEmbeddedPiAgentMock.mockImplementationOnce(async () => {
        throw new Error(
          "function call turn comes immediately after a user turn or after a function response turn",
        );
      });
      const { run } = createMinimalRun({
        sessionEntry,
        sessionStore,
        sessionKey: "main",
        storePath,
      });
      const res = await run();
      expect(res).toMatchObject({
        text: expect.stringContaining("Session history was corrupted"),
      });
      // Entry removed in-memory, transcript deleted, removal persisted.
      expect(sessionStore.main).toBeUndefined();
      await expect(fs.access(transcriptPath)).rejects.toThrow();
      const persisted = JSON.parse(await fs.readFile(storePath, "utf-8"));
      expect(persisted.main).toBeUndefined();
    } finally {
      if (prevStateDir) {
        process.env.CLAWDBOT_STATE_DIR = prevStateDir;
      } else {
        delete process.env.CLAWDBOT_STATE_DIR;
      }
    }
  });
  it("keeps sessions intact on other errors", async () => {
    const prevStateDir = process.env.CLAWDBOT_STATE_DIR;
    const stateDir = await fs.mkdtemp(
      path.join(tmpdir(), "clawdbot-session-noreset-"),
    );
    process.env.CLAWDBOT_STATE_DIR = stateDir;
    try {
      const sessionId = "session-ok";
      const storePath = path.join(stateDir, "sessions", "sessions.json");
      const sessionEntry = { sessionId, updatedAt: Date.now() };
      const sessionStore = { main: sessionEntry };
      await fs.mkdir(path.dirname(storePath), { recursive: true });
      await fs.writeFile(storePath, JSON.stringify(sessionStore), "utf-8");
      const transcriptPath = sessions.resolveSessionTranscriptPath(sessionId);
      await fs.mkdir(path.dirname(transcriptPath), { recursive: true });
      await fs.writeFile(transcriptPath, "ok", "utf-8");
      // A generic failure must NOT trigger the corrupted-session reset path.
      runEmbeddedPiAgentMock.mockImplementationOnce(async () => {
        throw new Error("INVALID_ARGUMENT: some other failure");
      });
      const { run } = createMinimalRun({
        sessionEntry,
        sessionStore,
        sessionKey: "main",
        storePath,
      });
      const res = await run();
      expect(res).toMatchObject({
        text: expect.stringContaining("Agent failed before reply"),
      });
      // Entry, transcript, and persisted store all survive untouched.
      expect(sessionStore.main).toBeDefined();
      await expect(fs.access(transcriptPath)).resolves.toBeUndefined();
      const persisted = JSON.parse(await fs.readFile(storePath, "utf-8"));
      expect(persisted.main).toBeDefined();
    } finally {
      if (prevStateDir) {
        process.env.CLAWDBOT_STATE_DIR = prevStateDir;
      } else {
        delete process.env.CLAWDBOT_STATE_DIR;
      }
    }
  });
});

View File

@@ -0,0 +1,232 @@
import fs from "node:fs/promises";
import { tmpdir } from "node:os";
import path from "node:path";
import { describe, expect, it, vi } from "vitest";
import type { SessionEntry } from "../../config/sessions.js";
import type { TypingMode } from "../../config/types.js";
import type { TemplateContext } from "../templating.js";
import type { GetReplyOptions } from "../types.js";
import type { FollowupRun, QueueSettings } from "./queue.js";
import { createMockTypingController } from "./test-helpers.js";
// Captures every call the runner makes into the embedded Pi agent so each
// test can script its behavior via mockImplementationOnce.
const runEmbeddedPiAgentMock = vi.fn();
// Pass-through fallback: invoke the primary provider/model directly so tests
// exercise runReplyAgent without real fallback logic.
vi.mock("../../agents/model-fallback.js", () => ({
  runWithModelFallback: async ({
    provider,
    model,
    run,
  }: {
    provider: string;
    model: string;
    run: (provider: string, model: string) => Promise<unknown>;
  }) => ({
    result: await run(provider, model),
    provider,
    model,
  }),
}));
// Route embedded-agent calls into the controllable mock above; report that no
// message is already queued so each run starts fresh.
vi.mock("../../agents/pi-embedded.js", () => ({
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: (params: unknown) => runEmbeddedPiAgentMock(params),
}));
// Keep the real queue module but stub out the followup-scheduling side
// effects so runs stay synchronous and assertable.
vi.mock("./queue.js", async () => {
  const actual =
    await vi.importActual<typeof import("./queue.js")>("./queue.js");
  return {
    ...actual,
    enqueueFollowupRun: vi.fn(),
    scheduleFollowupDrain: vi.fn(),
  };
});
// Imported after the vi.mock declarations (vitest hoists them) so the runner
// resolves the mocked modules.
import { runReplyAgent } from "./agent-runner.js";
/**
 * Builds a minimal, fully-stubbed runReplyAgent invocation.
 *
 * Returns the mock typing controller (for typing assertions), the opts that
 * were passed through, and a `run` thunk that executes runReplyAgent with a
 * canned WhatsApp session context and followup run. Every field not supplied
 * via `params` falls back to an inert default.
 */
function createMinimalRun(params?: {
  opts?: GetReplyOptions;
  resolvedVerboseLevel?: "off" | "on";
  sessionStore?: Record<string, SessionEntry>;
  sessionEntry?: SessionEntry;
  sessionKey?: string;
  storePath?: string;
  typingMode?: TypingMode;
  blockStreamingEnabled?: boolean;
}) {
  const typing = createMockTypingController();
  const opts = params?.opts;
  // Minimal template/queue stand-ins; casts are deliberate — only the fields
  // runReplyAgent actually reads are provided.
  const sessionCtx = {
    Provider: "whatsapp",
    MessageSid: "msg",
  } as unknown as TemplateContext;
  const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings;
  const sessionKey = params?.sessionKey ?? "main";
  // Canned followup run describing one "hello" prompt against a fake
  // anthropic/claude session.
  const followupRun = {
    prompt: "hello",
    summaryLine: "hello",
    enqueuedAt: Date.now(),
    run: {
      sessionId: "session",
      sessionKey,
      messageProvider: "whatsapp",
      sessionFile: "/tmp/session.jsonl",
      workspaceDir: "/tmp",
      config: {},
      skillsSnapshot: {},
      provider: "anthropic",
      model: "claude",
      thinkLevel: "low",
      verboseLevel: params?.resolvedVerboseLevel ?? "off",
      elevatedLevel: "off",
      bashElevated: {
        enabled: false,
        allowed: false,
        defaultLevel: "off",
      },
      timeoutMs: 1_000,
      blockReplyBreak: "message_end",
    },
  } as unknown as FollowupRun;
  return {
    typing,
    opts,
    run: () =>
      runReplyAgent({
        commandBody: "hello",
        followupRun,
        queueKey: "main",
        resolvedQueue,
        shouldSteer: false,
        shouldFollowup: false,
        isActive: false,
        isStreaming: false,
        opts,
        typing,
        sessionEntry: params?.sessionEntry,
        sessionStore: params?.sessionStore,
        sessionKey,
        storePath: params?.storePath,
        sessionCtx,
        defaultModel: "anthropic/claude-opus-4-5",
        resolvedVerboseLevel: params?.resolvedVerboseLevel ?? "off",
        isNewSession: false,
        blockStreamingEnabled: params?.blockStreamingEnabled ?? false,
        resolvedBlockStreamingBreak: "message_end",
        shouldInjectGroupIntro: false,
        typingMode: params?.typingMode ?? "instant",
      }),
  };
}
describe("runReplyAgent typing (heartbeat)", () => {
  // NOTE(review): title inherited from the pre-split suite; these tests cover
  // context-overflow retry behavior, not typing — consider renaming.
  it("retries after compaction failure by resetting the session", async () => {
    // Point the app at a throwaway state dir; restored in the finally block.
    const prevStateDir = process.env.CLAWDBOT_STATE_DIR;
    const stateDir = await fs.mkdtemp(
      path.join(tmpdir(), "clawdbot-session-compaction-reset-"),
    );
    process.env.CLAWDBOT_STATE_DIR = stateDir;
    try {
      const sessionId = "session";
      const storePath = path.join(stateDir, "sessions", "sessions.json");
      const sessionEntry = { sessionId, updatedAt: Date.now() };
      const sessionStore = { main: sessionEntry };
      await fs.mkdir(path.dirname(storePath), { recursive: true });
      await fs.writeFile(storePath, JSON.stringify(sessionStore), "utf-8");
      // First call: thrown summarization/overflow error; second call: the
      // retry against a fresh session succeeds.
      runEmbeddedPiAgentMock
        .mockImplementationOnce(async () => {
          throw new Error(
            'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}',
          );
        })
        .mockImplementationOnce(async () => ({
          payloads: [{ text: "ok" }],
          meta: {},
        }));
      const callsBefore = runEmbeddedPiAgentMock.mock.calls.length;
      const { run } = createMinimalRun({
        sessionEntry,
        sessionStore,
        sessionKey: "main",
        storePath,
      });
      const res = await run();
      // Exactly one retry: two agent invocations total.
      expect(runEmbeddedPiAgentMock.mock.calls.length - callsBefore).toBe(2);
      const payload = Array.isArray(res) ? res[0] : res;
      expect(payload).toMatchObject({ text: "ok" });
      // Session was reset: new id in memory, and the new id was persisted.
      expect(sessionStore.main.sessionId).not.toBe(sessionId);
      const persisted = JSON.parse(await fs.readFile(storePath, "utf-8"));
      expect(persisted.main.sessionId).toBe(sessionStore.main.sessionId);
    } finally {
      if (prevStateDir) {
        process.env.CLAWDBOT_STATE_DIR = prevStateDir;
      } else {
        delete process.env.CLAWDBOT_STATE_DIR;
      }
    }
  });
  it("retries after context overflow payload by resetting the session", async () => {
    const prevStateDir = process.env.CLAWDBOT_STATE_DIR;
    const stateDir = await fs.mkdtemp(
      path.join(tmpdir(), "clawdbot-session-overflow-reset-"),
    );
    process.env.CLAWDBOT_STATE_DIR = stateDir;
    try {
      const sessionId = "session";
      const storePath = path.join(stateDir, "sessions", "sessions.json");
      const sessionEntry = { sessionId, updatedAt: Date.now() };
      const sessionStore = { main: sessionEntry };
      await fs.mkdir(path.dirname(storePath), { recursive: true });
      await fs.writeFile(storePath, JSON.stringify(sessionStore), "utf-8");
      // Same overflow scenario, but signalled via an error payload/meta
      // rather than a thrown exception.
      runEmbeddedPiAgentMock
        .mockImplementationOnce(async () => ({
          payloads: [
            { text: "Context overflow: prompt too large", isError: true },
          ],
          meta: {
            durationMs: 1,
            error: {
              kind: "context_overflow",
              message:
                'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}',
            },
          },
        }))
        .mockImplementationOnce(async () => ({
          payloads: [{ text: "ok" }],
          meta: { durationMs: 1 },
        }));
      const callsBefore = runEmbeddedPiAgentMock.mock.calls.length;
      const { run } = createMinimalRun({
        sessionEntry,
        sessionStore,
        sessionKey: "main",
        storePath,
      });
      const res = await run();
      expect(runEmbeddedPiAgentMock.mock.calls.length - callsBefore).toBe(2);
      const payload = Array.isArray(res) ? res[0] : res;
      expect(payload).toMatchObject({ text: "ok" });
      // Session reset persisted with the regenerated id.
      expect(sessionStore.main.sessionId).not.toBe(sessionId);
      const persisted = JSON.parse(await fs.readFile(storePath, "utf-8"));
      expect(persisted.main.sessionId).toBe(sessionStore.main.sessionId);
    } finally {
      if (prevStateDir) {
        process.env.CLAWDBOT_STATE_DIR = prevStateDir;
      } else {
        delete process.env.CLAWDBOT_STATE_DIR;
      }
    }
  });
});

View File

@@ -0,0 +1,193 @@
import fs from "node:fs/promises";
import { tmpdir } from "node:os";
import path from "node:path";
import { describe, expect, it, vi } from "vitest";
import type { SessionEntry } from "../../config/sessions.js";
import * as sessions from "../../config/sessions.js";
import type { TypingMode } from "../../config/types.js";
import type { TemplateContext } from "../templating.js";
import type { GetReplyOptions } from "../types.js";
import type { FollowupRun, QueueSettings } from "./queue.js";
import { createMockTypingController } from "./test-helpers.js";
// Captures every call the runner makes into the embedded Pi agent so each
// test can script its behavior via mockImplementationOnce.
const runEmbeddedPiAgentMock = vi.fn();
// Pass-through fallback: invoke the primary provider/model directly so tests
// exercise runReplyAgent without real fallback logic.
vi.mock("../../agents/model-fallback.js", () => ({
  runWithModelFallback: async ({
    provider,
    model,
    run,
  }: {
    provider: string;
    model: string;
    run: (provider: string, model: string) => Promise<unknown>;
  }) => ({
    result: await run(provider, model),
    provider,
    model,
  }),
}));
// Route embedded-agent calls into the controllable mock above; report that no
// message is already queued so each run starts fresh.
vi.mock("../../agents/pi-embedded.js", () => ({
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: (params: unknown) => runEmbeddedPiAgentMock(params),
}));
// Keep the real queue module but stub out the followup-scheduling side
// effects so runs stay synchronous and assertable.
vi.mock("./queue.js", async () => {
  const actual =
    await vi.importActual<typeof import("./queue.js")>("./queue.js");
  return {
    ...actual,
    enqueueFollowupRun: vi.fn(),
    scheduleFollowupDrain: vi.fn(),
  };
});
// Imported after the vi.mock declarations (vitest hoists them) so the runner
// resolves the mocked modules.
import { runReplyAgent } from "./agent-runner.js";
/**
 * Builds a minimal, fully-stubbed runReplyAgent invocation.
 *
 * Returns the mock typing controller (for typing assertions), the opts that
 * were passed through, and a `run` thunk that executes runReplyAgent with a
 * canned WhatsApp session context and followup run. Every field not supplied
 * via `params` falls back to an inert default.
 */
function createMinimalRun(params?: {
  opts?: GetReplyOptions;
  resolvedVerboseLevel?: "off" | "on";
  sessionStore?: Record<string, SessionEntry>;
  sessionEntry?: SessionEntry;
  sessionKey?: string;
  storePath?: string;
  typingMode?: TypingMode;
  blockStreamingEnabled?: boolean;
}) {
  const typing = createMockTypingController();
  const opts = params?.opts;
  // Minimal template/queue stand-ins; casts are deliberate — only the fields
  // runReplyAgent actually reads are provided.
  const sessionCtx = {
    Provider: "whatsapp",
    MessageSid: "msg",
  } as unknown as TemplateContext;
  const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings;
  const sessionKey = params?.sessionKey ?? "main";
  // Canned followup run describing one "hello" prompt against a fake
  // anthropic/claude session.
  const followupRun = {
    prompt: "hello",
    summaryLine: "hello",
    enqueuedAt: Date.now(),
    run: {
      sessionId: "session",
      sessionKey,
      messageProvider: "whatsapp",
      sessionFile: "/tmp/session.jsonl",
      workspaceDir: "/tmp",
      config: {},
      skillsSnapshot: {},
      provider: "anthropic",
      model: "claude",
      thinkLevel: "low",
      verboseLevel: params?.resolvedVerboseLevel ?? "off",
      elevatedLevel: "off",
      bashElevated: {
        enabled: false,
        allowed: false,
        defaultLevel: "off",
      },
      timeoutMs: 1_000,
      blockReplyBreak: "message_end",
    },
  } as unknown as FollowupRun;
  return {
    typing,
    opts,
    run: () =>
      runReplyAgent({
        commandBody: "hello",
        followupRun,
        queueKey: "main",
        resolvedQueue,
        shouldSteer: false,
        shouldFollowup: false,
        isActive: false,
        isStreaming: false,
        opts,
        typing,
        sessionEntry: params?.sessionEntry,
        sessionStore: params?.sessionStore,
        sessionKey,
        storePath: params?.storePath,
        sessionCtx,
        defaultModel: "anthropic/claude-opus-4-5",
        resolvedVerboseLevel: params?.resolvedVerboseLevel ?? "off",
        isNewSession: false,
        blockStreamingEnabled: params?.blockStreamingEnabled ?? false,
        resolvedBlockStreamingBreak: "message_end",
        shouldInjectGroupIntro: false,
        typingMode: params?.typingMode ?? "instant",
      }),
  };
}
describe("runReplyAgent typing (heartbeat)", () => {
  // NOTE(review): title inherited from the pre-split suite; these tests cover
  // reset-persistence failure and error rewriting, not typing — consider
  // renaming.
  it("still replies even if session reset fails to persist", async () => {
    // Point the app at a throwaway state dir; restored in the finally block.
    const prevStateDir = process.env.CLAWDBOT_STATE_DIR;
    const stateDir = await fs.mkdtemp(
      path.join(tmpdir(), "clawdbot-session-reset-fail-"),
    );
    process.env.CLAWDBOT_STATE_DIR = stateDir;
    // Force the post-reset store save to fail once to prove the reply path
    // survives a persistence error.
    const saveSpy = vi
      .spyOn(sessions, "saveSessionStore")
      .mockRejectedValueOnce(new Error("boom"));
    try {
      const sessionId = "session-corrupt";
      const storePath = path.join(stateDir, "sessions", "sessions.json");
      const sessionEntry = { sessionId, updatedAt: Date.now() };
      const sessionStore = { main: sessionEntry };
      // Seed a transcript file the reset path is expected to delete.
      const transcriptPath = sessions.resolveSessionTranscriptPath(sessionId);
      await fs.mkdir(path.dirname(transcriptPath), { recursive: true });
      await fs.writeFile(transcriptPath, "bad", "utf-8");
      // Gemini corrupted-history signature error triggers the reset path.
      runEmbeddedPiAgentMock.mockImplementationOnce(async () => {
        throw new Error(
          "function call turn comes immediately after a user turn or after a function response turn",
        );
      });
      const { run } = createMinimalRun({
        sessionEntry,
        sessionStore,
        sessionKey: "main",
        storePath,
      });
      const res = await run();
      expect(res).toMatchObject({
        text: expect.stringContaining("Session history was corrupted"),
      });
      // In-memory reset and transcript deletion still happen despite the
      // failed save.
      expect(sessionStore.main).toBeUndefined();
      await expect(fs.access(transcriptPath)).rejects.toThrow();
    } finally {
      saveSpy.mockRestore();
      if (prevStateDir) {
        process.env.CLAWDBOT_STATE_DIR = prevStateDir;
      } else {
        delete process.env.CLAWDBOT_STATE_DIR;
      }
    }
  });
  it("rewrites Bun socket errors into friendly text", async () => {
    // Raw Bun fetch socket errors should be wrapped in a friendly message
    // while keeping the original detail in a code block.
    runEmbeddedPiAgentMock.mockImplementationOnce(async () => ({
      payloads: [
        {
          text: "TypeError: The socket connection was closed unexpectedly. For more information, pass `verbose: true` in the second argument to fetch()",
          isError: true,
        },
      ],
      meta: {},
    }));
    const { run } = createMinimalRun();
    const res = await run();
    const payloads = Array.isArray(res) ? res : res ? [res] : [];
    expect(payloads.length).toBe(1);
    expect(payloads[0]?.text).toContain("LLM connection failed");
    expect(payloads[0]?.text).toContain(
      "socket connection was closed unexpectedly",
    );
    expect(payloads[0]?.text).toContain("```");
  });
});

View File

@@ -1,656 +0,0 @@
import fs from "node:fs/promises";
import { tmpdir } from "node:os";
import path from "node:path";
import { describe, expect, it, vi } from "vitest";
import type { SessionEntry } from "../../config/sessions.js";
import * as sessions from "../../config/sessions.js";
import type { TypingMode } from "../../config/types.js";
import type { TemplateContext } from "../templating.js";
import type { GetReplyOptions } from "../types.js";
import type { FollowupRun, QueueSettings } from "./queue.js";
import { createMockTypingController } from "./test-helpers.js";
const runEmbeddedPiAgentMock = vi.fn();
vi.mock("../../agents/model-fallback.js", () => ({
runWithModelFallback: async ({
provider,
model,
run,
}: {
provider: string;
model: string;
run: (provider: string, model: string) => Promise<unknown>;
}) => ({
result: await run(provider, model),
provider,
model,
}),
}));
vi.mock("../../agents/pi-embedded.js", () => ({
queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
runEmbeddedPiAgent: (params: unknown) => runEmbeddedPiAgentMock(params),
}));
vi.mock("./queue.js", async () => {
const actual =
await vi.importActual<typeof import("./queue.js")>("./queue.js");
return {
...actual,
enqueueFollowupRun: vi.fn(),
scheduleFollowupDrain: vi.fn(),
};
});
import { runReplyAgent } from "./agent-runner.js";
type EmbeddedPiAgentParams = {
onPartialReply?: (payload: {
text?: string;
mediaUrls?: string[];
}) => Promise<void> | void;
onAssistantMessageStart?: () => Promise<void> | void;
onBlockReply?: (payload: {
text?: string;
mediaUrls?: string[];
}) => Promise<void> | void;
onToolResult?: (payload: {
text?: string;
mediaUrls?: string[];
}) => Promise<void> | void;
};
function createMinimalRun(params?: {
opts?: GetReplyOptions;
resolvedVerboseLevel?: "off" | "on";
sessionStore?: Record<string, SessionEntry>;
sessionEntry?: SessionEntry;
sessionKey?: string;
storePath?: string;
typingMode?: TypingMode;
blockStreamingEnabled?: boolean;
}) {
const typing = createMockTypingController();
const opts = params?.opts;
const sessionCtx = {
Provider: "whatsapp",
MessageSid: "msg",
} as unknown as TemplateContext;
const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings;
const sessionKey = params?.sessionKey ?? "main";
const followupRun = {
prompt: "hello",
summaryLine: "hello",
enqueuedAt: Date.now(),
run: {
sessionId: "session",
sessionKey,
messageProvider: "whatsapp",
sessionFile: "/tmp/session.jsonl",
workspaceDir: "/tmp",
config: {},
skillsSnapshot: {},
provider: "anthropic",
model: "claude",
thinkLevel: "low",
verboseLevel: params?.resolvedVerboseLevel ?? "off",
elevatedLevel: "off",
bashElevated: {
enabled: false,
allowed: false,
defaultLevel: "off",
},
timeoutMs: 1_000,
blockReplyBreak: "message_end",
},
} as unknown as FollowupRun;
return {
typing,
opts,
run: () =>
runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
opts,
typing,
sessionEntry: params?.sessionEntry,
sessionStore: params?.sessionStore,
sessionKey,
storePath: params?.storePath,
sessionCtx,
defaultModel: "anthropic/claude-opus-4-5",
resolvedVerboseLevel: params?.resolvedVerboseLevel ?? "off",
isNewSession: false,
blockStreamingEnabled: params?.blockStreamingEnabled ?? false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: params?.typingMode ?? "instant",
}),
};
}
describe("runReplyAgent typing (heartbeat)", () => {
it("signals typing for normal runs", async () => {
const onPartialReply = vi.fn();
runEmbeddedPiAgentMock.mockImplementationOnce(
async (params: EmbeddedPiAgentParams) => {
await params.onPartialReply?.({ text: "hi" });
return { payloads: [{ text: "final" }], meta: {} };
},
);
const { run, typing } = createMinimalRun({
opts: { isHeartbeat: false, onPartialReply },
});
await run();
expect(onPartialReply).toHaveBeenCalled();
expect(typing.startTypingOnText).toHaveBeenCalledWith("hi");
expect(typing.startTypingLoop).toHaveBeenCalled();
});
it("signals typing even without consumer partial handler", async () => {
runEmbeddedPiAgentMock.mockImplementationOnce(
async (params: EmbeddedPiAgentParams) => {
await params.onPartialReply?.({ text: "hi" });
return { payloads: [{ text: "final" }], meta: {} };
},
);
const { run, typing } = createMinimalRun({
typingMode: "message",
});
await run();
expect(typing.startTypingOnText).toHaveBeenCalledWith("hi");
expect(typing.startTypingLoop).not.toHaveBeenCalled();
});
it("never signals typing for heartbeat runs", async () => {
const onPartialReply = vi.fn();
runEmbeddedPiAgentMock.mockImplementationOnce(
async (params: EmbeddedPiAgentParams) => {
await params.onPartialReply?.({ text: "hi" });
return { payloads: [{ text: "final" }], meta: {} };
},
);
const { run, typing } = createMinimalRun({
opts: { isHeartbeat: true, onPartialReply },
});
await run();
expect(onPartialReply).toHaveBeenCalled();
expect(typing.startTypingOnText).not.toHaveBeenCalled();
expect(typing.startTypingLoop).not.toHaveBeenCalled();
});
it("suppresses partial streaming for NO_REPLY", async () => {
const onPartialReply = vi.fn();
runEmbeddedPiAgentMock.mockImplementationOnce(
async (params: EmbeddedPiAgentParams) => {
await params.onPartialReply?.({ text: "NO_REPLY" });
return { payloads: [{ text: "NO_REPLY" }], meta: {} };
},
);
const { run, typing } = createMinimalRun({
opts: { isHeartbeat: false, onPartialReply },
});
await run();
expect(onPartialReply).not.toHaveBeenCalled();
expect(typing.startTypingOnText).not.toHaveBeenCalled();
expect(typing.startTypingLoop).not.toHaveBeenCalled();
});
it("starts typing on assistant message start in message mode", async () => {
runEmbeddedPiAgentMock.mockImplementationOnce(
async (params: EmbeddedPiAgentParams) => {
await params.onAssistantMessageStart?.();
return { payloads: [{ text: "final" }], meta: {} };
},
);
const { run, typing } = createMinimalRun({
typingMode: "message",
});
await run();
expect(typing.startTypingLoop).toHaveBeenCalled();
expect(typing.startTypingOnText).not.toHaveBeenCalled();
});
it("starts typing from reasoning stream in thinking mode", async () => {
runEmbeddedPiAgentMock.mockImplementationOnce(
async (params: {
onPartialReply?: (payload: { text?: string }) => Promise<void> | void;
onReasoningStream?: (payload: {
text?: string;
}) => Promise<void> | void;
}) => {
await params.onReasoningStream?.({ text: "Reasoning:\n_step_" });
await params.onPartialReply?.({ text: "hi" });
return { payloads: [{ text: "final" }], meta: {} };
},
);
const { run, typing } = createMinimalRun({
typingMode: "thinking",
});
await run();
expect(typing.startTypingLoop).toHaveBeenCalled();
expect(typing.startTypingOnText).not.toHaveBeenCalled();
});
it("suppresses typing in never mode", async () => {
runEmbeddedPiAgentMock.mockImplementationOnce(
async (params: {
onPartialReply?: (payload: { text?: string }) => void;
}) => {
params.onPartialReply?.({ text: "hi" });
return { payloads: [{ text: "final" }], meta: {} };
},
);
const { run, typing } = createMinimalRun({
typingMode: "never",
});
await run();
expect(typing.startTypingOnText).not.toHaveBeenCalled();
expect(typing.startTypingLoop).not.toHaveBeenCalled();
});
it("signals typing on block replies", async () => {
const onBlockReply = vi.fn();
runEmbeddedPiAgentMock.mockImplementationOnce(
async (params: EmbeddedPiAgentParams) => {
await params.onBlockReply?.({ text: "chunk", mediaUrls: [] });
return { payloads: [{ text: "final" }], meta: {} };
},
);
const { run, typing } = createMinimalRun({
typingMode: "message",
blockStreamingEnabled: true,
opts: { onBlockReply },
});
await run();
expect(typing.startTypingOnText).toHaveBeenCalledWith("chunk");
expect(onBlockReply).toHaveBeenCalled();
const [blockPayload, blockOpts] = onBlockReply.mock.calls[0] ?? [];
expect(blockPayload).toMatchObject({ text: "chunk", audioAsVoice: false });
expect(blockOpts).toMatchObject({
abortSignal: expect.any(AbortSignal),
timeoutMs: expect.any(Number),
});
});
it("signals typing on tool results", async () => {
const onToolResult = vi.fn();
runEmbeddedPiAgentMock.mockImplementationOnce(
async (params: EmbeddedPiAgentParams) => {
await params.onToolResult?.({ text: "tooling", mediaUrls: [] });
return { payloads: [{ text: "final" }], meta: {} };
},
);
const { run, typing } = createMinimalRun({
typingMode: "message",
opts: { onToolResult },
});
await run();
expect(typing.startTypingOnText).toHaveBeenCalledWith("tooling");
expect(onToolResult).toHaveBeenCalledWith({
text: "tooling",
mediaUrls: [],
});
});
it("skips typing for silent tool results", async () => {
const onToolResult = vi.fn();
runEmbeddedPiAgentMock.mockImplementationOnce(
async (params: EmbeddedPiAgentParams) => {
await params.onToolResult?.({ text: "NO_REPLY", mediaUrls: [] });
return { payloads: [{ text: "final" }], meta: {} };
},
);
const { run, typing } = createMinimalRun({
typingMode: "message",
opts: { onToolResult },
});
await run();
expect(typing.startTypingOnText).not.toHaveBeenCalled();
expect(onToolResult).not.toHaveBeenCalled();
});
it("announces auto-compaction in verbose mode and tracks count", async () => {
const storePath = path.join(
await fs.mkdtemp(path.join(tmpdir(), "clawdbot-compaction-")),
"sessions.json",
);
const sessionEntry = { sessionId: "session", updatedAt: Date.now() };
const sessionStore = { main: sessionEntry };
runEmbeddedPiAgentMock.mockImplementationOnce(
async (params: {
onAgentEvent?: (evt: {
stream: string;
data: Record<string, unknown>;
}) => void;
}) => {
params.onAgentEvent?.({
stream: "compaction",
data: { phase: "end", willRetry: false },
});
return { payloads: [{ text: "final" }], meta: {} };
},
);
const { run } = createMinimalRun({
resolvedVerboseLevel: "on",
sessionEntry,
sessionStore,
sessionKey: "main",
storePath,
});
const res = await run();
expect(Array.isArray(res)).toBe(true);
const payloads = res as { text?: string }[];
expect(payloads[0]?.text).toContain("Auto-compaction complete");
expect(payloads[0]?.text).toContain("count 1");
expect(sessionStore.main.compactionCount).toBe(1);
});
it("resets corrupted Gemini sessions and deletes transcripts", async () => {
const prevStateDir = process.env.CLAWDBOT_STATE_DIR;
const stateDir = await fs.mkdtemp(
path.join(tmpdir(), "clawdbot-session-reset-"),
);
process.env.CLAWDBOT_STATE_DIR = stateDir;
try {
const sessionId = "session-corrupt";
const storePath = path.join(stateDir, "sessions", "sessions.json");
const sessionEntry = { sessionId, updatedAt: Date.now() };
const sessionStore = { main: sessionEntry };
await fs.mkdir(path.dirname(storePath), { recursive: true });
await fs.writeFile(storePath, JSON.stringify(sessionStore), "utf-8");
const transcriptPath = sessions.resolveSessionTranscriptPath(sessionId);
await fs.mkdir(path.dirname(transcriptPath), { recursive: true });
await fs.writeFile(transcriptPath, "bad", "utf-8");
runEmbeddedPiAgentMock.mockImplementationOnce(async () => {
throw new Error(
"function call turn comes immediately after a user turn or after a function response turn",
);
});
const { run } = createMinimalRun({
sessionEntry,
sessionStore,
sessionKey: "main",
storePath,
});
const res = await run();
expect(res).toMatchObject({
text: expect.stringContaining("Session history was corrupted"),
});
expect(sessionStore.main).toBeUndefined();
await expect(fs.access(transcriptPath)).rejects.toThrow();
const persisted = JSON.parse(await fs.readFile(storePath, "utf-8"));
expect(persisted.main).toBeUndefined();
} finally {
if (prevStateDir) {
process.env.CLAWDBOT_STATE_DIR = prevStateDir;
} else {
delete process.env.CLAWDBOT_STATE_DIR;
}
}
});
it("keeps sessions intact on other errors", async () => {
const prevStateDir = process.env.CLAWDBOT_STATE_DIR;
const stateDir = await fs.mkdtemp(
path.join(tmpdir(), "clawdbot-session-noreset-"),
);
process.env.CLAWDBOT_STATE_DIR = stateDir;
try {
const sessionId = "session-ok";
const storePath = path.join(stateDir, "sessions", "sessions.json");
const sessionEntry = { sessionId, updatedAt: Date.now() };
const sessionStore = { main: sessionEntry };
await fs.mkdir(path.dirname(storePath), { recursive: true });
await fs.writeFile(storePath, JSON.stringify(sessionStore), "utf-8");
const transcriptPath = sessions.resolveSessionTranscriptPath(sessionId);
await fs.mkdir(path.dirname(transcriptPath), { recursive: true });
await fs.writeFile(transcriptPath, "ok", "utf-8");
runEmbeddedPiAgentMock.mockImplementationOnce(async () => {
throw new Error("INVALID_ARGUMENT: some other failure");
});
const { run } = createMinimalRun({
sessionEntry,
sessionStore,
sessionKey: "main",
storePath,
});
const res = await run();
expect(res).toMatchObject({
text: expect.stringContaining("Agent failed before reply"),
});
expect(sessionStore.main).toBeDefined();
await expect(fs.access(transcriptPath)).resolves.toBeUndefined();
const persisted = JSON.parse(await fs.readFile(storePath, "utf-8"));
expect(persisted.main).toBeDefined();
} finally {
if (prevStateDir) {
process.env.CLAWDBOT_STATE_DIR = prevStateDir;
} else {
delete process.env.CLAWDBOT_STATE_DIR;
}
}
});
it("retries after compaction failure by resetting the session", async () => {
const prevStateDir = process.env.CLAWDBOT_STATE_DIR;
const stateDir = await fs.mkdtemp(
path.join(tmpdir(), "clawdbot-session-compaction-reset-"),
);
process.env.CLAWDBOT_STATE_DIR = stateDir;
try {
const sessionId = "session";
const storePath = path.join(stateDir, "sessions", "sessions.json");
const sessionEntry = { sessionId, updatedAt: Date.now() };
const sessionStore = { main: sessionEntry };
await fs.mkdir(path.dirname(storePath), { recursive: true });
await fs.writeFile(storePath, JSON.stringify(sessionStore), "utf-8");
runEmbeddedPiAgentMock
.mockImplementationOnce(async () => {
throw new Error(
'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}',
);
})
.mockImplementationOnce(async () => ({
payloads: [{ text: "ok" }],
meta: {},
}));
const callsBefore = runEmbeddedPiAgentMock.mock.calls.length;
const { run } = createMinimalRun({
sessionEntry,
sessionStore,
sessionKey: "main",
storePath,
});
const res = await run();
expect(runEmbeddedPiAgentMock.mock.calls.length - callsBefore).toBe(2);
const payload = Array.isArray(res) ? res[0] : res;
expect(payload).toMatchObject({ text: "ok" });
expect(sessionStore.main.sessionId).not.toBe(sessionId);
const persisted = JSON.parse(await fs.readFile(storePath, "utf-8"));
expect(persisted.main.sessionId).toBe(sessionStore.main.sessionId);
} finally {
if (prevStateDir) {
process.env.CLAWDBOT_STATE_DIR = prevStateDir;
} else {
delete process.env.CLAWDBOT_STATE_DIR;
}
}
});
it("retries after context overflow payload by resetting the session", async () => {
const prevStateDir = process.env.CLAWDBOT_STATE_DIR;
const stateDir = await fs.mkdtemp(
path.join(tmpdir(), "clawdbot-session-overflow-reset-"),
);
process.env.CLAWDBOT_STATE_DIR = stateDir;
try {
const sessionId = "session";
const storePath = path.join(stateDir, "sessions", "sessions.json");
const sessionEntry = { sessionId, updatedAt: Date.now() };
const sessionStore = { main: sessionEntry };
await fs.mkdir(path.dirname(storePath), { recursive: true });
await fs.writeFile(storePath, JSON.stringify(sessionStore), "utf-8");
runEmbeddedPiAgentMock
.mockImplementationOnce(async () => ({
payloads: [
{ text: "Context overflow: prompt too large", isError: true },
],
meta: {
durationMs: 1,
error: {
kind: "context_overflow",
message:
'Context overflow: Summarization failed: 400 {"message":"prompt is too long"}',
},
},
}))
.mockImplementationOnce(async () => ({
payloads: [{ text: "ok" }],
meta: { durationMs: 1 },
}));
const callsBefore = runEmbeddedPiAgentMock.mock.calls.length;
const { run } = createMinimalRun({
sessionEntry,
sessionStore,
sessionKey: "main",
storePath,
});
const res = await run();
expect(runEmbeddedPiAgentMock.mock.calls.length - callsBefore).toBe(2);
const payload = Array.isArray(res) ? res[0] : res;
expect(payload).toMatchObject({ text: "ok" });
expect(sessionStore.main.sessionId).not.toBe(sessionId);
const persisted = JSON.parse(await fs.readFile(storePath, "utf-8"));
expect(persisted.main.sessionId).toBe(sessionStore.main.sessionId);
} finally {
if (prevStateDir) {
process.env.CLAWDBOT_STATE_DIR = prevStateDir;
} else {
delete process.env.CLAWDBOT_STATE_DIR;
}
}
});
it("still replies even if session reset fails to persist", async () => {
const prevStateDir = process.env.CLAWDBOT_STATE_DIR;
const stateDir = await fs.mkdtemp(
path.join(tmpdir(), "clawdbot-session-reset-fail-"),
);
process.env.CLAWDBOT_STATE_DIR = stateDir;
const saveSpy = vi
.spyOn(sessions, "saveSessionStore")
.mockRejectedValueOnce(new Error("boom"));
try {
const sessionId = "session-corrupt";
const storePath = path.join(stateDir, "sessions", "sessions.json");
const sessionEntry = { sessionId, updatedAt: Date.now() };
const sessionStore = { main: sessionEntry };
const transcriptPath = sessions.resolveSessionTranscriptPath(sessionId);
await fs.mkdir(path.dirname(transcriptPath), { recursive: true });
await fs.writeFile(transcriptPath, "bad", "utf-8");
runEmbeddedPiAgentMock.mockImplementationOnce(async () => {
throw new Error(
"function call turn comes immediately after a user turn or after a function response turn",
);
});
const { run } = createMinimalRun({
sessionEntry,
sessionStore,
sessionKey: "main",
storePath,
});
const res = await run();
expect(res).toMatchObject({
text: expect.stringContaining("Session history was corrupted"),
});
expect(sessionStore.main).toBeUndefined();
await expect(fs.access(transcriptPath)).rejects.toThrow();
} finally {
saveSpy.mockRestore();
if (prevStateDir) {
process.env.CLAWDBOT_STATE_DIR = prevStateDir;
} else {
delete process.env.CLAWDBOT_STATE_DIR;
}
}
});
it("rewrites Bun socket errors into friendly text", async () => {
runEmbeddedPiAgentMock.mockImplementationOnce(async () => ({
payloads: [
{
text: "TypeError: The socket connection was closed unexpectedly. For more information, pass `verbose: true` in the second argument to fetch()",
isError: true,
},
],
meta: {},
}));
const { run } = createMinimalRun();
const res = await run();
const payloads = Array.isArray(res) ? res : res ? [res] : [];
expect(payloads.length).toBe(1);
expect(payloads[0]?.text).toContain("LLM connection failed");
expect(payloads[0]?.text).toContain(
"socket connection was closed unexpectedly",
);
expect(payloads[0]?.text).toContain("```");
});
});

View File

@@ -0,0 +1,261 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { describe, expect, it, vi } from "vitest";
import type { TemplateContext } from "../templating.js";
import { DEFAULT_MEMORY_FLUSH_PROMPT } from "./memory-flush.js";
import type { FollowupRun, QueueSettings } from "./queue.js";
import { createMockTypingController } from "./test-helpers.js";
// Shared mocks for the memory-flush tests: capture embedded/CLI agent
// invocations so assertions can inspect the prompts each run received.
const runEmbeddedPiAgentMock = vi.fn();
const runCliAgentMock = vi.fn();
// Subset of the params runEmbeddedPiAgent receives that these tests use.
type EmbeddedRunParams = {
  prompt?: string;
  extraSystemPrompt?: string;
  onAgentEvent?: (evt: {
    stream?: string;
    data?: { phase?: string; willRetry?: boolean };
  }) => void;
};
// Model fallback passes straight through to the primary provider/model.
vi.mock("../../agents/model-fallback.js", () => ({
  runWithModelFallback: async ({
    provider,
    model,
    run,
  }: {
    provider: string;
    model: string;
    run: (provider: string, model: string) => Promise<unknown>;
  }) => ({
    result: await run(provider, model),
    provider,
    model,
  }),
}));
vi.mock("../../agents/cli-runner.js", () => ({
  runCliAgent: (params: unknown) => runCliAgentMock(params),
}));
vi.mock("../../agents/pi-embedded.js", () => ({
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: (params: unknown) => runEmbeddedPiAgentMock(params),
}));
// Keep the real queue module but stub out its scheduling side effects.
vi.mock("./queue.js", async () => {
  const actual =
    await vi.importActual<typeof import("./queue.js")>("./queue.js");
  return {
    ...actual,
    enqueueFollowupRun: vi.fn(),
    scheduleFollowupDrain: vi.fn(),
  };
});
import { runReplyAgent } from "./agent-runner.js";
// Writes a fresh session-store JSON file containing a single keyed entry,
// creating parent directories as needed.
async function seedSessionStore(args: {
  storePath: string;
  sessionKey: string;
  entry: Record<string, unknown>;
}) {
  const { storePath, sessionKey, entry } = args;
  const serialized = JSON.stringify({ [sessionKey]: entry }, null, 2);
  await fs.mkdir(path.dirname(storePath), { recursive: true });
  await fs.writeFile(storePath, serialized, "utf-8");
}
// Builds the common fixtures for a runReplyAgent invocation: a mock typing
// controller, a WhatsApp-flavored session context, an interrupt-mode queue,
// and a followup run whose `run` settings can be overridden per test.
// NOTE(review): `storePath` is accepted but never read here — tests pass it
// to runReplyAgent separately; confirm whether it can be dropped.
function createBaseRun(params: {
  storePath: string;
  sessionEntry: Record<string, unknown>;
  config?: Record<string, unknown>;
  runOverrides?: Partial<FollowupRun["run"]>;
}) {
  const typing = createMockTypingController();
  const sessionCtx = {
    Provider: "whatsapp",
    OriginatingTo: "+15550001111",
    AccountId: "primary",
    MessageSid: "msg",
  } as unknown as TemplateContext;
  const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings;
  const followupRun = {
    prompt: "hello",
    summaryLine: "hello",
    enqueuedAt: Date.now(),
    run: {
      agentId: "main",
      agentDir: "/tmp/agent",
      sessionId: "session",
      sessionKey: "main",
      messageProvider: "whatsapp",
      sessionFile: "/tmp/session.jsonl",
      workspaceDir: "/tmp",
      config: params.config ?? {},
      skillsSnapshot: {},
      provider: "anthropic",
      model: "claude",
      thinkLevel: "low",
      verboseLevel: "off",
      elevatedLevel: "off",
      bashElevated: {
        enabled: false,
        allowed: false,
        defaultLevel: "off",
      },
      timeoutMs: 1_000,
      blockReplyBreak: "message_end",
    },
  } as unknown as FollowupRun;
  // Apply per-test overrides while keeping the config chosen above.
  const run = {
    ...followupRun.run,
    ...params.runOverrides,
    config: params.config ?? followupRun.run.config,
  };
  return {
    typing,
    sessionCtx,
    resolvedQueue,
    followupRun: { ...followupRun, run },
  };
}
describe("runReplyAgent memory flush", () => {
it("runs a memory flush turn and updates session metadata", async () => {
runEmbeddedPiAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 1,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
const calls: Array<{ prompt?: string }> = [];
runEmbeddedPiAgentMock.mockImplementation(
async (params: EmbeddedRunParams) => {
calls.push({ prompt: params.prompt });
if (params.prompt === DEFAULT_MEMORY_FLUSH_PROMPT) {
return { payloads: [], meta: {} };
}
return {
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
};
},
);
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
expect(calls.map((call) => call.prompt)).toEqual([
DEFAULT_MEMORY_FLUSH_PROMPT,
"hello",
]);
const stored = JSON.parse(await fs.readFile(storePath, "utf-8"));
expect(stored[sessionKey].memoryFlushAt).toBeTypeOf("number");
expect(stored[sessionKey].memoryFlushCompactionCount).toBe(1);
});
it("skips memory flush when disabled in config", async () => {
runEmbeddedPiAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 1,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
runEmbeddedPiAgentMock.mockImplementation(
async (_params: EmbeddedRunParams) => ({
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
}),
);
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
config: {
agents: {
defaults: { compaction: { memoryFlush: { enabled: false } } },
},
},
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
expect(runEmbeddedPiAgentMock).toHaveBeenCalledTimes(1);
const call = runEmbeddedPiAgentMock.mock.calls[0]?.[0] as
| { prompt?: string }
| undefined;
expect(call?.prompt).toBe("hello");
const stored = JSON.parse(await fs.readFile(storePath, "utf-8"));
expect(stored[sessionKey].memoryFlushAt).toBeUndefined();
});
});

View File

@@ -0,0 +1,196 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { describe, expect, it, vi } from "vitest";
import type { TemplateContext } from "../templating.js";
import type { FollowupRun, QueueSettings } from "./queue.js";
import { createMockTypingController } from "./test-helpers.js";
// Shared mocks for the memory-flush tests: capture embedded/CLI agent
// invocations so assertions can inspect the prompts each run received.
const runEmbeddedPiAgentMock = vi.fn();
const runCliAgentMock = vi.fn();
// Subset of the params runEmbeddedPiAgent receives that these tests use.
type EmbeddedRunParams = {
  prompt?: string;
  extraSystemPrompt?: string;
  onAgentEvent?: (evt: {
    stream?: string;
    data?: { phase?: string; willRetry?: boolean };
  }) => void;
};
// Model fallback passes straight through to the primary provider/model.
vi.mock("../../agents/model-fallback.js", () => ({
  runWithModelFallback: async ({
    provider,
    model,
    run,
  }: {
    provider: string;
    model: string;
    run: (provider: string, model: string) => Promise<unknown>;
  }) => ({
    result: await run(provider, model),
    provider,
    model,
  }),
}));
vi.mock("../../agents/cli-runner.js", () => ({
  runCliAgent: (params: unknown) => runCliAgentMock(params),
}));
vi.mock("../../agents/pi-embedded.js", () => ({
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: (params: unknown) => runEmbeddedPiAgentMock(params),
}));
// Keep the real queue module but stub out its scheduling side effects.
vi.mock("./queue.js", async () => {
  const actual =
    await vi.importActual<typeof import("./queue.js")>("./queue.js");
  return {
    ...actual,
    enqueueFollowupRun: vi.fn(),
    scheduleFollowupDrain: vi.fn(),
  };
});
import { runReplyAgent } from "./agent-runner.js";
// Writes a fresh session-store JSON file containing a single keyed entry,
// creating parent directories as needed.
async function seedSessionStore(args: {
  storePath: string;
  sessionKey: string;
  entry: Record<string, unknown>;
}) {
  const { storePath, sessionKey, entry } = args;
  const serialized = JSON.stringify({ [sessionKey]: entry }, null, 2);
  await fs.mkdir(path.dirname(storePath), { recursive: true });
  await fs.writeFile(storePath, serialized, "utf-8");
}
// Builds the common fixtures for a runReplyAgent invocation: a mock typing
// controller, a WhatsApp-flavored session context, an interrupt-mode queue,
// and a followup run whose `run` settings can be overridden per test.
// NOTE(review): `storePath` is accepted but never read here — tests pass it
// to runReplyAgent separately; confirm whether it can be dropped.
function createBaseRun(params: {
  storePath: string;
  sessionEntry: Record<string, unknown>;
  config?: Record<string, unknown>;
  runOverrides?: Partial<FollowupRun["run"]>;
}) {
  const typing = createMockTypingController();
  const sessionCtx = {
    Provider: "whatsapp",
    OriginatingTo: "+15550001111",
    AccountId: "primary",
    MessageSid: "msg",
  } as unknown as TemplateContext;
  const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings;
  const followupRun = {
    prompt: "hello",
    summaryLine: "hello",
    enqueuedAt: Date.now(),
    run: {
      agentId: "main",
      agentDir: "/tmp/agent",
      sessionId: "session",
      sessionKey: "main",
      messageProvider: "whatsapp",
      sessionFile: "/tmp/session.jsonl",
      workspaceDir: "/tmp",
      config: params.config ?? {},
      skillsSnapshot: {},
      provider: "anthropic",
      model: "claude",
      thinkLevel: "low",
      verboseLevel: "off",
      elevatedLevel: "off",
      bashElevated: {
        enabled: false,
        allowed: false,
        defaultLevel: "off",
      },
      timeoutMs: 1_000,
      blockReplyBreak: "message_end",
    },
  } as unknown as FollowupRun;
  // Apply per-test overrides while keeping the config chosen above.
  const run = {
    ...followupRun.run,
    ...params.runOverrides,
    config: params.config ?? followupRun.run.config,
  };
  return {
    typing,
    sessionCtx,
    resolvedQueue,
    followupRun: { ...followupRun, run },
  };
}
describe("runReplyAgent memory flush", () => {
it("skips memory flush for CLI providers", async () => {
runEmbeddedPiAgentMock.mockReset();
runCliAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 1,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
const calls: Array<{ prompt?: string }> = [];
runEmbeddedPiAgentMock.mockImplementation(
async (params: EmbeddedRunParams) => {
calls.push({ prompt: params.prompt });
return {
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
};
},
);
runCliAgentMock.mockResolvedValue({
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
});
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
runOverrides: { provider: "codex-cli" },
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
expect(runCliAgentMock).toHaveBeenCalledTimes(1);
const call = runCliAgentMock.mock.calls[0]?.[0] as
| { prompt?: string }
| undefined;
expect(call?.prompt).toBe("hello");
expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled();
});
});

View File

@@ -0,0 +1,266 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { describe, expect, it, vi } from "vitest";
import type { TemplateContext } from "../templating.js";
import { DEFAULT_MEMORY_FLUSH_PROMPT } from "./memory-flush.js";
import type { FollowupRun, QueueSettings } from "./queue.js";
import { createMockTypingController } from "./test-helpers.js";
// Shared mocks for the memory-flush tests: capture embedded/CLI agent
// invocations so assertions can inspect the prompts each run received.
const runEmbeddedPiAgentMock = vi.fn();
const runCliAgentMock = vi.fn();
// Subset of the params runEmbeddedPiAgent receives that these tests use.
type EmbeddedRunParams = {
  prompt?: string;
  extraSystemPrompt?: string;
  onAgentEvent?: (evt: {
    stream?: string;
    data?: { phase?: string; willRetry?: boolean };
  }) => void;
};
// Model fallback passes straight through to the primary provider/model.
vi.mock("../../agents/model-fallback.js", () => ({
  runWithModelFallback: async ({
    provider,
    model,
    run,
  }: {
    provider: string;
    model: string;
    run: (provider: string, model: string) => Promise<unknown>;
  }) => ({
    result: await run(provider, model),
    provider,
    model,
  }),
}));
vi.mock("../../agents/cli-runner.js", () => ({
  runCliAgent: (params: unknown) => runCliAgentMock(params),
}));
vi.mock("../../agents/pi-embedded.js", () => ({
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: (params: unknown) => runEmbeddedPiAgentMock(params),
}));
// Keep the real queue module but stub out its scheduling side effects.
vi.mock("./queue.js", async () => {
  const actual =
    await vi.importActual<typeof import("./queue.js")>("./queue.js");
  return {
    ...actual,
    enqueueFollowupRun: vi.fn(),
    scheduleFollowupDrain: vi.fn(),
  };
});
import { runReplyAgent } from "./agent-runner.js";
// Writes a fresh session-store JSON file containing a single keyed entry,
// creating parent directories as needed.
async function seedSessionStore(args: {
  storePath: string;
  sessionKey: string;
  entry: Record<string, unknown>;
}) {
  const { storePath, sessionKey, entry } = args;
  const serialized = JSON.stringify({ [sessionKey]: entry }, null, 2);
  await fs.mkdir(path.dirname(storePath), { recursive: true });
  await fs.writeFile(storePath, serialized, "utf-8");
}
// Builds the common fixtures for a runReplyAgent invocation: a mock typing
// controller, a WhatsApp-flavored session context, an interrupt-mode queue,
// and a followup run whose `run` settings can be overridden per test.
// NOTE(review): `storePath` is accepted but never read here — tests pass it
// to runReplyAgent separately; confirm whether it can be dropped.
function createBaseRun(params: {
  storePath: string;
  sessionEntry: Record<string, unknown>;
  config?: Record<string, unknown>;
  runOverrides?: Partial<FollowupRun["run"]>;
}) {
  const typing = createMockTypingController();
  const sessionCtx = {
    Provider: "whatsapp",
    OriginatingTo: "+15550001111",
    AccountId: "primary",
    MessageSid: "msg",
  } as unknown as TemplateContext;
  const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings;
  const followupRun = {
    prompt: "hello",
    summaryLine: "hello",
    enqueuedAt: Date.now(),
    run: {
      agentId: "main",
      agentDir: "/tmp/agent",
      sessionId: "session",
      sessionKey: "main",
      messageProvider: "whatsapp",
      sessionFile: "/tmp/session.jsonl",
      workspaceDir: "/tmp",
      config: params.config ?? {},
      skillsSnapshot: {},
      provider: "anthropic",
      model: "claude",
      thinkLevel: "low",
      verboseLevel: "off",
      elevatedLevel: "off",
      bashElevated: {
        enabled: false,
        allowed: false,
        defaultLevel: "off",
      },
      timeoutMs: 1_000,
      blockReplyBreak: "message_end",
    },
  } as unknown as FollowupRun;
  // Apply per-test overrides while keeping the config chosen above.
  const run = {
    ...followupRun.run,
    ...params.runOverrides,
    config: params.config ?? followupRun.run.config,
  };
  return {
    typing,
    sessionCtx,
    resolvedQueue,
    followupRun: { ...followupRun, run },
  };
}
describe("runReplyAgent memory flush", () => {
it("uses configured prompts for memory flush runs", async () => {
runEmbeddedPiAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 1,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
const calls: Array<EmbeddedRunParams> = [];
runEmbeddedPiAgentMock.mockImplementation(
async (params: EmbeddedRunParams) => {
calls.push(params);
if (params.prompt === DEFAULT_MEMORY_FLUSH_PROMPT) {
return { payloads: [], meta: {} };
}
return {
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
};
},
);
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
config: {
agents: {
defaults: {
compaction: {
memoryFlush: {
prompt: "Write notes.",
systemPrompt: "Flush memory now.",
},
},
},
},
},
runOverrides: { extraSystemPrompt: "extra system" },
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
const flushCall = calls[0];
expect(flushCall?.prompt).toContain("Write notes.");
expect(flushCall?.prompt).toContain("NO_REPLY");
expect(flushCall?.extraSystemPrompt).toContain("extra system");
expect(flushCall?.extraSystemPrompt).toContain("Flush memory now.");
expect(flushCall?.extraSystemPrompt).toContain("NO_REPLY");
expect(calls[1]?.prompt).toBe("hello");
});
it("skips memory flush after a prior flush in the same compaction cycle", async () => {
runEmbeddedPiAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 2,
memoryFlushCompactionCount: 2,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
const calls: Array<{ prompt?: string }> = [];
runEmbeddedPiAgentMock.mockImplementation(
async (params: EmbeddedRunParams) => {
calls.push({ prompt: params.prompt });
return {
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
};
},
);
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
expect(calls.map((call) => call.prompt)).toEqual(["hello"]);
});
});

View File

@@ -0,0 +1,259 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { describe, expect, it, vi } from "vitest";
import type { TemplateContext } from "../templating.js";
import type { FollowupRun, QueueSettings } from "./queue.js";
import { createMockTypingController } from "./test-helpers.js";
// Shared mocks for the memory-flush tests: capture embedded/CLI agent
// invocations so assertions can inspect the prompts each run received.
const runEmbeddedPiAgentMock = vi.fn();
const runCliAgentMock = vi.fn();
// Subset of the params runEmbeddedPiAgent receives that these tests use.
type EmbeddedRunParams = {
  prompt?: string;
  extraSystemPrompt?: string;
  onAgentEvent?: (evt: {
    stream?: string;
    data?: { phase?: string; willRetry?: boolean };
  }) => void;
};
// Model fallback passes straight through to the primary provider/model.
vi.mock("../../agents/model-fallback.js", () => ({
  runWithModelFallback: async ({
    provider,
    model,
    run,
  }: {
    provider: string;
    model: string;
    run: (provider: string, model: string) => Promise<unknown>;
  }) => ({
    result: await run(provider, model),
    provider,
    model,
  }),
}));
vi.mock("../../agents/cli-runner.js", () => ({
  runCliAgent: (params: unknown) => runCliAgentMock(params),
}));
vi.mock("../../agents/pi-embedded.js", () => ({
  queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
  runEmbeddedPiAgent: (params: unknown) => runEmbeddedPiAgentMock(params),
}));
// Keep the real queue module but stub out its scheduling side effects.
vi.mock("./queue.js", async () => {
  const actual =
    await vi.importActual<typeof import("./queue.js")>("./queue.js");
  return {
    ...actual,
    enqueueFollowupRun: vi.fn(),
    scheduleFollowupDrain: vi.fn(),
  };
});
import { runReplyAgent } from "./agent-runner.js";
// Writes a fresh session-store JSON file containing a single keyed entry,
// creating parent directories as needed.
async function seedSessionStore(args: {
  storePath: string;
  sessionKey: string;
  entry: Record<string, unknown>;
}) {
  const { storePath, sessionKey, entry } = args;
  const serialized = JSON.stringify({ [sessionKey]: entry }, null, 2);
  await fs.mkdir(path.dirname(storePath), { recursive: true });
  await fs.writeFile(storePath, serialized, "utf-8");
}
// Builds the common fixtures for a runReplyAgent invocation: a mock typing
// controller, a WhatsApp-flavored session context, an interrupt-mode queue,
// and a followup run whose `run` settings can be overridden per test.
// NOTE(review): `storePath` is accepted but never read here — tests pass it
// to runReplyAgent separately; confirm whether it can be dropped.
function createBaseRun(params: {
  storePath: string;
  sessionEntry: Record<string, unknown>;
  config?: Record<string, unknown>;
  runOverrides?: Partial<FollowupRun["run"]>;
}) {
  const typing = createMockTypingController();
  const sessionCtx = {
    Provider: "whatsapp",
    OriginatingTo: "+15550001111",
    AccountId: "primary",
    MessageSid: "msg",
  } as unknown as TemplateContext;
  const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings;
  const followupRun = {
    prompt: "hello",
    summaryLine: "hello",
    enqueuedAt: Date.now(),
    run: {
      agentId: "main",
      agentDir: "/tmp/agent",
      sessionId: "session",
      sessionKey: "main",
      messageProvider: "whatsapp",
      sessionFile: "/tmp/session.jsonl",
      workspaceDir: "/tmp",
      config: params.config ?? {},
      skillsSnapshot: {},
      provider: "anthropic",
      model: "claude",
      thinkLevel: "low",
      verboseLevel: "off",
      elevatedLevel: "off",
      bashElevated: {
        enabled: false,
        allowed: false,
        defaultLevel: "off",
      },
      timeoutMs: 1_000,
      blockReplyBreak: "message_end",
    },
  } as unknown as FollowupRun;
  // Apply per-test overrides while keeping the config chosen above.
  const run = {
    ...followupRun.run,
    ...params.runOverrides,
    config: params.config ?? followupRun.run.config,
  };
  return {
    typing,
    sessionCtx,
    resolvedQueue,
    followupRun: { ...followupRun, run },
  };
}
describe("runReplyAgent memory flush", () => {
it("skips memory flush when the sandbox workspace is read-only", async () => {
runEmbeddedPiAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 1,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
const calls: Array<{ prompt?: string }> = [];
runEmbeddedPiAgentMock.mockImplementation(
async (params: EmbeddedRunParams) => {
calls.push({ prompt: params.prompt });
return {
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
};
},
);
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
config: {
agents: {
defaults: {
sandbox: { mode: "all", workspaceAccess: "ro" },
},
},
},
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
expect(calls.map((call) => call.prompt)).toEqual(["hello"]);
const stored = JSON.parse(await fs.readFile(storePath, "utf-8"));
expect(stored[sessionKey].memoryFlushAt).toBeUndefined();
});
it("skips memory flush when the sandbox workspace is none", async () => {
runEmbeddedPiAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 1,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
const calls: Array<{ prompt?: string }> = [];
runEmbeddedPiAgentMock.mockImplementation(
async (params: EmbeddedRunParams) => {
calls.push({ prompt: params.prompt });
return {
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
};
},
);
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
config: {
agents: {
defaults: {
sandbox: { mode: "all", workspaceAccess: "none" },
},
},
},
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
expect(calls.map((call) => call.prompt)).toEqual(["hello"]);
});
});

View File

@@ -0,0 +1,193 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { describe, expect, it, vi } from "vitest";
import type { TemplateContext } from "../templating.js";
import { DEFAULT_MEMORY_FLUSH_PROMPT } from "./memory-flush.js";
import type { FollowupRun, QueueSettings } from "./queue.js";
import { createMockTypingController } from "./test-helpers.js";
const runEmbeddedPiAgentMock = vi.fn();
const runCliAgentMock = vi.fn();
type EmbeddedRunParams = {
prompt?: string;
extraSystemPrompt?: string;
onAgentEvent?: (evt: {
stream?: string;
data?: { phase?: string; willRetry?: boolean };
}) => void;
};
vi.mock("../../agents/model-fallback.js", () => ({
runWithModelFallback: async ({
provider,
model,
run,
}: {
provider: string;
model: string;
run: (provider: string, model: string) => Promise<unknown>;
}) => ({
result: await run(provider, model),
provider,
model,
}),
}));
vi.mock("../../agents/cli-runner.js", () => ({
runCliAgent: (params: unknown) => runCliAgentMock(params),
}));
vi.mock("../../agents/pi-embedded.js", () => ({
queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
runEmbeddedPiAgent: (params: unknown) => runEmbeddedPiAgentMock(params),
}));
vi.mock("./queue.js", async () => {
const actual =
await vi.importActual<typeof import("./queue.js")>("./queue.js");
return {
...actual,
enqueueFollowupRun: vi.fn(),
scheduleFollowupDrain: vi.fn(),
};
});
import { runReplyAgent } from "./agent-runner.js";
/**
 * Test fixture helper: persist a single-entry session store as pretty-printed
 * JSON at `storePath`, creating parent directories as needed.
 */
async function seedSessionStore(params: {
  storePath: string;
  sessionKey: string;
  entry: Record<string, unknown>;
}) {
  const { storePath, sessionKey, entry } = params;
  await fs.mkdir(path.dirname(storePath), { recursive: true });
  const serialized = JSON.stringify({ [sessionKey]: entry }, null, 2);
  await fs.writeFile(storePath, serialized, "utf-8");
}
/**
 * Build the baseline inputs runReplyAgent needs for these tests: a mock
 * typing controller, a minimal template context, interrupt-mode queue
 * settings, and a followup run with optional config / run-field overrides.
 */
function createBaseRun(params: {
  storePath: string;
  sessionEntry: Record<string, unknown>;
  config?: Record<string, unknown>;
  runOverrides?: Partial<FollowupRun["run"]>;
}) {
  const typing = createMockTypingController();
  const sessionCtx = {
    Provider: "whatsapp",
    OriginatingTo: "+15550001111",
    AccountId: "primary",
    MessageSid: "msg",
  } as unknown as TemplateContext;
  const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings;
  const baseRun = {
    agentId: "main",
    agentDir: "/tmp/agent",
    sessionId: "session",
    sessionKey: "main",
    messageProvider: "whatsapp",
    sessionFile: "/tmp/session.jsonl",
    workspaceDir: "/tmp",
    config: params.config ?? {},
    skillsSnapshot: {},
    provider: "anthropic",
    model: "claude",
    thinkLevel: "low",
    verboseLevel: "off",
    elevatedLevel: "off",
    bashElevated: {
      enabled: false,
      allowed: false,
      defaultLevel: "off",
    },
    timeoutMs: 1_000,
    blockReplyBreak: "message_end",
  };
  const followupRun = {
    prompt: "hello",
    summaryLine: "hello",
    enqueuedAt: Date.now(),
    run: baseRun,
  } as unknown as FollowupRun;
  // Layer overrides on top of the base run; an explicit `config` always wins
  // over anything supplied via runOverrides.
  const mergedRun = {
    ...followupRun.run,
    ...params.runOverrides,
    config: params.config ?? followupRun.run.config,
  };
  return {
    typing,
    sessionCtx,
    resolvedQueue,
    followupRun: { ...followupRun, run: mergedRun },
  };
}
describe("runReplyAgent memory flush", () => {
it("increments compaction count when flush compaction completes", async () => {
runEmbeddedPiAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 1,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
runEmbeddedPiAgentMock.mockImplementation(
async (params: EmbeddedRunParams) => {
if (params.prompt === DEFAULT_MEMORY_FLUSH_PROMPT) {
params.onAgentEvent?.({
stream: "compaction",
data: { phase: "end", willRetry: false },
});
return { payloads: [], meta: {} };
}
return {
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
};
},
);
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
const stored = JSON.parse(await fs.readFile(storePath, "utf-8"));
expect(stored[sessionKey].compactionCount).toBe(2);
expect(stored[sessionKey].memoryFlushCompactionCount).toBe(2);
});
});

View File

@@ -1,670 +0,0 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { describe, expect, it, vi } from "vitest";
import type { TemplateContext } from "../templating.js";
import { DEFAULT_MEMORY_FLUSH_PROMPT } from "./memory-flush.js";
import type { FollowupRun, QueueSettings } from "./queue.js";
import { createMockTypingController } from "./test-helpers.js";
const runEmbeddedPiAgentMock = vi.fn();
const runCliAgentMock = vi.fn();
type EmbeddedRunParams = {
prompt?: string;
extraSystemPrompt?: string;
onAgentEvent?: (evt: {
stream?: string;
data?: { phase?: string; willRetry?: boolean };
}) => void;
};
vi.mock("../../agents/model-fallback.js", () => ({
runWithModelFallback: async ({
provider,
model,
run,
}: {
provider: string;
model: string;
run: (provider: string, model: string) => Promise<unknown>;
}) => ({
result: await run(provider, model),
provider,
model,
}),
}));
vi.mock("../../agents/cli-runner.js", () => ({
runCliAgent: (params: unknown) => runCliAgentMock(params),
}));
vi.mock("../../agents/pi-embedded.js", () => ({
queueEmbeddedPiMessage: vi.fn().mockReturnValue(false),
runEmbeddedPiAgent: (params: unknown) => runEmbeddedPiAgentMock(params),
}));
vi.mock("./queue.js", async () => {
const actual =
await vi.importActual<typeof import("./queue.js")>("./queue.js");
return {
...actual,
enqueueFollowupRun: vi.fn(),
scheduleFollowupDrain: vi.fn(),
};
});
import { runReplyAgent } from "./agent-runner.js";
async function seedSessionStore(params: {
storePath: string;
sessionKey: string;
entry: Record<string, unknown>;
}) {
await fs.mkdir(path.dirname(params.storePath), { recursive: true });
await fs.writeFile(
params.storePath,
JSON.stringify({ [params.sessionKey]: params.entry }, null, 2),
"utf-8",
);
}
function createBaseRun(params: {
storePath: string;
sessionEntry: Record<string, unknown>;
config?: Record<string, unknown>;
runOverrides?: Partial<FollowupRun["run"]>;
}) {
const typing = createMockTypingController();
const sessionCtx = {
Provider: "whatsapp",
OriginatingTo: "+15550001111",
AccountId: "primary",
MessageSid: "msg",
} as unknown as TemplateContext;
const resolvedQueue = { mode: "interrupt" } as unknown as QueueSettings;
const followupRun = {
prompt: "hello",
summaryLine: "hello",
enqueuedAt: Date.now(),
run: {
agentId: "main",
agentDir: "/tmp/agent",
sessionId: "session",
sessionKey: "main",
messageProvider: "whatsapp",
sessionFile: "/tmp/session.jsonl",
workspaceDir: "/tmp",
config: params.config ?? {},
skillsSnapshot: {},
provider: "anthropic",
model: "claude",
thinkLevel: "low",
verboseLevel: "off",
elevatedLevel: "off",
bashElevated: {
enabled: false,
allowed: false,
defaultLevel: "off",
},
timeoutMs: 1_000,
blockReplyBreak: "message_end",
},
} as unknown as FollowupRun;
const run = {
...followupRun.run,
...params.runOverrides,
config: params.config ?? followupRun.run.config,
};
return {
typing,
sessionCtx,
resolvedQueue,
followupRun: { ...followupRun, run },
};
}
describe("runReplyAgent memory flush", () => {
it("runs a memory flush turn and updates session metadata", async () => {
runEmbeddedPiAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 1,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
const calls: Array<{ prompt?: string }> = [];
runEmbeddedPiAgentMock.mockImplementation(
async (params: EmbeddedRunParams) => {
calls.push({ prompt: params.prompt });
if (params.prompt === DEFAULT_MEMORY_FLUSH_PROMPT) {
return { payloads: [], meta: {} };
}
return {
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
};
},
);
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
expect(calls.map((call) => call.prompt)).toEqual([
DEFAULT_MEMORY_FLUSH_PROMPT,
"hello",
]);
const stored = JSON.parse(await fs.readFile(storePath, "utf-8"));
expect(stored[sessionKey].memoryFlushAt).toBeTypeOf("number");
expect(stored[sessionKey].memoryFlushCompactionCount).toBe(1);
});
it("skips memory flush when disabled in config", async () => {
runEmbeddedPiAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 1,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
runEmbeddedPiAgentMock.mockImplementation(
async (_params: EmbeddedRunParams) => ({
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
}),
);
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
config: {
agents: {
defaults: { compaction: { memoryFlush: { enabled: false } } },
},
},
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
expect(runEmbeddedPiAgentMock).toHaveBeenCalledTimes(1);
const call = runEmbeddedPiAgentMock.mock.calls[0]?.[0] as
| { prompt?: string }
| undefined;
expect(call?.prompt).toBe("hello");
const stored = JSON.parse(await fs.readFile(storePath, "utf-8"));
expect(stored[sessionKey].memoryFlushAt).toBeUndefined();
});
it("skips memory flush for CLI providers", async () => {
runEmbeddedPiAgentMock.mockReset();
runCliAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 1,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
const calls: Array<{ prompt?: string }> = [];
runEmbeddedPiAgentMock.mockImplementation(
async (params: EmbeddedRunParams) => {
calls.push({ prompt: params.prompt });
return {
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
};
},
);
runCliAgentMock.mockResolvedValue({
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
});
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
runOverrides: { provider: "codex-cli" },
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
expect(runCliAgentMock).toHaveBeenCalledTimes(1);
const call = runCliAgentMock.mock.calls[0]?.[0] as
| { prompt?: string }
| undefined;
expect(call?.prompt).toBe("hello");
expect(runEmbeddedPiAgentMock).not.toHaveBeenCalled();
});
it("uses configured prompts for memory flush runs", async () => {
runEmbeddedPiAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 1,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
const calls: Array<EmbeddedRunParams> = [];
runEmbeddedPiAgentMock.mockImplementation(
async (params: EmbeddedRunParams) => {
calls.push(params);
if (params.prompt === DEFAULT_MEMORY_FLUSH_PROMPT) {
return { payloads: [], meta: {} };
}
return {
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
};
},
);
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
config: {
agents: {
defaults: {
compaction: {
memoryFlush: {
prompt: "Write notes.",
systemPrompt: "Flush memory now.",
},
},
},
},
},
runOverrides: { extraSystemPrompt: "extra system" },
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
const flushCall = calls[0];
expect(flushCall?.prompt).toContain("Write notes.");
expect(flushCall?.prompt).toContain("NO_REPLY");
expect(flushCall?.extraSystemPrompt).toContain("extra system");
expect(flushCall?.extraSystemPrompt).toContain("Flush memory now.");
expect(flushCall?.extraSystemPrompt).toContain("NO_REPLY");
expect(calls[1]?.prompt).toBe("hello");
});
it("skips memory flush after a prior flush in the same compaction cycle", async () => {
runEmbeddedPiAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 2,
memoryFlushCompactionCount: 2,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
const calls: Array<{ prompt?: string }> = [];
runEmbeddedPiAgentMock.mockImplementation(
async (params: EmbeddedRunParams) => {
calls.push({ prompt: params.prompt });
return {
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
};
},
);
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
expect(calls.map((call) => call.prompt)).toEqual(["hello"]);
});
it("skips memory flush when the sandbox workspace is read-only", async () => {
runEmbeddedPiAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 1,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
const calls: Array<{ prompt?: string }> = [];
runEmbeddedPiAgentMock.mockImplementation(
async (params: EmbeddedRunParams) => {
calls.push({ prompt: params.prompt });
return {
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
};
},
);
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
config: {
agents: {
defaults: {
sandbox: { mode: "all", workspaceAccess: "ro" },
},
},
},
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
expect(calls.map((call) => call.prompt)).toEqual(["hello"]);
const stored = JSON.parse(await fs.readFile(storePath, "utf-8"));
expect(stored[sessionKey].memoryFlushAt).toBeUndefined();
});
it("skips memory flush when the sandbox workspace is none", async () => {
runEmbeddedPiAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 1,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
const calls: Array<{ prompt?: string }> = [];
runEmbeddedPiAgentMock.mockImplementation(
async (params: EmbeddedRunParams) => {
calls.push({ prompt: params.prompt });
return {
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
};
},
);
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
config: {
agents: {
defaults: {
sandbox: { mode: "all", workspaceAccess: "none" },
},
},
},
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
expect(calls.map((call) => call.prompt)).toEqual(["hello"]);
});
it("increments compaction count when flush compaction completes", async () => {
runEmbeddedPiAgentMock.mockReset();
const tmp = await fs.mkdtemp(path.join(os.tmpdir(), "clawdbot-flush-"));
const storePath = path.join(tmp, "sessions.json");
const sessionKey = "main";
const sessionEntry = {
sessionId: "session",
updatedAt: Date.now(),
totalTokens: 80_000,
compactionCount: 1,
};
await seedSessionStore({ storePath, sessionKey, entry: sessionEntry });
runEmbeddedPiAgentMock.mockImplementation(
async (params: EmbeddedRunParams) => {
if (params.prompt === DEFAULT_MEMORY_FLUSH_PROMPT) {
params.onAgentEvent?.({
stream: "compaction",
data: { phase: "end", willRetry: false },
});
return { payloads: [], meta: {} };
}
return {
payloads: [{ text: "ok" }],
meta: { agentMeta: { usage: { input: 1, output: 1 } } },
};
},
);
const { typing, sessionCtx, resolvedQueue, followupRun } = createBaseRun({
storePath,
sessionEntry,
});
await runReplyAgent({
commandBody: "hello",
followupRun,
queueKey: "main",
resolvedQueue,
shouldSteer: false,
shouldFollowup: false,
isActive: false,
isStreaming: false,
typing,
sessionCtx,
sessionEntry,
sessionStore: { [sessionKey]: sessionEntry },
sessionKey,
storePath,
defaultModel: "anthropic/claude-opus-4-5",
agentCfgContextTokens: 100_000,
resolvedVerboseLevel: "off",
isNewSession: false,
blockStreamingEnabled: false,
resolvedBlockStreamingBreak: "message_end",
shouldInjectGroupIntro: false,
typingMode: "instant",
});
const stored = JSON.parse(await fs.readFile(storePath, "utf-8"));
expect(stored[sessionKey].compactionCount).toBe(2);
expect(stored[sessionKey].memoryFlushCompactionCount).toBe(2);
});
});

View File

@@ -1,633 +1,11 @@
import type { SkillSnapshot } from "../../agents/skills.js";
import { parseDurationMs } from "../../cli/parse-duration.js";
import type { ClawdbotConfig } from "../../config/config.js";
import type { SessionEntry } from "../../config/sessions.js";
import { defaultRuntime } from "../../runtime.js";
import type { OriginatingChannelType } from "../templating.js";
import type {
ElevatedLevel,
ReasoningLevel,
ThinkLevel,
VerboseLevel,
} from "./directives.js";
import { isRoutableChannel } from "./route-reply.js";
export type QueueMode =
| "steer"
| "followup"
| "collect"
| "steer-backlog"
| "interrupt"
| "queue";
export type QueueDropPolicy = "old" | "new" | "summarize";
export type QueueSettings = {
mode: QueueMode;
debounceMs?: number;
cap?: number;
dropPolicy?: QueueDropPolicy;
};
export type QueueDedupeMode = "message-id" | "prompt" | "none";
export type FollowupRun = {
prompt: string;
/** Provider message ID, when available (for deduplication). */
messageId?: string;
summaryLine?: string;
enqueuedAt: number;
/**
* Originating channel for reply routing.
* When set, replies should be routed back to this provider
* instead of using the session's lastChannel.
*/
originatingChannel?: OriginatingChannelType;
/**
* Originating destination for reply routing.
* The chat/channel/user ID where the reply should be sent.
*/
originatingTo?: string;
/** Provider account id (multi-account). */
originatingAccountId?: string;
/** Telegram forum topic thread id. */
originatingThreadId?: number;
run: {
agentId: string;
agentDir: string;
sessionId: string;
sessionKey?: string;
messageProvider?: string;
agentAccountId?: string;
sessionFile: string;
workspaceDir: string;
config: ClawdbotConfig;
skillsSnapshot?: SkillSnapshot;
provider: string;
model: string;
authProfileId?: string;
thinkLevel?: ThinkLevel;
verboseLevel?: VerboseLevel;
reasoningLevel?: ReasoningLevel;
elevatedLevel?: ElevatedLevel;
bashElevated?: {
enabled: boolean;
allowed: boolean;
defaultLevel: ElevatedLevel;
};
timeoutMs: number;
blockReplyBreak: "text_end" | "message_end";
ownerNumbers?: string[];
extraSystemPrompt?: string;
enforceFinalTag?: boolean;
};
};
type FollowupQueueState = {
items: FollowupRun[];
draining: boolean;
lastEnqueuedAt: number;
mode: QueueMode;
debounceMs: number;
cap: number;
dropPolicy: QueueDropPolicy;
droppedCount: number;
summaryLines: string[];
lastRun?: FollowupRun["run"];
};
const DEFAULT_QUEUE_DEBOUNCE_MS = 1000;
const DEFAULT_QUEUE_CAP = 20;
const DEFAULT_QUEUE_DROP: QueueDropPolicy = "summarize";
const FOLLOWUP_QUEUES = new Map<string, FollowupQueueState>();
/**
 * Map a user-supplied queue-mode token (including legacy aliases) to its
 * canonical QueueMode, or undefined when the token is missing/unrecognized.
 */
function normalizeQueueMode(raw?: string): QueueMode | undefined {
  if (!raw) return undefined;
  switch (raw.trim().toLowerCase()) {
    // Legacy "queue"/"queued" spellings are treated as steering.
    case "queue":
    case "queued":
    case "steer":
    case "steering":
      return "steer";
    case "interrupt":
    case "interrupts":
    case "abort":
      return "interrupt";
    case "followup":
    case "follow-ups":
    case "followups":
      return "followup";
    case "collect":
    case "coalesce":
      return "collect";
    case "steer+backlog":
    case "steer-backlog":
    case "steer_backlog":
      return "steer-backlog";
    default:
      return undefined;
  }
}
/** Canonicalize a drop-policy token ("old"/"new"/"summarize" plus aliases). */
function normalizeQueueDropPolicy(raw?: string): QueueDropPolicy | undefined {
  if (!raw) return undefined;
  const token = raw.trim().toLowerCase();
  if (token === "old" || token === "oldest") return "old";
  if (token === "new" || token === "newest") return "new";
  return token === "summarize" || token === "summary" ? "summarize" : undefined;
}
/**
 * Parse a debounce duration token into whole milliseconds.
 * Zero, negative, empty, and unparseable values all yield undefined.
 */
function parseQueueDebounce(raw?: string): number | undefined {
  if (!raw) return undefined;
  try {
    // defaultUnit "ms" lets a bare number mean milliseconds.
    const ms = parseDurationMs(raw.trim(), { defaultUnit: "ms" });
    return ms && ms > 0 ? Math.round(ms) : undefined;
  } catch {
    // Treat parse failures the same as missing input.
    return undefined;
  }
}
/** Parse a queue-cap token; returns a positive integer or undefined. */
function parseQueueCap(raw?: string): number | undefined {
  if (!raw) return undefined;
  const parsed = Number(raw);
  if (!Number.isFinite(parsed)) return undefined;
  const floored = Math.floor(parsed);
  return floored >= 1 ? floored : undefined;
}
/**
 * Hand-rolled scanner for the argument text that follows a "/queue" directive.
 *
 * Accepts, in any order until the first unrecognized token:
 *   - a reset keyword: "default" / "reset" / "clear" (stops scanning)
 *   - "debounce:<duration>" or "debounce=<duration>"
 *   - "cap:<n>" or "cap=<n>"
 *   - "drop:<policy>" or "drop=<policy>"
 *   - a bare queue-mode token (see normalizeQueueMode)
 *
 * `consumed` reports how many characters of `raw` were eaten so the caller
 * can strip the directive from the message body. Raw token text is returned
 * next to each parsed value so callers can report bad input verbatim.
 */
function parseQueueDirectiveArgs(raw: string): {
  consumed: number;
  queueMode?: QueueMode;
  queueReset: boolean;
  rawMode?: string;
  debounceMs?: number;
  cap?: number;
  dropPolicy?: QueueDropPolicy;
  rawDebounce?: string;
  rawCap?: string;
  rawDrop?: string;
  hasOptions: boolean;
} {
  let i = 0;
  const len = raw.length;
  // Skip leading whitespace, plus an optional ":" right after "/queue"
  // (e.g. "/queue: steer") and any whitespace following it.
  while (i < len && /\s/.test(raw[i])) i += 1;
  if (raw[i] === ":") {
    i += 1;
    while (i < len && /\s/.test(raw[i])) i += 1;
  }
  let consumed = i;
  let queueMode: QueueMode | undefined;
  let queueReset = false;
  let rawMode: string | undefined;
  let debounceMs: number | undefined;
  let cap: number | undefined;
  let dropPolicy: QueueDropPolicy | undefined;
  let rawDebounce: string | undefined;
  let rawCap: string | undefined;
  let rawDrop: string | undefined;
  let hasOptions = false;
  // Read one whitespace-delimited token and swallow the whitespace after it.
  const takeToken = (): string | null => {
    if (i >= len) return null;
    const start = i;
    while (i < len && !/\s/.test(raw[i])) i += 1;
    if (start === i) return null;
    const token = raw.slice(start, i);
    while (i < len && /\s/.test(raw[i])) i += 1;
    return token;
  };
  while (i < len) {
    const token = takeToken();
    if (!token) break;
    const lowered = token.trim().toLowerCase();
    if (lowered === "default" || lowered === "reset" || lowered === "clear") {
      // Reset keywords terminate the scan immediately.
      queueReset = true;
      consumed = i;
      break;
    }
    if (lowered.startsWith("debounce:") || lowered.startsWith("debounce=")) {
      rawDebounce = token.split(/[:=]/)[1] ?? "";
      debounceMs = parseQueueDebounce(rawDebounce);
      hasOptions = true;
      consumed = i;
      continue;
    }
    if (lowered.startsWith("cap:") || lowered.startsWith("cap=")) {
      rawCap = token.split(/[:=]/)[1] ?? "";
      cap = parseQueueCap(rawCap);
      hasOptions = true;
      consumed = i;
      continue;
    }
    if (lowered.startsWith("drop:") || lowered.startsWith("drop=")) {
      rawDrop = token.split(/[:=]/)[1] ?? "";
      dropPolicy = normalizeQueueDropPolicy(rawDrop);
      hasOptions = true;
      consumed = i;
      continue;
    }
    const mode = normalizeQueueMode(token);
    if (mode) {
      queueMode = mode;
      rawMode = token;
      consumed = i;
      continue;
    }
    // Stop at first unrecognized token.
    break;
  }
  return {
    consumed,
    queueMode,
    queueReset,
    rawMode,
    debounceMs,
    cap,
    dropPolicy,
    rawDebounce,
    rawCap,
    rawDrop,
    hasOptions,
  };
}
/**
 * Find and strip an inline "/queue" directive from a message body.
 *
 * Returns the body with the directive (and every argument token the scanner
 * recognized) removed and whitespace collapsed, plus the parsed directive
 * values. `hasDirective` is false when "/queue" does not appear at a word
 * boundary.
 */
export function extractQueueDirective(body?: string): {
  cleaned: string;
  queueMode?: QueueMode;
  queueReset: boolean;
  rawMode?: string;
  hasDirective: boolean;
  debounceMs?: number;
  cap?: number;
  dropPolicy?: QueueDropPolicy;
  rawDebounce?: string;
  rawCap?: string;
  rawDrop?: string;
  hasOptions: boolean;
} {
  if (!body)
    return {
      cleaned: "",
      hasDirective: false,
      queueReset: false,
      hasOptions: false,
    };
  // Match "/queue" at start-of-string or after whitespace, followed by
  // end-of-string, whitespace, or ":".
  const re = /(?:^|\s)\/queue(?=$|\s|:)/i;
  const match = re.exec(body);
  if (!match) {
    return {
      cleaned: body.trim(),
      hasDirective: false,
      queueReset: false,
      hasOptions: false,
    };
  }
  const start = match.index + match[0].indexOf("/queue");
  const argsStart = start + "/queue".length;
  const args = body.slice(argsStart);
  const parsed = parseQueueDirectiveArgs(args);
  // Splice out the directive plus its consumed arguments, then collapse any
  // doubled-up whitespace left behind.
  const cleanedRaw = `${body.slice(0, start)} ${body.slice(
    argsStart + parsed.consumed,
  )}`;
  const cleaned = cleanedRaw.replace(/\s+/g, " ").trim();
  return {
    cleaned,
    queueMode: parsed.queueMode,
    queueReset: parsed.queueReset,
    rawMode: parsed.rawMode,
    debounceMs: parsed.debounceMs,
    cap: parsed.cap,
    dropPolicy: parsed.dropPolicy,
    rawDebounce: parsed.rawDebounce,
    rawCap: parsed.rawCap,
    rawDrop: parsed.rawDrop,
    hasDirective: true,
    hasOptions: parsed.hasOptions,
  };
}
/**
 * Truncate `text` to at most `limit` characters.
 *
 * When truncation occurs, a "…" marker is appended; the slice keeps only
 * `limit - 1` characters so the result including the ellipsis never exceeds
 * `limit`. (The original body reserved that character via `limit - 1` and
 * wrapped the slice in a template literal but appended nothing, so truncated
 * strings were indistinguishable from untruncated ones — restored here.)
 */
function elideText(text: string, limit = 140): string {
  if (text.length <= limit) return text;
  return `${text.slice(0, Math.max(0, limit - 1)).trimEnd()}…`;
}
/** One-line, whitespace-collapsed summary of a queued run (max 160 chars). */
function buildQueueSummaryLine(run: FollowupRun): string {
  // Prefer the explicit summary line; fall back to the prompt itself.
  const source = run.summaryLine?.trim() || run.prompt.trim();
  const collapsed = source.replace(/\s+/g, " ").trim();
  return elideText(collapsed, 160);
}
/**
 * Fetch (or lazily create) the per-key followup queue, refreshing its
 * settings from `settings` on every access. Unset or invalid settings keep
 * the queue's current values — or the module defaults on first creation.
 */
function getFollowupQueue(
  key: string,
  settings: QueueSettings,
): FollowupQueueState {
  // Normalize incoming settings once; undefined means "leave unchanged".
  const nextDebounce =
    typeof settings.debounceMs === "number"
      ? Math.max(0, settings.debounceMs)
      : undefined;
  const nextCap =
    typeof settings.cap === "number" && settings.cap > 0
      ? Math.floor(settings.cap)
      : undefined;
  const state = FOLLOWUP_QUEUES.get(key);
  if (state) {
    state.mode = settings.mode;
    state.debounceMs = nextDebounce ?? state.debounceMs;
    state.cap = nextCap ?? state.cap;
    state.dropPolicy = settings.dropPolicy ?? state.dropPolicy;
    return state;
  }
  const fresh: FollowupQueueState = {
    items: [],
    draining: false,
    lastEnqueuedAt: 0,
    mode: settings.mode,
    debounceMs: nextDebounce ?? DEFAULT_QUEUE_DEBOUNCE_MS,
    cap: nextCap ?? DEFAULT_QUEUE_CAP,
    dropPolicy: settings.dropPolicy ?? DEFAULT_QUEUE_DROP,
    droppedCount: 0,
    summaryLines: [],
  };
  FOLLOWUP_QUEUES.set(key, fresh);
  return fresh;
}
/**
 * Check whether an equivalent run is already queued.
 *
 * Dedup prefers the provider message ID (trimmed); prompt-text comparison is
 * only used as a fallback when `allowPromptFallback` is set. In both cases
 * the routing fields must match, so the same message aimed at different
 * destinations is never conflated.
 */
function isRunAlreadyQueued(
  run: FollowupRun,
  queue: FollowupQueueState,
  allowPromptFallback = false,
): boolean {
  const sameRoute = (item: FollowupRun): boolean =>
    item.originatingChannel === run.originatingChannel &&
    item.originatingTo === run.originatingTo &&
    item.originatingAccountId === run.originatingAccountId &&
    item.originatingThreadId === run.originatingThreadId;
  const id = run.messageId?.trim();
  if (id) {
    return queue.items.some(
      (item) => sameRoute(item) && item.messageId?.trim() === id,
    );
  }
  if (!allowPromptFallback) return false;
  return queue.items.some(
    (item) => sameRoute(item) && item.prompt === run.prompt,
  );
}
/**
 * Append a followup run to the keyed queue.
 *
 * Applies dedupe first (by provider message ID, or by prompt text for
 * dedupeMode "prompt"), then enforces the queue cap per its drop policy.
 * Returns false when the run was NOT enqueued: either it was a duplicate,
 * or the cap was reached with dropPolicy "new" (drop the incoming run).
 */
export function enqueueFollowupRun(
  key: string,
  run: FollowupRun,
  settings: QueueSettings,
  dedupeMode: QueueDedupeMode = "message-id",
): boolean {
  const queue = getFollowupQueue(key, settings);
  // Deduplicate: skip if the same message is already queued.
  if (dedupeMode !== "none") {
    if (dedupeMode === "message-id" && isRunAlreadyQueued(run, queue)) {
      return false;
    }
    if (dedupeMode === "prompt" && isRunAlreadyQueued(run, queue, true)) {
      return false;
    }
  }
  // Record activity even if the run ends up dropped below ("new" policy),
  // so debounce tracking still sees the attempt.
  queue.lastEnqueuedAt = Date.now()
  queue.lastRun = run.run;
  const cap = queue.cap;
  if (cap > 0 && queue.items.length >= cap) {
    // "new" drops the incoming run; otherwise evict the oldest entries to
    // make room, summarizing the evicted ones under the "summarize" policy.
    if (queue.dropPolicy === "new") {
      return false;
    }
    const dropCount = queue.items.length - cap + 1;
    const dropped = queue.items.splice(0, dropCount);
    if (queue.dropPolicy === "summarize") {
      for (const item of dropped) {
        queue.droppedCount += 1;
        queue.summaryLines.push(buildQueueSummaryLine(item));
      }
      // Bound summary memory to the cap.
      while (queue.summaryLines.length > cap) queue.summaryLines.shift();
    }
  }
  queue.items.push(run);
  return true;
}
/**
 * Resolve once the queue has been quiet for its configured debounce window.
 * Re-checks after each sleep because new enqueues may reset lastEnqueuedAt.
 */
async function waitForQueueDebounce(queue: FollowupQueueState): Promise<void> {
  const windowMs = Math.max(0, queue.debounceMs);
  if (windowMs === 0) return;
  for (;;) {
    const idleFor = Date.now() - queue.lastEnqueuedAt;
    if (idleFor >= windowMs) return;
    await new Promise<void>((resolve) => {
      setTimeout(resolve, windowMs - idleFor);
    });
  }
}
/**
 * Build a one-shot overflow notice for messages dropped due to the cap,
 * then clear the accumulated drop state so the notice fires only once.
 * Returns undefined when nothing was dropped or the policy is not
 * "summarize".
 */
function buildSummaryPrompt(queue: FollowupQueueState): string | undefined {
  if (queue.dropPolicy !== "summarize" || queue.droppedCount <= 0) {
    return undefined;
  }
  const plural = queue.droppedCount === 1 ? "" : "s";
  const lines: string[] = [
    `[Queue overflow] Dropped ${queue.droppedCount} message${plural} due to cap.`,
  ];
  if (queue.summaryLines.length > 0) {
    lines.push("Summary:", ...queue.summaryLines.map((line) => `- ${line}`));
  }
  // Reset so the next overflow episode starts a fresh summary.
  queue.droppedCount = 0;
  queue.summaryLines = [];
  return lines.join("\n");
}
/**
 * Merge queued prompts (plus an optional overflow summary) into one prompt,
 * with each queued message in its own numbered section.
 */
function buildCollectPrompt(items: FollowupRun[], summary?: string): string {
  const header = "[Queued messages while agent was busy]";
  const sections = summary ? [header, summary] : [header];
  let index = 0;
  for (const item of items) {
    index += 1;
    sections.push(`---\nQueued #${index}\n${item.prompt}`.trim());
  }
  return sections.join("\n\n");
}
/**
 * Checks if queued items have different routable originating channels.
 *
 * Returns true if messages come from different channels (e.g., Slack + Telegram),
 * meaning they cannot be safely collected into one prompt without losing routing.
 * Also returns true for a mix of routable and non-routable channels.
 */
function hasCrossChannelItems(items: FollowupRun[]): boolean {
  const routingKeys = new Set<string>();
  let sawUnrouted = false;
  for (const item of items) {
    const channel = item.originatingChannel;
    const to = item.originatingTo;
    const accountId = item.originatingAccountId;
    const threadPart =
      typeof item.originatingThreadId === "number"
        ? String(item.originatingThreadId)
        : "";
    if (!channel && !to && !accountId && threadPart === "") {
      // No routing info at all: note it, decide after scanning everything.
      sawUnrouted = true;
      continue;
    }
    if (!to || !isRoutableChannel(channel)) {
      // Partially routed or non-routable: cannot collect safely.
      return true;
    }
    routingKeys.add(`${channel}|${to}|${accountId || ""}|${threadPart}`);
  }
  if (routingKeys.size === 0) return false;
  // A mix of routed and unrouted items, or multiple distinct targets.
  return sawUnrouted || routingKeys.size > 1;
}
/**
 * Start an asynchronous drain loop for the follow-up queue under `key`.
 *
 * No-op when no queue exists or one is already draining. After each debounce
 * wait, "collect" mode merges same-channel items into a single prompt, while
 * cross-channel batches fall back to one-at-a-time processing to preserve
 * per-message routing. All other modes flush any pending overflow summary
 * first, then process items individually. A fully drained queue is removed
 * from the registry; leftovers trigger another drain.
 */
export function scheduleFollowupDrain(
  key: string,
  runFollowup: (run: FollowupRun) => Promise<void>,
): void {
  const queue = FOLLOWUP_QUEUES.get(key);
  if (!queue || queue.draining) return;
  // Claim the queue so concurrent calls become no-ops.
  queue.draining = true;
  void (async () => {
    try {
      let forceIndividualCollect = false;
      while (queue.items.length > 0 || queue.droppedCount > 0) {
        await waitForQueueDebounce(queue);
        if (queue.mode === "collect") {
          // Once the batch is mixed, never collect again within this drain.
          // Prevents “collect after shift” collapsing different targets.
          //
          // Debug: `pnpm test src/auto-reply/reply/queue.collect-routing.test.ts`
          if (forceIndividualCollect) {
            const next = queue.items.shift();
            if (!next) break;
            await runFollowup(next);
            continue;
          }
          // Check if messages span multiple channels.
          // If so, process individually to preserve per-message routing.
          const isCrossChannel = hasCrossChannelItems(queue.items);
          if (isCrossChannel) {
            forceIndividualCollect = true;
            // Process one at a time to preserve per-message routing info.
            const next = queue.items.shift();
            if (!next) break;
            await runFollowup(next);
            continue;
          }
          // Same-channel messages can be safely collected.
          const items = queue.items.splice(0, queue.items.length);
          const summary = buildSummaryPrompt(queue);
          // Use the newest item's run context; fall back to the last enqueue's.
          const run = items.at(-1)?.run ?? queue.lastRun;
          if (!run) break;
          // Preserve originating channel from items when collecting same-channel.
          const originatingChannel = items.find(
            (i) => i.originatingChannel,
          )?.originatingChannel;
          const originatingTo = items.find(
            (i) => i.originatingTo,
          )?.originatingTo;
          const originatingAccountId = items.find(
            (i) => i.originatingAccountId,
          )?.originatingAccountId;
          const originatingThreadId = items.find(
            (i) => typeof i.originatingThreadId === "number",
          )?.originatingThreadId;
          const prompt = buildCollectPrompt(items, summary);
          await runFollowup({
            prompt,
            run,
            enqueuedAt: Date.now(),
            originatingChannel,
            originatingTo,
            originatingAccountId,
            originatingThreadId,
          });
          continue;
        }
        // Non-collect modes: emit any pending overflow summary first...
        const summaryPrompt = buildSummaryPrompt(queue);
        if (summaryPrompt) {
          const run = queue.lastRun;
          if (!run) break;
          await runFollowup({
            prompt: summaryPrompt,
            run,
            enqueuedAt: Date.now(),
          });
          continue;
        }
        // ...then process queued runs one at a time.
        const next = queue.items.shift();
        if (!next) break;
        await runFollowup(next);
      }
    } catch (err) {
      defaultRuntime.error?.(
        `followup queue drain failed for ${key}: ${String(err)}`,
      );
    } finally {
      queue.draining = false;
      if (queue.items.length === 0 && queue.droppedCount === 0) {
        // Fully drained: drop the queue from the registry.
        FOLLOWUP_QUEUES.delete(key);
      } else {
        // Items or summaries remain (e.g. enqueued after the loop exited
        // or after an error): schedule another drain for the leftovers.
        scheduleFollowupDrain(key, runFollowup);
      }
    }
  })();
}
/** Fallback queue mode when neither inline, session, nor config specify one. */
function defaultQueueModeForChannel(_channel?: string): QueueMode {
  const fallback: QueueMode = "collect";
  return fallback;
}
/**
 * Resolve the effective queue settings from, in descending precedence:
 * inline directive, session entry, per-channel config, global config,
 * built-in defaults. Numeric values are clamped (debounce >= 0, cap >= 1).
 */
export function resolveQueueSettings(params: {
  cfg: ClawdbotConfig;
  channel?: string;
  sessionEntry?: SessionEntry;
  inlineMode?: QueueMode;
  inlineOptions?: Partial<QueueSettings>;
}): QueueSettings {
  const { cfg, sessionEntry, inlineMode, inlineOptions } = params;
  const channelKey = params.channel?.trim().toLowerCase();
  const queueCfg = cfg.messages?.queue;
  // Per-channel override from config, when a channel key is known.
  let providerModeRaw: string | undefined;
  if (channelKey && queueCfg?.byChannel) {
    providerModeRaw = (
      queueCfg.byChannel as Record<string, string | undefined>
    )[channelKey];
  }
  const mode =
    inlineMode ??
    normalizeQueueMode(sessionEntry?.queueMode) ??
    normalizeQueueMode(providerModeRaw) ??
    normalizeQueueMode(queueCfg?.mode) ??
    defaultQueueModeForChannel(channelKey);
  const debounceRaw =
    inlineOptions?.debounceMs ??
    sessionEntry?.queueDebounceMs ??
    queueCfg?.debounceMs ??
    DEFAULT_QUEUE_DEBOUNCE_MS;
  const capRaw =
    inlineOptions?.cap ?? sessionEntry?.queueCap ?? queueCfg?.cap ?? DEFAULT_QUEUE_CAP;
  const dropPolicy =
    inlineOptions?.dropPolicy ??
    sessionEntry?.queueDrop ??
    normalizeQueueDropPolicy(queueCfg?.drop) ??
    DEFAULT_QUEUE_DROP;
  return {
    mode,
    debounceMs:
      typeof debounceRaw === "number" ? Math.max(0, debounceRaw) : undefined,
    cap:
      typeof capRaw === "number" ? Math.max(1, Math.floor(capRaw)) : undefined,
    dropPolicy,
  };
}
/** Number of runs currently queued under `key` (0 for blank or unknown keys). */
export function getFollowupQueueDepth(key: string): number {
  const normalized = key.trim();
  if (!normalized) return 0;
  return FOLLOWUP_QUEUES.get(normalized)?.items.length ?? 0;
}
export { extractQueueDirective } from "./queue/directive.js";
export { scheduleFollowupDrain } from "./queue/drain.js";
export { enqueueFollowupRun, getFollowupQueueDepth } from "./queue/enqueue.js";
export { resolveQueueSettings } from "./queue/settings.js";
export type {
FollowupRun,
QueueDedupeMode,
QueueDropPolicy,
QueueMode,
QueueSettings,
} from "./queue/types.js";

View File

@@ -0,0 +1,172 @@
import { parseDurationMs } from "../../../cli/parse-duration.js";
import { normalizeQueueDropPolicy, normalizeQueueMode } from "./normalize.js";
import type { QueueDropPolicy, QueueMode } from "./types.js";
/**
 * Parse a debounce duration string (e.g. "500", "2s") into whole
 * milliseconds. Returns undefined for blank, zero, negative, or
 * unparseable input.
 */
function parseQueueDebounce(raw?: string): number | undefined {
  if (!raw) return undefined;
  try {
    const ms = parseDurationMs(raw.trim(), { defaultUnit: "ms" });
    // parseDurationMs may yield undefined/0/negative; only accept positive.
    return ms && ms >= 0 ? Math.round(ms) : undefined;
  } catch {
    // Malformed duration: treat as "not provided" rather than failing.
    return undefined;
  }
}
/** Parse a queue cap string; must be a finite number that floors to >= 1. */
function parseQueueCap(raw?: string): number | undefined {
  if (!raw) return undefined;
  const parsed = Number(raw);
  if (!Number.isFinite(parsed)) return undefined;
  const floored = Math.floor(parsed);
  return floored >= 1 ? floored : undefined;
}
/**
 * Tokenize the argument tail of a `/queue` directive.
 *
 * Recognizes, in any order: a reset keyword ("default" | "reset" | "clear"),
 * `debounce:<dur>` / `debounce=<dur>`, `cap:<n>`, `drop:<policy>`, and a bare
 * queue-mode token. Scanning stops at the first unrecognized token (or
 * immediately after a reset keyword). `consumed` reports how many characters
 * of `raw` were eaten so the caller can strip the directive from the body;
 * the raw* fields echo unvalidated option values for error reporting.
 */
function parseQueueDirectiveArgs(raw: string): {
  consumed: number;
  queueMode?: QueueMode;
  queueReset: boolean;
  rawMode?: string;
  debounceMs?: number;
  cap?: number;
  dropPolicy?: QueueDropPolicy;
  rawDebounce?: string;
  rawCap?: string;
  rawDrop?: string;
  hasOptions: boolean;
} {
  let i = 0;
  const len = raw.length;
  // Skip leading whitespace and an optional ":" separator after "/queue".
  while (i < len && /\s/.test(raw[i])) i += 1;
  if (raw[i] === ":") {
    i += 1;
    while (i < len && /\s/.test(raw[i])) i += 1;
  }
  let consumed = i;
  let queueMode: QueueMode | undefined;
  let queueReset = false;
  let rawMode: string | undefined;
  let debounceMs: number | undefined;
  let cap: number | undefined;
  let dropPolicy: QueueDropPolicy | undefined;
  let rawDebounce: string | undefined;
  let rawCap: string | undefined;
  let rawDrop: string | undefined;
  let hasOptions = false;
  // Read the next whitespace-delimited token and skip trailing whitespace.
  const takeToken = (): string | null => {
    if (i >= len) return null;
    const start = i;
    while (i < len && !/\s/.test(raw[i])) i += 1;
    if (start === i) return null;
    const token = raw.slice(start, i);
    while (i < len && /\s/.test(raw[i])) i += 1;
    return token;
  };
  while (i < len) {
    const token = takeToken();
    if (!token) break;
    const lowered = token.trim().toLowerCase();
    // Reset keyword: consume it and stop scanning further options.
    if (lowered === "default" || lowered === "reset" || lowered === "clear") {
      queueReset = true;
      consumed = i;
      break;
    }
    // key:value / key=value options; raw values kept even when invalid.
    if (lowered.startsWith("debounce:") || lowered.startsWith("debounce=")) {
      rawDebounce = token.split(/[:=]/)[1] ?? "";
      debounceMs = parseQueueDebounce(rawDebounce);
      hasOptions = true;
      consumed = i;
      continue;
    }
    if (lowered.startsWith("cap:") || lowered.startsWith("cap=")) {
      rawCap = token.split(/[:=]/)[1] ?? "";
      cap = parseQueueCap(rawCap);
      hasOptions = true;
      consumed = i;
      continue;
    }
    if (lowered.startsWith("drop:") || lowered.startsWith("drop=")) {
      rawDrop = token.split(/[:=]/)[1] ?? "";
      dropPolicy = normalizeQueueDropPolicy(rawDrop);
      hasOptions = true;
      consumed = i;
      continue;
    }
    // Bare token: try to interpret it as a queue mode.
    const mode = normalizeQueueMode(token);
    if (mode) {
      queueMode = mode;
      rawMode = token;
      consumed = i;
      continue;
    }
    // Stop at first unrecognized token.
    break;
  }
  return {
    consumed,
    queueMode,
    queueReset,
    rawMode,
    debounceMs,
    cap,
    dropPolicy,
    rawDebounce,
    rawCap,
    rawDrop,
    hasOptions,
  };
}
/**
 * Find and strip a `/queue` directive from a message body.
 *
 * Returns the cleaned body (directive removed, whitespace collapsed) along
 * with the parsed mode/options from the directive's argument tail.
 * `hasDirective` is false when no directive is present.
 */
export function extractQueueDirective(body?: string): {
  cleaned: string;
  queueMode?: QueueMode;
  queueReset: boolean;
  rawMode?: string;
  hasDirective: boolean;
  debounceMs?: number;
  cap?: number;
  dropPolicy?: QueueDropPolicy;
  rawDebounce?: string;
  rawCap?: string;
  rawDrop?: string;
  hasOptions: boolean;
} {
  if (!body) {
    return {
      cleaned: "",
      hasDirective: false,
      queueReset: false,
      hasOptions: false,
    };
  }
  // Match "/queue" at start or after whitespace, followed by end/space/":".
  const re = /(?:^|\s)\/queue(?=$|\s|:)/i;
  const match = re.exec(body);
  if (!match) {
    return {
      cleaned: body.trim(),
      hasDirective: false,
      queueReset: false,
      hasOptions: false,
    };
  }
  // The regex is case-insensitive, so locate the token case-insensitively
  // too: a plain indexOf("/queue") misses "/Queue" (returns -1) and would
  // corrupt the slice offsets below.
  const start = match.index + match[0].toLowerCase().indexOf("/queue");
  const argsStart = start + "/queue".length;
  const args = body.slice(argsStart);
  const parsed = parseQueueDirectiveArgs(args);
  // Remove the directive plus its consumed arguments, then normalize spacing.
  const cleanedRaw = `${body.slice(0, start)} ${body.slice(argsStart + parsed.consumed)}`;
  const cleaned = cleanedRaw.replace(/\s+/g, " ").trim();
  return {
    cleaned,
    queueMode: parsed.queueMode,
    queueReset: parsed.queueReset,
    rawMode: parsed.rawMode,
    debounceMs: parsed.debounceMs,
    cap: parsed.cap,
    dropPolicy: parsed.dropPolicy,
    rawDebounce: parsed.rawDebounce,
    rawCap: parsed.rawCap,
    rawDrop: parsed.rawDrop,
    hasDirective: true,
    hasOptions: parsed.hasOptions,
  };
}

View File

@@ -0,0 +1,185 @@
import { defaultRuntime } from "../../../runtime.js";
import { isRoutableChannel } from "../route-reply.js";
import { FOLLOWUP_QUEUES } from "./state.js";
import type { FollowupRun } from "./types.js";
/**
 * Sleep until the queue has been idle for its debounce window; new enqueues
 * (which bump lastEnqueuedAt) extend the wait, hence the re-check loop.
 */
async function waitForQueueDebounce(queue: {
  debounceMs: number;
  lastEnqueuedAt: number;
}) {
  const windowMs = Math.max(0, queue.debounceMs);
  if (windowMs === 0) return;
  for (;;) {
    const idleFor = Date.now() - queue.lastEnqueuedAt;
    if (idleFor >= windowMs) return;
    await new Promise((resolve) => {
      setTimeout(resolve, windowMs - idleFor);
    });
  }
}
/**
 * Build a one-shot overflow notice describing messages dropped due to the
 * queue cap, then reset the drop counters so the notice fires only once.
 * Returns undefined when nothing was dropped or the policy is not
 * "summarize".
 */
function buildSummaryPrompt(queue: {
  dropPolicy: "summarize" | "old" | "new";
  droppedCount: number;
  summaryLines: string[];
}): string | undefined {
  if (queue.dropPolicy !== "summarize" || queue.droppedCount <= 0) {
    return undefined;
  }
  const lines = [
    `[Queue overflow] Dropped ${queue.droppedCount} message${queue.droppedCount === 1 ? "" : "s"} due to cap.`,
  ];
  if (queue.summaryLines.length > 0) {
    lines.push("Summary:");
    for (const line of queue.summaryLines) {
      lines.push(`- ${line}`);
    }
  }
  queue.droppedCount = 0;
  queue.summaryLines = [];
  // Join with real newlines; the module split had accidentally doubled the
  // escape ("\\n"), emitting a literal backslash-n into the prompt.
  return lines.join("\n");
}
/** Merge queued prompts (plus an optional overflow summary) into one prompt. */
function buildCollectPrompt(items: FollowupRun[], summary?: string): string {
  const blocks: string[] = ["[Queued messages while agent was busy]"];
  if (summary) blocks.push(summary);
  items.forEach((item, idx) => {
    // Real newlines here: the module split had doubled the escapes ("\\n"),
    // which put literal backslash-n sequences into the collected prompt.
    blocks.push(`---\nQueued #${idx + 1}\n${item.prompt}`.trim());
  });
  return blocks.join("\n\n");
}
/**
 * Checks if queued items have different routable originating channels.
 *
 * Returns true if messages come from different channels (e.g., Slack + Telegram),
 * meaning they cannot be safely collected into one prompt without losing routing.
 * Also returns true for a mix of routable and non-routable channels.
 */
function hasCrossChannelItems(items: FollowupRun[]): boolean {
  const routingKeys = new Set<string>();
  let sawUnrouted = false;
  for (const item of items) {
    const channel = item.originatingChannel;
    const to = item.originatingTo;
    const accountId = item.originatingAccountId;
    const threadPart =
      typeof item.originatingThreadId === "number"
        ? String(item.originatingThreadId)
        : "";
    if (!channel && !to && !accountId && threadPart === "") {
      // No routing info at all: remember and decide after the scan.
      sawUnrouted = true;
      continue;
    }
    if (!to || !isRoutableChannel(channel)) {
      // Partially routed or non-routable channel: collecting is unsafe.
      return true;
    }
    routingKeys.add(`${channel}|${to}|${accountId || ""}|${threadPart}`);
  }
  if (routingKeys.size === 0) return false;
  // Mixed routed/unrouted items, or multiple distinct routing targets.
  return sawUnrouted || routingKeys.size > 1;
}
/**
 * Start an asynchronous drain loop for the follow-up queue under `key`.
 *
 * No-op when no queue exists or one is already draining. After each debounce
 * wait, "collect" mode merges same-channel items into a single prompt, while
 * cross-channel batches fall back to one-at-a-time processing to preserve
 * per-message routing. All other modes flush any pending overflow summary
 * first, then process items individually. A fully drained queue is removed
 * from the registry; leftovers trigger another drain.
 */
export function scheduleFollowupDrain(
  key: string,
  runFollowup: (run: FollowupRun) => Promise<void>,
): void {
  const queue = FOLLOWUP_QUEUES.get(key);
  if (!queue || queue.draining) return;
  // Claim the queue so concurrent calls become no-ops.
  queue.draining = true;
  void (async () => {
    try {
      let forceIndividualCollect = false;
      while (queue.items.length > 0 || queue.droppedCount > 0) {
        await waitForQueueDebounce(queue);
        if (queue.mode === "collect") {
          // Once the batch is mixed, never collect again within this drain.
          // Prevents “collect after shift” collapsing different targets.
          //
          // Debug: `pnpm test src/auto-reply/reply/queue.collect-routing.test.ts`
          if (forceIndividualCollect) {
            const next = queue.items.shift();
            if (!next) break;
            await runFollowup(next);
            continue;
          }
          // Check if messages span multiple channels.
          // If so, process individually to preserve per-message routing.
          const isCrossChannel = hasCrossChannelItems(queue.items);
          if (isCrossChannel) {
            forceIndividualCollect = true;
            const next = queue.items.shift();
            if (!next) break;
            await runFollowup(next);
            continue;
          }
          // Same-channel batch: take everything and collect into one prompt.
          const items = queue.items.splice(0, queue.items.length);
          const summary = buildSummaryPrompt(queue);
          // Use the newest item's run context; fall back to the last enqueue's.
          const run = items.at(-1)?.run ?? queue.lastRun;
          if (!run) break;
          // Preserve originating channel from items when collecting same-channel.
          const originatingChannel = items.find(
            (i) => i.originatingChannel,
          )?.originatingChannel;
          const originatingTo = items.find(
            (i) => i.originatingTo,
          )?.originatingTo;
          const originatingAccountId = items.find(
            (i) => i.originatingAccountId,
          )?.originatingAccountId;
          const originatingThreadId = items.find(
            (i) => typeof i.originatingThreadId === "number",
          )?.originatingThreadId;
          const prompt = buildCollectPrompt(items, summary);
          await runFollowup({
            prompt,
            run,
            enqueuedAt: Date.now(),
            originatingChannel,
            originatingTo,
            originatingAccountId,
            originatingThreadId,
          });
          continue;
        }
        // Non-collect modes: emit any pending overflow summary first...
        const summaryPrompt = buildSummaryPrompt(queue);
        if (summaryPrompt) {
          const run = queue.lastRun;
          if (!run) break;
          await runFollowup({
            prompt: summaryPrompt,
            run,
            enqueuedAt: Date.now(),
          });
          continue;
        }
        // ...then process queued runs one at a time.
        const next = queue.items.shift();
        if (!next) break;
        await runFollowup(next);
      }
    } catch (err) {
      defaultRuntime.error?.(
        `followup queue drain failed for ${key}: ${String(err)}`,
      );
    } finally {
      queue.draining = false;
      if (queue.items.length === 0 && queue.droppedCount === 0) {
        // Fully drained: drop the queue from the registry.
        FOLLOWUP_QUEUES.delete(key);
      } else {
        // Items or summaries remain (e.g. enqueued after the loop exited
        // or after an error): schedule another drain for the leftovers.
        scheduleFollowupDrain(key, runFollowup);
      }
    }
  })();
}

View File

@@ -0,0 +1,85 @@
import { FOLLOWUP_QUEUES, getFollowupQueue } from "./state.js";
import type { FollowupRun, QueueDedupeMode, QueueSettings } from "./types.js";
/**
 * Truncate `text` to at most `limit` characters, appending an ellipsis when
 * truncation occurs. Strings within the limit are returned unchanged.
 */
function elideText(text: string, limit = 140): string {
  if (text.length <= limit) return text;
  // Reserve one character for the ellipsis marker (the original sliced to
  // limit - 1 but never appended it, silently dropping the elision cue).
  return `${text.slice(0, Math.max(0, limit - 1)).trimEnd()}…`;
}
/**
 * Produce a compact single-line summary for a dropped run: prefer the
 * explicit summaryLine, fall back to the prompt, collapse whitespace runs,
 * and elide to 160 characters.
 */
function buildQueueSummaryLine(run: FollowupRun): string {
  const base = run.summaryLine?.trim() || run.prompt.trim();
  // Collapse whitespace with /\s+/ — the split version's /\\s+/ matched a
  // literal backslash followed by "s", leaving newlines/tabs in the summary.
  const cleaned = base.replace(/\s+/g, " ").trim();
  return elideText(cleaned, 160);
}
/**
 * True when an equivalent run already sits in `items`: identical routing
 * fields plus either the same provider message id or (with
 * `allowPromptFallback`) the same prompt text.
 */
function isRunAlreadyQueued(
  run: FollowupRun,
  items: FollowupRun[],
  allowPromptFallback = false,
): boolean {
  const routingMatches = (candidate: FollowupRun): boolean =>
    candidate.originatingChannel === run.originatingChannel &&
    candidate.originatingTo === run.originatingTo &&
    candidate.originatingAccountId === run.originatingAccountId &&
    candidate.originatingThreadId === run.originatingThreadId;
  const id = run.messageId?.trim();
  if (id) {
    // Stable dedup key: provider message id + routing.
    return items.some(
      (candidate) =>
        candidate.messageId?.trim() === id && routingMatches(candidate),
    );
  }
  // No message id: optionally fall back to exact prompt comparison.
  return allowPromptFallback
    ? items.some(
        (candidate) =>
          candidate.prompt === run.prompt && routingMatches(candidate),
      )
    : false;
}
/**
 * Enqueue a follow-up run under `key`, applying dedupe, cap, and drop
 * policy.
 *
 * @returns false when the run was deduplicated or rejected by the "new"
 *   drop policy; true when it was enqueued.
 */
export function enqueueFollowupRun(
  key: string,
  run: FollowupRun,
  settings: QueueSettings,
  dedupeMode: QueueDedupeMode = "message-id",
): boolean {
  const queue = getFollowupQueue(key, settings);
  // Deduplicate: skip if the same message is already queued.
  const byPrompt = dedupeMode === "prompt";
  if (
    (dedupeMode === "message-id" || byPrompt) &&
    isRunAlreadyQueued(run, queue.items, byPrompt)
  ) {
    return false;
  }
  queue.lastEnqueuedAt = Date.now();
  queue.lastRun = run.run;
  const cap = queue.cap;
  if (cap > 0 && queue.items.length >= cap) {
    // At capacity: either reject the newcomer or evict from the front.
    if (queue.dropPolicy === "new") return false;
    const evicted = queue.items.splice(0, queue.items.length - cap + 1);
    if (queue.dropPolicy === "summarize") {
      for (const item of evicted) {
        queue.droppedCount += 1;
        queue.summaryLines.push(buildQueueSummaryLine(item));
      }
      // Keep the rolling summary bounded by the cap as well.
      while (queue.summaryLines.length > cap) queue.summaryLines.shift();
    }
  }
  queue.items.push(run);
  return true;
}
/** Number of runs currently queued under `key` (0 for blank or unknown keys). */
export function getFollowupQueueDepth(key: string): number {
  const normalized = key.trim();
  if (!normalized) return 0;
  return FOLLOWUP_QUEUES.get(normalized)?.items.length ?? 0;
}

View File

@@ -0,0 +1,39 @@
import type { QueueDropPolicy, QueueMode } from "./types.js";
/**
 * Map a raw user/config string to a canonical QueueMode, accepting common
 * aliases ("queue"/"queued" → "steer", "coalesce" → "collect",
 * "abort" → "interrupt", …). Returns undefined for blank or unrecognized
 * input.
 */
export function normalizeQueueMode(raw?: string): QueueMode | undefined {
  if (!raw) return undefined;
  // Map (not plain object) so prototype keys can never be matched.
  const aliases = new Map<string, QueueMode>([
    ["queue", "steer"],
    ["queued", "steer"],
    ["interrupt", "interrupt"],
    ["interrupts", "interrupt"],
    ["abort", "interrupt"],
    ["steer", "steer"],
    ["steering", "steer"],
    ["followup", "followup"],
    ["follow-ups", "followup"],
    ["followups", "followup"],
    ["collect", "collect"],
    ["coalesce", "collect"],
    ["steer+backlog", "steer-backlog"],
    ["steer-backlog", "steer-backlog"],
    ["steer_backlog", "steer-backlog"],
  ]);
  return aliases.get(raw.trim().toLowerCase());
}
/**
 * Map raw drop-policy strings ("oldest", "summary", …) to canonical values.
 * Returns undefined for blank or unrecognized input.
 */
export function normalizeQueueDropPolicy(
  raw?: string,
): QueueDropPolicy | undefined {
  switch (raw?.trim().toLowerCase()) {
    case "old":
    case "oldest":
      return "old";
    case "new":
    case "newest":
      return "new";
    case "summarize":
    case "summary":
      return "summarize";
    default:
      return undefined;
  }
}

View File

@@ -0,0 +1,55 @@
import { normalizeQueueDropPolicy, normalizeQueueMode } from "./normalize.js";
import {
DEFAULT_QUEUE_CAP,
DEFAULT_QUEUE_DEBOUNCE_MS,
DEFAULT_QUEUE_DROP,
} from "./state.js";
import type {
QueueMode,
QueueSettings,
ResolveQueueSettingsParams,
} from "./types.js";
/** Fallback queue mode when neither inline, session, nor config specify one. */
function defaultQueueModeForChannel(_channel?: string): QueueMode {
  const fallback: QueueMode = "collect";
  return fallback;
}
/**
 * Resolve the effective queue settings from, in descending precedence:
 * inline directive, session entry, per-channel config, global config,
 * built-in defaults. Numeric values are clamped (debounce >= 0, cap >= 1).
 */
export function resolveQueueSettings(
  params: ResolveQueueSettingsParams,
): QueueSettings {
  const { cfg, sessionEntry, inlineMode, inlineOptions } = params;
  const channelKey = params.channel?.trim().toLowerCase();
  const queueCfg = cfg.messages?.queue;
  // Per-channel override from config, when a channel key is known.
  let providerModeRaw: string | undefined;
  if (channelKey && queueCfg?.byChannel) {
    providerModeRaw = (
      queueCfg.byChannel as Record<string, string | undefined>
    )[channelKey];
  }
  const mode =
    inlineMode ??
    normalizeQueueMode(sessionEntry?.queueMode) ??
    normalizeQueueMode(providerModeRaw) ??
    normalizeQueueMode(queueCfg?.mode) ??
    defaultQueueModeForChannel(channelKey);
  const debounceRaw =
    inlineOptions?.debounceMs ??
    sessionEntry?.queueDebounceMs ??
    queueCfg?.debounceMs ??
    DEFAULT_QUEUE_DEBOUNCE_MS;
  const capRaw =
    inlineOptions?.cap ??
    sessionEntry?.queueCap ??
    queueCfg?.cap ??
    DEFAULT_QUEUE_CAP;
  const dropPolicy =
    inlineOptions?.dropPolicy ??
    sessionEntry?.queueDrop ??
    normalizeQueueDropPolicy(queueCfg?.drop) ??
    DEFAULT_QUEUE_DROP;
  return {
    mode,
    debounceMs:
      typeof debounceRaw === "number" ? Math.max(0, debounceRaw) : undefined,
    cap:
      typeof capRaw === "number" ? Math.max(1, Math.floor(capRaw)) : undefined,
    dropPolicy,
  };
}

View File

@@ -0,0 +1,65 @@
import type {
FollowupRun,
QueueDropPolicy,
QueueMode,
QueueSettings,
} from "./types.js";
/** Mutable bookkeeping for one keyed follow-up queue. */
export type FollowupQueueState = {
  items: FollowupRun[]; // pending runs, oldest first
  draining: boolean; // true while a drain loop owns this queue
  lastEnqueuedAt: number; // epoch ms of the newest enqueue (debounce anchor)
  mode: QueueMode;
  debounceMs: number; // quiet period required before draining
  cap: number; // max queued items before the drop policy kicks in
  dropPolicy: QueueDropPolicy;
  droppedCount: number; // items dropped since the last overflow summary
  summaryLines: string[]; // one-line summaries of dropped items
  lastRun?: FollowupRun["run"]; // run context of the newest enqueue (used by summary-only drains)
};
/** Default quiet period before a drain starts (ms). */
export const DEFAULT_QUEUE_DEBOUNCE_MS = 1000;
/** Default maximum queued items per key. */
export const DEFAULT_QUEUE_CAP = 20;
/** Default overflow behavior when the cap is exceeded. */
export const DEFAULT_QUEUE_DROP: QueueDropPolicy = "summarize";
/** Registry of live queues, keyed by the caller-supplied queue key. */
export const FOLLOWUP_QUEUES = new Map<string, FollowupQueueState>();
/**
 * Fetch (or lazily create) the queue state for `key`, syncing the mutable
 * settings (mode, debounce, cap, drop policy) onto an existing queue.
 * Invalid numeric settings leave the existing values untouched and fall
 * back to the defaults for a newly created queue.
 */
export function getFollowupQueue(
  key: string,
  settings: QueueSettings,
): FollowupQueueState {
  // Sanitize numeric settings once; undefined means "not provided/invalid".
  const debounce =
    typeof settings.debounceMs === "number"
      ? Math.max(0, settings.debounceMs)
      : undefined;
  const cap =
    typeof settings.cap === "number" && settings.cap > 0
      ? Math.floor(settings.cap)
      : undefined;
  const existing = FOLLOWUP_QUEUES.get(key);
  if (existing) {
    existing.mode = settings.mode;
    if (debounce !== undefined) existing.debounceMs = debounce;
    if (cap !== undefined) existing.cap = cap;
    existing.dropPolicy = settings.dropPolicy ?? existing.dropPolicy;
    return existing;
  }
  const created: FollowupQueueState = {
    items: [],
    draining: false,
    lastEnqueuedAt: 0,
    mode: settings.mode,
    debounceMs: debounce ?? DEFAULT_QUEUE_DEBOUNCE_MS,
    cap: cap ?? DEFAULT_QUEUE_CAP,
    dropPolicy: settings.dropPolicy ?? DEFAULT_QUEUE_DROP,
    droppedCount: 0,
    summaryLines: [],
  };
  FOLLOWUP_QUEUES.set(key, created);
  return created;
}

View File

@@ -0,0 +1,89 @@
import type { SkillSnapshot } from "../../../agents/skills.js";
import type { ClawdbotConfig } from "../../../config/config.js";
import type { SessionEntry } from "../../../config/sessions.js";
import type { OriginatingChannelType } from "../../templating.js";
import type {
ElevatedLevel,
ReasoningLevel,
ThinkLevel,
VerboseLevel,
} from "../directives.js";
/** How an incoming message interacts with an in-flight agent run. */
export type QueueMode =
  | "steer"
  | "followup"
  | "collect"
  | "steer-backlog"
  | "interrupt"
  | "queue";
/** What to do at capacity: drop oldest, drop newest, or drop-and-summarize. */
export type QueueDropPolicy = "old" | "new" | "summarize";
/** Effective queue configuration after resolving inline/session/config layers. */
export type QueueSettings = {
  mode: QueueMode;
  debounceMs?: number; // quiet period before draining; undefined = default
  cap?: number; // max queued items; undefined = default
  dropPolicy?: QueueDropPolicy;
};
/** Strategy for detecting duplicate enqueues. */
export type QueueDedupeMode = "message-id" | "prompt" | "none";
/** A deferred agent invocation plus the context needed to route its reply. */
export type FollowupRun = {
  prompt: string;
  /** Provider message ID, when available (for deduplication). */
  messageId?: string;
  /** Optional pre-built one-line summary used when this run is dropped. */
  summaryLine?: string;
  enqueuedAt: number; // epoch ms when the run was queued
  /**
   * Originating channel for reply routing.
   * When set, replies should be routed back to this provider
   * instead of using the session's lastChannel.
   */
  originatingChannel?: OriginatingChannelType;
  /**
   * Originating destination for reply routing.
   * The chat/channel/user ID where the reply should be sent.
   */
  originatingTo?: string;
  /** Provider account id (multi-account). */
  originatingAccountId?: string;
  /** Telegram forum topic thread id. */
  originatingThreadId?: number;
  /** Snapshot of the agent/session context needed to execute the run later. */
  run: {
    agentId: string;
    agentDir: string;
    sessionId: string;
    sessionKey?: string;
    messageProvider?: string;
    agentAccountId?: string;
    sessionFile: string;
    workspaceDir: string;
    config: ClawdbotConfig;
    skillsSnapshot?: SkillSnapshot;
    provider: string;
    model: string;
    authProfileId?: string;
    thinkLevel?: ThinkLevel;
    verboseLevel?: VerboseLevel;
    reasoningLevel?: ReasoningLevel;
    elevatedLevel?: ElevatedLevel;
    // Elevated-bash capability flags for the run.
    bashElevated?: {
      enabled: boolean;
      allowed: boolean;
      defaultLevel: ElevatedLevel;
    };
    timeoutMs: number; // NOTE(review): presumably the run's wall-clock limit — confirm against runner
    blockReplyBreak: "text_end" | "message_end";
    ownerNumbers?: string[];
    extraSystemPrompt?: string;
    enforceFinalTag?: boolean;
  };
};
/** Inputs for resolveQueueSettings (inline overrides beat session/config). */
export type ResolveQueueSettingsParams = {
  cfg: ClawdbotConfig;
  channel?: string;
  sessionEntry?: SessionEntry;
  inlineMode?: QueueMode;
  inlineOptions?: Partial<QueueSettings>;
};