diff --git a/src/agents/model-fallback.test.ts b/src/agents/model-fallback.test.ts index 3b842765e23..c99d0a9bed9 100644 --- a/src/agents/model-fallback.test.ts +++ b/src/agents/model-fallback.test.ts @@ -518,6 +518,35 @@ describe("runWithModelFallback", () => { } }); + it("sanitizes model identifiers in model_not_found warnings", async () => { + setLoggerOverride({ level: "silent", consoleLevel: "warn" }); + const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {}); + try { + const cfg = makeCfg(); + const run = vi + .fn() + .mockRejectedValueOnce(new Error("Model not found: openai/gpt-6")) + .mockResolvedValueOnce("ok"); + + const result = await runWithModelFallback({ + cfg, + provider: "openai", + model: "gpt-6\u001B[31m\nspoof", + run, + }); + + expect(result.result).toBe("ok"); + const warning = warnSpy.mock.calls[0]?.[0] as string; + expect(warning).toContain('Model "openai/gpt-6spoof" not found'); + expect(warning).not.toContain("\u001B"); + expect(warning).not.toContain("\n"); + } finally { + warnSpy.mockRestore(); + setLoggerOverride(null); + resetLogger(); + } + }); + it("skips providers when all profiles are in cooldown", async () => { await expectSkippedUnavailableProvider({ providerPrefix: "cooldown-test", diff --git a/src/agents/model-selection.test.ts b/src/agents/model-selection.test.ts index b0131b13017..ab3a421ca1b 100644 --- a/src/agents/model-selection.test.ts +++ b/src/agents/model-selection.test.ts @@ -472,6 +472,39 @@ describe("model-selection", () => { } }); + it("sanitizes control characters in providerless-model warnings", () => { + setLoggerOverride({ level: "silent", consoleLevel: "warn" }); + const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {}); + try { + const cfg: Partial<OpenClawConfig> = { + agents: { + defaults: { + model: { primary: "\u001B[31mclaude-3-5-sonnet\nspoof" }, + }, + }, + }; + + const result = resolveConfiguredModelRef({ + cfg: cfg as OpenClawConfig, + defaultProvider: "google", + defaultModel: 
"gemini-pro", + }); + + expect(result).toEqual({ + provider: "anthropic", + model: "\u001B[31mclaude-3-5-sonnet\nspoof", + }); + const warning = warnSpy.mock.calls[0]?.[0] as string; + expect(warning).toContain('Falling back to "anthropic/claude-3-5-sonnet"'); + expect(warning).not.toContain("\u001B"); + expect(warning).not.toContain("\n"); + } finally { + warnSpy.mockRestore(); + setLoggerOverride(null); + resetLogger(); + } + }); + it("should use default provider/model if config is empty", () => { const cfg: Partial = {}; const result = resolveConfiguredModelRef({ diff --git a/src/agents/pi-embedded-runner-extraparams.test.ts b/src/agents/pi-embedded-runner-extraparams.test.ts index a9280aff934..3b10c2d6dba 100644 --- a/src/agents/pi-embedded-runner-extraparams.test.ts +++ b/src/agents/pi-embedded-runner-extraparams.test.ts @@ -1429,6 +1429,32 @@ describe("applyExtraParamsToAgent", () => { expect(payload).not.toHaveProperty("store"); }); + it("keeps existing context_management when stripping store for supportsStore=false models", () => { + const payload = runResponsesPayloadMutationCase({ + applyProvider: "custom-openai-responses", + applyModelId: "gemini-2.5-pro", + model: { + api: "openai-responses", + provider: "custom-openai-responses", + id: "gemini-2.5-pro", + name: "gemini-2.5-pro", + baseUrl: "https://gateway.ai.cloudflare.com/v1/account/gateway/openai", + reasoning: false, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 1_000_000, + maxTokens: 65_536, + compat: { supportsStore: false }, + } as unknown as Model<"openai-responses">, + payload: { + store: false, + context_management: [{ type: "compaction", compact_threshold: 12_345 }], + }, + }); + expect(payload).not.toHaveProperty("store"); + expect(payload.context_management).toEqual([{ type: "compaction", compact_threshold: 12_345 }]); + }); + it("auto-injects OpenAI Responses context_management compaction for direct OpenAI models", () => { const payload 
= runResponsesPayloadMutationCase({ applyProvider: "openai", diff --git a/src/gateway/auth.test.ts b/src/gateway/auth.test.ts index f4efebf0339..1488b438237 100644 --- a/src/gateway/auth.test.ts +++ b/src/gateway/auth.test.ts @@ -434,6 +434,27 @@ describe("gateway auth", () => { ).toThrow(/provider reference object/); }); + it("accepts password mode when env provides OPENCLAW_GATEWAY_PASSWORD", () => { + const rawPasswordRef = { source: "exec", provider: "op", id: "pw" } as never; + const auth = resolveGatewayAuth({ + authConfig: { + mode: "password", + password: rawPasswordRef, + }, + env: { + OPENCLAW_GATEWAY_PASSWORD: "env-password", + } as NodeJS.ProcessEnv, + }); + + expect(auth.password).toBe("env-password"); + expect(() => + assertGatewayAuthConfigured(auth, { + mode: "password", + password: rawPasswordRef, + }), + ).not.toThrow(); + }); + it("throws generic error when password mode has no password at all", () => { const auth = resolveGatewayAuth({ authConfig: { mode: "password" } }); expect(() => assertGatewayAuthConfigured(auth, { mode: "password" })).toThrow( diff --git a/src/infra/outbound/outbound.test.ts b/src/infra/outbound/outbound.test.ts index c6a32522a92..960c30625ac 100644 --- a/src/infra/outbound/outbound.test.ts +++ b/src/infra/outbound/outbound.test.ts @@ -114,6 +114,19 @@ describe("delivery-queue", () => { await expect(ackDelivery("nonexistent-id", tmpDir)).resolves.toBeUndefined(); }); + it("ack cleans up leftover .delivered marker when .json is already gone", async () => { + const id = await enqueueDelivery( + { channel: "whatsapp", to: "+1", payloads: [{ text: "stale-marker" }] }, + tmpDir, + ); + const queueDir = path.join(tmpDir, "delivery-queue"); + + fs.renameSync(path.join(queueDir, `${id}.json`), path.join(queueDir, `${id}.delivered`)); + await expect(ackDelivery(id, tmpDir)).resolves.toBeUndefined(); + + expect(fs.existsSync(path.join(queueDir, `${id}.delivered`))).toBe(false); + }); + it("ack removes .delivered marker so recovery 
does not replay", async () => { const id = await enqueueDelivery( { channel: "whatsapp", to: "+1", payloads: [{ text: "ack-test" }] }, diff --git a/src/telegram/bot-message.test.ts b/src/telegram/bot-message.test.ts index 1837e6861f1..4a745cbbe47 100644 --- a/src/telegram/bot-message.test.ts +++ b/src/telegram/bot-message.test.ts @@ -97,4 +97,28 @@ describe("telegram bot message processor", () => { ); expect(runtimeError).toHaveBeenCalledWith(expect.stringContaining("dispatch exploded")); }); + + it("swallows fallback delivery failures after dispatch throws", async () => { + const sendMessage = vi.fn().mockRejectedValue(new Error("blocked by user")); + const runtimeError = vi.fn(); + buildTelegramMessageContext.mockResolvedValue({ + chatId: 123, + route: { sessionKey: "agent:main:main" }, + }); + dispatchTelegramMessage.mockRejectedValue(new Error("dispatch exploded")); + + const processMessage = createTelegramMessageProcessor({ + ...baseDeps, + bot: { api: { sendMessage } }, + runtime: { error: runtimeError }, + } as unknown as Parameters[0]); + await expect(processSampleMessage(processMessage)).resolves.toBeUndefined(); + + expect(sendMessage).toHaveBeenCalledWith( + 123, + "Something went wrong while processing your request. Please try again.", + undefined, + ); + expect(runtimeError).toHaveBeenCalledWith(expect.stringContaining("dispatch exploded")); + }); });