fix(agents): skip empty embedded prompts

This commit is contained in:
Peter Steinberger
2026-04-25 01:39:21 +01:00
parent ae57a7998e
commit 8a0cb03300
4 changed files with 99 additions and 0 deletions

View File

@@ -26,6 +26,7 @@ Docs: https://docs.openclaw.ai
- Control UI/Codex harness: emit native Codex app-server assistant and lifecycle completion events so live webchat runs stop spinning without needing a transcript reload fallback. (#70815) Thanks @lesaai.
- Agents/sessions: persist the runtime-resolved context budget from embedded agent runs, so Codex GPT-5.5 sessions keep the catalog/runtime context cap instead of falling back to the generic 200k status value. Fixes #71294. Thanks @tud0r.
- Agents/tools: fail runs before model submission when explicit tool allowlists resolve to no callable tools, preventing text-only hallucinated tool results for missing tools such as plugin commands that were not registered. Fixes #71292.
- Agents/embedded: skip provider submission when an embedded run has no prompt, replay history, or prompt-local images, preventing empty OpenAI Responses requests from surfacing provider errors into user channels. Fixes #71130.
- Providers/MiniMax: keep M2.7 chat model metadata text-only so image tool requests route through `MiniMax-VL-01` instead of the Anthropic-compatible chat endpoint. Fixes #71296. Thanks @ilker-cevikkaya.
- Discord/replies: run `message_sending` plugin hooks for Discord reply delivery, including DM targets, so plugins can transform or cancel outbound Discord replies consistently with other channels. Fixes #59350. (#71094) Thanks @wei840222.
- Control UI/commands: carry provider-owned thinking option ids/labels in session rows and defaults so fresh sessions show and accept dynamic modes such as `adaptive`, `xhigh`, and `max`. Fixes #71269. Thanks @Young-Khalil.

View File

@@ -0,0 +1,64 @@
import type { AgentMessage } from "@mariozechner/pi-agent-core";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import {
cleanupTempPaths,
createContextEngineAttemptRunner,
createContextEngineBootstrapAndAssemble,
resetEmbeddedAttemptHarness,
} from "./attempt.spawn-workspace.test-support.js";
describe("runEmbeddedAttempt empty prompt guard", () => {
  // Temp dirs created by the attempt harness; cleaned up after every test.
  const createdTempPaths: string[] = [];

  beforeEach(() => {
    resetEmbeddedAttemptHarness();
  });

  afterEach(async () => {
    await cleanupTempPaths(createdTempPaths);
    vi.restoreAllMocks();
  });

  it("skips provider submission when prompt, history, and images are empty", async () => {
    const promptSpy = vi.fn(async () => {});
    const { assemble } = createContextEngineBootstrapAndAssemble();

    // Whitespace-only prompt, no session history, no images → the guard
    // should short-circuit before the provider is ever invoked.
    const result = await createContextEngineAttemptRunner({
      contextEngine: { assemble },
      sessionKey: "agent:main:guildchat:dm:empty-prompt",
      tempPaths: createdTempPaths,
      sessionMessages: [],
      sessionPrompt: promptSpy,
      attemptOverrides: { prompt: " " },
    });

    expect(promptSpy).not.toHaveBeenCalled();
    expect(result.promptError).toBeNull();
    expect(result.finalPromptText).toBeUndefined();
    expect(result.messagesSnapshot).toEqual([]);
    expect(result.assistantTexts).toEqual([]);
  });

  it("still submits a blank prompt when replay history has content", async () => {
    const promptSpy = vi.fn(async () => {});
    const { assemble } = createContextEngineBootstrapAndAssemble();
    const history = [
      { role: "user", content: "previous turn", timestamp: 1 },
    ] as AgentMessage[];

    // Non-empty replay history counts as submission content, so the blank
    // prompt must still reach the provider exactly once.
    await createContextEngineAttemptRunner({
      contextEngine: { assemble },
      sessionKey: "agent:main:guildchat:dm:empty-prompt-with-history",
      tempPaths: createdTempPaths,
      sessionMessages: history,
      sessionPrompt: promptSpy,
      attemptOverrides: { prompt: " " },
    });

    expect(promptSpy).toHaveBeenCalledTimes(1);
  });
});

View File

@@ -236,6 +236,7 @@ vi.mock("../../../infra/machine-name.js", () => ({
}));
vi.mock("../../../infra/net/undici-global-dispatcher.js", () => ({
DEFAULT_UNDICI_STREAM_TIMEOUT_MS: 120_000,
ensureGlobalUndiciEnvProxyDispatcher: (...args: unknown[]) =>
hoisted.ensureGlobalUndiciEnvProxyDispatcherMock(...args),
ensureGlobalUndiciStreamTimeouts: (...args: unknown[]) =>
@@ -281,6 +282,8 @@ vi.mock("../../pi-project-settings.js", () => ({
createPreparedEmbeddedPiSettingsManager: () => ({
getCompactionReserveTokens: () => 0,
getCompactionKeepRecentTokens: () => 40_000,
getGlobalSettings: () => ({}),
getProjectSettings: () => ({}),
applyOverrides: () => {},
setCompactionEnabled: () => {},
}),
@@ -302,6 +305,7 @@ vi.mock("../extensions.js", () => ({
}));
vi.mock("../replay-history.js", () => ({
normalizeAssistantReplayContent: <T>(messages: T) => messages,
sanitizeSessionHistory: async ({ messages }: { messages: unknown[] }) => messages,
validateReplayTurns: async ({ messages }: { messages: unknown[] }) => messages,
}));

View File

@@ -453,6 +453,14 @@ function summarizeSessionContext(messages: AgentMessage[]): {
};
}
/**
 * Reports whether an embedded attempt has anything worth sending to the
 * provider: a prompt with non-whitespace text, replayable session history,
 * or at least one prompt-local image. Used to guard against submitting an
 * entirely empty request.
 */
function hasPromptSubmissionContent(params: {
  prompt: string;
  messages: readonly AgentMessage[];
  imageCount: number;
}): boolean {
  const hasPromptText = params.prompt.trim() !== "";
  const hasHistory = params.messages.length > 0;
  const hasImages = params.imageCount > 0;
  return hasPromptText || hasHistory || hasImages;
}
export function applyEmbeddedAttemptToolsAllow<T extends { name: string }>(
tools: T[],
toolsAllow?: string[],
@@ -2373,6 +2381,28 @@ export async function runEmbeddedAttempt(
transcriptLeafId,
});
if (
!skipPromptSubmission &&
!hasPromptSubmissionContent({
prompt: effectivePrompt,
messages: activeSession.messages,
imageCount: imageResult.images.length,
})
) {
skipPromptSubmission = true;
log.info(
`embedded run prompt skipped: empty prompt/history/images ` +
`runId=${params.runId} sessionId=${params.sessionId} trigger=${params.trigger} ` +
`provider=${params.provider}/${params.modelId}`,
);
trajectoryRecorder?.recordEvent("prompt.skipped", {
reason: "empty_prompt_history_images",
prompt: effectivePrompt,
messages: activeSession.messages,
imagesCount: imageResult.images.length,
});
}
// Diagnostic: log context sizes before prompt to help debug early overflow errors.
if (log.isEnabled("debug")) {
const msgCount = activeSession.messages.length;