fix: keep media provider inventory internal

This commit is contained in:
mkdev11
2026-05-01 10:58:23 +02:00
parent bf7ac8d8c4
commit 1a139730f0
3 changed files with 68 additions and 70 deletions

View File

@@ -19,6 +19,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Discord/voice: run voice-channel turns under a voice-output policy that hides the agent `tts` tool and asks for spoken reply text, so `/vc join` sessions synthesize and play agent replies instead of ending with `NO_REPLY`. Fixes #61536. Thanks @aounakram.
- Agents/media: keep image and video provider inventory internal when tool output is hidden, so shared chat surfaces no longer expose provider/model/auth-hint details from list results. Fixes #75166. Thanks @MkDev11.
- Plugins/runtime-deps: prune legacy version-scoped plugin runtime-deps roots during bundled dependency repair and cover the path in Package Acceptance's upgrade-survivor matrix, so upgrades from 2026.4.x no longer leave stale per-plugin runtime trees after doctor runs. Thanks @vincentkoc.
- Plugins/runtime-deps: keep Gateway startup plugin imports and runtime plugin fallback loads verify-only after startup/config repair planning, so packaged installs no longer spawn package-manager repair from hot paths after readiness. Refs #75283 and #75069. Thanks @brokemac79 and @xiaohuaxi.
- Voice Call/realtime: add default-off fast memory/session context for `openclaw_agent_consult`, giving live calls a bounded answer-or-miss path before the full agent consult. Fixes #71849. Thanks @amzzzzzzz.

View File

@@ -142,6 +142,44 @@ async function handleCaseVariantBuiltinMedia(mediaPathOrUrl: string) {
return ctx;
}
// Canonical plain-text provider inventory ("<id>: default=… | models=…" per
// line) matching the structured `details.providers` payload used below.
const providerInventoryText =
  "openai: default=sora-2 | models=sora-2\n" +
  "google: default=veo-3.1-fast-generate-preview | models=veo-3.1-fast-generate-preview";
// Drives handleToolExecutionEnd with a canonical provider-inventory list
// result for the given media tool, using a mock context configured with the
// requested tool-output visibility. Returns the context for assertions.
async function handleProviderInventoryListResult(params: {
  toolName: "image_generate" | "video_generate";
  shouldEmitToolOutput: boolean;
}) {
  const { toolName, shouldEmitToolOutput } = params;
  const mockCtx = createMockContext({
    shouldEmitToolOutput,
    onToolResult: vi.fn(),
    toolResultFormat: "plain",
  });
  // Structured details mirror providerInventoryText so the handler can
  // recognize this as a compact provider-inventory list result.
  const providers = [
    { id: "openai", defaultModel: "sora-2", models: ["sora-2"] },
    {
      id: "google",
      defaultModel: "veo-3.1-fast-generate-preview",
      models: ["veo-3.1-fast-generate-preview"],
    },
  ];
  await handleToolExecutionEnd(mockCtx, {
    type: "tool_execution_end",
    toolName,
    toolCallId: "tc-1",
    isError: false,
    result: {
      content: [{ type: "text", text: providerInventoryText }],
      details: { providers },
    },
  });
  return mockCtx;
}
describe("handleToolExecutionEnd media emission", () => {
it("does not warn for read tool when path is provided via file_path alias", async () => {
const ctx = createMockContext();
@@ -424,52 +462,36 @@ describe("handleToolExecutionEnd media emission", () => {
expect(ctx.state.pendingToolMediaUrls).toEqual(["/tmp/generated.png"]);
});
it("emits provider inventory output for compact video_generate list results", async () => {
const ctx = createMockContext({
shouldEmitToolOutput: false,
onToolResult: vi.fn(),
toolResultFormat: "plain",
});
it.each(["image_generate", "video_generate"] as const)(
"keeps %s provider inventory internal when tool output is hidden",
async (toolName) => {
const ctx = await handleProviderInventoryListResult({
toolName,
shouldEmitToolOutput: false,
});
await handleToolExecutionEnd(ctx, {
type: "tool_execution_end",
toolName: "video_generate",
toolCallId: "tc-1",
isError: false,
result: {
content: [
{
type: "text",
text: [
"openai: default=sora-2 | models=sora-2",
"google: default=veo-3.1-fast-generate-preview | models=veo-3.1-fast-generate-preview",
].join("\n"),
},
],
details: {
providers: [
{ id: "openai", defaultModel: "sora-2", models: ["sora-2"] },
{
id: "google",
defaultModel: "veo-3.1-fast-generate-preview",
models: ["veo-3.1-fast-generate-preview"],
},
],
},
},
});
expect(ctx.emitToolOutput).not.toHaveBeenCalled();
expect(ctx.state.pendingToolMediaUrls).toEqual([]);
},
);
expect(ctx.emitToolOutput).toHaveBeenCalledWith(
"video_generate",
undefined,
[
"openai: default=sora-2 | models=sora-2",
"google: default=veo-3.1-fast-generate-preview | models=veo-3.1-fast-generate-preview",
].join("\n"),
expect.any(Object),
);
expect(ctx.state.pendingToolMediaUrls).toEqual([]);
});
// When verbose tool output is enabled, both media list tools should surface
// the provider inventory text verbatim and queue no media URLs.
it.each(["image_generate", "video_generate"] as const)(
  "emits %s provider inventory when verbose tool output is enabled",
  async (toolName) => {
    const ctx = await handleProviderInventoryListResult({
      toolName,
      shouldEmitToolOutput: true,
    });
    // Inventory text is emitted as-is; a list result carries no media.
    expect(ctx.emitToolOutput).toHaveBeenCalledWith(
      toolName,
      undefined,
      providerInventoryText,
      expect.any(Object),
    );
    expect(ctx.state.pendingToolMediaUrls).toEqual([]);
  },
);
it("does NOT emit media for error results", async () => {
const onToolResult = vi.fn();

View File

@@ -341,30 +341,6 @@ async function collectEmittedToolOutputMediaUrls(
return filterToolResultMediaUrls(toolName, mediaUrls, result);
}
// Media list tools whose provider-inventory results qualify for compact emission.
const COMPACT_PROVIDER_INVENTORY_TOOLS = new Set<string>([
  "image_generate",
  "video_generate",
]);
// True when a tool result carries a structured `providers` array in its
// details record — the marker of a provider-inventory list result.
function hasProviderInventoryDetails(result: unknown): boolean {
  if (typeof result !== "object" || result === null) {
    return false;
  }
  const detailsRecord = readToolResultDetailsRecord(result);
  return Array.isArray(detailsRecord?.providers);
}
// Decides whether a provider-inventory list result warrants compact emission:
// the tool must be a media list tool, the result must carry structured
// provider details, and the rendered output text must be non-blank.
function shouldEmitCompactToolOutput(params: {
  toolName: string;
  result: unknown;
  outputText?: string;
}): boolean {
  return (
    COMPACT_PROVIDER_INVENTORY_TOOLS.has(params.toolName) &&
    hasProviderInventoryDetails(params.result) &&
    Boolean(params.outputText?.trim())
  );
}
function readExecApprovalPendingDetails(result: unknown): {
approvalId: string;
approvalSlug: string;
@@ -540,8 +516,7 @@ async function emitToolResultOutput(params: {
isToolError,
hasDeliverableStructuredMedia: hasStructuredMedia && mediaUrls.length > 0,
builtinToolNames: ctx.builtinToolNames,
}) &&
(ctx.shouldEmitToolOutput() || shouldEmitCompactToolOutput({ toolName, result, outputText }));
}) && ctx.shouldEmitToolOutput();
if (shouldEmitOutput) {
if (outputText) {
ctx.emitToolOutput(rawToolName, meta, outputText, result);