fix(cli): fail empty local model probes

Author: Peter Steinberger
Date: 2026-04-27 23:16:29 +01:00
Parent: 81390c643b
Commit: 48e91f09d5
4 changed files with 40 additions and 13 deletions

View File

@@ -16,6 +16,7 @@ Docs: https://docs.openclaw.ai
 ### Fixes
+- CLI/model probes: fail local `infer model run` probes when the provider returns no text output, so unreachable local providers and empty completions no longer look like successful smoke tests. Refs #73023. Thanks @pavelyortho-cyber.
 - CLI/Ollama: run local `infer model run` through the lean provider completion path and skip global model discovery for one-shot local probes, so Ollama smoke tests no longer pay full chat-agent/tool startup cost or hang before the native `/api/chat` request. Fixes #72851. Thanks @TotalRes2020.
 - Daemon/service: only emit hard-coded version-manager paths such as `~/.volta/bin`, `~/.asdf/shims`, `~/.bun/bin`, and fnm/pnpm fallbacks into gateway and node service PATHs when the directories exist, so `openclaw doctor` no longer flags `gateway.path.non-minimal` against a PATH the daemon just wrote. Env-driven roots and stable user-bin dirs remain unconditional. Fixes #71944; carries forward #71964. Thanks @Sanjays2402.
 - Channels/commands: make generated `/dock-*` commands switch the active session reply route through `session.identityLinks` instead of falling through to normal chat. Fixes #69206; carries forward #73033. Thanks @clawbones and @michaelatamuk.
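
The daemon/service entry hinges on checking that a directory exists before it is written into a service PATH. A minimal sketch of that idea, using a hypothetical helper name that does not appear in this commit:

```ts
import { existsSync } from "node:fs";
import { homedir } from "node:os";
import { join } from "node:path";

// Hypothetical helper: keep only the hard-coded version-manager dirs that
// actually exist, so the emitted PATH stays minimal on machines without them.
function existingVersionManagerDirs(): string[] {
  return [".volta/bin", ".asdf/shims", ".bun/bin"]
    .map((rel) => join(homedir(), rel))
    .filter((dir) => existsSync(dir));
}
```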

View File

@@ -159,6 +159,7 @@ openclaw infer model run --local --model openai/gpt-4.1 --prompt "Reply with exa
 Notes:
 - Local `model run` is the narrowest CLI smoke test for provider/model/auth health because it sends only the supplied prompt to the selected model.
+- Local `model run` exits non-zero when the provider returns no text output, so unreachable local providers and empty completions do not look like successful probes.
 - Use `model run --gateway` when you need to test Gateway routing, agent-runtime setup, or Gateway-managed provider state instead of the lean local completion path.
 - `model auth login`, `model auth logout`, and `model auth status` manage saved provider auth state.
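
Because the probe's exit code is now trustworthy, a script can gate on it directly. A hedged sketch of such a wrapper, reusing the command line from the docs example above (the wrapper itself is illustrative, not part of this commit):

```ts
import { spawnSync } from "node:child_process";

// Illustrative smoke-test wrapper: after this fix, exit 0 implies the
// provider actually returned text, so a green probe is meaningful.
const probe = spawnSync(
  "openclaw",
  ["infer", "model", "run", "--local", "--model", "openai/gpt-4.1", "--prompt", "Reply with exactly: OK"],
  { encoding: "utf8" },
);
if (probe.status !== 0) {
  console.error("local model probe failed:", probe.stderr.trim());
  process.exit(1);
}
```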

View File

@@ -419,6 +419,24 @@ describe("capability cli", () => {
     );
   });
+  it("fails local model probes when the provider returns no text output", async () => {
+    mocks.completeWithPreparedSimpleCompletionModel.mockResolvedValueOnce({
+      content: [],
+    } as never);
+    await expect(
+      runRegisteredCli({
+        register: registerCapabilityCli as (program: Command) => void,
+        argv: ["capability", "model", "run", "--prompt", "hello", "--json"],
+      }),
+    ).rejects.toThrow("exit 1");
+    expect(mocks.runtime.error).toHaveBeenCalledWith(
+      expect.stringContaining('No text output returned for provider "openai" model "gpt-5.4"'),
+    );
+    expect(mocks.runtime.writeJson).not.toHaveBeenCalled();
+  });
   it("runs gateway model probes without chat-agent prompt policy or tools", async () => {
     await runRegisteredCli({
       register: registerCapabilityCli as (program: Command) => void,

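For contrast with the new failing-probe test, a happy-path counterpart could assert the JSON envelope instead. The following is an assumed sketch that mirrors the mock shape above; it is not taken from the repository:

```ts
it("writes the JSON envelope when the provider returns text", async () => {
  // Assumed mock shape, mirrored from the empty-content test above.
  mocks.completeWithPreparedSimpleCompletionModel.mockResolvedValueOnce({
    content: [{ type: "text", text: "OK" }],
  } as never);
  await runRegisteredCli({
    register: registerCapabilityCli as (program: Command) => void,
    argv: ["capability", "model", "run", "--prompt", "hello", "--json"],
  });
  expect(mocks.runtime.writeJson).toHaveBeenCalledWith(
    expect.objectContaining({ ok: true, capability: "model.run" }),
  );
  expect(mocks.runtime.error).not.toHaveBeenCalled();
});
```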
View File

@@ -112,7 +112,7 @@ type CapabilityEnvelope = {
 const CAPABILITY_METADATA: CapabilityMetadata[] = [
   {
     id: "model.run",
-    description: "Run a one-shot text inference turn through the agent runtime.",
+    description: "Run a one-shot text inference turn through the selected model provider.",
     transports: ["local", "gateway"],
     flags: ["--prompt", "--model", "--local", "--gateway", "--json"],
     resultShape: "normalized payloads plus provider/model attribution",
@@ -570,6 +570,13 @@ function requireProviderModelOverride(
   };
 }

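+// Joins the text blocks of a completion result, ignoring non-text blocks,
+// and trims so that whitespace-only output counts as empty.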
+function collectModelRunText(content: Array<{ type: string; text?: string }>): string {
+  return content
+    .map((block) => (block.type === "text" && typeof block.text === "string" ? block.text : ""))
+    .join("")
+    .trim();
+}
+
 async function runModelRun(params: {
   prompt: string;
   model?: string;
@@ -607,10 +614,12 @@ async function runModelRun(params: {
         : undefined,
     },
   });
-  const text = result.content
-    .map((block) => (block.type === "text" ? block.text : ""))
-    .join("")
-    .trim();
+  const text = collectModelRunText(result.content);
+  if (!text) {
+    throw new Error(
+      `No text output returned for provider "${prepared.selection.provider}" model "${prepared.selection.modelId}".`,
+    );
+  }
   return {
     ok: true,
     capability: "model.run",
@@ -618,14 +627,12 @@ async function runModelRun(params: {
     provider: prepared.selection.provider,
     model: prepared.selection.modelId,
     attempts: [],
-    outputs: text
-      ? [
-          {
-            text,
-            mediaUrl: null,
-          },
-        ]
-      : [],
+    outputs: [
+      {
+        text,
+        mediaUrl: null,
+      },
+    ],
   } satisfies CapabilityEnvelope;
 }
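
Taken together, the helper and the guard require at least one non-empty text block before `model.run` reports success. A quick illustration of the expected behavior (assumed examples, not an excerpt from the repository):

```ts
// Per the implementation above: only text blocks contribute, and the result is trimmed.
collectModelRunText([
  { type: "text", text: "Hello " },
  { type: "tool_use" }, // non-text block contributes nothing
  { type: "text", text: "world" },
]); // => "Hello world"

collectModelRunText([{ type: "text", text: "   " }]); // => "" after trim
collectModelRunText([]); // => ""
// Both empty cases now make runModelRun throw:
// No text output returned for provider "..." model "...".
```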