fix(models): guard provider policy model shape

This commit is contained in:
Peter Steinberger
2026-04-29 03:16:35 +01:00
parent b5a90b066d
commit f4c9e71e4e
3 changed files with 31 additions and 1 deletions

View File

@@ -71,6 +71,7 @@ Docs: https://docs.openclaw.ai
- CLI/image describe: pass `--prompt` and `--timeout-ms` through `infer image describe` and `describe-many`, so custom vision instructions and slow local model budgets reach media-understanding providers such as Ollama, OpenAI, Google, and OpenRouter. Refs #63700. Thanks @cedricjanssens.
- Model selection: include the rejected provider/model ref and allowlist recovery hint when a stored session override is cleared, so local model selections such as Gemma GGUF variants do not fall back to the default with a generic message. Refs #71069. Thanks @CyberRaccoonTeam.
- Local model prompt caching: keep stable Project Context above volatile channel/session prompt guidance and stop embedding current channel names in the message tool description, so Ollama, MLX, llama.cpp, and other prefix-cache backends avoid unnecessary full prompt reprocessing across channel turns. Fixes #40256; supersedes #40296. Thanks @rhclaw and @sriram369.
- Gateway/OpenAI-compatible API: guard provider policy lookup against runtime providers with non-array `models` values, so `/v1/chat/completions` no longer fails with `provider?.models?.some is not a function`. Fixes #66744; carries forward #66761. Thanks @MightyMoud, @MukundaKatta.
- WhatsApp/Web: pass explicit Baileys socket timings into every WhatsApp Web socket and expose `web.whatsapp.*` keepalive, connect, and query timeout settings so unstable networks can avoid repeated 408 disconnect and opening-handshake timeout loops. Fixes #56365. (#73580) Thanks @velvet-shark.
- Channels/Telegram: persist native command metadata on target sessions so topic, helper, and ACP-bound slash commands keep their session metadata attached to the routed conversation. (#57548) Thanks @GaosCode.
- Channels/native commands: keep validated native slash command replies visible in group chats while preserving explicit owner allowlists for command authorization. (#73672) Thanks @obviyus.

View File

@@ -49,4 +49,31 @@ describe("resolveProviderPluginLookupKey", () => {
}),
).toBe("google");
});
// Regression guard: runtime plugin data may deliver `models` as a keyed object
// map rather than an array; the lookup must tolerate that shape without crashing.
it("does not throw when runtime provider models is an object map", () => {
  const runtimeProvider = {
    baseUrl: "https://openrouter.ai/api/v1",
    models: { "some/model": { api: "openai-completions" } } as never,
  };
  const lookup = () => resolveProviderPluginLookupKey("openrouter", runtimeProvider);
  expect(lookup).not.toThrow();
});
// Regression guard: a runtime provider with no `models` field at all must not
// blow up inside the lookup.
it("does not throw when runtime provider models is undefined", () => {
  const runtimeProvider = {
    baseUrl: "https://openrouter.ai/api/v1",
    models: undefined as never,
  };
  const lookup = () => resolveProviderPluginLookupKey("openrouter", runtimeProvider);
  expect(lookup).not.toThrow();
});
// When `models` is present but not an array, the lookup must skip the
// per-model inspection and resolve to the provider key itself.
it("falls through to the provider key when runtime provider models is non-array", () => {
  const runtimeProvider = {
    baseUrl: "https://openrouter.ai/api/v1",
    models: { some: "garbage" } as never,
  };
  const resolved = resolveProviderPluginLookupKey("openrouter", runtimeProvider);
  expect(resolved).toBe("openrouter");
});
});

View File

@@ -21,8 +21,10 @@ export function resolveProviderPluginLookupKey(
) {
return "google";
}
// Runtime plugin data can be looser than ProviderConfig; guard before .some().
if (
provider?.models?.some((model) => normalizeOptionalString(model.api) === "google-generative-ai")
Array.isArray(provider?.models) &&
provider.models.some((model) => normalizeOptionalString(model.api) === "google-generative-ai")
) {
return "google";
}