fix(ollama): enable streaming usage for openai-compat (#66439)

* fix(ollama): enable streaming usage for openai-compat

* Update CHANGELOG.md
Author: Vincent Koc
Date: 2026-04-14 09:57:42 +01:00 (committed by GitHub)
parent b90d4ea3d7
commit 4f15d77ecc
4 changed files with 67 additions and 2 deletions

CHANGELOG.md

@@ -16,6 +16,7 @@ Docs: https://docs.openclaw.ai
 - Agents/gateway-tool: reject `config.patch` and `config.apply` calls from the model-facing gateway tool when they would newly enable any flag enumerated by `openclaw security audit` (for example `dangerouslyDisableDeviceAuth`, `allowInsecureAuth`, `dangerouslyAllowHostHeaderOriginFallback`, `hooks.gmail.allowUnsafeExternalContent`, `tools.exec.applyPatch.workspaceOnly: false`); already-enabled flags pass through unchanged so non-dangerous edits in the same patch still apply, and direct authenticated operator RPC behavior is unchanged. (#62006) Thanks @eleqtrizit.
 - Telegram/forum topics: persist learned topic names to the Telegram session sidecar store so agent context can keep using human topic names after a restart instead of relearning from future service metadata. (#66107) Thanks @obviyus.
 - Doctor/systemd: keep `openclaw doctor --repair` and service reinstall from re-embedding dotenv-backed secrets in user systemd units, while preserving newer inline overrides over stale state-dir `.env` values. (#66249) Thanks @tmimmanuel.
+- Ollama/OpenAI-compat: send `stream_options.include_usage` for Ollama streaming completions so local Ollama runs report real usage instead of falling back to bogus prompt-token counts that trigger premature compaction. (#64568) Thanks @xchunzhao and @vincentkoc.
 - Doctor/plugins: cache external `preferOver` catalog lookups within each plugin auto-enable pass so large `agents.list` configs no longer peg CPU and repeatedly reread plugin catalogs during doctor/plugins resolution. (#66246) Thanks @yfge.
 - Agents/local models: clarify low-context preflight hints for self-hosted models, point config-backed caps at the relevant OpenClaw setting, and stop suggesting larger models when `agents.defaults.contextTokens` is the real limit. (#66236) Thanks @ImLukeF.
 - Browser/SSRF: restore hostname navigation under the default browser SSRF policy while keeping explicit strict mode reachable from config, and keep managed loopback CDP `/json/new` fallback requests on the local CDP control policy so browser follow-up fixes stop regressing normal navigation or self-blocking local CDP control. (#66386) Thanks @obviyus.
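
The Ollama entry above reduces to a one-field change in the streaming request. A minimal sketch of the fixed request shape, assuming a stock local Ollama server, Node 18+ for global `fetch`, and an illustrative model name (this is not OpenClaw's actual client code):

```ts
// Streaming chat completion against Ollama's OpenAI-compat endpoint.
const res = await fetch("http://127.0.0.1:11434/v1/chat/completions", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "qwen2.5:7b", // illustrative
    messages: [{ role: "user", content: "hello" }],
    stream: true,
    // The fix: ask for a trailing usage chunk instead of estimating tokens.
    stream_options: { include_usage: true },
  }),
});

// Dump the raw SSE stream; a real client parses each `data:` line as JSON.
// With include_usage set, the final chunk before [DONE] carries an empty
// `choices` array plus real counts, e.g.
// { "choices": [], "usage": { "prompt_tokens": 12, "completion_tokens": 34, "total_tokens": 46 } }
for await (const chunk of res.body as unknown as AsyncIterable<Uint8Array>) {
  process.stdout.write(new TextDecoder().decode(chunk));
}
```

Without `stream_options.include_usage`, the stream carries only content deltas, so the client has to guess prompt size; that guess is the "bogus prompt-token counts" the entry describes as triggering premature compaction.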

New tests for resolveOpenAICompletionsCompatDefaults

@@ -0,0 +1,34 @@
+import { describe, expect, it } from "vitest";
+import { resolveOpenAICompletionsCompatDefaults } from "./openai-completions-compat.js";
+
+describe("resolveOpenAICompletionsCompatDefaults", () => {
+  it("enables streaming usage for local ollama OpenAI-compat endpoints", () => {
+    expect(
+      resolveOpenAICompletionsCompatDefaults({
+        provider: "ollama",
+        endpointClass: "local",
+        knownProviderFamily: "ollama",
+      }).supportsUsageInStreaming,
+    ).toBe(true);
+  });
+
+  it("keeps streaming usage enabled for custom ollama OpenAI-compat endpoints", () => {
+    expect(
+      resolveOpenAICompletionsCompatDefaults({
+        provider: "ollama",
+        endpointClass: "custom",
+        knownProviderFamily: "ollama",
+      }).supportsUsageInStreaming,
+    ).toBe(true);
+  });
+
+  it("does not broaden streaming usage for generic custom providers", () => {
+    expect(
+      resolveOpenAICompletionsCompatDefaults({
+        provider: "custom-cpa",
+        endpointClass: "custom",
+        knownProviderFamily: "custom-cpa",
+      }).supportsUsageInStreaming,
+    ).toBe(false);
+  });
+});

openai-completions-compat.ts

@@ -33,6 +33,7 @@ export function resolveOpenAICompletionsCompatDefaults(
   input: OpenAICompletionsCompatDefaultsInput,
 ): OpenAICompletionsCompatDefaults {
   const {
+    provider,
     endpointClass,
     knownProviderFamily,
     supportsNativeStreamingUsageCompat = false,
@@ -64,7 +65,8 @@ export function resolveOpenAICompletionsCompatDefaults(
     endpointClass === "chutes-native" ||
     endpointClass === "mistral-public" ||
     knownProviderFamily === "mistral" ||
-    (isDefaultRoute && isDefaultRouteProvider(input.provider, "chutes"));
+    (isDefaultRoute && isDefaultRouteProvider(provider, "chutes"));
+  const isOllamaCompatProvider = provider === "ollama";
   return {
     supportsStore:
@@ -76,7 +78,8 @@ export function resolveOpenAICompletionsCompatDefaults(
       endpointClass !== "xai-native" &&
       !usesExplicitProxyLikeEndpoint,
     supportsUsageInStreaming:
-      !isNonStandard && (!usesConfiguredNonOpenAIEndpoint || supportsNativeStreamingUsageCompat),
+      isOllamaCompatProvider ||
+      (!isNonStandard && (!usesConfiguredNonOpenAIEndpoint || supportsNativeStreamingUsageCompat)),
     maxTokensField: usesMaxTokens ? "max_tokens" : "max_completion_tokens",
     thinkingFormat: isZai ? "zai" : isOpenRouterLike ? "openrouter" : "openai",
     supportsStrictMode: !isZai && !usesConfiguredNonOpenAIEndpoint,
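
Downstream of this helper, the param builder only needs to read the resolved flag. A sketch of that wiring, reusing the inputs from the unit tests above; the real call site is `buildOpenAICompletionsParams` (exercised in the next file), and the gating shown here is an assumption about how the flag is consumed:

```ts
import { resolveOpenAICompletionsCompatDefaults } from "./openai-completions-compat.js";

// Resolve compat defaults exactly as the new unit tests do.
const defaults = resolveOpenAICompletionsCompatDefaults({
  provider: "ollama",
  endpointClass: "local",
  knownProviderFamily: "ollama",
});

// Ollama now short-circuits to true via isOllamaCompatProvider; every other
// provider still goes through the isNonStandard / endpoint checks.
const streamParams = {
  model: "qwen2.5:7b", // illustrative model id
  stream: true,
  ...(defaults.supportsUsageInStreaming
    ? { stream_options: { include_usage: true } }
    : {}),
};
```

Keeping the override as a plain `provider === "ollama"` check, rather than widening the generic custom-provider path, is what the third unit test locks in: `custom-cpa` still resolves `supportsUsageInStreaming: false`.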

Tests for the "openai transport stream" suite

@@ -1196,6 +1196,33 @@ describe("openai transport stream", () => {
     expect(params.stream_options).toMatchObject({ include_usage: true });
   });
+
+  it("enables streaming usage compat for Ollama OpenAI-compat endpoints", () => {
+    const params = buildOpenAICompletionsParams(
+      {
+        id: "qwen2.5:7b",
+        name: "Qwen 2.5 7B",
+        api: "openai-completions",
+        provider: "ollama",
+        baseUrl: "http://127.0.0.1:11434/v1",
+        reasoning: true,
+        input: ["text"],
+        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+        contextWindow: 32768,
+        maxTokens: 8192,
+      } satisfies Model<"openai-completions">,
+      {
+        systemPrompt: "system",
+        messages: [],
+        tools: [],
+      } as never,
+      undefined,
+    ) as {
+      stream_options?: { include_usage?: boolean };
+    };
+    expect(params.stream_options).toMatchObject({ include_usage: true });
+  });
+
   it("disables developer-role-only compat defaults for configured custom proxy completions providers", () => {
     const params = buildOpenAICompletionsParams(
       {