Mirror of https://github.com/openclaw/openclaw.git (synced 2026-05-06 07:50:43 +00:00)
fix(ollama): preserve aborts with stream timeouts
This commit is contained in:
@@ -16,6 +16,7 @@ Docs: https://docs.openclaw.ai

### Fixes

- Ollama: compose caller abort signals with guarded-fetch timeouts for native `/api/chat` streams, so `/stop` and early cancellation still interrupt local Ollama requests that also carry provider timeout budgets. Refs #74133. Thanks @obviyus.
- CLI/logs: fall back to the configured Gateway file log when implicit loopback Gateway connections close or time out before or during `logs.tail`, so `openclaw logs` still works while diagnosing local-model Gateway disconnects. Refs #74078. Thanks @sakalaboator.
- MCP/plugins: stringify non-array plugin tool results with chat-content coercion instead of default object stringification, so MCP callers receive useful JSON/text content from plugin tools. Thanks @vincentkoc.
- Active Memory/QMD: run QMD boot refresh through a one-shot subprocess path, preserve interactive file watching, and align watcher dependency/build ignores with QMD's scanner so gateway startup avoids arming long-lived QMD watchers. Thanks @codexGW.
@@ -23,6 +23,7 @@ type GuardedFetchCall = {
|
||||
url: string;
|
||||
init?: RequestInit;
|
||||
policy?: unknown;
|
||||
signal?: AbortSignal;
|
||||
timeoutMs?: number;
|
||||
auditContext?: string;
|
||||
};
|
||||
@@ -333,6 +334,29 @@ describe("createConfiguredOllamaCompatStreamWrapper", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("passes caller abort signals at guard level when a timeout is present", async () => {
|
||||
await withMockNdjsonFetch(
|
||||
[
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":"ok"},"done":false}',
|
||||
'{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":1}',
|
||||
],
|
||||
async (fetchMock) => {
|
||||
const signal = new AbortController().signal;
|
||||
const stream = await createOllamaTestStream({
|
||||
baseUrl: "http://ollama-host:11434",
|
||||
options: { signal, timeoutMs: 123_456 },
|
||||
});
|
||||
|
||||
await collectStreamEvents(stream);
|
||||
|
||||
const request = getGuardedFetchCall(fetchMock);
|
||||
expect(request.timeoutMs).toBe(123_456);
|
||||
expect(request.signal).toBe(signal);
|
||||
expect(request.init?.signal).toBeUndefined();
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
it("maps native Ollama max thinking to think=high on the wire", async () => {
|
||||
await withMockNdjsonFetch(
|
||||
[
|
||||
@@ -1018,6 +1042,7 @@ async function createOllamaTestStream(params: {
|
||||
maxTokens?: number;
|
||||
temperature?: number;
|
||||
signal?: AbortSignal;
|
||||
timeoutMs?: number;
|
||||
headers?: Record<string, string>;
|
||||
};
|
||||
}) {
|
||||
@@ -1401,8 +1426,9 @@ describe("createOllamaStreamFn", () => {
|
||||
const request = getGuardedFetchCall(fetchMock);
|
||||
expect(request.url).toBe("http://ollama-host:11434/api/chat");
|
||||
 expect(request.auditContext).toBe("ollama-stream.chat");
+expect(request.signal).toBe(signal);
 const requestInit = request.init ?? {};
-expect(requestInit.signal).toBe(signal);
+expect(requestInit.signal).toBeUndefined();
|
||||
if (typeof requestInit.body !== "string") {
|
||||
throw new Error("Expected string request body");
|
||||
}
|
||||
|
||||
@@ -1000,9 +1000,9 @@ export function createOllamaStreamFn(
|
||||
method: "POST",
|
||||
headers,
|
||||
   body: JSON.stringify(body),
-  signal: options?.signal,
 },
 policy: ssrfPolicy,
+...(options?.signal ? { signal: options.signal } : {}),
 timeoutMs: resolveOllamaRequestTimeoutMs(
|
||||
model,
|
||||
options as { requestTimeoutMs?: unknown; timeoutMs?: unknown } | undefined,
|
||||
|
||||
Reference in New Issue
Block a user