From 678ed5d512cc8b090701a414a056401fcd6df356 Mon Sep 17 00:00:00 2001 From: Peter Steinberger Date: Sat, 25 Apr 2026 04:23:17 +0100 Subject: [PATCH] fix(deepseek): normalize V4 tool-call replay --- CHANGELOG.md | 1 + docs/providers/deepseek.md | 6 +++ extensions/deepseek/index.test.ts | 90 +++++++++++++++++++++++++++++++ extensions/deepseek/stream.ts | 19 +++++++ 4 files changed, 116 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5b095e9ef17..39a630fd502 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -70,6 +70,7 @@ Docs: https://docs.openclaw.ai ### Fixes - Agents/Gemini: retry reasoning-only, empty, and planning-only Gemini turns instead of letting sessions silently stall. Fixes #71074. (#71362) Thanks @neeravmakwana. +- Providers/DeepSeek: add missing `reasoning_content` placeholders for replayed assistant tool-call turns when DeepSeek V4 thinking is enabled, so switching an existing session to `deepseek-v4-flash` or `deepseek-v4-pro` no longer trips the provider's 400 replay check. Fixes #71372. Thanks @yangyang1719. - Exec approvals: allow bare command-name allowlist patterns to match PATH-resolved executable basenames without trusting `./tool` or absolute path-selected binaries. Fixes #71315. Thanks @chen-zhang-cs-code and @dengluozhang. - Config/recovery: skip whole-file last-known-good rollback when invalidity is scoped to `plugins.entries.*`, preserving unrelated user settings during plugin schema or host-version skew. Fixes #71289. Thanks @jalehman. - Agents/tools: keep resolved reply-run configs from being overwritten by stale runtime snapshots, and let empty web runtime metadata fall back to configured provider auto-detection so standard and queued turns expose the same tool set. Fixes #71355. Thanks @c-g14. diff --git a/docs/providers/deepseek.md b/docs/providers/deepseek.md index 2209c9e3e4d..ad80736e53f 100644 --- a/docs/providers/deepseek.md +++ b/docs/providers/deepseek.md @@ -90,6 +90,12 @@ back on the follow-up request. 
OpenClaw handles this inside the DeepSeek plugin, so normal multi-turn tool use works with `deepseek/deepseek-v4-flash` and `deepseek/deepseek-v4-pro`. +If you switch an existing session from another OpenAI-compatible provider to a +DeepSeek V4 model, older assistant tool-call turns may not have native +DeepSeek `reasoning_content`. OpenClaw fills that missing field for DeepSeek V4 +thinking requests so the provider can accept the replayed tool-call history +without requiring `/new`. + When thinking is disabled in OpenClaw (including the UI **None** selection), OpenClaw sends DeepSeek `thinking: { type: "disabled" }` and strips replayed `reasoning_content` from the outgoing history. This keeps disabled-thinking diff --git a/extensions/deepseek/index.test.ts b/extensions/deepseek/index.test.ts index f5c73367bbb..5097e1d4c92 100644 --- a/extensions/deepseek/index.test.ts +++ b/extensions/deepseek/index.test.ts @@ -217,6 +217,96 @@ describe("deepseek provider plugin", () => { }); }); + it("adds blank reasoning_content for replayed tool calls from non-DeepSeek turns", async () => { + let capturedPayload: Record<string, unknown> | undefined; + const model = { + provider: "deepseek", + id: "deepseek-v4-pro", + name: "DeepSeek V4 Pro", + api: "openai-completions", + baseUrl: "https://api.deepseek.com", + reasoning: true, + input: ["text"], + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + contextWindow: 1_000_000, + maxTokens: 384_000, + compat: { + supportsUsageInStreaming: true, + supportsReasoningEffort: true, + maxTokensField: "max_tokens", + }, + } as Model<"openai-completions">; + const context = { + messages: [ + { role: "user", content: "hi", timestamp: 1 }, + { + role: "assistant", + api: "openai-completions", + provider: "openai", + model: "gpt-5.4", + content: [{ type: "toolCall", id: "call_1", name: "read", arguments: {} }], + usage: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { input: 0, output: 0, cacheRead: 0, 
cacheWrite: 0, total: 0 }, + }, + stopReason: "toolUse", + timestamp: 2, + }, + { + role: "toolResult", + toolCallId: "call_1", + toolName: "read", + content: [{ type: "text", text: "ok" }], + isError: false, + timestamp: 3, + }, + ], + tools: [ + { + name: "read", + description: "Read data", + parameters: { type: "object", properties: {}, required: [], additionalProperties: false }, + }, + ], + } as Context; + const baseStreamFn = ( + streamModel: Model<"openai-completions">, + streamContext: Context, + options?: { onPayload?: (payload: unknown, model: unknown) => unknown }, + ) => { + capturedPayload = buildOpenAICompletionsParams(streamModel, streamContext, { + reasoning: "high", + } as never); + options?.onPayload?.(capturedPayload, streamModel); + const stream = createAssistantMessageEventStream(); + queueMicrotask(() => stream.end()); + return stream; + }; + + const wrapThinkingHigh = createDeepSeekV4ThinkingWrapper(baseStreamFn as never, "high"); + expect(wrapThinkingHigh).toBeDefined(); + wrapThinkingHigh?.(model, context, {}); + + expect((capturedPayload?.messages as Array<Record<string, unknown>>)[1]).toMatchObject({ + role: "assistant", + reasoning_content: "", + tool_calls: [ + { + id: "call_1", + type: "function", + function: { + name: "read", + arguments: "{}", + }, + }, + ], + }); + }); + it("strips replayed reasoning_content when DeepSeek V4 thinking is disabled", async () => { + let capturedPayload: Record<string, unknown> | undefined; + const model = { diff --git a/extensions/deepseek/stream.ts b/extensions/deepseek/stream.ts index 5ec6a906102..26916503bd9 100644 --- a/extensions/deepseek/stream.ts +++ b/extensions/deepseek/stream.ts @@ -28,6 +28,24 @@ function stripDeepSeekReasoningContent(payload: Record<string, unknown>): void { } } +function ensureDeepSeekToolCallReasoningContent(payload: Record<string, unknown>): void { + if (!Array.isArray(payload.messages)) { + return; + } + for (const message of payload.messages) { + if (!message || typeof message !== "object") { + continue; + } + const record = message as Record<string, unknown>; + 
if (record.role !== "assistant" || !Array.isArray(record.tool_calls)) { + continue; + } + if (!("reasoning_content" in record)) { + record.reasoning_content = ""; + } + } +} + export function createDeepSeekV4ThinkingWrapper( baseStreamFn: ProviderWrapStreamFnContext["streamFn"], thinkingLevel: DeepSeekThinkingLevel, @@ -52,6 +70,7 @@ export function createDeepSeekV4ThinkingWrapper( payload.thinking = { type: "enabled" }; payload.reasoning_effort = resolveDeepSeekReasoningEffort(thinkingLevel); + ensureDeepSeekToolCallReasoningContent(payload); }); }; }