fix(deepseek): normalize V4 tool-call replay

This commit is contained in:
Peter Steinberger
2026-04-25 04:23:17 +01:00
parent c81b3ab6b9
commit 678ed5d512
4 changed files with 116 additions and 0 deletions

View File

@@ -70,6 +70,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Agents/Gemini: retry reasoning-only, empty, and planning-only Gemini turns instead of letting sessions silently stall. Fixes #71074. (#71362) Thanks @neeravmakwana.
- Providers/DeepSeek: add missing `reasoning_content` placeholders for replayed assistant tool-call turns when DeepSeek V4 thinking is enabled, so switching an existing session to `deepseek-v4-flash` or `deepseek-v4-pro` no longer trips the provider's 400 replay check. Fixes #71372. Thanks @yangyang1719.
- Exec approvals: allow bare command-name allowlist patterns to match PATH-resolved executable basenames without trusting `./tool` or absolute path-selected binaries. Fixes #71315. Thanks @chen-zhang-cs-code and @dengluozhang.
- Config/recovery: skip whole-file last-known-good rollback when invalidity is scoped to `plugins.entries.*`, preserving unrelated user settings during plugin schema or host-version skew. Fixes #71289. Thanks @jalehman.
- Agents/tools: keep resolved reply-run configs from being overwritten by stale runtime snapshots, and let empty web runtime metadata fall back to configured provider auto-detection so standard and queued turns expose the same tool set. Fixes #71355. Thanks @c-g14.

View File

@@ -90,6 +90,12 @@ back on the follow-up request. OpenClaw handles this inside the DeepSeek plugin,
so normal multi-turn tool use works with `deepseek/deepseek-v4-flash` and
`deepseek/deepseek-v4-pro`.
If you switch an existing session from another OpenAI-compatible provider to a
DeepSeek V4 model, older assistant tool-call turns may not have native
DeepSeek `reasoning_content`. OpenClaw fills that missing field for DeepSeek V4
thinking requests so the provider can accept the replayed tool-call history
without requiring `/new`.
When thinking is disabled in OpenClaw (including the UI **None** selection),
OpenClaw sends DeepSeek `thinking: { type: "disabled" }` and strips replayed
`reasoning_content` from the outgoing history. This keeps disabled-thinking

View File

@@ -217,6 +217,96 @@ describe("deepseek provider plugin", () => {
});
});
it("adds blank reasoning_content for replayed tool calls from non-DeepSeek turns", async () => {
  let observedPayload: Record<string, unknown> | undefined;

  // Model fixture: a DeepSeek V4 reasoning model served over the
  // OpenAI-completions API surface.
  const model = {
    provider: "deepseek",
    id: "deepseek-v4-pro",
    name: "DeepSeek V4 Pro",
    api: "openai-completions",
    baseUrl: "https://api.deepseek.com",
    reasoning: true,
    input: ["text"],
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
    contextWindow: 1_000_000,
    maxTokens: 384_000,
    compat: {
      supportsUsageInStreaming: true,
      supportsReasoningEffort: true,
      maxTokensField: "max_tokens",
    },
  } as Model<"openai-completions">;

  // Replayed history from a non-DeepSeek provider: the assistant turn carries
  // a tool call but no native DeepSeek reasoning_content.
  const context = {
    messages: [
      { role: "user", content: "hi", timestamp: 1 },
      {
        role: "assistant",
        api: "openai-completions",
        provider: "openai",
        model: "gpt-5.4",
        content: [{ type: "toolCall", id: "call_1", name: "read", arguments: {} }],
        usage: {
          input: 0,
          output: 0,
          cacheRead: 0,
          cacheWrite: 0,
          totalTokens: 0,
          cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
        },
        stopReason: "toolUse",
        timestamp: 2,
      },
      {
        role: "toolResult",
        toolCallId: "call_1",
        toolName: "read",
        content: [{ type: "text", text: "ok" }],
        isError: false,
        timestamp: 3,
      },
    ],
    tools: [
      {
        name: "read",
        description: "Read data",
        parameters: { type: "object", properties: {}, required: [], additionalProperties: false },
      },
    ],
  } as Context;

  // Stub stream fn: snapshot the request payload, let the wrapper's onPayload
  // hook mutate it, then end the stream immediately.
  const stubStreamFn = (
    streamModel: Model<"openai-completions">,
    streamContext: Context,
    options?: { onPayload?: (payload: unknown, model: unknown) => unknown },
  ) => {
    observedPayload = buildOpenAICompletionsParams(streamModel, streamContext, {
      reasoning: "high",
    } as never);
    options?.onPayload?.(observedPayload, streamModel);
    const stream = createAssistantMessageEventStream();
    queueMicrotask(() => stream.end());
    return stream;
  };

  const wrapper = createDeepSeekV4ThinkingWrapper(stubStreamFn as never, "high");
  expect(wrapper).toBeDefined();
  wrapper?.(model, context, {});

  // The replayed assistant tool-call turn must gain an empty
  // reasoning_content placeholder while keeping its tool_calls intact.
  const replayedAssistant = (observedPayload?.messages as Array<Record<string, unknown>>)[1];
  expect(replayedAssistant).toMatchObject({
    role: "assistant",
    reasoning_content: "",
    tool_calls: [
      {
        id: "call_1",
        type: "function",
        function: {
          name: "read",
          arguments: "{}",
        },
      },
    ],
  });
});
it("strips replayed reasoning_content when DeepSeek V4 thinking is disabled", async () => {
let capturedPayload: Record<string, unknown> | undefined;
const model = {

View File

@@ -28,6 +28,24 @@ function stripDeepSeekReasoningContent(payload: Record<string, unknown>): void {
}
}
/**
 * Backfill `reasoning_content` on replayed assistant tool-call turns.
 *
 * DeepSeek V4 thinking requests reject (400) a replayed assistant message
 * that has `tool_calls` but no string `reasoning_content`. Histories imported
 * from other OpenAI-compatible providers never carry that field, so insert an
 * empty-string placeholder wherever it is absent. A nullish or otherwise
 * non-string value (e.g. `reasoning_content: null` surviving a JSON
 * round-trip) is treated the same as a missing key, since the provider
 * rejects it identically; existing string values are left untouched.
 *
 * Mutates `payload.messages` in place; a payload without a `messages` array
 * is left unchanged.
 */
function ensureDeepSeekToolCallReasoningContent(payload: Record<string, unknown>): void {
  const { messages } = payload;
  if (!Array.isArray(messages)) {
    return;
  }
  for (const message of messages) {
    // Skip null and non-object entries defensively.
    if (!message || typeof message !== "object") {
      continue;
    }
    const record = message as Record<string, unknown>;
    // Only assistant turns that replay tool calls need the placeholder.
    if (record.role !== "assistant" || !Array.isArray(record.tool_calls)) {
      continue;
    }
    if (typeof record.reasoning_content !== "string") {
      record.reasoning_content = "";
    }
  }
}
export function createDeepSeekV4ThinkingWrapper(
baseStreamFn: ProviderWrapStreamFnContext["streamFn"],
thinkingLevel: DeepSeekThinkingLevel,
@@ -52,6 +70,7 @@ export function createDeepSeekV4ThinkingWrapper(
payload.thinking = { type: "enabled" };
payload.reasoning_effort = resolveDeepSeekReasoningEffort(thinkingLevel);
ensureDeepSeekToolCallReasoningContent(payload);
});
};
}