From e2ade569528a4c9f7ff317c0300834ec507849c7 Mon Sep 17 00:00:00 2001
From: Peter Steinberger
Date: Sat, 25 Apr 2026 02:09:45 +0100
Subject: [PATCH] test(deepseek): cover v4 reasoning replay payload

---
 extensions/deepseek/index.test.ts | 101 ++++++++++++++++++++++++++++++
 1 file changed, 101 insertions(+)

diff --git a/extensions/deepseek/index.test.ts b/extensions/deepseek/index.test.ts
index 86e7cc6c357..f5c73367bbb 100644
--- a/extensions/deepseek/index.test.ts
+++ b/extensions/deepseek/index.test.ts
@@ -116,6 +116,107 @@ describe("deepseek provider plugin", () => {
     });
   });
 
+  it("preserves replayed reasoning_content when DeepSeek V4 thinking is enabled", async () => {
+    let capturedPayload: Record<string, unknown> | undefined;
+    const model = {
+      provider: "deepseek",
+      id: "deepseek-v4-flash",
+      name: "DeepSeek V4 Flash",
+      api: "openai-completions",
+      baseUrl: "https://api.deepseek.com",
+      reasoning: true,
+      input: ["text"],
+      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+      contextWindow: 1_000_000,
+      maxTokens: 384_000,
+      compat: {
+        supportsUsageInStreaming: true,
+        supportsReasoningEffort: true,
+        maxTokensField: "max_tokens",
+      },
+    } as Model<"openai-completions">;
+    const context = {
+      messages: [
+        { role: "user", content: "hi", timestamp: 1 },
+        {
+          role: "assistant",
+          api: "openai-completions",
+          provider: "deepseek",
+          model: "deepseek-v4-flash",
+          content: [
+            {
+              type: "thinking",
+              thinking: "call reasoning",
+              thinkingSignature: "reasoning_content",
+            },
+            { type: "toolCall", id: "call_1", name: "read", arguments: {} },
+          ],
+          usage: {
+            input: 0,
+            output: 0,
+            cacheRead: 0,
+            cacheWrite: 0,
+            totalTokens: 0,
+            cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
+          },
+          stopReason: "toolUse",
+          timestamp: 2,
+        },
+        {
+          role: "toolResult",
+          toolCallId: "call_1",
+          toolName: "read",
+          content: [{ type: "text", text: "ok" }],
+          isError: false,
+          timestamp: 3,
+        },
+      ],
+      tools: [
+        {
+          name: "read",
+          description: "Read data",
+          parameters: { type: "object", properties: {}, required: [], additionalProperties: false },
+        },
+      ],
+    } as Context;
+    const baseStreamFn = (
+      streamModel: Model<"openai-completions">,
+      streamContext: Context,
+      options?: { onPayload?: (payload: unknown, model: unknown) => unknown },
+    ) => {
+      capturedPayload = buildOpenAICompletionsParams(streamModel, streamContext, {
+        reasoning: "high",
+      } as never);
+      options?.onPayload?.(capturedPayload, streamModel);
+      const stream = createAssistantMessageEventStream();
+      queueMicrotask(() => stream.end());
+      return stream;
+    };
+
+    const wrapThinkingHigh = createDeepSeekV4ThinkingWrapper(baseStreamFn as never, "high");
+    expect(wrapThinkingHigh).toBeDefined();
+    wrapThinkingHigh?.(model, context, {});
+
+    expect(capturedPayload).toMatchObject({
+      thinking: { type: "enabled" },
+      reasoning_effort: "high",
+    });
+    expect((capturedPayload?.messages as Array<Record<string, unknown>>)[1]).toMatchObject({
+      role: "assistant",
+      reasoning_content: "call reasoning",
+      tool_calls: [
+        {
+          id: "call_1",
+          type: "function",
+          function: {
+            name: "read",
+            arguments: "{}",
+          },
+        },
+      ],
+    });
+  });
+
   it("strips replayed reasoning_content when DeepSeek V4 thinking is disabled", async () => {
     let capturedPayload: Record<string, unknown> | undefined;
     const model = {