fix(deepseek): expose V4 max thinking levels (#73008)

Merged via squash.

Prepared head SHA: ef561a59de
Co-authored-by: ai-hpc <183861985+ai-hpc@users.noreply.github.com>
Co-authored-by: hxy91819 <8814856+hxy91819@users.noreply.github.com>
Reviewed-by: @hxy91819
This commit is contained in:
NVIDIAN
2026-04-30 08:34:05 -07:00
committed by GitHub
parent 0eb8f34000
commit 797d574dfd
7 changed files with 67 additions and 9 deletions

View File

@@ -1,9 +1,6 @@
import type { ProviderWrapStreamFnContext } from "openclaw/plugin-sdk/plugin-entry";
import { createDeepSeekV4OpenAICompatibleThinkingWrapper } from "openclaw/plugin-sdk/provider-stream-shared";
/**
 * Reports whether `modelId` names one of the DeepSeek V4 chat models.
 *
 * @param modelId - Candidate model identifier; may be any runtime value.
 * @returns `true` only for the exact ids `"deepseek-v4-flash"` or
 *   `"deepseek-v4-pro"`; `false` for every other value (including
 *   non-string inputs).
 */
function isDeepSeekV4ModelId(modelId: unknown): boolean {
  const v4ModelIds: readonly string[] = ["deepseek-v4-flash", "deepseek-v4-pro"];
  return typeof modelId === "string" && v4ModelIds.includes(modelId);
}
import { isDeepSeekV4ModelRef } from "./models.js";
export function createDeepSeekV4ThinkingWrapper(
baseStreamFn: ProviderWrapStreamFnContext["streamFn"],
@@ -12,6 +9,6 @@ export function createDeepSeekV4ThinkingWrapper(
return createDeepSeekV4OpenAICompatibleThinkingWrapper({
baseStreamFn,
thinkingLevel,
shouldPatchModel: (model) => model.provider === "deepseek" && isDeepSeekV4ModelId(model.id),
shouldPatchModel: isDeepSeekV4ModelRef,
});
}