fix(deepseek): expose v4 thinking profile in policy surface

This commit is contained in:
Peter Steinberger
2026-05-04 08:12:34 +01:00
parent a5dcf3d300
commit c36f8f1e39
5 changed files with 58 additions and 17 deletions

View File

@@ -63,6 +63,7 @@ Docs: https://docs.openclaw.ai
- Web search: scope explicit bundled `web_search` provider runtime loading through manifest ownership, so selecting DuckDuckGo/Gemini/etc. does not import unrelated bundled providers or log their optional dependency failures. Thanks @vincentkoc.
- Plugins/discovery: demote the source-only TypeScript runtime check on already-installed `origin: "global"` plugin packages from a config-blocking error to a warning and let the runtime fall through to the TypeScript source via jiti, so a single broken installed package no longer blocks `plugins install` for unrelated plugins; install-time rejection of newly-installed source-only packages is unchanged. Thanks @romneyda.
- Providers/OpenAI Codex: stop the OAuth progress spinner before showing the manual redirect paste prompt, so callback timeouts do not spam `Browser callback did not finish` across terminals.
- Providers/DeepSeek: expose DeepSeek V4 `xhigh` and `max` thinking levels through the lightweight provider-policy surface, so Control UI `/think` pickers keep showing the max reasoning options when the runtime plugin registry is not active. Fixes #77139. Thanks @bittoby.
- Release/beta smoke: resolve the dispatched Telegram beta E2E run from `gh run list` when `gh workflow run` returns no run URL, so the maintainer helper does not fail immediately after dispatch. Thanks @vincentkoc.
- Media/images: keep HEIC/HEIF attachments fail-closed when optional Sharp conversion is unavailable instead of sending originals that still need conversion. Thanks @vincentkoc.
- Google Meet: fork the caller's current agent transcript into agent-mode meeting consultant sessions, so Meet replies inherit the context from the tool call that joined the meeting.

View File

@@ -1,27 +1,12 @@
import type { ProviderThinkingProfile } from "openclaw/plugin-sdk/plugin-entry";
import { readConfiguredProviderCatalogEntries } from "openclaw/plugin-sdk/provider-catalog-shared";
import { defineSingleProviderPluginEntry } from "openclaw/plugin-sdk/provider-entry";
import { buildProviderReplayFamilyHooks } from "openclaw/plugin-sdk/provider-model-shared";
import { isDeepSeekV4ModelId } from "./models.js";
import { applyDeepSeekConfig, DEEPSEEK_DEFAULT_MODEL_REF } from "./onboard.js";
import { buildDeepSeekProvider } from "./provider-catalog.js";
import { createDeepSeekV4ThinkingWrapper } from "./stream.js";
import { resolveDeepSeekV4ThinkingProfile } from "./thinking.js";
const PROVIDER_ID = "deepseek";
// Full reasoning ladder accepted by DeepSeek V4 models, ordered from disabled
// ("off") up to maximum effort ("max").
const V4_THINKING_LEVEL_IDS = ["off", "minimal", "low", "medium", "high", "xhigh", "max"] as const;
type DeepSeekV4ThinkingLevelId = (typeof V4_THINKING_LEVEL_IDS)[number];
/** Wrap a level id in the `{ id }` object shape the thinking-profile contract expects. */
function buildDeepSeekV4ThinkingLevel(id: DeepSeekV4ThinkingLevelId) {
return { id };
}
/** Single shared profile advertised for every DeepSeek V4 model; "high" stays the default. */
const DEEPSEEK_V4_THINKING_PROFILE = {
levels: V4_THINKING_LEVEL_IDS.map((levelId) => buildDeepSeekV4ThinkingLevel(levelId)),
defaultLevel: "high",
} satisfies ProviderThinkingProfile;
/**
 * Return the shared V4 thinking profile when the model id is a DeepSeek V4
 * model; any other model id yields no profile (undefined).
 */
function resolveDeepSeekV4ThinkingProfile(modelId: string): ProviderThinkingProfile | undefined {
if (!isDeepSeekV4ModelId(modelId)) {
return undefined;
}
return DEEPSEEK_V4_THINKING_PROFILE;
}
export default defineSingleProviderPluginEntry({
id: PROVIDER_ID,

View File

@@ -1,8 +1,37 @@
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-model-types";
import { describe, expect, it } from "vitest";
import { normalizeConfig } from "./provider-policy-api.js";
import { normalizeConfig, resolveThinkingProfile } from "./provider-policy-api.js";
describe("deepseek provider-policy-api", () => {
it("advertises max thinking levels for DeepSeek V4 models", () => {
const expectedV4Levels = ["off", "minimal", "low", "medium", "high", "xhigh", "max"];
// V4 models surface the full ladder, including the new xhigh/max rungs.
const proProfile = resolveThinkingProfile({
provider: "deepseek",
modelId: "deepseek-v4-pro",
});
expect(proProfile?.levels.map((level) => level.id)).toEqual(expectedV4Levels);
// The default level stays "high" so existing sessions keep their behavior.
const flashProfile = resolveThinkingProfile({
provider: "deepseek",
modelId: "deepseek-v4-flash",
});
expect(flashProfile?.defaultLevel).toBe("high");
// Non-V4 DeepSeek models advertise no profile at all.
const chatProfile = resolveThinkingProfile({
provider: "deepseek",
modelId: "deepseek-chat",
});
expect(chatProfile).toBe(undefined);
// Other providers are out of scope for this module: null, not undefined.
const foreignProfile = resolveThinkingProfile({
provider: "openrouter",
modelId: "deepseek-v4-pro",
});
expect(foreignProfile).toBe(null);
});
it("hydrates contextWindow and cost from catalog for known models", () => {
const providerConfig: ModelProviderConfig = {
baseUrl: "https://api.deepseek.com",

View File

@@ -1,6 +1,7 @@
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-model-shared";
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-model-types";
import { DEEPSEEK_MODEL_CATALOG } from "./models.js";
import { resolveDeepSeekV4ThinkingProfile } from "./thinking.js";
type ModelDefinitionDraft = Partial<ModelDefinitionConfig> &
Pick<ModelDefinitionConfig, "id" | "name">;
@@ -95,3 +96,9 @@ export function normalizeConfig(params: {
return { ...providerConfig, models: nextModels as ModelDefinitionConfig[] };
}
/**
 * Lightweight thinking-profile lookup for the provider-policy surface.
 *
 * Provider matching is case-insensitive and whitespace-tolerant. For the
 * "deepseek" provider this delegates to the V4 resolver (which yields
 * undefined for non-V4 model ids); for every other provider it returns null
 * to signal that this module has no opinion.
 */
export function resolveThinkingProfile(params: { provider: string; modelId: string }) {
const normalizedProvider = params.provider.trim().toLowerCase();
if (normalizedProvider !== "deepseek") {
return null;
}
return resolveDeepSeekV4ThinkingProfile(params.modelId);
}

View File

@@ -0,0 +1,19 @@
import type { ProviderThinkingProfile } from "openclaw/plugin-sdk/plugin-entry";
import { isDeepSeekV4ModelId } from "./models.js";
// Every reasoning level DeepSeek V4 models accept, ordered from disabled
// ("off") up to maximum effort ("max").
const V4_THINKING_LEVEL_IDS = ["off", "minimal", "low", "medium", "high", "xhigh", "max"] as const;
/** Wrap a level id in the `{ id }` object shape required by the profile contract. */
function buildDeepSeekV4ThinkingLevel(id: (typeof V4_THINKING_LEVEL_IDS)[number]) {
return { id };
}
/** Shared thinking profile advertised for all DeepSeek V4 models; "high" is the default. */
const DEEPSEEK_V4_THINKING_PROFILE = {
levels: V4_THINKING_LEVEL_IDS.map((levelId) => buildDeepSeekV4ThinkingLevel(levelId)),
defaultLevel: "high",
} satisfies ProviderThinkingProfile;
/**
 * Resolve the shared thinking profile for a DeepSeek model id.
 *
 * @param modelId - Raw model identifier from the provider config.
 * @returns The shared V4 profile for DeepSeek V4 model ids, otherwise undefined.
 */
export function resolveDeepSeekV4ThinkingProfile(
modelId: string,
): ProviderThinkingProfile | undefined {
if (!isDeepSeekV4ModelId(modelId)) {
return undefined;
}
return DEEPSEEK_V4_THINKING_PROFILE;
}