mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-06 06:30:42 +00:00
fix(deepseek): expose v4 max thinking levels
This commit is contained in:
@@ -79,6 +79,8 @@ is available to that process (for example, in `~/.openclaw/.env` or via
|
||||
V4 models support DeepSeek's `thinking` control. OpenClaw also replays
DeepSeek `reasoning_content` on follow-up turns so thinking sessions with tool
calls can continue.

Use `/think xhigh` or `/think max` with DeepSeek V4 models to request DeepSeek's
maximum `reasoning_effort`.
</Tip>
|
||||
|
||||
## Thinking and tools
|
||||
|
||||
@@ -26,6 +26,7 @@ title: "Thinking levels"
|
||||
- Anthropic Claude Opus 4.7 does not default to adaptive thinking. Its API effort default remains provider-owned unless you explicitly set a thinking level.
- Anthropic Claude Opus 4.7 maps `/think xhigh` to adaptive thinking plus `output_config.effort: "xhigh"`, because `/think` is a thinking directive and `xhigh` is the Opus 4.7 effort setting.
- Anthropic Claude Opus 4.7 also exposes `/think max`; it maps to the same provider-owned max-effort path.
- DeepSeek V4 models expose `/think xhigh|max`; both map to DeepSeek `reasoning_effort: "max"`, while lower non-off levels map to `high`.
- Ollama thinking-capable models expose `/think low|medium|high|max`; `max` maps to native `think: "high"` because Ollama's native API accepts `low`, `medium`, and `high` effort strings.
- OpenAI GPT models map `/think` through model-specific Responses API effort support. `/think off` sends `reasoning.effort: "none"` only when the target model supports it; otherwise OpenClaw omits the disabled reasoning payload instead of sending an unsupported value.
- Custom OpenAI-compatible catalog entries can opt into `/think xhigh` by setting `models.providers.<provider>.models[].compat.supportedReasoningEfforts` to include `"xhigh"`. This uses the same compat metadata that maps outbound OpenAI reasoning-effort payloads, so menus, session validation, the agent CLI, and `llm-task` agree with transport behavior.
|
||||
|
||||
@@ -110,6 +110,37 @@ describe("deepseek provider plugin", () => {
|
||||
);
|
||||
});
|
||||
|
||||
it("advertises max thinking levels for DeepSeek V4 models only", async () => {
|
||||
const provider = await registerSingleProviderPlugin(deepseekPlugin);
|
||||
const resolveThinkingProfile = provider.resolveThinkingProfile!;
|
||||
const expectedV4Levels = ["off", "minimal", "low", "medium", "high", "xhigh", "max"];
|
||||
|
||||
expect(
|
||||
resolveThinkingProfile({
|
||||
provider: "deepseek",
|
||||
modelId: "deepseek-v4-pro",
|
||||
} as never)?.levels.map((level) => level.id),
|
||||
).toEqual(expectedV4Levels);
|
||||
expect(
|
||||
resolveThinkingProfile({
|
||||
provider: "deepseek",
|
||||
modelId: "deepseek-v4-flash",
|
||||
} as never)?.defaultLevel,
|
||||
).toBe("high");
|
||||
expect(
|
||||
resolveThinkingProfile({
|
||||
provider: "deepseek",
|
||||
modelId: "deepseek-v4-flash",
|
||||
} as never)?.levels.map((level) => level.id),
|
||||
).toEqual(expectedV4Levels);
|
||||
expect(
|
||||
resolveThinkingProfile({ provider: "deepseek", modelId: "deepseek-chat" } as never),
|
||||
).toBe(undefined);
|
||||
expect(
|
||||
resolveThinkingProfile({ provider: "deepseek", modelId: "deepseek-reasoner" } as never),
|
||||
).toBe(undefined);
|
||||
});
|
||||
|
||||
it("maps thinking levels to DeepSeek V4 payload controls", async () => {
|
||||
let capturedPayload: Record<string, unknown> | undefined;
|
||||
const baseStreamFn = (
|
||||
|
||||
@@ -1,11 +1,27 @@
|
||||
import type { ProviderThinkingProfile } from "openclaw/plugin-sdk/plugin-entry";
|
||||
import { readConfiguredProviderCatalogEntries } from "openclaw/plugin-sdk/provider-catalog-shared";
|
||||
import { defineSingleProviderPluginEntry } from "openclaw/plugin-sdk/provider-entry";
|
||||
import { buildProviderReplayFamilyHooks } from "openclaw/plugin-sdk/provider-model-shared";
|
||||
import { isDeepSeekV4ModelId } from "./models.js";
|
||||
import { applyDeepSeekConfig, DEEPSEEK_DEFAULT_MODEL_REF } from "./onboard.js";
|
||||
import { buildDeepSeekProvider } from "./provider-catalog.js";
|
||||
import { createDeepSeekV4ThinkingWrapper } from "./stream.js";
|
||||
|
||||
const PROVIDER_ID = "deepseek";
|
||||
const V4_THINKING_LEVEL_IDS = ["off", "minimal", "low", "medium", "high", "xhigh", "max"] as const;
|
||||
|
||||
function buildDeepSeekV4ThinkingLevel(id: (typeof V4_THINKING_LEVEL_IDS)[number]) {
|
||||
return { id };
|
||||
}
|
||||
|
||||
const DEEPSEEK_V4_THINKING_PROFILE = {
|
||||
levels: V4_THINKING_LEVEL_IDS.map(buildDeepSeekV4ThinkingLevel),
|
||||
defaultLevel: "high",
|
||||
} satisfies ProviderThinkingProfile;
|
||||
|
||||
function resolveDeepSeekV4ThinkingProfile(modelId: string): ProviderThinkingProfile | undefined {
|
||||
return isDeepSeekV4ModelId(modelId) ? DEEPSEEK_V4_THINKING_PROFILE : undefined;
|
||||
}
|
||||
|
||||
export default defineSingleProviderPluginEntry({
|
||||
id: PROVIDER_ID,
|
||||
@@ -46,9 +62,7 @@ export default defineSingleProviderPluginEntry({
|
||||
/\bdeepseek\b.*(?:input.*too long|context.*exceed)/i.test(errorMessage),
|
||||
...buildProviderReplayFamilyHooks({ family: "openai-compatible" }),
|
||||
wrapStreamFn: (ctx) => createDeepSeekV4ThinkingWrapper(ctx.streamFn, ctx.thinkingLevel),
|
||||
isModernModelRef: ({ modelId }) => {
|
||||
const lower = modelId.toLowerCase();
|
||||
return lower === "deepseek-v4-flash" || lower === "deepseek-v4-pro";
|
||||
},
|
||||
resolveThinkingProfile: ({ modelId }) => resolveDeepSeekV4ThinkingProfile(modelId),
|
||||
isModernModelRef: ({ modelId }) => Boolean(resolveDeepSeekV4ThinkingProfile(modelId)),
|
||||
},
|
||||
});
|
||||
|
||||
@@ -19,3 +19,15 @@ export function buildDeepSeekModelDefinition(
|
||||
api: "openai-completions",
|
||||
};
|
||||
}
|
||||
|
||||
const DEEPSEEK_V4_MODEL_IDS = new Set(["deepseek-v4-flash", "deepseek-v4-pro"]);
|
||||
|
||||
export function isDeepSeekV4ModelId(modelId: string): boolean {
|
||||
return DEEPSEEK_V4_MODEL_IDS.has(modelId.toLowerCase());
|
||||
}
|
||||
|
||||
export function isDeepSeekV4ModelRef(model: { provider?: string; id?: unknown }): boolean {
|
||||
return (
|
||||
model.provider === "deepseek" && typeof model.id === "string" && isDeepSeekV4ModelId(model.id)
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
import type { ProviderWrapStreamFnContext } from "openclaw/plugin-sdk/plugin-entry";
|
||||
import { createDeepSeekV4OpenAICompatibleThinkingWrapper } from "openclaw/plugin-sdk/provider-stream-shared";
|
||||
|
||||
function isDeepSeekV4ModelId(modelId: unknown): boolean {
|
||||
return modelId === "deepseek-v4-flash" || modelId === "deepseek-v4-pro";
|
||||
}
|
||||
import { isDeepSeekV4ModelRef } from "./models.js";
|
||||
|
||||
export function createDeepSeekV4ThinkingWrapper(
|
||||
baseStreamFn: ProviderWrapStreamFnContext["streamFn"],
|
||||
@@ -12,6 +9,6 @@ export function createDeepSeekV4ThinkingWrapper(
|
||||
return createDeepSeekV4OpenAICompatibleThinkingWrapper({
|
||||
baseStreamFn,
|
||||
thinkingLevel,
|
||||
shouldPatchModel: (model) => model.provider === "deepseek" && isDeepSeekV4ModelId(model.id),
|
||||
shouldPatchModel: isDeepSeekV4ModelRef,
|
||||
});
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user