Mirror of https://github.com/openclaw/openclaw.git, synced 2026-05-06 09:30:43 +00:00.
fix(opencode-go): route DeepSeek V4 through OpenAI transport
This commit is contained in:
@@ -119,15 +119,56 @@ describe("opencode-go provider plugin", () => {
|
||||
} as never),
|
||||
).toMatchObject({
|
||||
id: "deepseek-v4-pro",
|
||||
api: "anthropic-messages",
|
||||
api: "openai-completions",
|
||||
provider: "opencode-go",
|
||||
baseUrl: "https://opencode.ai/zen/go",
|
||||
baseUrl: "https://opencode.ai/zen/go/v1",
|
||||
reasoning: true,
|
||||
contextWindow: 1_000_000,
|
||||
maxTokens: 384_000,
|
||||
compat: {
|
||||
supportsUsageInStreaming: true,
|
||||
supportsReasoningEffort: true,
|
||||
maxTokensField: "max_tokens",
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
it("disables invalid DeepSeek V4 reasoning_effort off payloads on OpenCode Go", async () => {
|
||||
const provider = await registerSingleProviderPlugin(plugin);
|
||||
const capturedPayloads: Record<string, unknown>[] = [];
|
||||
const baseStreamFn = (_model: unknown, _context: unknown, options: unknown) => {
|
||||
const payload = {
|
||||
model: "deepseek-v4-flash",
|
||||
reasoning_effort: "off",
|
||||
reasoning: "off",
|
||||
};
|
||||
(options as { onPayload?: (payload: Record<string, unknown>) => void })?.onPayload?.(payload);
|
||||
capturedPayloads.push(payload);
|
||||
return {} as never;
|
||||
};
|
||||
|
||||
const streamFn = provider.wrapStreamFn?.({
|
||||
streamFn: baseStreamFn as never,
|
||||
providerId: "opencode-go",
|
||||
modelId: "deepseek-v4-flash",
|
||||
thinkingLevel: "off",
|
||||
} as never);
|
||||
|
||||
expect(streamFn).toBeTypeOf("function");
|
||||
await streamFn?.(
|
||||
{ provider: "opencode-go", id: "deepseek-v4-flash" } as never,
|
||||
{} as never,
|
||||
{},
|
||||
);
|
||||
|
||||
expect(capturedPayloads).toEqual([
|
||||
{
|
||||
model: "deepseek-v4-flash",
|
||||
thinking: { type: "disabled" },
|
||||
},
|
||||
]);
|
||||
});
|
||||
|
||||
it("canonicalizes stale OpenCode Go base URLs", async () => {
|
||||
const provider = await registerSingleProviderPlugin(plugin);
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ import {
|
||||
normalizeOpencodeGoBaseUrl,
|
||||
resolveOpencodeGoSupplementalModel,
|
||||
} from "./provider-catalog.js";
|
||||
import { createOpencodeGoDeepSeekV4Wrapper } from "./stream.js";
|
||||
|
||||
const PROVIDER_ID = "opencode-go";
|
||||
export default definePluginEntry({
|
||||
@@ -67,6 +68,7 @@ export default definePluginEntry({
|
||||
resolveDynamicModel: ({ modelId }) => resolveOpencodeGoSupplementalModel(modelId),
|
||||
augmentModelCatalog: () => listOpencodeGoSupplementalModelCatalogEntries(),
|
||||
...PASSTHROUGH_GEMINI_REPLAY_HOOKS,
|
||||
wrapStreamFn: (ctx) => createOpencodeGoDeepSeekV4Wrapper(ctx.streamFn, ctx.thinkingLevel),
|
||||
isModernModelRef: () => true,
|
||||
});
|
||||
api.registerMediaUnderstandingProvider(opencodeGoMediaUnderstandingProvider);
|
||||
|
||||
@@ -12,9 +12,9 @@ const OPENCODE_GO_SUPPLEMENTAL_MODELS = (
|
||||
{
|
||||
id: "deepseek-v4-pro",
|
||||
name: "DeepSeek V4 Pro",
|
||||
api: "anthropic-messages",
|
||||
api: "openai-completions",
|
||||
provider: PROVIDER_ID,
|
||||
baseUrl: OPENCODE_GO_ANTHROPIC_BASE_URL,
|
||||
baseUrl: OPENCODE_GO_OPENAI_BASE_URL,
|
||||
reasoning: true,
|
||||
input: ["text"],
|
||||
cost: {
|
||||
@@ -25,13 +25,18 @@ const OPENCODE_GO_SUPPLEMENTAL_MODELS = (
|
||||
},
|
||||
contextWindow: 1_000_000,
|
||||
maxTokens: 384_000,
|
||||
compat: {
|
||||
supportsUsageInStreaming: true,
|
||||
supportsReasoningEffort: true,
|
||||
maxTokensField: "max_tokens",
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "deepseek-v4-flash",
|
||||
name: "DeepSeek V4 Flash",
|
||||
api: "anthropic-messages",
|
||||
api: "openai-completions",
|
||||
provider: PROVIDER_ID,
|
||||
baseUrl: OPENCODE_GO_ANTHROPIC_BASE_URL,
|
||||
baseUrl: OPENCODE_GO_OPENAI_BASE_URL,
|
||||
reasoning: true,
|
||||
input: ["text"],
|
||||
cost: {
|
||||
@@ -42,6 +47,11 @@ const OPENCODE_GO_SUPPLEMENTAL_MODELS = (
|
||||
},
|
||||
contextWindow: 1_000_000,
|
||||
maxTokens: 384_000,
|
||||
compat: {
|
||||
supportsUsageInStreaming: true,
|
||||
supportsReasoningEffort: true,
|
||||
maxTokensField: "max_tokens",
|
||||
},
|
||||
},
|
||||
] satisfies ProviderRuntimeModel[]
|
||||
).map((model) => normalizeModelCompat(model));
|
||||
|
||||
18
extensions/opencode-go/stream.ts
Normal file
18
extensions/opencode-go/stream.ts
Normal file
@@ -0,0 +1,18 @@
|
||||
import type { ProviderWrapStreamFnContext } from "openclaw/plugin-sdk/plugin-entry";
|
||||
import { createDeepSeekV4OpenAICompatibleThinkingWrapper } from "openclaw/plugin-sdk/provider-stream-shared";
|
||||
|
||||
function isOpencodeGoDeepSeekV4ModelId(modelId: unknown): boolean {
|
||||
return modelId === "deepseek-v4-flash" || modelId === "deepseek-v4-pro";
|
||||
}
|
||||
|
||||
export function createOpencodeGoDeepSeekV4Wrapper(
|
||||
baseStreamFn: ProviderWrapStreamFnContext["streamFn"],
|
||||
thinkingLevel: ProviderWrapStreamFnContext["thinkingLevel"],
|
||||
): ProviderWrapStreamFnContext["streamFn"] {
|
||||
return createDeepSeekV4OpenAICompatibleThinkingWrapper({
|
||||
baseStreamFn,
|
||||
thinkingLevel,
|
||||
shouldPatchModel: (model) =>
|
||||
model.provider === "opencode-go" && isOpencodeGoDeepSeekV4ModelId(model.id),
|
||||
});
|
||||
}
|
||||
Reference in New Issue
Block a user