mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-06 21:00:44 +00:00
fix(providers): map native reasoning efforts
This commit is contained in:
60
extensions/groq/api.ts
Normal file
60
extensions/groq/api.ts
Normal file
@@ -0,0 +1,60 @@
|
||||
import type { ModelCompatConfig } from "openclaw/plugin-sdk/provider-model-shared";
|
||||
|
||||
/** Canonical Groq model id for Qwen 3 32B, compared after normalization. */
const GROQ_QWEN3_32B_ID = "qwen/qwen3-32b";

/** Groq-hosted GPT-OSS model ids that accept a reasoning-effort setting. */
const GROQ_GPT_OSS_REASONING_IDS = new Set([
  "openai/gpt-oss-20b",
  "openai/gpt-oss-120b",
  "openai/gpt-oss-safeguard-20b",
]);

/** Reasoning-effort values Groq's Qwen 3 endpoint accepts natively. */
export const GROQ_QWEN_REASONING_EFFORTS = ["none", "default"] as const;
/** Reasoning-effort values Groq's GPT-OSS endpoints accept natively. */
export const GROQ_GPT_OSS_REASONING_EFFORTS = ["low", "medium", "high"] as const;

/**
 * Maps OpenClaw effort names onto Groq's native Qwen values: "off"/"none"
 * disable reasoning; every other effort collapses to "default", since the
 * Qwen endpoint has no graded effort levels.
 */
export const GROQ_QWEN_REASONING_EFFORT_MAP: Record<string, string> = {
  off: "none",
  none: "none",
  minimal: "default",
  low: "default",
  medium: "default",
  high: "default",
  xhigh: "default",
  adaptive: "default",
  max: "default",
};
|
||||
|
||||
function normalizeGroqModelId(modelId: string | undefined): string {
|
||||
return modelId?.trim().toLowerCase() ?? "";
|
||||
}
|
||||
|
||||
export function resolveGroqReasoningCompatPatch(
|
||||
modelId: string,
|
||||
): Pick<
|
||||
ModelCompatConfig,
|
||||
"supportsReasoningEffort" | "supportedReasoningEfforts" | "reasoningEffortMap"
|
||||
> | null {
|
||||
const normalized = normalizeGroqModelId(modelId);
|
||||
if (normalized === GROQ_QWEN3_32B_ID) {
|
||||
return {
|
||||
supportsReasoningEffort: true,
|
||||
supportedReasoningEfforts: [...GROQ_QWEN_REASONING_EFFORTS],
|
||||
reasoningEffortMap: GROQ_QWEN_REASONING_EFFORT_MAP,
|
||||
};
|
||||
}
|
||||
if (GROQ_GPT_OSS_REASONING_IDS.has(normalized)) {
|
||||
return {
|
||||
supportsReasoningEffort: true,
|
||||
supportedReasoningEfforts: [...GROQ_GPT_OSS_REASONING_EFFORTS],
|
||||
};
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
export function contributeGroqResolvedModelCompat(params: {
|
||||
modelId: string;
|
||||
model: { api?: unknown; provider?: unknown };
|
||||
}): Partial<ModelCompatConfig> | undefined {
|
||||
if (params.model.api !== "openai-completions" || params.model.provider !== "groq") {
|
||||
return undefined;
|
||||
}
|
||||
return resolveGroqReasoningCompatPatch(params.modelId) ?? undefined;
|
||||
}
|
||||
51
extensions/groq/index.test.ts
Normal file
51
extensions/groq/index.test.ts
Normal file
@@ -0,0 +1,51 @@
|
||||
import { capturePluginRegistration } from "openclaw/plugin-sdk/testing";
|
||||
import { describe, expect, it } from "vitest";
|
||||
import { contributeGroqResolvedModelCompat, resolveGroqReasoningCompatPatch } from "./api.js";
|
||||
import plugin from "./index.js";
|
||||
|
||||
describe("groq provider compat", () => {
  // Qwen 3 on Groq only accepts "none"/"default", so OpenClaw efforts
  // must collapse through the remap table.
  it("maps Groq Qwen 3 reasoning to provider-native none/default values", () => {
    expect(resolveGroqReasoningCompatPatch("qwen/qwen3-32b")).toEqual({
      supportsReasoningEffort: true,
      supportedReasoningEfforts: ["none", "default"],
      reasoningEffortMap: expect.objectContaining({
        off: "none",
        low: "default",
        medium: "default",
        high: "default",
      }),
    });
  });

  // GPT-OSS already speaks low/medium/high, so no remap table is emitted.
  it("keeps GPT-OSS reasoning on the Groq low/medium/high contract", () => {
    expect(resolveGroqReasoningCompatPatch("openai/gpt-oss-120b")).toEqual({
      supportsReasoningEffort: true,
      supportedReasoningEfforts: ["low", "medium", "high"],
    });
  });

  // The contribution hook must gate on BOTH api and provider matching Groq.
  it("contributes compat only for Groq OpenAI-compatible chat models", () => {
    expect(
      contributeGroqResolvedModelCompat({
        modelId: "qwen/qwen3-32b",
        model: { api: "openai-completions", provider: "groq" },
      }),
    ).toMatchObject({ supportedReasoningEfforts: ["none", "default"] });
    // Same model id under a different provider must contribute nothing.
    expect(
      contributeGroqResolvedModelCompat({
        modelId: "qwen/qwen3-32b",
        model: { api: "openai-completions", provider: "openrouter" },
      }),
    ).toBeUndefined();
  });

  // Registration smoke test: the plugin wires up both the model provider
  // and the media-understanding provider under the "groq" id.
  it("registers Groq model and media providers", () => {
    const captured = capturePluginRegistration(plugin);
    expect(captured.providers[0]).toMatchObject({
      id: "groq",
      label: "Groq",
      envVars: ["GROQ_API_KEY"],
    });
    expect(captured.mediaUnderstandingProviders[0]?.id).toBe("groq");
  });
});
|
||||
@@ -1,11 +1,21 @@
|
||||
import { definePluginEntry } from "openclaw/plugin-sdk/plugin-entry";
|
||||
import { contributeGroqResolvedModelCompat } from "./api.js";
|
||||
import { groqMediaUnderstandingProvider } from "./media-understanding-provider.js";
|
||||
|
||||
export default definePluginEntry({
|
||||
id: "groq",
|
||||
name: "Groq Media Understanding",
|
||||
description: "Bundled Groq audio transcription provider",
|
||||
name: "Groq Provider",
|
||||
description: "Bundled Groq provider plugin",
|
||||
register(api) {
|
||||
api.registerProvider({
|
||||
id: "groq",
|
||||
label: "Groq",
|
||||
docsPath: "/providers/groq",
|
||||
envVars: ["GROQ_API_KEY"],
|
||||
auth: [],
|
||||
contributeResolvedModelCompat: ({ modelId, model }) =>
|
||||
contributeGroqResolvedModelCompat({ modelId, model }),
|
||||
});
|
||||
api.registerMediaUnderstandingProvider(groqMediaUnderstandingProvider);
|
||||
},
|
||||
});
|
||||
|
||||
@@ -147,6 +147,11 @@ describe("lmstudio plugin", () => {
|
||||
contextTokens: 8192,
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
compat: {
|
||||
supportsReasoningEffort: true,
|
||||
supportedReasoningEfforts: ["off", "on"],
|
||||
reasoningEffortMap: { off: "off", high: "on" },
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "phi-4",
|
||||
@@ -173,7 +178,12 @@ describe("lmstudio plugin", () => {
|
||||
provider: "lmstudio",
|
||||
id: "qwen3-8b-instruct",
|
||||
name: "Qwen 3 8B Instruct",
|
||||
compat: { supportsUsageInStreaming: true },
|
||||
compat: {
|
||||
supportsUsageInStreaming: true,
|
||||
supportsReasoningEffort: true,
|
||||
supportedReasoningEfforts: ["off", "on"],
|
||||
reasoningEffortMap: { off: "off", high: "on" },
|
||||
},
|
||||
contextWindow: 32768,
|
||||
contextTokens: 8192,
|
||||
reasoning: true,
|
||||
|
||||
@@ -34,7 +34,7 @@ function resolveLmstudioAugmentedCatalogEntries(config: OpenClawConfig | undefin
|
||||
provider: PROVIDER_ID,
|
||||
id: entry.id,
|
||||
name: entry.name ?? entry.id,
|
||||
compat: { supportsUsageInStreaming: true },
|
||||
compat: { ...entry.compat, supportsUsageInStreaming: true },
|
||||
contextWindow: entry.contextWindow,
|
||||
contextTokens: entry.contextTokens,
|
||||
reasoning: entry.reasoning,
|
||||
|
||||
@@ -163,7 +163,7 @@ export async function discoverLmstudioModels(
|
||||
reasoning: base.reasoning,
|
||||
input: base.input,
|
||||
cost: SELF_HOSTED_DEFAULT_COST,
|
||||
compat: { supportsUsageInStreaming: true },
|
||||
compat: { ...base.compat, supportsUsageInStreaming: true },
|
||||
contextWindow: base.contextWindow,
|
||||
contextTokens: base.contextTokens,
|
||||
maxTokens: base.maxTokens,
|
||||
|
||||
@@ -8,6 +8,7 @@ import { discoverLmstudioModels, ensureLmstudioModelLoaded } from "./models.fetc
|
||||
import {
|
||||
normalizeLmstudioProviderConfig,
|
||||
resolveLmstudioInferenceBase,
|
||||
resolveLmstudioReasoningCompat,
|
||||
resolveLmstudioReasoningCapability,
|
||||
resolveLmstudioServerBase,
|
||||
} from "./models.js";
|
||||
@@ -145,6 +146,40 @@ describe("lmstudio-models", () => {
|
||||
).toBe(false);
|
||||
});
|
||||
|
||||
it("maps LM Studio native reasoning options into OpenAI-compatible effort compat", () => {
|
||||
expect(
|
||||
resolveLmstudioReasoningCompat({
|
||||
capabilities: {
|
||||
reasoning: {
|
||||
allowed_options: ["off", "on"],
|
||||
default: "on",
|
||||
},
|
||||
},
|
||||
}),
|
||||
).toEqual({
|
||||
supportsReasoningEffort: true,
|
||||
supportedReasoningEfforts: ["off", "on"],
|
||||
reasoningEffortMap: expect.objectContaining({
|
||||
off: "off",
|
||||
none: "off",
|
||||
low: "on",
|
||||
medium: "on",
|
||||
high: "on",
|
||||
}),
|
||||
});
|
||||
|
||||
expect(
|
||||
resolveLmstudioReasoningCompat({
|
||||
capabilities: {
|
||||
reasoning: {
|
||||
allowed_options: ["off"],
|
||||
default: "off",
|
||||
},
|
||||
},
|
||||
}),
|
||||
).toBeUndefined();
|
||||
});
|
||||
|
||||
it("discovers llm models and maps metadata", async () => {
|
||||
const fetchMock = vi.fn(async (_url: string | URL) => ({
|
||||
ok: true,
|
||||
@@ -205,7 +240,17 @@ describe("lmstudio-models", () => {
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
compat: { supportsUsageInStreaming: true },
|
||||
compat: {
|
||||
supportsUsageInStreaming: true,
|
||||
supportsReasoningEffort: true,
|
||||
supportedReasoningEfforts: ["off", "on"],
|
||||
reasoningEffortMap: expect.objectContaining({
|
||||
off: "off",
|
||||
none: "off",
|
||||
medium: "on",
|
||||
high: "on",
|
||||
}),
|
||||
},
|
||||
contextWindow: 262144,
|
||||
contextTokens: LMSTUDIO_DEFAULT_LOAD_CONTEXT_LENGTH,
|
||||
maxTokens: SELF_HOSTED_DEFAULT_MAX_TOKENS,
|
||||
|
||||
@@ -40,6 +40,7 @@ type LmstudioConfiguredCatalogEntry = {
|
||||
contextTokens?: number;
|
||||
reasoning?: boolean;
|
||||
input?: ("text" | "image" | "document")[];
|
||||
compat?: ModelDefinitionConfig["compat"];
|
||||
};
|
||||
|
||||
function normalizeReasoningOption(value: unknown): string | null {
|
||||
@@ -58,6 +59,83 @@ function isReasoningEnabledOption(value: unknown): boolean {
|
||||
return normalized !== "off";
|
||||
}
|
||||
|
||||
function normalizeReasoningOptions(value: unknown): string[] {
|
||||
if (!Array.isArray(value)) {
|
||||
return [];
|
||||
}
|
||||
return [
|
||||
...new Set(
|
||||
value
|
||||
.map((option) => normalizeReasoningOption(option))
|
||||
.filter((option): option is string => option !== null),
|
||||
),
|
||||
];
|
||||
}
|
||||
|
||||
function resolveLmstudioReasoningDefault(
|
||||
reasoning: LmstudioReasoningCapabilityWire,
|
||||
): string | null {
|
||||
const normalizedDefault = normalizeReasoningOption(reasoning.default);
|
||||
return normalizedDefault && isReasoningEnabledOption(normalizedDefault)
|
||||
? normalizedDefault
|
||||
: null;
|
||||
}
|
||||
|
||||
function resolveLmstudioEnabledReasoningOption(
|
||||
allowedOptions: readonly string[],
|
||||
reasoning: LmstudioReasoningCapabilityWire,
|
||||
): string | undefined {
|
||||
const normalizedDefault = resolveLmstudioReasoningDefault(reasoning);
|
||||
if (normalizedDefault && allowedOptions.includes(normalizedDefault)) {
|
||||
return normalizedDefault;
|
||||
}
|
||||
return (
|
||||
allowedOptions.find((option) => option === "on" || option === "default") ??
|
||||
allowedOptions.find((option) => isReasoningEnabledOption(option))
|
||||
);
|
||||
}
|
||||
|
||||
function resolveLmstudioDisabledReasoningOption(
|
||||
allowedOptions: readonly string[],
|
||||
): string | undefined {
|
||||
return (
|
||||
allowedOptions.find((option) => option === "off") ??
|
||||
allowedOptions.find((option) => option === "none")
|
||||
);
|
||||
}
|
||||
|
||||
export function resolveLmstudioReasoningCompat(
|
||||
entry: Pick<LmstudioModelWire, "capabilities">,
|
||||
): ModelDefinitionConfig["compat"] | undefined {
|
||||
const reasoning = entry.capabilities?.reasoning;
|
||||
if (reasoning === undefined || reasoning === null) {
|
||||
return undefined;
|
||||
}
|
||||
const allowedOptions = normalizeReasoningOptions(reasoning.allowed_options);
|
||||
if (allowedOptions.length === 0) {
|
||||
return undefined;
|
||||
}
|
||||
const enabled = resolveLmstudioEnabledReasoningOption(allowedOptions, reasoning);
|
||||
if (!enabled) {
|
||||
return undefined;
|
||||
}
|
||||
const disabled = resolveLmstudioDisabledReasoningOption(allowedOptions);
|
||||
return {
|
||||
supportsReasoningEffort: true,
|
||||
supportedReasoningEfforts: allowedOptions,
|
||||
reasoningEffortMap: {
|
||||
...(disabled ? { off: disabled, none: disabled } : {}),
|
||||
minimal: enabled,
|
||||
low: enabled,
|
||||
medium: enabled,
|
||||
high: enabled,
|
||||
xhigh: enabled,
|
||||
adaptive: enabled,
|
||||
max: enabled,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolves LM Studio reasoning support from capabilities payloads.
|
||||
* Defaults to false when the server omits reasoning metadata.
|
||||
@@ -69,12 +147,7 @@ export function resolveLmstudioReasoningCapability(
|
||||
if (reasoning === undefined || reasoning === null) {
|
||||
return false;
|
||||
}
|
||||
const allowedOptionsRaw = reasoning.allowed_options;
|
||||
const allowedOptions = Array.isArray(allowedOptionsRaw)
|
||||
? allowedOptionsRaw
|
||||
.map((option) => normalizeReasoningOption(option))
|
||||
.filter((option): option is string => option !== null)
|
||||
: [];
|
||||
const allowedOptions = normalizeReasoningOptions(reasoning.allowed_options);
|
||||
if (allowedOptions.length > 0) {
|
||||
return allowedOptions.some((option) => isReasoningEnabledOption(option));
|
||||
}
|
||||
@@ -130,6 +203,41 @@ function isLikelyHostBaseUrl(value: string): boolean {
|
||||
);
|
||||
}
|
||||
|
||||
function normalizeConfiguredReasoningEffortMap(value: unknown): Record<string, string> | undefined {
|
||||
if (!value || typeof value !== "object" || Array.isArray(value)) {
|
||||
return undefined;
|
||||
}
|
||||
const normalized = Object.fromEntries(
|
||||
Object.entries(value)
|
||||
.map(([key, mapped]) => [key.trim(), typeof mapped === "string" ? mapped.trim() : ""])
|
||||
.filter(([key, mapped]) => key.length > 0 && mapped.length > 0),
|
||||
);
|
||||
return Object.keys(normalized).length > 0 ? normalized : undefined;
|
||||
}
|
||||
|
||||
function normalizeLmstudioConfiguredCompat(value: unknown): ModelDefinitionConfig["compat"] {
|
||||
if (!value || typeof value !== "object" || Array.isArray(value)) {
|
||||
return undefined;
|
||||
}
|
||||
const record = value as Record<string, unknown>;
|
||||
const supportedReasoningEfforts = normalizeReasoningOptions(record.supportedReasoningEfforts);
|
||||
const reasoningEffortMap = normalizeConfiguredReasoningEffortMap(record.reasoningEffortMap);
|
||||
const compat: NonNullable<ModelDefinitionConfig["compat"]> = {};
|
||||
if (typeof record.supportsUsageInStreaming === "boolean") {
|
||||
compat.supportsUsageInStreaming = record.supportsUsageInStreaming;
|
||||
}
|
||||
if (typeof record.supportsReasoningEffort === "boolean") {
|
||||
compat.supportsReasoningEffort = record.supportsReasoningEffort;
|
||||
}
|
||||
if (supportedReasoningEfforts.length > 0) {
|
||||
compat.supportedReasoningEfforts = supportedReasoningEfforts;
|
||||
}
|
||||
if (reasoningEffortMap) {
|
||||
compat.reasoningEffortMap = reasoningEffortMap;
|
||||
}
|
||||
return Object.keys(compat).length > 0 ? compat : undefined;
|
||||
}
|
||||
|
||||
function toFetchableLmstudioBaseUrl(value: string): string {
|
||||
if (hasExplicitHttpScheme(value) || !isLikelyHostBaseUrl(value)) {
|
||||
return value;
|
||||
@@ -226,6 +334,7 @@ export function normalizeLmstudioConfiguredCatalogEntry(
|
||||
item === "text" || item === "image" || item === "document",
|
||||
)
|
||||
: undefined;
|
||||
const compat = normalizeLmstudioConfiguredCompat(record.compat);
|
||||
return {
|
||||
id,
|
||||
name,
|
||||
@@ -233,6 +342,7 @@ export function normalizeLmstudioConfiguredCatalogEntry(
|
||||
contextTokens,
|
||||
reasoning,
|
||||
input: input && input.length > 0 ? input : undefined,
|
||||
compat,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -290,6 +400,7 @@ export type LmstudioModelBase = {
|
||||
reasoning: boolean;
|
||||
input: Array<"text" | "image">;
|
||||
cost: ModelDefinitionConfig["cost"];
|
||||
compat?: ModelDefinitionConfig["compat"];
|
||||
contextWindow: number;
|
||||
contextTokens: number;
|
||||
maxTokens: number;
|
||||
@@ -335,6 +446,7 @@ export function mapLmstudioWireEntry(entry: LmstudioModelWire): LmstudioModelBas
|
||||
reasoning: resolveLmstudioReasoningCapability(entry),
|
||||
input: entry.capabilities?.vision ? ["text", "image"] : ["text"],
|
||||
cost: SELF_HOSTED_DEFAULT_COST,
|
||||
compat: resolveLmstudioReasoningCompat(entry),
|
||||
contextWindow,
|
||||
contextTokens,
|
||||
maxTokens: Math.max(1, Math.min(contextWindow, SELF_HOSTED_DEFAULT_MAX_TOKENS)),
|
||||
@@ -361,6 +473,7 @@ export function mapLmstudioWireModelsToConfig(
|
||||
reasoning: base.reasoning,
|
||||
input: base.input,
|
||||
cost: base.cost,
|
||||
...(base.compat ? { compat: base.compat } : {}),
|
||||
contextWindow: base.contextWindow,
|
||||
contextTokens: base.contextTokens,
|
||||
maxTokens: base.maxTokens,
|
||||
|
||||
Reference in New Issue
Block a user