Mirror of https://github.com/openclaw/openclaw.git (synced 2026-05-06 12:20:44 +00:00)
fix: centralize provider thinking profiles
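This change collapses the per-capability thinking hooks (supportsXHighThinking, supportsAdaptiveThinking, supportsMaxThinking, isBinaryThinking, resolveDefaultThinkingLevel) into a single resolveThinkingProfile hook that each provider plugin implements. A rough sketch of the profile shape, read off the hunks below rather than taken from the canonical declaration in openclaw/plugin-sdk/core:

// Sketch only: inferred from the hunks below; the real ProviderThinkingProfile
// type is exported from "openclaw/plugin-sdk/core" and may differ in detail.
type ThinkingLevelId =
  | "off" | "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive" | "max";

interface ProviderThinkingProfileSketch {
  // Levels the provider advertises for the given model; label is optional
  // (binary providers such as kimi and moonshot expose "off"/"on" labels).
  levels: Array<{ id: ThinkingLevelId; label?: string }>;
  // Level used when the user picks none; omitted when there is no provider default.
  defaultLevel?: ThinkingLevelId;
}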
@@ -96,23 +96,22 @@ describe("amazon-bedrock provider plugin", () => {
     const provider = await registerSingleProviderPlugin(amazonBedrockPlugin);
 
     expect(
-      provider.resolveDefaultThinkingLevel?.({
+      provider.resolveThinkingProfile?.({
        provider: "amazon-bedrock",
        modelId: "us.anthropic.claude-opus-4-6-v1",
      } as never),
-    ).toBe("adaptive");
+    ).toMatchObject({
+      levels: expect.arrayContaining([{ id: "adaptive" }]),
+      defaultLevel: "adaptive",
+    });
     expect(
-      provider.resolveDefaultThinkingLevel?.({
+      provider.resolveThinkingProfile?.({
        provider: "amazon-bedrock",
        modelId: "amazon.nova-micro-v1:0",
      } as never),
-    ).toBeUndefined();
-    expect(
-      provider.supportsAdaptiveThinking?.({
-        provider: "amazon-bedrock",
-        modelId: "us.anthropic.claude-opus-4-6-v1",
-      } as never),
-    ).toBe(true);
+    ).toMatchObject({
+      levels: expect.not.arrayContaining([{ id: "adaptive" }]),
+    });
   });
 
   it("owns Anthropic-style replay policy for Claude Bedrock models", async () => {
@@ -191,8 +191,16 @@ export function registerAmazonBedrockPlugin(api: OpenClawPluginApi): void {
      }
      return undefined;
    },
-    supportsAdaptiveThinking: ({ modelId }) => claude46ModelRe.test(modelId.trim()),
-    resolveDefaultThinkingLevel: ({ modelId }) =>
-      claude46ModelRe.test(modelId.trim()) ? "adaptive" : undefined,
+    resolveThinkingProfile: ({ modelId }) => ({
+      levels: [
+        { id: "off" },
+        { id: "minimal" },
+        { id: "low" },
+        { id: "medium" },
+        { id: "high" },
+        ...(claude46ModelRe.test(modelId.trim()) ? [{ id: "adaptive" as const }] : []),
+      ],
+      defaultLevel: claude46ModelRe.test(modelId.trim()) ? "adaptive" : undefined,
+    }),
   });
 }
@@ -225,53 +225,31 @@ describe("anthropic provider replay hooks", () => {
       reasoning: true,
     });
     expect(
-      provider.resolveDefaultThinkingLevel?.({
+      provider.resolveThinkingProfile?.({
        provider: "anthropic",
        modelId: "claude-opus-4-7",
      } as never),
-    ).toBe("off");
+    ).toMatchObject({
+      levels: expect.arrayContaining([{ id: "xhigh" }, { id: "adaptive" }, { id: "max" }]),
+      defaultLevel: "off",
+    });
     expect(
-      provider.resolveDefaultThinkingLevel?.({
+      provider.resolveThinkingProfile?.({
        provider: "anthropic",
        modelId: "claude-opus-4-6",
      } as never),
-    ).toBe("adaptive");
+    ).toMatchObject({
+      levels: expect.arrayContaining([{ id: "adaptive" }]),
+      defaultLevel: "adaptive",
+    });
     expect(
-      provider.supportsXHighThinking?.({
-        provider: "anthropic",
-        modelId: "claude-opus-4-7",
-      } as never),
-    ).toBe(true);
-    expect(
-      provider.supportsXHighThinking?.({
-        provider: "anthropic",
-        modelId: "claude-opus-4-6",
-      } as never),
+      provider
+        .resolveThinkingProfile?.({
+          provider: "anthropic",
+          modelId: "claude-opus-4-6",
+        } as never)
+        ?.levels.some((level) => level.id === "xhigh" || level.id === "max"),
     ).toBe(false);
-    expect(
-      provider.supportsMaxThinking?.({
-        provider: "anthropic",
-        modelId: "claude-opus-4-7",
-      } as never),
-    ).toBe(true);
-    expect(
-      provider.supportsMaxThinking?.({
-        provider: "anthropic",
-        modelId: "claude-opus-4-6",
-      } as never),
-    ).toBe(false);
-    expect(
-      provider.supportsAdaptiveThinking?.({
-        provider: "anthropic",
-        modelId: "claude-opus-4-7",
-      } as never),
-    ).toBe(true);
-    expect(
-      provider.supportsAdaptiveThinking?.({
-        provider: "anthropic",
-        modelId: "claude-opus-4-6",
-      } as never),
-    ).toBe(true);
   });
 
   it("resolves claude-cli synthetic oauth auth", async () => {
@@ -494,16 +494,26 @@ export function buildAnthropicProvider(): ProviderPlugin {
     buildReplayPolicy: buildAnthropicReplayPolicy,
     isModernModelRef: ({ modelId }) => matchesAnthropicModernModel(modelId),
     resolveReasoningOutputMode: () => "native",
-    supportsXHighThinking: ({ modelId }) => isAnthropicOpus47Model(modelId),
-    supportsAdaptiveThinking: ({ modelId }) => supportsAnthropicAdaptiveThinking(modelId),
-    supportsMaxThinking: ({ modelId }) => isAnthropicOpus47Model(modelId),
+    resolveThinkingProfile: ({ modelId }) => {
+      const levels: Array<{
+        id: "off" | "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive" | "max";
+      }> = [{ id: "off" }, { id: "minimal" }, { id: "low" }, { id: "medium" }, { id: "high" }];
+      if (isAnthropicOpus47Model(modelId)) {
+        levels.push({ id: "xhigh" }, { id: "adaptive" }, { id: "max" });
+      } else if (supportsAnthropicAdaptiveThinking(modelId)) {
+        levels.push({ id: "adaptive" });
+      }
+      return {
+        levels,
+        defaultLevel: isAnthropicOpus47Model(modelId)
+          ? "off"
+          : matchesAnthropicModernModel(modelId) &&
+              shouldUseAnthropicAdaptiveThinkingDefault(modelId)
+            ? "adaptive"
+            : undefined,
+      };
+    },
     wrapStreamFn: wrapAnthropicProviderStream,
-    resolveDefaultThinkingLevel: ({ modelId }) =>
-      isAnthropicOpus47Model(modelId)
-        ? "off"
-        : matchesAnthropicModernModel(modelId) && shouldUseAnthropicAdaptiveThinkingDefault(modelId)
-          ? "adaptive"
-          : undefined,
     resolveUsageAuth: async (ctx) => await ctx.resolveOAuthToken(),
     fetchUsageSnapshot: async (ctx) =>
       await fetchClaudeUsage(ctx.token, ctx.timeoutMs, ctx.fetchFn),
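For reference, the anthropic test hunk above pins down what this hook resolves for the two Opus models; a condensed view of the same expectations, not output from running the code (import of buildAnthropicProvider from the anthropic provider module assumed):

const anthropic = buildAnthropicProvider();
// claude-opus-4-7: levels include "xhigh", "adaptive" and "max"; defaultLevel is "off".
anthropic.resolveThinkingProfile?.({ provider: "anthropic", modelId: "claude-opus-4-7" } as never);
// claude-opus-4-6: levels include "adaptive" but neither "xhigh" nor "max"; defaultLevel is "adaptive".
anthropic.resolveThinkingProfile?.({ provider: "anthropic", modelId: "claude-opus-4-6" } as never);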
@@ -160,7 +160,11 @@ describe("codex provider", () => {
       reasoning: true,
       compat: { supportsReasoningEffort: true },
     });
-    expect(provider.supportsXHighThinking?.({ provider: "codex", modelId: "o4-mini" })).toBe(true);
+    expect(
+      provider
+        .resolveThinkingProfile?.({ provider: "codex", modelId: "o4-mini" } as never)
+        ?.levels.some((level) => level.id === "xhigh"),
+    ).toBe(true);
   });
 
   it("declares synthetic auth because the harness owns Codex credentials", () => {
@@ -89,7 +89,16 @@ export function buildCodexProvider(options: BuildCodexProviderOptions = {}): Pro
       source: "codex-app-server",
       mode: "token",
     }),
-    supportsXHighThinking: ({ modelId }) => isKnownXHighCodexModel(modelId),
+    resolveThinkingProfile: ({ modelId }) => ({
+      levels: [
+        { id: "off" },
+        { id: "minimal" },
+        { id: "low" },
+        { id: "medium" },
+        { id: "high" },
+        ...(isKnownXHighCodexModel(modelId) ? [{ id: "xhigh" as const }] : []),
+      ],
+    }),
     isModernModelRef: ({ modelId }) => isModernCodexModel(modelId),
   };
 }
@@ -34,10 +34,9 @@ const resolveConfiguredBindingRouteMock = vi.hoisted(() =>
   vi.fn<ResolveConfiguredBindingRoute>(() => createUnboundConfiguredRouteResult()),
 );
 const providerThinkingMocks = vi.hoisted(() => ({
   resolveProviderAdaptiveThinking: vi.fn(),
   resolveProviderBinaryThinking: vi.fn(),
-  resolveProviderDefaultThinkingLevel: vi.fn(),
-  resolveProviderMaxThinking: vi.fn(),
+  resolveProviderThinkingProfile: vi.fn(),
   resolveProviderXHighThinking: vi.fn(),
 }));
 const buildModelsProviderDataMock = vi.hoisted(() => vi.fn());
@@ -129,10 +128,9 @@ let resolveDiscordNativeChoiceContext: typeof import("./native-command-ui.js").r
 async function loadDiscordThinkAutocompleteModulesForTest() {
   vi.resetModules();
   vi.doMock("../../../../src/plugins/provider-thinking.js", () => ({
     resolveProviderAdaptiveThinking: providerThinkingMocks.resolveProviderAdaptiveThinking,
     resolveProviderBinaryThinking: providerThinkingMocks.resolveProviderBinaryThinking,
-    resolveProviderDefaultThinkingLevel: providerThinkingMocks.resolveProviderDefaultThinkingLevel,
-    resolveProviderMaxThinking: providerThinkingMocks.resolveProviderMaxThinking,
+    resolveProviderThinkingProfile: providerThinkingMocks.resolveProviderThinkingProfile,
     resolveProviderXHighThinking: providerThinkingMocks.resolveProviderXHighThinking,
   }));
   const commandAuth = await import("openclaw/plugin-sdk/command-auth");
@@ -147,9 +145,8 @@ async function loadDiscordThinkAutocompleteModulesForTest() {
 describe("discord native /think autocomplete", () => {
   beforeAll(async () => {
     providerThinkingMocks.resolveProviderBinaryThinking.mockReturnValue(undefined);
     providerThinkingMocks.resolveProviderAdaptiveThinking.mockReturnValue(undefined);
-    providerThinkingMocks.resolveProviderDefaultThinkingLevel.mockReturnValue(undefined);
-    providerThinkingMocks.resolveProviderMaxThinking.mockReturnValue(undefined);
+    providerThinkingMocks.resolveProviderThinkingProfile.mockReturnValue(undefined);
     providerThinkingMocks.resolveProviderXHighThinking.mockImplementation(({ provider, context }) =>
       provider === "openai-codex" && ["gpt-5.4", "gpt-5.4-pro"].includes(context.modelId)
         ? true
@@ -176,14 +173,10 @@ describe("discord native /think autocomplete", () => {
     resolveConfiguredBindingRouteMock.mockReturnValue(createUnboundConfiguredRouteResult());
     providerThinkingMocks.resolveProviderBinaryThinking.mockReset();
     providerThinkingMocks.resolveProviderBinaryThinking.mockReturnValue(undefined);
     providerThinkingMocks.resolveProviderAdaptiveThinking.mockReset();
     providerThinkingMocks.resolveProviderAdaptiveThinking.mockReturnValue(undefined);
-    providerThinkingMocks.resolveProviderDefaultThinkingLevel.mockReset();
-    providerThinkingMocks.resolveProviderDefaultThinkingLevel.mockReturnValue(undefined);
-    providerThinkingMocks.resolveProviderMaxThinking.mockReset();
-    providerThinkingMocks.resolveProviderMaxThinking.mockImplementation(({ provider, context }) =>
-      provider === "anthropic" && context.modelId === "claude-opus-4-7" ? true : undefined,
-    );
+    providerThinkingMocks.resolveProviderThinkingProfile.mockReset();
+    providerThinkingMocks.resolveProviderThinkingProfile.mockReturnValue(undefined);
     providerThinkingMocks.resolveProviderXHighThinking.mockReset();
     providerThinkingMocks.resolveProviderXHighThinking.mockImplementation(({ provider, context }) =>
       provider === "openai-codex" && ["gpt-5.4", "gpt-5.4-pro"].includes(context.modelId)
@@ -275,6 +268,12 @@ describe("discord native /think autocomplete", () => {
   });
 
   it("includes max only for provider-advertised models", async () => {
+    providerThinkingMocks.resolveProviderThinkingProfile.mockImplementation(
+      ({ provider, context }) =>
+        provider === "anthropic" && context.modelId === "claude-opus-4-7"
+          ? { levels: [{ id: "off" }, { id: "max" }] }
+          : undefined,
+    );
     fs.writeFileSync(
       STORE_PATH,
       JSON.stringify({
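The new expectation above checks that the /think autocomplete only offers "max" when the resolved profile advertises it. A loose sketch of that filtering step, with illustrative names only (the actual choice-building code is not part of this diff):

// Illustrative only: narrow candidate levels to whatever the resolved profile advertises.
const CANDIDATE_LEVELS = ["off", "minimal", "low", "medium", "high", "xhigh", "adaptive", "max"];

function filterThinkChoices(
  profile: { levels: Array<{ id: string; label?: string }> } | undefined,
): string[] {
  // Without a profile, fall back to the plain ladder (no extended levels).
  if (!profile) return ["off", "minimal", "low", "medium", "high"];
  const advertised = new Set(profile.levels.map((level) => level.id));
  return CANDIDATE_LEVELS.filter((id) => advertised.has(id));
}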
@@ -138,10 +138,20 @@ export default definePluginEntry({
       resolveDynamicModel: (ctx) => resolveCopilotForwardCompatModel(ctx),
       wrapStreamFn: wrapCopilotProviderStream,
       buildReplayPolicy: ({ modelId }) => buildGithubCopilotReplayPolicy(modelId),
-      supportsXHighThinking: ({ modelId }) =>
-        COPILOT_XHIGH_MODEL_IDS.includes(
-          (normalizeOptionalLowercaseString(modelId) ?? "") as never,
-        ),
+      resolveThinkingProfile: ({ modelId }) => ({
+        levels: [
+          { id: "off" },
+          { id: "minimal" },
+          { id: "low" },
+          { id: "medium" },
+          { id: "high" },
+          ...(COPILOT_XHIGH_MODEL_IDS.includes(
+            (normalizeOptionalLowercaseString(modelId) ?? "") as never,
+          )
+            ? [{ id: "xhigh" as const }]
+            : []),
+        ],
+      }),
       prepareRuntimeAuth: async (ctx) => {
         const { resolveCopilotApiToken } = await loadGithubCopilotRuntime();
         const token = await resolveCopilotApiToken({
@@ -1,9 +1,19 @@
+import type {
+  ProviderDefaultThinkingPolicyContext,
+  ProviderThinkingProfile,
+} from "openclaw/plugin-sdk/core";
 import { buildProviderReplayFamilyHooks } from "openclaw/plugin-sdk/provider-model-shared";
-import { createGoogleThinkingStreamWrapper } from "./thinking-api.js";
+import { createGoogleThinkingStreamWrapper, isGoogleGemini3ProModel } from "./thinking-api.js";
 
 export const GOOGLE_GEMINI_PROVIDER_HOOKS = {
   ...buildProviderReplayFamilyHooks({
     family: "google-gemini",
   }),
+  resolveThinkingProfile: ({ modelId }: ProviderDefaultThinkingPolicyContext) =>
+    ({
+      levels: isGoogleGemini3ProModel(modelId)
+        ? [{ id: "off" }, { id: "low" }, { id: "high" }]
+        : [{ id: "off" }, { id: "minimal" }, { id: "low" }, { id: "medium" }, { id: "high" }],
+    }) satisfies ProviderThinkingProfile,
   wrapStreamFn: createGoogleThinkingStreamWrapper,
 };
@@ -7,17 +7,17 @@ describe("kimi provider plugin", () => {
     const provider = await registerSingleProviderPlugin(plugin);
 
     expect(
-      provider.isBinaryThinking?.({
-        provider: "kimi",
-        modelId: "kimi-code",
-      } as never),
-    ).toBe(true);
-    expect(
-      provider.resolveDefaultThinkingLevel?.({
+      provider.resolveThinkingProfile?.({
        provider: "kimi",
        modelId: "kimi-code",
        reasoning: true,
      } as never),
-    ).toBe("off");
+    ).toEqual({
+      levels: [
+        { id: "off", label: "off" },
+        { id: "low", label: "on" },
+      ],
+      defaultLevel: "off",
+    });
   });
 });
@@ -96,8 +96,13 @@ export default definePluginEntry({
         },
       },
       buildReplayPolicy: () => KIMI_REPLAY_POLICY,
-      isBinaryThinking: () => true,
-      resolveDefaultThinkingLevel: () => "off",
+      resolveThinkingProfile: () => ({
+        levels: [
+          { id: "off", label: "off" },
+          { id: "low", label: "on" },
+        ],
+        defaultLevel: "off",
+      }),
       wrapStreamFn: wrapKimiProviderStream,
     });
   },
@@ -178,7 +178,7 @@ describe("llm-task tool (json-only)", () => {
   it("throws on unsupported xhigh thinking level", async () => {
     const tool = createLlmTaskTool(fakeApi());
     await expect(tool.execute("id", { prompt: "x", thinking: "xhigh" })).rejects.toThrow(
-      /only supported/i,
+      /not supported/i,
     );
   });
 
@@ -4,11 +4,10 @@ import { Type } from "@sinclair/typebox";
 import Ajv from "ajv";
 import { normalizeOptionalString } from "openclaw/plugin-sdk/text-runtime";
 import {
-  formatXHighModelHint,
+  formatThinkingLevels,
+  isThinkingLevelSupported,
   normalizeThinkLevel,
   resolvePreferredOpenClawTmpDir,
-  resolveSupportedThinkingLevel,
-  supportsXHighThinking,
 } from "../api.js";
 import type { OpenClawPluginApi } from "../api.js";
 
@@ -145,15 +144,17 @@ export function createLlmTaskTool(api: OpenClawPluginApi) {
       );
     }
     let resolvedThinkLevel = thinkLevel;
-    if (thinkLevel === "xhigh" && !supportsXHighThinking(provider, model)) {
-      throw new Error(`Thinking level "xhigh" is only supported for ${formatXHighModelHint()}.`);
-    }
-    if (thinkLevel === "max") {
-      resolvedThinkLevel = resolveSupportedThinkingLevel({
+    if (
+      thinkLevel &&
+      !isThinkingLevelSupported({
        provider,
        model,
        level: thinkLevel,
-      });
+      })
+    ) {
+      throw new Error(
+        `Thinking level "${thinkLevel}" is not supported for ${provider}/${model}. Use one of: ${formatThinkingLevels(provider, model)}.`,
+      );
     }
 
     const timeoutMs =
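The llm-task tool now validates against two SDK helpers, isThinkingLevelSupported and formatThinkingLevels, whose implementations are not shown in this diff. A minimal sketch of how such helpers can sit on top of a resolved profile (hypothetical; the real helpers in ../api.js may consult resolveProviderThinkingProfile and differ in detail):

// Hypothetical sketch built directly on a ProviderThinkingProfile-like value.
type LevelEntry = { id: string; label?: string };
type ProfileSketch = { levels: LevelEntry[]; defaultLevel?: string };

const BASE_LEVELS: LevelEntry[] = [
  { id: "off" }, { id: "minimal" }, { id: "low" }, { id: "medium" }, { id: "high" },
];

function isThinkingLevelSupportedSketch(profile: ProfileSketch | undefined, level: string): boolean {
  return (profile?.levels ?? BASE_LEVELS).some((entry) => entry.id === level);
}

function formatThinkingLevelsSketch(profile: ProfileSketch | undefined): string {
  // e.g. "off, low" for a binary provider like kimi in the hunks above.
  return (profile?.levels ?? BASE_LEVELS).map((entry) => entry.id).join(", ");
}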
@@ -1,5 +1,5 @@
 import { defineSingleProviderPluginEntry } from "openclaw/plugin-sdk/provider-entry";
-import { applyMistralModelCompat } from "./api.js";
+import { applyMistralModelCompat, MISTRAL_SMALL_LATEST_ID } from "./api.js";
 import { mistralMediaUnderstandingProvider } from "./media-understanding-provider.js";
 import { mistralMemoryEmbeddingProviderAdapter } from "./memory-embedding-adapter.js";
 import { applyMistralConfig, MISTRAL_DEFAULT_MODEL_REF } from "./onboard.js";
@@ -46,6 +46,10 @@ export default defineSingleProviderPluginEntry({
     normalizeResolvedModel: ({ model }) => applyMistralModelCompat(model),
     contributeResolvedModelCompat: ({ modelId, model }) =>
       contributeMistralResolvedModelCompat({ modelId, model }),
+    resolveThinkingProfile: ({ modelId }) =>
+      modelId === MISTRAL_SMALL_LATEST_ID
+        ? { levels: [{ id: "off" }, { id: "high" }], defaultLevel: "off" }
+        : undefined,
     buildReplayPolicy: () => buildMistralReplayPolicy(),
   },
   register(api) {
@@ -58,6 +58,13 @@ export default defineSingleProviderPluginEntry({
       applyMoonshotNativeStreamingUsageCompat(providerConfig),
     ...OPENAI_COMPATIBLE_REPLAY_HOOKS,
     ...MOONSHOT_THINKING_STREAM_HOOKS,
+    resolveThinkingProfile: () => ({
+      levels: [
+        { id: "off", label: "off" },
+        { id: "low", label: "on" },
+      ],
+      defaultLevel: "off",
+    }),
   },
   register(api) {
     api.registerMediaUnderstandingProvider(moonshotMediaUnderstandingProvider);
@@ -387,8 +387,18 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
     },
     resolveDynamicModel: (ctx) => resolveCodexForwardCompatModel(ctx),
     buildAuthDoctorHint: (ctx) => buildOpenAICodexAuthDoctorHint(ctx),
-    supportsXHighThinking: ({ modelId }) =>
-      matchesExactOrPrefix(modelId, OPENAI_CODEX_XHIGH_MODEL_IDS),
+    resolveThinkingProfile: ({ modelId }) => ({
+      levels: [
+        { id: "off" },
+        { id: "minimal" },
+        { id: "low" },
+        { id: "medium" },
+        { id: "high" },
+        ...(matchesExactOrPrefix(modelId, OPENAI_CODEX_XHIGH_MODEL_IDS)
+          ? [{ id: "xhigh" as const }]
+          : []),
+      ],
+    }),
     isModernModelRef: ({ modelId }) => matchesExactOrPrefix(modelId, OPENAI_CODEX_MODERN_MODEL_IDS),
     preferRuntimeResolvedModel: (ctx) => {
       if (normalizeProviderId(ctx.provider) !== PROVIDER_ID) {
@@ -121,16 +121,20 @@ describe("buildOpenAIProvider", () => {
     const provider = buildOpenAIProvider();
 
     expect(
-      provider.supportsXHighThinking?.({
-        provider: "openai",
-        modelId: "gpt-5.4-mini",
-      } as never),
+      provider
+        .resolveThinkingProfile?.({
+          provider: "openai",
+          modelId: "gpt-5.4-mini",
+        } as never)
+        ?.levels.some((level) => level.id === "xhigh"),
     ).toBe(true);
     expect(
-      provider.supportsXHighThinking?.({
-        provider: "openai",
-        modelId: "gpt-5.4-nano",
-      } as never),
+      provider
+        .resolveThinkingProfile?.({
+          provider: "openai",
+          modelId: "gpt-5.4-nano",
+        } as never)
+        ?.levels.some((level) => level.id === "xhigh"),
     ).toBe(true);
 
     const entries = provider.augmentModelCatalog?.({
@@ -218,7 +218,18 @@ export function buildOpenAIProvider(): ProviderPlugin {
     matchesContextOverflowError: ({ errorMessage }) =>
       /content_filter.*(?:prompt|input).*(?:too long|exceed)/i.test(errorMessage),
     resolveReasoningOutputMode: () => "native",
-    supportsXHighThinking: ({ modelId }) => matchesExactOrPrefix(modelId, OPENAI_XHIGH_MODEL_IDS),
+    resolveThinkingProfile: ({ modelId }) => ({
+      levels: [
+        { id: "off" },
+        { id: "minimal" },
+        { id: "low" },
+        { id: "medium" },
+        { id: "high" },
+        ...(matchesExactOrPrefix(modelId, OPENAI_XHIGH_MODEL_IDS)
+          ? [{ id: "xhigh" as const }]
+          : []),
+      ],
+    }),
     isModernModelRef: ({ modelId }) => matchesExactOrPrefix(modelId, OPENAI_MODERN_MODEL_IDS),
     buildMissingAuthMessage: (ctx) => {
       if (ctx.provider !== PROVIDER_ID || ctx.listProfileIds("openai-codex").length === 0) {
@@ -197,6 +197,7 @@ export default defineSingleProviderPluginEntry({
       shouldContributeXaiCompat({ modelId, model }) ? resolveXaiModelCompatPatch() : undefined,
     normalizeModelId: ({ modelId }) => normalizeXaiModelId(modelId),
     resolveDynamicModel: (ctx) => resolveXaiForwardCompatModel({ providerId: PROVIDER_ID, ctx }),
+    resolveThinkingProfile: () => ({ levels: [{ id: "off" }], defaultLevel: "off" }),
     isModernModelRef: ({ modelId }) => isModernXaiModel(modelId),
   },
   register(api) {
@@ -280,7 +280,13 @@ export default definePluginEntry({
       ...OPENAI_COMPATIBLE_REPLAY_HOOKS,
       prepareExtraParams: (ctx) => defaultToolStreamExtraParams(ctx.extraParams),
       ...TOOL_STREAM_DEFAULT_ON_HOOKS,
-      isBinaryThinking: () => true,
+      resolveThinkingProfile: () => ({
+        levels: [
+          { id: "off", label: "off" },
+          { id: "low", label: "on" },
+        ],
+        defaultLevel: "off",
+      }),
       isModernModelRef: ({ modelId }) => {
         const lower = normalizeLowercaseStringOrEmpty(modelId);
         return (