refactor(plugins): move auth and model policy to providers

This commit is contained in:
Peter Steinberger
2026-03-15 20:58:59 -07:00
parent ca2f046668
commit a33caab280
30 changed files with 1080 additions and 653 deletions

View File

@@ -25,8 +25,10 @@ For model selection rules, see [/concepts/models](/concepts/models).
`resolveDynamicModel`, `prepareDynamicModel`, `normalizeResolvedModel`,
`capabilities`, `prepareExtraParams`, `wrapStreamFn`,
`isCacheTtlEligible`, `buildMissingAuthMessage`,
`suppressBuiltInModel`, `augmentModelCatalog`, `prepareRuntimeAuth`,
`resolveUsageAuth`, and `fetchUsageSnapshot`.
`suppressBuiltInModel`, `augmentModelCatalog`, `isBinaryThinking`,
`supportsXHighThinking`, `resolveDefaultThinkingLevel`,
`isModernModelRef`, `prepareRuntimeAuth`, `resolveUsageAuth`, and
`fetchUsageSnapshot`.
## Plugin-owned provider behavior
@@ -51,6 +53,11 @@ Typical split:
vendor-owned error for direct resolution failures
- `augmentModelCatalog`: provider appends synthetic/final catalog rows after
discovery and config merging
- `isBinaryThinking`: provider owns binary on/off thinking UX
- `supportsXHighThinking`: provider opts selected models into `xhigh`
- `resolveDefaultThinkingLevel`: provider owns default `/think` policy for a
model family
- `isModernModelRef`: provider owns live/smoke preferred-model matching
- `prepareRuntimeAuth`: provider turns a configured credential into a
short-lived runtime token
- `resolveUsageAuth`: provider resolves usage/quota credentials for `/usage`
@@ -68,14 +75,16 @@ Current bundled examples:
hints, runtime token exchange, and usage endpoint fetching
- `openai`: GPT-5.4 forward-compat fallback, direct OpenAI transport
normalization, Codex-aware missing-auth hints, Spark suppression, synthetic
OpenAI/Codex catalog rows, and provider-family metadata
- `google-gemini-cli`: Gemini 3.1 forward-compat fallback plus usage-token
parsing and quota endpoint fetching for usage surfaces
OpenAI/Codex catalog rows, thinking/live-model policy, and
provider-family metadata
- `google` and `google-gemini-cli`: Gemini 3.1 forward-compat fallback and
modern-model matching; Gemini CLI OAuth also owns usage-token parsing and
quota endpoint fetching for usage surfaces
- `moonshot`: shared transport, plugin-owned thinking payload normalization
- `kilocode`: shared transport, plugin-owned request headers, reasoning payload
normalization, Gemini transcript hints, and cache-TTL policy
- `zai`: GLM-5 forward-compat fallback, `tool_stream` defaults, cache-TTL
policy, and usage auth + quota fetching
policy, binary-thinking/live-model policy, and usage auth + quota fetching
- `mistral`, `opencode`, and `opencode-go`: plugin-owned capability metadata
- `byteplus`, `cloudflare-ai-gateway`, `huggingface`, `kimi-coding`,
`minimax-portal`, `modelstudio`, `nvidia`, `qianfan`, `qwen-portal`,

View File

@@ -220,7 +220,7 @@ Provider plugins now have two layers:
- manifest metadata: `providerAuthEnvVars` for cheap env-auth lookup before
runtime load
- config-time hooks: `catalog` / legacy `discovery`
- runtime hooks: `resolveDynamicModel`, `prepareDynamicModel`, `normalizeResolvedModel`, `capabilities`, `prepareExtraParams`, `wrapStreamFn`, `isCacheTtlEligible`, `buildMissingAuthMessage`, `suppressBuiltInModel`, `augmentModelCatalog`, `prepareRuntimeAuth`, `resolveUsageAuth`, `fetchUsageSnapshot`
- runtime hooks: `resolveDynamicModel`, `prepareDynamicModel`, `normalizeResolvedModel`, `capabilities`, `prepareExtraParams`, `wrapStreamFn`, `isCacheTtlEligible`, `buildMissingAuthMessage`, `suppressBuiltInModel`, `augmentModelCatalog`, `isBinaryThinking`, `supportsXHighThinking`, `resolveDefaultThinkingLevel`, `isModernModelRef`, `prepareRuntimeAuth`, `resolveUsageAuth`, `fetchUsageSnapshot`
OpenClaw still owns the generic agent loop, failover, transcript handling, and
tool policy. These hooks are the seam for provider-specific behavior without
@@ -263,13 +263,22 @@ For model/provider plugins, OpenClaw uses hooks in this rough order:
error hint.
12. `augmentModelCatalog`
Provider-owned synthetic/final catalog rows appended after discovery.
13. `prepareRuntimeAuth`
13. `isBinaryThinking`
Provider-owned on/off reasoning toggle for binary-thinking providers.
14. `supportsXHighThinking`
Provider-owned `xhigh` reasoning support for selected models.
15. `resolveDefaultThinkingLevel`
Provider-owned default `/think` level for a specific model family.
16. `isModernModelRef`
Provider-owned modern-model matcher used by live profile filters and smoke
selection.
17. `prepareRuntimeAuth`
Exchanges a configured credential into the actual runtime token/key just
before inference.
14. `resolveUsageAuth`
18. `resolveUsageAuth`
Resolves usage/billing credentials for `/usage` and related status
surfaces.
15. `fetchUsageSnapshot`
19. `fetchUsageSnapshot`
Fetches and normalizes provider-specific usage/quota snapshots after auth
is resolved.
@@ -286,6 +295,10 @@ For model/provider plugins, OpenClaw uses hooks in this rough order:
- `buildMissingAuthMessage`: replace the generic auth-store error with a provider-specific recovery hint
- `suppressBuiltInModel`: hide stale upstream rows and optionally return a provider-owned error for direct resolution failures
- `augmentModelCatalog`: append synthetic/final catalog rows after discovery and config merging
- `isBinaryThinking`: expose binary on/off reasoning UX without hardcoding provider ids in `/think`
- `supportsXHighThinking`: opt specific models into the `xhigh` reasoning level
- `resolveDefaultThinkingLevel`: keep provider/model default reasoning policy out of core
- `isModernModelRef`: keep live/smoke model family inclusion rules with the provider
- `prepareRuntimeAuth`: exchange a configured credential into the actual short-lived runtime token/key used for requests
- `resolveUsageAuth`: resolve provider-owned credentials for usage/billing endpoints without hardcoding token parsing in core
- `fetchUsageSnapshot`: own provider-specific usage endpoint fetch/parsing while core keeps summary fan-out and formatting
@@ -303,6 +316,10 @@ Rule of thumb:
- provider needs a provider-specific missing-auth recovery hint: use `buildMissingAuthMessage`
- provider needs to hide stale upstream rows or replace them with a vendor hint: use `suppressBuiltInModel`
- provider needs synthetic forward-compat rows in `models list` and pickers: use `augmentModelCatalog`
- provider exposes only binary thinking on/off: use `isBinaryThinking`
- provider wants `xhigh` on only a subset of models: use `supportsXHighThinking`
- provider owns default `/think` policy for a model family: use `resolveDefaultThinkingLevel`
- provider owns live/smoke preferred-model matching: use `isModernModelRef`
- provider needs a token exchange or short-lived request credential: use `prepareRuntimeAuth`
- provider needs custom usage/quota token parsing or a different usage credential: use `resolveUsageAuth`
- provider needs a provider-specific usage endpoint or payload parser: use `fetchUsageSnapshot`
@@ -368,14 +385,17 @@ api.registerProvider({
### Built-in examples
- Anthropic uses `resolveDynamicModel`, `capabilities`, `resolveUsageAuth`,
`fetchUsageSnapshot`, and `isCacheTtlEligible` because it owns Claude 4.6
forward-compat, provider-family hints, usage endpoint integration, and
prompt-cache eligibility.
`fetchUsageSnapshot`, `isCacheTtlEligible`, `resolveDefaultThinkingLevel`,
and `isModernModelRef` because it owns Claude 4.6 forward-compat,
provider-family hints, usage endpoint integration, prompt-cache
eligibility, and Claude default/adaptive thinking policy.
- OpenAI uses `resolveDynamicModel`, `normalizeResolvedModel`, and
`capabilities` plus `buildMissingAuthMessage`, `suppressBuiltInModel`, and
`augmentModelCatalog` because it owns GPT-5.4 forward-compat, the direct
OpenAI `openai-completions` -> `openai-responses` normalization, Codex-aware
auth hints, Spark suppression, and synthetic OpenAI list rows.
`capabilities` plus `buildMissingAuthMessage`, `suppressBuiltInModel`,
`augmentModelCatalog`, `supportsXHighThinking`, and `isModernModelRef`
because it owns GPT-5.4 forward-compat, the direct OpenAI
`openai-completions` -> `openai-responses` normalization, Codex-aware auth
hints, Spark suppression, synthetic OpenAI list rows, and GPT-5 thinking /
live-model policy.
- OpenRouter uses `catalog` plus `resolveDynamicModel` and
`prepareDynamicModel` because the provider is pass-through and may expose new
model ids before OpenClaw's static catalog updates.
@@ -389,9 +409,10 @@ api.registerProvider({
still runs on core OpenAI transports but owns its transport/base URL
normalization, default transport choice, synthetic Codex catalog rows, and
ChatGPT usage endpoint integration.
- Gemini CLI OAuth uses `resolveDynamicModel`, `resolveUsageAuth`, and
`fetchUsageSnapshot` because it owns Gemini 3.1 forward-compat fallback plus
the token parsing and quota endpoint wiring needed by `/usage`.
- Google AI Studio and Gemini CLI OAuth use `resolveDynamicModel` and
`isModernModelRef` because they own Gemini 3.1 forward-compat fallback and
modern-model matching; Gemini CLI OAuth also uses `resolveUsageAuth` and
`fetchUsageSnapshot` for token parsing and quota endpoint wiring.
- OpenRouter uses `capabilities`, `wrapStreamFn`, and `isCacheTtlEligible`
to keep provider-specific request headers, routing metadata, reasoning
patches, and prompt-cache policy out of core.
@@ -402,9 +423,10 @@ api.registerProvider({
reasoning payload normalization, Gemini transcript hints, and Anthropic
cache-TTL gating.
- Z.AI uses `resolveDynamicModel`, `prepareExtraParams`, `wrapStreamFn`,
`isCacheTtlEligible`, `resolveUsageAuth`, and `fetchUsageSnapshot` because it
owns GLM-5 fallback, `tool_stream` defaults, and both usage auth + quota
fetching.
`isCacheTtlEligible`, `isBinaryThinking`, `isModernModelRef`,
`resolveUsageAuth`, and `fetchUsageSnapshot` because it owns GLM-5 fallback,
`tool_stream` defaults, binary thinking UX, modern-model matching, and both
usage auth + quota fetching.
- Mistral, OpenCode Zen, and OpenCode Go use `capabilities` only to keep
transcript/tooling quirks out of core.
- Catalog-only bundled providers such as `byteplus`, `cloudflare-ai-gateway`,

View File

@@ -1,11 +1,14 @@
import {
emptyPluginConfigSchema,
type OpenClawPluginApi,
type ProviderAuthContext,
type ProviderResolveDynamicModelContext,
type ProviderRuntimeModel,
} from "openclaw/plugin-sdk/core";
import { normalizeModelCompat } from "../../src/agents/model-compat.js";
import { buildTokenProfileId, validateAnthropicSetupToken } from "../../src/commands/auth-token.js";
import { fetchClaudeUsage } from "../../src/infra/provider-usage.fetch.js";
import type { ProviderAuthResult } from "../../src/plugins/types.js";
const PROVIDER_ID = "anthropic";
const ANTHROPIC_OPUS_46_MODEL_ID = "claude-opus-4-6";
@@ -14,6 +17,13 @@ const ANTHROPIC_OPUS_TEMPLATE_MODEL_IDS = ["claude-opus-4-5", "claude-opus-4.5"]
const ANTHROPIC_SONNET_46_MODEL_ID = "claude-sonnet-4-6";
const ANTHROPIC_SONNET_46_DOT_MODEL_ID = "claude-sonnet-4.6";
const ANTHROPIC_SONNET_TEMPLATE_MODEL_IDS = ["claude-sonnet-4-5", "claude-sonnet-4.5"] as const;
// Prefixes of Claude model ids treated as "modern" for live-profile and
// smoke-test model matching (Opus/Sonnet 4.5-4.6 plus Haiku 4.5).
// Matching is prefix-based, so dated variants like "claude-opus-4-6-20260101"
// are included as well.
const ANTHROPIC_MODERN_MODEL_PREFIXES = [
  "claude-opus-4-6",
  "claude-sonnet-4-6",
  "claude-opus-4-5",
  "claude-sonnet-4-5",
  "claude-haiku-4-5",
] as const;
function cloneFirstTemplateModel(params: {
modelId: string;
@@ -96,6 +106,51 @@ function resolveAnthropicForwardCompatModel(
);
}
// True when the model id belongs to a modern Claude family
// (see ANTHROPIC_MODERN_MODEL_PREFIXES); comparison is trimmed and
// case-insensitive.
function matchesAnthropicModernModel(modelId: string): boolean {
  const normalized = modelId.trim().toLowerCase();
  for (const prefix of ANTHROPIC_MODERN_MODEL_PREFIXES) {
    if (normalized.startsWith(prefix)) {
      return true;
    }
  }
  return false;
}
// Interactive auth flow: instruct the user to run `claude setup-token`,
// collect the pasted token, and package it as a token credential profile
// for the Anthropic provider.
// Throws when the trimmed token fails validation.
async function runAnthropicSetupToken(ctx: ProviderAuthContext): Promise<ProviderAuthResult> {
  // Show how to obtain the token before prompting for it.
  await ctx.prompter.note(
    ["Run `claude setup-token` in your terminal.", "Then paste the generated token below."].join(
      "\n",
    ),
    "Anthropic setup-token",
  );
  const tokenRaw = await ctx.prompter.text({
    message: "Paste Anthropic setup-token",
    validate: (value) => validateAnthropicSetupToken(String(value ?? "")),
  });
  const token = String(tokenRaw ?? "").trim();
  // Re-validate after trimming: the prompter's `validate` only saw raw input.
  const tokenError = validateAnthropicSetupToken(token);
  if (tokenError) {
    throw new Error(tokenError);
  }
  // Optional profile name; blank input yields the provider's default profile id
  // (naming policy lives in buildTokenProfileId).
  const profileNameRaw = await ctx.prompter.text({
    message: "Token name (blank = default)",
    placeholder: "default",
  });
  return {
    profiles: [
      {
        profileId: buildTokenProfileId({
          provider: PROVIDER_ID,
          name: String(profileNameRaw ?? ""),
        }),
        credential: {
          type: "token",
          provider: PROVIDER_ID,
          token,
        },
      },
    ],
  };
}
const anthropicPlugin = {
id: PROVIDER_ID,
name: "Anthropic Provider",
@@ -107,12 +162,29 @@ const anthropicPlugin = {
label: "Anthropic",
docsPath: "/providers/models",
envVars: ["ANTHROPIC_OAUTH_TOKEN", "ANTHROPIC_API_KEY"],
auth: [],
auth: [
{
id: "setup-token",
label: "setup-token (claude)",
hint: "Paste a setup-token from `claude setup-token`",
kind: "token",
run: async (ctx: ProviderAuthContext) => await runAnthropicSetupToken(ctx),
},
],
resolveDynamicModel: (ctx) => resolveAnthropicForwardCompatModel(ctx),
capabilities: {
providerFamily: "anthropic",
dropThinkingBlockModelHints: ["claude"],
},
isModernModelRef: ({ modelId }) => matchesAnthropicModernModel(modelId),
resolveDefaultThinkingLevel: ({ modelId }) =>
matchesAnthropicModernModel(modelId) &&
(modelId.toLowerCase().startsWith(ANTHROPIC_OPUS_46_MODEL_ID) ||
modelId.toLowerCase().startsWith(ANTHROPIC_OPUS_46_DOT_MODEL_ID) ||
modelId.toLowerCase().startsWith(ANTHROPIC_SONNET_46_MODEL_ID) ||
modelId.toLowerCase().startsWith(ANTHROPIC_SONNET_46_DOT_MODEL_ID))
? "adaptive"
: undefined,
resolveUsageAuth: async (ctx) => await ctx.resolveOAuthToken(),
fetchUsageSnapshot: async (ctx) =>
await fetchClaudeUsage(ctx.token, ctx.timeoutMs, ctx.fetchFn),

View File

@@ -15,6 +15,7 @@ const PROVIDER_ID = "github-copilot";
const COPILOT_ENV_VARS = ["COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN"];
const CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex";
const CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
const COPILOT_XHIGH_MODEL_IDS = ["gpt-5.2", "gpt-5.2-codex"] as const;
function resolveFirstGithubToken(params: { agentDir?: string; env: NodeJS.ProcessEnv }): {
githubToken: string;
@@ -117,6 +118,8 @@ const githubCopilotPlugin = {
capabilities: {
dropThinkingBlockModelHints: ["claude"],
},
supportsXHighThinking: ({ modelId }) =>
COPILOT_XHIGH_MODEL_IDS.includes(modelId.trim().toLowerCase() as never),
prepareRuntimeAuth: async (ctx) => {
const token = await resolveCopilotApiToken({
githubToken: ctx.apiKey,

View File

@@ -7,8 +7,16 @@ import {
} from "../../src/test-utils/provider-usage-fetch.js";
import googlePlugin from "./index.js";
// Test helper: look up a registered provider by id, failing loudly when the
// registration is missing so assertions don't chase undefined.
function findProvider(providers: ProviderPlugin[], id: string): ProviderPlugin {
  const match = providers.find((entry) => entry.id === id);
  if (match) {
    return match;
  }
  throw new Error(`provider ${id} missing`);
}
function registerGooglePlugin(): {
provider: ProviderPlugin;
providers: ProviderPlugin[];
webSearchProvider: {
id: string;
envVars: string[];
@@ -18,13 +26,12 @@ function registerGooglePlugin(): {
} {
const captured = createCapturedPluginRegistration();
googlePlugin.register(captured.api);
const provider = captured.providers[0];
if (!provider) {
if (captured.providers.length === 0) {
throw new Error("provider registration missing");
}
const webSearchProvider = captured.webSearchProviders[0] ?? null;
return {
provider,
providers: captured.providers,
webSearchProviderRegistered: webSearchProvider !== null,
webSearchProvider:
webSearchProvider === null
@@ -38,10 +45,13 @@ function registerGooglePlugin(): {
}
describe("google plugin", () => {
it("registers both Gemini CLI auth and Gemini web search", () => {
it("registers Google direct, Gemini CLI auth, and Gemini web search", () => {
const result = registerGooglePlugin();
expect(result.provider.id).toBe("google-gemini-cli");
expect(result.providers.map((provider) => provider.id)).toEqual([
"google",
"google-gemini-cli",
]);
expect(result.webSearchProviderRegistered).toBe(true);
expect(result.webSearchProvider).toMatchObject({
id: "gemini",
@@ -50,8 +60,43 @@ describe("google plugin", () => {
});
});
it("owns gemini 3.1 forward-compat resolution", () => {
const { provider } = registerGooglePlugin();
it("owns google direct gemini 3.1 forward-compat resolution", () => {
const { providers } = registerGooglePlugin();
const provider = findProvider(providers, "google");
const model = provider.resolveDynamicModel?.({
provider: "google",
modelId: "gemini-3.1-pro-preview",
modelRegistry: {
find: (_provider: string, id: string) =>
id === "gemini-3-pro-preview"
? {
id,
name: id,
api: "google-generative-ai",
provider: "google",
baseUrl: "https://generativelanguage.googleapis.com",
reasoning: false,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1_048_576,
maxTokens: 65_536,
}
: null,
} as never,
});
expect(model).toMatchObject({
id: "gemini-3.1-pro-preview",
provider: "google",
api: "google-generative-ai",
baseUrl: "https://generativelanguage.googleapis.com",
reasoning: true,
});
});
it("owns gemini cli 3.1 forward-compat resolution", () => {
const { providers } = registerGooglePlugin();
const provider = findProvider(providers, "google-gemini-cli");
const model = provider.resolveDynamicModel?.({
provider: "google-gemini-cli",
modelId: "gemini-3.1-pro-preview",
@@ -82,7 +127,8 @@ describe("google plugin", () => {
});
it("owns usage-token parsing", async () => {
const { provider } = registerGooglePlugin();
const { providers } = registerGooglePlugin();
const provider = findProvider(providers, "google-gemini-cli");
await expect(
provider.resolveUsageAuth?.({
config: {} as never,
@@ -101,7 +147,8 @@ describe("google plugin", () => {
});
it("owns usage snapshot fetching", async () => {
const { provider } = registerGooglePlugin();
const { providers } = registerGooglePlugin();
const provider = findProvider(providers, "google-gemini-cli");
const mockFetch = createProviderUsageFetch(async (url) => {
if (url.includes("cloudcode-pa.googleapis.com/v1internal:retrieveUserQuota")) {
return makeResponse(200, {

View File

@@ -1,22 +1,16 @@
import { normalizeModelCompat } from "../../src/agents/model-compat.js";
import { fetchGeminiUsage } from "../../src/infra/provider-usage.fetch.js";
import { buildOauthProviderAuthResult } from "../../src/plugin-sdk/provider-auth-result.js";
import type {
OpenClawPluginApi,
ProviderAuthContext,
ProviderFetchUsageSnapshotContext,
ProviderResolveDynamicModelContext,
ProviderRuntimeModel,
} from "../../src/plugins/types.js";
import { loginGeminiCliOAuth } from "./oauth.js";
import { isModernGoogleModel, resolveGoogle31ForwardCompatModel } from "./provider-models.js";
const PROVIDER_ID = "google-gemini-cli";
const PROVIDER_LABEL = "Gemini CLI OAuth";
const DEFAULT_MODEL = "google-gemini-cli/gemini-3.1-pro-preview";
const GEMINI_3_1_PRO_PREFIX = "gemini-3.1-pro";
const GEMINI_3_1_FLASH_PREFIX = "gemini-3.1-flash";
const GEMINI_3_1_PRO_TEMPLATE_IDS = ["gemini-3-pro-preview"] as const;
const GEMINI_3_1_FLASH_TEMPLATE_IDS = ["gemini-3-flash-preview"] as const;
const ENV_VARS = [
"OPENCLAW_GEMINI_OAUTH_CLIENT_ID",
"OPENCLAW_GEMINI_OAUTH_CLIENT_SECRET",
@@ -24,30 +18,6 @@ const ENV_VARS = [
"GEMINI_CLI_OAUTH_CLIENT_SECRET",
];
function cloneFirstTemplateModel(params: {
modelId: string;
templateIds: readonly string[];
ctx: ProviderResolveDynamicModelContext;
}): ProviderRuntimeModel | undefined {
const trimmedModelId = params.modelId.trim();
for (const templateId of [...new Set(params.templateIds)].filter(Boolean)) {
const template = params.ctx.modelRegistry.find(
PROVIDER_ID,
templateId,
) as ProviderRuntimeModel | null;
if (!template) {
continue;
}
return normalizeModelCompat({
...template,
id: trimmedModelId,
name: trimmedModelId,
reasoning: true,
} as ProviderRuntimeModel);
}
return undefined;
}
function parseGoogleUsageToken(apiKey: string): string {
try {
const parsed = JSON.parse(apiKey) as { token?: unknown };
@@ -64,28 +34,6 @@ async function fetchGeminiCliUsage(ctx: ProviderFetchUsageSnapshotContext) {
return await fetchGeminiUsage(ctx.token, ctx.timeoutMs, ctx.fetchFn, PROVIDER_ID);
}
// Forward-compat resolution: map an unlisted "gemini-3.1-pro*" or
// "gemini-3.1-flash*" model id onto an existing 3.0 template row so new
// Gemini 3.1 ids work before the static catalog is updated.
// Returns undefined for anything that is not a 3.1 pro/flash variant.
function resolveGeminiCliForwardCompatModel(
  ctx: ProviderResolveDynamicModelContext,
): ProviderRuntimeModel | undefined {
  const trimmed = ctx.modelId.trim();
  const lower = trimmed.toLowerCase();
  let templateIds: readonly string[];
  if (lower.startsWith(GEMINI_3_1_PRO_PREFIX)) {
    templateIds = GEMINI_3_1_PRO_TEMPLATE_IDS;
  } else if (lower.startsWith(GEMINI_3_1_FLASH_PREFIX)) {
    templateIds = GEMINI_3_1_FLASH_TEMPLATE_IDS;
  } else {
    return undefined;
  }
  // Clone the first registered template under the requested id.
  return cloneFirstTemplateModel({
    modelId: trimmed,
    templateIds,
    ctx,
  });
}
export function registerGoogleGeminiCliProvider(api: OpenClawPluginApi) {
api.registerProvider({
id: PROVIDER_ID,
@@ -133,7 +81,9 @@ export function registerGoogleGeminiCliProvider(api: OpenClawPluginApi) {
},
},
],
resolveDynamicModel: (ctx) => resolveGeminiCliForwardCompatModel(ctx),
resolveDynamicModel: (ctx) =>
resolveGoogle31ForwardCompatModel({ providerId: PROVIDER_ID, ctx }),
isModernModelRef: ({ modelId }) => isModernGoogleModel(modelId),
resolveUsageAuth: async (ctx) => {
const auth = await ctx.resolveOAuthToken();
if (!auth) {

View File

@@ -6,6 +6,7 @@ import {
import { emptyPluginConfigSchema } from "../../src/plugins/config-schema.js";
import type { OpenClawPluginApi } from "../../src/plugins/types.js";
import { registerGoogleGeminiCliProvider } from "./gemini-cli-provider.js";
import { isModernGoogleModel, resolveGoogle31ForwardCompatModel } from "./provider-models.js";
const googlePlugin = {
id: "google",
@@ -13,6 +14,16 @@ const googlePlugin = {
description: "Bundled Google plugin",
configSchema: emptyPluginConfigSchema(),
register(api: OpenClawPluginApi) {
api.registerProvider({
id: "google",
label: "Google AI Studio",
docsPath: "/providers/models",
envVars: ["GEMINI_API_KEY", "GOOGLE_API_KEY"],
auth: [],
resolveDynamicModel: (ctx) =>
resolveGoogle31ForwardCompatModel({ providerId: "google", ctx }),
isModernModelRef: ({ modelId }) => isModernGoogleModel(modelId),
});
registerGoogleGeminiCliProvider(api);
api.registerWebSearchProvider(
createPluginBackedWebSearchProvider({

View File

@@ -1,6 +1,9 @@
{
"id": "google",
"providers": ["google-gemini-cli"],
"providers": ["google", "google-gemini-cli"],
"providerAuthEnvVars": {
"google": ["GEMINI_API_KEY", "GOOGLE_API_KEY"]
},
"configSchema": {
"type": "object",
"additionalProperties": false,

View File

@@ -0,0 +1,63 @@
import { normalizeModelCompat } from "../../src/agents/model-compat.js";
import type {
ProviderResolveDynamicModelContext,
ProviderRuntimeModel,
} from "../../src/plugins/types.js";
const GEMINI_3_1_PRO_PREFIX = "gemini-3.1-pro";
const GEMINI_3_1_FLASH_PREFIX = "gemini-3.1-flash";
const GEMINI_3_1_PRO_TEMPLATE_IDS = ["gemini-3-pro-preview"] as const;
const GEMINI_3_1_FLASH_TEMPLATE_IDS = ["gemini-3-flash-preview"] as const;
// Clone the first template model found in the registry under the requested
// model id, forcing `reasoning: true` and re-normalizing compat metadata.
// Returns undefined when none of the template ids resolve.
function cloneFirstTemplateModel(params: {
  providerId: string;
  modelId: string;
  templateIds: readonly string[];
  ctx: ProviderResolveDynamicModelContext;
}): ProviderRuntimeModel | undefined {
  const targetId = params.modelId.trim();
  // De-duplicate while preserving order, and drop empty ids.
  const candidateIds = [...new Set(params.templateIds)].filter(Boolean);
  for (const candidateId of candidateIds) {
    const template = params.ctx.modelRegistry.find(
      params.providerId,
      candidateId,
    ) as ProviderRuntimeModel | null;
    if (template) {
      return normalizeModelCompat({
        ...template,
        id: targetId,
        name: targetId,
        reasoning: true,
      } as ProviderRuntimeModel);
    }
  }
  return undefined;
}
// Shared Gemini 3.1 forward-compat resolver for the `google` and
// `google-gemini-cli` providers: map an unlisted "gemini-3.1-pro*" or
// "gemini-3.1-flash*" id onto a known 3.0 template row.
// Returns undefined for anything that is not a 3.1 pro/flash variant.
export function resolveGoogle31ForwardCompatModel(params: {
  providerId: string;
  ctx: ProviderResolveDynamicModelContext;
}): ProviderRuntimeModel | undefined {
  const requestedId = params.ctx.modelId.trim();
  const normalized = requestedId.toLowerCase();
  const templateIds = normalized.startsWith(GEMINI_3_1_PRO_PREFIX)
    ? GEMINI_3_1_PRO_TEMPLATE_IDS
    : normalized.startsWith(GEMINI_3_1_FLASH_PREFIX)
      ? GEMINI_3_1_FLASH_TEMPLATE_IDS
      : undefined;
  if (!templateIds) {
    return undefined;
  }
  return cloneFirstTemplateModel({
    providerId: params.providerId,
    modelId: requestedId,
    templateIds,
    ctx: params.ctx,
  });
}
// True for any Gemini 3.x model id (trimmed, case-insensitive prefix match).
export function isModernGoogleModel(modelId: string): boolean {
  const normalized = modelId.trim().toLowerCase();
  return normalized.startsWith("gemini-3");
}

View File

@@ -30,6 +30,10 @@ function modelRef(modelId: string): string {
return `${PORTAL_PROVIDER_ID}/${modelId}`;
}
// MiniMax M2.5 family counts as "modern" for live/smoke model filters
// (trimmed, case-insensitive prefix match).
function isModernMiniMaxModel(modelId: string): boolean {
  const normalized = modelId.trim().toLowerCase();
  return normalized.startsWith("minimax-m2.5");
}
function buildPortalProviderCatalog(params: { baseUrl: string; apiKey: string }) {
return {
...buildMinimaxPortalProvider(),
@@ -167,6 +171,7 @@ const minimaxPlugin = {
});
return apiKey ? { token: apiKey } : null;
},
isModernModelRef: ({ modelId }) => isModernMiniMaxModel(modelId),
fetchUsageSnapshot: async (ctx) =>
await fetchMinimaxUsage(ctx.token, ctx.timeoutMs, ctx.fetchFn),
});
@@ -195,6 +200,7 @@ const minimaxPlugin = {
run: createOAuthHandler("cn"),
},
],
isModernModelRef: ({ modelId }) => isModernMiniMaxModel(modelId),
});
},
};

View File

@@ -1,4 +1,5 @@
import type {
ProviderAuthContext,
ProviderResolveDynamicModelContext,
ProviderRuntimeModel,
} from "openclaw/plugin-sdk/core";
@@ -8,9 +9,16 @@ import { DEFAULT_CONTEXT_TOKENS } from "../../src/agents/defaults.js";
import { normalizeModelCompat } from "../../src/agents/model-compat.js";
import { normalizeProviderId } from "../../src/agents/model-selection.js";
import { buildOpenAICodexProvider } from "../../src/agents/models-config.providers.static.js";
import { loginOpenAICodexOAuth } from "../../src/commands/openai-codex-oauth.js";
import { fetchCodexUsage } from "../../src/infra/provider-usage.fetch.js";
import { buildOauthProviderAuthResult } from "../../src/plugin-sdk/provider-auth-result.js";
import type { ProviderPlugin } from "../../src/plugins/types.js";
import { cloneFirstTemplateModel, findCatalogTemplate, isOpenAIApiBaseUrl } from "./shared.js";
import {
cloneFirstTemplateModel,
findCatalogTemplate,
isOpenAIApiBaseUrl,
matchesExactOrPrefix,
} from "./shared.js";
const PROVIDER_ID = "openai-codex";
const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api";
@@ -23,6 +31,24 @@ const OPENAI_CODEX_GPT_53_SPARK_MODEL_ID = "gpt-5.3-codex-spark";
const OPENAI_CODEX_GPT_53_SPARK_CONTEXT_TOKENS = 128_000;
const OPENAI_CODEX_GPT_53_SPARK_MAX_TOKENS = 128_000;
const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
const OPENAI_CODEX_DEFAULT_MODEL = `${PROVIDER_ID}/${OPENAI_CODEX_GPT_54_MODEL_ID}`;
const OPENAI_CODEX_XHIGH_MODEL_IDS = [
OPENAI_CODEX_GPT_54_MODEL_ID,
OPENAI_CODEX_GPT_53_MODEL_ID,
OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,
"gpt-5.2-codex",
"gpt-5.1-codex",
] as const;
const OPENAI_CODEX_MODERN_MODEL_IDS = [
OPENAI_CODEX_GPT_54_MODEL_ID,
"gpt-5.2",
"gpt-5.2-codex",
OPENAI_CODEX_GPT_53_MODEL_ID,
OPENAI_CODEX_GPT_53_SPARK_MODEL_ID,
"gpt-5.1-codex",
"gpt-5.1-codex-mini",
"gpt-5.1-codex-max",
] as const;
function isOpenAICodexBaseUrl(baseUrl?: string): boolean {
const trimmed = baseUrl?.trim();
@@ -106,12 +132,42 @@ function resolveCodexForwardCompatModel(
);
}
// Interactive ChatGPT OAuth flow for the OpenAI Codex provider: run the
// browser sign-in and convert the returned credentials into OpenClaw auth
// profiles (access/refresh pair plus the provider's default model).
// Throws when the OAuth flow completes without credentials.
async function runOpenAICodexOAuth(ctx: ProviderAuthContext) {
  const creds = await loginOpenAICodexOAuth({
    prompter: ctx.prompter,
    runtime: ctx.runtime,
    isRemote: ctx.isRemote,
    openUrl: ctx.openUrl,
    localBrowserMessage: "Complete sign-in in browser…",
  });
  if (!creds) {
    throw new Error("OpenAI Codex OAuth did not return credentials.");
  }
  return buildOauthProviderAuthResult({
    providerId: PROVIDER_ID,
    defaultModel: OPENAI_CODEX_DEFAULT_MODEL,
    access: creds.access,
    refresh: creds.refresh,
    expires: creds.expires,
    // Only forward `email` when the OAuth response actually provided a string.
    email: typeof creds.email === "string" ? creds.email : undefined,
  });
}
export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
return {
id: PROVIDER_ID,
label: "OpenAI Codex",
docsPath: "/providers/models",
auth: [],
auth: [
{
id: "oauth",
label: "ChatGPT OAuth",
hint: "Browser sign-in",
kind: "oauth",
run: async (ctx) => await runOpenAICodexOAuth(ctx),
},
],
catalog: {
order: "profile",
run: async (ctx) => {
@@ -130,6 +186,9 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
capabilities: {
providerFamily: "openai",
},
supportsXHighThinking: ({ modelId }) =>
matchesExactOrPrefix(modelId, OPENAI_CODEX_XHIGH_MODEL_IDS),
isModernModelRef: ({ modelId }) => matchesExactOrPrefix(modelId, OPENAI_CODEX_MODERN_MODEL_IDS),
prepareExtraParams: (ctx) => {
const transport = ctx.extraParams?.transport;
if (transport === "auto" || transport === "sse" || transport === "websocket") {

View File

@@ -5,7 +5,12 @@ import {
import { normalizeModelCompat } from "../../src/agents/model-compat.js";
import { normalizeProviderId } from "../../src/agents/model-selection.js";
import type { ProviderPlugin } from "../../src/plugins/types.js";
import { cloneFirstTemplateModel, findCatalogTemplate, isOpenAIApiBaseUrl } from "./shared.js";
import {
cloneFirstTemplateModel,
findCatalogTemplate,
isOpenAIApiBaseUrl,
matchesExactOrPrefix,
} from "./shared.js";
const PROVIDER_ID = "openai";
const OPENAI_GPT_54_MODEL_ID = "gpt-5.4";
@@ -14,6 +19,8 @@ const OPENAI_GPT_54_CONTEXT_TOKENS = 1_050_000;
const OPENAI_GPT_54_MAX_TOKENS = 128_000;
const OPENAI_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.2"] as const;
const OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS = ["gpt-5.2-pro", "gpt-5.2"] as const;
const OPENAI_XHIGH_MODEL_IDS = ["gpt-5.4", "gpt-5.4-pro", "gpt-5.2"] as const;
const OPENAI_MODERN_MODEL_IDS = ["gpt-5.4", "gpt-5.4-pro", "gpt-5.2", "gpt-5.0"] as const;
const OPENAI_DIRECT_SPARK_MODEL_ID = "gpt-5.3-codex-spark";
const SUPPRESSED_SPARK_PROVIDERS = new Set(["openai", "azure-openai-responses"]);
@@ -93,6 +100,8 @@ export function buildOpenAIProvider(): ProviderPlugin {
capabilities: {
providerFamily: "openai",
},
supportsXHighThinking: ({ modelId }) => matchesExactOrPrefix(modelId, OPENAI_XHIGH_MODEL_IDS),
isModernModelRef: ({ modelId }) => matchesExactOrPrefix(modelId, OPENAI_MODERN_MODEL_IDS),
buildMissingAuthMessage: (ctx) => {
if (ctx.provider !== PROVIDER_ID || ctx.listProfileIds("openai-codex").length === 0) {
return undefined;

View File

@@ -6,6 +6,14 @@ import type {
export const OPENAI_API_BASE_URL = "https://api.openai.com/v1";
/**
 * Case-insensitive match of a model id against a list of candidate ids.
 *
 * A candidate matches when the normalized (trimmed, lowercased) id starts
 * with the normalized candidate — which also covers exact equality, e.g.
 * "gpt-5.4-pro" matches the candidate "gpt-5.4" and "gpt-5.4" matches itself.
 *
 * @param id     model id to test
 * @param values candidate ids / prefixes to match against
 * @returns true when any candidate is an exact match or prefix of `id`
 */
export function matchesExactOrPrefix(id: string, values: readonly string[]): boolean {
  const normalizedId = id.trim().toLowerCase();
  // `startsWith` already returns true on exact equality, so the previous
  // separate `===` comparison was redundant.
  return values.some((value) => normalizedId.startsWith(value.trim().toLowerCase()));
}
export function isOpenAIApiBaseUrl(baseUrl?: string): boolean {
const trimmed = baseUrl?.trim();
if (!trimmed) {

View File

@@ -19,6 +19,7 @@ const opencodeGoPlugin = {
geminiThoughtSignatureSanitization: true,
geminiThoughtSignatureModelHints: ["gemini"],
},
isModernModelRef: () => true,
});
},
};

View File

@@ -1,6 +1,15 @@
import { emptyPluginConfigSchema, type OpenClawPluginApi } from "openclaw/plugin-sdk/core";
const PROVIDER_ID = "opencode";
const MINIMAX_PREFIX = "minimax-m2.5";

// "Modern" OpenCode Zen models for live/smoke filters: everything except
// free-tier ids, the alpha GLM row, and MiniMax entries.
function isModernOpencodeModel(modelId: string): boolean {
  const normalized = modelId.trim().toLowerCase();
  const excluded =
    normalized.endsWith("-free") ||
    normalized === "alpha-glm-4.7" ||
    normalized.startsWith(MINIMAX_PREFIX);
  return !excluded;
}
const opencodePlugin = {
id: PROVIDER_ID,
@@ -19,6 +28,7 @@ const opencodePlugin = {
geminiThoughtSignatureSanitization: true,
geminiThoughtSignatureModelHints: ["gemini"],
},
isModernModelRef: ({ modelId }) => isModernOpencodeModel(modelId),
});
},
};

View File

@@ -110,6 +110,7 @@ const openRouterPlugin = {
geminiThoughtSignatureSanitization: true,
geminiThoughtSignatureModelHints: ["gemini"],
},
isModernModelRef: () => true,
wrapStreamFn: (ctx) => {
let streamFn = ctx.streamFn;
const providerRouting =

View File

@@ -98,6 +98,16 @@ const zaiPlugin = {
},
wrapStreamFn: (ctx) =>
createZaiToolStreamWrapper(ctx.streamFn, ctx.extraParams?.tool_stream !== false),
isBinaryThinking: () => true,
isModernModelRef: ({ modelId }) => {
const lower = modelId.trim().toLowerCase();
return (
lower.startsWith("glm-5") ||
lower.startsWith("glm-4.7") ||
lower.startsWith("glm-4.7-flash") ||
lower.startsWith("glm-4.7-flashx")
);
},
resolveUsageAuth: async (ctx) => {
const apiKey = ctx.resolveApiKeyFromConfigAndStore({
providerIds: [PROVIDER_ID, "z-ai"],

View File

@@ -1,3 +1,5 @@
import { resolveProviderModernModelRef } from "../plugins/provider-runtime.js";
export type ModelRef = {
provider?: string | null;
id?: string | null;
@@ -41,6 +43,19 @@ export function isModernModelRef(ref: ModelRef): boolean {
return false;
}
const pluginDecision = resolveProviderModernModelRef({
provider,
context: {
provider,
modelId: id,
},
});
if (typeof pluginDecision === "boolean") {
return pluginDecision;
}
// Compatibility fallback for core-owned providers and tests that disable
// bundled provider runtime hooks.
if (provider === "anthropic") {
return matchesPrefix(id, ANTHROPIC_PREFIXES);
}

View File

@@ -1,9 +1,16 @@
import type { Api, Model } from "@mariozechner/pi-ai";
import type { ModelRegistry } from "@mariozechner/pi-coding-agent";
import { describe, expect, it } from "vitest";
import { beforeEach, describe, expect, it, vi } from "vitest";
const providerRuntimeMocks = vi.hoisted(() => ({
resolveProviderModernModelRef: vi.fn(),
}));
vi.mock("../plugins/provider-runtime.js", () => ({
resolveProviderModernModelRef: providerRuntimeMocks.resolveProviderModernModelRef,
}));
import { isModernModelRef } from "./live-model-filter.js";
import { normalizeModelCompat } from "./model-compat.js";
import { resolveForwardCompatModel } from "./model-forward-compat.js";
const baseModel = (): Model<Api> =>
({
@@ -32,43 +39,6 @@ function supportsStrictMode(model: Model<Api>): boolean | undefined {
return (model.compat as { supportsStrictMode?: boolean } | undefined)?.supportsStrictMode;
}
/** Minimal Anthropic-style test fixture: zero cost, 200k context, reasoning on. */
function createTemplateModel(provider: string, id: string): Model<Api> {
  const fixture = {
    id,
    name: id,
    provider,
    api: "anthropic-messages",
    input: ["text"],
    reasoning: true,
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
    contextWindow: 200_000,
    maxTokens: 8_192,
  };
  return fixture as Model<Api>;
}
/** OpenAI Responses-API test fixture: 400k context, text+image input, zero cost. */
function createOpenAITemplateModel(id: string): Model<Api> {
  const fixture = {
    id,
    name: id,
    provider: "openai",
    api: "openai-responses",
    baseUrl: "https://api.openai.com/v1",
    input: ["text", "image"],
    reasoning: true,
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
    contextWindow: 400_000,
    maxTokens: 32_768,
  };
  return fixture as Model<Api>;
}
/** Stub registry backed by a "provider/modelId" keyed map; misses resolve to null. */
function createRegistry(models: Record<string, Model<Api>>): ModelRegistry {
  const find = (provider: string, modelId: string) => models[`${provider}/${modelId}`] ?? null;
  return { find } as ModelRegistry;
}
function expectSupportsDeveloperRoleForcedOff(overrides?: Partial<Model<Api>>): void {
const model = { ...baseModel(), ...overrides };
delete (model as { compat?: unknown }).compat;
@@ -90,14 +60,10 @@ function expectSupportsStrictModeForcedOff(overrides?: Partial<Model<Api>>): voi
expect(supportsStrictMode(normalized)).toBe(false);
}
// Assert a forward-compat model resolved with the expected provider/id.
function expectResolvedForwardCompat(
  model: Model<Api> | undefined,
  expected: { provider: string; id: string },
): void {
  expect(model?.id).toBe(expected.id);
  // Forward-compat clones reuse the requested id as the display name too.
  expect(model?.name).toBe(expected.id);
  expect(model?.provider).toBe(expected.provider);
}
beforeEach(() => {
providerRuntimeMocks.resolveProviderModernModelRef.mockReset();
providerRuntimeMocks.resolveProviderModernModelRef.mockReturnValue(undefined);
});
describe("normalizeModelCompat — Anthropic baseUrl", () => {
const anthropicBase = (): Model<Api> =>
@@ -373,6 +339,12 @@ describe("normalizeModelCompat", () => {
});
describe("isModernModelRef", () => {
it("uses provider runtime hooks before fallback heuristics", () => {
providerRuntimeMocks.resolveProviderModernModelRef.mockReturnValue(false);
expect(isModernModelRef({ provider: "openrouter", id: "claude-opus-4-6" })).toBe(false);
});
it("includes OpenAI gpt-5.4 variants in modern selection", () => {
expect(isModernModelRef({ provider: "openai", id: "gpt-5.4" })).toBe(true);
expect(isModernModelRef({ provider: "openai", id: "gpt-5.4-pro" })).toBe(true);
@@ -395,71 +367,3 @@ describe("isModernModelRef", () => {
expect(isModernModelRef({ provider: "opencode-go", id: "minimax-m2.5" })).toBe(true);
});
});
describe("resolveForwardCompatModel", () => {
it("resolves openai gpt-5.4 via gpt-5.2 template", () => {
const registry = createRegistry({
"openai/gpt-5.2": createOpenAITemplateModel("gpt-5.2"),
});
const model = resolveForwardCompatModel("openai", "gpt-5.4", registry);
expectResolvedForwardCompat(model, { provider: "openai", id: "gpt-5.4" });
expect(model?.api).toBe("openai-responses");
expect(model?.baseUrl).toBe("https://api.openai.com/v1");
expect(model?.contextWindow).toBe(1_050_000);
expect(model?.maxTokens).toBe(128_000);
});
it("resolves openai gpt-5.4 without templates using normalized fallback defaults", () => {
const registry = createRegistry({});
const model = resolveForwardCompatModel("openai", "gpt-5.4", registry);
expectResolvedForwardCompat(model, { provider: "openai", id: "gpt-5.4" });
expect(model?.api).toBe("openai-responses");
expect(model?.baseUrl).toBe("https://api.openai.com/v1");
expect(model?.input).toEqual(["text", "image"]);
expect(model?.reasoning).toBe(true);
expect(model?.contextWindow).toBe(1_050_000);
expect(model?.maxTokens).toBe(128_000);
expect(model?.cost).toEqual({ input: 0, output: 0, cacheRead: 0, cacheWrite: 0 });
});
it("resolves openai gpt-5.4-pro via template fallback", () => {
const registry = createRegistry({
"openai/gpt-5.2": createOpenAITemplateModel("gpt-5.2"),
});
const model = resolveForwardCompatModel("openai", "gpt-5.4-pro", registry);
expectResolvedForwardCompat(model, { provider: "openai", id: "gpt-5.4-pro" });
expect(model?.api).toBe("openai-responses");
expect(model?.baseUrl).toBe("https://api.openai.com/v1");
expect(model?.contextWindow).toBe(1_050_000);
expect(model?.maxTokens).toBe(128_000);
});
it("resolves anthropic opus 4.6 via 4.5 template", () => {
const registry = createRegistry({
"anthropic/claude-opus-4-5": createTemplateModel("anthropic", "claude-opus-4-5"),
});
const model = resolveForwardCompatModel("anthropic", "claude-opus-4-6", registry);
expectResolvedForwardCompat(model, { provider: "anthropic", id: "claude-opus-4-6" });
});
it("resolves anthropic sonnet 4.6 dot variant with suffix", () => {
const registry = createRegistry({
"anthropic/claude-sonnet-4.5-20260219": createTemplateModel(
"anthropic",
"claude-sonnet-4.5-20260219",
),
});
const model = resolveForwardCompatModel("anthropic", "claude-sonnet-4.6-20260219", registry);
expectResolvedForwardCompat(model, { provider: "anthropic", id: "claude-sonnet-4.6-20260219" });
});
it("does not resolve anthropic 4.6 fallback for other providers", () => {
const registry = createRegistry({
"anthropic/claude-opus-4-5": createTemplateModel("anthropic", "claude-opus-4-5"),
});
const model = resolveForwardCompatModel("openai", "claude-opus-4-6", registry);
expect(model).toBeUndefined();
});
});

View File

@@ -1,123 +0,0 @@
import type { Api, Model } from "@mariozechner/pi-ai";
import type { ModelRegistry } from "@mariozechner/pi-coding-agent";
import { DEFAULT_CONTEXT_TOKENS } from "./defaults.js";
import { normalizeModelCompat } from "./model-compat.js";
import { normalizeProviderId } from "./model-selection.js";
const ZAI_GLM5_MODEL_ID = "glm-5";
const ZAI_GLM5_TEMPLATE_MODEL_IDS = ["glm-4.7"] as const;
// gemini-3.1-pro-preview / gemini-3.1-flash-preview are not present in some pi-ai
// Google catalogs yet. Clone the nearest gemini-3 template so users don't get
// "Unknown model" errors when Google ships new minor-version models before pi-ai
// updates its built-in registry.
const GEMINI_3_1_PRO_PREFIX = "gemini-3.1-pro";
const GEMINI_3_1_FLASH_PREFIX = "gemini-3.1-flash";
const GEMINI_3_1_PRO_TEMPLATE_IDS = ["gemini-3-pro-preview"] as const;
const GEMINI_3_1_FLASH_TEMPLATE_IDS = ["gemini-3-flash-preview"] as const;
/**
 * Walk the de-duplicated template ids in order and clone the first one the
 * registry knows about, re-keyed to the requested model id (with an optional
 * patch applied on top). Returns undefined when no template is found.
 */
function cloneFirstTemplateModel(params: {
  normalizedProvider: string;
  trimmedModelId: string;
  templateIds: string[];
  modelRegistry: ModelRegistry;
  patch?: Partial<Model<Api>>;
}): Model<Api> | undefined {
  const { normalizedProvider, trimmedModelId, modelRegistry, patch } = params;
  const uniqueIds = [...new Set(params.templateIds)].filter(Boolean);
  for (const templateId of uniqueIds) {
    const template = modelRegistry.find(normalizedProvider, templateId) as Model<Api> | null;
    if (!template) {
      continue;
    }
    const clone = {
      ...template,
      id: trimmedModelId,
      name: trimmedModelId,
      ...patch,
    } as Model<Api>;
    return normalizeModelCompat(clone);
  }
  return undefined;
}
/**
 * Forward-compat clone for gemini-3.1-pro/-flash ids on the Google providers:
 * reuse the nearest gemini-3 template so new minor versions resolve before the
 * built-in catalog catches up. Returns undefined for non-Google providers and
 * ids outside the two 3.1 prefixes.
 */
function resolveGoogle31ForwardCompatModel(
  provider: string,
  modelId: string,
  modelRegistry: ModelRegistry,
): Model<Api> | undefined {
  const normalizedProvider = normalizeProviderId(provider);
  const isGoogleProvider =
    normalizedProvider === "google" || normalizedProvider === "google-gemini-cli";
  if (!isGoogleProvider) {
    return undefined;
  }
  const trimmed = modelId.trim();
  const lower = trimmed.toLowerCase();
  let templateIds: readonly string[] | undefined;
  if (lower.startsWith(GEMINI_3_1_PRO_PREFIX)) {
    templateIds = GEMINI_3_1_PRO_TEMPLATE_IDS;
  } else if (lower.startsWith(GEMINI_3_1_FLASH_PREFIX)) {
    templateIds = GEMINI_3_1_FLASH_TEMPLATE_IDS;
  }
  if (!templateIds) {
    return undefined;
  }
  // Force reasoning on: both 3.1 families are reasoning-capable.
  return cloneFirstTemplateModel({
    normalizedProvider,
    trimmedModelId: trimmed,
    templateIds: [...templateIds],
    modelRegistry,
    patch: { reasoning: true },
  });
}
// Z.ai's GLM-5 may not be present in pi-ai's built-in model catalog yet.
// When a user configures zai/glm-5 without a models.json entry, clone glm-4.7 as a forward-compat fallback.
/**
 * Forward-compat resolution for zai/glm-5 ids not yet in the built-in catalog.
 * Prefers cloning a known GLM template (so transport/compat settings carry
 * over); when no template exists, synthesizes a minimal zero-cost entry.
 */
function resolveZaiGlm5ForwardCompatModel(
  provider: string,
  modelId: string,
  modelRegistry: ModelRegistry,
): Model<Api> | undefined {
  if (normalizeProviderId(provider) !== "zai") {
    return undefined;
  }
  const trimmed = modelId.trim();
  const lower = trimmed.toLowerCase();
  const isGlm5 = lower === ZAI_GLM5_MODEL_ID || lower.startsWith(`${ZAI_GLM5_MODEL_ID}-`);
  if (!isGlm5) {
    return undefined;
  }
  for (const templateId of ZAI_GLM5_TEMPLATE_MODEL_IDS) {
    const template = modelRegistry.find("zai", templateId) as Model<Api> | null;
    if (template) {
      return normalizeModelCompat({
        ...template,
        id: trimmed,
        name: trimmed,
        reasoning: true,
      } as Model<Api>);
    }
  }
  // No template available: fall back to a synthetic OpenAI-completions model.
  const synthetic = {
    id: trimmed,
    name: trimmed,
    api: "openai-completions",
    provider: "zai",
    reasoning: true,
    input: ["text"],
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
    contextWindow: DEFAULT_CONTEXT_TOKENS,
    maxTokens: DEFAULT_CONTEXT_TOKENS,
  } as Model<Api>;
  return normalizeModelCompat(synthetic);
}
/**
 * Try each provider-specific forward-compat resolver in precedence order:
 * Z.ai GLM-5 first, then the Google gemini-3.1 clones.
 */
export function resolveForwardCompatModel(
  provider: string,
  modelId: string,
  modelRegistry: ModelRegistry,
): Model<Api> | undefined {
  const zaiModel = resolveZaiGlm5ForwardCompatModel(provider, modelId, modelRegistry);
  if (zaiModel) {
    return zaiModel;
  }
  return resolveGoogle31ForwardCompatModel(provider, modelId, modelRegistry);
}

View File

@@ -13,7 +13,6 @@ import { DEFAULT_CONTEXT_TOKENS } from "../defaults.js";
import { buildModelAliasLines } from "../model-alias-lines.js";
import { isSecretRefHeaderValueMarker } from "../model-auth-markers.js";
import { normalizeModelCompat } from "../model-compat.js";
import { resolveForwardCompatModel } from "../model-forward-compat.js";
import { findNormalizedProviderValue, normalizeProviderId } from "../model-selection.js";
import {
buildSuppressedBuiltInModelError,
@@ -34,8 +33,6 @@ type InlineProviderConfig = {
headers?: unknown;
};
const PLUGIN_FIRST_DYNAMIC_PROVIDERS = new Set(["google-gemini-cli", "zai"]);
function sanitizeModelHeaders(
headers: unknown,
opts?: { stripSecretRefMarkers?: boolean },
@@ -232,53 +229,6 @@ function resolveExplicitModelWithRegistry(params: {
};
}
if (PLUGIN_FIRST_DYNAMIC_PROVIDERS.has(normalizeProviderId(provider))) {
// Give migrated provider plugins first shot at ids that still keep a core
// forward-compat fallback for disabled-plugin/test compatibility.
const pluginDynamicModel = runProviderDynamicModel({
provider,
config: cfg,
context: {
config: cfg,
agentDir,
provider,
modelId,
modelRegistry,
providerConfig,
},
});
if (pluginDynamicModel) {
return {
kind: "resolved",
model: normalizeResolvedModel({
provider,
cfg,
agentDir,
model: pluginDynamicModel,
}),
};
}
}
// Forward-compat fallbacks must be checked BEFORE the generic providerCfg fallback.
// Otherwise, configured providers can default to a generic API and break specific transports.
const forwardCompat = resolveForwardCompatModel(provider, modelId, modelRegistry);
if (forwardCompat) {
return {
kind: "resolved",
model: normalizeResolvedModel({
provider,
cfg,
agentDir,
model: applyConfiguredProviderOverrides({
discoveredModel: forwardCompat,
providerConfig,
modelId,
}),
}),
};
}
return undefined;
}

View File

@@ -1,4 +1,16 @@
import { describe, expect, it } from "vitest";
import { beforeEach, describe, expect, it, vi } from "vitest";
const providerRuntimeMocks = vi.hoisted(() => ({
resolveProviderBinaryThinking: vi.fn(),
resolveProviderDefaultThinkingLevel: vi.fn(),
resolveProviderXHighThinking: vi.fn(),
}));
vi.mock("../plugins/provider-runtime.js", () => ({
resolveProviderBinaryThinking: providerRuntimeMocks.resolveProviderBinaryThinking,
resolveProviderDefaultThinkingLevel: providerRuntimeMocks.resolveProviderDefaultThinkingLevel,
resolveProviderXHighThinking: providerRuntimeMocks.resolveProviderXHighThinking,
}));
import {
listThinkingLevelLabels,
listThinkingLevels,
@@ -7,6 +19,15 @@ import {
resolveThinkingDefaultForModel,
} from "./thinking.js";
beforeEach(() => {
providerRuntimeMocks.resolveProviderBinaryThinking.mockReset();
providerRuntimeMocks.resolveProviderBinaryThinking.mockReturnValue(undefined);
providerRuntimeMocks.resolveProviderDefaultThinkingLevel.mockReset();
providerRuntimeMocks.resolveProviderDefaultThinkingLevel.mockReturnValue(undefined);
providerRuntimeMocks.resolveProviderXHighThinking.mockReset();
providerRuntimeMocks.resolveProviderXHighThinking.mockReturnValue(undefined);
});
describe("normalizeThinkLevel", () => {
it("accepts mid as medium", () => {
expect(normalizeThinkLevel("mid")).toBe("medium");
@@ -43,6 +64,12 @@ describe("normalizeThinkLevel", () => {
});
describe("listThinkingLevels", () => {
it("uses provider runtime hooks for xhigh support", () => {
providerRuntimeMocks.resolveProviderXHighThinking.mockReturnValue(true);
expect(listThinkingLevels("demo", "demo-model")).toContain("xhigh");
});
it("includes xhigh for codex models", () => {
expect(listThinkingLevels(undefined, "gpt-5.2-codex")).toContain("xhigh");
expect(listThinkingLevels(undefined, "gpt-5.3-codex")).toContain("xhigh");
@@ -75,6 +102,12 @@ describe("listThinkingLevels", () => {
});
describe("listThinkingLevelLabels", () => {
it("uses provider runtime hooks for binary thinking providers", () => {
providerRuntimeMocks.resolveProviderBinaryThinking.mockReturnValue(true);
expect(listThinkingLevelLabels("demo", "demo-model")).toEqual(["off", "on"]);
});
it("returns on/off for ZAI", () => {
expect(listThinkingLevelLabels("zai", "glm-4.7")).toEqual(["off", "on"]);
});
@@ -86,6 +119,14 @@ describe("listThinkingLevelLabels", () => {
});
describe("resolveThinkingDefaultForModel", () => {
it("uses provider runtime hooks for default thinking levels", () => {
providerRuntimeMocks.resolveProviderDefaultThinkingLevel.mockReturnValue("adaptive");
expect(resolveThinkingDefaultForModel({ provider: "demo", model: "demo-model" })).toBe(
"adaptive",
);
});
it("defaults Claude 4.6 models to adaptive", () => {
expect(
resolveThinkingDefaultForModel({ provider: "anthropic", model: "claude-opus-4-6" }),

View File

@@ -1,3 +1,9 @@
import {
resolveProviderBinaryThinking,
resolveProviderDefaultThinkingLevel,
resolveProviderXHighThinking,
} from "../plugins/provider-runtime.js";
export type ThinkLevel = "off" | "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive";
export type VerboseLevel = "off" | "on" | "full";
export type NoticeLevel = "off" | "on" | "full";
@@ -27,8 +33,24 @@ function normalizeProviderId(provider?: string | null): string {
return normalized;
}
export function isBinaryThinkingProvider(provider?: string | null): boolean {
return normalizeProviderId(provider) === "zai";
// A "binary thinking" provider only supports on/off reasoning (no graded
// levels). A provider plugin gets first say; an explicit boolean wins.
export function isBinaryThinkingProvider(provider?: string | null, model?: string | null): boolean {
  const normalizedProvider = normalizeProviderId(provider);
  if (!normalizedProvider) {
    return false;
  }
  const pluginDecision = resolveProviderBinaryThinking({
    provider: normalizedProvider,
    context: {
      provider: normalizedProvider,
      modelId: model?.trim() ?? "",
    },
  });
  if (typeof pluginDecision === "boolean") {
    return pluginDecision;
  }
  // Compatibility fallback when no plugin claimed the provider.
  return normalizedProvider === "zai";
}
export const XHIGH_MODEL_REFS = [
@@ -95,7 +117,19 @@ export function supportsXHighThinking(provider?: string | null, model?: string |
if (!modelKey) {
return false;
}
const providerKey = provider?.trim().toLowerCase();
const providerKey = normalizeProviderId(provider);
if (providerKey) {
const pluginDecision = resolveProviderXHighThinking({
provider: providerKey,
context: {
provider: providerKey,
modelId: modelKey,
},
});
if (typeof pluginDecision === "boolean") {
return pluginDecision;
}
}
if (providerKey) {
return XHIGH_MODEL_SET.has(`${providerKey}/${modelKey}`);
}
@@ -112,7 +146,7 @@ export function listThinkingLevels(provider?: string | null, model?: string | nu
}
export function listThinkingLevelLabels(provider?: string | null, model?: string | null): string[] {
if (isBinaryThinkingProvider(provider)) {
if (isBinaryThinkingProvider(provider, model)) {
return ["off", "on"];
}
return listThinkingLevels(provider, model);
@@ -147,6 +181,21 @@ export function resolveThinkingDefaultForModel(params: {
}): ThinkLevel {
const normalizedProvider = normalizeProviderId(params.provider);
const modelLower = params.model.trim().toLowerCase();
const candidate = params.catalog?.find(
(entry) => entry.provider === params.provider && entry.id === params.model,
);
const pluginDecision = resolveProviderDefaultThinkingLevel({
provider: normalizedProvider,
context: {
provider: normalizedProvider,
modelId: params.model,
reasoning: candidate?.reasoning,
},
});
if (pluginDecision) {
return pluginDecision;
}
const isAnthropicFamilyModel =
normalizedProvider === "anthropic" ||
normalizedProvider === "amazon-bedrock" ||
@@ -155,9 +204,6 @@ export function resolveThinkingDefaultForModel(params: {
if (isAnthropicFamilyModel && CLAUDE_46_MODEL_RE.test(modelLower)) {
return "adaptive";
}
const candidate = params.catalog?.find(
(entry) => entry.provider === params.provider && entry.id === params.model,
);
if (candidate?.reasoning) {
return "low";
}

View File

@@ -1,5 +1,6 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import type { OpenClawConfig } from "../../config/config.js";
import type { ProviderPlugin } from "../../plugins/types.js";
import type { RuntimeEnv } from "../../runtime.js";
const mocks = vi.hoisted(() => ({
@@ -15,8 +16,6 @@ const mocks = vi.hoisted(() => ({
upsertAuthProfile: vi.fn(),
resolvePluginProviders: vi.fn(),
createClackPrompter: vi.fn(),
loginOpenAICodexOAuth: vi.fn(),
writeOAuthCredentials: vi.fn(),
loadValidConfigOrThrow: vi.fn(),
updateConfig: vi.fn(),
logConfigUpdated: vi.fn(),
@@ -59,18 +58,6 @@ vi.mock("../../wizard/clack-prompter.js", () => ({
createClackPrompter: mocks.createClackPrompter,
}));
vi.mock("../openai-codex-oauth.js", () => ({
loginOpenAICodexOAuth: mocks.loginOpenAICodexOAuth,
}));
vi.mock("../onboard-auth.js", async (importActual) => {
const actual = await importActual<typeof import("../onboard-auth.js")>();
return {
...actual,
writeOAuthCredentials: mocks.writeOAuthCredentials,
};
});
vi.mock("./shared.js", async (importActual) => {
const actual = await importActual<typeof import("./shared.js")>();
return {
@@ -88,7 +75,8 @@ vi.mock("../onboard-helpers.js", () => ({
openUrl: mocks.openUrl,
}));
const { modelsAuthLoginCommand, modelsAuthPasteTokenCommand } = await import("./auth.js");
const { modelsAuthLoginCommand, modelsAuthPasteTokenCommand, modelsAuthSetupTokenCommand } =
await import("./auth.js");
function createRuntime(): RuntimeEnv {
return {
@@ -116,10 +104,30 @@ function withInteractiveStdin() {
};
}
/** Test fixture: a provider plugin exposing a single OAuth auth method. */
function createProvider(params: {
  id: string;
  label?: string;
  run: NonNullable<ProviderPlugin["auth"]>[number]["run"];
}): ProviderPlugin {
  const oauthMethod = {
    id: "oauth",
    label: "OAuth",
    kind: "oauth" as const,
    run: params.run,
  };
  return {
    id: params.id,
    label: params.label ?? params.id,
    auth: [oauthMethod],
  };
}
describe("modelsAuthLoginCommand", () => {
let restoreStdin: (() => void) | null = null;
let currentConfig: OpenClawConfig;
let lastUpdatedConfig: OpenClawConfig | null;
let runProviderAuth: ReturnType<typeof vi.fn>;
beforeEach(() => {
vi.clearAllMocks();
@@ -151,16 +159,29 @@ describe("modelsAuthLoginCommand", () => {
note: vi.fn(async () => {}),
select: vi.fn(),
});
mocks.loginOpenAICodexOAuth.mockResolvedValue({
type: "oauth",
provider: "openai-codex",
access: "access-token",
refresh: "refresh-token",
expires: Date.now() + 60_000,
email: "user@example.com",
runProviderAuth = vi.fn().mockResolvedValue({
profiles: [
{
profileId: "openai-codex:user@example.com",
credential: {
type: "oauth",
provider: "openai-codex",
access: "access-token",
refresh: "refresh-token",
expires: Date.now() + 60_000,
email: "user@example.com",
},
},
],
defaultModel: "openai-codex/gpt-5.4",
});
mocks.writeOAuthCredentials.mockResolvedValue("openai-codex:user@example.com");
mocks.resolvePluginProviders.mockReturnValue([]);
mocks.resolvePluginProviders.mockReturnValue([
createProvider({
id: "openai-codex",
label: "OpenAI Codex",
run: runProviderAuth as ProviderPlugin["auth"][number]["run"],
}),
]);
mocks.loadAuthProfileStoreForRuntime.mockReturnValue({ profiles: {}, usageStats: {} });
mocks.listProfilesForProvider.mockReturnValue([]);
mocks.clearAuthProfileCooldown.mockResolvedValue(undefined);
@@ -171,19 +192,20 @@ describe("modelsAuthLoginCommand", () => {
restoreStdin = null;
});
it("supports built-in openai-codex login without provider plugins", async () => {
it("runs plugin-owned openai-codex login", async () => {
const runtime = createRuntime();
await modelsAuthLoginCommand({ provider: "openai-codex" }, runtime);
expect(mocks.loginOpenAICodexOAuth).toHaveBeenCalledOnce();
expect(mocks.writeOAuthCredentials).toHaveBeenCalledWith(
"openai-codex",
expect.any(Object),
"/tmp/openclaw/agents/main",
{ syncSiblingAgents: true },
);
expect(mocks.resolvePluginProviders).not.toHaveBeenCalled();
expect(runProviderAuth).toHaveBeenCalledOnce();
expect(mocks.upsertAuthProfile).toHaveBeenCalledWith({
profileId: "openai-codex:user@example.com",
credential: expect.objectContaining({
type: "oauth",
provider: "openai-codex",
}),
agentDir: "/tmp/openclaw/agents/main",
});
expect(lastUpdatedConfig?.auth?.profiles?.["openai-codex:user@example.com"]).toMatchObject({
provider: "openai-codex",
mode: "oauth",
@@ -236,7 +258,7 @@ describe("modelsAuthLoginCommand", () => {
});
// Verify clearing happens before login attempt
const clearOrder = mocks.clearAuthProfileCooldown.mock.invocationCallOrder[0];
const loginOrder = mocks.loginOpenAICodexOAuth.mock.invocationCallOrder[0];
const loginOrder = runProviderAuth.mock.invocationCallOrder[0];
expect(clearOrder).toBeLessThan(loginOrder);
});
@@ -248,7 +270,7 @@ describe("modelsAuthLoginCommand", () => {
await modelsAuthLoginCommand({ provider: "openai-codex" }, runtime);
expect(mocks.loginOpenAICodexOAuth).toHaveBeenCalledOnce();
expect(runProviderAuth).toHaveBeenCalledOnce();
});
it("loads lockout state from the agent-scoped store", async () => {
@@ -261,11 +283,11 @@ describe("modelsAuthLoginCommand", () => {
expect(mocks.loadAuthProfileStoreForRuntime).toHaveBeenCalledWith("/tmp/openclaw/agents/main");
});
it("keeps existing plugin error behavior for non built-in providers", async () => {
it("reports loaded plugin providers when requested provider is unavailable", async () => {
const runtime = createRuntime();
await expect(modelsAuthLoginCommand({ provider: "anthropic" }, runtime)).rejects.toThrow(
"No provider plugins found.",
'Unknown provider "anthropic". Loaded providers: openai-codex. Verify plugins via `openclaw plugins list --json`.',
);
});
@@ -292,4 +314,47 @@ describe("modelsAuthLoginCommand", () => {
exitSpy.mockRestore();
}
});
it("runs token auth for any token-capable provider plugin", async () => {
const runtime = createRuntime();
const runTokenAuth = vi.fn().mockResolvedValue({
profiles: [
{
profileId: "moonshot:token",
credential: {
type: "token",
provider: "moonshot",
token: "moonshot-token",
},
},
],
});
mocks.resolvePluginProviders.mockReturnValue([
{
id: "moonshot",
label: "Moonshot",
auth: [
{
id: "setup-token",
label: "setup-token",
kind: "token",
run: runTokenAuth,
},
],
},
]);
await modelsAuthSetupTokenCommand({ provider: "moonshot", yes: true }, runtime);
expect(runTokenAuth).toHaveBeenCalledOnce();
expect(mocks.upsertAuthProfile).toHaveBeenCalledWith({
profileId: "moonshot:token",
credential: {
type: "token",
provider: "moonshot",
token: "moonshot-token",
},
agentDir: "/tmp/openclaw/agents/main",
});
});
});

View File

@@ -21,22 +21,21 @@ import { normalizeProviderId } from "../../agents/model-selection.js";
import { resolveDefaultAgentWorkspaceDir } from "../../agents/workspace.js";
import { formatCliCommand } from "../../cli/command-format.js";
import { parseDurationMs } from "../../cli/parse-duration.js";
import type { OpenClawConfig } from "../../config/config.js";
import { logConfigUpdated } from "../../config/logging.js";
import { resolvePluginProviders } from "../../plugins/providers.js";
import type { ProviderAuthResult, ProviderPlugin } from "../../plugins/types.js";
import type {
ProviderAuthMethod,
ProviderAuthResult,
ProviderPlugin,
} from "../../plugins/types.js";
import type { RuntimeEnv } from "../../runtime.js";
import { stylePromptHint, stylePromptMessage } from "../../terminal/prompt-style.js";
import { createClackPrompter } from "../../wizard/clack-prompter.js";
import { validateAnthropicSetupToken } from "../auth-token.js";
import { isRemoteEnvironment } from "../oauth-env.js";
import { createVpsAwareOAuthHandlers } from "../oauth-flow.js";
import { applyAuthProfileConfig, writeOAuthCredentials } from "../onboard-auth.js";
import { applyAuthProfileConfig } from "../onboard-auth.js";
import { openUrl } from "../onboard-helpers.js";
import {
applyOpenAICodexModelDefault,
OPENAI_CODEX_DEFAULT_MODEL,
} from "../openai-codex-model-default.js";
import { loginOpenAICodexOAuth } from "../openai-codex-oauth.js";
import {
applyDefaultModel,
mergeConfigPatch,
@@ -78,40 +77,250 @@ const select = async <T>(params: Parameters<typeof clackSelect<T>>[0]) =>
}),
);
type TokenProvider = "anthropic";
/**
 * Classify a raw --provider value: null when absent/blank, "anthropic" for any
 * alias of it, "custom" for everything else.
 */
function resolveTokenProvider(raw?: string): TokenProvider | "custom" | null {
  const trimmed = raw?.trim();
  if (!trimmed) {
    return null;
  }
  return normalizeProviderId(trimmed) === "anthropic" ? "anthropic" : "custom";
}
/** Manual token profiles are keyed as "<normalized-provider>:manual". */
function resolveDefaultTokenProfileId(provider: string): string {
  const normalized = normalizeProviderId(provider);
  return `${normalized}:manual`;
}
type ResolvedModelsAuthContext = {
config: OpenClawConfig;
agentDir: string;
workspaceDir: string;
providers: ProviderPlugin[];
};
/** Keep only plugins that declare at least one auth method. */
function listProvidersWithAuthMethods(providers: ProviderPlugin[]): ProviderPlugin[] {
  return providers.filter(({ auth }) => auth.length > 0);
}
/** Token-kind auth methods only (API keys / setup tokens); OAuth is excluded. */
function listTokenAuthMethods(provider: ProviderPlugin): ProviderAuthMethod[] {
  return provider.auth.filter(({ kind }) => kind === "token");
}
/** Providers exposing at least one token-kind auth method. */
function listProvidersWithTokenMethods(providers: ProviderPlugin[]): ProviderPlugin[] {
  return providers.filter((provider) => provider.auth.some((method) => method.kind === "token"));
}
async function resolveModelsAuthContext(): Promise<ResolvedModelsAuthContext> {
const config = await loadValidConfigOrThrow();
const defaultAgentId = resolveDefaultAgentId(config);
const agentDir = resolveAgentDir(config, defaultAgentId);
const workspaceDir =
resolveAgentWorkspaceDir(config, defaultAgentId) ?? resolveDefaultAgentWorkspaceDir();
const providers = resolvePluginProviders({ config, workspaceDir });
return { config, agentDir, workspaceDir, providers };
}
/**
 * Resolve an explicitly requested provider. Returns null when nothing was
 * requested, the matched plugin when found, and throws an actionable error
 * (listing the loaded provider ids, sorted) otherwise.
 */
function resolveRequestedProviderOrThrow(
  providers: ProviderPlugin[],
  rawProvider?: string,
): ProviderPlugin | null {
  const requested = rawProvider?.trim();
  if (!requested) {
    return null;
  }
  const matched = resolveProviderMatch(providers, requested);
  if (matched) {
    return matched;
  }
  // Build a sorted id list for the error message; map() yields a fresh array,
  // so the in-place sort cannot mutate caller state.
  const ids = providers.map((provider) => provider.id).filter(Boolean);
  ids.sort((a, b) => a.localeCompare(b));
  const availableText = ids.length > 0 ? ids.join(", ") : "(none)";
  throw new Error(
    `Unknown provider "${requested}". Loaded providers: ${availableText}. Verify plugins via \`${formatCliCommand("openclaw plugins list --json")}\`.`,
  );
}
/**
 * Resolve an explicitly requested token auth method. Returns null when no
 * method was requested; throws when the request names a method that is
 * missing or is not of kind "token".
 */
function resolveTokenMethodOrThrow(
  provider: ProviderPlugin,
  rawMethod?: string,
): ProviderAuthMethod | null {
  if (!rawMethod?.trim()) {
    return null;
  }
  const matched = pickAuthMethod(provider, rawMethod);
  if (matched?.kind === "token") {
    return matched;
  }
  const tokenMethods = listTokenAuthMethods(provider);
  const available = tokenMethods.map((method) => method.id).join(", ") || "(none)";
  throw new Error(
    `Unknown token auth method "${rawMethod}" for provider "${provider.id}". Available token methods: ${available}.`,
  );
}
/**
 * Choose an auth method for a provider: an explicit request wins, a lone
 * method is auto-selected, otherwise the user picks interactively.
 */
async function pickProviderAuthMethod(params: {
  provider: ProviderPlugin;
  requestedMethod?: string;
  prompter: ReturnType<typeof createClackPrompter>;
}) {
  const { provider, prompter } = params;
  const explicit = pickAuthMethod(provider, params.requestedMethod);
  if (explicit) {
    return explicit;
  }
  if (provider.auth.length === 1) {
    return provider.auth[0] ?? null;
  }
  const chosenId = await prompter.select({
    message: `Auth method for ${provider.label}`,
    options: provider.auth.map((method) => ({
      value: method.id,
      label: method.label,
      hint: method.hint,
    })),
  });
  return provider.auth.find((method) => method.id === String(chosenId)) ?? null;
}
/**
 * Choose a token-kind auth method. Precedence: explicit --method (throws if
 * invalid) > canonical "setup-token" > the only token method > interactive
 * prompt. Returns null when the provider has no token methods at all.
 */
async function pickProviderTokenMethod(params: {
  provider: ProviderPlugin;
  requestedMethod?: string;
  prompter: ReturnType<typeof createClackPrompter>;
}) {
  const { provider, prompter } = params;
  const explicit = resolveTokenMethodOrThrow(provider, params.requestedMethod);
  if (explicit) {
    return explicit;
  }
  const tokenMethods = listTokenAuthMethods(provider);
  if (tokenMethods.length === 0) {
    return null;
  }
  const setupToken = tokenMethods.find((method) => method.id === "setup-token");
  if (setupToken) {
    return setupToken;
  }
  if (tokenMethods.length === 1) {
    return tokenMethods[0] ?? null;
  }
  const chosenId = await prompter.select({
    message: `Token method for ${provider.label}`,
    options: tokenMethods.map((method) => ({
      value: method.id,
      label: method.label,
      hint: method.hint,
    })),
  });
  return tokenMethods.find((method) => method.id === String(chosenId)) ?? null;
}
/**
 * Persist a provider auth run: store credentials, fold profile/config changes
 * into the saved config, then report the outcome.
 *
 * Order matters: profiles go into the auth store first so the config update
 * below references credentials that already exist on disk.
 */
async function persistProviderAuthResult(params: {
  result: ProviderAuthResult;
  agentDir: string;
  runtime: RuntimeEnv;
  prompter: ReturnType<typeof createClackPrompter>;
  setDefault?: boolean;
}) {
  // 1) Write every returned credential into the agent-scoped profile store.
  for (const profile of params.result.profiles) {
    upsertAuthProfile({
      profileId: profile.profileId,
      credential: profile.credential,
      agentDir: params.agentDir,
    });
  }
  // 2) Apply config changes in a single update: provider patch first, then
  //    per-profile auth entries, then (only with setDefault) the default model.
  await updateConfig((cfg) => {
    let next = cfg;
    if (params.result.configPatch) {
      next = mergeConfigPatch(next, params.result.configPatch);
    }
    for (const profile of params.result.profiles) {
      next = applyAuthProfileConfig(next, {
        profileId: profile.profileId,
        provider: profile.credential.provider,
        mode: credentialMode(profile.credential),
      });
    }
    if (params.setDefault && params.result.defaultModel) {
      next = applyDefaultModel(next, params.result.defaultModel);
    }
    return next;
  });
  logConfigUpdated(params.runtime);
  // 3) Surface each stored profile and the default-model outcome to the user.
  for (const profile of params.result.profiles) {
    params.runtime.log(
      `Auth profile: ${profile.profileId} (${profile.credential.provider}/${credentialMode(profile.credential)})`,
    );
  }
  if (params.result.defaultModel) {
    params.runtime.log(
      params.setDefault
        ? `Default model set to ${params.result.defaultModel}`
        : `Default model available: ${params.result.defaultModel} (use --set-default to apply)`,
    );
  }
  // Provider plugins may attach free-form follow-up notes; show them last.
  if (params.result.notes && params.result.notes.length > 0) {
    await params.prompter.note(params.result.notes.join("\n"), "Provider notes");
  }
}
/**
 * Execute a single provider auth method end to end.
 *
 * Clears expired profile lockouts for the provider, invokes the method with a
 * fully-populated auth context (prompter, runtime, remote detection, URL
 * opener, and VPS-aware OAuth handlers), then persists whatever the method
 * returned via `persistProviderAuthResult`.
 */
async function runProviderAuthMethod(params: {
  config: OpenClawConfig;
  agentDir: string;
  workspaceDir: string;
  provider: ProviderPlugin;
  method: ProviderAuthMethod;
  runtime: RuntimeEnv;
  prompter: ReturnType<typeof createClackPrompter>;
  setDefault?: boolean;
}) {
  const { config, agentDir, workspaceDir, provider, method, runtime, prompter, setDefault } =
    params;
  // Drop stale lockouts up front so a previously failed attempt does not
  // block this fresh login.
  await clearStaleProfileLockouts(provider.id, agentDir);
  const result = await method.run({
    config,
    agentDir,
    workspaceDir,
    prompter,
    runtime,
    isRemote: isRemoteEnvironment(),
    openUrl: async (url) => {
      await openUrl(url);
    },
    oauth: {
      createVpsAwareHandlers: (runtimeParams) => createVpsAwareOAuthHandlers(runtimeParams),
    },
  });
  await persistProviderAuthResult({ result, agentDir, runtime, prompter, setDefault });
}
export async function modelsAuthSetupTokenCommand(
opts: { provider?: string; yes?: boolean },
runtime: RuntimeEnv,
) {
const provider = resolveTokenProvider(opts.provider ?? "anthropic");
if (provider !== "anthropic") {
throw new Error("Only --provider anthropic is supported for setup-token.");
}
if (!process.stdin.isTTY) {
throw new Error("setup-token requires an interactive TTY.");
}
const { config, agentDir, workspaceDir, providers } = await resolveModelsAuthContext();
const tokenProviders = listProvidersWithTokenMethods(providers);
if (tokenProviders.length === 0) {
throw new Error(
`No provider token-auth plugins found. Install one via \`${formatCliCommand("openclaw plugins install")}\`.`,
);
}
const provider =
resolveRequestedProviderOrThrow(tokenProviders, opts.provider ?? "anthropic") ??
tokenProviders.find((candidate) => normalizeProviderId(candidate.id) === "anthropic") ??
tokenProviders[0] ??
null;
if (!provider) {
throw new Error("No token-capable provider is available.");
}
if (!opts.yes) {
const proceed = await confirm({
message: "Have you run `claude setup-token` and copied the token?",
message: `Continue with ${provider.label} token auth?`,
initialValue: true,
});
if (!proceed) {
@@ -119,32 +328,21 @@ export async function modelsAuthSetupTokenCommand(
}
}
const tokenInput = await text({
message: "Paste Anthropic setup-token",
validate: (value) => validateAnthropicSetupToken(String(value ?? "")),
const prompter = createClackPrompter();
const method = await pickProviderTokenMethod({ provider, prompter });
if (!method) {
throw new Error(`Provider "${provider.id}" does not expose a token auth method.`);
}
await runProviderAuthMethod({
config,
agentDir,
workspaceDir,
provider,
method,
runtime,
prompter,
});
const token = String(tokenInput ?? "").trim();
const profileId = resolveDefaultTokenProfileId(provider);
upsertAuthProfile({
profileId,
credential: {
type: "token",
provider,
token,
},
});
await updateConfig((cfg) =>
applyAuthProfileConfig(cfg, {
profileId,
provider,
mode: "token",
}),
);
logConfigUpdated(runtime);
runtime.log(`Auth profile: ${profileId} (${provider}/token)`);
}
export async function modelsAuthPasteTokenCommand(
@@ -190,10 +388,17 @@ export async function modelsAuthPasteTokenCommand(
}
export async function modelsAuthAddCommand(_opts: Record<string, never>, runtime: RuntimeEnv) {
const { config, agentDir, workspaceDir, providers } = await resolveModelsAuthContext();
const tokenProviders = listProvidersWithTokenMethods(providers);
const provider = await select({
message: "Token provider",
options: [
{ value: "anthropic", label: "anthropic" },
...tokenProviders.map((providerPlugin) => ({
value: providerPlugin.id,
label: providerPlugin.id,
hint: providerPlugin.docsPath ? `Docs: ${providerPlugin.docsPath}` : undefined,
})),
{ value: "custom", label: "custom (type provider id)" },
],
});
@@ -210,25 +415,41 @@ export async function modelsAuthAddCommand(_opts: Record<string, never>, runtime
)
: provider;
const method = (await select({
message: "Token method",
options: [
...(providerId === "anthropic"
? [
{
value: "setup-token",
label: "setup-token (claude)",
hint: "Paste a setup-token from `claude setup-token`",
},
]
: []),
{ value: "paste", label: "paste token" },
],
})) as "setup-token" | "paste";
if (method === "setup-token") {
await modelsAuthSetupTokenCommand({ provider: providerId }, runtime);
return;
const providerPlugin =
provider === "custom" ? null : resolveRequestedProviderOrThrow(tokenProviders, providerId);
if (providerPlugin) {
const tokenMethods = listTokenAuthMethods(providerPlugin);
const methodId =
tokenMethods.length > 0
? await select({
message: "Token method",
options: [
...tokenMethods.map((method) => ({
value: method.id,
label: method.label,
hint: method.hint,
})),
{ value: "paste", label: "paste token" },
],
})
: "paste";
if (methodId !== "paste") {
const prompter = createClackPrompter();
const method = tokenMethods.find((candidate) => candidate.id === methodId);
if (!method) {
throw new Error(`Unknown token auth method "${String(methodId)}".`);
}
await runProviderAuthMethod({
config,
agentDir,
workspaceDir,
provider: providerPlugin,
method,
runtime,
prompter,
});
return;
}
}
const profileIdDefault = resolveDefaultTokenProfileId(providerId);
@@ -292,22 +513,7 @@ export function resolveRequestedLoginProviderOrThrow(
providers: ProviderPlugin[],
rawProvider?: string,
): ProviderPlugin | null {
const requested = rawProvider?.trim();
if (!requested) {
return null;
}
const matched = resolveProviderMatch(providers, requested);
if (matched) {
return matched;
}
const available = providers
.map((provider) => provider.id)
.filter(Boolean)
.toSorted((a, b) => a.localeCompare(b));
const availableText = available.length > 0 ? available.join(", ") : "(none)";
throw new Error(
`Unknown provider "${requested}". Loaded providers: ${availableText}. Verify plugins via \`${formatCliCommand("openclaw plugins list --json")}\`.`,
);
return resolveRequestedProviderOrThrow(providers, rawProvider);
}
function credentialMode(credential: AuthProfileCredential): "api_key" | "oauth" | "token" {
@@ -320,177 +526,55 @@ function credentialMode(credential: AuthProfileCredential): "api_key" | "oauth"
return "oauth";
}
/**
 * Built-in OpenAI Codex OAuth login flow.
 *
 * Runs the Codex OAuth handshake, writes the resulting credentials as an
 * `openai-codex` profile (synced to sibling agents), registers the profile in
 * config — optionally making the Codex model the default — and logs the
 * outcome. Throws when OAuth completes without returning credentials.
 */
async function runBuiltInOpenAICodexLogin(params: {
  opts: LoginOptions;
  runtime: RuntimeEnv;
  prompter: ReturnType<typeof createClackPrompter>;
  agentDir: string;
}) {
  const { opts, runtime, prompter, agentDir } = params;
  const creds = await loginOpenAICodexOAuth({
    prompter,
    runtime,
    isRemote: isRemoteEnvironment(),
    openUrl: async (url) => {
      await openUrl(url);
    },
    localBrowserMessage: "Complete sign-in in browser…",
  });
  if (!creds) {
    throw new Error("OpenAI Codex OAuth did not return credentials.");
  }
  const profileId = await writeOAuthCredentials("openai-codex", creds, agentDir, {
    syncSiblingAgents: true,
  });
  await updateConfig((cfg) => {
    const withProfile = applyAuthProfileConfig(cfg, {
      profileId,
      provider: "openai-codex",
      mode: "oauth",
    });
    // Only flip the default model when the caller explicitly asked for it.
    return opts.setDefault ? applyOpenAICodexModelDefault(withProfile).next : withProfile;
  });
  logConfigUpdated(runtime);
  runtime.log(`Auth profile: ${profileId} (openai-codex/oauth)`);
  runtime.log(
    opts.setDefault
      ? `Default model set to ${OPENAI_CODEX_DEFAULT_MODEL}`
      : `Default model available: ${OPENAI_CODEX_DEFAULT_MODEL} (use --set-default to apply)`,
  );
}
export async function modelsAuthLoginCommand(opts: LoginOptions, runtime: RuntimeEnv) {
if (!process.stdin.isTTY) {
throw new Error("models auth login requires an interactive TTY.");
}
const config = await loadValidConfigOrThrow();
const defaultAgentId = resolveDefaultAgentId(config);
const agentDir = resolveAgentDir(config, defaultAgentId);
const workspaceDir =
resolveAgentWorkspaceDir(config, defaultAgentId) ?? resolveDefaultAgentWorkspaceDir();
const requestedProviderId = normalizeProviderId(String(opts.provider ?? ""));
const { config, agentDir, workspaceDir, providers } = await resolveModelsAuthContext();
const prompter = createClackPrompter();
if (requestedProviderId === "openai-codex") {
await clearStaleProfileLockouts("openai-codex", agentDir);
await runBuiltInOpenAICodexLogin({
opts,
runtime,
prompter,
agentDir,
});
return;
}
const providers = resolvePluginProviders({ config, workspaceDir });
if (providers.length === 0) {
const authProviders = listProvidersWithAuthMethods(providers);
if (authProviders.length === 0) {
throw new Error(
`No provider plugins found. Install one via \`${formatCliCommand("openclaw plugins install")}\`.`,
);
}
const requestedProvider = resolveRequestedLoginProviderOrThrow(providers, opts.provider);
const requestedProvider = resolveRequestedLoginProviderOrThrow(authProviders, opts.provider);
const selectedProvider =
requestedProvider ??
(await prompter
.select({
message: "Select a provider",
options: providers.map((provider) => ({
options: authProviders.map((provider) => ({
value: provider.id,
label: provider.label,
hint: provider.docsPath ? `Docs: ${provider.docsPath}` : undefined,
})),
})
.then((id) => resolveProviderMatch(providers, String(id))));
.then((id) => resolveProviderMatch(authProviders, String(id))));
if (!selectedProvider) {
throw new Error("Unknown provider. Use --provider <id> to pick a provider plugin.");
}
await clearStaleProfileLockouts(selectedProvider.id, agentDir);
const chosenMethod =
pickAuthMethod(selectedProvider, opts.method) ??
(selectedProvider.auth.length === 1
? selectedProvider.auth[0]
: await prompter
.select({
message: `Auth method for ${selectedProvider.label}`,
options: selectedProvider.auth.map((method) => ({
value: method.id,
label: method.label,
hint: method.hint,
})),
})
.then((id) => selectedProvider.auth.find((method) => method.id === String(id))));
const chosenMethod = await pickProviderAuthMethod({
provider: selectedProvider,
requestedMethod: opts.method,
prompter,
});
if (!chosenMethod) {
throw new Error("Unknown auth method. Use --method <id> to select one.");
}
const isRemote = isRemoteEnvironment();
const result: ProviderAuthResult = await chosenMethod.run({
await runProviderAuthMethod({
config,
agentDir,
workspaceDir,
prompter,
provider: selectedProvider,
method: chosenMethod,
runtime,
isRemote,
openUrl: async (url) => {
await openUrl(url);
},
oauth: {
createVpsAwareHandlers: (params) => createVpsAwareOAuthHandlers(params),
},
prompter,
setDefault: opts.setDefault,
});
for (const profile of result.profiles) {
upsertAuthProfile({
profileId: profile.profileId,
credential: profile.credential,
agentDir,
});
}
await updateConfig((cfg) => {
let next = cfg;
if (result.configPatch) {
next = mergeConfigPatch(next, result.configPatch);
}
for (const profile of result.profiles) {
next = applyAuthProfileConfig(next, {
profileId: profile.profileId,
provider: profile.credential.provider,
mode: credentialMode(profile.credential),
});
}
if (opts.setDefault && result.defaultModel) {
next = applyDefaultModel(next, result.defaultModel);
}
return next;
});
logConfigUpdated(runtime);
for (const profile of result.profiles) {
runtime.log(
`Auth profile: ${profile.profileId} (${profile.credential.provider}/${credentialMode(profile.credential)})`,
);
}
if (result.defaultModel) {
runtime.log(
opts.setDefault
? `Default model set to ${result.defaultModel}`
: `Default model available: ${result.defaultModel} (use --set-default to apply)`,
);
}
if (result.notes && result.notes.length > 0) {
await prompter.note(result.notes.join("\n"), "Provider notes");
}
}

View File

@@ -10,7 +10,9 @@ export type {
ProviderBuiltInModelSuppressionResult,
ProviderBuildMissingAuthMessageContext,
ProviderCacheTtlEligibilityContext,
ProviderDefaultThinkingPolicyContext,
ProviderFetchUsageSnapshotContext,
ProviderModernModelPolicyContext,
ProviderPreparedRuntimeAuth,
ProviderResolvedUsageAuth,
ProviderPrepareExtraParamsContext,
@@ -20,6 +22,7 @@ export type {
ProviderResolveDynamicModelContext,
ProviderNormalizeResolvedModelContext,
ProviderRuntimeModel,
ProviderThinkingPolicyContext,
ProviderWrapStreamFnContext,
OpenClawPluginService,
ProviderAuthContext,

View File

@@ -114,7 +114,9 @@ export type {
ProviderBuiltInModelSuppressionResult,
ProviderBuildMissingAuthMessageContext,
ProviderCacheTtlEligibilityContext,
ProviderDefaultThinkingPolicyContext,
ProviderFetchUsageSnapshotContext,
ProviderModernModelPolicyContext,
ProviderPreparedRuntimeAuth,
ProviderResolvedUsageAuth,
ProviderPrepareExtraParamsContext,
@@ -124,6 +126,7 @@ export type {
ProviderResolveDynamicModelContext,
ProviderNormalizeResolvedModelContext,
ProviderRuntimeModel,
ProviderThinkingPolicyContext,
ProviderWrapStreamFnContext,
} from "../plugins/types.js";
export type {

View File

@@ -17,10 +17,14 @@ import {
buildProviderMissingAuthMessageWithPlugin,
prepareProviderExtraParams,
resolveProviderCacheTtlEligibility,
resolveProviderBinaryThinking,
resolveProviderBuiltInModelSuppression,
resolveProviderDefaultThinkingLevel,
resolveProviderModernModelRef,
resolveProviderUsageSnapshotWithPlugin,
resolveProviderCapabilitiesWithPlugin,
resolveProviderUsageAuthWithPlugin,
resolveProviderXHighThinking,
normalizeProviderResolvedModelWithPlugin,
prepareProviderDynamicModel,
prepareProviderRuntimeAuth,
@@ -143,6 +147,10 @@ describe("provider-runtime", () => {
resolveUsageAuth,
fetchUsageSnapshot,
isCacheTtlEligible: ({ modelId }) => modelId.startsWith("anthropic/"),
isBinaryThinking: () => true,
supportsXHighThinking: ({ modelId }) => modelId === "gpt-5.4",
resolveDefaultThinkingLevel: ({ reasoning }) => (reasoning ? "low" : "off"),
isModernModelRef: ({ modelId }) => modelId.startsWith("gpt-5"),
},
];
});
@@ -278,6 +286,47 @@ describe("provider-runtime", () => {
}),
).toBe(true);
expect(
resolveProviderBinaryThinking({
provider: "demo",
context: {
provider: "demo",
modelId: "glm-5",
},
}),
).toBe(true);
expect(
resolveProviderXHighThinking({
provider: "demo",
context: {
provider: "demo",
modelId: "gpt-5.4",
},
}),
).toBe(true);
expect(
resolveProviderDefaultThinkingLevel({
provider: "demo",
context: {
provider: "demo",
modelId: "gpt-5.4",
reasoning: true,
},
}),
).toBe("low");
expect(
resolveProviderModernModelRef({
provider: "demo",
context: {
provider: "demo",
modelId: "gpt-5.4",
},
}),
).toBe(true);
expect(
buildProviderMissingAuthMessageWithPlugin({
provider: "openai",

View File

@@ -6,7 +6,9 @@ import type {
ProviderBuildMissingAuthMessageContext,
ProviderBuiltInModelSuppressionContext,
ProviderCacheTtlEligibilityContext,
ProviderDefaultThinkingPolicyContext,
ProviderFetchUsageSnapshotContext,
ProviderModernModelPolicyContext,
ProviderPrepareExtraParamsContext,
ProviderPrepareDynamicModelContext,
ProviderPrepareRuntimeAuthContext,
@@ -14,6 +16,7 @@ import type {
ProviderPlugin,
ProviderResolveDynamicModelContext,
ProviderRuntimeModel,
ProviderThinkingPolicyContext,
ProviderWrapStreamFnContext,
} from "./types.js";
@@ -179,6 +182,46 @@ export function resolveProviderCacheTtlEligibility(params: {
return resolveProviderRuntimePlugin(params)?.isCacheTtlEligible?.(params.context);
}
/**
 * Ask the provider's runtime plugin whether the model exposes a binary
 * on/off thinking toggle instead of the multi-level `/think` ladder.
 * Returns `undefined` when no plugin (or no hook) is registered.
 */
export function resolveProviderBinaryThinking(params: {
  provider: string;
  config?: OpenClawConfig;
  workspaceDir?: string;
  env?: NodeJS.ProcessEnv;
  context: ProviderThinkingPolicyContext;
}) {
  const plugin = resolveProviderRuntimePlugin(params);
  return plugin?.isBinaryThinking?.(params.context);
}
/**
 * Ask the provider's runtime plugin whether the model should expose the
 * `xhigh` thinking level. Returns `undefined` when no plugin (or no hook)
 * is registered for the provider.
 */
export function resolveProviderXHighThinking(params: {
  provider: string;
  config?: OpenClawConfig;
  workspaceDir?: string;
  env?: NodeJS.ProcessEnv;
  context: ProviderThinkingPolicyContext;
}) {
  const plugin = resolveProviderRuntimePlugin(params);
  return plugin?.supportsXHighThinking?.(params.context);
}
/**
 * Resolve the provider-owned default thinking level for a model, delegating
 * to the plugin's `resolveDefaultThinkingLevel` hook. Returns `undefined`
 * when no plugin (or no hook) is registered for the provider.
 */
export function resolveProviderDefaultThinkingLevel(params: {
  provider: string;
  config?: OpenClawConfig;
  workspaceDir?: string;
  env?: NodeJS.ProcessEnv;
  context: ProviderDefaultThinkingPolicyContext;
}) {
  const plugin = resolveProviderRuntimePlugin(params);
  return plugin?.resolveDefaultThinkingLevel?.(params.context);
}
/**
 * Ask the provider's runtime plugin whether the given provider/model ref
 * counts as a preferred "modern" model for live profile/smoke selection.
 * Returns `undefined` when no plugin (or no hook) is registered.
 */
export function resolveProviderModernModelRef(params: {
  provider: string;
  config?: OpenClawConfig;
  workspaceDir?: string;
  env?: NodeJS.ProcessEnv;
  context: ProviderModernModelPolicyContext;
}) {
  const plugin = resolveProviderRuntimePlugin(params);
  return plugin?.isModernModelRef?.(params.context);
}
export function buildProviderMissingAuthMessageWithPlugin(params: {
provider: string;
config?: OpenClawConfig;

View File

@@ -426,6 +426,40 @@ export type ProviderBuiltInModelSuppressionResult = {
errorMessage?: string;
};
/**
 * Provider-owned thinking policy input.
 *
 * Used by shared `/think`, ACP controls, and directive parsing to ask a
 * provider whether a model supports special reasoning UX such as xhigh or a
 * binary on/off toggle.
 */
export type ProviderThinkingPolicyContext = {
  /** Provider id the policy question is being asked about. */
  provider: string;
  /** Model id whose thinking UX is being resolved. */
  modelId: string;
};
/**
 * Provider-owned default thinking policy input.
 *
 * `reasoning` is the merged catalog hint for the selected model when one is
 * available. Providers can use it to keep "reasoning model => low" behavior
 * without re-reading the catalog themselves.
 */
export type ProviderDefaultThinkingPolicyContext = ProviderThinkingPolicyContext & {
  /** Catalog reasoning hint for the model, when known; absent otherwise. */
  reasoning?: boolean;
};
/**
 * Provider-owned "modern model" policy input.
 *
 * Live smoke/model-profile selection uses this to keep provider-specific
 * inclusion/exclusion rules out of core.
 */
export type ProviderModernModelPolicyContext = {
  /** Provider id the match is being evaluated for. */
  provider: string;
  /** Model id candidate being tested for "modern model" status. */
  modelId: string;
};
/**
* Final catalog augmentation hook.
*
@@ -651,6 +685,35 @@ export type ProviderPlugin = {
| Promise<Array<ModelCatalogEntry> | ReadonlyArray<ModelCatalogEntry> | null | undefined>
| null
| undefined;
/**
* Provider-owned binary thinking toggle.
*
* Return true when the provider exposes a coarse on/off reasoning control
* instead of the normal multi-level ladder shown by `/think`.
*/
isBinaryThinking?: (ctx: ProviderThinkingPolicyContext) => boolean | undefined;
/**
* Provider-owned xhigh reasoning support.
*
* Return true only for models that should expose the `xhigh` thinking level.
*/
supportsXHighThinking?: (ctx: ProviderThinkingPolicyContext) => boolean | undefined;
/**
* Provider-owned default thinking level.
*
* Use this to keep model-family defaults (for example Claude 4.6 =>
* adaptive) out of core command logic.
*/
resolveDefaultThinkingLevel?: (
ctx: ProviderDefaultThinkingPolicyContext,
) => "off" | "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive" | null | undefined;
/**
* Provider-owned "modern model" matcher used by live profile/smoke filters.
*
* Return true when the given provider/model ref should be treated as a
* preferred modern model candidate.
*/
isModernModelRef?: (ctx: ProviderModernModelPolicyContext) => boolean | undefined;
wizard?: ProviderPluginWizard;
formatApiKey?: (cred: AuthProfileCredential) => string;
refreshOAuth?: (cred: OAuthCredential) => Promise<OAuthCredential>;