refactor(providers): remove core default and usage bias

This commit is contained in:
Peter Steinberger
2026-04-04 07:19:15 +01:00
parent 9e4cf3996e
commit 666f1f4db0
15 changed files with 44 additions and 193 deletions

View File

@@ -163,7 +163,7 @@ Run an isolated agent turn:
curl -X POST http://127.0.0.1:18789/hooks/agent \
-H 'Authorization: Bearer SECRET' \
-H 'Content-Type: application/json' \
-d '{"message":"Summarize inbox","name":"Email","model":"openai/gpt-5.2-mini"}'
-d '{"message":"Summarize inbox","name":"Email","model":"openai/gpt-5.4-mini"}'
```
Fields: `message` (required), `name`, `agentId`, `wakeMode`, `deliver`, `channel`, `to`, `model`, `thinking`, `timeoutSeconds`.

View File

@@ -99,7 +99,7 @@ You can switch models for the current session without restarting:
/model
/model list
/model 3
/model openai/gpt-5.2
/model openai/gpt-5.4
/model status
```

View File

@@ -980,7 +980,7 @@ Time format in system prompt. Default: `auto` (OS preference).
- `pdfMaxPages`: default maximum pages considered by extraction fallback mode in the `pdf` tool.
- `verboseDefault`: default verbose level for agents. Values: `"off"`, `"on"`, `"full"`. Default: `"off"`.
- `elevatedDefault`: default elevated-output level for agents. Values: `"off"`, `"on"`, `"ask"`, `"full"`. Default: `"on"`.
- `model.primary`: format `provider/model` (e.g. `anthropic/claude-opus-4-6`). If you omit the provider, OpenClaw assumes `anthropic` (deprecated).
- `model.primary`: format `provider/model` (e.g. `openai/gpt-5.4`). If you omit the provider, OpenClaw assumes the configured default provider (currently `openai`); this fallback is deprecated, so prefer an explicit `provider/model`.
- `models`: the configured model catalog and allowlist for `/model`. Each entry can include `alias` (shortcut) and `params` (provider-specific, for example `temperature`, `maxTokens`, `cacheRetention`, `context1m`).
- `params`: global default provider parameters applied to all models. Set at `agents.defaults.params` (e.g. `{ cacheRetention: "long" }`).
- `params` merge precedence (config): `agents.defaults.params` (global base) is overridden by `agents.defaults.models["provider/model"].params` (per-model), then `agents.list[].params` (matching agent id) overrides by key. See [Prompt Caching](/reference/prompt-caching) for details.

View File

@@ -113,11 +113,11 @@ When validation fails:
defaults: {
model: {
primary: "anthropic/claude-sonnet-4-6",
fallbacks: ["openai/gpt-5.2"],
fallbacks: ["openai/gpt-5.4"],
},
models: {
"anthropic/claude-sonnet-4-6": { alias: "Sonnet" },
"openai/gpt-5.2": { alias: "GPT" },
"openai/gpt-5.4": { alias: "GPT" },
},
},
},

View File

@@ -2057,7 +2057,7 @@ for usage/billing and raise limits as needed.
agents.defaults.model.primary
```
Models are referenced as `provider/model` (example: `anthropic/claude-opus-4-6`). If you omit the provider, OpenClaw currently assumes `anthropic` as a temporary deprecation fallback - but you should still **explicitly** set `provider/model`.
Models are referenced as `provider/model` (example: `openai/gpt-5.4`). If you omit the provider, OpenClaw temporarily falls back to the configured default provider (currently `openai`) — this fallback is deprecated, so always set `provider/model` explicitly.
</Accordion>

View File

@@ -193,7 +193,7 @@ Live tests are split into two layers so we can isolate failures:
- How to select models:
- `OPENCLAW_LIVE_MODELS=modern` to run the modern allowlist (Opus/Sonnet 4.6+, GPT-5.x + Codex, Gemini 3, GLM 4.7, MiniMax M2.7, Grok 4)
- `OPENCLAW_LIVE_MODELS=all` is an alias for the modern allowlist
- or `OPENCLAW_LIVE_MODELS="openai/gpt-5.2,anthropic/claude-opus-4-6,..."` (comma allowlist)
- or `OPENCLAW_LIVE_MODELS="openai/gpt-5.4,anthropic/claude-opus-4-6,..."` (comma allowlist)
- How to select providers:
- `OPENCLAW_LIVE_PROVIDERS="google,google-antigravity,google-gemini-cli"` (comma allowlist)
- Where keys come from:
@@ -356,13 +356,13 @@ Docker notes:
Narrow, explicit allowlists are fastest and least flaky:
- Single model, direct (no gateway):
- `OPENCLAW_LIVE_MODELS="openai/gpt-5.2" pnpm test:live src/agents/models.profiles.live.test.ts`
- `OPENCLAW_LIVE_MODELS="openai/gpt-5.4" pnpm test:live src/agents/models.profiles.live.test.ts`
- Single model, gateway smoke:
- `OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.2" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts`
- `OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.4" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts`
- Tool calling across several providers:
- `OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.2,anthropic/claude-opus-4-6,google/gemini-3-flash-preview,zai/glm-4.7,minimax/MiniMax-M2.7" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts`
- `OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.4,anthropic/claude-opus-4-6,google/gemini-3-flash-preview,zai/glm-4.7,minimax/MiniMax-M2.7" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts`
- Google focus (Gemini API key + Antigravity):
- Gemini (API key): `OPENCLAW_LIVE_GATEWAY_MODELS="google/gemini-3-flash-preview" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts`
@@ -385,7 +385,7 @@ There is no fixed “CI model list” (live is opt-in), but these are the **reco
This is the “common models” run we expect to keep working:
- OpenAI (non-Codex): `openai/gpt-5.2` (optional: `openai/gpt-5.1`)
- OpenAI (non-Codex): `openai/gpt-5.4` (optional: `openai/gpt-5.4-mini`)
- OpenAI Codex: `openai-codex/gpt-5.4`
- Anthropic: `anthropic/claude-opus-4-6` (or `anthropic/claude-sonnet-4-6`)
- Google (Gemini API): `google/gemini-3.1-pro-preview` and `google/gemini-3-flash-preview` (avoid older Gemini 2.x models)
@@ -394,13 +394,13 @@ This is the “common models” run we expect to keep working:
- MiniMax: `minimax/MiniMax-M2.7`
Run gateway smoke with tools + image:
`OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.2,openai-codex/gpt-5.4,anthropic/claude-opus-4-6,google/gemini-3.1-pro-preview,google/gemini-3-flash-preview,google-antigravity/claude-opus-4-6-thinking,google-antigravity/gemini-3-flash,zai/glm-4.7,minimax/MiniMax-M2.7" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts`
`OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.4,openai-codex/gpt-5.4,anthropic/claude-opus-4-6,google/gemini-3.1-pro-preview,google/gemini-3-flash-preview,google-antigravity/claude-opus-4-6-thinking,google-antigravity/gemini-3-flash,zai/glm-4.7,minimax/MiniMax-M2.7" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts`
### Baseline: tool calling (Read + optional Exec)
Pick at least one per provider family:
- OpenAI: `openai/gpt-5.2` (or `openai/gpt-5-mini`)
- OpenAI: `openai/gpt-5.4` (or `openai/gpt-5.4-mini`)
- Anthropic: `anthropic/claude-opus-4-6` (or `anthropic/claude-sonnet-4-6`)
- Google: `google/gemini-3-flash-preview` (or `google/gemini-3.1-pro-preview`)
- Z.AI (GLM): `zai/glm-4.7`

View File

@@ -1,6 +1,6 @@
// Defaults for agent metadata when upstream does not supply them.
// Model id uses pi-ai's built-in Anthropic catalog.
export const DEFAULT_PROVIDER = "anthropic";
export const DEFAULT_MODEL = "claude-opus-4-6";
// Keep this aligned with the product-level latest-model baseline.
export const DEFAULT_PROVIDER = "openai";
export const DEFAULT_MODEL = "gpt-5.4";
// Conservative fallback used when model metadata is unavailable.
export const DEFAULT_CONTEXT_TOKENS = 200_000;

View File

@@ -101,8 +101,8 @@ function createProviderWithModelsConfig(provider: string, models: Array<Record<s
function resolveConfiguredRefForTest(cfg: Partial<OpenClawConfig>) {
return resolveConfiguredModelRef({
cfg: cfg as OpenClawConfig,
defaultProvider: "anthropic",
defaultModel: "claude-opus-4-6",
defaultProvider: "openai",
defaultModel: "gpt-5.4",
});
}
@@ -800,7 +800,7 @@ describe("model-selection", () => {
it("should fall back to hardcoded default when no custom providers have models", () => {
const cfg = createProviderWithModelsConfig("empty-provider", []);
const result = resolveConfiguredRefForTest(cfg);
expect(result).toEqual({ provider: "anthropic", model: "claude-opus-4-6" });
expect(result).toEqual({ provider: "openai", model: "gpt-5.4" });
});
it("should warn when specified model cannot be resolved and falls back to default", () => {
@@ -817,13 +817,13 @@ describe("model-selection", () => {
const result = resolveConfiguredModelRef({
cfg: cfg as OpenClawConfig,
defaultProvider: "anthropic",
defaultModel: "claude-opus-4-6",
defaultProvider: "openai",
defaultModel: "gpt-5.4",
});
expect(result).toEqual({ provider: "anthropic", model: "claude-opus-4-6" });
expect(result).toEqual({ provider: "openai", model: "gpt-5.4" });
expect(warnSpy).toHaveBeenCalledWith(
expect.stringContaining('Falling back to default "anthropic/claude-opus-4-6"'),
expect.stringContaining('Falling back to default "openai/gpt-5.4"'),
);
} finally {
warnSpy.mockRestore();
@@ -889,10 +889,10 @@ describe("model-selection", () => {
expect(resolveAnthropicOpusThinking(cfg)).toBe("adaptive");
});
it("defaults Anthropic Claude 4.6 models to adaptive", () => {
it("falls back to low when no provider thinking hook is active", () => {
const cfg = {} as OpenClawConfig;
expect(resolveAnthropicOpusThinking(cfg)).toBe("adaptive");
expect(resolveAnthropicOpusThinking(cfg)).toBe("low");
expect(
resolveThinkingDefault({
@@ -908,7 +908,7 @@ describe("model-selection", () => {
},
],
}),
).toBe("adaptive");
).toBe("low");
});
});
});

View File

@@ -81,8 +81,8 @@ describe("resolveSimpleCompletionSelectionForAgent", () => {
const selection = resolveSimpleCompletionSelectionForAgent({ cfg, agentId: "main" });
expect(selection).toEqual(
expect.objectContaining({
provider: "anthropic",
modelId: "claude-opus-4-6",
provider: "openai",
modelId: "gpt-5.4",
}),
);
});

View File

@@ -14,11 +14,11 @@ vi.mock("./status.summary.runtime.js", () => ({
classifySessionKey: vi.fn(() => "direct"),
resolveConfiguredStatusModelRef: vi.fn(() => ({
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
})),
resolveSessionModelRef: vi.fn(() => ({
provider: "openai",
model: "gpt-5.2",
model: "gpt-5.4",
})),
resolveContextTokensForModel: vi.fn(() => 200_000),
},
@@ -26,7 +26,7 @@ vi.mock("./status.summary.runtime.js", () => ({
vi.mock("../agents/defaults.js", () => ({
DEFAULT_CONTEXT_TOKENS: 200_000,
DEFAULT_MODEL: "gpt-5.2",
DEFAULT_MODEL: "gpt-5.4",
DEFAULT_PROVIDER: "openai",
}));

View File

@@ -129,7 +129,6 @@ vi.mock("../agents/auth-profiles.js", () => {
});
const providerRuntimeMocks = vi.hoisted(() => ({
resolveProviderUsageAuthWithPluginMock: vi.fn(async (..._args: unknown[]) => null),
providerRuntimeMock: {
augmentModelCatalogWithProviderPlugins: vi.fn((catalog: unknown) => catalog),
buildProviderAuthDoctorHintWithPlugin: vi.fn(() => undefined),
@@ -153,7 +152,6 @@ const providerRuntimeMocks = vi.hoisted(() => ({
resolveProviderRuntimePlugin: vi.fn(() => undefined),
resolveProviderStreamFn: vi.fn(() => undefined),
resolveProviderSyntheticAuthWithPlugin: vi.fn(() => undefined),
resolveProviderUsageSnapshotWithPlugin: vi.fn(async () => undefined),
resolveProviderXHighThinking: vi.fn(() => undefined),
runProviderDynamicModel: vi.fn(() => undefined),
wrapProviderStreamFn: vi.fn(() => undefined),
@@ -167,7 +165,6 @@ vi.mock("../plugins/provider-runtime.js", async () => {
return {
...actual,
...providerRuntimeMocks.providerRuntimeMock,
resolveProviderUsageAuthWithPlugin: providerRuntimeMocks.resolveProviderUsageAuthWithPluginMock,
};
});
@@ -178,7 +175,6 @@ vi.mock("../plugins/provider-runtime.ts", async () => {
return {
...actual,
...providerRuntimeMocks.providerRuntimeMock,
resolveProviderUsageAuthWithPlugin: providerRuntimeMocks.resolveProviderUsageAuthWithPluginMock,
};
});
@@ -224,8 +220,6 @@ describe("resolveProviderAuths key normalization", () => {
clearRuntimeConfigSnapshot();
clearConfigCache();
clearRuntimeAuthProfileStoreSnapshots();
providerRuntimeMocks.resolveProviderUsageAuthWithPluginMock.mockReset();
providerRuntimeMocks.resolveProviderUsageAuthWithPluginMock.mockResolvedValue(null);
});
afterEach(() => {

View File

@@ -11,7 +11,6 @@ import { normalizeProviderId } from "../agents/model-selection.js";
import { loadConfig, type OpenClawConfig } from "../config/config.js";
import { resolveProviderUsageAuthWithPlugin } from "../plugins/provider-runtime.js";
import { normalizeSecretInput } from "../utils/normalize-secret-input.js";
import { resolveLegacyPiAgentAccessToken } from "./provider-usage.shared.js";
import type { UsageProviderId } from "./provider-usage.types.js";
export type ProviderAuth = {
@@ -29,18 +28,6 @@ type UsageAuthState = {
agentDir?: string;
};
function parseGoogleUsageToken(apiKey: string): string {
try {
const parsed = JSON.parse(apiKey) as { token?: unknown };
if (typeof parsed?.token === "string") {
return parsed.token;
}
} catch {
// ignore
}
return apiKey;
}
function resolveProviderApiKeyFromConfigAndStore(params: {
state: UsageAuthState;
providerIds: string[];
@@ -183,46 +170,8 @@ async function resolveProviderUsageAuthFallback(params: {
state: UsageAuthState;
provider: UsageProviderId;
}): Promise<ProviderAuth | null> {
switch (params.provider) {
case "anthropic":
case "github-copilot":
case "openai-codex":
return await resolveOAuthToken(params);
case "google-gemini-cli": {
const auth = await resolveOAuthToken(params);
return auth ? { ...auth, token: parseGoogleUsageToken(auth.token) } : null;
}
case "zai": {
const apiKey = resolveProviderApiKeyFromConfigAndStore({
state: params.state,
providerIds: ["zai", "z-ai"],
envDirect: [params.state.env.ZAI_API_KEY, params.state.env.Z_AI_API_KEY],
});
if (apiKey) {
return { provider: "zai", token: apiKey };
}
const legacyToken = resolveLegacyPiAgentAccessToken(params.state.env, ["z-ai", "zai"]);
return legacyToken ? { provider: "zai", token: legacyToken } : null;
}
case "minimax": {
const apiKey = resolveProviderApiKeyFromConfigAndStore({
state: params.state,
providerIds: ["minimax"],
envDirect: [params.state.env.MINIMAX_CODE_PLAN_KEY, params.state.env.MINIMAX_API_KEY],
});
return apiKey ? { provider: "minimax", token: apiKey } : null;
}
case "xiaomi": {
const apiKey = resolveProviderApiKeyFromConfigAndStore({
state: params.state,
providerIds: ["xiaomi"],
envDirect: [params.state.env.XIAOMI_API_KEY],
});
return apiKey ? { provider: "xiaomi", token: apiKey } : null;
}
default:
return null;
}
void params;
return null;
}
export async function resolveProviderAuths(params: {

View File

@@ -10,16 +10,9 @@ import {
type ProviderAuth = ProviderUsageAuth<typeof loadProviderUsageSummary>;
const resolveProviderUsageSnapshotWithPlugin = vi.hoisted(() => vi.fn(async () => null));
vi.mock("../plugins/provider-runtime.js", () => ({
resolveProviderUsageSnapshotWithPlugin,
}));
describe("provider-usage.load", () => {
beforeEach(() => {
resolveProviderUsageSnapshotWithPlugin.mockReset();
resolveProviderUsageSnapshotWithPlugin.mockResolvedValue(null);
vi.restoreAllMocks();
});
it("loads snapshots for copilot gemini codex and xiaomi", async () => {

View File

@@ -2,13 +2,6 @@ import { loadConfig, type OpenClawConfig } from "../config/config.js";
import { resolveProviderUsageSnapshotWithPlugin } from "../plugins/provider-runtime.js";
import { resolveFetch } from "./fetch.js";
import { type ProviderAuth, resolveProviderAuths } from "./provider-usage.auth.js";
import {
fetchClaudeUsage,
fetchCodexUsage,
fetchGeminiUsage,
fetchMinimaxUsage,
fetchZaiUsage,
} from "./provider-usage.fetch.js";
import {
DEFAULT_TIMEOUT_MS,
ignoredErrors,
@@ -22,97 +15,19 @@ import type {
UsageSummary,
} from "./provider-usage.types.js";
async function fetchCopilotUsageFallback(
token: string,
timeoutMs: number,
fetchFn: typeof fetch,
): Promise<ProviderUsageSnapshot> {
const res = await fetchFn("https://api.github.com/copilot_internal/user", {
headers: {
Authorization: `token ${token}`,
"Editor-Version": "vscode/1.96.2",
"User-Agent": "GitHubCopilotChat/0.26.7",
"X-Github-Api-Version": "2025-04-01",
},
signal: AbortSignal.timeout(timeoutMs),
});
if (!res.ok) {
return {
provider: "github-copilot",
displayName: PROVIDER_LABELS["github-copilot"],
windows: [],
error: `HTTP ${res.status}`,
};
}
const data = (await res.json()) as {
quota_snapshots?: {
premium_interactions?: { percent_remaining?: number | null };
chat?: { percent_remaining?: number | null };
};
copilot_plan?: string;
};
const windows = [];
const premiumRemaining = data.quota_snapshots?.premium_interactions?.percent_remaining;
if (premiumRemaining !== undefined && premiumRemaining !== null) {
windows.push({
label: "Premium",
usedPercent: Math.max(0, Math.min(100, 100 - premiumRemaining)),
});
}
const chatRemaining = data.quota_snapshots?.chat?.percent_remaining;
if (chatRemaining !== undefined && chatRemaining !== null) {
windows.push({ label: "Chat", usedPercent: Math.max(0, Math.min(100, 100 - chatRemaining)) });
}
return {
provider: "github-copilot",
displayName: PROVIDER_LABELS["github-copilot"],
windows,
plan: data.copilot_plan,
};
}
async function fetchProviderUsageSnapshotFallback(params: {
auth: ProviderAuth;
timeoutMs: number;
fetchFn: typeof fetch;
}): Promise<ProviderUsageSnapshot> {
switch (params.auth.provider) {
case "anthropic":
return await fetchClaudeUsage(params.auth.token, params.timeoutMs, params.fetchFn);
case "github-copilot":
return await fetchCopilotUsageFallback(params.auth.token, params.timeoutMs, params.fetchFn);
case "google-gemini-cli":
return await fetchGeminiUsage(
params.auth.token,
params.timeoutMs,
params.fetchFn,
"google-gemini-cli",
);
case "openai-codex":
return await fetchCodexUsage(
params.auth.token,
params.auth.accountId,
params.timeoutMs,
params.fetchFn,
);
case "zai":
return await fetchZaiUsage(params.auth.token, params.timeoutMs, params.fetchFn);
case "minimax":
return await fetchMinimaxUsage(params.auth.token, params.timeoutMs, params.fetchFn);
case "xiaomi":
return {
provider: "xiaomi",
displayName: PROVIDER_LABELS.xiaomi,
windows: [],
};
default:
return {
provider: params.auth.provider,
displayName: PROVIDER_LABELS[params.auth.provider],
windows: [],
error: "Unsupported provider",
};
}
void params.timeoutMs;
void params.fetchFn;
return {
provider: params.auth.provider,
displayName: PROVIDER_LABELS[params.auth.provider] ?? params.auth.provider,
windows: [],
error: "Unsupported provider",
};
}
type UsageSummaryOptions = {

View File

@@ -25,7 +25,7 @@ export async function loadUsageWithAuth<T extends ProviderUsageLoader>(
now: usageNow,
auth,
fetch: mockFetch as unknown as typeof fetch,
// These tests exercise the built-in usage fetchers, not provider plugin hooks.
config: { plugins: { enabled: false } } as OpenClawConfig,
// Keep config minimal; bundled provider usage hooks own the provider-specific fetchers now.
config: {} as OpenClawConfig,
});
}