feat: support xhigh for Claude Opus 4.7

This commit is contained in:
Peter Steinberger
2026-04-17 00:58:16 +01:00
parent 272536015f
commit c73a6d2f68
11 changed files with 188 additions and 32 deletions

View File

@@ -15,12 +15,13 @@ title: "Thinking Levels"
- low → “think hard”
- medium → “think harder”
- high → “ultrathink” (max budget)
- xhigh → “ultrathink+” (GPT-5.2 + Codex models only)
- adaptive → provider-managed adaptive reasoning budget (supported for Anthropic Claude 4.6 model family)
- xhigh → “ultrathink+” (GPT-5.2 and Codex models, plus Anthropic Claude Opus 4.7)
- adaptive → provider-managed adaptive reasoning budget (supported for the Anthropic Claude 4.6 and Claude Opus 4.7 model families)
- `x-high`, `x_high`, `extra-high`, `extra high`, and `extra_high` map to `xhigh`.
- `highest`, `max` map to `high`.
- Provider notes:
- Anthropic Claude 4.6 models default to `adaptive` when no explicit thinking level is set.
- Anthropic Claude 4.6 and Opus 4.7 models default to `adaptive` when no explicit thinking level is set.
- Anthropic Claude Opus 4.7 maps `/think xhigh` to `output_config.effort: "xhigh"`.
- MiniMax (`minimax/*`) on the Anthropic-compatible streaming path defaults to `thinking: { type: "disabled" }` unless you explicitly set thinking in model params or request params. This avoids leaked `reasoning_content` deltas from MiniMax's non-native Anthropic stream format.
- Z.AI (`zai/*`) only supports binary thinking (`on`/`off`). Any non-`off` level is treated as `on` (mapped to `low`).
- Moonshot (`moonshot/*`) maps `/think off` to `thinking: { type: "disabled" }` and any non-`off` level to `thinking: { type: "enabled" }`. When thinking is enabled, Moonshot only accepts `tool_choice` `auto|none`; OpenClaw normalizes incompatible values to `auto`.
@@ -31,7 +32,7 @@ title: "Thinking Levels"
2. Session override (set by sending a directive-only message).
3. Per-agent default (`agents.list[].thinkingDefault` in config).
4. Global default (`agents.defaults.thinkingDefault` in config).
5. Fallback: `adaptive` for Anthropic Claude 4.6 models, `low` for other reasoning-capable models, `off` otherwise.
5. Fallback: `adaptive` for Anthropic Claude 4.6 and Opus 4.7 models, `low` for other reasoning-capable models, `off` otherwise.
## Setting a session default
@@ -104,8 +105,9 @@ title: "Thinking Levels"
- The web chat thinking selector mirrors the session's stored level from the inbound session store/config when the page loads.
- Picking another level writes the session override immediately via `sessions.patch`; it does not wait for the next send and it is not a one-shot `thinkingOnce` override.
- The first option is always `Default (<resolved level>)`, where the resolved default comes from the active session model: `adaptive` for Claude 4.6 on Anthropic/Bedrock, `low` for other reasoning-capable models, `off` otherwise.
- The first option is always `Default (<resolved level>)`, where the resolved default comes from the active session model: `adaptive` for Claude 4.6 and Opus 4.7 on Anthropic, `low` for other reasoning-capable models, `off` otherwise.
- The picker stays provider-aware:
- most providers show `off | minimal | low | medium | high | adaptive`
- Anthropic Claude Opus 4.7 shows `off | minimal | low | medium | high | xhigh | adaptive`
- Z.AI shows binary `off | on`
- `/think:<level>` still works and updates the same stored session level, so chat directives and the picker stay in sync.

View File

@@ -200,6 +200,18 @@ describe("anthropic provider replay hooks", () => {
modelId: "claude-opus-4-7",
} as never),
).toBe("adaptive");
// The provider hook must advertise xhigh support for Opus 4.7…
expect(
provider.supportsXHighThinking?.({
provider: "anthropic",
modelId: "claude-opus-4-7",
} as never),
).toBe(true);
// …but must not advertise it for Opus 4.6.
expect(
provider.supportsXHighThinking?.({
provider: "anthropic",
modelId: "claude-opus-4-6",
} as never),
).toBe(false);
});
it("resolves claude-cli synthetic oauth auth", async () => {

View File

@@ -260,8 +260,7 @@ function resolveAnthropicForwardCompatModel(
function shouldUseAnthropicAdaptiveThinkingDefault(modelId: string): boolean {
const lowerModelId = normalizeLowercaseStringOrEmpty(modelId);
return (
lowerModelId.startsWith(ANTHROPIC_OPUS_47_MODEL_ID) ||
lowerModelId.startsWith(ANTHROPIC_OPUS_47_DOT_MODEL_ID) ||
isAnthropicOpus47Model(lowerModelId) ||
lowerModelId.startsWith(ANTHROPIC_OPUS_46_MODEL_ID) ||
lowerModelId.startsWith(ANTHROPIC_OPUS_46_DOT_MODEL_ID) ||
lowerModelId.startsWith(ANTHROPIC_SONNET_46_MODEL_ID) ||
@@ -269,6 +268,14 @@ function shouldUseAnthropicAdaptiveThinkingDefault(modelId: string): boolean {
);
}
/**
 * True when the (case-insensitively normalized) model id names Claude Opus 4.7.
 * Opus 4.7 ships under both the hyphenated and dotted version prefixes.
 */
function isAnthropicOpus47Model(modelId: string): boolean {
  const normalized = normalizeLowercaseStringOrEmpty(modelId);
  const opus47Prefixes = [ANTHROPIC_OPUS_47_MODEL_ID, ANTHROPIC_OPUS_47_DOT_MODEL_ID];
  return opus47Prefixes.some((prefix) => normalized.startsWith(prefix));
}
function matchesAnthropicModernModel(modelId: string): boolean {
const lower = normalizeLowercaseStringOrEmpty(modelId);
return ANTHROPIC_MODERN_MODEL_PREFIXES.some((prefix) => lower.startsWith(prefix));
@@ -481,6 +488,7 @@ export function registerAnthropicPlugin(api: OpenClawPluginApi): void {
buildReplayPolicy: buildAnthropicReplayPolicy,
isModernModelRef: ({ modelId }) => matchesAnthropicModernModel(modelId),
resolveReasoningOutputMode: () => "native",
supportsXHighThinking: ({ modelId }) => isAnthropicOpus47Model(modelId),
wrapStreamFn: wrapAnthropicProviderStream,
resolveDefaultThinkingLevel: ({ modelId }) =>
matchesAnthropicModernModel(modelId) && shouldUseAnthropicAdaptiveThinkingDefault(modelId)

View File

@@ -471,4 +471,49 @@ describe("anthropic transport stream", () => {
undefined,
);
});
it("maps xhigh thinking effort for Claude Opus 4.7 transport runs", async () => {
// Minimal Opus 4.7 model fixture; cost/context values are placeholders.
const model = attachModelProviderRequestTransport(
{
id: "claude-opus-4-7",
name: "Claude Opus 4.7",
api: "anthropic-messages",
provider: "anthropic",
baseUrl: "https://api.anthropic.com",
reasoning: true,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 200000,
maxTokens: 8192,
} satisfies Model<"anthropic-messages">,
{
proxy: {
mode: "env-proxy",
},
},
);
const streamFn = createAnthropicMessagesTransportStreamFn();
// Drive one streaming run with the reasoning level set to "xhigh".
const stream = await Promise.resolve(
streamFn(
model,
{
messages: [{ role: "user", content: "Think extra hard." }],
} as Parameters<typeof streamFn>[1],
{
apiKey: "sk-ant-api",
reasoning: "xhigh",
} as Parameters<typeof streamFn>[2],
),
);
await stream.result();
// Opus 4.7 xhigh must surface as output_config.effort while thinking stays adaptive.
expect(anthropicMessagesStreamMock).toHaveBeenCalledWith(
expect.objectContaining({
thinking: { type: "adaptive" },
output_config: { effort: "xhigh" },
}),
undefined,
);
});
});

View File

@@ -59,6 +59,7 @@ type AnthropicTransportModel = Model<"anthropic-messages"> & {
// Options accepted by the Anthropic messages transport: base Anthropic options
// plus the reasoning/thinking-budget knobs from the generic stream options.
type AnthropicTransportOptions = AnthropicOptions &
Pick<SimpleStreamOptions, "reasoning" | "thinkingBudgets">;
// Base Anthropic effort levels widened with "xhigh" (accepted by Opus 4.7 only).
type AnthropicAdaptiveEffort = NonNullable<AnthropicOptions["effort"]> | "xhigh";
type TransportContentBlock =
| { type: "text"; text: string; index?: number }
@@ -98,19 +99,24 @@ type MutableAssistantOutput = {
errorMessage?: string;
};
/** True when the model id names Claude Opus 4.7 ("opus-4-7" or "opus-4.7"). */
function isClaudeOpus47Model(modelId: string): boolean {
  return ["opus-4-7", "opus-4.7"].some((marker) => modelId.includes(marker));
}
/** True when the model id names Claude Opus 4.6 ("opus-4-6" or "opus-4.6"). */
function isClaudeOpus46Model(modelId: string): boolean {
  return ["opus-4-6", "opus-4.6"].some((marker) => modelId.includes(marker));
}
/**
 * True when the model supports Anthropic's adaptive thinking budget:
 * Claude Opus 4.6/4.7 and Claude Sonnet 4.6 families.
 */
function supportsAdaptiveThinking(modelId: string): boolean {
  // The explicit "opus-4-6"/"opus-4.6" includes() checks were redundant —
  // isClaudeOpus46Model performs exactly those two substring tests.
  return (
    isClaudeOpus47Model(modelId) ||
    isClaudeOpus46Model(modelId) ||
    modelId.includes("sonnet-4-6") ||
    modelId.includes("sonnet-4.6")
  );
}
function mapThinkingLevelToEffort(
level: ThinkingLevel,
modelId: string,
): NonNullable<AnthropicOptions["effort"]> {
function mapThinkingLevelToEffort(level: ThinkingLevel, modelId: string): AnthropicAdaptiveEffort {
switch (level) {
case "minimal":
case "low":
@@ -118,7 +124,10 @@ function mapThinkingLevelToEffort(
case "medium":
return "medium";
case "xhigh":
return modelId.includes("opus-4-6") || modelId.includes("opus-4.6") ? "max" : "high";
if (isClaudeOpus47Model(modelId)) {
return "xhigh";
}
return isClaudeOpus46Model(modelId) ? "max" : "high";
default:
return "high";
}
@@ -616,7 +625,9 @@ function resolveAnthropicTransportOptions(
}
if (supportsAdaptiveThinking(model.id)) {
resolved.thinkingEnabled = true;
resolved.effort = mapThinkingLevelToEffort(options.reasoning, model.id);
resolved.effort = mapThinkingLevelToEffort(options.reasoning, model.id) as NonNullable<
AnthropicOptions["effort"]
>;
return resolved;
}
const adjusted = adjustMaxTokensForThinking({

View File

@@ -146,6 +146,22 @@ describe("createAnthropicVertexStreamFn", () => {
);
});
it("maps xhigh reasoning to xhigh effort for Opus 4.7", () => {
const streamFn = createAnthropicVertexStreamFn("vertex-project", "us-east5");
const model = makeModel({ id: "claude-opus-4-7", maxTokens: 64000 });
// Fire-and-forget: only the options forwarded to the mock matter here.
void streamFn(model, { messages: [] }, { reasoning: "xhigh" });
// Opus 4.7 on Vertex should forward effort "xhigh" with thinking enabled.
expect(hoisted.streamAnthropicMock).toHaveBeenCalledWith(
model,
{ messages: [] },
expect.objectContaining({
thinkingEnabled: true,
effort: "xhigh",
}),
);
});
it("applies Anthropic cache-boundary shaping before forwarding payload hooks", async () => {
const streamFn = createAnthropicVertexStreamFn("vertex-project", "us-east5");
const model = makeModel({ id: "claude-sonnet-4-6", maxTokens: 64000 });

View File

@@ -11,6 +11,38 @@ import {
} from "./anthropic-payload-policy.js";
// Base effort levels accepted by Anthropic models on Vertex.
type AnthropicVertexEffort = NonNullable<AnthropicOptions["effort"]>;
// Widened with "xhigh", which only Claude Opus 4.7 accepts.
type AnthropicVertexAdaptiveEffort = AnthropicVertexEffort | "xhigh";
/** True for Claude Opus 4.7 ids, matching the "-" and "." version spellings. */
function isClaudeOpus47Model(modelId: string): boolean {
  return /opus-4[-.]7/.test(modelId);
}
/** True for Claude Opus 4.6 ids, matching the "-" and "." version spellings. */
function isClaudeOpus46Model(modelId: string): boolean {
  return /opus-4[-.]6/.test(modelId);
}
/**
 * True when the model supports Anthropic's adaptive thinking budget on Vertex:
 * Claude Opus 4.6/4.7 and Claude Sonnet 4.6 families.
 */
function supportsAdaptiveThinking(modelId: string): boolean {
  if (isClaudeOpus47Model(modelId) || isClaudeOpus46Model(modelId)) {
    return true;
  }
  return modelId.includes("sonnet-4-6") || modelId.includes("sonnet-4.6");
}
/**
 * Translate a thinking level string into an Anthropic-on-Vertex effort value.
 * Unknown levels fall back to "high".
 */
function mapAnthropicAdaptiveEffort(
  reasoning: string,
  modelId: string,
): AnthropicVertexAdaptiveEffort {
  switch (reasoning) {
    case "minimal":
    case "low":
      return "low";
    case "medium":
      return "medium";
    case "high":
      return "high";
    case "xhigh":
      // Only Opus 4.7 accepts "xhigh"; Opus 4.6 caps at "max", others at "high".
      if (isClaudeOpus47Model(modelId)) {
        return "xhigh";
      }
      return isClaudeOpus46Model(modelId) ? "max" : "high";
    default:
      return "high";
  }
}
function resolveAnthropicVertexMaxTokens(params: {
modelMaxTokens: number | undefined;
@@ -110,22 +142,12 @@ export function createAnthropicVertexStreamFn(
};
if (options?.reasoning) {
const isAdaptive =
model.id.includes("opus-4-6") ||
model.id.includes("opus-4.6") ||
model.id.includes("sonnet-4-6") ||
model.id.includes("sonnet-4.6");
if (isAdaptive) {
if (supportsAdaptiveThinking(model.id)) {
opts.thinkingEnabled = true;
const effortMap: Record<string, AnthropicVertexEffort> = {
minimal: "low",
low: "low",
medium: "medium",
high: "high",
xhigh: model.id.includes("opus-4-6") || model.id.includes("opus-4.6") ? "max" : "high",
};
opts.effort = effortMap[options.reasoning] ?? "high";
opts.effort = mapAnthropicAdaptiveEffort(
options.reasoning,
model.id,
) as AnthropicVertexEffort;
} else {
opts.thinkingEnabled = true;
const budgets = options.thinkingBudgets;

View File

@@ -48,6 +48,15 @@ const ANTHROPIC_OPUS_CATALOG = [
},
];
// Single-entry catalog fixture describing Claude Opus 4.7 as reasoning-capable,
// used to exercise the thinking-default resolution for that model.
const ANTHROPIC_OPUS_47_CATALOG = [
{
provider: "anthropic",
id: "claude-opus-4-7",
name: "Claude Opus 4.7",
reasoning: true,
},
];
function resolveAnthropicOpusThinking(cfg: OpenClawConfig) {
return resolveThinkingDefault({
cfg,
@@ -57,6 +66,15 @@ function resolveAnthropicOpusThinking(cfg: OpenClawConfig) {
});
}
/** Resolve the default thinking level for an explicitly selected Opus 4.7 model. */
function resolveAnthropicOpus47Thinking(cfg: OpenClawConfig) {
  return resolveThinkingDefault({
    cfg,
    catalog: ANTHROPIC_OPUS_47_CATALOG,
    provider: "anthropic",
    model: "claude-opus-4-7",
  });
}
function createAgentFallbackConfig(params: {
primary?: string;
fallbacks?: string[];
@@ -1158,6 +1176,18 @@ describe("model-selection", () => {
expect(resolveAnthropicOpusThinking(cfg)).toBe("adaptive");
});
it("uses adaptive fallback for explicitly configured Anthropic Opus 4.7", () => {
// Only the primary model is configured; no thinking default is set anywhere.
const cfg = {
agents: {
defaults: {
model: { primary: "anthropic/claude-opus-4-7" },
},
},
} as OpenClawConfig;
// Opus 4.7 should fall back to "adaptive", mirroring the Claude 4.6 behavior.
expect(resolveAnthropicOpus47Thinking(cfg)).toBe("adaptive");
});
it("falls back to low when no provider thinking hook is active", () => {
const cfg = {} as OpenClawConfig;

View File

@@ -58,8 +58,9 @@ export function resolveThinkingDefault(params: {
normalizedProvider === "anthropic" &&
explicitModelConfigured &&
typeof catalogCandidate?.name === "string" &&
/4\.6\b/.test(catalogCandidate.name) &&
(normalizedModel.startsWith("claude-opus-4-6") ||
/4\.[67]\b/.test(catalogCandidate.name) &&
(normalizedModel.startsWith("claude-opus-4-7") ||
normalizedModel.startsWith("claude-opus-4-6") ||
normalizedModel.startsWith("claude-sonnet-4-6"))
) {
return "adaptive";

View File

@@ -81,6 +81,12 @@ describe("listThinkingLevels", () => {
expect(listThinkingLevels("demo", "demo-model")).toContain("xhigh");
});
it("uses provider runtime hooks for xhigh labels", () => {
// When the provider hook reports xhigh support, the label list must include it.
providerRuntimeMocks.resolveProviderXHighThinking.mockReturnValue(true);
expect(listThinkingLevelLabels("demo", "demo-model")).toContain("xhigh");
});
it("includes xhigh for provider-advertised models", () => {
providerRuntimeMocks.resolveProviderXHighThinking.mockImplementation(({ provider, context }) =>
(provider === "openai" && ["gpt-5.4", "gpt-5.4", "gpt-5.4-pro"].includes(context.modelId)) ||

View File

@@ -94,6 +94,9 @@ export function listThinkingLevelLabels(provider?: string | null, model?: string
if (isBinaryThinkingProvider(provider, model)) {
return ["off", "on"];
}
if (supportsXHighThinking(provider, model)) {
return listThinkingLevels(provider, model);
}
return listThinkingLevelLabelsFallback(provider, model);
}