fix: preserve OpenAI Codex xhigh thinking policy

This commit is contained in:
Peter Steinberger
2026-05-01 13:48:01 +01:00
parent 442f59508e
commit 94b4b3c644
10 changed files with 220 additions and 46 deletions

View File

@@ -22,6 +22,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Thinking/providers: resolve bundled provider thinking profiles through lightweight provider policy artifacts when startup-lazy providers are not active, so OpenAI Codex GPT-5.x keeps xhigh available in Gateway session validation. Fixes #74796. Thanks @maxschachere.
- Plugins/TTS: keep bundled speech-provider discovery available on cold package Gateway paths and add bundled plugin matrix runtime probes for health, readiness, RPC, TTS discovery, and post-ready runtime-deps watchdog coverage. Refs #75283. Thanks @vincentkoc.
- Google Meet/Twilio: show delegated voice call ID, DTMF, and intro-greeting state in `googlemeet doctor`, and avoid claiming DTMF was sent when no Meet PIN sequence was configured. Refs #72478. Thanks @DougButdorf.
- Voice Call/Twilio: send notify-mode initial TwiML directly in the outbound create-call request while keeping conversation and pre-connect DTMF calls webhook-driven, so one-shot notify calls do not depend on a first-answer webhook fetch. Supersedes #72758. Thanks @tyshepps.

View File

@@ -43,6 +43,7 @@ import {
findCatalogTemplate,
matchesExactOrPrefix,
} from "./shared.js";
import { resolveOpenAICodexThinkingProfile } from "./thinking-policy.js";
const PROVIDER_ID = "openai-codex";
const OPENAI_CODEX_BASE_URL = OPENAI_CODEX_RESPONSES_BASE_URL;
@@ -99,15 +100,6 @@ const OPENAI_CODEX_GPT_55_PRO_TEMPLATE_MODEL_IDS = [
] as const;
const OPENAI_CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex";
const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
const OPENAI_CODEX_XHIGH_MODEL_IDS = [
OPENAI_CODEX_GPT_55_MODEL_ID,
OPENAI_CODEX_GPT_55_PRO_MODEL_ID,
OPENAI_CODEX_GPT_54_MODEL_ID,
OPENAI_CODEX_GPT_54_PRO_MODEL_ID,
OPENAI_CODEX_GPT_53_MODEL_ID,
"gpt-5.2-codex",
"gpt-5.1-codex",
] as const;
const OPENAI_CODEX_MODERN_MODEL_IDS = [
OPENAI_CODEX_GPT_55_MODEL_ID,
OPENAI_CODEX_GPT_55_PRO_MODEL_ID,
@@ -507,18 +499,7 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
},
resolveDynamicModel: (ctx) => resolveCodexForwardCompatModel(ctx),
buildAuthDoctorHint: (ctx) => buildOpenAICodexAuthDoctorHint(ctx),
resolveThinkingProfile: ({ modelId }) => ({
levels: [
{ id: "off" },
{ id: "minimal" },
{ id: "low" },
{ id: "medium" },
{ id: "high" },
...(matchesExactOrPrefix(modelId, OPENAI_CODEX_XHIGH_MODEL_IDS)
? [{ id: "xhigh" as const }]
: []),
],
}),
resolveThinkingProfile: ({ modelId }) => resolveOpenAICodexThinkingProfile(modelId),
isModernModelRef: ({ modelId }) => matchesExactOrPrefix(modelId, OPENAI_CODEX_MODERN_MODEL_IDS),
preferRuntimeResolvedModel: (ctx) => {
if (normalizeProviderId(ctx.provider) !== PROVIDER_ID) {

View File

@@ -20,6 +20,7 @@ import {
findCatalogTemplate,
matchesExactOrPrefix,
} from "./shared.js";
import { resolveOpenAIThinkingProfile } from "./thinking-policy.js";
const PROVIDER_ID = "openai";
const OPENAI_GPT_55_MODEL_ID = "gpt-5.5";
@@ -59,15 +60,6 @@ const OPENAI_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.2"] as const;
const OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS = ["gpt-5.2-pro", "gpt-5.2"] as const;
const OPENAI_GPT_54_MINI_TEMPLATE_MODEL_IDS = ["gpt-5-mini"] as const;
const OPENAI_GPT_54_NANO_TEMPLATE_MODEL_IDS = ["gpt-5-nano", "gpt-5-mini"] as const;
const OPENAI_XHIGH_MODEL_IDS = [
OPENAI_GPT_55_MODEL_ID,
OPENAI_GPT_55_PRO_MODEL_ID,
OPENAI_GPT_54_MODEL_ID,
OPENAI_GPT_54_PRO_MODEL_ID,
OPENAI_GPT_54_MINI_MODEL_ID,
OPENAI_GPT_54_NANO_MODEL_ID,
"gpt-5.2",
] as const;
const OPENAI_MODERN_MODEL_IDS = [
OPENAI_GPT_55_MODEL_ID,
OPENAI_GPT_55_PRO_MODEL_ID,
@@ -239,18 +231,7 @@ export function buildOpenAIProvider(): ProviderPlugin {
matchesContextOverflowError: ({ errorMessage }) =>
/content_filter.*(?:prompt|input).*(?:too long|exceed)/i.test(errorMessage),
resolveReasoningOutputMode: () => "native",
resolveThinkingProfile: ({ modelId }) => ({
levels: [
{ id: "off" },
{ id: "minimal" },
{ id: "low" },
{ id: "medium" },
{ id: "high" },
...(matchesExactOrPrefix(modelId, OPENAI_XHIGH_MODEL_IDS)
? [{ id: "xhigh" as const }]
: []),
],
}),
resolveThinkingProfile: ({ modelId }) => resolveOpenAIThinkingProfile(modelId),
isModernModelRef: ({ modelId }) => matchesExactOrPrefix(modelId, OPENAI_MODERN_MODEL_IDS),
buildMissingAuthMessage: (ctx) => {
if (ctx.provider !== PROVIDER_ID || ctx.listProfileIds("openai-codex").length === 0) {

View File

@@ -1,5 +1,20 @@
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-model-types";
import {
resolveOpenAICodexThinkingProfile,
resolveOpenAIThinkingProfile,
} from "./thinking-policy.js";
/**
 * Provider policy surface hook: config normalization.
 * This provider family needs no rewriting, so the supplied provider config is
 * returned unchanged (same object reference, no copy).
 */
export function normalizeConfig(params: { provider: string; providerConfig: ModelProviderConfig }) {
  const { providerConfig } = params;
  return providerConfig;
}
/**
 * Provider policy surface hook: default thinking profile resolution.
 * Only the OpenAI and OpenAI Codex providers carry a bundled thinking policy
 * here; any other provider id yields null so callers fall back elsewhere.
 * Provider matching is trimmed and case-insensitive.
 */
export function resolveThinkingProfile(params: { provider: string; modelId: string }) {
  const providerId = params.provider.trim().toLowerCase();
  if (providerId === "openai") {
    return resolveOpenAIThinkingProfile(params.modelId);
  }
  if (providerId === "openai-codex") {
    return resolveOpenAICodexThinkingProfile(params.modelId);
  }
  return null;
}

View File

@@ -0,0 +1,63 @@
import type { ProviderThinkingProfile } from "openclaw/plugin-sdk/plugin-entry";
// Ordered thinking levels shared by every OpenAI-family model; an extra
// "xhigh" entry may be appended per model by buildOpenAIThinkingProfile.
// NOTE: `satisfies` validates the shape while keeping the literal tuple type.
const OPENAI_THINKING_BASE_LEVELS = [
{ id: "off" },
{ id: "minimal" },
{ id: "low" },
{ id: "medium" },
{ id: "high" },
] as const satisfies ProviderThinkingProfile["levels"];
// Model ids (matched exact-or-prefix, case-insensitive — see
// matchesExactOrPrefix) that unlock "xhigh" for the plain OpenAI provider.
const OPENAI_XHIGH_MODEL_IDS = [
"gpt-5.5",
"gpt-5.5-pro",
"gpt-5.4",
"gpt-5.4-pro",
"gpt-5.4-mini",
"gpt-5.4-nano",
"gpt-5.2",
] as const;
// Model ids that unlock "xhigh" for the OpenAI Codex provider.
const OPENAI_CODEX_XHIGH_MODEL_IDS = [
"gpt-5.5",
"gpt-5.5-pro",
"gpt-5.4",
"gpt-5.4-pro",
"gpt-5.3-codex",
"gpt-5.2-codex",
"gpt-5.1-codex",
] as const;
/** Canonicalizes a model id for comparison: trimmed and lower-cased. */
function normalizeModelId(value: string): string {
  const trimmed = value.trim();
  return trimmed.toLowerCase();
}
function matchesExactOrPrefix(id: string, values: readonly string[]): boolean {
const normalizedId = normalizeModelId(id);
return values.some((value) => {
const normalizedValue = normalizeModelId(value);
return normalizedId === normalizedValue || normalizedId.startsWith(normalizedValue);
});
}
/**
 * Builds a thinking profile from the shared base levels, appending "xhigh"
 * when the model id is on the given allowlist (exact-or-prefix match).
 */
function buildOpenAIThinkingProfile(params: {
  modelId: string;
  xhighModelIds: readonly string[];
}): ProviderThinkingProfile {
  const supportsXHigh = matchesExactOrPrefix(params.modelId, params.xhighModelIds);
  const extraLevels = supportsXHigh ? [{ id: "xhigh" as const }] : [];
  return { levels: [...OPENAI_THINKING_BASE_LEVELS, ...extraLevels] };
}
/** Thinking profile for the plain OpenAI provider ("xhigh" per allowlist). */
export function resolveOpenAIThinkingProfile(modelId: string): ProviderThinkingProfile {
  return buildOpenAIThinkingProfile({
    modelId,
    xhighModelIds: OPENAI_XHIGH_MODEL_IDS,
  });
}
/** Thinking profile for the OpenAI Codex provider ("xhigh" per allowlist). */
export function resolveOpenAICodexThinkingProfile(modelId: string): ProviderThinkingProfile {
  return buildOpenAIThinkingProfile({
    modelId,
    xhighModelIds: OPENAI_CODEX_XHIGH_MODEL_IDS,
  });
}

View File

@@ -226,6 +226,21 @@ describe("gateway session utils", () => {
expect(row.thinkingLevels?.map((level) => level.id)).toContain("xhigh");
});
// Regression for #74796: "xhigh" must surface for openai-codex/gpt-5.5 even
// when the startup-lazy provider plugin is inactive and no catalog is loaded,
// via the bundled provider policy fallback.
test("session defaults and rows expose bundled startup-lazy provider thinking without catalog", () => {
const cfg = createModelDefaultsConfig({ primary: "openai-codex/gpt-5.5" });
const defaults = getSessionDefaults(cfg);
// Empty store/path: thinking levels cannot come from stored session state.
const row = buildGatewaySessionRow({
cfg,
storePath: "",
store: {},
key: "main",
});
expect(defaults.thinkingLevels?.map((level) => level.id)).toContain("xhigh");
expect(row.thinkingLevels?.map((level) => level.id)).toContain("xhigh");
});
test("session defaults use configured thinking default", () => {
const defaults = getSessionDefaults({
agents: {

View File

@@ -429,6 +429,27 @@ describe("gateway sessions patch", () => {
expect(entry.thinkingLevel).toBe("xhigh");
});
// Regression for #74796: a session patch setting thinkingLevel "xhigh" must be
// accepted when only the bundled provider policy is available — the empty
// catalog stub forces the no-catalog fallback path.
test("accepts xhigh thinking patches from bundled startup-lazy provider policy without catalog", async () => {
const entry = expectPatchOk(
await runPatch({
cfg: {
agents: {
defaults: {
model: { primary: "openai-codex/gpt-5.5" },
},
},
} as OpenClawConfig,
patch: {
key: MAIN_SESSION_KEY,
thinkingLevel: "xhigh",
},
// Simulates a cold Gateway with no model catalog entries.
loadGatewayModelCatalog: async () => [],
}),
);
expect(entry.thinkingLevel).toBe("xhigh");
});
test("sets spawnedBy for ACP sessions", async () => {
const entry = expectPatchOk(
await runPatch({

View File

@@ -26,6 +26,28 @@ describe("provider public artifacts", () => {
).toBe(providerConfig);
});
// The policy surface for "openai-codex" is resolved even when no bundled
// plugin directory carries that exact name, by scanning manifest `providers`
// arrays for the owning plugin.
it("resolves multi-provider policy artifacts by manifest-owned provider id", () => {
const surface = resolveBundledProviderPolicySurface("openai-codex");
expect(surface?.resolveThinkingProfile).toBeTypeOf("function");
// gpt-5.5 is on the Codex xhigh allowlist...
expect(
surface
?.resolveThinkingProfile?.({
provider: "openai-codex",
modelId: "gpt-5.5",
})
?.levels.map((level) => level.id),
).toContain("xhigh");
// ...while gpt-4.1 is not.
expect(
surface
?.resolveThinkingProfile?.({
provider: "openai-codex",
modelId: "gpt-4.1",
})
?.levels.map((level) => level.id),
).not.toContain("xhigh");
});
it("loads provider policy surfaces without staging runtime deps", async () => {
const loadBundledPluginPublicArtifactModuleSync = vi.fn(() => ({
normalizeConfig: (ctx: { providerConfig: ModelProviderConfig }) => ctx.providerConfig,

View File

@@ -1,14 +1,22 @@
import fs from "node:fs";
import path from "node:path";
import { normalizeProviderId } from "../agents/provider-id.js";
import type { ModelProviderConfig } from "../config/types.js";
import type { OpenClawConfig } from "../config/types.openclaw.js";
import { resolveBundledPluginsDir } from "./bundled-dir.js";
import type {
ProviderApplyConfigDefaultsContext,
ProviderNormalizeConfigContext,
ProviderResolveConfigApiKeyContext,
} from "./provider-config-context.types.js";
import type {
ProviderDefaultThinkingPolicyContext,
ProviderThinkingProfile,
} from "./provider-thinking.types.js";
import { loadBundledPluginPublicArtifactModuleSync } from "./public-surface-loader.js";
// Artifact file names probed inside a bundled plugin for a provider policy surface.
const PROVIDER_POLICY_ARTIFACT_CANDIDATES = ["provider-policy-api.js"] as const;
// Memoizes "<pluginsDir>::<providerId>" -> owning plugin dir name (null = none found).
const providerPolicyPluginIdsByProviderId = new Map<string, string | null>();
export type BundledProviderPolicySurface = {
normalizeConfig?: (ctx: ProviderNormalizeConfigContext) => ModelProviderConfig | null | undefined;
@@ -16,6 +24,9 @@ export type BundledProviderPolicySurface = {
ctx: ProviderApplyConfigDefaultsContext,
) => OpenClawConfig | null | undefined;
resolveConfigApiKey?: (ctx: ProviderResolveConfigApiKeyContext) => string | null | undefined;
resolveThinkingProfile?: (
ctx: ProviderDefaultThinkingPolicyContext,
) => ProviderThinkingProfile | null | undefined;
};
function hasProviderPolicyHook(
@@ -24,7 +35,8 @@ function hasProviderPolicyHook(
return (
typeof mod.normalizeConfig === "function" ||
typeof mod.applyConfigDefaults === "function" ||
typeof mod.resolveConfigApiKey === "function"
typeof mod.resolveConfigApiKey === "function" ||
typeof mod.resolveThinkingProfile === "function"
);
}
@@ -54,6 +66,52 @@ function tryLoadBundledProviderPolicySurface(
return null;
}
/**
 * Finds the bundled plugin directory that owns `providerId`, by scanning each
 * plugin's openclaw.plugin.json manifest for a matching `providers` entry.
 * Directories are visited in localeCompare order so the winner is
 * deterministic. Hits AND misses are memoized per (pluginsDir, providerId).
 */
function resolveBundledProviderPolicyPluginId(providerId: string): string | null {
  const normalizedProviderId = normalizeProviderId(providerId);
  if (!normalizedProviderId) {
    return null;
  }
  const bundledPluginsDir = resolveBundledPluginsDir();
  const cacheKey = `${bundledPluginsDir ?? "<none>"}::${normalizedProviderId}`;
  if (providerPolicyPluginIdsByProviderId.has(cacheKey)) {
    return providerPolicyPluginIdsByProviderId.get(cacheKey) ?? null;
  }
  // No bundled plugins dir at all: cache the miss so we don't re-stat.
  if (!bundledPluginsDir || !fs.existsSync(bundledPluginsDir)) {
    providerPolicyPluginIdsByProviderId.set(cacheKey, null);
    return null;
  }
  const pluginDirNames = fs
    .readdirSync(bundledPluginsDir, { withFileTypes: true })
    .filter((dirent) => dirent.isDirectory())
    .map((dirent) => dirent.name)
    .toSorted((left, right) => left.localeCompare(right));
  for (const pluginDirName of pluginDirNames) {
    const manifestPath = path.join(bundledPluginsDir, pluginDirName, "openclaw.plugin.json");
    if (!fs.existsSync(manifestPath)) {
      continue;
    }
    let parsedManifest: unknown;
    try {
      parsedManifest = JSON.parse(fs.readFileSync(manifestPath, "utf-8"));
    } catch {
      // Unreadable/invalid manifest: skip this plugin rather than fail the scan.
      continue;
    }
    const rawProviders = (parsedManifest as { providers?: unknown }).providers;
    const providerList = Array.isArray(rawProviders) ? rawProviders : [];
    const ownsProvider = providerList.some(
      (candidate) =>
        typeof candidate === "string" && normalizeProviderId(candidate) === normalizedProviderId,
    );
    if (ownsProvider) {
      providerPolicyPluginIdsByProviderId.set(cacheKey, pluginDirName);
      return pluginDirName;
    }
  }
  providerPolicyPluginIdsByProviderId.set(cacheKey, null);
  return null;
}
export function resolveBundledProviderPolicySurface(
providerId: string,
): BundledProviderPolicySurface | null {
@@ -61,5 +119,10 @@ export function resolveBundledProviderPolicySurface(
if (!normalizedProviderId) {
return null;
}
return tryLoadBundledProviderPolicySurface(normalizedProviderId);
return (
tryLoadBundledProviderPolicySurface(normalizedProviderId) ??
tryLoadBundledProviderPolicySurface(
resolveBundledProviderPolicyPluginId(normalizedProviderId) ?? normalizedProviderId,
)
);
}

View File

@@ -1,4 +1,5 @@
import { normalizeProviderId } from "../agents/provider-id.js";
import { resolveBundledProviderPolicySurface } from "./provider-public-artifacts.js";
import type {
ProviderDefaultThinkingPolicyContext,
ProviderThinkingProfile,
@@ -8,6 +9,7 @@ import type {
type ThinkingProviderPlugin = {
id: string;
aliases?: string[];
hookAliases?: string[];
isBinaryThinking?: (ctx: ProviderThinkingPolicyContext) => boolean | undefined;
supportsXHighThinking?: (ctx: ProviderThinkingPolicyContext) => boolean | undefined;
resolveThinkingProfile?: (
@@ -36,7 +38,9 @@ function matchesProviderId(provider: ThinkingProviderPlugin, providerId: string)
if (normalizeProviderId(provider.id) === normalized) {
return true;
}
return (provider.aliases ?? []).some((alias) => normalizeProviderId(alias) === normalized);
return [...(provider.aliases ?? []), ...(provider.hookAliases ?? [])].some(
(alias) => normalizeProviderId(alias) === normalized,
);
}
function resolveActiveThinkingProvider(providerId: string): ThinkingProviderPlugin | undefined {
@@ -72,7 +76,15 @@ export function resolveProviderXHighThinking(
export function resolveProviderThinkingProfile(
params: ThinkingHookParams<ProviderDefaultThinkingPolicyContext>,
) {
return resolveActiveThinkingProvider(params.provider)?.resolveThinkingProfile?.(params.context);
const activeProfile = resolveActiveThinkingProvider(params.provider)?.resolveThinkingProfile?.(
params.context,
);
if (activeProfile) {
return activeProfile;
}
return resolveBundledProviderPolicySurface(params.provider)?.resolveThinkingProfile?.(
params.context,
);
}
export function resolveProviderDefaultThinkingLevel(