fix(models): separate Codex harness from model choices (#71193)

* fix: separate Codex harness from model choices

* docs: note Codex harness model choice fix
This commit is contained in:
Peter Steinberger
2026-04-24 19:40:23 +01:00
committed by GitHub
parent dcf01ce72f
commit bc0f54bd04
11 changed files with 396 additions and 19 deletions

View File

@@ -46,6 +46,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Codex harness/models: keep legacy `codex/*` harness shorthand out of model picker and `/models` choice surfaces while migrating primary legacy refs to canonical `openai/*` plus explicit Codex harness config. (#71193) Thanks @vincentkoc.
- Plugins/runtime deps: respect explicit plugin and channel disablement when repairing bundled runtime dependencies, so doctor and health checks no longer install deps for disabled configured channels.
- Diagnostics: harden tool and model diagnostic events against hostile errors, blocking listeners, and unsafe stability reason fields. Thanks @vincentkoc.
- Plugins/onboarding: record local plugin install source metadata without duplicating raw absolute local paths in persisted `plugins.installs`, while preserving linked load-path cleanup. (#70970) Thanks @vincentkoc.

View File

@@ -369,7 +369,7 @@ Time format in system prompt. Default: `auto` (OS preference).
- For direct OpenAI Responses models, server-side compaction is enabled automatically. Use `params.responsesServerCompaction: false` to stop injecting `context_management`, or `params.responsesCompactThreshold` to override the threshold. See [OpenAI server-side compaction](/providers/openai#server-side-compaction-responses-api).
- `params`: global default provider parameters applied to all models. Set at `agents.defaults.params` (e.g. `{ cacheRetention: "long" }`).
- `params` merge precedence (config): `agents.defaults.params` (global base) is overridden by `agents.defaults.models["provider/model"].params` (per-model), then `agents.list[].params` (matching agent id) overrides by key. See [Prompt Caching](/reference/prompt-caching) for details.
- `embeddedHarness`: default low-level embedded agent runtime policy. Use `runtime: "auto"` to let registered plugin harnesses claim supported models, `runtime: "pi"` to force the built-in PI harness, or a registered harness id such as `runtime: "codex"`. Set `fallback: "none"` to disable automatic PI fallback.
- `embeddedHarness`: default low-level embedded agent runtime policy. Use `runtime: "auto"` to let registered plugin harnesses claim supported models, `runtime: "pi"` to force the built-in PI harness, or a registered harness id such as `runtime: "codex"`. Set `fallback: "none"` to disable automatic PI fallback. New Codex harness configs should keep model refs canonical as `openai/*` and select the harness here rather than using legacy `codex/*` model refs.
- Config writers that mutate these fields (for example `/models set`, `/models set-image`, and fallback add/remove commands) save canonical object form and preserve existing fallback lists when possible.
- `maxConcurrent`: max parallel agent runs across sessions (each session still serialized). Default: 4.

View File

@@ -3,7 +3,7 @@ summary: "Run OpenClaw embedded agent turns through the bundled Codex app-server
title: "Codex harness"
read_when:
- You want to use the bundled Codex app-server harness
- You need Codex model refs and config examples
- You need Codex harness config examples
- You want to disable PI fallback for Codex-only deployments
---
@@ -35,7 +35,8 @@ The harness is off by default. New configs should keep OpenAI model refs
canonical as `openai/gpt-*` and explicitly force
`embeddedHarness.runtime: "codex"` or `OPENCLAW_AGENT_RUNTIME=codex` when they
want native app-server execution. Legacy `codex/*` model refs still auto-select
the harness for compatibility.
the harness for compatibility, but they are not shown as normal model/provider
choices.
## Pick the right model prefix
@@ -54,10 +55,11 @@ GPT-5.5 is currently subscription/OAuth-only in OpenClaw. Use
app-server harness. Direct API-key access for `openai/gpt-5.5` is supported
once OpenAI enables GPT-5.5 on the public API.
Legacy `codex/gpt-*` refs remain accepted as compatibility aliases. New PI
Codex OAuth configs should use `openai-codex/gpt-*`; new native app-server
harness configs should use `openai/gpt-*` plus `embeddedHarness.runtime:
"codex"`.
Legacy `codex/gpt-*` refs remain accepted as compatibility aliases. Doctor
compatibility migration rewrites legacy primary `codex/*` refs to `openai/*`
and records the Codex harness policy separately. New PI Codex OAuth configs
should use `openai-codex/gpt-*`; new native app-server harness configs should
use `openai/gpt-*` plus `embeddedHarness.runtime: "codex"`.
`agents.defaults.imageModel` follows the same prefix split. Use
`openai-codex/gpt-*` when image understanding should run through the OpenAI

View File

@@ -0,0 +1,15 @@
import { normalizeProviderId } from "./provider-id.js";
// Provider ids that are harness selectors, not real model providers; they are
// kept out of every interactive model-picker surface.
const HIDDEN_MODEL_PICKER_PROVIDERS = new Set(["codex"]);

/**
 * True when the provider should be offered as a choice in model pickers.
 * The legacy virtual "codex" harness provider is hidden.
 */
export function isModelPickerVisibleProvider(provider: string): boolean {
  const canonical = normalizeProviderId(provider);
  return !HIDDEN_MODEL_PICKER_PROVIDERS.has(canonical);
}

/**
 * True when a `provider/model` ref should be offered in model pickers.
 * Refs without a provider prefix (no "/" or a leading "/") stay visible.
 */
export function isModelPickerVisibleModelRef(ref: string): boolean {
  const slash = ref.indexOf("/");
  if (slash < 1) {
    return true;
  }
  const provider = ref.slice(0, slash);
  return isModelPickerVisibleProvider(provider);
}

View File

@@ -207,6 +207,28 @@ describe("handleModelsCommand", () => {
});
});
// Regression test for #71193: even when the catalog and the configured model
// allowlist still contain legacy `codex/*` entries, the virtual "codex"
// harness provider must not show up as a provider choice in `/models` output.
it("hides the virtual Codex harness provider from /models menus", async () => {
  // Catalog exposes the same model under the hidden legacy provider and the
  // canonical "openai" provider.
  modelCatalogMocks.loadModelCatalog.mockResolvedValue([
    { provider: "codex", id: "gpt-5.5", name: "GPT-5.5" },
    { provider: "openai", id: "gpt-5.5", name: "GPT-5.5" },
  ]);
  const cfg = {
    agents: {
      defaults: {
        models: {
          // Legacy allowlist key that should be filtered from the menu.
          "codex/gpt-5.5": { alias: "legacy-codex" },
          "openai/gpt-5.5": { alias: "gpt" },
        },
      },
    },
  } satisfies Partial<OpenClawConfig>;
  const result = await handleModelsCommand(buildParams("/models", cfg), true);
  // Only the canonical provider is listed; "(1)" is its visible model count.
  expect(result?.reply?.text).toContain("- openai (1)");
  expect(result?.reply?.text).not.toContain("- codex");
});
it("lists models for /models <provider>", async () => {
const result = await handleModelsCommand(buildParams("/models openai"), true);

View File

@@ -1,6 +1,7 @@
import { resolveAgentDir, resolveSessionAgentId } from "../../agents/agent-scope.js";
import { resolveModelAuthLabel } from "../../agents/model-auth-label.js";
import { loadModelCatalog } from "../../agents/model-catalog.js";
import { isModelPickerVisibleProvider } from "../../agents/model-picker-visibility.js";
import {
buildAllowedModelSet,
buildModelAliasIndex,
@@ -77,6 +78,9 @@ export async function buildModelsProviderData(
const byProvider = new Map<string, Set<string>>();
const add = (p: string, m: string) => {
const key = normalizeProviderId(p);
if (!isModelPickerVisibleProvider(key)) {
return;
}
const set = byProvider.get(key) ?? new Set<string>();
set.add(m);
byProvider.set(key, set);

View File

@@ -318,6 +318,77 @@ describe("normalizeCompatibilityConfigValues", () => {
expect(res.changes).toEqual([]);
});
// Doctor compatibility migration: a legacy `codex/*` primary model ref is
// rewritten to `openai/*`, the Codex harness choice is recorded explicitly in
// `embeddedHarness`, and legacy allowlist keys / fallbacks / per-agent refs
// are migrated along with it. (#71193)
it("migrates legacy Codex primary refs to OpenAI refs plus explicit Codex harness", () => {
  const res = normalizeCompatibilityConfigValues({
    agents: {
      defaults: {
        embeddedHarness: { runtime: "auto", fallback: "pi" },
        model: {
          primary: "codex/gpt-5.5",
          fallbacks: ["anthropic/claude-sonnet-4-6", "codex/gpt-5.4-mini"],
        },
        models: {
          // Legacy key that collides with an existing canonical key: the
          // canonical entry's fields must win in the merge.
          "codex/gpt-5.5": { alias: "legacy-codex" },
          "openai/gpt-5.5": { alias: "gpt", params: { temperature: 0.2 } },
          "codex/gpt-5.4-mini": {},
        },
      },
      list: [
        {
          id: "reviewer",
          model: "codex/gpt-5.4-mini",
        },
      ],
    },
  } as unknown as OpenClawConfig);
  // Primary and codex/* fallbacks are rewritten; non-codex fallbacks untouched.
  expect(res.config.agents?.defaults?.model).toEqual({
    primary: "openai/gpt-5.5",
    fallbacks: ["anthropic/claude-sonnet-4-6", "openai/gpt-5.4-mini"],
  });
  // Harness runtime is forced to "codex"; the existing fallback policy stays.
  expect(res.config.agents?.defaults?.embeddedHarness).toEqual({
    runtime: "codex",
    fallback: "pi",
  });
  // Canonical entry wins on collision; standalone legacy keys are re-keyed.
  expect(res.config.agents?.defaults?.models).toEqual({
    "openai/gpt-5.5": { alias: "gpt", params: { temperature: 0.2 } },
    "openai/gpt-5.4-mini": {},
  });
  // Per-agent legacy primary also gets the explicit harness selection.
  expect(res.config.agents?.list?.[0]).toMatchObject({
    id: "reviewer",
    embeddedHarness: { runtime: "codex" },
    model: "openai/gpt-5.4-mini",
  });
  expect(res.changes).toEqual(
    expect.arrayContaining([
      "Moved agents.defaults.model legacy codex/* primary refs to openai/* with Codex harness.",
      "Moved agents.defaults.models legacy codex/* keys to openai/*.",
      "Moved agents.list.reviewer.model legacy codex/* primary refs to openai/* with Codex harness.",
    ]),
  );
});
// Only a legacy *primary* ref triggers the migration. A codex/* ref that
// appears solely in fallbacks (with a canonical primary) is left untouched,
// so the config round-trips unchanged and no change messages are emitted.
it("does not force Codex harness for legacy fallback-only refs", () => {
  const input = {
    agents: {
      defaults: {
        model: {
          primary: "openai/gpt-5.5",
          fallbacks: ["codex/gpt-5.4-mini"],
        },
        models: {
          "codex/gpt-5.4-mini": { alias: "legacy-codex" },
        },
      },
    },
  } as unknown as OpenClawConfig;
  const res = normalizeCompatibilityConfigValues(input);
  expect(res.config).toEqual(input);
  expect(res.changes).toEqual([]);
});
it("prefers legacy nano-banana env.GEMINI_API_KEY over skill apiKey during migration", () => {
const res = normalizeCompatibilityConfigValues({
skills: {

View File

@@ -1,6 +1,7 @@
import type { OpenClawConfig } from "../../../config/types.openclaw.js";
import {
normalizeLegacyBrowserConfig,
normalizeLegacyCodexHarnessModelRefs,
normalizeLegacyCrossContextMessageConfig,
normalizeLegacyMediaProviderOptions,
normalizeLegacyMistralModelMaxTokens,
@@ -36,6 +37,7 @@ export function normalizeBaseCompatibilityConfigValues(
next = normalizeLegacyNanoBananaSkill(next, changes);
next = normalizeLegacyTalkConfig(next, changes);
next = normalizeLegacyCodexHarnessModelRefs(next, changes);
next = normalizeLegacyCrossContextMessageConfig(next, changes);
next = normalizeLegacyMediaProviderOptions(next, changes);
return normalizeLegacyMistralModelMaxTokens(next, changes);

View File

@@ -192,6 +192,194 @@ type ModelProviderEntry = Partial<
>;
type ModelsConfigPatch = Partial<NonNullable<OpenClawConfig["models"]>>;
type ModelDefinitionEntry = NonNullable<ModelProviderEntry["models"]>[number];
// Shape of `agents.defaults.embeddedHarness` (also reused for per-agent
// overrides) derived from the config type so it tracks schema changes.
type AgentEmbeddedHarnessPatch = NonNullable<
  NonNullable<NonNullable<OpenClawConfig["agents"]>["defaults"]>["embeddedHarness"]
>;
// Legacy virtual provider id that used to double as a harness selector.
const LEGACY_CODEX_PROVIDER_ID = "codex";
// Canonical provider id that legacy codex/* refs are migrated onto.
const OPENAI_PROVIDER_ID = "openai";
// Runtime id recorded in embeddedHarness when migrating a legacy primary ref.
const CODEX_HARNESS_RUNTIME = "codex";
/**
 * Rewrite a legacy `codex/<model>` ref to its canonical `openai/<model>` form.
 *
 * Returns null (meaning "not a legacy Codex ref") when the trimmed ref has no
 * provider prefix, has an empty model segment after the slash, or names a
 * provider other than the legacy Codex id.
 */
function migrateLegacyCodexModelRef(raw: string): string | null {
  const ref = raw.trim();
  const slash = ref.indexOf("/");
  const hasProviderAndModel = slash > 0 && slash < ref.length - 1;
  if (!hasProviderAndModel) {
    return null;
  }
  const provider = normalizeProviderId(ref.slice(0, slash));
  if (provider !== LEGACY_CODEX_PROVIDER_ID) {
    return null;
  }
  return `${OPENAI_PROVIDER_ID}/${ref.slice(slash + 1)}`;
}
/**
 * Migrate an agent `model` config value (string shorthand or object form)
 * away from legacy codex/* refs.
 *
 * `codexPrimarySelected` is true only when the *primary* ref was a legacy
 * Codex ref — that is what later forces the explicit Codex harness. Fallback
 * refs are rewritten opportunistically but never select the harness on their
 * own.
 */
function normalizeLegacyCodexAgentModelConfig(raw: unknown): {
  value?: unknown;
  changed: boolean;
  codexPrimarySelected: boolean;
} {
  const unchanged = { value: raw, changed: false, codexPrimarySelected: false };
  // String shorthand: the whole value is the primary ref.
  if (typeof raw === "string") {
    const canonical = migrateLegacyCodexModelRef(raw);
    if (!canonical) {
      return unchanged;
    }
    return { value: canonical, changed: true, codexPrimarySelected: true };
  }
  if (!isRecord(raw)) {
    return unchanged;
  }
  // Object form: only a legacy primary triggers the migration.
  const primary = raw.primary;
  const canonicalPrimary =
    typeof primary === "string" ? migrateLegacyCodexModelRef(primary) : null;
  if (!canonicalPrimary) {
    return unchanged;
  }
  const next: Record<string, unknown> = { ...raw, primary: canonicalPrimary };
  if (Array.isArray(raw.fallbacks)) {
    // Rewrite legacy fallbacks too; leave non-string and non-legacy entries as-is.
    next.fallbacks = raw.fallbacks.map((entry) =>
      typeof entry === "string" ? (migrateLegacyCodexModelRef(entry) ?? entry) : entry,
    );
  }
  return { value: next, changed: true, codexPrimarySelected: true };
}
/**
 * Merge a migrated legacy model entry into an existing canonical entry.
 * When both are records, canonical (current) fields win key-by-key; otherwise
 * prefer whichever value is present, current first.
 */
function mergeModelEntry(legacyEntry: unknown, currentEntry: unknown): unknown {
  if (isRecord(legacyEntry) && isRecord(currentEntry)) {
    return { ...legacyEntry, ...currentEntry };
  }
  return currentEntry ?? legacyEntry;
}
/**
 * Re-key legacy codex/* entries in an agent `models` allowlist to openai/*.
 *
 * Only runs when `migrateCodexKeys` is set (i.e. a legacy primary ref selected
 * the Codex harness). Migrated keys are applied after all canonical keys so
 * that, on collision, the existing canonical entry's fields win.
 */
function normalizeLegacyCodexAllowlistModels(
  rawModels: unknown,
  migrateCodexKeys: boolean,
): {
  value?: unknown;
  changed: boolean;
} {
  if (!migrateCodexKeys || !isRecord(rawModels)) {
    return { value: rawModels, changed: false };
  }
  const next: Record<string, unknown> = {};
  const deferred: Array<[string, unknown]> = [];
  let sawLegacyKey = false;
  for (const [key, entry] of Object.entries(rawModels)) {
    const canonicalKey = migrateLegacyCodexModelRef(key);
    if (canonicalKey) {
      sawLegacyKey = true;
      // Defer so canonical entries already in `next` take precedence.
      deferred.push([canonicalKey, entry]);
    } else {
      next[key] = mergeModelEntry(entry, next[key]);
    }
  }
  for (const [canonicalKey, entry] of deferred) {
    next[canonicalKey] = mergeModelEntry(entry, next[canonicalKey]);
  }
  return { value: next, changed: sawLegacyKey };
}
/**
 * Force `embeddedHarness.runtime` to the Codex runtime, preserving any other
 * fields (e.g. `fallback`) the existing config carries. Reports `changed:
 * false` only when the runtime is already Codex.
 */
function ensureCodexEmbeddedHarness(raw: unknown): {
  value: AgentEmbeddedHarnessPatch;
  changed: boolean;
} {
  if (!isRecord(raw)) {
    // No usable existing config: synthesize a minimal harness selection.
    return { value: { runtime: CODEX_HARNESS_RUNTIME }, changed: true };
  }
  const alreadyCodex =
    normalizeOptionalLowercaseString(raw.runtime) === CODEX_HARNESS_RUNTIME;
  if (alreadyCodex) {
    return { value: raw as AgentEmbeddedHarnessPatch, changed: false };
  }
  const forced = { ...raw, runtime: CODEX_HARNESS_RUNTIME } as AgentEmbeddedHarnessPatch;
  return { value: forced, changed: true };
}
/**
 * Migrate one agent-config container (agents.defaults or an agents.list entry)
 * off legacy codex/* refs: rewrite model refs, re-key the allowlist, and —
 * only when the legacy primary selected Codex — record the explicit harness.
 * Change messages are appended to `changes` with the container's config path.
 */
function normalizeLegacyCodexAgentContainer(
  raw: Record<string, unknown>,
  path: string,
  changes: string[],
): { value: Record<string, unknown>; changed: boolean } {
  const next: Record<string, unknown> = { ...raw };
  let changed = false;

  const modelResult = normalizeLegacyCodexAgentModelConfig(raw.model);
  if (modelResult.changed) {
    changed = true;
    next.model = modelResult.value;
    changes.push(`Moved ${path}.model legacy codex/* primary refs to openai/* with Codex harness.`);
  }

  // Allowlist keys only migrate when the primary was a legacy Codex ref.
  const modelsResult = normalizeLegacyCodexAllowlistModels(
    raw.models,
    modelResult.codexPrimarySelected,
  );
  if (modelsResult.changed) {
    changed = true;
    next.models = modelsResult.value;
    changes.push(`Moved ${path}.models legacy codex/* keys to openai/*.`);
  }

  if (modelResult.codexPrimarySelected) {
    const harnessResult = ensureCodexEmbeddedHarness(raw.embeddedHarness);
    if (harnessResult.changed) {
      changed = true;
      next.embeddedHarness = harnessResult.value;
    }
  }

  return { value: next, changed };
}
/**
 * Compatibility pass that migrates legacy codex/* model refs across
 * agents.defaults and every agents.list entry, returning the input config
 * unchanged (same reference) when nothing needed migrating.
 */
export function normalizeLegacyCodexHarnessModelRefs(
  cfg: OpenClawConfig,
  changes: string[],
): OpenClawConfig {
  const rawAgents = cfg.agents;
  if (!isRecord(rawAgents)) {
    return cfg;
  }
  const nextAgents: Record<string, unknown> = { ...rawAgents };
  let changed = false;

  if (isRecord(rawAgents.defaults)) {
    const defaultsResult = normalizeLegacyCodexAgentContainer(
      rawAgents.defaults,
      "agents.defaults",
      changes,
    );
    if (defaultsResult.changed) {
      changed = true;
      nextAgents.defaults = defaultsResult.value;
    }
  }

  if (Array.isArray(rawAgents.list)) {
    const nextList = rawAgents.list.map((entry, index) => {
      if (!isRecord(entry)) {
        return entry;
      }
      // Change messages use the agent id when present, else the list index.
      const agentId = normalizeOptionalString(entry.id) ?? String(index);
      const agentResult = normalizeLegacyCodexAgentContainer(
        entry,
        `agents.list.${agentId}`,
        changes,
      );
      if (!agentResult.changed) {
        return entry;
      }
      changed = true;
      return agentResult.value;
    });
    // NOTE: matches original behavior — the list is replaced whenever any
    // migration happened (including defaults-only changes); unchanged entries
    // keep their identity inside the new array.
    if (changed) {
      nextAgents.list = nextList;
    }
  }

  return changed ? { ...cfg, agents: nextAgents as OpenClawConfig["agents"] } : cfg;
}
export function normalizeLegacyOpenAICodexModelsAddMetadata(
cfg: OpenClawConfig,

View File

@@ -152,6 +152,32 @@ describe("promptDefaultModel", () => {
);
});
// Default-model onboarding prompt: the virtual "codex" provider's refs must
// be filtered from the select options, while canonical "openai" and the PI
// OAuth "openai-codex" provider remain selectable. (#71193)
it("hides the virtual Codex harness provider from default model choices", async () => {
  loadModelCatalog.mockResolvedValue([
    { provider: "codex", id: "gpt-5.5", name: "GPT-5.5" },
    { provider: "openai", id: "gpt-5.5", name: "GPT-5.5" },
    { provider: "openai-codex", id: "gpt-5.5", name: "GPT-5.5" },
  ]);
  // Select stub just echoes the initial value; we only inspect the options.
  const select = vi.fn(async (params) => params.initialValue as never);
  const prompter = makePrompter({ select });
  await promptDefaultModel({
    config: { agents: { defaults: {} } } as OpenClawConfig,
    prompter,
    allowKeep: false,
    includeManual: false,
    ignoreAllowlist: true,
  });
  const optionValues = (select.mock.calls[0]?.[0]?.options ?? []).map(
    (option: { value: string }) => option.value,
  );
  expect(optionValues).toContain("openai/gpt-5.5");
  expect(optionValues).toContain("openai-codex/gpt-5.5");
  expect(optionValues).not.toContain("codex/gpt-5.5");
});
it("treats byteplus plan models as preferred-provider matches", async () => {
loadModelCatalog.mockResolvedValue([
{
@@ -402,6 +428,35 @@ describe("promptModelAllowlist", () => {
});
});
// Allowlist prompt: virtual Codex harness refs must be hidden both in the
// multiselect options (catalog side) and in the pre-selected initial values
// supplemented from configured allowlist keys. (#71193)
describe("Codex harness model picker visibility", () => {
  it("hides virtual Codex harness refs from allowlist choices and configured supplements", async () => {
    loadModelCatalog.mockResolvedValue([
      { provider: "codex", id: "gpt-5.5", name: "GPT-5.5" },
      { provider: "openai", id: "gpt-5.5", name: "GPT-5.5" },
    ]);
    const multiselect = createSelectAllMultiselect();
    const prompter = makePrompter({ multiselect });
    const config = {
      agents: {
        defaults: {
          models: {
            // Legacy configured key that must not resurface as a supplement.
            "codex/gpt-5.5": { alias: "legacy-codex" },
            "openai/gpt-5.5": { alias: "gpt" },
          },
        },
      },
    } as OpenClawConfig;
    await promptModelAllowlist({ config, prompter });
    const call = multiselect.mock.calls[0]?.[0];
    const optionValues = (call?.options ?? []).map((option: { value: string }) => option.value);
    expect(optionValues).toEqual(["openai/gpt-5.5"]);
    expect(call?.initialValues).toEqual(["openai/gpt-5.5"]);
  });
});
describe("router model filtering", () => {
it("filters internal router models in both default and allowlist prompts", async () => {
loadModelCatalog.mockResolvedValue(OPENROUTER_CATALOG);

View File

@@ -2,6 +2,10 @@ import { ensureAuthProfileStore, listProfilesForProvider } from "../agents/auth-
import { DEFAULT_MODEL, DEFAULT_PROVIDER } from "../agents/defaults.js";
import { hasUsableCustomProviderApiKey, resolveEnvApiKey } from "../agents/model-auth.js";
import { loadModelCatalog } from "../agents/model-catalog.js";
import {
isModelPickerVisibleModelRef,
isModelPickerVisibleProvider,
} from "../agents/model-picker-visibility.js";
import {
buildAllowedModelSet,
buildModelAliasIndex,
@@ -142,7 +146,11 @@ function addModelSelectOption(params: {
hasAuth: (provider: string) => boolean;
}) {
const key = modelKey(params.entry.provider, params.entry.id);
if (params.seen.has(key) || HIDDEN_ROUTER_MODELS.has(key)) {
if (
params.seen.has(key) ||
HIDDEN_ROUTER_MODELS.has(key) ||
!isModelPickerVisibleProvider(params.entry.provider)
) {
return;
}
const hints: string[] = [];
@@ -263,15 +271,13 @@ async function maybeFilterModelsByProvider(params: {
workspaceDir?: string;
env?: NodeJS.ProcessEnv;
}): Promise<typeof params.models> {
const providerIds = Array.from(new Set(params.models.map((entry) => entry.provider))).toSorted(
(a, b) => a.localeCompare(b),
let next = params.models.filter((entry) => isModelPickerVisibleProvider(entry.provider));
const providerIds = Array.from(new Set(next.map((entry) => entry.provider))).toSorted((a, b) =>
a.localeCompare(b),
);
const hasPreferredProvider = !!params.preferredProvider;
const shouldPromptProvider =
!hasPreferredProvider &&
providerIds.length > 1 &&
params.models.length > PROVIDER_FILTER_THRESHOLD;
let next = params.models;
!hasPreferredProvider && providerIds.length > 1 && next.length > PROVIDER_FILTER_THRESHOLD;
const matchesPreferredProvider = params.preferredProvider
? createPreferredProviderMatcher({
preferredProvider: params.preferredProvider,
@@ -473,6 +479,13 @@ export async function promptDefaultModel(
workspaceDir: params.workspaceDir,
env: params.env,
});
if (filteredModels.length === 0) {
return promptManualModel({
prompter: params.prompter,
allowBlank: allowKeep,
initialValue: configuredRaw || resolvedKey || undefined,
});
}
const matchesPreferredProvider = preferredProvider
? createPreferredProviderMatcher({
preferredProvider,
@@ -609,7 +622,7 @@ export async function promptModelAllowlist(params: {
]);
const initialKeys = allowedKeySet
? initialSeeds.filter((key) => allowedKeySet.has(key))
: initialSeeds;
: initialSeeds.filter(isModelPickerVisibleModelRef);
const allowlistProgress = params.prompter.progress("Loading available models");
let catalog: Awaited<ReturnType<typeof loadModelCatalog>>;
@@ -650,9 +663,11 @@ export async function promptModelAllowlist(params: {
const options: WizardSelectOption[] = [];
const seen = new Set<string>();
const allowedCatalog = allowedKeySet
? catalog.filter((entry) => allowedKeySet.has(modelKey(entry.provider, entry.id)))
: catalog;
const allowedCatalog = (
allowedKeySet
? catalog.filter((entry) => allowedKeySet.has(modelKey(entry.provider, entry.id)))
: catalog
).filter((entry) => isModelPickerVisibleProvider(entry.provider));
const filteredCatalog =
preferredProvider && allowedCatalog.some((entry) => matchesPreferredProvider?.(entry.provider))
? allowedCatalog.filter((entry) => matchesPreferredProvider?.(entry.provider))
@@ -668,7 +683,9 @@ export async function promptModelAllowlist(params: {
addModelSelectOption({ entry, options, seen, aliasIndex, hasAuth });
}
const supplementalKeys = allowedKeySet ? allowedKeys : existingKeys;
const supplementalKeys = (allowedKeySet ? allowedKeys : existingKeys).filter(
isModelPickerVisibleModelRef,
);
for (const key of supplementalKeys) {
if (seen.has(key)) {
continue;