diff --git a/extensions/openai/cli-backend.ts b/extensions/openai/cli-backend.ts
index ed24cfe4c55..7c29b8f8a1c 100644
--- a/extensions/openai/cli-backend.ts
+++ b/extensions/openai/cli-backend.ts
@@ -6,7 +6,7 @@ import {
 import { prepareOpenAICodexCliExecution } from "./openai-codex-cli-bridge.js";
 
 const OPENAI_CODEX_DEFAULT_PROFILE_ID = "openai-codex:default";
-const CODEX_CLI_DEFAULT_MODEL_REF = "codex-cli/gpt-5.4";
+const CODEX_CLI_DEFAULT_MODEL_REF = "codex-cli/gpt-5.5";
 
 export function buildOpenAICodexCliBackend(): CliBackendPlugin {
   return {
diff --git a/extensions/openai/default-models.ts b/extensions/openai/default-models.ts
index eef4757b9d1..7034c2053c0 100644
--- a/extensions/openai/default-models.ts
+++ b/extensions/openai/default-models.ts
@@ -4,8 +4,8 @@ import {
   type OpenClawConfig,
 } from "openclaw/plugin-sdk/provider-onboard";
 
-export const OPENAI_DEFAULT_MODEL = "openai/gpt-5.4";
-export const OPENAI_CODEX_DEFAULT_MODEL = "openai-codex/gpt-5.4";
+export const OPENAI_DEFAULT_MODEL = "openai/gpt-5.5";
+export const OPENAI_CODEX_DEFAULT_MODEL = "openai-codex/gpt-5.5";
 export const OPENAI_DEFAULT_IMAGE_MODEL = "gpt-image-2";
 export const OPENAI_DEFAULT_TTS_MODEL = "gpt-4o-mini-tts";
 export const OPENAI_DEFAULT_TTS_VOICE = "alloy";
diff --git a/extensions/openai/media-understanding-provider.ts b/extensions/openai/media-understanding-provider.ts
index 05ac7906f1e..3fb89248946 100644
--- a/extensions/openai/media-understanding-provider.ts
+++ b/extensions/openai/media-understanding-provider.ts
@@ -34,7 +34,7 @@ export const openaiMediaUnderstandingProvider: MediaUnderstandingProvider = {
 export const openaiCodexMediaUnderstandingProvider: MediaUnderstandingProvider = {
   id: "openai-codex",
   capabilities: ["image"],
-  defaultModels: { image: "gpt-5.4" },
+  defaultModels: { image: "gpt-5.5" },
   describeImage: describeImageWithModel,
   describeImages: describeImagesWithModel,
 };
diff --git a/extensions/openai/openai-codex-provider.ts b/extensions/openai/openai-codex-provider.ts
index 9b5c89a5f23..84c31e7fb25 100644
--- a/extensions/openai/openai-codex-provider.ts
+++ b/extensions/openai/openai-codex-provider.ts
@@ -44,14 +44,30 @@ const PROVIDER_ID = "openai-codex";
 const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api/codex";
 const OPENAI_CODEX_LOGIN_ASSISTANT_PRIORITY = -30;
 const OPENAI_CODEX_DEVICE_PAIRING_ASSISTANT_PRIORITY = -10;
+const OPENAI_CODEX_GPT_55_MODEL_ID = "gpt-5.5";
+const OPENAI_CODEX_GPT_55_PRO_MODEL_ID = "gpt-5.5-pro";
 const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
 const OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID = "gpt-5.4-codex";
 const OPENAI_CODEX_GPT_54_PRO_MODEL_ID = "gpt-5.4-pro";
 const OPENAI_CODEX_GPT_54_MINI_MODEL_ID = "gpt-5.4-mini";
+const OPENAI_CODEX_GPT_55_NATIVE_CONTEXT_TOKENS = 1_000_000;
+const OPENAI_CODEX_GPT_55_DEFAULT_CONTEXT_TOKENS = 272_000;
 const OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS = 1_050_000;
 const OPENAI_CODEX_GPT_54_DEFAULT_CONTEXT_TOKENS = 272_000;
 const OPENAI_CODEX_GPT_54_MINI_CONTEXT_TOKENS = 272_000;
 const OPENAI_CODEX_GPT_54_MAX_TOKENS = 128_000;
+const OPENAI_CODEX_GPT_55_COST = {
+  input: 5,
+  output: 30,
+  cacheRead: 0,
+  cacheWrite: 0,
+} as const;
+const OPENAI_CODEX_GPT_55_PRO_COST = {
+  input: 30,
+  output: 180,
+  cacheRead: 0,
+  cacheWrite: 0,
+} as const;
 const OPENAI_CODEX_GPT_54_COST = {
   input: 2.5,
   output: 15,
@@ -76,6 +92,11 @@ const OPENAI_CODEX_GPT_54_CATALOG_SYNTH_TEMPLATE_MODEL_IDS = [
   ...OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS,
   OPENAI_CODEX_GPT_54_MODEL_ID,
 ] as const;
+const OPENAI_CODEX_GPT_55_TEMPLATE_MODEL_IDS = [
+  OPENAI_CODEX_GPT_54_MODEL_ID,
+  OPENAI_CODEX_GPT_54_PRO_MODEL_ID,
+  ...OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS,
+] as const;
 const OPENAI_CODEX_GPT_54_MINI_TEMPLATE_MODEL_IDS = [
   OPENAI_CODEX_GPT_54_MODEL_ID,
   "gpt-5.1-codex-mini",
@@ -87,6 +108,8 @@ const OPENAI_CODEX_GPT_53_SPARK_CONTEXT_TOKENS = 128_000;
 const OPENAI_CODEX_GPT_53_SPARK_MAX_TOKENS = 128_000;
 const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
 const OPENAI_CODEX_XHIGH_MODEL_IDS = [
+  OPENAI_CODEX_GPT_55_MODEL_ID,
+  OPENAI_CODEX_GPT_55_PRO_MODEL_ID,
   OPENAI_CODEX_GPT_54_MODEL_ID,
   OPENAI_CODEX_GPT_54_PRO_MODEL_ID,
   OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
@@ -96,6 +119,8 @@ const OPENAI_CODEX_XHIGH_MODEL_IDS = [
   "gpt-5.1-codex",
 ] as const;
 const OPENAI_CODEX_MODERN_MODEL_IDS = [
+  OPENAI_CODEX_GPT_55_MODEL_ID,
+  OPENAI_CODEX_GPT_55_PRO_MODEL_ID,
   OPENAI_CODEX_GPT_54_MODEL_ID,
   OPENAI_CODEX_GPT_54_PRO_MODEL_ID,
   OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
@@ -169,7 +194,26 @@ function resolveCodexForwardCompatModel(ctx: ProviderResolveDynamicModelContext)
   let templateIds: readonly string[];
   let patch: Parameters[0]["patch"];
-  if (lower === OPENAI_CODEX_GPT_54_MODEL_ID || lower === OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID) {
+  if (lower === OPENAI_CODEX_GPT_55_MODEL_ID) {
+    templateIds = OPENAI_CODEX_GPT_55_TEMPLATE_MODEL_IDS;
+    patch = {
+      contextWindow: OPENAI_CODEX_GPT_55_NATIVE_CONTEXT_TOKENS,
+      contextTokens: OPENAI_CODEX_GPT_55_DEFAULT_CONTEXT_TOKENS,
+      maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
+      cost: OPENAI_CODEX_GPT_55_COST,
+    };
+  } else if (lower === OPENAI_CODEX_GPT_55_PRO_MODEL_ID) {
+    templateIds = OPENAI_CODEX_GPT_55_TEMPLATE_MODEL_IDS;
+    patch = {
+      contextWindow: OPENAI_CODEX_GPT_55_NATIVE_CONTEXT_TOKENS,
+      contextTokens: OPENAI_CODEX_GPT_55_DEFAULT_CONTEXT_TOKENS,
+      maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
+      cost: OPENAI_CODEX_GPT_55_PRO_COST,
+    };
+  } else if (
+    lower === OPENAI_CODEX_GPT_54_MODEL_ID ||
+    lower === OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID
+  ) {
     templateIds = OPENAI_CODEX_GPT_54_CATALOG_SYNTH_TEMPLATE_MODEL_IDS;
     patch = {
       contextWindow: OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS,
@@ -476,6 +520,11 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
       providerId: PROVIDER_ID,
       templateIds: OPENAI_CODEX_GPT_54_CATALOG_SYNTH_TEMPLATE_MODEL_IDS,
     });
+    const gpt55Template = findCatalogTemplate({
+      entries: ctx.entries,
+      providerId: PROVIDER_ID,
+      templateIds: OPENAI_CODEX_GPT_55_TEMPLATE_MODEL_IDS,
+    });
     const gpt54MiniTemplate = findCatalogTemplate({
       entries: ctx.entries,
       providerId: PROVIDER_ID,
@@ -487,6 +536,22 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
       templateIds: [OPENAI_CODEX_GPT_53_MODEL_ID, ...OPENAI_CODEX_TEMPLATE_MODEL_IDS],
     });
     return [
+      buildOpenAISyntheticCatalogEntry(gpt55Template, {
+        id: OPENAI_CODEX_GPT_55_MODEL_ID,
+        reasoning: true,
+        input: ["text", "image"],
+        contextWindow: OPENAI_CODEX_GPT_55_NATIVE_CONTEXT_TOKENS,
+        contextTokens: OPENAI_CODEX_GPT_55_DEFAULT_CONTEXT_TOKENS,
+        cost: OPENAI_CODEX_GPT_55_COST,
+      }),
+      buildOpenAISyntheticCatalogEntry(gpt55Template, {
+        id: OPENAI_CODEX_GPT_55_PRO_MODEL_ID,
+        reasoning: true,
+        input: ["text", "image"],
+        contextWindow: OPENAI_CODEX_GPT_55_NATIVE_CONTEXT_TOKENS,
+        contextTokens: OPENAI_CODEX_GPT_55_DEFAULT_CONTEXT_TOKENS,
+        cost: OPENAI_CODEX_GPT_55_PRO_COST,
+      }),
       buildOpenAISyntheticCatalogEntry(gpt54Template, {
         id: OPENAI_CODEX_GPT_54_MODEL_ID,
         reasoning: true,
diff --git a/extensions/openai/openai-provider.ts b/extensions/openai/openai-provider.ts
index a49507828a8..b52a87525aa 100644
--- a/extensions/openai/openai-provider.ts
+++ b/extensions/openai/openai-provider.ts
@@ -22,15 +22,21 @@ import {
 } from "./shared.js";
 
 const PROVIDER_ID = "openai";
+const OPENAI_GPT_55_MODEL_ID = "gpt-5.5";
+const OPENAI_GPT_55_PRO_MODEL_ID = "gpt-5.5-pro";
 const OPENAI_GPT_54_MODEL_ID = "gpt-5.4";
 const OPENAI_GPT_54_PRO_MODEL_ID = "gpt-5.4-pro";
 const OPENAI_GPT_54_MINI_MODEL_ID = "gpt-5.4-mini";
 const OPENAI_GPT_54_NANO_MODEL_ID = "gpt-5.4-nano";
+const OPENAI_GPT_55_CONTEXT_TOKENS = 1_000_000;
+const OPENAI_GPT_55_PRO_CONTEXT_TOKENS = 1_000_000;
 const OPENAI_GPT_54_CONTEXT_TOKENS = 1_050_000;
 const OPENAI_GPT_54_PRO_CONTEXT_TOKENS = 1_050_000;
 const OPENAI_GPT_54_MINI_CONTEXT_TOKENS = 400_000;
 const OPENAI_GPT_54_NANO_CONTEXT_TOKENS = 400_000;
 const OPENAI_GPT_54_MAX_TOKENS = 128_000;
+const OPENAI_GPT_55_COST = { input: 5, output: 30, cacheRead: 0, cacheWrite: 0 } as const;
+const OPENAI_GPT_55_PRO_COST = { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 } as const;
 const OPENAI_GPT_54_COST = { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 } as const;
 const OPENAI_GPT_54_PRO_COST = { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 } as const;
 const OPENAI_GPT_54_MINI_COST = {
@@ -45,11 +51,20 @@ const OPENAI_GPT_54_NANO_COST = {
   cacheRead: 0.02,
   cacheWrite: 0,
 } as const;
+const OPENAI_GPT_55_TEMPLATE_MODEL_IDS = [OPENAI_GPT_54_MODEL_ID, "gpt-5.2"] as const;
+const OPENAI_GPT_55_PRO_TEMPLATE_MODEL_IDS = [
+  OPENAI_GPT_54_PRO_MODEL_ID,
+  OPENAI_GPT_54_MODEL_ID,
+  "gpt-5.2-pro",
+  "gpt-5.2",
+] as const;
 const OPENAI_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.2"] as const;
 const OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS = ["gpt-5.2-pro", "gpt-5.2"] as const;
 const OPENAI_GPT_54_MINI_TEMPLATE_MODEL_IDS = ["gpt-5-mini"] as const;
 const OPENAI_GPT_54_NANO_TEMPLATE_MODEL_IDS = ["gpt-5-nano", "gpt-5-mini"] as const;
 const OPENAI_XHIGH_MODEL_IDS = [
+  "gpt-5.5",
+  "gpt-5.5-pro",
   "gpt-5.4",
   "gpt-5.4-pro",
   "gpt-5.4-mini",
@@ -57,6 +72,8 @@ const OPENAI_XHIGH_MODEL_IDS = [
   "gpt-5.2",
 ] as const;
 const OPENAI_MODERN_MODEL_IDS = [
+  "gpt-5.5",
+  "gpt-5.5-pro",
   "gpt-5.4",
   "gpt-5.4-pro",
   "gpt-5.4-mini",
@@ -97,14 +114,38 @@ function normalizeOpenAITransport(model: ProviderRuntimeModel): ProviderRuntimeM
   };
 }
 
-function resolveOpenAIGpt54ForwardCompatModel(
+function resolveOpenAIGptForwardCompatModel(
   ctx: ProviderResolveDynamicModelContext,
 ): ProviderRuntimeModel | undefined {
   const trimmedModelId = ctx.modelId.trim();
   const lower = normalizeLowercaseStringOrEmpty(trimmedModelId);
   let templateIds: readonly string[];
   let patch: Partial;
-  if (lower === OPENAI_GPT_54_MODEL_ID) {
+  if (lower === OPENAI_GPT_55_MODEL_ID) {
+    templateIds = OPENAI_GPT_55_TEMPLATE_MODEL_IDS;
+    patch = {
+      api: "openai-responses",
+      provider: PROVIDER_ID,
+      baseUrl: "https://api.openai.com/v1",
+      reasoning: true,
+      input: ["text", "image"],
+      cost: OPENAI_GPT_55_COST,
+      contextWindow: OPENAI_GPT_55_CONTEXT_TOKENS,
+      maxTokens: OPENAI_GPT_54_MAX_TOKENS,
+    };
+  } else if (lower === OPENAI_GPT_55_PRO_MODEL_ID) {
+    templateIds = OPENAI_GPT_55_PRO_TEMPLATE_MODEL_IDS;
+    patch = {
+      api: "openai-responses",
+      provider: PROVIDER_ID,
+      baseUrl: "https://api.openai.com/v1",
+      reasoning: true,
+      input: ["text", "image"],
+      cost: OPENAI_GPT_55_PRO_COST,
+      contextWindow: OPENAI_GPT_55_PRO_CONTEXT_TOKENS,
+      maxTokens: OPENAI_GPT_54_MAX_TOKENS,
+    };
+  } else if (lower === OPENAI_GPT_54_MODEL_ID) {
     templateIds = OPENAI_GPT_54_TEMPLATE_MODEL_IDS;
     patch = {
       api: "openai-responses",
@@ -202,7 +243,7 @@ export function buildOpenAIProvider(): ProviderPlugin {
         },
       }),
     ],
-    resolveDynamicModel: (ctx) => resolveOpenAIGpt54ForwardCompatModel(ctx),
+    resolveDynamicModel: (ctx) => resolveOpenAIGptForwardCompatModel(ctx),
    normalizeResolvedModel: (ctx) => {
      if (normalizeProviderId(ctx.provider) !== PROVIDER_ID) {
        return undefined;
@@ -234,7 +275,7 @@
      if (ctx.provider !== PROVIDER_ID || ctx.listProfileIds("openai-codex").length === 0) {
        return undefined;
      }
-      return 'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai-codex/gpt-5.4 (OAuth) or set OPENAI_API_KEY to use openai/gpt-5.4.';
+      return 'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai-codex/gpt-5.5 (OAuth) or set OPENAI_API_KEY to use openai/gpt-5.5.';
    },
    suppressBuiltInModel: (ctx) => {
      if (
@@ -249,6 +290,16 @@ export function buildOpenAIProvider(): ProviderPlugin {
      };
    },
    augmentModelCatalog: (ctx) => {
+      const openAiGpt55Template = findCatalogTemplate({
+        entries: ctx.entries,
+        providerId: PROVIDER_ID,
+        templateIds: OPENAI_GPT_55_TEMPLATE_MODEL_IDS,
+      });
+      const openAiGpt55ProTemplate = findCatalogTemplate({
+        entries: ctx.entries,
+        providerId: PROVIDER_ID,
+        templateIds: OPENAI_GPT_55_PRO_TEMPLATE_MODEL_IDS,
+      });
      const openAiGpt54Template = findCatalogTemplate({
        entries: ctx.entries,
        providerId: PROVIDER_ID,
@@ -270,6 +321,18 @@ export function buildOpenAIProvider(): ProviderPlugin {
        templateIds: OPENAI_GPT_54_NANO_TEMPLATE_MODEL_IDS,
      });
      return [
+        buildOpenAISyntheticCatalogEntry(openAiGpt55Template, {
+          id: OPENAI_GPT_55_MODEL_ID,
+          reasoning: true,
+          input: ["text", "image"],
+          contextWindow: OPENAI_GPT_55_CONTEXT_TOKENS,
+        }),
+        buildOpenAISyntheticCatalogEntry(openAiGpt55ProTemplate, {
+          id: OPENAI_GPT_55_PRO_MODEL_ID,
+          reasoning: true,
+          input: ["text", "image"],
+          contextWindow: OPENAI_GPT_55_PRO_CONTEXT_TOKENS,
+        }),
        buildOpenAISyntheticCatalogEntry(openAiGpt54Template, {
          id: OPENAI_GPT_54_MODEL_ID,
          reasoning: true,
diff --git a/extensions/openai/openclaw.plugin.json b/extensions/openai/openclaw.plugin.json
index 99187a4449c..100fed03b2b 100644
--- a/extensions/openai/openclaw.plugin.json
+++ b/extensions/openai/openclaw.plugin.json
@@ -72,7 +72,7 @@
     "openai-codex": {
       "capabilities": ["image"],
       "defaultModels": {
-        "image": "gpt-5.4"
+        "image": "gpt-5.5"
       }
     }
   },
diff --git a/extensions/qa-lab/src/providers/live-frontier/catalog.ts b/extensions/qa-lab/src/providers/live-frontier/catalog.ts
index e0b88519c56..f31f6a9e854 100644
--- a/extensions/qa-lab/src/providers/live-frontier/catalog.ts
+++ b/extensions/qa-lab/src/providers/live-frontier/catalog.ts
@@ -1,5 +1,5 @@
 export const QA_FRONTIER_PROVIDER_IDS = ["anthropic", "google", "openai"] as const;
-export const QA_FRONTIER_CATALOG_PRIMARY_MODEL = "openai/gpt-5.4";
+export const QA_FRONTIER_CATALOG_PRIMARY_MODEL = "openai/gpt-5.5";
 export const QA_FRONTIER_CATALOG_ALTERNATE_MODEL = "anthropic/claude-sonnet-4-6";
 
 export function isPreferredQaLiveFrontierCatalogModel(modelRef: string) {
diff --git a/extensions/qa-lab/src/providers/live-frontier/character-eval.ts b/extensions/qa-lab/src/providers/live-frontier/character-eval.ts
index b344b1d3ff6..571e850180c 100644
--- a/extensions/qa-lab/src/providers/live-frontier/character-eval.ts
+++ b/extensions/qa-lab/src/providers/live-frontier/character-eval.ts
@@ -6,7 +6,7 @@ type QaFrontierCharacterModelOptions = {
 };
 
 export const QA_FRONTIER_CHARACTER_EVAL_MODELS = Object.freeze([
-  "openai/gpt-5.4",
+  "openai/gpt-5.5",
   "openai/gpt-5.2",
   "openai/gpt-5",
   "anthropic/claude-opus-4-6",
@@ -18,19 +18,19 @@ export const QA_FRONTIER_CHARACTER_EVAL_MODELS = Object.freeze([
 export const QA_FRONTIER_CHARACTER_THINKING_BY_MODEL: Readonly> = Object.freeze({
-  "openai/gpt-5.4": "xhigh",
+  "openai/gpt-5.5": "xhigh",
   "openai/gpt-5.2": "xhigh",
   "openai/gpt-5": "xhigh",
 });
 
 export const QA_FRONTIER_CHARACTER_JUDGE_MODELS = Object.freeze([
-  "openai/gpt-5.4",
+  "openai/gpt-5.5",
   "anthropic/claude-opus-4-6",
 ]);
 
 export const QA_FRONTIER_CHARACTER_JUDGE_MODEL_OPTIONS: Readonly<
   Record
 > = Object.freeze({
-  "openai/gpt-5.4": { thinkingDefault: "xhigh" },
+  "openai/gpt-5.5": { thinkingDefault: "xhigh" },
   "anthropic/claude-opus-4-6": { thinkingDefault: "high" },
 });
diff --git a/extensions/qa-lab/src/providers/live-frontier/index.ts b/extensions/qa-lab/src/providers/live-frontier/index.ts
index ac54f7ea368..0613f7f96c6 100644
--- a/extensions/qa-lab/src/providers/live-frontier/index.ts
+++ b/extensions/qa-lab/src/providers/live-frontier/index.ts
@@ -23,7 +23,7 @@ function isClaudeOpusModel(modelRef: string) {
 export const liveFrontierProviderDefinition: QaProviderDefinition = {
   mode: "live-frontier",
   kind: "live",
-  defaultModel: (options) => options?.preferredLiveModel ?? "openai/gpt-5.4",
+  defaultModel: (options) => options?.preferredLiveModel ?? "openai/gpt-5.5",
   defaultImageGenerationProviderIds: ["openai"],
   defaultImageGenerationModel: ({ modelProviderIds }) =>
     modelProviderIds.includes("openai") ? "openai/gpt-image-1" : null,
diff --git a/extensions/qa-lab/src/providers/live-frontier/model-selection.runtime.ts b/extensions/qa-lab/src/providers/live-frontier/model-selection.runtime.ts
index 903d6eb8660..cbfdd252c32 100644
--- a/extensions/qa-lab/src/providers/live-frontier/model-selection.runtime.ts
+++ b/extensions/qa-lab/src/providers/live-frontier/model-selection.runtime.ts
@@ -4,7 +4,7 @@ import {
 } from "openclaw/plugin-sdk/agent-runtime";
 import { resolveEnvApiKey } from "openclaw/plugin-sdk/provider-auth";
 
-const QA_CODEX_OAUTH_LIVE_MODEL = "openai-codex/gpt-5.4";
+const QA_CODEX_OAUTH_LIVE_MODEL = "openai-codex/gpt-5.5";
 
 export function resolveQaLiveFrontierPreferredModel() {
   if (resolveEnvApiKey("openai")?.apiKey) {
diff --git a/extensions/qa-lab/src/providers/live-frontier/parity.ts b/extensions/qa-lab/src/providers/live-frontier/parity.ts
index 887d691f33b..62bcd5556ce 100644
--- a/extensions/qa-lab/src/providers/live-frontier/parity.ts
+++ b/extensions/qa-lab/src/providers/live-frontier/parity.ts
@@ -1,2 +1,2 @@
-export const QA_FRONTIER_PARITY_CANDIDATE_LABEL = "openai/gpt-5.4";
+export const QA_FRONTIER_PARITY_CANDIDATE_LABEL = "openai/gpt-5.5";
 export const QA_FRONTIER_PARITY_BASELINE_LABEL = "anthropic/claude-opus-4-6";
diff --git a/package.json b/package.json
index 8e975efdb97..1ace4865e3a 100644
--- a/package.json
+++ b/package.json
@@ -1437,16 +1437,16 @@
     "test:docker:live-cli-backend": "bash scripts/test-live-cli-backend-docker.sh",
     "test:docker:live-cli-backend:claude": "OPENCLAW_LIVE_CLI_BACKEND_MODEL=claude-cli/claude-sonnet-4-6 bash scripts/test-live-cli-backend-docker.sh",
     "test:docker:live-cli-backend:claude-subscription": "OPENCLAW_LIVE_CLI_BACKEND_AUTH=subscription OPENCLAW_LIVE_CLI_BACKEND_MODEL=claude-cli/claude-sonnet-4-6 OPENCLAW_LIVE_CLI_BACKEND_DISABLE_MCP_CONFIG=1 OPENCLAW_LIVE_CLI_BACKEND_MODEL_SWITCH_PROBE=0 OPENCLAW_LIVE_CLI_BACKEND_RESUME_PROBE=1 OPENCLAW_LIVE_CLI_BACKEND_IMAGE_PROBE=0 OPENCLAW_LIVE_CLI_BACKEND_MCP_PROBE=0 bash scripts/test-live-cli-backend-docker.sh",
-    "test:docker:live-cli-backend:codex": "OPENCLAW_LIVE_CLI_BACKEND_MODEL=codex-cli/gpt-5.4 bash scripts/test-live-cli-backend-docker.sh",
+    "test:docker:live-cli-backend:codex": "OPENCLAW_LIVE_CLI_BACKEND_MODEL=codex-cli/gpt-5.5 bash scripts/test-live-cli-backend-docker.sh",
     "test:docker:live-cli-backend:gemini": "OPENCLAW_LIVE_CLI_BACKEND_MODEL=google-gemini-cli/gemini-3-flash-preview bash scripts/test-live-cli-backend-docker.sh",
     "test:docker:live-codex-harness": "bash scripts/test-live-codex-harness-docker.sh",
     "test:docker:live-gateway": "bash scripts/test-live-gateway-models-docker.sh",
     "test:docker:live-gateway:claude": "OPENCLAW_LIVE_GATEWAY_PROVIDERS=claude-cli OPENCLAW_LIVE_GATEWAY_MODELS=claude-cli/claude-sonnet-4-6 bash scripts/test-live-gateway-models-docker.sh",
-    "test:docker:live-gateway:codex": "OPENCLAW_LIVE_GATEWAY_PROVIDERS=codex-cli OPENCLAW_LIVE_GATEWAY_MODELS=codex-cli/gpt-5.4 bash scripts/test-live-gateway-models-docker.sh",
+    "test:docker:live-gateway:codex": "OPENCLAW_LIVE_GATEWAY_PROVIDERS=codex-cli OPENCLAW_LIVE_GATEWAY_MODELS=codex-cli/gpt-5.5 bash scripts/test-live-gateway-models-docker.sh",
     "test:docker:live-gateway:gemini": "OPENCLAW_LIVE_GATEWAY_PROVIDERS=google-gemini-cli OPENCLAW_LIVE_GATEWAY_MODELS=google-gemini-cli/gemini-3.1-pro-preview bash scripts/test-live-gateway-models-docker.sh",
     "test:docker:live-models": "bash scripts/test-live-models-docker.sh",
     "test:docker:live-models:claude": "OPENCLAW_LIVE_PROVIDERS=claude-cli OPENCLAW_LIVE_MODELS=claude-cli/claude-sonnet-4-6 bash scripts/test-live-models-docker.sh",
-    "test:docker:live-models:codex": "OPENCLAW_LIVE_PROVIDERS=codex-cli OPENCLAW_LIVE_MODELS=codex-cli/gpt-5.4 bash scripts/test-live-models-docker.sh",
+    "test:docker:live-models:codex": "OPENCLAW_LIVE_PROVIDERS=codex-cli OPENCLAW_LIVE_MODELS=codex-cli/gpt-5.5 bash scripts/test-live-models-docker.sh",
     "test:docker:live-models:gemini": "OPENCLAW_LIVE_PROVIDERS=google-gemini-cli OPENCLAW_LIVE_MODELS=google-gemini-cli/gemini-3.1-pro-preview bash scripts/test-live-models-docker.sh",
     "test:docker:mcp-channels": "bash scripts/e2e/mcp-channels-docker.sh",
     "test:docker:npm-onboard-channel-agent": "bash scripts/e2e/npm-onboard-channel-agent-docker.sh",
diff --git a/src/agents/defaults.ts b/src/agents/defaults.ts
index af2a945a175..ff4b015a98a 100644
--- a/src/agents/defaults.ts
+++ b/src/agents/defaults.ts
@@ -1,6 +1,6 @@
 // Defaults for agent metadata when upstream does not supply them.
 // Keep this aligned with the product-level latest-model baseline.
 export const DEFAULT_PROVIDER = "openai";
-export const DEFAULT_MODEL = "gpt-5.4";
+export const DEFAULT_MODEL = "gpt-5.5";
 // Conservative fallback used when model metadata is unavailable.
 export const DEFAULT_CONTEXT_TOKENS = 200_000;
diff --git a/src/agents/live-cache-regression-runner.ts b/src/agents/live-cache-regression-runner.ts
index 3ec549941a7..609d81ec0ee 100644
--- a/src/agents/live-cache-regression-runner.ts
+++ b/src/agents/live-cache-regression-runner.ts
@@ -410,7 +410,7 @@ export async function runLiveCacheRegression(): Promise