feat(openai): default to GPT-5.5
@@ -6,7 +6,7 @@ import {
 import { prepareOpenAICodexCliExecution } from "./openai-codex-cli-bridge.js";

 const OPENAI_CODEX_DEFAULT_PROFILE_ID = "openai-codex:default";
-const CODEX_CLI_DEFAULT_MODEL_REF = "codex-cli/gpt-5.4";
+const CODEX_CLI_DEFAULT_MODEL_REF = "codex-cli/gpt-5.5";

 export function buildOpenAICodexCliBackend(): CliBackendPlugin {
   return {
@@ -4,8 +4,8 @@ import {
   type OpenClawConfig,
 } from "openclaw/plugin-sdk/provider-onboard";

-export const OPENAI_DEFAULT_MODEL = "openai/gpt-5.4";
-export const OPENAI_CODEX_DEFAULT_MODEL = "openai-codex/gpt-5.4";
+export const OPENAI_DEFAULT_MODEL = "openai/gpt-5.5";
+export const OPENAI_CODEX_DEFAULT_MODEL = "openai-codex/gpt-5.5";
 export const OPENAI_DEFAULT_IMAGE_MODEL = "gpt-image-2";
 export const OPENAI_DEFAULT_TTS_MODEL = "gpt-4o-mini-tts";
 export const OPENAI_DEFAULT_TTS_VOICE = "alloy";
@@ -34,7 +34,7 @@ export const openaiMediaUnderstandingProvider: MediaUnderstandingProvider = {
 export const openaiCodexMediaUnderstandingProvider: MediaUnderstandingProvider = {
   id: "openai-codex",
   capabilities: ["image"],
-  defaultModels: { image: "gpt-5.4" },
+  defaultModels: { image: "gpt-5.5" },
   describeImage: describeImageWithModel,
   describeImages: describeImagesWithModel,
 };
@@ -44,14 +44,30 @@ const PROVIDER_ID = "openai-codex";
 const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api/codex";
 const OPENAI_CODEX_LOGIN_ASSISTANT_PRIORITY = -30;
 const OPENAI_CODEX_DEVICE_PAIRING_ASSISTANT_PRIORITY = -10;
+const OPENAI_CODEX_GPT_55_MODEL_ID = "gpt-5.5";
+const OPENAI_CODEX_GPT_55_PRO_MODEL_ID = "gpt-5.5-pro";
 const OPENAI_CODEX_GPT_54_MODEL_ID = "gpt-5.4";
 const OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID = "gpt-5.4-codex";
 const OPENAI_CODEX_GPT_54_PRO_MODEL_ID = "gpt-5.4-pro";
 const OPENAI_CODEX_GPT_54_MINI_MODEL_ID = "gpt-5.4-mini";
+const OPENAI_CODEX_GPT_55_NATIVE_CONTEXT_TOKENS = 1_000_000;
+const OPENAI_CODEX_GPT_55_DEFAULT_CONTEXT_TOKENS = 272_000;
 const OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS = 1_050_000;
 const OPENAI_CODEX_GPT_54_DEFAULT_CONTEXT_TOKENS = 272_000;
 const OPENAI_CODEX_GPT_54_MINI_CONTEXT_TOKENS = 272_000;
 const OPENAI_CODEX_GPT_54_MAX_TOKENS = 128_000;
+const OPENAI_CODEX_GPT_55_COST = {
+  input: 5,
+  output: 30,
+  cacheRead: 0,
+  cacheWrite: 0,
+} as const;
+const OPENAI_CODEX_GPT_55_PRO_COST = {
+  input: 30,
+  output: 180,
+  cacheRead: 0,
+  cacheWrite: 0,
+} as const;
 const OPENAI_CODEX_GPT_54_COST = {
   input: 2.5,
   output: 15,
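Note: the cost constants above read as USD per million tokens (input/output, plus cache rates where applicable). As a quick sanity check on the new gpt-5.5 numbers, here is a minimal sketch of turning such a table into a per-request estimate; the CostTable type and estimateUsd helper are illustrative, not part of the plugin SDK:

type CostTable = { input: number; output: number; cacheRead: number; cacheWrite: number };

// Estimate the dollar cost of one request, assuming rates are USD per 1M tokens.
function estimateUsd(
  cost: CostTable,
  usage: { inputTokens: number; outputTokens: number; cacheReadTokens?: number },
): number {
  return (
    (usage.inputTokens * cost.input +
      usage.outputTokens * cost.output +
      (usage.cacheReadTokens ?? 0) * cost.cacheRead) /
    1_000_000
  );
}

// gpt-5.5 at { input: 5, output: 30 }, 10k input + 2k output tokens:
// (10_000 * 5 + 2_000 * 30) / 1_000_000 = 0.05 + 0.06 = 0.11 USD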
@@ -76,6 +92,11 @@ const OPENAI_CODEX_GPT_54_CATALOG_SYNTH_TEMPLATE_MODEL_IDS = [
   ...OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS,
   OPENAI_CODEX_GPT_54_MODEL_ID,
 ] as const;
+const OPENAI_CODEX_GPT_55_TEMPLATE_MODEL_IDS = [
+  OPENAI_CODEX_GPT_54_MODEL_ID,
+  OPENAI_CODEX_GPT_54_PRO_MODEL_ID,
+  ...OPENAI_CODEX_GPT_54_TEMPLATE_MODEL_IDS,
+] as const;
 const OPENAI_CODEX_GPT_54_MINI_TEMPLATE_MODEL_IDS = [
   OPENAI_CODEX_GPT_54_MODEL_ID,
   "gpt-5.1-codex-mini",
@@ -87,6 +108,8 @@ const OPENAI_CODEX_GPT_53_SPARK_CONTEXT_TOKENS = 128_000;
 const OPENAI_CODEX_GPT_53_SPARK_MAX_TOKENS = 128_000;
 const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
 const OPENAI_CODEX_XHIGH_MODEL_IDS = [
+  OPENAI_CODEX_GPT_55_MODEL_ID,
+  OPENAI_CODEX_GPT_55_PRO_MODEL_ID,
   OPENAI_CODEX_GPT_54_MODEL_ID,
   OPENAI_CODEX_GPT_54_PRO_MODEL_ID,
   OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
@@ -96,6 +119,8 @@ const OPENAI_CODEX_XHIGH_MODEL_IDS = [
   "gpt-5.1-codex",
 ] as const;
 const OPENAI_CODEX_MODERN_MODEL_IDS = [
+  OPENAI_CODEX_GPT_55_MODEL_ID,
+  OPENAI_CODEX_GPT_55_PRO_MODEL_ID,
   OPENAI_CODEX_GPT_54_MODEL_ID,
   OPENAI_CODEX_GPT_54_PRO_MODEL_ID,
   OPENAI_CODEX_GPT_54_MINI_MODEL_ID,
@@ -169,7 +194,26 @@ function resolveCodexForwardCompatModel(ctx: ProviderResolveDynamicModelContext)

   let templateIds: readonly string[];
   let patch: Parameters<typeof cloneFirstTemplateModel>[0]["patch"];
-  if (lower === OPENAI_CODEX_GPT_54_MODEL_ID || lower === OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID) {
+  if (lower === OPENAI_CODEX_GPT_55_MODEL_ID) {
+    templateIds = OPENAI_CODEX_GPT_55_TEMPLATE_MODEL_IDS;
+    patch = {
+      contextWindow: OPENAI_CODEX_GPT_55_NATIVE_CONTEXT_TOKENS,
+      contextTokens: OPENAI_CODEX_GPT_55_DEFAULT_CONTEXT_TOKENS,
+      maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
+      cost: OPENAI_CODEX_GPT_55_COST,
+    };
+  } else if (lower === OPENAI_CODEX_GPT_55_PRO_MODEL_ID) {
+    templateIds = OPENAI_CODEX_GPT_55_TEMPLATE_MODEL_IDS;
+    patch = {
+      contextWindow: OPENAI_CODEX_GPT_55_NATIVE_CONTEXT_TOKENS,
+      contextTokens: OPENAI_CODEX_GPT_55_DEFAULT_CONTEXT_TOKENS,
+      maxTokens: OPENAI_CODEX_GPT_54_MAX_TOKENS,
+      cost: OPENAI_CODEX_GPT_55_PRO_COST,
+    };
+  } else if (
+    lower === OPENAI_CODEX_GPT_54_MODEL_ID ||
+    lower === OPENAI_CODEX_GPT_54_LEGACY_MODEL_ID
+  ) {
     templateIds = OPENAI_CODEX_GPT_54_CATALOG_SYNTH_TEMPLATE_MODEL_IDS;
     patch = {
       contextWindow: OPENAI_CODEX_GPT_54_NATIVE_CONTEXT_TOKENS,
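The resolver above follows a clone-and-patch pattern: a recognized-but-uncataloged model id is materialized by cloning the first template id that exists in the catalog and overlaying model-specific metadata. A minimal sketch of that idea, with a hypothetical RuntimeModelSketch type and cloneFirstTemplate helper standing in for the SDK's cloneFirstTemplateModel:

// Hypothetical stand-ins; the real types and helper live in the plugin SDK.
interface RuntimeModelSketch {
  id: string;
  contextWindow: number;
  maxTokens: number;
}

function cloneFirstTemplate(
  catalog: ReadonlyMap<string, RuntimeModelSketch>,
  templateIds: readonly string[],
  patch: Partial<RuntimeModelSketch> & { id: string },
): RuntimeModelSketch | undefined {
  for (const templateId of templateIds) {
    const template = catalog.get(templateId);
    if (template) {
      // First match wins: template order encodes "closest known relative".
      return { ...template, ...patch };
    }
  }
  return undefined; // no template found; caller falls back to other resolution
}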
@@ -476,6 +520,11 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
     providerId: PROVIDER_ID,
     templateIds: OPENAI_CODEX_GPT_54_CATALOG_SYNTH_TEMPLATE_MODEL_IDS,
   });
+  const gpt55Template = findCatalogTemplate({
+    entries: ctx.entries,
+    providerId: PROVIDER_ID,
+    templateIds: OPENAI_CODEX_GPT_55_TEMPLATE_MODEL_IDS,
+  });
   const gpt54MiniTemplate = findCatalogTemplate({
     entries: ctx.entries,
     providerId: PROVIDER_ID,
@@ -487,6 +536,22 @@ export function buildOpenAICodexProviderPlugin(): ProviderPlugin {
     templateIds: [OPENAI_CODEX_GPT_53_MODEL_ID, ...OPENAI_CODEX_TEMPLATE_MODEL_IDS],
   });
   return [
+    buildOpenAISyntheticCatalogEntry(gpt55Template, {
+      id: OPENAI_CODEX_GPT_55_MODEL_ID,
+      reasoning: true,
+      input: ["text", "image"],
+      contextWindow: OPENAI_CODEX_GPT_55_NATIVE_CONTEXT_TOKENS,
+      contextTokens: OPENAI_CODEX_GPT_55_DEFAULT_CONTEXT_TOKENS,
+      cost: OPENAI_CODEX_GPT_55_COST,
+    }),
+    buildOpenAISyntheticCatalogEntry(gpt55Template, {
+      id: OPENAI_CODEX_GPT_55_PRO_MODEL_ID,
+      reasoning: true,
+      input: ["text", "image"],
+      contextWindow: OPENAI_CODEX_GPT_55_NATIVE_CONTEXT_TOKENS,
+      contextTokens: OPENAI_CODEX_GPT_55_DEFAULT_CONTEXT_TOKENS,
+      cost: OPENAI_CODEX_GPT_55_PRO_COST,
+    }),
     buildOpenAISyntheticCatalogEntry(gpt54Template, {
       id: OPENAI_CODEX_GPT_54_MODEL_ID,
       reasoning: true,
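For reference, the first synthesized entry above would presumably materialize along these lines, with every field not overridden inherited from whichever template matched (field names are the ones visible in this diff; the full entry shape is defined in the SDK):

const gpt55SyntheticEntrySketch = {
  id: "gpt-5.5",
  reasoning: true,
  input: ["text", "image"],
  contextWindow: 1_000_000, // OPENAI_CODEX_GPT_55_NATIVE_CONTEXT_TOKENS
  contextTokens: 272_000,   // OPENAI_CODEX_GPT_55_DEFAULT_CONTEXT_TOKENS
  cost: { input: 5, output: 30, cacheRead: 0, cacheWrite: 0 },
  // ...remaining fields inherited from the matched catalog template
};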
@@ -22,15 +22,21 @@ import {
 } from "./shared.js";

 const PROVIDER_ID = "openai";
+const OPENAI_GPT_55_MODEL_ID = "gpt-5.5";
+const OPENAI_GPT_55_PRO_MODEL_ID = "gpt-5.5-pro";
 const OPENAI_GPT_54_MODEL_ID = "gpt-5.4";
 const OPENAI_GPT_54_PRO_MODEL_ID = "gpt-5.4-pro";
 const OPENAI_GPT_54_MINI_MODEL_ID = "gpt-5.4-mini";
 const OPENAI_GPT_54_NANO_MODEL_ID = "gpt-5.4-nano";
+const OPENAI_GPT_55_CONTEXT_TOKENS = 1_000_000;
+const OPENAI_GPT_55_PRO_CONTEXT_TOKENS = 1_000_000;
 const OPENAI_GPT_54_CONTEXT_TOKENS = 1_050_000;
 const OPENAI_GPT_54_PRO_CONTEXT_TOKENS = 1_050_000;
 const OPENAI_GPT_54_MINI_CONTEXT_TOKENS = 400_000;
 const OPENAI_GPT_54_NANO_CONTEXT_TOKENS = 400_000;
 const OPENAI_GPT_54_MAX_TOKENS = 128_000;
+const OPENAI_GPT_55_COST = { input: 5, output: 30, cacheRead: 0, cacheWrite: 0 } as const;
+const OPENAI_GPT_55_PRO_COST = { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 } as const;
 const OPENAI_GPT_54_COST = { input: 2.5, output: 15, cacheRead: 0.25, cacheWrite: 0 } as const;
 const OPENAI_GPT_54_PRO_COST = { input: 30, output: 180, cacheRead: 0, cacheWrite: 0 } as const;
 const OPENAI_GPT_54_MINI_COST = {
@@ -45,11 +51,20 @@ const OPENAI_GPT_54_NANO_COST = {
   cacheRead: 0.02,
   cacheWrite: 0,
 } as const;
+const OPENAI_GPT_55_TEMPLATE_MODEL_IDS = [OPENAI_GPT_54_MODEL_ID, "gpt-5.2"] as const;
+const OPENAI_GPT_55_PRO_TEMPLATE_MODEL_IDS = [
+  OPENAI_GPT_54_PRO_MODEL_ID,
+  OPENAI_GPT_54_MODEL_ID,
+  "gpt-5.2-pro",
+  "gpt-5.2",
+] as const;
 const OPENAI_GPT_54_TEMPLATE_MODEL_IDS = ["gpt-5.2"] as const;
 const OPENAI_GPT_54_PRO_TEMPLATE_MODEL_IDS = ["gpt-5.2-pro", "gpt-5.2"] as const;
 const OPENAI_GPT_54_MINI_TEMPLATE_MODEL_IDS = ["gpt-5-mini"] as const;
 const OPENAI_GPT_54_NANO_TEMPLATE_MODEL_IDS = ["gpt-5-nano", "gpt-5-mini"] as const;
 const OPENAI_XHIGH_MODEL_IDS = [
+  "gpt-5.5",
+  "gpt-5.5-pro",
   "gpt-5.4",
   "gpt-5.4-pro",
   "gpt-5.4-mini",
@@ -57,6 +72,8 @@ const OPENAI_XHIGH_MODEL_IDS = [
   "gpt-5.2",
 ] as const;
 const OPENAI_MODERN_MODEL_IDS = [
+  "gpt-5.5",
+  "gpt-5.5-pro",
   "gpt-5.4",
   "gpt-5.4-pro",
   "gpt-5.4-mini",
@@ -97,14 +114,38 @@ function normalizeOpenAITransport(model: ProviderRuntimeModel): ProviderRuntimeM
   };
 }

-function resolveOpenAIGpt54ForwardCompatModel(
+function resolveOpenAIGptForwardCompatModel(
   ctx: ProviderResolveDynamicModelContext,
 ): ProviderRuntimeModel | undefined {
   const trimmedModelId = ctx.modelId.trim();
   const lower = normalizeLowercaseStringOrEmpty(trimmedModelId);
   let templateIds: readonly string[];
   let patch: Partial<ProviderRuntimeModel>;
-  if (lower === OPENAI_GPT_54_MODEL_ID) {
+  if (lower === OPENAI_GPT_55_MODEL_ID) {
+    templateIds = OPENAI_GPT_55_TEMPLATE_MODEL_IDS;
+    patch = {
+      api: "openai-responses",
+      provider: PROVIDER_ID,
+      baseUrl: "https://api.openai.com/v1",
+      reasoning: true,
+      input: ["text", "image"],
+      cost: OPENAI_GPT_55_COST,
+      contextWindow: OPENAI_GPT_55_CONTEXT_TOKENS,
+      maxTokens: OPENAI_GPT_54_MAX_TOKENS,
+    };
+  } else if (lower === OPENAI_GPT_55_PRO_MODEL_ID) {
+    templateIds = OPENAI_GPT_55_PRO_TEMPLATE_MODEL_IDS;
+    patch = {
+      api: "openai-responses",
+      provider: PROVIDER_ID,
+      baseUrl: "https://api.openai.com/v1",
+      reasoning: true,
+      input: ["text", "image"],
+      cost: OPENAI_GPT_55_PRO_COST,
+      contextWindow: OPENAI_GPT_55_PRO_CONTEXT_TOKENS,
+      maxTokens: OPENAI_GPT_54_MAX_TOKENS,
+    };
+  } else if (lower === OPENAI_GPT_54_MODEL_ID) {
     templateIds = OPENAI_GPT_54_TEMPLATE_MODEL_IDS;
     patch = {
       api: "openai-responses",
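The resolver keys off a trimmed, lowercased model id, so input like "  GPT-5.5  " still hits the right branch. A plausible implementation of that normalization step; the helper name appears in the diff above, but this body is an assumption:

// Assumed body for the normalizeLowercaseStringOrEmpty helper used above.
function normalizeLowercaseStringOrEmpty(value: unknown): string {
  return typeof value === "string" ? value.trim().toLowerCase() : "";
}

// normalizeLowercaseStringOrEmpty("  GPT-5.5  ") === "gpt-5.5"
// normalizeLowercaseStringOrEmpty(undefined) === "" (matches no branch)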
@@ -202,7 +243,7 @@ export function buildOpenAIProvider(): ProviderPlugin {
       },
     }),
   ],
-  resolveDynamicModel: (ctx) => resolveOpenAIGpt54ForwardCompatModel(ctx),
+  resolveDynamicModel: (ctx) => resolveOpenAIGptForwardCompatModel(ctx),
   normalizeResolvedModel: (ctx) => {
     if (normalizeProviderId(ctx.provider) !== PROVIDER_ID) {
       return undefined;
@@ -234,7 +275,7 @@ export function buildOpenAIProvider(): ProviderPlugin {
     if (ctx.provider !== PROVIDER_ID || ctx.listProfileIds("openai-codex").length === 0) {
       return undefined;
     }
-    return 'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai-codex/gpt-5.4 (OAuth) or set OPENAI_API_KEY to use openai/gpt-5.4.';
+    return 'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai-codex/gpt-5.5 (OAuth) or set OPENAI_API_KEY to use openai/gpt-5.5.';
   },
   suppressBuiltInModel: (ctx) => {
     if (
@@ -249,6 +290,16 @@ export function buildOpenAIProvider(): ProviderPlugin {
     };
   },
   augmentModelCatalog: (ctx) => {
+    const openAiGpt55Template = findCatalogTemplate({
+      entries: ctx.entries,
+      providerId: PROVIDER_ID,
+      templateIds: OPENAI_GPT_55_TEMPLATE_MODEL_IDS,
+    });
+    const openAiGpt55ProTemplate = findCatalogTemplate({
+      entries: ctx.entries,
+      providerId: PROVIDER_ID,
+      templateIds: OPENAI_GPT_55_PRO_TEMPLATE_MODEL_IDS,
+    });
     const openAiGpt54Template = findCatalogTemplate({
       entries: ctx.entries,
       providerId: PROVIDER_ID,
@@ -270,6 +321,18 @@ export function buildOpenAIProvider(): ProviderPlugin {
       templateIds: OPENAI_GPT_54_NANO_TEMPLATE_MODEL_IDS,
     });
     return [
+      buildOpenAISyntheticCatalogEntry(openAiGpt55Template, {
+        id: OPENAI_GPT_55_MODEL_ID,
+        reasoning: true,
+        input: ["text", "image"],
+        contextWindow: OPENAI_GPT_55_CONTEXT_TOKENS,
+      }),
+      buildOpenAISyntheticCatalogEntry(openAiGpt55ProTemplate, {
+        id: OPENAI_GPT_55_PRO_MODEL_ID,
+        reasoning: true,
+        input: ["text", "image"],
+        contextWindow: OPENAI_GPT_55_PRO_CONTEXT_TOKENS,
+      }),
       buildOpenAISyntheticCatalogEntry(openAiGpt54Template, {
         id: OPENAI_GPT_54_MODEL_ID,
         reasoning: true,
@@ -72,7 +72,7 @@
     "openai-codex": {
       "capabilities": ["image"],
       "defaultModels": {
-        "image": "gpt-5.4"
+        "image": "gpt-5.5"
      }
    }
  },
@@ -1,5 +1,5 @@
 export const QA_FRONTIER_PROVIDER_IDS = ["anthropic", "google", "openai"] as const;
-export const QA_FRONTIER_CATALOG_PRIMARY_MODEL = "openai/gpt-5.4";
+export const QA_FRONTIER_CATALOG_PRIMARY_MODEL = "openai/gpt-5.5";
 export const QA_FRONTIER_CATALOG_ALTERNATE_MODEL = "anthropic/claude-sonnet-4-6";

 export function isPreferredQaLiveFrontierCatalogModel(modelRef: string) {
@@ -6,7 +6,7 @@ type QaFrontierCharacterModelOptions = {
 };

 export const QA_FRONTIER_CHARACTER_EVAL_MODELS = Object.freeze([
-  "openai/gpt-5.4",
+  "openai/gpt-5.5",
   "openai/gpt-5.2",
   "openai/gpt-5",
   "anthropic/claude-opus-4-6",
@@ -18,19 +18,19 @@ export const QA_FRONTIER_CHARACTER_EVAL_MODELS = Object.freeze([

 export const QA_FRONTIER_CHARACTER_THINKING_BY_MODEL: Readonly<Record<string, QaThinkingLevel>> =
   Object.freeze({
-    "openai/gpt-5.4": "xhigh",
+    "openai/gpt-5.5": "xhigh",
     "openai/gpt-5.2": "xhigh",
     "openai/gpt-5": "xhigh",
   });

 export const QA_FRONTIER_CHARACTER_JUDGE_MODELS = Object.freeze([
-  "openai/gpt-5.4",
+  "openai/gpt-5.5",
   "anthropic/claude-opus-4-6",
 ]);

 export const QA_FRONTIER_CHARACTER_JUDGE_MODEL_OPTIONS: Readonly<
   Record<string, QaFrontierCharacterModelOptions>
 > = Object.freeze({
-  "openai/gpt-5.4": { thinkingDefault: "xhigh" },
+  "openai/gpt-5.5": { thinkingDefault: "xhigh" },
   "anthropic/claude-opus-4-6": { thinkingDefault: "high" },
 });
@@ -23,7 +23,7 @@ function isClaudeOpusModel(modelRef: string) {
 export const liveFrontierProviderDefinition: QaProviderDefinition = {
   mode: "live-frontier",
   kind: "live",
-  defaultModel: (options) => options?.preferredLiveModel ?? "openai/gpt-5.4",
+  defaultModel: (options) => options?.preferredLiveModel ?? "openai/gpt-5.5",
   defaultImageGenerationProviderIds: ["openai"],
   defaultImageGenerationModel: ({ modelProviderIds }) =>
     modelProviderIds.includes("openai") ? "openai/gpt-image-1" : null,
@@ -4,7 +4,7 @@ import {
 } from "openclaw/plugin-sdk/agent-runtime";
 import { resolveEnvApiKey } from "openclaw/plugin-sdk/provider-auth";

-const QA_CODEX_OAUTH_LIVE_MODEL = "openai-codex/gpt-5.4";
+const QA_CODEX_OAUTH_LIVE_MODEL = "openai-codex/gpt-5.5";

 export function resolveQaLiveFrontierPreferredModel() {
   if (resolveEnvApiKey("openai")?.apiKey) {
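The hunk cuts off before the function body, but the visible guard suggests resolveQaLiveFrontierPreferredModel prefers the direct API model when an OpenAI key is configured and otherwise falls back to the OAuth-backed Codex model. A hypothetical sketch of that selection, under those assumptions only:

// Hypothetical sketch; the real body is not shown in this diff.
function resolveQaLivePreferredModelSketch(hasOpenAiApiKey: boolean): string {
  return hasOpenAiApiKey
    ? "openai/gpt-5.5" // direct API path when a key is present
    : "openai-codex/gpt-5.5"; // QA_CODEX_OAUTH_LIVE_MODEL fallback (OAuth)
}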
@@ -1,2 +1,2 @@
-export const QA_FRONTIER_PARITY_CANDIDATE_LABEL = "openai/gpt-5.4";
+export const QA_FRONTIER_PARITY_CANDIDATE_LABEL = "openai/gpt-5.5";
 export const QA_FRONTIER_PARITY_BASELINE_LABEL = "anthropic/claude-opus-4-6";
@@ -1437,16 +1437,16 @@
     "test:docker:live-cli-backend": "bash scripts/test-live-cli-backend-docker.sh",
     "test:docker:live-cli-backend:claude": "OPENCLAW_LIVE_CLI_BACKEND_MODEL=claude-cli/claude-sonnet-4-6 bash scripts/test-live-cli-backend-docker.sh",
     "test:docker:live-cli-backend:claude-subscription": "OPENCLAW_LIVE_CLI_BACKEND_AUTH=subscription OPENCLAW_LIVE_CLI_BACKEND_MODEL=claude-cli/claude-sonnet-4-6 OPENCLAW_LIVE_CLI_BACKEND_DISABLE_MCP_CONFIG=1 OPENCLAW_LIVE_CLI_BACKEND_MODEL_SWITCH_PROBE=0 OPENCLAW_LIVE_CLI_BACKEND_RESUME_PROBE=1 OPENCLAW_LIVE_CLI_BACKEND_IMAGE_PROBE=0 OPENCLAW_LIVE_CLI_BACKEND_MCP_PROBE=0 bash scripts/test-live-cli-backend-docker.sh",
-    "test:docker:live-cli-backend:codex": "OPENCLAW_LIVE_CLI_BACKEND_MODEL=codex-cli/gpt-5.4 bash scripts/test-live-cli-backend-docker.sh",
+    "test:docker:live-cli-backend:codex": "OPENCLAW_LIVE_CLI_BACKEND_MODEL=codex-cli/gpt-5.5 bash scripts/test-live-cli-backend-docker.sh",
     "test:docker:live-cli-backend:gemini": "OPENCLAW_LIVE_CLI_BACKEND_MODEL=google-gemini-cli/gemini-3-flash-preview bash scripts/test-live-cli-backend-docker.sh",
     "test:docker:live-codex-harness": "bash scripts/test-live-codex-harness-docker.sh",
     "test:docker:live-gateway": "bash scripts/test-live-gateway-models-docker.sh",
     "test:docker:live-gateway:claude": "OPENCLAW_LIVE_GATEWAY_PROVIDERS=claude-cli OPENCLAW_LIVE_GATEWAY_MODELS=claude-cli/claude-sonnet-4-6 bash scripts/test-live-gateway-models-docker.sh",
-    "test:docker:live-gateway:codex": "OPENCLAW_LIVE_GATEWAY_PROVIDERS=codex-cli OPENCLAW_LIVE_GATEWAY_MODELS=codex-cli/gpt-5.4 bash scripts/test-live-gateway-models-docker.sh",
+    "test:docker:live-gateway:codex": "OPENCLAW_LIVE_GATEWAY_PROVIDERS=codex-cli OPENCLAW_LIVE_GATEWAY_MODELS=codex-cli/gpt-5.5 bash scripts/test-live-gateway-models-docker.sh",
     "test:docker:live-gateway:gemini": "OPENCLAW_LIVE_GATEWAY_PROVIDERS=google-gemini-cli OPENCLAW_LIVE_GATEWAY_MODELS=google-gemini-cli/gemini-3.1-pro-preview bash scripts/test-live-gateway-models-docker.sh",
     "test:docker:live-models": "bash scripts/test-live-models-docker.sh",
     "test:docker:live-models:claude": "OPENCLAW_LIVE_PROVIDERS=claude-cli OPENCLAW_LIVE_MODELS=claude-cli/claude-sonnet-4-6 bash scripts/test-live-models-docker.sh",
-    "test:docker:live-models:codex": "OPENCLAW_LIVE_PROVIDERS=codex-cli OPENCLAW_LIVE_MODELS=codex-cli/gpt-5.4 bash scripts/test-live-models-docker.sh",
+    "test:docker:live-models:codex": "OPENCLAW_LIVE_PROVIDERS=codex-cli OPENCLAW_LIVE_MODELS=codex-cli/gpt-5.5 bash scripts/test-live-models-docker.sh",
     "test:docker:live-models:gemini": "OPENCLAW_LIVE_PROVIDERS=google-gemini-cli OPENCLAW_LIVE_MODELS=google-gemini-cli/gemini-3.1-pro-preview bash scripts/test-live-models-docker.sh",
     "test:docker:mcp-channels": "bash scripts/e2e/mcp-channels-docker.sh",
     "test:docker:npm-onboard-channel-agent": "bash scripts/e2e/npm-onboard-channel-agent-docker.sh",
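These scripts thread the model under test through OPENCLAW_LIVE_* environment variables, so a single underlying script serves every provider. Assuming the variable simply carries one or more model refs, a harness-side sketch of consuming that convention:

// Illustrative env-driven selection; the comma-separated list is an assumption.
const liveModels = (process.env.OPENCLAW_LIVE_MODELS ?? "codex-cli/gpt-5.5")
  .split(",")
  .map((ref) => ref.trim())
  .filter((ref) => ref.length > 0);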
@@ -1,6 +1,6 @@
 // Defaults for agent metadata when upstream does not supply them.
 // Keep this aligned with the product-level latest-model baseline.
 export const DEFAULT_PROVIDER = "openai";
-export const DEFAULT_MODEL = "gpt-5.4";
+export const DEFAULT_MODEL = "gpt-5.5";
 // Conservative fallback used when model metadata is unavailable.
 export const DEFAULT_CONTEXT_TOKENS = 200_000;
@@ -410,7 +410,7 @@ export async function runLiveCacheRegression(): Promise<LiveCacheRegressionResult
     provider: "openai",
     api: "openai-responses",
     envVar: "OPENCLAW_LIVE_OPENAI_CACHE_MODEL",
-    preferredModelIds: ["gpt-5.4-mini", "gpt-5.4", "gpt-5.2"],
+    preferredModelIds: ["gpt-5.5", "gpt-5.4-mini", "gpt-5.4", "gpt-5.2"],
   });
   const anthropic = await resolveLiveDirectModel({
     provider: "anthropic",
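preferredModelIds reads as an ordered preference list, so prepending "gpt-5.5" makes it the first candidate while keeping the older ids as fallbacks. A small sketch of first-available selection under that assumption:

// Illustrative first-match selection over an ordered preference list.
function pickPreferredModel(
  preferred: readonly string[],
  available: ReadonlySet<string>,
): string | undefined {
  return preferred.find((id) => available.has(id));
}

// pickPreferredModel(["gpt-5.5", "gpt-5.4-mini"], new Set(["gpt-5.4-mini"]))
//   === "gpt-5.4-mini" (gpt-5.5 not yet available in that environment)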
@@ -356,7 +356,7 @@ function buildMissingApiKeyFailureText(message: string): string | null {
     return null;
   }
   if (provider === "openai" && normalizedMessage.includes("OpenAI Codex OAuth")) {
-    return "⚠️ Missing API key for OpenAI on the gateway. Use `openai-codex/gpt-5.4` for OAuth, or set `OPENAI_API_KEY`, then try again.";
+    return "⚠️ Missing API key for OpenAI on the gateway. Use `openai-codex/gpt-5.5` for OAuth, or set `OPENAI_API_KEY`, then try again.";
   }
   if (SAFE_MISSING_API_KEY_PROVIDERS.has(provider)) {
     return `⚠️ Missing API key for provider "${provider}". Configure the gateway auth for that provider, then try again.`;
@@ -6,8 +6,8 @@ export {
 import { ensureModelAllowlistEntry } from "./provider-model-allowlist.js";
 import { applyAgentDefaultPrimaryModel } from "./provider-model-primary.js";

-export const OPENAI_DEFAULT_MODEL = "openai/gpt-5.4";
-export const OPENAI_CODEX_DEFAULT_MODEL = "openai-codex/gpt-5.4";
+export const OPENAI_DEFAULT_MODEL = "openai/gpt-5.5";
+export const OPENAI_CODEX_DEFAULT_MODEL = "openai-codex/gpt-5.5";
 export const OPENAI_DEFAULT_IMAGE_MODEL = "gpt-image-2";
 export const OPENAI_DEFAULT_TTS_MODEL = "gpt-4o-mini-tts";
 export const OPENAI_DEFAULT_TTS_VOICE = "alloy";