test: update QA parity fixtures for GPT-5.5

This commit is contained in:
Peter Steinberger
2026-04-25 18:05:13 +01:00
parent 39343088ed
commit 6b3e4b88d6
59 changed files with 407 additions and 399 deletions

View File

@@ -58,7 +58,7 @@ type TranslationBatchItem = {
};
const CONTROL_UI_I18N_WORKFLOW = 1;
-const DEFAULT_OPENAI_MODEL = "gpt-5.4";
+const DEFAULT_OPENAI_MODEL = "gpt-5.5";
const DEFAULT_ANTHROPIC_MODEL = "claude-opus-4-6";
const DEFAULT_PROVIDER = "openai";
const DEFAULT_PI_PACKAGE_VERSION = "0.58.3";

View File

@@ -501,7 +501,7 @@ run_profile() {
local image_model
if [[ "$agent_model_provider" == "openai" ]]; then
agent_model="$(set_agent_model "$profile" \
-"openai/gpt-5.4" \
+"openai/gpt-5.5" \
"openai/gpt-4o-mini" \
"openai/gpt-4o")"
image_model="$(set_image_model "$profile" \

View File

@@ -183,13 +183,13 @@ func TestResolveDocsPiCommandUsesOverrideEnv(t *testing.T) {
func TestDocsPiModelRefUsesProviderPrefixWhenProviderFlagIsOmitted(t *testing.T) {
t.Setenv(envDocsI18nProvider, "openai")
-t.Setenv(envDocsI18nModel, "gpt-5.4")
+t.Setenv(envDocsI18nModel, "gpt-5.5")
t.Setenv(envDocsPiOmitProvider, "1")
if got := docsPiProviderArg(); got != "" {
t.Fatalf("expected empty provider arg when omit-provider is enabled, got %q", got)
}
-if got := docsPiModelRef(); got != "openai/gpt-5.4" {
+if got := docsPiModelRef(); got != "openai/gpt-5.5" {
t.Fatalf("expected provider-qualified model ref, got %q", got)
}
}

View File

@@ -14,7 +14,7 @@ const (
docsI18nEngineName = "pi"
envDocsI18nProvider = "OPENCLAW_DOCS_I18N_PROVIDER"
envDocsI18nModel = "OPENCLAW_DOCS_I18N_MODEL"
-defaultOpenAIModel = "gpt-5.4"
+defaultOpenAIModel = "gpt-5.5"
defaultAnthropicModel = "claude-opus-4-6"
defaultFallbackProvider = "openai"
defaultFallbackModelName = defaultOpenAIModel

View File

@@ -6,12 +6,12 @@ import {
export type { OpenClawConfig };
-const DOCKER_OPENAI_MODEL_REF = "openai/gpt-5.4";
+const DOCKER_OPENAI_MODEL_REF = "openai/gpt-5.5";
const DOCKER_OPENAI_BASE_URL =
process.env.OPENCLAW_DOCKER_OPENAI_BASE_URL?.trim() || "http://127.0.0.1:9/v1";
const DOCKER_OPENAI_MODEL: ModelDefinitionConfig = {
-id: "gpt-5.4",
-name: "gpt-5.4",
+id: "gpt-5.5",
+name: "gpt-5.5",
api: "openai-responses",
reasoning: true,
input: ["text", "image"],

View File

@@ -111,7 +111,7 @@ const server = http.createServer(async (req, res) => {
if (req.method === "GET" && url.pathname === "/v1/models") {
writeJson(res, 200, {
object: "list",
-data: [{ id: "gpt-5.4", object: "model", owned_by: "openclaw-e2e" }],
+data: [{ id: "gpt-5.5", object: "model", owned_by: "openclaw-e2e" }],
});
return;
}

View File

@@ -206,7 +206,7 @@ const path = require("node:path");
const mockPort = Number(process.argv[2]);
const configPath = path.join(process.env.HOME, ".openclaw", "openclaw.json");
const cfg = JSON.parse(fs.readFileSync(configPath, "utf8"));
-const modelRef = "openai/gpt-5.4";
+const modelRef = "openai/gpt-5.5";
const cost = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 };
cfg.models = {
@@ -222,8 +222,8 @@ cfg.models = {
request: { ...(cfg.models?.providers?.openai?.request || {}), allowPrivateNetwork: true },
models: [
{
-id: "gpt-5.4",
-name: "gpt-5.4",
+id: "gpt-5.5",
+name: "gpt-5.5",
api: "openai-responses",
reasoning: false,
input: ["text", "image"],

View File

@@ -200,7 +200,7 @@ case "$PROVIDER" in
openai)
AUTH_CHOICE="openai-api-key"
AUTH_KEY_FLAG="openai-api-key"
-MODEL_ID="openai/gpt-5.4"
+MODEL_ID="openai/gpt-5.5"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="OPENAI_API_KEY"
;;
anthropic)

View File

@@ -258,7 +258,7 @@ case "$PROVIDER" in
openai)
AUTH_CHOICE="openai-api-key"
AUTH_KEY_FLAG="openai-api-key"
-MODEL_ID="openai/gpt-5.4"
+MODEL_ID="openai/gpt-5.5"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="OPENAI_API_KEY"
;;
anthropic)

View File

@@ -206,7 +206,7 @@ case "$PROVIDER" in
openai)
AUTH_CHOICE="openai-api-key"
AUTH_KEY_FLAG="openai-api-key"
-MODEL_ID="openai/gpt-5.4"
+MODEL_ID="openai/gpt-5.5"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="OPENAI_API_KEY"
;;
anthropic)

View File

@@ -249,7 +249,7 @@ case "$PROVIDER" in
openai)
AUTH_CHOICE="openai-api-key"
AUTH_KEY_FLAG="openai-api-key"
-MODEL_ID="openai/gpt-5.4"
+MODEL_ID="openai/gpt-5.5"
[[ -n "$API_KEY_ENV" ]] || API_KEY_ENV="OPENAI_API_KEY"
;;
anthropic)

View File

@@ -35,7 +35,7 @@ const providerConfig = {
extensionId: "openai",
secretEnv: "OPENAI_API_KEY",
authChoice: "openai-api-key",
-model: "openai/gpt-5.4",
+model: "openai/gpt-5.5",
},
anthropic: {
extensionId: "anthropic",

View File

@@ -200,7 +200,7 @@ openclaw_live_codex_harness_append_build_extension codex
"$ROOT_DIR/scripts/test-live-build-docker.sh"
echo "==> Run Codex harness live test in Docker"
-echo "==> Model: ${OPENCLAW_LIVE_CODEX_HARNESS_MODEL:-codex/gpt-5.4}"
+echo "==> Model: ${OPENCLAW_LIVE_CODEX_HARNESS_MODEL:-codex/gpt-5.5}"
echo "==> Image probe: ${OPENCLAW_LIVE_CODEX_HARNESS_IMAGE_PROBE:-1}"
echo "==> MCP probe: ${OPENCLAW_LIVE_CODEX_HARNESS_MCP_PROBE:-1}"
echo "==> Guardian probe: ${OPENCLAW_LIVE_CODEX_HARNESS_GUARDIAN_PROBE:-1}"
@@ -227,7 +227,7 @@ DOCKER_RUN_ARGS=(docker run --rm -t \
-e OPENCLAW_LIVE_CODEX_HARNESS_GUARDIAN_PROBE="${OPENCLAW_LIVE_CODEX_HARNESS_GUARDIAN_PROBE:-1}" \
-e OPENCLAW_LIVE_CODEX_HARNESS_IMAGE_PROBE="${OPENCLAW_LIVE_CODEX_HARNESS_IMAGE_PROBE:-1}" \
-e OPENCLAW_LIVE_CODEX_HARNESS_MCP_PROBE="${OPENCLAW_LIVE_CODEX_HARNESS_MCP_PROBE:-1}" \
-  -e OPENCLAW_LIVE_CODEX_HARNESS_MODEL="${OPENCLAW_LIVE_CODEX_HARNESS_MODEL:-codex/gpt-5.4}" \
+  -e OPENCLAW_LIVE_CODEX_HARNESS_MODEL="${OPENCLAW_LIVE_CODEX_HARNESS_MODEL:-codex/gpt-5.5}" \
-e OPENCLAW_LIVE_CODEX_HARNESS_REQUIRE_GUARDIAN_EVENTS="${OPENCLAW_LIVE_CODEX_HARNESS_REQUIRE_GUARDIAN_EVENTS:-1}" \
-e OPENCLAW_LIVE_CODEX_HARNESS_REQUEST_TIMEOUT_MS="${OPENCLAW_LIVE_CODEX_HARNESS_REQUEST_TIMEOUT_MS:-}" \
-e OPENCLAW_LIVE_CODEX_HARNESS_USE_CI_SAFE_CODEX_CONFIG="${OPENCLAW_LIVE_CODEX_HARNESS_USE_CI_SAFE_CODEX_CONFIG:-1}" \