fix: align Codex Responses instructions payload

Peter Steinberger
2026-04-25 09:30:08 +01:00
parent bf0221c5b3
commit bc21f500d4
3 changed files with 68 additions and 1 deletion


@@ -18,6 +18,8 @@ Docs: https://docs.openclaw.ai
- Browser/Linux: detect Chromium-based installs under `/opt/google`, `/opt/brave.com`, `/usr/lib/chromium`, and `/usr/lib/chromium-browser` before asking users to set `browser.executablePath`. (#48563) Thanks @lupuletic.
- Sessions/browser: close tracked browser tabs when an idle, daily, `/new`, or `/reset` session rollover archives the previous transcript, preventing tabs from leaking past the old session. Thanks @jakozloski.
- Sessions/forking: fall back to transcript-estimated parent token counts when cached totals are stale or missing, so oversized thread forks start fresh instead of cloning the full parent transcript. Thanks @jalehman.
- OpenAI/Codex: send Codex Responses system prompts through the top-level `instructions` field while preserving the existing native Codex payload controls (see the payload sketch after this list).
- MCP/CLI: retire bundled MCP runtimes at the end of one-shot `openclaw agent` and `openclaw infer model run` gateway/local executions, so repeated scripted runs do not accumulate stdio MCP child processes. Fixes #71457.
- OpenAI/Codex image generation: canonicalize legacy `openai-codex.baseUrl` values such as `https://chatgpt.com/backend-api` to the Codex Responses backend before calling `gpt-image-2`, matching the chat transport (a hedged canonicalization sketch also follows the list). Fixes #71460.
- Control UI: make `/usage` use the fresh context snapshot for context percentage, and include cache-write tokens in the Usage overview cache-hit denominator. Fixes #47885. Thanks @imwyvern and @Ante042.
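
For the Codex Responses entry above, a minimal sketch of where the system prompt lands before and after the change, assuming a simplified params shape and sample prompt text (field names follow the test below; the `before` shape is inferred from the developer-role input item the test rules out):

// Illustrative sketch only, not the shipped builder. The sample prompt
// text and the simplified SketchParams shape are assumptions.
type SketchParams = {
  instructions?: string;
  input: Array<{ role: string; content: string }>;
};

// Before: the system prompt rode along inside `input` as a
// system/developer item.
const before: SketchParams = {
  input: [
    { role: "developer", content: "System prompt text" },
    { role: "user", content: "Hello" },
  ],
};

// After: Codex Responses models receive it as top-level `instructions`,
// and no system/developer item is emitted into `input`.
const after: SketchParams = {
  instructions: "System prompt text",
  input: [{ role: "user", content: "Hello" }],
};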
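And a hedged sketch of the image-generation baseUrl canonicalization; the helper name and the target URL are assumptions, since the entry only states that legacy ChatGPT backend URLs map to the Codex Responses backend:

// Hypothetical helper; the name and target constant are assumptions.
const CODEX_RESPONSES_BASE_URL = "https://chatgpt.com/backend-api/codex"; // assumed target
function canonicalizeCodexBaseUrl(baseUrl: string): string {
  // Map the legacy chat backend root onto the Codex Responses backend,
  // matching what the chat transport already does per the entry above.
  return baseUrl.replace(/\/+$/, "") === "https://chatgpt.com/backend-api"
    ? CODEX_RESPONSES_BASE_URL
    : baseUrl;
}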


@@ -709,6 +709,57 @@ describe("openai transport stream", () => {
    expect(params.input?.[0]).toMatchObject({ role: "developer" });
  });

  it("uses top-level instructions for Codex responses without dropping parity fields", () => {
    const params = buildOpenAIResponsesParams(
      {
        id: "gpt-5.4",
        name: "GPT-5.4",
        api: "openai-codex-responses",
        provider: "openai-codex",
        baseUrl: "https://chatgpt.com/backend-api",
        reasoning: true,
        input: ["text"],
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
        contextWindow: 200000,
        maxTokens: 8192,
      } satisfies Model<"openai-codex-responses">,
      {
        systemPrompt: `Stable prefix${SYSTEM_PROMPT_CACHE_BOUNDARY}Dynamic suffix`,
        messages: [{ role: "user", content: "Hello", timestamp: 1 }],
        tools: [],
      } as never,
      {
        cacheRetention: "long",
        maxTokens: 1024,
        serviceTier: "auto",
        sessionId: "session-123",
        temperature: 0.2,
      },
      {
        openclaw_session_id: "session-123",
        openclaw_turn_id: "turn-123",
      },
    ) as Record<string, unknown> & {
      input?: Array<{ role?: string }>;
      instructions?: string;
    };

    expect(params.instructions).toBe("Stable prefix\nDynamic suffix");
    expect(
      params.input?.some((item) => item.role === "system" || item.role === "developer"),
    ).toBe(false);
    expect(params.prompt_cache_key).toBe("session-123");
    expect(params.prompt_cache_retention).toBeUndefined();
    expect(params.metadata).toEqual({
      openclaw_session_id: "session-123",
      openclaw_turn_id: "turn-123",
    });
    expect(params.store).toBe(false);
    expect(params.max_output_tokens).toBe(1024);
    expect(params.temperature).toBe(0.2);
    expect(params.service_tier).toBe("auto");
  });

  it("does not infer high reasoning when Pi passes thinking off", () => {
    const params = buildOpenAIResponsesParams(
      {

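The `"Stable prefix\nDynamic suffix"` expectation in the test above implies that `stripSystemPromptCacheBoundary` replaces the cache-boundary marker with a newline. A sketch consistent with that behavior; the marker's concrete value and the helper body are assumptions:

// Consistent with the test expectation, not the shipped helper.
// SYSTEM_PROMPT_CACHE_BOUNDARY's concrete value is an assumption.
const SYSTEM_PROMPT_CACHE_BOUNDARY = "\u0000cache-boundary\u0000";

function stripSystemPromptCacheBoundary(prompt: string): string {
  // Joining the segments with "\n" turns
  // `Stable prefix${SYSTEM_PROMPT_CACHE_BOUNDARY}Dynamic suffix`
  // into "Stable prefix\nDynamic suffix", as the test asserts.
  return prompt.split(SYSTEM_PROMPT_CACHE_BOUNDARY).join("\n");
}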

@@ -828,12 +828,24 @@ function raiseMinimalReasoningForResponsesWebSearch(params: {
  return params.effort;
}

function isOpenAICodexResponsesModel(model: Model<Api>): boolean {
  return model.provider === "openai-codex" && model.api === "openai-codex-responses";
}

// Codex Responses carries the system prompt as a top-level `instructions`
// string: collapse the cache boundary and sanitize the text before sending.
function buildOpenAICodexResponsesInstructions(context: Context): string | undefined {
  if (!context.systemPrompt) {
    return undefined;
  }
  return sanitizeTransportPayloadText(stripSystemPromptCacheBoundary(context.systemPrompt));
}

export function buildOpenAIResponsesParams(
  model: Model<Api>,
  context: Context,
  options: OpenAIResponsesOptions | undefined,
  metadata?: Record<string, string>,
) {
  const isCodexResponses = isOpenAICodexResponsesModel(model);
  const compat = getCompat(model as OpenAIModeModel);
  const supportsDeveloperRole =
    typeof compat.supportsDeveloperRole === "boolean" ? compat.supportsDeveloperRole : undefined;
@@ -841,7 +853,7 @@ export function buildOpenAIResponsesParams(
    model,
    context,
    new Set(["openai", "openai-codex", "opencode", "azure-openai-responses"]),
    { supportsDeveloperRole },
    { includeSystemPrompt: !isCodexResponses, supportsDeveloperRole },
  );
  const cacheRetention = resolveCacheRetention(options?.cacheRetention);
  const payloadPolicy = resolveOpenAIResponsesPayloadPolicy(model, {
@@ -853,6 +865,7 @@ export function buildOpenAIResponsesParams(
    stream: true,
    prompt_cache_key: cacheRetention === "none" ? undefined : options?.sessionId,
    prompt_cache_retention: getPromptCacheRetention(model.baseUrl, cacheRetention),
    ...(isCodexResponses ? { instructions: buildOpenAICodexResponsesInstructions(context) } : {}),
    ...(metadata ? { metadata } : {}),
  };
  if (options?.maxTokens) {
@@ -1566,6 +1579,7 @@ type OpenAIResponsesRequestParams = {
  model: string;
  input: ResponseInput;
  stream: true;
  instructions?: string;
  prompt_cache_key?: string;
  prompt_cache_retention?: "24h";
  metadata?: Record<string, string>;
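
Assembled from the test expectations earlier in the commit, the params built for a Codex Responses model would look roughly like this (illustrative only; the fields shown are limited to those visible in the type above, and `store`, `max_output_tokens`, `temperature`, and `service_tier` are also asserted by the test):

// Illustrative shape assembled from the asserted values; not captured output.
const exampleCodexParams: Partial<OpenAIResponsesRequestParams> = {
  model: "gpt-5.4",
  stream: true,
  instructions: "Stable prefix\nDynamic suffix",
  prompt_cache_key: "session-123",
  // prompt_cache_retention stays undefined for this backend
  metadata: {
    openclaw_session_id: "session-123",
    openclaw_turn_id: "turn-123",
  },
};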