fix(agents): repair codex responses tool args (#75281)

This commit is contained in:
Peter Steinberger
2026-05-02 11:21:07 +01:00
parent edca8c721a
commit 68bfdb4d29
3 changed files with 93 additions and 56 deletions

View File

@@ -39,6 +39,7 @@ Docs: https://docs.openclaw.ai
- Model commands: clarify direct and inline `/model` acknowledgements for non-default selections as session-scoped. Thanks @addu2612.
- Doctor/gateway: stop warning that non-existent, unconfigured user-bin directories are required in the Gateway service PATH. Fixes #76017. Thanks @xiphis.
- TUI/chat: skip full provider model normalization during context-window warmup while preserving provider-owned context metadata, avoiding cold-start stalls with large model registries. Thanks @547895019.
- Agents: enable malformed tool-call argument repair for Codex and Azure OpenAI Responses transports while keeping generic OpenAI Responses paths out of the repair gate. Fixes #75154. Thanks @Nimraakram22.
- Memory Wiki: accept relative Markdown links that include the `.md` suffix during broken-wikilink validation, avoiding false positives for native render-mode links. Thanks @Kenneth8128.
- OpenAI Codex: show the device-pairing code in the interactive SSH/headless prompt while keeping the short-lived code out of persistent runtime logs. Fixes #74212. Thanks @da22le123.
- QA Lab: stop gateway children when the suite parent disappears, so interrupted local QA runs cannot leave hot orphaned gateways behind.

View File

@@ -83,63 +83,94 @@ describe("shouldRepairMalformedToolCallArguments", () => {
}),
).toBe(false);
});
it("does not enable the repair for direct OpenAI responses", () => {
  // Generic OpenAI Responses traffic stays outside the repair gate.
  const directResponses = { provider: "openai", modelApi: "openai-responses" };
  const enabled = shouldRepairMalformedToolCallArguments(directResponses);
  expect(enabled).toBe(false);
});
it("enables the repair for Codex and Azure Responses transports", () => {
  // Both dedicated Responses transports opt in to argument repair.
  const repairableTransports = [
    { provider: "openai-codex", modelApi: "openai-codex-responses" },
    { provider: "azure-openai-responses", modelApi: "azure-openai-responses" },
  ];
  for (const transport of repairableTransports) {
    expect(shouldRepairMalformedToolCallArguments(transport)).toBe(true);
  }
});
});
describe("openai-completions malformed tool-call argument repair", () => {
  // Exercises the repair path for every transport that opts into it.
  // The fake stream wraps the JSON arguments in leading junk
  // (".functions.read:0 ") and a trailing stray character ("x"); after the
  // run, every view of the tool call (partial, streamed, end-message, final)
  // must carry the cleanly parsed arguments.
  it.each([
    ["openai-completions", "sglang"],
    ["openai-codex-responses", "openai-codex"],
    ["azure-openai-responses", "azure-openai-responses"],
  ])(
    "repairs fragmented %s function-call args before tool execution",
    async (modelApi, provider) => {
      const partialToolCall = { type: "functionCall", name: "read", arguments: {} };
      const streamedToolCall = { type: "functionCall", name: "read", arguments: {} };
      const endMessageToolCall = { type: "functionCall", name: "read", arguments: {} };
      const finalToolCall = { type: "functionCall", name: "read", arguments: {} };
      const partialMessage = { role: "assistant", content: [partialToolCall] };
      const endMessage = { role: "assistant", content: [endMessageToolCall] };
      const finalMessage = { role: "assistant", content: [finalToolCall] };
      const stream = await invokeProviderStream({
        provider,
        modelApi,
        baseFn: () =>
          createFakeStream({
            events: [
              {
                type: "toolcall_delta",
                contentIndex: 0,
                delta: ".functions.read:0 ",
                partial: partialMessage,
              },
              {
                type: "toolcall_delta",
                contentIndex: 0,
                delta: '{"path":"/tmp/report.txt"',
                partial: partialMessage,
              },
              {
                type: "toolcall_delta",
                contentIndex: 0,
                delta: "}x",
                partial: partialMessage,
              },
              {
                type: "toolcall_end",
                contentIndex: 0,
                toolCall: streamedToolCall,
                partial: partialMessage,
                message: endMessage,
              },
            ],
            resultMessage: finalMessage,
          }),
      });
      for await (const _item of stream) {
        // drain
      }
      const result = await stream.result();
      expect(partialToolCall.arguments).toEqual({ path: "/tmp/report.txt" });
      expect(streamedToolCall.arguments).toEqual({ path: "/tmp/report.txt" });
      expect(endMessageToolCall.arguments).toEqual({ path: "/tmp/report.txt" });
      expect(finalToolCall.arguments).toEqual({ path: "/tmp/report.txt" });
      expect(result).toBe(finalMessage);
    },
  );
});

View File

@@ -18,6 +18,10 @@ const MAX_TOOLCALL_REPAIR_LEADING_CHARS = 96;
const MAX_TOOLCALL_REPAIR_TRAILING_CHARS = 3;
const TOOLCALL_REPAIR_ALLOWED_LEADING_RE = /^[a-z0-9\s"'`.:/_\\-]+$/i;
const TOOLCALL_REPAIR_ALLOWED_TRAILING_RE = /^[^\s{}[\]":,\\]{1,3}$/;
// Responses-style model APIs that opt in to malformed tool-call argument
// repair (the generic "openai-responses" API is deliberately absent).
const TOOLCALL_REPAIR_RESPONSES_APIS = new Set<string>([
  "azure-openai-responses",
  "openai-codex-responses",
]);
function shouldAttemptMalformedToolCallRepair(partialJson: string, delta: string): boolean {
if (/[}\]]/.test(delta)) {
@@ -298,10 +302,11 @@ export function shouldRepairMalformedToolCallArguments(params: {
provider?: string;
modelApi?: string | null;
}): boolean {
const modelApi = params.modelApi ?? "";
return (
(normalizeProviderId(params.provider ?? "") === "kimi" &&
params.modelApi === "anthropic-messages") ||
params.modelApi === "openai-completions"
(normalizeProviderId(params.provider ?? "") === "kimi" && modelApi === "anthropic-messages") ||
modelApi === "openai-completions" ||
TOOLCALL_REPAIR_RESPONSES_APIS.has(modelApi)
);
}