Pi Runner: gate parallel_tool_calls to compatible APIs (#39356)

* Pi Runner: gate parallel_tool_calls payload injection

* Pi Runner: cover parallel_tool_calls alias precedence

* Changelog: note parallel_tool_calls compatibility fix

* Update CHANGELOG.md

* Pi Runner: clarify null parallel_tool_calls override logging
This commit is contained in:
Vincent Koc
2026-03-07 20:57:53 -05:00
committed by GitHub
parent 2c7fb54956
commit daecd2d8c3
3 changed files with 315 additions and 3 deletions

View File

@@ -332,6 +332,7 @@ Docs: https://docs.openclaw.ai
- Discord/DM session-key normalization: rewrite legacy `discord:dm:*` and phantom direct-message `discord:channel:<user>` session keys to `discord:direct:*` when the sender matches, so multi-agent Discord DMs stop falling into empty channel-shaped sessions and resume replying correctly.
- Discord/native slash session fallback: treat empty configured bound-session keys as missing so `/status` and other native commands fall back to the routed slash session and routed channel session instead of blanking Discord session keys in normal channel bindings.
- Agents/tool-call dispatch normalization: normalize provider-prefixed tool names before dispatch across `toolCall`, `toolUse`, and `functionCall` blocks, while preserving multi-segment tool suffixes when stripping provider wrappers so malformed-but-recoverable tool names no longer fail with `Tool not found`. (#39328) Thanks @vincentkoc.
- Agents/parallel tool-call compatibility: honor `parallel_tool_calls` / `parallelToolCalls` extra params only for `openai-completions` and `openai-responses` payloads, preserve higher-precedence alias overrides across config and runtime layers, and ignore invalid non-boolean values so single-tool-call providers like NVIDIA-hosted Kimi stop failing on forced parallel tool-call payloads. (#37048) Thanks @vincentkoc.
- Config/invalid-load fail-closed: stop converting `INVALID_CONFIG` into an empty runtime config, keep valid settings available only through explicit best-effort diagnostic reads, and route read-only CLI diagnostics through that path so unknown keys no longer silently drop security-sensitive config. (#28140) Thanks @bobsahur-robot and @vincentkoc.
## 2026.3.2

View File

@@ -116,6 +116,39 @@ describe("resolveExtraParams", () => {
});
});
it("preserves higher-precedence agent parallelToolCalls override across alias styles", () => {
const result = resolveExtraParams({
cfg: {
agents: {
defaults: {
models: {
"openai/gpt-4.1": {
params: {
parallel_tool_calls: true,
},
},
},
},
list: [
{
id: "main",
params: {
parallelToolCalls: false,
},
},
],
},
},
provider: "openai",
modelId: "gpt-4.1",
agentId: "main",
});
expect(result).toEqual({
parallel_tool_calls: false,
});
});
it("ignores per-agent params when agentId does not match", () => {
const result = resolveExtraParams({
cfg: {
@@ -190,6 +223,32 @@ describe("applyExtraParamsToAgent", () => {
return payload;
}
// Runs applyExtraParamsToAgent over a stub agent whose stream fn immediately
// hands a payload object to onPayload, then invokes the (possibly wrapped)
// stream fn once and returns that payload so callers can assert on whatever
// mutation the configured wrappers applied.
function runParallelToolCallsPayloadMutationCase(params: {
  applyProvider: string;
  applyModelId: string;
  model: Model<"openai-completions"> | Model<"openai-responses"> | Model<"anthropic-messages">;
  cfg?: Record<string, unknown>;
  extraParamsOverride?: Record<string, unknown>;
  payload?: Record<string, unknown>;
}) {
  const observedPayload = params.payload ?? {};
  const stubStreamFn: StreamFn = (_model, _context, options) => {
    options?.onPayload?.(observedPayload);
    return {} as ReturnType<StreamFn>;
  };
  const agent = { streamFn: stubStreamFn };
  applyExtraParamsToAgent(
    agent,
    params.cfg as Parameters<typeof applyExtraParamsToAgent>[1],
    params.applyProvider,
    params.applyModelId,
    params.extraParamsOverride,
  );
  const emptyContext: Context = { messages: [] };
  void agent.streamFn?.(params.model, emptyContext, {});
  return observedPayload;
}
function runAnthropicHeaderCase(params: {
cfg: Record<string, unknown>;
modelId: string;
@@ -350,6 +409,181 @@ describe("applyExtraParamsToAgent", () => {
expect(payloads[0]).not.toHaveProperty("reasoning_effort");
});
it("injects parallel_tool_calls for openai-completions payloads when configured", () => {
const payload = runParallelToolCallsPayloadMutationCase({
applyProvider: "nvidia-nim",
applyModelId: "moonshotai/kimi-k2.5",
cfg: {
agents: {
defaults: {
models: {
"nvidia-nim/moonshotai/kimi-k2.5": {
params: {
parallel_tool_calls: false,
},
},
},
},
},
},
model: {
api: "openai-completions",
provider: "nvidia-nim",
id: "moonshotai/kimi-k2.5",
} as Model<"openai-completions">,
});
expect(payload.parallel_tool_calls).toBe(false);
});
it("injects parallel_tool_calls for openai-responses payloads when configured", () => {
const payload = runParallelToolCallsPayloadMutationCase({
applyProvider: "openai",
applyModelId: "gpt-5",
cfg: {
agents: {
defaults: {
models: {
"openai/gpt-5": {
params: {
parallelToolCalls: true,
},
},
},
},
},
},
model: {
api: "openai-responses",
provider: "openai",
id: "gpt-5",
baseUrl: "https://api.openai.com/v1",
} as unknown as Model<"openai-responses">,
});
expect(payload.parallel_tool_calls).toBe(true);
});
it("does not inject parallel_tool_calls for unsupported APIs", () => {
const payload = runParallelToolCallsPayloadMutationCase({
applyProvider: "anthropic",
applyModelId: "claude-sonnet-4-6",
cfg: {
agents: {
defaults: {
models: {
"anthropic/claude-sonnet-4-6": {
params: {
parallel_tool_calls: false,
},
},
},
},
},
},
model: {
api: "anthropic-messages",
provider: "anthropic",
id: "claude-sonnet-4-6",
} as Model<"anthropic-messages">,
});
expect(payload).not.toHaveProperty("parallel_tool_calls");
});
it("lets runtime override win across alias styles for parallel_tool_calls", () => {
const payload = runParallelToolCallsPayloadMutationCase({
applyProvider: "nvidia-nim",
applyModelId: "moonshotai/kimi-k2.5",
cfg: {
agents: {
defaults: {
models: {
"nvidia-nim/moonshotai/kimi-k2.5": {
params: {
parallel_tool_calls: true,
},
},
},
},
},
},
extraParamsOverride: {
parallelToolCalls: false,
},
model: {
api: "openai-completions",
provider: "nvidia-nim",
id: "moonshotai/kimi-k2.5",
} as Model<"openai-completions">,
});
expect(payload.parallel_tool_calls).toBe(false);
});
it("lets null runtime override suppress inherited parallel_tool_calls injection", () => {
const payload = runParallelToolCallsPayloadMutationCase({
applyProvider: "nvidia-nim",
applyModelId: "moonshotai/kimi-k2.5",
cfg: {
agents: {
defaults: {
models: {
"nvidia-nim/moonshotai/kimi-k2.5": {
params: {
parallel_tool_calls: true,
},
},
},
},
},
},
extraParamsOverride: {
parallelToolCalls: null,
},
model: {
api: "openai-completions",
provider: "nvidia-nim",
id: "moonshotai/kimi-k2.5",
} as Model<"openai-completions">,
});
expect(payload).not.toHaveProperty("parallel_tool_calls");
});
it("warns and skips invalid parallel_tool_calls values", () => {
const warnSpy = vi.spyOn(log, "warn").mockImplementation(() => undefined);
try {
const payload = runParallelToolCallsPayloadMutationCase({
applyProvider: "nvidia-nim",
applyModelId: "moonshotai/kimi-k2.5",
cfg: {
agents: {
defaults: {
models: {
"nvidia-nim/moonshotai/kimi-k2.5": {
params: {
parallelToolCalls: "false",
},
},
},
},
},
},
model: {
api: "openai-completions",
provider: "nvidia-nim",
id: "moonshotai/kimi-k2.5",
} as Model<"openai-completions">,
});
expect(payload).not.toHaveProperty("parallel_tool_calls");
expect(warnSpy).toHaveBeenCalledWith("ignoring invalid parallel_tool_calls param: false");
} finally {
warnSpy.mockRestore();
}
});
it("normalizes thinking=off to null for SiliconFlow Pro models", () => {
const payloads: Record<string, unknown>[] = [];
const baseStreamFn: StreamFn = (_model, _context, options) => {

View File

@@ -49,7 +49,18 @@ export function resolveExtraParams(params: {
return undefined;
}
return Object.assign({}, globalParams, agentParams);
const merged = Object.assign({}, globalParams, agentParams);
const resolvedParallelToolCalls = resolveAliasedParamValue(
[globalParams, agentParams],
"parallel_tool_calls",
"parallelToolCalls",
);
if (resolvedParallelToolCalls !== undefined) {
merged.parallel_tool_calls = resolvedParallelToolCalls;
delete merged.parallelToolCalls;
}
return merged;
}
// Cache-retention policy selector. NOTE(review): member semantics are inferred
// from the names only ("none" = no retention, "short"/"long" = retention
// windows) — confirm against the code that consumes this type.
type CacheRetention = "none" | "short" | "long";
@@ -1108,6 +1119,53 @@ function createZaiToolStreamWrapper(
};
}
/**
 * Resolves a param that may be spelled in snake_case or camelCase across a
 * precedence-ordered list of sources (lowest precedence first).
 *
 * The highest-precedence source that declares either spelling wins; within a
 * single source the snake_case spelling takes priority over camelCase.
 * Returns `undefined` when no source declares either key (an explicitly
 * declared `undefined` value is indistinguishable from "not declared").
 */
function resolveAliasedParamValue(
  sources: Array<Record<string, unknown> | undefined>,
  snakeCaseKey: string,
  camelCaseKey: string,
): unknown {
  const hasOwn = Object.prototype.hasOwnProperty;
  // Walk from the highest-precedence (last) source backwards and return the
  // first declared value found.
  for (let i = sources.length - 1; i >= 0; i--) {
    const candidate = sources[i];
    if (!candidate) {
      continue;
    }
    if (hasOwn.call(candidate, snakeCaseKey)) {
      return candidate[snakeCaseKey];
    }
    if (hasOwn.call(candidate, camelCaseKey)) {
      return candidate[camelCaseKey];
    }
  }
  return undefined;
}
/**
 * Wraps a stream fn so that outgoing payloads carry `parallel_tool_calls`,
 * but only for the two OpenAI-shaped payload formats; all other APIs pass
 * through the underlying stream fn untouched.
 */
function createParallelToolCallsWrapper(
  baseStreamFn: StreamFn | undefined,
  enabled: boolean,
): StreamFn {
  const delegate = baseStreamFn ?? streamSimple;
  return (model, context, options) => {
    const supportsFlag = model.api === "openai-completions" || model.api === "openai-responses";
    if (!supportsFlag) {
      return delegate(model, context, options);
    }
    log.debug(
      `applying parallel_tool_calls=${enabled} for ${model.provider ?? "unknown"}/${model.id ?? "unknown"} api=${model.api}`,
    );
    const chainedOnPayload = options?.onPayload;
    return delegate(model, context, {
      ...options,
      onPayload: (payload) => {
        // Inject the flag before handing the payload to any pre-existing hook.
        if (payload && typeof payload === "object") {
          (payload as Record<string, unknown>).parallel_tool_calls = enabled;
        }
        chainedOnPayload?.(payload);
      },
    });
  };
}
/**
* Apply extra params (like temperature) to an agent's streamFn.
* Also adds OpenRouter app attribution headers when using the OpenRouter provider.
@@ -1123,7 +1181,7 @@ export function applyExtraParamsToAgent(
thinkingLevel?: ThinkLevel,
agentId?: string,
): void {
const extraParams = resolveExtraParams({
const resolvedExtraParams = resolveExtraParams({
cfg,
provider,
modelId,
@@ -1142,7 +1200,7 @@ export function applyExtraParamsToAgent(
Object.entries(extraParamsOverride).filter(([, value]) => value !== undefined),
)
: undefined;
const merged = Object.assign({}, extraParams, override);
const merged = Object.assign({}, resolvedExtraParams, override);
const wrappedStreamFn = createStreamFnWithExtraParams(agent.streamFn, merged, provider);
if (wrappedStreamFn) {
@@ -1238,4 +1296,23 @@ export function applyExtraParamsToAgent(
// Force `store=true` for direct OpenAI Responses models and auto-enable
// server-side compaction for compatible OpenAI Responses payloads.
agent.streamFn = createOpenAIResponsesContextManagementWrapper(agent.streamFn, merged);
const rawParallelToolCalls = resolveAliasedParamValue(
[resolvedExtraParams, override],
"parallel_tool_calls",
"parallelToolCalls",
);
if (rawParallelToolCalls !== undefined) {
if (typeof rawParallelToolCalls === "boolean") {
agent.streamFn = createParallelToolCallsWrapper(agent.streamFn, rawParallelToolCalls);
} else if (rawParallelToolCalls === null) {
log.debug("parallel_tool_calls suppressed by null override, skipping injection");
} else {
const summary =
typeof rawParallelToolCalls === "string"
? rawParallelToolCalls
: typeof rawParallelToolCalls;
log.warn(`ignoring invalid parallel_tool_calls param: ${summary}`);
}
}
}