mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-07 10:10:43 +00:00
* test: add pi codex runtime contract coverage * test: expand pi codex tool runtime contracts * test: tighten tool runtime contracts * test: reset tool contract param cache * test: document codex tool middleware fixture * test: type pi tool contract events * test: satisfy pi tool contract test types * test: cover tool media telemetry contracts * test: reset plugin runtime after tool contracts * test: add auth profile runtime contracts * test: strengthen auth profile runtime contracts * test: clarify auth profile contract fixtures * test: expand auth profile contract matrix * test: assert unrelated cli auth isolation * test: expand auth profile contract matrix * test: tighten auth profile contract expectations * test: add outcome fallback runtime contracts * test: strengthen outcome fallback contracts * test: isolate outcome fallback contracts * test: cover codex terminal outcome signals * test: expand terminal fallback contracts * test: add delivery no reply runtime contracts * test: document json no-reply delivery gap * test: align delivery contract fixtures * test: add transcript repair runtime contracts * test: tighten transcript repair contracts * test: add prompt overlay runtime contracts * test: tighten prompt overlay contract scope * test: type prompt overlay contracts * test: add schema normalization runtime contracts * test: clarify schema normalization contract gaps * test: simplify schema normalization contracts * test: tighten schema normalization contract gaps * test: cover compaction schema contract * test: satisfy schema contract lint * test: add transport params runtime contracts * test: tighten transport params contract scope * test: isolate transport params contracts * test: lock exact transport defaults * feat: add agent runtime plan foundation * fix: preserve codex harness auth profiles * fix: route followup delivery through runtime plan * fix: normalize parameter-free openai tool schemas * fix: satisfy runtime plan type checks * fix: narrow 
followup delivery runtime planning * fix: apply codex app-server auth profiles * fix: classify codex terminal outcomes * fix: prevent harness auth leakage into unrelated cli providers * feat: expand agent runtime plan policy contract * fix: route pi runtime policy through runtime plan * fix: route codex runtime policy through runtime plan * fix: route fallback outcome classification through runtime plan * refactor: make runtime plan contracts topology-safe * fix: restore runtime plan test type coverage * fix: align runtime plan schema contract assertions * fix: stabilize incomplete turn runtime tests * fix: stabilize codex native web search test * fix: preserve codex auth profile secret refs * fix: keep runtime resolved refs canonical * fix: preserve permissive nested openai schemas * fix: accept Codex auth provider aliases * test: update media-only groups mock * fix: resolve runtime plan rebase checks * fix: resolve runtime plan rebase checks --------- Co-authored-by: Eva <eva@100yen.org> Co-authored-by: Peter Steinberger <steipete@gmail.com>
240 lines
7.4 KiB
TypeScript
240 lines
7.4 KiB
TypeScript
import type { StreamFn } from "@mariozechner/pi-agent-core";
|
|
import type { Context, Model, SimpleStreamOptions } from "@mariozechner/pi-ai";
|
|
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
|
import {
|
|
GPT_PARALLEL_TOOL_CALLS_PAYLOAD_APIS,
|
|
NON_OPENAI_GPT5_TRANSPORT_CASE,
|
|
OPENAI_GPT5_TRANSPORT_DEFAULT_CASES,
|
|
OPENAI_GPT5_TRANSPORT_DEFAULTS,
|
|
UNRELATED_TOOL_CALLS_PAYLOAD_APIS,
|
|
} from "../../test/helpers/agents/transport-params-runtime-contract.js";
|
|
import {
|
|
__testing as extraParamsTesting,
|
|
applyExtraParamsToAgent,
|
|
resolveExtraParams,
|
|
resolvePreparedExtraParams,
|
|
} from "./pi-embedded-runner/extra-params.js";
|
|
import { createOpenAIThinkingLevelWrapper } from "./pi-embedded-runner/openai-stream-wrappers.js";
|
|
import { supportsGptParallelToolCallsPayload } from "./provider-api-families.js";
|
|
|
|
beforeEach(() => {
|
|
installNoopProviderRuntimeDeps();
|
|
});
|
|
|
|
afterEach(() => {
|
|
extraParamsTesting.resetProviderRuntimeDepsForTest();
|
|
});
|
|
|
|
// Runtime contract tests for how extra transport parameters are resolved,
// normalized, and applied to the Pi/OpenAI streaming path.
describe("transport params runtime contract (Pi/OpenAI path)", () => {
  // With no config, each OpenAI GPT-5 case must resolve to the shared
  // transport-default constant (exact match, not a superset).
  it.each(OPENAI_GPT5_TRANSPORT_DEFAULT_CASES)(
    "applies OpenAI GPT-5 transport defaults for $provider/$modelId",
    ({ provider, modelId }) => {
      expect(resolveExtraParams({ cfg: undefined, provider, modelId })).toEqual(
        OPENAI_GPT5_TRANSPORT_DEFAULTS,
      );
    },
  );

  // A non-OpenAI provider/model pair must resolve to nothing — the GPT-5
  // defaults are scoped to OpenAI only.
  it("does not leak OpenAI GPT-5 defaults to non-OpenAI providers", () => {
    expect(
      resolveExtraParams({
        cfg: undefined,
        provider: NON_OPENAI_GPT5_TRANSPORT_CASE.provider,
        modelId: NON_OPENAI_GPT5_TRANSPORT_CASE.modelId,
      }),
    ).toBeUndefined();
  });

  // camelCase aliases (parallelToolCalls, textVerbosity) are normalized to
  // their snake_case wire names, while unrelated keys (cached_content,
  // openaiWsWarmup) pass through unchanged — note cached_content comes back
  // as cachedContent, i.e. normalization also runs in the other direction
  // for that key.
  it("normalizes aliased caller params without losing explicit overrides", () => {
    const cfg = {
      agents: {
        defaults: {
          models: {
            "openai/gpt-5.4": {
              params: {
                parallelToolCalls: false,
                textVerbosity: "medium",
                cached_content: "conversation-cache",
                openaiWsWarmup: true,
              },
            },
          },
        },
      },
    };

    expect(resolveExtraParams({ cfg, provider: "openai", modelId: "gpt-5.4" })).toEqual({
      parallel_tool_calls: false,
      text_verbosity: "medium",
      cachedContent: "conversation-cache",
      openaiWsWarmup: true,
    });
  });

  // APIs listed in the helper fixture must report support for the
  // parallel_tool_calls payload patch…
  it.each(GPT_PARALLEL_TOOL_CALLS_PAYLOAD_APIS)(
    "advertises %s as accepting the GPT parallel_tool_calls payload patch",
    (api) => {
      expect(supportsGptParallelToolCallsPayload(api)).toBe(true);
    },
  );

  // …and APIs outside that list must not.
  it.each(UNRELATED_TOOL_CALLS_PAYLOAD_APIS)(
    "does not advertise %s as accepting the GPT parallel_tool_calls payload patch",
    (api) => {
      expect(supportsGptParallelToolCallsPayload(api)).toBe(false);
    },
  );

  // With no config at all, applying extra params for openai-codex must still
  // inject parallel_tool_calls: true into the outgoing Responses payload.
  it("injects parallel_tool_calls into openai-codex Responses payloads", () => {
    const payload = runPayloadMutation({
      applyProvider: "openai-codex",
      applyModelId: "gpt-5.4",
      model: {
        api: "openai-codex-responses",
        provider: "openai-codex",
        id: "gpt-5.4",
      } as Model<"openai-codex-responses">,
    });

    expect(payload.parallel_tool_calls).toBe(true);
  });

  // The wrapped stream function must receive the GPT-5 warmup default
  // (openaiWsWarmup: false) in its stream options on the openai path.
  it("propagates OpenAI GPT-5 warmup default through stream options", () => {
    const { agent, calls } = createOptionsCaptureAgent();
    applyExtraParamsToAgent(agent, undefined, "openai", "gpt-5.4");

    void agent.streamFn?.(
      {
        api: "openai-responses",
        provider: "openai",
        id: "gpt-5.4",
      } as Model<"openai-responses">,
      { messages: [] },
      {},
    );

    expect(calls).toEqual([
      expect.objectContaining({
        openaiWsWarmup: false,
      }),
    ]);
  });

  // With only the thinking-level wrapper installed, a "high" thinking level
  // must overwrite reasoning.effort in the payload while leaving the other
  // reasoning fields (summary) intact.
  it("maps OpenAI GPT-5 thinking level into Responses reasoning effort payloads", () => {
    extraParamsTesting.setProviderRuntimeDepsForTest({
      prepareProviderExtraParams: () => undefined,
      resolveProviderExtraParamsForTransport: () => undefined,
      wrapProviderStreamFn: (params) =>
        createOpenAIThinkingLevelWrapper(params.context.streamFn, params.context.thinkingLevel),
    });

    const payload = runPayloadMutation({
      applyProvider: "openai-codex",
      applyModelId: "gpt-5.4",
      thinkingLevel: "high",
      model: {
        api: "openai-codex-responses",
        provider: "openai-codex",
        id: "gpt-5.4",
        baseUrl: "https://chatgpt.com/backend-api",
      } as Model<"openai-codex-responses">,
      payload: { reasoning: { effort: "none", summary: "auto" } },
    });

    expect(payload.reasoning).toEqual({ effort: "high", summary: "auto" });
  });

  // Ordering contract: prepareProviderExtraParams runs first, and its output
  // (including the transport it selected) is what the transport-patch hook
  // sees; the final result merges both stages.
  it("composes provider preparation before transport patch resolution", () => {
    const resolveProviderExtraParamsForTransport = vi.fn(() => ({
      patch: {
        parallel_tool_calls: false,
        transportHookApplied: true,
      },
    }));
    extraParamsTesting.setProviderRuntimeDepsForTest({
      prepareProviderExtraParams: (params) => ({
        ...params.context.extraParams,
        transport: "websocket",
        preparedByProvider: true,
      }),
      resolveProviderExtraParamsForTransport,
      wrapProviderStreamFn: (params) => params.context.streamFn,
    });

    const prepared = resolvePreparedExtraParams({
      cfg: undefined,
      provider: "openai",
      modelId: "gpt-5.4",
      thinkingLevel: "high",
      model: {
        api: "openai-responses",
        provider: "openai",
        id: "gpt-5.4",
      } as Model<"openai-responses">,
    });

    // Result carries both the preparation output and the transport patch.
    expect(prepared).toMatchObject({
      transport: "websocket",
      preparedByProvider: true,
      parallel_tool_calls: false,
      transportHookApplied: true,
    });
    // The transport hook must have observed the prepared params, proving it
    // ran after (and on top of) provider preparation.
    expect(resolveProviderExtraParamsForTransport).toHaveBeenCalledWith(
      expect.objectContaining({
        context: expect.objectContaining({
          extraParams: expect.objectContaining({
            preparedByProvider: true,
          }),
          transport: "websocket",
        }),
      }),
    );
  });
});
|
|
|
|
function runPayloadMutation(params: {
|
|
applyProvider: string;
|
|
applyModelId: string;
|
|
model: Model<"openai-codex-responses"> | Model<"openai-responses">;
|
|
thinkingLevel?: Parameters<typeof applyExtraParamsToAgent>[5];
|
|
payload?: Record<string, unknown>;
|
|
}): Record<string, unknown> {
|
|
const payload: Record<string, unknown> = params.payload ?? {};
|
|
const baseStreamFn: StreamFn = (model, _context, options) => {
|
|
options?.onPayload?.(payload, model);
|
|
return {} as ReturnType<StreamFn>;
|
|
};
|
|
const agent = { streamFn: baseStreamFn };
|
|
applyExtraParamsToAgent(
|
|
agent,
|
|
undefined,
|
|
params.applyProvider,
|
|
params.applyModelId,
|
|
undefined,
|
|
params.thinkingLevel,
|
|
);
|
|
const context: Context = { messages: [] };
|
|
void agent.streamFn?.(params.model, context, {} as SimpleStreamOptions);
|
|
return payload;
|
|
}
|
|
|
|
function installNoopProviderRuntimeDeps() {
|
|
extraParamsTesting.setProviderRuntimeDepsForTest({
|
|
prepareProviderExtraParams: () => undefined,
|
|
resolveProviderExtraParamsForTransport: () => undefined,
|
|
wrapProviderStreamFn: (params) => params.context.streamFn,
|
|
});
|
|
}
|
|
|
|
function createOptionsCaptureAgent() {
|
|
const calls: Array<(SimpleStreamOptions & { openaiWsWarmup?: boolean }) | undefined> = [];
|
|
const baseStreamFn: StreamFn = (_model, _context, options) => {
|
|
calls.push(options as (SimpleStreamOptions & { openaiWsWarmup?: boolean }) | undefined);
|
|
return {} as ReturnType<StreamFn>;
|
|
};
|
|
return {
|
|
calls,
|
|
agent: { streamFn: baseStreamFn },
|
|
};
|
|
}
|