fix: pass directories to provider stream wrappers (#67843)

* fix: pass directories to provider stream wrappers

* fix: pass directories to provider stream wrappers

---------

Co-authored-by: neilofneils404 <258699186+neilofneils404@users.noreply.github.com>
Co-authored-by: vincentkoc <25068+vincentkoc@users.noreply.github.com>
This commit is contained in:
neilofneils404
2026-04-27 18:43:38 -04:00
committed by GitHub
parent 94f5827c6e
commit 482ff924ef
5 changed files with 85 additions and 0 deletions

View File

@@ -17,6 +17,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Control UI/Agents: redact tool-call args, partial/final results, derived exec output, and configured custom secret patterns before streaming tool events to the Control UI, so tool output cannot expose provider or channel credentials. Fixes #72283. (#72319) Thanks @volcano303 and @BunsDev.
- Providers/Codex: pass agent and workspace directories into provider stream wrappers so Codex native `web_search` activation can evaluate the correct auth context, and smoke-test the built status-message runtime by resolving the emitted bundle name. Carries forward #67843; refs #65909. Thanks @neilofneils404.
- Models/fallbacks: treat user-selected session models as exact choices, so `/model ollama/...` and model-picker switches fail visibly when the selected provider is unreachable instead of answering from an unrelated configured fallback. Fixes #73023. Thanks @pavelyortho-cyber.
- CLI/model probes: fail local `infer model run` probes when the provider returns no text output, so unreachable local providers and empty completions no longer look like successful smoke tests. Refs #73023. Thanks @pavelyortho-cyber.
- CLI/Ollama: run local `infer model run` through the lean provider completion path and skip global model discovery for one-shot local probes, so Ollama smoke tests no longer pay full chat-agent/tool startup cost or hang before the native `/api/chat` request. Fixes #72851. Thanks @TotalRes2020.

View File

@@ -1568,6 +1568,7 @@
"test:auth:compat": "node scripts/run-vitest.mjs run --config test/vitest/vitest.gateway.config.ts src/gateway/server.auth.compat-baseline.test.ts src/gateway/client.test.ts src/gateway/reconnect-gating.test.ts src/gateway/protocol/connect-error-details.test.ts",
"test:build:bundled-runtime-deps": "node scripts/test-built-bundled-runtime-deps.mjs",
"test:build:singleton": "node scripts/test-built-plugin-singleton.mjs",
"test:build:status-message-runtime": "node scripts/test-built-status-message-runtime.mjs",
"test:bundled": "node scripts/run-vitest.mjs run --config test/vitest/vitest.bundled.config.ts",
"test:changed": "node scripts/test-projects.mjs --changed origin/main",
"test:changed:max": "OPENCLAW_VITEST_MAX_WORKERS=8 node scripts/test-projects.mjs --changed origin/main",

View File

@@ -0,0 +1,47 @@
import assert from "node:assert/strict";
import fs from "node:fs";
import path from "node:path";
import { pathToFileURL } from "node:url";
import { parsePackageRootArg } from "./lib/package-root-args.mjs";
// Matches the built status-message runtime bundle name, with or without a
// bundler hash suffix: "status-message.runtime.js" or
// "status-message.runtime-<hash>.js".
const STATUS_MESSAGE_RUNTIME_RE = /^status-message\.runtime(?:-[A-Za-z0-9_-]+)?\.js$/u;
// Resolve the package root to probe from CLI args, with
// "OPENCLAW_STATUS_MESSAGE_RUNTIME_ROOT" passed as the env-var name —
// presumably an override/fallback; confirm precedence in lib/package-root-args.mjs.
const { packageRoot } = parsePackageRootArg(
  process.argv.slice(2),
  "OPENCLAW_STATUS_MESSAGE_RUNTIME_ROOT",
);
/**
 * Locate the built status-message runtime bundle inside `distDir`.
 *
 * Candidate file names are the regular files matching
 * STATUS_MESSAGE_RUNTIME_RE. Hash-suffixed bundles (any name other than the
 * bare "status-message.runtime.js") sort ahead of the bare name; ties are
 * broken alphabetically via localeCompare. Fails the script (via assert)
 * when no bundle is present, and returns the full path of the first
 * candidate otherwise.
 */
function findBuiltStatusMessageRuntimePath(distDir) {
  const bundleNames = [];
  for (const entry of fs.readdirSync(distDir, { withFileTypes: true })) {
    if (entry.isFile() && STATUS_MESSAGE_RUNTIME_RE.test(entry.name)) {
      bundleNames.push(entry.name);
    }
  }
  bundleNames.sort((left, right) => {
    const leftHashed = left !== "status-message.runtime.js";
    const rightHashed = right !== "status-message.runtime.js";
    if (leftHashed !== rightHashed) {
      // Prefer the hash-suffixed bundle over the bare file name.
      return leftHashed ? -1 : 1;
    }
    return left.localeCompare(right);
  });
  assert.ok(bundleNames.length > 0, `missing built status-message runtime bundle under ${distDir}`);
  return path.join(distDir, bundleNames[0]);
}
// Locate the built bundle under <packageRoot>/dist and import it as an ES
// module via a file:// URL (dynamic import requires a URL on some platforms,
// e.g. Windows absolute paths).
const runtimePath = findBuiltStatusMessageRuntimePath(path.join(packageRoot, "dist"));
const runtimeModule = await import(pathToFileURL(runtimePath).href);
// Smoke-check the bundle's public surface: the built entry must export the
// loader function...
assert.equal(
  typeof runtimeModule.loadStatusMessageRuntimeModule,
  "function",
  `built status-message runtime did not export loadStatusMessageRuntimeModule: ${runtimePath}`,
);
// ...and invoking the loader must yield a module exposing buildStatusMessage.
const statusModule = await runtimeModule.loadStatusMessageRuntimeModule();
assert.equal(
  typeof statusModule.buildStatusMessage,
  "function",
  "status-message runtime did not load buildStatusMessage",
);

View File

@@ -479,6 +479,40 @@ describe("applyExtraParamsToAgent", () => {
};
}
it("passes agentDir and workspaceDir to provider stream wrappers", () => {
  // Capture the context handed to the stream wrapper so we can inspect the
  // directories after applyExtraParamsToAgent runs.
  let seenContext: WrapProviderStreamFnParams["context"] | undefined;
  extraParamsTesting.setProviderRuntimeDepsForTest({
    prepareProviderExtraParams: () => undefined,
    wrapProviderStreamFn: (params) => {
      seenContext = params.context;
      return params.context.streamFn;
    },
  });
  // Minimal agent stub: only streamFn is consulted by the wrapper path.
  const stubStreamFn = (() => ({}) as ReturnType<StreamFn>) as StreamFn;
  const agent = { streamFn: stubStreamFn };
  const codexModel = {
    api: "openai-codex-responses",
    provider: "openai-codex",
    id: "gpt-5.4",
  } as Model<"openai-codex-responses">;
  applyExtraParamsToAgent(
    agent,
    undefined,
    "openai-codex",
    "gpt-5.4",
    undefined,
    "high",
    "cass",
    "/tmp/openclaw-workspace",
    codexModel,
    "/tmp/openclaw-agent",
  );
  // Both directories must flow through to the wrapper context unchanged.
  expect(seenContext?.agentDir).toBe("/tmp/openclaw-agent");
  expect(seenContext?.workspaceDir).toBe("/tmp/openclaw-workspace");
});
function runResponsesPayloadMutationCase(params: {
applyProvider: string;
applyModelId: string;

View File

@@ -682,6 +682,8 @@ export function applyExtraParamsToAgent(
config: cfg,
context: {
config: cfg,
agentDir,
workspaceDir,
provider,
modelId,
extraParams: effectiveExtraParams,