test: stabilize Docker MCP lanes under load

This commit is contained in:
Peter Steinberger
2026-04-24 17:41:30 +01:00
parent 5deef28b7a
commit 11ad1919ed
5 changed files with 190 additions and 161 deletions

View File

@@ -34,11 +34,19 @@ docker run --rm \
bash -lc "set -euo pipefail
entry=dist/index.mjs
[ -f \"\$entry\" ] || entry=dist/index.js
export MOCK_PORT=44081
export SUCCESS_MARKER=OPENCLAW_CRON_MCP_CLEANUP_OK
export MOCK_REQUEST_LOG=/tmp/openclaw-cron-mock-openai-requests.jsonl
export OPENCLAW_DOCKER_OPENAI_BASE_URL=\"http://127.0.0.1:\$MOCK_PORT/v1\"
node scripts/e2e/mock-openai-server.mjs >/tmp/cron-mcp-cleanup-mock-openai.log 2>&1 &
mock_pid=\$!
node --import tsx scripts/e2e/cron-mcp-cleanup-seed.ts >/tmp/cron-mcp-cleanup-seed.log
node \"\$entry\" gateway --port $PORT --bind loopback --allow-unconfigured >/tmp/cron-mcp-cleanup-gateway.log 2>&1 &
gateway_pid=\$!
cleanup_inner() {
kill \"\$mock_pid\" >/dev/null 2>&1 || true
kill \"\$gateway_pid\" >/dev/null 2>&1 || true
wait \"\$mock_pid\" >/dev/null 2>&1 || true
wait \"\$gateway_pid\" >/dev/null 2>&1 || true
}
dump_gateway_log_on_error() {
@@ -46,12 +54,21 @@ docker run --rm \
if [ \"\$status\" -ne 0 ]; then
tail -n 80 /tmp/cron-mcp-cleanup-gateway.log 2>/dev/null || true
cat /tmp/cron-mcp-cleanup-seed.log 2>/dev/null || true
cat /tmp/cron-mcp-cleanup-mock-openai.log 2>/dev/null || true
cat \"\$MOCK_REQUEST_LOG\" 2>/dev/null || true
fi
cleanup_inner
exit \"\$status\"
}
trap cleanup_inner EXIT
trap dump_gateway_log_on_error ERR
for _ in \$(seq 1 80); do
if node -e \"fetch('http://127.0.0.1:' + process.env.MOCK_PORT + '/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))\"; then
break
fi
sleep 0.1
done
node -e \"fetch('http://127.0.0.1:' + process.env.MOCK_PORT + '/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))\"
gateway_ready=0
for _ in \$(seq 1 300); do
if grep -q '\[gateway\] ready' /tmp/cron-mcp-cleanup-gateway.log 2>/dev/null; then

View File

@@ -7,6 +7,8 @@ import {
export type { OpenClawConfig };
const DOCKER_OPENAI_MODEL_REF = "openai/gpt-5.4";
const DOCKER_OPENAI_BASE_URL =
process.env.OPENCLAW_DOCKER_OPENAI_BASE_URL?.trim() || "http://127.0.0.1:9/v1";
const DOCKER_OPENAI_MODEL: ModelDefinitionConfig = {
id: "gpt-5.4",
name: "gpt-5.4",
@@ -30,7 +32,7 @@ export function applyDockerOpenAiProviderConfig(
const seededConfig = applyProviderConfigWithDefaultModelPreset(config, {
providerId: "openai",
api: "openai-responses",
baseUrl: "http://127.0.0.1:9/v1",
baseUrl: DOCKER_OPENAI_BASE_URL,
defaultModel: DOCKER_OPENAI_MODEL,
defaultModelId: DOCKER_OPENAI_MODEL.id,
aliases: [{ modelRef: DOCKER_OPENAI_MODEL_REF, alias: "GPT" }],

View File

@@ -66,7 +66,7 @@ docker run --rm \
exit 1
fi
acpx_ready=0
for _ in \$(seq 1 960); do
for _ in \$(seq 1 2400); do
if grep -q '\[plugins\] embedded acpx runtime backend ready' /tmp/mcp-channels-gateway.log 2>/dev/null; then
acpx_ready=1
break

View File

@@ -0,0 +1,168 @@
import fs from "node:fs";
import http from "node:http";
// Port to bind; accept either env spelling so both the cron lane (MOCK_PORT)
// and other callers (OPENCLAW_MOCK_OPENAI_PORT) can configure it.
const port = Number(process.env.MOCK_PORT ?? process.env.OPENCLAW_MOCK_OPENAI_PORT);
// Marker string echoed as the assistant reply so e2e tests can grep for success.
const successMarker = process.env.SUCCESS_MARKER ?? "OPENCLAW_E2E_OK";
// Optional JSONL path; when set, every non-GET request body is appended for debugging.
const requestLog = process.env.MOCK_REQUEST_LOG;
// Fail fast at startup instead of silently listening on NaN/0.
if (!Number.isInteger(port) || port <= 0) {
  throw new Error("missing valid MOCK_PORT or OPENCLAW_MOCK_OPENAI_PORT");
}
function readBody(req) {
return new Promise((resolve, reject) => {
let body = "";
req.setEncoding("utf8");
req.on("data", (chunk) => {
body += chunk;
});
req.on("end", () => resolve(body));
req.on("error", reject);
});
}
function writeJson(res, status, body) {
res.writeHead(status, { "content-type": "application/json" });
res.end(JSON.stringify(body));
}
/**
 * Build the minimal OpenAI Responses-API SSE event sequence for one
 * completed assistant message containing `text`.
 * Key insertion order is preserved so the serialized SSE bytes are stable.
 */
function responseEvents(text) {
  // Shared message envelope; spreads below keep the original key order:
  // added item = type,id,role,content,status / done item = type,id,role,status,content.
  const message = {
    type: "message",
    id: "msg_e2e_1",
    role: "assistant",
  };
  const added = {
    type: "response.output_item.added",
    item: { ...message, content: [], status: "in_progress" },
  };
  const done = {
    type: "response.output_item.done",
    item: {
      ...message,
      status: "completed",
      content: [{ type: "output_text", text, annotations: [] }],
    },
  };
  const completed = {
    type: "response.completed",
    response: {
      status: "completed",
      usage: {
        input_tokens: 11,
        output_tokens: 7,
        total_tokens: 18,
        input_tokens_details: { cached_tokens: 0 },
      },
    },
  };
  return [added, done, completed];
}
function writeSse(res, events) {
res.writeHead(200, {
"content-type": "text/event-stream",
"cache-control": "no-store",
connection: "keep-alive",
});
for (const event of events) {
res.write(`data: ${JSON.stringify(event)}\n\n`);
}
res.write("data: [DONE]\n\n");
res.end();
}
/**
 * Answer a /v1/chat/completions request. When `stream` is truthy, emits two
 * SSE chunks (content, then stop); otherwise returns a single JSON
 * completion. The assistant content is always the configured success marker.
 */
function writeChatCompletion(res, stream) {
  if (!stream) {
    writeJson(res, 200, {
      id: "chatcmpl_e2e",
      object: "chat.completion",
      choices: [
        { index: 0, message: { role: "assistant", content: successMarker }, finish_reason: "stop" },
      ],
      usage: { prompt_tokens: 11, completion_tokens: 7, total_tokens: 18 },
    });
    return;
  }
  const contentChunk = {
    id: "chatcmpl_e2e",
    object: "chat.completion.chunk",
    choices: [{ index: 0, delta: { role: "assistant", content: successMarker } }],
  };
  const stopChunk = {
    id: "chatcmpl_e2e",
    object: "chat.completion.chunk",
    choices: [{ index: 0, delta: {}, finish_reason: "stop" }],
  };
  writeSse(res, [contentChunk, stopChunk]);
}
/**
 * Minimal mock of the OpenAI HTTP API used by the Docker e2e lanes.
 * Routes:
 *   GET  /health               -> { ok: true } liveness probe
 *   GET  /v1/models            -> single fake model listing
 *   POST /v1/responses         -> Responses API (SSE unless body.stream === false)
 *   POST /v1/chat/completions  -> Chat API (SSE unless body.stream === false)
 * Any other route returns 404 so misconfigured clients fail loudly.
 */
const server = http.createServer(async (req, res) => {
  const url = new URL(req.url ?? "/", "http://127.0.0.1");
  if (req.method === "GET" && url.pathname === "/health") {
    writeJson(res, 200, { ok: true });
    return;
  }
  if (req.method === "GET" && url.pathname === "/v1/models") {
    writeJson(res, 200, {
      object: "list",
      data: [{ id: "gpt-5.4", object: "model", owned_by: "openclaw-e2e" }],
    });
    return;
  }
  // Read the body defensively: a client abort rejects readBody, and an
  // unhandled rejection in this async handler would crash the whole mock
  // server process (Node >= 15), taking the e2e lane down mid-run.
  let bodyText;
  try {
    bodyText = await readBody(req);
  } catch {
    res.destroy();
    return;
  }
  if (requestLog) {
    // Best-effort debug log; never let a logging failure kill a request.
    try {
      fs.appendFileSync(
        requestLog,
        `${JSON.stringify({ method: req.method, path: url.pathname, body: bodyText })}\n`,
      );
    } catch {
      // ignore logging errors
    }
  }
  let body = {};
  try {
    body = bodyText ? JSON.parse(bodyText) : {};
  } catch {
    // Tolerate malformed JSON; handlers treat it as "no options".
    body = {};
  }
  if (req.method === "POST" && url.pathname === "/v1/responses") {
    if (body.stream === false) {
      writeJson(res, 200, {
        id: "resp_e2e",
        object: "response",
        status: "completed",
        output: [
          {
            type: "message",
            id: "msg_e2e_1",
            role: "assistant",
            status: "completed",
            content: [{ type: "output_text", text: successMarker, annotations: [] }],
          },
        ],
        usage: { input_tokens: 11, output_tokens: 7, total_tokens: 18 },
      });
      return;
    }
    writeSse(res, responseEvents(successMarker));
    return;
  }
  if (req.method === "POST" && url.pathname === "/v1/chat/completions") {
    // Streaming is the default; only an explicit stream:false opts out.
    writeChatCompletion(res, body.stream !== false);
    return;
  }
  writeJson(res, 404, {
    error: { message: `unhandled mock route: ${req.method} ${url.pathname}` },
  });
});
// Loopback-only: the mock must never be reachable from outside the container.
server.listen(port, "127.0.0.1", () => {
  console.log(`mock-openai listening on ${port}`);
});

View File

@@ -148,165 +148,7 @@ assert_dep_present() {
fi
}
cat >/tmp/openclaw-mock-openai.mjs <<'NODE'
import http from "node:http";
import fs from "node:fs";
const port = Number(process.env.MOCK_PORT);
const successMarker = process.env.SUCCESS_MARKER;
const requestLog = process.env.MOCK_REQUEST_LOG;
function readBody(req) {
return new Promise((resolve, reject) => {
let body = "";
req.setEncoding("utf8");
req.on("data", (chunk) => {
body += chunk;
});
req.on("end", () => resolve(body));
req.on("error", reject);
});
}
function writeJson(res, status, body) {
res.writeHead(status, { "content-type": "application/json" });
res.end(JSON.stringify(body));
}
function responseEvents(text) {
return [
{
type: "response.output_item.added",
item: {
type: "message",
id: "msg_e2e_1",
role: "assistant",
content: [],
status: "in_progress",
},
},
{
type: "response.output_item.done",
item: {
type: "message",
id: "msg_e2e_1",
role: "assistant",
status: "completed",
content: [{ type: "output_text", text, annotations: [] }],
},
},
{
type: "response.completed",
response: {
status: "completed",
usage: {
input_tokens: 11,
output_tokens: 7,
total_tokens: 18,
input_tokens_details: { cached_tokens: 0 },
},
},
},
];
}
function writeSse(res, events) {
res.writeHead(200, {
"content-type": "text/event-stream",
"cache-control": "no-store",
connection: "keep-alive",
});
for (const event of events) {
res.write(`data: ${JSON.stringify(event)}\n\n`);
}
res.write("data: [DONE]\n\n");
res.end();
}
function writeChatCompletion(res, stream) {
if (stream) {
writeSse(res, [
{
id: "chatcmpl_e2e",
object: "chat.completion.chunk",
choices: [{ index: 0, delta: { role: "assistant", content: successMarker } }],
},
{
id: "chatcmpl_e2e",
object: "chat.completion.chunk",
choices: [{ index: 0, delta: {}, finish_reason: "stop" }],
},
]);
return;
}
writeJson(res, 200, {
id: "chatcmpl_e2e",
object: "chat.completion",
choices: [{ index: 0, message: { role: "assistant", content: successMarker }, finish_reason: "stop" }],
usage: { prompt_tokens: 11, completion_tokens: 7, total_tokens: 18 },
});
}
const server = http.createServer(async (req, res) => {
const url = new URL(req.url ?? "/", "http://127.0.0.1");
if (req.method === "GET" && url.pathname === "/health") {
writeJson(res, 200, { ok: true });
return;
}
if (req.method === "GET" && url.pathname === "/v1/models") {
writeJson(res, 200, {
object: "list",
data: [{ id: "gpt-5.4", object: "model", owned_by: "openclaw-e2e" }],
});
return;
}
const bodyText = await readBody(req);
fs.appendFileSync(requestLog, JSON.stringify({ method: req.method, path: url.pathname, body: bodyText }) + "\n");
let body = {};
try {
body = bodyText ? JSON.parse(bodyText) : {};
} catch {
body = {};
}
if (req.method === "POST" && url.pathname === "/v1/responses") {
if (body.stream === false) {
writeJson(res, 200, {
id: "resp_e2e",
object: "response",
status: "completed",
output: [
{
type: "message",
id: "msg_e2e_1",
role: "assistant",
status: "completed",
content: [{ type: "output_text", text: successMarker, annotations: [] }],
},
],
usage: { input_tokens: 11, output_tokens: 7, total_tokens: 18 },
});
return;
}
writeSse(res, responseEvents(successMarker));
return;
}
if (req.method === "POST" && url.pathname === "/v1/chat/completions") {
writeChatCompletion(res, body.stream !== false);
return;
}
writeJson(res, 404, { error: { message: `unhandled mock route: ${req.method} ${url.pathname}` } });
});
server.listen(port, "127.0.0.1", () => {
console.log(`mock-openai listening on ${port}`);
});
NODE
MOCK_PORT="$MOCK_PORT" SUCCESS_MARKER="$SUCCESS_MARKER" MOCK_REQUEST_LOG="$MOCK_REQUEST_LOG" node /tmp/openclaw-mock-openai.mjs >/tmp/openclaw-mock-openai.log 2>&1 &
MOCK_PORT="$MOCK_PORT" SUCCESS_MARKER="$SUCCESS_MARKER" MOCK_REQUEST_LOG="$MOCK_REQUEST_LOG" node scripts/e2e/mock-openai-server.mjs >/tmp/openclaw-mock-openai.log 2>&1 &
mock_pid="$!"
for _ in $(seq 1 80); do
if node -e "fetch('http://127.0.0.1:${MOCK_PORT}/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"; then