mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-06 11:00:42 +00:00
fix(gateway): start channels before model prewarm
This commit is contained in:
@@ -13,6 +13,7 @@ Docs: https://docs.openclaw.ai

### Fixes

- Gateway/startup: start chat channels without waiting for primary model prewarm, keeping model warmup bounded in the background so Slack and other channels come online promptly when provider discovery is slow. Supersedes #73420. Thanks @dorukardahan.
- Gateway/install: carry env-backed config SecretRefs such as `channels.discord.token` into generated service environments when they are present only in the installing shell, while keeping gateway auth SecretRefs non-persisted. Fixes #67817; supersedes #73426. Thanks @wdimaculangan and @ztexydt-cqh.
- Auto-reply/commands: stop bare `/reset` and `/new` after reset hooks acknowledge the command, so non-ACP channels no longer fall through into empty provider calls while `/reset <message>` and `/new <message>` still seed the next model turn. Fixes #73367. Thanks @hoyanhan and @wenxu007.
- Auto-reply: preserve voice-note media from silent turns while continuing to suppress text and non-voice media, so `NO_REPLY` TTS replies still deliver the requested audio bubble. (#73406) Thanks @zqchris.
@@ -27,6 +27,16 @@ const hoisted = vi.hoisted(() => {
resolved: 0,
failed: 0,
}));
const resolveAgentModelPrimaryValue = vi.fn(() => "");
const normalizeProviderId = vi.fn((provider: string) => provider.toLowerCase());
const resolveOpenClawAgentDir = vi.fn(() => "/tmp/openclaw-state/agents/default/agent");
const isCliProvider = vi.fn(() => false);
const resolveConfiguredModelRef = vi.fn(() => ({
provider: "openai",
model: "gpt-5.4",
}));
const resolveEmbeddedAgentRuntime = vi.fn(() => "pi");
const ensureOpenClawModelsJson = vi.fn(async () => undefined);
return {
startPluginServices,
startGmailWatcherWithLogs,
@@ -46,6 +56,13 @@ const hoisted = vi.hoisted(() => {
refreshLatestUpdateRestartSentinel,
getAcpRuntimeBackend,
reconcilePendingSessionIdentities,
resolveAgentModelPrimaryValue,
normalizeProviderId,
resolveOpenClawAgentDir,
isCliProvider,
resolveConfiguredModelRef,
resolveEmbeddedAgentRuntime,
ensureOpenClawModelsJson,
};
});
||||
@@ -123,6 +140,36 @@ vi.mock("../infra/update-startup.js", () => ({
scheduleGatewayUpdateCheck: hoisted.scheduleGatewayUpdateCheck,
}));

vi.mock("../config/model-input.js", () => ({
resolveAgentModelPrimaryValue: hoisted.resolveAgentModelPrimaryValue,
}));

vi.mock("../agents/provider-id.js", () => ({
normalizeProviderId: hoisted.normalizeProviderId,
}));

vi.mock("../agents/agent-paths.js", () => ({
resolveOpenClawAgentDir: hoisted.resolveOpenClawAgentDir,
}));

vi.mock("../agents/defaults.js", () => ({
DEFAULT_MODEL: "gpt-5.4",
DEFAULT_PROVIDER: "openai",
}));

vi.mock("../agents/model-selection.js", () => ({
isCliProvider: hoisted.isCliProvider,
resolveConfiguredModelRef: hoisted.resolveConfiguredModelRef,
}));

vi.mock("../agents/pi-embedded-runner/runtime.js", () => ({
resolveEmbeddedAgentRuntime: hoisted.resolveEmbeddedAgentRuntime,
}));

vi.mock("../agents/models-config.js", () => ({
ensureOpenClawModelsJson: hoisted.ensureOpenClawModelsJson,
}));

vi.mock("./server-tailscale.js", () => ({
startGatewayTailscaleExposure: hoisted.startGatewayTailscaleExposure,
}));
@@ -155,6 +202,17 @@ describe("startGatewayPostAttachRuntime", () => {
hoisted.getAcpRuntimeBackend.mockReset();
hoisted.getAcpRuntimeBackend.mockReturnValue(null);
hoisted.reconcilePendingSessionIdentities.mockClear();
hoisted.resolveAgentModelPrimaryValue.mockReset();
hoisted.resolveAgentModelPrimaryValue.mockReturnValue("");
hoisted.normalizeProviderId.mockClear();
hoisted.resolveOpenClawAgentDir.mockClear();
hoisted.isCliProvider.mockReset();
hoisted.isCliProvider.mockReturnValue(false);
hoisted.resolveConfiguredModelRef.mockClear();
hoisted.resolveEmbeddedAgentRuntime.mockReset();
hoisted.resolveEmbeddedAgentRuntime.mockReturnValue("pi");
hoisted.ensureOpenClawModelsJson.mockReset();
hoisted.ensureOpenClawModelsJson.mockResolvedValue(undefined);
});

it("re-enables startup-gated methods after post-attach sidecars start", async () => {
@@ -247,13 +305,58 @@ describe("startGatewayPostAttachRuntime", () => {

expect(prewarm).toHaveBeenCalledTimes(1);
expect(log.warn).toHaveBeenCalledWith(
"startup model warmup timed out after 25ms; continuing channel startup",
"startup model warmup timed out after 25ms; continuing without waiting",
);
} finally {
vi.useRealTimers();
}
});

it("starts channels without waiting for primary model prewarm completion", async () => {
hoisted.resolveAgentModelPrimaryValue.mockReturnValue("openai/gpt-5.4");
let resolvePrewarm!: () => void;
hoisted.ensureOpenClawModelsJson.mockImplementation(
async () =>
await new Promise<undefined>((resolve) => {
resolvePrewarm = () => resolve(undefined);
}),
);
const startChannels = vi.fn(async () => undefined);

const sidecarsPromise = startGatewaySidecars({
cfg: {
hooks: { internal: { enabled: false } },
agents: { defaults: { model: "openai/gpt-5.4" } },
} as never,
pluginRegistry: createPostAttachParams().pluginRegistry,
defaultWorkspaceDir: "/tmp/openclaw-workspace",
deps: {} as never,
startChannels,
log: { warn: vi.fn() },
logHooks: {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
},
logChannels: {
info: vi.fn(),
error: vi.fn(),
},
});

await vi.waitFor(
() => {
expect(hoisted.ensureOpenClawModelsJson).toHaveBeenCalledTimes(1);
expect(startChannels).toHaveBeenCalledTimes(1);
},
{ timeout: 250 },
);
await sidecarsPromise;

resolvePrewarm();
await Promise.resolve();
});

it("keeps startup-gated methods unavailable while sidecars are still resuming", async () => {
let resumeSidecars!: () => void;
const sidecarsReady = new Promise<{ pluginServices: null }>((resolve) => {
@@ -174,13 +174,34 @@ async function prewarmConfiguredPrimaryModelWithTimeout(
}).then(() => {
if (!settled) {
params.log.warn(
`startup model warmup timed out after ${params.timeoutMs ?? PRIMARY_MODEL_PREWARM_TIMEOUT_MS}ms; continuing channel startup`,
`startup model warmup timed out after ${params.timeoutMs ?? PRIMARY_MODEL_PREWARM_TIMEOUT_MS}ms; continuing without waiting`,
);
}
});
await Promise.race([warmup, timeout]);
}

function schedulePrimaryModelPrewarm(
params: {
cfg: OpenClawConfig;
log: { warn: (msg: string) => void };
startupTrace?: GatewayStartupTrace;
},
prewarm: typeof prewarmConfiguredPrimaryModel = prewarmConfiguredPrimaryModel,
): void {
void measureStartup(params.startupTrace, "sidecars.model-prewarm", () =>
prewarmConfiguredPrimaryModelWithTimeout(
{
cfg: params.cfg,
log: params.log,
},
prewarm,
),
).catch((err) => {
params.log.warn(`startup model warmup failed: ${String(err)}`);
});
}

export async function startGatewaySidecars(params: {
cfg: OpenClawConfig;
pluginRegistry: ReturnType<typeof loadOpenClawPlugins>;
@@ -308,12 +329,11 @@ export async function startGatewaySidecars(params: {
await measureStartup(params.startupTrace, "sidecars.channels", async () => {
if (!skipChannels) {
try {
await measureStartup(params.startupTrace, "sidecars.model-prewarm", () =>
prewarmConfiguredPrimaryModelWithTimeout({
cfg: params.cfg,
log: params.log,
}),
);
schedulePrimaryModelPrewarm({
cfg: params.cfg,
log: params.log,
startupTrace: params.startupTrace,
});
await measureStartup(params.startupTrace, "sidecars.channel-start", () =>
params.startChannels(),
);
@@ -636,4 +656,5 @@ export async function startGatewayPostAttachRuntime(
export const __testing = {
prewarmConfiguredPrimaryModel,
prewarmConfiguredPrimaryModelWithTimeout,
schedulePrimaryModelPrewarm,
};
Reference in New Issue
Block a user