fix(gateway): resolve dynamic models during warmup

This commit is contained in:
Peter Steinberger
2026-04-23 02:14:46 +01:00
parent 1cbd5a9470
commit 26bf916382
2 changed files with 73 additions and 5 deletions

View File

@@ -86,7 +86,7 @@ async function prewarmConfiguredPrimaryModel(params: {
{ selectAgentHarness },
{ isCliProvider, resolveConfiguredModelRef },
{ ensureOpenClawModelsJson },
{ resolveModel },
{ resolveModel, resolveModelAsync },
{ resolveEmbeddedAgentRuntime },
] = await Promise.all([
import("../agents/agent-paths.js"),
@@ -119,10 +119,12 @@ async function prewarmConfiguredPrimaryModel(params: {
skipProviderRuntimeHooks: true,
});
if (!resolved.model) {
throw new Error(
resolved.error ??
`Unknown model: ${provider}/${model} (startup warmup only checks static model resolution)`,
);
const asyncResolved = await resolveModelAsync(provider, model, agentDir, params.cfg);
if (!asyncResolved.model) {
throw new Error(
resolved.error ?? asyncResolved.error ?? `Unknown model: ${provider}/${model}`,
);
}
}
} catch (err) {
params.log.warn(`startup model warmup failed for ${provider}/${model}: ${String(err)}`);

View File

@@ -19,6 +19,20 @@ const resolveModelMock = vi.fn<
api: "openai-codex-responses",
},
}));
// Mock of the async model resolver used by the warmup fallback path.
// Mirrors resolveModelMock's result shape, but wrapped in a promise.
const resolveModelAsyncMock = vi.fn<
  (
    provider: unknown,
    modelId: unknown,
    agentDir: unknown,
    cfg: unknown,
  ) => Promise<{ model?: { id: string; provider: string; api: string }; error?: string }>
>(() =>
  Promise.resolve({
    model: { id: "gpt-5.4", provider: "openai-codex", api: "openai-codex-responses" },
  }),
);
// Harness selection stub: warmup is expected to pick the "pi" embedded harness.
const selectAgentHarnessMock = vi.fn((_params: unknown) => {
  return { id: "pi" };
});
// Runtime resolution stub: report the embedded agent runtime as "auto".
const resolveEmbeddedAgentRuntimeMock = vi.fn(() => {
  return "auto";
});
@@ -43,6 +57,8 @@ vi.mock("../agents/pi-embedded-runner/model.js", () => ({
cfg: unknown,
options?: unknown,
) => resolveModelMock(provider, modelId, agentDir, cfg, options),
resolveModelAsync: (provider: unknown, modelId: unknown, agentDir: unknown, cfg: unknown) =>
resolveModelAsyncMock(provider, modelId, agentDir, cfg),
}));
vi.mock("../agents/pi-embedded-runner/runtime.js", () => ({
@@ -61,6 +77,7 @@ describe("gateway startup primary model warmup", () => {
beforeEach(() => {
ensureOpenClawModelsJsonMock.mockClear();
resolveModelMock.mockClear();
resolveModelAsyncMock.mockClear();
selectAgentHarnessMock.mockClear();
selectAgentHarnessMock.mockReturnValue({ id: "pi" });
resolveEmbeddedAgentRuntimeMock.mockClear();
@@ -194,4 +211,53 @@ describe("gateway startup primary model warmup", () => {
expect(ensureOpenClawModelsJsonMock).toHaveBeenCalledWith(cfg, "/tmp/agent");
expect(resolveModelMock).toHaveBeenCalled();
});
it("falls back to async model resolution before warning", async () => {
  // Static resolution misses; the async resolver supplies the model instead.
  resolveModelMock.mockReturnValueOnce({ model: undefined } as never);
  resolveModelAsyncMock.mockResolvedValueOnce({
    model: {
      id: "gpt-5.4",
      provider: "codex",
      api: "openai-codex-responses",
    },
  });
  const warnSpy = vi.fn();
  const cfg = {
    agents: {
      defaults: {
        model: {
          primary: "codex/gpt-5.4",
        },
      },
    },
  } as OpenClawConfig;

  await prewarmConfiguredPrimaryModel({ cfg, log: { warn: warnSpy } });

  // The async fallback receives the same arguments the static resolver saw.
  expect(resolveModelAsyncMock).toHaveBeenCalledWith("codex", "gpt-5.4", "/tmp/agent", cfg);
  // The fallback succeeded, so warmup must not log a warning.
  expect(warnSpy).not.toHaveBeenCalled();
});
it("warns only when both static and async model resolution miss", async () => {
  // Both resolvers fail: static synchronously, async via the fallback path.
  resolveModelMock.mockReturnValueOnce({ model: undefined, error: "static miss" } as never);
  resolveModelAsyncMock.mockResolvedValueOnce({ error: "async miss" });
  const warn = vi.fn();
  await prewarmConfiguredPrimaryModel({
    cfg: {
      agents: {
        defaults: {
          model: {
            primary: "codex/gpt-5.4",
          },
        },
      },
    } as OpenClawConfig,
    log: { warn },
  });
  // Exactly one warning, naming the model that failed to warm up.
  expect(warn).toHaveBeenCalledTimes(1);
  expect(warn).toHaveBeenCalledWith(
    expect.stringContaining("startup model warmup failed for codex/gpt-5.4"),
  );
  // The static resolver's error takes precedence in the surfaced message:
  // warmup throws `resolved.error ?? asyncResolved.error ?? ...`, so the
  // warning must carry "static miss", not "async miss".
  expect(warn).toHaveBeenCalledWith(expect.stringContaining("static miss"));
});
});