test(telegram): cover model picker display names (#71016) (thanks @iskim77)

Peter Steinberger
2026-04-25 01:48:53 +01:00
parent d1386ada5a
commit a9c46d5b1a
2 changed files with 79 additions and 0 deletions


@@ -70,6 +70,7 @@ Docs: https://docs.openclaw.ai
- Plugins/runtime deps: isolate the internal npm cache used for bundled plugin runtime-dependency repair and let package updates refresh/verify already-current installs, so failed update or sudo doctor runs can be repaired by rerunning `openclaw update`. Thanks @steipete.
- Agents/delete: keep `--json` output machine-readable and retain workspaces that overlap another agent's workspace instead of moving shared state to Trash. Fixes #70889 and #70890. (#70897) Thanks @kaseonedge.
- Browser/screenshot: honor `timeoutMs` through host and node screenshot requests, bound raw CDP screenshot commands, and avoid beyond-viewport CDP capture for ordinary viewport screenshots, so Windows Chrome captures no longer hang past the requested deadline. Fixes #68330. Thanks @Woodylai24.
- Telegram/model picker: show configured model display names when browsing models through provider buttons, matching typed `/models <provider>` output. Fixes #70560. (#71016) Thanks @iskim77.
- Plugins/runtime deps: stage bundled plugin runtime dependencies for packaged/global installs in an external runtime root and retain already staged deps across repairs, avoiding package-tree update races and npm pruning after upgrades. Thanks @steipete.
- Plugins/runtime deps: log bundled plugin runtime-dependency staging before synchronous npm installs start and include elapsed timing afterward, so first boot after upgrades no longer looks hung while dependencies are being repaired. Thanks @steipete.
- Agents/failover: forward embedded run abort signals into provider-owned model streams, cap implicit LLM idle watchdogs below long run timeouts, and mark 429 responses without usable retry timing as non-retryable so GitHub Copilot rate limits fail over or surface promptly instead of hanging until run timeout. Fixes #71120. Thanks @steipete.


@@ -1072,6 +1072,84 @@ describe("createTelegramBot", () => {
    }
  });
  it("renders model callback lists with configured display names", async () => {
    onSpy.mockClear();
    replySpy.mockClear();
    editMessageTextSpy.mockClear();
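    // Stub buildModelsProviderData so the picker sees one provider whose models
    // have configured display names.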
    const buildModelsProviderDataMock =
      telegramBotDepsForTest.buildModelsProviderData as unknown as ReturnType<typeof vi.fn>;
    buildModelsProviderDataMock.mockResolvedValueOnce({
      byProvider: new Map<string, Set<string>>([["openai", new Set(["gpt-5", "gpt-4.1"])]]),
      providers: ["openai"],
      resolvedDefault: { provider: "openai", model: "gpt-5" },
      modelNames: new Map<string, string>([
        ["openai/gpt-4.1", "GPT 4.1 Bridge"],
        ["openai/gpt-5", "GPT Five Bridge"],
      ]),
    });
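    // Minimal config: openai/gpt-5 as the default model, Telegram DMs open to everyone.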
    const config = {
      agents: {
        defaults: {
          model: "openai/gpt-5",
        },
      },
      channels: {
        telegram: {
          dmPolicy: "open",
          allowFrom: ["*"],
        },
      },
    } satisfies NonNullable<Parameters<typeof createTelegramBot>[0]["config"]>;
    loadConfig.mockReturnValue(config);
    createTelegramBot({
      token: "tok",
      config,
    });
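    // Grab the callback_query handler the bot registered so the test can invoke it directly.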
    const callbackHandler = onSpy.mock.calls.find((call) => call[0] === "callback_query")?.[1] as (
      ctx: Record<string, unknown>,
    ) => Promise<void>;
    expect(callbackHandler).toBeDefined();
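    // Simulate a user tapping the openai provider button in the model picker.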
    await callbackHandler({
      callbackQuery: {
        id: "cbq-model-display-names-1",
        data: "mdl_list_openai_1",
        from: { id: 9, first_name: "Ada", username: "ada_bot" },
        message: {
          chat: { id: 1234, type: "private" },
          date: 1736380800,
          message_id: 23,
        },
      },
      me: { username: "openclaw_bot" },
      getFile: async () => ({ download: async () => new Uint8Array() }),
    });
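    // The picker should edit its own message rather than reply, render each button
    // with its configured display name, and mark the default with a trailing check mark.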
    expect(replySpy).not.toHaveBeenCalled();
    expect(editMessageTextSpy).toHaveBeenCalledTimes(1);
    const [, , , params] = editMessageTextSpy.mock.calls[0] ?? [];
    const buttons = (
      params as {
        reply_markup?: {
          inline_keyboard?: Array<Array<{ text?: string; callback_data?: string }>>;
        };
      }
    ).reply_markup?.inline_keyboard?.flat();
    expect(buttons).toContainEqual({
      text: "GPT 4.1 Bridge",
      callback_data: "mdl_sel_openai/gpt-4.1",
    });
    expect(buttons).toContainEqual({
      text: "GPT Five Bridge ✓",
      callback_data: "mdl_sel_openai/gpt-5",
    });
    expect(answerCallbackQuerySpy).toHaveBeenCalledWith("cbq-model-display-names-1");
  });
it("resets overrides when selecting the configured default model", async () => {
onSpy.mockClear();
replySpy.mockClear();