fix: show current think level in Telegram picker (#78278)

This commit is contained in:
Ayaan Zaidi
2026-05-06 11:21:41 +05:30
parent 1672d35ef5
commit 98cbf7f11c
3 changed files with 159 additions and 7 deletions

View File

@@ -512,12 +512,24 @@ describe("registerTelegramNativeCommands — session metadata", () => {
});
it("uses the target session model when building native argument menus", async () => {
const cfg: OpenClawConfig = {};
const cfg = {
agents: {
defaults: {
thinkingDefault: "low",
models: {
"anthropic/claude-opus-4-7": {
params: { thinking: "xhigh" },
},
},
},
},
} as OpenClawConfig;
sessionMocks.loadSessionStore.mockReturnValue({
"agent:main:main": {
providerOverride: "anthropic",
modelOverride: "claude-opus-4-7",
modelOverrideSource: "user",
thinkingLevel: "high",
updatedAt: 0,
},
});
@@ -541,7 +553,7 @@ describe("registerTelegramNativeCommands — session metadata", () => {
expect(sessionMocks.loadSessionStore).toHaveBeenCalledWith("/tmp/openclaw-sessions.json");
expect(sendMessage).toHaveBeenCalledWith(
100,
expect.stringContaining("Choose level for /think."),
expect.stringContaining("Current thinking level: high.\nChoose level for /think."),
expect.objectContaining({ reply_markup: expect.any(Object) }),
);
expect(replyMocks.dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
@@ -587,6 +599,7 @@ describe("registerTelegramNativeCommands — session metadata", () => {
agents: {
defaults: {
model: { primary: "openai/gpt-5.5" },
thinkingDefault: "medium",
},
},
} as OpenClawConfig;
@@ -619,7 +632,81 @@ describe("registerTelegramNativeCommands — session metadata", () => {
);
expect(sendMessage).toHaveBeenCalledWith(
100,
expect.stringContaining("Choose level for /think."),
expect.stringContaining("Current thinking level: medium.\nChoose level for /think."),
expect.objectContaining({ reply_markup: expect.any(Object) }),
);
expect(replyMocks.dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
});
it("uses target model thinking defaults before global thinking defaults", async () => {
  // Config declares a global thinkingDefault ("low") AND a model-scoped
  // params.thinking ("xhigh") for the overridden model; the model-scoped
  // value must win in the picker title.
  const cfg = {
    agents: {
      defaults: {
        thinkingDefault: "low",
        models: {
          "anthropic/claude-opus-4-7": { params: { thinking: "xhigh" } },
        },
      },
    },
  } as OpenClawConfig;
  // Session carries a user-chosen model override but no explicit thinkingLevel,
  // so resolution has to fall through to configured defaults.
  sessionMocks.loadSessionStore.mockReturnValue({
    "agent:main:main": {
      providerOverride: "anthropic",
      modelOverride: "claude-opus-4-7",
      modelOverrideSource: "user",
      updatedAt: 0,
    },
  });
  const registration = registerAndResolveCommandHandler({
    commandName: "think",
    cfg,
    allowFrom: ["*"],
  });
  await registration.handler(createTelegramPrivateCommandContext());
  // The picker message must surface the model-level default, not the global one.
  expect(registration.sendMessage).toHaveBeenCalledWith(
    100,
    expect.stringContaining("Current thinking level: xhigh.\nChoose level for /think."),
    expect.objectContaining({ reply_markup: expect.any(Object) }),
  );
  expect(replyMocks.dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();
});
it("uses per-agent thinking defaults before target model and global thinking defaults", async () => {
const cfg = {
agents: {
defaults: {
thinkingDefault: "low",
models: {
"anthropic/claude-opus-4-7": {
params: { thinking: "xhigh" },
},
},
},
list: [
{
id: "alpha",
model: { primary: "anthropic/claude-opus-4-7" },
thinkingDefault: "minimal",
},
],
},
} as OpenClawConfig;
sessionMocks.loadSessionStore.mockReturnValue({});
const { handler, sendMessage } = registerAndResolveCommandHandler({
commandName: "think",
cfg,
allowFrom: ["*"],
});
await handler(createTelegramPrivateCommandContext());
expect(sendMessage).toHaveBeenCalledWith(
100,
expect.stringContaining("Current thinking level: minimal.\nChoose level for /think."),
expect.objectContaining({ reply_markup: expect.any(Object) }),
);
expect(replyMocks.dispatchReplyWithBufferedBlockDispatcher).not.toHaveBeenCalled();

View File

@@ -1,7 +1,12 @@
import { randomUUID } from "node:crypto";
import path from "node:path";
import type { Bot, Context } from "grammy";
import { resolveDefaultModelForAgent } from "openclaw/plugin-sdk/agent-runtime";
import {
buildConfiguredModelCatalog,
resolveAgentConfig,
resolveDefaultModelForAgent,
resolveThinkingDefault,
} from "openclaw/plugin-sdk/agent-runtime";
import { resolveChannelStreamingBlockEnabled } from "openclaw/plugin-sdk/channel-streaming";
import {
resolveCommandAuthorization,
@@ -203,7 +208,7 @@ function resolveTelegramCommandMenuModelContext(params: {
cfg: OpenClawConfig;
agentId: string;
sessionKey: string;
}): { provider?: string; model?: string } {
}): { provider?: string; model?: string; thinkingLevel?: string } {
if (!params.sessionKey.trim()) {
return {};
}
@@ -215,8 +220,13 @@ function resolveTelegramCommandMenuModelContext(params: {
});
const store = loadSessionStore(storePath);
const entry = resolveSessionStoreEntry({ store, sessionKey: params.sessionKey }).existing;
const thinkingLevel = normalizeOptionalString(entry?.thinkingLevel);
if (entry?.modelOverrideSource === "auto" && normalizeOptionalString(entry.modelOverride)) {
return { provider: defaultModel.provider, model: defaultModel.model };
return {
provider: defaultModel.provider,
model: defaultModel.model,
...(thinkingLevel ? { thinkingLevel } : {}),
};
}
const override = resolveStoredModelOverride({
sessionEntry: entry,
@@ -228,6 +238,7 @@ function resolveTelegramCommandMenuModelContext(params: {
return {
provider: override.provider || defaultModel.provider,
model: override.model,
...(thinkingLevel ? { thinkingLevel } : {}),
};
}
const provider =
@@ -238,12 +249,54 @@ function resolveTelegramCommandMenuModelContext(params: {
return {
...(provider ? { provider } : {}),
...(model ? { model } : {}),
...(thinkingLevel ? { thinkingLevel } : {}),
};
} catch {
return {};
}
}
/**
 * Resolves the thinking level to display in the /think picker title.
 *
 * Precedence (first non-empty wins):
 *   1. the session's explicitly stored thinkingLevel,
 *   2. the per-agent thinkingDefault from config,
 *   3. the model/global default via resolveThinkingDefault for the
 *      session's provider/model (falling back to the agent's default model).
 */
function resolveTelegramThinkMenuCurrentLevel(params: {
  cfg: OpenClawConfig;
  agentId: string;
  provider?: string;
  model?: string;
  thinkingLevel?: string;
}): string {
  // A level stored on the session overrides every configured default.
  const sessionLevel = normalizeOptionalString(params.thinkingLevel);
  if (sessionLevel) {
    return sessionLevel;
  }
  // Next, a per-agent thinkingDefault beats model-scoped and global defaults.
  const perAgentDefault = normalizeOptionalString(
    resolveAgentConfig(params.cfg, params.agentId)?.thinkingDefault,
  );
  if (perAgentDefault) {
    return perAgentDefault;
  }
  // Otherwise resolve from model params / global default, using the agent's
  // default model wherever the session context didn't provide one.
  const fallbackModel = resolveDefaultModelForAgent({
    cfg: params.cfg,
    agentId: params.agentId,
  });
  const provider = params.provider ?? fallbackModel.provider;
  const model = params.model ?? fallbackModel.model;
  return resolveThinkingDefault({
    cfg: params.cfg,
    provider,
    model,
    catalog: buildConfiguredModelCatalog({ cfg: params.cfg }),
  });
}
/**
 * Builds the argument-menu title for a Telegram native command, prepending the
 * current thinking level line when (and only when) the command is /think and a
 * level is known.
 */
function formatTelegramCommandArgMenuTitle(params: {
  command: NonNullable<ReturnType<typeof findCommandByNativeName>>;
  menu: NonNullable<ReturnType<typeof resolveCommandArgMenu>>;
  currentThinkingLevel?: string;
}): string {
  const baseTitle = formatCommandArgMenuTitle({ command: params.command, menu: params.menu });
  const showLevel = params.command.key === "think" && Boolean(params.currentThinkingLevel);
  return showLevel
    ? `Current thinking level: ${params.currentThinkingLevel}.\n${baseTitle}`
    : baseTitle;
}
function resolveTelegramNativeReplyChannelData(
result: TelegramNativeReplyPayload,
): TelegramNativeReplyChannelData | undefined {
@@ -1006,7 +1059,18 @@ export const registerTelegramNativeCommands = ({
})
: null;
if (menu && commandDefinition) {
const title = formatCommandArgMenuTitle({ command: commandDefinition, menu });
const title = formatTelegramCommandArgMenuTitle({
command: commandDefinition,
menu,
currentThinkingLevel:
commandDefinition.key === "think"
? resolveTelegramThinkMenuCurrentLevel({
cfg: runtimeCfg,
agentId: route.agentId,
...menuModelContext,
})
: undefined,
});
const rows: Array<Array<{ text: string; callback_data: string }>> = [];
for (let i = 0; i < menu.choices.length; i += 2) {
const slice = menu.choices.slice(i, i + 2);