mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-06 08:20:43 +00:00
fix: honor Ollama thinking catalog metadata
This commit is contained in:
@@ -30,6 +30,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Gateway/media: route text-only `chat.send` image offloads through media-understanding fields so `agents.defaults.imageModel` can describe WebChat attachments instead of leaving only an opaque `media://inbound` marker. Fixes #72968. Thanks @vorajeeah.
|
||||
- Gateway/Windows: route no-listener restart handoffs through the Windows supervisor without leaving restart tokens in flight, so failed task scheduling can be retried and successful handoffs do not coalesce later restart requests. (#69056) Thanks @Thatgfsj.
|
||||
- Gateway/model pricing: skip plugin manifest discovery during background pricing refreshes when `plugins.enabled: false`, so disabled-plugin setups do not keep rebuilding plugin metadata from the Gateway hot path. Fixes #73291. Thanks @slideshow-dingo and @fishgills.
|
||||
- Ollama/thinking: validate `/think` commands against live Ollama catalog reasoning metadata, so models whose `/api/show` capabilities include `thinking` expose `low`, `medium`, `high`, and `max` instead of being stuck on `off`. Fixes #73366. Thanks @cymise.
|
||||
- Gateway/sessions: remove automatic oversized `sessions.json` rotation backups, deprecate `session.maintenance.rotateBytes`, and teach `openclaw doctor --fix` to remove the ignored key so hot session writes no longer copy multi-MB stores. Refs #72338. Thanks @midhunmonachan and @DougButdorf.
|
||||
- Channels/Telegram: fail fast when Telegram rejects the startup `getMe` token probe with 401, so invalid or stale BotFather tokens are reported as token auth failures instead of misleading `deleteWebhook` cleanup failures. Fixes #47674. Thanks @samaedan-arch.
|
||||
- ACPX: keep generated Codex and Claude ACP wrapper startup paths working when remote or special state filesystems reject chmod, since OpenClaw invokes the wrappers through Node instead of executing them directly. Fixes #73333. Thanks @david-garcia-garcia.
|
||||
|
||||
@@ -181,14 +181,14 @@ Choose your preferred setup method and mode.
|
||||
|
||||
When you set `OLLAMA_API_KEY` (or an auth profile) and **do not** define `models.providers.ollama` or another custom remote provider with `api: "ollama"`, OpenClaw discovers models from the local Ollama instance at `http://127.0.0.1:11434`.
|
||||
|
||||
| Behavior | Detail |
|
||||
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Catalog query | Queries `/api/tags` |
|
||||
| Capability detection | Uses best-effort `/api/show` lookups to read `contextWindow`, expanded `num_ctx` Modelfile parameters, and capabilities including vision/tools |
|
||||
| Vision models | Models with a `vision` capability reported by `/api/show` are marked as image-capable (`input: ["text", "image"]`), so OpenClaw auto-injects images into the prompt |
|
||||
| Reasoning detection | Marks `reasoning` with a model-name heuristic (`r1`, `reasoning`, `think`) |
|
||||
| Token limits | Sets `maxTokens` to the default Ollama max-token cap used by OpenClaw |
|
||||
| Costs | Sets all costs to `0` |
|
||||
| Behavior | Detail |
|
||||
| -------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Catalog query | Queries `/api/tags` |
|
||||
| Capability detection | Uses best-effort `/api/show` lookups to read `contextWindow`, expanded `num_ctx` Modelfile parameters, and capabilities including vision/tools |
|
||||
| Vision models | Models with a `vision` capability reported by `/api/show` are marked as image-capable (`input: ["text", "image"]`), so OpenClaw auto-injects images into the prompt |
|
||||
| Reasoning detection | Uses `/api/show` capabilities when available, including `thinking`; falls back to a model-name heuristic (`r1`, `reasoning`, `think`) when Ollama omits capabilities |
|
||||
| Token limits | Sets `maxTokens` to the default Ollama max-token cap used by OpenClaw |
|
||||
| Costs | Sets all costs to `0` |
|
||||
|
||||
This avoids manual model entries while keeping the catalog aligned with the local Ollama instance. You can use a full ref such as `ollama/<pulled-model>:latest` in local `infer model run`; OpenClaw resolves that installed model from Ollama's live catalog without requiring a hand-written `models.json` entry.
|
||||
|
||||
@@ -836,7 +836,7 @@ For the full setup and behavior details, see [Ollama Web Search](/tools/ollama-s
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="Thinking control">
|
||||
For native Ollama models, OpenClaw forwards thinking control as Ollama expects it: top-level `think`, not `options.think`.
|
||||
For native Ollama models, OpenClaw forwards thinking control as Ollama expects it: top-level `think`, not `options.think`. Auto-discovered models whose `/api/show` response includes the `thinking` capability expose `/think off`, `/think low`, `/think medium`, `/think high`, and `/think max`; non-thinking models expose only `/think off`.
|
||||
|
||||
```bash
|
||||
openclaw agent --model ollama/gemma4 --thinking off
|
||||
|
||||
@@ -6,7 +6,6 @@ import type {
|
||||
SetSessionModeRequest,
|
||||
} from "@agentclientprotocol/sdk";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import { listThinkingLevels } from "../auto-reply/thinking.js";
|
||||
import type { GatewayClient } from "../gateway/client.js";
|
||||
import type { EventFrame } from "../gateway/protocol/index.js";
|
||||
import { createInMemorySessionStore } from "./session.js";
|
||||
@@ -271,6 +270,11 @@ describe("acp session UX bridge behavior", () => {
|
||||
thinkingLevel: "high",
|
||||
modelProvider: "openai",
|
||||
model: "gpt-5.4",
|
||||
thinkingLevels: [
|
||||
{ id: "off", label: "off" },
|
||||
{ id: "medium", label: "medium" },
|
||||
{ id: "max", label: "max" },
|
||||
],
|
||||
verboseLevel: "full",
|
||||
reasoningLevel: "stream",
|
||||
responseUsage: "tokens",
|
||||
@@ -307,9 +311,12 @@ describe("acp session UX bridge behavior", () => {
|
||||
const result = await agent.loadSession(createLoadSessionRequest("agent:main:work"));
|
||||
|
||||
expect(result.modes?.currentModeId).toBe("high");
|
||||
expect(result.modes?.availableModes.map((mode) => mode.id)).toEqual(
|
||||
listThinkingLevels("openai", "gpt-5.4"),
|
||||
);
|
||||
expect(result.modes?.availableModes.map((mode) => mode.id)).toEqual([
|
||||
"off",
|
||||
"medium",
|
||||
"max",
|
||||
"high",
|
||||
]);
|
||||
expect(result.configOptions).toEqual(
|
||||
expect.arrayContaining([
|
||||
expect.objectContaining({
|
||||
|
||||
@@ -117,6 +117,7 @@ type GatewaySessionPresentationRow = Pick<
|
||||
| "fastMode"
|
||||
| "modelProvider"
|
||||
| "model"
|
||||
| "thinkingLevels"
|
||||
| "verboseLevel"
|
||||
| "traceLevel"
|
||||
| "reasoningLevel"
|
||||
@@ -247,7 +248,9 @@ function buildSessionPresentation(params: {
|
||||
...params.row,
|
||||
...params.overrides,
|
||||
};
|
||||
const availableLevelIds: string[] = [...listThinkingLevels(row.modelProvider, row.model)];
|
||||
const availableLevelIds: string[] = row.thinkingLevels?.map((level) => level.id) ?? [
|
||||
...listThinkingLevels(row.modelProvider, row.model),
|
||||
];
|
||||
const currentModeId = normalizeOptionalString(row.thinkingLevel) || "adaptive";
|
||||
if (!availableLevelIds.includes(currentModeId)) {
|
||||
availableLevelIds.push(currentModeId);
|
||||
@@ -1268,6 +1271,7 @@ export class AcpGatewayAgent implements Agent {
|
||||
derivedTitle: session.derivedTitle,
|
||||
updatedAt: session.updatedAt,
|
||||
thinkingLevel: session.thinkingLevel,
|
||||
thinkingLevels: session.thinkingLevels,
|
||||
modelProvider: session.modelProvider,
|
||||
model: session.model,
|
||||
fastMode: session.fastMode,
|
||||
|
||||
@@ -78,6 +78,7 @@ export async function applyInlineDirectivesFastLane(
|
||||
aliasIndex,
|
||||
allowedModelKeys,
|
||||
allowedModelCatalog,
|
||||
thinkingCatalog: await modelState.resolveThinkingCatalog(),
|
||||
resetModelOverride,
|
||||
provider,
|
||||
model,
|
||||
|
||||
@@ -130,6 +130,12 @@ export async function handleDirectiveOnly(
|
||||
|
||||
const resolvedProvider = modelSelection?.provider ?? provider;
|
||||
const resolvedModel = modelSelection?.model ?? model;
|
||||
const thinkingCatalog =
|
||||
params.thinkingCatalog && params.thinkingCatalog.length > 0
|
||||
? params.thinkingCatalog
|
||||
: allowedModelCatalog.length > 0
|
||||
? allowedModelCatalog
|
||||
: undefined;
|
||||
const fastModeState = resolveFastModeState({
|
||||
cfg: params.cfg,
|
||||
provider: resolvedProvider,
|
||||
@@ -148,12 +154,12 @@ export async function handleDirectiveOnly(
|
||||
return {
|
||||
text: withOptions(
|
||||
`Current thinking level: ${level}.`,
|
||||
formatThinkingLevels(resolvedProvider, resolvedModel),
|
||||
formatThinkingLevels(resolvedProvider, resolvedModel, ", ", thinkingCatalog),
|
||||
),
|
||||
};
|
||||
}
|
||||
return {
|
||||
text: `Unrecognized thinking level "${directives.rawThinkLevel}". Valid levels: ${formatThinkingLevels(resolvedProvider, resolvedModel)}.`,
|
||||
text: `Unrecognized thinking level "${directives.rawThinkLevel}". Valid levels: ${formatThinkingLevels(resolvedProvider, resolvedModel, ", ", thinkingCatalog)}.`,
|
||||
};
|
||||
}
|
||||
if (directives.hasVerboseDirective && !directives.verboseLevel) {
|
||||
@@ -300,10 +306,11 @@ export async function handleDirectiveOnly(
|
||||
provider: resolvedProvider,
|
||||
model: resolvedModel,
|
||||
level: directives.thinkLevel,
|
||||
catalog: thinkingCatalog,
|
||||
})
|
||||
) {
|
||||
return {
|
||||
text: `Thinking level "${directives.thinkLevel}" is not supported for ${resolvedProvider}/${resolvedModel}. Use one of: ${formatThinkingLevels(resolvedProvider, resolvedModel)}.`,
|
||||
text: `Thinking level "${directives.thinkLevel}" is not supported for ${resolvedProvider}/${resolvedModel}. Use one of: ${formatThinkingLevels(resolvedProvider, resolvedModel, ", ", thinkingCatalog)}.`,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -318,11 +325,13 @@ export async function handleDirectiveOnly(
|
||||
provider: resolvedProvider,
|
||||
model: resolvedModel,
|
||||
level: nextThinkLevel,
|
||||
catalog: thinkingCatalog,
|
||||
})
|
||||
? resolveSupportedThinkingLevel({
|
||||
provider: resolvedProvider,
|
||||
model: resolvedModel,
|
||||
level: nextThinkLevel,
|
||||
catalog: thinkingCatalog,
|
||||
})
|
||||
: undefined;
|
||||
const shouldRemapUnsupportedThinkLevel =
|
||||
|
||||
@@ -83,6 +83,7 @@ describe("mixed inline directives", () => {
|
||||
agentCfg: cfg.agents?.defaults,
|
||||
modelState: {
|
||||
resolveDefaultThinkingLevel: async () => "off",
|
||||
resolveThinkingCatalog: async () => [],
|
||||
allowedModelKeys: new Set(),
|
||||
allowedModelCatalog: [],
|
||||
resetModelOverride: false,
|
||||
@@ -156,6 +157,7 @@ describe("mixed inline directives", () => {
|
||||
agentCfg: cfg.agents?.defaults,
|
||||
modelState: {
|
||||
resolveDefaultThinkingLevel: async () => "off",
|
||||
resolveThinkingCatalog: async () => [],
|
||||
allowedModelKeys: new Set(),
|
||||
allowedModelCatalog: [],
|
||||
resetModelOverride: false,
|
||||
|
||||
@@ -931,6 +931,54 @@ describe("handleDirectiveOnly model persist behavior (fixes #1435)", () => {
|
||||
expect(result?.text).toContain("Options: off, minimal, low, medium, adaptive, high.");
|
||||
});
|
||||
|
||||
it("uses catalog reasoning metadata for provider-owned thinking levels", async () => {
|
||||
setDirectiveTestProviders([
|
||||
{
|
||||
id: "ollama",
|
||||
label: "Ollama",
|
||||
auth: [],
|
||||
resolveThinkingProfile: ({ reasoning }) => ({
|
||||
levels:
|
||||
reasoning === true
|
||||
? [{ id: "off" }, { id: "low" }, { id: "medium" }, { id: "high" }, { id: "max" }]
|
||||
: [{ id: "off" }],
|
||||
defaultLevel: "off",
|
||||
}),
|
||||
},
|
||||
]);
|
||||
const sessionEntry = createSessionEntry();
|
||||
const sessionStore = { [sessionKey]: sessionEntry };
|
||||
|
||||
const result = await handleDirectiveOnly(
|
||||
createHandleParams({
|
||||
directives: parseInlineDirectives("/think medium"),
|
||||
provider: "ollama",
|
||||
model: "qwen3.6:35b-a3b-mxfp8",
|
||||
allowedModelCatalog: [
|
||||
{
|
||||
provider: "ollama",
|
||||
id: "qwen3.6:35b-a3b-mxfp8",
|
||||
name: "qwen3.6:35b-a3b-mxfp8",
|
||||
reasoning: true,
|
||||
},
|
||||
],
|
||||
thinkingCatalog: [
|
||||
{
|
||||
provider: "ollama",
|
||||
id: "qwen3.6:35b-a3b-mxfp8",
|
||||
name: "qwen3.6:35b-a3b-mxfp8",
|
||||
reasoning: true,
|
||||
},
|
||||
],
|
||||
sessionEntry,
|
||||
sessionStore,
|
||||
}),
|
||||
);
|
||||
|
||||
expect(result?.text).toContain("Thinking level set to medium.");
|
||||
expect(sessionEntry.thinkingLevel).toBe("medium");
|
||||
});
|
||||
|
||||
it("persists verbose on and off directives", async () => {
|
||||
const sessionEntry = createSessionEntry();
|
||||
const sessionStore = { [sessionKey]: sessionEntry };
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import type { ModelCatalogEntry } from "../../agents/model-catalog.js";
|
||||
import type { ModelAliasIndex } from "../../agents/model-selection.js";
|
||||
import type { SessionEntry } from "../../config/sessions.js";
|
||||
import type { OpenClawConfig } from "../../config/types.openclaw.js";
|
||||
@@ -23,6 +24,7 @@ export type HandleDirectiveOnlyCoreParams = {
|
||||
allowedModelCatalog: Awaited<
|
||||
ReturnType<typeof import("../../agents/model-catalog.js").loadModelCatalog>
|
||||
>;
|
||||
thinkingCatalog?: ModelCatalogEntry[];
|
||||
resetModelOverride: boolean;
|
||||
provider: string;
|
||||
model: string;
|
||||
@@ -52,6 +54,7 @@ export type ApplyInlineDirectivesFastLaneParams = HandleDirectiveOnlyCoreParams
|
||||
agentCfg?: NonNullable<OpenClawConfig["agents"]>["defaults"];
|
||||
modelState: {
|
||||
resolveDefaultThinkingLevel: () => Promise<ThinkLevel | undefined>;
|
||||
resolveThinkingCatalog: () => Promise<ModelCatalogEntry[] | undefined>;
|
||||
allowedModelKeys: Set<string>;
|
||||
allowedModelCatalog: Awaited<
|
||||
ReturnType<typeof import("../../agents/model-catalog.js").loadModelCatalog>
|
||||
|
||||
@@ -5,6 +5,7 @@ import {
|
||||
} from "../../agents/agent-scope.js";
|
||||
import { resolveContextTokensForModel } from "../../agents/context.js";
|
||||
import { DEFAULT_CONTEXT_TOKENS } from "../../agents/defaults.js";
|
||||
import type { ModelCatalogEntry } from "../../agents/model-catalog.js";
|
||||
import { listLegacyRuntimeModelProviderAliases } from "../../agents/model-runtime-aliases.js";
|
||||
import { normalizeProviderId, type ModelAliasIndex } from "../../agents/model-selection.js";
|
||||
import { updateSessionStore } from "../../config/sessions/store.js";
|
||||
@@ -92,6 +93,7 @@ export async function persistInlineDirectives(params: {
|
||||
gatewayClientScopes?: string[];
|
||||
senderIsOwner?: boolean;
|
||||
markLiveSwitchPending?: boolean;
|
||||
thinkingCatalog?: ModelCatalogEntry[];
|
||||
}): Promise<{
|
||||
provider: string;
|
||||
model: string;
|
||||
@@ -127,6 +129,10 @@ export async function persistInlineDirectives(params: {
|
||||
surface: params.surface,
|
||||
gatewayClientScopes: params.gatewayClientScopes,
|
||||
});
|
||||
const thinkingCatalog =
|
||||
params.thinkingCatalog && params.thinkingCatalog.length > 0
|
||||
? params.thinkingCatalog
|
||||
: undefined;
|
||||
const delegatedTraceAllowed = (params.gatewayClientScopes ?? []).includes("operator.admin");
|
||||
const activeAgentId = sessionKey
|
||||
? resolveSessionAgentId({ sessionKey, config: cfg })
|
||||
@@ -273,12 +279,14 @@ export async function persistInlineDirectives(params: {
|
||||
provider,
|
||||
model,
|
||||
level: currentThinkingLevel,
|
||||
catalog: thinkingCatalog,
|
||||
})
|
||||
) {
|
||||
const remappedThinkingLevel = resolveSupportedThinkingLevel({
|
||||
provider,
|
||||
model,
|
||||
level: currentThinkingLevel,
|
||||
catalog: thinkingCatalog,
|
||||
});
|
||||
if (remappedThinkingLevel !== currentThinkingLevel) {
|
||||
sessionEntry.thinkingLevel = remappedThinkingLevel;
|
||||
|
||||
@@ -228,6 +228,7 @@ export async function applyInlineDirectiveOverrides(params: {
|
||||
defaultModel,
|
||||
aliasIndex,
|
||||
allowedModelKeys: modelState.allowedModelKeys,
|
||||
thinkingCatalog: modelState.allowedModelCatalog,
|
||||
initialModelLabel,
|
||||
formatModelSwitchEvent,
|
||||
agentCfg,
|
||||
@@ -312,10 +313,12 @@ export async function applyInlineDirectiveOverrides(params: {
|
||||
resolveDefaultThinkingLevel: () => modelState.resolveDefaultThinkingLevel(),
|
||||
});
|
||||
const currentThinkLevel = resolvedDefaultThinkLevel;
|
||||
const thinkingCatalog = await modelState.resolveThinkingCatalog();
|
||||
const directiveReply = await (
|
||||
await loadDirectiveImpl()
|
||||
).handleDirectiveOnly({
|
||||
...createDirectiveHandlingBase(),
|
||||
thinkingCatalog,
|
||||
currentThinkLevel,
|
||||
currentFastMode,
|
||||
currentVerboseLevel,
|
||||
@@ -392,6 +395,7 @@ export async function applyInlineDirectiveOverrides(params: {
|
||||
agentCfg,
|
||||
modelState: {
|
||||
resolveDefaultThinkingLevel: modelState.resolveDefaultThinkingLevel,
|
||||
resolveThinkingCatalog: modelState.resolveThinkingCatalog,
|
||||
...directiveModelState,
|
||||
},
|
||||
});
|
||||
|
||||
@@ -239,6 +239,7 @@ describe("resolveReplyDirectives", () => {
|
||||
allowedModelKeys: new Set<string>(),
|
||||
allowedModelCatalog: [],
|
||||
resetModelOverride: false,
|
||||
resolveThinkingCatalog: vi.fn(async () => []),
|
||||
resolveDefaultThinkingLevel: vi.fn(async () => "off"),
|
||||
resolveDefaultReasoningLevel: vi.fn(async () => "off"),
|
||||
});
|
||||
|
||||
@@ -204,6 +204,7 @@ function baseParams(
|
||||
resolvedBlockStreamingBreak: "message_end",
|
||||
modelState: {
|
||||
resolveDefaultThinkingLevel: async () => "medium",
|
||||
resolveThinkingCatalog: async () => [],
|
||||
} as never,
|
||||
provider: "anthropic",
|
||||
model: "claude-opus-4-1",
|
||||
|
||||
@@ -536,7 +536,11 @@ export async function runPreparedReply(
|
||||
if (!resolvedThinkLevel && prefixedBodyBase) {
|
||||
const parts = prefixedBodyBase.split(/\s+/);
|
||||
const maybeLevel = normalizeThinkLevel(parts[0]);
|
||||
if (maybeLevel && isThinkingLevelSupported({ provider, model, level: maybeLevel })) {
|
||||
const thinkingCatalog = maybeLevel ? await modelState.resolveThinkingCatalog() : undefined;
|
||||
if (
|
||||
maybeLevel &&
|
||||
isThinkingLevelSupported({ provider, model, level: maybeLevel, catalog: thinkingCatalog })
|
||||
) {
|
||||
resolvedThinkLevel = maybeLevel;
|
||||
prefixedBodyBase = parts.slice(1).join(" ").trim();
|
||||
}
|
||||
@@ -608,18 +612,27 @@ export async function runPreparedReply(
|
||||
if (!resolvedThinkLevel) {
|
||||
resolvedThinkLevel = await modelState.resolveDefaultThinkingLevel();
|
||||
}
|
||||
if (!isThinkingLevelSupported({ provider, model, level: resolvedThinkLevel })) {
|
||||
const thinkingCatalog = await modelState.resolveThinkingCatalog();
|
||||
if (
|
||||
!isThinkingLevelSupported({
|
||||
provider,
|
||||
model,
|
||||
level: resolvedThinkLevel,
|
||||
catalog: thinkingCatalog,
|
||||
})
|
||||
) {
|
||||
const explicitThink = directives.hasThinkDirective && directives.thinkLevel !== undefined;
|
||||
if (explicitThink) {
|
||||
typing.cleanup();
|
||||
return {
|
||||
text: `Thinking level "${resolvedThinkLevel}" is not supported for ${provider}/${model}. Use one of: ${formatThinkingLevels(provider, model)}.`,
|
||||
text: `Thinking level "${resolvedThinkLevel}" is not supported for ${provider}/${model}. Use one of: ${formatThinkingLevels(provider, model, ", ", thinkingCatalog)}.`,
|
||||
};
|
||||
}
|
||||
const fallbackThinkLevel = resolveSupportedThinkingLevel({
|
||||
provider,
|
||||
model,
|
||||
level: resolvedThinkLevel,
|
||||
catalog: thinkingCatalog,
|
||||
});
|
||||
if (fallbackThinkLevel !== resolvedThinkLevel) {
|
||||
const previousThinkLevel = resolvedThinkLevel;
|
||||
|
||||
@@ -126,6 +126,7 @@ export function createGetReplyContinueDirectivesResult(params: {
|
||||
model: "gpt-4o-mini",
|
||||
modelState: {
|
||||
resolveDefaultThinkingLevel: async () => undefined,
|
||||
resolveThinkingCatalog: async () => [],
|
||||
},
|
||||
contextTokens: 0,
|
||||
inlineStatusRequested: false,
|
||||
|
||||
@@ -31,6 +31,7 @@ type ModelSelectionState = {
|
||||
allowedModelKeys: Set<string>;
|
||||
allowedModelCatalog: ModelCatalog;
|
||||
resetModelOverride: boolean;
|
||||
resolveThinkingCatalog: () => Promise<ModelCatalog | undefined>;
|
||||
resolveDefaultThinkingLevel: () => Promise<ThinkLevel>;
|
||||
/** Default reasoning level from model capability: "on" if model has reasoning, else "off". */
|
||||
resolveDefaultReasoningLevel: () => Promise<"on" | "off">;
|
||||
@@ -48,6 +49,7 @@ export function createFastTestModelSelectionState(params: {
|
||||
allowedModelKeys: new Set<string>(),
|
||||
allowedModelCatalog: [],
|
||||
resetModelOverride: false,
|
||||
resolveThinkingCatalog: async () => [],
|
||||
resolveDefaultThinkingLevel: async () => params.agentCfg?.thinkingDefault as ThinkLevel,
|
||||
resolveDefaultReasoningLevel: async () => "off",
|
||||
needsModelCatalog: false,
|
||||
@@ -235,17 +237,10 @@ export async function createModelSelectionState(params: {
|
||||
}
|
||||
}
|
||||
|
||||
let defaultThinkingLevel: ThinkLevel | undefined;
|
||||
const resolveDefaultThinkingLevel = async () => {
|
||||
if (defaultThinkingLevel) {
|
||||
return defaultThinkingLevel;
|
||||
}
|
||||
const agentThinkingDefault = agentEntry?.thinkingDefault as ThinkLevel | undefined;
|
||||
const configuredThinkingDefault = agentCfg?.thinkingDefault as ThinkLevel | undefined;
|
||||
const explicitThinkingDefault = agentThinkingDefault ?? configuredThinkingDefault;
|
||||
if (explicitThinkingDefault) {
|
||||
defaultThinkingLevel = explicitThinkingDefault;
|
||||
return defaultThinkingLevel;
|
||||
let thinkingCatalog: ModelCatalog | undefined;
|
||||
const resolveThinkingCatalog = async () => {
|
||||
if (thinkingCatalog) {
|
||||
return thinkingCatalog;
|
||||
}
|
||||
let catalogForThinking =
|
||||
modelCatalog && modelCatalog.length > 0 ? modelCatalog : allowedModelCatalog;
|
||||
@@ -267,6 +262,23 @@ export async function createModelSelectionState(params: {
|
||||
: allowedModelCatalog
|
||||
: allowedModelCatalog;
|
||||
}
|
||||
thinkingCatalog = catalogForThinking.length > 0 ? catalogForThinking : undefined;
|
||||
return thinkingCatalog;
|
||||
};
|
||||
|
||||
let defaultThinkingLevel: ThinkLevel | undefined;
|
||||
const resolveDefaultThinkingLevel = async () => {
|
||||
if (defaultThinkingLevel) {
|
||||
return defaultThinkingLevel;
|
||||
}
|
||||
const agentThinkingDefault = agentEntry?.thinkingDefault as ThinkLevel | undefined;
|
||||
const configuredThinkingDefault = agentCfg?.thinkingDefault as ThinkLevel | undefined;
|
||||
const explicitThinkingDefault = agentThinkingDefault ?? configuredThinkingDefault;
|
||||
if (explicitThinkingDefault) {
|
||||
defaultThinkingLevel = explicitThinkingDefault;
|
||||
return defaultThinkingLevel;
|
||||
}
|
||||
const catalogForThinking = await resolveThinkingCatalog();
|
||||
const resolved = resolveThinkingDefault({
|
||||
cfg,
|
||||
provider,
|
||||
@@ -297,6 +309,7 @@ export async function createModelSelectionState(params: {
|
||||
allowedModelKeys,
|
||||
allowedModelCatalog,
|
||||
resetModelOverride,
|
||||
resolveThinkingCatalog,
|
||||
resolveDefaultThinkingLevel,
|
||||
resolveDefaultReasoningLevel,
|
||||
needsModelCatalog,
|
||||
|
||||
@@ -205,21 +205,30 @@ export function supportsXHighThinking(provider?: string | null, model?: string |
|
||||
return supportsThinkingLevel(provider, model, "xhigh");
|
||||
}
|
||||
|
||||
export function listThinkingLevels(provider?: string | null, model?: string | null): ThinkLevel[] {
|
||||
const profile = resolveThinkingProfile({ provider, model });
|
||||
export function listThinkingLevels(
|
||||
provider?: string | null,
|
||||
model?: string | null,
|
||||
catalog?: ThinkingCatalogEntry[],
|
||||
): ThinkLevel[] {
|
||||
const profile = resolveThinkingProfile({ provider, model, catalog });
|
||||
return profile.levels.map((level) => level.id);
|
||||
}
|
||||
|
||||
export function listThinkingLevelOptions(
|
||||
provider?: string | null,
|
||||
model?: string | null,
|
||||
catalog?: ThinkingCatalogEntry[],
|
||||
): ThinkingLevelOption[] {
|
||||
const profile = resolveThinkingProfile({ provider, model });
|
||||
const profile = resolveThinkingProfile({ provider, model, catalog });
|
||||
return profile.levels.map(({ id, label }) => ({ id, label }));
|
||||
}
|
||||
|
||||
export function listThinkingLevelLabels(provider?: string | null, model?: string | null): string[] {
|
||||
return listThinkingLevelOptions(provider, model).map((level) => level.label);
|
||||
export function listThinkingLevelLabels(
|
||||
provider?: string | null,
|
||||
model?: string | null,
|
||||
catalog?: ThinkingCatalogEntry[],
|
||||
): string[] {
|
||||
return listThinkingLevelOptions(provider, model, catalog).map((level) => level.label);
|
||||
}
|
||||
|
||||
export function formatThinkingLevels(
|
||||
|
||||
@@ -4,11 +4,14 @@ import {
|
||||
loadRunCronIsolatedAgentTurn,
|
||||
makeCronSession,
|
||||
makeCronSessionEntry,
|
||||
isThinkingLevelSupportedMock,
|
||||
loadModelCatalogMock,
|
||||
resolveAgentConfigMock,
|
||||
resolveAgentModelFallbacksOverrideMock,
|
||||
resolveAllowedModelRefMock,
|
||||
resolveConfiguredModelRefMock,
|
||||
resolveCronSessionMock,
|
||||
resolveSupportedThinkingLevelMock,
|
||||
resetRunCronIsolatedAgentTurnHarness,
|
||||
restoreFastTestEnv,
|
||||
runEmbeddedPiAgentMock,
|
||||
@@ -147,6 +150,56 @@ describe("runCronIsolatedAgentTurn — cron model override forwarding (#58065)",
|
||||
expect(embeddedCall?.model).toBe("gemini-2.0-flash");
|
||||
});
|
||||
|
||||
it("validates cron thinking with catalog reasoning metadata", async () => {
|
||||
resolveAllowedModelRefMock.mockImplementation(() => ({
|
||||
ref: { provider: "ollama", model: "qwen3:0.6b" },
|
||||
}));
|
||||
loadModelCatalogMock.mockResolvedValue([
|
||||
{
|
||||
provider: "ollama",
|
||||
id: "qwen3:0.6b",
|
||||
name: "qwen3:0.6b",
|
||||
reasoning: true,
|
||||
},
|
||||
]);
|
||||
isThinkingLevelSupportedMock.mockImplementation(
|
||||
({ catalog, level }: { catalog?: Array<{ reasoning?: boolean }>; level?: string }) =>
|
||||
level === "medium" && catalog?.[0]?.reasoning === true,
|
||||
);
|
||||
resolveSupportedThinkingLevelMock.mockReturnValue("off");
|
||||
runWithModelFallbackMock.mockImplementation(async ({ provider, model, run }) => {
|
||||
const result = await run(provider, model);
|
||||
return { result, provider, model, attempts: [] };
|
||||
});
|
||||
|
||||
await runCronIsolatedAgentTurn(
|
||||
makeParams({
|
||||
job: makeJob({
|
||||
payload: {
|
||||
kind: "agentTurn",
|
||||
message: "summarize",
|
||||
model: "ollama/qwen3:0.6b",
|
||||
thinking: "medium",
|
||||
},
|
||||
}),
|
||||
}),
|
||||
);
|
||||
|
||||
expect(isThinkingLevelSupportedMock).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
provider: "ollama",
|
||||
model: "qwen3:0.6b",
|
||||
level: "medium",
|
||||
catalog: expect.arrayContaining([
|
||||
expect.objectContaining({ provider: "ollama", id: "qwen3:0.6b", reasoning: true }),
|
||||
]),
|
||||
}),
|
||||
);
|
||||
expect(runEmbeddedPiAgentMock).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ provider: "ollama", model: "qwen3:0.6b", thinkLevel: "medium" }),
|
||||
);
|
||||
});
|
||||
|
||||
it("does not add agent primary model as fallback when cron payload model is set", async () => {
|
||||
// No per-agent fallbacks configured — resolveAgentModelFallbacksOverride
|
||||
// returns undefined in that case. Before the fix, this caused
|
||||
|
||||
@@ -81,8 +81,8 @@ const hasNonzeroUsageMock = createMock();
|
||||
const ensureAgentWorkspaceMock = createMock();
|
||||
const normalizeThinkLevelMock = createMock();
|
||||
const normalizeVerboseLevelMock = createMock();
|
||||
const isThinkingLevelSupportedMock = createMock();
|
||||
const resolveSupportedThinkingLevelMock = createMock();
|
||||
export const isThinkingLevelSupportedMock = createMock();
|
||||
export const resolveSupportedThinkingLevelMock = createMock();
|
||||
const supportsXHighThinkingMock = createMock();
|
||||
const resolveSessionTranscriptPathMock = createMock();
|
||||
const setSessionRuntimeModelMock = createMock();
|
||||
@@ -93,7 +93,7 @@ const mapHookExternalContentSourceMock = createMock();
|
||||
const isExternalHookSessionMock = createMock();
|
||||
const resolveHookExternalContentSourceMock = createMock();
|
||||
const getSkillsSnapshotVersionMock = createMock();
|
||||
const loadModelCatalogMock = createMock();
|
||||
export const loadModelCatalogMock = createMock();
|
||||
const getRemoteSkillEligibilityMock = createMock();
|
||||
|
||||
vi.mock("./run.runtime.js", () => ({
|
||||
|
||||
@@ -604,18 +604,21 @@ async function prepareCronRunContext(params: {
|
||||
);
|
||||
let thinkLevel: ThinkLevel | undefined = jobThink ?? hooksGmailThinking;
|
||||
if (!thinkLevel) {
|
||||
const thinkingCatalog = await loadCatalog();
|
||||
thinkLevel = resolveThinkingDefault({
|
||||
cfg: cfgWithAgentDefaults,
|
||||
provider,
|
||||
model,
|
||||
catalog: await loadCatalog(),
|
||||
catalog: thinkingCatalog,
|
||||
});
|
||||
}
|
||||
if (!isThinkingLevelSupported({ provider, model, level: thinkLevel })) {
|
||||
const thinkingCatalog = await loadCatalog();
|
||||
if (!isThinkingLevelSupported({ provider, model, level: thinkLevel, catalog: thinkingCatalog })) {
|
||||
const fallbackThinkLevel = resolveSupportedThinkingLevel({
|
||||
provider,
|
||||
model,
|
||||
level: thinkLevel,
|
||||
catalog: thinkingCatalog,
|
||||
});
|
||||
if (fallbackThinkLevel !== thinkLevel) {
|
||||
logWarn(
|
||||
|
||||
@@ -604,17 +604,20 @@ async function handleSessionSend(params: {
|
||||
}
|
||||
}
|
||||
export const sessionsHandlers: GatewayRequestHandlers = {
|
||||
"sessions.list": ({ params, respond, context }) => {
|
||||
"sessions.list": async ({ params, respond, context }) => {
|
||||
if (!assertValidParams(params, validateSessionsListParams, "sessions.list", respond)) {
|
||||
return;
|
||||
}
|
||||
const p = params;
|
||||
const cfg = context.getRuntimeConfig();
|
||||
const { storePath, store } = loadCombinedSessionStoreForGateway(cfg);
|
||||
const loadedCatalog = await context.loadGatewayModelCatalog().catch(() => undefined);
|
||||
const modelCatalog = Array.isArray(loadedCatalog) ? loadedCatalog : undefined;
|
||||
const result = listSessionsFromStore({
|
||||
cfg,
|
||||
storePath,
|
||||
store,
|
||||
modelCatalog,
|
||||
opts: p,
|
||||
});
|
||||
respond(true, result, undefined);
|
||||
|
||||
@@ -140,6 +140,61 @@ describe("gateway session utils", () => {
|
||||
);
|
||||
});
|
||||
|
||||
test("session defaults and rows use catalog reasoning metadata for provider thinking options", () => {
|
||||
const registry = createEmptyPluginRegistry();
|
||||
registry.providers.push({
|
||||
pluginId: "ollama",
|
||||
source: "test",
|
||||
provider: {
|
||||
id: "ollama",
|
||||
label: "Ollama",
|
||||
auth: [],
|
||||
resolveThinkingProfile: ({ reasoning }) => ({
|
||||
levels:
|
||||
reasoning === true
|
||||
? [{ id: "off" }, { id: "low" }, { id: "medium" }, { id: "high" }, { id: "max" }]
|
||||
: [{ id: "off" }],
|
||||
defaultLevel: "off",
|
||||
}),
|
||||
},
|
||||
});
|
||||
setActivePluginRegistry(registry);
|
||||
|
||||
const cfg = createModelDefaultsConfig({ primary: "ollama/qwen3:0.6b" });
|
||||
const catalog = [
|
||||
{
|
||||
provider: "ollama",
|
||||
id: "qwen3:0.6b",
|
||||
name: "qwen3:0.6b",
|
||||
reasoning: true,
|
||||
},
|
||||
];
|
||||
|
||||
const defaults = getSessionDefaults(cfg, catalog);
|
||||
const row = buildGatewaySessionRow({
|
||||
cfg,
|
||||
storePath: "",
|
||||
store: {},
|
||||
key: "main",
|
||||
modelCatalog: catalog,
|
||||
});
|
||||
|
||||
expect(defaults.thinkingLevels?.map((level) => level.id)).toEqual([
|
||||
"off",
|
||||
"low",
|
||||
"medium",
|
||||
"high",
|
||||
"max",
|
||||
]);
|
||||
expect(row.thinkingLevels?.map((level) => level.id)).toEqual([
|
||||
"off",
|
||||
"low",
|
||||
"medium",
|
||||
"high",
|
||||
"max",
|
||||
]);
|
||||
});
|
||||
|
||||
test("session defaults use configured thinking default", () => {
|
||||
const defaults = getSessionDefaults({
|
||||
agents: {
|
||||
|
||||
@@ -1047,6 +1047,7 @@ function resolveGatewaySessionThinkingDefault(params: {
|
||||
provider: string;
|
||||
model: string;
|
||||
agentId?: string;
|
||||
modelCatalog?: ModelCatalogEntry[];
|
||||
}) {
|
||||
const agentThinkingDefault = params.agentId
|
||||
? resolveAgentConfig(params.cfg, params.agentId)?.thinkingDefault
|
||||
@@ -1057,11 +1058,15 @@ function resolveGatewaySessionThinkingDefault(params: {
|
||||
cfg: params.cfg,
|
||||
provider: params.provider,
|
||||
model: params.model,
|
||||
catalog: params.modelCatalog,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
export function getSessionDefaults(cfg: OpenClawConfig): GatewaySessionsDefaults {
|
||||
export function getSessionDefaults(
|
||||
cfg: OpenClawConfig,
|
||||
modelCatalog?: ModelCatalogEntry[],
|
||||
): GatewaySessionsDefaults {
|
||||
const resolved = resolveConfiguredModelRef({
|
||||
cfg,
|
||||
defaultProvider: DEFAULT_PROVIDER,
|
||||
@@ -1071,7 +1076,7 @@ export function getSessionDefaults(cfg: OpenClawConfig): GatewaySessionsDefaults
|
||||
cfg.agents?.defaults?.contextTokens ??
|
||||
lookupContextTokens(resolved.model, { allowAsyncLoad: false }) ??
|
||||
DEFAULT_CONTEXT_TOKENS;
|
||||
const thinkingLevels = listThinkingLevelOptions(resolved.provider, resolved.model);
|
||||
const thinkingLevels = listThinkingLevelOptions(resolved.provider, resolved.model, modelCatalog);
|
||||
return {
|
||||
modelProvider: resolved.provider ?? null,
|
||||
model: resolved.model ?? null,
|
||||
@@ -1082,6 +1087,7 @@ export function getSessionDefaults(cfg: OpenClawConfig): GatewaySessionsDefaults
|
||||
cfg,
|
||||
provider: resolved.provider,
|
||||
model: resolved.model,
|
||||
modelCatalog,
|
||||
}),
|
||||
};
|
||||
}
|
||||
@@ -1247,6 +1253,7 @@ export function buildGatewaySessionRow(params: {
|
||||
store: Record<string, SessionEntry>;
|
||||
key: string;
|
||||
entry?: SessionEntry;
|
||||
modelCatalog?: ModelCatalogEntry[];
|
||||
now?: number;
|
||||
includeDerivedTitles?: boolean;
|
||||
includeLastMessage?: boolean;
|
||||
@@ -1427,7 +1434,11 @@ export function buildGatewaySessionRow(params: {
|
||||
const rowModel = selectedModel?.model ?? model;
|
||||
const thinkingProvider = rowModelProvider ?? DEFAULT_PROVIDER;
|
||||
const thinkingModel = rowModel ?? DEFAULT_MODEL;
|
||||
const thinkingLevels = listThinkingLevelOptions(thinkingProvider, thinkingModel);
|
||||
const thinkingLevels = listThinkingLevelOptions(
|
||||
thinkingProvider,
|
||||
thinkingModel,
|
||||
params.modelCatalog,
|
||||
);
|
||||
const pluginExtensions = entry
|
||||
? projectPluginSessionExtensionsSync({ sessionKey: key, entry })
|
||||
: [];
|
||||
@@ -1463,6 +1474,7 @@ export function buildGatewaySessionRow(params: {
|
||||
provider: thinkingProvider,
|
||||
model: thinkingModel,
|
||||
agentId: sessionAgentId,
|
||||
modelCatalog: params.modelCatalog,
|
||||
}),
|
||||
fastMode: entry?.fastMode,
|
||||
verboseLevel: entry?.verboseLevel,
|
||||
@@ -1544,6 +1556,7 @@ export function listSessionsFromStore(params: {
|
||||
cfg: OpenClawConfig;
|
||||
storePath: string;
|
||||
store: Record<string, SessionEntry>;
|
||||
modelCatalog?: ModelCatalogEntry[];
|
||||
opts: import("./protocol/index.js").SessionsListParams;
|
||||
}): SessionsListResult {
|
||||
const { cfg, storePath, store, opts } = params;
|
||||
@@ -1650,6 +1663,7 @@ export function listSessionsFromStore(params: {
|
||||
store,
|
||||
key,
|
||||
entry,
|
||||
modelCatalog: params.modelCatalog,
|
||||
now,
|
||||
includeDerivedTitles,
|
||||
includeLastMessage,
|
||||
@@ -1660,7 +1674,7 @@ export function listSessionsFromStore(params: {
|
||||
ts: now,
|
||||
path: storePath,
|
||||
count: sessions.length,
|
||||
defaults: getSessionDefaults(cfg),
|
||||
defaults: getSessionDefaults(cfg, params.modelCatalog),
|
||||
sessions,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
import { describe, expect, test } from "vitest";
|
||||
import { afterEach, describe, expect, test } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import type { SessionEntry } from "../config/sessions.js";
|
||||
import { createEmptyPluginRegistry } from "../plugins/registry-empty.js";
|
||||
import { resetPluginRuntimeStateForTest, setActivePluginRegistry } from "../plugins/runtime.js";
|
||||
import { applySessionsPatchToStore } from "./sessions-patch.js";
|
||||
|
||||
const SUBAGENT_MODEL = "synthetic/hf:moonshotai/Kimi-K2.5";
|
||||
@@ -105,6 +107,10 @@ function createAllowlistedAnthropicModelCfg(): OpenClawConfig {
|
||||
}
|
||||
|
||||
describe("gateway sessions patch", () => {
|
||||
afterEach(() => {
|
||||
resetPluginRuntimeStateForTest();
|
||||
});
|
||||
|
||||
test("persists thinkingLevel=off (does not clear)", async () => {
|
||||
const entry = expectPatchOk(
|
||||
await runPatch({
|
||||
@@ -331,6 +337,53 @@ describe("gateway sessions patch", () => {
|
||||
expect(entry.spawnDepth).toBe(2);
|
||||
});
|
||||
|
||||
test("validates thinking patches with live catalog reasoning metadata", async () => {
|
||||
const registry = createEmptyPluginRegistry();
|
||||
registry.providers.push({
|
||||
pluginId: "ollama",
|
||||
source: "test",
|
||||
provider: {
|
||||
id: "ollama",
|
||||
label: "Ollama",
|
||||
auth: [],
|
||||
resolveThinkingProfile: ({ reasoning }) => ({
|
||||
levels:
|
||||
reasoning === true
|
||||
? [{ id: "off" }, { id: "low" }, { id: "medium" }, { id: "high" }, { id: "max" }]
|
||||
: [{ id: "off" }],
|
||||
defaultLevel: "off",
|
||||
}),
|
||||
},
|
||||
});
|
||||
setActivePluginRegistry(registry);
|
||||
|
||||
const entry = expectPatchOk(
|
||||
await runPatch({
|
||||
cfg: {
|
||||
agents: {
|
||||
defaults: {
|
||||
model: { primary: "ollama/qwen3:0.6b" },
|
||||
},
|
||||
},
|
||||
} as OpenClawConfig,
|
||||
patch: {
|
||||
key: MAIN_SESSION_KEY,
|
||||
thinkingLevel: "medium",
|
||||
},
|
||||
loadGatewayModelCatalog: async () => [
|
||||
{
|
||||
provider: "ollama",
|
||||
id: "qwen3:0.6b",
|
||||
name: "qwen3:0.6b",
|
||||
reasoning: true,
|
||||
},
|
||||
],
|
||||
}),
|
||||
);
|
||||
|
||||
expect(entry.thinkingLevel).toBe("medium");
|
||||
});
|
||||
|
||||
test("sets spawnedBy for ACP sessions", async () => {
|
||||
const entry = expectPatchOk(
|
||||
await runPatch({
|
||||
|
||||
@@ -101,6 +101,18 @@ export async function applySessionsPatchToStore(params: {
|
||||
const subagentModelHint = isSubagentSessionKey(storeKey)
|
||||
? resolveSubagentConfiguredModelSelection({ cfg, agentId: sessionAgentId })
|
||||
: undefined;
|
||||
let loadedModelCatalog: ModelCatalogEntry[] | undefined;
|
||||
const loadModelCatalogForPatch = async () => {
|
||||
if (loadedModelCatalog) {
|
||||
return loadedModelCatalog;
|
||||
}
|
||||
if (!params.loadGatewayModelCatalog) {
|
||||
return undefined;
|
||||
}
|
||||
const catalog = await params.loadGatewayModelCatalog();
|
||||
loadedModelCatalog = Array.isArray(catalog) ? catalog : [];
|
||||
return loadedModelCatalog;
|
||||
};
|
||||
|
||||
const existing = store[storeKey];
|
||||
const next: SessionEntry = existing
|
||||
@@ -248,8 +260,9 @@ export async function applySessionsPatchToStore(params: {
|
||||
const hintProvider =
|
||||
normalizeOptionalString(existing?.providerOverride) || resolvedDefault.provider;
|
||||
const hintModel = normalizeOptionalString(existing?.modelOverride) || resolvedDefault.model;
|
||||
const thinkingCatalog = await loadModelCatalogForPatch();
|
||||
return invalid(
|
||||
`invalid thinkingLevel (use ${formatThinkingLevels(hintProvider, hintModel, "|")})`,
|
||||
`invalid thinkingLevel (use ${formatThinkingLevels(hintProvider, hintModel, "|", thinkingCatalog)})`,
|
||||
);
|
||||
}
|
||||
next.thinkingLevel = normalized;
|
||||
@@ -408,7 +421,13 @@ export async function applySessionsPatchToStore(params: {
|
||||
error: errorShape(ErrorCodes.UNAVAILABLE, "model catalog unavailable"),
|
||||
};
|
||||
}
|
||||
const catalog = await params.loadGatewayModelCatalog();
|
||||
const catalog = await loadModelCatalogForPatch();
|
||||
if (!catalog) {
|
||||
return {
|
||||
ok: false,
|
||||
error: errorShape(ErrorCodes.UNAVAILABLE, "model catalog unavailable"),
|
||||
};
|
||||
}
|
||||
const resolved = resolveAllowedModelRef({
|
||||
cfg,
|
||||
catalog,
|
||||
@@ -438,6 +457,7 @@ export async function applySessionsPatchToStore(params: {
|
||||
const effectiveProvider = next.providerOverride ?? resolvedDefault.provider;
|
||||
const effectiveModel = next.modelOverride ?? resolvedDefault.model;
|
||||
const thinkingLevel = normalizeThinkLevel(next.thinkingLevel);
|
||||
const thinkingCatalog = await loadModelCatalogForPatch();
|
||||
if (!thinkingLevel) {
|
||||
delete next.thinkingLevel;
|
||||
} else if (
|
||||
@@ -445,17 +465,19 @@ export async function applySessionsPatchToStore(params: {
|
||||
provider: effectiveProvider,
|
||||
model: effectiveModel,
|
||||
level: thinkingLevel,
|
||||
catalog: thinkingCatalog,
|
||||
})
|
||||
) {
|
||||
if ("thinkingLevel" in patch) {
|
||||
return invalid(
|
||||
`thinkingLevel "${thinkingLevel}" is not supported for ${effectiveProvider}/${effectiveModel} (use ${formatThinkingLevels(effectiveProvider, effectiveModel, "|")})`,
|
||||
`thinkingLevel "${thinkingLevel}" is not supported for ${effectiveProvider}/${effectiveModel} (use ${formatThinkingLevels(effectiveProvider, effectiveModel, "|", thinkingCatalog)})`,
|
||||
);
|
||||
}
|
||||
next.thinkingLevel = resolveSupportedThinkingLevel({
|
||||
provider: effectiveProvider,
|
||||
model: effectiveModel,
|
||||
level: thinkingLevel,
|
||||
catalog: thinkingCatalog,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,6 +38,23 @@ describe("getSlashCommands", () => {
|
||||
expect(gatewayStatus?.description).toBe("Show gateway status summary");
|
||||
expect(crestodian?.description).toBe("Return to Crestodian");
|
||||
});
|
||||
|
||||
it("uses session-provided thinking levels for completions", () => {
|
||||
const commands = getSlashCommands({
|
||||
provider: "ollama",
|
||||
model: "qwen3:0.6b",
|
||||
thinkingLevels: [
|
||||
{ id: "off", label: "off" },
|
||||
{ id: "medium", label: "medium" },
|
||||
{ id: "max", label: "max" },
|
||||
],
|
||||
});
|
||||
const think = commands.find((command) => command.name === "think");
|
||||
expect(think?.getArgumentCompletions?.("m")).toEqual([
|
||||
{ value: "medium", label: "medium" },
|
||||
{ value: "max", label: "max" },
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("helpText", () => {
|
||||
|
||||
@@ -21,6 +21,7 @@ export type SlashCommandOptions = {
|
||||
cfg?: OpenClawConfig;
|
||||
provider?: string;
|
||||
model?: string;
|
||||
thinkingLevels?: Array<{ id: string; label: string }>;
|
||||
local?: boolean;
|
||||
};
|
||||
|
||||
@@ -55,7 +56,9 @@ export function parseCommand(input: string): ParsedCommand {
|
||||
}
|
||||
|
||||
export function getSlashCommands(options: SlashCommandOptions = {}): SlashCommand[] {
|
||||
const thinkLevels = listThinkingLevelLabels(options.provider, options.model);
|
||||
const thinkLevels =
|
||||
options.thinkingLevels?.map((level) => level.label) ??
|
||||
listThinkingLevelLabels(options.provider, options.model);
|
||||
const verboseCompletions = createLevelCompletion(VERBOSE_LEVELS);
|
||||
const traceCompletions = createLevelCompletion(TRACE_LEVELS);
|
||||
const fastCompletions = createLevelCompletion(FAST_LEVELS);
|
||||
|
||||
@@ -28,11 +28,13 @@ export type TuiSessionList = {
|
||||
model?: string | null;
|
||||
modelProvider?: string | null;
|
||||
contextTokens?: number | null;
|
||||
thinkingLevels?: Array<{ id: string; label: string }>;
|
||||
};
|
||||
sessions: Array<
|
||||
Pick<
|
||||
SessionInfo,
|
||||
| "thinkingLevel"
|
||||
| "thinkingLevels"
|
||||
| "fastMode"
|
||||
| "verboseLevel"
|
||||
| "reasoningLevel"
|
||||
|
||||
@@ -403,11 +403,9 @@ export function createCommandHandlers(context: CommandHandlerContext) {
|
||||
break;
|
||||
case "think":
|
||||
if (!args) {
|
||||
const levels = formatThinkingLevels(
|
||||
state.sessionInfo.modelProvider,
|
||||
state.sessionInfo.model,
|
||||
"|",
|
||||
);
|
||||
const levels =
|
||||
state.sessionInfo.thinkingLevels?.map((level) => level.label).join("|") ||
|
||||
formatThinkingLevels(state.sessionInfo.modelProvider, state.sessionInfo.model, "|");
|
||||
chatLog.addSystem(`usage: /think <${levels}>`);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -38,6 +38,7 @@ type SessionInfoDefaults = {
|
||||
model?: string | null;
|
||||
modelProvider?: string | null;
|
||||
contextTokens?: number | null;
|
||||
thinkingLevels?: Array<{ id: string; label: string }>;
|
||||
};
|
||||
|
||||
type SessionInfoEntry = SessionInfo & {
|
||||
@@ -168,6 +169,9 @@ export function createSessionActions(context: SessionActionContext) {
|
||||
if (entry?.thinkingLevel !== undefined) {
|
||||
next.thinkingLevel = entry.thinkingLevel;
|
||||
}
|
||||
if (entry?.thinkingLevels !== undefined || defaults?.thinkingLevels !== undefined) {
|
||||
next.thinkingLevels = entry?.thinkingLevels ?? defaults?.thinkingLevels;
|
||||
}
|
||||
if (entry?.fastMode !== undefined) {
|
||||
next.fastMode = entry.fastMode;
|
||||
}
|
||||
|
||||
@@ -47,6 +47,7 @@ export type ResponseUsageMode = "on" | "off" | "tokens" | "full";
|
||||
|
||||
export type SessionInfo = {
|
||||
thinkingLevel?: string;
|
||||
thinkingLevels?: Array<{ id: string; label: string }>;
|
||||
fastMode?: boolean;
|
||||
verboseLevel?: string;
|
||||
traceLevel?: string;
|
||||
|
||||
@@ -549,6 +549,7 @@ export async function runTui(opts: RunTuiOptions): Promise<TuiResult> {
|
||||
local: isLocalMode,
|
||||
provider: sessionInfo.modelProvider,
|
||||
model: sessionInfo.model,
|
||||
thinkingLevels: sessionInfo.thinkingLevels,
|
||||
}),
|
||||
process.cwd(),
|
||||
),
|
||||
|
||||
Reference in New Issue
Block a user