diff --git a/CHANGELOG.md b/CHANGELOG.md
index dadbc04ba72..0313dea5da4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,6 +25,7 @@ Docs: https://docs.openclaw.ai
- Logging/sessions: apply configured redaction patterns to persisted session transcript text and accept escaped character classes in safe custom redaction regexes, so transcript JSONL no longer keeps matching sensitive text in the clear. Fixes #42982. Thanks @panpan0000.
- Providers/Ollama: honor `/api/show` capabilities when registering local models so non-tool Ollama models no longer receive the agent tool surface, and keep native Ollama thinking opt-in instead of enabling it by default. Fixes #64710 and duplicate #65343. Thanks @yuan-b, @netherby, @xilopaint, and @Diyforfun2026.
- Providers/Ollama: read larger custom Modelfile `PARAMETER num_ctx` values from `/api/show` so auto-discovered Ollama models with expanded context no longer stay pinned to the base model context. Fixes #68344. Thanks @neeravmakwana.
+- Providers/Ollama: honor configured model `params.num_ctx` in native and OpenAI-compatible Ollama requests so local models can cap runtime context without rebuilding Modelfiles. Fixes #44550 and #52206; supersedes #69464. Thanks @taitruong, @armi0024, and @LokiCode404.
- Providers/Ollama: expose native Ollama thinking effort levels so `/think max` is accepted for reasoning-capable Ollama models and maps to Ollama's highest supported `think` effort. Fixes #71584. Thanks @g0st1n.
- Providers/Ollama: strip the active custom Ollama provider prefix before native chat and embedding requests, so custom provider ids like `ollama-spark/qwen3:32b` reach Ollama as the real model name. Fixes #72353. Thanks @maximus-dss and @hclsys.
- Providers/Ollama: parse stringified native tool-call arguments before dispatch, preserving unsafe integer values so Ollama tool use receives structured parameters. Fixes #69735; supersedes #69910. Thanks @rongshuzhao and @yfge.
diff --git a/docs/.generated/config-baseline.sha256 b/docs/.generated/config-baseline.sha256
index 9c40ce9f7a8..3a86622216c 100644
--- a/docs/.generated/config-baseline.sha256
+++ b/docs/.generated/config-baseline.sha256
@@ -1,4 +1,4 @@
-79fa6b9b9df5e22ac56a7edb9bfc25550131e285ce9f4868f468d957a8768240 config-baseline.json
-2722504ab6bd37eea9e7542689bd6dba5fb4e485c0eab9c1915427c49a5c5b66 config-baseline.core.json
+502a73267bd7195caf3fc4fb513e51a01bfd1c9567f8c22037ee10a11169a0bf config-baseline.json
+2edac1da06bbb3709375bf82ae68890c67634f5ad3200a98a1d008b22c335e79 config-baseline.core.json
7cd9c908f066c143eab2a201efbc9640f483ab28bba92ddeca1d18cc2b528bc3 config-baseline.channel.json
74b74cb18ac37c0acaa765f398f1f9edbcee4c43567f02d45c89598a1e13afb4 config-baseline.plugin.json
diff --git a/docs/providers/ollama.md b/docs/providers/ollama.md
index 0233c70f6fb..692b25e20fd 100644
--- a/docs/providers/ollama.md
+++ b/docs/providers/ollama.md
@@ -401,7 +401,7 @@ For the full setup and behavior details, see [Ollama Web Search](/tools/ollama-s
For auto-discovered models, OpenClaw uses the context window reported by Ollama when available, including larger `PARAMETER num_ctx` values from custom Modelfiles. Otherwise it falls back to the default Ollama context window used by OpenClaw.
- You can override `contextWindow` and `maxTokens` in explicit provider config:
+ You can override `contextWindow` and `maxTokens` in explicit provider config. To cap Ollama's per-request runtime context without rebuilding a Modelfile, set `params.num_ctx`; OpenClaw sends it as `options.num_ctx` for both native Ollama and the OpenAI-compatible Ollama adapter. Invalid, zero, negative, and non-finite values are ignored and fall back to `contextWindow`.
```json5
{
@@ -413,6 +413,9 @@ For the full setup and behavior details, see [Ollama Web Search](/tools/ollama-s
id: "llama3.3",
contextWindow: 131072,
maxTokens: 65536,
+ params: {
+ num_ctx: 32768,
+ },
}
]
}
@@ -421,6 +424,8 @@ For the full setup and behavior details, see [Ollama Web Search](/tools/ollama-s
}
```
+ Per-model `agents.defaults.models["ollama/<model>"].params.num_ctx` works too. If both are configured, the explicit provider model entry wins over the agent default.
+
diff --git a/extensions/ollama/ollama.live.test.ts b/extensions/ollama/ollama.live.test.ts
index c4d4666dd1c..88304ddadcf 100644
--- a/extensions/ollama/ollama.live.test.ts
+++ b/extensions/ollama/ollama.live.test.ts
@@ -26,6 +26,7 @@ describe.skipIf(!LIVE)("ollama live", () => {
let payload:
| {
model?: string;
+ options?: { num_ctx?: number };
tools?: Array<{
function?: {
parameters?: {
@@ -42,6 +43,7 @@ describe.skipIf(!LIVE)("ollama live", () => {
api: "ollama",
provider: PROVIDER_ID,
contextWindow: 8192,
+ params: { num_ctx: 4096 },
} as never,
{
messages: [{ role: "user", content: "Reply exactly OK." }],
@@ -79,6 +81,7 @@ describe.skipIf(!LIVE)("ollama live", () => {
expect(error).toBeUndefined();
expect(events.some((event) => (event as { type?: string }).type === "done")).toBe(true);
expect(payload?.model).toBe(CHAT_MODEL);
+ expect(payload?.options?.num_ctx).toBe(4096);
const properties = payload?.tools?.[0]?.function?.parameters?.properties;
expect(properties?.city?.type).toBe("string");
expect(properties?.units?.type).toBe("string");
diff --git a/extensions/ollama/src/stream-runtime.test.ts b/extensions/ollama/src/stream-runtime.test.ts
index dcd664b1f2d..4d7e8958f4c 100644
--- a/extensions/ollama/src/stream-runtime.test.ts
+++ b/extensions/ollama/src/stream-runtime.test.ts
@@ -94,6 +94,7 @@ describe("createConfiguredOllamaCompatStreamWrapper", () => {
provider: "ollama",
id: "kimi-k2.5:cloud",
contextWindow: 262144,
+ params: { num_ctx: 65536 },
};
const wrapped = createConfiguredOllamaCompatStreamWrapper({
@@ -117,7 +118,43 @@ describe("createConfiguredOllamaCompatStreamWrapper", () => {
expect(patchedPayload).toMatchObject({
thinking: { type: "enabled" },
- options: { num_ctx: 262144 },
+ options: { num_ctx: 65536 },
+ });
+ });
+
+ it("falls back to contextWindow when configured num_ctx is invalid", async () => {
+ let patchedPayload: Record<string, unknown> | undefined;
+ const baseStreamFn = vi.fn((_model, _context, options) => {
+ options?.onPayload?.({});
+ return (async function* () {})();
+ });
+ const model = {
+ api: "openai-completions",
+ provider: "ollama",
+ id: "qwen3:32b",
+ contextWindow: 131072,
+ params: { num_ctx: 0 },
+ };
+
+ const wrapped = createConfiguredOllamaCompatStreamWrapper({
+ provider: "ollama",
+ modelId: "qwen3:32b",
+ model,
+ streamFn: baseStreamFn,
+ } as never);
+
+ await wrapped?.(
+ model as never,
+ { messages: [] } as never,
+ {
+ onPayload: (payload: unknown) => {
+ patchedPayload = payload as Record<string, unknown>;
+ },
+ } as never,
+ );
+
+ expect(patchedPayload).toMatchObject({
+ options: { num_ctx: 131072 },
});
});
@@ -878,6 +915,7 @@ function getGuardedFetchCall(fetchMock: typeof fetchWithSsrFGuardMock): GuardedF
async function createOllamaTestStream(params: {
baseUrl: string;
defaultHeaders?: Record<string, string>;
+ model?: Record<string, unknown>;
options?: {
apiKey?: string;
maxTokens?: number;
@@ -892,6 +930,7 @@ async function createOllamaTestStream(params: {
api: "ollama",
provider: "custom-ollama",
contextWindow: 131072,
+ ...params.model,
} as unknown as Parameters[0],
{
messages: [{ role: "user", content: "hello" }],
@@ -1157,6 +1196,33 @@ describe("createOllamaStreamFn", () => {
);
});
+ it("uses configured params.num_ctx for native Ollama chat options", async () => {
+ await withMockNdjsonFetch(
+ [
+ '{"model":"m","created_at":"t","message":{"role":"assistant","content":"ok"},"done":false}',
+ '{"model":"m","created_at":"t","message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":1,"eval_count":1}',
+ ],
+ async (fetchMock) => {
+ const stream = await createOllamaTestStream({
+ baseUrl: "http://ollama-host:11434",
+ model: { params: { num_ctx: 32768 }, contextWindow: 131072 },
+ });
+
+ const events = await collectStreamEvents(stream);
+ expect(events.at(-1)?.type).toBe("done");
+
+ const requestInit = getGuardedFetchCall(fetchMock).init ?? {};
+ if (typeof requestInit.body !== "string") {
+ throw new Error("Expected string request body");
+ }
+ const requestBody = JSON.parse(requestInit.body) as {
+ options: { num_ctx?: number };
+ };
+ expect(requestBody.options.num_ctx).toBe(32768);
+ },
+ );
+ });
+
it("uses the default loopback policy when baseUrl is empty", async () => {
await withMockNdjsonFetch(
[
diff --git a/extensions/ollama/src/stream.ts b/extensions/ollama/src/stream.ts
index 4dfd694fcdd..6845b47a713 100644
--- a/extensions/ollama/src/stream.ts
+++ b/extensions/ollama/src/stream.ts
@@ -181,8 +181,19 @@ function resolveOllamaThinkValue(thinkingLevel: unknown): OllamaThinkValue | und
return undefined;
}
-function resolveOllamaCompatNumCtx(model: ProviderRuntimeModel): number {
- return Math.max(1, Math.floor(model.contextWindow ?? model.maxTokens ?? DEFAULT_CONTEXT_TOKENS));
+function resolveOllamaConfiguredNumCtx(model: ProviderRuntimeModel): number | undefined {
+ const raw = model.params?.num_ctx;
+ if (typeof raw !== "number" || !Number.isFinite(raw) || raw <= 0) {
+ return undefined;
+ }
+ return Math.floor(raw);
+}
+
+function resolveOllamaNumCtx(model: ProviderRuntimeModel): number {
+ return (
+ resolveOllamaConfiguredNumCtx(model) ??
+ Math.max(1, Math.floor(model.contextWindow ?? model.maxTokens ?? DEFAULT_CONTEXT_TOKENS))
+ );
}
function isOllamaCloudKimiModelRef(modelId: string): boolean {
@@ -215,7 +226,7 @@ export function createConfiguredOllamaCompatStreamWrapper(
}
if (injectNumCtx && model) {
- streamFn = wrapOllamaCompatNumCtx(streamFn, resolveOllamaCompatNumCtx(model));
+ streamFn = wrapOllamaCompatNumCtx(streamFn, resolveOllamaNumCtx(model));
}
const ollamaThinkValue = isNativeOllamaTransport
@@ -743,7 +754,7 @@ export function createOllamaStreamFn(
);
const ollamaTools = extractOllamaTools(context.tools);
- const ollamaOptions: Record<string, unknown> = { num_ctx: model.contextWindow ?? 65536 };
+ const ollamaOptions: Record<string, unknown> = { num_ctx: resolveOllamaNumCtx(model) };
if (typeof options?.temperature === "number") {
ollamaOptions.temperature = options.temperature;
}
diff --git a/src/agents/pi-embedded-runner/model.test.ts b/src/agents/pi-embedded-runner/model.test.ts
index b8fef65e0ff..a3b2839a44b 100644
--- a/src/agents/pi-embedded-runner/model.test.ts
+++ b/src/agents/pi-embedded-runner/model.test.ts
@@ -369,6 +369,80 @@ describe("resolveModel", () => {
expect(result.model?.maxTokens).toBe(32768);
});
+ it("merges configured model params with agent defaults for resolved models", () => {
+ mockDiscoveredModel(discoverModels, {
+ provider: "ollama",
+ modelId: "qwen3:32b",
+ templateModel: {
+ ...makeModel("qwen3:32b"),
+ provider: "ollama",
+ params: { num_ctx: 4096, keep_alive: "1m" },
+ },
+ });
+ const cfg = {
+ agents: {
+ defaults: {
+ models: {
+ "OLLAMA/qwen3:32B": {
+ params: { num_ctx: 8192, thinking: "low" },
+ },
+ },
+ },
+ },
+ models: {
+ providers: {
+ ollama: {
+ baseUrl: "http://localhost:11434",
+ models: [
+ {
+ ...makeModel("qwen3:32b"),
+ params: { num_ctx: 16384 },
+ },
+ ],
+ },
+ },
+ },
+ } as unknown as OpenClawConfig;
+
+ const result = resolveModelForTest("ollama", "qwen3:32b", "/tmp/agent", cfg);
+
+ expect(result.error).toBeUndefined();
+ expect((result.model as { params?: Record<string, unknown> } | undefined)?.params).toEqual({
+ num_ctx: 16384,
+ keep_alive: "1m",
+ thinking: "low",
+ });
+ });
+
+ it("applies agent default model params without explicit provider config", () => {
+ mockDiscoveredModel(discoverModels, {
+ provider: "ollama",
+ modelId: "llama3.2",
+ templateModel: {
+ ...makeModel("llama3.2"),
+ provider: "ollama",
+ },
+ });
+ const cfg = {
+ agents: {
+ defaults: {
+ models: {
+ "ollama/llama3.2": {
+ params: { num_ctx: 32768 },
+ },
+ },
+ },
+ },
+ } as unknown as OpenClawConfig;
+
+ const result = resolveModelForTest("ollama", "llama3.2", "/tmp/agent", cfg);
+
+ expect(result.error).toBeUndefined();
+ expect((result.model as { params?: Record<string, unknown> } | undefined)?.params).toEqual({
+ num_ctx: 32768,
+ });
+ });
+
it("propagates reasoning from matching configured fallback model", () => {
const cfg = {
models: {
diff --git a/src/agents/pi-embedded-runner/model.ts b/src/agents/pi-embedded-runner/model.ts
index c8791fd9b91..2586218bce4 100644
--- a/src/agents/pi-embedded-runner/model.ts
+++ b/src/agents/pi-embedded-runner/model.ts
@@ -21,7 +21,7 @@ import {
import { resolveOpenClawAgentDir } from "../agent-paths.js";
import { DEFAULT_CONTEXT_TOKENS } from "../defaults.js";
import { buildModelAliasLines } from "../model-alias-lines.js";
-import { normalizeStaticProviderModelId } from "../model-ref-shared.js";
+import { modelKey, normalizeStaticProviderModelId } from "../model-ref-shared.js";
import { findNormalizedProviderValue, normalizeProviderId } from "../model-selection.js";
import {
buildSuppressedBuiltInModelError,
@@ -346,6 +346,80 @@ function findConfiguredProviderModel(
);
}
+function readModelParams(value: unknown): Record<string, unknown> | undefined {
+ if (!value || typeof value !== "object" || Array.isArray(value)) {
+ return undefined;
+ }
+ return value as Record<string, unknown>;
+}
+
+function mergeModelParams(
+ ...entries: Array<Record<string, unknown> | undefined>
+): Record<string, unknown> | undefined {
+ const merged = Object.assign({}, ...entries.filter(Boolean));
+ return Object.keys(merged).length > 0 ? merged : undefined;
+}
+
+function findConfiguredAgentModelParams(params: {
+ cfg?: OpenClawConfig;
+ provider: string;
+ modelId: string;
+}): Record<string, unknown> | undefined {
+ const configuredModels = params.cfg?.agents?.defaults?.models;
+ if (!configuredModels) {
+ return undefined;
+ }
+ const directKeys = [
+ modelKey(params.provider, params.modelId),
+ `${params.provider}/${params.modelId}`,
+ ];
+ for (const key of directKeys) {
+ const direct = readModelParams(configuredModels[key]?.params);
+ if (direct) {
+ return direct;
+ }
+ }
+
+ const normalizedProvider = normalizeProviderId(params.provider);
+ const normalizedModelId = normalizeStaticProviderModelId(normalizedProvider, params.modelId)
+ .trim()
+ .toLowerCase();
+ for (const [rawKey, entry] of Object.entries(configuredModels)) {
+ const slashIndex = rawKey.indexOf("/");
+ if (slashIndex <= 0) {
+ continue;
+ }
+ const candidateProvider = rawKey.slice(0, slashIndex);
+ const candidateModelId = rawKey.slice(slashIndex + 1);
+ if (
+ normalizeProviderId(candidateProvider) === normalizedProvider &&
+ normalizeStaticProviderModelId(normalizedProvider, candidateModelId).trim().toLowerCase() ===
+ normalizedModelId
+ ) {
+ return readModelParams(entry.params);
+ }
+ }
+ return undefined;
+}
+
+function mergeConfiguredRuntimeModelParams(params: {
+ cfg?: OpenClawConfig;
+ provider: string;
+ modelId: string;
+ discoveredParams?: unknown;
+ configuredParams?: unknown;
+}): Record<string, unknown> | undefined {
+ return mergeModelParams(
+ readModelParams(params.discoveredParams),
+ findConfiguredAgentModelParams({
+ cfg: params.cfg,
+ provider: params.provider,
+ modelId: params.modelId,
+ }),
+ readModelParams(params.configuredParams),
+ );
+}
+
function applyConfiguredProviderOverrides(params: {
provider: string;
discoveredModel: ProviderRuntimeModel;
@@ -356,9 +430,19 @@ function applyConfiguredProviderOverrides(params: {
preferDiscoveredModelMetadata?: boolean;
}): ProviderRuntimeModel {
const { discoveredModel, providerConfig, modelId } = params;
+ const defaultModelParams = findConfiguredAgentModelParams({
+ cfg: params.cfg,
+ provider: params.provider,
+ modelId,
+ });
if (!providerConfig) {
+ const resolvedParams = mergeModelParams(
+ readModelParams(discoveredModel.params),
+ defaultModelParams,
+ );
return {
...discoveredModel,
+ ...(resolvedParams ? { params: resolvedParams } : {}),
// Discovered models originate from models.json and may contain persistence markers.
headers: sanitizeModelHeaders(discoveredModel.headers, { stripSecretRefMarkers: true }),
};
@@ -390,11 +474,21 @@ function applyConfiguredProviderOverrides(params: {
!providerHeaders &&
!providerRequest
) {
+ const resolvedParams = mergeModelParams(
+ readModelParams(discoveredModel.params),
+ defaultModelParams,
+ );
return {
...discoveredModel,
+ ...(resolvedParams ? { params: resolvedParams } : {}),
headers: discoveredHeaders,
};
}
+ const resolvedParams = mergeModelParams(
+ readModelParams(discoveredModel.params),
+ defaultModelParams,
+ readModelParams(configuredModel?.params),
+ );
const normalizedInput = resolveProviderModelInput({
provider: params.provider,
modelId,
@@ -436,6 +530,7 @@ function applyConfiguredProviderOverrides(params: {
contextWindow: metadataOverrideModel?.contextWindow ?? discoveredModel.contextWindow,
contextTokens: metadataOverrideModel?.contextTokens ?? discoveredModel.contextTokens,
maxTokens: metadataOverrideModel?.maxTokens ?? discoveredModel.maxTokens,
+ ...(resolvedParams ? { params: resolvedParams } : {}),
headers: requestConfig.headers,
compat: metadataOverrideModel?.compat ?? discoveredModel.compat,
},
@@ -468,13 +563,22 @@ function resolveExplicitModelWithRegistry(params: {
modelId,
});
if (inlineMatch?.api) {
+ const resolvedParams = mergeConfiguredRuntimeModelParams({
+ cfg,
+ provider,
+ modelId,
+ configuredParams: inlineMatch.params,
+ });
return {
kind: "resolved",
model: normalizeResolvedModel({
provider,
cfg,
agentDir,
- model: inlineMatch as Model,
+ model: {
+ ...inlineMatch,
+ ...(resolvedParams ? { params: resolvedParams } : {}),
+ } as Model,
runtimeHooks,
}),
};
@@ -508,13 +612,22 @@ function resolveExplicitModelWithRegistry(params: {
modelId,
});
if (fallbackInlineMatch?.api) {
+ const resolvedParams = mergeConfiguredRuntimeModelParams({
+ cfg,
+ provider,
+ modelId,
+ configuredParams: fallbackInlineMatch.params,
+ });
return {
kind: "resolved",
model: normalizeResolvedModel({
provider,
cfg,
agentDir,
- model: fallbackInlineMatch as Model,
+ model: {
+ ...fallbackInlineMatch,
+ ...(resolvedParams ? { params: resolvedParams } : {}),
+ } as Model,
runtimeHooks,
}),
};
@@ -594,6 +707,12 @@ function resolveConfiguredFallbackModel(params: {
const modelHeaders = sanitizeModelHeaders(configuredModel?.headers, {
stripSecretRefMarkers: true,
});
+ const resolvedParams = mergeConfiguredRuntimeModelParams({
+ cfg,
+ provider,
+ modelId,
+ configuredParams: configuredModel?.params,
+ });
if (!providerConfig && !modelId.startsWith("mock-")) {
return undefined;
}
@@ -643,6 +762,7 @@ function resolveConfiguredFallbackModel(params: {
configuredModel?.maxTokens ??
providerConfig?.models?.[0]?.maxTokens ??
DEFAULT_CONTEXT_TOKENS,
+ ...(resolvedParams ? { params: resolvedParams } : {}),
headers: requestConfig.headers,
} as Model,
providerRequest,
diff --git a/src/config/schema.base.generated.ts b/src/config/schema.base.generated.ts
index f380f1f6921..1210a81a461 100644
--- a/src/config/schema.base.generated.ts
+++ b/src/config/schema.base.generated.ts
@@ -2995,6 +2995,13 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
type: "number",
exclusiveMinimum: 0,
},
+ params: {
+ type: "object",
+ propertyNames: {
+ type: "string",
+ },
+ additionalProperties: {},
+ },
headers: {
type: "object",
propertyNames: {
@@ -3122,7 +3129,7 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
},
title: "Model Provider Model List",
description:
- "Declared model list for a provider including identifiers, metadata, and optional compatibility/cost hints. Keep IDs exact to provider catalog values so selection and fallback resolve correctly.",
+ "Declared model list for a provider including identifiers, metadata, provider-specific params, and optional compatibility/cost hints. Keep IDs exact to provider catalog values so selection and fallback resolve correctly.",
},
},
required: ["baseUrl", "models"],
@@ -26612,7 +26619,7 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
},
"models.providers.*.models": {
label: "Model Provider Model List",
- help: "Declared model list for a provider including identifiers, metadata, and optional compatibility/cost hints. Keep IDs exact to provider catalog values so selection and fallback resolve correctly.",
+ help: "Declared model list for a provider including identifiers, metadata, provider-specific params, and optional compatibility/cost hints. Keep IDs exact to provider catalog values so selection and fallback resolve correctly.",
tags: ["models"],
},
"auth.cooldowns.billingBackoffHours": {
diff --git a/src/config/schema.help.ts b/src/config/schema.help.ts
index 557b638c5cb..83d63ae9a0a 100644
--- a/src/config/schema.help.ts
+++ b/src/config/schema.help.ts
@@ -885,7 +885,7 @@ export const FIELD_HELP: Record<string, string> = {
"models.providers.*.request.allowPrivateNetwork":
"When true, allow HTTPS to the model base URL when DNS resolves to private, CGNAT, or similar ranges, via the provider HTTP fetch guard (fetchWithSsrFGuard). OpenAI Responses WebSocket reuses request for headers/TLS but does not use that fetch SSRF path. Use only for operator-controlled self-hosted OpenAI-compatible endpoints (LAN, overlay, split DNS). Default is false.",
"models.providers.*.models":
- "Declared model list for a provider including identifiers, metadata, and optional compatibility/cost hints. Keep IDs exact to provider catalog values so selection and fallback resolve correctly.",
+ "Declared model list for a provider including identifiers, metadata, provider-specific params, and optional compatibility/cost hints. Keep IDs exact to provider catalog values so selection and fallback resolve correctly.",
auth: "Authentication profile root used for multi-profile provider credentials and cooldown-based failover ordering. Keep profiles minimal and explicit so automatic failover behavior stays auditable.",
"channels.matrix.allowBots":
'Allow messages from other configured Matrix bot accounts to trigger replies (default: false). Set "mentions" to only accept bot messages that visibly mention this bot.',
diff --git a/src/config/types.models.ts b/src/config/types.models.ts
index 985d3f476b3..07ee3da6662 100644
--- a/src/config/types.models.ts
+++ b/src/config/types.models.ts
@@ -107,6 +107,8 @@ export type ModelDefinitionConfig = {
*/
contextTokens?: number;
maxTokens: number;
+ /** Provider-specific request/runtime parameters passed through to provider plugins. */
+ params?: Record<string, unknown>;
headers?: Record;
compat?: ModelCompatConfig;
metadataSource?: "models-add";
diff --git a/src/config/zod-schema.core.ts b/src/config/zod-schema.core.ts
index 1b387910c88..d8b49977df9 100644
--- a/src/config/zod-schema.core.ts
+++ b/src/config/zod-schema.core.ts
@@ -342,6 +342,7 @@ export const ModelDefinitionSchema = z
contextWindow: z.number().positive().optional(),
contextTokens: z.number().int().positive().optional(),
maxTokens: z.number().positive().optional(),
+ params: z.record(z.string(), z.unknown()).optional(),
headers: z.record(z.string(), z.string()).optional(),
compat: ModelCompatSchema,
metadataSource: z.literal("models-add").optional(),
diff --git a/src/plugins/provider-runtime-model.types.ts b/src/plugins/provider-runtime-model.types.ts
index 5eba458acc2..7c07fbc4d04 100644
--- a/src/plugins/provider-runtime-model.types.ts
+++ b/src/plugins/provider-runtime-model.types.ts
@@ -6,4 +6,5 @@ import type { Api, Model } from "@mariozechner/pi-ai";
*/
export type ProviderRuntimeModel = Model & {
contextTokens?: number;
+ params?: Record<string, unknown>;
};