openclaw/src/config/types.models.ts
Vincent Koc · f16ecd1dac · fix(ollama): unify context window handling across discovery, merge, and OpenAI-compat transport (#29205)
* fix(ollama): inject num_ctx for OpenAI-compatible transport

* fix(ollama): discover per-model context and preserve higher limits

* fix(agents): prefer matching provider model for fallback limits

* fix(types): require numeric token limits in provider model merge

* fix(types): accept unknown payload in ollama num_ctx wrapper

* fix(types): simplify ollama settled-result extraction

* config(models): add provider flag for Ollama OpenAI num_ctx injection

* config(schema): allow provider num_ctx injection flag

* config(labels): label provider num_ctx injection flag

* config(help): document provider num_ctx injection flag

* agents(ollama): gate OpenAI num_ctx injection with provider config

* tests(ollama): cover provider num_ctx injection flag behavior

* docs(config): list provider num_ctx injection option

* docs(ollama): document OpenAI num_ctx injection toggle

* docs(config): clarify merge token-limit precedence

* config(help): note merge uses higher model token limits

* fix(ollama): cap /api/show discovery concurrency

* fix(ollama): restrict num_ctx injection to OpenAI compat

* tests(ollama): cover ipv6 and compat num_ctx gating

* fix(ollama): detect remote compat endpoints for ollama-labeled providers

* fix(ollama): cap per-model /api/show lookups to bound discovery load
2026-02-27 17:20:47 -08:00
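
Taken together, these commits describe one transport behavior: when an Ollama-labeled provider talks to an OpenAI-compatible endpoint, the request carries the model's context window as num_ctx, and the injection is gated on a per-provider opt-in. A minimal sketch of that gate, with the request and provider shapes simplified for illustration (only the injectNumCtxForOpenAICompat field comes from the actual config types below):

// Hypothetical sketch of the gating described in the commit messages;
// maybeInjectNumCtx and OpenAICompatBody are invented names.
type OpenAICompatBody = { model: string; options?: { num_ctx?: number } };

function maybeInjectNumCtx(
  body: OpenAICompatBody,
  provider: { api?: string; injectNumCtxForOpenAICompat?: boolean },
  contextWindow: number,
): OpenAICompatBody {
  // Injection is restricted to the OpenAI-compatible transport and to
  // providers that explicitly opt in via the config flag.
  if (provider.api !== "openai-completions") return body;
  if (!provider.injectNumCtxForOpenAICompat) return body;
  // Spread body.options last so a caller-supplied num_ctx wins.
  return { ...body, options: { num_ctx: contextWindow, ...body.options } };
}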


import type { SecretInput } from "./types.secrets.js";

// Wire APIs a provider or model can be configured to speak.
export const MODEL_APIS = [
  "openai-completions",
  "openai-responses",
  "openai-codex-responses",
  "anthropic-messages",
  "google-generative-ai",
  "github-copilot",
  "bedrock-converse-stream",
  "ollama",
] as const;
export type ModelApi = (typeof MODEL_APIS)[number];

// Per-model compatibility switches for backend-specific quirks.
export type ModelCompatConfig = {
  supportsStore?: boolean;
  supportsDeveloperRole?: boolean;
  supportsReasoningEffort?: boolean;
  supportsUsageInStreaming?: boolean;
  supportsStrictMode?: boolean;
  maxTokensField?: "max_completion_tokens" | "max_tokens";
  thinkingFormat?: "openai" | "zai" | "qwen";
  requiresToolResultName?: boolean;
  requiresAssistantAfterToolResult?: boolean;
  requiresThinkingAsText?: boolean;
  requiresMistralToolIds?: boolean;
};
export type ModelProviderAuthMode = "api-key" | "aws-sdk" | "oauth" | "token";

// A single model entry. contextWindow and maxTokens are required numbers so
// the merge logic can compare limits (see the commit notes above).
export type ModelDefinitionConfig = {
  id: string;
  name: string;
  api?: ModelApi;
  reasoning: boolean;
  input: Array<"text" | "image">;
  cost: {
    input: number;
    output: number;
    cacheRead: number;
    cacheWrite: number;
  };
  contextWindow: number;
  maxTokens: number;
  headers?: Record<string, string>;
  compat?: ModelCompatConfig;
};

export type ModelProviderConfig = {
  baseUrl: string;
  apiKey?: SecretInput;
  auth?: ModelProviderAuthMode;
  api?: ModelApi;
  // Opt-in flag: Ollama providers inject num_ctx into requests sent over
  // the OpenAI-compatible transport (see the commit notes above).
  injectNumCtxForOpenAICompat?: boolean;
  headers?: Record<string, string>;
  authHeader?: boolean;
  models: ModelDefinitionConfig[];
};
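
// Illustrative only, not part of the original file: a provider entry that
// opts into num_ctx injection over the OpenAI-compatible transport. The
// URL, model id, and limits are invented for the example.
const exampleOllamaProvider: ModelProviderConfig = {
  baseUrl: "http://127.0.0.1:11434/v1",
  api: "openai-completions",
  injectNumCtxForOpenAICompat: true,
  models: [
    {
      id: "llama3.1:8b",
      name: "Llama 3.1 8B (local)",
      reasoning: false,
      input: ["text"],
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
      contextWindow: 131072,
      maxTokens: 8192,
    },
  ],
};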

// Settings for automatic model discovery against Amazon Bedrock.
export type BedrockDiscoveryConfig = {
  enabled?: boolean;
  region?: string;
  providerFilter?: string[];
  refreshInterval?: number;
  defaultContextWindow?: number;
  defaultMaxTokens?: number;
};

// Top-level models configuration. In "merge" mode, provider entries are
// combined with built-in definitions; per the commit notes, the higher of
// the two token limits wins.
export type ModelsConfig = {
  mode?: "merge" | "replace";
  providers?: Record<string, ModelProviderConfig>;
  bedrockDiscovery?: BedrockDiscoveryConfig;
};
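
The "higher token limits win" merge rule from the commit notes is easy to state against these types. A hypothetical sketch follows; mergeModelLimits is not a function from this repo, and it reuses ModelDefinitionConfig from the file above:

// Sketch, assuming merge keeps the larger of the two token limits when a
// provider redefines a model that already has built-in limits.
function mergeModelLimits(
  base: ModelDefinitionConfig,
  override: ModelDefinitionConfig,
): ModelDefinitionConfig {
  return {
    ...base,
    ...override,
    // Per the commit notes, merge prefers the higher of the two limits.
    contextWindow: Math.max(base.contextWindow, override.contextWindow),
    maxTokens: Math.max(base.maxTokens, override.maxTokens),
  };
}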