OpenRouter: allow any model ID instead of restricting to static catalog (#14312)

* OpenRouter: allow any model ID instead of restricting to static catalog

OpenRouter models were restricted to a hardcoded prefix list in the internal model catalog, preventing use of newly added or less common models. This change makes OpenRouter work as the pass-through proxy it is — any valid OpenRouter model ID now resolves dynamically.

Fixes https://github.com/openclaw/openclaw/issues/5241

Changes:
- Add OpenRouter as an implicit provider in resolveImplicitProviders so models.json is populated when an API key is detected (models-config.providers.ts)
- Add a pass-through fallback in resolveModel that creates OpenRouter models on-the-fly when they aren't pre-registered in the local catalog (model.ts)
- Remove the static prefix filter for OpenRouter/opencode in isModernModelRef (live-model-filter.ts)

* Apply requested change for maxTokens

* Agents: remove dead helper in live model filter

* Changelog: note Joly0/main OpenRouter fix

* Changelog: fix OpenRouter entry text

---------

Co-authored-by: Vincent Koc <vincentkoc@ieee.org>
This commit is contained in:
Joly0
2026-02-22 18:21:20 +01:00
committed by GitHub
parent c543994e90
commit ded9a59f78
4 changed files with 60 additions and 14 deletions

View File

@@ -27,9 +27,10 @@ Docs: https://docs.openclaw.ai
### Fixes
- Providers/OpenRouter: allow pass-through OpenRouter and Opencode model IDs in live model filtering so custom routed model IDs are treated as modern refs. (#14312) Thanks @Joly0.
- Providers/OpenRouter: default reasoning to enabled when the selected model advertises `reasoning: true` and no session/directive override is set. (#22513) Thanks @zwffff.
- Providers/OpenRouter: map `/think` levels to `reasoning.effort` in embedded runs while preserving explicit `reasoning.max_tokens` payloads. (#17236) Thanks @robbyczgw-cla.
- Gateway/OpenRouter: preserve stored session provider when model IDs are vendor-prefixed (for example, `anthropic/...`) so follow-up turns do not incorrectly route to direct provider APIs. (#22753) Thanks @dndodson.
- Providers/OpenRouter: preserve stored session provider when model IDs are vendor-prefixed (for example, `anthropic/...`) so follow-up turns do not incorrectly route to direct provider APIs. (#22753) Thanks @dndodson.
- Providers/OpenRouter: preserve the required `openrouter/` prefix for OpenRouter-native model IDs during model-ref normalization. (#12942) Thanks @omair445.
- Providers/OpenRouter: pass through provider routing parameters from model params.provider to OpenRouter request payloads for provider selection controls. (#17148) Thanks @carrotRakko.
- Telegram/Webhook: keep webhook monitors alive until gateway abort signals fire, preventing false channel exits and immediate webhook auto-restart loops.

View File

@@ -33,10 +33,6 @@ function matchesExactOrPrefix(id: string, values: string[]): boolean {
return values.some((value) => id === value || id.startsWith(value));
}
/**
 * Returns true when `id` contains any of the given substrings.
 * Unlike matchesExactOrPrefix, this is a containment check anywhere in the ID.
 * An empty string in `values` matches every ID (String.includes("") is true).
 */
function matchesAny(id: string, values: string[]): boolean {
  for (const needle of values) {
    if (id.includes(needle)) {
      return true;
    }
  }
  return false;
}
export function isModernModelRef(ref: ModelRef): boolean {
const provider = ref.provider?.trim().toLowerCase() ?? "";
const id = ref.id?.trim().toLowerCase() ?? "";
@@ -89,15 +85,9 @@ export function isModernModelRef(ref: ModelRef): boolean {
}
if (provider === "openrouter" || provider === "opencode") {
return matchesAny(id, [
...ANTHROPIC_PREFIXES,
...OPENAI_MODELS,
...CODEX_MODELS,
...GOOGLE_PREFIXES,
...ZAI_PREFIXES,
...MINIMAX_PREFIXES,
...XAI_PREFIXES,
]);
// OpenRouter/opencode are pass-through proxies; accept any model ID
// rather than restricting to a static prefix list.
return true;
}
return false;

View File

@@ -144,6 +144,17 @@ const OLLAMA_DEFAULT_COST = {
cacheWrite: 0,
};
// Defaults for the implicit OpenRouter provider entry (pass-through proxy).
const OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1";
// "auto" is the model ID seeded into the catalog; presumably OpenRouter's
// server-side auto-routing model — NOTE(review): confirm against OpenRouter docs.
const OPENROUTER_DEFAULT_MODEL_ID = "auto";
const OPENROUTER_DEFAULT_CONTEXT_WINDOW = 200000;
const OPENROUTER_DEFAULT_MAX_TOKENS = 8192;
// Pricing is not knowable up front for a pass-through entry, so all rates
// are zeroed rather than estimated.
const OPENROUTER_DEFAULT_COST = {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
};
// Defaults for a locally hosted vLLM server.
const VLLM_BASE_URL = "http://127.0.0.1:8000/v1";
const VLLM_DEFAULT_CONTEXT_WINDOW = 128000;
const VLLM_DEFAULT_MAX_TOKENS = 8192;
@@ -659,6 +670,24 @@ function buildTogetherProvider(): ProviderConfig {
};
}
/**
 * Builds the implicit OpenRouter provider config.
 *
 * Only the single "auto" routing model is pre-registered here. OpenRouter is
 * treated as a pass-through proxy, so any other OpenRouter model ID is
 * resolved on-the-fly (see resolveModel's OpenRouter fallback) rather than
 * being enumerated in this static list.
 *
 * @returns Provider config pointing at the OpenRouter OpenAI-compatible
 *          completions endpoint with zeroed cost defaults.
 */
function buildOpenrouterProvider(): ProviderConfig {
return {
baseUrl: OPENROUTER_BASE_URL,
// OpenRouter speaks the OpenAI completions wire format.
api: "openai-completions",
models: [
{
id: OPENROUTER_DEFAULT_MODEL_ID,
name: "OpenRouter Auto",
// The auto-router entry does not advertise reasoning support by default.
reasoning: false,
input: ["text", "image"],
cost: OPENROUTER_DEFAULT_COST,
contextWindow: OPENROUTER_DEFAULT_CONTEXT_WINDOW,
maxTokens: OPENROUTER_DEFAULT_MAX_TOKENS,
},
],
};
}
async function buildVllmProvider(params?: {
baseUrl?: string;
apiKey?: string;
@@ -671,6 +700,7 @@ async function buildVllmProvider(params?: {
models,
};
}
export function buildQianfanProvider(): ProviderConfig {
return {
baseUrl: QIANFAN_BASE_URL,
@@ -907,6 +937,13 @@ export async function resolveImplicitProviders(params: {
providers.qianfan = { ...buildQianfanProvider(), apiKey: qianfanKey };
}
const openrouterKey =
resolveEnvApiKeyVarName("openrouter") ??
resolveApiKeyFromProfiles({ provider: "openrouter", store: authStore });
if (openrouterKey) {
providers.openrouter = { ...buildOpenrouterProvider(), apiKey: openrouterKey };
}
const nvidiaKey =
resolveEnvApiKeyVarName("nvidia") ??
resolveApiKeyFromProfiles({ provider: "nvidia", store: authStore });

View File

@@ -80,6 +80,24 @@ export function resolveModel(
if (forwardCompat) {
return { model: forwardCompat, authStorage, modelRegistry };
}
// OpenRouter is a pass-through proxy — any model ID available on OpenRouter
// should work without being pre-registered in the local catalog.
if (normalizedProvider === "openrouter") {
const fallbackModel: Model<Api> = normalizeModelCompat({
id: modelId,
name: modelId,
api: "openai-completions",
provider,
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: DEFAULT_CONTEXT_TOKENS,
// Align with OPENROUTER_DEFAULT_MAX_TOKENS in models-config.providers.ts
maxTokens: 8192,
} as Model<Api>);
return { model: fallbackModel, authStorage, modelRegistry };
}
const providerCfg = providers[provider];
if (providerCfg || modelId.startsWith("mock-")) {
const fallbackModel: Model<Api> = normalizeModelCompat({