feat: declare refreshable model catalog supplements

This commit is contained in:
Shakker
2026-04-27 19:10:15 +01:00
parent 7231fcfec3
commit 53b53ba06b
3 changed files with 114 additions and 0 deletions

View File

@@ -144,6 +144,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- CLI/models: skip duplicate catalog supplement resolution during broad `models list --all` output so already-listed registry rows do not incur a second registry lookup pass. Thanks @shakkernerd.
- CLI/models: move OpenAI and OpenCode Go forward-compat list rows into refreshable manifest catalogs and stop broad `models list --all` from loading runtime catalog supplement hooks. Thanks @shakkernerd.
- CLI/models: keep broad unfiltered `models list --all` on raw registry rows instead of loading every provider runtime normalization hook, while preserving full normalization for provider-filtered and configured model paths. Thanks @shakkernerd.
## 2026.4.26

View File

@@ -42,12 +42,74 @@
}
},
"modelCatalog": {
"providers": {
"openai": {
"baseUrl": "https://api.openai.com/v1",
"api": "openai-responses",
"models": [
{
"id": "gpt-5.5-pro",
"name": "gpt-5.5-pro",
"reasoning": true,
"input": ["text", "image"],
"contextWindow": 1000000,
"maxTokens": 128000,
"cost": {
"input": 30,
"output": 180,
"cacheRead": 0,
"cacheWrite": 0
}
}
]
},
"openai-codex": {
"baseUrl": "https://chatgpt.com/backend-api/codex",
"api": "openai-codex-responses",
"models": [
{
"id": "gpt-5.4-pro",
"name": "gpt-5.4-pro",
"reasoning": true,
"input": ["text", "image"],
"contextWindow": 1050000,
"contextTokens": 272000,
"maxTokens": 128000,
"cost": {
"input": 30,
"output": 180,
"cacheRead": 0,
"cacheWrite": 0
}
},
{
"id": "gpt-5.5-pro",
"name": "gpt-5.5-pro",
"reasoning": true,
"input": ["text", "image"],
"contextWindow": 1000000,
"contextTokens": 272000,
"maxTokens": 128000,
"cost": {
"input": 30,
"output": 180,
"cacheRead": 0,
"cacheWrite": 0
}
}
]
}
},
"aliases": {
"azure-openai-responses": {
"provider": "openai",
"api": "azure-openai-responses"
}
},
"discovery": {
"openai": "refreshable",
"openai-codex": "refreshable"
},
"suppressions": [
{
"provider": "openai",

View File

@@ -15,6 +15,57 @@
}
}
},
"modelCatalog": {
"providers": {
"opencode-go": {
"baseUrl": "https://opencode.ai/zen/go/v1",
"api": "openai-completions",
"models": [
{
"id": "deepseek-v4-pro",
"name": "DeepSeek V4 Pro",
"reasoning": true,
"input": ["text"],
"contextWindow": 1000000,
"maxTokens": 384000,
"cost": {
"input": 1.74,
"output": 3.48,
"cacheRead": 0.145,
"cacheWrite": 0
},
"compat": {
"supportsUsageInStreaming": true,
"supportsReasoningEffort": true,
"maxTokensField": "max_tokens"
}
},
{
"id": "deepseek-v4-flash",
"name": "DeepSeek V4 Flash",
"reasoning": true,
"input": ["text"],
"contextWindow": 1000000,
"maxTokens": 384000,
"cost": {
"input": 0.14,
"output": 0.28,
"cacheRead": 0.028,
"cacheWrite": 0
},
"compat": {
"supportsUsageInStreaming": true,
"supportsReasoningEffort": true,
"maxTokensField": "max_tokens"
}
}
]
}
},
"discovery": {
"opencode-go": "refreshable"
}
},
"providerAuthEnvVars": {
"opencode-go": ["OPENCODE_API_KEY", "OPENCODE_ZEN_API_KEY"]
},