diff --git a/CHANGELOG.md b/CHANGELOG.md
index 56c41c013fc..8c4fec04a2a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -144,6 +144,7 @@ Docs: https://docs.openclaw.ai
 ### Fixes
 
 - CLI/models: skip duplicate catalog supplement resolution during broad `models list --all` output so already-listed registry rows do not pay a second registry lookup pass. Thanks @shakkernerd.
+- CLI/models: move OpenAI and OpenCode Go forward-compat list rows into refreshable manifest catalogs and stop broad `models list --all` from loading runtime catalog supplement hooks. Thanks @shakkernerd.
 - CLI/models: keep broad unfiltered `models list --all` on raw registry rows instead of loading every provider runtime normalization hook, while preserving full normalization for provider-filtered and configured model paths. Thanks @shakkernerd.
 
 ## 2026.4.26
diff --git a/extensions/openai/openclaw.plugin.json b/extensions/openai/openclaw.plugin.json
index cc21a80adf9..b6112d2a39a 100644
--- a/extensions/openai/openclaw.plugin.json
+++ b/extensions/openai/openclaw.plugin.json
@@ -42,12 +42,74 @@
     }
   },
   "modelCatalog": {
+    "providers": {
+      "openai": {
+        "baseUrl": "https://api.openai.com/v1",
+        "api": "openai-responses",
+        "models": [
+          {
+            "id": "gpt-5.5-pro",
+            "name": "gpt-5.5-pro",
+            "reasoning": true,
+            "input": ["text", "image"],
+            "contextWindow": 1000000,
+            "maxTokens": 128000,
+            "cost": {
+              "input": 30,
+              "output": 180,
+              "cacheRead": 0,
+              "cacheWrite": 0
+            }
+          }
+        ]
+      },
+      "openai-codex": {
+        "baseUrl": "https://chatgpt.com/backend-api/codex",
+        "api": "openai-codex-responses",
+        "models": [
+          {
+            "id": "gpt-5.4-pro",
+            "name": "gpt-5.4-pro",
+            "reasoning": true,
+            "input": ["text", "image"],
+            "contextWindow": 1050000,
+            "contextTokens": 272000,
+            "maxTokens": 128000,
+            "cost": {
+              "input": 30,
+              "output": 180,
+              "cacheRead": 0,
+              "cacheWrite": 0
+            }
+          },
+          {
+            "id": "gpt-5.5-pro",
+            "name": "gpt-5.5-pro",
+            "reasoning": true,
+            "input": ["text", "image"],
+            "contextWindow": 1000000,
+            "contextTokens": 272000,
+            "maxTokens": 128000,
+            "cost": {
+              "input": 30,
+              "output": 180,
+              "cacheRead": 0,
+              "cacheWrite": 0
+            }
+          }
+        ]
+      }
+    },
     "aliases": {
       "azure-openai-responses": {
         "provider": "openai",
         "api": "azure-openai-responses"
       }
     },
+    "discovery": {
+      "openai": "refreshable",
+      "openai-codex": "refreshable"
+    },
     "suppressions": [
       {
         "provider": "openai",
diff --git a/extensions/opencode-go/openclaw.plugin.json b/extensions/opencode-go/openclaw.plugin.json
index 76214c9c0b5..cb2176eac18 100644
--- a/extensions/opencode-go/openclaw.plugin.json
+++ b/extensions/opencode-go/openclaw.plugin.json
@@ -15,6 +15,57 @@
       }
     }
   },
+  "modelCatalog": {
+    "providers": {
+      "opencode-go": {
+        "baseUrl": "https://opencode.ai/zen/go/v1",
+        "api": "openai-completions",
+        "models": [
+          {
+            "id": "deepseek-v4-pro",
+            "name": "DeepSeek V4 Pro",
+            "reasoning": true,
+            "input": ["text"],
+            "contextWindow": 1000000,
+            "maxTokens": 384000,
+            "cost": {
+              "input": 1.74,
+              "output": 3.48,
+              "cacheRead": 0.145,
+              "cacheWrite": 0
+            },
+            "compat": {
+              "supportsUsageInStreaming": true,
+              "supportsReasoningEffort": true,
+              "maxTokensField": "max_tokens"
+            }
+          },
+          {
+            "id": "deepseek-v4-flash",
+            "name": "DeepSeek V4 Flash",
+            "reasoning": true,
+            "input": ["text"],
+            "contextWindow": 1000000,
+            "maxTokens": 384000,
+            "cost": {
+              "input": 0.14,
+              "output": 0.28,
+              "cacheRead": 0.028,
+              "cacheWrite": 0
+            },
+            "compat": {
+              "supportsUsageInStreaming": true,
+              "supportsReasoningEffort": true,
+              "maxTokensField": "max_tokens"
+            }
+          }
+        ]
+      }
+    },
+    "discovery": {
+      "opencode-go": "refreshable"
+    }
+  },
   "providerAuthEnvVars": {
     "opencode-go": ["OPENCODE_API_KEY", "OPENCODE_ZEN_API_KEY"]
   },