diff --git a/CHANGELOG.md b/CHANGELOG.md
index cda0d18499e..40d25a6836f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@ Docs: https://docs.openclaw.ai
### Changes
+- Providers/NVIDIA: add the NVIDIA provider with API-key onboarding, setup docs, static catalog metadata, and literal model-ref picker support so NVIDIA hosted models can be selected with their provider prefix intact. (#71204) Thanks @eleqtrizit.
- Messages: add global `messages.visibleReplies` so operators can require visible output to go through `message(action=send)` for any source chat, while `messages.groupChat.visibleReplies` stays available as the group/channel override. Thanks @scoootscooob.
- Gateway/dev: run `pnpm gateway:watch` through a named tmux session by default, with `gateway:watch:raw` and `OPENCLAW_GATEWAY_WATCH_TMUX=0` for foreground mode, so repeated starts respawn an inspectable watcher without trapping the invoking agent shell. Thanks @vincentkoc.
- Plugin SDK: mark remaining legacy alias exports and diffs tool/config aliases with deprecation metadata, and add a guard so future legacy alias comments require `@deprecated` tags. Thanks @vincentkoc.
diff --git a/docs/.i18n/glossary.zh-CN.json b/docs/.i18n/glossary.zh-CN.json
index 68bc126be27..258b9e13f6b 100644
--- a/docs/.i18n/glossary.zh-CN.json
+++ b/docs/.i18n/glossary.zh-CN.json
@@ -143,6 +143,26 @@
"source": "Moonshot AI",
"target": "Moonshot AI"
},
+ {
+ "source": "Model providers",
+ "target": "模型提供商"
+ },
+ {
+ "source": "Model Providers",
+ "target": "模型提供商"
+ },
+ {
+ "source": "NVIDIA",
+ "target": "NVIDIA"
+ },
+ {
+ "source": "NVIDIA API key",
+ "target": "NVIDIA API key"
+ },
+ {
+ "source": "Provider directory",
+ "target": "提供商目录"
+ },
{
"source": "Additional bundled variants",
"target": "其他内置变体"
diff --git a/docs/concepts/model-providers.md b/docs/concepts/model-providers.md
index a1b5b59c924..4e8c9eb92c0 100644
--- a/docs/concepts/model-providers.md
+++ b/docs/concepts/model-providers.md
@@ -278,32 +278,32 @@ See [/providers/kilocode](/providers/kilocode) for setup details.
### Other bundled provider plugins
-| Provider | Id | Auth env | Example model |
-| ----------------------- | -------------------------------- | ------------------------------------------------------------ | ----------------------------------------------- |
-| BytePlus | `byteplus` / `byteplus-plan` | `BYTEPLUS_API_KEY` | `byteplus-plan/ark-code-latest` |
-| Cerebras | `cerebras` | `CEREBRAS_API_KEY` | `cerebras/zai-glm-4.7` |
-| Cloudflare AI Gateway | `cloudflare-ai-gateway` | `CLOUDFLARE_AI_GATEWAY_API_KEY` | — |
-| DeepInfra | `deepinfra` | `DEEPINFRA_API_KEY` | `deepinfra/deepseek-ai/DeepSeek-V3.2` |
-| DeepSeek | `deepseek` | `DEEPSEEK_API_KEY` | `deepseek/deepseek-v4-flash` |
-| GitHub Copilot | `github-copilot` | `COPILOT_GITHUB_TOKEN` / `GH_TOKEN` / `GITHUB_TOKEN` | — |
-| Groq | `groq` | `GROQ_API_KEY` | — |
-| Hugging Face Inference | `huggingface` | `HUGGINGFACE_HUB_TOKEN` or `HF_TOKEN` | `huggingface/deepseek-ai/DeepSeek-R1` |
-| Kilo Gateway | `kilocode` | `KILOCODE_API_KEY` | `kilocode/kilo/auto` |
-| Kimi Coding | `kimi` | `KIMI_API_KEY` or `KIMICODE_API_KEY` | `kimi/kimi-code` |
-| MiniMax | `minimax` / `minimax-portal` | `MINIMAX_API_KEY` / `MINIMAX_OAUTH_TOKEN` | `minimax/MiniMax-M2.7` |
-| Mistral | `mistral` | `MISTRAL_API_KEY` | `mistral/mistral-large-latest` |
-| Moonshot | `moonshot` | `MOONSHOT_API_KEY` | `moonshot/kimi-k2.6` |
-| NVIDIA | `nvidia` | `NVIDIA_API_KEY` | `nvidia/nvidia/llama-3.1-nemotron-70b-instruct` |
-| OpenRouter | `openrouter` | `OPENROUTER_API_KEY` | `openrouter/auto` |
-| Qianfan | `qianfan` | `QIANFAN_API_KEY` | `qianfan/deepseek-v3.2` |
-| Qwen Cloud | `qwen` | `QWEN_API_KEY` / `MODELSTUDIO_API_KEY` / `DASHSCOPE_API_KEY` | `qwen/qwen3.5-plus` |
-| StepFun | `stepfun` / `stepfun-plan` | `STEPFUN_API_KEY` | `stepfun/step-3.5-flash` |
-| Together | `together` | `TOGETHER_API_KEY` | `together/moonshotai/Kimi-K2.5` |
-| Venice | `venice` | `VENICE_API_KEY` | — |
-| Vercel AI Gateway | `vercel-ai-gateway` | `AI_GATEWAY_API_KEY` | `vercel-ai-gateway/anthropic/claude-opus-4.6` |
-| Volcano Engine (Doubao) | `volcengine` / `volcengine-plan` | `VOLCANO_ENGINE_API_KEY` | `volcengine-plan/ark-code-latest` |
-| xAI | `xai` | `XAI_API_KEY` | `xai/grok-4` |
-| Xiaomi | `xiaomi` | `XIAOMI_API_KEY` | `xiaomi/mimo-v2-flash` |
+| Provider | Id | Auth env | Example model |
+| ----------------------- | -------------------------------- | ------------------------------------------------------------ | --------------------------------------------- |
+| BytePlus | `byteplus` / `byteplus-plan` | `BYTEPLUS_API_KEY` | `byteplus-plan/ark-code-latest` |
+| Cerebras | `cerebras` | `CEREBRAS_API_KEY` | `cerebras/zai-glm-4.7` |
+| Cloudflare AI Gateway | `cloudflare-ai-gateway` | `CLOUDFLARE_AI_GATEWAY_API_KEY` | — |
+| DeepInfra | `deepinfra` | `DEEPINFRA_API_KEY` | `deepinfra/deepseek-ai/DeepSeek-V3.2` |
+| DeepSeek | `deepseek` | `DEEPSEEK_API_KEY` | `deepseek/deepseek-v4-flash` |
+| GitHub Copilot | `github-copilot` | `COPILOT_GITHUB_TOKEN` / `GH_TOKEN` / `GITHUB_TOKEN` | — |
+| Groq | `groq` | `GROQ_API_KEY` | — |
+| Hugging Face Inference | `huggingface` | `HUGGINGFACE_HUB_TOKEN` or `HF_TOKEN` | `huggingface/deepseek-ai/DeepSeek-R1` |
+| Kilo Gateway | `kilocode` | `KILOCODE_API_KEY` | `kilocode/kilo/auto` |
+| Kimi Coding | `kimi` | `KIMI_API_KEY` or `KIMICODE_API_KEY` | `kimi/kimi-code` |
+| MiniMax | `minimax` / `minimax-portal` | `MINIMAX_API_KEY` / `MINIMAX_OAUTH_TOKEN` | `minimax/MiniMax-M2.7` |
+| Mistral | `mistral` | `MISTRAL_API_KEY` | `mistral/mistral-large-latest` |
+| Moonshot | `moonshot` | `MOONSHOT_API_KEY` | `moonshot/kimi-k2.6` |
+| NVIDIA | `nvidia` | `NVIDIA_API_KEY` | `nvidia/nvidia/nemotron-3-super-120b-a12b` |
+| OpenRouter | `openrouter` | `OPENROUTER_API_KEY` | `openrouter/auto` |
+| Qianfan | `qianfan` | `QIANFAN_API_KEY` | `qianfan/deepseek-v3.2` |
+| Qwen Cloud | `qwen` | `QWEN_API_KEY` / `MODELSTUDIO_API_KEY` / `DASHSCOPE_API_KEY` | `qwen/qwen3.5-plus` |
+| StepFun | `stepfun` / `stepfun-plan` | `STEPFUN_API_KEY` | `stepfun/step-3.5-flash` |
+| Together | `together` | `TOGETHER_API_KEY` | `together/moonshotai/Kimi-K2.5` |
+| Venice | `venice` | `VENICE_API_KEY` | — |
+| Vercel AI Gateway | `vercel-ai-gateway` | `AI_GATEWAY_API_KEY` | `vercel-ai-gateway/anthropic/claude-opus-4.6` |
+| Volcano Engine (Doubao) | `volcengine` / `volcengine-plan` | `VOLCANO_ENGINE_API_KEY` | `volcengine-plan/ark-code-latest` |
+| xAI | `xai` | `XAI_API_KEY` | `xai/grok-4` |
+| Xiaomi | `xiaomi` | `XIAOMI_API_KEY` | `xiaomi/mimo-v2-flash` |
#### Quirks worth knowing
@@ -317,6 +317,9 @@ See [/providers/kilocode](/providers/kilocode) for setup details.
API-key onboarding writes explicit text-only M2.7 chat model definitions; image understanding stays on the plugin-owned `MiniMax-VL-01` media provider.
+
+  Model ids use a `nvidia/<maker>/` namespace (for example `nvidia/nvidia/nemotron-...` alongside `nvidia/moonshotai/kimi-k2.5`); pickers preserve the literal `<maker>` composition while the canonical key sent to the API stays single-prefixed.
+
Uses the xAI Responses path. `/fast` or `params.fastMode: true` rewrites `grok-3`, `grok-3-mini`, `grok-4`, and `grok-4-0709` to their `*-fast` variants. `tool_stream` defaults on; disable via `agents.defaults.models["xai/"].params.tool_stream=false`.
diff --git a/docs/providers/nvidia.md b/docs/providers/nvidia.md
index 25326f1aed7..e35feb42475 100644
--- a/docs/providers/nvidia.md
+++ b/docs/providers/nvidia.md
@@ -19,7 +19,7 @@ open models for free. Authenticate with an API key from
```bash
export NVIDIA_API_KEY="nvapi-..."
- openclaw onboard --auth-choice skip
+ openclaw onboard --auth-choice nvidia-api-key
```
@@ -30,10 +30,17 @@ open models for free. Authenticate with an API key from
-If you pass `--token` instead of the env var, the value lands in shell history and
-`ps` output. Prefer the `NVIDIA_API_KEY` environment variable when possible.
+If you pass `--nvidia-api-key` instead of the env var, the value lands in shell
+history and `ps` output. Prefer the `NVIDIA_API_KEY` environment variable when
+possible.
+For non-interactive setup, you can also pass the key directly:
+
+```bash
+openclaw onboard --auth-choice nvidia-api-key --nvidia-api-key "nvapi-..."
+```
+
## Config example
```json5
diff --git a/extensions/nvidia/api.ts b/extensions/nvidia/api.ts
index 587953fc750..bcbb874d54e 100644
--- a/extensions/nvidia/api.ts
+++ b/extensions/nvidia/api.ts
@@ -1 +1,6 @@
-export { buildNvidiaProvider } from "./provider-catalog.js";
+export { buildNvidiaProvider, NVIDIA_DEFAULT_MODEL_ID } from "./provider-catalog.js";
+export {
+ applyNvidiaConfig,
+ applyNvidiaProviderConfig,
+ NVIDIA_DEFAULT_MODEL_REF,
+} from "./onboard.js";
diff --git a/extensions/nvidia/index.test.ts b/extensions/nvidia/index.test.ts
index abb72bb8e1c..75a49b465f4 100644
--- a/extensions/nvidia/index.test.ts
+++ b/extensions/nvidia/index.test.ts
@@ -16,12 +16,23 @@ function readManifest(): NvidiaManifest {
) as NvidiaManifest;
}
-describe("nvidia provider plugin", () => {
- it("registers API-key auth metadata", async () => {
- const provider = await registerSingleProviderPlugin(plugin);
+async function registerNvidiaProvider() {
+ return registerSingleProviderPlugin(plugin);
+}
+
+describe("nvidia provider hooks", () => {
+ it("registers the nvidia provider with correct metadata", async () => {
+ const provider = await registerNvidiaProvider();
expect(provider.id).toBe("nvidia");
+ expect(provider.label).toBe("NVIDIA");
+ expect(provider.docsPath).toBe("/providers/nvidia");
expect(provider.envVars).toEqual(["NVIDIA_API_KEY"]);
+ });
+
+ it("registers API-key auth choice metadata", async () => {
+ const provider = await registerNvidiaProvider();
+
expect(provider.auth?.map((method) => method.id)).toEqual(["api-key"]);
const choice = resolveProviderPluginChoice({
@@ -40,4 +51,107 @@ describe("nvidia provider plugin", () => {
]),
);
});
+
+ it("keeps nvidia auth setup metadata aligned", async () => {
+ const provider = await registerNvidiaProvider();
+
+ expect(
+ provider.auth.map((method) => ({
+ id: method.id,
+ label: method.label,
+ hint: method.hint,
+ choiceId: method.wizard?.choiceId,
+ groupId: method.wizard?.groupId,
+ groupLabel: method.wizard?.groupLabel,
+ groupHint: method.wizard?.groupHint,
+ })),
+ ).toEqual([
+ {
+ id: "api-key",
+ label: "NVIDIA API key",
+ hint: "Direct API key",
+ choiceId: "nvidia-api-key",
+ groupId: "nvidia",
+ groupLabel: "NVIDIA",
+ groupHint: "Direct API key",
+ },
+ ]);
+ });
+
+ it("keeps nvidia wizard setup metadata aligned", async () => {
+ const provider = await registerNvidiaProvider();
+
+ expect(provider.wizard?.setup).toMatchObject({
+ choiceId: "nvidia-api-key",
+ choiceLabel: "NVIDIA API key",
+ groupId: "nvidia",
+ groupLabel: "NVIDIA",
+ groupHint: "Direct API key",
+ methodId: "api-key",
+ });
+ });
+
+ it("keeps nvidia model picker metadata aligned", async () => {
+ const provider = await registerNvidiaProvider();
+
+ expect(provider.wizard?.modelPicker).toMatchObject({
+ label: "NVIDIA (custom)",
+ hint: "Use NVIDIA-hosted open models",
+ methodId: "api-key",
+ });
+ });
+
+ it("does not override replay policy for standard openai-compatible transport", async () => {
+ const provider = await registerNvidiaProvider();
+
+ // NVIDIA uses standard OpenAI-compatible API without custom replay logic
+ expect(provider.buildReplayPolicy).toBeUndefined();
+ });
+
+ it("does not override stream wrapper for standard models", async () => {
+ const provider = await registerNvidiaProvider();
+
+ // NVIDIA uses standard streaming without custom wrappers
+ expect(provider.wrapStreamFn).toBeUndefined();
+ });
+
+ it("surfaces the bundled NVIDIA models via augmentModelCatalog", async () => {
+ const provider = await registerNvidiaProvider();
+
+ const entries = await provider.augmentModelCatalog?.({
+ env: process.env,
+ entries: [],
+ });
+
+ expect(entries?.map((entry) => entry.id)).toEqual([
+ "nvidia/nemotron-3-super-120b-a12b",
+ "moonshotai/kimi-k2.5",
+ "minimaxai/minimax-m2.5",
+ "z-ai/glm5",
+ ]);
+ expect(entries?.every((entry) => entry.provider === "nvidia")).toBe(true);
+ });
+
+ it("opts into literal provider-prefix preservation", async () => {
+ const provider = await registerNvidiaProvider();
+
+ // NVIDIA's ids like nvidia/nemotron-... sit alongside moonshotai/...,
+ // minimaxai/..., z-ai/... in the same catalog, so the leading nvidia/
+ // is a vendor namespace rather than a redundant provider prefix. The
+ // flag keeps the canonical ref as nvidia/nvidia/nemotron-... instead
+ // of letting the default string-based dedupe collapse it.
+ expect(provider.preserveLiteralProviderPrefix).toBe(true);
+ });
+
+ it("registers nvidia provider through the plugin api", () => {
+ const registeredProviders: string[] = [];
+
+ plugin.register({
+ registerProvider(provider: { id: string }) {
+ registeredProviders.push(provider.id);
+ },
+ } as any);
+
+ expect(registeredProviders).toContain("nvidia");
+ });
});
diff --git a/extensions/nvidia/index.ts b/extensions/nvidia/index.ts
index a045f12e986..f4c226ceb66 100644
--- a/extensions/nvidia/index.ts
+++ b/extensions/nvidia/index.ts
@@ -1,8 +1,20 @@
import { defineSingleProviderPluginEntry } from "openclaw/plugin-sdk/provider-entry";
+import { applyNvidiaConfig, NVIDIA_DEFAULT_MODEL_REF } from "./onboard.js";
import { buildNvidiaProvider } from "./provider-catalog.js";
const PROVIDER_ID = "nvidia";
+function buildNvidiaCatalogModels() {
+ return buildNvidiaProvider().models.map((model) => ({
+ provider: PROVIDER_ID,
+ id: model.id,
+ name: model.name ?? model.id,
+ contextWindow: model.contextWindow,
+ reasoning: model.reasoning,
+ input: model.input,
+ }));
+}
+
export default defineSingleProviderPluginEntry({
id: PROVIDER_ID,
name: "NVIDIA Provider",
@@ -11,26 +23,42 @@ export default defineSingleProviderPluginEntry({
label: "NVIDIA",
docsPath: "/providers/nvidia",
envVars: ["NVIDIA_API_KEY"],
+ preserveLiteralProviderPrefix: true,
auth: [
{
methodId: "api-key",
label: "NVIDIA API key",
- hint: "API key",
+ hint: "Direct API key",
optionKey: "nvidiaApiKey",
flagName: "--nvidia-api-key",
envVar: "NVIDIA_API_KEY",
promptMessage: "Enter NVIDIA API key",
- wizard: {
- choiceId: "nvidia-api-key",
- choiceLabel: "NVIDIA API key",
- groupId: "nvidia",
- groupLabel: "NVIDIA",
- groupHint: "API key",
- },
+ defaultModel: NVIDIA_DEFAULT_MODEL_REF,
+ applyConfig: applyNvidiaConfig,
},
],
catalog: {
buildProvider: buildNvidiaProvider,
},
+ augmentModelCatalog: buildNvidiaCatalogModels,
+ wizard: {
+ setup: {
+ choiceId: "nvidia-api-key",
+ choiceLabel: "NVIDIA API key",
+ groupId: "nvidia",
+ groupLabel: "NVIDIA",
+ groupHint: "Direct API key",
+ methodId: "api-key",
+ modelSelection: {
+ promptWhenAuthChoiceProvided: true,
+ allowKeepCurrent: false,
+ },
+ },
+ modelPicker: {
+ label: "NVIDIA (custom)",
+ hint: "Use NVIDIA-hosted open models",
+ methodId: "api-key",
+ },
+ },
},
});
diff --git a/extensions/nvidia/onboard.test.ts b/extensions/nvidia/onboard.test.ts
new file mode 100644
index 00000000000..63636f69935
--- /dev/null
+++ b/extensions/nvidia/onboard.test.ts
@@ -0,0 +1,47 @@
+import {
+ expectProviderOnboardMergedLegacyConfig,
+ expectProviderOnboardPrimaryModel,
+} from "openclaw/plugin-sdk/provider-test-contracts";
+import { describe, expect, it } from "vitest";
+import { applyNvidiaConfig, applyNvidiaProviderConfig } from "./onboard.js";
+
+describe("nvidia onboard", () => {
+ it("adds NVIDIA provider with correct settings", () => {
+ const cfg = applyNvidiaConfig({});
+ expect(cfg.models?.providers?.nvidia).toMatchObject({
+ baseUrl: "https://integrate.api.nvidia.com/v1",
+ api: "openai-completions",
+ });
+ expect(cfg.models?.providers?.nvidia?.models.map((model) => model.id)).toEqual([
+ "nvidia/nemotron-3-super-120b-a12b",
+ "moonshotai/kimi-k2.5",
+ "minimaxai/minimax-m2.5",
+ "z-ai/glm5",
+ ]);
+ // Config stores the canonical form; the picker label shows the literal
+ // form via preserveLiteralProviderPrefix.
+ expectProviderOnboardPrimaryModel({
+ applyConfig: applyNvidiaConfig,
+ modelRef: "nvidia/nemotron-3-super-120b-a12b",
+ });
+ });
+
+ it("merges NVIDIA models and keeps existing provider overrides", () => {
+ const provider = expectProviderOnboardMergedLegacyConfig({
+ applyProviderConfig: applyNvidiaProviderConfig,
+ providerId: "nvidia",
+ providerApi: "openai-completions",
+ baseUrl: "https://integrate.api.nvidia.com/v1",
+ legacyApi: "openai-completions",
+ legacyModelId: "custom-model",
+ legacyModelName: "Custom",
+ });
+ expect(provider?.models.map((model) => model.id)).toEqual([
+ "custom-model",
+ "nvidia/nemotron-3-super-120b-a12b",
+ "moonshotai/kimi-k2.5",
+ "minimaxai/minimax-m2.5",
+ "z-ai/glm5",
+ ]);
+ });
+});
diff --git a/extensions/nvidia/onboard.ts b/extensions/nvidia/onboard.ts
new file mode 100644
index 00000000000..98f1e574ff5
--- /dev/null
+++ b/extensions/nvidia/onboard.ts
@@ -0,0 +1,30 @@
+import {
+ createDefaultModelsPresetAppliers,
+ type OpenClawConfig,
+} from "openclaw/plugin-sdk/provider-onboard";
+import { buildNvidiaProvider, NVIDIA_DEFAULT_MODEL_ID } from "./provider-catalog.js";
+
+export const NVIDIA_DEFAULT_MODEL_REF = NVIDIA_DEFAULT_MODEL_ID;
+
+const nvidiaPresetAppliers = createDefaultModelsPresetAppliers({
+ primaryModelRef: NVIDIA_DEFAULT_MODEL_REF,
+ resolveParams: (_cfg: OpenClawConfig) => {
+ const defaultProvider = buildNvidiaProvider();
+ return {
+ providerId: "nvidia",
+ api: defaultProvider.api ?? "openai-completions",
+ baseUrl: defaultProvider.baseUrl,
+ defaultModels: defaultProvider.models ?? [],
+ defaultModelId: NVIDIA_DEFAULT_MODEL_ID,
+ aliases: [{ modelRef: NVIDIA_DEFAULT_MODEL_REF, alias: "NVIDIA" }],
+ };
+ },
+});
+
+export function applyNvidiaProviderConfig(cfg: OpenClawConfig): OpenClawConfig {
+ return nvidiaPresetAppliers.applyProviderConfig(cfg);
+}
+
+export function applyNvidiaConfig(cfg: OpenClawConfig): OpenClawConfig {
+ return nvidiaPresetAppliers.applyConfig(cfg);
+}
diff --git a/extensions/nvidia/openclaw.plugin.json b/extensions/nvidia/openclaw.plugin.json
index 7eb372a7e1d..a8ed8edde9a 100644
--- a/extensions/nvidia/openclaw.plugin.json
+++ b/extensions/nvidia/openclaw.plugin.json
@@ -100,7 +100,7 @@
"choiceLabel": "NVIDIA API key",
"groupId": "nvidia",
"groupLabel": "NVIDIA",
- "groupHint": "API key",
+ "groupHint": "Direct API key",
"optionKey": "nvidiaApiKey",
"cliFlag": "--nvidia-api-key",
  "cliOption": "--nvidia-api-key <key>",
diff --git a/extensions/nvidia/plugin-registration.contract.test.ts b/extensions/nvidia/plugin-registration.contract.test.ts
new file mode 100644
index 00000000000..717e912231f
--- /dev/null
+++ b/extensions/nvidia/plugin-registration.contract.test.ts
@@ -0,0 +1,14 @@
+import { describePluginRegistrationContract } from "openclaw/plugin-sdk/plugin-test-contracts";
+
+describePluginRegistrationContract({
+ pluginId: "nvidia",
+ providerIds: ["nvidia"],
+ manifestAuthChoice: {
+ pluginId: "nvidia",
+ choiceId: "nvidia-api-key",
+ choiceLabel: "NVIDIA API key",
+ groupId: "nvidia",
+ groupLabel: "NVIDIA",
+ groupHint: "Direct API key",
+ },
+});
diff --git a/extensions/nvidia/provider-catalog.ts b/extensions/nvidia/provider-catalog.ts
index d01b50c3f94..8db86c961d9 100644
--- a/extensions/nvidia/provider-catalog.ts
+++ b/extensions/nvidia/provider-catalog.ts
@@ -2,6 +2,8 @@ import { buildManifestModelProviderConfig } from "openclaw/plugin-sdk/provider-c
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-model-shared";
import manifest from "./openclaw.plugin.json" with { type: "json" };
+export const NVIDIA_DEFAULT_MODEL_ID = "nvidia/nemotron-3-super-120b-a12b";
+
export function buildNvidiaProvider(): ModelProviderConfig {
return {
...buildManifestModelProviderConfig({
diff --git a/src/agents/model-ref-shared.ts b/src/agents/model-ref-shared.ts
index c0de50888ec..22b8b866961 100644
--- a/src/agents/model-ref-shared.ts
+++ b/src/agents/model-ref-shared.ts
@@ -70,3 +70,17 @@ export function resolveStaticAllowlistModelKey(
}
return modelKey(parsed.provider, parsed.model);
}
+
+export function formatLiteralProviderPrefixedModelRef(provider: string, modelRef: string): string {
+ const providerId = normalizeProviderId(provider);
+ const trimmedRef = modelRef.trim();
+ if (!providerId || !trimmedRef) {
+ return trimmedRef;
+ }
+ const normalizedRef = normalizeLowercaseStringOrEmpty(trimmedRef);
+ const literalPrefix = `${providerId}/${providerId}/`;
+ if (normalizedRef.startsWith(literalPrefix)) {
+ return trimmedRef;
+ }
+ return normalizedRef.startsWith(`${providerId}/`) ? `${providerId}/${trimmedRef}` : trimmedRef;
+}
diff --git a/src/agents/model-selection.test.ts b/src/agents/model-selection.test.ts
index 4133b08df0f..a21a8a31e3d 100644
--- a/src/agents/model-selection.test.ts
+++ b/src/agents/model-selection.test.ts
@@ -223,6 +223,12 @@ describe("model-selection", () => {
defaultProvider: "anthropic",
expected: { provider: "mlx", model: "mlx-community/Qwen3-30B-A3B-6bit" },
},
+ {
+ name: "preserves three-segment refs where the maker equals the provider",
+ variants: ["nvidia/nvidia/nemotron-3-super-120b-a12b"],
+ defaultProvider: "anthropic",
+ expected: { provider: "nvidia", model: "nvidia/nemotron-3-super-120b-a12b" },
+ },
{
name: "normalizes anthropic shorthand aliases",
variants: ["anthropic/opus-4.6", "opus-4.6", " anthropic / opus-4.6 "],
diff --git a/src/commands/configure.gateway-auth.ts b/src/commands/configure.gateway-auth.ts
index 54b24307dd2..0f442db5b7f 100644
--- a/src/commands/configure.gateway-auth.ts
+++ b/src/commands/configure.gateway-auth.ts
@@ -254,6 +254,8 @@ export async function promptAuthConfig(
const allowlistSelection = await promptModelAllowlist({
config: next,
prompter,
+ workspaceDir: resolveDefaultAgentWorkspaceDir(),
+ env: process.env,
allowedKeys: modelPrompt?.allowedKeys,
initialSelections: modelPrompt?.initialSelections,
message: modelPrompt?.message,
diff --git a/src/commands/model-picker.test.ts b/src/commands/model-picker.test.ts
index 073de57700a..80c9b54e522 100644
--- a/src/commands/model-picker.test.ts
+++ b/src/commands/model-picker.test.ts
@@ -293,6 +293,98 @@ describe("promptDefaultModel", () => {
);
});
+ it("shows literal double-prefix labels for providers that preserve literal prefixes", async () => {
+ loadModelCatalog.mockResolvedValue([
+ {
+ provider: "nvidia",
+ id: "nvidia/nemotron-3-super-120b-a12b",
+ name: "Nemotron",
+ },
+ ]);
+ resolvePluginProviders.mockReturnValue([
+ {
+ id: "nvidia",
+ preserveLiteralProviderPrefix: true,
+ },
+ ] as never);
+
+ const select = vi.fn(async (params) => params.initialValue as never);
+ const prompter = makePrompter({ select });
+ const config = {
+ agents: {
+ defaults: {
+ model: "nvidia/nemotron-3-super-120b-a12b",
+ },
+ },
+ } as OpenClawConfig;
+
+ await promptDefaultModel({
+ config,
+ prompter,
+ allowKeep: true,
+ includeManual: false,
+ ignoreAllowlist: true,
+ });
+
+ const options = select.mock.calls[0]?.[0]?.options ?? [];
+ expect(options).toEqual(
+ expect.arrayContaining([
+ expect.objectContaining({
+ value: "__keep__",
+ label: "Keep current (nvidia/nvidia/nemotron-3-super-120b-a12b)",
+ }),
+ expect.objectContaining({
+ value: "nvidia/nemotron-3-super-120b-a12b",
+ label: "nvidia/nvidia/nemotron-3-super-120b-a12b",
+ }),
+ ]),
+ );
+ });
+
+ it("shows literal double-prefix keep label before browsing provider catalogs", async () => {
+ resolvePluginProviders.mockReturnValue([
+ {
+ id: "nvidia",
+ preserveLiteralProviderPrefix: true,
+ },
+ ] as never);
+
+ const select = vi.fn(async (params) => params.initialValue as never);
+ const prompter = makePrompter({ select });
+ const config = {
+ agents: {
+ defaults: {
+ model: "nvidia/nemotron-3-super-120b-a12b",
+ },
+ },
+ } as OpenClawConfig;
+
+ const result = await promptDefaultModel({
+ config,
+ prompter,
+ allowKeep: true,
+ includeManual: true,
+ ignoreAllowlist: true,
+ preferredProvider: "nvidia",
+ browseCatalogOnDemand: true,
+ });
+
+ expect(result).toEqual({});
+ expect(loadModelCatalog).not.toHaveBeenCalled();
+ expect(select.mock.calls[0]?.[0]).toMatchObject({
+ searchable: false,
+ initialValue: "__keep__",
+ });
+ expect(select.mock.calls[0]?.[0]?.options).toEqual([
+ expect.objectContaining({
+ value: "__keep__",
+ label: "Keep current (nvidia/nvidia/nemotron-3-super-120b-a12b)",
+ }),
+ expect.objectContaining({ value: "__manual__" }),
+ expect.objectContaining({ value: "__browse__" }),
+ ]);
+ });
+
it("keeps current preferred-provider models cold until browsing is requested", async () => {
const select = vi.fn(async (params) => params.initialValue as never);
const prompter = makePrompter({ select });
@@ -535,6 +627,57 @@ describe("promptDefaultModel", () => {
expect.objectContaining({ value: "openai/gpt-5.5" }),
]);
});
+
+ it("surfaces NVIDIA provider model-picker contributions", async () => {
+ loadModelCatalog.mockResolvedValue([
+ {
+ provider: "openai",
+ id: "gpt-5.4",
+ name: "GPT-5.4",
+ },
+ ]);
+ providerModelPickerContributionRuntime.enabled = true;
+ providerModelPickerContributionRuntime.resolve.mockReturnValue([
+ {
+ id: "provider:model-picker:provider-plugin:nvidia:api-key",
+ kind: "provider",
+ surface: "model-picker",
+ option: {
+ value: "provider-plugin:nvidia:api-key",
+ label: "NVIDIA (custom)",
+ hint: "Use NVIDIA-hosted open models",
+ },
+ },
+ ] as never);
+
+ const select = vi.fn(async (params) => {
+ const nvidia = params.options.find(
+ (opt: { value: string }) => opt.value === "provider-plugin:nvidia:api-key",
+ );
+ return (nvidia?.value ?? "") as never;
+ });
+ const prompter = makePrompter({ select });
+
+ await promptDefaultModel({
+ config: { agents: { defaults: {} } } as OpenClawConfig,
+ prompter,
+ allowKeep: false,
+ includeManual: false,
+ includeProviderPluginSetups: true,
+ ignoreAllowlist: true,
+ agentDir: "/tmp/openclaw-agent",
+ runtime: {} as never,
+ });
+
+ expect(select.mock.calls[0]?.[0]?.options).toEqual(
+ expect.arrayContaining([
+ expect.objectContaining({
+ value: "provider-plugin:nvidia:api-key",
+ label: "NVIDIA (custom)",
+ }),
+ ]),
+ );
+ });
});
describe("promptModelAllowlist", () => {
diff --git a/src/flows/model-picker.ts b/src/flows/model-picker.ts
index f31f621f4eb..c7ad83c8e72 100644
--- a/src/flows/model-picker.ts
+++ b/src/flows/model-picker.ts
@@ -7,6 +7,7 @@ import {
isModelPickerVisibleModelRef,
isModelPickerVisibleProvider,
} from "../agents/model-picker-visibility.js";
+import { formatLiteralProviderPrefixedModelRef } from "../agents/model-ref-shared.js";
import {
buildAllowedModelSet,
buildConfiguredModelCatalog,
@@ -38,6 +39,7 @@ const KEEP_VALUE = "__keep__";
const MANUAL_VALUE = "__manual__";
const BROWSE_VALUE = "__browse__";
const PROVIDER_FILTER_THRESHOLD = 30;
+const EMPTY_LITERAL_PREFIX_PROVIDERS = new Set<string>();
// Internal router models are valid defaults during auth/setup but not manual API targets.
const HIDDEN_ROUTER_MODELS = new Set(["openrouter/auto"]);
@@ -219,6 +221,39 @@ function resolveModelRouteHint(provider: string): string | undefined {
return undefined;
}
+async function resolveLiteralPrefixProviderIds(params: {
+ cfg: OpenClawConfig;
+ workspaceDir?: string;
+ env?: NodeJS.ProcessEnv;
+}): Promise<Set<string>> {
+ const { resolvePluginProviders } = await loadResolvedModelPickerRuntime();
+ const providers = resolvePluginProviders({
+ config: params.cfg,
+ workspaceDir: params.workspaceDir,
+ env: params.env,
+ activate: false,
+ cache: false,
+ includeUntrustedWorkspacePlugins: false,
+ });
+  const ids = new Set<string>();
+ for (const provider of providers) {
+ if (!provider.preserveLiteralProviderPrefix) {
+ continue;
+ }
+ const id = normalizeProviderId(provider.id);
+ if (id) {
+ ids.add(id);
+ }
+ for (const alias of provider.aliases ?? []) {
+ const aliasId = normalizeProviderId(alias);
+ if (aliasId) {
+ ids.add(aliasId);
+ }
+ }
+ }
+ return ids;
+}
+
function addModelSelectOption(params: {
entry: {
provider: string;
@@ -231,6 +266,7 @@ function addModelSelectOption(params: {
  seen: Set<string>;
aliasIndex: ReturnType;
hasAuth: (provider: string) => boolean;
+  literalPrefixProviders: Set<string>;
}) {
const key = modelKey(params.entry.provider, params.entry.id);
if (
@@ -261,9 +297,12 @@ function addModelSelectOption(params: {
if (!params.hasAuth(params.entry.provider)) {
hints.push("auth missing");
}
+ const label = params.literalPrefixProviders.has(normalizeProviderId(params.entry.provider))
+ ? `${params.entry.provider}/${params.entry.id}`
+ : key;
params.options.push({
value: key,
- label: key,
+ label,
hint: hints.length > 0 ? hints.join(" · ") : undefined,
});
params.seen.add(key);
@@ -286,6 +325,7 @@ function addModelKeySelectOption(params: {
seen: Set;
aliasIndex: ReturnType;
hasAuth: (provider: string) => boolean;
+  literalPrefixProviders?: Set<string>;
fallbackHint: string;
}) {
const entry = splitModelKey(params.key);
@@ -299,6 +339,7 @@ function addModelKeySelectOption(params: {
seen: params.seen,
aliasIndex: params.aliasIndex,
hasAuth: params.hasAuth,
+ literalPrefixProviders: params.literalPrefixProviders ?? EMPTY_LITERAL_PREFIX_PROVIDERS,
});
if (params.seen.size > before) {
const option = params.options.at(-1);
@@ -563,6 +604,27 @@ export async function promptDefaultModel(
});
const resolvedKey = modelKey(resolved.provider, resolved.model);
const configuredKey = configuredRaw ? resolvedKey : "";
+  let literalPrefixProvidersCache: Set<string> | undefined;
+ const resolveCachedLiteralPrefixProviders = async () => {
+ if (!literalPrefixProvidersCache) {
+ literalPrefixProvidersCache = await resolveLiteralPrefixProviderIds({
+ cfg,
+ workspaceDir: params.workspaceDir,
+ env: params.env,
+ });
+ }
+ return literalPrefixProvidersCache;
+ };
+ const resolveConfiguredDisplayLabel = async () => {
+ const providerId = normalizeProviderId(resolved.provider);
+ if (!providerId) {
+ return configuredRaw || resolvedKey;
+ }
+ const literalPrefixProviders = await resolveCachedLiteralPrefixProviders();
+ return literalPrefixProviders.has(providerId)
+ ? formatLiteralProviderPrefixedModelRef(resolved.provider, resolvedKey)
+ : configuredRaw || resolvedKey;
+ };
if (
loadCatalog &&
@@ -571,11 +633,12 @@ export async function promptDefaultModel(
allowKeep &&
normalizeProviderId(resolved.provider) === preferredProvider
) {
+ const configuredLabel = await resolveConfiguredDisplayLabel();
const options: WizardSelectOption[] = [
{
value: KEEP_VALUE,
label: configuredRaw
- ? `Keep current (${configuredRaw})`
+ ? `Keep current (${configuredLabel})`
: `Keep current (default: ${resolvedKey})`,
hint:
configuredRaw && configuredRaw !== resolvedKey ? `resolves to ${resolvedKey}` : undefined,
@@ -612,12 +675,13 @@ export async function promptDefaultModel(
}
if (!loadCatalog) {
+ const configuredLabel = await resolveConfiguredDisplayLabel();
const options: WizardSelectOption[] = [];
if (allowKeep) {
options.push({
value: KEEP_VALUE,
label: configuredRaw
- ? `Keep current (${configuredRaw})`
+ ? `Keep current (${configuredLabel})`
: `Keep current (default: ${resolvedKey})`,
hint:
configuredRaw && configuredRaw !== resolvedKey ? `resolves to ${resolvedKey}` : undefined,
@@ -723,16 +787,22 @@ export async function promptDefaultModel(
? filteredModels.some((entry) => matchesPreferredProvider?.(entry.provider))
: false;
const hasAuth = createProviderAuthChecker({ cfg, agentDir: params.agentDir });
+ const literalPrefixProviders = await resolveCachedLiteralPrefixProviders();
+
+ // Show the literal form (e.g. nvidia/nvidia/...) in the "Keep current" label
+ // for providers that set preserveLiteralProviderPrefix, so the user sees the
+ // same ref they'll pick from the catalog rows. Config itself stays canonical.
+ const configuredLabel = literalPrefixProviders.has(normalizeProviderId(resolved.provider))
+ ? formatLiteralProviderPrefixedModelRef(resolved.provider, resolvedKey)
+ : configuredRaw || resolvedKey;
const options: WizardSelectOption[] = [];
if (allowKeep) {
options.push({
value: KEEP_VALUE,
label: configuredRaw
- ? `Keep current (${configuredRaw})`
+ ? `Keep current (${configuredLabel})`
: `Keep current (default: ${resolvedKey})`,
- hint:
- configuredRaw && configuredRaw !== resolvedKey ? `resolves to ${resolvedKey}` : undefined,
});
}
if (includeManual) {
@@ -750,12 +820,19 @@ export async function promptDefaultModel(
const seen = new Set();
for (const entry of filteredModels) {
- addModelSelectOption({ entry, options, seen, aliasIndex, hasAuth });
+ addModelSelectOption({
+ entry,
+ options,
+ seen,
+ aliasIndex,
+ hasAuth,
+ literalPrefixProviders,
+ });
}
if (configuredKey && !seen.has(configuredKey)) {
options.push({
value: configuredKey,
- label: configuredKey,
+ label: configuredLabel,
hint: "current (not in catalog)",
});
}
@@ -822,6 +899,8 @@ export async function promptModelAllowlist(params: {
prompter: WizardPrompter;
message?: string;
agentDir?: string;
+ workspaceDir?: string;
+ env?: NodeJS.ProcessEnv;
allowedKeys?: string[];
initialSelections?: string[];
preferredProvider?: string;
@@ -970,6 +1049,12 @@ export async function promptModelAllowlist(params: {
return { models: normalizeModelKeys(parsed) };
}
+ const literalPrefixProviders = await resolveLiteralPrefixProviderIds({
+ cfg,
+ workspaceDir: params.workspaceDir,
+ env: params.env,
+ });
+
const options: WizardSelectOption[] = [];
const seen = new Set();
const allowedCatalog = (
@@ -997,7 +1082,14 @@ export async function promptModelAllowlist(params: {
: selectableInitialSeeds.filter(isModelPickerVisibleModelRef);
for (const entry of filteredCatalog) {
- addModelSelectOption({ entry, options, seen, aliasIndex, hasAuth });
+ addModelSelectOption({
+ entry,
+ options,
+ seen,
+ aliasIndex,
+ hasAuth,
+ literalPrefixProviders,
+ });
}
const supplementalKeys = (allowedKeySet ? allowedKeys : selectableInitialSeeds).filter(
diff --git a/src/plugins/provider-auth-choice.ts b/src/plugins/provider-auth-choice.ts
index fbcea0c0449..36388861ed2 100644
--- a/src/plugins/provider-auth-choice.ts
+++ b/src/plugins/provider-auth-choice.ts
@@ -5,6 +5,7 @@ import {
resolveAgentWorkspaceDir,
} from "../agents/agent-scope.js";
import { upsertAuthProfile } from "../agents/auth-profiles.js";
+import { formatLiteralProviderPrefixedModelRef } from "../agents/model-ref-shared.js";
import { resolveDefaultAgentWorkspaceDir } from "../agents/workspace.js";
import type { OpenClawConfig } from "../config/types.openclaw.js";
import type { RuntimeEnv } from "../runtime.js";
@@ -54,6 +55,13 @@ export type PluginProviderAuthChoiceOptions = {
label: string;
};
+function formatModelRefForDisplay(modelRef: string, provider: ProviderPlugin): string {
+ if (!provider.preserveLiteralProviderPrefix) {
+ return modelRef;
+ }
+ return formatLiteralProviderPrefixedModelRef(provider.id, modelRef);
+}
+
function restoreConfiguredPrimaryModel(
nextConfig: OpenClawConfig,
originalConfig: OpenClawConfig,
@@ -100,27 +108,30 @@ function resolveConfiguredDefaultModelPrimary(cfg: OpenClawConfig): string | und
async function noteDefaultModelResult(params: {
previousPrimary: string | undefined;
selectedModel: string;
+ selectedModelDisplay?: string;
preserveExistingDefaultModel: boolean | undefined;
prompter: WizardPrompter;
}): Promise {
+ const selectedModelDisplay = params.selectedModelDisplay ?? params.selectedModel;
if (
params.preserveExistingDefaultModel === true &&
params.previousPrimary &&
params.previousPrimary !== params.selectedModel
) {
await params.prompter.note(
- `Kept existing default model ${params.previousPrimary}; ${params.selectedModel} is available.`,
+ `Kept existing default model ${params.previousPrimary}; ${selectedModelDisplay} is available.`,
"Model configured",
);
return;
}
- await params.prompter.note(`Default model set to ${params.selectedModel}`, "Model configured");
+ await params.prompter.note(`Default model set to ${selectedModelDisplay}`, "Model configured");
}
async function applyDefaultModelFromAuthChoice(params: {
config: OpenClawConfig;
selectedModel: string;
+ selectedModelDisplay?: string;
preserveExistingDefaultModel: boolean | undefined;
prompter: WizardPrompter;
runSelectedModelHook: (config: OpenClawConfig) => Promise;
@@ -139,6 +150,7 @@ async function applyDefaultModelFromAuthChoice(params: {
await noteDefaultModelResult({
previousPrimary,
selectedModel: params.selectedModel,
+ selectedModelDisplay: params.selectedModelDisplay,
preserveExistingDefaultModel: params.preserveExistingDefaultModel,
prompter: params.prompter,
});
@@ -400,10 +412,12 @@ export async function applyAuthChoiceLoadedPluginProvider(
let agentModelOverride: string | undefined;
if (applied.defaultModel) {
const selectedModel = applied.defaultModel;
+ const selectedModelDisplay = formatModelRefForDisplay(selectedModel, resolved.provider);
if (params.setDefaultModel) {
nextConfig = await applyDefaultModelFromAuthChoice({
config: nextConfig,
selectedModel,
+ selectedModelDisplay,
preserveExistingDefaultModel: params.preserveExistingDefaultModel,
prompter: params.prompter,
runSelectedModelHook: async (config) => {
@@ -491,10 +505,12 @@ export async function applyAuthChoicePluginProvider(
nextConfig = applied.config;
if (applied.defaultModel) {
const selectedModel = applied.defaultModel;
+ const selectedModelDisplay = formatModelRefForDisplay(selectedModel, provider);
if (params.setDefaultModel) {
nextConfig = await applyDefaultModelFromAuthChoice({
config: nextConfig,
selectedModel,
+ selectedModelDisplay,
preserveExistingDefaultModel: params.preserveExistingDefaultModel,
prompter: params.prompter,
runSelectedModelHook: async (config) => {
@@ -511,7 +527,7 @@ export async function applyAuthChoicePluginProvider(
}
if (params.agentId) {
await params.prompter.note(
- `Default model set to ${selectedModel} for agent "${params.agentId}".`,
+ `Default model set to ${selectedModelDisplay} for agent "${params.agentId}".`,
"Model configured",
);
}
diff --git a/src/plugins/types.ts b/src/plugins/types.ts
index 70ca1c3b714..1a2c9a11ff7 100644
--- a/src/plugins/types.ts
+++ b/src/plugins/types.ts
@@ -1214,6 +1214,19 @@ export type ProviderPlugin = {
* configured.
*/
staticCatalog?: ProviderPluginCatalog;
+ /**
+ * Show catalog row labels as the literal `<provider>/<model>`
+ * composition instead of the canonical (deduped) key.
+ *
+ * `modelKey` strips a duplicate `<provider>/` prefix so storage and
+ * lookups stay stable. This flag only changes the picker label — the
+ * option value and persisted config remain canonical.
+ *
+ * Set when the leading `<vendor>/` segment in the native model id is
+ * a meaningful vendor namespace (e.g. NVIDIA's `nvidia/nemotron-...`
+ * alongside `moonshotai/kimi-k2.5`).
+ */
+ preserveLiteralProviderPrefix?: boolean;
/**
* @deprecated Use catalog.
*