mirror of
https://github.com/openclaw/openclaw.git
synced 2026-03-12 07:20:45 +00:00
fix: align NVIDIA provider docs and model ids (#11606)
This commit is contained in:
@@ -12,11 +12,12 @@ NVIDIA provides an OpenAI-compatible API at `https://integrate.api.nvidia.com/v1

 ## CLI setup

-Export the key once, then run onboarding without an inline secret:
+Export the key once, then run onboarding and set an NVIDIA model:

 ```bash
 export NVIDIA_API_KEY="nvapi-..."
-openclaw onboard --auth-choice apiKey --token-provider nvidia
+openclaw onboard --auth-choice skip
+openclaw models set nvidia/nvidia/llama-3.1-nemotron-70b-instruct
 ```

 If you still pass `--token`, remember it lands in shell history and `ps` output; prefer the env var when possible.
||||
@@ -36,7 +37,7 @@ If you still pass `--token`, remember it lands in shell history and `ps` output;
   },
   agents: {
     defaults: {
-      model: { primary: "nvidia/llama-3.1-nemotron-70b-instruct" },
+      model: { primary: "nvidia/nvidia/llama-3.1-nemotron-70b-instruct" },
     },
   },
 }
@@ -45,7 +46,7 @@ If you still pass `--token`, remember it lands in shell history and `ps` output;
 ## Model IDs

 - `nvidia/llama-3.1-nemotron-70b-instruct` (default)
-- `nvidia/llama-3.3-70b-instruct`
+- `meta/llama-3.3-70b-instruct`
 - `nvidia/mistral-nemo-minitron-8b-8k-instruct`

 ## Notes
||||
@@ -59,7 +59,7 @@ describe("NVIDIA provider", () => {
     const provider = buildNvidiaProvider();
     const modelIds = provider.models.map((m) => m.id);
     expect(modelIds).toContain("nvidia/llama-3.1-nemotron-70b-instruct");
-    expect(modelIds).toContain("nvidia/llama-3.3-70b-instruct");
+    expect(modelIds).toContain("meta/llama-3.3-70b-instruct");
     expect(modelIds).toContain("nvidia/mistral-nemo-minitron-8b-8k-instruct");
   });
 });
@@ -635,8 +635,8 @@ export function buildNvidiaProvider(): ProviderConfig {
       maxTokens: NVIDIA_DEFAULT_MAX_TOKENS,
     },
     {
-      id: "nvidia/llama-3.3-70b-instruct",
-      name: "NVIDIA Llama 3.3 70B Instruct",
+      id: "meta/llama-3.3-70b-instruct",
+      name: "Meta Llama 3.3 70B Instruct",
       reasoning: false,
       input: ["text"],
       cost: NVIDIA_DEFAULT_COST,
||||
Reference in New Issue
Block a user