test: replace models-config matrix with narrow coverage

This commit is contained in:
Peter Steinberger
2026-04-09 01:39:35 +01:00
parent 3ae10b02f2
commit f0644d7613
4 changed files with 99 additions and 206 deletions

View File

@@ -1,176 +0,0 @@
import { mkdtempSync } from "node:fs";
import { writeFile } from "node:fs/promises";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { describe, expect, it } from "vitest";
import type { OpenClawConfig } from "../config/config.js";
import {
MINIMAX_OAUTH_MARKER,
NON_ENV_SECRETREF_MARKER,
OLLAMA_LOCAL_AUTH_MARKER,
} from "./model-auth-markers.js";
import { resolveImplicitProvidersForTest } from "./models-config.e2e-harness.js";
// Provider map shape produced by the e2e harness resolver; derived from the
// harness return type so the matrix stays in sync with the real resolver.
type ProvidersMap = Awaited<ReturnType<typeof resolveImplicitProvidersForTest>>;
// Explicit provider config as declared under `models.providers` in OpenClawConfig.
type ExplicitProviders = NonNullable<NonNullable<OpenClawConfig["models"]>["providers"]>;
// One row of the coverage matrix: the inputs fed to the resolver plus an
// assertion callback that inspects the resolved provider map.
type MatrixCase = {
// Human-readable case name, interpolated into the vitest test title via "$name".
name: string;
// Process env presented to the resolver (empty object = no ambient env keys).
env?: NodeJS.ProcessEnv;
// Raw profile entries written to auth-profiles.json before resolution.
authProfiles?: Record<string, unknown>;
// Providers already declared in config, which may suppress implicit injection.
explicitProviders?: ExplicitProviders;
// Assertions run against the resolver output for this case.
assertProviders: (providers: ProvidersMap) => void;
};
/**
 * Persist a versioned `auth-profiles.json` into the agent directory.
 *
 * @param agentDir - Directory the profiles file is written into.
 * @param profiles - Profile entries keyed by profile id; when omitted,
 *   nothing is written and the returned promise resolves immediately.
 */
async function writeAuthProfiles(
  agentDir: string,
  profiles: Record<string, unknown> | undefined,
): Promise<void> {
  if (profiles) {
    const payload = JSON.stringify({ version: 1, profiles }, null, 2);
    await writeFile(join(agentDir, "auth-profiles.json"), payload, "utf8");
  }
}
// Narrow coverage matrix: one representative case per implicit-provider
// resolution path (env api keys, env-backed auth profiles, non-env secret
// refs, oauth profiles, and explicit-config interactions).
const MATRIX_CASES: MatrixCase[] = [
{
name: "env api key injects a simple provider",
env: { NVIDIA_API_KEY: "test-nvidia-key" }, // pragma: allowlist secret
assertProviders(providers) {
// The injected provider records the env *variable name*, not the secret value.
expect(providers?.nvidia?.apiKey).toBe("NVIDIA_API_KEY");
expect(providers?.nvidia?.baseUrl).toBe("https://integrate.api.nvidia.com/v1");
expect(providers?.nvidia?.models?.length).toBeGreaterThan(0);
},
},
{
name: "env api key injects paired plan providers",
env: { VOLCANO_ENGINE_API_KEY: "test-volcengine-key" }, // pragma: allowlist secret
assertProviders(providers) {
// One env key apparently yields both the base provider and a "-plan"
// sibling sharing the same key — TODO confirm against the resolver.
expect(providers?.volcengine?.apiKey).toBe("VOLCANO_ENGINE_API_KEY");
expect(providers?.["volcengine-plan"]?.apiKey).toBe("VOLCANO_ENGINE_API_KEY");
expect(providers?.["volcengine-plan"]?.api).toBe("openai-completions");
},
},
{
name: "env-backed auth profiles persist env markers",
env: {},
authProfiles: {
"together:default": {
type: "token",
provider: "together",
tokenRef: { source: "env", provider: "default", id: "TOGETHER_API_KEY" },
},
},
assertProviders(providers) {
// An env-sourced tokenRef surfaces as the referenced env var name.
expect(providers?.together?.apiKey).toBe("TOGETHER_API_KEY");
},
},
{
name: "non-env secret refs preserve compatibility markers",
env: {},
authProfiles: {
"byteplus:default": {
type: "api_key",
provider: "byteplus",
key: "runtime-byteplus-key",
keyRef: { source: "file", provider: "vault", id: "/byteplus/apiKey" },
},
},
assertProviders(providers) {
// Secrets that do not live in env are replaced by a sentinel marker so the
// plaintext key never lands in the generated config.
expect(providers?.byteplus?.apiKey).toBe(NON_ENV_SECRETREF_MARKER);
expect(providers?.["byteplus-plan"]?.apiKey).toBe(NON_ENV_SECRETREF_MARKER);
},
},
{
name: "oauth profiles still inject compatibility providers",
env: {},
authProfiles: {
"openai-codex:default": {
type: "oauth",
provider: "openai-codex",
access: "codex-access-token",
refresh: "codex-refresh-token",
expires: Date.now() + 60_000,
},
"minimax-portal:default": {
type: "oauth",
provider: "minimax-portal",
access: "minimax-access-token",
refresh: "minimax-refresh-token",
expires: Date.now() + 60_000,
},
},
assertProviders(providers) {
// Codex oauth injects a provider with NO apiKey at all, while minimax
// oauth uses a dedicated marker value — two distinct compat paths.
expect(providers?.["openai-codex"]).toMatchObject({
baseUrl: "https://chatgpt.com/backend-api",
api: "openai-codex-responses",
models: [],
});
expect(providers?.["openai-codex"]).not.toHaveProperty("apiKey");
expect(providers?.["minimax-portal"]?.apiKey).toBe(MINIMAX_OAUTH_MARKER);
},
},
{
name: "explicit vllm config suppresses implicit vllm injection",
env: { VLLM_API_KEY: "test-vllm-key" }, // pragma: allowlist secret
explicitProviders: {
vllm: {
baseUrl: "http://127.0.0.1:8000/v1",
api: "openai-completions",
models: [],
},
},
assertProviders(providers) {
// Explicit config wins: the resolver must not emit a competing implicit entry.
expect(providers?.vllm).toBeUndefined();
},
},
{
name: "explicit ollama models still normalize the returned provider",
env: {},
explicitProviders: {
ollama: {
baseUrl: "http://remote-ollama:11434/v1",
models: [
{
id: "gpt-oss:20b",
name: "GPT-OSS 20B",
reasoning: false,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 8192,
maxTokens: 81920,
},
],
},
},
assertProviders(providers) {
// Normalization strips the "/v1" suffix, pins api to "ollama", and applies
// the synthetic local-auth marker while keeping the explicit model list.
expect(providers?.ollama?.baseUrl).toBe("http://remote-ollama:11434");
expect(providers?.ollama?.api).toBe("ollama");
expect(providers?.ollama?.apiKey).toBe(OLLAMA_LOCAL_AUTH_MARKER);
expect(providers?.ollama?.models).toHaveLength(1);
},
},
];
// Runs every matrix row as its own vitest case; "$name" pulls the case name
// into the test title.
describe("implicit provider resolution matrix", () => {
it.each(MATRIX_CASES)(
"$name",
async ({ env, authProfiles, explicitProviders, assertProviders }) => {
// Fresh, isolated agent dir per case so one case's auth-profiles.json
// cannot leak into another.
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
await writeAuthProfiles(agentDir, authProfiles);
const providers = await resolveImplicitProvidersForTest({
agentDir,
env,
explicitProviders,
});
assertProviders(providers);
},
// Per-case timeout in ms — generous because the e2e harness is presumably
// slow to boot; confirm before tightening.
240_000,
);
});

View File

@@ -1,29 +1,15 @@
import { mkdtempSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { beforeEach, describe, expect, it, vi } from "vitest";
import { describe, expect, it } from "vitest";
import type { ModelDefinitionConfig, ModelProviderConfig } from "../config/types.models.js";
import { withEnvAsync } from "../test-utils/env.js";
import { installModelsConfigTestHooks } from "./models-config.e2e-harness.js";
import { resolveEnvApiKey } from "./model-auth-env.js";
import {
resolveEnvApiKeyVarName,
resolveMissingProviderApiKey,
} from "./models-config.providers.secrets.js";
const NVIDIA_BASE_URL = "https://integrate.api.nvidia.com/v1";
const MINIMAX_BASE_URL = "https://api.minimax.io/anthropic";
const VLLM_DEFAULT_BASE_URL = "http://127.0.0.1:8000/v1";
installModelsConfigTestHooks();
let resolveApiKeyForProvider: typeof import("./model-auth.js").resolveApiKeyForProvider;
let resolveEnvApiKeyVarName: typeof import("./models-config.providers.secrets.js").resolveEnvApiKeyVarName;
let resolveMissingProviderApiKey: typeof import("./models-config.providers.secrets.js").resolveMissingProviderApiKey;
beforeEach(async () => {
vi.doUnmock("../plugins/provider-runtime.js");
vi.resetModules();
({ resolveApiKeyForProvider } = await import("./model-auth.js"));
({ resolveEnvApiKeyVarName, resolveMissingProviderApiKey } =
await import("./models-config.providers.secrets.js"));
});
function createTestModel(id: string): ModelDefinitionConfig {
return {
id,
@@ -93,17 +79,14 @@ describe("NVIDIA provider", () => {
expect(provider.models?.length).toBeGreaterThan(0);
});
it("resolves the nvidia api key value from env", async () => {
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
await withEnvAsync({ NVIDIA_API_KEY: "nvidia-test-api-key" }, async () => {
const auth = await resolveApiKeyForProvider({
provider: "nvidia",
agentDir,
});
it("resolves the nvidia api key value from env", () => {
const auth = resolveEnvApiKey("nvidia", {
NVIDIA_API_KEY: "nvidia-test-api-key",
} as NodeJS.ProcessEnv);
expect(auth.apiKey).toBe("nvidia-test-api-key");
expect(auth.mode).toBe("api-key");
expect(auth.source).toContain("NVIDIA_API_KEY");
expect(auth).toEqual({
apiKey: "nvidia-test-api-key",
source: "env: NVIDIA_API_KEY",
});
});
});

View File

@@ -11,6 +11,7 @@ import {
} from "../plugins/provider-discovery.js";
import type { ProviderPlugin } from "../plugins/types.js";
import { withFetchPreconnect } from "../test-utils/fetch-mock.js";
import { OLLAMA_LOCAL_AUTH_MARKER } from "./model-auth-markers.js";
import { resolveImplicitProviders } from "./models-config.providers.js";
import type { ProviderConfig } from "./models-config.providers.js";
@@ -351,6 +352,43 @@ describe("Ollama provider", () => {
});
});
it("should use synthetic local auth for configured remote providers without apiKey", async () => {
// withoutAmbientOllamaEnv presumably clears any OLLAMA_* env vars so only
// the explicit config below influences resolution — confirm in the helper.
await withoutAmbientOllamaEnv(async () => {
// Stub fetch with a bare spy: the catalog must not probe the remote host
// when explicit models are configured (asserted below).
const fetchMock = vi.fn();
vi.stubGlobal("fetch", withFetchPreconnect(fetchMock));
const provider = await runOllamaCatalog({
config: {
models: {
providers: {
ollama: {
baseUrl: "http://remote-ollama:11434/v1",
models: [
{
id: "gpt-oss:20b",
name: "GPT-OSS 20B",
reasoning: false,
input: ["text"],
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 8192,
maxTokens: 81920,
},
],
},
},
},
},
// NOTE(review): VITEST cleared + NODE_ENV=development looks like it
// simulates a non-test runtime for the catalog — verify the intent.
env: { VITEST: "", NODE_ENV: "development" },
});
// No network discovery: the explicit model list short-circuits any fetch.
expect(fetchMock).not.toHaveBeenCalled();
// Normalization mirrors production: "/v1" stripped, api forced to
// "ollama", and the synthetic local-auth marker applied.
expect(provider?.baseUrl).toBe("http://remote-ollama:11434");
expect(provider?.api).toBe("ollama");
expect(provider?.apiKey).toBe(OLLAMA_LOCAL_AUTH_MARKER);
expect(provider?.models).toHaveLength(1);
});
});
it("should preserve explicit apiKey from configured remote providers", async () => {
await withoutAmbientOllamaEnv(async () => {
const fetchMock = vi.fn(async (input: unknown) => {

View File

@@ -76,6 +76,54 @@ describe("models-config", () => {
).toBe("https://copilot.local");
});
it("passes explicit provider config to implicit discovery so plugins can skip duplicates", async () => {
// Injected resolver double: asserts it receives the explicit vllm config
// (so plugins can dedupe) and contributes no implicit providers of its own.
const resolveImplicitProviders = vi.fn<ResolveImplicitProvidersForModelsJson>(
async ({ explicitProviders }) => {
expect(explicitProviders.vllm?.baseUrl).toBe("http://127.0.0.1:8000/v1");
return {};
},
);
const plan = await planOpenClawModelsJsonWithDeps(
{
cfg: {
models: {
providers: {
vllm: {
baseUrl: "http://127.0.0.1:8000/v1",
api: "openai-completions",
models: [],
},
},
},
},
agentDir: "/tmp/openclaw-agent",
env: { VLLM_API_KEY: "test-vllm-key" } as NodeJS.ProcessEnv,
// Empty/absent existing file forces a fresh "write" plan.
existingRaw: "",
existingParsed: null,
},
{ resolveImplicitProviders },
);
expect(resolveImplicitProviders).toHaveBeenCalledOnce();
// Planned output contains only the explicit provider, serialized exactly as
// the planner would write it (2-space JSON plus trailing newline).
expect(plan).toEqual({
action: "write",
contents: `${JSON.stringify(
{
providers: {
vllm: {
baseUrl: "http://127.0.0.1:8000/v1",
api: "openai-completions",
models: [],
},
},
},
null,
2,
)}\n`,
});
});
it("uses tokenRef env var when github-copilot profile omits plaintext token", () => {
const auth = createProviderAuthResolver(
{