fix: restore qa lab config typing

This commit is contained in:
Peter Steinberger
2026-04-06 01:15:58 +01:00
parent 15f74b89c8
commit 89c8a1c36a
2 changed files with 54 additions and 40 deletions

View File

@@ -1,6 +1,17 @@
import { describe, expect, it } from "vitest";
import { buildQaGatewayConfig } from "./qa-gateway-config.js";
/**
 * Extracts the primary model id from a config `model` field.
 *
 * The field may be either a bare model string (e.g. "openai/gpt-5.4") or an
 * object carrying a `primary` property. Anything else — null, undefined, or
 * an object whose `primary` is not a string — yields `undefined`.
 */
function getPrimaryModel(value: unknown): string | undefined {
  if (typeof value === "string") {
    return value;
  }
  // Guard clauses: reject null and non-objects, and objects without `primary`.
  if (value === null || typeof value !== "object" || !("primary" in value)) {
    return undefined;
  }
  const { primary } = value as { primary?: unknown };
  return typeof primary === "string" ? primary : undefined;
}
describe("buildQaGatewayConfig", () => {
it("keeps mock-openai as the default provider lane", () => {
const cfg = buildQaGatewayConfig({
@@ -12,7 +23,7 @@ describe("buildQaGatewayConfig", () => {
workspaceDir: "/tmp/qa-workspace",
});
expect(cfg.agents?.defaults?.model?.primary).toBe("mock-openai/gpt-5.4");
expect(getPrimaryModel(cfg.agents?.defaults?.model)).toBe("mock-openai/gpt-5.4");
expect(cfg.models?.providers?.["mock-openai"]?.baseUrl).toBe("http://127.0.0.1:44080/v1");
expect(cfg.plugins?.allow).toEqual(["memory-core", "qa-channel"]);
expect(cfg.plugins?.entries?.["memory-core"]).toEqual({ enabled: true });
@@ -32,8 +43,8 @@ describe("buildQaGatewayConfig", () => {
alternateModel: "openai/gpt-5.4",
});
expect(cfg.agents?.defaults?.model?.primary).toBe("openai/gpt-5.4");
expect(cfg.agents?.list?.[0]?.model?.primary).toBe("openai/gpt-5.4");
expect(getPrimaryModel(cfg.agents?.defaults?.model)).toBe("openai/gpt-5.4");
expect(getPrimaryModel(cfg.agents?.list?.[0]?.model)).toBe("openai/gpt-5.4");
expect(cfg.models).toBeUndefined();
expect(cfg.plugins?.allow).toEqual(["memory-core", "openai", "qa-channel"]);
expect(cfg.plugins?.entries?.openai).toEqual({ enabled: true });

View File

@@ -1,4 +1,5 @@
import type { OpenClawConfig } from "openclaw/plugin-sdk/core";
import type { ModelProviderConfig } from "openclaw/plugin-sdk/provider-model-shared";
const DISABLED_BUNDLED_CHANNELS = Object.freeze({
bluebubbles: { enabled: false },
@@ -37,6 +38,44 @@ export function buildQaGatewayConfig(params: {
alternateModel?: string;
fastMode?: boolean;
}): OpenClawConfig {
const mockProviderBaseUrl = params.providerBaseUrl ?? "http://127.0.0.1:44080/v1";
const mockOpenAiProvider: ModelProviderConfig = {
baseUrl: mockProviderBaseUrl,
apiKey: "test",
api: "openai-responses",
models: [
{
id: "gpt-5.4",
name: "gpt-5.4",
api: "openai-responses",
reasoning: false,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128_000,
maxTokens: 4096,
},
{
id: "gpt-5.4-alt",
name: "gpt-5.4-alt",
api: "openai-responses",
reasoning: false,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128_000,
maxTokens: 4096,
},
],
};
const providerMode = params.providerMode ?? "mock-openai";
const allowedPlugins =
providerMode === "live-openai"
@@ -131,43 +170,7 @@ export function buildQaGatewayConfig(params: {
models: {
mode: "replace",
providers: {
"mock-openai": {
baseUrl: params.providerBaseUrl,
apiKey: "test",
api: "openai-responses",
models: [
{
id: "gpt-5.4",
name: "gpt-5.4",
api: "openai-responses",
reasoning: false,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128_000,
maxTokens: 4096,
},
{
id: "gpt-5.4-alt",
name: "gpt-5.4-alt",
api: "openai-responses",
reasoning: false,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 128_000,
maxTokens: 4096,
},
],
},
"mock-openai": mockOpenAiProvider,
},
},
}