mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-05 15:50:25 +00:00
test: move extension-owned coverage into plugins
This commit is contained in:
84
extensions/openai/media-understanding-provider.test.ts
Normal file
84
extensions/openai/media-understanding-provider.test.ts
Normal file
@@ -0,0 +1,84 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
createAuthCaptureJsonFetch,
|
||||
createRequestCaptureJsonFetch,
|
||||
installPinnedHostnameTestHooks,
|
||||
} from "../../src/media-understanding/audio.test-helpers.js";
|
||||
import { transcribeOpenAiAudio } from "./media-understanding-provider.js";
|
||||
|
||||
installPinnedHostnameTestHooks();
|
||||
|
||||
describe("transcribeOpenAiAudio", () => {
|
||||
it("respects lowercase authorization header overrides", async () => {
|
||||
const { fetchFn, getAuthHeader } = createAuthCaptureJsonFetch({ text: "ok" });
|
||||
|
||||
const result = await transcribeOpenAiAudio({
|
||||
buffer: Buffer.from("audio"),
|
||||
fileName: "note.mp3",
|
||||
apiKey: "test-key",
|
||||
timeoutMs: 1000,
|
||||
headers: { authorization: "Bearer override" },
|
||||
fetchFn,
|
||||
});
|
||||
|
||||
expect(getAuthHeader()).toBe("Bearer override");
|
||||
expect(result.text).toBe("ok");
|
||||
});
|
||||
|
||||
it("builds the expected request payload", async () => {
|
||||
const { fetchFn, getRequest } = createRequestCaptureJsonFetch({ text: "hello" });
|
||||
|
||||
const result = await transcribeOpenAiAudio({
|
||||
buffer: Buffer.from("audio-bytes"),
|
||||
fileName: "voice.wav",
|
||||
apiKey: "test-key",
|
||||
timeoutMs: 1234,
|
||||
baseUrl: "https://api.example.com/v1/",
|
||||
model: " ",
|
||||
language: " en ",
|
||||
prompt: " hello ",
|
||||
mime: "audio/wav",
|
||||
headers: { "X-Custom": "1" },
|
||||
fetchFn,
|
||||
});
|
||||
const { url: seenUrl, init: seenInit } = getRequest();
|
||||
|
||||
expect(result.model).toBe("gpt-4o-mini-transcribe");
|
||||
expect(result.text).toBe("hello");
|
||||
expect(seenUrl).toBe("https://api.example.com/v1/audio/transcriptions");
|
||||
expect(seenInit?.method).toBe("POST");
|
||||
expect(seenInit?.signal).toBeInstanceOf(AbortSignal);
|
||||
|
||||
const headers = new Headers(seenInit?.headers);
|
||||
expect(headers.get("authorization")).toBe("Bearer test-key");
|
||||
expect(headers.get("x-custom")).toBe("1");
|
||||
|
||||
const form = seenInit?.body as FormData;
|
||||
expect(form).toBeInstanceOf(FormData);
|
||||
expect(form.get("model")).toBe("gpt-4o-mini-transcribe");
|
||||
expect(form.get("language")).toBe("en");
|
||||
expect(form.get("prompt")).toBe("hello");
|
||||
const file = form.get("file") as Blob | { type?: string; name?: string } | null;
|
||||
expect(file).not.toBeNull();
|
||||
if (file) {
|
||||
expect(file.type).toBe("audio/wav");
|
||||
if ("name" in file && typeof file.name === "string") {
|
||||
expect(file.name).toBe("voice.wav");
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
it("throws when the provider response omits text", async () => {
|
||||
const { fetchFn } = createRequestCaptureJsonFetch({});
|
||||
|
||||
await expect(
|
||||
transcribeOpenAiAudio({
|
||||
buffer: Buffer.from("audio-bytes"),
|
||||
fileName: "voice.wav",
|
||||
apiKey: "test-key",
|
||||
timeoutMs: 1234,
|
||||
fetchFn,
|
||||
}),
|
||||
).rejects.toThrow("Audio transcription response missing text");
|
||||
});
|
||||
});
|
||||
1
extensions/openai/test-api.ts
Normal file
1
extensions/openai/test-api.ts
Normal file
@@ -0,0 +1 @@
|
||||
// Test-only entry point: re-exports the speech provider factory so test suites
// can import it without reaching into the extension's internal module layout.
export { buildOpenAISpeechProvider } from "./speech-provider.js";
|
||||
73
extensions/openai/tts.test.ts
Normal file
73
extensions/openai/tts.test.ts
Normal file
@@ -0,0 +1,73 @@
|
||||
import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
isValidOpenAIModel,
|
||||
isValidOpenAIVoice,
|
||||
OPENAI_TTS_MODELS,
|
||||
OPENAI_TTS_VOICES,
|
||||
resolveOpenAITtsInstructions,
|
||||
} from "./tts.js";
|
||||
|
||||
describe("openai tts", () => {
|
||||
describe("isValidOpenAIVoice", () => {
|
||||
it("accepts all valid OpenAI voices including newer additions", () => {
|
||||
for (const voice of OPENAI_TTS_VOICES) {
|
||||
expect(isValidOpenAIVoice(voice)).toBe(true);
|
||||
}
|
||||
for (const newerVoice of ["ballad", "cedar", "juniper", "marin", "verse"]) {
|
||||
expect(isValidOpenAIVoice(newerVoice), newerVoice).toBe(true);
|
||||
}
|
||||
});
|
||||
|
||||
it("rejects invalid voice names", () => {
|
||||
expect(isValidOpenAIVoice("invalid")).toBe(false);
|
||||
expect(isValidOpenAIVoice("")).toBe(false);
|
||||
expect(isValidOpenAIVoice("ALLOY")).toBe(false);
|
||||
expect(isValidOpenAIVoice("alloy ")).toBe(false);
|
||||
expect(isValidOpenAIVoice(" alloy")).toBe(false);
|
||||
});
|
||||
|
||||
it("treats the default endpoint with trailing slash as the default endpoint", () => {
|
||||
expect(isValidOpenAIVoice("kokoro-custom-voice", "https://api.openai.com/v1/")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("isValidOpenAIModel", () => {
|
||||
it("matches the supported model set and rejects unsupported values", () => {
|
||||
expect(OPENAI_TTS_MODELS).toContain("gpt-4o-mini-tts");
|
||||
expect(OPENAI_TTS_MODELS).toContain("tts-1");
|
||||
expect(OPENAI_TTS_MODELS).toContain("tts-1-hd");
|
||||
expect(OPENAI_TTS_MODELS).toHaveLength(3);
|
||||
expect(Array.isArray(OPENAI_TTS_MODELS)).toBe(true);
|
||||
expect(OPENAI_TTS_MODELS.length).toBeGreaterThan(0);
|
||||
const cases = [
|
||||
{ model: "gpt-4o-mini-tts", expected: true },
|
||||
{ model: "tts-1", expected: true },
|
||||
{ model: "tts-1-hd", expected: true },
|
||||
{ model: "invalid", expected: false },
|
||||
{ model: "", expected: false },
|
||||
{ model: "gpt-4", expected: false },
|
||||
] as const;
|
||||
for (const testCase of cases) {
|
||||
expect(isValidOpenAIModel(testCase.model), testCase.model).toBe(testCase.expected);
|
||||
}
|
||||
});
|
||||
|
||||
it("treats the default endpoint with trailing slash as the default endpoint", () => {
|
||||
expect(isValidOpenAIModel("kokoro-custom-model", "https://api.openai.com/v1/")).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("resolveOpenAITtsInstructions", () => {
|
||||
it("keeps instructions only for gpt-4o-mini-tts variants", () => {
|
||||
expect(resolveOpenAITtsInstructions("gpt-4o-mini-tts", " Speak warmly ")).toBe(
|
||||
"Speak warmly",
|
||||
);
|
||||
expect(resolveOpenAITtsInstructions("gpt-4o-mini-tts-2025-12-15", "Speak warmly")).toBe(
|
||||
"Speak warmly",
|
||||
);
|
||||
expect(resolveOpenAITtsInstructions("tts-1", "Speak warmly")).toBeUndefined();
|
||||
expect(resolveOpenAITtsInstructions("tts-1-hd", "Speak warmly")).toBeUndefined();
|
||||
expect(resolveOpenAITtsInstructions("gpt-4o-mini-tts", " ")).toBeUndefined();
|
||||
});
|
||||
});
|
||||
});
|
||||
Reference in New Issue
Block a user