feat: add Seedance 2 fal video models

This commit is contained in:
Peter Steinberger
2026-04-11 02:18:25 +01:00
parent 21dfea837c
commit b56cd114e7
5 changed files with 184 additions and 10 deletions

View File

@@ -20,6 +20,7 @@ Docs: https://docs.openclaw.ai
- Control UI/dreaming: simplify the Scene and Diary surfaces, preserve unknown phase state for partial status payloads, and stabilize waiting-entry recency ordering so Dreaming status and review lists stay clear and deterministic. (#64035) Thanks @davemorin.
- Agents: add an opt-in strict-agentic embedded Pi execution contract for GPT-5-family runs so plan-only or filler turns keep acting until they hit a real blocker. (#64241) Thanks @100yenadmin.
- Docs i18n: chunk raw doc translation, reject truncated tagged outputs, avoid ambiguous body-only wrapper unwrapping, and recover from terminated Pi translation sessions without changing the default `openai/gpt-5.4` path. (#62969, #63808) Thanks @hxy91819.
- Tools/video generation: add Seedance 2.0 model refs to the bundled fal provider and submit the provider-specific duration, resolution, audio, and seed metadata fields needed for live Seedance 2.0 runs.
### Fixes

View File

@@ -69,15 +69,20 @@ The bundled `fal` video-generation provider defaults to
- Modes: text-to-video and single-image reference flows
- Runtime: queue-backed submit/status/result flow for long-running jobs
- Seedance 2.0 model refs:
- `fal/bytedance/seedance-2.0/fast/text-to-video`
- `fal/bytedance/seedance-2.0/fast/image-to-video`
- `fal/bytedance/seedance-2.0/text-to-video`
- `fal/bytedance/seedance-2.0/image-to-video`
To use fal as the default video provider:
To use Seedance 2.0 as the default video model:
```json5
{
agents: {
defaults: {
videoGenerationModel: {
primary: "fal/fal-ai/minimax/video-01-live",
primary: "fal/bytedance/seedance-2.0/fast/text-to-video",
},
},
},

View File

@@ -201,6 +201,20 @@ entries.
}
```
Seedance 2.0 on fal can be pinned with:
```json5
{
agents: {
defaults: {
videoGenerationModel: {
primary: "fal/bytedance/seedance-2.0/fast/text-to-video",
},
},
},
}
```
## Provider notes
| Provider | Notes |
@@ -208,7 +222,7 @@ entries.
| Alibaba | Uses DashScope/Model Studio async endpoint. Reference images and videos must be remote `http(s)` URLs. |
| BytePlus | Single image reference only. |
| ComfyUI | Workflow-driven local or cloud execution. Supports text-to-video and image-to-video through the configured graph. |
| fal | Uses queue-backed flow for long-running jobs. Single image reference only. |
| fal | Uses queue-backed flow for long-running jobs. Single image reference only. Includes Seedance 2.0 text-to-video and image-to-video model refs. |
| Google | Uses Gemini/Veo. Supports one image or one video reference. |
| MiniMax | Single image reference only. |
| OpenAI | Only `size` override is forwarded. Other style overrides (`aspectRatio`, `resolution`, `audio`, `watermark`) are ignored with a warning. |

View File

@@ -115,4 +115,110 @@ describe("fal video generation provider", () => {
requestId: "req-123",
});
});
// The provider's model list must advertise all four Seedance 2.0 refs.
it("exposes Seedance 2 models", () => {
  const seedanceRefs = [
    "bytedance/seedance-2.0/fast/text-to-video",
    "bytedance/seedance-2.0/fast/image-to-video",
    "bytedance/seedance-2.0/text-to-video",
    "bytedance/seedance-2.0/image-to-video",
  ];
  const provider = buildFalVideoGenerationProvider();
  expect(provider.models).toEqual(expect.arrayContaining(seedanceRefs));
});
// Happy-path Seedance 2.0 text-to-video run: verifies the queue submit URL,
// the Seedance-specific request-body schema (snake_case keys, lowercased
// resolution, stringified duration, generate_audio), and that the seed from
// the completed payload is surfaced in result metadata.
it("submits Seedance 2 requests with fal schema fields", async () => {
  // Stub auth and HTTP config resolution so no real credentials or network
  // configuration are needed.
  vi.spyOn(providerAuth, "resolveApiKeyForProvider").mockResolvedValue({
    apiKey: "fal-key",
    source: "env",
    mode: "api-key",
  });
  vi.spyOn(providerHttp, "resolveProviderHttpRequestConfig").mockReturnValue({
    baseUrl: "https://fal.run",
    allowPrivateNetwork: false,
    headers: new Headers({
      Authorization: "Key fal-key",
      "Content-Type": "application/json",
    }),
    dispatcherPolicy: undefined,
    requestConfig: createMockRequestConfig(),
  });
  // Treat every mocked response as OK so the provider never throws on status.
  vi.spyOn(providerHttp, "assertOkOrThrowHttpError").mockResolvedValue(undefined);
  _setFalVideoFetchGuardForTesting(fetchGuardMock as never);
  // Four sequential fetches, in the order the provider issues them:
  // 1) queue submit, 2) status poll, 3) result fetch, 4) video download.
  fetchGuardMock
    .mockResolvedValueOnce({
      // 1) Queue submit returns the request id plus status/result URLs.
      response: {
        json: async () => ({
          request_id: "seedance-req-123",
          status_url:
            "https://queue.fal.run/bytedance/seedance-2.0/fast/text-to-video/requests/seedance-req-123/status",
          response_url:
            "https://queue.fal.run/bytedance/seedance-2.0/fast/text-to-video/requests/seedance-req-123",
        }),
      },
      release: vi.fn(async () => {}),
    })
    .mockResolvedValueOnce({
      // 2) Status poll reports the job finished on the first check.
      response: {
        json: async () => ({
          status: "COMPLETED",
        }),
      },
      release: vi.fn(async () => {}),
    })
    .mockResolvedValueOnce({
      // 3) Result fetch carries the video URL and the seed fal selected.
      response: {
        json: async () => ({
          status: "COMPLETED",
          response: {
            video: { url: "https://fal.run/files/seedance.mp4" },
            seed: 42,
          },
        }),
      },
      release: vi.fn(async () => {}),
    })
    .mockResolvedValueOnce({
      // 4) Binary download of the generated video file.
      response: {
        headers: new Headers({ "content-type": "video/mp4" }),
        arrayBuffer: async () => Buffer.from("seedance-mp4-bytes"),
      },
      release: vi.fn(async () => {}),
    });
  const provider = buildFalVideoGenerationProvider();
  // Request uses the uppercase "720P" and numeric duration that the provider
  // must translate into fal's Seedance schema.
  const result = await provider.generateVideo({
    provider: "fal",
    model: "bytedance/seedance-2.0/fast/text-to-video",
    prompt: "A chrome lobster drives a tiny kart across a neon pier",
    durationSeconds: 7,
    aspectRatio: "16:9",
    resolution: "720P",
    audio: false,
    cfg: {},
  });
  // Submit must target the queue host with the full Seedance model path.
  expect(fetchGuardMock).toHaveBeenNthCalledWith(
    1,
    expect.objectContaining({
      url: "https://queue.fal.run/bytedance/seedance-2.0/fast/text-to-video",
    }),
  );
  const submitBody = JSON.parse(
    String(fetchGuardMock.mock.calls[0]?.[0]?.init?.body ?? "{}"),
  ) as Record<string, unknown>;
  // Seedance schema: lowercased resolution, duration as a string, and
  // `generate_audio` rather than a bare `audio` flag.
  expect(submitBody).toEqual({
    prompt: "A chrome lobster drives a tiny kart across a neon pier",
    aspect_ratio: "16:9",
    resolution: "720p",
    duration: "7",
    generate_audio: false,
  });
  // Seed from the completed payload propagates into result metadata.
  expect(result.metadata).toEqual({
    requestId: "seedance-req-123",
    seed: 42,
  });
});
});

View File

@@ -22,6 +22,13 @@ import type {
// Base URL for synchronous fal model invocations.
const DEFAULT_FAL_BASE_URL = "https://fal.run";
// Base URL for fal's queue API, used for long-running video jobs.
const DEFAULT_FAL_QUEUE_BASE_URL = "https://queue.fal.run";
// Model used when the caller does not pin an explicit fal video model.
const DEFAULT_FAL_VIDEO_MODEL = "fal-ai/minimax/video-01-live";
// Seedance 2.0 model refs exposed by this provider: fast and standard tiers,
// each with text-to-video and image-to-video endpoints.
const SEEDANCE_2_VIDEO_MODELS = [
  "bytedance/seedance-2.0/fast/text-to-video",
  "bytedance/seedance-2.0/fast/image-to-video",
  "bytedance/seedance-2.0/text-to-video",
  "bytedance/seedance-2.0/image-to-video",
] as const;
// Whole-second durations (4-15s) advertised per Seedance 2.0 model in the
// provider's generate/imageToVideo capabilities.
const SEEDANCE_2_DURATION_SECONDS = [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] as const;
// Timeout for an individual HTTP request (30s).
const DEFAULT_HTTP_TIMEOUT_MS = 30_000;
// Overall bound for a queued operation (10 min) — presumably caps the whole
// submit/poll/result cycle; confirm against the polling loop.
const DEFAULT_OPERATION_TIMEOUT_MS = 600_000;
// Delay between queue status polls (5s).
const POLL_INTERVAL_MS = 5_000;
@@ -36,6 +43,7 @@ type FalVideoResponse = {
content_type?: string;
}>;
prompt?: string;
seed?: number;
};
type FalQueueResponse = {
@@ -114,6 +122,34 @@ function isFalMiniMaxLiveModel(model: string): boolean {
return normalizeLowercaseStringOrEmpty(model) === DEFAULT_FAL_VIDEO_MODEL;
}
/** True when `model` is one of the bundled Seedance 2.0 model refs. */
function isFalSeedance2Model(model: string): boolean {
  // Widen the readonly tuple to string[] so `includes` accepts any string
  // without asserting the argument to the tuple's element type.
  const seedanceRefs: readonly string[] = SEEDANCE_2_VIDEO_MODELS;
  return seedanceRefs.includes(model);
}
/**
 * Maps the request's resolution onto the value fal expects for the model.
 * Seedance 2.0 endpoints take the lowercase form (e.g. "720P" -> "720p");
 * every other fal model receives the caller's resolution untouched.
 * Returns undefined when no resolution was requested.
 */
function resolveFalResolution(resolution: VideoGenerationRequest["resolution"], model: string) {
  if (resolution && isFalSeedance2Model(model)) {
    return resolution.toLowerCase();
  }
  return resolution || undefined;
}
/**
 * Normalizes a requested duration into the payload form fal expects.
 * Rounds to whole seconds with a floor of 1. Seedance 2.0 endpoints want the
 * value as a string; all other models take a number. Missing or non-finite
 * durations yield undefined so the field is omitted from the request body.
 */
function resolveFalDuration(
  durationSeconds: number | undefined,
  model: string,
): number | string | undefined {
  if (typeof durationSeconds !== "number" || !Number.isFinite(durationSeconds)) {
    return undefined;
  }
  const wholeSeconds = Math.max(1, Math.round(durationSeconds));
  return isFalSeedance2Model(model) ? String(wholeSeconds) : wholeSeconds;
}
function buildFalVideoRequestBody(params: {
req: VideoGenerationRequest;
model: string;
@@ -143,14 +179,16 @@ function buildFalVideoRequestBody(params: {
if (size) {
requestBody.size = size;
}
if (params.req.resolution) {
requestBody.resolution = params.req.resolution;
const resolution = resolveFalResolution(params.req.resolution, params.model);
if (resolution) {
requestBody.resolution = resolution;
}
if (
typeof params.req.durationSeconds === "number" &&
Number.isFinite(params.req.durationSeconds)
) {
requestBody.duration = Math.max(1, Math.round(params.req.durationSeconds));
const duration = resolveFalDuration(params.req.durationSeconds, params.model);
if (duration) {
requestBody.duration = duration;
}
if (isFalSeedance2Model(params.model) && typeof params.req.audio === "boolean") {
requestBody.generate_audio = params.req.audio;
}
return requestBody;
}
@@ -247,6 +285,7 @@ export function buildFalVideoGenerationProvider(): VideoGenerationProvider {
defaultModel: DEFAULT_FAL_VIDEO_MODEL,
models: [
DEFAULT_FAL_VIDEO_MODEL,
...SEEDANCE_2_VIDEO_MODELS,
"fal-ai/kling-video/v2.1/master/text-to-video",
"fal-ai/wan/v2.2-a14b/text-to-video",
"fal-ai/wan/v2.2-a14b/image-to-video",
@@ -259,17 +298,25 @@ export function buildFalVideoGenerationProvider(): VideoGenerationProvider {
capabilities: {
generate: {
maxVideos: 1,
supportedDurationSecondsByModel: Object.fromEntries(
SEEDANCE_2_VIDEO_MODELS.map((model) => [model, SEEDANCE_2_DURATION_SECONDS]),
),
supportsAspectRatio: true,
supportsResolution: true,
supportsSize: true,
supportsAudio: true,
},
imageToVideo: {
enabled: true,
maxVideos: 1,
maxInputImages: 1,
supportedDurationSecondsByModel: Object.fromEntries(
SEEDANCE_2_VIDEO_MODELS.map((model) => [model, SEEDANCE_2_DURATION_SECONDS]),
),
supportsAspectRatio: true,
supportsResolution: true,
supportsSize: true,
supportsAudio: true,
},
videoToVideo: {
enabled: false,
@@ -349,6 +396,7 @@ export function buildFalVideoGenerationProvider(): VideoGenerationProvider {
? { requestId: normalizeOptionalString(submitted.request_id) }
: {}),
...(videoPayload.prompt ? { prompt: videoPayload.prompt } : {}),
...(typeof videoPayload.seed === "number" ? { seed: videoPayload.seed } : {}),
},
};
},