fix: prefer OpenAI media for Codex defaults

This commit is contained in:
Peter Steinberger
2026-04-28 11:30:01 +01:00
parent 32c987626b
commit 35bc13f9ef
10 changed files with 173 additions and 19 deletions

View File

@@ -14,6 +14,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Agents/media: register detached `video_generate` and `music_generate` tool run contexts until terminal status, so Discord-backed provider jobs stay live in `/tasks` instead of becoming `lost` when the parent chat run context disappears. Thanks @vincentkoc.
- Agents/media: prefer OpenAI image and video providers when the default model uses the OpenAI Codex auth alias, so auto media generation no longer falls through to Fal before GPT Image or Sora. Thanks @vincentkoc.
- Tasks/media: infer agent ownership for session-scoped task records so `/tasks` agent-local fallback includes session-backed `video_generate` and other async media jobs even when the current chat session has no linked rows. Thanks @vincentkoc.
- Agents/media: keep long-running `video_generate` and `music_generate` tasks fresh while provider jobs are still pending, so task maintenance does not mark active Discord media renders lost before completion. Thanks @vincentkoc.
- CLI/status: treat scope-limited gateway probes as reachable-but-degraded in shared status scans, so `openclaw status --all` no longer reports a live gateway as unreachable after `missing scope: operator.read`. Fixes #49180; supersedes #47981. Thanks @openjay.

View File

@@ -194,6 +194,7 @@ describe("openai image generation provider", () => {
const provider = buildOpenAIImageGenerationProvider();
expect(provider.defaultModel).toBe("gpt-image-2");
expect(provider.aliases).toContain("openai-codex");
expect(provider.models).toEqual([
"gpt-image-2",
"gpt-image-1.5",

View File

@@ -537,6 +537,7 @@ function createOpenAIImageGenerationProviderBase(params: {
}): ImageGenerationProvider {
return {
id: params.id,
aliases: ["openai-codex"],
label: params.label,
defaultModel: DEFAULT_OPENAI_IMAGE_MODEL,
models: [...OPENAI_IMAGE_MODELS],

View File

@@ -17,6 +17,12 @@ beforeAll(async () => {
installProviderHttpMockCleanup();
describe("openai video generation provider", () => {
// The "openai-codex" alias lets default-model ordering treat a Codex-authed
// primary model (e.g. "openai-codex/...") as belonging to the OpenAI provider.
it("declares the openai-codex alias for default-model ordering", () => {
const provider = buildOpenAIVideoGenerationProvider();
expect(provider.aliases).toContain("openai-codex");
});
// NOTE(review): the exact capability expectations live in the shared helper
// expectExplicitVideoGenerationCapabilities — see its definition for details.
it("declares explicit mode capabilities", () => {
expectExplicitVideoGenerationCapabilities(buildOpenAIVideoGenerationProvider());
});

View File

@@ -180,6 +180,7 @@ async function downloadOpenAIVideo(params: {
export function buildOpenAIVideoGenerationProvider(): VideoGenerationProvider {
return {
id: "openai",
aliases: ["openai-codex"],
label: "OpenAI",
defaultModel: DEFAULT_OPENAI_VIDEO_MODEL,
models: [DEFAULT_OPENAI_VIDEO_MODEL, "sora-2-pro"],

View File

@@ -339,6 +339,55 @@ describe("createImageGenerateTool", () => {
});
});
// Regression test: when the configured default model uses the "openai-codex"
// auth alias, the image-generation resolver should pick the OpenAI provider's
// default model first and demote other configured providers (fal) to fallbacks.
it("prefers OpenAI image generation when the default model uses its Codex provider alias", () => {
// Two configured providers; only "openai" declares the codex alias. Listed
// with fal first so the test proves ordering comes from the alias match,
// not from the original listing order.
vi.spyOn(imageGenerationRuntime, "listRuntimeImageGenerationProviders").mockReturnValue([
{
id: "fal",
defaultModel: "fal-ai/flux/dev",
models: ["fal-ai/flux/dev"],
isConfigured: () => true,
capabilities: {
generate: { maxCount: 4 },
edit: { enabled: true, maxInputImages: 1 },
},
// generateImage must never run in this test — resolution only inspects
// provider metadata, so an invocation here is a failure.
generateImage: vi.fn(async () => {
throw new Error("not used");
}),
},
{
id: "openai",
aliases: ["openai-codex"],
defaultModel: "gpt-image-2",
models: ["gpt-image-2"],
isConfigured: () => true,
capabilities: {
generate: { maxCount: 4 },
edit: { enabled: true, maxInputImages: 5 },
},
generateImage: vi.fn(async () => {
throw new Error("not used");
}),
},
]);
expect(
resolveImageGenerationModelConfigForTool({
cfg: {
agents: {
defaults: {
model: {
// Primary model uses the Codex alias, not the canonical
// "openai" provider id — the alias must still match.
primary: "openai-codex/gpt-5.5",
},
},
},
},
}),
).toEqual({
// Canonical provider id ("openai") appears in the result, not the alias.
primary: "openai/gpt-image-2",
fallbacks: ["fal/fal-ai/flux/dev"],
});
});
it("prefers the primary model provider when multiple image providers have auth", () => {
stubImageGenerationProviders();
vi.stubEnv("OPENAI_API_KEY", "openai-test");

View File

@@ -192,7 +192,7 @@ export function resolveCapabilityModelCandidatesForTool(params: {
agentDir?: string;
providers: CapabilityProvider[];
}): string[] {
const providerDefaults = new Map<string, string>();
const providerDefaults = new Map<string, { ref: string; aliases: string[] }>();
for (const provider of params.providers) {
const providerId = provider.id.trim();
const modelId = provider.defaultModel?.trim();
@@ -209,25 +209,36 @@ export function resolveCapabilityModelCandidatesForTool(params: {
) {
continue;
}
providerDefaults.set(providerId, `${providerId}/${modelId}`);
const aliases = (provider.aliases ?? []).flatMap((alias) => {
const normalized = normalizeProviderId(alias);
return normalized ? [normalized] : [];
});
providerDefaults.set(providerId, { ref: `${providerId}/${modelId}`, aliases });
}
const primaryProvider = resolveDefaultModelRef(params.cfg).provider;
const normalizedPrimaryProvider = normalizeProviderId(primaryProvider);
const providerIds = [...providerDefaults.keys()].toSorted();
const matchesPrimaryProvider = (providerId: string): boolean => {
const entry = providerDefaults.get(providerId);
return (
normalizeProviderId(providerId) === normalizedPrimaryProvider ||
(entry?.aliases ?? []).includes(normalizedPrimaryProvider)
);
};
const orderedProviders = [
primaryProvider,
...[...providerDefaults.keys()]
.filter((providerId) => providerId !== primaryProvider)
.toSorted(),
...providerIds.filter(matchesPrimaryProvider),
...providerIds.filter((providerId) => !matchesPrimaryProvider(providerId)),
];
const orderedRefs: string[] = [];
const seen = new Set<string>();
for (const providerId of orderedProviders) {
const ref = providerDefaults.get(providerId);
if (!ref || seen.has(ref)) {
const entry = providerDefaults.get(providerId);
if (!entry || seen.has(entry.ref)) {
continue;
}
seen.add(ref);
orderedRefs.push(ref);
seen.add(entry.ref);
orderedRefs.push(entry.ref);
}
return orderedRefs;
}

View File

@@ -5,7 +5,10 @@ import * as mediaStore from "../../media/store.js";
import * as webMedia from "../../media/web-media.js";
import * as videoGenerationRuntime from "../../video-generation/runtime.js";
import * as videoGenerateBackground from "./video-generate-background.js";
import { createVideoGenerateTool } from "./video-generate-tool.js";
import {
createVideoGenerateTool,
resolveVideoGenerationModelConfigForTool,
} from "./video-generate-tool.js";
const taskRuntimeInternalMocks = vi.hoisted(() => ({
listTasksForOwnerKey: vi.fn(),
@@ -110,6 +113,45 @@ describe("createVideoGenerateTool", () => {
).not.toBeNull();
});
// Mirrors the image-tool alias test for video: a default model on the
// "openai-codex" alias should promote OpenAI's default video model ("sora-2")
// ahead of other configured providers in the auto-detected ordering.
it("orders auto-detected provider defaults by canonical aliases", () => {
// fal is listed first and is configured; only the alias on "openai" should
// change the resulting order.
vi.spyOn(videoGenerationRuntime, "listRuntimeVideoGenerationProviders").mockReturnValue([
{
id: "fal",
defaultModel: "fal-ai/minimax/video-01-live",
models: ["fal-ai/minimax/video-01-live"],
capabilities: {},
isConfigured: () => true,
// Never invoked — resolution reads metadata only.
generateVideo: vi.fn(async () => ({ videos: [] })),
},
{
id: "openai",
aliases: ["openai-codex"],
defaultModel: "sora-2",
models: ["sora-2"],
capabilities: {},
isConfigured: () => true,
generateVideo: vi.fn(async () => ({ videos: [] })),
},
]);
expect(
resolveVideoGenerationModelConfigForTool({
// asConfig presumably casts a partial config to OpenClawConfig — see
// its definition in this test file's helpers.
cfg: asConfig({
agents: {
defaults: {
model: {
primary: "openai-codex/gpt-5.5",
},
},
},
}),
}),
).toEqual({
// Result uses the canonical "openai" id, with fal demoted to fallback.
primary: "openai/sora-2",
fallbacks: ["fal/fal-ai/minimax/video-01-live"],
});
});
it("generates videos, saves them, and emits MEDIA paths without a session-backed detach", async () => {
taskExecutorMocks.createRunningTaskRun.mockReturnValue({
taskId: "task-123",

View File

@@ -95,6 +95,40 @@ describe("media-generation runtime shared candidates", () => {
]);
});
// Covers the shared candidate-resolution helper directly (no tool wrapper):
// a provider whose aliases include the default model's provider id must sort
// ahead of other providers in the returned candidate list.
it("orders auto-detected provider defaults by canonical aliases", () => {
const candidates = resolveCapabilityModelCandidates({
cfg: {
agents: {
defaults: {
model: {
// Alias form of the OpenAI provider id.
primary: "openai-codex/gpt-5.5",
},
},
},
} as OpenClawConfig,
// No explicit model config — forces the auto-detection path.
modelConfig: undefined,
parseModelRef,
// fal listed first on purpose; alias matching, not listing order,
// must drive the result.
listProviders: () => [
{
id: "fal",
defaultModel: "fal-ai/flux/dev",
isConfigured: () => true,
},
{
id: "openai",
aliases: ["openai-codex"],
defaultModel: "gpt-image-2",
isConfigured: () => true,
},
],
});
expect(candidates).toEqual([
{ provider: "openai", model: "gpt-image-2" },
{ provider: "fal", model: "fal-ai/flux/dev" },
]);
});
it("disables implicit provider expansion when mediaGenerationAutoProviderFallback=false", () => {
const candidates = resolveCapabilityModelCandidates({
cfg: {

View File

@@ -62,6 +62,7 @@ const IMAGE_RESOLUTION_ORDER = ["1K", "2K", "4K"] as const;
// Minimal provider shape consumed by capability-candidate resolution: just
// enough metadata to build and order "provider/model" fallback refs.
type CapabilityProviderCandidate = {
// Canonical provider id (e.g. "openai", "fal").
id: string;
// Alternate provider ids (e.g. "openai-codex") that should be treated as
// equivalent to `id` when matching the configured default model's provider.
aliases?: readonly string[];
// Provider's default model; candidates without one are not usable here.
defaultModel?: string | null;
// Optional auth/configuration probe; receives config and agent directory.
isConfigured?: (ctx: { cfg?: OpenClawConfig; agentDir?: string }) => boolean;
};
@@ -122,7 +123,7 @@ function resolveAutoCapabilityFallbackRefs(params: {
agentDir?: string;
listProviders: (cfg?: OpenClawConfig) => CapabilityProviderCandidate[];
}): string[] {
const providerDefaults = new Map<string, string>();
const providerDefaults = new Map<string, { ref: string; aliases: string[] }>();
for (const provider of params.listProviders(params.cfg)) {
const providerId = normalizeOptionalString(provider.id);
const modelId = normalizeOptionalString(provider.defaultModel);
@@ -138,19 +139,26 @@ function resolveAutoCapabilityFallbackRefs(params: {
) {
continue;
}
providerDefaults.set(providerId, `${providerId}/${modelId}`);
const aliases = (provider.aliases ?? []).flatMap((alias) => {
const normalized = normalizeOptionalString(alias);
return normalized ? [normalized] : [];
});
providerDefaults.set(providerId, { ref: `${providerId}/${modelId}`, aliases });
}
const defaultProvider = resolveCurrentDefaultProviderId(params.cfg);
const providerIds = [...providerDefaults.keys()].toSorted();
const matchesDefaultProvider = (providerId: string): boolean => {
const entry = providerDefaults.get(providerId);
return providerId === defaultProvider || (entry?.aliases ?? []).includes(defaultProvider);
};
const orderedProviders = [
defaultProvider,
...[...providerDefaults.keys()]
.filter((providerId) => providerId !== defaultProvider)
.toSorted(),
...providerIds.filter(matchesDefaultProvider),
...providerIds.filter((providerId) => !matchesDefaultProvider(providerId)),
];
return orderedProviders.flatMap((providerId) => {
const ref = providerDefaults.get(providerId);
return ref ? [ref] : [];
const entry = providerDefaults.get(providerId);
return entry ? [entry.ref] : [];
});
}