mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-05 04:40:23 +00:00
feat(openai): add gpt-5.4 support for API and Codex OAuth (#36590)
* feat(openai): add gpt-5.4 support and priority processing
* feat(openai-codex): add gpt-5.4 oauth support
* fix(openai): preserve provider overrides in gpt-5.4 fallback
* fix(openai-codex): keep xhigh for gpt-5.4 default
* fix(models): preserve configured overrides in list output
* fix(models): close gpt-5.4 integration gaps
* fix(openai): scope service tier to public api
* fix(openai): complete prep followups for gpt-5.4 support (#36590) (thanks @dorukardahan)

---------

Co-authored-by: Tyler Yust <TYTYYUST@YAHOO.COM>
This commit is contained in:
@@ -37,6 +37,36 @@ function createTemplateModel(provider: string, id: string): Model<Api> {
|
||||
} as Model<Api>;
|
||||
}
|
||||
|
||||
function createOpenAITemplateModel(id: string): Model<Api> {
|
||||
return {
|
||||
id,
|
||||
name: id,
|
||||
provider: "openai",
|
||||
api: "openai-responses",
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
input: ["text", "image"],
|
||||
reasoning: true,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 400_000,
|
||||
maxTokens: 32_768,
|
||||
} as Model<Api>;
|
||||
}
|
||||
|
||||
function createOpenAICodexTemplateModel(id: string): Model<Api> {
|
||||
return {
|
||||
id,
|
||||
name: id,
|
||||
provider: "openai-codex",
|
||||
api: "openai-codex-responses",
|
||||
baseUrl: "https://chatgpt.com/backend-api",
|
||||
input: ["text", "image"],
|
||||
reasoning: true,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: 272_000,
|
||||
maxTokens: 128_000,
|
||||
} as Model<Api>;
|
||||
}
|
||||
|
||||
function createRegistry(models: Record<string, Model<Api>>): ModelRegistry {
|
||||
return {
|
||||
find(provider: string, modelId: string) {
|
||||
@@ -235,6 +265,12 @@ describe("normalizeModelCompat", () => {
|
||||
});
|
||||
|
||||
describe("isModernModelRef", () => {
|
||||
it("includes OpenAI gpt-5.4 variants in modern selection", () => {
|
||||
expect(isModernModelRef({ provider: "openai", id: "gpt-5.4" })).toBe(true);
|
||||
expect(isModernModelRef({ provider: "openai", id: "gpt-5.4-pro" })).toBe(true);
|
||||
expect(isModernModelRef({ provider: "openai-codex", id: "gpt-5.4" })).toBe(true);
|
||||
});
|
||||
|
||||
it("excludes opencode minimax variants from modern selection", () => {
|
||||
expect(isModernModelRef({ provider: "opencode", id: "minimax-m2.5" })).toBe(false);
|
||||
expect(isModernModelRef({ provider: "opencode", id: "minimax-m2.5" })).toBe(false);
|
||||
@@ -247,6 +283,57 @@ describe("isModernModelRef", () => {
|
||||
});
|
||||
|
||||
describe("resolveForwardCompatModel", () => {
|
||||
it("resolves openai gpt-5.4 via gpt-5.2 template", () => {
|
||||
const registry = createRegistry({
|
||||
"openai/gpt-5.2": createOpenAITemplateModel("gpt-5.2"),
|
||||
});
|
||||
const model = resolveForwardCompatModel("openai", "gpt-5.4", registry);
|
||||
expectResolvedForwardCompat(model, { provider: "openai", id: "gpt-5.4" });
|
||||
expect(model?.api).toBe("openai-responses");
|
||||
expect(model?.baseUrl).toBe("https://api.openai.com/v1");
|
||||
expect(model?.contextWindow).toBe(1_050_000);
|
||||
expect(model?.maxTokens).toBe(128_000);
|
||||
});
|
||||
|
||||
it("resolves openai gpt-5.4 without templates using normalized fallback defaults", () => {
|
||||
const registry = createRegistry({});
|
||||
|
||||
const model = resolveForwardCompatModel("openai", "gpt-5.4", registry);
|
||||
|
||||
expectResolvedForwardCompat(model, { provider: "openai", id: "gpt-5.4" });
|
||||
expect(model?.api).toBe("openai-responses");
|
||||
expect(model?.baseUrl).toBe("https://api.openai.com/v1");
|
||||
expect(model?.input).toEqual(["text", "image"]);
|
||||
expect(model?.reasoning).toBe(true);
|
||||
expect(model?.contextWindow).toBe(1_050_000);
|
||||
expect(model?.maxTokens).toBe(128_000);
|
||||
expect(model?.cost).toEqual({ input: 0, output: 0, cacheRead: 0, cacheWrite: 0 });
|
||||
});
|
||||
|
||||
it("resolves openai gpt-5.4-pro via template fallback", () => {
|
||||
const registry = createRegistry({
|
||||
"openai/gpt-5.2": createOpenAITemplateModel("gpt-5.2"),
|
||||
});
|
||||
const model = resolveForwardCompatModel("openai", "gpt-5.4-pro", registry);
|
||||
expectResolvedForwardCompat(model, { provider: "openai", id: "gpt-5.4-pro" });
|
||||
expect(model?.api).toBe("openai-responses");
|
||||
expect(model?.baseUrl).toBe("https://api.openai.com/v1");
|
||||
expect(model?.contextWindow).toBe(1_050_000);
|
||||
expect(model?.maxTokens).toBe(128_000);
|
||||
});
|
||||
|
||||
it("resolves openai-codex gpt-5.4 via codex template fallback", () => {
|
||||
const registry = createRegistry({
|
||||
"openai-codex/gpt-5.2-codex": createOpenAICodexTemplateModel("gpt-5.2-codex"),
|
||||
});
|
||||
const model = resolveForwardCompatModel("openai-codex", "gpt-5.4", registry);
|
||||
expectResolvedForwardCompat(model, { provider: "openai-codex", id: "gpt-5.4" });
|
||||
expect(model?.api).toBe("openai-codex-responses");
|
||||
expect(model?.baseUrl).toBe("https://chatgpt.com/backend-api");
|
||||
expect(model?.contextWindow).toBe(272_000);
|
||||
expect(model?.maxTokens).toBe(128_000);
|
||||
});
|
||||
|
||||
it("resolves anthropic opus 4.6 via 4.5 template", () => {
|
||||
const registry = createRegistry({
|
||||
"anthropic/claude-opus-4-5": createTemplateModel("anthropic", "claude-opus-4-5"),
|
||||
|
||||
Reference in New Issue
Block a user