mirror of
https://github.com/openclaw/openclaw.git
synced 2026-03-12 07:20:45 +00:00
fix(agents): add forward-compat fallback for google-gemini-cli gemini-3.1-pro/flash-preview (#26570)
* fix(agents): add "google" provider to isReasoningTagProvider to prevent reasoning leak The gemini-api-key auth flow creates a profile with provider "google" (e.g. google/gemini-3-pro-preview), but isReasoningTagProvider only matched "google-gemini-cli" (OAuth) and "google-generative-ai". As a result: - reasoningTagHint was false → system prompt omitted <think>/<final> formatting instructions - enforceFinalTag was false → <final> tag filtering was skipped Raw <think> reasoning output was delivered to the end user. Fix: add the bare "google" provider string to the match list and cover it with two new test cases (exact match + case-insensitive). Fixes #26551 * fix(agents): add forward-compat fallback for google-gemini-cli gemini-3.1-pro/flash-preview gemini-3.1-pro-preview and gemini-3.1-flash-preview are not yet present in pi-ai's built-in google-gemini-cli model catalog (only gemini-3-pro-preview and gemini-3-flash-preview are registered). When users configure these models they get "Unknown model" errors even though Gemini CLI OAuth supports them. The codebase already has isGemini31Model() in extra-params.ts, which proves intent to support these models. Add a resolveGoogleGeminiCli31ForwardCompatModel entry to resolveForwardCompatModel following the same clone-template pattern used for zai/glm-5 and anthropic 4.6 models. - gemini-3.1-pro-* clones gemini-3-pro-preview (with reasoning: true) - gemini-3.1-flash-* clones gemini-3-flash-preview (with reasoning: true) Also add test helpers and three test cases to model.forward-compat.test.ts. Fixes #26524 * Changelog: credit Google Gemini provider fallback fixes --------- Co-authored-by: Vincent Koc <vincentkoc@ieee.org>
This commit is contained in:
@@ -72,6 +72,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Gateway/macOS restart-loop hardening: detect OpenClaw-managed supervisor markers during SIGUSR1 restart handoff, clean stale gateway PIDs before `/restart` launchctl/systemctl triggers, and set LaunchAgent `ThrottleInterval=60` to bound launchd retry storms during lock-release races. Landed from contributor PRs #27655 (@taw0002), #27448 (@Sid-Qin), and #27650 (@kevinWangSheng). (#27605, #27590, #26904, #26736)
|
||||
- Models/MiniMax auth header defaults: set `authHeader: true` for both onboarding-generated MiniMax API providers and implicit built-in MiniMax (`minimax`, `minimax-portal`) provider templates so first requests no longer fail with MiniMax `401 authentication_error` due to missing `Authorization` header. Landed from contributor PRs #27622 by @riccoyuanft and #27631 by @kevinWangSheng. (#27600, #15303)
|
||||
- Auth/Auth profiles: normalize `auth-profiles.json` alias fields (`mode -> type`, `apiKey -> key`) before credential validation so entries copied from `openclaw.json` auth examples are no longer silently dropped. (#26950) Thanks @byungsker.
|
||||
- Models/Google Gemini: treat `google` (Gemini API key auth profile) as a reasoning-tag provider to prevent `<think>` leakage, and add forward-compat model fallback for `google-gemini-cli` `gemini-3.1-pro*` / `gemini-3.1-flash*` IDs to avoid false unknown-model errors. (#26551, #26524) Thanks @byungsker.
|
||||
- Models/Profile suffix parsing: centralize trailing `@profile` parsing and only treat `@` as a profile separator when it appears after the final `/`, preserving model IDs like `openai/@cf/...` and `openrouter/@preset/...` across `/model` directive parsing and allowlist model resolution, with regression coverage.
|
||||
- Models/OpenAI Codex config schema parity: accept `openai-codex-responses` in the config model API schema and TypeScript `ModelApi` union, with regression coverage for config validation. Landed from contributor PR #27501 by @AytuncYildizli. Thanks @AytuncYildizli.
|
||||
- Agents/Models config: preserve agent-level provider `apiKey` and `baseUrl` during merge-mode `models.json` updates when agent values are present. (#27293) Thanks @Sid-Qin.
|
||||
|
||||
@@ -17,6 +17,14 @@ const ANTHROPIC_SONNET_TEMPLATE_MODEL_IDS = ["claude-sonnet-4-5", "claude-sonnet
|
||||
const ZAI_GLM5_MODEL_ID = "glm-5";
|
||||
const ZAI_GLM5_TEMPLATE_MODEL_IDS = ["glm-4.7"] as const;
|
||||
|
||||
// gemini-3.1-pro-preview / gemini-3.1-flash-preview are not yet in pi-ai's built-in
// google-gemini-cli catalog. Clone the gemini-3-pro/flash-preview template so users
// don't get "Unknown model" errors when Google releases a new minor version.
// Matching is prefix-based after lowercasing, so date-stamped or suffixed variants
// (e.g. "gemini-3.1-pro-preview-0605") resolve as well.
const GEMINI_3_1_PRO_PREFIX = "gemini-3.1-pro";
const GEMINI_3_1_FLASH_PREFIX = "gemini-3.1-flash";
// Candidate template model IDs to clone from, tried in order
// (presumably the first registry hit wins — see cloneFirstTemplateModel).
const GEMINI_3_1_PRO_TEMPLATE_IDS = ["gemini-3-pro-preview"] as const;
const GEMINI_3_1_FLASH_TEMPLATE_IDS = ["gemini-3-flash-preview"] as const;
|
||||
|
||||
function cloneFirstTemplateModel(params: {
|
||||
normalizedProvider: string;
|
||||
trimmedModelId: string;
|
||||
@@ -160,6 +168,38 @@ function resolveAnthropicSonnet46ForwardCompatModel(
|
||||
});
|
||||
}
|
||||
|
||||
// gemini-3.1-pro-preview / gemini-3.1-flash-preview are not present in pi-ai's built-in
|
||||
// google-gemini-cli catalog yet. Clone the nearest gemini-3 template so users don't get
|
||||
// "Unknown model" errors when Google Gemini CLI gains new minor-version models.
|
||||
function resolveGoogleGeminiCli31ForwardCompatModel(
|
||||
provider: string,
|
||||
modelId: string,
|
||||
modelRegistry: ModelRegistry,
|
||||
): Model<Api> | undefined {
|
||||
if (normalizeProviderId(provider) !== "google-gemini-cli") {
|
||||
return undefined;
|
||||
}
|
||||
const trimmed = modelId.trim();
|
||||
const lower = trimmed.toLowerCase();
|
||||
|
||||
let templateIds: readonly string[];
|
||||
if (lower.startsWith(GEMINI_3_1_PRO_PREFIX)) {
|
||||
templateIds = GEMINI_3_1_PRO_TEMPLATE_IDS;
|
||||
} else if (lower.startsWith(GEMINI_3_1_FLASH_PREFIX)) {
|
||||
templateIds = GEMINI_3_1_FLASH_TEMPLATE_IDS;
|
||||
} else {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
return cloneFirstTemplateModel({
|
||||
normalizedProvider: "google-gemini-cli",
|
||||
trimmedModelId: trimmed,
|
||||
templateIds: [...templateIds],
|
||||
modelRegistry,
|
||||
patch: { reasoning: true },
|
||||
});
|
||||
}
|
||||
|
||||
// Z.ai's GLM-5 may not be present in pi-ai's built-in model catalog yet.
|
||||
// When a user configures zai/glm-5 without a models.json entry, clone glm-4.7 as a forward-compat fallback.
|
||||
function resolveZaiGlm5ForwardCompatModel(
|
||||
@@ -211,6 +251,7 @@ export function resolveForwardCompatModel(
|
||||
resolveOpenAICodexGpt53FallbackModel(provider, modelId, modelRegistry) ??
|
||||
resolveAnthropicOpus46ForwardCompatModel(provider, modelId, modelRegistry) ??
|
||||
resolveAnthropicSonnet46ForwardCompatModel(provider, modelId, modelRegistry) ??
|
||||
resolveZaiGlm5ForwardCompatModel(provider, modelId, modelRegistry)
|
||||
resolveZaiGlm5ForwardCompatModel(provider, modelId, modelRegistry) ??
|
||||
resolveGoogleGeminiCli31ForwardCompatModel(provider, modelId, modelRegistry)
|
||||
);
|
||||
}
|
||||
|
||||
@@ -8,7 +8,11 @@ vi.mock("../pi-model-discovery.js", () => ({
|
||||
import { buildInlineProviderModels, resolveModel } from "./model.js";
|
||||
import {
|
||||
buildOpenAICodexForwardCompatExpectation,
|
||||
GOOGLE_GEMINI_CLI_FLASH_TEMPLATE_MODEL,
|
||||
GOOGLE_GEMINI_CLI_PRO_TEMPLATE_MODEL,
|
||||
makeModel,
|
||||
mockGoogleGeminiCliFlashTemplateModel,
|
||||
mockGoogleGeminiCliProTemplateModel,
|
||||
mockOpenAICodexTemplateModel,
|
||||
resetMockDiscoverModels,
|
||||
} from "./model.test-harness.js";
|
||||
@@ -50,4 +54,36 @@ describe("pi embedded model e2e smoke", () => {
|
||||
expect(result.model).toBeUndefined();
|
||||
expect(result.error).toBe("Unknown model: openai-codex/gpt-4.1-mini");
|
||||
});
|
||||
|
||||
it("builds a google-gemini-cli forward-compat fallback for gemini-3.1-pro-preview", () => {
  // Register only the gemini-3-pro-preview template in the mocked discovery layer.
  mockGoogleGeminiCliProTemplateModel();

  const result = resolveModel("google-gemini-cli", "gemini-3.1-pro-preview", "/tmp/agent");
  expect(result.error).toBeUndefined();
  // The clone keeps the template's fields but adopts the requested ID/name
  // and forces reasoning on.
  expect(result.model).toMatchObject({
    ...GOOGLE_GEMINI_CLI_PRO_TEMPLATE_MODEL,
    id: "gemini-3.1-pro-preview",
    name: "gemini-3.1-pro-preview",
    reasoning: true,
  });
});
|
||||
|
||||
it("builds a google-gemini-cli forward-compat fallback for gemini-3.1-flash-preview", () => {
  // Register only the gemini-3-flash-preview template in the mocked discovery layer.
  mockGoogleGeminiCliFlashTemplateModel();

  const result = resolveModel("google-gemini-cli", "gemini-3.1-flash-preview", "/tmp/agent");
  expect(result.error).toBeUndefined();
  // The clone keeps the template's fields but adopts the requested ID/name and
  // forces reasoning on (the flash template itself has reasoning: false).
  expect(result.model).toMatchObject({
    ...GOOGLE_GEMINI_CLI_FLASH_TEMPLATE_MODEL,
    id: "gemini-3.1-flash-preview",
    name: "gemini-3.1-flash-preview",
    reasoning: true,
  });
});
|
||||
|
||||
it("keeps unknown-model errors for unrecognized google-gemini-cli model IDs", () => {
  // No template mocked and the ID matches neither gemini-3.1 prefix,
  // so the forward-compat fallback must not fire.
  const result = resolveModel("google-gemini-cli", "gemini-4-unknown", "/tmp/agent");
  expect(result.model).toBeUndefined();
  expect(result.error).toBe("Unknown model: google-gemini-cli/gemini-4-unknown");
});
|
||||
});
|
||||
|
||||
@@ -47,6 +47,48 @@ export function buildOpenAICodexForwardCompatExpectation(
|
||||
};
|
||||
}
|
||||
|
||||
// Test fixture mirroring the pi-ai catalog entry for gemini-3-pro-preview,
// used as the clone template in forward-compat tests.
// NOTE(review): costs/contextWindow/maxTokens are harness stand-ins — not
// asserted against real catalog values; confirm if exactness ever matters.
export const GOOGLE_GEMINI_CLI_PRO_TEMPLATE_MODEL = {
  id: "gemini-3-pro-preview",
  name: "Gemini 3 Pro Preview (Cloud Code Assist)",
  provider: "google-gemini-cli",
  api: "google-gemini-cli",
  baseUrl: "https://cloudcode-pa.googleapis.com",
  reasoning: true,
  input: ["text", "image"] as const,
  cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
  contextWindow: 200000,
  maxTokens: 64000,
};
||||
|
||||
// Test fixture mirroring the pi-ai catalog entry for gemini-3-flash-preview,
// used as the clone template in forward-compat tests. reasoning is false here
// so tests can prove the forward-compat patch flips it to true.
export const GOOGLE_GEMINI_CLI_FLASH_TEMPLATE_MODEL = {
  id: "gemini-3-flash-preview",
  name: "Gemini 3 Flash Preview (Cloud Code Assist)",
  provider: "google-gemini-cli",
  api: "google-gemini-cli",
  baseUrl: "https://cloudcode-pa.googleapis.com",
  reasoning: false,
  input: ["text", "image"] as const,
  cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
  contextWindow: 200000,
  maxTokens: 64000,
};
||||
|
||||
export function mockGoogleGeminiCliProTemplateModel(): void {
|
||||
mockDiscoveredModel({
|
||||
provider: "google-gemini-cli",
|
||||
modelId: "gemini-3-pro-preview",
|
||||
templateModel: GOOGLE_GEMINI_CLI_PRO_TEMPLATE_MODEL,
|
||||
});
|
||||
}
|
||||
|
||||
export function mockGoogleGeminiCliFlashTemplateModel(): void {
|
||||
mockDiscoveredModel({
|
||||
provider: "google-gemini-cli",
|
||||
modelId: "gemini-3-flash-preview",
|
||||
templateModel: GOOGLE_GEMINI_CLI_FLASH_TEMPLATE_MODEL,
|
||||
});
|
||||
}
|
||||
|
||||
export function resetMockDiscoverModels(): void {
|
||||
vi.mocked(discoverModels).mockReturnValue({
|
||||
find: vi.fn(() => null),
|
||||
|
||||
@@ -18,7 +18,11 @@ export function isReasoningTagProvider(provider: string | undefined | null): boo
|
||||
// handles reasoning natively via the `reasoning` field in streaming chunks,
|
||||
// so tag-based enforcement is unnecessary and causes all output to be
|
||||
// discarded as "(no output)" (#2279).
|
||||
if (normalized === "google-gemini-cli" || normalized === "google-generative-ai") {
|
||||
if (
|
||||
normalized === "google" ||
|
||||
normalized === "google-gemini-cli" ||
|
||||
normalized === "google-generative-ai"
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
@@ -58,6 +58,16 @@ describe("isReasoningTagProvider", () => {
|
||||
value: "Ollama",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "returns true for google (gemini-api-key auth provider)",
|
||||
value: "google",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "returns true for Google (case-insensitive)",
|
||||
value: "Google",
|
||||
expected: true,
|
||||
},
|
||||
{ name: "returns true for google-gemini-cli", value: "google-gemini-cli", expected: true },
|
||||
{
|
||||
name: "returns true for google-generative-ai",
|
||||
|
||||
Reference in New Issue
Block a user