Merge branch 'main' into main

This commit is contained in:
Bijin
2026-03-21 20:09:02 +08:00
committed by GitHub
6 changed files with 223 additions and 34 deletions

View File

@@ -57,6 +57,7 @@ Docs: https://docs.openclaw.ai
- Telegram/apiRoot: add per-account custom Bot API endpoint support across send, probe, setup, doctor repair, and inbound media download paths so proxied or self-hosted Telegram deployments work end to end. (#48842) Thanks @Cypherm.
- Telegram/topics: auto-rename DM forum topics on first message with LLM-generated labels, with per-account and per-DM `autoTopicLabel` overrides. (#51502) Thanks @Lukavyi.
- Docs/plugins: add the community wecom plugin listing to the docs catalog. (#29905) Thanks @sliverp.
- Models/GitHub Copilot: allow forward-compat dynamic model ids without code updates, while preserving configured provider and per-model overrides for those synthetic models. (#51325) Thanks @fuller-stack-dev.
### Fixes

View File

@@ -1,20 +1,12 @@
import { ensureAuthProfileStore, listProfilesForProvider } from "openclaw/plugin-sdk/agent-runtime";
import {
definePluginEntry,
type ProviderAuthContext,
type ProviderResolveDynamicModelContext,
type ProviderRuntimeModel,
} from "openclaw/plugin-sdk/core";
import { definePluginEntry, type ProviderAuthContext } from "openclaw/plugin-sdk/core";
import { coerceSecretRef } from "openclaw/plugin-sdk/provider-auth";
import { githubCopilotLoginCommand } from "openclaw/plugin-sdk/provider-auth-login";
import { normalizeModelCompat } from "openclaw/plugin-sdk/provider-models";
import { PROVIDER_ID, resolveCopilotForwardCompatModel } from "./models.js";
import { DEFAULT_COPILOT_API_BASE_URL, resolveCopilotApiToken } from "./token.js";
import { fetchCopilotUsage } from "./usage.js";
const PROVIDER_ID = "github-copilot";
const COPILOT_ENV_VARS = ["COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN"];
const CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex";
const CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
const COPILOT_XHIGH_MODEL_IDS = ["gpt-5.2", "gpt-5.2-codex"] as const;
function resolveFirstGithubToken(params: { agentDir?: string; env: NodeJS.ProcessEnv }): {
@@ -51,27 +43,6 @@ function resolveFirstGithubToken(params: { agentDir?: string; env: NodeJS.Proces
return { githubToken: "", hasProfile };
}
/**
 * Forward-compat shim for the not-yet-registered gpt-5.3-codex model id.
 *
 * When the requested id is gpt-5.3-codex, clone the nearest known codex
 * template from the registry (preserving all of its settings) under the new
 * id/name. Any other id — or a missing template — resolves to undefined so
 * the normal model-resolution path stays in charge.
 */
function resolveCopilotForwardCompatModel(
  ctx: ProviderResolveDynamicModelContext,
): ProviderRuntimeModel | undefined {
  const requestedId = ctx.modelId.trim();
  if (requestedId.toLowerCase() !== CODEX_GPT_53_MODEL_ID) {
    return undefined;
  }
  for (const candidateId of CODEX_TEMPLATE_MODEL_IDS) {
    const base = ctx.modelRegistry.find(PROVIDER_ID, candidateId) as ProviderRuntimeModel | null;
    if (base) {
      // Only the id and display name change; every template setting carries over.
      const clone = { ...base, id: requestedId, name: requestedId } as ProviderRuntimeModel;
      return normalizeModelCompat(clone);
    }
  }
  return undefined;
}
async function runGitHubCopilotAuth(ctx: ProviderAuthContext) {
await ctx.prompter.note(
[

View File

@@ -0,0 +1,101 @@
import { describe, expect, it, vi } from "vitest";
vi.mock("@mariozechner/pi-ai/oauth", () => ({
getOAuthApiKey: vi.fn(),
getOAuthProviders: vi.fn(() => []),
}));
vi.mock("openclaw/plugin-sdk/provider-models", () => ({
normalizeModelCompat: (model: Record<string, unknown>) => model,
}));
import type { ProviderResolveDynamicModelContext } from "openclaw/plugin-sdk/core";
import { resolveCopilotForwardCompatModel } from "./models.js";
/**
 * Builds a minimal stand-in for ProviderResolveDynamicModelContext.
 *
 * The fake registry resolves models seeded in `registryModels` by their
 * "<provider>/<id>" key and returns null for everything else, mirroring the
 * real registry's miss behavior.
 */
function createMockCtx(
  modelId: string,
  registryModels: Record<string, Record<string, unknown>> = {},
): ProviderResolveDynamicModelContext {
  const lookup = (provider: string, id: string) => {
    const hit = registryModels[`${provider}/${id}`];
    return hit ?? null;
  };
  const ctx = {
    modelId,
    provider: "github-copilot",
    config: {},
    modelRegistry: { find: lookup },
  };
  return ctx as unknown as ProviderResolveDynamicModelContext;
}
// Unit tests for the Copilot forward-compat model resolver: registry
// passthrough, codex template cloning, and the synthetic catch-all path.
describe("resolveCopilotForwardCompatModel", () => {
// Blank or whitespace-only ids must not yield a synthetic model.
it("returns undefined for empty modelId", () => {
expect(resolveCopilotForwardCompatModel(createMockCtx(""))).toBeUndefined();
expect(resolveCopilotForwardCompatModel(createMockCtx(" "))).toBeUndefined();
});
// A registry hit means the normal resolution path should handle the model.
it("returns undefined when model is already in registry", () => {
const ctx = createMockCtx("gpt-4o", {
"github-copilot/gpt-4o": { id: "gpt-4o", name: "gpt-4o" },
});
expect(resolveCopilotForwardCompatModel(ctx)).toBeUndefined();
});
// gpt-5.3-codex is cloned from the registered gpt-5.2-codex template,
// keeping template settings (e.g. reasoning) while swapping id/name.
it("clones gpt-5.2-codex template for gpt-5.3-codex", () => {
const template = {
id: "gpt-5.2-codex",
name: "gpt-5.2-codex",
provider: "github-copilot",
api: "openai-responses",
reasoning: true,
contextWindow: 200_000,
};
const ctx = createMockCtx("gpt-5.3-codex", {
"github-copilot/gpt-5.2-codex": template,
});
const result = resolveCopilotForwardCompatModel(ctx);
expect(result).toBeDefined();
expect(result!.id).toBe("gpt-5.3-codex");
expect(result!.name).toBe("gpt-5.3-codex");
expect((result as unknown as Record<string, unknown>).reasoning).toBe(true);
});
// With an empty registry the codex branch finds no template and the
// synthetic catch-all must still produce a model.
it("falls through to synthetic catch-all when codex template is missing", () => {
const ctx = createMockCtx("gpt-5.3-codex");
const result = resolveCopilotForwardCompatModel(ctx);
expect(result).toBeDefined();
expect(result!.id).toBe("gpt-5.3-codex");
});
// Any unknown id becomes a synthetic OpenAI-responses model with
// optimistic text+image input support.
it("creates synthetic model for arbitrary unknown model ID", () => {
const ctx = createMockCtx("gpt-5.4-mini");
const result = resolveCopilotForwardCompatModel(ctx);
expect(result).toBeDefined();
expect(result!.id).toBe("gpt-5.4-mini");
expect(result!.name).toBe("gpt-5.4-mini");
expect((result as unknown as Record<string, unknown>).api).toBe("openai-responses");
expect((result as unknown as Record<string, unknown>).input).toEqual(["text", "image"]);
});
// Ids beginning with o1/o3 (followed by a word boundary) are inferred to
// be reasoning models.
it("infers reasoning=true for o1/o3 model IDs", () => {
for (const id of ["o1", "o3", "o3-mini", "o1-preview"]) {
const ctx = createMockCtx(id);
const result = resolveCopilotForwardCompatModel(ctx);
expect(result).toBeDefined();
expect((result as unknown as Record<string, unknown>).reasoning).toBe(true);
}
});
// The regex is anchored at the start of the id, so "o1"/"o3" appearing
// mid-string must not trigger the reasoning inference.
it("sets reasoning=false for non-reasoning model IDs including mid-string o1/o3", () => {
for (const id of [
"gpt-5.4-mini",
"claude-sonnet-4.6",
"gpt-4o",
"audio-o1-hd",
"turbo-o3-voice",
]) {
const ctx = createMockCtx(id);
const result = resolveCopilotForwardCompatModel(ctx);
expect(result).toBeDefined();
expect((result as unknown as Record<string, unknown>).reasoning).toBe(false);
}
});
});

View File

@@ -0,0 +1,68 @@
import type {
ProviderResolveDynamicModelContext,
ProviderRuntimeModel,
} from "openclaw/plugin-sdk/core";
import { normalizeModelCompat } from "openclaw/plugin-sdk/provider-models";
export const PROVIDER_ID = "github-copilot";
const CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex";
const CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
const DEFAULT_CONTEXT_WINDOW = 128_000;
const DEFAULT_MAX_TOKENS = 8192;

/**
 * Resolves unknown GitHub Copilot model ids without requiring a code change.
 *
 * Resolution order:
 *  1. Blank ids and ids already present in the registry resolve to
 *     undefined so the normal lookup path stays in charge.
 *  2. gpt-5.3-codex is cloned from the registered gpt-5.2-codex template so
 *     codex-specific registry settings carry over to the new id.
 *  3. Every other id gets a synthetic OpenAI-compatible definition. The
 *     Copilot API is OpenAI-compatible and returns its own error if the
 *     model is unavailable on the user's plan, so new models can be used by
 *     simply adding them to agents.defaults.models in openclaw.json.
 */
export function resolveCopilotForwardCompatModel(
  ctx: ProviderResolveDynamicModelContext,
): ProviderRuntimeModel | undefined {
  const requestedId = ctx.modelId.trim();
  if (!requestedId) {
    return undefined;
  }
  const lowerId = requestedId.toLowerCase();
  // Registered models take the normal resolution path.
  if (ctx.modelRegistry.find(PROVIDER_ID, lowerId)) {
    return undefined;
  }
  if (lowerId === CODEX_GPT_53_MODEL_ID) {
    for (const templateId of CODEX_TEMPLATE_MODEL_IDS) {
      const template = ctx.modelRegistry.find(
        PROVIDER_ID,
        templateId,
      ) as ProviderRuntimeModel | null;
      if (template) {
        // Only id and display name change; all template settings carry over.
        return normalizeModelCompat({
          ...template,
          id: requestedId,
          name: requestedId,
        } as ProviderRuntimeModel);
      }
    }
    // Template not registered — fall through to the synthetic catch-all.
  }
  // Ids that begin with o1/o3 (at a word boundary) are treated as reasoning
  // models; mid-string occurrences deliberately do not match.
  const isReasoningId = /^o[13](\b|$)/.test(lowerId);
  const synthetic = {
    id: requestedId,
    name: requestedId,
    provider: PROVIDER_ID,
    api: "openai-responses",
    reasoning: isReasoningId,
    // Optimistic: most Copilot models support images, and the API rejects
    // image payloads for text-only models rather than failing silently.
    input: ["text", "image"],
    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
    contextWindow: DEFAULT_CONTEXT_WINDOW,
    maxTokens: DEFAULT_MAX_TOKENS,
  } as ProviderRuntimeModel;
  return normalizeModelCompat(synthetic);
}

View File

@@ -753,9 +753,53 @@ describe("resolveModel", () => {
api: "openai-responses",
baseUrl: "https://proxy.example.com/v1",
});
expect((result.model as unknown as { headers?: Record<string, string> }).headers).toEqual({
"X-Proxy-Auth": "token-123",
expect((result.model as unknown as { headers?: Record<string, string> }).headers).toMatchObject(
{
"X-Proxy-Auth": "token-123",
},
);
});
// Verifies that per-provider config (baseUrl, api, headers) and per-model
// overrides (reasoning, input, contextWindow, maxTokens) from openclaw.json
// are applied on top of a dynamically resolved github-copilot model.
it("applies configured overrides to github-copilot dynamic models", () => {
const cfg = {
models: {
providers: {
"github-copilot": {
baseUrl: "https://proxy.example.com/v1",
api: "openai-completions",
headers: { "X-Proxy-Auth": "token-123" },
models: [
{
...makeModel("gpt-5.4-mini"),
// Overrides that must survive the dynamic-resolution path.
reasoning: true,
input: ["text"],
contextWindow: 256000,
maxTokens: 32000,
},
],
},
},
},
} as OpenClawConfig;
// gpt-5.4-mini is not a built-in model, so this exercises the synthetic
// forward-compat path with configured overrides layered on.
const result = resolveModel("github-copilot", "gpt-5.4-mini", "/tmp/agent", cfg);
expect(result.error).toBeUndefined();
expect(result.model).toMatchObject({
provider: "github-copilot",
id: "gpt-5.4-mini",
api: "openai-completions",
baseUrl: "https://proxy.example.com/v1",
reasoning: true,
input: ["text"],
contextWindow: 256000,
maxTokens: 32000,
});
// toMatchObject: the resolver may add its own headers alongside ours.
expect((result.model as unknown as { headers?: Record<string, string> }).headers).toMatchObject(
{
"X-Proxy-Auth": "token-123",
},
);
});
it("builds an openai fallback for gpt-5.4 mini from the gpt-5-mini template", () => {

View File

@@ -266,7 +266,11 @@ export function resolveModelWithRegistry(params: {
provider,
cfg,
agentDir,
model: pluginDynamicModel,
model: applyConfiguredProviderOverrides({
discoveredModel: pluginDynamicModel as Model<Api>,
providerConfig,
modelId,
}),
});
}