mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-10 04:40:43 +00:00
The plugin's `catalog.run` hook already exchanged a GitHub OAuth token
for a short-lived Copilot API token and resolved the per-account baseUrl,
but it returned `models: []` and the bundled openclaw runtime relied
entirely on the static manifest catalog. That meant:
- Static `contextWindow` values were a conservative 128k for every
model, far below reality (gpt-5.4/5.5 are 400k, claude-opus-4.6/4.7
internal variants are 1M, claude-sonnet-4 is 200k, etc.).
- Newly published Copilot models (gpt-5.5, gpt-5.1*, gemini-3-pro-preview,
the claude-opus-*-1m internal variants, etc.) didn't appear at all
until the manifest was patched.
- Per-account entitlement was invisible — every user saw the same
hardcoded 22-model list regardless of plan.
Wire it up:
- Add `fetchCopilotModelCatalog` in `extensions/github-copilot/models.ts`.
Calls `${baseUrl}/models` with the resolved Copilot API token and the
same Editor-Version / Copilot-Integration-Id headers used elsewhere in
the plugin. Maps each entry to a `ModelDefinitionConfig`:
- `contextWindow` ← `capabilities.limits.max_context_window_tokens`
- `maxTokens` ← `capabilities.limits.max_output_tokens`
- `input` ← `["text", "image"]` if `supports.vision`, else `["text"]`
- `reasoning` ← `Array.isArray(supports.reasoning_effort) && supports.reasoning_effort.length > 0`
- `api` ← `anthropic-messages` for Anthropic vendor or claude*
ids; otherwise `openai-responses`
Filters out non-chat objects (embeddings) and internal routers
(`accounts/...` ids). Dedupes by id. 10s default timeout.
- Update the `catalog.run` hook in `extensions/github-copilot/index.ts`
to call the new function after token-exchange and return the live
results. On any HTTP/parse failure it falls back to `models: []`,
which preserves the static manifest catalog as the visible fallback —
no behavior regression for users with `discovery.enabled: false` or
in offline scenarios.
- Bump `modelCatalog.discovery."github-copilot"` from `"static"` to
`"refreshable"` in `openclaw.plugin.json` so the catalog hook is
actually invoked at runtime. Without this the discovery infrastructure
treats the provider as static-only and never calls `catalog.run`.
- Add `gpt-5.5` to the static manifest catalog and `DEFAULT_MODEL_IDS`
with the correct values from the API (`contextWindow: 400000`,
`maxTokens: 128000`, `reasoning: true`, multimodal). This means users
on `discovery.enabled: false` still get gpt-5.5 visible without
needing to override `models.providers.github-copilot.models` in their
config.
Tests added (5, all passing alongside the existing 24):
- `fetchCopilotModelCatalog` maps a representative `/models` response
(chat models incl. an internal 1M-context Anthropic variant, a router,
an embedding) to the right `ModelDefinitionConfig` shape with real
context windows.
- baseUrl trailing slash is normalized.
- Duplicate ids in the API response are deduped (first wins).
- Non-2xx HTTP raises so the caller can fall back to the static catalog.
- Empty token / baseUrl reject synchronously without calling fetch.
Targeted run: `pnpm test extensions/github-copilot/models.test.ts` →
29/29 pass. `pnpm exec oxfmt --check extensions/github-copilot/` clean.
`pnpm tsgo:core` clean.
Real-world proof:
Built locally and dropped the resulting tarball into a downstream
container with `gh auth login --hostname github.com` (Copilot
subscription on the linked account). Before this change,
`openclaw models list --provider github-copilot` returned the 22-entry
static catalog with every entry showing 128k context. After this change,
the same command (with `--refresh`) returns 30 entries with API-accurate
context windows including the new gpt-5.1 family, the claude-opus-*-1m
variants, and the corrected `gemini-3*-preview` ids.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
248 lines
8.3 KiB
TypeScript
248 lines
8.3 KiB
TypeScript
import type {
|
|
ProviderResolveDynamicModelContext,
|
|
ProviderRuntimeModel,
|
|
} from "openclaw/plugin-sdk/core";
|
|
import type { ModelDefinitionConfig } from "openclaw/plugin-sdk/provider-model-shared";
|
|
import { normalizeModelCompat } from "openclaw/plugin-sdk/provider-model-shared";
|
|
import { normalizeOptionalLowercaseString } from "openclaw/plugin-sdk/text-runtime";
|
|
|
|
// Provider identifier shared by registry lookups and the discovery hook.
export const PROVIDER_ID = "github-copilot";

// Model ids that may be requested before they appear in the static registry;
// resolveCopilotForwardCompatModel clones them from a registered template.
const CODEX_FORWARD_COMPAT_TARGET_IDS = new Set(["gpt-5.4", "gpt-5.3-codex"]);
// gpt-5.3-codex is only a useful template when gpt-5.4 is the target; it is
// always a registry miss (and therefore skipped) when it is the target itself.
const CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2-codex"] as const;

// Conservative fallbacks used for synthetic catch-all models and for /models
// entries that do not report limits.
const DEFAULT_CONTEXT_WINDOW = 128_000;
const DEFAULT_MAX_TOKENS = 8192;
|
|
|
|
function isCopilotCodexModelId(modelId: string): boolean {
|
|
return /(?:^|[-_.])codex(?:$|[-_.])/.test(modelId);
|
|
}
|
|
|
|
export function resolveCopilotTransportApi(
|
|
modelId: string,
|
|
): "anthropic-messages" | "openai-responses" {
|
|
return (normalizeOptionalLowercaseString(modelId) ?? "").includes("claude")
|
|
? "anthropic-messages"
|
|
: "openai-responses";
|
|
}
|
|
|
|
export function resolveCopilotForwardCompatModel(
|
|
ctx: ProviderResolveDynamicModelContext,
|
|
): ProviderRuntimeModel | undefined {
|
|
const trimmedModelId = ctx.modelId.trim();
|
|
if (!trimmedModelId) {
|
|
return undefined;
|
|
}
|
|
|
|
// If the model is already in the registry, let the normal path handle it.
|
|
const lowerModelId = normalizeOptionalLowercaseString(trimmedModelId) ?? "";
|
|
const existing = ctx.modelRegistry.find(PROVIDER_ID, lowerModelId);
|
|
if (existing) {
|
|
return undefined;
|
|
}
|
|
|
|
// For gpt-5.4 and gpt-5.3-codex, clone from a registered codex template
|
|
// to inherit the correct reasoning and capability flags.
|
|
if (CODEX_FORWARD_COMPAT_TARGET_IDS.has(lowerModelId)) {
|
|
for (const templateId of CODEX_TEMPLATE_MODEL_IDS) {
|
|
const template = ctx.modelRegistry.find(
|
|
PROVIDER_ID,
|
|
templateId,
|
|
) as ProviderRuntimeModel | null;
|
|
if (!template) {
|
|
continue;
|
|
}
|
|
return normalizeModelCompat({
|
|
...template,
|
|
id: trimmedModelId,
|
|
name: trimmedModelId,
|
|
} as ProviderRuntimeModel);
|
|
}
|
|
// Template not found — fall through to synthetic catch-all below.
|
|
}
|
|
|
|
// Catch-all: create a synthetic model definition for any unknown model ID.
|
|
// The Copilot API is OpenAI-compatible and will return its own error if the
|
|
// model isn't available on the user's plan. This lets new models be used
|
|
// by simply adding them to agents.defaults.models in openclaw.json — no
|
|
// code change required.
|
|
const reasoning = /^o[13](\b|$)/.test(lowerModelId) || isCopilotCodexModelId(lowerModelId);
|
|
return normalizeModelCompat({
|
|
id: trimmedModelId,
|
|
name: trimmedModelId,
|
|
provider: PROVIDER_ID,
|
|
api: resolveCopilotTransportApi(trimmedModelId),
|
|
reasoning,
|
|
// Optimistic: most Copilot models support images, and the API rejects
|
|
// image payloads for text-only models rather than failing silently.
|
|
input: ["text", "image"],
|
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
contextWindow: DEFAULT_CONTEXT_WINDOW,
|
|
maxTokens: DEFAULT_MAX_TOKENS,
|
|
} as ProviderRuntimeModel);
|
|
}
|
|
|
|
// Subset of the Copilot /models response shape that we depend on. We only read
// fields we need; everything else is preserved as `unknown` so upstream changes
// don't break parsing.
type CopilotApiModelEntry = {
  /** Model id, e.g. "gpt-5.4". Entries without an id are dropped. */
  id?: string;
  /** Human-readable display name; falls back to `id` when absent/blank. */
  name?: string;
  /** API object kind; anything other than "model" is filtered out. */
  object?: string;
  /** Vendor tag; "anthropic" forces the anthropic-messages transport. */
  vendor?: string;
  // Not currently read by the mapper; kept for shape completeness.
  preview?: boolean;
  // Not currently read by the mapper; kept for shape completeness.
  model_picker_enabled?: boolean;
  capabilities?: {
    /** Capability kind; anything other than "chat" is filtered out. */
    type?: string;
    family?: string;
    limits?: {
      /** Mapped to ModelDefinitionConfig.contextWindow when a positive number. */
      max_context_window_tokens?: number;
      /** Mapped to ModelDefinitionConfig.maxTokens when a positive number. */
      max_output_tokens?: number;
      // Not currently read by the mapper.
      max_prompt_tokens?: number;
    };
    supports?: {
      /** true enables "image" input in the mapped definition. */
      vision?: boolean;
      tool_calls?: boolean;
      streaming?: boolean;
      structured_outputs?: boolean;
      /** Non-empty array marks the model as reasoning-capable. */
      reasoning_effort?: string[] | null;
    };
  };
};
|
|
|
|
// Default timeout for the /models request when the caller supplies no signal.
const COPILOT_MODELS_LIST_DEFAULT_TIMEOUT_MS = 10_000;
// Ids with this prefix are internal Copilot routers and are excluded from the catalog.
const COPILOT_ROUTER_ID_PREFIX = "accounts/";
|
|
|
|
function resolveCopilotApiForVendor(
|
|
vendor: string | undefined,
|
|
modelId: string,
|
|
): "anthropic-messages" | "openai-responses" {
|
|
if (vendor && vendor.toLowerCase() === "anthropic") {
|
|
return "anthropic-messages";
|
|
}
|
|
return resolveCopilotTransportApi(modelId);
|
|
}
|
|
|
|
function mapCopilotApiModelToDefinition(
|
|
entry: CopilotApiModelEntry,
|
|
): ModelDefinitionConfig | undefined {
|
|
const id = entry.id?.trim();
|
|
if (!id) {
|
|
return undefined;
|
|
}
|
|
// Skip non-chat objects (embeddings, routers, etc.) and internal router ids.
|
|
if (entry.object && entry.object !== "model") {
|
|
return undefined;
|
|
}
|
|
if (entry.capabilities?.type && entry.capabilities.type !== "chat") {
|
|
return undefined;
|
|
}
|
|
if (id.startsWith(COPILOT_ROUTER_ID_PREFIX)) {
|
|
return undefined;
|
|
}
|
|
|
|
const limits = entry.capabilities?.limits;
|
|
const supports = entry.capabilities?.supports;
|
|
const reasoning = Array.isArray(supports?.reasoning_effort)
|
|
? supports.reasoning_effort.length > 0
|
|
: false;
|
|
const supportsVision = supports?.vision === true;
|
|
const input: ModelDefinitionConfig["input"] = supportsVision ? ["text", "image"] : ["text"];
|
|
|
|
const contextWindow =
|
|
typeof limits?.max_context_window_tokens === "number" && limits.max_context_window_tokens > 0
|
|
? limits.max_context_window_tokens
|
|
: DEFAULT_CONTEXT_WINDOW;
|
|
const maxTokens =
|
|
typeof limits?.max_output_tokens === "number" && limits.max_output_tokens > 0
|
|
? limits.max_output_tokens
|
|
: DEFAULT_MAX_TOKENS;
|
|
|
|
const definition: ModelDefinitionConfig = {
|
|
id,
|
|
name: entry.name?.trim() || id,
|
|
api: resolveCopilotApiForVendor(entry.vendor, id),
|
|
reasoning,
|
|
input,
|
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
contextWindow,
|
|
maxTokens,
|
|
};
|
|
return definition;
|
|
}
|
|
|
|
/** Inputs for {@link fetchCopilotModelCatalog}. */
export type FetchCopilotModelCatalogParams = {
  /** Short-lived Copilot API token (from `resolveCopilotApiToken`). */
  copilotApiToken: string;
  /** Resolved baseUrl from the same token-exchange response. */
  baseUrl: string;
  /** Optional fetch override for testing. */
  fetchImpl?: typeof fetch;
  /** Optional AbortSignal; defaults to a 10s timeout. */
  signal?: AbortSignal;
};
|
|
|
|
/**
|
|
* Fetch the live Copilot model catalog from `${baseUrl}/models` and project it
|
|
* into `ModelDefinitionConfig[]`. Used by the plugin's discovery hook so the
|
|
* runtime catalog tracks per-account entitlements + accurate context windows
|
|
* without manifest churn.
|
|
*
|
|
* Filters out non-chat objects (embeddings, routers) and internal router ids.
|
|
* On any HTTP/parse failure the caller should fall back to the static manifest
|
|
* catalog; this function throws so the caller decides the recovery shape.
|
|
*/
|
|
export async function fetchCopilotModelCatalog(
|
|
params: FetchCopilotModelCatalogParams,
|
|
): Promise<ModelDefinitionConfig[]> {
|
|
const fetchImpl = params.fetchImpl ?? fetch;
|
|
const trimmedBase = params.baseUrl.replace(/\/+$/, "");
|
|
if (!trimmedBase) {
|
|
throw new Error("fetchCopilotModelCatalog: baseUrl required");
|
|
}
|
|
if (!params.copilotApiToken.trim()) {
|
|
throw new Error("fetchCopilotModelCatalog: copilotApiToken required");
|
|
}
|
|
const url = `${trimmedBase}/models`;
|
|
const controller = params.signal ? undefined : new AbortController();
|
|
const timeoutId = controller
|
|
? setTimeout(() => controller.abort(), COPILOT_MODELS_LIST_DEFAULT_TIMEOUT_MS)
|
|
: undefined;
|
|
try {
|
|
const res = await fetchImpl(url, {
|
|
method: "GET",
|
|
headers: {
|
|
Accept: "application/json",
|
|
Authorization: `Bearer ${params.copilotApiToken}`,
|
|
"Editor-Version": "vscode/1.96.2",
|
|
"Copilot-Integration-Id": "vscode-chat",
|
|
},
|
|
signal: params.signal ?? controller?.signal,
|
|
});
|
|
if (!res.ok) {
|
|
throw new Error(`Copilot /models fetch failed: HTTP ${res.status}`);
|
|
}
|
|
const json = (await res.json()) as { data?: CopilotApiModelEntry[] };
|
|
const data = Array.isArray(json?.data) ? json.data : [];
|
|
const seen = new Set<string>();
|
|
const out: ModelDefinitionConfig[] = [];
|
|
for (const entry of data) {
|
|
const def = mapCopilotApiModelToDefinition(entry);
|
|
if (!def) {
|
|
continue;
|
|
}
|
|
if (seen.has(def.id)) {
|
|
continue;
|
|
}
|
|
seen.add(def.id);
|
|
out.push(def);
|
|
}
|
|
return out;
|
|
} finally {
|
|
if (timeoutId !== undefined) {
|
|
clearTimeout(timeoutId);
|
|
}
|
|
}
|
|
}
|