fix: use provider-aware context window lookup (#54796) (thanks @neeravmakwana)

* fix(status): use provider-aware context window lookup

* test(status): cover provider-aware context lookup

* fix: use provider-aware context window lookup (#54796) (thanks @neeravmakwana)

---------

Co-authored-by: Ayaan Zaidi <hi@obviy.us>
This commit is contained in:
Neerav Makwana
2026-03-25 23:58:20 -04:00
committed by GitHub
parent 14430ade57
commit 68d854cb9c
5 changed files with 59 additions and 4 deletions

View File

@@ -29,6 +29,7 @@ Docs: https://docs.openclaw.ai
- Agents/embedded replies: surface mid-turn 429 and overload failures when embedded runs end without a user-visible reply, while preserving successful media-only replies that still use legacy `mediaUrl`. (#50930) Thanks @infichen.
- Agents/compaction: trigger timeout recovery compaction before retrying high-context LLM timeouts so embedded runs stop repeating oversized requests. (#46417) Thanks @joeykrug.
- Microsoft Teams/config: accept the existing `welcomeCard`, `groupWelcomeCard`, `promptStarters`, and feedback/reflection keys in strict config validation so already-supported Teams runtime settings stop failing schema checks. (#54679) Thanks @gumclaw.
- Agents/status: use provider-aware context window lookup for fresh Anthropic 4.6 model overrides so `/status` shows the correct 1.0m window instead of an underreported shared-cache minimum. (#54796) Thanks @neeravmakwana.
## 2026.3.24

View File

@@ -448,7 +448,9 @@ export async function resolveReplyDirectives(params: {
}
let contextTokens = resolveContextTokens({
cfg,
agentCfg,
provider,
model,
});

View File

@@ -1,8 +1,9 @@
import { describe, expect, it, vi } from "vitest";
import { afterEach, describe, expect, it, vi } from "vitest";
import { MODEL_CONTEXT_TOKEN_CACHE } from "../../agents/context-cache.js";
import { loadModelCatalog } from "../../agents/model-catalog.js";
import type { OpenClawConfig } from "../../config/config.js";
import type { SessionEntry } from "../../config/sessions.js";
import { createModelSelectionState } from "./model-selection.js";
import { createModelSelectionState, resolveContextTokens } from "./model-selection.js";
vi.mock("../../agents/model-catalog.js", () => ({
loadModelCatalog: vi.fn(async () => [
@@ -16,6 +17,10 @@ vi.mock("../../agents/model-catalog.js", () => ({
]),
}));
// Reset the shared context-token cache after every test so seeded entries
// cannot leak across test cases.
afterEach(() => MODEL_CONTEXT_TOKEN_CACHE.clear());
const makeConfiguredModel = (overrides: Record<string, unknown> = {}) => ({
id: "gpt-5.4",
name: "GPT-5.4",
@@ -126,6 +131,22 @@ describe("createModelSelectionState catalog loading", () => {
});
});
describe("resolveContextTokens", () => {
  it("prefers provider-qualified cache keys over bare model ids", () => {
    // Seed two conflicting cache entries for the same model: a bare id and
    // a provider-qualified key with a much larger window.
    MODEL_CONTEXT_TOKEN_CACHE.set("claude-opus-4-6", 200_000);
    MODEL_CONTEXT_TOKEN_CACHE.set("anthropic/claude-opus-4-6", 1_000_000);

    const tokens = resolveContextTokens({
      cfg: {} as OpenClawConfig,
      agentCfg: undefined,
      provider: "anthropic",
      model: "claude-opus-4-6",
    });

    // The provider-qualified entry (1.0m) must win over the bare id (200k).
    expect(tokens).toBe(1_000_000);
  });
});
const makeEntry = (overrides: Partial<SessionEntry> = {}): SessionEntry => ({
sessionId: "session-id",
updatedAt: Date.now(),

View File

@@ -1,6 +1,6 @@
import { resolveAgentConfig } from "../../agents/agent-scope.js";
import { clearSessionAuthProfileOverride } from "../../agents/auth-profiles/session-override.js";
import { lookupContextTokens } from "../../agents/context.js";
import { resolveContextTokensForModel } from "../../agents/context.js";
import { DEFAULT_CONTEXT_TOKENS } from "../../agents/defaults.js";
import type { ModelCatalogEntry } from "../../agents/model-catalog.js";
import {
@@ -672,12 +672,19 @@ export function resolveModelDirectiveSelection(params: {
}
/**
 * Resolve the context window (in tokens) for the given provider/model pair.
 *
 * Precedence (first non-nullish value wins):
 *  1. an explicit `contextTokens` override on the agent config,
 *  2. a provider-aware cache/catalog lookup — provider-qualified keys
 *     (e.g. "anthropic/claude-opus-4-6") take priority over bare model ids
 *     ("claude-opus-4-6"), per the resolveContextTokens test in this change,
 *  3. the library-wide default window.
 *
 * The stale bare-id `lookupContextTokens` arm is removed: its import was
 * replaced by `resolveContextTokensForModel`, and leaving it first in the
 * chain would short-circuit with the bare-id entry (underreporting the
 * window), defeating the provider-aware fix this function exists for.
 */
export function resolveContextTokens(params: {
  cfg: OpenClawConfig;
  agentCfg: NonNullable<NonNullable<OpenClawConfig["agents"]>["defaults"]> | undefined;
  provider: string;
  model: string;
}): number {
  return (
    params.agentCfg?.contextTokens ??
    resolveContextTokensForModel({
      cfg: params.cfg,
      provider: params.provider,
      model: params.model,
      // Stay synchronous on this path: never trigger an async catalog load.
      allowAsyncLoad: false,
    }) ??
    DEFAULT_CONTEXT_TOKENS
  );
}

View File

@@ -1200,6 +1200,30 @@ describe("buildStatusMessage", () => {
);
});
it("prefers provider-qualified context windows for fresh bare model ids", () => {
  // Seed conflicting windows: a bare model id (200k) and the same model
  // under a provider-qualified key (1.0m).
  MODEL_CONTEXT_TOKEN_CACHE.set("claude-opus-4-6", 200_000);
  MODEL_CONTEXT_TOKEN_CACHE.set("anthropic/claude-opus-4-6", 1_000_000);

  const rendered = buildStatusMessage({
    agent: { model: "anthropic/claude-opus-4-6" },
    sessionEntry: {
      sessionId: "sess-anthropic-qualified-context",
      updatedAt: 0,
      totalTokens: 25_000,
    },
    sessionKey: "agent:main:main",
    sessionScope: "per-sender",
    queue: { mode: "collect", depth: 0 },
    modelAuth: "api-key",
  });

  const status = normalizeTestText(rendered);
  // The provider-qualified 1.0m window must be reported, never the bare 200k one.
  expect(status).toContain("Context: 25k/1.0m");
  expect(status).not.toContain("Context: 25k/200k");
});
it("does not synthesize a 32k fallback window when the active runtime model is unknown", () => {
const text = buildStatusMessage({
config: {