fix: scope live model switch pending state (#60266) (thanks @kiranvk-2011)

This commit is contained in:
Peter Steinberger
2026-04-04 11:45:01 +01:00
parent b36a3a3295
commit e4ea3c03cf
11 changed files with 109 additions and 15 deletions

View File

@@ -47,6 +47,7 @@ Docs: https://docs.openclaw.ai
- Providers/OpenAI-compatible WS: compute fallback token totals from normalized usage when providers omit or zero `total_tokens`, so DashScope-compatible sessions stop storing zero totals after alias normalization. (#54940) Thanks @lyfuci.
- Status/usage: let `/status` and `session_status` fall back to transcript token totals when the session meta store stayed at zero, so LM Studio, Ollama, DashScope, and similar OpenAI-compatible providers stop showing `Context: 0/...`. (#55041) Thanks @jjjojoj.
- Providers/Z.AI: preserve explicitly registered `glm-5-*` variants like `glm-5-turbo` instead of intercepting them with the generic GLM-5 forward-compat shim. (#48185) Thanks @haoyu-haoyu.
- Live model switching: only treat explicit user-driven model changes as pending live switches, so fallback rotation, heartbeat overrides, and compaction no longer trip `LiveSessionModelSwitchError` before making an API call. (#60266) Thanks @kiranvk-2011.
- Plugins/OpenAI: enable `gpt-image-1` reference-image edits through `/images/edits` multipart uploads, and stop inferring unsupported resolution overrides when no explicit `size` or `resolution` is provided.
- Gateway/startup: default `gateway.mode` to `local` when unset, detect PID recycling in gateway lock files on Windows and macOS, and show startup progress so healthy restarts stop getting blocked by stale locks. (#54801, #60085, #59843)
- Mobile pairing/Android: tighten secure endpoint handling so Tailscale and public remote setup reject cleartext endpoints, private LAN pairing still works, merged-role approvals mint both node and operator device tokens, and bootstrap tokens survive node auto-pair until operator approval finishes. (#60128, #60208, #60221)

View File

@@ -29,6 +29,15 @@ vi.mock("./model-selection.js", () => ({
resolvePersistedModelRef: (...args: unknown[]) => state.resolvePersistedModelRefMock(...args),
}));
vi.mock("../config/sessions/store.js", () => ({
loadSessionStore: (...args: unknown[]) => state.loadSessionStoreMock(...args),
updateSessionStore: (...args: unknown[]) => state.updateSessionStoreMock(...args),
}));
vi.mock("../config/sessions/paths.js", () => ({
resolveStorePath: (...args: unknown[]) => state.resolveStorePathMock(...args),
}));
vi.mock("../config/sessions.js", () => ({
loadSessionStore: (...args: unknown[]) => state.loadSessionStoreMock(...args),
resolveStorePath: (...args: unknown[]) => state.resolveStorePathMock(...args),

View File

@@ -664,6 +664,7 @@ describe("session_status tool", () => {
"/tmp/main/sessions.json",
expect.objectContaining({
"agent:main:subagent:child": expect.objectContaining({
liveModelSwitchPending: true,
modelOverride: "claude-sonnet-4-6",
}),
}),
@@ -1407,5 +1408,6 @@ describe("session_status tool", () => {
expect(saved.providerOverride).toBeUndefined();
expect(saved.modelOverride).toBeUndefined();
expect(saved.authProfileOverride).toBeUndefined();
expect(saved.liveModelSwitchPending).toBe(true);
});
});

View File

@@ -426,6 +426,7 @@ export function createSessionStatusTool(opts?: {
model: selection.model,
isDefault: selection.isDefault,
},
markLiveSwitchPending: true,
});
if (applied.updated) {
store[resolved.key] = nextEntry;

View File

@@ -413,14 +413,9 @@ export async function handleDirectiveOnly(
entry: sessionEntry,
selection: modelSelection,
profileOverride,
markLiveSwitchPending: true,
});
modelSelectionUpdated = applied.updated;
if (applied.updated) {
// Signal the embedded runner that this is a user-initiated model
// switch, so it can distinguish it from system-initiated fallback
// rotations and correctly throw LiveSessionModelSwitchError.
sessionEntry.liveModelSwitchPending = true;
}
}
if (directives.hasQueueDirective && directives.queueReset) {
delete sessionEntry.queueMode;

View File

@@ -514,6 +514,7 @@ describe("handleDirectiveOnly model persist behavior (fixes #1435)", () => {
expect(result?.text).toContain("Model set to");
expect(result?.text).toContain("openai/gpt-4o");
expect(result?.text).not.toContain("failed");
expect(sessionEntry.liveModelSwitchPending).toBe(true);
});
it("does not request a live restart when /model mutates an active session", async () => {

View File

@@ -133,11 +133,11 @@ export type SessionEntry = {
authProfileOverrideSource?: "auto" | "user";
authProfileOverrideCompactionCount?: number;
/**
* Set to `true` by the `/model` command when the user explicitly switches
* models during an active run. The embedded runner checks this flag to
* decide whether to throw `LiveSessionModelSwitchError`. System-initiated
* fallbacks (rate-limit retry rotation) never set this flag, so they are
* never mistaken for user-initiated switches.
* Set on explicit user-driven session model changes (for example `/model`
* and `sessions.patch`) during an active run. The embedded runner checks
* this flag to decide whether to throw `LiveSessionModelSwitchError`.
* System-initiated fallbacks (rate-limit retry rotation) never set this
* flag, so they are never mistaken for user-initiated switches.
*/
liveModelSwitchPending?: boolean;
groupActivation?: "mention" | "always";

View File

@@ -233,19 +233,67 @@ describe("gateway sessions patch", () => {
const entry = expectPatchOk(
await runPatch({
store,
patch: { key: MAIN_SESSION_KEY, model: "openai/gpt-5.4" },
patch: { key: MAIN_SESSION_KEY, model: "anthropic/claude-sonnet-4-6" },
loadGatewayModelCatalog: async () => [
{ provider: "openai", id: "gpt-5.4", name: "gpt-5.4" },
{ provider: "anthropic", id: "claude-sonnet-4-6", name: "claude-sonnet-4-6" },
],
}),
);
expect(entry.providerOverride).toBe("openai");
expect(entry.modelOverride).toBe("gpt-5.4");
expect(entry.providerOverride).toBe("anthropic");
expect(entry.modelOverride).toBe("claude-sonnet-4-6");
expect(entry.authProfileOverride).toBeUndefined();
expect(entry.authProfileOverrideSource).toBeUndefined();
expect(entry.authProfileOverrideCompactionCount).toBeUndefined();
});
// Regression test for #60266: an explicit, user-driven model change issued
// through `sessions.patch` must set `liveModelSwitchPending`, so the embedded
// runner can tell a user switch apart from system-initiated fallback rotation
// and correctly raise `LiveSessionModelSwitchError`.
test("marks explicit model patches as pending live model switches", async () => {
// Session starts on openai/gpt-5.4; the patch below moves it to a
// different provider+model, which counts as an explicit switch.
const store: Record<string, SessionEntry> = {
[MAIN_SESSION_KEY]: {
sessionId: "sess-live",
updatedAt: 1,
providerOverride: "openai",
modelOverride: "gpt-5.4",
} as SessionEntry,
};
const entry = expectPatchOk(
await runPatch({
store,
// cfg allowlists the anthropic target so the patch is accepted.
cfg: createAllowlistedAnthropicModelCfg(),
patch: { key: MAIN_SESSION_KEY, model: "anthropic/claude-sonnet-4-6" },
// Catalog must contain both old and new models for ref resolution.
loadGatewayModelCatalog: async () => [
{ provider: "openai", id: "gpt-5.4", name: "gpt-5.4" },
{ provider: "anthropic", id: "claude-sonnet-4-6", name: "claude-sonnet-4-6" },
],
}),
);
// Overrides reflect the new selection, and the pending-switch flag is set.
expect(entry.providerOverride).toBe("anthropic");
expect(entry.modelOverride).toBe("claude-sonnet-4-6");
expect(entry.liveModelSwitchPending).toBe(true);
});
// Companion to the explicit-switch case (#60266): resetting the model back to
// the default (`model: null`) is also a user-driven change, so it must set
// `liveModelSwitchPending` even though the overrides end up cleared.
test("marks model reset patches as pending live model switches", async () => {
// Session starts with explicit anthropic overrides that the reset removes.
const store: Record<string, SessionEntry> = {
[MAIN_SESSION_KEY]: {
sessionId: "sess-live-reset",
updatedAt: 1,
providerOverride: "anthropic",
modelOverride: "claude-sonnet-4-6",
} as SessionEntry,
};
const entry = expectPatchOk(
await runPatch({
store,
cfg: createAllowlistedAnthropicModelCfg(),
// `model: null` requests a reset to the configured default model.
patch: { key: MAIN_SESSION_KEY, model: null },
}),
);
// Overrides are cleared, but the pending live-switch flag still gets set.
expect(entry.providerOverride).toBeUndefined();
expect(entry.modelOverride).toBeUndefined();
expect(entry.liveModelSwitchPending).toBe(true);
});
test.each([
{
name: "accepts explicit allowlisted provider/model refs from sessions.patch",

View File

@@ -376,6 +376,7 @@ export async function applySessionsPatchToStore(params: {
model: resolvedDefault.model,
isDefault: true,
},
markLiveSwitchPending: true,
});
} else if (raw !== undefined) {
const trimmed = String(raw).trim();
@@ -409,6 +410,7 @@ export async function applySessionsPatchToStore(params: {
model: resolved.ref.model,
isDefault,
},
markLiveSwitchPending: true,
});
}
}

View File

@@ -117,4 +117,35 @@ describe("applyModelOverrideToSessionEntry", () => {
expect(entry.contextTokens).toBeUndefined();
expect((entry.updatedAt ?? 0) > before).toBe(true);
});
// Core unit test for the #60266 fix: `applyModelOverrideToSessionEntry` must
// set `liveModelSwitchPending` only when the caller passes
// `markLiveSwitchPending: true`. System-initiated paths (fallback rotation,
// heartbeat overrides, compaction) omit the flag and must leave it unset.
it("sets liveModelSwitchPending only when explicitly requested", () => {
const entry: SessionEntry = {
sessionId: "sess-5",
updatedAt: Date.now() - 5_000,
providerOverride: "anthropic",
modelOverride: "claude-sonnet-4-6",
};
// Case 1: same model change WITHOUT the flag — entry updates, but the
// pending-switch marker must stay undefined.
const withoutFlag = applyModelOverrideToSessionEntry({
entry: { ...entry },
selection: {
provider: "openai",
model: "gpt-5.4",
},
});
expect(withoutFlag.updated).toBe(true);
// NOTE: asserts on the original `entry` (the call received a copy), which
// also proves the helper did not mutate the caller's source object.
expect(entry.liveModelSwitchPending).toBeUndefined();
// Case 2: identical change WITH `markLiveSwitchPending: true` — the flag
// must be set on the mutated entry.
const withFlagEntry: SessionEntry = { ...entry };
const withFlag = applyModelOverrideToSessionEntry({
entry: withFlagEntry,
selection: {
provider: "openai",
model: "gpt-5.4",
},
markLiveSwitchPending: true,
});
expect(withFlag.updated).toBe(true);
expect(withFlagEntry.liveModelSwitchPending).toBe(true);
});
});

View File

@@ -11,6 +11,7 @@ export function applyModelOverrideToSessionEntry(params: {
selection: ModelOverrideSelection;
profileOverride?: string;
profileOverrideSource?: "auto" | "user";
markLiveSwitchPending?: boolean;
}): { updated: boolean } {
const { entry, selection, profileOverride } = params;
const profileOverrideSource = params.profileOverrideSource ?? "user";
@@ -102,6 +103,9 @@ export function applyModelOverrideToSessionEntry(params: {
// Clear stale fallback notice when the user explicitly switches models.
if (updated) {
if (selectionUpdated && params.markLiveSwitchPending) {
entry.liveModelSwitchPending = true;
}
delete entry.fallbackNoticeSelectedModel;
delete entry.fallbackNoticeActiveModel;
delete entry.fallbackNoticeReason;