mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-06 07:50:43 +00:00
fix: honor subagent spawn model overrides
This commit is contained in:
@@ -40,6 +40,7 @@ Docs: https://docs.openclaw.ai
 ### Fixes
 
 - Gateway/startup: keep value-option foreground starts on the gateway fast path and skip proxy bootstrap unless proxy env is configured, reducing normal gateway startup RSS and avoiding full CLI graph loading. Thanks @vincentkoc.
+- Subagents/models: persist `sessions_spawn.model` and configured subagent models as child-session model overrides before the first turn, so spawned subagents actually run on the requested provider/model instead of reverting to the target agent default. Fixes #73180. Thanks @danielzinhu99.
 - Backup: skip installed plugin `extensions/*/node_modules` dependency trees while keeping plugin manifests and source files in archives, so local backups avoid rebuildable npm payload bloat. Fixes #64144. Thanks @BrilliantWang.
 - Cron/models: fail isolated cron runs closed when an explicit `payload.model` is not allowed or cannot be resolved, so scheduled jobs do not silently fall back to an unrelated agent default or paid route before configured provider proxies such as LiteLLM can run. Fixes #73146. Thanks @oneandrewwang.
 - Memory/QMD: back off repeated chat-turn QMD open failures while still letting memory status and CLI probes recheck immediately, so a broken sidecar dependency cannot trigger active-memory or cron retry storms. Fixes #73188 and #73176. Thanks @leonlushgit and @w3i-William.
@@ -4,6 +4,7 @@ import { DEFAULT_MODEL, DEFAULT_PROVIDER } from "./defaults.js";
 import {
   resolveConfiguredSubagentRunTimeoutSeconds,
   resolveSubagentModelAndThinkingPlan,
+  splitModelRef,
 } from "./subagent-spawn-plan.js";
 
 function createConfig(overrides?: Record<string, unknown>): OpenClawConfig {
@@ -26,10 +27,18 @@ describe("subagent spawn model + thinking plan", () => {
       modelApplied: true,
       initialSessionPatch: {
         model: "claude-haiku-4-5",
+        modelOverrideSource: "user",
       },
     });
   });
 
+  it("preserves model ids containing slashes", () => {
+    expect(splitModelRef("openrouter/meta-llama/llama-3.3-70b:free")).toEqual({
+      provider: "openrouter",
+      model: "meta-llama/llama-3.3-70b:free",
+    });
+  });
+
   it("normalizes thinking overrides into the initial patch", () => {
     const plan = resolveSubagentModelAndThinkingPlan({
       cfg: createConfig(),
@@ -69,7 +78,7 @@ describe("subagent spawn model + thinking plan", () => {
     expect(plan).toMatchObject({
       status: "ok",
       resolvedModel: "minimax/MiniMax-M2.7",
-      initialSessionPatch: { model: "minimax/MiniMax-M2.7" },
+      initialSessionPatch: { model: "minimax/MiniMax-M2.7", modelOverrideSource: "auto" },
     });
   });
 
@@ -11,7 +11,14 @@ export function splitModelRef(ref?: string) {
   if (!trimmed) {
     return { provider: undefined, model: undefined };
   }
-  const [provider, model] = trimmed.split("/", 2);
+  const slash = trimmed.indexOf("/");
+  if (slash > 0 && slash < trimmed.length - 1) {
+    const provider = trimmed.slice(0, slash);
+    const model = trimmed.slice(slash + 1);
+    return { provider, model };
+  }
+  const provider = undefined;
+  const model = trimmed;
   if (model) {
     return { provider, model };
   }
@@ -66,7 +73,12 @@ export function resolveSubagentModelAndThinkingPlan(params: {
     modelApplied: Boolean(resolvedModel),
     thinkingOverride: thinkingPlan.thinkingOverride,
     initialSessionPatch: {
-      ...(resolvedModel ? { model: resolvedModel } : {}),
+      ...(resolvedModel
+        ? {
+            model: resolvedModel,
+            modelOverrideSource: params.modelOverride?.trim() ? "user" : "auto",
+          }
+        : {}),
       ...thinkingPlan.initialSessionPatch,
     },
   };
@@ -89,6 +89,7 @@ describe("spawnSubagentDirect runtime model persistence", () => {
       sessionKey: /^agent:main:subagent:/,
       provider: "openai-codex",
       model: "gpt-5.4",
+      overrideSource: "user",
     });
     expect(pruneLegacyStoreKeysMock).toHaveBeenCalledTimes(3);
     expect(operations.indexOf("store:update")).toBeGreaterThan(-1);
@@ -97,6 +97,7 @@ export function expectPersistedRuntimeModel(params: {
   sessionKey: string | RegExp;
   provider: string;
   model: string;
+  overrideSource?: "auto" | "user";
 }) {
   const [persistedKey, persistedEntry] = Object.entries(params.persistedStore ?? {})[0] ?? [];
   if (typeof params.sessionKey === "string") {
@@ -107,6 +108,9 @@ export function expectPersistedRuntimeModel(params: {
   expect(persistedEntry).toMatchObject({
     modelProvider: params.provider,
     model: params.model,
+    providerOverride: params.provider,
+    modelOverride: params.model,
+    ...(params.overrideSource ? { modelOverrideSource: params.overrideSource } : {}),
   });
 }
 
@@ -239,6 +239,7 @@ describe("spawnSubagentDirect seam flow", () => {
       sessionKey: childSessionKey,
       provider: "openai-codex",
      model: "gpt-5.4",
+      overrideSource: "user",
    });
    expect(operations.indexOf("store:update")).toBeGreaterThan(-1);
    expect(operations.indexOf("gateway:agent")).toBeGreaterThan(
@@ -241,8 +241,11 @@ function buildDirectChildSessionPatch(patch: Record<string, unknown>): Partial<S
   const { provider, model } = splitModelRef(patch.model.trim());
   if (model) {
     entry.model = model;
+    entry.modelOverride = model;
+    entry.modelOverrideSource = patch.modelOverrideSource === "auto" ? "auto" : "user";
     if (provider) {
       entry.modelProvider = provider;
+      entry.providerOverride = provider;
     }
   }
 }
Reference in New Issue
Block a user