fix: simplify gateway model startup modes

This commit is contained in:
Peter Steinberger
2026-05-05 00:07:04 +01:00
parent 34f805a012
commit ab032675ce
4 changed files with 95 additions and 19 deletions

View File

@@ -59,7 +59,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Gateway/startup: include the resolved thinking, reasoning, and fast-mode defaults in the `agent model` startup log line so operator logs show which mode new sessions will inherit.
- Gateway/startup: include resolved thinking and fast-mode defaults in the `agent model` startup log line, defaulting unset startup thinking to `medium` without mixing in reasoning visibility.
- Gateway/watch: suppress sync-I/O trace output during `pnpm gateway:watch --benchmark` unless explicitly requested, so CPU profiling no longer floods the terminal with stack traces.
- Gateway/watch: when benchmark sync-I/O tracing is explicitly enabled, tee trace blocks to the benchmark output log and filter them from the terminal pane while keeping normal Gateway logs visible.
- Agents/OpenAI: default direct OpenAI Responses models to the SSE transport instead of WebSocket auto-selection, preventing pi runtime chat turns from hanging on servers where the WebSocket path stalls while the OpenAI HTTP stream works. Thanks @vincentkoc.

View File

@@ -19,12 +19,12 @@ At startup, the Gateway logs the resolved default agent model together with the
mode defaults that affect new sessions, for example:
```text
agent model: openai-codex/gpt-5.5 (thinking=medium, reasoning=off, fast=on)
agent model: openai-codex/gpt-5.5 (thinking=medium, fast=on)
```
`thinking` comes from the default agent, model params, or global agent default;
`reasoning` comes from the default agent or global reasoning default; and `fast`
comes from the default agent or model `fastMode` params.
when it is unset, the startup summary shows `medium`. `fast` comes from the
default agent or model `fastMode` params.
## File-based logger

View File

@@ -49,7 +49,7 @@ describe("gateway startup log", () => {
expect(warn).not.toHaveBeenCalled();
});
it("logs configured model mode defaults with the startup model", () => {
it("logs configured model thinking and fast mode defaults with the startup model", () => {
const info = vi.fn();
const warn = vi.fn();
@@ -78,15 +78,50 @@ describe("gateway startup log", () => {
});
expect(info).toHaveBeenCalledWith(
"agent model: openai-codex/gpt-5.5 (thinking=medium, reasoning=stream, fast=on)",
"agent model: openai-codex/gpt-5.5 (thinking=medium, fast=on)",
expect.objectContaining({
consoleMessage: expect.stringContaining(
"agent model: openai-codex/gpt-5.5 (thinking=medium, reasoning=stream, fast=on)",
"agent model: openai-codex/gpt-5.5 (thinking=medium, fast=on)",
),
}),
);
});
it("defaults unset startup thinking to medium", () => {
expect(
formatAgentModelStartupDetails({
cfg: {
agents: {
defaults: {
model: "openai-codex/gpt-5.5",
},
list: [{ id: "main", default: true, fastModeDefault: true }],
},
},
provider: "openai-codex",
model: "gpt-5.5",
}),
).toBe("thinking=medium, fast=on");
});
it("preserves explicit startup thinking off", () => {
expect(
formatAgentModelStartupDetails({
cfg: {
agents: {
defaults: {
models: {
"openai-codex/gpt-5.5": { params: { thinking: "off", fastMode: true } },
},
},
},
},
provider: "openai-codex",
model: "gpt-5.5",
}),
).toBe("thinking=off, fast=on");
});
it("uses default agent mode overrides in the startup model details", () => {
expect(
formatAgentModelStartupDetails({
@@ -105,7 +140,7 @@ describe("gateway startup log", () => {
provider: "openai",
model: "gpt-5.5",
}),
).toBe("thinking=high, reasoning=off, fast=on");
).toBe("thinking=high, fast=on");
});
it("logs a compact listening line with loaded plugin ids and duration", () => {

View File

@@ -4,13 +4,24 @@ import { DEFAULT_MODEL, DEFAULT_PROVIDER } from "../agents/defaults.js";
import { resolveFastModeState } from "../agents/fast-mode.js";
import {
resolveConfiguredModelRef,
resolveReasoningDefault,
resolveThinkingDefault,
legacyModelKey,
modelKey,
} from "../agents/model-selection.js";
import type { OpenClawConfig } from "../config/types.openclaw.js";
import { getResolvedLoggerSettings } from "../logging.js";
import { collectEnabledInsecureOrDangerousFlags } from "../security/dangerous-config-flags.js";
// Thinking-effort levels recognized when summarizing startup model modes;
// any other value is treated as unset (see normalizeStartupThinkLevel).
type StartupThinkLevel =
| "off"
| "minimal"
| "low"
| "medium"
| "high"
| "xhigh"
| "adaptive"
| "max";
export function logGatewayStartup(params: {
cfg: OpenClawConfig;
bindHost: string;
@@ -57,6 +68,36 @@ export function logGatewayStartup(params: {
}
}
/**
 * Narrow an arbitrary config value to a known startup thinking level.
 * Returns the value itself when it is one of the recognized level strings,
 * otherwise `undefined` (the caller treats that as "unset").
 */
function normalizeStartupThinkLevel(value: unknown): StartupThinkLevel | undefined {
const levels = [
  "off",
  "minimal",
  "low",
  "medium",
  "high",
  "xhigh",
  "adaptive",
  "max",
] as const;
// Membership test against the canonical list replaces the long `===` chain.
if ((levels as readonly unknown[]).includes(value)) {
  return value as StartupThinkLevel;
}
return undefined;
}
/**
 * Resolve an explicitly configured startup thinking level, checking sources
 * in priority order: the default agent's own setting, the canonical
 * per-model params, the legacy per-model params, then the global agents
 * default. Returns `undefined` when none of them carries a recognized level.
 */
function resolveExplicitStartupThinking(params: {
cfg: OpenClawConfig;
provider: string;
model: string;
defaultAgentThinking: unknown;
}): StartupThinkLevel | undefined {
const { cfg, provider, model, defaultAgentThinking } = params;
const models = cfg.agents?.defaults?.models;
const legacyKey = legacyModelKey(provider, model);
// Candidates listed from highest to lowest priority; the first one that
// normalizes to a known level wins.
const candidates: unknown[] = [
  defaultAgentThinking,
  models?.[modelKey(provider, model)]?.params?.thinking,
  legacyKey ? models?.[legacyKey]?.params?.thinking : undefined,
  cfg.agents?.defaults?.thinkingDefault,
];
for (const candidate of candidates) {
  const level = normalizeStartupThinkLevel(candidate);
  if (level !== undefined) {
    return level;
  }
}
return undefined;
}
export function formatAgentModelStartupDetails(params: {
cfg: OpenClawConfig;
provider: string;
@@ -64,20 +105,20 @@ export function formatAgentModelStartupDetails(params: {
}): string {
const defaultAgentId = resolveDefaultAgentId(params.cfg);
const defaultAgentConfig = resolveAgentConfig(params.cfg, defaultAgentId);
const thinking =
defaultAgentConfig?.thinkingDefault ??
const explicitThinking = resolveExplicitStartupThinking({
cfg: params.cfg,
provider: params.provider,
model: params.model,
defaultAgentThinking: defaultAgentConfig?.thinkingDefault,
});
const resolvedThinking =
explicitThinking ??
resolveThinkingDefault({
cfg: params.cfg,
provider: params.provider,
model: params.model,
});
const reasoning =
defaultAgentConfig?.reasoningDefault ??
params.cfg.agents?.defaults?.reasoningDefault ??
resolveReasoningDefault({
provider: params.provider,
model: params.model,
});
const thinking = explicitThinking ?? (resolvedThinking === "off" ? "medium" : resolvedThinking);
const fast = resolveFastModeState({
cfg: params.cfg,
provider: params.provider,
@@ -85,7 +126,7 @@ export function formatAgentModelStartupDetails(params: {
agentId: defaultAgentId,
});
return `thinking=${thinking}, reasoning=${reasoning}, fast=${fast.enabled ? "on" : "off"}`;
return `thinking=${thinking}, fast=${fast.enabled ? "on" : "off"}`;
}
function formatReadyDetails(