fix: log gateway model mode defaults

This commit is contained in:
Peter Steinberger
2026-05-04 23:52:58 +01:00
parent c3c7c2df6f
commit 03f7e26d54
4 changed files with 117 additions and 4 deletions

View File

@@ -59,6 +59,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Gateway/startup: include the resolved thinking, reasoning, and fast-mode defaults in the `agent model` startup log line so operator logs show which modes new sessions will inherit.
- Gateway/watch: suppress sync-I/O trace output during `pnpm gateway:watch --benchmark` unless explicitly requested, so CPU profiling no longer floods the terminal with stack traces.
- Gateway/watch: when benchmark sync-I/O tracing is explicitly enabled, tee trace blocks to the benchmark output log and filter them from the terminal pane while keeping normal Gateway logs visible.
- Agents/OpenAI: default direct OpenAI Responses models to the SSE transport instead of WebSocket auto-selection, preventing pi runtime chat turns from hanging on servers where the WebSocket path stalls while the OpenAI HTTP stream works. Thanks @vincentkoc.

View File

@@ -15,6 +15,17 @@ OpenClaw has two log “surfaces”:
- **Console output** (what you see in the terminal / Debug UI).
- **File logs** (JSON lines) written by the gateway logger.
At startup, the Gateway logs the resolved default agent model together with the
mode defaults that affect new sessions, for example:
```text
agent model: openai-codex/gpt-5.5 (thinking=medium, reasoning=off, fast=on)
```
Each value is resolved in order of precedence: `thinking` comes from the default
agent, the model params, or the global agent default; `reasoning` comes from the
default agent or the global reasoning default; and `fast` comes from the default
agent or the model's `fastMode` params.
## File-based logger
- Default rolling log file is under `/tmp/openclaw/` (one file per day): `openclaw-YYYY-MM-DD.log`

View File

@@ -1,5 +1,5 @@
import { afterEach, describe, expect, it, vi } from "vitest";
import { logGatewayStartup } from "./server-startup-log.js";
import { formatAgentModelStartupDetails, logGatewayStartup } from "./server-startup-log.js";
describe("gateway startup log", () => {
afterEach(() => {
@@ -49,6 +49,65 @@ describe("gateway startup log", () => {
expect(warn).not.toHaveBeenCalled();
});
it("logs configured model mode defaults with the startup model", () => {
  const info = vi.fn();
  const warn = vi.fn();
  // The same resolved line must appear in both the file-log message and the
  // console message, so name it once instead of repeating the literal.
  const expectedLine =
    "agent model: openai-codex/gpt-5.5 (thinking=medium, reasoning=stream, fast=on)";
  logGatewayStartup({
    cfg: {
      agents: {
        defaults: {
          model: "openai-codex/gpt-5.5",
          models: {
            "openai-codex/gpt-5.5": {
              params: { fastMode: true, thinking: "medium" },
            },
          },
          reasoningDefault: "stream",
        },
      },
    },
    bindHost: "127.0.0.1",
    loadedPluginIds: [],
    port: 18789,
    log: { info, warn },
    isNixMode: false,
  });
  expect(info).toHaveBeenCalledWith(
    expectedLine,
    expect.objectContaining({
      consoleMessage: expect.stringContaining(expectedLine),
    }),
  );
});
it("uses default agent mode overrides in the startup model details", () => {
  // The default agent's own thinking/fast-mode overrides must beat both the
  // global defaults and the model params.
  const cfg = {
    agents: {
      defaults: {
        thinkingDefault: "low",
        reasoningDefault: "off",
        models: {
          "openai/gpt-5.5": { params: { fastMode: false } },
        },
      },
      list: [
        { id: "alpha", default: true, thinkingDefault: "high", fastModeDefault: true },
      ],
    },
  };
  const details = formatAgentModelStartupDetails({
    cfg,
    provider: "openai",
    model: "gpt-5.5",
  });
  expect(details).toBe("thinking=high, reasoning=off, fast=on");
});
it("logs a compact listening line with loaded plugin ids and duration", () => {
vi.useFakeTimers();
vi.setSystemTime(new Date("2026-04-03T10:00:16.000Z"));

View File

@@ -1,6 +1,12 @@
import chalk from "chalk";
import { resolveDefaultAgentId, resolveAgentConfig } from "../agents/agent-scope.js";
import { DEFAULT_MODEL, DEFAULT_PROVIDER } from "../agents/defaults.js";
import { resolveConfiguredModelRef } from "../agents/model-selection.js";
import { resolveFastModeState } from "../agents/fast-mode.js";
import {
resolveConfiguredModelRef,
resolveReasoningDefault,
resolveThinkingDefault,
} from "../agents/model-selection.js";
import type { OpenClawConfig } from "../config/types.openclaw.js";
import { getResolvedLoggerSettings } from "../logging.js";
import { collectEnabledInsecureOrDangerousFlags } from "../security/dangerous-config-flags.js";
@@ -22,8 +28,13 @@ export function logGatewayStartup(params: {
defaultModel: DEFAULT_MODEL,
});
const modelRef = `${agentProvider}/${agentModel}`;
params.log.info(`agent model: ${modelRef}`, {
consoleMessage: `agent model: ${chalk.whiteBright(modelRef)}`,
const modelDetails = formatAgentModelStartupDetails({
cfg: params.cfg,
provider: agentProvider,
model: agentModel,
});
params.log.info(`agent model: ${modelRef} (${modelDetails})`, {
consoleMessage: `agent model: ${chalk.whiteBright(modelRef)} (${modelDetails})`,
});
const startupDurationMs =
typeof params.startupStartedAt === "number" ? Date.now() - params.startupStartedAt : null;
@@ -46,6 +57,37 @@ export function logGatewayStartup(params: {
}
}
export function formatAgentModelStartupDetails(params: {
cfg: OpenClawConfig;
provider: string;
model: string;
}): string {
const defaultAgentId = resolveDefaultAgentId(params.cfg);
const defaultAgentConfig = resolveAgentConfig(params.cfg, defaultAgentId);
const thinking =
defaultAgentConfig?.thinkingDefault ??
resolveThinkingDefault({
cfg: params.cfg,
provider: params.provider,
model: params.model,
});
const reasoning =
defaultAgentConfig?.reasoningDefault ??
params.cfg.agents?.defaults?.reasoningDefault ??
resolveReasoningDefault({
provider: params.provider,
model: params.model,
});
const fast = resolveFastModeState({
cfg: params.cfg,
provider: params.provider,
model: params.model,
agentId: defaultAgentId,
});
return `thinking=${thinking}, reasoning=${reasoning}, fast=${fast.enabled ? "on" : "off"}`;
}
function formatReadyDetails(
loadedPluginIds: readonly string[],
startupDurationLabel: string | null,