feat(agents): add provider-owned system prompt contributions

This commit is contained in:
Peter Steinberger
2026-04-05 14:05:35 +01:00
parent 1a7c2a9bc8
commit 760c4be438
15 changed files with 413 additions and 59 deletions

View File

@@ -24,6 +24,7 @@ Docs: https://docs.openclaw.ai
- MiniMax/TTS: add a bundled MiniMax speech provider backed by the T2A v2 API so speech synthesis can run through MiniMax-native voices and auth. (#55921) Thanks @duncanita.
- Plugins/install: add `openclaw plugins install --force` to overwrite existing plugin and hook-pack install targets without using the dangerous-code override flag. (#60544) Thanks @gumadeiras.
- Plugins/onboarding: add plugin config TUI prompts to onboard and configure wizards so more plugin setup can stay in the guided flow. (#60590) Thanks @odysseus0.
- Providers/OpenAI: move GPT-5 prompt tuning onto provider-owned system-prompt contributions so cache-stable guidance stays above the prompt cache boundary and embedded runner paths reuse the same provider-specific prompt behavior.
- Prompt caching: keep prompt prefixes more reusable across transport fallback, deterministic MCP tool ordering, compaction, and embedded image history so follow-up turns hit cache more reliably. (#58036, #58037, #58038, #59054, #60603, #60691) Thanks @bcherny.
- Providers/Amazon Bedrock: discover regional and global inference profiles, inherit their backing model capabilities, and inject the Bedrock request region automatically so cross-region Claude profiles work without manual provider overrides. (#61299) Thanks @wirjo.
- Providers/Amazon Bedrock Mantle: add a bundled OpenAI-compatible Mantle provider with bearer-token discovery, automatic OSS model catalog loading, and Bedrock Mantle region detection for hosted GPT-OSS, Qwen, Kimi, GLM, and similar routes. (#61296) Thanks @wirjo.

View File

@@ -12,6 +12,18 @@ OpenClaw builds a custom system prompt for every agent run. The prompt is **Open
The prompt is assembled by OpenClaw and injected into each agent run.
Provider plugins can contribute cache-aware prompt guidance without replacing
the full OpenClaw-owned prompt. The provider runtime can:
- replace a small set of named core sections (`interaction_style`,
`tool_call_style`, `execution_bias`)
- inject a **stable prefix** above the prompt cache boundary
- inject a **dynamic suffix** below the prompt cache boundary
Use provider-owned contributions for model-family-specific tuning. Keep legacy
`before_prompt_build` prompt mutation for compatibility or truly global prompt
changes, not normal provider behavior.
## Structure
The prompt is intentionally compact and uses fixed sections:

View File

@@ -513,6 +513,13 @@ API key auth, and dynamic model resolution.
| 42 | `validateReplayTurns` | Strict replay-turn validation before the embedded runner |
| 43 | `onModelSelected` | Post-selection callback (e.g. telemetry) |
Prompt tuning note:
- `resolveSystemPromptContribution` lets a provider inject cache-aware
system-prompt guidance for a model family. Prefer it over
`before_prompt_build` when the behavior belongs to one provider/model
family and should preserve the stable/dynamic cache split.
For detailed descriptions and real-world examples, see
[Internals: Provider Runtime Hooks](/plugins/architecture#provider-runtime-hooks).
</Accordion>

View File

@@ -1,5 +1,6 @@
import type { OpenClawConfig } from "openclaw/plugin-sdk/config-runtime";
import * as providerAuth from "openclaw/plugin-sdk/provider-auth-runtime";
import type { ProviderPlugin } from "openclaw/plugin-sdk/provider-model-shared";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { createTestPluginApi } from "../../test/helpers/plugins/plugin-api.js";
import {
@@ -8,7 +9,11 @@ import {
} from "../../test/helpers/plugins/provider-registration.js";
import { buildOpenAIImageGenerationProvider } from "./image-generation-provider.js";
import plugin from "./index.js";
import { OPENAI_FRIENDLY_PROMPT_OVERLAY } from "./prompt-overlay.js";
import {
OPENAI_FRIENDLY_PROMPT_OVERLAY,
OPENAI_GPT5_EXECUTION_BIAS,
OPENAI_GPT5_OUTPUT_CONTRACT,
} from "./prompt-overlay.js";
const runtimeMocks = vi.hoisted(() => ({
ensureGlobalUndiciEnvProxyDispatcher: vi.fn(),
@@ -40,6 +45,7 @@ const registerOpenAIPlugin = async () =>
async function registerOpenAIPluginWithHook(params?: { pluginConfig?: Record<string, unknown> }) {
const on = vi.fn();
const providers: ProviderPlugin[] = [];
await plugin.register(
createTestPluginApi({
id: "openai",
@@ -49,9 +55,12 @@ async function registerOpenAIPluginWithHook(params?: { pluginConfig?: Record<str
runtime: {} as never,
pluginConfig: params?.pluginConfig,
on,
registerProvider: (provider) => {
providers.push(provider);
},
}),
);
return { on };
return { on, providers };
}
describe("openai plugin", () => {
@@ -240,36 +249,51 @@ describe("openai plugin", () => {
).toBeLessThan(runtimeMocks.refreshOpenAICodexToken.mock.invocationCallOrder[0]);
});
it("registers the friendly prompt overlay by default and scopes it to OpenAI providers", async () => {
const { on } = await registerOpenAIPluginWithHook();
it("registers GPT-5 system prompt contributions on OpenAI providers by default", async () => {
const { on, providers } = await registerOpenAIPluginWithHook();
expect(on).toHaveBeenCalledWith("before_prompt_build", expect.any(Function));
const beforePromptBuild = on.mock.calls.find((call) => call[0] === "before_prompt_build")?.[1];
const openaiResult = await beforePromptBuild?.(
{ prompt: "hello", messages: [] },
{ modelProviderId: "openai", modelId: "gpt-5.4" },
);
expect(openaiResult).toEqual({
appendSystemContext: OPENAI_FRIENDLY_PROMPT_OVERLAY,
expect(on).not.toHaveBeenCalledWith("before_prompt_build", expect.any(Function));
const openaiProvider = requireRegisteredProvider(providers, "openai");
const codexProvider = requireRegisteredProvider(providers, "openai-codex");
const contributionContext: Parameters<
NonNullable<ProviderPlugin["resolveSystemPromptContribution"]>
>[0] = {
config: undefined,
agentDir: undefined,
workspaceDir: undefined,
provider: "openai",
modelId: "gpt-5.4",
promptMode: "full",
runtimeChannel: undefined,
runtimeCapabilities: undefined,
agentId: undefined,
};
expect(openaiProvider.resolveSystemPromptContribution?.(contributionContext)).toEqual({
stablePrefix: OPENAI_GPT5_OUTPUT_CONTRACT,
sectionOverrides: {
interaction_style: OPENAI_FRIENDLY_PROMPT_OVERLAY,
execution_bias: OPENAI_GPT5_EXECUTION_BIAS,
},
});
expect(OPENAI_FRIENDLY_PROMPT_OVERLAY).toContain("This is a live chat, not a memo.");
expect(OPENAI_FRIENDLY_PROMPT_OVERLAY).toContain(
"Avoid walls of text, long preambles, and repetitive restatement.",
);
const codexResult = await beforePromptBuild?.(
{ prompt: "hello", messages: [] },
{ modelProviderId: "openai-codex", modelId: "gpt-5.4" },
);
expect(codexResult).toEqual({
appendSystemContext: OPENAI_FRIENDLY_PROMPT_OVERLAY,
expect(codexProvider.resolveSystemPromptContribution?.(contributionContext)).toEqual({
stablePrefix: OPENAI_GPT5_OUTPUT_CONTRACT,
sectionOverrides: {
interaction_style: OPENAI_FRIENDLY_PROMPT_OVERLAY,
execution_bias: OPENAI_GPT5_EXECUTION_BIAS,
},
});
const nonOpenAIResult = await beforePromptBuild?.(
{ prompt: "hello", messages: [] },
{ modelProviderId: "anthropic", modelId: "sonnet-4.6" },
);
expect(nonOpenAIResult).toBeUndefined();
expect(
openaiProvider.resolveSystemPromptContribution?.({
...contributionContext,
modelId: "gpt-image-1",
}),
).toBeUndefined();
});
it("includes stronger execution guidance in the OpenAI prompt overlay", () => {
@@ -282,13 +306,38 @@ describe("openai plugin", () => {
expect(OPENAI_FRIENDLY_PROMPT_OVERLAY).toContain(
"Commentary-only turns are incomplete when the next action is clear.",
);
expect(OPENAI_GPT5_EXECUTION_BIAS).toContain(
"Do prerequisite lookup or discovery before dependent actions.",
);
expect(OPENAI_GPT5_OUTPUT_CONTRACT).toContain(
"Return the requested sections only, in the requested order.",
);
});
it("supports opting out of the prompt overlay via plugin config", async () => {
const { on } = await registerOpenAIPluginWithHook({
const { on, providers } = await registerOpenAIPluginWithHook({
pluginConfig: { personalityOverlay: "off" },
});
expect(on).not.toHaveBeenCalledWith("before_prompt_build", expect.any(Function));
const openaiProvider = requireRegisteredProvider(providers, "openai");
expect(
openaiProvider.resolveSystemPromptContribution?.({
config: undefined,
agentDir: undefined,
workspaceDir: undefined,
provider: "openai",
modelId: "gpt-5.4",
promptMode: "full",
runtimeChannel: undefined,
runtimeCapabilities: undefined,
agentId: undefined,
}),
).toEqual({
stablePrefix: OPENAI_GPT5_OUTPUT_CONTRACT,
sectionOverrides: {
execution_bias: OPENAI_GPT5_EXECUTION_BIAS,
},
});
});
});

View File

@@ -8,9 +8,8 @@ import {
import { buildOpenAICodexProviderPlugin } from "./openai-codex-provider.js";
import { buildOpenAIProvider } from "./openai-provider.js";
import {
OPENAI_FRIENDLY_PROMPT_OVERLAY,
resolveOpenAIPromptOverlayMode,
shouldApplyOpenAIPromptOverlay,
resolveOpenAISystemPromptContribution,
} from "./prompt-overlay.js";
import { buildOpenAIRealtimeTranscriptionProvider } from "./realtime-transcription-provider.js";
import { buildOpenAIRealtimeVoiceProvider } from "./realtime-voice-provider.js";
@@ -22,24 +21,29 @@ export default definePluginEntry({
description: "Bundled OpenAI provider plugins",
register(api) {
const promptOverlayMode = resolveOpenAIPromptOverlayMode(api.pluginConfig);
// Wrap a bundled OpenAI provider so it exposes the shared GPT-5
// system-prompt contribution. The overlay mode captured from plugin
// config decides whether the friendly interaction-style override is
// included; provider.id scopes the contribution to this provider.
const buildProviderWithPromptContribution = <
T extends
| ReturnType<typeof buildOpenAIProvider>
| ReturnType<typeof buildOpenAICodexProviderPlugin>,
>(
provider: T,
): T => ({
...provider,
resolveSystemPromptContribution: (ctx) =>
resolveOpenAISystemPromptContribution({
mode: promptOverlayMode,
modelProviderId: provider.id,
modelId: ctx.modelId,
}),
});
api.registerCliBackend(buildOpenAICodexCliBackend());
api.registerProvider(buildOpenAIProvider());
api.registerProvider(buildOpenAICodexProviderPlugin());
api.registerProvider(buildProviderWithPromptContribution(buildOpenAIProvider()));
api.registerProvider(buildProviderWithPromptContribution(buildOpenAICodexProviderPlugin()));
api.registerImageGenerationProvider(buildOpenAIImageGenerationProvider());
api.registerRealtimeTranscriptionProvider(buildOpenAIRealtimeTranscriptionProvider());
api.registerRealtimeVoiceProvider(buildOpenAIRealtimeVoiceProvider());
api.registerSpeechProvider(buildOpenAISpeechProvider());
api.registerMediaUnderstandingProvider(openaiMediaUnderstandingProvider);
api.registerMediaUnderstandingProvider(openaiCodexMediaUnderstandingProvider);
if (promptOverlayMode !== "off") {
api.on("before_prompt_build", (_event, ctx) =>
shouldApplyOpenAIPromptOverlay({
mode: promptOverlayMode,
modelProviderId: ctx.modelProviderId,
})
? { appendSystemContext: OPENAI_FRIENDLY_PROMPT_OVERLAY }
: undefined,
);
}
},
});

View File

@@ -1,4 +1,5 @@
const OPENAI_PROVIDER_IDS = new Set(["openai", "openai-codex"]);
const OPENAI_GPT5_MODEL_PREFIX = "gpt-5";
export const OPENAI_FRIENDLY_PROMPT_OVERLAY = `## Interaction Style
@@ -21,6 +22,20 @@ Default to short natural replies unless the user asks for depth.
Avoid walls of text, long preambles, and repetitive restatement.
Keep replies concise by default; friendly does not mean verbose.`;
export const OPENAI_GPT5_OUTPUT_CONTRACT = `## GPT-5 Output Contract
Return the requested sections only, in the requested order.
Prefer terse answers by default; expand only when depth materially helps.
Avoid restating large internal plans when the next action is already clear.`;
export const OPENAI_GPT5_EXECUTION_BIAS = `## Execution Bias
Start the real work in the same turn when the next step is clear.
Do prerequisite lookup or discovery before dependent actions.
If another tool call would likely improve correctness or completeness, keep going instead of stopping at partial progress.
Multi-part requests stay incomplete until every requested item is handled or clearly marked blocked.
Before the final answer, quickly verify correctness, coverage, formatting, and obvious side effects.`;
export type OpenAIPromptOverlayMode = "friendly" | "off";
export function resolveOpenAIPromptOverlayMode(
@@ -30,8 +45,34 @@ export function resolveOpenAIPromptOverlayMode(
}
/**
 * Decide whether the OpenAI prompt overlay applies to a provider/model pair.
 *
 * Only OpenAI-owned providers qualify, and only for GPT-5-family model ids,
 * matched case-insensitively by prefix after trimming whitespace.
 */
export function shouldApplyOpenAIPromptOverlay(params: {
  modelProviderId?: string;
  modelId?: string;
}): boolean {
  const providerId = params.modelProviderId ?? "";
  const modelId = (params.modelId ?? "").trim().toLowerCase();
  return OPENAI_PROVIDER_IDS.has(providerId) && modelId.startsWith(OPENAI_GPT5_MODEL_PREFIX);
}
export function resolveOpenAISystemPromptContribution(params: {
mode: OpenAIPromptOverlayMode;
modelProviderId?: string;
}): boolean {
return params.mode === "friendly" && OPENAI_PROVIDER_IDS.has(params.modelProviderId ?? "");
modelId?: string;
}) {
if (
!shouldApplyOpenAIPromptOverlay({
modelProviderId: params.modelProviderId,
modelId: params.modelId,
})
) {
return undefined;
}
return {
stablePrefix: OPENAI_GPT5_OUTPUT_CONTRACT,
sectionOverrides: {
execution_bias: OPENAI_GPT5_EXECUTION_BIAS,
...(params.mode === "friendly" ? { interaction_style: OPENAI_FRIENDLY_PROMPT_OVERLAY } : {}),
},
};
}

View File

@@ -18,7 +18,10 @@ import {
import { getMachineDisplayName } from "../../infra/machine-name.js";
import { generateSecureToken } from "../../infra/secure-random.js";
import { getGlobalHookRunner } from "../../plugins/hook-runner-global.js";
import { prepareProviderRuntimeAuth } from "../../plugins/provider-runtime.js";
import {
prepareProviderRuntimeAuth,
resolveProviderSystemPromptContribution,
} from "../../plugins/provider-runtime.js";
import type { ProviderRuntimeModel } from "../../plugins/types.js";
import { type enqueueCommand, enqueueCommandInLane } from "../../process/command-queue.js";
import { isCronSessionKey, isSubagentSessionKey } from "../../routing/session-key.js";
@@ -648,6 +651,22 @@ export async function compactEmbeddedPiSessionDirect(
});
const ttsHint = params.config ? buildTtsSystemPromptHint(params.config) : undefined;
const ownerDisplay = resolveOwnerDisplaySetting(params.config);
const promptContribution = resolveProviderSystemPromptContribution({
provider,
config: params.config,
workspaceDir: effectiveWorkspace,
context: {
config: params.config,
agentDir,
workspaceDir: effectiveWorkspace,
provider,
modelId,
promptMode,
runtimeChannel,
runtimeCapabilities,
agentId: sessionAgentId,
},
});
const buildSystemPromptOverride = (defaultThinkLevel: ThinkLevel) =>
createSystemPromptOverride(
buildEmbeddedSystemPrompt({
@@ -678,6 +697,7 @@ export async function compactEmbeddedPiSessionDirect(
userTimeFormat,
contextFiles,
memoryCitationsMode: params.config?.memory?.citations,
promptContribution,
}),
);

View File

@@ -22,6 +22,7 @@ import {
import { MAX_IMAGE_BYTES } from "../../../media/constants.js";
import { getGlobalHookRunner } from "../../../plugins/hook-runner-global.js";
import { resolveToolCallArgumentsEncoding } from "../../../plugins/provider-model-compat.js";
import { resolveProviderSystemPromptContribution } from "../../../plugins/provider-runtime.js";
import { isSubagentSessionKey } from "../../../routing/session-key.js";
import { buildTtsSystemPromptHint } from "../../../tts/tts.js";
import { resolveUserPath } from "../../../utils.js";
@@ -656,6 +657,22 @@ export async function runEmbeddedAttempt(
})
? resolveHeartbeatPrompt(params.config?.agents?.defaults?.heartbeat?.prompt)
: undefined;
const promptContribution = resolveProviderSystemPromptContribution({
provider: params.provider,
config: params.config,
workspaceDir: effectiveWorkspace,
context: {
config: params.config,
agentDir: params.agentDir,
workspaceDir: effectiveWorkspace,
provider: params.provider,
modelId: params.modelId,
promptMode: effectivePromptMode,
runtimeChannel,
runtimeCapabilities,
agentId: sessionAgentId,
},
});
const appendPrompt = buildEmbeddedSystemPrompt({
workspaceDir: effectiveWorkspace,
@@ -684,6 +701,7 @@ export async function runEmbeddedAttempt(
userTimeFormat,
contextFiles,
memoryCitationsMode: params.config?.memory?.citations,
promptContribution,
});
const systemPromptReport = buildSystemPromptReport({
source: "run",

View File

@@ -1,6 +1,10 @@
import type { AgentSession } from "@mariozechner/pi-coding-agent";
import { describe, expect, it } from "vitest";
import { applySystemPromptOverrideToSession, createSystemPromptOverride } from "./system-prompt.js";
import {
applySystemPromptOverrideToSession,
buildEmbeddedSystemPrompt,
createSystemPromptOverride,
} from "./system-prompt.js";
type MutableSession = {
_baseSystemPrompt?: string;
@@ -61,3 +65,28 @@ describe("applySystemPromptOverrideToSession", () => {
expect(mutable._rebuildSystemPrompt?.(["tool1"])).toBe("rebuild test");
});
});
// Verifies that a provider-owned prompt contribution survives the embedded
// wrapper: buildEmbeddedSystemPrompt must forward `promptContribution`
// through to the underlying prompt builder so the stable prefix appears in
// the rendered prompt text.
describe("buildEmbeddedSystemPrompt", () => {
it("forwards provider prompt contributions into the embedded prompt", () => {
// Minimal required inputs; only promptContribution matters for this test.
const prompt = buildEmbeddedSystemPrompt({
workspaceDir: "/tmp/openclaw",
reasoningTagHint: false,
runtimeInfo: {
host: "local",
os: "darwin",
arch: "arm64",
node: process.version,
model: "gpt-5.4",
provider: "openai",
},
tools: [],
modelAliasLines: [],
userTimezone: "UTC",
promptContribution: {
stablePrefix: "## Embedded Stable\n\nStable provider guidance.",
},
});
// The stable prefix must be embedded verbatim (heading plus body).
expect(prompt).toContain("## Embedded Stable\n\nStable provider guidance.");
});
});

View File

@@ -3,6 +3,7 @@ import type { AgentSession } from "@mariozechner/pi-coding-agent";
import type { MemoryCitationsMode } from "../../config/types.memory.js";
import type { ResolvedTimeFormat } from "../date-time.js";
import type { EmbeddedContextFile } from "../pi-embedded-helpers.js";
import type { ProviderSystemPromptContribution } from "../system-prompt-contribution.js";
import { buildAgentSystemPrompt, type PromptMode } from "../system-prompt.js";
import type { EmbeddedSandboxInfo } from "./types.js";
import type { ReasoningLevel, ThinkLevel } from "./utils.js";
@@ -51,6 +52,7 @@ export function buildEmbeddedSystemPrompt(params: {
userTimeFormat?: ResolvedTimeFormat;
contextFiles?: EmbeddedContextFile[];
memoryCitationsMode?: MemoryCitationsMode;
promptContribution?: ProviderSystemPromptContribution;
}): string {
return buildAgentSystemPrompt({
workspaceDir: params.workspaceDir,
@@ -79,6 +81,7 @@ export function buildEmbeddedSystemPrompt(params: {
userTimeFormat: params.userTimeFormat,
contextFiles: params.contextFiles,
memoryCitationsMode: params.memoryCitationsMode,
promptContribution: params.promptContribution,
});
}

View File

@@ -0,0 +1,28 @@
/**
 * Named core prompt sections that a provider contribution may replace
 * wholesale via `sectionOverrides`.
 */
export type ProviderSystemPromptSectionId =
| "interaction_style"
| "tool_call_style"
| "execution_bias";
/**
 * A provider-owned, cache-aware contribution to the agent system prompt.
 *
 * Returned from a provider's `resolveSystemPromptContribution` hook to tune
 * the prompt for a model family without replacing the OpenClaw-owned prompt.
 */
export type ProviderSystemPromptContribution = {
/**
* Cache-stable provider guidance inserted above the system-prompt cache boundary.
*
* Use this for static provider/model-family instructions that should preserve
* KV cache reuse across turns.
*/
stablePrefix?: string;
/**
* Provider guidance inserted below the cache boundary.
*
* Use this only for genuinely dynamic text that is expected to vary across
* runs or sessions.
*/
dynamicSuffix?: string;
/**
* Whole-section replacements for selected core prompt sections.
*
* Values should contain the complete rendered section, including any desired
* heading such as `## Tool Call Style`.
*/
sectionOverrides?: Partial<Record<ProviderSystemPromptSectionId, string>>;
};

View File

@@ -711,6 +711,56 @@ describe("buildAgentSystemPrompt", () => {
expect(prompt).not.toContain("# Dynamic Project Context");
});
it("replaces provider-owned prompt sections without disturbing core ordering", () => {
const prompt = buildAgentSystemPrompt({
workspaceDir: "/tmp/openclaw",
promptContribution: {
sectionOverrides: {
interaction_style: "## Interaction Style\n\nCustom interaction guidance.",
execution_bias: "## Execution Bias\n\nCustom execution guidance.",
},
},
});
expect(prompt).toContain("## Interaction Style\n\nCustom interaction guidance.");
expect(prompt).toContain("## Execution Bias\n\nCustom execution guidance.");
expect(prompt).not.toContain("Bias toward action and momentum.");
});
it("places provider stable prefixes above the cache boundary", () => {
const prompt = buildAgentSystemPrompt({
workspaceDir: "/tmp/openclaw",
promptContribution: {
stablePrefix: "## Provider Stable Block\n\nStable provider guidance.",
},
});
const boundaryIndex = prompt.indexOf(SYSTEM_PROMPT_CACHE_BOUNDARY);
const stableIndex = prompt.indexOf("## Provider Stable Block");
const safetyIndex = prompt.indexOf("## Safety");
expect(stableIndex).toBeGreaterThan(-1);
expect(boundaryIndex).toBeGreaterThan(stableIndex);
expect(safetyIndex).toBeGreaterThan(stableIndex);
});
it("places provider dynamic suffixes below the cache boundary", () => {
const prompt = buildAgentSystemPrompt({
workspaceDir: "/tmp/openclaw",
promptContribution: {
dynamicSuffix: "## Provider Dynamic Block\n\nPer-turn provider guidance.",
},
});
const boundaryIndex = prompt.indexOf(SYSTEM_PROMPT_CACHE_BOUNDARY);
const dynamicIndex = prompt.indexOf("## Provider Dynamic Block");
const heartbeatIndex = prompt.indexOf("## Heartbeats");
expect(boundaryIndex).toBeGreaterThan(-1);
expect(dynamicIndex).toBeGreaterThan(boundaryIndex);
expect(heartbeatIndex).toBeGreaterThan(dynamicIndex);
});
it("summarizes the message tool when available", () => {
const prompt = buildAgentSystemPrompt({
workspaceDir: "/tmp/openclaw",

View File

@@ -15,6 +15,10 @@ import {
} from "./prompt-cache-stability.js";
import { sanitizeForPromptLiteral } from "./sanitize-for-prompt.js";
import { SYSTEM_PROMPT_CACHE_BOUNDARY } from "./system-prompt-cache-boundary.js";
import type {
ProviderSystemPromptContribution,
ProviderSystemPromptSectionId,
} from "./system-prompt-contribution.js";
/**
* Controls which hardcoded sections are included in the system prompt.
@@ -269,6 +273,25 @@ function buildExecutionBiasSection(params: { isMinimal: boolean }) {
];
}
/**
 * Normalize an optional provider-supplied prompt block.
 *
 * Returns the normalized text, or undefined when the input is absent,
 * not a string, or normalizes down to an empty block.
 */
function normalizeProviderPromptBlock(value?: string): string | undefined {
  if (typeof value !== "string") return undefined;
  const normalizedBlock = normalizeStructuredPromptSection(value);
  // Collapse an empty normalized result to undefined so callers can rely
  // on simple truthiness checks.
  return normalizedBlock ? normalizedBlock : undefined;
}
/**
 * Render a prompt section that a provider may replace wholesale.
 *
 * A usable (non-empty) override becomes the entire section, followed by a
 * blank line to keep spacing consistent with the hand-built sections;
 * otherwise the fallback lines are used verbatim.
 */
function buildOverridablePromptSection(params: {
  override?: string;
  fallback: string[];
}): string[] {
  const replacement = normalizeProviderPromptBlock(params.override);
  return replacement ? [replacement, ""] : params.fallback;
}
function buildExecApprovalPromptGuidance(params: {
runtimeChannel?: string;
inlineButtonsEnabled?: boolean;
@@ -332,6 +355,7 @@ export function buildAgentSystemPrompt(params: {
channel: string;
};
memoryCitationsMode?: MemoryCitationsMode;
promptContribution?: ProviderSystemPromptContribution;
}) {
const acpEnabled = params.acpEnabled !== false;
const sandboxedRuntime = params.sandboxInfo?.enabled === true;
@@ -362,6 +386,17 @@ export function buildAgentSystemPrompt(params: {
typeof params.extraSystemPrompt === "string"
? normalizeStructuredPromptSection(params.extraSystemPrompt)
: undefined;
const promptContribution = params.promptContribution;
const providerStablePrefix = normalizeProviderPromptBlock(promptContribution?.stablePrefix);
const providerDynamicSuffix = normalizeProviderPromptBlock(promptContribution?.dynamicSuffix);
const providerSectionOverrides = Object.fromEntries(
Object.entries(promptContribution?.sectionOverrides ?? {})
.map(([key, value]) => [
key,
normalizeProviderPromptBlock(typeof value === "string" ? value : undefined),
])
.filter(([, value]) => Boolean(value)),
) as Partial<Record<ProviderSystemPromptSectionId, string>>;
const ownerDisplay = params.ownerDisplay === "hash" ? "hash" : "raw";
const ownerLine = buildOwnerIdentityLine(
params.ownerNumbers ?? [],
@@ -476,22 +511,38 @@ export function buildAgentSystemPrompt(params: {
: []),
"Do not poll `subagents list` / `sessions_list` in a loop; only check status on-demand (for intervention, debugging, or when explicitly asked).",
"",
"## Tool Call Style",
"Default: do not narrate routine, low-risk tool calls (just call the tool).",
"Narrate only when it helps: multi-step work, complex/challenging problems, sensitive actions (e.g., deletions), or when the user explicitly asks.",
"Keep narration brief and value-dense; avoid repeating obvious steps.",
"Use plain human language for narration unless in a technical context.",
"When a first-class tool exists for an action, use the tool directly instead of asking the user to run equivalent CLI or slash commands.",
buildExecApprovalPromptGuidance({
runtimeChannel: params.runtimeInfo?.channel,
inlineButtonsEnabled,
...buildOverridablePromptSection({
override: providerSectionOverrides.interaction_style,
fallback: [],
}),
"Never execute /approve through exec or any other shell/tool path; /approve is a user-facing approval command, not a shell command.",
"Treat allow-once as single-command only: if another elevated command needs approval, request a fresh /approve and do not claim prior approval covered it.",
"When approvals are required, preserve and show the full command/script exactly as provided (including chained operators like &&, ||, |, ;, or multiline shells) so the user can approve what will actually run.",
"",
...buildExecutionBiasSection({
isMinimal,
...buildOverridablePromptSection({
override: providerSectionOverrides.tool_call_style,
fallback: [
"## Tool Call Style",
"Default: do not narrate routine, low-risk tool calls (just call the tool).",
"Narrate only when it helps: multi-step work, complex/challenging problems, sensitive actions (e.g., deletions), or when the user explicitly asks.",
"Keep narration brief and value-dense; avoid repeating obvious steps.",
"Use plain human language for narration unless in a technical context.",
"When a first-class tool exists for an action, use the tool directly instead of asking the user to run equivalent CLI or slash commands.",
buildExecApprovalPromptGuidance({
runtimeChannel: params.runtimeInfo?.channel,
inlineButtonsEnabled,
}),
"Never execute /approve through exec or any other shell/tool path; /approve is a user-facing approval command, not a shell command.",
"Treat allow-once as single-command only: if another elevated command needs approval, request a fresh /approve and do not claim prior approval covered it.",
"When approvals are required, preserve and show the full command/script exactly as provided (including chained operators like &&, ||, |, ;, or multiline shells) so the user can approve what will actually run.",
"",
],
}),
...buildOverridablePromptSection({
override: providerSectionOverrides.execution_bias,
fallback: buildExecutionBiasSection({
isMinimal,
}),
}),
...buildOverridablePromptSection({
override: providerStablePrefix,
fallback: [],
}),
...safetySection,
"## OpenClaw CLI Quick Reference",
@@ -682,6 +733,9 @@ export function buildAgentSystemPrompt(params: {
promptMode === "minimal" ? "## Subagent Context" : "## Group Chat Context";
lines.push(contextHeader, extraSystemPrompt, "");
}
if (providerDynamicSuffix) {
lines.push(providerDynamicSuffix, "");
}
// Skip heartbeats for subagent/none modes
if (!isMinimal && heartbeatPrompt) {

View File

@@ -1,5 +1,6 @@
import type { AuthProfileCredential, OAuthCredential } from "../agents/auth-profiles/types.js";
import { normalizeProviderId } from "../agents/provider-id.js";
import type { ProviderSystemPromptContribution } from "../agents/system-prompt-contribution.js";
import type { OpenClawConfig } from "../config/config.js";
import type { ModelProviderConfig } from "../config/types.js";
import { resolveCatalogHookProviderPluginIds } from "./providers.js";
@@ -41,6 +42,7 @@ import type {
ProviderResolveDynamicModelContext,
ProviderResolveTransportTurnStateContext,
ProviderResolveWebSocketSessionPolicyContext,
ProviderSystemPromptContributionContext,
ProviderRuntimeModel,
ProviderThinkingPolicyContext,
ProviderTransportTurnState,
@@ -208,6 +210,19 @@ export function runProviderDynamicModel(params: {
return resolveProviderRuntimePlugin(params)?.resolveDynamicModel?.(params.context) ?? undefined;
}
/**
 * Ask the runtime plugin registered for `params.provider` for its
 * system-prompt contribution.
 *
 * Returns undefined when no plugin resolves for the provider, the plugin
 * does not implement the hook, or the hook itself yields null/undefined.
 */
export function resolveProviderSystemPromptContribution(params: {
  provider: string;
  config?: OpenClawConfig;
  workspaceDir?: string;
  env?: NodeJS.ProcessEnv;
  context: ProviderSystemPromptContributionContext;
}): ProviderSystemPromptContribution | undefined {
  const plugin = resolveProviderRuntimePlugin(params);
  const contribution = plugin?.resolveSystemPromptContribution?.(params.context);
  // Normalize a null hook result to undefined for a uniform return type.
  return contribution ?? undefined;
}
export async function prepareProviderDynamicModel(params: {
provider: string;
config?: OpenClawConfig;

View File

@@ -13,6 +13,8 @@ import type {
import type { ModelCatalogEntry } from "../agents/model-catalog.js";
import type { FailoverReason } from "../agents/pi-embedded-helpers/types.js";
import type { ProviderRequestTransportOverrides } from "../agents/provider-request-config.js";
import type { ProviderSystemPromptContribution } from "../agents/system-prompt-contribution.js";
import type { PromptMode } from "../agents/system-prompt.js";
import type { AnyAgentTool } from "../agents/tools/common.js";
import type { ThinkLevel } from "../auto-reply/thinking.js";
import type { ReplyPayload } from "../auto-reply/types.js";
@@ -1044,6 +1046,18 @@ export type ProviderDeferSyntheticProfileAuthContext = {
resolvedApiKey?: string;
};
/**
 * Context handed to a provider's `resolveSystemPromptContribution` hook.
 *
 * Carries the resolved provider/model pair plus run-scoped details so the
 * provider can decide whether (and how) to contribute prompt guidance.
 */
export type ProviderSystemPromptContributionContext = {
config?: OpenClawConfig;
agentDir?: string;
workspaceDir?: string;
// Provider id the model resolved to (e.g. "openai", "openai-codex").
provider: string;
modelId: string;
promptMode: PromptMode;
runtimeChannel?: string;
runtimeCapabilities?: string[];
agentId?: string;
};
/** Text-inference provider capability registered by a plugin. */
export type ProviderPlugin = {
id: string;
@@ -1401,6 +1415,15 @@ export type ProviderPlugin = {
resolveDefaultThinkingLevel?: (
ctx: ProviderDefaultThinkingPolicyContext,
) => "off" | "minimal" | "low" | "medium" | "high" | "xhigh" | "adaptive" | null | undefined;
/**
* Provider-owned system-prompt contribution.
*
* Use this when a provider/model family needs cache-aware prompt tuning
* without replacing the full OpenClaw-owned system prompt.
*/
resolveSystemPromptContribution?: (
ctx: ProviderSystemPromptContributionContext,
) => ProviderSystemPromptContribution | null | undefined;
/**
* Provider-owned global config defaults.
*