feat: inject runtime model identity into prompts

This commit is contained in:
Peter Steinberger
2026-05-09 08:51:00 +01:00
parent fe9a89a2d2
commit 7cfa12fd2b
8 changed files with 106 additions and 15 deletions

View File

@@ -7,6 +7,7 @@ Docs: https://docs.openclaw.ai
### Changes
- Plugins/ACPX: accept an optional `args` array in `agents.<name>` config so paths and flag values containing spaces stay intact when spawning ACP agent processes. Thanks @TheArchitectit and @BunsDev.
- Agents: inject the current provider/model identity into system prompts, including configured prompt overrides and CLI hook prompt replacements, so agents can answer model-identity questions from the actual runtime selection.
- Plugins/CLI: add the optional bundled `oc-path` plugin, providing `openclaw path` for surgical `oc://` access to markdown, JSONC, and JSONL workspace files.
- Plugins/SDK: add unified model catalog registration for text, image, video, and music providers, including `providerCatalogEntry` manifests, shared media list help, live catalog caching, and per-model video capability overlays.
- CLI: make parser, startup, config, guardrail, channel, agent, task, session, and MCP failures explain what happened and point to the next recovery command.

View File

@@ -273,7 +273,9 @@ describe("shouldSkipLocalCliCredentialEpoch", () => {
});
expect(context.params.prompt).toBe("history:2\n\nlatest ask");
expect(context.systemPrompt).toBe("prepend system\n\nhook system\n\nappend system");
expect(context.systemPrompt).toBe(
"prepend system\n\nhook system\n\nappend system\n\nCurrent model identity: test-cli/test-model. If asked what model you are, answer with this value for the current run.",
);
expect(hookRunner.runBeforePromptBuild).toHaveBeenCalledWith(
{
prompt: "latest ask",
@@ -441,7 +443,7 @@ describe("shouldSkipLocalCliCredentialEpoch", () => {
expect(context.params.prompt).toBe("prompt prepend\n\nlegacy prepend\n\nlatest ask");
expect(context.systemPrompt).toBe(
"prompt prepend system\n\nlegacy prepend system\n\nprompt system\n\nprompt append system\n\nlegacy append system",
"prompt prepend system\n\nlegacy prepend system\n\nprompt system\n\nprompt append system\n\nlegacy append system\n\nCurrent model identity: test-cli/test-model. If asked what model you are, answer with this value for the current run.",
);
expect(hookRunner.runBeforePromptBuild).toHaveBeenCalledOnce();
expect(hookRunner.runBeforeAgentStart).toHaveBeenCalledOnce();
@@ -475,7 +477,9 @@ describe("shouldSkipLocalCliCredentialEpoch", () => {
});
expect(context.params.prompt).toBe("latest ask");
expect(context.systemPrompt).toBe("base extra system");
expect(context.systemPrompt).toBe(
"base extra system\n\nCurrent model identity: test-cli/test-model. If asked what model you are, answer with this value for the current run.",
);
expect(context.systemPrompt).not.toContain("hook exploded");
expect(hookRunner.runBeforePromptBuild).toHaveBeenCalledOnce();
} finally {
@@ -570,7 +574,9 @@ describe("shouldSkipLocalCliCredentialEpoch", () => {
config: createCliBackendConfig(),
});
expect(context.systemPrompt).toBe("active video task\n\nhook prepend system\n\nhook system");
expect(context.systemPrompt).toBe(
"active video task\n\nhook prepend system\n\nhook system\n\nCurrent model identity: test-cli/test-model. If asked what model you are, answer with this value for the current run.",
);
expect(mockBuildActiveVideoGenerationTaskPromptContextForSession).toHaveBeenCalledWith(
"agent:main:test",
);

View File

@@ -43,6 +43,7 @@ import { applyPluginTextReplacements } from "../plugin-text-transforms.js";
import { resolveSkillsPromptForRun } from "../skills.js";
import { resolveSystemPromptOverride } from "../system-prompt-override.js";
import { buildSystemPromptReport } from "../system-prompt-report.js";
import { appendModelIdentitySystemPrompt } from "../system-prompt.js";
import { redactRunIdentifier, resolveRunWorkspaceDir } from "../workspace-run.js";
import { prepareCliBundleMcpConfig } from "./bundle-mcp.js";
import { buildSystemPrompt, normalizeCliModel } from "./helpers.js";
@@ -417,7 +418,10 @@ export async function prepareCliRunContext(
}),
prompt: preparedPrompt,
});
systemPrompt = applyPluginTextReplacements(systemPrompt, backendResolved.textTransforms?.input);
systemPrompt = appendModelIdentitySystemPrompt({
systemPrompt: applyPluginTextReplacements(systemPrompt, backendResolved.textTransforms?.input),
model: modelDisplay,
});
const systemPromptReport = buildSystemPromptReport({
source: "run",
generatedAt: Date.now(),

View File

@@ -58,6 +58,7 @@ describe("buildAttemptSystemPrompt", () => {
});
expect(result.systemPrompt).toContain("Custom override prompt.");
expect(result.systemPrompt).toContain("Current model identity: openai/gpt-5.5.");
expect(result.systemPrompt).toContain("## Bootstrap Pending");
expect(result.systemPrompt).toContain("BOOTSTRAP.md is included below in Project Context");
expect(result.systemPrompt).toContain("## Bootstrap Context Notice");
@@ -96,6 +97,7 @@ describe("buildAttemptSystemPrompt", () => {
});
expect(result.systemPrompt).toContain("Custom override prompt.");
expect(result.systemPrompt).toContain("Current model identity: openai/gpt-5.5.");
expect(result.systemPrompt).toContain("## Subagent Context");
expect(result.systemPrompt).toContain("RUN_MODE_TASK_77950");
});

View File

@@ -1,6 +1,9 @@
import type { OpenClawConfig } from "../../../config/types.openclaw.js";
import type { ProviderTransformSystemPromptContext } from "../../../plugins/types.js";
import { appendAgentBootstrapSystemPromptSupplement } from "../../system-prompt.js";
import {
appendAgentBootstrapSystemPromptSupplement,
appendModelIdentitySystemPrompt,
} from "../../system-prompt.js";
import { buildEmbeddedSystemPrompt, createSystemPromptOverride } from "../system-prompt.js";
type EmbeddedSystemPromptParams = Parameters<typeof buildEmbeddedSystemPrompt>[0];
@@ -48,15 +51,18 @@ export function buildAttemptSystemPrompt(
params: BuildAttemptSystemPromptParams,
): AttemptSystemPrompt {
const baseSystemPrompt = params.systemPromptOverrideText
? appendRuntimeExtraSystemPrompt({
systemPrompt: appendAgentBootstrapSystemPromptSupplement({
systemPrompt: params.systemPromptOverrideText,
bootstrapMode: params.embeddedSystemPrompt.bootstrapMode,
bootstrapTruncationNotice: params.embeddedSystemPrompt.bootstrapTruncationNotice,
contextFiles: params.embeddedSystemPrompt.contextFiles,
? appendModelIdentitySystemPrompt({
systemPrompt: appendRuntimeExtraSystemPrompt({
systemPrompt: appendAgentBootstrapSystemPromptSupplement({
systemPrompt: params.systemPromptOverrideText,
bootstrapMode: params.embeddedSystemPrompt.bootstrapMode,
bootstrapTruncationNotice: params.embeddedSystemPrompt.bootstrapTruncationNotice,
contextFiles: params.embeddedSystemPrompt.contextFiles,
}),
extraSystemPrompt: params.embeddedSystemPrompt.extraSystemPrompt,
promptMode: params.embeddedSystemPrompt.promptMode,
}),
extraSystemPrompt: params.embeddedSystemPrompt.extraSystemPrompt,
promptMode: params.embeddedSystemPrompt.promptMode,
model: params.embeddedSystemPrompt.runtimeInfo.model,
})
: buildEmbeddedSystemPrompt(params.embeddedSystemPrompt);

View File

@@ -165,6 +165,7 @@ import {
import { resolveSystemPromptOverride } from "../../system-prompt-override.js";
import { buildSystemPromptParams } from "../../system-prompt-params.js";
import { buildSystemPromptReport } from "../../system-prompt-report.js";
import { appendModelIdentitySystemPrompt } from "../../system-prompt.js";
import { resolveAgentTimeoutMs } from "../../timeout.js";
import {
buildEmptyExplicitToolAllowlistError,
@@ -2717,6 +2718,14 @@ export async function runEmbeddedAttempt(
);
}
}
const modelAwareSystemPrompt = appendModelIdentitySystemPrompt({
systemPrompt: systemPromptText,
model: runtimeInfo.model,
});
if (modelAwareSystemPrompt !== systemPromptText) {
applySystemPromptOverrideToSession(activeSession, modelAwareSystemPrompt);
systemPromptText = modelAwareSystemPrompt;
}
if (cacheObservabilityEnabled) {
const cacheObservation = beginPromptCacheObservation({

View File

@@ -101,6 +101,20 @@ describe("buildAgentSystemPrompt", () => {
expect(tokenA).not.toBe(tokenB);
});
it("injects the current model identity into the runtime prompt", () => {
const prompt = buildAgentSystemPrompt({
workspaceDir: "/tmp/openclaw",
runtimeInfo: {
agentId: "main",
model: "openai/gpt-5.5",
},
});
expect(prompt).toContain(
"Current model identity: openai/gpt-5.5. If asked what model you are, answer with this value for the current run.",
);
});
it("omits extended sections in minimal prompt mode", () => {
const prompt = buildAgentSystemPrompt({
workspaceDir: "/tmp/openclaw",

View File

@@ -550,6 +550,51 @@ function formatFullAccessBlockedReason(reason?: EmbeddedFullAccessBlockedReason)
}
return "runtime constraints";
}
const MODEL_IDENTITY_PREFIX = "Current model identity:";

/**
 * Builds the one-line model-identity sentence injected into system prompts.
 *
 * @param model - Provider/model display string (e.g. "openai/gpt-5.5"); may be
 *   undefined or whitespace-only when no runtime model is known.
 * @returns The identity line, or undefined when there is no usable model name.
 */
export function buildModelIdentityPromptLine(model?: string): string | undefined {
  const name = model?.trim();
  if (name === undefined || name === "") {
    return undefined;
  }
  return `${MODEL_IDENTITY_PREFIX} ${name}. If asked what model you are, answer with this value for the current run.`;
}

/**
 * Ensures a system prompt carries exactly one up-to-date model-identity line.
 *
 * Behavior:
 * - No model available: the prompt is returned untouched.
 * - Prompt already contains identity line(s): the first is replaced with the
 *   current one and any later duplicates are dropped (lines are rejoined with
 *   "\n", normalizing CRLF input).
 * - Otherwise: the identity line is appended after a blank line, or returned
 *   alone when the prompt is empty.
 */
export function appendModelIdentitySystemPrompt(params: {
  systemPrompt: string;
  model?: string;
}): string {
  const identityLine = buildModelIdentityPromptLine(params.model);
  if (identityLine === undefined) {
    return params.systemPrompt;
  }
  // A line counts as an identity line when it starts (after indentation)
  // with the shared prefix.
  const isIdentityLine = (text: string): boolean =>
    text.trimStart().startsWith(MODEL_IDENTITY_PREFIX);
  let sawExisting = false;
  const rewritten: string[] = [];
  for (const original of params.systemPrompt.split(/\r?\n/u)) {
    if (!isIdentityLine(original)) {
      rewritten.push(original);
      continue;
    }
    if (!sawExisting) {
      // Replace the first stale identity line in place.
      sawExisting = true;
      rewritten.push(identityLine);
    }
    // Any further identity lines are duplicates and are discarded.
  }
  if (sawExisting) {
    return rewritten.join("\n");
  }
  const trimmedBase = params.systemPrompt.trimEnd();
  return trimmedBase ? `${trimmedBase}\n\n${identityLine}` : identityLine;
}
export function buildAgentSystemPrompt(params: {
workspaceDir: string;
defaultThinkLevel?: ThinkLevel;
@@ -757,6 +802,7 @@ export function buildAgentSystemPrompt(params: {
const skillsPrompt = params.skillsPrompt?.trim();
const heartbeatPrompt = params.heartbeatPrompt?.trim();
const runtimeInfo = params.runtimeInfo;
const modelIdentityLine = buildModelIdentityPromptLine(runtimeInfo?.model);
const runtimeChannel = normalizeOptionalLowercaseString(runtimeInfo?.channel);
const runtimeCapabilities = runtimeInfo?.capabilities ?? [];
const runtimeCapabilitiesLower = new Set(
@@ -816,7 +862,9 @@ export function buildAgentSystemPrompt(params: {
// For "none" mode, return just the basic identity line
if (promptMode === "none") {
return "You are a personal assistant running inside OpenClaw.";
return ["You are a personal assistant running inside OpenClaw.", modelIdentityLine]
.filter(Boolean)
.join("\n");
}
const contextFiles = params.contextFiles ?? [];
@@ -1174,6 +1222,7 @@ export function buildAgentSystemPrompt(params: {
lines.push(
"## Runtime",
buildRuntimeLine(runtimeInfo, runtimeChannel, runtimeCapabilities, params.defaultThinkLevel),
...(modelIdentityLine ? [modelIdentityLine] : []),
`Reasoning: ${reasoningLevel} (hidden unless on/stream). Toggle /reasoning; /status shows Reasoning when enabled.`,
);