feat(openai): add default prompt overlay

This commit is contained in:
Peter Steinberger
2026-04-04 09:26:45 +09:00
parent f6f7609b66
commit a38cb20177
8 changed files with 176 additions and 1 deletions

View File

@@ -12,6 +12,68 @@ OpenAI provides developer APIs for GPT models. Codex supports **ChatGPT sign-in*
access or **API key** sign-in for usage-based access. Codex cloud requires ChatGPT sign-in.
OpenAI explicitly supports subscription OAuth usage in external tools/workflows like OpenClaw.
## Default interaction style
OpenClaw adds a small OpenAI-specific prompt overlay by default for both
`openai/*` and `openai-codex/*` runs. The overlay keeps the assistant warm,
collaborative, concise, and direct without replacing the base OpenClaw system
prompt.
Config key:
`plugins.entries.openai.config.personalityOverlay`
Allowed values:
- `"friendly"` (default): enables the OpenAI-specific overlay.
- `"off"`: disables the overlay so only the base OpenClaw prompt is used.
Scope:
- Applies to `openai/*` models.
- Applies to `openai-codex/*` models.
- Does not affect other providers.
This behavior is enabled by default:
```json5
{
plugins: {
entries: {
openai: {
config: {
personalityOverlay: "friendly",
},
},
},
},
}
```
### Disable the OpenAI prompt overlay
If you prefer the unmodified base OpenClaw prompt, turn the overlay off:
```json5
{
plugins: {
entries: {
openai: {
config: {
personalityOverlay: "off",
},
},
},
},
}
```
You can also set it directly with the config CLI:
```bash
openclaw config set plugins.entries.openai.config.personalityOverlay off
```
## Option A: OpenAI API key (OpenAI Platform)
**Best for:** direct API access and usage-based billing.

View File

@@ -1,12 +1,14 @@
import type { OpenClawConfig } from "openclaw/plugin-sdk/config-runtime";
import * as providerAuth from "openclaw/plugin-sdk/provider-auth-runtime";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { createTestPluginApi } from "../../test/helpers/plugins/plugin-api.js";
import {
registerProviderPlugin,
requireRegisteredProvider,
} from "../../test/helpers/plugins/provider-registration.js";
import { buildOpenAIImageGenerationProvider } from "./image-generation-provider.js";
import plugin from "./index.js";
import { OPENAI_FRIENDLY_PROMPT_OVERLAY } from "./prompt-overlay.js";
const runtimeMocks = vi.hoisted(() => ({
ensureGlobalUndiciEnvProxyDispatcher: vi.fn(),
@@ -36,6 +38,22 @@ const registerOpenAIPlugin = () =>
name: "OpenAI Provider",
});
// Registers the OpenAI plugin against a test API whose `on` hook is a spy,
// so tests can inspect which lifecycle hooks the plugin subscribed to.
function registerOpenAIPluginWithHook(params?: { pluginConfig?: Record<string, unknown> }) {
  const on = vi.fn();
  const api = createTestPluginApi({
    id: "openai",
    name: "OpenAI Provider",
    source: "test",
    config: {},
    runtime: {} as never,
    pluginConfig: params?.pluginConfig,
    on,
  });
  plugin.register(api);
  return { on };
}
describe("openai plugin", () => {
beforeEach(() => {
vi.clearAllMocks();
@@ -221,4 +239,40 @@ describe("openai plugin", () => {
runtimeMocks.ensureGlobalUndiciEnvProxyDispatcher.mock.invocationCallOrder[0],
).toBeLessThan(runtimeMocks.refreshOpenAICodexToken.mock.invocationCallOrder[0]);
});
it("registers the friendly prompt overlay by default and scopes it to OpenAI providers", async () => {
// Register with no plugin config: overlay defaults to "friendly".
const { on } = registerOpenAIPluginWithHook();
expect(on).toHaveBeenCalledWith("before_prompt_build", expect.any(Function));
// Pull the registered hook handler back out of the spy to invoke it directly.
const beforePromptBuild = on.mock.calls.find((call) => call[0] === "before_prompt_build")?.[1];
// "openai" provider runs receive the overlay as appended system context.
const openaiResult = await beforePromptBuild?.(
{ prompt: "hello", messages: [] },
{ modelProviderId: "openai", modelId: "gpt-5.4" },
);
expect(openaiResult).toEqual({
appendSystemContext: OPENAI_FRIENDLY_PROMPT_OVERLAY,
});
// "openai-codex" provider runs receive the same overlay.
const codexResult = await beforePromptBuild?.(
{ prompt: "hello", messages: [] },
{ modelProviderId: "openai-codex", modelId: "gpt-5.4" },
);
expect(codexResult).toEqual({
appendSystemContext: OPENAI_FRIENDLY_PROMPT_OVERLAY,
});
// Other providers are untouched: the hook returns undefined (no-op).
const nonOpenAIResult = await beforePromptBuild?.(
{ prompt: "hello", messages: [] },
{ modelProviderId: "anthropic", modelId: "sonnet-4.6" },
);
expect(nonOpenAIResult).toBeUndefined();
});
it("supports opting out of the prompt overlay via plugin config", () => {
// With personalityOverlay "off", the plugin must not register the
// before_prompt_build hook at all (not merely return undefined from it).
const { on } = registerOpenAIPluginWithHook({
pluginConfig: { personalityOverlay: "off" },
});
expect(on).not.toHaveBeenCalledWith("before_prompt_build", expect.any(Function));
});
});

View File

@@ -7,6 +7,11 @@ import {
} from "./media-understanding-provider.js";
import { buildOpenAICodexProviderPlugin } from "./openai-codex-provider.js";
import { buildOpenAIProvider } from "./openai-provider.js";
import {
OPENAI_FRIENDLY_PROMPT_OVERLAY,
resolveOpenAIPromptOverlayMode,
shouldApplyOpenAIPromptOverlay,
} from "./prompt-overlay.js";
import { buildOpenAISpeechProvider } from "./speech-provider.js";
export default definePluginEntry({
@@ -14,6 +19,7 @@ export default definePluginEntry({
name: "OpenAI Provider",
description: "Bundled OpenAI provider plugins",
register(api) {
const promptOverlayMode = resolveOpenAIPromptOverlayMode(api.pluginConfig);
api.registerCliBackend(buildOpenAICodexCliBackend());
api.registerProvider(buildOpenAIProvider());
api.registerProvider(buildOpenAICodexProviderPlugin());
@@ -21,5 +27,15 @@ export default definePluginEntry({
api.registerMediaUnderstandingProvider(openaiMediaUnderstandingProvider);
api.registerMediaUnderstandingProvider(openaiCodexMediaUnderstandingProvider);
api.registerImageGenerationProvider(buildOpenAIImageGenerationProvider());
if (promptOverlayMode !== "off") {
api.on("before_prompt_build", (_event, ctx) =>
shouldApplyOpenAIPromptOverlay({
mode: promptOverlayMode,
modelProviderId: ctx.modelProviderId,
})
? { appendSystemContext: OPENAI_FRIENDLY_PROMPT_OVERLAY }
: undefined,
);
}
},
});

View File

@@ -40,6 +40,13 @@
"configSchema": {
"type": "object",
"additionalProperties": false,
"properties": {
"personalityOverlay": {
"type": "string",
"enum": ["friendly", "off"],
"default": "friendly",
"description": "Controls the default OpenAI-specific prompt overlay used for OpenAI and OpenAI Codex runs."
}
}
}
}

View File

@@ -0,0 +1,28 @@
// Provider ids whose runs receive the default OpenAI prompt overlay.
const OPENAI_PROVIDER_IDS = new Set(["openai", "openai-codex"]);
// System-context text appended to the base prompt when the "friendly"
// personality mode (the default) is active for an OpenAI/Codex run.
// NOTE: this literal is emitted verbatim into the model prompt.
export const OPENAI_FRIENDLY_PROMPT_OVERLAY = `## Interaction Style
Be warm, collaborative, and quietly supportive.
Communicate like a capable teammate sitting next to the user.
Keep progress updates clear and concrete.
Explain decisions without ego.
When the user is wrong or a plan is risky, say so kindly and directly.
Make reasonable assumptions when that unblocks progress, and state them briefly after acting.
Do not make the user do unnecessary work.
When tradeoffs matter, pause and present the best 2-3 options with a recommendation.
Keep replies concise by default; friendly does not mean verbose.`;
/** Personality modes supported by the OpenAI prompt overlay. */
export type OpenAIPromptOverlayMode = "friendly" | "off";

/**
 * Normalizes the `personalityOverlay` plugin-config value into a mode.
 * Only the literal string "off" disables the overlay; any other value —
 * including a missing config object or an unrecognized value — resolves
 * to the default "friendly" mode.
 */
export function resolveOpenAIPromptOverlayMode(
  pluginConfig?: Record<string, unknown>,
): OpenAIPromptOverlayMode {
  if (pluginConfig?.personalityOverlay === "off") {
    return "off";
  }
  return "friendly";
}
/**
 * Decides whether the friendly overlay applies to a run.
 * Returns true only when the mode is "friendly" AND the run's provider id
 * is one of the OpenAI provider ids; a missing provider id never matches.
 */
export function shouldApplyOpenAIPromptOverlay(params: {
  mode: OpenAIPromptOverlayMode;
  modelProviderId?: string;
}): boolean {
  if (params.mode !== "friendly") {
    return false;
  }
  return OPENAI_PROVIDER_IDS.has(params.modelProviderId ?? "");
}

View File

@@ -156,6 +156,8 @@ export async function runEmbeddedPiAgent(
sessionKey: params.sessionKey,
sessionId: params.sessionId,
workspaceDir: resolvedWorkspace,
modelProviderId: provider,
modelId,
messageProvider: params.messageProvider ?? undefined,
trigger: params.trigger,
channelId: params.messageChannel ?? params.messageProvider ?? undefined,

View File

@@ -1464,6 +1464,8 @@ export async function runEmbeddedAttempt(
sessionKey: params.sessionKey,
sessionId: params.sessionId,
workspaceDir: params.workspaceDir,
modelProviderId: params.model.provider,
modelId: params.model.id,
messageProvider: params.messageProvider ?? undefined,
trigger: params.trigger,
channelId: params.messageChannel ?? params.messageProvider ?? undefined,

View File

@@ -2000,6 +2000,10 @@ export type PluginHookAgentContext = {
sessionKey?: string;
sessionId?: string;
workspaceDir?: string;
/** Resolved model provider for this run (for example "openai"). */
modelProviderId?: string;
/** Resolved model id for this run (for example "gpt-5.4"). */
modelId?: string;
messageProvider?: string;
/** What initiated this agent run: "user", "heartbeat", "cron", or "memory". */
trigger?: string;