fix: make conversation labels work with Codex (#78450)

Summary:
- The PR changes the shared conversation-label generator to send label instructions as `systemPrompt`, omit `temperature` for Codex simple completions, and log error stop reasons; it also adds focused tests and a changelog entry.
- Reproducibility: yes. Source reproduction is high-confidence: current main sends the prompt only inside the user message, while the Codex simple-completion transport reads instructions from `context.systemPrompt` and only includes `temperature` when supplied.
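
As a hedged illustration of the request-shape change (a sketch mirroring the diff below; the literal values and the standalone `isCodex` flag are illustrative assumptions, not code from the commit):

// Sketch only: shapes derived from the diff in this commit.
const prompt = "Generate a label";
const userMessage = "Need help with invoices";

// Before: instructions traveled inside the user message, so the Codex simple
// transport saw no `context.systemPrompt` and Responses requests failed with
// "Codex error: Instructions are required".
const legacyContext = {
  messages: [{ role: "user", content: `${prompt}\n\n${userMessage}`, timestamp: Date.now() }],
};

// After: instructions ride in `systemPrompt`, and `temperature` is spread in
// only for models that are not Codex simple completions.
const isCodex = true; // e.g. provider "openai-codex" or api "openai-codex-responses"
const fixedContext = {
  systemPrompt: prompt,
  messages: [{ role: "user", content: userMessage, timestamp: Date.now() }],
};
const fixedOptions = {
  maxTokens: 100,
  ...(isCodex ? {} : { temperature: 0.3 }),
};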

Automerge notes:
- The PR branch already contained a follow-up commit before automerge: `docs: note Codex topic label fix`.

Validation:
- ClawSweeper review passed for head 9380907984.
- Required merge gates passed before the squash merge.

Prepared head SHA: 9380907984
Review: https://github.com/openclaw/openclaw/pull/78450#issuecomment-4387573775

Co-authored-by: Clever <clever@users.noreply.github.com>
Author: simplyclever914
Date: 2026-05-06 15:39:30 +03:00 (committed by GitHub)
Parent: 458ce2da94
Commit: 20906f56e2
3 changed files with 94 additions and 2 deletions


@@ -113,6 +113,7 @@ Docs: https://docs.openclaw.ai
### Fixes
- Telegram/Codex: generate DM topic labels with Codex-compatible simple-completion requests so auto-created private topics can be renamed instead of staying `New Chat`.
- Web fetch: bound guarded dispatcher cleanup after request timeouts so timed-out fetches return tool errors instead of leaving Gateway tool lanes active. (#78439) Thanks @obviyus.
- Gate Slack startup user allowlist resolution [AI]. (#77898) Thanks @pgondhi987.
- OpenAI/Codex: suppress stale `openai-codex` GPT-5.1/5.2/5.3 model refs that ChatGPT/Codex OAuth accounts now reject, keeping model lists, config validation, and forward-compat resolution on current 5.4/5.5 routes. Fixes #67158. Thanks @drpau.
@@ -174,6 +175,7 @@ Docs: https://docs.openclaw.ai
- Hooks/session-memory: add collision suffixes to fallback memory filenames so repeated `/new` or `/reset` captures in the same minute do not overwrite the earlier session archive. Thanks @vincentkoc.
- Agents/config: remove the ambiguous legacy `main` agent dir helper from runtime paths; model, auth, gateway, bundled plugin, and test helpers now resolve default/session agent dirs through `agents.list`/agent-scope helpers while plugin SDK keeps a deprecated compatibility export.
- CLI/status: show the selected agent runtime/harness in `openclaw status` session rows so terminal status matches the `/status` runtime line. Thanks @vincentkoc.
- CLI/sessions: prune old unreferenced transcript, compaction checkpoint, and trajectory artifacts during normal `sessions cleanup`, so gateway restart or crash orphans do not accumulate indefinitely outside `sessions.json`. Fixes #77608. Thanks @slideshow-dingo.
- Doctor/Codex: repair legacy `openai-codex/*` routes in primary models, fallbacks, heartbeat/subagent/compaction overrides, hooks, channel overrides, and stale session pins to canonical `openai/*`, selecting `agentRuntime.id: "codex"` only when the Codex plugin is installed, enabled, contributes the `codex` harness, and has usable OAuth; otherwise select `agentRuntime.id: "pi"`. Thanks @vincentkoc.
- Video generation: wait up to 20 minutes for slow fal/MiniMax queue-backed jobs, stop forwarding unsupported Google Veo generated-audio options, and normalize MiniMax `720P` requests to its supported `768P` resolution with the usual override warning/details instead of failing fallback.


@@ -2,6 +2,7 @@ import { beforeEach, describe, expect, it, vi } from "vitest";
 const completeSimple = vi.hoisted(() => vi.fn());
 const getRuntimeAuthForModel = vi.hoisted(() => vi.fn());
+const logVerbose = vi.hoisted(() => vi.fn());
 const requireApiKey = vi.hoisted(() => vi.fn());
 const resolveDefaultModelForAgent = vi.hoisted(() => vi.fn());
 const resolveModelAsync = vi.hoisted(() => vi.fn());
@@ -18,6 +19,8 @@ vi.mock("@mariozechner/pi-ai", async () => {
 vi.mock("../../agents/model-auth.js", () => ({ requireApiKey }));
+vi.mock("../../globals.js", () => ({ logVerbose }));
 vi.mock("../../agents/model-selection.js", () => ({
   resolveDefaultModelForAgent,
 }));
@@ -40,6 +43,7 @@ describe("generateConversationLabel", () => {
   beforeEach(() => {
     completeSimple.mockReset();
     getRuntimeAuthForModel.mockReset();
+    logVerbose.mockReset();
     requireApiKey.mockReset();
     resolveDefaultModelForAgent.mockReset();
     resolveModelAsync.mockReset();
@@ -88,4 +92,70 @@ describe("generateConversationLabel", () => {
       cfg: {},
     });
   });
+
+  it("passes the label prompt as systemPrompt and the user text as message content", async () => {
+    await generateConversationLabel({
+      userMessage: "Need help with invoices",
+      prompt: "Generate a label",
+      cfg: {},
+    });
+    expect(completeSimple).toHaveBeenCalledWith(
+      { provider: "openai" },
+      {
+        systemPrompt: "Generate a label",
+        messages: [
+          {
+            role: "user",
+            content: "Need help with invoices",
+            timestamp: expect.any(Number),
+          },
+        ],
+      },
+      expect.objectContaining({
+        apiKey: "resolved-key",
+        maxTokens: 100,
+        temperature: 0.3,
+        signal: expect.any(AbortSignal),
+      }),
+    );
+  });
+
+  it("omits temperature for Codex Responses simple completions", async () => {
+    resolveDefaultModelForAgent.mockReturnValue({ provider: "openai-codex", model: "gpt-5.5" });
+    resolveModelAsync.mockResolvedValue({
+      model: { provider: "openai-codex", api: "openai-codex-responses" },
+      authStorage: {},
+      modelRegistry: {},
+    });
+    await generateConversationLabel({
+      userMessage: "тест создания топика-треда",
+      prompt: "Generate a label",
+      cfg: {},
+    });
+    expect(completeSimple.mock.calls[0]?.[2]).toEqual(
+      expect.not.objectContaining({ temperature: expect.anything() }),
+    );
+  });
+
+  it("logs completion errors instead of treating them as empty labels", async () => {
+    completeSimple.mockResolvedValue({
+      content: [],
+      stopReason: "error",
+      errorMessage: "Codex error: Instructions are required",
+    });
+    const label = await generateConversationLabel({
+      userMessage: "Need help with invoices",
+      prompt: "Generate a label",
+      cfg: {},
+    });
+    expect(label).toBeNull();
+    expect(logVerbose).toHaveBeenCalledWith(
+      "conversation-label-generator: completion failed: Codex error: Instructions are required",
+    );
+  });
 });


@@ -23,6 +23,20 @@ function isTextContentBlock(block: { type: string }): block is TextContent {
   return block.type === "text";
 }
 
+function isCodexSimpleCompletionModel(model: { api?: string; provider?: string }): boolean {
+  return model.provider === "openai-codex" || model.api === "openai-codex-responses";
+}
+
+function extractSimpleCompletionError(result: {
+  stopReason?: string;
+  errorMessage?: string;
+}): string | null {
+  if (result.stopReason !== "error") {
+    return null;
+  }
+  return result.errorMessage?.trim() || "unknown error";
+}
+
 export async function generateConversationLabel(
   params: ConversationLabelParams,
 ): Promise<string | null> {
@@ -58,10 +72,11 @@
   const result = await completeSimple(
     completionModel,
     {
+      systemPrompt: prompt,
       messages: [
         {
           role: "user",
-          content: `${prompt}\n\n${userMessage}`,
+          content: userMessage,
           timestamp: Date.now(),
         },
       ],
@@ -69,10 +84,15 @@
     {
       apiKey,
       maxTokens: 100,
-      temperature: 0.3,
+      ...(isCodexSimpleCompletionModel(completionModel) ? {} : { temperature: 0.3 }),
       signal: controller.signal,
     },
   );
+  const errorMessage = extractSimpleCompletionError(result);
+  if (errorMessage) {
+    logVerbose(`conversation-label-generator: completion failed: ${errorMessage}`);
+    return null;
+  }
 
   const text = result.content
     .filter(isTextContentBlock)