chore: refresh Talk-generated metadata

This commit is contained in:
Peter Steinberger
2026-05-05 21:00:06 +01:00
parent ada560ece4
commit 7760edc68e
5 changed files with 8 additions and 7 deletions

View File

@@ -8,6 +8,7 @@ Docs: https://docs.openclaw.ai
- PR triage: mark external pull requests with `proof: supplied` when Barnacle finds structured real behavior proof, keep stale negative proof labels in sync across CRLF-edited PR bodies, and let ClawSweeper own the stronger `proof: sufficient` judgement.
- Sessions CLI: show the selected agent runtime in the `openclaw sessions` table so terminal output matches the runtime visibility already present in JSON/status surfaces. Thanks @vincentkoc.
- Talk/voice: unify realtime relay, transcription relay, managed-room handoff, Voice Call, Google Meet, VoiceClaw, and native clients around a shared Talk session controller and add the Gateway-managed `talk.session.*` RPC surface.
- Google Meet/Voice Call: make Twilio dial-in joins speak through the realtime Gemini voice bridge with paced audio streaming, backpressure-aware buffering, barge-in queue clearing, same-session agent consult routing, duplicate-consult coalescing, and no TwiML fallback during realtime speech, giving Meet participants a much snappier OpenClaw voice agent. (#77064) Thanks @scoootscooob.
- Voice Call/realtime: add opt-in OpenClaw agent voice context capsules and consult-cadence guidance so Gemini/OpenAI realtime calls can sound like the configured agent without consulting the full agent on every ordinary turn. Thanks @scoootscooob.
- Docker/Gateway: harden the gateway container by dropping `NET_RAW` and `NET_ADMIN` capabilities and enabling `no-new-privileges` in the bundled `docker-compose.yml`. Thanks @VintageAyu.

View File

@@ -1,4 +1,4 @@
c93176f87a1e4576f5951b82037394c4bc9628bb6e056b6b24f96e662d6d636c config-baseline.json
92cbb12ca382f7424e7bd52df21798b10a57621f5c266909fa74e23f6cb973d7 config-baseline.core.json
60fe8b70598ccd0cf41875b1615106583a466694de2bf50019a9a251b58fa02e config-baseline.json
28d86173c32d17ce6348c7af028a00118e32eb9d344a0b19f9132c606da210c0 config-baseline.core.json
cd7c0c7fb1435bc7e59099e9ac334462d5ad444016e9ab4512aae63a238f78dc config-baseline.channel.json
6871e789b74722e4ff2c877940dac256c232433ae26b305fc6ca782b90662097 config-baseline.plugin.json

View File

@@ -1,2 +1,2 @@
1a06492fe05d1c9dc3194677f52d57ec90468b93023b70d0852ef01d87c7eae3 plugin-sdk-api-baseline.json
c950a1923c0dc7d31120a3010e24217bcf22fd9cacbe102d3ae19b0120c0f648 plugin-sdk-api-baseline.jsonl
40646f6311034e22ac58d8141ceefd03a9dceec6ddaa53ff790955c81f2b045f plugin-sdk-api-baseline.json
f70f443f9ae6905f7e7b32233e84a0d041996b288068e9c20eb455676e6a47a5 plugin-sdk-api-baseline.jsonl

View File

@@ -981,7 +981,7 @@ describe("sanitizeSessionHistory", () => {
expect(result).toEqual([
{
...(messages[0] as Record<string, unknown>),
...(messages[0] as unknown as Record<string, unknown>),
usage: makeZeroUsageSnapshot(),
},
]);

View File

@@ -97,7 +97,7 @@ describe("normalizeMessagesForLlmBoundary", () => {
const output = normalizeMessagesForLlmBoundary(
input as Parameters<typeof normalizeMessagesForLlmBoundary>[0],
) as Array<Record<string, unknown>>;
) as unknown as Array<Record<string, unknown>>;
expect(output[0]).not.toHaveProperty("details");
expect(output[0]?.content).toEqual([{ type: "text", text: "visible output" }]);
@@ -136,7 +136,7 @@ describe("normalizeMessagesForLlmBoundary", () => {
const output = normalizeMessagesForLlmBoundary(
input as Parameters<typeof normalizeMessagesForLlmBoundary>[0],
) as Array<Record<string, unknown>>;
) as unknown as Array<Record<string, unknown>>;
expect(output).toHaveLength(3);
expect(output).not.toEqual(