test: keep prompt cache PR gate green

This commit is contained in:
Peter Steinberger
2026-04-16 17:28:51 +01:00
parent eb10803691
commit 6429fa0a7f
2 changed files with 23 additions and 21 deletions

View File

@@ -47,6 +47,7 @@ Docs: https://docs.openclaw.ai
- Codex/app-server: parse Desktop-originated app-server user agents such as `Codex Desktop/0.118.0`, keeping the version gate working when the Codex CLI inherits a multi-word originator. (#64666) Thanks @cyrusaf.
- Cron/announce delivery: keep isolated announce `NO_REPLY` stripping case-insensitive across direct and text delivery, preserve structured media-only sends when a caption strips to silence, and derive main-session awareness from the cleaned payloads so silent captions no longer leak stale `NO_REPLY` text. (#65016) Thanks @BKF-Gitty.
- Sessions/Codex: skip redundant `delivery-mirror` transcript appends only when the latest assistant message has the same visible text, preventing duplicate visible replies on Codex-backed turns without suppressing repeated answers across turns. (#67185) Thanks @andyylin.
- Auto-reply/prompt-cache: keep volatile inbound chat IDs out of the stable system prompt so task-scoped adapters can reuse prompt caches across runs, while preserving conversation metadata for the user turn and media-only messages. (#65071) Thanks @MonkeyLeeT.
## 2026.4.15-beta.1

View File

@@ -643,27 +643,6 @@ export function describeTtsConfigContract() {
});
it("passes cfg into auto-selection so model-provider Google keys can configure TTS", () => {
const cfg = asLegacyOpenClawConfig({
agents: { defaults: { model: { primary: "openai/gpt-4o-mini" } } },
models: {
providers: {
google: {
apiKey: "model-provider-google-key",
},
},
},
messages: {
tts: {
providers: {
microsoft: {
enabled: false,
},
},
},
},
});
const config = resolveTtsConfig(cfg);
const prefsPath = `/tmp/tts-prefs-google-model-provider-${Date.now()}.json`;
withEnv(
{
OPENAI_API_KEY: undefined,
@@ -674,6 +653,28 @@ export function describeTtsConfigContract() {
GOOGLE_API_KEY: undefined,
},
() => {
const cfg = asLegacyOpenClawConfig({
agents: { defaults: { model: { primary: "openai/gpt-4o-mini" } } },
models: {
providers: {
google: {
apiKey: "model-provider-google-key",
},
},
},
messages: {
tts: {
providers: {
microsoft: {
enabled: false,
},
},
},
},
});
const config = resolveTtsConfig(cfg);
const prefsPath = `/tmp/tts-prefs-google-model-provider-${Date.now()}.json`;
expect(getTtsProvider(config, prefsPath)).toBe("google");
},
);