fix(memory): preserve embedding proxy provider prefixes (#66452)

* fix(memory): preserve embedding proxy provider prefixes

* docs(changelog): fix embeddings entry

* Update CHANGELOG.md
This commit is contained in:
Vincent Koc
2026-04-14 11:05:07 +01:00
committed by GitHub
parent e9f5619716
commit 8820a43818
3 changed files with 33 additions and 6 deletions

View File

@@ -19,6 +19,7 @@ Docs: https://docs.openclaw.ai
- Ollama/OpenAI-compat: send `stream_options.include_usage` for Ollama streaming completions so local Ollama runs report real usage instead of falling back to bogus prompt-token counts that trigger premature compaction. (#64568) Thanks @xchunzhao and @vincentkoc.
- Doctor/plugins: cache external `preferOver` catalog lookups within each plugin auto-enable pass so large `agents.list` configs no longer peg CPU and repeatedly reread plugin catalogs during doctor/plugins resolution. (#66246) Thanks @yfge.
- GitHub Copilot/thinking: allow `github-copilot/gpt-5.4` to use `xhigh` reasoning so Copilot GPT-5.4 matches the rest of the GPT-5.4 family. (#50168) Thanks @jakepresent and @vincentkoc.
- Memory/embeddings: preserve non-OpenAI provider prefixes when normalizing OpenAI-compatible embedding model refs so proxy-backed memory providers stop failing with `Unknown memory embedding provider`. (#66452) Thanks @jlapenna.
- Agents/local models: clarify low-context preflight hints for self-hosted models, point config-backed caps at the relevant OpenClaw setting, and stop suggesting larger models when `agents.defaults.contextTokens` is the real limit. (#66236) Thanks @ImLukeF.
- Browser/SSRF: restore hostname navigation under the default browser SSRF policy while keeping explicit strict mode reachable from config, and keep managed loopback CDP `/json/new` fallback requests on the local CDP control policy so browser follow-up fixes stop regressing normal navigation or self-blocking local CDP control. (#66386) Thanks @obviyus.
- Models/Codex: canonicalize the legacy `openai-codex/gpt-5.4-codex` runtime alias to `openai-codex/gpt-5.4` while still honoring alias-specific and canonical per-model overrides. (#43060) Thanks @Sapientropic and @vincentkoc.

View File

@@ -0,0 +1,25 @@
import { describe, expect, it } from "vitest";
import { DEFAULT_OPENAI_EMBEDDING_MODEL, normalizeOpenAiModel } from "./embeddings-openai.js";
describe("normalizeOpenAiModel", () => {
it("returns the default model when input is blank", () => {
expect(normalizeOpenAiModel("")).toBe(DEFAULT_OPENAI_EMBEDDING_MODEL);
expect(normalizeOpenAiModel(" ")).toBe(DEFAULT_OPENAI_EMBEDDING_MODEL);
});
it("strips the openai/ prefix", () => {
expect(normalizeOpenAiModel("openai/text-embedding-3-small")).toBe("text-embedding-3-small");
expect(normalizeOpenAiModel("openai/text-embedding-ada-002")).toBe("text-embedding-ada-002");
});
it("preserves explicit third-party provider prefixes", () => {
expect(normalizeOpenAiModel("spark/text-embedding-3-small")).toBe(
"spark/text-embedding-3-small",
);
expect(normalizeOpenAiModel("litellm/azure/ada-002")).toBe("litellm/azure/ada-002");
});
it("preserves unprefixed model ids", () => {
expect(normalizeOpenAiModel("text-embedding-3-large")).toBe("text-embedding-3-large");
});
});

View File

@@ -1,6 +1,6 @@
import { parseStaticModelRef } from "../../agents/model-ref-shared.js";
import type { SsrFPolicy } from "../../infra/net/ssrf.js";
import { OPENAI_DEFAULT_EMBEDDING_MODEL } from "../../plugins/provider-model-defaults.js";
import { normalizeEmbeddingModelWithPrefixes } from "./embeddings-model-normalize.js";
import {
createRemoteEmbeddingProvider,
resolveRemoteEmbeddingClient,
@@ -24,11 +24,12 @@ const OPENAI_MAX_INPUT_TOKENS: Record<string, number> = {
};
/**
 * Normalizes an embedding model ref for the OpenAI provider.
 *
 * Blank input falls back to {@link DEFAULT_OPENAI_EMBEDDING_MODEL}. A ref that
 * explicitly resolves to the `openai` provider (e.g. `openai/text-embedding-3-small`)
 * has its prefix stripped; any other provider prefix (e.g. `litellm/azure/ada-002`)
 * is preserved so proxy-backed memory providers keep working (#66452).
 *
 * @param model - Raw model ref from config; may be blank or provider-prefixed.
 * @returns The bare OpenAI model id, or the original (trimmed) ref when it
 *          names a non-OpenAI provider.
 */
export function normalizeOpenAiModel(model: string): string {
  const trimmed = model.trim();
  if (!trimmed) {
    return DEFAULT_OPENAI_EMBEDDING_MODEL;
  }
  // "openai" is only the *default* provider here; parseStaticModelRef keeps an
  // explicit third-party prefix intact, so we strip only genuine openai/ refs.
  const parsed = parseStaticModelRef(trimmed, "openai");
  return parsed && parsed.provider === "openai" ? parsed.model : trimmed;
}
export async function createOpenAiEmbeddingProvider(