Files
openclaw/src/memory/embedding-input-limits.ts
Rodrigo Uroz 7f1712c1ba fix: enforce embedding model token limit to prevent overflow (#13455)
* fix: enforce embedding model token limit to prevent 8192 overflow

- Replace EMBEDDING_APPROX_CHARS_PER_TOKEN=1 with UTF-8 byte length
  estimation (safe upper bound for tokenizer output)
- Add EMBEDDING_MODEL_MAX_TOKENS=8192 hard cap
- Add splitChunkToTokenLimit() that binary-searches for the largest
  safe split point, with surrogate pair handling
- Add enforceChunkTokenLimit() wrapper called in indexFile() after
  chunkMarkdown(), before any embedding API call (sketched after this list)
- Fixes: session files with large JSONL entries could produce chunks
  exceeding text-embedding-3-small's 8192 token limit
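
enforceChunkTokenLimit() itself lives in the indexing code, not in the file
shown below. A minimal sketch of the shape it plausibly takes, assuming
chunks arrive as plain strings (the signature is illustrative, not the
actual one from the PR):

import { splitTextToUtf8ByteLimit } from "./embedding-input-limits";

// Since token_count <= utf8_byte_length, capping each chunk at maxTokens
// UTF-8 bytes guarantees it also fits within maxTokens tokens.
export function enforceChunkTokenLimit(chunks: string[], maxTokens: number): string[] {
  return chunks.flatMap((chunk) => splitTextToUtf8ByteLimit(chunk, maxTokens));
}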

Tests: 2 new colocated tests in manager.embedding-token-limit.test.ts
- Verifies oversized ASCII chunks are split to <=8192 bytes each
- Verifies multibyte (emoji) content batching respects byte limits
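
The colocated tests aren't shown on this page. A sketch of what the ASCII
case plausibly checks, assuming the repo uses vitest (the test body and
constants here are illustrative):

import { expect, it } from "vitest";
import { estimateUtf8Bytes, splitTextToUtf8ByteLimit } from "./embedding-input-limits";

it("splits oversized ASCII chunks to <= 8192 bytes each", () => {
  const parts = splitTextToUtf8ByteLimit("x".repeat(20_000), 8192);
  // No text is lost, and every part respects the byte cap.
  expect(parts.join("")).toBe("x".repeat(20_000));
  for (const part of parts) {
    expect(estimateUtf8Bytes(part)).toBeLessThanOrEqual(8192);
  }
});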

* fix: make embedding token limit provider-aware

- Add optional maxInputTokens to EmbeddingProvider interface
- Each provider (openai, gemini, voyage) reports its own limit
- Known-limits map as fallback: openai 8192, gemini 2048, voyage 32K
- Resolution: provider field > known map > default 8192 (see the sketch below)
- Backward compatible: local/llama uses fallback
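
A minimal sketch of that resolution order (the identifiers here are
illustrative; the actual names in the PR may differ):

const KNOWN_EMBEDDING_TOKEN_LIMITS: Record<string, number> = {
  openai: 8192,
  gemini: 2048,
  voyage: 32000,
};
const DEFAULT_EMBEDDING_MAX_TOKENS = 8192;

function resolveMaxInputTokens(provider: { id: string; maxInputTokens?: number }): number {
  // Provider-reported limit wins, then the known-limits map, then the
  // conservative default (which is what local/llama falls back to).
  return (
    provider.maxInputTokens ??
    KNOWN_EMBEDDING_TOKEN_LIMITS[provider.id] ??
    DEFAULT_EMBEDDING_MAX_TOKENS
  );
}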

* fix: enforce embedding input size limits (#13455) (thanks @rodrigouroz)

---------

Co-authored-by: Tak Hoffman <781889+Takhoffman@users.noreply.github.com>
2026-02-10 20:10:17 -06:00

68 lines
1.7 KiB
TypeScript

// Helpers for enforcing embedding model input size limits.
//
// We use UTF-8 byte length as a conservative upper bound for tokenizer output.
// Tokenizers operate over bytes; a token must contain at least one byte, so
// token_count <= utf8_byte_length.
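// Example: "hello" is 5 UTF-8 bytes but typically a single BPE token, so
// the byte count over-estimates tokens and never under-estimates them.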
export function estimateUtf8Bytes(text: string): number {
  if (!text) {
    return 0;
  }
  return Buffer.byteLength(text, "utf8");
}

export function splitTextToUtf8ByteLimit(text: string, maxUtf8Bytes: number): string[] {
  if (maxUtf8Bytes <= 0) {
    return [text];
  }
  if (estimateUtf8Bytes(text) <= maxUtf8Bytes) {
    return [text];
  }
  const parts: string[] = [];
  let cursor = 0;
  while (cursor < text.length) {
    // The number of UTF-16 code units is always <= the number of UTF-8 bytes.
    // This makes `cursor + maxUtf8Bytes` a safe upper bound on the next split point.
    let low = cursor + 1;
    let high = Math.min(text.length, cursor + maxUtf8Bytes);
    let best = cursor;
    while (low <= high) {
      const mid = Math.floor((low + high) / 2);
      const bytes = estimateUtf8Bytes(text.slice(cursor, mid));
      if (bytes <= maxUtf8Bytes) {
        best = mid;
        low = mid + 1;
      } else {
        high = mid - 1;
      }
    }
    if (best <= cursor) {
      best = Math.min(text.length, cursor + 1);
    }
    // Avoid splitting inside a surrogate pair.
    if (
      best < text.length &&
      best > cursor &&
      text.charCodeAt(best - 1) >= 0xd800 &&
      text.charCodeAt(best - 1) <= 0xdbff &&
      text.charCodeAt(best) >= 0xdc00 &&
      text.charCodeAt(best) <= 0xdfff
    ) {
      best -= 1;
      // Backing up can stall the loop when the pair alone exceeds the byte
      // limit; emit the whole pair rather than silently dropping the tail.
      if (best === cursor) {
        best = Math.min(text.length, cursor + 2);
      }
    }
    const part = text.slice(cursor, best);
    if (!part) {
      break;
    }
    parts.push(part);
    cursor = best;
  }
  return parts;
}
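
A quick usage sketch against the helpers above (the 12-byte limit and the
input are made up for illustration):

const parts = splitTextToUtf8ByteLimit("a".repeat(20) + "🙂", 12);
// -> ["aaaaaaaaaaaa", "aaaaaaaa🙂"]: each part stays within 12 UTF-8 bytes,
// and the emoji's surrogate pair is never split across parts.
for (const part of parts) {
  console.assert(estimateUtf8Bytes(part) <= 12);
}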