Kimi K2 models use automatic prefix caching and return cache stats in a nested field, usage.prompt_tokens_details.cached_tokens. This fixes issue #7073, where cacheRead was showing 0 for K2.5 users. Also adds support for the top-level cached_tokens field used by the moonshot-v1 explicit caching API.

Closes #7073
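A minimal sketch of the fix in action, with a hypothetical K2-style usage payload (normalizeUsage is defined in the file below):

// Hypothetical Kimi K2 response usage (automatic prefix caching).
const k2Usage = {
  prompt_tokens: 1200,
  completion_tokens: 80,
  total_tokens: 1280,
  prompt_tokens_details: { cached_tokens: 1024 },
};

normalizeUsage(k2Usage);
// => { input: 1200, output: 80, cacheRead: 1024, cacheWrite: undefined, total: 1280 }
// Before this change, cacheRead stayed at 0 because the nested field was not read.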
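// Superset of raw usage shapes seen across provider responses; all fields optional.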
export type UsageLike = {
  input?: number;
  output?: number;
  cacheRead?: number;
  cacheWrite?: number;
  total?: number;
  // Common alternates across providers/SDKs.
  inputTokens?: number;
  outputTokens?: number;
  promptTokens?: number;
  completionTokens?: number;
  input_tokens?: number;
  output_tokens?: number;
  prompt_tokens?: number;
  completion_tokens?: number;
  cache_read_input_tokens?: number;
  cache_creation_input_tokens?: number;
  // Moonshot/Kimi uses cached_tokens for cache read count (explicit caching API).
  cached_tokens?: number;
  // Kimi K2 uses prompt_tokens_details.cached_tokens for automatic prefix caching.
  prompt_tokens_details?: { cached_tokens?: number };
  // Some agents/logs emit alternate naming.
  totalTokens?: number;
  total_tokens?: number;
  cache_read?: number;
  cache_write?: number;
};

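// Canonical usage shape produced by normalizeUsage.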
export type NormalizedUsage = {
  input?: number;
  output?: number;
  cacheRead?: number;
  cacheWrite?: number;
  total?: number;
};

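// Accepts only finite numbers; NaN, Infinity, and non-numbers map to undefined.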
const asFiniteNumber = (value: unknown): number | undefined => {
  if (typeof value !== "number") {
    return undefined;
  }
  if (!Number.isFinite(value)) {
    return undefined;
  }
  return value;
};

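// Type guard: true when at least one usage field is a finite number greater than zero.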
export function hasNonzeroUsage(usage?: NormalizedUsage | null): usage is NormalizedUsage {
  if (!usage) {
    return false;
  }
  return [usage.input, usage.output, usage.cacheRead, usage.cacheWrite, usage.total].some(
    (v) => typeof v === "number" && Number.isFinite(v) && v > 0,
  );
}

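// Collapses provider-specific field spellings into NormalizedUsage. For cache reads,
// canonical fields are tried first, then Moonshot's top-level cached_tokens, then
// Kimi K2's nested prompt_tokens_details.cached_tokens (#7073). Returns undefined
// when no field holds a finite number.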
export function normalizeUsage(raw?: UsageLike | null): NormalizedUsage | undefined {
  if (!raw) {
    return undefined;
  }

  const input = asFiniteNumber(
    raw.input ?? raw.inputTokens ?? raw.input_tokens ?? raw.promptTokens ?? raw.prompt_tokens,
  );
  const output = asFiniteNumber(
    raw.output ??
      raw.outputTokens ??
      raw.output_tokens ??
      raw.completionTokens ??
      raw.completion_tokens,
  );
  const cacheRead = asFiniteNumber(
    raw.cacheRead ??
      raw.cache_read ??
      raw.cache_read_input_tokens ??
      raw.cached_tokens ??
      raw.prompt_tokens_details?.cached_tokens,
  );
  const cacheWrite = asFiniteNumber(
    raw.cacheWrite ?? raw.cache_write ?? raw.cache_creation_input_tokens,
  );
  const total = asFiniteNumber(raw.total ?? raw.totalTokens ?? raw.total_tokens);

  if (
    input === undefined &&
    output === undefined &&
    cacheRead === undefined &&
    cacheWrite === undefined &&
    total === undefined
  ) {
    return undefined;
  }

  return {
    input,
    output,
    cacheRead,
    cacheWrite,
    total,
  };
}

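// Best-effort prompt size: fresh input plus cache reads and writes. Returns
// undefined rather than 0 when nothing is known, so callers can fall back.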
export function derivePromptTokens(usage?: {
  input?: number;
  cacheRead?: number;
  cacheWrite?: number;
}): number | undefined {
  if (!usage) {
    return undefined;
  }
  const input = usage.input ?? 0;
  const cacheRead = usage.cacheRead ?? 0;
  const cacheWrite = usage.cacheWrite ?? 0;
  const sum = input + cacheRead + cacheWrite;
  return sum > 0 ? sum : undefined;
}

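// Session total with precedence: explicit promptTokens override, then the sum from
// derivePromptTokens, then usage.total, then bare input. contextTokens is accepted
// but deliberately not used for clamping (see NOTE below).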
export function deriveSessionTotalTokens(params: {
  usage?: {
    input?: number;
    total?: number;
    cacheRead?: number;
    cacheWrite?: number;
  };
  contextTokens?: number;
  promptTokens?: number;
}): number | undefined {
  const promptOverride = params.promptTokens;
  const hasPromptOverride =
    typeof promptOverride === "number" && Number.isFinite(promptOverride) && promptOverride > 0;
  const usage = params.usage;
  if (!usage && !hasPromptOverride) {
    return undefined;
  }
  const input = usage?.input ?? 0;
  const promptTokens = hasPromptOverride
    ? promptOverride
    : derivePromptTokens({
        input: usage?.input,
        cacheRead: usage?.cacheRead,
        cacheWrite: usage?.cacheWrite,
      });
  const total = promptTokens ?? usage?.total ?? input;
  if (!(total > 0)) {
    return undefined;
  }

  // NOTE: Do NOT clamp total to contextTokens here. The stored totalTokens
  // should reflect the actual token count (or best estimate). Clamping causes
  // /status to display contextTokens/contextTokens (100%) when the accumulated
  // input exceeds the context window, hiding the real usage. The display layer
  // (formatTokens in status.ts) already caps the percentage at 999%.
  return total;
}
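
A usage sketch with hypothetical numbers, showing the precedence in deriveSessionTotalTokens:

// An explicit promptTokens override wins over the derived sum:
deriveSessionTotalTokens({
  usage: { input: 500, cacheRead: 2000, cacheWrite: 100, total: 2700 },
  promptTokens: 2600,
}); // => 2600

// Without an override, input + cacheRead + cacheWrite is used:
deriveSessionTotalTokens({
  usage: { input: 500, cacheRead: 2000, cacheWrite: 100 },
}); // => 2600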