mirror of
https://github.com/openclaw/openclaw.git
synced 2026-03-12 07:20:45 +00:00
fix(auth/session): preserve override reset behavior and repair OAuth profile-id drift (openclaw#18820) Thanks @Glucksberg
Verified: - pnpm build - pnpm check - pnpm test:macmini Co-authored-by: Glucksberg <80581902+Glucksberg@users.noreply.github.com> Co-authored-by: Tak Hoffman <781889+Takhoffman@users.noreply.github.com>
This commit is contained in:
@@ -29,6 +29,7 @@ Docs: https://docs.openclaw.ai
|
||||
- Discord/Gateway: handle close code 4014 (missing privileged gateway intents) without crashing the gateway. Thanks @thewilloftheshadow.
|
||||
- Security/Net: strip sensitive headers (`Authorization`, `Proxy-Authorization`, `Cookie`, `Cookie2`) on cross-origin redirects in `fetchWithSsrFGuard` to prevent credential forwarding across origin boundaries. (#20313) Thanks @afurm.
|
||||
- Auto-reply/Runner: emit `onAgentRunStart` only after agent lifecycle or tool activity begins (and only once per run), so fallback preflight errors no longer mark runs as started. (#21165) Thanks @shakkernerd.
|
||||
- Agents/Failover: treat non-default override runs as direct fallback-to-configured-primary (skip configured fallback chain), normalize default-model detection for provider casing/whitespace, and add regression coverage for override/auth error paths. (#18820) Thanks @Glucksberg.
|
||||
- Auto-reply/Tool results: serialize tool-result delivery and keep the delivery chain progressing after individual failures so concurrent tool outputs preserve user-visible ordering. (#21231) Thanks @ahdernasr.
|
||||
- Auto-reply/Prompt caching: restore prefix-cache stability by keeping inbound system metadata session-stable and moving per-message IDs (`message_id`, `message_id_full`, `reply_to_id`, `sender_id`) into untrusted conversation context. (#20597) Thanks @anisoptera.
|
||||
- CLI/Onboarding: fix Anthropic-compatible custom provider verification by normalizing base URLs to avoid duplicate `/v1` paths during setup checks. (#21336) Thanks @17jmumford.
|
||||
|
||||
@@ -49,6 +49,74 @@ describe("resolveAuthProfileOrder", () => {
|
||||
});
|
||||
expect(order).toEqual(["minimax:prod"]);
|
||||
});
|
||||
it("falls back to stored provider profiles when config profile ids drift", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg: {
|
||||
auth: {
|
||||
profiles: {
|
||||
"openai-codex:default": {
|
||||
provider: "openai-codex",
|
||||
mode: "oauth",
|
||||
},
|
||||
},
|
||||
order: {
|
||||
"openai-codex": ["openai-codex:default"],
|
||||
},
|
||||
},
|
||||
},
|
||||
store: {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"openai-codex:user@example.com": {
|
||||
type: "oauth",
|
||||
provider: "openai-codex",
|
||||
access: "access-token",
|
||||
refresh: "refresh-token",
|
||||
expires: Date.now() + 60_000,
|
||||
},
|
||||
},
|
||||
},
|
||||
provider: "openai-codex",
|
||||
});
|
||||
expect(order).toEqual(["openai-codex:user@example.com"]);
|
||||
});
|
||||
it("does not bypass explicit ids when the configured profile exists but is invalid", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg: {
|
||||
auth: {
|
||||
profiles: {
|
||||
"openai-codex:default": {
|
||||
provider: "openai-codex",
|
||||
mode: "token",
|
||||
},
|
||||
},
|
||||
order: {
|
||||
"openai-codex": ["openai-codex:default"],
|
||||
},
|
||||
},
|
||||
},
|
||||
store: {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"openai-codex:default": {
|
||||
type: "token",
|
||||
provider: "openai-codex",
|
||||
token: "expired-token",
|
||||
expires: Date.now() - 1_000,
|
||||
},
|
||||
"openai-codex:user@example.com": {
|
||||
type: "oauth",
|
||||
provider: "openai-codex",
|
||||
access: "access-token",
|
||||
refresh: "refresh-token",
|
||||
expires: Date.now() + 60_000,
|
||||
},
|
||||
},
|
||||
},
|
||||
provider: "openai-codex",
|
||||
});
|
||||
expect(order).toEqual([]);
|
||||
});
|
||||
it("drops explicit order entries that belong to another provider", () => {
|
||||
const order = resolveAuthProfileOrder({
|
||||
cfg: {
|
||||
|
||||
@@ -37,7 +37,7 @@ export function resolveAuthProfileOrder(params: {
|
||||
return [];
|
||||
}
|
||||
|
||||
const filtered = baseOrder.filter((profileId) => {
|
||||
const isValidProfile = (profileId: string): boolean => {
|
||||
const cred = store.profiles[profileId];
|
||||
if (!cred) {
|
||||
return false;
|
||||
@@ -78,7 +78,18 @@ export function resolveAuthProfileOrder(params: {
|
||||
return Boolean(cred.access?.trim() || cred.refresh?.trim());
|
||||
}
|
||||
return false;
|
||||
});
|
||||
};
|
||||
let filtered = baseOrder.filter(isValidProfile);
|
||||
|
||||
// Repair config/store profile-id drift from older onboarding flows:
|
||||
// if configured profile ids no longer exist in auth-profiles.json, scan the
|
||||
// provider's stored credentials and use any valid entries.
|
||||
const allBaseProfilesMissing = baseOrder.every((profileId) => !store.profiles[profileId]);
|
||||
if (filtered.length === 0 && explicitProfiles.length > 0 && allBaseProfilesMissing) {
|
||||
const storeProfiles = listProfilesForProvider(store, providerKey);
|
||||
filtered = storeProfiles.filter(isValidProfile);
|
||||
}
|
||||
|
||||
const deduped = dedupeProfileIds(filtered);
|
||||
|
||||
// If user specified explicit order (store override or config), respect it
|
||||
|
||||
@@ -139,6 +139,75 @@ describe("runWithModelFallback", () => {
|
||||
});
|
||||
});
|
||||
|
||||
it("falls back directly to configured primary when an override model fails", async () => {
|
||||
const cfg = makeCfg({
|
||||
agents: {
|
||||
defaults: {
|
||||
model: {
|
||||
primary: "openai/gpt-4.1-mini",
|
||||
fallbacks: ["anthropic/claude-haiku-3-5", "openrouter/deepseek-chat"],
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const run = vi.fn().mockImplementation(async (provider, model) => {
|
||||
if (provider === "anthropic" && model === "claude-opus-4-5") {
|
||||
throw Object.assign(new Error("unauthorized"), { status: 401 });
|
||||
}
|
||||
if (provider === "openai" && model === "gpt-4.1-mini") {
|
||||
return "ok";
|
||||
}
|
||||
throw new Error(`unexpected fallback candidate: ${provider}/${model}`);
|
||||
});
|
||||
|
||||
const result = await runWithModelFallback({
|
||||
cfg,
|
||||
provider: "anthropic",
|
||||
model: "claude-opus-4-5",
|
||||
run,
|
||||
});
|
||||
|
||||
expect(result.result).toBe("ok");
|
||||
expect(result.provider).toBe("openai");
|
||||
expect(result.model).toBe("gpt-4.1-mini");
|
||||
expect(run.mock.calls).toEqual([
|
||||
["anthropic", "claude-opus-4-5"],
|
||||
["openai", "gpt-4.1-mini"],
|
||||
]);
|
||||
});
|
||||
|
||||
it("treats normalized default refs as primary and keeps configured fallback chain", async () => {
|
||||
const cfg = makeCfg({
|
||||
agents: {
|
||||
defaults: {
|
||||
model: {
|
||||
primary: "openai/gpt-4.1-mini",
|
||||
fallbacks: ["anthropic/claude-haiku-3-5"],
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const run = vi
|
||||
.fn()
|
||||
.mockRejectedValueOnce(Object.assign(new Error("nope"), { status: 401 }))
|
||||
.mockResolvedValueOnce("ok");
|
||||
|
||||
const result = await runWithModelFallback({
|
||||
cfg,
|
||||
provider: " OpenAI ",
|
||||
model: "gpt-4.1-mini",
|
||||
run,
|
||||
});
|
||||
|
||||
expect(result.result).toBe("ok");
|
||||
expect(run.mock.calls).toEqual([
|
||||
["openai", "gpt-4.1-mini"],
|
||||
["anthropic", "claude-haiku-3-5"],
|
||||
]);
|
||||
});
|
||||
|
||||
it("falls back on transient HTTP 5xx errors", async () => {
|
||||
await expectFallsBackToHaiku({
|
||||
provider: "openai",
|
||||
@@ -167,12 +236,30 @@ describe("runWithModelFallback", () => {
|
||||
});
|
||||
});
|
||||
|
||||
it("falls back on credential validation errors", async () => {
|
||||
await expectFallsBackToHaiku({
|
||||
it("falls back to configured primary for override credential validation errors", async () => {
|
||||
const cfg = makeCfg();
|
||||
const run = vi.fn().mockImplementation(async (provider, model) => {
|
||||
if (provider === "anthropic" && model === "claude-opus-4") {
|
||||
throw new Error('No credentials found for profile "anthropic:default".');
|
||||
}
|
||||
if (provider === "openai" && model === "gpt-4.1-mini") {
|
||||
return "ok";
|
||||
}
|
||||
throw new Error(`unexpected fallback candidate: ${provider}/${model}`);
|
||||
});
|
||||
|
||||
const result = await runWithModelFallback({
|
||||
cfg,
|
||||
provider: "anthropic",
|
||||
model: "claude-opus-4",
|
||||
firstError: new Error('No credentials found for profile "anthropic:default".'),
|
||||
run,
|
||||
});
|
||||
|
||||
expect(result.result).toBe("ok");
|
||||
expect(run.mock.calls).toEqual([
|
||||
["anthropic", "claude-opus-4"],
|
||||
["openai", "gpt-4.1-mini"],
|
||||
]);
|
||||
});
|
||||
|
||||
it("skips providers when all profiles are in cooldown", async () => {
|
||||
|
||||
@@ -96,6 +96,10 @@ type ModelFallbackRunResult<T> = {
|
||||
attempts: FallbackAttempt[];
|
||||
};
|
||||
|
||||
function sameModelCandidate(a: ModelCandidate, b: ModelCandidate): boolean {
|
||||
return a.provider === b.provider && a.model === b.model;
|
||||
}
|
||||
|
||||
function throwFallbackFailureSummary(params: {
|
||||
attempts: FallbackAttempt[];
|
||||
candidates: ModelCandidate[];
|
||||
@@ -193,6 +197,7 @@ function resolveFallbackCandidates(params: {
|
||||
const providerRaw = String(params.provider ?? "").trim() || defaultProvider;
|
||||
const modelRaw = String(params.model ?? "").trim() || defaultModel;
|
||||
const normalizedPrimary = normalizeModelRef(providerRaw, modelRaw);
|
||||
const configuredPrimary = normalizeModelRef(defaultProvider, defaultModel);
|
||||
const aliasIndex = buildModelAliasIndex({
|
||||
cfg: params.cfg ?? {},
|
||||
defaultProvider,
|
||||
@@ -209,6 +214,11 @@ function resolveFallbackCandidates(params: {
|
||||
if (params.fallbacksOverride !== undefined) {
|
||||
return params.fallbacksOverride;
|
||||
}
|
||||
// Skip configured fallback chain when the user runs a non-default override.
|
||||
// In that case, retry should return directly to configured primary.
|
||||
if (!sameModelCandidate(normalizedPrimary, configuredPrimary)) {
|
||||
return []; // Override model failed → go straight to configured default
|
||||
}
|
||||
const model = params.cfg?.agents?.defaults?.model as
|
||||
| { fallbacks?: string[] }
|
||||
| string
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
import { resolveAgentDir, resolveSessionAgentId } from "../../agents/agent-scope.js";
|
||||
import { DEFAULT_MODEL, DEFAULT_PROVIDER } from "../../agents/defaults.js";
|
||||
import { resolveModelAuthLabel } from "../../agents/model-auth-label.js";
|
||||
import { loadModelCatalog } from "../../agents/model-catalog.js";
|
||||
import {
|
||||
buildAllowedModelSet,
|
||||
@@ -8,6 +10,7 @@ import {
|
||||
resolveModelRefFromString,
|
||||
} from "../../agents/model-selection.js";
|
||||
import type { OpenClawConfig } from "../../config/config.js";
|
||||
import type { SessionEntry } from "../../config/sessions.js";
|
||||
import {
|
||||
buildModelsKeyboard,
|
||||
buildProviderKeyboard,
|
||||
@@ -177,11 +180,47 @@ function parseModelsArgs(raw: string): {
|
||||
};
|
||||
}
|
||||
|
||||
function resolveProviderLabel(params: {
|
||||
provider: string;
|
||||
cfg: OpenClawConfig;
|
||||
agentDir?: string;
|
||||
sessionEntry?: SessionEntry;
|
||||
}): string {
|
||||
const authLabel = resolveModelAuthLabel({
|
||||
provider: params.provider,
|
||||
cfg: params.cfg,
|
||||
sessionEntry: params.sessionEntry,
|
||||
agentDir: params.agentDir,
|
||||
});
|
||||
if (!authLabel || authLabel === "unknown") {
|
||||
return params.provider;
|
||||
}
|
||||
return `${params.provider} · 🔑 ${authLabel}`;
|
||||
}
|
||||
|
||||
export function formatModelsAvailableHeader(params: {
|
||||
provider: string;
|
||||
total: number;
|
||||
cfg: OpenClawConfig;
|
||||
agentDir?: string;
|
||||
sessionEntry?: SessionEntry;
|
||||
}): string {
|
||||
const providerLabel = resolveProviderLabel({
|
||||
provider: params.provider,
|
||||
cfg: params.cfg,
|
||||
agentDir: params.agentDir,
|
||||
sessionEntry: params.sessionEntry,
|
||||
});
|
||||
return `Models (${providerLabel}) — ${params.total} available`;
|
||||
}
|
||||
|
||||
export async function resolveModelsCommandReply(params: {
|
||||
cfg: OpenClawConfig;
|
||||
commandBodyNormalized: string;
|
||||
surface?: string;
|
||||
currentModel?: string;
|
||||
agentDir?: string;
|
||||
sessionEntry?: SessionEntry;
|
||||
}): Promise<ReplyPayload | null> {
|
||||
const body = params.commandBodyNormalized.trim();
|
||||
if (!body.startsWith("/models")) {
|
||||
@@ -237,10 +276,16 @@ export async function resolveModelsCommandReply(params: {
|
||||
|
||||
const models = [...(byProvider.get(provider) ?? new Set<string>())].toSorted();
|
||||
const total = models.length;
|
||||
const providerLabel = resolveProviderLabel({
|
||||
provider,
|
||||
cfg: params.cfg,
|
||||
agentDir: params.agentDir,
|
||||
sessionEntry: params.sessionEntry,
|
||||
});
|
||||
|
||||
if (total === 0) {
|
||||
const lines: string[] = [
|
||||
`Models (${provider}) — none`,
|
||||
`Models (${providerLabel}) — none`,
|
||||
"",
|
||||
"Browse: /models",
|
||||
"Switch: /model <provider/model>",
|
||||
@@ -263,7 +308,13 @@ export async function resolveModelsCommandReply(params: {
|
||||
pageSize: telegramPageSize,
|
||||
});
|
||||
|
||||
const text = `Models (${provider}) — ${total} available`;
|
||||
const text = formatModelsAvailableHeader({
|
||||
provider,
|
||||
total,
|
||||
cfg: params.cfg,
|
||||
agentDir: params.agentDir,
|
||||
sessionEntry: params.sessionEntry,
|
||||
});
|
||||
return {
|
||||
text,
|
||||
channelData: { telegram: { buttons } },
|
||||
@@ -289,7 +340,7 @@ export async function resolveModelsCommandReply(params: {
|
||||
const endIndexExclusive = Math.min(total, startIndex + effectivePageSize);
|
||||
const pageModels = models.slice(startIndex, endIndexExclusive);
|
||||
|
||||
const header = `Models (${provider}) — showing ${startIndex + 1}-${endIndexExclusive} of ${total} (page ${safePage}/${pageCount})`;
|
||||
const header = `Models (${providerLabel}) — showing ${startIndex + 1}-${endIndexExclusive} of ${total} (page ${safePage}/${pageCount})`;
|
||||
|
||||
const lines: string[] = [header];
|
||||
for (const id of pageModels) {
|
||||
@@ -313,11 +364,21 @@ export const handleModelsCommand: CommandHandler = async (params, allowTextComma
|
||||
return null;
|
||||
}
|
||||
|
||||
const modelsAgentId =
|
||||
params.agentId ??
|
||||
resolveSessionAgentId({
|
||||
sessionKey: params.sessionKey,
|
||||
config: params.cfg,
|
||||
});
|
||||
const modelsAgentDir = resolveAgentDir(params.cfg, modelsAgentId);
|
||||
|
||||
const reply = await resolveModelsCommandReply({
|
||||
cfg: params.cfg,
|
||||
commandBodyNormalized: params.command.commandBodyNormalized,
|
||||
surface: params.ctx.Surface,
|
||||
currentModel: params.model ? `${params.provider}/${params.model}` : undefined,
|
||||
agentDir: modelsAgentDir,
|
||||
sessionEntry: params.sessionEntry,
|
||||
});
|
||||
if (!reply) {
|
||||
return null;
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import crypto from "node:crypto";
|
||||
import { resolveSessionAuthProfileOverride } from "../../agents/auth-profiles/session-override.js";
|
||||
import type { ExecToolDefaults } from "../../agents/bash-tools.js";
|
||||
import { resolveModelAuthLabel } from "../../agents/model-auth-label.js";
|
||||
import {
|
||||
abortEmbeddedPiRun,
|
||||
isEmbeddedPiRunActive,
|
||||
@@ -325,10 +326,18 @@ export async function runPreparedReply(
|
||||
if (channel && to) {
|
||||
const modelLabel = `${provider}/${model}`;
|
||||
const defaultLabel = `${defaultProvider}/${defaultModel}`;
|
||||
const modelAuthLabel = resolveModelAuthLabel({
|
||||
provider,
|
||||
cfg,
|
||||
sessionEntry,
|
||||
agentDir,
|
||||
});
|
||||
const authSuffix =
|
||||
modelAuthLabel && modelAuthLabel !== "unknown" ? ` · 🔑 ${modelAuthLabel}` : "";
|
||||
const text =
|
||||
modelLabel === defaultLabel
|
||||
? `✅ New session started · model: ${modelLabel}`
|
||||
: `✅ New session started · model: ${modelLabel} (default: ${defaultLabel})`;
|
||||
? `✅ New session started · model: ${modelLabel}${authSuffix}`
|
||||
: `✅ New session started · model: ${modelLabel} (default: ${defaultLabel})${authSuffix}`;
|
||||
await routeReply({
|
||||
payload: { text },
|
||||
channel,
|
||||
|
||||
@@ -13,19 +13,7 @@ type IncrementRunCompactionCountParams = Omit<
|
||||
};
|
||||
|
||||
export async function persistRunSessionUsage(params: PersistRunSessionUsageParams): Promise<void> {
|
||||
await persistSessionUsageUpdate({
|
||||
storePath: params.storePath,
|
||||
sessionKey: params.sessionKey,
|
||||
usage: params.usage,
|
||||
lastCallUsage: params.lastCallUsage,
|
||||
promptTokens: params.promptTokens,
|
||||
modelUsed: params.modelUsed,
|
||||
providerUsed: params.providerUsed,
|
||||
contextTokensUsed: params.contextTokensUsed,
|
||||
systemPromptReport: params.systemPromptReport,
|
||||
cliSessionId: params.cliSessionId,
|
||||
logLabel: params.logLabel,
|
||||
});
|
||||
await persistSessionUsageUpdate(params);
|
||||
}
|
||||
|
||||
export async function incrementRunCompactionCount(
|
||||
|
||||
@@ -256,6 +256,8 @@ export async function initSessionState(params: {
|
||||
persistedVerbose = entry.verboseLevel;
|
||||
persistedReasoning = entry.reasoningLevel;
|
||||
persistedTtsAuto = entry.ttsAuto;
|
||||
persistedModelOverride = entry.modelOverride;
|
||||
persistedProviderOverride = entry.providerOverride;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -68,11 +68,7 @@ export async function applyAuthChoiceOAuth(
|
||||
});
|
||||
|
||||
spin.stop("Chutes OAuth complete");
|
||||
const email =
|
||||
typeof creds.email === "string" && creds.email.trim() ? creds.email.trim() : "default";
|
||||
const profileId = `chutes:${email}`;
|
||||
|
||||
await writeOAuthCredentials("chutes", creds, params.agentDir);
|
||||
const profileId = await writeOAuthCredentials("chutes", creds, params.agentDir);
|
||||
nextConfig = applyAuthProfileConfig(nextConfig, {
|
||||
profileId,
|
||||
provider: "chutes",
|
||||
|
||||
@@ -117,9 +117,9 @@ export async function applyAuthChoiceOpenAI(
|
||||
return { config: nextConfig, agentModelOverride };
|
||||
}
|
||||
if (creds) {
|
||||
await writeOAuthCredentials("openai-codex", creds, params.agentDir);
|
||||
const profileId = await writeOAuthCredentials("openai-codex", creds, params.agentDir);
|
||||
nextConfig = applyAuthProfileConfig(nextConfig, {
|
||||
profileId: "openai-codex:default",
|
||||
profileId,
|
||||
provider: "openai-codex",
|
||||
mode: "oauth",
|
||||
});
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import fs from "node:fs/promises";
|
||||
import type { OAuthCredentials } from "@mariozechner/pi-ai";
|
||||
import { afterEach, describe, expect, it, vi } from "vitest";
|
||||
import type { WizardPrompter } from "../wizard/prompts.js";
|
||||
import { applyAuthChoice, resolvePreferredProviderForAuthChoice } from "./auth-choice.js";
|
||||
@@ -22,7 +23,9 @@ vi.mock("../providers/github-copilot-auth.js", () => ({
|
||||
githubCopilotLoginCommand: vi.fn(async () => {}),
|
||||
}));
|
||||
|
||||
const loginOpenAICodexOAuth = vi.hoisted(() => vi.fn(async () => null));
|
||||
const loginOpenAICodexOAuth = vi.hoisted(() =>
|
||||
vi.fn<() => Promise<OAuthCredentials | null>>(async () => null),
|
||||
);
|
||||
vi.mock("./openai-codex-oauth.js", () => ({
|
||||
loginOpenAICodexOAuth,
|
||||
}));
|
||||
@@ -123,6 +126,41 @@ describe("applyAuthChoice", () => {
|
||||
).resolves.toEqual({ config: {} });
|
||||
});
|
||||
|
||||
it("stores openai-codex OAuth with email profile id", async () => {
|
||||
await setupTempState();
|
||||
|
||||
loginOpenAICodexOAuth.mockResolvedValueOnce({
|
||||
email: "user@example.com",
|
||||
refresh: "refresh-token",
|
||||
access: "access-token",
|
||||
expires: Date.now() + 60_000,
|
||||
});
|
||||
|
||||
const prompter = createPrompter({});
|
||||
const runtime = createExitThrowingRuntime();
|
||||
|
||||
const result = await applyAuthChoice({
|
||||
authChoice: "openai-codex",
|
||||
config: {},
|
||||
prompter,
|
||||
runtime,
|
||||
setDefaultModel: false,
|
||||
});
|
||||
|
||||
expect(result.config.auth?.profiles?.["openai-codex:user@example.com"]).toMatchObject({
|
||||
provider: "openai-codex",
|
||||
mode: "oauth",
|
||||
});
|
||||
expect(result.config.auth?.profiles?.["openai-codex:default"]).toBeUndefined();
|
||||
expect(await readAuthProfile("openai-codex:user@example.com")).toMatchObject({
|
||||
type: "oauth",
|
||||
provider: "openai-codex",
|
||||
refresh: "refresh-token",
|
||||
access: "access-token",
|
||||
email: "user@example.com",
|
||||
});
|
||||
});
|
||||
|
||||
it("prompts and writes MiniMax API key when selecting minimax-api", async () => {
|
||||
await setupTempState();
|
||||
|
||||
|
||||
@@ -10,11 +10,12 @@ export async function writeOAuthCredentials(
|
||||
provider: string,
|
||||
creds: OAuthCredentials,
|
||||
agentDir?: string,
|
||||
): Promise<void> {
|
||||
): Promise<string> {
|
||||
const email =
|
||||
typeof creds.email === "string" && creds.email.trim() ? creds.email.trim() : "default";
|
||||
const profileId = `${provider}:${email}`;
|
||||
upsertAuthProfile({
|
||||
profileId: `${provider}:${email}`,
|
||||
profileId,
|
||||
credential: {
|
||||
type: "oauth",
|
||||
provider,
|
||||
@@ -22,6 +23,7 @@ export async function writeOAuthCredentials(
|
||||
},
|
||||
agentDir: resolveAuthAgentDir(agentDir),
|
||||
});
|
||||
return profileId;
|
||||
}
|
||||
|
||||
export async function setAnthropicApiKey(key: string, agentDir?: string) {
|
||||
|
||||
@@ -125,12 +125,13 @@ describe("writeOAuthCredentials", () => {
|
||||
expires: Date.now() + 60_000,
|
||||
} satisfies OAuthCredentials;
|
||||
|
||||
await writeOAuthCredentials("openai-codex", creds);
|
||||
const profileId = await writeOAuthCredentials("openai-codex", creds);
|
||||
expect(profileId).toBe("openai-codex:default");
|
||||
|
||||
const parsed = await readAuthProfilesForAgent<{
|
||||
profiles?: Record<string, OAuthCredentials & { type?: string }>;
|
||||
}>(env.agentDir);
|
||||
expect(parsed.profiles?.["openai-codex:default"]).toMatchObject({
|
||||
expect(parsed.profiles?.[profileId]).toMatchObject({
|
||||
refresh: "refresh-token",
|
||||
access: "access-token",
|
||||
type: "oauth",
|
||||
@@ -140,6 +141,32 @@ describe("writeOAuthCredentials", () => {
|
||||
fs.readFile(path.join(env.stateDir, "agents", "main", "agent", "auth-profiles.json"), "utf8"),
|
||||
).rejects.toThrow();
|
||||
});
|
||||
|
||||
it("uses OAuth email as profile id when provided", async () => {
|
||||
const env = await setupAuthTestEnv("openclaw-oauth-");
|
||||
lifecycle.setStateDir(env.stateDir);
|
||||
|
||||
const creds = {
|
||||
email: "user@example.com",
|
||||
refresh: "refresh-token",
|
||||
access: "access-token",
|
||||
expires: Date.now() + 60_000,
|
||||
} satisfies OAuthCredentials;
|
||||
|
||||
const profileId = await writeOAuthCredentials("openai-codex", creds);
|
||||
expect(profileId).toBe("openai-codex:user@example.com");
|
||||
|
||||
const parsed = await readAuthProfilesForAgent<{
|
||||
profiles?: Record<string, OAuthCredentials & { type?: string }>;
|
||||
}>(env.agentDir);
|
||||
expect(parsed.profiles?.[profileId]).toMatchObject({
|
||||
refresh: "refresh-token",
|
||||
access: "access-token",
|
||||
type: "oauth",
|
||||
provider: "openai-codex",
|
||||
email: "user@example.com",
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("setMinimaxApiKey", () => {
|
||||
|
||||
@@ -64,7 +64,11 @@ export function applyModelOverrideToSessionEntry(params: {
|
||||
}
|
||||
}
|
||||
|
||||
// Clear stale fallback notice when the user explicitly switches models.
|
||||
if (updated) {
|
||||
delete entry.fallbackNoticeSelectedModel;
|
||||
delete entry.fallbackNoticeActiveModel;
|
||||
delete entry.fallbackNoticeReason;
|
||||
entry.updatedAt = Date.now();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
import type { Message, ReactionTypeEmoji } from "@grammyjs/types";
|
||||
import { resolveDefaultAgentId } from "../agents/agent-scope.js";
|
||||
import { resolveAgentDir, resolveDefaultAgentId } from "../agents/agent-scope.js";
|
||||
import { hasControlCommand } from "../auto-reply/command-detection.js";
|
||||
import {
|
||||
createInboundDebouncer,
|
||||
resolveInboundDebounceMs,
|
||||
} from "../auto-reply/inbound-debounce.js";
|
||||
import { buildCommandsPaginationKeyboard } from "../auto-reply/reply/commands-info.js";
|
||||
import { buildModelsProviderData } from "../auto-reply/reply/commands-models.js";
|
||||
import {
|
||||
buildModelsProviderData,
|
||||
formatModelsAvailableHeader,
|
||||
} from "../auto-reply/reply/commands-models.js";
|
||||
import { resolveStoredModelOverride } from "../auto-reply/reply/model-selection.js";
|
||||
import { listSkillCommandsForAgents } from "../auto-reply/skill-commands.js";
|
||||
import { buildCommandsMessagePaginated } from "../auto-reply/status.js";
|
||||
@@ -182,13 +185,17 @@ export const registerTelegramHandlers = ({
|
||||
},
|
||||
});
|
||||
|
||||
const resolveTelegramSessionModel = (params: {
|
||||
const resolveTelegramSessionState = (params: {
|
||||
chatId: number | string;
|
||||
isGroup: boolean;
|
||||
isForum: boolean;
|
||||
messageThreadId?: number;
|
||||
resolvedThreadId?: number;
|
||||
}): string | undefined => {
|
||||
}): {
|
||||
agentId: string;
|
||||
sessionEntry: ReturnType<typeof loadSessionStore>[string];
|
||||
model?: string;
|
||||
} => {
|
||||
const resolvedThreadId =
|
||||
params.resolvedThreadId ??
|
||||
resolveTelegramForumThreadId({
|
||||
@@ -229,17 +236,29 @@ export const registerTelegramHandlers = ({
|
||||
sessionKey,
|
||||
});
|
||||
if (storedOverride) {
|
||||
return storedOverride.provider
|
||||
? `${storedOverride.provider}/${storedOverride.model}`
|
||||
: storedOverride.model;
|
||||
return {
|
||||
agentId: route.agentId,
|
||||
sessionEntry: entry,
|
||||
model: storedOverride.provider
|
||||
? `${storedOverride.provider}/${storedOverride.model}`
|
||||
: storedOverride.model,
|
||||
};
|
||||
}
|
||||
const provider = entry?.modelProvider?.trim();
|
||||
const model = entry?.model?.trim();
|
||||
if (provider && model) {
|
||||
return `${provider}/${model}`;
|
||||
return {
|
||||
agentId: route.agentId,
|
||||
sessionEntry: entry,
|
||||
model: `${provider}/${model}`,
|
||||
};
|
||||
}
|
||||
const modelCfg = cfg.agents?.defaults?.model;
|
||||
return typeof modelCfg === "string" ? modelCfg : modelCfg?.primary;
|
||||
return {
|
||||
agentId: route.agentId,
|
||||
sessionEntry: entry,
|
||||
model: typeof modelCfg === "string" ? modelCfg : modelCfg?.primary,
|
||||
};
|
||||
};
|
||||
|
||||
const processMediaGroup = async (entry: MediaGroupEntry) => {
|
||||
@@ -933,13 +952,14 @@ export const registerTelegramHandlers = ({
|
||||
const safePage = Math.max(1, Math.min(page, totalPages));
|
||||
|
||||
// Resolve current model from session (prefer overrides)
|
||||
const currentModel = resolveTelegramSessionModel({
|
||||
const sessionState = resolveTelegramSessionState({
|
||||
chatId,
|
||||
isGroup,
|
||||
isForum,
|
||||
messageThreadId,
|
||||
resolvedThreadId,
|
||||
});
|
||||
const currentModel = sessionState.model;
|
||||
|
||||
const buttons = buildModelsKeyboard({
|
||||
provider,
|
||||
@@ -949,7 +969,13 @@ export const registerTelegramHandlers = ({
|
||||
totalPages,
|
||||
pageSize,
|
||||
});
|
||||
const text = `Models (${provider}) — ${models.length} available`;
|
||||
const text = formatModelsAvailableHeader({
|
||||
provider,
|
||||
total: models.length,
|
||||
cfg,
|
||||
agentDir: resolveAgentDir(cfg, sessionState.agentId),
|
||||
sessionEntry: sessionState.sessionEntry,
|
||||
});
|
||||
await editMessageWithButtons(text, buttons);
|
||||
return;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user