mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-06 18:20:44 +00:00
fix: harden custom-provider verification probes (#24743) (thanks @Glucksberg)
This commit is contained in:
@@ -10,6 +10,7 @@ Docs: https://docs.openclaw.ai
|
|||||||
|
|
||||||
### Fixes

- Onboarding/Custom providers: raise verification probe token budgets for OpenAI and Anthropic compatibility checks to avoid false negatives on strict provider defaults. (#24743) Thanks @Glucksberg.
- WhatsApp/DM routing: only update main-session last-route state when DM traffic is bound to the main session, preserving isolated `dmScope` routing. (#24949) Thanks @kevinWangSheng.
- Providers/OpenRouter: when thinking is explicitly off, avoid injecting `reasoning.effort` so reasoning-required models can use provider defaults instead of failing request validation. (#24863) Thanks @DevSecTim.
- Status/Pairing recovery: show explicit pairing-approval command hints (including requestId when safe) when gateway probe failures report pairing-required closures. (#24771) Thanks @markmusson.
|
|||||||
@@ -116,6 +116,35 @@ describe("promptCustomApiConfig", () => {
|
|||||||
expectOpenAiCompatResult({ prompter, textCalls: 5, selectCalls: 1, result });
|
expectOpenAiCompatResult({ prompter, textCalls: 5, selectCalls: 1, result });
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it("uses expanded max_tokens for openai verification probes", async () => {
|
||||||
|
const prompter = createTestPrompter({
|
||||||
|
text: ["https://example.com/v1", "test-key", "detected-model", "custom", "alias"],
|
||||||
|
select: ["openai"],
|
||||||
|
});
|
||||||
|
const fetchMock = stubFetchSequence([{ ok: true }]);
|
||||||
|
|
||||||
|
await runPromptCustomApi(prompter);
|
||||||
|
|
||||||
|
const firstCall = fetchMock.mock.calls[0]?.[1] as { body?: string } | undefined;
|
||||||
|
expect(firstCall?.body).toBeDefined();
|
||||||
|
expect(JSON.parse(firstCall?.body ?? "{}")).toMatchObject({ max_tokens: 1024 });
|
||||||
|
});
|
||||||
|
|
||||||
|
it("uses expanded max_tokens for anthropic verification probes", async () => {
|
||||||
|
const prompter = createTestPrompter({
|
||||||
|
text: ["https://example.com", "test-key", "detected-model", "custom", "alias"],
|
||||||
|
select: ["unknown"],
|
||||||
|
});
|
||||||
|
const fetchMock = stubFetchSequence([{ ok: false, status: 404 }, { ok: true }]);
|
||||||
|
|
||||||
|
await runPromptCustomApi(prompter);
|
||||||
|
|
||||||
|
expect(fetchMock).toHaveBeenCalledTimes(2);
|
||||||
|
const secondCall = fetchMock.mock.calls[1]?.[1] as { body?: string } | undefined;
|
||||||
|
expect(secondCall?.body).toBeDefined();
|
||||||
|
expect(JSON.parse(secondCall?.body ?? "{}")).toMatchObject({ max_tokens: 1024 });
|
||||||
|
});
|
||||||
|
|
||||||
it("re-prompts base url when unknown detection fails", async () => {
|
it("re-prompts base url when unknown detection fails", async () => {
|
||||||
const prompter = createTestPrompter({
|
const prompter = createTestPrompter({
|
||||||
text: [
|
text: [
|
||||||
|
|||||||
Reference in New Issue
Block a user