fix: relax pricing fetch timeout

This commit is contained in:
Peter Steinberger
2026-04-25 20:33:31 +01:00
parent 1915b29a3c
commit a5a438a17c
3 changed files with 4 additions and 4 deletions

View File

@@ -606,7 +606,7 @@ Docs: https://docs.openclaw.ai
- CLI/Gateway: wait for one-shot gateway RPC clients to finish WebSocket teardown before the CLI process exits, reducing hangs where commands like `openclaw status` or `openclaw version` could finish their work but stay alive until an external timeout killed them (#70691). Thanks @Takhoffman.
- Thinking defaults/status: raise the implicit default thinking level for reasoning-capable models from legacy `off`/`low` fallback behavior to a safe provider-supported `medium` equivalent when no explicit config default is set, preserve configured-model reasoning metadata when runtime catalog loading is empty, and make `/status` report the same resolved default as runtime (#70601). Thanks @Takhoffman.
-- Gateway/model pricing: fetch OpenRouter and LiteLLM pricing asynchronously at startup and extend catalog fetch timeouts to 30 seconds, reducing noisy timeout warnings during slow upstream responses.
+- Gateway/model pricing: extend OpenRouter and LiteLLM catalog fetch timeouts to 60 seconds, reducing noisy timeout warnings during slow upstream responses. Thanks @steipete.
- Agents/failover: classify bare undici transport failures (`terminated`, `UND_ERR_SOCKET`, `UND_ERR_CONNECT_TIMEOUT`, body/header timeouts, aborted streams) and pi-ai's openai-codex `Request failed` sentinel as `timeout`, so Cloudflare 502s with empty bodies and mid-response socket resets actually enter the configured fallback chain instead of surfacing as unclassified errors. Fixes #69368. (#69677) Thanks @sk7n4k3d.
- Providers/Anthropic Vertex: restore ADC-backed model discovery after the lightweight provider-discovery path by resolving emitted discovery entries, exposing synthetic auth on bootstrap discovery, and honoring copied env snapshots when probing the default GCP ADC path. Fixes #65715. (#65716) Thanks @feiskyer.
- Plugins/install: add newly installed plugin ids to an existing `plugins.allow` list before enabling them, so allowlisted configs load installed plugins after restart.

View File

@@ -583,10 +583,10 @@ describe("model-pricing-cache", () => {
expect(warnings).toEqual(
expect.arrayContaining([
expect.stringContaining(
-"OpenRouter pricing fetch failed (timeout 30s): TimeoutError: The operation was aborted due to timeout",
+"OpenRouter pricing fetch failed (timeout 60s): TimeoutError: The operation was aborted due to timeout",
),
expect.stringContaining(
-"LiteLLM pricing fetch failed (timeout 30s): TimeoutError: The operation was aborted due to timeout",
+"LiteLLM pricing fetch failed (timeout 60s): TimeoutError: The operation was aborted due to timeout",
),
]),
);

View File

@@ -40,7 +40,7 @@ const OPENROUTER_MODELS_URL = "https://openrouter.ai/api/v1/models";
const LITELLM_PRICING_URL =
"https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json";
const CACHE_TTL_MS = 24 * 60 * 60_000;
-const FETCH_TIMEOUT_MS = 30_000;
+const FETCH_TIMEOUT_MS = 60_000;
const MAX_PRICING_CATALOG_BYTES = 5 * 1024 * 1024;
const PROVIDER_ALIAS_TO_OPENROUTER: Record<string, string> = {
"google-gemini-cli": "google",