From cc0bf449a248f2f1c405f9c06a7008378ab713e2 Mon Sep 17 00:00:00 2001
From: Robert Brennan
Date: Thu, 30 Jan 2025 17:20:23 -0500
Subject: [PATCH] stop retrying on all exceptions

---
 openhands/llm/llm.py | 13 +------------
 1 file changed, 1 insertion(+), 12 deletions(-)

diff --git a/openhands/llm/llm.py b/openhands/llm/llm.py
index af25baded4c4..9aba1545529b 100644
--- a/openhands/llm/llm.py
+++ b/openhands/llm/llm.py
@@ -18,11 +18,8 @@
 from litellm import completion as litellm_completion
 from litellm import completion_cost as litellm_completion_cost
 from litellm.exceptions import (
-    APIConnectionError,
     APIError,
-    InternalServerError,
     RateLimitError,
-    ServiceUnavailableError,
 )
 from litellm.types.utils import CostPerToken, ModelResponse, Usage
 from litellm.utils import create_pretrained_tokenizer
@@ -42,15 +39,7 @@
 __all__ = ['LLM']
 
 # tuple of exceptions to retry on
-LLM_RETRY_EXCEPTIONS: tuple[type[Exception], ...] = (
-    APIConnectionError,
-    # FIXME: APIError is useful on 502 from a proxy for example,
-    # but it also retries on other errors that are permanent
-    APIError,
-    InternalServerError,
-    RateLimitError,
-    ServiceUnavailableError,
-)
+LLM_RETRY_EXCEPTIONS: tuple[type[Exception], ...] = (RateLimitError,)
 
 # cache prompt supporting models
 # remove this when we gemini and deepseek are supported
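
For context, a tuple like LLM_RETRY_EXCEPTIONS is typically handed to a retry
decorator such as tenacity's retry_if_exception_type. The sketch below
illustrates the effect of this patch under that assumption; the function name
call_llm and the attempt/wait settings are hypothetical, not taken from the
repository:

    # Minimal sketch, assuming a tenacity-based retry wrapper.
    from litellm.exceptions import RateLimitError
    from tenacity import (
        retry,
        retry_if_exception_type,
        stop_after_attempt,
        wait_exponential,
    )

    # After this patch, only rate limits are considered transient.
    LLM_RETRY_EXCEPTIONS: tuple[type[Exception], ...] = (RateLimitError,)

    @retry(
        reraise=True,  # re-raise the last exception instead of RetryError
        retry=retry_if_exception_type(LLM_RETRY_EXCEPTIONS),
        stop=stop_after_attempt(5),  # illustrative values, not the project's
        wait=wait_exponential(multiplier=1, min=4, max=60),
    )
    def call_llm(prompt: str) -> str:
        # RateLimitError triggers a backoff-and-retry; APIError,
        # APIConnectionError, etc. now propagate to the caller immediately.
        raise NotImplementedError

The narrowing matches the FIXME removed by the diff: retrying APIError helps
with transient 502s from a proxy, but it also retries errors that are
permanent, hiding real failures behind a long backoff loop.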