diff --git a/litellm/caching/_internal_lru_cache.py b/litellm/caching/_internal_lru_cache.py
new file mode 100644
index 000000000000..78df493fc879
--- /dev/null
+++ b/litellm/caching/_internal_lru_cache.py
@@ -0,0 +1,12 @@
+from functools import lru_cache, wraps
+from typing import Callable, TypeVar, cast
+
+RT = TypeVar("RT")  # Return type
+
+
+def typed_lru_cache(maxsize: int = 128) -> Callable:
+    def decorator(func: Callable[..., RT]) -> Callable[..., RT]:
+        wrapped = lru_cache(maxsize=maxsize)(func)
+        return cast(Callable[..., RT], wraps(func)(wrapped))
+
+    return decorator
diff --git a/litellm/litellm_core_utils/get_llm_provider_logic.py b/litellm/litellm_core_utils/get_llm_provider_logic.py
index 834e35c733fa..6c339d7bc8b7 100644
--- a/litellm/litellm_core_utils/get_llm_provider_logic.py
+++ b/litellm/litellm_core_utils/get_llm_provider_logic.py
@@ -1,8 +1,10 @@
-from typing import Optional, Tuple
+from functools import lru_cache
+from typing import Optional, Tuple, overload
 
 import httpx
 
 import litellm
+from litellm.caching._internal_lru_cache import typed_lru_cache
 from litellm.secret_managers.main import get_secret, get_secret_str
 
 from ..types.router import LiteLLM_Params
@@ -84,6 +86,29 @@ def handle_anthropic_text_model_custom_llm_provider(
     return model, custom_llm_provider
 
 
+@overload
+def get_llm_provider(
+    model: str,
+    custom_llm_provider: str,
+    api_base: str,
+    api_key: str,
+    litellm_params: LiteLLM_Params,
+) -> Tuple[str, str, str, str]:
+    pass
+
+
+@overload
+def get_llm_provider(
+    model: str,
+    custom_llm_provider: Optional[str] = None,
+    api_base: Optional[str] = None,
+    api_key: Optional[str] = None,
+    litellm_params: Optional[LiteLLM_Params] = None,
+) -> Tuple[str, str, Optional[str], Optional[str]]:
+    pass
+
+
+@lru_cache(maxsize=16)
 def get_llm_provider(  # noqa: PLR0915
     model: str,
     custom_llm_provider: Optional[str] = None,
diff --git a/litellm/litellm_core_utils/get_supported_openai_params.py b/litellm/litellm_core_utils/get_supported_openai_params.py
index e251784f4e18..62f84e0ebd5a 100644
--- a/litellm/litellm_core_utils/get_supported_openai_params.py
+++ b/litellm/litellm_core_utils/get_supported_openai_params.py
@@ -2,6 +2,7 @@
 
 import litellm
 from litellm import LlmProviders
+from litellm.caching._internal_lru_cache import typed_lru_cache
 from litellm.exceptions import BadRequestError
 
 
diff --git a/litellm/types/router.py b/litellm/types/router.py
index 8c671fe52c8c..7bf54126f704 100644
--- a/litellm/types/router.py
+++ b/litellm/types/router.py
@@ -177,7 +177,7 @@ class GenericLiteLLMParams(BaseModel):
     max_budget: Optional[float] = None
     budget_duration: Optional[str] = None
 
-    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
+    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True, frozen=True)
 
     def __init__(
         self,
@@ -249,7 +249,7 @@ class LiteLLM_Params(GenericLiteLLMParams):
     """
 
     model: str
-    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
+    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True, frozen=True)
 
     def __init__(
         self,
diff --git a/tests/llm_translation/test_xai.py b/tests/llm_translation/test_xai.py
index fe909a4d2e1b..de4bfc907d08 100644
--- a/tests/llm_translation/test_xai.py
+++ b/tests/llm_translation/test_xai.py
@@ -6,7 +6,7 @@
 
 sys.path.insert(
     0, os.path.abspath("../..")
-)  # Adds the parent directory to the system-path
+)  # Adds the parent directory to the system path
 
 import httpx
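
Note on why `GenericLiteLLMParams` and `LiteLLM_Params` gain `frozen=True`: `functools.lru_cache` builds its cache key by hashing every argument, so once `get_llm_provider` is decorated with `@lru_cache(maxsize=16)`, any `LiteLLM_Params` instance passed to it must be hashable. In Pydantic v2, `frozen=True` makes instances immutable and generates `__hash__` from the field values; a mutable model would raise `TypeError: unhashable type` at call time. The `typed_lru_cache` helper simply wraps `lru_cache` and casts so the decorated function keeps its annotated return type for type checkers. Below is a minimal sketch of the frozen-model/`lru_cache` interaction, not part of this patch — the `Params` model and `resolve_provider` function are illustrative stand-ins:

```python
from functools import lru_cache
from typing import Optional, Tuple

from pydantic import BaseModel, ConfigDict


class Params(BaseModel):
    # frozen=True makes instances immutable and hashable, so they can be
    # used as arguments to an lru_cache-decorated function.
    model_config = ConfigDict(frozen=True)
    api_base: Optional[str] = None


@lru_cache(maxsize=16)
def resolve_provider(model: str, params: Optional[Params] = None) -> Tuple[str, str]:
    # Stand-in for an expensive lookup: the body runs only on the first call
    # for a given (model, params) pair; later equal calls hit the cache.
    provider, _, name = model.partition("/")
    return provider, name


resolve_provider("openai/gpt-4o", Params(api_base=None))  # computed
resolve_provider("openai/gpt-4o", Params(api_base=None))  # served from the cache
```

With `maxsize=16`, only the sixteen most recently used argument combinations are retained, so memory stays bounded even when many distinct models are resolved.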