diff --git a/botorch/acquisition/analytic.py b/botorch/acquisition/analytic.py index 868337ce81..418c01e67d 100644 --- a/botorch/acquisition/analytic.py +++ b/botorch/acquisition/analytic.py @@ -16,7 +16,7 @@ from abc import ABC from contextlib import nullcontext from copy import deepcopy -from typing import Dict, Optional, Tuple, Union +from typing import Optional, Union import torch from botorch.acquisition.acquisition import AcquisitionFunction @@ -83,7 +83,7 @@ def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None: def _mean_and_sigma( self, X: Tensor, compute_sigma: bool = True, min_var: float = 1e-12 - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Optional[Tensor]]: """Computes the first and second moments of the model posterior. Args: @@ -449,7 +449,7 @@ def __init__( model: Model, best_f: Union[float, Tensor], objective_index: int, - constraints: Dict[int, Tuple[Optional[float], Optional[float]]], + constraints: dict[int, tuple[Optional[float], Optional[float]]], maximize: bool = True, ) -> None: r"""Analytic Log Constrained Expected Improvement. @@ -527,7 +527,7 @@ def __init__( model: Model, best_f: Union[float, Tensor], objective_index: int, - constraints: Dict[int, Tuple[Optional[float], Optional[float]]], + constraints: dict[int, tuple[Optional[float], Optional[float]]], maximize: bool = True, ) -> None: r"""Analytic Constrained Expected Improvement. @@ -1134,7 +1134,7 @@ def _get_noiseless_fantasy_model( def _preprocess_constraint_bounds( acqf: Union[LogConstrainedExpectedImprovement, ConstrainedExpectedImprovement], - constraints: Dict[int, Tuple[Optional[float], Optional[float]]], + constraints: dict[int, tuple[Optional[float], Optional[float]]], ) -> None: r"""Set up constraint bounds. diff --git a/botorch/acquisition/factory.py b/botorch/acquisition/factory.py index 989154575d..7047fede25 100644 --- a/botorch/acquisition/factory.py +++ b/botorch/acquisition/factory.py @@ -10,7 +10,7 @@ from __future__ import annotations -from typing import Callable, List, Optional, Union +from typing import Callable, Optional, Union import torch @@ -37,7 +37,7 @@ def get_acquisition_function( X_observed: Tensor, posterior_transform: Optional[PosteriorTransform] = None, X_pending: Optional[Tensor] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Optional[Union[Tensor, float]] = 1e-3, mc_samples: int = 512, seed: Optional[int] = None, @@ -48,7 +48,7 @@ def get_acquisition_function( marginalize_dim: Optional[int] = None, cache_root: bool = True, beta: Optional[float] = None, - ref_point: Union[None, List[float], Tensor] = None, + ref_point: Union[None, list[float], Tensor] = None, Y: Optional[Tensor] = None, alpha: float = 0.0, ) -> monte_carlo.MCAcquisitionFunction: diff --git a/botorch/acquisition/fixed_feature.py b/botorch/acquisition/fixed_feature.py index 8f88524013..e0f0fa38ab 100644 --- a/botorch/acquisition/fixed_feature.py +++ b/botorch/acquisition/fixed_feature.py @@ -11,8 +11,10 @@ from __future__ import annotations +from collections.abc import Sequence + from numbers import Number -from typing import List, Optional, Sequence, Union +from typing import Optional, Union import torch from botorch.acquisition.acquisition import AcquisitionFunction @@ -65,7 +67,7 @@ def __init__( self, acq_function: AcquisitionFunction, d: int, - columns: List[int], + columns: list[int], values: Union[Tensor, Sequence[Union[Tensor, float]]], ) -> None: r"""Derived Acquisition 
Function by fixing a subset of input features. diff --git a/botorch/acquisition/input_constructors.py b/botorch/acquisition/input_constructors.py index dcb8abda40..49a8a599f0 100644 --- a/botorch/acquisition/input_constructors.py +++ b/botorch/acquisition/input_constructors.py @@ -12,20 +12,8 @@ from __future__ import annotations import inspect -from typing import ( - Any, - Callable, - Dict, - Hashable, - Iterable, - List, - Optional, - Sequence, - Tuple, - Type, - TypeVar, - Union, -) +from collections.abc import Hashable, Iterable, Sequence +from typing import Any, Callable, Optional, TypeVar, Union import torch from botorch.acquisition.acquisition import AcquisitionFunction @@ -118,23 +106,23 @@ ACQF_INPUT_CONSTRUCTOR_REGISTRY = {} T = TypeVar("T") -MaybeDict = Union[T, Dict[Hashable, T]] +MaybeDict = Union[T, dict[Hashable, T]] TOptimizeObjectiveKwargs = Union[ None, MCAcquisitionObjective, PosteriorTransform, - Tuple[Tensor, Tensor], - Dict[int, float], + tuple[Tensor, Tensor], + dict[int, float], bool, int, - Dict[str, Any], + dict[str, Any], Callable[[Tensor], Tensor], Tensor, ] def _field_is_shared( - datasets: Union[Iterable[SupervisedDataset], Dict[Hashable, SupervisedDataset]], + datasets: Union[Iterable[SupervisedDataset], dict[Hashable, SupervisedDataset]], fieldname: str, ) -> bool: r"""Determines whether or not a given field is shared by all datasets.""" @@ -184,8 +172,8 @@ def _get_dataset_field( def get_acqf_input_constructor( - acqf_cls: Type[AcquisitionFunction], -) -> Callable[..., Dict[str, Any]]: + acqf_cls: type[AcquisitionFunction], +) -> Callable[..., dict[str, Any]]: r"""Get acquisition function input constructor from registry. Args: @@ -245,7 +233,7 @@ def g(*args: Any, **kwargs: Any) -> T: def acqf_input_constructor( - *acqf_cls: Type[AcquisitionFunction], + *acqf_cls: type[AcquisitionFunction], ) -> Callable[..., AcquisitionFunction]: r"""Decorator for registering acquisition function input constructors. @@ -270,8 +258,8 @@ def decorator(method): def _register_acqf_input_constructor( - acqf_cls: Type[AcquisitionFunction], - input_constructor: Callable[..., Dict[str, Any]], + acqf_cls: type[AcquisitionFunction], + input_constructor: Callable[..., dict[str, Any]], ) -> None: ACQF_INPUT_CONSTRUCTOR_REGISTRY[acqf_cls] = input_constructor @@ -283,7 +271,7 @@ def _register_acqf_input_constructor( def construct_inputs_posterior_mean( model: Model, posterior_transform: Optional[PosteriorTransform] = None, -) -> Dict[str, Union[Model, Optional[PosteriorTransform]]]: +) -> dict[str, Union[Model, Optional[PosteriorTransform]]]: r"""Construct kwargs for PosteriorMean acquisition function. Args: @@ -309,7 +297,7 @@ def construct_inputs_best_f( posterior_transform: Optional[PosteriorTransform] = None, best_f: Optional[Union[float, Tensor]] = None, maximize: bool = True, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for the acquisition functions requiring `best_f`. Args: @@ -344,7 +332,7 @@ def construct_inputs_ucb( posterior_transform: Optional[PosteriorTransform] = None, beta: Union[float, Tensor] = 0.2, maximize: bool = True, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for `UpperConfidenceBound`. Args: @@ -372,7 +360,7 @@ def construct_inputs_noisy_ei( training_data: MaybeDict[SupervisedDataset], num_fantasies: int = 20, maximize: bool = True, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for `NoisyExpectedImprovement`. 
Args: @@ -403,9 +391,9 @@ def construct_inputs_qSimpleRegret( posterior_transform: Optional[PosteriorTransform] = None, X_pending: Optional[Tensor] = None, sampler: Optional[MCSampler] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, X_baseline: Optional[Tensor] = None, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for qSimpleRegret. Args: @@ -458,9 +446,9 @@ def construct_inputs_qEI( X_pending: Optional[Tensor] = None, sampler: Optional[MCSampler] = None, best_f: Optional[Union[float, Tensor]] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Union[Tensor, float] = 1e-3, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for the `qExpectedImprovement` constructor. Args: @@ -516,12 +504,12 @@ def construct_inputs_qLogEI( X_pending: Optional[Tensor] = None, sampler: Optional[MCSampler] = None, best_f: Optional[Union[float, Tensor]] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Union[Tensor, float] = 1e-3, fat: bool = True, tau_max: float = TAU_MAX, tau_relu: float = TAU_RELU, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for the `qLogExpectedImprovement` constructor. Args: @@ -582,9 +570,9 @@ def construct_inputs_qNEI( X_baseline: Optional[Tensor] = None, prune_baseline: Optional[bool] = True, cache_root: Optional[bool] = True, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Union[Tensor, float] = 1e-3, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for the `qNoisyExpectedImprovement` constructor. Args: @@ -648,7 +636,7 @@ def construct_inputs_qLogNEI( X_baseline: Optional[Tensor] = None, prune_baseline: Optional[bool] = True, cache_root: Optional[bool] = True, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Union[Tensor, float] = 1e-3, fat: bool = True, tau_max: float = TAU_MAX, @@ -721,9 +709,9 @@ def construct_inputs_qPI( sampler: Optional[MCSampler] = None, tau: float = 1e-3, best_f: Optional[Union[float, Tensor]] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Union[Tensor, float] = 1e-3, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for the `qProbabilityOfImprovement` constructor. Args: @@ -785,7 +773,7 @@ def construct_inputs_qUCB( X_pending: Optional[Tensor] = None, sampler: Optional[MCSampler] = None, beta: float = 0.2, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for the `qUpperConfidenceBound` constructor. 
Args: @@ -828,10 +816,10 @@ def construct_inputs_EHVI( training_data: MaybeDict[SupervisedDataset], objective_thresholds: Tensor, posterior_transform: Optional[PosteriorTransform] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, alpha: Optional[float] = None, Y_pmean: Optional[Tensor] = None, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for `ExpectedHypervolumeImprovement` constructor.""" num_objectives = objective_thresholds.shape[0] if constraints is not None: @@ -885,14 +873,14 @@ def construct_inputs_qEHVI( training_data: MaybeDict[SupervisedDataset], objective_thresholds: Tensor, objective: Optional[MCMultiOutputObjective] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, alpha: Optional[float] = None, sampler: Optional[MCSampler] = None, X_pending: Optional[Tensor] = None, eta: float = 1e-3, mc_samples: int = 128, qmc: bool = True, -) -> Dict[str, Any]: +) -> dict[str, Any]: r""" Construct kwargs for `qExpectedHypervolumeImprovement` and `qLogExpectedHypervolumeImprovement`. @@ -965,7 +953,7 @@ def construct_inputs_qNEHVI( objective_thresholds: Tensor, objective: Optional[MCMultiOutputObjective] = None, X_baseline: Optional[Tensor] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, alpha: Optional[float] = None, sampler: Optional[MCSampler] = None, X_pending: Optional[Tensor] = None, @@ -978,7 +966,7 @@ def construct_inputs_qNEHVI( max_iep: int = 0, incremental_nehvi: bool = True, cache_root: bool = True, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for `qNoisyExpectedHypervolumeImprovement`'s constructor.""" if X_baseline is None: X_baseline = _get_dataset_field( @@ -1037,7 +1025,7 @@ def construct_inputs_qLogNEHVI( objective_thresholds: Tensor, objective: Optional[MCMultiOutputObjective] = None, X_baseline: Optional[Tensor] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, alpha: Optional[float] = None, sampler: Optional[MCSampler] = None, X_pending: Optional[Tensor] = None, @@ -1052,7 +1040,7 @@ def construct_inputs_qLogNEHVI( cache_root: bool = True, tau_relu: float = TAU_RELU, tau_max: float = TAU_MAX, -) -> Dict[str, Any]: +) -> dict[str, Any]: """ Construct kwargs for `qLogNoisyExpectedHypervolumeImprovement`'s constructor. 
""" @@ -1093,7 +1081,7 @@ def construct_inputs_qLogNParEGO( X_baseline: Optional[Tensor] = None, prune_baseline: Optional[bool] = True, cache_root: Optional[bool] = True, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Union[Tensor, float] = 1e-3, fat: bool = True, tau_max: float = TAU_MAX, @@ -1164,12 +1152,12 @@ def construct_inputs_qLogNParEGO( def construct_inputs_qMES( model: Model, training_data: MaybeDict[SupervisedDataset], - bounds: List[Tuple[float, float]], + bounds: list[tuple[float, float]], posterior_transform: Optional[PosteriorTransform] = None, candidate_size: int = 1000, maximize: bool = True, # TODO: qMES also supports other inputs, such as num_fantasies -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for `qMaxValueEntropy` constructor.""" X = _get_dataset_field(training_data, "X", first_only=True) @@ -1185,11 +1173,11 @@ def construct_inputs_qMES( def construct_inputs_mf_base( - target_fidelities: Dict[int, Union[int, float]], - fidelity_weights: Optional[Dict[int, float]] = None, + target_fidelities: dict[int, Union[int, float]], + fidelity_weights: Optional[dict[int, float]] = None, cost_intercept: float = 1.0, num_trace_observations: int = 0, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for a multifidelity acquisition function's constructor.""" if fidelity_weights is None: fidelity_weights = {f: 1.0 for f in target_fidelities} @@ -1224,12 +1212,12 @@ def construct_inputs_mf_base( def construct_inputs_qKG( model: Model, training_data: MaybeDict[SupervisedDataset], - bounds: List[Tuple[float, float]], + bounds: list[tuple[float, float]], objective: Optional[MCAcquisitionObjective] = None, posterior_transform: Optional[PosteriorTransform] = None, num_fantasies: int = 64, **optimize_objective_kwargs: TOptimizeObjectiveKwargs, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for `qKnowledgeGradient` constructor.""" X = _get_dataset_field(training_data, "X", first_only=True) @@ -1257,15 +1245,15 @@ def construct_inputs_qKG( def construct_inputs_qMFKG( model: Model, training_data: MaybeDict[SupervisedDataset], - bounds: List[Tuple[float, float]], - target_fidelities: Dict[int, Union[int, float]], + bounds: list[tuple[float, float]], + target_fidelities: dict[int, Union[int, float]], objective: Optional[MCAcquisitionObjective] = None, posterior_transform: Optional[PosteriorTransform] = None, - fidelity_weights: Optional[Dict[int, float]] = None, + fidelity_weights: Optional[dict[int, float]] = None, cost_intercept: float = 1.0, num_trace_observations: int = 0, num_fantasies: int = 64, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for `qMultiFidelityKnowledgeGradient` constructor.""" inputs_mf = construct_inputs_mf_base( @@ -1291,15 +1279,15 @@ def construct_inputs_qMFKG( def construct_inputs_qMFMES( model: Model, training_data: MaybeDict[SupervisedDataset], - bounds: List[Tuple[float, float]], - target_fidelities: Dict[int, Union[int, float]], + bounds: list[tuple[float, float]], + target_fidelities: dict[int, Union[int, float]], num_fantasies: int = 64, - fidelity_weights: Optional[Dict[int, float]] = None, + fidelity_weights: Optional[dict[int, float]] = None, cost_intercept: float = 1.0, num_trace_observations: int = 0, candidate_size: int = 1000, maximize: bool = True, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for `qMultiFidelityMaxValueEntropy` constructor.""" inputs_mf = 
construct_inputs_mf_base( target_fidelities=target_fidelities, @@ -1327,7 +1315,7 @@ def construct_inputs_analytic_eubo( sample_multiplier: Optional[float] = 1.0, objective: Optional[LearnedObjective] = None, posterior_transform: Optional[PosteriorTransform] = None, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for the `AnalyticExpectedUtilityOfBestOption` constructor. `model` is the primary model defined over the parameter space. It can be the @@ -1387,7 +1375,7 @@ def construct_inputs_qeubo( objective: Optional[MCAcquisitionObjective] = None, posterior_transform: Optional[PosteriorTransform] = None, X_pending: Optional[Tensor] = None, -) -> Dict[str, Any]: +) -> dict[str, Any]: r"""Construct kwargs for the `qExpectedUtilityOfBestOption` (qEUBO) constructor. `model` is the primary model defined over the parameter space. It can be the @@ -1467,7 +1455,7 @@ def get_best_f_mc( training_data: MaybeDict[SupervisedDataset], objective: Optional[MCAcquisitionObjective] = None, posterior_transform: Optional[PosteriorTransform] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, model: Optional[Model] = None, ) -> Tensor: """ @@ -1544,16 +1532,16 @@ def optimize_objective( q: int, objective: Optional[MCAcquisitionObjective] = None, posterior_transform: Optional[PosteriorTransform] = None, - linear_constraints: Optional[Tuple[Tensor, Tensor]] = None, - fixed_features: Optional[Dict[int, float]] = None, + linear_constraints: Optional[tuple[Tensor, Tensor]] = None, + fixed_features: Optional[dict[int, float]] = None, qmc: bool = True, mc_samples: int = 512, seed_inner: Optional[int] = None, - optimizer_options: Optional[Dict[str, Any]] = None, + optimizer_options: Optional[dict[str, Any]] = None, post_processing_func: Optional[Callable[[Tensor], Tensor]] = None, batch_initial_conditions: Optional[Tensor] = None, sequential: bool = False, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Optimize an objective under the given model. Args: @@ -1645,7 +1633,7 @@ def optimize_objective( @acqf_input_constructor(qJointEntropySearch) def construct_inputs_qJES( model: Model, - bounds: List[Tuple[float, float]], + bounds: list[tuple[float, float]], num_optima: int = 64, maximize: bool = True, condition_noiseless: bool = True, diff --git a/botorch/acquisition/knowledge_gradient.py b/botorch/acquisition/knowledge_gradient.py index 7ac6cd0ee5..66cb8125e9 100644 --- a/botorch/acquisition/knowledge_gradient.py +++ b/botorch/acquisition/knowledge_gradient.py @@ -27,7 +27,7 @@ from __future__ import annotations from copy import deepcopy -from typing import Any, Callable, Dict, Optional, Tuple, Type +from typing import Any, Callable, Optional import torch from botorch import settings @@ -329,8 +329,8 @@ def __init__( cost_aware_utility: Optional[CostAwareUtility] = None, project: Callable[[Tensor], Tensor] = lambda X: X, expand: Callable[[Tensor], Tensor] = lambda X: X, - valfunc_cls: Optional[Type[AcquisitionFunction]] = None, - valfunc_argfac: Optional[Callable[[Model], Dict[str, Any]]] = None, + valfunc_cls: Optional[type[AcquisitionFunction]] = None, + valfunc_argfac: Optional[Callable[[Model], dict[str, Any]]] = None, ) -> None: r"""Multi-Fidelity q-Knowledge Gradient (one-shot optimization). 
@@ -520,12 +520,12 @@ def _get_value_function( posterior_transform: Optional[PosteriorTransform] = None, sampler: Optional[MCSampler] = None, project: Optional[Callable[[Tensor], Tensor]] = None, - valfunc_cls: Optional[Type[AcquisitionFunction]] = None, - valfunc_argfac: Optional[Callable[[Model], Dict[str, Any]]] = None, + valfunc_cls: Optional[type[AcquisitionFunction]] = None, + valfunc_argfac: Optional[Callable[[Model], dict[str, Any]]] = None, ) -> AcquisitionFunction: r"""Construct value function (i.e. inner acquisition function).""" if valfunc_cls is not None: - common_kwargs: Dict[str, Any] = { + common_kwargs: dict[str, Any] = { "model": model, "posterior_transform": posterior_transform, } @@ -556,7 +556,7 @@ def _get_value_function( ) -def _split_fantasy_points(X: Tensor, n_f: int) -> Tuple[Tensor, Tensor]: +def _split_fantasy_points(X: Tensor, n_f: int) -> tuple[Tensor, Tensor]: r"""Split a one-shot optimization input into actual and fantasy points Args: diff --git a/botorch/acquisition/logei.py b/botorch/acquisition/logei.py index 210f2f1f6f..0c67b201a7 100644 --- a/botorch/acquisition/logei.py +++ b/botorch/acquisition/logei.py @@ -21,7 +21,7 @@ from functools import partial -from typing import Callable, List, Optional, Tuple, TypeVar, Union +from typing import Callable, Optional, TypeVar, Union import torch from botorch.acquisition.cached_cholesky import CachedCholeskyMCSamplerMixin @@ -82,7 +82,7 @@ def __init__( objective: Optional[MCAcquisitionObjective] = None, posterior_transform: Optional[PosteriorTransform] = None, X_pending: Optional[Tensor] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Union[Tensor, float] = 1e-3, fat: bool = True, tau_max: float = TAU_MAX, @@ -166,7 +166,7 @@ def __init__( objective: Optional[MCAcquisitionObjective] = None, posterior_transform: Optional[PosteriorTransform] = None, X_pending: Optional[Tensor] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Union[Tensor, float] = 1e-3, fat: bool = True, tau_max: float = TAU_MAX, @@ -266,7 +266,7 @@ def __init__( objective: Optional[MCAcquisitionObjective] = None, posterior_transform: Optional[PosteriorTransform] = None, X_pending: Optional[Tensor] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Union[Tensor, float] = 1e-3, fat: bool = True, prune_baseline: bool = False, @@ -445,7 +445,7 @@ def compute_best_f(self, obj: Tensor) -> Tensor: ) return val.view(view_shape).to(obj) # obj.shape[:-1], i.e. without `q`-dim` - def _get_samples_and_objectives(self, X: Tensor) -> Tuple[Tensor, Tensor]: + def _get_samples_and_objectives(self, X: Tensor) -> tuple[Tensor, Tensor]: r"""Compute samples at new points, using the cached root decomposition. 
Args: diff --git a/botorch/acquisition/monte_carlo.py b/botorch/acquisition/monte_carlo.py index 2df84be69c..766aec144e 100644 --- a/botorch/acquisition/monte_carlo.py +++ b/botorch/acquisition/monte_carlo.py @@ -26,7 +26,7 @@ from abc import ABC, abstractmethod from copy import deepcopy from functools import partial -from typing import Callable, List, Optional, Protocol, Tuple, Union +from typing import Callable, Optional, Protocol, Union import torch from botorch.acquisition.acquisition import AcquisitionFunction, MCSamplerMixin @@ -104,7 +104,7 @@ def __init__( self.objective: MCAcquisitionObjective = objective self.set_X_pending(X_pending) - def _get_samples_and_objectives(self, X: Tensor) -> Tuple[Tensor, Tensor]: + def _get_samples_and_objectives(self, X: Tensor) -> tuple[Tensor, Tensor]: """Computes posterior samples and objective values at input X. Args: @@ -186,7 +186,7 @@ def __init__( X_pending: Optional[Tensor] = None, sample_reduction: SampleReductionProtocol = torch.mean, q_reduction: SampleReductionProtocol = torch.amax, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Union[Tensor, float] = 1e-3, fat: bool = False, ): @@ -361,7 +361,7 @@ def __init__( objective: Optional[MCAcquisitionObjective] = None, posterior_transform: Optional[PosteriorTransform] = None, X_pending: Optional[Tensor] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Union[Tensor, float] = 1e-3, ) -> None: r"""q-Expected Improvement. @@ -448,7 +448,7 @@ def __init__( X_pending: Optional[Tensor] = None, prune_baseline: bool = True, cache_root: bool = True, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Union[Tensor, float] = 1e-3, marginalize_dim: Optional[int] = None, ) -> None: @@ -588,7 +588,7 @@ def _sample_forward(self, obj: Tensor) -> Tensor: """ return (obj - self.compute_best_f(obj).unsqueeze(-1)).clamp_min(0) - def _get_samples_and_objectives(self, X: Tensor) -> Tuple[Tensor, Tensor]: + def _get_samples_and_objectives(self, X: Tensor) -> tuple[Tensor, Tensor]: r"""Compute samples at new points, using the cached root decomposition. Args: @@ -673,7 +673,7 @@ def __init__( posterior_transform: Optional[PosteriorTransform] = None, X_pending: Optional[Tensor] = None, tau: float = 1e-3, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Union[Tensor, float] = 1e-3, ) -> None: r"""q-Probability of Improvement. 
diff --git a/botorch/acquisition/multi_objective/analytic.py b/botorch/acquisition/multi_objective/analytic.py index 0901a80240..6b4c4aeb50 100644 --- a/botorch/acquisition/multi_objective/analytic.py +++ b/botorch/acquisition/multi_objective/analytic.py @@ -21,7 +21,7 @@ from abc import abstractmethod from itertools import product -from typing import List, Optional +from typing import Optional import torch from botorch.acquisition.acquisition import AcquisitionFunction @@ -79,7 +79,7 @@ class ExpectedHypervolumeImprovement(MultiObjectiveAnalyticAcquisitionFunction): def __init__( self, model: Model, - ref_point: List[float], + ref_point: list[float], partitioning: NondominatedPartitioning, posterior_transform: Optional[PosteriorTransform] = None, ) -> None: diff --git a/botorch/acquisition/multi_objective/hypervolume_knowledge_gradient.py b/botorch/acquisition/multi_objective/hypervolume_knowledge_gradient.py index 850f3d854a..f43a55c090 100644 --- a/botorch/acquisition/multi_objective/hypervolume_knowledge_gradient.py +++ b/botorch/acquisition/multi_objective/hypervolume_knowledge_gradient.py @@ -17,7 +17,7 @@ """ from copy import deepcopy -from typing import Any, Callable, Dict, List, Optional, Tuple, Type +from typing import Any, Callable, Optional import torch from botorch import settings @@ -78,7 +78,7 @@ def __init__( sampler: Optional[ListSampler] = None, objective: Optional[MCMultiOutputObjective] = None, inner_sampler: Optional[MCSampler] = None, - X_evaluation_mask: Optional[List[Tensor]] = None, + X_evaluation_mask: Optional[list[Tensor]] = None, X_pending: Optional[Tensor] = None, X_pending_evaluation_mask: Optional[Tensor] = None, current_value: Optional[Tensor] = None, @@ -306,7 +306,7 @@ def __init__( self, model: Model, ref_point: Tensor, - target_fidelities: Dict[int, float], + target_fidelities: dict[int, float], num_fantasies: int = 8, num_pareto: int = 10, sampler: Optional[MCSampler] = None, @@ -318,8 +318,8 @@ def __init__( current_value: Optional[Tensor] = None, cost_aware_utility: Optional[CostAwareUtility] = None, project: Callable[[Tensor], Tensor] = lambda X: X, - valfunc_cls: Optional[Type[AcquisitionFunction]] = None, - valfunc_argfac: Optional[Callable[[Model], Dict[str, Any]]] = None, + valfunc_cls: Optional[type[AcquisitionFunction]] = None, + valfunc_argfac: Optional[Callable[[Model], dict[str, Any]]] = None, use_posterior_mean: bool = True, **kwargs: Any, ) -> None: @@ -490,8 +490,8 @@ def _get_hv_value_function( objective: Optional[MCMultiOutputObjective] = None, sampler: Optional[MCSampler] = None, project: Optional[Callable[[Tensor], Tensor]] = None, - valfunc_cls: Optional[Type[AcquisitionFunction]] = None, - valfunc_argfac: Optional[Callable[[Model], Dict[str, Any]]] = None, + valfunc_cls: Optional[type[AcquisitionFunction]] = None, + valfunc_argfac: Optional[Callable[[Model], dict[str, Any]]] = None, use_posterior_mean: bool = False, ) -> AcquisitionFunction: r"""Construct value function (i.e. inner acquisition function). 
@@ -528,7 +528,7 @@ def _get_hv_value_function( def _split_hvkg_fantasy_points( X: Tensor, n_f: int, num_pareto: int -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Split a one-shot HV-KG optimization input into actual and fantasy points Args: diff --git a/botorch/acquisition/multi_objective/joint_entropy_search.py b/botorch/acquisition/multi_objective/joint_entropy_search.py index ea6d9b0fae..1b8c167936 100644 --- a/botorch/acquisition/multi_objective/joint_entropy_search.py +++ b/botorch/acquisition/multi_objective/joint_entropy_search.py @@ -19,7 +19,7 @@ from abc import abstractmethod from math import pi -from typing import Optional, Tuple, Union +from typing import Optional, Union import torch from botorch import settings @@ -154,7 +154,7 @@ def _compute_posterior_statistics( @abstractmethod def _compute_monte_carlo_variables( self, posterior: GPyTorchPosterior - ) -> Tuple[Tensor, Tensor]: + ) -> tuple[Tensor, Tensor]: r"""Compute the samples and log-probability associated with a posterior distribution. @@ -406,7 +406,7 @@ def _compute_posterior_statistics( def _compute_monte_carlo_variables( self, posterior: GPyTorchPosterior - ) -> Tuple[Tensor, Tensor]: + ) -> tuple[Tensor, Tensor]: r"""Compute the samples and log-probability associated with the posterior distribution that conditions on the Pareto optimal points. diff --git a/botorch/acquisition/multi_objective/logei.py b/botorch/acquisition/multi_objective/logei.py index b43ee75b4b..9a942a8c13 100644 --- a/botorch/acquisition/multi_objective/logei.py +++ b/botorch/acquisition/multi_objective/logei.py @@ -10,7 +10,7 @@ from __future__ import annotations -from typing import Callable, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import torch from botorch.acquisition.logei import TAU_MAX, TAU_RELU @@ -54,11 +54,11 @@ class qLogExpectedHypervolumeImprovement( def __init__( self, model: Model, - ref_point: Union[List[float], Tensor], + ref_point: Union[list[float], Tensor], partitioning: NondominatedPartitioning, sampler: Optional[MCSampler] = None, objective: Optional[MCMultiOutputObjective] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, X_pending: Optional[Tensor] = None, eta: Optional[Union[Tensor, float]] = 1e-2, fat: bool = True, @@ -267,7 +267,7 @@ def _compute_log_qehvi(self, samples: Tensor, X: Optional[Tensor] = None) -> Ten return logmeanexp(logsumexp(log_areas_per_segment, dim=-1), dim=0) def _log_improvement( - self, obj_subsets: Tensor, view_shape: Union[Tuple, torch.Size] + self, obj_subsets: Tensor, view_shape: Union[tuple, torch.Size] ) -> Tensor: # smooth out the clamp and take the log (previous step 3) # subtract cell lower bounds, clamp min at zero, but first @@ -282,7 +282,7 @@ def _log_improvement( return log_Zi # mc_samples x batch_shape x num_cells x q_choose_i x i x m def _log_cell_lengths( - self, log_improvement_i: Tensor, view_shape: Union[Tuple, torch.Size] + self, log_improvement_i: Tensor, view_shape: Union[tuple, torch.Size] ) -> Tensor: cell_upper_bounds = self.cell_upper_bounds.clamp_max( 1e10 if log_improvement_i.dtype == torch.double else 1e8 @@ -327,11 +327,11 @@ class qLogNoisyExpectedHypervolumeImprovement( def __init__( self, model: Model, - ref_point: Union[List[float], Tensor], + ref_point: Union[list[float], Tensor], X_baseline: Tensor, sampler: Optional[MCSampler] = None, objective: Optional[MCMultiOutputObjective] = None, - constraints: 
Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, X_pending: Optional[Tensor] = None, eta: Optional[Union[Tensor, float]] = 1e-3, prune_baseline: bool = False, diff --git a/botorch/acquisition/multi_objective/max_value_entropy_search.py b/botorch/acquisition/multi_objective/max_value_entropy_search.py index f94de6c4a1..dc33d1255a 100644 --- a/botorch/acquisition/multi_objective/max_value_entropy_search.py +++ b/botorch/acquisition/multi_objective/max_value_entropy_search.py @@ -21,7 +21,7 @@ from math import pi -from typing import Callable, Optional, Tuple, Union +from typing import Callable, Optional, Union import torch from botorch.acquisition.max_value_entropy_search import qMaxValueEntropy @@ -322,7 +322,7 @@ def _compute_posterior_statistics( def _compute_monte_carlo_variables( self, posterior: GPyTorchPosterior - ) -> Tuple[Tensor, Tensor]: + ) -> tuple[Tensor, Tensor]: r"""Compute the samples and log-probability associated with a posterior distribution. diff --git a/botorch/acquisition/multi_objective/monte_carlo.py b/botorch/acquisition/multi_objective/monte_carlo.py index 006b4a4969..0a82f9e2da 100644 --- a/botorch/acquisition/multi_objective/monte_carlo.py +++ b/botorch/acquisition/multi_objective/monte_carlo.py @@ -27,7 +27,7 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import Callable, List, Optional, Union +from typing import Callable, Optional, Union import torch from botorch.acquisition.acquisition import AcquisitionFunction, MCSamplerMixin @@ -73,7 +73,7 @@ def __init__( model: Model, sampler: Optional[MCSampler] = None, objective: Optional[MCMultiOutputObjective] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Optional[Union[Tensor, float]] = 1e-3, X_pending: Optional[Tensor] = None, ) -> None: @@ -149,11 +149,11 @@ class qExpectedHypervolumeImprovement( def __init__( self, model: Model, - ref_point: Union[List[float], Tensor], + ref_point: Union[list[float], Tensor], partitioning: NondominatedPartitioning, sampler: Optional[MCSampler] = None, objective: Optional[MCMultiOutputObjective] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, X_pending: Optional[Tensor] = None, eta: Optional[Union[Tensor, float]] = 1e-3, fat: bool = False, @@ -328,11 +328,11 @@ class qNoisyExpectedHypervolumeImprovement( def __init__( self, model: Model, - ref_point: Union[List[float], Tensor], + ref_point: Union[list[float], Tensor], X_baseline: Tensor, sampler: Optional[MCSampler] = None, objective: Optional[MCMultiOutputObjective] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, X_pending: Optional[Tensor] = None, eta: Optional[Union[Tensor, float]] = 1e-3, fat: bool = False, diff --git a/botorch/acquisition/multi_objective/multi_fidelity.py b/botorch/acquisition/multi_objective/multi_fidelity.py index fb68bcbf64..1430381503 100644 --- a/botorch/acquisition/multi_objective/multi_fidelity.py +++ b/botorch/acquisition/multi_objective/multi_fidelity.py @@ -18,7 +18,7 @@ from __future__ import annotations -from typing import Callable, List, Optional, Union +from typing import Callable, Optional, Union import torch from botorch.acquisition.cost_aware import InverseCostWeightedUtility @@ -41,11 +41,11 @@ class 
MOMF(qExpectedHypervolumeImprovement): def __init__( self, model: Model, - ref_point: Union[List[float], Tensor], + ref_point: Union[list[float], Tensor], partitioning: NondominatedPartitioning, sampler: Optional[MCSampler] = None, objective: Optional[MCMultiOutputObjective] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, eta: Optional[Union[Tensor, float]] = 1e-3, X_pending: Optional[Tensor] = None, cost_call: Optional[Callable[[Tensor], Tensor]] = None, diff --git a/botorch/acquisition/multi_objective/multi_output_risk_measures.py b/botorch/acquisition/multi_objective/multi_output_risk_measures.py index 81ef9a2b36..a98dddea55 100644 --- a/botorch/acquisition/multi_objective/multi_output_risk_measures.py +++ b/botorch/acquisition/multi_objective/multi_output_risk_measures.py @@ -29,7 +29,7 @@ import warnings from abc import ABC, abstractmethod from math import ceil -from typing import Callable, List, Optional, Union +from typing import Callable, Optional, Union import torch from botorch.acquisition.multi_objective.objective import ( @@ -295,7 +295,7 @@ def __init__( self.filter_dominated = filter_dominated self.use_counting = use_counting - def get_mvar_set_via_counting(self, Y: Tensor) -> List[Tensor]: + def get_mvar_set_via_counting(self, Y: Tensor) -> list[Tensor]: r"""Find MVaR set based on the definition in [Prekopa2012MVaR]_. This first calculates the CDF for each point on the extended domain of the @@ -386,7 +386,7 @@ def get_mvar_set_via_counting(self, Y: Tensor) -> List[Tensor]: mvar = alpha_level_points return [mvar] - def get_mvar_set_vectorized(self, Y: Tensor) -> List[Tensor]: + def get_mvar_set_vectorized(self, Y: Tensor) -> list[Tensor]: r"""Find MVaR set based on the definition in [Prekopa2012MVaR]_. This first calculates the CDF for each point on the extended domain of the @@ -549,9 +549,9 @@ def __init__( self, alpha: float, n_w: int, - chebyshev_weights: Union[Tensor, List[float]], + chebyshev_weights: Union[Tensor, list[float]], baseline_Y: Optional[Tensor] = None, - ref_point: Optional[Union[Tensor, List[float]]] = None, + ref_point: Optional[Union[Tensor, list[float]]] = None, preprocessing_function: Optional[Callable[[Tensor], Tensor]] = None, ) -> None: r"""Transform the posterior samples to samples of a risk measure. @@ -629,7 +629,7 @@ def chebyshev_weights(self) -> Tensor: return self._chebyshev_weights @chebyshev_weights.setter - def chebyshev_weights(self, chebyshev_weights: Union[Tensor, List[float]]) -> None: + def chebyshev_weights(self, chebyshev_weights: Union[Tensor, list[float]]) -> None: r"""Update the Chebyshev weights. Invalidates the cached Chebyshev objective. diff --git a/botorch/acquisition/multi_objective/objective.py b/botorch/acquisition/multi_objective/objective.py index a159c7ff8d..c3cf0c44ec 100644 --- a/botorch/acquisition/multi_objective/objective.py +++ b/botorch/acquisition/multi_objective/objective.py @@ -7,7 +7,7 @@ from __future__ import annotations from abc import abstractmethod -from typing import List, Optional +from typing import Optional import torch from botorch.acquisition.objective import GenericMCObjective, MCAcquisitionObjective @@ -72,7 +72,7 @@ class IdentityMCMultiOutputObjective(MCMultiOutputObjective): """ def __init__( - self, outcomes: Optional[List[int]] = None, num_outcomes: Optional[int] = None + self, outcomes: Optional[list[int]] = None, num_outcomes: Optional[int] = None ) -> None: r"""Initialize Objective. 
@@ -114,7 +114,7 @@ class WeightedMCMultiOutputObjective(IdentityMCMultiOutputObjective): def __init__( self, weights: Tensor, - outcomes: Optional[List[int]] = None, + outcomes: Optional[list[int]] = None, num_outcomes: Optional[int] = None, ) -> None: r"""Initialize Objective. @@ -147,7 +147,7 @@ def __init__( self, model: Model, X_baseline: Tensor, - constraint_idcs: List[int], + constraint_idcs: list[int], objective: Optional[MCMultiOutputObjective] = None, ) -> None: r"""Construct a feasibility-weighted objective. diff --git a/botorch/acquisition/multi_objective/parego.py b/botorch/acquisition/multi_objective/parego.py index e7e2a47e37..c6de972e9f 100644 --- a/botorch/acquisition/multi_objective/parego.py +++ b/botorch/acquisition/multi_objective/parego.py @@ -3,7 +3,7 @@ # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from typing import Callable, List, Optional, Union +from typing import Callable, Optional, Union import torch from botorch.acquisition.logei import qLogNoisyExpectedImprovement, TAU_MAX, TAU_RELU @@ -29,7 +29,7 @@ def __init__( scalarization_weights: Optional[Tensor] = None, sampler: Optional[MCSampler] = None, objective: Optional[MCMultiOutputObjective] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, X_pending: Optional[Tensor] = None, eta: Union[Tensor, float] = 1e-3, fat: bool = True, diff --git a/botorch/acquisition/multi_objective/predictive_entropy_search.py b/botorch/acquisition/multi_objective/predictive_entropy_search.py index 2146c85dd3..6157ecab7e 100644 --- a/botorch/acquisition/multi_objective/predictive_entropy_search.py +++ b/botorch/acquisition/multi_objective/predictive_entropy_search.py @@ -23,7 +23,7 @@ from __future__ import annotations -from typing import Optional, Tuple +from typing import Optional import torch from botorch.acquisition.acquisition import AcquisitionFunction @@ -520,7 +520,7 @@ def _initialize_predictive_matrices( observation_noise: bool = True, jitter: float = 1e-4, natural: bool = True, -) -> Tuple[Tensor, Tensor, Tensor, Tensor]: +) -> tuple[Tensor, Tensor, Tensor, Tensor]: r"""Initializes the natural predictive mean and covariance matrix. For a multivariate normal distribution with mean mu and covariance Sigma, the natural mean is Sigma^{-1} mu and the natural covariance is Sigma^{-1}. @@ -584,7 +584,7 @@ def _initialize_predictive_matrices( def _get_omega_f_contribution( mean: Tensor, cov: Tensor, N: int, P: int, M: int -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Extract the mean vector and covariance matrix corresponding to the `2 x 2` multivariate normal blocks in the objective model between the points in `X` and the Pareto optimal set. @@ -678,7 +678,7 @@ def _update_omega( M: int, maximize: bool = True, jitter: float = 1e-6, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Computes the new omega factors by matching the moments. Args: @@ -847,7 +847,7 @@ def _safe_update_omega( M: int, maximize: bool = True, jitter: float = 1e-6, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Try to update the new omega factors by matching the moments. If the update is not possible then this returns the initial omega factors. 
@@ -902,7 +902,7 @@ def _update_marginals( omega_f_nat_cov: Tensor, N: int, P: int, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Computes the new marginal by summing up all the natural factors. Args: @@ -1002,7 +1002,7 @@ def _update_damping( nat_cov_new: Tensor, damping_factor: Tensor, jitter: Tensor, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Updates the damping factor whilst ensuring the covariance matrix is positive definite by trying a Cholesky decomposition. @@ -1070,7 +1070,7 @@ def _update_damping_when_converged( damping_factor: Tensor, iteration: Tensor, threshold: float = 1e-3, -) -> Tuple[Tensor, Tensor, Tensor]: +) -> tuple[Tensor, Tensor, Tensor]: r"""Set the damping factor to 0 once converged. Convergence is determined by the relative change in the entries of the mean and covariance matrix. @@ -1114,7 +1114,7 @@ def _augment_factors_with_cached_factors( cached_omega_f_nat_mean: Tensor, omega_f_nat_cov: Tensor, cached_omega_f_nat_cov: Tensor, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Incorporate the cached Pareto updated factors in the forward call and augment them with the previously computed factors. diff --git a/botorch/acquisition/multi_objective/utils.py b/botorch/acquisition/multi_objective/utils.py index bb4502b137..369c0e6a5c 100644 --- a/botorch/acquisition/multi_objective/utils.py +++ b/botorch/acquisition/multi_objective/utils.py @@ -13,7 +13,7 @@ import math import warnings from math import ceil -from typing import Any, Callable, Dict, List, Optional, Tuple +from typing import Any, Callable, Optional import torch from botorch.acquisition import monte_carlo # noqa F401 @@ -69,7 +69,7 @@ def prune_inferior_points_multi_objective( X: Tensor, ref_point: Tensor, objective: Optional[MCMultiOutputObjective] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, num_samples: int = 2048, max_frac: float = 1.0, marginalize_dim: Optional[int] = None, @@ -269,7 +269,7 @@ def random_search_optimizer( maximize: bool, pop_size: int = 1024, max_tries: int = 10, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Optimize a function via random search. Args: @@ -318,11 +318,11 @@ def sample_optimal_points( num_samples: int, num_points: int, optimizer: Callable[ - [GenericDeterministicModel, Tensor, int, bool, Any], Tuple[Tensor, Tensor] + [GenericDeterministicModel, Tensor, int, bool, Any], tuple[Tensor, Tensor] ] = random_search_optimizer, maximize: bool = True, - optimizer_kwargs: Optional[Dict[str, Any]] = None, -) -> Tuple[Tensor, Tensor]: + optimizer_kwargs: Optional[dict[str, Any]] = None, +) -> tuple[Tensor, Tensor]: r"""Compute a collection of optimal inputs and outputs from samples of a Gaussian Process (GP). @@ -354,7 +354,7 @@ def sample_optimal_points( - A `num_samples x num_points x M`-dim Tensor containing the collection of optimal objectives. 
""" - tkwargs: Dict[str, Any] = {"dtype": bounds.dtype, "device": bounds.device} + tkwargs: dict[str, Any] = {"dtype": bounds.dtype, "device": bounds.device} M = model.num_outputs d = bounds.shape[-1] if M == 1: diff --git a/botorch/acquisition/multi_step_lookahead.py b/botorch/acquisition/multi_step_lookahead.py index 15e1c798be..930878571f 100644 --- a/botorch/acquisition/multi_step_lookahead.py +++ b/botorch/acquisition/multi_step_lookahead.py @@ -19,7 +19,7 @@ import math import warnings -from typing import Any, Callable, Dict, List, Optional, Tuple, Type +from typing import Any, Callable, Optional import numpy as np import torch @@ -43,7 +43,7 @@ from torch.nn import ModuleList -TAcqfArgConstructor = Callable[[Model, Tensor], Dict[str, Any]] +TAcqfArgConstructor = Callable[[Model, Tensor], dict[str, Any]] class qMultiStepLookahead(MCAcquisitionFunction, OneShotAcquisitionFunction): @@ -52,14 +52,14 @@ class qMultiStepLookahead(MCAcquisitionFunction, OneShotAcquisitionFunction): def __init__( self, model: Model, - batch_sizes: List[int], - num_fantasies: Optional[List[int]] = None, - samplers: Optional[List[MCSampler]] = None, - valfunc_cls: Optional[List[Optional[Type[AcquisitionFunction]]]] = None, - valfunc_argfacs: Optional[List[Optional[TAcqfArgConstructor]]] = None, + batch_sizes: list[int], + num_fantasies: Optional[list[int]] = None, + samplers: Optional[list[MCSampler]] = None, + valfunc_cls: Optional[list[Optional[type[AcquisitionFunction]]]] = None, + valfunc_argfacs: Optional[list[Optional[TAcqfArgConstructor]]] = None, objective: Optional[MCAcquisitionObjective] = None, posterior_transform: Optional[PosteriorTransform] = None, - inner_mc_samples: Optional[List[int]] = None, + inner_mc_samples: Optional[list[int]] = None, X_pending: Optional[Tensor] = None, collapse_fantasy_base_samples: bool = True, ) -> None: @@ -131,7 +131,7 @@ def __init__( if samplers is None: # If collapse_fantasy_base_samples is False, the `batch_range_override` # is set on the samplers during the forward call. - samplers: List[MCSampler] = [ + samplers: list[MCSampler] = [ SobolQMCNormalSampler(sample_shape=torch.Size([nf])) for nf in num_fantasies ] @@ -225,7 +225,7 @@ def get_augmented_q_batch_size(self, q: int) -> int: """ return q + self._num_auxiliary - def get_split_shapes(self, X: Tensor) -> Tuple[Size, List[Size], List[int]]: + def get_split_shapes(self, X: Tensor) -> tuple[Size, list[Size], list[int]]: r"""Get the split shapes from X. Args: @@ -249,7 +249,7 @@ def get_split_shapes(self, X: Tensor) -> Tuple[Size, List[Size], List[int]]: sizes = [s[: (-2 - len(batch_shape))].numel() * s[-2] for s in shapes] return batch_shape, shapes, sizes - def get_multi_step_tree_input_representation(self, X: Tensor) -> List[Tensor]: + def get_multi_step_tree_input_representation(self, X: Tensor) -> list[Tensor]: r"""Get the multi-step tree representation of X. 
Args: @@ -316,11 +316,11 @@ def get_induced_fantasy_model(self, X: Tensor) -> Model: def _step( model: Model, - Xs: List[Tensor], - samplers: List[Optional[MCSampler]], - valfunc_cls: List[Optional[Type[AcquisitionFunction]]], - valfunc_argfacs: List[Optional[TAcqfArgConstructor]], - inner_samplers: List[Optional[MCSampler]], + Xs: list[Tensor], + samplers: list[Optional[MCSampler]], + valfunc_cls: list[Optional[type[AcquisitionFunction]]], + valfunc_argfacs: list[Optional[TAcqfArgConstructor]], + inner_samplers: list[Optional[MCSampler]], objective: MCAcquisitionObjective, posterior_transform: Optional[PosteriorTransform], running_val: Optional[Tensor] = None, @@ -424,7 +424,7 @@ def _step( def _compute_stage_value( model: Model, - valfunc_cls: Optional[Type[AcquisitionFunction]], + valfunc_cls: Optional[type[AcquisitionFunction]], X: Tensor, objective: MCAcquisitionObjective, posterior_transform: Optional[PosteriorTransform], @@ -455,7 +455,7 @@ def _compute_stage_value( """ if valfunc_cls is None: return None - common_kwargs: Dict[str, Any] = { + common_kwargs: dict[str, Any] = { "model": model, "posterior_transform": posterior_transform, } @@ -500,11 +500,11 @@ def _construct_sample_weights( def _construct_inner_samplers( - batch_sizes: List[int], - valfunc_cls: List[Optional[Type[AcquisitionFunction]]], - inner_mc_samples: List[Optional[int]], + batch_sizes: list[int], + valfunc_cls: list[Optional[type[AcquisitionFunction]]], + inner_mc_samples: list[Optional[int]], objective: Optional[MCAcquisitionObjective] = None, -) -> List[Optional[MCSampler]]: +) -> list[Optional[MCSampler]]: r"""Check validity of inputs and construct inner samplers. Helper function to be used internally for constructing inner samplers. @@ -562,7 +562,7 @@ def _construct_inner_samplers( def _get_induced_fantasy_model( - model: Model, Xs: List[Tensor], samplers: List[Optional[MCSampler]] + model: Model, Xs: list[Tensor], samplers: list[Optional[MCSampler]] ) -> Model: r"""Recursive computation of the fantasy model induced by an input tree. 
@@ -631,7 +631,7 @@ def mixin_layer(X: Tensor, bounds: Tensor, eta: float) -> Tensor: perturbations = unnormalize(B.sample(X.shape).squeeze(-1), bounds) return (1 - eta) * X + eta * perturbations - def make_init_tree(Xopts: List[Tensor], bounds: Tensor, etas: Tensor) -> Tensor: + def make_init_tree(Xopts: list[Tensor], bounds: Tensor, etas: Tensor) -> Tensor: Xtrs = [mixin_layer(X=X, bounds=bounds, eta=eta) for eta, X in zip(etas, Xopts)] return torch.cat(Xtrs, dim=-2) @@ -660,6 +660,6 @@ def mixin_tree(T: Tensor, bounds: Tensor, alpha: float) -> Tensor: return X_init[:raw_samples] -def make_best_f(model: Model, X: Tensor) -> Dict[str, Any]: +def make_best_f(model: Model, X: Tensor) -> dict[str, Any]: r"""Extract the best observed training target value from the model.""" return {"best_f": model.train_targets.max(dim=-1).values} diff --git a/botorch/acquisition/objective.py b/botorch/acquisition/objective.py index 18a01c9f2f..05d8b7d4f8 100644 --- a/botorch/acquisition/objective.py +++ b/botorch/acquisition/objective.py @@ -10,7 +10,7 @@ import warnings from abc import ABC, abstractmethod -from typing import Callable, List, Optional, TYPE_CHECKING, Union +from typing import Callable, Optional, TYPE_CHECKING, Union import torch from botorch.exceptions.errors import UnsupportedError @@ -412,7 +412,7 @@ class ConstrainedMCObjective(GenericMCObjective): def __init__( self, objective: Callable[[Tensor, Optional[Tensor]], Tensor], - constraints: List[Callable[[Tensor], Tensor]], + constraints: list[Callable[[Tensor], Tensor]], infeasible_cost: Union[Tensor, float] = 0.0, eta: Union[Tensor, float] = 1e-3, ) -> None: diff --git a/botorch/acquisition/penalized.py b/botorch/acquisition/penalized.py index 9100bd6f69..c01b1927eb 100644 --- a/botorch/acquisition/penalized.py +++ b/botorch/acquisition/penalized.py @@ -11,7 +11,7 @@ from __future__ import annotations import math -from typing import Any, Callable, List, Optional +from typing import Any, Callable, Optional import torch from botorch.acquisition.acquisition import AcquisitionFunction @@ -112,7 +112,7 @@ class GroupLassoPenalty(torch.nn.Module): r"""Group lasso penalty class to be added to any arbitrary acquisition function to construct a PenalizedAcquisitionFunction.""" - def __init__(self, init_point: Tensor, groups: List[List[int]]): + def __init__(self, init_point: Tensor, groups: list[list[int]]): r"""Initializing Group-Lasso regularization. Args: @@ -246,7 +246,7 @@ def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None: ) -def group_lasso_regularizer(X: Tensor, groups: List[List[int]]) -> Tensor: +def group_lasso_regularizer(X: Tensor, groups: list[list[int]]) -> Tensor: r"""Computes the group lasso regularization function for the given point. 
Args: diff --git a/botorch/acquisition/utils.py b/botorch/acquisition/utils.py index 2c4e58ba75..198228409a 100644 --- a/botorch/acquisition/utils.py +++ b/botorch/acquisition/utils.py @@ -11,7 +11,7 @@ from __future__ import annotations import math -from typing import Callable, Dict, List, Optional, Tuple +from typing import Callable, Optional import torch from botorch.acquisition.objective import ( @@ -90,7 +90,7 @@ def repeat_to_match_aug_dim(target_tensor: Tensor, reference_tensor: Tensor) -> def compute_best_feasible_objective( samples: Tensor, obj: Tensor, - constraints: Optional[List[Callable[[Tensor], Tensor]]], + constraints: Optional[list[Callable[[Tensor], Tensor]]], model: Optional[Model] = None, objective: Optional[MCAcquisitionObjective] = None, posterior_transform: Optional[PosteriorTransform] = None, @@ -247,7 +247,7 @@ def prune_inferior_points( X: Tensor, objective: Optional[MCAcquisitionObjective] = None, posterior_transform: Optional[PosteriorTransform] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, num_samples: int = 2048, max_frac: float = 1.0, sampler: Optional[MCSampler] = None, @@ -351,7 +351,7 @@ def prune_inferior_points( def project_to_target_fidelity( X: Tensor, - target_fidelities: Optional[Dict[int, float]] = None, + target_fidelities: Optional[dict[int, float]] = None, d: Optional[int] = None, ) -> Tensor: r"""Project `X` onto the target set of fidelities. @@ -413,7 +413,7 @@ def project_to_target_fidelity( def expand_trace_observations( - X: Tensor, fidelity_dims: Optional[List[int]] = None, num_trace_obs: int = 0 + X: Tensor, fidelity_dims: Optional[list[int]] = None, num_trace_obs: int = 0 ) -> Tensor: r"""Expand `X` with trace observations. @@ -491,7 +491,7 @@ def get_optimal_samples( raw_samples: int = 1024, num_restarts: int = 20, maximize: bool = True, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: """Draws sample paths from the posterior and maximizes the samples using GD. Args: diff --git a/botorch/cross_validation.py b/botorch/cross_validation.py index ad632cb6ce..2fb0831797 100644 --- a/botorch/cross_validation.py +++ b/botorch/cross_validation.py @@ -10,7 +10,7 @@ from __future__ import annotations -from typing import Any, Dict, NamedTuple, Optional, Type +from typing import Any, NamedTuple, Optional import torch from botorch.fit import fit_gpytorch_mll @@ -107,12 +107,12 @@ def gen_loo_cv_folds( def batch_cross_validation( - model_cls: Type[GPyTorchModel], - mll_cls: Type[MarginalLogLikelihood], + model_cls: type[GPyTorchModel], + mll_cls: type[MarginalLogLikelihood], cv_folds: CVFolds, - fit_args: Optional[Dict[str, Any]] = None, + fit_args: Optional[dict[str, Any]] = None, observation_noise: bool = False, - model_init_kwargs: Optional[Dict[str, Any]] = None, + model_init_kwargs: Optional[dict[str, Any]] = None, ) -> CVResults: r"""Perform cross validation by using GPyTorch batch mode. 
diff --git a/botorch/fit.py b/botorch/fit.py index 1b2ab0fc85..44c74618bd 100644 --- a/botorch/fit.py +++ b/botorch/fit.py @@ -9,10 +9,11 @@ from __future__ import annotations import logging +from collections.abc import Sequence from copy import deepcopy from functools import partial from itertools import filterfalse -from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Type, Union +from typing import Any, Callable, Optional, Union from warnings import catch_warnings, simplefilter, warn_explicit, WarningMessage from botorch.exceptions.errors import ModelFittingError, UnsupportedError @@ -73,10 +74,10 @@ def _rethrow_warn(w: WarningMessage) -> bool: def fit_gpytorch_mll( mll: MarginalLogLikelihood, - closure: Optional[Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]]] = None, + closure: Optional[Callable[[], tuple[Tensor, Sequence[Optional[Tensor]]]]] = None, optimizer: Optional[Callable] = None, - closure_kwargs: Optional[Dict[str, Any]] = None, - optimizer_kwargs: Optional[Dict[str, Any]] = None, + closure_kwargs: Optional[dict[str, Any]] = None, + optimizer_kwargs: Optional[dict[str, Any]] = None, **kwargs: Any, ) -> MarginalLogLikelihood: r"""Clearing house for fitting models passed as GPyTorch MarginalLogLikelihoods. @@ -115,17 +116,17 @@ def fit_gpytorch_mll( @FitGPyTorchMLL.register(MarginalLogLikelihood, object, object) def _fit_fallback( mll: MarginalLogLikelihood, - _: Type[object], - __: Type[object], + _: type[object], + __: type[object], *, - closure: Optional[Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]]] = None, + closure: Optional[Callable[[], tuple[Tensor, Sequence[Optional[Tensor]]]]] = None, optimizer: Callable = fit_gpytorch_mll_scipy, - closure_kwargs: Optional[Dict[str, Any]] = None, - optimizer_kwargs: Optional[Dict[str, Any]] = None, + closure_kwargs: Optional[dict[str, Any]] = None, + optimizer_kwargs: Optional[dict[str, Any]] = None, max_attempts: int = 5, pick_best_of_all_attempts: bool = False, warning_handler: Callable[[WarningMessage], bool] = DEFAULT_WARNING_HANDLER, - caught_exception_types: Tuple[Type[BaseException], ...] = (NotPSDError,), + caught_exception_types: tuple[type[BaseException], ...] = (NotPSDError,), **ignore: Any, ) -> MarginalLogLikelihood: r"""Generic fallback method for fitting Gaussian processes. @@ -166,9 +167,9 @@ def _fit_fallback( """ # Setup optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs - params_nograd: Dict[str, Parameter] = None # pyre-ignore [9] - ckpt_nograd: Dict[str, TensorCheckpoint] = None # pyre-ignore [9] - ckpt: Dict[str, TensorCheckpoint] = None # pyre-ignore [9] + params_nograd: dict[str, Parameter] = None # pyre-ignore [9] + ckpt_nograd: dict[str, TensorCheckpoint] = None # pyre-ignore [9] + ckpt: dict[str, TensorCheckpoint] = None # pyre-ignore [9] # Build closure mll.train() @@ -261,8 +262,8 @@ def _fit_fallback( @FitGPyTorchMLL.register(SumMarginalLogLikelihood, object, ModelListGP) def _fit_list( mll: SumMarginalLogLikelihood, - _: Type[Likelihood], - __: Type[ModelListGP], + _: type[Likelihood], + __: type[ModelListGP], **kwargs: Any, ) -> SumMarginalLogLikelihood: r"""Fitting routine for lists of independent Gaussian processes. 
@@ -285,10 +286,10 @@ def _fit_list( @FitGPyTorchMLL.register(_ApproximateMarginalLogLikelihood, object, object) def _fit_fallback_approximate( mll: _ApproximateMarginalLogLikelihood, - _: Type[Likelihood], - __: Type[ApproximateGPyTorchModel], + _: type[Likelihood], + __: type[ApproximateGPyTorchModel], *, - closure: Optional[Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]]] = None, + closure: Optional[Callable[[], tuple[Tensor, Sequence[Optional[Tensor]]]]] = None, data_loader: Optional[DataLoader] = None, optimizer: Optional[Callable] = None, full_batch_limit: int = 1024, diff --git a/botorch/generation/gen.py b/botorch/generation/gen.py index 966ac48dc0..d038fd27ac 100644 --- a/botorch/generation/gen.py +++ b/botorch/generation/gen.py @@ -13,7 +13,7 @@ import time import warnings from functools import partial -from typing import Any, Callable, Dict, List, NoReturn, Optional, Tuple, Type, Union +from typing import Any, Callable, NoReturn, Optional, Union import numpy as np import torch @@ -40,7 +40,7 @@ logger = _get_logger() -TGenCandidates = Callable[[Tensor, AcquisitionFunction, Any], Tuple[Tensor, Tensor]] +TGenCandidates = Callable[[Tensor, AcquisitionFunction, Any], tuple[Tensor, Tensor]] def gen_candidates_scipy( @@ -48,13 +48,13 @@ def gen_candidates_scipy( acquisition_function: AcquisitionFunction, lower_bounds: Optional[Union[float, Tensor]] = None, upper_bounds: Optional[Union[float, Tensor]] = None, - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - nonlinear_inequality_constraints: Optional[List[Tuple[Callable, bool]]] = None, - options: Optional[Dict[str, Any]] = None, - fixed_features: Optional[Dict[int, Optional[float]]] = None, + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + equality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + nonlinear_inequality_constraints: Optional[list[tuple[Callable, bool]]] = None, + options: Optional[dict[str, Any]] = None, + fixed_features: Optional[dict[int, Optional[float]]] = None, timeout_sec: Optional[float] = None, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Generate a set of candidates using `scipy.optimize.minimize`. Optimizes an acquisition function starting from a set of initial candidates @@ -303,12 +303,12 @@ def gen_candidates_torch( acquisition_function: AcquisitionFunction, lower_bounds: Optional[Union[float, Tensor]] = None, upper_bounds: Optional[Union[float, Tensor]] = None, - optimizer: Type[Optimizer] = torch.optim.Adam, - options: Optional[Dict[str, Union[float, str]]] = None, + optimizer: type[Optimizer] = torch.optim.Adam, + options: Optional[dict[str, Union[float, str]]] = None, callback: Optional[Callable[[int, Tensor, Tensor], NoReturn]] = None, - fixed_features: Optional[Dict[int, Optional[float]]] = None, + fixed_features: Optional[dict[int, Optional[float]]] = None, timeout_sec: Optional[float] = None, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Generate a set of candidates using a `torch.optim` optimizer. 
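A minimal sketch of `gen_candidates_torch` as annotated above; the model, acquisition function, and bounds are toy assumptions:

    import torch
    from botorch.acquisition import UpperConfidenceBound
    from botorch.generation.gen import gen_candidates_torch
    from botorch.models import SingleTaskGP

    X = torch.rand(8, 2, dtype=torch.double)
    Y = X.sin().sum(dim=-1, keepdim=True)
    acqf = UpperConfidenceBound(SingleTaskGP(X, Y), beta=0.2)
    # 5 restarts of a single (q=1) candidate in the unit square.
    Xinit = torch.rand(5, 1, 2, dtype=torch.double)
    candidates, acq_values = gen_candidates_torch(
        initial_conditions=Xinit,
        acquisition_function=acqf,
        lower_bounds=0.0,
        upper_bounds=1.0,
        options={"maxiter": 50},
    )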
Optimizes an acquisition function starting from a set of initial candidates @@ -455,7 +455,7 @@ def get_best_candidates(batch_candidates: Tensor, batch_values: Tensor) -> Tenso return batch_candidates[best] -def _process_scipy_result(res: OptimizeResult, options: Dict[str, Any]) -> None: +def _process_scipy_result(res: OptimizeResult, options: dict[str, Any]) -> None: r"""Process scipy optimization result to produce relevant logs and warnings.""" if "success" not in res.keys() or "status" not in res.keys(): with warnings.catch_warnings(): diff --git a/botorch/generation/utils.py b/botorch/generation/utils.py index 037d107306..b4cb0ad5ad 100644 --- a/botorch/generation/utils.py +++ b/botorch/generation/utils.py @@ -8,7 +8,7 @@ import warnings from dataclasses import dataclass -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import torch @@ -21,8 +21,8 @@ def _convert_nonlinear_inequality_constraints( - nonlinear_inequality_constraints: List[Union[Callable, Tuple[Callable, bool]]] -) -> List[Tuple[Callable, bool]]: + nonlinear_inequality_constraints: list[Union[Callable, tuple[Callable, bool]]] +) -> list[tuple[Callable, bool]]: """Convert legacy definitions of nonlinear inequality constraints into the new format. Assumes intra-point constraints. """ @@ -94,20 +94,20 @@ class _NoFixedFeatures: initial_conditions: Tensor lower_bounds: Optional[Union[float, Tensor]] upper_bounds: Optional[Union[float, Tensor]] - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] - equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] - nonlinear_inequality_constraints: Optional[List[Callable[[Tensor], Tensor]]] + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] + equality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] + nonlinear_inequality_constraints: Optional[list[Callable[[Tensor], Tensor]]] def _remove_fixed_features_from_optimization( - fixed_features: Dict[int, Optional[float]], + fixed_features: dict[int, Optional[float]], acquisition_function: AcquisitionFunction, initial_conditions: Tensor, lower_bounds: Optional[Union[float, Tensor]], upper_bounds: Optional[Union[float, Tensor]], - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]], - equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]], - nonlinear_inequality_constraints: Optional[List[Callable[[Tensor], Tensor]]], + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]], + equality_constraints: Optional[list[tuple[Tensor, Tensor, float]]], + nonlinear_inequality_constraints: Optional[list[Callable[[Tensor], Tensor]]], ) -> _NoFixedFeatures: """ Given a set of non-empty fixed features, this function effectively reduces the diff --git a/botorch/models/approximate_gp.py b/botorch/models/approximate_gp.py index 9b10549700..f9face99fb 100644 --- a/botorch/models/approximate_gp.py +++ b/botorch/models/approximate_gp.py @@ -32,7 +32,7 @@ import copy import warnings -from typing import Optional, Type, TypeVar, Union +from typing import Optional, TypeVar, Union import torch from botorch.models.gpytorch import GPyTorchModel @@ -183,7 +183,7 @@ def __init__( covar_module: Optional[Kernel] = None, mean_module: Optional[Mean] = None, variational_distribution: Optional[_VariationalDistribution] = None, - variational_strategy: Type[_VariationalStrategy] = VariationalStrategy, + variational_strategy: type[_VariationalStrategy] = VariationalStrategy, inducing_points: Optional[Union[Tensor, int]] = 
None, inducing_point_allocator: Optional[InducingPointAllocator] = None, ) -> None: @@ -328,7 +328,7 @@ def __init__( covar_module: Optional[Kernel] = None, mean_module: Optional[Mean] = None, variational_distribution: Optional[_VariationalDistribution] = None, - variational_strategy: Type[_VariationalStrategy] = VariationalStrategy, + variational_strategy: type[_VariationalStrategy] = VariationalStrategy, inducing_points: Optional[Union[Tensor, int]] = None, outcome_transform: Optional[OutcomeTransform] = None, input_transform: Optional[InputTransform] = None, diff --git a/botorch/models/contextual.py b/botorch/models/contextual.py index 55decc5de7..4388bd5079 100644 --- a/botorch/models/contextual.py +++ b/botorch/models/contextual.py @@ -4,7 +4,7 @@ # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from typing import Any, Dict, List, Optional +from typing import Any, Optional from botorch.models.gp_regression import SingleTaskGP from botorch.models.kernels.contextual_lcea import LCEAKernel @@ -21,7 +21,7 @@ def __init__( train_X: Tensor, train_Y: Tensor, train_Yvar: Optional[Tensor], - decomposition: Dict[str, List[int]], + decomposition: dict[str, list[int]], ) -> None: r""" Args: @@ -46,8 +46,8 @@ def __init__( def construct_inputs( cls, training_data: SupervisedDataset, - decomposition: Dict[str, List[int]], - ) -> Dict[str, Any]: + decomposition: dict[str, list[int]], + ) -> dict[str, Any]: r"""Construct `Model` keyword arguments from a dict of `SupervisedDataset`. Args: @@ -74,12 +74,12 @@ def __init__( train_X: Tensor, train_Y: Tensor, train_Yvar: Optional[Tensor], - decomposition: Dict[str, List[int]], + decomposition: dict[str, list[int]], train_embedding: bool = True, - cat_feature_dict: Optional[Dict] = None, - embs_feature_dict: Optional[Dict] = None, - embs_dim_list: Optional[List[int]] = None, - context_weight_dict: Optional[Dict] = None, + cat_feature_dict: Optional[dict] = None, + embs_feature_dict: Optional[dict] = None, + embs_dim_list: Optional[list[int]] = None, + context_weight_dict: Optional[dict] = None, ) -> None: r""" Args: @@ -120,13 +120,13 @@ def __init__( def construct_inputs( cls, training_data: SupervisedDataset, - decomposition: Dict[str, List[str]], + decomposition: dict[str, list[str]], train_embedding: bool = True, - cat_feature_dict: Optional[Dict] = None, - embs_feature_dict: Optional[Dict] = None, - embs_dim_list: Optional[List[int]] = None, - context_weight_dict: Optional[Dict] = None, - ) -> Dict[str, Any]: + cat_feature_dict: Optional[dict] = None, + embs_feature_dict: Optional[dict] = None, + embs_dim_list: Optional[list[int]] = None, + context_weight_dict: Optional[dict] = None, + ) -> dict[str, Any]: r"""Construct `Model` keyword arguments from a dict of `SupervisedDataset`. 
Args: diff --git a/botorch/models/contextual_multioutput.py b/botorch/models/contextual_multioutput.py index 40a7f01846..81178f54d1 100644 --- a/botorch/models/contextual_multioutput.py +++ b/botorch/models/contextual_multioutput.py @@ -14,7 +14,7 @@ """ import warnings -from typing import Any, Dict, List, Optional, Union +from typing import Any, Optional, Union import torch from botorch.models.multitask import MultiTaskGP @@ -48,9 +48,9 @@ def __init__( likelihood: Optional[Likelihood] = None, context_cat_feature: Optional[Tensor] = None, context_emb_feature: Optional[Tensor] = None, - embs_dim_list: Optional[List[int]] = None, - output_tasks: Optional[List[int]] = None, - all_tasks: Optional[List[int]] = None, + embs_dim_list: Optional[list[int]] = None, + output_tasks: Optional[list[int]] = None, + all_tasks: Optional[list[int]] = None, input_transform: Optional[InputTransform] = None, outcome_transform: Optional[OutcomeTransform] = None, ) -> None: @@ -211,12 +211,12 @@ def construct_inputs( cls, training_data: Union[SupervisedDataset, MultiTaskDataset], task_feature: int, - output_tasks: Optional[List[int]] = None, + output_tasks: Optional[list[int]] = None, context_cat_feature: Optional[Tensor] = None, context_emb_feature: Optional[Tensor] = None, - embs_dim_list: Optional[List[int]] = None, + embs_dim_list: Optional[list[int]] = None, **kwargs, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: r"""Construct `Model` keyword arguments from a dataset and other args. Args: @@ -265,8 +265,8 @@ def __init__( task_feature: int, context_cat_feature: Optional[Tensor] = None, context_emb_feature: Optional[Tensor] = None, - embs_dim_list: Optional[List[int]] = None, - output_tasks: Optional[List[int]] = None, + embs_dim_list: Optional[list[int]] = None, + output_tasks: Optional[list[int]] = None, ) -> None: r""" Args: diff --git a/botorch/models/converter.py b/botorch/models/converter.py index ecdaef2400..4f2f348b9c 100644 --- a/botorch/models/converter.py +++ b/botorch/models/converter.py @@ -12,7 +12,7 @@ import warnings from copy import deepcopy -from typing import Dict, Optional, Set, Tuple +from typing import Optional import torch from botorch.exceptions import UnsupportedError @@ -426,10 +426,10 @@ def batched_multi_output_to_single_output( def _get_adjusted_batch_keys( - batch_state_dict: Dict[str, Tensor], + batch_state_dict: dict[str, Tensor], input_transform: Optional[InputTransform], outcome_transform: Optional[OutcomeTransform] = None, -) -> Tuple[Set[str], Set[str]]: +) -> tuple[set[str], set[str]]: r"""Group the keys based on whether the value requires batch shape changes. 
Args: diff --git a/botorch/models/cost.py b/botorch/models/cost.py index 7ee37590f7..17b1998a61 100644 --- a/botorch/models/cost.py +++ b/botorch/models/cost.py @@ -15,7 +15,7 @@ from __future__ import annotations -from typing import Dict, Optional +from typing import Optional import torch from botorch.models.deterministic import DeterministicModel @@ -45,7 +45,7 @@ class AffineFidelityCostModel(DeterministicModel): def __init__( self, - fidelity_weights: Optional[Dict[int, float]] = None, + fidelity_weights: Optional[dict[int, float]] = None, fixed_cost: float = 0.01, ) -> None: r""" diff --git a/botorch/models/deterministic.py b/botorch/models/deterministic.py index 63c4f2974a..8c7b9a2436 100644 --- a/botorch/models/deterministic.py +++ b/botorch/models/deterministic.py @@ -27,7 +27,7 @@ from __future__ import annotations from abc import abstractmethod -from typing import Callable, List, Optional, Union +from typing import Callable, Optional, Union import torch from botorch.models.ensemble import EnsembleModel @@ -76,7 +76,7 @@ def __init__(self, f: Callable[[Tensor], Tensor], num_outputs: int = 1) -> None: self._f = f self._num_outputs = num_outputs - def subset_output(self, idcs: List[int]) -> GenericDeterministicModel: + def subset_output(self, idcs: list[int]) -> GenericDeterministicModel: r"""Subset the model along the output dimension. Args: @@ -131,7 +131,7 @@ def __init__(self, a: Tensor, b: Union[Tensor, float] = 0.01) -> None: self.register_buffer("b", b.expand(a.size(-1))) self._num_outputs = a.size(-1) - def subset_output(self, idcs: List[int]) -> AffineDeterministicModel: + def subset_output(self, idcs: list[int]) -> AffineDeterministicModel: r"""Subset the model along the output dimension. Args: diff --git a/botorch/models/ensemble.py b/botorch/models/ensemble.py index 5561b65775..abf896f693 100644 --- a/botorch/models/ensemble.py +++ b/botorch/models/ensemble.py @@ -12,7 +12,7 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import Any, List, Optional +from typing import Any, Optional from botorch.acquisition.objective import PosteriorTransform from botorch.exceptions.errors import UnsupportedError @@ -48,7 +48,7 @@ def num_outputs(self) -> int: def posterior( self, X: Tensor, - output_indices: Optional[List[int]] = None, + output_indices: Optional[list[int]] = None, posterior_transform: Optional[PosteriorTransform] = None, **kwargs: Any, ) -> EnsemblePosterior: diff --git a/botorch/models/fully_bayesian.py b/botorch/models/fully_bayesian.py index ccc396478f..27d066ef00 100644 --- a/botorch/models/fully_bayesian.py +++ b/botorch/models/fully_bayesian.py @@ -33,7 +33,8 @@ import math from abc import abstractmethod -from typing import Any, Dict, List, Mapping, Optional, Tuple +from collections.abc import Mapping +from typing import Any, Optional import pyro import torch @@ -132,15 +133,15 @@ def sample(self) -> None: @abstractmethod def postprocess_mcmc_samples( self, - mcmc_samples: Dict[str, Tensor], - ) -> Dict[str, Tensor]: + mcmc_samples: dict[str, Tensor], + ) -> dict[str, Tensor]: """Post-process the final MCMC samples.""" pass # pragma: no cover @abstractmethod def load_mcmc_samples( - self, mcmc_samples: Dict[str, Tensor] - ) -> Tuple[Mean, Kernel, Likelihood]: + self, mcmc_samples: dict[str, Tensor] + ) -> tuple[Mean, Kernel, Likelihood]: pass # pragma: no cover @@ -243,8 +244,8 @@ def sample_lengthscale( return lengthscale def postprocess_mcmc_samples( - self, mcmc_samples: Dict[str, Tensor] - ) -> Dict[str, Tensor]: + self, 
mcmc_samples: dict[str, Tensor] + ) -> dict[str, Tensor]: r"""Post-process the MCMC samples. This computes the true lengthscales and removes the inverse lengthscales and @@ -261,8 +262,8 @@ def postprocess_mcmc_samples( return mcmc_samples def load_mcmc_samples( - self, mcmc_samples: Dict[str, Tensor] - ) -> Tuple[Mean, Kernel, Likelihood]: + self, mcmc_samples: dict[str, Tensor] + ) -> tuple[Mean, Kernel, Likelihood]: r"""Load the MCMC samples into the mean_module, covar_module, and likelihood.""" tkwargs = {"device": self.train_X.device, "dtype": self.train_X.dtype} num_mcmc_samples = len(mcmc_samples["mean"]) @@ -442,7 +443,7 @@ def train(self, mode: bool = True) -> None: self.covar_module = None self.likelihood = None - def load_mcmc_samples(self, mcmc_samples: Dict[str, Tensor]) -> None: + def load_mcmc_samples(self, mcmc_samples: dict[str, Tensor]) -> None: r"""Load the MCMC hyperparameter samples into the model. This method will be called by `fit_fully_bayesian_model_nuts` when the model @@ -505,7 +506,7 @@ def forward(self, X: Tensor) -> MultivariateNormal: def posterior( self, X: Tensor, - output_indices: Optional[List[int]] = None, + output_indices: Optional[list[int]] = None, observation_noise: bool = False, posterior_transform: Optional[PosteriorTransform] = None, **kwargs: Any, diff --git a/botorch/models/fully_bayesian_multitask.py b/botorch/models/fully_bayesian_multitask.py index bdc5a48723..94c3f30d94 100644 --- a/botorch/models/fully_bayesian_multitask.py +++ b/botorch/models/fully_bayesian_multitask.py @@ -8,7 +8,8 @@ """ -from typing import Any, Dict, List, Mapping, NoReturn, Optional, Tuple +from collections.abc import Mapping +from typing import Any, NoReturn, Optional import pyro import torch @@ -127,8 +128,8 @@ def sample_task_lengthscale( ) def load_mcmc_samples( - self, mcmc_samples: Dict[str, Tensor] - ) -> Tuple[Mean, Kernel, Likelihood, Kernel, Parameter]: + self, mcmc_samples: dict[str, Tensor] + ) -> tuple[Mean, Kernel, Likelihood, Kernel, Parameter]: r"""Load the MCMC samples into the mean_module, covar_module, and likelihood.""" tkwargs = {"device": self.train_X.device, "dtype": self.train_X.dtype} num_mcmc_samples = len(mcmc_samples["mean"]) @@ -196,9 +197,9 @@ def __init__( train_Y: Tensor, task_feature: int, train_Yvar: Optional[Tensor] = None, - output_tasks: Optional[List[int]] = None, + output_tasks: Optional[list[int]] = None, rank: Optional[int] = None, - all_tasks: Optional[List[int]] = None, + all_tasks: Optional[list[int]] = None, outcome_transform: Optional[OutcomeTransform] = None, input_transform: Optional[InputTransform] = None, pyro_model: Optional[MultitaskSaasPyroModel] = None, @@ -324,7 +325,7 @@ def _check_if_fitted(self): "`fit_fully_bayesian_model_nuts` to fit the model." ) - def load_mcmc_samples(self, mcmc_samples: Dict[str, Tensor]) -> None: + def load_mcmc_samples(self, mcmc_samples: dict[str, Tensor]) -> None: r"""Load the MCMC hyperparameter samples into the model. 
This method will be called by `fit_fully_bayesian_model_nuts` when the model @@ -341,7 +342,7 @@ def load_mcmc_samples(self, mcmc_samples: Dict[str, Tensor]) -> None: def posterior( self, X: Tensor, - output_indices: Optional[List[int]] = None, + output_indices: Optional[list[int]] = None, observation_noise: bool = False, posterior_transform: Optional[PosteriorTransform] = None, **kwargs: Any, diff --git a/botorch/models/gp_regression.py b/botorch/models/gp_regression.py index 733774ed43..cb5e1e0f8c 100644 --- a/botorch/models/gp_regression.py +++ b/botorch/models/gp_regression.py @@ -31,7 +31,7 @@ from __future__ import annotations import warnings -from typing import Dict, NoReturn, Optional, Union +from typing import NoReturn, Optional, Union import torch from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel @@ -215,7 +215,7 @@ def __init__( @classmethod def construct_inputs( cls, training_data: SupervisedDataset, *, task_feature: Optional[int] = None - ) -> Dict[str, Union[BotorchContainer, Tensor]]: + ) -> dict[str, Union[BotorchContainer, Tensor]]: r"""Construct `SingleTaskGP` keyword arguments from a `SupervisedDataset`. Args: diff --git a/botorch/models/gp_regression_fidelity.py b/botorch/models/gp_regression_fidelity.py index 4aa878a358..bb0255e835 100644 --- a/botorch/models/gp_regression_fidelity.py +++ b/botorch/models/gp_regression_fidelity.py @@ -27,7 +27,7 @@ import warnings -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Optional, Union import torch from botorch.exceptions.errors import UnsupportedError @@ -69,7 +69,7 @@ def __init__( train_Y: Tensor, train_Yvar: Optional[Tensor] = None, iteration_fidelity: Optional[int] = None, - data_fidelities: Optional[Union[List[int], Tuple[int]]] = None, + data_fidelities: Optional[Union[list[int], tuple[int]]] = None, data_fidelity: Optional[int] = None, linear_truncated: bool = True, nu: float = 2.5, @@ -166,8 +166,8 @@ def __init__( def construct_inputs( cls, training_data: SupervisedDataset, - fidelity_features: List[int], - ) -> Dict[str, Any]: + fidelity_features: list[int], + ) -> dict[str, Any]: r"""Construct `Model` keyword arguments from a dict of `SupervisedDataset`. Args: @@ -186,7 +186,7 @@ def __init__( train_Y: Tensor, train_Yvar: Tensor, iteration_fidelity: Optional[int] = None, - data_fidelities: Optional[Union[List[int], Tuple[int]]] = None, + data_fidelities: Optional[Union[list[int], tuple[int]]] = None, data_fidelity: Optional[int] = None, linear_truncated: bool = True, nu: float = 2.5, @@ -219,10 +219,10 @@ def _setup_multifidelity_covar_module( dim: int, aug_batch_shape: torch.Size, iteration_fidelity: Optional[int], - data_fidelities: Optional[List[int]], + data_fidelities: Optional[list[int]], linear_truncated: bool, nu: float, -) -> Tuple[ScaleKernel, Dict]: +) -> tuple[ScaleKernel, dict]: """Helper function to get the covariance module and associated subset_batch_dict for the multifidelity setting. 
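A sketch of constructing the multi-fidelity GP whose `data_fidelities` annotation changes above; the data and the choice of fidelity column are made up:

    import torch
    from botorch.models import SingleTaskMultiFidelityGP

    # 10 points in [0, 1]^3, with the last column holding the fidelity level.
    train_X = torch.rand(10, 3, dtype=torch.double)
    train_Y = train_X.norm(dim=-1, keepdim=True)
    model = SingleTaskMultiFidelityGP(train_X, train_Y, data_fidelities=[2])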
diff --git a/botorch/models/gp_regression_mixed.py b/botorch/models/gp_regression_mixed.py index 8e103fc8f4..fbb99b870f 100644 --- a/botorch/models/gp_regression_mixed.py +++ b/botorch/models/gp_regression_mixed.py @@ -6,7 +6,7 @@ from __future__ import annotations -from typing import Any, Callable, Dict, List, Optional +from typing import Any, Callable, Optional import torch from botorch.models.gp_regression import SingleTaskGP @@ -61,10 +61,10 @@ def __init__( self, train_X: Tensor, train_Y: Tensor, - cat_dims: List[int], + cat_dims: list[int], train_Yvar: Optional[Tensor] = None, cont_kernel_factory: Optional[ - Callable[[torch.Size, int, List[int]], Kernel] + Callable[[torch.Size, int, list[int]], Kernel] ] = None, likelihood: Optional[Likelihood] = None, outcome_transform: Optional[OutcomeTransform] = None, # TODO @@ -109,7 +109,7 @@ def __init__( def cont_kernel_factory( batch_shape: torch.Size, ard_num_dims: int, - active_dims: List[int], + active_dims: list[int], ) -> MaternKernel: return MaternKernel( nu=2.5, @@ -185,9 +185,9 @@ def cont_kernel_factory( def construct_inputs( cls, training_data: SupervisedDataset, - categorical_features: List[int], + categorical_features: list[int], likelihood: Optional[Likelihood] = None, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: r"""Construct `Model` keyword arguments from a dict of `SupervisedDataset`. Args: diff --git a/botorch/models/gpytorch.py b/botorch/models/gpytorch.py index 42537a2164..d5adb2b45e 100644 --- a/botorch/models/gpytorch.py +++ b/botorch/models/gpytorch.py @@ -17,7 +17,7 @@ import warnings from abc import ABC from copy import deepcopy -from typing import Any, List, Optional, Tuple, TYPE_CHECKING, Union +from typing import Any, Optional, TYPE_CHECKING, Union import torch from botorch.acquisition.objective import PosteriorTransform @@ -276,7 +276,7 @@ class BatchedMultiOutputGPyTorchModel(GPyTorchModel): @staticmethod def get_batch_dimensions( train_X: Tensor, train_Y: Tensor - ) -> Tuple[torch.Size, torch.Size]: + ) -> tuple[torch.Size, torch.Size]: r"""Get the raw batch shape and output-augmented batch shape of the inputs. Args: @@ -326,7 +326,7 @@ def batch_shape(self) -> torch.Size: def _transform_tensor_args( self, X: Tensor, Y: Tensor, Yvar: Optional[Tensor] = None - ) -> Tuple[Tensor, Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Tensor, Optional[Tensor]]: r"""Transforms tensor arguments: for single output models, the output dimension is squeezed and for multi-output models, the output dimension is transformed into the left-most batch dimension. @@ -405,7 +405,7 @@ def _apply_noise( def posterior( self, X: Tensor, - output_indices: Optional[List[int]] = None, + output_indices: Optional[list[int]] = None, observation_noise: Union[bool, Tensor] = False, posterior_transform: Optional[PosteriorTransform] = None, ) -> Union[GPyTorchPosterior, TransformedPosterior]: @@ -530,7 +530,7 @@ def condition_on_observations( fantasy_model._aug_batch_shape = fantasy_model.train_targets.shape[:-1] return fantasy_model - def subset_output(self, idcs: List[int]) -> BatchedMultiOutputGPyTorchModel: + def subset_output(self, idcs: list[int]) -> BatchedMultiOutputGPyTorchModel: r"""Subset the model along the output dimension. 
Args: @@ -629,7 +629,7 @@ def batch_shape(self) -> torch.Size: def posterior( self, X: Tensor, - output_indices: Optional[List[int]] = None, + output_indices: Optional[list[int]] = None, observation_noise: Union[bool, Tensor] = False, posterior_transform: Optional[PosteriorTransform] = None, ) -> Union[GPyTorchPosterior, PosteriorList]: @@ -834,7 +834,7 @@ def _apply_noise( def posterior( self, X: Tensor, - output_indices: Optional[List[int]] = None, + output_indices: Optional[list[int]] = None, observation_noise: Union[bool, Tensor] = False, posterior_transform: Optional[PosteriorTransform] = None, ) -> Union[GPyTorchPosterior, TransformedPosterior]: @@ -917,7 +917,7 @@ def posterior( return posterior_transform(posterior) return posterior - def subset_output(self, idcs: List[int]) -> MultiTaskGPyTorchModel: + def subset_output(self, idcs: list[int]) -> MultiTaskGPyTorchModel: r"""Returns a new model that only outputs a subset of the outputs. Args: diff --git a/botorch/models/higher_order_gp.py b/botorch/models/higher_order_gp.py index b31639284d..6f0fe6eb2c 100644 --- a/botorch/models/higher_order_gp.py +++ b/botorch/models/higher_order_gp.py @@ -16,7 +16,7 @@ import warnings from contextlib import ExitStack -from typing import Any, List, Optional, Tuple, Union +from typing import Any, Optional, Union import torch from botorch.acquisition.objective import PosteriorTransform @@ -92,7 +92,7 @@ def _return_to_output_shape(self, tsr: Tensor) -> Tensor: def forward( self, Y: Tensor, Yvar: Optional[Tensor] = None - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Optional[Tensor]]: Y = self._squeeze_to_single_output(Y) if Yvar is not None: Yvar = self._squeeze_to_single_output(Yvar) @@ -108,7 +108,7 @@ def forward( def untransform( self, Y: Tensor, Yvar: Optional[Tensor] = None - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Optional[Tensor]]: Y = self._squeeze_to_single_output(Y) if Yvar is not None: Yvar = self._squeeze_to_single_output(Yvar) @@ -179,8 +179,8 @@ def __init__( train_X: Tensor, train_Y: Tensor, likelihood: Optional[Likelihood] = None, - covar_modules: Optional[List[Kernel]] = None, - num_latent_dims: Optional[List[int]] = None, + covar_modules: Optional[list[Kernel]] = None, + num_latent_dims: Optional[list[int]] = None, learn_latent_pars: bool = True, latent_init: str = "default", outcome_transform: Optional[OutcomeTransform] = None, @@ -280,7 +280,7 @@ def __init__( def _initialize_latents( self, latent_init: str, - num_latent_dims: List[int], + num_latent_dims: list[int], learn_latent_pars: bool, device: torch.device, dtype: torch.dtype, @@ -435,7 +435,7 @@ def condition_on_observations( def posterior( self, X: Tensor, - output_indices: Optional[List[int]] = None, + output_indices: Optional[list[int]] = None, observation_noise: Union[bool, Tensor] = False, posterior_transform: Optional[PosteriorTransform] = None, ) -> GPyTorchPosterior: diff --git a/botorch/models/kernels/contextual_lcea.py b/botorch/models/kernels/contextual_lcea.py index 3f3a0cb312..c33526ec2c 100644 --- a/botorch/models/kernels/contextual_lcea.py +++ b/botorch/models/kernels/contextual_lcea.py @@ -4,7 +4,7 @@ # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
-from typing import Any, Dict, List, Optional +from typing import Any, Optional import torch from gpytorch.constraints import Positive @@ -17,7 +17,7 @@ from torch.nn import ModuleList -def get_order(indices: List[int]) -> List[int]: +def get_order(indices: list[int]) -> list[int]: r"""Get the order indices as integers ranging from 0 to the number of indices. Args: @@ -29,7 +29,7 @@ def get_order(indices: List[int]) -> List[int]: return [i % len(indices) for i in indices] -def is_contiguous(indices: List[int]) -> bool: +def is_contiguous(indices: list[int]) -> bool: r"""Check if the list of integers is contiguous. Args: @@ -41,7 +41,7 @@ def is_contiguous(indices: List[int]) -> bool: return set(indices) == set(range(min_idx, min_idx + len(indices))) -def get_permutation(decomposition: Dict[str, List[int]]) -> Optional[List[int]]: +def get_permutation(decomposition: dict[str, list[int]]) -> Optional[list[int]]: """Construct permutation to reorder the parameters such that: 1) the parameters for each context are contiguous. @@ -74,7 +74,7 @@ def get_permutation(decomposition: Dict[str, List[int]]) -> Optional[List[int]]: return permutation -def _create_new_permutation(decomposition: Dict[str, List[int]]) -> List[int]: +def _create_new_permutation(decomposition: dict[str, list[int]]) -> list[int]: # make contiguous and ordered permutation = [] for active_parameters in decomposition.values(): @@ -95,13 +95,13 @@ class LCEAKernel(Kernel): def __init__( self, - decomposition: Dict[str, List[int]], + decomposition: dict[str, list[int]], batch_shape: torch.Size, train_embedding: bool = True, - cat_feature_dict: Optional[Dict] = None, - embs_feature_dict: Optional[Dict] = None, - embs_dim_list: Optional[List[int]] = None, - context_weight_dict: Optional[Dict] = None, + cat_feature_dict: Optional[dict] = None, + embs_feature_dict: Optional[dict] = None, + embs_dim_list: Optional[list[int]] = None, + context_weight_dict: Optional[dict] = None, device: Optional[torch.device] = None, ) -> None: r""" @@ -216,9 +216,9 @@ def _set_outputscale_list(self, value: Tensor) -> None: def _set_context_features( self, - cat_feature_dict: Optional[Dict] = None, - embs_feature_dict: Optional[Dict] = None, - embs_dim_list: Optional[List[int]] = None, + cat_feature_dict: Optional[dict] = None, + embs_feature_dict: Optional[dict] = None, + embs_dim_list: Optional[list[int]] = None, ) -> None: """Set context categorical features and continuous embedding features. If cat_feature_dict is None, context indices will be used; If embs_dim_list diff --git a/botorch/models/kernels/contextual_sac.py b/botorch/models/kernels/contextual_sac.py index 0e53a516d3..dcc13f2f29 100644 --- a/botorch/models/kernels/contextual_sac.py +++ b/botorch/models/kernels/contextual_sac.py @@ -4,7 +4,7 @@ # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
-from typing import Any, Dict, List, Optional +from typing import Any, Optional import torch from gpytorch.kernels.kernel import Kernel @@ -42,7 +42,7 @@ class SACKernel(Kernel): def __init__( self, - decomposition: Dict[str, List[int]], + decomposition: dict[str, list[int]], batch_shape: torch.Size, device: Optional[torch.device] = None, ) -> None: diff --git a/botorch/models/kernels/infinite_width_bnn.py b/botorch/models/kernels/infinite_width_bnn.py index 356c18a40e..a3eefe2479 100644 --- a/botorch/models/kernels/infinite_width_bnn.py +++ b/botorch/models/kernels/infinite_width_bnn.py @@ -6,7 +6,7 @@ from __future__ import annotations -from typing import Optional, Tuple +from typing import Optional import torch from gpytorch.constraints import Positive @@ -37,7 +37,7 @@ def __init__( self, depth: int = 3, batch_shape: Optional[torch.Size] = None, - active_dims: Optional[Tuple[int, ...]] = None, + active_dims: Optional[tuple[int, ...]] = None, acos_eps: float = 1e-7, device: Optional[torch.device] = None, ) -> None: diff --git a/botorch/models/kernels/linear_truncated_fidelity.py b/botorch/models/kernels/linear_truncated_fidelity.py index d102bf588f..c5187ebe80 100644 --- a/botorch/models/kernels/linear_truncated_fidelity.py +++ b/botorch/models/kernels/linear_truncated_fidelity.py @@ -6,7 +6,7 @@ from __future__ import annotations -from typing import Any, List, Optional +from typing import Any, Optional import torch from botorch.exceptions import UnsupportedError @@ -54,7 +54,7 @@ class LinearTruncatedFidelityKernel(Kernel): def __init__( # noqa C901 self, - fidelity_dims: List[int], + fidelity_dims: list[int], dimension: Optional[int] = None, power_prior: Optional[Prior] = None, power_constraint: Optional[Interval] = None, diff --git a/botorch/models/kernels/orthogonal_additive_kernel.py b/botorch/models/kernels/orthogonal_additive_kernel.py index 65a8b8e2a1..6bfef33594 100644 --- a/botorch/models/kernels/orthogonal_additive_kernel.py +++ b/botorch/models/kernels/orthogonal_additive_kernel.py @@ -4,7 +4,7 @@ # LICENSE file in the root directory of this source tree. import math -from typing import List, Optional, Tuple +from typing import Optional import numpy import torch @@ -237,7 +237,7 @@ def leggauss( b: float = 1.0, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: """Computes Gauss-Legendre quadrature nodes and weights. Wraps `numpy.polynomial.legendre.leggauss` and returns Torch Tensors. 
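As context for the `leggauss` wrapper above, a plain-numpy sketch of Gauss-Legendre quadrature with the affine rescaling from `[-1, 1]` to `[a, b]` (here approximating the integral of x^2 over [0, 1]); this is not the BoTorch wrapper itself:

    import numpy as np

    # Gauss-Legendre nodes/weights on [-1, 1], then affinely mapped to [a, b].
    nodes, weights = np.polynomial.legendre.leggauss(7)
    a, b = 0.0, 1.0
    x = 0.5 * (b - a) * (nodes + 1.0) + a
    w = 0.5 * (b - a) * weights
    integral = (w * x**2).sum()  # ~= 1/3, the exact value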
@@ -272,7 +272,7 @@ def _check_hypercube(x: Tensor, name: str) -> None: raise ValueError(name + " is not in hypercube [0, 1]^d.") -def _reverse_triu_indices(d: int) -> List[int]: +def _reverse_triu_indices(d: int) -> list[int]: """Computes a list of indices which, upon indexing a `d * (d - 1) / 2 + 1`-dim Tensor whose last element is zero, will lead to a vectorized representation of an upper-triangular matrix, whose diagonal is set to zero and whose super-diagonal diff --git a/botorch/models/likelihoods/pairwise.py b/botorch/models/likelihoods/pairwise.py index d5a1c52af4..3d08169c6d 100644 --- a/botorch/models/likelihoods/pairwise.py +++ b/botorch/models/likelihoods/pairwise.py @@ -12,7 +12,6 @@ import math from abc import ABC, abstractmethod -from typing import Tuple import torch from botorch.utils.probability.utils import ( @@ -121,7 +120,7 @@ def _calc_z(self, utility: Tensor, D: Tensor) -> Tensor: z = z.clamp(-self._zlim, self._zlim).squeeze(-1) return z - def _calc_z_derived(self, z: Tensor) -> Tuple[Tensor, Tensor, Tensor]: + def _calc_z_derived(self, z: Tensor) -> tuple[Tensor, Tensor, Tensor]: """Calculate auxiliary statistics derived from z, including log pdf, log cdf, and the hazard function (pdf divided by cdf) diff --git a/botorch/models/model.py b/botorch/models/model.py index 21e5bcbd2a..f42fb46f6d 100644 --- a/botorch/models/model.py +++ b/botorch/models/model.py @@ -15,18 +15,8 @@ import warnings from abc import ABC, abstractmethod from collections import defaultdict -from typing import ( - Any, - Callable, - Dict, - List, - Mapping, - Optional, - Set, - TYPE_CHECKING, - TypeVar, - Union, -) +from collections.abc import Mapping +from typing import Any, Callable, Optional, TYPE_CHECKING, TypeVar, Union import numpy as np import torch @@ -92,7 +82,7 @@ class Model(Module, ABC): def posterior( self, X: Tensor, - output_indices: Optional[List[int]] = None, + output_indices: Optional[list[int]] = None, observation_noise: Union[bool, Tensor] = False, posterior_transform: Optional[PosteriorTransform] = None, ) -> Posterior: @@ -144,7 +134,7 @@ def num_outputs(self) -> int: cls_name = self.__class__.__name__ raise NotImplementedError(f"{cls_name} does not define num_outputs property") - def subset_output(self, idcs: List[int]) -> Model: + def subset_output(self, idcs: list[int]) -> Model: r"""Subset the model along the output dimension. Args: @@ -185,7 +175,7 @@ def condition_on_observations(self, X: Tensor, Y: Tensor, **kwargs: Any) -> Mode def construct_inputs( cls, training_data: SupervisedDataset, - ) -> Dict[str, Union[BotorchContainer, Tensor]]: + ) -> dict[str, Union[BotorchContainer, Tensor]]: """ Construct `Model` keyword arguments from a `SupervisedDataset`. @@ -275,7 +265,7 @@ def train(self, mode: bool = True) -> Model: return super().train(mode=mode) @property - def dtypes_of_buffers(self) -> Set[torch.dtype]: + def dtypes_of_buffers(self) -> set[torch.dtype]: return {t.dtype for t in self.buffers() if t is not None} @@ -443,8 +433,8 @@ def __init__(self, *models: Model) -> None: self.models = ModuleList(models) def _get_group_subset_indices( - self, idcs: Optional[List[int]] - ) -> Dict[int, List[int]]: + self, idcs: Optional[list[int]] + ) -> dict[int, list[int]]: r"""Convert global subset indices to indices for the individual models. 
Args: @@ -460,7 +450,7 @@ def _get_group_subset_indices( output_sizes = [model.num_outputs for model in self.models] cum_output_sizes = np.cumsum(output_sizes) idcs = [idx % cum_output_sizes[-1] for idx in idcs] - group_indices: Dict[int, List[int]] = defaultdict(list) + group_indices: dict[int, list[int]] = defaultdict(list) for idx in idcs: grp_idx = np.argwhere(idx < cum_output_sizes)[0].item() sub_idx = idx - int(np.sum(output_sizes[:grp_idx])) @@ -470,7 +460,7 @@ def _get_group_subset_indices( def posterior( self, X: Tensor, - output_indices: Optional[List[int]] = None, + output_indices: Optional[list[int]] = None, observation_noise: Union[bool, Tensor] = False, posterior_transform: Optional[Callable[[PosteriorList], Posterior]] = None, ) -> Posterior: @@ -551,7 +541,7 @@ def num_outputs(self) -> int: """ return sum(model.num_outputs for model in self.models) - def subset_output(self, idcs: List[int]) -> Model: + def subset_output(self, idcs: list[int]) -> Model: r"""Subset the model along the output dimension. Args: @@ -581,7 +571,7 @@ def subset_output(self, idcs: List[int]) -> Model: return subset_models[0] return self.__class__(*subset_models) - def transform_inputs(self, X: Tensor) -> List[Tensor]: + def transform_inputs(self, X: Tensor) -> list[Tensor]: r"""Individually transform the inputs for each model. Args: diff --git a/botorch/models/model_list_gp_regression.py b/botorch/models/model_list_gp_regression.py index c7251a0572..626dd2c279 100644 --- a/botorch/models/model_list_gp_regression.py +++ b/botorch/models/model_list_gp_regression.py @@ -10,7 +10,7 @@ from __future__ import annotations -from typing import Any, List +from typing import Any import torch @@ -56,7 +56,7 @@ def __init__(self, *gp_models: GPyTorchModel) -> None: # pyre-fixme[14]: Inconsistent override. Here `X` is a List[Tensor], but in the # parent method it's a Tensor. def condition_on_observations( - self, X: List[Tensor], Y: Tensor, **kwargs: Any + self, X: list[Tensor], Y: Tensor, **kwargs: Any ) -> ModelListGP: r"""Condition the model on new observations. diff --git a/botorch/models/multitask.py b/botorch/models/multitask.py index 814af5e510..575577b038 100644 --- a/botorch/models/multitask.py +++ b/botorch/models/multitask.py @@ -30,7 +30,7 @@ from __future__ import annotations import math -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Optional, Union import torch from botorch.acquisition.objective import PosteriorTransform @@ -147,9 +147,9 @@ def __init__( covar_module: Optional[Module] = None, likelihood: Optional[Likelihood] = None, task_covar_prior: Optional[Prior] = None, - output_tasks: Optional[List[int]] = None, + output_tasks: Optional[list[int]] = None, rank: Optional[int] = None, - all_tasks: Optional[List[int]] = None, + all_tasks: Optional[list[int]] = None, input_transform: Optional[InputTransform] = None, outcome_transform: Optional[OutcomeTransform] = None, ) -> None: @@ -271,7 +271,7 @@ def __init__( self.outcome_transform = outcome_transform self.to(train_X) - def _split_inputs(self, x: Tensor) -> Tuple[Tensor, Tensor]: + def _split_inputs(self, x: Tensor) -> tuple[Tensor, Tensor]: r"""Extracts base features and task indices from input data. 
Args: @@ -316,8 +316,8 @@ def get_all_tasks( cls, train_X: Tensor, task_feature: int, - output_tasks: Optional[List[int]] = None, - ) -> Tuple[List[int], int, int]: + output_tasks: Optional[list[int]] = None, + ) -> tuple[list[int], int, int]: if train_X.ndim != 2: # Currently, batch mode MTGPs are blocked upstream in GPyTorch raise ValueError(f"Unsupported shape {train_X.shape} for train_X.") @@ -336,11 +336,11 @@ def construct_inputs( cls, training_data: Union[SupervisedDataset, MultiTaskDataset], task_feature: int, - output_tasks: Optional[List[int]] = None, + output_tasks: Optional[list[int]] = None, task_covar_prior: Optional[Prior] = None, prior_config: Optional[dict] = None, rank: Optional[int] = None, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: r"""Construct `Model` keyword arguments from a dataset and other args. Args: @@ -569,7 +569,7 @@ def predictive_mean_cache(self): def posterior( self, X: Tensor, - output_indices: Optional[List[int]] = None, + output_indices: Optional[list[int]] = None, observation_noise: Union[bool, Tensor] = False, posterior_transform: Optional[PosteriorTransform] = None, ) -> MultitaskGPPosterior: diff --git a/botorch/models/pairwise_gp.py b/botorch/models/pairwise_gp.py index 83250822f9..e45f058755 100644 --- a/botorch/models/pairwise_gp.py +++ b/botorch/models/pairwise_gp.py @@ -21,8 +21,9 @@ from __future__ import annotations import warnings +from collections.abc import Iterable from copy import deepcopy -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Optional, Union import numpy as np import torch @@ -59,7 +60,7 @@ # Helper functions def _check_strict_input( - inputs: Iterable[Tensor], t_inputs: List[Tensor], target_or_inputs: str + inputs: Iterable[Tensor], t_inputs: list[Tensor], target_or_inputs: str ): for input_, t_input in zip(inputs, t_inputs or (None,)): for attr in {"shape", "dtype", "device"}: @@ -380,7 +381,7 @@ def _prior_mean(self, X: Tensor) -> Union[Tensor, LinearOperator]: """ return self.mean_module(X) - def _prior_predict(self, X: Tensor) -> Tuple[Tensor, Tensor]: + def _prior_predict(self, X: Tensor) -> tuple[Tensor, Tensor]: r"""Predict utility based on prior info only Args: @@ -618,7 +619,7 @@ def _update(self, datapoints: Tensor, **kwargs) -> None: dp=datapoints, x0=f.clone().requires_grad_(True), max_iter=2 ) - def _transform_batch_shape(self, X: Tensor, X_new: Tensor) -> Tuple[Tensor, Tensor]: + def _transform_batch_shape(self, X: Tensor, X_new: Tensor) -> tuple[Tensor, Tensor]: r"""Transform X and X_new into the same shape Transform the batch shape of X to be compatible @@ -702,7 +703,7 @@ def _util_newton_updates( def _consolidate_duplicates( self, datapoints: Tensor, comparisons: Tensor - ) -> Tuple[Tensor, Tensor]: + ) -> tuple[Tensor, Tensor]: """Consolidate and cache datapoints and comparisons""" # check if consolidated datapoints/comparisons are cached if ( @@ -781,7 +782,7 @@ def batch_shape(self) -> torch.Size: def construct_inputs( cls, training_data: SupervisedDataset, - ) -> Dict[str, Tensor]: + ) -> dict[str, Tensor]: r""" Construct `Model` keyword arguments from a `RankingDataset`. @@ -897,7 +898,7 @@ def set_train_data( self.to(self.datapoints) def load_state_dict( - self, state_dict: Dict[str, Tensor], strict: bool = False + self, state_dict: dict[str, Tensor], strict: bool = False ) -> _IncompatibleKeys: r"""Removes data related buffers from the `state_dict` and calls `super().load_state_dict` with `strict=False`. 
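A minimal sketch of constructing the `PairwiseGP` preference model edited above; the comparison data is invented:

    import torch
    from botorch.models.pairwise_gp import PairwiseGP

    datapoints = torch.rand(5, 2, dtype=torch.double)
    # Each row [i, j] records that datapoint i was preferred over datapoint j.
    comparisons = torch.tensor([[0, 1], [2, 3], [4, 1]])
    model = PairwiseGP(datapoints, comparisons)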
@@ -920,13 +921,13 @@ def load_state_dict( def _load_from_state_dict( self, - state_dict: Dict[str, Tensor], + state_dict: dict[str, Tensor], prefix: str, - local_metadata: Dict[str, Any], + local_metadata: dict[str, Any], strict: bool, - missing_keys: List[str], - unexpected_keys: List[str], - error_msgs: List[str], + missing_keys: list[str], + unexpected_keys: list[str], + error_msgs: list[str], ) -> None: super()._load_from_state_dict( state_dict={ @@ -1067,7 +1068,7 @@ def forward(self, datapoints: Tensor) -> MultivariateNormal: def posterior( self, X: Tensor, - output_indices: Optional[List[int]] = None, + output_indices: Optional[list[int]] = None, observation_noise: bool = False, posterior_transform: Optional[PosteriorTransform] = None, ) -> Posterior: diff --git a/botorch/models/transforms/factory.py b/botorch/models/transforms/factory.py index 847fdf1b7c..faaca1d019 100644 --- a/botorch/models/transforms/factory.py +++ b/botorch/models/transforms/factory.py @@ -7,7 +7,7 @@ from __future__ import annotations from collections import OrderedDict -from typing import Dict, List, Optional +from typing import Optional from botorch.models.transforms.input import ( ChainedInputTransform, @@ -20,8 +20,8 @@ def get_rounding_input_transform( one_hot_bounds: Tensor, - integer_indices: Optional[List[int]] = None, - categorical_features: Optional[Dict[int, int]] = None, + integer_indices: Optional[list[int]] = None, + categorical_features: Optional[dict[int, int]] = None, initialization: bool = False, return_numeric: bool = False, approximate: bool = False, diff --git a/botorch/models/transforms/input.py b/botorch/models/transforms/input.py index 40c5e5ebc3..d56a1db2e3 100644 --- a/botorch/models/transforms/input.py +++ b/botorch/models/transforms/input.py @@ -17,7 +17,7 @@ from abc import ABC, abstractmethod from collections import OrderedDict -from typing import Any, Callable, Dict, List, Optional, Union +from typing import Any, Callable, Optional, Union from warnings import warn import numpy as np @@ -322,7 +322,7 @@ def __init__( d: int, coefficient: Tensor, offset: Tensor, - indices: Optional[Union[List[int], Tensor]] = None, + indices: Optional[Union[list[int], Tensor]] = None, batch_shape: torch.Size = torch.Size(), # noqa: B008 transform_on_train: bool = True, transform_on_eval: bool = True, @@ -499,7 +499,7 @@ class Normalize(AffineInputTransform): def __init__( self, d: int, - indices: Optional[Union[List[int], Tensor]] = None, + indices: Optional[Union[list[int], Tensor]] = None, bounds: Optional[Tensor] = None, batch_shape: torch.Size = torch.Size(), # noqa: B008 transform_on_train: bool = True, @@ -621,7 +621,7 @@ def _update_coefficients(self, X) -> None: self._coefficient = torch.where(almost_zero, 1.0, coefficient) self._offset = torch.where(almost_zero, 0.0, offset) - def get_init_args(self) -> Dict[str, Any]: + def get_init_args(self) -> dict[str, Any]: r"""Get the arguments necessary to construct an exact copy of the transform.""" return { "d": self._d, @@ -648,7 +648,7 @@ class InputStandardize(AffineInputTransform): def __init__( self, d: int, - indices: Optional[Union[List[int], Tensor]] = None, + indices: Optional[Union[list[int], Tensor]] = None, batch_shape: torch.Size = torch.Size(), # noqa: B008 transform_on_train: bool = True, transform_on_eval: bool = True, @@ -770,8 +770,8 @@ class Round(InputTransform, Module): def __init__( self, - integer_indices: Union[List[int], LongTensor, None] = None, - categorical_features: Optional[Dict[int, int]] = None, + 
integer_indices: Union[list[int], LongTensor, None] = None, + categorical_features: Optional[dict[int, int]] = None, transform_on_train: bool = True, transform_on_eval: bool = True, transform_on_fantasize: bool = True, @@ -850,7 +850,7 @@ def equals(self, other: InputTransform) -> bool: and self.tau == other.tau ) - def get_init_args(self) -> Dict[str, Any]: + def get_init_args(self) -> dict[str, Any]: r"""Get the arguments necessary to construct an exact copy of the transform.""" return { "integer_indices": self.integer_indices, @@ -868,7 +868,7 @@ class Log10(ReversibleInputTransform, Module): def __init__( self, - indices: List[int], + indices: list[int], transform_on_train: bool = True, transform_on_eval: bool = True, transform_on_fantasize: bool = True, @@ -939,7 +939,7 @@ class Warp(ReversibleInputTransform, GPyTorchModule): def __init__( self, - indices: List[int], + indices: list[int], transform_on_train: bool = True, transform_on_eval: bool = True, transform_on_fantasize: bool = True, @@ -1132,8 +1132,8 @@ def __init__( self, feature_set: Optional[Tensor] = None, f: Optional[Callable[[Tensor], Tensor]] = None, - indices: Optional[List[int]] = None, - fkwargs: Optional[Dict[str, Any]] = None, + indices: Optional[list[int]] = None, + fkwargs: Optional[dict[str, Any]] = None, skip_expand: bool = False, transform_on_train: bool = False, transform_on_eval: bool = True, @@ -1330,7 +1330,7 @@ def __init__( self, perturbation_set: Union[Tensor, Callable[[Tensor], Tensor]], bounds: Optional[Tensor] = None, - indices: Optional[List[int]] = None, + indices: Optional[list[int]] = None, multiplicative: bool = False, transform_on_train: bool = False, transform_on_eval: bool = True, @@ -1447,7 +1447,7 @@ class OneHotToNumeric(InputTransform, Module): def __init__( self, dim: int, - categorical_features: Optional[Dict[int, int]] = None, + categorical_features: Optional[dict[int, int]] = None, transform_on_train: bool = True, transform_on_eval: bool = True, transform_on_fantasize: bool = True, diff --git a/botorch/models/transforms/outcome.py b/botorch/models/transforms/outcome.py index a797254724..ed923cde79 100644 --- a/botorch/models/transforms/outcome.py +++ b/botorch/models/transforms/outcome.py @@ -24,7 +24,7 @@ from abc import ABC, abstractmethod from collections import OrderedDict -from typing import List, Optional, Tuple, Union +from typing import Optional, Union import torch from botorch.models.transforms.utils import ( @@ -44,7 +44,7 @@ class OutcomeTransform(Module, ABC): @abstractmethod def forward( self, Y: Tensor, Yvar: Optional[Tensor] = None - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Optional[Tensor]]: r"""Transform the outcomes in a model's training targets Args: @@ -60,7 +60,7 @@ def forward( """ pass # pragma: no cover - def subset_output(self, idcs: List[int]) -> OutcomeTransform: + def subset_output(self, idcs: list[int]) -> OutcomeTransform: r"""Subset the transform along the output dimension. 
This functionality is used to properly treat outcome transformations @@ -79,7 +79,7 @@ def subset_output(self, idcs: List[int]) -> OutcomeTransform: def untransform( self, Y: Tensor, Yvar: Optional[Tensor] = None - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Optional[Tensor]]: r"""Un-transform previously transformed outcomes Args: @@ -141,7 +141,7 @@ def __init__(self, **transforms: OutcomeTransform) -> None: def forward( self, Y: Tensor, Yvar: Optional[Tensor] = None - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Optional[Tensor]]: r"""Transform the outcomes in a model's training targets Args: @@ -159,7 +159,7 @@ def forward( Y, Yvar = tf.forward(Y, Yvar) return Y, Yvar - def subset_output(self, idcs: List[int]) -> OutcomeTransform: + def subset_output(self, idcs: list[int]) -> OutcomeTransform: r"""Subset the transform along the output dimension. Args: @@ -174,7 +174,7 @@ def subset_output(self, idcs: List[int]) -> OutcomeTransform: def untransform( self, Y: Tensor, Yvar: Optional[Tensor] = None - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Optional[Tensor]]: r"""Un-transform previously transformed outcomes Args: @@ -226,7 +226,7 @@ class Standardize(OutcomeTransform): def __init__( self, m: int, - outputs: Optional[List[int]] = None, + outputs: Optional[list[int]] = None, batch_shape: torch.Size = torch.Size(), # noqa: B008 min_stdv: float = 1e-8, ) -> None: @@ -252,7 +252,7 @@ def __init__( def forward( self, Y: Tensor, Yvar: Optional[Tensor] = None - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Optional[Tensor]]: r"""Standardize outcomes. If the module is in train mode, this updates the module state (i.e. the @@ -306,7 +306,7 @@ def forward( Yvar_tf = Yvar / self._stdvs_sq if Yvar is not None else None return Y_tf, Yvar_tf - def subset_output(self, idcs: List[int]) -> OutcomeTransform: + def subset_output(self, idcs: list[int]) -> OutcomeTransform: r"""Subset the transform along the output dimension. Args: @@ -341,7 +341,7 @@ def subset_output(self, idcs: List[int]) -> OutcomeTransform: def untransform( self, Y: Tensor, Yvar: Optional[Tensor] = None - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Optional[Tensor]]: r"""Un-standardize outcomes. Args: @@ -455,7 +455,7 @@ class Log(OutcomeTransform): log-transformed outcomes and un-transform the model posterior of that GP. """ - def __init__(self, outputs: Optional[List[int]] = None) -> None: + def __init__(self, outputs: Optional[list[int]] = None) -> None: r"""Log-transform outcomes. Args: @@ -465,7 +465,7 @@ def __init__(self, outputs: Optional[List[int]] = None) -> None: super().__init__() self._outputs = outputs - def subset_output(self, idcs: List[int]) -> OutcomeTransform: + def subset_output(self, idcs: list[int]) -> OutcomeTransform: r"""Subset the transform along the output dimension. Args: @@ -489,7 +489,7 @@ def subset_output(self, idcs: List[int]) -> OutcomeTransform: def forward( self, Y: Tensor, Yvar: Optional[Tensor] = None - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Optional[Tensor]]: r"""Log-transform outcomes. Args: @@ -522,7 +522,7 @@ def forward( def untransform( self, Y: Tensor, Yvar: Optional[Tensor] = None - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Optional[Tensor]]: r"""Un-transform log-transformed outcomes Args: @@ -583,7 +583,7 @@ class Power(OutcomeTransform): power-transformed outcomes and un-transform the model posterior of that GP. 
""" - def __init__(self, power: float, outputs: Optional[List[int]] = None) -> None: + def __init__(self, power: float, outputs: Optional[list[int]] = None) -> None: r"""Power-transform outcomes. Args: @@ -594,7 +594,7 @@ def __init__(self, power: float, outputs: Optional[List[int]] = None) -> None: self._outputs = outputs self.power = power - def subset_output(self, idcs: List[int]) -> OutcomeTransform: + def subset_output(self, idcs: list[int]) -> OutcomeTransform: r"""Subset the transform along the output dimension. Args: @@ -618,7 +618,7 @@ def subset_output(self, idcs: List[int]) -> OutcomeTransform: def forward( self, Y: Tensor, Yvar: Optional[Tensor] = None - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Optional[Tensor]]: r"""Power-transform outcomes. Args: @@ -651,7 +651,7 @@ def forward( def untransform( self, Y: Tensor, Yvar: Optional[Tensor] = None - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Optional[Tensor]]: r"""Un-transform power-transformed outcomes Args: @@ -709,7 +709,7 @@ class Bilog(OutcomeTransform): constraints as it magnifies values near zero and flattens extreme values. """ - def __init__(self, outputs: Optional[List[int]] = None) -> None: + def __init__(self, outputs: Optional[list[int]] = None) -> None: r"""Bilog-transform outcomes. Args: @@ -719,7 +719,7 @@ def __init__(self, outputs: Optional[List[int]] = None) -> None: super().__init__() self._outputs = outputs - def subset_output(self, idcs: List[int]) -> OutcomeTransform: + def subset_output(self, idcs: list[int]) -> OutcomeTransform: r"""Subset the transform along the output dimension. Args: @@ -743,7 +743,7 @@ def subset_output(self, idcs: List[int]) -> OutcomeTransform: def forward( self, Y: Tensor, Yvar: Optional[Tensor] = None - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Optional[Tensor]]: r"""Bilog-transform outcomes. 
Args: @@ -775,7 +775,7 @@ def forward( def untransform( self, Y: Tensor, Yvar: Optional[Tensor] = None - ) -> Tuple[Tensor, Optional[Tensor]]: + ) -> tuple[Tensor, Optional[Tensor]]: r"""Un-transform bilog-transformed outcomes Args: diff --git a/botorch/models/transforms/utils.py b/botorch/models/transforms/utils.py index 6d1f9411d2..a956ea32b2 100644 --- a/botorch/models/transforms/utils.py +++ b/botorch/models/transforms/utils.py @@ -7,13 +7,12 @@ from __future__ import annotations from functools import wraps -from typing import Tuple import torch from torch import Tensor -def lognorm_to_norm(mu: Tensor, Cov: Tensor) -> Tuple[Tensor, Tensor]: +def lognorm_to_norm(mu: Tensor, Cov: Tensor) -> tuple[Tensor, Tensor]: """Compute mean and covariance of a MVN from those of the associated log-MVN If `Y` is log-normal with mean mu_ln and covariance Cov_ln, then @@ -38,7 +37,7 @@ def lognorm_to_norm(mu: Tensor, Cov: Tensor) -> Tuple[Tensor, Tensor]: return mu_n, Cov_n -def norm_to_lognorm(mu: Tensor, Cov: Tensor) -> Tuple[Tensor, Tensor]: +def norm_to_lognorm(mu: Tensor, Cov: Tensor) -> tuple[Tensor, Tensor]: """Compute mean and covariance of a log-MVN from its MVN sufficient statistics If `X ~ N(mu, Cov)` and `Y = exp(X)`, then `Y` is log-normal with diff --git a/botorch/models/utils/assorted.py b/botorch/models/utils/assorted.py index 0272d8057f..fa1e62f30a 100644 --- a/botorch/models/utils/assorted.py +++ b/botorch/models/utils/assorted.py @@ -9,8 +9,9 @@ from __future__ import annotations import warnings +from collections.abc import Iterator from contextlib import contextmanager, ExitStack -from typing import Iterator, List, Optional, Tuple +from typing import Optional import torch from botorch import settings @@ -21,7 +22,7 @@ from torch import Tensor -def _make_X_full(X: Tensor, output_indices: List[int], tf: int) -> Tensor: +def _make_X_full(X: Tensor, output_indices: list[int], tf: int) -> Tensor: r"""Helper to construct input tensor with task indices. Args: @@ -49,7 +50,7 @@ def multioutput_to_batch_mode_transform( train_Y: Tensor, num_outputs: int, train_Yvar: Optional[Tensor] = None, -) -> Tuple[Tensor, Tensor, Optional[Tensor]]: +) -> tuple[Tensor, Tensor, Optional[Tensor]]: r"""Transforms training inputs for a multi-output model. Used for multi-output models that internally are represented by a @@ -84,7 +85,7 @@ def multioutput_to_batch_mode_transform( return train_X, train_Y, train_Yvar -def add_output_dim(X: Tensor, original_batch_shape: torch.Size) -> Tuple[Tensor, int]: +def add_output_dim(X: Tensor, original_batch_shape: torch.Size) -> tuple[Tensor, int]: r"""Insert the output dimension at the correct location. The trailing batch dimensions of X must match the original batch dimensions @@ -137,7 +138,7 @@ def check_min_max_scaling( strict: bool = False, atol: float = 1e-2, raise_on_fail: bool = False, - ignore_dims: Optional[List[int]] = None, + ignore_dims: Optional[list[int]] = None, ) -> None: r"""Check that tensor is normalized to the unit cube. @@ -220,7 +221,7 @@ def validate_input_scaling( train_Y: Tensor, train_Yvar: Optional[Tensor] = None, raise_on_fail: bool = False, - ignore_X_dims: Optional[List[int]] = None, + ignore_X_dims: Optional[list[int]] = None, ) -> None: r"""Helper function to validate input data to models. 
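A round-trip sketch of the `lognorm_to_norm` / `norm_to_lognorm` moment conversions annotated above, with made-up moments and shapes per their docstrings:

    import torch
    from botorch.models.transforms.utils import lognorm_to_norm, norm_to_lognorm

    mu = torch.zeros(1, 2, dtype=torch.double)                 # b x n mean of X ~ N(mu, Cov)
    Cov = 0.1 * torch.eye(2, dtype=torch.double).unsqueeze(0)  # b x n x n covariance
    mu_ln, Cov_ln = norm_to_lognorm(mu, Cov)        # moments of Y = exp(X)
    mu_rt, Cov_rt = lognorm_to_norm(mu_ln, Cov_ln)  # recovers (mu, Cov) up to rounding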
@@ -260,7 +261,7 @@ def validate_input_scaling( check_standardization(Y=train_Y, raise_on_fail=raise_on_fail) -def mod_batch_shape(module: Module, names: List[str], b: int) -> None: +def mod_batch_shape(module: Module, names: list[str], b: int) -> None: r"""Recursive helper to modify gpytorch modules' batch shape attribute. Modifies the module in-place. @@ -300,7 +301,7 @@ def detect_duplicates( X: Tensor, rtol: float = 0, atol: float = 1e-8, -) -> Iterator[Tuple[int, int]]: +) -> Iterator[tuple[int, int]]: """Returns an iterator over index pairs `(duplicate index, original index)` for all duplicate entries of `X`. Only 2-d Tensors are supported. @@ -332,7 +333,7 @@ def detect_duplicates( def consolidate_duplicates( X: Tensor, Y: Tensor, rtol: float = 0.0, atol: float = 1e-8 -) -> Tuple[Tensor, Tensor, Tensor]: +) -> tuple[Tensor, Tensor, Tensor]: """Drop duplicated Xs and update the indices tensor Y accordingly. Only 2-d Tensors are supported, since a block design is not guaranteed in batch mode. diff --git a/botorch/optim/closures/core.py b/botorch/optim/closures/core.py index 33e45954c9..01fa9085d0 100644 --- a/botorch/optim/closures/core.py +++ b/botorch/optim/closures/core.py @@ -8,8 +8,10 @@ from __future__ import annotations +from collections.abc import Sequence + from functools import partial -from typing import Any, Callable, Dict, Optional, Sequence, Tuple +from typing import Any, Callable, Optional import torch from botorch.optim.utils import ( @@ -29,7 +31,7 @@ class ForwardBackwardClosure: def __init__( self, forward: Callable[[], Tensor], - parameters: Dict[str, Tensor], + parameters: dict[str, Tensor], backward: Callable[[Tensor], None] = Tensor.backward, reducer: Optional[Callable[[Tensor], Tensor]] = torch.sum, callback: Optional[Callable[[Tensor, Sequence[Optional[Tensor]]], None]] = None, @@ -59,7 +61,7 @@ def __init__( self.callback = callback self.context_manager = context_manager - def __call__(self, **kwargs: Any) -> Tuple[Tensor, Tuple[Optional[Tensor], ...]]: + def __call__(self, **kwargs: Any) -> tuple[Tensor, tuple[Optional[Tensor], ...]]: with self.context_manager(): values = self.forward(**kwargs) value = values if self.reducer is None else self.reducer(values) @@ -78,8 +80,8 @@ class NdarrayOptimizationClosure: def __init__( self, - closure: Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]], - parameters: Dict[str, Tensor], + closure: Callable[[], tuple[Tensor, Sequence[Optional[Tensor]]]], + parameters: dict[str, Tensor], as_array: Callable[[Tensor], ndarray] = None, # pyre-ignore [9] as_tensor: Callable[[ndarray], Tensor] = torch.as_tensor, get_state: Callable[[], ndarray] = None, # pyre-ignore [9] @@ -142,7 +144,7 @@ def __init__( def __call__( self, state: Optional[ndarray] = None, **kwargs: Any - ) -> Tuple[ndarray, ndarray]: + ) -> tuple[ndarray, ndarray]: if state is not None: self.state = state diff --git a/botorch/optim/closures/model_closures.py b/botorch/optim/closures/model_closures.py index 992c9f8674..e511d8b958 100644 --- a/botorch/optim/closures/model_closures.py +++ b/botorch/optim/closures/model_closures.py @@ -8,9 +8,11 @@ from __future__ import annotations +from collections.abc import Sequence + from itertools import chain, repeat from types import NoneType -from typing import Any, Callable, Dict, Optional, Sequence, Tuple +from typing import Any, Callable, Optional from botorch.optim.closures.core import ForwardBackwardClosure from botorch.utils.dispatcher import Dispatcher, type_bypassing_encoder @@ -62,13 +64,13 @@ def get_loss_closure( def 
get_loss_closure_with_grads( mll: MarginalLogLikelihood, - parameters: Dict[str, Tensor], + parameters: dict[str, Tensor], data_loader: Optional[DataLoader] = None, backward: Callable[[Tensor], None] = Tensor.backward, reducer: Optional[Callable[[Tensor], Tensor]] = Tensor.sum, context_manager: Optional[Callable] = None, **kwargs: Any, -) -> Callable[[], Tuple[Tensor, Tuple[Tensor, ...]]]: +) -> Callable[[], tuple[Tensor, tuple[Tensor, ...]]]: r"""Public API for GetLossClosureWithGrads dispatcher. In most cases, this method simply adds a backward pass to a loss closure obtained by @@ -107,7 +109,7 @@ def _get_loss_closure_with_grads_fallback( _likelihood_type: object, _model_type: object, data_loader: Optional[DataLoader], - parameters: Dict[str, Tensor], + parameters: dict[str, Tensor], reducer: Callable[[Tensor], Tensor] = Tensor.sum, backward: Callable[[Tensor], None] = Tensor.backward, context_manager: Callable = None, # pyre-ignore [9] diff --git a/botorch/optim/core.py b/botorch/optim/core.py index 97afdb59f1..691d55cf4b 100644 --- a/botorch/optim/core.py +++ b/botorch/optim/core.py @@ -9,12 +9,13 @@ from __future__ import annotations import re +from collections.abc import Sequence from dataclasses import dataclass, replace from enum import auto, Enum from itertools import count from sys import maxsize from time import monotonic -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +from typing import Any, Callable, Optional, Union from botorch.optim.closures import NdarrayOptimizationClosure from botorch.optim.utils.numpy_utils import get_bounds_as_ndarray @@ -53,15 +54,15 @@ class OptimizationResult: def scipy_minimize( closure: Union[ - Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]], + Callable[[], tuple[Tensor, Sequence[Optional[Tensor]]]], NdarrayOptimizationClosure, ], - parameters: Dict[str, Tensor], - bounds: Optional[Dict[str, Tuple[Optional[float], Optional[float]]]] = None, - callback: Optional[Callable[[Dict[str, Tensor], OptimizationResult], None]] = None, + parameters: dict[str, Tensor], + bounds: Optional[dict[str, tuple[Optional[float], Optional[float]]]] = None, + callback: Optional[Callable[[dict[str, Tensor], OptimizationResult], None]] = None, x0: Optional[ndarray] = None, method: str = "L-BFGS-B", - options: Optional[Dict[str, Any]] = None, + options: Optional[dict[str, Any]] = None, timeout_sec: Optional[float] = None, ) -> OptimizationResult: r"""Generic scipy.optimize.minimize-based optimization routine. 
@@ -140,11 +141,11 @@ def wrapped_callback(x: ndarray): def torch_minimize( - closure: Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]], - parameters: Dict[str, Tensor], - bounds: Optional[Dict[str, Tuple[Optional[float], Optional[float]]]] = None, - callback: Optional[Callable[[Dict[str, Tensor], OptimizationResult], None]] = None, - optimizer: Union[Optimizer, Callable[[List[Tensor]], Optimizer]] = Adam, + closure: Callable[[], tuple[Tensor, Sequence[Optional[Tensor]]]], + parameters: dict[str, Tensor], + bounds: Optional[dict[str, tuple[Optional[float], Optional[float]]]] = None, + callback: Optional[Callable[[dict[str, Tensor], OptimizationResult], None]] = None, + optimizer: Union[Optimizer, Callable[[list[Tensor]], Optimizer]] = Adam, scheduler: Optional[Union[LRScheduler, Callable[[Optimizer], LRScheduler]]] = None, step_limit: Optional[int] = None, timeout_sec: Optional[float] = None, diff --git a/botorch/optim/fit.py b/botorch/optim/fit.py index b6bd4f99b7..828c5202ed 100644 --- a/botorch/optim/fit.py +++ b/botorch/optim/fit.py @@ -8,8 +8,10 @@ from __future__ import annotations +from collections.abc import Sequence + from functools import partial -from typing import Any, Callable, Dict, Optional, Sequence, Set, Tuple, Union +from typing import Any, Callable, Optional, Union from warnings import warn from botorch.exceptions.warnings import OptimizationWarning @@ -31,26 +33,26 @@ from torch.optim.lr_scheduler import _LRScheduler from torch.optim.optimizer import Optimizer -TBoundsDict = Dict[str, Tuple[Optional[float], Optional[float]]] +TBoundsDict = dict[str, tuple[Optional[float], Optional[float]]] TScipyObjective = Callable[ - [ndarray, MarginalLogLikelihood, Dict[str, TorchAttr]], Tuple[float, ndarray] + [ndarray, MarginalLogLikelihood, dict[str, TorchAttr]], tuple[float, ndarray] ] TModToArray = Callable[ - [Module, Optional[TBoundsDict], Optional[Set[str]]], - Tuple[ndarray, Dict[str, TorchAttr], Optional[ndarray]], + [Module, Optional[TBoundsDict], Optional[set[str]]], + tuple[ndarray, dict[str, TorchAttr], Optional[ndarray]], ] -TArrayToMod = Callable[[Module, ndarray, Dict[str, TorchAttr]], Module] +TArrayToMod = Callable[[Module, ndarray, dict[str, TorchAttr]], Module] def fit_gpytorch_mll_scipy( mll: MarginalLogLikelihood, - parameters: Optional[Dict[str, Tensor]] = None, - bounds: Optional[Dict[str, Tuple[Optional[float], Optional[float]]]] = None, - closure: Optional[Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]]] = None, - closure_kwargs: Optional[Dict[str, Any]] = None, + parameters: Optional[dict[str, Tensor]] = None, + bounds: Optional[dict[str, tuple[Optional[float], Optional[float]]]] = None, + closure: Optional[Callable[[], tuple[Tensor, Sequence[Optional[Tensor]]]]] = None, + closure_kwargs: Optional[dict[str, Any]] = None, method: str = "L-BFGS-B", - options: Optional[Dict[str, Any]] = None, - callback: Optional[Callable[[Dict[str, Tensor], OptimizationResult], None]] = None, + options: Optional[dict[str, Any]] = None, + callback: Optional[Callable[[dict[str, Tensor], OptimizationResult], None]] = None, timeout_sec: Optional[float] = None, ) -> OptimizationResult: r"""Generic scipy.optimize-based fitting routine for GPyTorch MLLs. 
@@ -110,15 +112,15 @@ def fit_gpytorch_mll_scipy( def fit_gpytorch_mll_torch( mll: MarginalLogLikelihood, - parameters: Optional[Dict[str, Tensor]] = None, - bounds: Optional[Dict[str, Tuple[Optional[float], Optional[float]]]] = None, - closure: Optional[Callable[[], Tuple[Tensor, Sequence[Optional[Tensor]]]]] = None, - closure_kwargs: Optional[Dict[str, Any]] = None, + parameters: Optional[dict[str, Tensor]] = None, + bounds: Optional[dict[str, tuple[Optional[float], Optional[float]]]] = None, + closure: Optional[Callable[[], tuple[Tensor, Sequence[Optional[Tensor]]]]] = None, + closure_kwargs: Optional[dict[str, Any]] = None, step_limit: Optional[int] = None, stopping_criterion: Optional[Callable[[Tensor], bool]] = DEFAULT, # pyre-ignore [9] optimizer: Union[Optimizer, Callable[..., Optimizer]] = Adam, scheduler: Optional[Union[_LRScheduler, Callable[..., _LRScheduler]]] = None, - callback: Optional[Callable[[Dict[str, Tensor], OptimizationResult], None]] = None, + callback: Optional[Callable[[dict[str, Tensor], OptimizationResult], None]] = None, timeout_sec: Optional[float] = None, ) -> OptimizationResult: r"""Generic torch.optim-based fitting routine for GPyTorch MLLs. diff --git a/botorch/optim/homotopy.py b/botorch/optim/homotopy.py index 5d820de5a1..4aaed8e076 100644 --- a/botorch/optim/homotopy.py +++ b/botorch/optim/homotopy.py @@ -7,7 +7,7 @@ import math from dataclasses import dataclass -from typing import Callable, List, Optional, Union +from typing import Callable, Optional, Union import torch from torch import Tensor @@ -17,7 +17,7 @@ class FixedHomotopySchedule: """Homotopy schedule with a fixed list of values.""" - def __init__(self, values: List[float]) -> None: + def __init__(self, values: list[float]) -> None: r"""Initialize FixedHomotopySchedule. Args: @@ -103,8 +103,8 @@ class Homotopy: def __init__( self, - homotopy_parameters: List[HomotopyParameter], - callbacks: Optional[List[Callable]] = None, + homotopy_parameters: list[HomotopyParameter], + callbacks: Optional[list[Callable]] = None, ) -> None: r"""Initialize the homotopy. diff --git a/botorch/optim/initializers.py b/botorch/optim/initializers.py index bb026138e8..17f9edfa72 100644 --- a/botorch/optim/initializers.py +++ b/botorch/optim/initializers.py @@ -16,7 +16,7 @@ import warnings from math import ceil -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import torch from botorch import settings @@ -60,18 +60,18 @@ int, int, int, - Optional[Dict[int, float]], - Optional[Dict[str, Union[bool, float, int]]], - Optional[List[Tuple[Tensor, Tensor, float]]], - Optional[List[Tuple[Tensor, Tensor, float]]], + Optional[dict[int, float]], + Optional[dict[str, Union[bool, float, int]]], + Optional[list[tuple[Tensor, Tensor, float]]], + Optional[list[tuple[Tensor, Tensor, float]]], ], Optional[Tensor], ] def transform_constraints( - constraints: Union[List[Tuple[Tensor, Tensor, float]], None], q: int, d: int -) -> List[Tuple[Tensor, Tensor, float]]: + constraints: Union[list[tuple[Tensor, Tensor, float]], None], q: int, d: int +) -> list[tuple[Tensor, Tensor, float]]: r"""Transform constraints to sample from a d*q-dimensional space instead of a d-dimensional space. 
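For readers skimming these signatures: each linear constraint tuple `(indices, coefficients, rhs)` encodes `sum_i coefficients[i] * X[..., indices[i]] >= rhs` (or `== rhs` for equality constraints), per the `optimize_acqf` documentation. A small illustration of the convention; the `is_feasible` helper is hypothetical, for demonstration only:

    import torch

    # Encode x_0 + x_1 >= 1 over a d = 3 design in (indices, coefficients, rhs) form.
    constraint = (torch.tensor([0, 1]), torch.tensor([1.0, 1.0]), 1.0)

    def is_feasible(X, constraint):
        # Check sum_i coefficients[i] * X[..., indices[i]] >= rhs pointwise.
        indices, coefficients, rhs = constraint
        return (X[..., indices] * coefficients).sum(dim=-1) >= rhs

    X = torch.tensor([[0.2, 0.9, 0.5], [0.1, 0.3, 0.9]])
    print(is_feasible(X, constraint))  # tensor([ True, False])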
@@ -103,8 +103,8 @@ def transform_constraints( def transform_intra_point_constraint( - constraint: Tuple[Tensor, Tensor, float], d: int, q: int -) -> List[Tuple[Tensor, Tensor, float]]: + constraint: tuple[Tensor, Tensor, float], d: int, q: int +) -> list[tuple[Tensor, Tensor, float]]: r"""Transforms an intra-point/pointwise constraint from d-dimensional space to a d*q-dimensional space. @@ -141,8 +141,8 @@ def transform_intra_point_constraint( def transform_inter_point_constraint( - constraint: Tuple[Tensor, Tensor, float], d: int -) -> Tuple[Tensor, Tensor, float]: + constraint: tuple[Tensor, Tensor, float], d: int +) -> tuple[Tensor, Tensor, float]: r"""Transforms an inter-point constraint from d-dimensional space to a d*q-dimensional space. @@ -182,8 +182,8 @@ def sample_q_batches_from_polytope( n_burnin: int, n_thinning: int, seed: int, - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + equality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, ) -> Tensor: r"""Samples `n` q-batches from a polytope of dimension `d`. @@ -246,10 +246,10 @@ def gen_batch_initial_conditions( q: int, num_restarts: int, raw_samples: int, - fixed_features: Optional[Dict[int, float]] = None, - options: Optional[Dict[str, Union[bool, float, int]]] = None, - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, + fixed_features: Optional[dict[int, float]] = None, + options: Optional[dict[str, Union[bool, float, int]]] = None, + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + equality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, generator: Optional[Callable[[int, int, Optional[int]], Tensor]] = None, fixed_X_fantasies: Optional[Tensor] = None, ) -> Tensor: @@ -444,10 +444,10 @@ def gen_one_shot_kg_initial_conditions( q: int, num_restarts: int, raw_samples: int, - fixed_features: Optional[Dict[int, float]] = None, - options: Optional[Dict[str, Union[bool, float, int]]] = None, - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, + fixed_features: Optional[dict[int, float]] = None, + options: Optional[dict[str, Union[bool, float, int]]] = None, + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + equality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, ) -> Optional[Tensor]: r"""Generate a batch of smart initializations for qKnowledgeGradient. @@ -563,10 +563,10 @@ def gen_one_shot_hvkg_initial_conditions( q: int, num_restarts: int, raw_samples: int, - fixed_features: Optional[Dict[int, float]] = None, - options: Optional[Dict[str, Union[bool, float, int]]] = None, - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, + fixed_features: Optional[dict[int, float]] = None, + options: Optional[dict[str, Union[bool, float, int]]] = None, + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + equality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, ) -> Optional[Tensor]: r"""Generate a batch of smart initializations for qHypervolumeKnowledgeGradient. 
@@ -761,8 +761,8 @@ def gen_value_function_initial_conditions( num_restarts: int, raw_samples: int, current_model: Model, - fixed_features: Optional[Dict[int, float]] = None, - options: Optional[Dict[str, Union[bool, float, int]]] = None, + fixed_features: Optional[dict[int, float]] = None, + options: Optional[dict[str, Union[bool, float, int]]] = None, ) -> Tensor: r"""Generate a batch of smart initializations for optimizing the value function of qKnowledgeGradient. diff --git a/botorch/optim/optimize.py b/botorch/optim/optimize.py index 2a5a26bea2..577fd6979b 100644 --- a/botorch/optim/optimize.py +++ b/botorch/optim/optimize.py @@ -14,7 +14,7 @@ import warnings -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, Optional, Union import torch from botorch.acquisition.acquisition import ( @@ -69,11 +69,11 @@ class OptimizeAcqfInputs: q: int num_restarts: int raw_samples: Optional[int] - options: Optional[Dict[str, Union[bool, float, int, str]]] - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] - equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] - nonlinear_inequality_constraints: Optional[List[Tuple[Callable, bool]]] - fixed_features: Optional[Dict[int, float]] + options: Optional[dict[str, Union[bool, float, int, str]]] + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] + equality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] + nonlinear_inequality_constraints: Optional[list[tuple[Callable, bool]]] + fixed_features: Optional[dict[int, float]] post_processing_func: Optional[Callable[[Tensor], Tensor]] batch_initial_conditions: Optional[Tensor] return_best_only: bool @@ -83,7 +83,7 @@ class OptimizeAcqfInputs: timeout_sec: Optional[float] = None return_full_tree: bool = False retry_on_optimization_warning: bool = True - ic_gen_kwargs: Dict = dataclasses.field(default_factory=dict) + ic_gen_kwargs: dict = dataclasses.field(default_factory=dict) @property def full_tree(self) -> bool: @@ -140,10 +140,10 @@ def get_ic_generator(self) -> TGenInitialConditions: def _optimize_acqf_all_features_fixed( *, bounds: Tensor, - fixed_features: Dict[int, float], + fixed_features: dict[int, float], q: int, acq_function: AcquisitionFunction, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: """ Helper function for `optimize_acqf` for the trivial case where all features are fixed. @@ -201,7 +201,7 @@ def _validate_sequential_inputs(opt_inputs: OptimizeAcqfInputs) -> None: def _optimize_acqf_sequential_q( opt_inputs: OptimizeAcqfInputs, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: """ Helper function for `optimize_acqf` when sequential=True and q > 1. 
@@ -243,7 +243,7 @@ def _optimize_acqf_sequential_q( return candidates, torch.stack(acq_value_list) -def _optimize_acqf_batch(opt_inputs: OptimizeAcqfInputs) -> Tuple[Tensor, Tensor]: +def _optimize_acqf_batch(opt_inputs: OptimizeAcqfInputs) -> tuple[Tensor, Tensor]: options = opt_inputs.options or {} initial_conditions_provided = opt_inputs.batch_initial_conditions is not None @@ -274,9 +274,9 @@ def _optimize_acqf_batch(opt_inputs: OptimizeAcqfInputs) -> Tuple[Tensor, Tensor ), ) - def _optimize_batch_candidates() -> Tuple[Tensor, Tensor, List[Warning]]: - batch_candidates_list: List[Tensor] = [] - batch_acq_values_list: List[Tensor] = [] + def _optimize_batch_candidates() -> tuple[Tensor, Tensor, list[Warning]]: + batch_candidates_list: list[Tensor] = [] + batch_acq_values_list: list[Tensor] = [] batched_ics = batch_initial_conditions.split(batch_limit) opt_warnings = [] timeout_sec = ( @@ -286,7 +286,7 @@ def _optimize_batch_candidates() -> Tuple[Tensor, Tensor, List[Warning]]: ) bounds = opt_inputs.bounds - gen_kwargs: Dict[str, Any] = { + gen_kwargs: dict[str, Any] = { "lower_bounds": None if bounds[0].isinf().all() else bounds[0], "upper_bounds": None if bounds[1].isinf().all() else bounds[1], "options": {k: v for k, v in options.items() if k not in INIT_OPTION_KEYS}, @@ -400,11 +400,11 @@ def optimize_acqf( q: int, num_restarts: int, raw_samples: Optional[int] = None, - options: Optional[Dict[str, Union[bool, float, int, str]]] = None, - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - nonlinear_inequality_constraints: Optional[List[Tuple[Callable, bool]]] = None, - fixed_features: Optional[Dict[int, float]] = None, + options: Optional[dict[str, Union[bool, float, int, str]]] = None, + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + equality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + nonlinear_inequality_constraints: Optional[list[tuple[Callable, bool]]] = None, + fixed_features: Optional[dict[int, float]] = None, post_processing_func: Optional[Callable[[Tensor], Tensor]] = None, batch_initial_conditions: Optional[Tensor] = None, return_best_only: bool = True, @@ -416,7 +416,7 @@ def optimize_acqf( return_full_tree: bool = False, retry_on_optimization_warning: bool = True, **ic_gen_kwargs: Any, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Generate a set of candidates via multi-start optimization. 
Args: @@ -543,7 +543,7 @@ def optimize_acqf( return _optimize_acqf(opt_acqf_inputs) -def _optimize_acqf(opt_inputs: OptimizeAcqfInputs) -> Tuple[Tensor, Tensor]: +def _optimize_acqf(opt_inputs: OptimizeAcqfInputs) -> tuple[Tensor, Tensor]: # Handle the trivial case when all features are fixed if ( opt_inputs.fixed_features is not None @@ -570,20 +570,20 @@ def optimize_acqf_cyclic( q: int, num_restarts: int, raw_samples: Optional[int] = None, - options: Optional[Dict[str, Union[bool, float, int, str]]] = None, - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - fixed_features: Optional[Dict[int, float]] = None, + options: Optional[dict[str, Union[bool, float, int, str]]] = None, + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + equality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + fixed_features: Optional[dict[int, float]] = None, post_processing_func: Optional[Callable[[Tensor], Tensor]] = None, batch_initial_conditions: Optional[Tensor] = None, - cyclic_options: Optional[Dict[str, Union[bool, float, int, str]]] = None, + cyclic_options: Optional[dict[str, Union[bool, float, int, str]]] = None, *, ic_generator: Optional[TGenInitialConditions] = None, timeout_sec: Optional[float] = None, return_full_tree: bool = False, retry_on_optimization_warning: bool = True, **ic_gen_kwargs: Any, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Generate a set of `q` candidates via cyclic optimization. Args: @@ -701,20 +701,20 @@ def optimize_acqf_cyclic( def optimize_acqf_list( - acq_function_list: List[AcquisitionFunction], + acq_function_list: list[AcquisitionFunction], bounds: Tensor, num_restarts: int, raw_samples: Optional[int] = None, - options: Optional[Dict[str, Union[bool, float, int, str]]] = None, - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - nonlinear_inequality_constraints: Optional[List[Tuple[Callable, bool]]] = None, - fixed_features: Optional[Dict[int, float]] = None, - fixed_features_list: Optional[List[Dict[int, float]]] = None, + options: Optional[dict[str, Union[bool, float, int, str]]] = None, + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + equality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + nonlinear_inequality_constraints: Optional[list[tuple[Callable, bool]]] = None, + fixed_features: Optional[dict[int, float]] = None, + fixed_features_list: Optional[list[dict[int, float]]] = None, post_processing_func: Optional[Callable[[Tensor], Tensor]] = None, ic_generator: Optional[TGenInitialConditions] = None, - ic_gen_kwargs: Optional[Dict] = None, -) -> Tuple[Tensor, Tensor]: + ic_gen_kwargs: Optional[dict] = None, +) -> tuple[Tensor, Tensor]: r"""Generate a list of candidates from a list of acquisition functions. 
The acquisition functions are optimized in sequence, with previous candidates @@ -837,17 +837,17 @@ def optimize_acqf_mixed( bounds: Tensor, q: int, num_restarts: int, - fixed_features_list: List[Dict[int, float]], + fixed_features_list: list[dict[int, float]], raw_samples: Optional[int] = None, - options: Optional[Dict[str, Union[bool, float, int, str]]] = None, - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - nonlinear_inequality_constraints: Optional[List[Tuple[Callable, bool]]] = None, + options: Optional[dict[str, Union[bool, float, int, str]]] = None, + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + equality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + nonlinear_inequality_constraints: Optional[list[tuple[Callable, bool]]] = None, post_processing_func: Optional[Callable[[Tensor], Tensor]] = None, batch_initial_conditions: Optional[Tensor] = None, ic_generator: Optional[TGenInitialConditions] = None, - ic_gen_kwargs: Optional[Dict] = None, -) -> Tuple[Tensor, Tensor]: + ic_gen_kwargs: Optional[dict] = None, +) -> tuple[Tensor, Tensor]: r"""Optimize over a list of fixed_features and return the best solution. This is useful for optimizing over mixed continuous and discrete domains. @@ -993,7 +993,7 @@ def optimize_acqf_discrete( choices: Tensor, max_batch_size: int = 2048, unique: bool = True, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Optimize over a discrete set of points using batch evaluation. For `q > 1` this function generates candidates by means of sequential @@ -1071,9 +1071,9 @@ def _split_batch_eval_acqf( def _generate_neighbors( x: Tensor, - discrete_choices: List[Tensor], + discrete_choices: list[Tensor], X_avoid: Tensor, - inequality_constraints: List[Tuple[Tensor, Tensor, float]], + inequality_constraints: list[tuple[Tensor, Tensor, float]], ): # generate all 1D perturbations npts = sum([len(c) for c in discrete_choices]) @@ -1089,7 +1089,7 @@ def _generate_neighbors( def _filter_infeasible( - X: Tensor, inequality_constraints: List[Tuple[Tensor, Tensor, float]] + X: Tensor, inequality_constraints: list[tuple[Tensor, Tensor, float]] ): """Remove all points from `X` that don't satisfy the constraints.""" is_feasible = torch.ones(X.shape[0], dtype=torch.bool, device=X.device) @@ -1104,10 +1104,10 @@ def _filter_invalid(X: Tensor, X_avoid: Tensor): def _gen_batch_initial_conditions_local_search( - discrete_choices: List[Tensor], + discrete_choices: list[Tensor], raw_samples: int, X_avoid: Tensor, - inequality_constraints: List[Tuple[Tensor, Tensor, float]], + inequality_constraints: list[tuple[Tensor, Tensor, float]], min_points: int, max_tries: int = 100, ): @@ -1132,16 +1132,16 @@ def _gen_batch_initial_conditions_local_search( def optimize_acqf_discrete_local_search( acq_function: AcquisitionFunction, - discrete_choices: List[Tensor], + discrete_choices: list[Tensor], q: int, num_restarts: int = 20, raw_samples: int = 4096, - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, X_avoid: Optional[Tensor] = None, batch_initial_conditions: Optional[Tensor] = None, max_batch_size: int = 2048, unique: bool = True, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Optimize acquisition function over a lattice. 
This is useful when d is large and enumeration of the search space diff --git a/botorch/optim/optimize_homotopy.py b/botorch/optim/optimize_homotopy.py index 4fb26ced1b..8b89a7d80e 100644 --- a/botorch/optim/optimize_homotopy.py +++ b/botorch/optim/optimize_homotopy.py @@ -3,7 +3,7 @@ # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from typing import Callable, Dict, Optional, Tuple, Union +from typing import Callable, Optional, Union import torch from botorch.acquisition import AcquisitionFunction @@ -53,13 +53,13 @@ def optimize_acqf_homotopy( homotopy: Homotopy, num_restarts: int, raw_samples: Optional[int] = None, - fixed_features: Optional[Dict[int, float]] = None, - options: Optional[Dict[str, Union[bool, float, int, str]]] = None, - final_options: Optional[Dict[str, Union[bool, float, int, str]]] = None, + fixed_features: Optional[dict[int, float]] = None, + options: Optional[dict[str, Union[bool, float, int, str]]] = None, + final_options: Optional[dict[str, Union[bool, float, int, str]]] = None, batch_initial_conditions: Optional[Tensor] = None, post_processing_func: Optional[Callable[[Tensor], Tensor]] = None, prune_tolerance: float = 1e-4, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Generate a set of candidates via multi-start optimization. Args: diff --git a/botorch/optim/parameter_constraints.py b/botorch/optim/parameter_constraints.py index 64b2d10b60..b5536db48b 100644 --- a/botorch/optim/parameter_constraints.py +++ b/botorch/optim/parameter_constraints.py @@ -11,7 +11,7 @@ from __future__ import annotations from functools import partial -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import numpy as np import torch @@ -20,7 +20,7 @@ from torch import Tensor -ScipyConstraintDict = Dict[ +ScipyConstraintDict = dict[ str, Union[str, Callable[[np.ndarray], float], Callable[[np.ndarray], np.ndarray]] ] NLC_TOL = -1e-6 @@ -67,9 +67,9 @@ def _expand(bounds: Union[float, Tensor], X: Tensor, lower: bool) -> Tensor: def make_scipy_linear_constraints( shapeX: torch.Size, - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, -) -> List[ScipyConstraintDict]: + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + equality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, +) -> list[ScipyConstraintDict]: r"""Generate scipy constraints from torch representation. Args: @@ -129,7 +129,7 @@ def make_scipy_linear_constraints( def eval_lin_constraint( - x: np.ndarray, flat_idxr: List[int], coeffs: np.ndarray, rhs: float + x: np.ndarray, flat_idxr: list[int], coeffs: np.ndarray, rhs: float ) -> np.float64: r"""Evaluate a single linear constraint. @@ -146,7 +146,7 @@ def eval_lin_constraint( def lin_constraint_jac( - x: np.ndarray, flat_idxr: List[int], coeffs: np.ndarray, n: int + x: np.ndarray, flat_idxr: list[int], coeffs: np.ndarray, n: int ) -> np.ndarray: r"""Return the Jacobian associated with a linear constraint. @@ -219,7 +219,7 @@ def _make_linear_constraints( rhs: float, shapeX: torch.Size, eq: bool = False, -) -> List[ScipyConstraintDict]: +) -> list[ScipyConstraintDict]: r"""Create linear constraints to be used by `scipy.minimize`. 
Encodes constraints of the form @@ -282,7 +282,7 @@ def _make_linear_constraints( b, q, d = shapeX _validate_linear_constraints_indices_input(indices, q, d) n = shapeX.numel() - constraints: List[ScipyConstraintDict] = [] + constraints: list[ScipyConstraintDict] = [] coeffs = _arrayify(coefficients) ctype = "eq" if eq else "ineq" @@ -314,7 +314,7 @@ def _make_linear_constraints( def _make_nonlinear_constraints( f_np_wrapper: Callable, nlc: Callable, is_intrapoint: bool, shapeX: torch.Size -) -> List[ScipyConstraintDict]: +) -> list[ScipyConstraintDict]: """Create nonlinear constraints to be used by `scipy.minimize`. Args: @@ -370,10 +370,10 @@ def get_interpoint_constraint(b: int, nlc: Callable) -> Callable: def _generate_unfixed_nonlin_constraints( - constraints: Optional[List[Tuple[Callable[[Tensor], Tensor], bool]]], - fixed_features: Dict[int, float], + constraints: Optional[list[tuple[Callable[[Tensor], Tensor], bool]]], + fixed_features: dict[int, float], dimension: int, -) -> Optional[List[Callable[[Tensor], Tensor]]]: +) -> Optional[list[Callable[[Tensor], Tensor]]]: """Given a dictionary of fixed features, returns a list of callables for nonlinear inequality constraints expecting only a tensor with the non-fixed features as input. @@ -410,11 +410,11 @@ def new_nonlin_constraint(X: Tensor) -> Tensor: def _generate_unfixed_lin_constraints( - constraints: Optional[List[Tuple[Tensor, Tensor, float]]], - fixed_features: Dict[int, float], + constraints: Optional[list[tuple[Tensor, Tensor, float]]], + fixed_features: dict[int, float], dimension: int, eq: bool, -) -> Optional[List[Tuple[Tensor, Tensor, float]]]: +) -> Optional[list[tuple[Tensor, Tensor, float]]]: # If constraints is None or an empty list, then return itself if not constraints: return constraints @@ -473,7 +473,7 @@ def _generate_unfixed_lin_constraints( def _make_f_and_grad_nonlinear_inequality_constraints( f_np_wrapper: Callable, nlc: Callable -) -> Tuple[Callable[[Tensor], Tensor], Callable[[Tensor], Tensor]]: +) -> tuple[Callable[[Tensor], Tensor], Callable[[Tensor], Tensor]]: """ Create callables for objective + grad for the nonlinear inequality constraints. The Scipy interface requires specifying separate callables and we use caching to @@ -539,11 +539,11 @@ def check_x(x: Tensor) -> bool: def make_scipy_nonlinear_inequality_constraints( - nonlinear_inequality_constraints: List[Tuple[Callable, bool]], + nonlinear_inequality_constraints: list[tuple[Callable, bool]], f_np_wrapper: Callable, x0: Tensor, shapeX: torch.Size, -) -> List[Dict]: +) -> list[dict]: r"""Generate Scipy nonlinear inequality constraints from callables. Args: diff --git a/botorch/optim/utils/acquisition_utils.py b/botorch/optim/utils/acquisition_utils.py index c8df213590..48292c4291 100644 --- a/botorch/optim/utils/acquisition_utils.py +++ b/botorch/optim/utils/acquisition_utils.py @@ -8,7 +8,7 @@ from __future__ import annotations -from typing import Dict, Optional, Union +from typing import Optional, Union from warnings import warn import torch @@ -64,7 +64,7 @@ def columnwise_clamp( def fix_features( - X: Tensor, fixed_features: Optional[Dict[int, Optional[float]]] = None + X: Tensor, fixed_features: Optional[dict[int, Optional[float]]] = None ) -> Tensor: r"""Fix feature values in a Tensor. 
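The gist of `fix_features` is to overwrite the given columns of a candidate tensor. A simplified sketch handling only explicit float values (the `Optional[float]` values in the signature above indicate the real helper also accepts `None`, which this sketch does not cover):

    import torch
    from torch import Tensor

    def fix_features_sketch(X: Tensor, fixed_features=None) -> Tensor:
        # Return a copy of X with column i overwritten by fixed_features[i].
        if not fixed_features:
            return X
        columns = [
            torch.full_like(X[..., i], fixed_features[i])
            if i in fixed_features
            else X[..., i]
            for i in range(X.shape[-1])
        ]
        return torch.stack(columns, dim=-1)

    X = torch.rand(4, 3)
    X_fixed = fix_features_sketch(X, {1: 0.5})
    assert torch.all(X_fixed[..., 1] == 0.5)
    assert torch.equal(X_fixed[..., 0], X[..., 0])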
diff --git a/botorch/optim/utils/common.py b/botorch/optim/utils/common.py index 22f91006fe..44990ece06 100644 --- a/botorch/optim/utils/common.py +++ b/botorch/optim/utils/common.py @@ -9,7 +9,7 @@ from __future__ import annotations from logging import debug as logging_debug -from typing import Callable, Optional, Tuple +from typing import Callable, Optional from warnings import warn_explicit, WarningMessage import numpy as np @@ -18,7 +18,7 @@ def _handle_numerical_errors( error: RuntimeError, x: np.ndarray, dtype: Optional[np.dtype] = None -) -> Tuple[np.ndarray, np.ndarray]: +) -> tuple[np.ndarray, np.ndarray]: if isinstance(error, NotPSDError): raise error error_message = error.args[0] if len(error.args) > 0 else "" diff --git a/botorch/optim/utils/model_utils.py b/botorch/optim/utils/model_utils.py index 3978796852..092410c3fb 100644 --- a/botorch/optim/utils/model_utils.py +++ b/botorch/optim/utils/model_utils.py @@ -8,8 +8,10 @@ from __future__ import annotations +from collections.abc import Iterator + from re import Pattern -from typing import Any, Callable, Dict, Iterator, NamedTuple, Optional, Tuple, Union +from typing import Any, Callable, NamedTuple, Optional, Union from warnings import warn import torch @@ -39,7 +41,7 @@ def get_parameters( module: Module, requires_grad: Optional[bool] = None, name_filter: Optional[Callable[[str], bool]] = None, -) -> Dict[str, Tensor]: +) -> dict[str, Tensor]: r"""Helper method for obtaining a module's parameters. Args: @@ -68,8 +70,8 @@ def get_parameters_and_bounds( module: Module, requires_grad: Optional[bool] = None, name_filter: Optional[Callable[[str], bool]] = None, - default_bounds: Tuple[float, float] = (-float("inf"), float("inf")), -) -> Tuple[Dict[str, Tensor], Dict[str, Tuple[Optional[float], Optional[float]]]]: + default_bounds: tuple[float, float] = (-float("inf"), float("inf")), +) -> tuple[dict[str, Tensor], dict[str, tuple[Optional[float], Optional[float]]]]: r"""Helper method for obtaining a module's parameters and their respective ranges. Args: @@ -109,7 +111,7 @@ def get_parameters_and_bounds( def get_name_filter( patterns: Iterator[Union[Pattern, str]] -) -> Callable[[Union[str, Tuple[str, Any, ...]]], bool]: +) -> Callable[[Union[str, tuple[str, Any, ...]]], bool]: r"""Returns a binary function that filters strings (or iterables whose first element is a string) according to a bank of excluded patterns. Typically, used in conjunction with generators such as `module.named_parameters()`. @@ -134,7 +136,7 @@ def get_name_filter( f"but found {type(pattern)}." 
) - def name_filter(item: Union[str, Tuple[str, Any, ...]]) -> bool: + def name_filter(item: Union[str, tuple[str, Any, ...]]) -> bool: name = item if isinstance(item, str) else next(iter(item)) if name in names: return False diff --git a/botorch/optim/utils/numpy_utils.py b/botorch/optim/utils/numpy_utils.py index cab5da7e30..fc815ea17f 100644 --- a/botorch/optim/utils/numpy_utils.py +++ b/botorch/optim/utils/numpy_utils.py @@ -8,8 +8,10 @@ from __future__ import annotations +from collections.abc import Iterator + from itertools import tee -from typing import Callable, Dict, Iterator, Optional, Tuple, Union +from typing import Callable, Optional, Union import numpy as np import torch @@ -65,7 +67,7 @@ def as_ndarray( def get_tensors_as_ndarray_1d( - tensors: Union[Iterator[Tensor], Dict[str, Tensor]], + tensors: Union[Iterator[Tensor], dict[str, Tensor]], out: Optional[ndarray] = None, dtype: Optional[Union[np.dtype, str]] = None, as_array: Callable[[Tensor], ndarray] = as_ndarray, @@ -110,7 +112,7 @@ def get_tensors_as_ndarray_1d( def set_tensors_from_ndarray_1d( - tensors: Union[Iterator[Tensor], Dict[str, Tensor]], + tensors: Union[Iterator[Tensor], dict[str, Tensor]], array: ndarray, as_tensor: Callable[[ndarray], Tensor] = torch.as_tensor, ) -> None: @@ -134,9 +136,9 @@ def set_tensors_from_ndarray_1d( def get_bounds_as_ndarray( - parameters: Dict[str, Tensor], - bounds: Dict[ - str, Tuple[Optional[Union[float, Tensor]], Optional[Union[float, Tensor]]] + parameters: dict[str, Tensor], + bounds: dict[ + str, tuple[Optional[Union[float, Tensor]], Optional[Union[float, Tensor]]] ], ) -> Optional[np.ndarray]: r"""Helper method for converting bounds into an ndarray. diff --git a/botorch/optim/utils/timeout.py b/botorch/optim/utils/timeout.py index 89b0dfb433..eea0d98963 100644 --- a/botorch/optim/utils/timeout.py +++ b/botorch/optim/utils/timeout.py @@ -8,7 +8,8 @@ import time import warnings -from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union +from collections.abc import Sequence +from typing import Any, Callable, Optional, Union import numpy as np from botorch.exceptions.errors import OptimizationTimeoutError @@ -18,16 +19,16 @@ def minimize_with_timeout( fun: Callable[[np.ndarray, ...], float], x0: np.ndarray, - args: Tuple[Any, ...] = (), + args: tuple[Any, ...] = (), method: Optional[str] = None, jac: Optional[Union[str, Callable, bool]] = None, hess: Optional[Union[str, Callable, optimize.HessianUpdateStrategy]] = None, hessp: Optional[Callable] = None, - bounds: Optional[Union[Sequence[Tuple[float, float]], optimize.Bounds]] = None, + bounds: Optional[Union[Sequence[tuple[float, float]], optimize.Bounds]] = None, constraints=(), # Typing this properly is a s**t job tol: Optional[float] = None, callback: Optional[Callable] = None, - options: Optional[Dict[str, Any]] = None, + options: Optional[dict[str, Any]] = None, timeout_sec: Optional[float] = None, ) -> optimize.OptimizeResult: r"""Wrapper around scipy.optimize.minimize to support timeout. 
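Since scipy has no built-in wall-clock limit, the wrapper above relies on the per-iteration callback to abort, raising the `OptimizationTimeoutError` imported at the top of this file and recovering the last iterate. A self-contained sketch of that technique, with a hypothetical exception class standing in for the real one:

    import time

    import numpy as np
    from scipy import optimize

    class _Timeout(Exception):  # stand-in for OptimizationTimeoutError
        def __init__(self, x):
            self.x = x  # last iterate seen before the budget ran out

    def minimize_with_timeout_sketch(fun, x0, timeout_sec, **kwargs):
        start = time.monotonic()

        def cb(xk):
            # Invoked once per iteration; abort once the budget is spent.
            if time.monotonic() - start > timeout_sec:
                raise _Timeout(xk)

        try:
            return optimize.minimize(fun, x0, callback=cb, **kwargs)
        except _Timeout as e:
            # Package the last iterate as a best-effort result instead of failing.
            return optimize.OptimizeResult(x=e.x, success=False, message="Timed out.")

    res = minimize_with_timeout_sketch(lambda x: float(np.sum(x**2)), np.ones(10), 10.0)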
diff --git a/botorch/posteriors/fully_bayesian.py b/botorch/posteriors/fully_bayesian.py index 059da219ac..9d0fdaadc3 100644 --- a/botorch/posteriors/fully_bayesian.py +++ b/botorch/posteriors/fully_bayesian.py @@ -5,7 +5,7 @@ from __future__ import annotations -from typing import Callable, Optional, Tuple +from typing import Callable, Optional from warnings import warn import torch @@ -147,7 +147,7 @@ def quantile(self, value: Tensor) -> Tensor: return _quantile(posterior=self, value=value) @property - def batch_range(self) -> Tuple[int, int]: + def batch_range(self) -> tuple[int, int]: r"""The t-batch range. This is used in samplers to identify the t-batch component of the diff --git a/botorch/posteriors/gpytorch.py b/botorch/posteriors/gpytorch.py index 27cf48a176..4e29f69287 100644 --- a/botorch/posteriors/gpytorch.py +++ b/botorch/posteriors/gpytorch.py @@ -11,7 +11,7 @@ from __future__ import annotations from contextlib import ExitStack -from typing import Optional, Tuple, TYPE_CHECKING, Union +from typing import Optional, TYPE_CHECKING, Union import torch from botorch.exceptions.errors import BotorchTensorDimensionError @@ -58,7 +58,7 @@ def base_sample_shape(self) -> torch.Size: return self.distribution.batch_shape + self.distribution.base_sample_shape @property - def batch_range(self) -> Tuple[int, int]: + def batch_range(self) -> tuple[int, int]: r"""The t-batch range. This is used in samplers to identify the t-batch component of the @@ -193,7 +193,7 @@ def scalarize_posterior_gpytorch( posterior: GPyTorchPosterior, weights: Tensor, offset: float = 0.0, -) -> Tuple[Tensor, Union[Tensor, LinearOperator]]: +) -> tuple[Tensor, Union[Tensor, LinearOperator]]: r"""Helper function for `scalarize_posterior`, producing a mean and variance. diff --git a/botorch/posteriors/higher_order.py b/botorch/posteriors/higher_order.py index 376e82d06f..77581c393a 100644 --- a/botorch/posteriors/higher_order.py +++ b/botorch/posteriors/higher_order.py @@ -4,7 +4,7 @@ # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from typing import Optional, Tuple +from typing import Optional import torch from botorch.exceptions.errors import BotorchTensorDimensionError @@ -77,7 +77,7 @@ def base_sample_shape(self): return batch_shape + sampling_shape @property - def batch_range(self) -> Tuple[int, int]: + def batch_range(self) -> tuple[int, int]: r"""The t-batch range. This is used in samplers to identify the t-batch component of the diff --git a/botorch/posteriors/multitask.py b/botorch/posteriors/multitask.py index 29245d7b6a..03a6267dbc 100644 --- a/botorch/posteriors/multitask.py +++ b/botorch/posteriors/multitask.py @@ -3,7 +3,7 @@ # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from typing import Optional, Tuple, Union +from typing import Optional, Union import torch from botorch.exceptions.errors import BotorchTensorDimensionError @@ -75,7 +75,7 @@ def base_sample_shape(self) -> torch.Size: return batch_shape + torch.Size((sampling_shape,)) @property - def batch_range(self) -> Tuple[int, int]: + def batch_range(self) -> tuple[int, int]: r"""The t-batch range. 
This is used in samplers to identify the t-batch component of the @@ -87,7 +87,7 @@ def batch_range(self) -> Tuple[int, int]: def _prepare_base_samples( self, sample_shape: torch.Size, base_samples: Tensor = None - ) -> Tuple[Tensor, Tensor]: + ) -> tuple[Tensor, Tensor]: covariance_matrix = self.joint_covariance_matrix joint_size = covariance_matrix.shape[-1] batch_shape = covariance_matrix.batch_shape diff --git a/botorch/posteriors/posterior.py b/botorch/posteriors/posterior.py index 306c447029..a872d7988c 100644 --- a/botorch/posteriors/posterior.py +++ b/botorch/posteriors/posterior.py @@ -11,7 +11,7 @@ from __future__ import annotations from abc import ABC, abstractmethod, abstractproperty -from typing import Optional, Tuple +from typing import Optional import torch from torch import Tensor @@ -130,7 +130,7 @@ def base_sample_shape(self) -> torch.Size: ) @property - def batch_range(self) -> Tuple[int, int]: + def batch_range(self) -> tuple[int, int]: r"""The t-batch range. This is used in samplers to identify the t-batch component of the diff --git a/botorch/posteriors/posterior_list.py b/botorch/posteriors/posterior_list.py index 7b6a585dde..f49ed55d63 100644 --- a/botorch/posteriors/posterior_list.py +++ b/botorch/posteriors/posterior_list.py @@ -11,7 +11,7 @@ from __future__ import annotations from functools import cached_property -from typing import Any, List, Optional +from typing import Any, Optional import torch from botorch.posteriors.fully_bayesian import GaussianMixturePosterior, MCMC_DIM @@ -67,7 +67,7 @@ def _reshape_tensor(X: Tensor, mcmc_samples: int) -> Tensor: X = X.unsqueeze(MCMC_DIM) return X.expand(*X.shape[:MCMC_DIM], mcmc_samples, *X.shape[MCMC_DIM + 1 :]) - def _reshape_and_cat(self, tensors: List[Tensor]): + def _reshape_and_cat(self, tensors: list[Tensor]): r"""Reshape, if needed, and concatenate (across dim=-1) a list of tensors.""" if self._is_gaussian_mixture: mcmc_samples = self._get_mcmc_batch_dimension() diff --git a/botorch/posteriors/torch.py b/botorch/posteriors/torch.py index d9443a4dac..140bcad88a 100644 --- a/botorch/posteriors/torch.py +++ b/botorch/posteriors/torch.py @@ -10,7 +10,7 @@ from __future__ import annotations -from typing import Any, Dict, Optional +from typing import Any, Optional import torch from botorch.posteriors.posterior import Posterior @@ -78,7 +78,7 @@ def __getattr__(self, name: str) -> Any: """ return getattr(self.distribution, name) - def __getstate__(self) -> Dict[str, Any]: + def __getstate__(self) -> dict[str, Any]: r"""A minimal utility to support pickle protocol. Pickle uses `__get/setstate__` to serialize / deserialize the objects. @@ -88,7 +88,7 @@ def __getstate__(self) -> Dict[str, Any]: """ return self.__dict__ - def __setstate__(self, d: Dict[str, Any]) -> None: + def __setstate__(self, d: dict[str, Any]) -> None: r"""A minimal utility to support pickle protocol.""" self.__dict__ = d diff --git a/botorch/posteriors/transformed.py b/botorch/posteriors/transformed.py index 6e78592f71..fa5721c720 100644 --- a/botorch/posteriors/transformed.py +++ b/botorch/posteriors/transformed.py @@ -6,7 +6,7 @@ from __future__ import annotations -from typing import Callable, Optional, Tuple +from typing import Callable, Optional import torch from botorch.posteriors.posterior import Posterior @@ -49,7 +49,7 @@ def base_sample_shape(self) -> torch.Size: return self._posterior.base_sample_shape @property - def batch_range(self) -> Tuple[int, int]: + def batch_range(self) -> tuple[int, int]: r"""The t-batch range. 
This is used in samplers to identify the t-batch component of the diff --git a/botorch/sampling/base.py b/botorch/sampling/base.py index 47434ee636..6bf3e7fa6d 100644 --- a/botorch/sampling/base.py +++ b/botorch/sampling/base.py @@ -11,7 +11,7 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import Optional, Tuple +from typing import Optional import torch from botorch.exceptions.errors import InputDataError @@ -76,7 +76,7 @@ def forward(self, posterior: Posterior) -> Tensor: """ pass # pragma no cover - def _get_batch_range(self, posterior: Posterior) -> Tuple[int, int]: + def _get_batch_range(self, posterior: Posterior) -> tuple[int, int]: r"""Get the t-batch range of the posterior with an optional override. In rare cases, e.g., in `qMultiStepLookahead`, we may want to override the diff --git a/botorch/sampling/get_sampler.py b/botorch/sampling/get_sampler.py index 0bf4b9bf20..5ff4dd331e 100644 --- a/botorch/sampling/get_sampler.py +++ b/botorch/sampling/get_sampler.py @@ -5,7 +5,7 @@ # LICENSE file in the root directory of this source tree. -from typing import Optional, Type, Union +from typing import Optional, Union import torch from botorch.logging import logger @@ -31,7 +31,7 @@ def _posterior_to_distribution_encoder( posterior: Posterior, -) -> Union[Type[Distribution], Type[Posterior]]: +) -> Union[type[Distribution], type[Posterior]]: r"""An encoder returning the type of the distribution for `TorchPosterior` and the type of the posterior for the rest. """ diff --git a/botorch/sampling/pathwise/paths.py b/botorch/sampling/pathwise/paths.py index 1e778884ff..de840c4e1d 100644 --- a/botorch/sampling/pathwise/paths.py +++ b/botorch/sampling/pathwise/paths.py @@ -7,18 +7,8 @@ from __future__ import annotations from abc import ABC -from typing import ( - Any, - Callable, - Dict, - Iterable, - Iterator, - List, - Mapping, - Optional, - Tuple, - Union, -) +from collections.abc import Iterable, Iterator, Mapping +from typing import Any, Callable, Optional, Union from botorch.exceptions.errors import UnsupportedError from botorch.sampling.pathwise.features import FeatureMap @@ -41,7 +31,7 @@ class PathDict(SamplePath): def __init__( self, paths: Optional[Mapping[str, SamplePath]] = None, - join: Optional[Callable[[List[Tensor]], Tensor]] = None, + join: Optional[Callable[[list[Tensor]], Tensor]] = None, input_transform: Optional[TInputTransform] = None, output_transform: Optional[TOutputTransform] = None, ) -> None: @@ -66,11 +56,11 @@ def __init__( else ModuleDict({} if paths is None else paths) ) - def forward(self, x: Tensor, **kwargs: Any) -> Union[Tensor, Dict[str, Tensor]]: + def forward(self, x: Tensor, **kwargs: Any) -> Union[Tensor, dict[str, Tensor]]: out = [path(x, **kwargs) for path in self.paths.values()] return dict(zip(self.paths, out)) if self.join is None else self.join(out) - def items(self) -> Iterable[Tuple[str, SamplePath]]: + def items(self) -> Iterable[tuple[str, SamplePath]]: return self.paths.items() def keys(self) -> Iterable[str]: @@ -101,7 +91,7 @@ class PathList(SamplePath): def __init__( self, paths: Optional[Iterable[SamplePath]] = None, - join: Optional[Callable[[List[Tensor]], Tensor]] = None, + join: Optional[Callable[[list[Tensor]], Tensor]] = None, input_transform: Optional[TInputTransform] = None, output_transform: Optional[TOutputTransform] = None, ) -> None: @@ -127,7 +117,7 @@ def __init__( else ModuleList({} if paths is None else paths) ) - def forward(self, x: Tensor, **kwargs: Any) -> Union[Tensor, 
List[Tensor]]: + def forward(self, x: Tensor, **kwargs: Any) -> Union[Tensor, list[Tensor]]: out = [path(x, **kwargs) for path in self.paths] return out if self.join is None else self.join(out) diff --git a/botorch/sampling/pathwise/prior_samplers.py b/botorch/sampling/pathwise/prior_samplers.py index e03d6bee5e..32bced7b27 100644 --- a/botorch/sampling/pathwise/prior_samplers.py +++ b/botorch/sampling/pathwise/prior_samplers.py @@ -6,7 +6,7 @@ from __future__ import annotations -from typing import Any, Callable, List, Optional +from typing import Any, Callable, Optional from botorch.models.approximate_gp import ApproximateGPyTorchModel from botorch.models.model_list_gp_regression import ModelListGP @@ -109,7 +109,7 @@ def _draw_kernel_feature_paths_ExactGP( @DrawKernelFeaturePaths.register(ModelListGP) def _draw_kernel_feature_paths_list( model: ModelListGP, - join: Optional[Callable[[List[Tensor]], Tensor]] = None, + join: Optional[Callable[[list[Tensor]], Tensor]] = None, **kwargs: Any, ) -> PathList: paths = [draw_kernel_feature_paths(m, **kwargs) for m in model.models] diff --git a/botorch/sampling/pathwise/utils.py b/botorch/sampling/pathwise/utils.py index 62753aa106..c4ab9ab261 100644 --- a/botorch/sampling/pathwise/utils.py +++ b/botorch/sampling/pathwise/utils.py @@ -7,7 +7,8 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import Any, Callable, Iterable, List, Optional, overload, Tuple, Union +from collections.abc import Iterable +from typing import Any, Callable, Optional, overload, Union import torch from botorch.models.approximate_gp import SingleTaskVariationalGP @@ -207,12 +208,12 @@ def get_output_transform(model: GPyTorchModel) -> Optional[OutcomeUntransformer] @overload -def get_train_inputs(model: Model, transformed: bool = False) -> Tuple[Tensor, ...]: +def get_train_inputs(model: Model, transformed: bool = False) -> tuple[Tensor, ...]: pass # pragma: no cover @overload -def get_train_inputs(model: ModelList, transformed: bool = False) -> List[...]: +def get_train_inputs(model: ModelList, transformed: bool = False) -> list[...]: pass # pragma: no cover @@ -221,7 +222,7 @@ def get_train_inputs(model: Model, transformed: bool = False): @GetTrainInputs.register(Model) -def _get_train_inputs_Model(model: Model, transformed: bool = False) -> Tuple[Tensor]: +def _get_train_inputs_Model(model: Model, transformed: bool = False) -> tuple[Tensor]: if not transformed: original_train_input = getattr(model, "_original_train_inputs", None) if torch.is_tensor(original_train_input): @@ -240,7 +241,7 @@ def _get_train_inputs_Model(model: Model, transformed: bool = False) -> Tuple[Te @GetTrainInputs.register(SingleTaskVariationalGP) def _get_train_inputs_SingleTaskVariationalGP( model: SingleTaskVariationalGP, transformed: bool = False -) -> Tuple[Tensor]: +) -> tuple[Tensor]: (X,) = model.model.train_inputs if model.training != transformed: return (X,) @@ -255,7 +256,7 @@ def _get_train_inputs_SingleTaskVariationalGP( @GetTrainInputs.register(ModelList) def _get_train_inputs_ModelList( model: ModelList, transformed: bool = False -) -> List[...]: +) -> list[...]: return [get_train_inputs(m, transformed=transformed) for m in model.models] @@ -265,7 +266,7 @@ def get_train_targets(model: Model, transformed: bool = False) -> Tensor: @overload -def get_train_targets(model: ModelList, transformed: bool = False) -> List[...]: +def get_train_targets(model: ModelList, transformed: bool = False) -> list[...]: pass # pragma: no cover @@ -306,5 +307,5 @@ def 
_get_train_targets_SingleTaskVariationalGP( @GetTrainTargets.register(ModelList) def _get_train_targets_ModelList( model: ModelList, transformed: bool = False -) -> List[...]: +) -> list[...]: return [get_train_targets(m, transformed=transformed) for m in model.models] diff --git a/botorch/test_functions/base.py b/botorch/test_functions/base.py index a05fa13a22..3c05d4fef7 100644 --- a/botorch/test_functions/base.py +++ b/botorch/test_functions/base.py @@ -11,7 +11,7 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import List, Optional, Tuple, Union +from typing import Optional, Union import torch from botorch.exceptions.errors import InputDataError @@ -23,12 +23,12 @@ class BaseTestProblem(Module, ABC): r"""Base class for test functions.""" dim: int - _bounds: List[Tuple[float, float]] + _bounds: list[tuple[float, float]] _check_grad_at_opt: bool = True def __init__( self, - noise_std: Union[None, float, List[float]] = None, + noise_std: Union[None, float, list[float]] = None, negate: bool = False, ) -> None: r"""Base constructor for test functions. @@ -89,7 +89,7 @@ class ConstrainedBaseTestProblem(BaseTestProblem, ABC): num_constraints: int _check_grad_at_opt: bool = False - constraint_noise_std: Union[None, float, List[float]] = None + constraint_noise_std: Union[None, float, list[float]] = None def evaluate_slack(self, X: Tensor, noise: bool = True) -> Tensor: r"""Evaluate the constraint slack on a set of points. @@ -153,12 +153,12 @@ class MultiObjectiveTestProblem(BaseTestProblem, ABC): """ num_objectives: int - _ref_point: List[float] + _ref_point: list[float] _max_hv: Optional[float] = None def __init__( self, - noise_std: Union[None, float, List[float]] = None, + noise_std: Union[None, float, list[float]] = None, negate: bool = False, ) -> None: r"""Base constructor for multi-objective test functions. 
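These base-class annotations are easiest to read against a concrete subclass. A hypothetical toy problem, assuming the `evaluate_true`/`forward(X, noise=...)` contract that the `evaluate_slack` signature above also follows:

    import torch
    from torch import Tensor
    from botorch.test_functions.base import BaseTestProblem

    class SumOfSquares(BaseTestProblem):
        # Toy problem f(x) = sum_i x_i^2 on [-1, 1]^2; illustrative only.

        dim = 2
        _bounds = [(-1.0, 1.0), (-1.0, 1.0)]  # one (lower, upper) pair per dimension

        def evaluate_true(self, X: Tensor) -> Tensor:
            return X.pow(2).sum(dim=-1)

    problem = SumOfSquares(noise_std=0.1)  # a list here would mean per-output stds
    X = 2 * torch.rand(5, 2) - 1
    y_noisy = problem(X)                # evaluate_true plus observation noise
    y_clean = problem(X, noise=False)   # noise-free evaluation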
diff --git a/botorch/test_functions/multi_objective.py b/botorch/test_functions/multi_objective.py index 01262fbf3b..19b1140c97 100644 --- a/botorch/test_functions/multi_objective.py +++ b/botorch/test_functions/multi_objective.py @@ -76,7 +76,7 @@ import math from abc import ABC, abstractmethod from math import pi -from typing import List, Tuple, Union +from typing import Union import torch from botorch.exceptions.errors import UnsupportedError @@ -118,7 +118,7 @@ class BraninCurrin(MultiObjectiveTestProblem): def __init__( self, - noise_std: Union[None, float, List[float]] = None, + noise_std: Union[None, float, list[float]] = None, negate: bool = False, ) -> None: r""" @@ -170,7 +170,7 @@ class DH(MultiObjectiveTestProblem, ABC): """ num_objectives = 2 - _ref_point: List[float] = [1.1, 1.1] + _ref_point: list[float] = [1.1, 1.1] _x_1_lb: float _area_under_curve: float _min_dim: int @@ -178,7 +178,7 @@ class DH(MultiObjectiveTestProblem, ABC): def __init__( self, dim: int, - noise_std: Union[None, float, List[float]] = None, + noise_std: Union[None, float, list[float]] = None, negate: bool = False, ) -> None: r""" @@ -286,7 +286,7 @@ class DH3(DH): _min_dim = 3 @staticmethod - def _exp_args(x: Tensor) -> Tuple[Tensor, Tensor]: + def _exp_args(x: Tensor) -> tuple[Tensor, Tensor]: exp_arg_1 = -((x - 0.35) / 0.25).pow(2) exp_arg_2 = -((x - 0.85) / 0.03).pow(2) return exp_arg_1, exp_arg_2 @@ -338,7 +338,7 @@ def __init__( self, dim: int, num_objectives: int = 2, - noise_std: Union[None, float, List[float]] = None, + noise_std: Union[None, float, list[float]] = None, negate: bool = False, ) -> None: r""" @@ -606,7 +606,7 @@ class GMM(MultiObjectiveTestProblem): def __init__( self, - noise_std: Union[None, float, List[float]] = None, + noise_std: Union[None, float, list[float]] = None, negate: bool = False, num_objectives: int = 2, ) -> None: @@ -934,7 +934,7 @@ def __init__( self, dim: int, num_objectives: int = 2, - noise_std: Union[None, float, List[float]] = None, + noise_std: Union[None, float, list[float]] = None, negate: bool = False, ) -> None: r""" @@ -1244,8 +1244,8 @@ class ConstrainedBraninCurrin(BraninCurrin, ConstrainedBaseTestProblem): def __init__( self, - noise_std: Union[None, float, List[float]] = None, - constraint_noise_std: Union[None, float, List[float]] = None, + noise_std: Union[None, float, list[float]] = None, + constraint_noise_std: Union[None, float, list[float]] = None, negate: bool = False, ) -> None: r""" @@ -1356,8 +1356,8 @@ class MW7(MultiObjectiveTestProblem, ConstrainedBaseTestProblem): def __init__( self, dim: int, - noise_std: Union[None, float, List[float]] = None, - constraint_noise_std: Union[None, float, List[float]] = None, + noise_std: Union[None, float, list[float]] = None, + constraint_noise_std: Union[None, float, list[float]] = None, negate: bool = False, ) -> None: r""" diff --git a/botorch/test_functions/sensitivity_analysis.py b/botorch/test_functions/sensitivity_analysis.py index f4f2f006f5..4c8a69f0db 100644 --- a/botorch/test_functions/sensitivity_analysis.py +++ b/botorch/test_functions/sensitivity_analysis.py @@ -4,7 +4,7 @@ # LICENSE file in the root directory of this source tree. 
import math -from typing import List, Optional, Tuple +from typing import Optional import torch @@ -59,7 +59,7 @@ def __init__( def _optimal_value(self) -> float: raise NotImplementedError - def compute_dgsm(self, X: Tensor) -> Tuple[List[float], List[float], List[float]]: + def compute_dgsm(self, X: Tensor) -> tuple[list[float], list[float], list[float]]: r"""Compute derivative global sensitivity measures. This function can be called separately to estimate the dgsm measure @@ -125,7 +125,7 @@ class Gsobol(SyntheticTestFunction): def __init__( self, dim: int, - a: List = None, + a: Optional[list] = None, noise_std: Optional[float] = None, negate: bool = False, ) -> None: diff --git a/botorch/test_functions/synthetic.py b/botorch/test_functions/synthetic.py index 4da4d468c5..69630a5c08 100644 --- a/botorch/test_functions/synthetic.py +++ b/botorch/test_functions/synthetic.py @@ -48,7 +48,7 @@ import math from abc import ABC -from typing import List, Optional, Tuple, Union +from typing import Optional, Union import torch from botorch.exceptions.errors import InputDataError @@ -61,14 +61,14 @@ class SyntheticTestFunction(BaseTestProblem, ABC): r"""Base class for synthetic test functions.""" _optimal_value: Optional[float] = None - _optimizers: Optional[List[Tuple[float, ...]]] = None + _optimizers: Optional[list[tuple[float, ...]]] = None num_objectives: int = 1 def __init__( self, - noise_std: Union[None, float, List[float]] = None, + noise_std: Union[None, float, list[float]] = None, negate: bool = False, - bounds: Optional[List[Tuple[float, float]]] = None, + bounds: Optional[list[tuple[float, float]]] = None, ) -> None: r""" Args: @@ -85,7 +85,7 @@ def __init__( if bounds is not None: # Ensure at least one optimizer lies within the custom bounds def in_bounds( - optimizer: Tuple[float, ...], bounds: List[Tuple[float, float]] + optimizer: tuple[float, ...], bounds: list[tuple[float, float]] ) -> bool: for i, xopt in enumerate(optimizer): lower, upper = bounds[i] @@ -138,7 +138,7 @@ def __init__( dim: int = 2, noise_std: Optional[float] = None, negate: bool = False, - bounds: Optional[List[Tuple[float, float]]] = None, + bounds: Optional[list[tuple[float, float]]] = None, ) -> None: r""" Args: @@ -265,7 +265,7 @@ def __init__( dim=2, noise_std: Optional[float] = None, negate: bool = False, - bounds: Optional[List[Tuple[float, float]]] = None, + bounds: Optional[list[tuple[float, float]]] = None, ) -> None: r""" Args: @@ -334,7 +334,7 @@ def __init__( dim=2, noise_std: Optional[float] = None, negate: bool = False, - bounds: Optional[List[Tuple[float, float]]] = None, + bounds: Optional[list[tuple[float, float]]] = None, ) -> None: r""" Args: @@ -376,7 +376,7 @@ def __init__( dim=6, noise_std: Optional[float] = None, negate: bool = False, - bounds: Optional[List[Tuple[float, float]]] = None, + bounds: Optional[list[tuple[float, float]]] = None, ) -> None: r""" Args: @@ -510,7 +510,7 @@ def __init__( dim=2, noise_std: Optional[float] = None, negate: bool = False, - bounds: Optional[List[Tuple[float, float]]] = None, + bounds: Optional[list[tuple[float, float]]] = None, ) -> None: r""" Args: @@ -552,7 +552,7 @@ def __init__( dim=2, noise_std: Optional[float] = None, negate: bool = False, - bounds: Optional[List[Tuple[float, float]]] = None, + bounds: Optional[list[tuple[float, float]]] = None, ) -> None: r""" Args: @@ -612,7 +612,7 @@ def __init__( dim=4, noise_std: Optional[float] = None, negate: bool = False, - bounds: Optional[List[Tuple[float, float]]] = None, + bounds: Optional[list[tuple[float,
float]]] = None, ) -> None: r""" Args: @@ -648,7 +648,7 @@ def __init__( dim=2, noise_std: Optional[float] = None, negate: bool = False, - bounds: Optional[List[Tuple[float, float]]] = None, + bounds: Optional[list[tuple[float, float]]] = None, ) -> None: r""" Args: @@ -687,7 +687,7 @@ def __init__( dim=2, noise_std: Optional[float] = None, negate: bool = False, - bounds: Optional[List[Tuple[float, float]]] = None, + bounds: Optional[list[tuple[float, float]]] = None, ) -> None: r""" Args: @@ -729,7 +729,7 @@ def __init__( m: int = 10, noise_std: Optional[float] = None, negate: bool = False, - bounds: Optional[List[Tuple[float, float]]] = None, + bounds: Optional[list[tuple[float, float]]] = None, ) -> None: r""" Args: @@ -795,7 +795,7 @@ def __init__( dim=2, noise_std: Optional[float] = None, negate: bool = False, - bounds: Optional[List[Tuple[float, float]]] = None, + bounds: Optional[list[tuple[float, float]]] = None, ) -> None: r""" Args: @@ -839,10 +839,10 @@ class ConstrainedSyntheticTestFunction( def __init__( self, - noise_std: Union[None, float, List[float]] = None, - constraint_noise_std: Union[None, float, List[float]] = None, + noise_std: Union[None, float, list[float]] = None, + constraint_noise_std: Union[None, float, list[float]] = None, negate: bool = False, - bounds: Optional[List[Tuple[float, float]]] = None, + bounds: Optional[list[tuple[float, float]]] = None, ) -> None: r""" Args: @@ -864,7 +864,7 @@ def __init__( def _validate_constraint_noise( self, constraint_noise_std - ) -> Union[None, float, List[float]]: + ) -> Union[None, float, list[float]]: """ Validates that constraint_noise_std has length equal to the number of constraints, if given as a list @@ -932,9 +932,9 @@ def __init__( self, dim: int = 6, noise_std: Union[None, float] = None, - constraint_noise_std: Union[None, float, List[float]] = None, + constraint_noise_std: Union[None, float, list[float]] = None, negate: bool = False, - bounds: Optional[List[Tuple[float, float]]] = None, + bounds: Optional[list[tuple[float, float]]] = None, ) -> None: r""" Args: @@ -970,9 +970,9 @@ def __init__( self, dim: int = 6, noise_std: Union[None, float] = None, - constraint_noise_std: Union[None, float, List[float]] = None, + constraint_noise_std: Union[None, float, list[float]] = None, negate: bool = False, - bounds: Optional[List[Tuple[float, float]]] = None, + bounds: Optional[list[tuple[float, float]]] = None, ) -> None: r""" Args: diff --git a/botorch/test_functions/utils.py b/botorch/test_functions/utils.py index 969524822d..0726e96512 100644 --- a/botorch/test_functions/utils.py +++ b/botorch/test_functions/utils.py @@ -7,7 +7,7 @@ from __future__ import annotations -from typing import Optional, Tuple +from typing import Optional import torch @@ -15,7 +15,7 @@ def round_nearest( - X: Tensor, increment: float, bounds: Optional[Tuple[float, float]] + X: Tensor, increment: float, bounds: Optional[tuple[float, float]] ) -> Tensor: r"""Rounds the input tensor to the nearest multiple of `increment`. 
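As a usage sketch for `round_nearest` under the signature above (assuming values are rounded to the nearest multiple of `increment` and then adjusted to lie within `bounds`):

    import torch
    from botorch.test_functions.utils import round_nearest

    X = torch.tensor([0.12, 0.49, 0.97])
    round_nearest(X, increment=0.25, bounds=(0.0, 0.75))
    # expected: tensor([0.0000, 0.5000, 0.7500])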
diff --git a/botorch/utils/constants.py b/botorch/utils/constants.py index 367e376d58..2488828dcd 100644 --- a/botorch/utils/constants.py +++ b/botorch/utils/constants.py @@ -6,9 +6,11 @@ from __future__ import annotations +from collections.abc import Iterator + from functools import lru_cache from numbers import Number -from typing import Iterator, Optional, Tuple, Union +from typing import Optional, Union import torch from torch import Tensor @@ -19,7 +21,7 @@ def get_constants( values: Union[Number, Iterator[Number]], device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, -) -> Union[Tensor, Tuple[Tensor, ...]]: +) -> Union[Tensor, tuple[Tensor, ...]]: r"""Returns scalar-valued Tensors containing each of the given constants. Used to expedite tensor operations involving scalar arithmetic. Note that the returned Tensors should not be modified in-place.""" diff --git a/botorch/utils/constraints.py b/botorch/utils/constraints.py index d28d309b84..84d772ea2d 100644 --- a/botorch/utils/constraints.py +++ b/botorch/utils/constraints.py @@ -11,15 +11,15 @@ from __future__ import annotations from functools import partial -from typing import Callable, List, Optional, Tuple +from typing import Callable, Optional import torch from torch import Tensor def get_outcome_constraint_transforms( - outcome_constraints: Optional[Tuple[Tensor, Tensor]] -) -> Optional[List[Callable[[Tensor], Tensor]]]: + outcome_constraints: Optional[tuple[Tensor, Tensor]] -) -> Optional[list[Callable[[Tensor], Tensor]]]: r"""Create outcome constraint callables from outcome constraint tensors. Args: @@ -68,7 +68,7 @@ def get_monotonicity_constraints( descending: bool = False, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: """Returns a system of linear inequalities `(A, b)` that generically encodes order constraints on the elements of a `d`-dimensional space, i.e. `A @ x < b` implies `x[i] < x[i + 1]` for a `d`-dimensional vector `x`. diff --git a/botorch/utils/context_managers.py b/botorch/utils/context_managers.py index b0e31d1750..e9d2e9f76e 100644 --- a/botorch/utils/context_managers.py +++ b/botorch/utils/context_managers.py @@ -10,8 +10,10 @@ from __future__ import annotations +from collections.abc import Generator, Iterable + from contextlib import contextmanager -from typing import Any, Callable, Dict, Generator, Iterable, NamedTuple, Optional, Union +from typing import Any, Callable, NamedTuple, Optional, Union from torch import device as Device, dtype as Dtype, Tensor from torch.nn import Module @@ -46,10 +48,10 @@ def delattr_ctx( @contextmanager def parameter_rollback_ctx( - parameters: Dict[str, Tensor], - checkpoint: Optional[Dict[str, TensorCheckpoint]] = None, + parameters: dict[str, Tensor], + checkpoint: Optional[dict[str, TensorCheckpoint]] = None, **tkwargs: Any, -) -> Generator[Dict[str, TensorCheckpoint], None, None]: +) -> Generator[dict[str, TensorCheckpoint], None, None]: r"""Contextmanager that exits by rolling back a module's state_dict.
Args: @@ -91,9 +93,9 @@ def parameter_rollback_ctx( def module_rollback_ctx( module: Module, name_filter: Optional[Callable[[str], bool]] = None, - checkpoint: Optional[Dict[str, TensorCheckpoint]] = None, + checkpoint: Optional[dict[str, TensorCheckpoint]] = None, **tkwargs: Any, -) -> Generator[Dict[str, TensorCheckpoint], None, None]: +) -> Generator[dict[str, TensorCheckpoint], None, None]: r"""Contextmanager that exits by rolling back a module's state_dict. Args: @@ -139,7 +141,7 @@ def module_rollback_ctx( @contextmanager def zero_grad_ctx( - parameters: Union[Dict[str, Tensor], Iterable[Tensor]], + parameters: Union[dict[str, Tensor], Iterable[Tensor]], zero_on_enter: bool = True, zero_on_exit: bool = False, ) -> Generator[None, None, None]: diff --git a/botorch/utils/datasets.py b/botorch/utils/datasets.py index 3bb31b1a9a..ac6337596c 100644 --- a/botorch/utils/datasets.py +++ b/botorch/utils/datasets.py @@ -9,7 +9,7 @@ from __future__ import annotations import warnings -from typing import Any, Dict, List, Optional, Union +from typing import Any, Optional, Union import torch from botorch.exceptions.errors import InputDataError, UnsupportedError @@ -50,8 +50,8 @@ def __init__( X: Union[BotorchContainer, Tensor], Y: Union[BotorchContainer, Tensor], *, - feature_names: List[str], - outcome_names: List[str], + feature_names: list[str], + outcome_names: list[str], Yvar: Union[BotorchContainer, Tensor, None] = None, validate_init: bool = True, ) -> None: @@ -162,8 +162,8 @@ def __init__( X: Union[BotorchContainer, Tensor], Y: Union[BotorchContainer, Tensor], Yvar: Union[BotorchContainer, Tensor], - feature_names: List[str], - outcome_names: List[str], + feature_names: list[str], + outcome_names: list[str], validate_init: bool = True, ) -> None: r"""Initialize a `FixedNoiseDataset` -- deprecated!""" @@ -218,8 +218,8 @@ def __init__( self, X: SliceContainer, Y: Union[BotorchContainer, Tensor], - feature_names: List[str], - outcome_names: List[str], + feature_names: list[str], + outcome_names: list[str], validate_init: bool = True, ) -> None: r"""Construct a `RankingDataset`. @@ -287,7 +287,7 @@ class MultiTaskDataset(SupervisedDataset): def __init__( self, - datasets: List[SupervisedDataset], + datasets: list[SupervisedDataset], target_outcome_name: str, task_feature_index: Optional[int] = None, ): @@ -303,7 +303,7 @@ def __init__( If given, we sanity-check that the names of the task features match between all datasets. """ - self.datasets: Dict[str, SupervisedDataset] = { + self.datasets: dict[str, SupervisedDataset] = { ds.outcome_names[0]: ds for ds in datasets } self.target_outcome_name = target_outcome_name @@ -323,7 +323,7 @@ def from_joint_dataset( dataset: SupervisedDataset, task_feature_index: int, target_task_value: int, - outcome_names_per_task: Optional[Dict[int, str]] = None, + outcome_names_per_task: Optional[dict[int, str]] = None, ) -> MultiTaskDataset: r"""Construct a `MultiTaskDataset` from a joint dataset that includes the data for all tasks with the task feature index. 
@@ -382,7 +382,7 @@ def from_joint_dataset( task_feature_index=task_feature_index, ) - def _validate_datasets(self, datasets: List[SupervisedDataset]) -> None: + def _validate_datasets(self, datasets: list[SupervisedDataset]) -> None: """Validates that: * Each dataset models only one outcome; * Each outcome is modeled by only one dataset; @@ -501,9 +501,9 @@ class ContextualDataset(SupervisedDataset): def __init__( self, - datasets: List[SupervisedDataset], - parameter_decomposition: Dict[str, List[str]], - metric_decomposition: Optional[Dict[str, List[str]]] = None, + datasets: list[SupervisedDataset], + parameter_decomposition: dict[str, list[str]], + metric_decomposition: Optional[dict[str, list[str]]] = None, ): """Construct a `ContextualDataset`. @@ -516,7 +516,7 @@ def __init__( Values are the lists of metric names belonging to the context: {'context1': ['m1_c1'], 'context2': ['m1_c2'],}. """ - self.datasets: Dict[str, SupervisedDataset] = { + self.datasets: dict[str, SupervisedDataset] = { ds.outcome_names[0]: ds for ds in datasets } self.feature_names = datasets[0].feature_names @@ -561,7 +561,7 @@ def Yvar(self) -> Tensor: else: return torch.cat(Yvars, dim=-1) - def _extract_context_buckets(self) -> List[str]: + def _extract_context_buckets(self) -> list[str]: """Determines the context buckets from the data, and sets the context_buckets attribute. diff --git a/botorch/utils/dispatcher.py b/botorch/utils/dispatcher.py index 7e4fe2e113..ee372b8d2f 100644 --- a/botorch/utils/dispatcher.py +++ b/botorch/utils/dispatcher.py @@ -7,7 +7,7 @@ from __future__ import annotations from inspect import getsource, getsourcefile -from typing import Any, Callable, Optional, Tuple, Type +from typing import Any, Callable, Optional from multipledispatch.dispatcher import ( Dispatcher as MDDispatcher, @@ -16,7 +16,7 @@ ) -def type_bypassing_encoder(arg: Any) -> Type: +def type_bypassing_encoder(arg: Any) -> type: # Allow type variables to be passed as pre-encoded arguments return arg if isinstance(arg, type) else type(arg) @@ -32,7 +32,7 @@ def __init__( self, name: str, doc: Optional[str] = None, - encoder: Callable[Any, Type] = type, + encoder: Callable[Any, type] = type, ) -> None: """ Args: @@ -48,7 +48,7 @@ def __init__( def __getitem__( self, args: Optional[Any] = None, - types: Optional[Tuple[Type]] = None, + types: Optional[tuple[type]] = None, ) -> Callable: r"""Method lookup. @@ -106,7 +106,7 @@ def __call__(self, *args: Any, **kwargs: Any) -> Any: "found, but none completed successfully" ) - def dispatch(self, *types: Type) -> Callable: + def dispatch(self, *types: type) -> Callable: r"""Method lookup strategy. Checks for an exact match before traversing the set of registered methods according to the current ordering. 
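The `encoder`/`dispatch` machinery typed above performs type-based method lookup: arguments are encoded to a tuple of types, which keys the registry. An illustrative sketch (the registered `describe` functions are hypothetical):

    from botorch.utils.dispatcher import Dispatcher

    describe = Dispatcher("describe")

    @describe.register(int)
    def _describe_int(x):
        return f"int: {x}"

    @describe.register(list)
    def _describe_list(xs):
        return f"list of {len(xs)} items"

    describe(3)       # encode_args -> (int,), dispatches to _describe_int
    describe([1, 2])  # encode_args -> (list,), dispatches to _describe_list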
@@ -124,7 +124,7 @@ def dispatch(self, *types: Type) -> Callable: except StopIteration: return None - def encode_args(self, args: Any) -> Tuple[Type]: + def encode_args(self, args: Any) -> tuple[type]: r"""Converts arguments into a tuple of types used during method lookup.""" return tuple(map(self.encoder, args if isinstance(args, tuple) else (args,))) @@ -148,5 +148,5 @@ def source(self, *args, **kwargs) -> None: print(self._source(*args)) @property - def encoder(self) -> Callable[Any, Type]: + def encoder(self) -> Callable[Any, type]: return self._encoder diff --git a/botorch/utils/feasible_volume.py b/botorch/utils/feasible_volume.py index affdd71fc0..d14fb8fe46 100644 --- a/botorch/utils/feasible_volume.py +++ b/botorch/utils/feasible_volume.py @@ -6,7 +6,7 @@ from __future__ import annotations -from typing import Callable, List, Optional, Tuple +from typing import Callable, Optional import botorch.models.model as model import torch @@ -20,8 +20,8 @@ def get_feasible_samples( samples: Tensor, - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, -) -> Tuple[Tensor, float]: + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, +) -> tuple[Tensor, float]: r""" Checks which of the samples satisfy all of the inequality constraints. @@ -59,7 +59,7 @@ def get_feasible_samples( def get_outcome_feasibility_probability( model: model.Model, X: Tensor, - outcome_constraints: List[Callable[[Tensor], Tensor]], + outcome_constraints: list[Callable[[Tensor], Tensor]], threshold: float = 0.1, nsample_outcome: int = 1000, seed: Optional[int] = None, @@ -119,8 +119,8 @@ def get_outcome_feasibility_probability( def estimate_feasible_volume( bounds: Tensor, model: model.Model, - outcome_constraints: List[Callable[[Tensor], Tensor]], - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, + outcome_constraints: list[Callable[[Tensor], Tensor]], + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, nsample_feature: int = 1000, nsample_outcome: int = 1000, threshold: float = 0.1, @@ -128,7 +128,7 @@ def estimate_feasible_volume( seed: Optional[int] = None, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, -) -> Tuple[float, float]: +) -> tuple[float, float]: r""" Monte Carlo estimate of the feasible volume with respect to feature constraints and outcome constraints. diff --git a/botorch/utils/gp_sampling.py b/botorch/utils/gp_sampling.py index c5ab09b981..c85a73f7c1 100644 --- a/botorch/utils/gp_sampling.py +++ b/botorch/utils/gp_sampling.py @@ -9,7 +9,7 @@ import warnings from copy import deepcopy from math import pi -from typing import List, Optional +from typing import Optional import torch from botorch.models.converter import batched_to_model_list @@ -292,8 +292,8 @@ def _check_forward_X_shape_compatibility(self, X: Tensor) -> None: def get_deterministic_model_multi_samples( - weights: List[Tensor], - bases: List[RandomFourierFeatures], + weights: list[Tensor], + bases: list[RandomFourierFeatures], ) -> GenericDeterministicModel: """ Get a batched deterministic model that batch evaluates `n_samples` function @@ -334,7 +334,7 @@ def _f(X): def get_deterministic_model( - weights: List[Tensor], bases: List[RandomFourierFeatures] + weights: list[Tensor], bases: list[RandomFourierFeatures] ) -> GenericDeterministicModel: """Get a deterministic model using the provided weights and bases for each output. 
@@ -357,8 +357,8 @@ def evaluate_gp_sample(X): def get_deterministic_model_list( - weights: List[Tensor], - bases: List[RandomFourierFeatures], + weights: list[Tensor], + bases: list[RandomFourierFeatures], ) -> ModelList: """Get a deterministic model list using the provided weights and bases for each output. diff --git a/botorch/utils/multi_objective/box_decompositions/box_decomposition_list.py b/botorch/utils/multi_objective/box_decompositions/box_decomposition_list.py index 262fdf890e..56f01cdaa9 100644 --- a/botorch/utils/multi_objective/box_decompositions/box_decomposition_list.py +++ b/botorch/utils/multi_objective/box_decompositions/box_decomposition_list.py @@ -8,7 +8,7 @@ from __future__ import annotations -from typing import List, Union +from typing import Union import torch from botorch.exceptions.errors import BotorchTensorDimensionError @@ -37,7 +37,7 @@ def __init__(self, *box_decompositions: BoxDecomposition) -> None: self.box_decompositions = ModuleList(box_decompositions) @property - def pareto_Y(self) -> List[Tensor]: + def pareto_Y(self) -> list[Tensor]: r"""This returns the non-dominated set. Note: Internally, we store the negative pareto set (minimization). @@ -94,7 +94,7 @@ def get_hypercell_bounds(self) -> Tensor: return torch.stack(bounds_list, dim=-3) - def update(self, Y: Union[List[Tensor], Tensor]) -> None: + def update(self, Y: Union[list[Tensor], Tensor]) -> None: r"""Update the partitioning. Args: @@ -106,7 +106,7 @@ def update(self, Y: Union[List[Tensor], Tensor]) -> None: torch.is_tensor(Y) and Y.ndim != 3 and Y.shape[0] != len(self.box_decompositions) - ) or (isinstance(Y, List) and len(Y) != len(self.box_decompositions)): + ) or (isinstance(Y, list) and len(Y) != len(self.box_decompositions)): raise BotorchTensorDimensionError( "BoxDecompositionList.update requires either a batched tensor Y, " "with one batch per box decomposition or a list of tensors with " diff --git a/botorch/utils/multi_objective/box_decompositions/utils.py b/botorch/utils/multi_objective/box_decompositions/utils.py index 2c410152d5..2c65a103c0 100644 --- a/botorch/utils/multi_objective/box_decompositions/utils.py +++ b/botorch/utils/multi_objective/box_decompositions/utils.py @@ -6,7 +6,7 @@ r"""Utilities for box decomposition algorithms.""" -from typing import Optional, Tuple +from typing import Optional import torch from botorch.exceptions.errors import BotorchTensorDimensionError, UnsupportedError @@ -102,7 +102,7 @@ def _pad_batch_pareto_frontier( def compute_local_upper_bounds( U: Tensor, Z: Tensor, z: Tensor -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Compute local upper bounds. Note: this assumes minimization. @@ -197,7 +197,7 @@ def get_partition_bounds(Z: Tensor, U: Tensor, ref_point: Tensor) -> Tensor: def update_local_upper_bounds_incremental( new_pareto_Y: Tensor, U: Tensor, Z: Tensor -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Update the current local upper bounds with the new pareto points. This assumes minimization.
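Note that the `isinstance(Y, List)` -> `isinstance(Y, list)` change above is more than cosmetic: the builtin is the canonical runtime check, whereas the `typing` alias is deprecated and its subscripted form is rejected outright. A quick sketch of the distinction (behavior as of recent CPython versions):

    from typing import List

    isinstance([1, 2], list)  # True -- the idiomatic runtime check
    isinstance([1, 2], List)  # also True, but the alias is deprecated
    # isinstance([1, 2], List[int])  # TypeError: subscripted generics
    #                                # cannot be used with isinstance()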
diff --git a/botorch/utils/multi_objective/hypervolume.py b/botorch/utils/multi_objective/hypervolume.py index bf7f7d55de..310185f9ec 100644 --- a/botorch/utils/multi_objective/hypervolume.py +++ b/botorch/utils/multi_objective/hypervolume.py @@ -26,7 +26,7 @@ from itertools import combinations -from typing import Callable, List, Optional, Union +from typing import Callable, Optional, Union import torch from botorch.acquisition.cached_cholesky import CachedCholeskyMCSamplerMixin @@ -322,7 +322,7 @@ def _initialize_multilist(self, pareto_Y: Tensor) -> None: self.list.extend(nodes, i) -def sort_by_dimension(nodes: List[Node], i: int) -> None: +def sort_by_dimension(nodes: list[Node], i: int) -> None: r"""Sorts the list of nodes in-place by the specified objective. Args: @@ -399,7 +399,7 @@ def append(self, node: Node, index: int) -> None: self.sentinel.prev[index] = node last.next[index] = node - def extend(self, nodes: List[Node], index: int) -> None: + def extend(self, nodes: list[Node], index: int) -> None: r"""Extends the list at the given index with the nodes. Args: @@ -508,11 +508,11 @@ class NoisyExpectedHypervolumeMixin(CachedCholeskyMCSamplerMixin): def __init__( self, model: Model, - ref_point: Union[List[float], Tensor], + ref_point: Union[list[float], Tensor], X_baseline: Tensor, sampler: Optional[MCSampler] = None, objective: Optional[MCMultiOutputObjective] = None, - constraints: Optional[List[Callable[[Tensor], Tensor]]] = None, + constraints: Optional[list[Callable[[Tensor], Tensor]]] = None, X_pending: Optional[Tensor] = None, prune_baseline: bool = False, alpha: float = 0.0, diff --git a/botorch/utils/multitask.py b/botorch/utils/multitask.py index aade4ee99e..ca8a6810c1 100644 --- a/botorch/utils/multitask.py +++ b/botorch/utils/multitask.py @@ -10,15 +10,13 @@ from __future__ import annotations -from typing import List - import torch from gpytorch.distributions import MultitaskMultivariateNormal from gpytorch.distributions.multivariate_normal import MultivariateNormal from linear_operator import to_linear_operator -def separate_mtmvn(mvn: MultitaskMultivariateNormal) -> List[MultivariateNormal]: +def separate_mtmvn(mvn: MultitaskMultivariateNormal) -> list[MultivariateNormal]: """ Separate a MTMVN into a list of MVNs, where covariance across data within each task are preserved, while covariance across task are dropped. 
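A hedged sketch of `separate_mtmvn` on a toy distribution, assuming the gpytorch convention of an `n x t` mean and an `nt x nt` joint covariance:

    import torch
    from gpytorch.distributions import MultitaskMultivariateNormal
    from botorch.utils.multitask import separate_mtmvn

    n, t = 3, 2
    mtmvn = MultitaskMultivariateNormal(torch.zeros(n, t), torch.eye(n * t))
    mvns = separate_mtmvn(mtmvn)  # list[MultivariateNormal], one per task
    assert len(mvns) == t and mvns[0].mean.shape == (n,)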
diff --git a/botorch/utils/objective.py b/botorch/utils/objective.py index 277a3bfadd..7fc89210da 100644 --- a/botorch/utils/objective.py +++ b/botorch/utils/objective.py @@ -10,7 +10,7 @@ from __future__ import annotations -from typing import Callable, List, Optional, Union +from typing import Callable, Optional, Union import torch from botorch.utils.safe_math import log_fatmoid, logexpit @@ -65,7 +65,7 @@ def _objective(Y: Tensor, X: Optional[Tensor] = None): def apply_constraints_nonnegative_soft( obj: Tensor, - constraints: List[Callable[[Tensor], Tensor]], + constraints: list[Callable[[Tensor], Tensor]], samples: Tensor, eta: Union[Tensor, float], ) -> Tensor: @@ -99,7 +99,7 @@ def apply_constraints_nonnegative_soft( def compute_feasibility_indicator( - constraints: Optional[List[Callable[[Tensor], Tensor]]], + constraints: Optional[list[Callable[[Tensor], Tensor]]], samples: Tensor, marginalize_dim: Optional[int] = None, ) -> Tensor: @@ -132,7 +132,7 @@ def compute_feasibility_indicator( def compute_smoothed_feasibility_indicator( - constraints: List[Callable[[Tensor], Tensor]], + constraints: list[Callable[[Tensor], Tensor]], samples: Tensor, eta: Union[Tensor, float], log: bool = False, @@ -182,7 +182,7 @@ def compute_smoothed_feasibility_indicator( def apply_constraints( obj: Tensor, - constraints: List[Callable[[Tensor], Tensor]], + constraints: list[Callable[[Tensor], Tensor]], samples: Tensor, infeasible_cost: float, eta: Union[Tensor, float] = 1e-3, diff --git a/botorch/utils/probability/bvn.py b/botorch/utils/probability/bvn.py index 35fba31fd8..1499a2a3ac 100644 --- a/botorch/utils/probability/bvn.py +++ b/botorch/utils/probability/bvn.py @@ -19,7 +19,7 @@ from __future__ import annotations from math import pi as _pi -from typing import Optional, Tuple +from typing import Optional import torch from botorch.exceptions import UnsupportedError @@ -243,7 +243,7 @@ def bvnmom( xu: Tensor, yu: Tensor, p: Optional[Tensor] = None, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Computes the expected values of truncated, bivariate normal random variables. 
Let `x` and `y` be a pair of standard bivariate normal random variables having diff --git a/botorch/utils/probability/lin_ess.py b/botorch/utils/probability/lin_ess.py index fe21dc2a5f..2b524bad92 100644 --- a/botorch/utils/probability/lin_ess.py +++ b/botorch/utils/probability/lin_ess.py @@ -35,7 +35,7 @@ from __future__ import annotations import math -from typing import List, Optional, Tuple, Union +from typing import Optional, Union import torch from botorch.utils.sampling import PolytopeSampler @@ -55,10 +55,10 @@ class LinearEllipticalSliceSampler(PolytopeSampler): def __init__( self, - inequality_constraints: Optional[Tuple[Tensor, Tensor]] = None, + inequality_constraints: Optional[tuple[Tensor, Tensor]] = None, bounds: Optional[Tensor] = None, interior_point: Optional[Tensor] = None, - fixed_indices: Optional[Union[List[int], Tensor]] = None, + fixed_indices: Optional[Union[list[int], Tensor]] = None, mean: Optional[Tensor] = None, covariance_matrix: Optional[Union[Tensor, LinearOperator]] = None, covariance_root: Optional[Union[Tensor, LinearOperator]] = None, @@ -184,11 +184,11 @@ def _fixed_features_initialization( A: Tensor, b: Tensor, interior_point: Optional[Tensor], - fixed_indices: Union[List[int], Tensor], + fixed_indices: Union[list[int], Tensor], mean: Optional[Tensor], covariance_matrix: Optional[Tensor], covariance_root: Optional[Tensor], - ) -> Tuple[Optional[Tensor], Optional[Tensor]]: + ) -> tuple[Optional[Tensor], Optional[Tensor]]: """Modifies the constraint system (A, b) due to fixed indices and assigns the modified constraints system to `self._Az`, `self._bz`. NOTE: Needs to be called prior to `self._standardization_initialization` in the constructor. @@ -339,7 +339,7 @@ def _get_cart_coords(self, nu: Tensor, theta: Tensor) -> Tensor: """ return self._z * torch.cos(theta) + nu * torch.sin(theta) - def _trim_intervals(self, left: Tensor, right: Tensor) -> Tuple[Tensor, Tensor]: + def _trim_intervals(self, left: Tensor, right: Tensor) -> tuple[Tensor, Tensor]: """Trim the intervals by a small positive constant. This encourages the Markov chain to stay in the interior of the domain. """ @@ -348,7 +348,7 @@ def _trim_intervals(self, left: Tensor, right: Tensor) -> Tuple[Tensor, Tensor]: return left + eps, right - eps - def _find_active_intersection_angles(self, nu: Tensor) -> Tuple[Tensor, Tensor]: + def _find_active_intersection_angles(self, nu: Tensor) -> tuple[Tensor, Tensor]: """Construct the active intersection angles. Args: @@ -375,7 +375,7 @@ def _find_active_intersection_angles(self, nu: Tensor) -> Tuple[Tensor, Tensor]: return cummax, srted - def _find_intersection_angles(self, nu: Tensor) -> Tuple[Tensor, Tensor]: + def _find_intersection_angles(self, nu: Tensor) -> tuple[Tensor, Tensor]: """Compute all 2 * m intersections of the ellipse and the domain, where `m = n_ineq_con` is the number of inequality constraints defining the domain. If the i-th linear inequality constraint has no intersection with the ellipse, @@ -493,8 +493,8 @@ def _unstandardize(self, z: Tensor) -> Tensor: def get_index_tensors( - fixed_indices: Union[List[int], Tensor], d: int -) -> Tuple[Tensor, Tensor]: + fixed_indices: Union[list[int], Tensor], d: int +) -> tuple[Tensor, Tensor]: """Converts `fixed_indices` to a `d`-dim integral Tensor that is True at indices that are contained in `fixed_indices` and False otherwise. 
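As a usage sketch for the sampler typed above (assuming the default standard-normal base measure when `mean` and `covariance_matrix` are omitted):

    import torch
    from botorch.utils.probability.lin_ess import LinearEllipticalSliceSampler

    bounds = torch.tensor([[0.0, 0.0], [1.0, 1.0]])  # 2 x d lower/upper bounds
    sampler = LinearEllipticalSliceSampler(bounds=bounds)
    samples = sampler.draw(n=8)  # n x d draws from the box-truncated Gaussian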
diff --git a/botorch/utils/probability/linalg.py b/botorch/utils/probability/linalg.py index adfa7c367c..080cfec220 100644 --- a/botorch/utils/probability/linalg.py +++ b/botorch/utils/probability/linalg.py @@ -6,9 +6,11 @@ from __future__ import annotations +from collections.abc import Sequence + from dataclasses import dataclass, InitVar from itertools import chain -from typing import Any, Optional, Sequence +from typing import Any, Optional import torch from botorch.utils.probability.utils import swap_along_dim_ diff --git a/botorch/utils/probability/truncated_multivariate_normal.py b/botorch/utils/probability/truncated_multivariate_normal.py index 0bdb0ed2fd..5523e3d2c3 100644 --- a/botorch/utils/probability/truncated_multivariate_normal.py +++ b/botorch/utils/probability/truncated_multivariate_normal.py @@ -6,7 +6,9 @@ from __future__ import annotations -from typing import Optional, Sequence +from collections.abc import Sequence + +from typing import Optional import torch from botorch.utils.probability.lin_ess import LinearEllipticalSliceSampler diff --git a/botorch/utils/probability/unified_skew_normal.py b/botorch/utils/probability/unified_skew_normal.py index cdef4eb22c..b042cc5ec4 100644 --- a/botorch/utils/probability/unified_skew_normal.py +++ b/botorch/utils/probability/unified_skew_normal.py @@ -6,8 +6,10 @@ from __future__ import annotations +from collections.abc import Sequence + from inspect import getmembers -from typing import Optional, Sequence, Union +from typing import Optional, Union import torch from botorch.utils.probability.linalg import augment_cholesky, block_matrix_concat diff --git a/botorch/utils/probability/utils.py b/botorch/utils/probability/utils.py index 56dbedb1fe..ae63959b43 100644 --- a/botorch/utils/probability/utils.py +++ b/botorch/utils/probability/utils.py @@ -7,18 +7,19 @@ from __future__ import annotations import math +from collections.abc import Iterable, Iterator from functools import lru_cache from math import pi from numbers import Number -from typing import Any, Callable, Iterable, Iterator, Optional, Tuple, Union +from typing import Any, Callable, Optional, Union import torch from botorch.utils.safe_math import logdiffexp from numpy.polynomial.legendre import leggauss as numpy_leggauss from torch import BoolTensor, LongTensor, Tensor -CaseNd = Tuple[Callable[[], BoolTensor], Callable[[BoolTensor], Tensor]] +CaseNd = tuple[Callable[[], BoolTensor], Callable[[BoolTensor], Tensor]] _log_2 = math.log(2) _sqrt_pi = math.sqrt(pi) @@ -27,7 +28,7 @@ _inv_sqrt_2 = 1 / math.sqrt(2) _neg_inv_sqrt_2 = -_inv_sqrt_2 _log_sqrt_2pi = math.log(2 * pi) / 2 -STANDARDIZED_RANGE: Tuple[float, float] = (-1e6, 1e6) +STANDARDIZED_RANGE: tuple[float, float] = (-1e6, 1e6) _log_two_inv_sqrt_2pi = _log_2 - _log_sqrt_2pi # = log(2 / sqrt(2 * pi)) @@ -82,7 +83,7 @@ def get_constants( values: Union[Number, Iterator[Number]], device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, -) -> Union[Tensor, Tuple[Tensor, ...]]: +) -> Union[Tensor, tuple[Tensor, ...]]: r"""Returns scalar-valued Tensors containing each of the given constants. Used to expedite tensor operations involving scalar arithmetic. 
Note that the returned Tensors should not be modified in-place.""" @@ -124,7 +125,7 @@ def build_positional_indices( @lru_cache(maxsize=None) -def leggauss(deg: int, **tkwargs: Any) -> Tuple[Tensor, Tensor]: +def leggauss(deg: int, **tkwargs: Any) -> tuple[Tensor, Tensor]: x, w = numpy_leggauss(deg) return torch.as_tensor(x, **tkwargs), torch.as_tensor(w, **tkwargs) diff --git a/botorch/utils/safe_math.py b/botorch/utils/safe_math.py index 4ec2892e90..8b4ce7b957 100644 --- a/botorch/utils/safe_math.py +++ b/botorch/utils/safe_math.py @@ -17,7 +17,7 @@ import math -from typing import Callable, Tuple, Union +from typing import Callable, Union import torch from botorch.exceptions import UnsupportedError @@ -122,7 +122,7 @@ def logdiffexp(log_a: Tensor, log_b: Tensor) -> Tensor: def logsumexp( - x: Tensor, dim: Union[int, Tuple[int, ...]], keepdim: bool = False + x: Tensor, dim: Union[int, tuple[int, ...]], keepdim: bool = False ) -> Tensor: """Version of logsumexp that has a well-behaved backward pass when x contains infinities. @@ -149,7 +149,7 @@ def logsumexp( def _inf_max_helper( max_fun: Callable[[Tensor], Tensor], x: Tensor, - dim: Union[int, Tuple[int, ...]], + dim: Union[int, tuple[int, ...]], keepdim: bool, ) -> Tensor: """Helper function that generalizes the treatment of infinities for approximations @@ -187,7 +187,7 @@ def _inf_max_helper( return res if keepdim else res.sum(dim=dim) -def _any(x: Tensor, dim: Union[int, Tuple[int, ...]], keepdim: bool = False) -> Tensor: +def _any(x: Tensor, dim: Union[int, tuple[int, ...]], keepdim: bool = False) -> Tensor: """Extension of torch.any, which supports reducing over tuples of dimensions. Args: @@ -198,7 +198,7 @@ def _any(x: Tensor, dim: Union[int, Tuple[int, ...]], keepdim: bool = False) -> Returns: The Tensor corresponding to `any` over the specified dimensions. """ - if isinstance(dim, Tuple): + if isinstance(dim, tuple): for d in dim: x = x.any(dim=d, keepdim=True) else: @@ -207,7 +207,7 @@ def _any(x: Tensor, dim: Union[int, Tuple[int, ...]], keepdim: bool = False) -> def logmeanexp( - X: Tensor, dim: Union[int, Tuple[int, ...]], keepdim: bool = False + X: Tensor, dim: Union[int, tuple[int, ...]], keepdim: bool = False ) -> Tensor: """Computes `log(mean(exp(X), dim=dim, keepdim=keepdim))`. 
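These reductions accept either a single dim or a tuple of dims, exactly as the `Union[int, tuple[int, ...]]` annotations indicate. A small sketch:

    import torch
    from botorch.utils.safe_math import logmeanexp, logsumexp

    x = torch.randn(4, 3, 2)
    logsumexp(x, dim=(-2, -1)).shape                 # torch.Size([4])
    logmeanexp(x, dim=(-2, -1), keepdim=True).shape  # torch.Size([4, 1, 1])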
@@ -249,7 +249,7 @@ def log_softplus(x: Tensor, tau: Union[float, Tensor] = TAU) -> Tensor: def smooth_amax( X: Tensor, - dim: Union[int, Tuple[int, ...]] = -1, + dim: Union[int, tuple[int, ...]] = -1, keepdim: bool = False, tau: Union[float, Tensor] = 1.0, ) -> Tensor: @@ -275,7 +275,7 @@ def smooth_amax( def smooth_amin( X: Tensor, - dim: Union[int, Tuple[int, ...]] = -1, + dim: Union[int, tuple[int, ...]] = -1, keepdim: bool = False, tau: Union[float, Tensor] = 1.0, ) -> Tensor: @@ -322,7 +322,7 @@ def _fatplus(x: Tensor) -> Tensor: def fatmax( x: Tensor, - dim: Union[int, Tuple[int, ...]], + dim: Union[int, tuple[int, ...]], keepdim: bool = False, tau: Union[float, Tensor] = TAU, alpha: float = ALPHA, @@ -345,7 +345,7 @@ def fatmax( """ def max_fun( - x: Tensor, dim: Union[int, Tuple[int, ...]], keepdim: bool = False + x: Tensor, dim: Union[int, tuple[int, ...]], keepdim: bool = False ) -> Tensor: return tau * _pareto(-x / tau, alpha=alpha).sum(dim=dim, keepdim=keepdim).log() @@ -354,7 +354,7 @@ def max_fun( def fatmin( x: Tensor, - dim: Union[int, Tuple[int, ...]], + dim: Union[int, tuple[int, ...]], keepdim: bool = False, tau: Union[float, Tensor] = TAU, alpha: float = ALPHA, diff --git a/botorch/utils/sampling.py b/botorch/utils/sampling.py index 21730874cf..cb7c6d3ad5 100644 --- a/botorch/utils/sampling.py +++ b/botorch/utils/sampling.py @@ -19,8 +19,9 @@ import warnings from abc import ABC, abstractmethod +from collections.abc import Generator, Iterable from contextlib import contextmanager -from typing import Any, Generator, Iterable, List, Optional, Tuple, TYPE_CHECKING, Union +from typing import Any, Optional, TYPE_CHECKING, Union import numpy as np import scipy @@ -352,7 +353,7 @@ def batched_multinomial( return flat_samples.view(*batch_shape, num_samples) -def _convert_bounds_to_inequality_constraints(bounds: Tensor) -> Tuple[Tensor, Tensor]: +def _convert_bounds_to_inequality_constraints(bounds: Tensor) -> tuple[Tensor, Tensor]: r"""Convert bounds into inequality constraints of the form Ax <= b. 
Args: @@ -458,8 +459,8 @@ class PolytopeSampler(ABC): def __init__( self, - inequality_constraints: Optional[Tuple[Tensor, Tensor]] = None, - equality_constraints: Optional[Tuple[Tensor, Tensor]] = None, + inequality_constraints: Optional[tuple[Tensor, Tensor]] = None, + equality_constraints: Optional[tuple[Tensor, Tensor]] = None, bounds: Optional[Tensor] = None, interior_point: Optional[Tensor] = None, ) -> None: @@ -582,8 +583,8 @@ class HitAndRunPolytopeSampler(PolytopeSampler): def __init__( self, - inequality_constraints: Optional[Tuple[Tensor, Tensor]] = None, - equality_constraints: Optional[Tuple[Tensor, Tensor]] = None, + inequality_constraints: Optional[tuple[Tensor, Tensor]] = None, + equality_constraints: Optional[tuple[Tensor, Tensor]] = None, bounds: Optional[Tensor] = None, interior_point: Optional[Tensor] = None, n_burnin: int = 200, @@ -725,8 +726,8 @@ class DelaunayPolytopeSampler(PolytopeSampler): def __init__( self, - inequality_constraints: Optional[Tuple[Tensor, Tensor]] = None, - equality_constraints: Optional[Tuple[Tensor, Tensor]] = None, + inequality_constraints: Optional[tuple[Tensor, Tensor]] = None, + equality_constraints: Optional[tuple[Tensor, Tensor]] = None, bounds: Optional[Tensor] = None, interior_point: Optional[Tensor] = None, ) -> None: @@ -825,8 +826,8 @@ def draw(self, n: int = 1, seed: Optional[int] = None) -> Tensor: def normalize_sparse_linear_constraints( - bounds: Tensor, constraints: List[Tuple[Tensor, Tensor, float]] -) -> List[Tuple[Tensor, Tensor, float]]: + bounds: Tensor, constraints: list[tuple[Tensor, Tensor, float]] +) -> list[tuple[Tensor, Tensor, float]]: r"""Normalize sparse linear constraints to the unit cube. Args: @@ -849,8 +850,8 @@ def normalize_sparse_linear_constraints( def normalize_dense_linear_constraints( bounds: Tensor, - constraints: Tuple[Tensor, Tensor], -) -> Tuple[Tensor, Tensor]: + constraints: tuple[Tensor, Tensor], +) -> tuple[Tensor, Tensor]: r"""Normalize dense linear constraints to the unit cube. Args: @@ -873,8 +874,8 @@ def normalize_dense_linear_constraints( def get_polytope_samples( n: int, bounds: Tensor, - inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, - equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None, + inequality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, + equality_constraints: Optional[list[tuple[Tensor, Tensor, float]]] = None, seed: Optional[int] = None, n_burnin: int = 10_000, n_thinning: int = 32, @@ -940,8 +941,8 @@ def get_polytope_samples( def sparse_to_dense_constraints( d: int, - constraints: List[Tuple[Tensor, Tensor, float]], -) -> Tuple[Tensor, Tensor]: + constraints: list[tuple[Tensor, Tensor, float]], +) -> tuple[Tensor, Tensor]: r"""Convert parameter constraints from a sparse format into a dense format. This method converts sparse triples of the form (indices, coefficients, rhs) @@ -976,7 +977,7 @@ def optimize_posterior_samples( num_restarts: int = 20, maximize: bool = True, **kwargs: Any, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Cheaply maximizes posterior samples by random querying followed by vanilla gradient descent on the best num_restarts points. 
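The sparse constraint triples typed above follow the BoTorch convention `sum_i coefficients[i] * X[indices[i]] >= rhs`. A hedged usage sketch of the two utilities:

    import torch
    from botorch.utils.sampling import (
        get_polytope_samples,
        sparse_to_dense_constraints,
    )

    bounds = torch.stack([torch.zeros(3), torch.ones(3)])
    # x_0 + x_1 >= 0.5, as a sparse (indices, coefficients, rhs) triple:
    ineq = [(torch.tensor([0, 1]), torch.tensor([1.0, 1.0]), 0.5)]
    X = get_polytope_samples(n=16, bounds=bounds, inequality_constraints=ineq)

    A, b = sparse_to_dense_constraints(d=3, constraints=ineq)  # assumed A @ x >= b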
diff --git a/botorch/utils/test_helpers.py b/botorch/utils/test_helpers.py index 060b56386e..4cb8e460e7 100644 --- a/botorch/utils/test_helpers.py +++ b/botorch/utils/test_helpers.py @@ -12,7 +12,7 @@ from __future__ import annotations import math -from typing import List, Optional, Tuple +from typing import Optional import torch from botorch.acquisition.objective import PosteriorTransform @@ -36,7 +36,7 @@ from torch.nn.functional import pad -def get_sample_moments(samples: Tensor, sample_shape: Size) -> Tuple[Tensor, Tensor]: +def get_sample_moments(samples: Tensor, sample_shape: Size) -> tuple[Tensor, Tensor]: """Computes the mean and covariance of a set of samples. Args: @@ -55,7 +55,7 @@ def standardize_moments( transform: Standardize, loc: Tensor, covariance_matrix: Tensor, -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: """Standardizes the loc and covariance_matrix using the mean and standard deviations from a Standardize transform. """ @@ -68,10 +68,10 @@ def standardize_moments( def gen_multi_task_dataset( yvar: Optional[float] = None, - task_values: Optional[List[int]] = None, + task_values: Optional[list[int]] = None, skip_task_features_in_datasets: bool = False, **tkwargs, -) -> Tuple[MultiTaskDataset, Tuple[Tensor, Tensor, Optional[Tensor]]]: +) -> tuple[MultiTaskDataset, tuple[Tensor, Tensor, Optional[Tensor]]]: """Constructs a multi-task dataset with two tasks, each with 10 data points. Args: diff --git a/botorch/utils/testing.py b/botorch/utils/testing.py index 37cfc03867..bcb41462b6 100644 --- a/botorch/utils/testing.py +++ b/botorch/utils/testing.py @@ -10,7 +10,8 @@ import warnings from abc import abstractproperty from collections import OrderedDict -from typing import Any, List, Optional, Sequence, Tuple +from collections.abc import Sequence +from typing import Any, Optional from unittest import mock, TestCase import torch @@ -285,7 +286,7 @@ def base_sample_shape(self) -> torch.Size: return torch.Size() @property - def batch_range(self) -> Tuple[int, int]: + def batch_range(self) -> tuple[int, int]: return self._batch_range @property @@ -337,7 +338,7 @@ def __init__(self, posterior: MockPosterior) -> None: # noqa: D107 def posterior( self, X: Tensor, - output_indices: Optional[List[int]] = None, + output_indices: Optional[list[int]] = None, posterior_transform: Optional[PosteriorTransform] = None, observation_noise: bool = False, ) -> MockPosterior: @@ -381,7 +382,7 @@ def set_X_pending(self, X_pending: Optional[Tensor] = None): def _get_random_data( batch_shape: torch.Size, m: int, d: int = 1, n: int = 10, **tkwargs -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Generate random data for testing purposes. 
Args: @@ -473,7 +474,7 @@ def _get_max_violation_of_bounds(samples: torch.Tensor, bounds: torch.Tensor) -> def _get_max_violation_of_constraints( samples: torch.Tensor, - constraints: Optional[List[Tuple[Tensor, Tensor, float]]], + constraints: Optional[list[tuple[Tensor, Tensor, float]]], equality: bool, ) -> float: r""" diff --git a/botorch/utils/transforms.py b/botorch/utils/transforms.py index 3006f75106..6770db9bd8 100644 --- a/botorch/utils/transforms.py +++ b/botorch/utils/transforms.py @@ -12,7 +12,7 @@ import warnings from functools import wraps -from typing import Any, Callable, List, Optional, TYPE_CHECKING +from typing import Any, Callable, Optional, TYPE_CHECKING import torch from botorch.utils.safe_math import logmeanexp @@ -119,7 +119,7 @@ def unnormalize(X: Tensor, bounds: Tensor) -> Tensor: return X * (bounds[1] - bounds[0]) + bounds[0] -def normalize_indices(indices: Optional[List[int]], d: int) -> Optional[List[int]]: +def normalize_indices(indices: Optional[list[int]], d: int) -> Optional[list[int]]: r"""Normalize a list of indices to ensure that they are positive. Args: diff --git a/test/acquisition/multi_objective/test_monte_carlo.py b/test/acquisition/multi_objective/test_monte_carlo.py index 91ef0ddf25..d790dd4781 100644 --- a/test/acquisition/multi_objective/test_monte_carlo.py +++ b/test/acquisition/multi_objective/test_monte_carlo.py @@ -8,7 +8,7 @@ from copy import deepcopy from itertools import product from math import pi -from typing import Any, Dict, Optional, Type +from typing import Any, Optional from unittest import mock from warnings import catch_warnings, simplefilter @@ -159,9 +159,9 @@ def test_fat_q_log_expected_hypervolume_improvement(self): def _test_q_expected_hypervolume_improvement( self, - acqf_class: Type[AcquisitionFunction], + acqf_class: type[AcquisitionFunction], dtype: torch.dtype, - acqf_kwargs: Optional[Dict[str, Any]] = None, + acqf_kwargs: Optional[dict[str, Any]] = None, ): if acqf_kwargs is None: acqf_kwargs = {} @@ -607,9 +607,9 @@ def test_fat_constrained_q_log_expected_hypervolume_improvement(self): def _test_constrained_q_expected_hypervolume_improvement( self, - acqf_class: Type[AcquisitionFunction], + acqf_class: type[AcquisitionFunction], dtype: torch.dtype, - acqf_kwargs: Optional[Dict[str, Any]] = None, + acqf_kwargs: Optional[dict[str, Any]] = None, ): if acqf_kwargs is None: acqf_kwargs = {} @@ -758,7 +758,7 @@ def test_q_log_noisy_expected_hypervolume_improvement(self): ) def _test_q_noisy_expected_hypervolume_improvement_m1( - self, acqf_class: Type[AcquisitionFunction], dtype: torch.dtype + self, acqf_class: type[AcquisitionFunction], dtype: torch.dtype ): # special case test for m = 1. 
( @@ -785,7 +785,7 @@ def _test_q_noisy_expected_hypervolume_improvement_m1( ) def _test_q_noisy_expected_hypervolume_improvement( - self, acqf_class: Type[AcquisitionFunction], dtype: torch.dtype, m: int + self, acqf_class: type[AcquisitionFunction], dtype: torch.dtype, m: int ) -> None: self._test_qnehvi_base(acqf_class, dtype, m) # test with and without cached box decomposition (CBD) @@ -811,7 +811,7 @@ def _setup_qnehvi_test(self, dtype: torch.dtype, m: int) -> None: return ref_point, X, X_baseline, mm, sampler, samples, baseline_samples, tkwargs def _test_qnehvi_base( - self, acqf_class: Type[AcquisitionFunction], dtype: torch.dtype, m: int + self, acqf_class: type[AcquisitionFunction], dtype: torch.dtype, m: int ) -> None: ( ref_point, @@ -996,7 +996,7 @@ def _test_qnehvi_base( self.assertEqual(list(b.shape), [1, 1, m]) def _test_qnehvi_with_CBD( - self, acqf_class: Type[AcquisitionFunction], dtype: torch.dtype, m: int + self, acqf_class: type[AcquisitionFunction], dtype: torch.dtype, m: int ) -> None: ( ref_point, @@ -1223,7 +1223,7 @@ def _test_qnehvi_with_CBD( self.assertTrue(torch.equal(acqf_pareto_Y[-2:], expected_new_Y2)) def _test_qnehvi_without_CBD( - self, acqf_class: Type[AcquisitionFunction], dtype: torch.dtype, m: int + self, acqf_class: type[AcquisitionFunction], dtype: torch.dtype, m: int ) -> None: tkwargs = {"device": self.device} tkwargs["dtype"] = dtype @@ -1355,7 +1355,7 @@ def test_constrained_q_log_noisy_expected_hypervolume_improvement(self) -> None: ) def _test_constrained_q_noisy_expected_hypervolume_improvement( - self, acqf_class: Type[AcquisitionFunction], dtype: torch.dtype, fat: bool + self, acqf_class: type[AcquisitionFunction], dtype: torch.dtype, fat: bool ): # TODO: improve tests with constraints tkwargs = {"device": self.device, "dtype": dtype} @@ -1601,7 +1601,7 @@ def test_prune_baseline(self): simplefilter("ignore", category=NumericsWarning) self._test_prune_baseline(acqf_class) - def _test_prune_baseline(self, acqf_class: Type[AcquisitionFunction]): + def _test_prune_baseline(self, acqf_class: type[AcquisitionFunction]): # test prune_baseline no = "botorch.utils.testing.MockModel.num_outputs" prune = ( @@ -1644,7 +1644,7 @@ def test_cache_root(self): simplefilter("ignore", category=NumericsWarning) self._test_cache_root(acqf_class) - def _test_cache_root(self, acqf_class: Type[AcquisitionFunction]): + def _test_cache_root(self, acqf_class: type[AcquisitionFunction]): sample_cached_path = ( "botorch.acquisition.cached_cholesky.sample_cached_cholesky" ) @@ -1798,7 +1798,7 @@ def test_with_set_valued_objectives(self): self._test_with_set_valued_objectives(acqf_class) def _test_with_set_valued_objectives( - self, acqf_class: Type[AcquisitionFunction] + self, acqf_class: type[AcquisitionFunction] ) -> None: for dtype in (torch.float, torch.double): tkwargs = {"device": self.device, "dtype": dtype} @@ -1859,7 +1859,7 @@ def test_deterministic(self): simplefilter("ignore", category=NumericsWarning) self._test_deterministic(acqf_class) - def _test_deterministic(self, acqf_class: Type[AcquisitionFunction]): + def _test_deterministic(self, acqf_class: type[AcquisitionFunction]): for dtype, prune in ((torch.float, False), (torch.double, True)): tkwargs = {"device": self.device, "dtype": dtype} model = GenericDeterministicModel(f=lambda x: x, num_outputs=2) @@ -1889,7 +1889,7 @@ def test_with_multitask(self): simplefilter("ignore", category=NumericsWarning) self._test_with_multitask(acqf_class) - def _test_with_multitask(self, acqf_class: 
Type[AcquisitionFunction]): + def _test_with_multitask(self, acqf_class: type[AcquisitionFunction]): # Verify that _set_sampler works with MTGP, KroneckerMTGP and HOGP. torch.manual_seed(1234) tkwargs = {"device": self.device, "dtype": torch.double} diff --git a/test/acquisition/multi_objective/test_parego.py b/test/acquisition/multi_objective/test_parego.py index 4ca92209f0..2624a456ef 100644 --- a/test/acquisition/multi_objective/test_parego.py +++ b/test/acquisition/multi_objective/test_parego.py @@ -3,7 +3,7 @@ # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from typing import Any, Dict, Optional +from typing import Any, Optional import torch from botorch.acquisition.logei import qLogNoisyExpectedImprovement @@ -29,7 +29,7 @@ def base_test_parego( ) -> None: if with_constraints: assert with_objective, "Objective must be specified if constraints are." - tkwargs: Dict[str, Any] = {"device": self.device, "dtype": torch.double} + tkwargs: dict[str, Any] = {"device": self.device, "dtype": torch.double} num_objectives = 2 num_constraints = 1 if with_constraints else 0 num_outputs = num_objectives + num_constraints @@ -103,7 +103,7 @@ def test_parego_with_constraints_objective_weights(self) -> None: ) def test_parego_with_ensemble_model(self) -> None: - tkwargs: Dict[str, Any] = {"device": self.device, "dtype": torch.double} + tkwargs: dict[str, Any] = {"device": self.device, "dtype": torch.double} models = [] for _ in range(2): model = SaasFullyBayesianSingleTaskGP( diff --git a/test/acquisition/test_input_constructors.py b/test/acquisition/test_input_constructors.py index 849c2f20e2..54da0d80d9 100644 --- a/test/acquisition/test_input_constructors.py +++ b/test/acquisition/test_input_constructors.py @@ -13,7 +13,7 @@ import math from functools import reduce -from typing import Callable, Type +from typing import Callable from unittest import mock from unittest.mock import MagicMock @@ -1043,7 +1043,7 @@ def test_construct_inputs_qNEHVI(self) -> None: def test_construct_inputs_qLogNEHVI(self) -> None: self._test_construct_inputs_qNEHVI(qLogNoisyExpectedHypervolumeImprovement) - def _test_construct_inputs_qNEHVI(self, acqf_class: Type[AcquisitionFunction]): + def _test_construct_inputs_qNEHVI(self, acqf_class: type[AcquisitionFunction]): c = get_acqf_input_constructor(acqf_class) objective_thresholds = torch.rand(2) diff --git a/test/acquisition/test_integration.py b/test/acquisition/test_integration.py index 2aa77c935f..bdfcad172d 100644 --- a/test/acquisition/test_integration.py +++ b/test/acquisition/test_integration.py @@ -5,7 +5,6 @@ # LICENSE file in the root directory of this source tree. 
from itertools import product -from typing import Dict from warnings import catch_warnings, simplefilter import torch @@ -36,7 +35,7 @@ def setUp(self) -> None: self.d = 2 self.tkwargs = {"device": self.device, "dtype": torch.double} - def _get_acqf_inputs(self, train_batch_shape: torch.Size, m: int) -> Dict: + def _get_acqf_inputs(self, train_batch_shape: torch.Size, m: int) -> dict: train_x = torch.rand((*train_batch_shape, 5, self.d), **self.tkwargs) y = torch.rand((*train_batch_shape, 5, m), **self.tkwargs) diff --git a/test/acquisition/test_proximal.py b/test/acquisition/test_proximal.py index 69926dc897..1a7a0ce0ff 100644 --- a/test/acquisition/test_proximal.py +++ b/test/acquisition/test_proximal.py @@ -4,7 +4,6 @@ # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. -from typing import List import torch from botorch.acquisition import LinearMCObjective, ScalarizedPosteriorTransform @@ -27,7 +26,7 @@ class DummyModel(GPyTorchModel): def __init__(self): # noqa: D107 super(GPyTorchModel, self).__init__() - def subset_output(self, idcs: List[int]) -> Model: + def subset_output(self, idcs: list[int]) -> Model: pass diff --git a/test/models/test_contextual.py b/test/models/test_contextual.py index fe61e7e3b5..4f7bf643b7 100644 --- a/test/models/test_contextual.py +++ b/test/models/test_contextual.py @@ -5,8 +5,6 @@ # LICENSE file in the root directory of this source tree. -from typing import Dict, Tuple - import torch from botorch.fit import fit_gpytorch_mll from botorch.models.contextual import LCEAGP, SACGP @@ -28,7 +26,7 @@ def _gen_datasets( infer_noise: bool = False, **tkwargs, -) -> Tuple[Dict[int, SupervisedDataset], Tuple[Tensor, Tensor, Tensor]]: +) -> tuple[dict[int, SupervisedDataset], tuple[Tensor, Tensor, Tensor]]: train_X = torch.tensor( [[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0]], **tkwargs ) diff --git a/test/models/test_fully_bayesian_multitask.py b/test/models/test_fully_bayesian_multitask.py index f8bd9132e7..9cd9e33009 100644 --- a/test/models/test_fully_bayesian_multitask.py +++ b/test/models/test_fully_bayesian_multitask.py @@ -6,7 +6,7 @@ import itertools -from typing import List, Optional +from typing import Optional import torch from botorch import fit_fully_bayesian_model_nuts @@ -73,7 +73,7 @@ class TestFullyBayesianMultiTaskGP(BotorchTestCase): def _get_data_and_model( self, task_rank: Optional[int] = None, - output_tasks: Optional[List[int]] = None, + output_tasks: Optional[list[int]] = None, infer_noise: bool = False, **tkwargs ): diff --git a/test/models/test_gp_regression_fidelity.py b/test/models/test_gp_regression_fidelity.py index dbf60b37fb..c44c403757 100644 --- a/test/models/test_gp_regression_fidelity.py +++ b/test/models/test_gp_regression_fidelity.py @@ -6,7 +6,6 @@ import itertools import warnings -from typing import Tuple import torch from botorch.exceptions.errors import UnsupportedError @@ -30,7 +29,7 @@ def _get_random_data_with_fidelity( batch_shape: torch.Size, m: int, n_fidelity: int, d: int = 1, n: int = 10, **tkwargs -) -> Tuple[Tensor, Tensor]: +) -> tuple[Tensor, Tensor]: r"""Construct test data. 
For this test, by convention the trailing dimensions are the fidelity dimensions """ diff --git a/test/models/test_multitask.py b/test/models/test_multitask.py index 639fca2e2c..45218d89a3 100644 --- a/test/models/test_multitask.py +++ b/test/models/test_multitask.py @@ -7,7 +7,7 @@ import itertools import math import warnings -from typing import Any, Dict, List, Optional +from typing import Any, Optional import torch from botorch.acquisition.objective import ScalarizedPosteriorTransform @@ -49,8 +49,8 @@ def _gen_model_and_data( fixed_noise: bool, task_feature: int = 0, - output_tasks: Optional[List[int]] = None, - task_values: Optional[List[int]] = None, + output_tasks: Optional[list[int]] = None, + task_values: Optional[list[int]] = None, skip_task_features_in_datasets: bool = False, input_transform: Optional[InputTransform] = None, outcome_transform: Optional[OutcomeTransform] = None, @@ -140,7 +140,7 @@ def test_MultiTaskGP(self) -> None: (False, True), strict=True, ): - tkwargs: Dict[str, Any] = {"device": self.device, "dtype": dtype} + tkwargs: dict[str, Any] = {"device": self.device, "dtype": dtype} octf = Standardize(m=1) if use_octf else None intf = ( @@ -328,7 +328,7 @@ def test_MultiTaskGP(self) -> None: def test_MultiTaskGP_single_output(self) -> None: for dtype in (torch.float, torch.double): - tkwargs: Dict[str, Any] = {"device": self.device, "dtype": dtype} + tkwargs: dict[str, Any] = {"device": self.device, "dtype": dtype} model = _gen_model_single_output(**tkwargs) self.assertIsInstance(model, MultiTaskGP) self.assertEqual(model.num_outputs, 1) @@ -442,7 +442,7 @@ def test_MultiTaskGP_construct_inputs(self) -> None: for dtype, fixed_noise, skip_task_features_in_datasets in zip( (torch.float, torch.double), (True, False), (True, False), strict=True ): - tkwargs: Dict[str, Any] = {"device": self.device, "dtype": dtype} + tkwargs: dict[str, Any] = {"device": self.device, "dtype": dtype} task_feature = 0 model, datasets, (train_X, train_Y, train_Yvar) = _gen_model_and_data( fixed_noise=fixed_noise, @@ -511,7 +511,7 @@ def test_KroneckerMultiTaskGP_default(self) -> None: (False, True), (False, True), ): - tkwargs: Dict[str, Any] = {"device": self.device, "dtype": dtype} + tkwargs: dict[str, Any] = {"device": self.device, "dtype": dtype} octf = Standardize(m=2) if use_octf else None diff --git a/test/models/test_pairwise_gp.py b/test/models/test_pairwise_gp.py index 3f1f2deed0..34b261f606 100644 --- a/test/models/test_pairwise_gp.py +++ b/test/models/test_pairwise_gp.py @@ -7,7 +7,7 @@ import itertools import random import warnings -from typing import Dict, Tuple, Union +from typing import Union import torch from botorch.acquisition.objective import ScalarizedPosteriorTransform @@ -49,7 +49,7 @@ def _make_rand_mini_data( self, batch_shape, X_dim=2, - ) -> Tuple[Tensor, Tensor]: + ) -> tuple[Tensor, Tensor]: train_X = torch.rand( *batch_shape, 2, X_dim, device=self.device, dtype=self.dtype ) @@ -63,7 +63,7 @@ def _get_model_and_data( batch_shape, X_dim=2, likelihood_cls=None, - ) -> Tuple[Model, Dict[str, Union[Tensor, PairwiseLikelihood]]]: + ) -> tuple[Model, dict[str, Union[Tensor, PairwiseLikelihood]]]: train_X, train_comp = self._make_rand_mini_data( batch_shape=batch_shape, X_dim=X_dim, diff --git a/test/optim/closures/test_core.py b/test/optim/closures/test_core.py index 6ce4f78a58..2f7283b231 100644 --- a/test/optim/closures/test_core.py +++ b/test/optim/closures/test_core.py @@ -6,7 +6,6 @@ from contextlib import nullcontext from functools import partial -from typing 
 from unittest.mock import MagicMock

 import numpy as np
@@ -36,7 +35,7 @@ def forward(self) -> torch.Tensor:
         return self.w * self.x + self.b

     @property
-    def free_parameters(self) -> Dict[str, torch.Tensor]:
+    def free_parameters(self) -> dict[str, torch.Tensor]:
         return {n: p for n, p in self.named_parameters() if p.requires_grad}
diff --git a/test/optim/test_core.py b/test/optim/test_core.py
index 2dcdfa5b61..4288980959 100644
--- a/test/optim/test_core.py
+++ b/test/optim/test_core.py
@@ -6,7 +6,6 @@

 import time
 from functools import partial
-from typing import Dict
 from unittest.mock import MagicMock, patch

 import torch
@@ -43,7 +42,7 @@ def forward(self) -> Tensor:
         return (self.x - self.b).square().sum()

     @property
-    def free_parameters(self) -> Dict[str, Tensor]:
+    def free_parameters(self) -> dict[str, Tensor]:
         return {n: p for n, p in self.named_parameters() if p.requires_grad}
diff --git a/test/optim/utils/test_model_utils.py b/test/optim/utils/test_model_utils.py
index a0ab24c222..a7e684e465 100644
--- a/test/optim/utils/test_model_utils.py
+++ b/test/optim/utils/test_model_utils.py
@@ -11,7 +11,7 @@

 import warnings
 from copy import deepcopy
 from string import ascii_lowercase
-from typing import Any, Dict
+from typing import Any
 from unittest.mock import MagicMock, patch

 import torch
@@ -264,7 +264,7 @@ def test_sample_all_priors(self):
             sample_all_priors(model)

     def test_univariate_prior(self) -> None:
-        tkwargs: Dict[str, Any] = {"device": self.device, "dtype": torch.double}
+        tkwargs: dict[str, Any] = {"device": self.device, "dtype": torch.double}
         for batch in (torch.Size([]), torch.Size([2, 2])):
             model = SingleTaskGP(
                 train_X=torch.rand(*batch, 5, 3, **tkwargs),
@@ -285,7 +285,7 @@ def test_univariate_prior(self) -> None:

     def test_with_multivariate_prior(self) -> None:
         # This is modified from https://github.com/pytorch/botorch/issues/780.
-        tkwargs: Dict[str, Any] = {"device": self.device, "dtype": torch.double}
+        tkwargs: dict[str, Any] = {"device": self.device, "dtype": torch.double}
         for batch in (torch.Size([]), torch.Size([3])):
             model = SingleTaskGP(
                 train_X=torch.rand(*batch, 2, 2, **tkwargs),
diff --git a/test/sampling/pathwise/test_posterior_samplers.py b/test/sampling/pathwise/test_posterior_samplers.py
index 5a57f34d14..f0ff1a79ed 100644
--- a/test/sampling/pathwise/test_posterior_samplers.py
+++ b/test/sampling/pathwise/test_posterior_samplers.py
@@ -7,7 +7,7 @@

 from __future__ import annotations

 from copy import deepcopy
-from typing import Any, Dict
+from typing import Any

 import torch
 from botorch.exceptions.errors import UnsupportedError
@@ -28,7 +28,7 @@ class TestPosteriorSamplers(BotorchTestCase):
     def setUp(self, suppress_input_warnings: bool = True) -> None:
         super().setUp(suppress_input_warnings=suppress_input_warnings)
-        tkwargs: Dict[str, Any] = {"device": self.device, "dtype": torch.float64}
+        tkwargs: dict[str, Any] = {"device": self.device, "dtype": torch.float64}
         torch.manual_seed(0)

         base = MaternKernel(nu=2.5, ard_num_dims=2, batch_shape=Size([]))
diff --git a/test/test_fit.py b/test/test_fit.py
index 4541abe315..62e18ccea6 100644
--- a/test/test_fit.py
+++ b/test/test_fit.py
@@ -5,10 +5,11 @@
 # LICENSE file in the root directory of this source tree.

 import math
+from collections.abc import Iterable
 from contextlib import ExitStack, nullcontext
 from copy import deepcopy
 from itertools import filterfalse, product
-from typing import Callable, Iterable, Optional
+from typing import Callable, Optional
 from unittest.mock import MagicMock, patch
 from warnings import catch_warnings, warn, WarningMessage
diff --git a/test/test_functions/test_multi_objective.py b/test/test_functions/test_multi_objective.py
index 2c943cc1c0..04e70472b6 100644
--- a/test/test_functions/test_multi_objective.py
+++ b/test/test_functions/test_multi_objective.py
@@ -5,7 +5,6 @@
 # LICENSE file in the root directory of this source tree.

 import math
-from typing import List

 import torch
 from botorch.exceptions.errors import InputDataError, UnsupportedError
@@ -87,7 +86,7 @@ class TestBraninCurrin(
     MultiObjectiveTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [BraninCurrin()]

     def test_init(self):
@@ -118,7 +117,7 @@ def setUp(self, suppress_input_warnings: bool = True) -> None:
         ]

     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [DH1(dim=2), DH2(dim=3), DH3(dim=4), DH4(dim=5)]

     def test_init(self):
@@ -151,7 +150,7 @@ class TestDTLZ(
     MultiObjectiveTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [
             DTLZ1(dim=5, num_objectives=2),
             DTLZ2(dim=5, num_objectives=2),
@@ -220,7 +219,7 @@ class TestGMM(
     MultiObjectiveTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [
             GMM(num_objectives=4),
             GMM(num_objectives=4, noise_std=[0.0, 0.1, 0.2, 0.3]),
@@ -276,7 +275,7 @@ class TestMW7(
     ConstrainedTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [
             MW7(dim=3),
             MW7(dim=3, noise_std=[0.1, 0.2]),
@@ -297,7 +296,7 @@ class TestZDT(
     MultiObjectiveTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [
             ZDT1(dim=3, num_objectives=2),
             ZDT2(dim=3, num_objectives=2),
@@ -375,7 +374,7 @@ class TestCarSideImpact(
     MultiObjectiveTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [CarSideImpact(), CarSideImpact(noise_std=[0.1, 0.2, 0.3, 0.4])]
@@ -385,7 +384,7 @@ class TestPenicillin(
     MultiObjectiveTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [Penicillin(), Penicillin(noise_std=[0.1, 0.2, 0.3])]
@@ -395,7 +394,7 @@ class TestToyRobust(
     MultiObjectiveTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [ToyRobust(), ToyRobust(noise_std=[0.1, 0.2])]
@@ -405,7 +404,7 @@ class TestVehicleSafety(
     MultiObjectiveTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [VehicleSafety(), VehicleSafety(noise_std=[0.1, 0.2, 0.3])]
@@ -419,7 +418,7 @@ class TestBNH(
     ConstrainedTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [BNH(), BNH(noise_std=[0.1, 0.2])]
@@ -430,7 +429,7 @@ class TestSRN(
     ConstrainedTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [SRN(), SRN(noise_std=[0.1, 0.2])]
@@ -441,7 +440,7 @@ class TestCONSTR(
     ConstrainedTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [CONSTR(), CONSTR(noise_std=[0.1, 0.2])]
@@ -452,7 +451,7 @@ class TestConstrainedBraninCurrin(
     ConstrainedTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [
             ConstrainedBraninCurrin(),
             ConstrainedBraninCurrin(noise_std=[0.1, 0.2]),
@@ -467,7 +466,7 @@ class TestC2DTLZ2(
     ConstrainedTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [
             C2DTLZ2(dim=3, num_objectives=2),
             C2DTLZ2(dim=3, num_objectives=2, noise_std=0.1),
@@ -487,7 +486,7 @@ class TestDiscBrake(
     ConstrainedTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [DiscBrake(), DiscBrake(noise_std=[0.1, 0.2])]
@@ -498,7 +497,7 @@ class TestWeldedBeam(
     ConstrainedTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [WeldedBeam(), WeldedBeam(noise_std=[0.1, 0.2])]
@@ -509,5 +508,5 @@ class TestOSY(
     ConstrainedTestProblemTestCaseMixin,
 ):
     @property
-    def functions(self) -> List[BaseTestProblem]:
+    def functions(self) -> list[BaseTestProblem]:
         return [OSY(), OSY(noise_std=[0.1, 0.2])]
diff --git a/test/utils/probability/test_bvn.py b/test/utils/probability/test_bvn.py
index 0e7c5484cf..37910762aa 100644
--- a/test/utils/probability/test_bvn.py
+++ b/test/utils/probability/test_bvn.py
@@ -7,7 +7,7 @@

 from __future__ import annotations

 from itertools import count
-from typing import Any, Callable, Dict, Optional, Tuple, Union
+from typing import Any, Callable, Optional, Union

 import torch
 from botorch.exceptions import UnsupportedError
@@ -24,7 +24,7 @@ def run_gaussian_estimator(
-    estimator: Callable[[Tensor], Tuple[Tensor, Union[Tensor, float, int]]],
+    estimator: Callable[[Tensor], tuple[Tensor, Union[Tensor, float, int]]],
     sqrt_cov: Tensor,
     num_samples: int,
     batch_limit: Optional[int] = None,
@@ -64,7 +64,7 @@ class TestBVN(BotorchTestCase):
     def setUp(
         self,
         nprobs_per_coeff: int = 3,
-        bound_range: Tuple[float, float] = (-3.0, 3.0),
+        bound_range: tuple[float, float] = (-3.0, 3.0),
         mc_num_samples: int = 10000,
         mc_batch_limit: int = 1000,
         mc_atol_multiplier: float = 4.0,
@@ -106,7 +106,7 @@ def setUp(
         self.sqrt_covariances[:, 1, 1] = (1 - self.correlations**2) ** 0.5

     @property
-    def tkwargs(self) -> Dict[str, Any]:
+    def tkwargs(self) -> dict[str, Any]:
         return {"dtype": self.dtype, "device": self.device}

     @property
diff --git a/test/utils/probability/test_mvnxpb.py b/test/utils/probability/test_mvnxpb.py
index b18ebebcc3..7693b01530 100644
--- a/test/utils/probability/test_mvnxpb.py
+++ b/test/utils/probability/test_mvnxpb.py
@@ -6,11 +6,13 @@

 from __future__ import annotations

+from collections.abc import Sequence
+
 from copy import deepcopy
 from functools import partial
 from itertools import count
-from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
+from typing import Any, Callable, Optional, Union
 from unittest.mock import patch

 import torch
@@ -22,7 +24,7 @@ def run_gaussian_estimator(
-    estimator: Callable[[Tensor], Tuple[Tensor, Union[Tensor, float, int]]],
+    estimator: Callable[[Tensor], tuple[Tensor, Union[Tensor, float, int]]],
     sqrt_cov: Tensor,
     num_samples: int,
     batch_limit: Optional[int] = None,
@@ -62,7 +64,7 @@ def setUp(
         self,
         ndims: Sequence[int] = (4, 8),
         batch_shape: Sequence[int] = (4,),
-        bound_range: Tuple[float, float] = (-5.0, 5.0),
+        bound_range: tuple[float, float] = (-5.0, 5.0),
         mc_num_samples: int = 100000,
         mc_batch_limit: int = 10000,
         mc_atol_multiplier: float = 4.0,
@@ -116,8 +118,8 @@ def gen_bounds(
         self,
         ndim: int,
         batch_shape: Sequence[int] = (),
-        bound_range: Optional[Tuple[float, float]] = None,
-    ) -> Tuple[Tensor, Tensor]:
+        bound_range: Optional[tuple[float, float]] = None,
+    ) -> tuple[Tensor, Tensor]:
         shape = tuple(batch_shape) + (ndim,)
         lower = torch.rand(shape, **self.tkwargs)
         upper = lower + (1 - lower) * torch.rand_like(lower)
@@ -128,7 +130,7 @@ def gen_bounds(
         return torch.stack([lower, upper], dim=-1)

     @property
-    def tkwargs(self) -> Dict[str, Any]:
+    def tkwargs(self) -> dict[str, Any]:
         return {"dtype": self.dtype, "device": self.device}

     def assertEqualMXNBPB(self, A: MVNXPB, B: MVNXPB):
diff --git a/test/utils/probability/test_truncated_multivariate_normal.py b/test/utils/probability/test_truncated_multivariate_normal.py
index 1f216d4ee8..c96cf022bd 100644
--- a/test/utils/probability/test_truncated_multivariate_normal.py
+++ b/test/utils/probability/test_truncated_multivariate_normal.py
@@ -6,8 +6,9 @@

 from __future__ import annotations

+from collections.abc import Sequence
+
 from itertools import count
-from typing import Sequence, Tuple

 import torch
 from botorch.utils.probability.mvnxpb import MVNXPB
@@ -23,7 +24,7 @@ class TestTruncatedMultivariateNormal(BotorchTestCase):
     def setUp(
         self,
-        ndims: Sequence[Tuple[int, int]] = (2, 4),
+        ndims: Sequence[tuple[int, int]] = (2, 4),
         lower_quantile_max: float = 0.9,  # if these get too far into the tail, naive
         upper_quantile_min: float = 0.1,  # MC methods will not produce any samples.
         num_log_probs: int = 4,
diff --git a/test/utils/probability/test_unified_skew_normal.py b/test/utils/probability/test_unified_skew_normal.py
index 80e1c068e8..34da68b887 100644
--- a/test/utils/probability/test_unified_skew_normal.py
+++ b/test/utils/probability/test_unified_skew_normal.py
@@ -6,11 +6,13 @@

 from __future__ import annotations

+from collections.abc import Sequence
+
 from copy import deepcopy
 from itertools import count
-from typing import Any, Dict, Optional, Sequence, Tuple
+from typing import Any, Optional

 import torch
 from botorch.utils.probability.mvnxpb import MVNXPB
@@ -28,7 +30,7 @@ class TestUnifiedSkewNormal(BotorchTestCase):
     def setUp(
         self,
-        ndims: Sequence[Tuple[int, int]] = ((1, 1), (2, 3), (3, 2), (3, 3)),
+        ndims: Sequence[tuple[int, int]] = ((1, 1), (2, 3), (3, 2), (3, 3)),
         lower_quantile_max: float = 0.9,  # if these get too far into the tail, naive
         upper_quantile_min: float = 0.1,  # MC methods will not produce any samples.
         num_log_probs: int = 4,
@@ -87,7 +89,7 @@ def setUp(
         )

     @property
-    def tkwargs(self) -> Dict[str, Any]:
+    def tkwargs(self) -> dict[str, Any]:
         return {"dtype": self.dtype, "device": self.device}

     def gen_covariances(
diff --git a/test/utils/test_datasets.py b/test/utils/test_datasets.py
index 8a836c982f..b617ec1680 100644
--- a/test/utils/test_datasets.py
+++ b/test/utils/test_datasets.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the MIT license found in the
 # LICENSE file in the root directory of this source tree.

-from typing import List, Optional
+from typing import Optional

 import torch
 from botorch.exceptions.errors import InputDataError, UnsupportedError
@@ -25,8 +25,8 @@ def make_dataset(
     d: int = 2,
     m: int = 1,
     has_yvar: bool = False,
-    feature_names: Optional[List[str]] = None,
-    outcome_names: Optional[List[str]] = None,
+    feature_names: Optional[list[str]] = None,
+    outcome_names: Optional[list[str]] = None,
     batch_shape: Optional[torch.Size] = None,
 ) -> SupervisedDataset:
     feature_names = feature_names or [f"x{i}" for i in range(d)]
diff --git a/test/utils/test_sampling.py b/test/utils/test_sampling.py
index 2459d332df..0e6fdf3e8b 100644
--- a/test/utils/test_sampling.py
+++ b/test/utils/test_sampling.py
@@ -9,7 +9,7 @@

 import itertools
 import warnings
 from abc import ABC
-from typing import Any, Dict, Type
+from typing import Any
 from unittest import mock

 import numpy as np
@@ -352,10 +352,10 @@ def test_sample_polytope_boundary(self) -> None:


 class PolytopeSamplerTestBase(ABC):
-    sampler_class: Type[PolytopeSampler]
-    sampler_kwargs: Dict[str, Any] = {}
-    constructor_seed_kwarg: Dict[str, int] = {}
-    draw_seed_kwarg: Dict[str, int] = {}
+    sampler_class: type[PolytopeSampler]
+    sampler_kwargs: dict[str, Any] = {}
+    constructor_seed_kwarg: dict[str, int] = {}
+    draw_seed_kwarg: dict[str, int] = {}

     def test_sample_polytope(self):
         for dtype in (torch.float, torch.double):
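All of the hunks above apply a single mechanical rewrite: the deprecated `typing` aliases (`List`, `Dict`, `Tuple`, `Type`) are replaced by the builtin generics standardized in PEP 585 (`list`, `dict`, `tuple`, `type`), and abstract container types (`Sequence`, `Iterable`) are imported from `collections.abc` rather than `typing`. Below is a minimal sketch of the pattern, assuming Python 3.9 or newer; the function and parameter names are hypothetical, chosen only to mirror the shapes of the annotations touched in the diff, and are not part of it.

# Illustrative sketch only -- not part of the diff. Assumes Python >= 3.9,
# where PEP 585 makes builtin containers subscriptable in annotations.
from collections.abc import Sequence  # previously: from typing import Sequence
from typing import Any, Optional      # Optional/Union et al. still live in typing


def make_dataset_names(
    feature_names: Optional[list[str]] = None,  # was: Optional[List[str]]
    tkwargs: Optional[dict[str, Any]] = None,   # was: Optional[Dict[str, Any]]
) -> tuple[list[str], dict[str, Any]]:          # was: Tuple[List[str], Dict[str, Any]]
    # Hypothetical helper: echoes the defaulting idiom seen in make_dataset above.
    return feature_names or ["x0", "x1"], tkwargs or {}


def total(xs: Sequence[float]) -> float:
    # Sequence is the abstract type; both list and tuple arguments satisfy it.
    return sum(xs)


print(make_dataset_names())    # (['x0', 'x1'], {})
print(total((1.0, 2.0, 3.0)))  # 6.0

Note that `Optional` and `Union` deliberately stay on `typing` throughout: the builtin `X | Y` union syntax (PEP 604) requires Python 3.10, so these hunks modernize only the container generics.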