Apply PEP 604 union type syntax codemod (pytorch#2561)
Summary:
This codemods all `Optional[X]` (and `Union[X, Y]`) type annotations to the PEP 604 syntax `X | None` (resp. `X | Y`). I also ran ufmt to fix up the imports.

Why? I came across https://github.com/asottile/pyupgrade randomly and figured I'd give it a shot...

This is mostly to test that everything runs smoothly. It may be kind of a pain to rebase on, so I'm not sure we should merge this in as one big chunk...
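
For context, a minimal sketch of the kind of rewrite this produces (the `ExampleAcqf` class is illustrative only, not a real botorch class; the `set_X_pending` signature mirrors the one touched in `botorch/acquisition/acquisition.py` below):

```python
from __future__ import annotations  # lets `X | None` annotations parse on Python < 3.10

from typing import Optional  # only needed for the "before" form

from torch import Tensor


class ExampleAcqf:
    """Hypothetical stand-in class; not an actual botorch acquisition function."""

    # Before the codemod: requires importing `Optional` from `typing`.
    def set_X_pending_old(self, X_pending: Optional[Tensor] = None) -> None:
        self.X_pending = X_pending

    # After the codemod (PEP 604): same runtime behavior, no `typing` import needed.
    def set_X_pending(self, X_pending: Tensor | None = None) -> None:
        self.X_pending = X_pending
```

Since most of these modules already have `from __future__ import annotations`, the `X | None` form is accepted on Python versions older than 3.10 as well.
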

Pull Request resolved: pytorch#2561

Reviewed By: esantorella

Differential Revision: D63733395

Pulled By: Balandat

fbshipit-source-id: 58e700487cb63223a4bc54a692567f0debd8f8a7
Balandat authored and facebook-github-bot committed Oct 2, 2024
1 parent 68faeff commit a0a2c05
Showing 167 changed files with 1,581 additions and 1,669 deletions.
5 changes: 2 additions & 3 deletions botorch/acquisition/acquisition.py
@@ -10,7 +10,6 @@

import warnings
from abc import ABC, abstractmethod
from typing import Optional

import torch
from botorch.exceptions import BotorchWarning
@@ -41,7 +40,7 @@ def __init__(self, model: Model) -> None:
super().__init__()
self.model: Model = model

def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None:
def set_X_pending(self, X_pending: Tensor | None = None) -> None:
r"""Informs the acquisition function about pending design points.
Args:
@@ -115,7 +114,7 @@ class MCSamplerMixin(ABC):

_default_sample_shape = torch.Size([512])

def __init__(self, sampler: Optional[MCSampler] = None) -> None:
def __init__(self, sampler: MCSampler | None = None) -> None:
r"""Register the sampler on the acquisition function.
Args:
10 changes: 4 additions & 6 deletions botorch/acquisition/active_learning.py
@@ -23,8 +23,6 @@

from __future__ import annotations

from typing import Optional

import torch
from botorch import settings
from botorch.acquisition.acquisition import AcquisitionFunction
@@ -53,9 +51,9 @@ def __init__(
self,
model: Model,
mc_points: Tensor,
sampler: Optional[MCSampler] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
sampler: MCSampler | None = None,
posterior_transform: PosteriorTransform | None = None,
X_pending: Tensor | None = None,
) -> None:
r"""q-Integrated Negative Posterior Variance.
@@ -140,7 +138,7 @@ def __init__(
self,
model: Model,
objective: MCAcquisitionObjective,
sampler: Optional[MCSampler] = None,
sampler: MCSampler | None = None,
) -> None:
r"""Pairwise Monte Carlo Posterior Variance
53 changes: 26 additions & 27 deletions botorch/acquisition/analytic.py
@@ -16,7 +16,6 @@
from abc import ABC
from contextlib import nullcontext
from copy import deepcopy
from typing import Optional, Union

import torch
from botorch.acquisition.acquisition import AcquisitionFunction
@@ -52,7 +51,7 @@ class AnalyticAcquisitionFunction(AcquisitionFunction, ABC):
def __init__(
self,
model: Model,
posterior_transform: Optional[PosteriorTransform] = None,
posterior_transform: PosteriorTransform | None = None,
) -> None:
r"""Base constructor for analytic acquisition functions.
@@ -76,14 +75,14 @@ def __init__(
)
self.posterior_transform = posterior_transform

def set_X_pending(self, X_pending: Optional[Tensor] = None) -> None:
def set_X_pending(self, X_pending: Tensor | None = None) -> None:
raise UnsupportedError(
"Analytic acquisition functions do not account for X_pending yet."
)

def _mean_and_sigma(
self, X: Tensor, compute_sigma: bool = True, min_var: float = 1e-12
) -> tuple[Tensor, Optional[Tensor]]:
) -> tuple[Tensor, Tensor | None]:
"""Computes the first and second moments of the model posterior.
Args:
@@ -135,8 +134,8 @@ class LogProbabilityOfImprovement(AnalyticAcquisitionFunction):
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
posterior_transform: Optional[PosteriorTransform] = None,
best_f: float | Tensor,
posterior_transform: PosteriorTransform | None = None,
maximize: bool = True,
):
r"""Single-outcome Probability of Improvement.
@@ -189,8 +188,8 @@ class ProbabilityOfImprovement(AnalyticAcquisitionFunction):
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
posterior_transform: Optional[PosteriorTransform] = None,
best_f: float | Tensor,
posterior_transform: PosteriorTransform | None = None,
maximize: bool = True,
):
r"""Single-outcome Probability of Improvement.
@@ -237,8 +236,8 @@ class qAnalyticProbabilityOfImprovement(AnalyticAcquisitionFunction):
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
posterior_transform: Optional[PosteriorTransform] = None,
best_f: float | Tensor,
posterior_transform: PosteriorTransform | None = None,
maximize: bool = True,
) -> None:
"""qPI using an analytic approximation.
@@ -314,8 +313,8 @@ class ExpectedImprovement(AnalyticAcquisitionFunction):
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
posterior_transform: Optional[PosteriorTransform] = None,
best_f: float | Tensor,
posterior_transform: PosteriorTransform | None = None,
maximize: bool = True,
):
r"""Single-outcome Expected Improvement (analytic).
@@ -378,8 +377,8 @@ class LogExpectedImprovement(AnalyticAcquisitionFunction):
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
posterior_transform: Optional[PosteriorTransform] = None,
best_f: float | Tensor,
posterior_transform: PosteriorTransform | None = None,
maximize: bool = True,
):
r"""Logarithm of single-outcome Expected Improvement (analytic).
@@ -447,9 +446,9 @@ class LogConstrainedExpectedImprovement(AnalyticAcquisitionFunction):
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
best_f: float | Tensor,
objective_index: int,
constraints: dict[int, tuple[Optional[float], Optional[float]]],
constraints: dict[int, tuple[float | None, float | None]],
maximize: bool = True,
) -> None:
r"""Analytic Log Constrained Expected Improvement.
@@ -525,9 +524,9 @@ class ConstrainedExpectedImprovement(AnalyticAcquisitionFunction):
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
best_f: float | Tensor,
objective_index: int,
constraints: dict[int, tuple[Optional[float], Optional[float]]],
constraints: dict[int, tuple[float | None, float | None]],
maximize: bool = True,
) -> None:
r"""Analytic Constrained Expected Improvement.
@@ -606,7 +605,7 @@ def __init__(
X_observed: Tensor,
num_fantasies: int = 20,
maximize: bool = True,
posterior_transform: Optional[PosteriorTransform] = None,
posterior_transform: PosteriorTransform | None = None,
) -> None:
r"""Single-outcome Noisy Log Expected Improvement (via fantasies).
@@ -762,8 +761,8 @@ class UpperConfidenceBound(AnalyticAcquisitionFunction):
def __init__(
self,
model: Model,
beta: Union[float, Tensor],
posterior_transform: Optional[PosteriorTransform] = None,
beta: float | Tensor,
posterior_transform: PosteriorTransform | None = None,
maximize: bool = True,
) -> None:
r"""Single-outcome Upper Confidence Bound.
@@ -812,7 +811,7 @@ class PosteriorMean(AnalyticAcquisitionFunction):
def __init__(
self,
model: Model,
posterior_transform: Optional[PosteriorTransform] = None,
posterior_transform: PosteriorTransform | None = None,
maximize: bool = True,
) -> None:
r"""Single-outcome Posterior Mean.
@@ -857,7 +856,7 @@ def __init__(
self,
model: Model,
weights: Tensor,
posterior_transform: Optional[PosteriorTransform] = None,
posterior_transform: PosteriorTransform | None = None,
) -> None:
r"""Scalarized Posterior Mean.
@@ -919,7 +918,7 @@ class PosteriorStandardDeviation(AnalyticAcquisitionFunction):
def __init__(
self,
model: Model,
posterior_transform: Optional[PosteriorTransform] = None,
posterior_transform: PosteriorTransform | None = None,
maximize: bool = True,
) -> None:
r"""Single-outcome Posterior Mean.
@@ -1135,8 +1134,8 @@ def _get_noiseless_fantasy_model(


def _preprocess_constraint_bounds(
acqf: Union[LogConstrainedExpectedImprovement, ConstrainedExpectedImprovement],
constraints: dict[int, tuple[Optional[float], Optional[float]]],
acqf: LogConstrainedExpectedImprovement | ConstrainedExpectedImprovement,
constraints: dict[int, tuple[float | None, float | None]],
) -> None:
r"""Set up constraint bounds.
@@ -1180,7 +1179,7 @@ def _preprocess_constraint_bounds(


def _compute_log_prob_feas(
acqf: Union[LogConstrainedExpectedImprovement, ConstrainedExpectedImprovement],
acqf: LogConstrainedExpectedImprovement | ConstrainedExpectedImprovement,
means: Tensor,
sigmas: Tensor,
) -> Tensor:
10 changes: 4 additions & 6 deletions botorch/acquisition/bayesian_active_learning.py
@@ -22,8 +22,6 @@

import warnings

from typing import Optional, Union

from botorch.acquisition.acquisition import AcquisitionFunction, MCSamplerMixin
from botorch.acquisition.objective import PosteriorTransform
from botorch.models import ModelListGP
@@ -79,10 +77,10 @@ class qBayesianActiveLearningByDisagreement(
):
def __init__(
self,
model: Union[ModelListGP, SaasFullyBayesianSingleTaskGP],
sampler: Optional[MCSampler] = None,
posterior_transform: Optional[PosteriorTransform] = None,
X_pending: Optional[Tensor] = None,
model: ModelListGP | SaasFullyBayesianSingleTaskGP,
sampler: MCSampler | None = None,
posterior_transform: PosteriorTransform | None = None,
X_pending: Tensor | None = None,
) -> None:
"""
Batch implementation [kirsch2019batchbald]_ of BALD [Houlsby2011bald]_,
3 changes: 1 addition & 2 deletions botorch/acquisition/cached_cholesky.py
@@ -11,7 +11,6 @@
from __future__ import annotations

import warnings
from typing import Optional

import torch
from botorch.acquisition.acquisition import MCSamplerMixin
@@ -72,7 +71,7 @@ def __init__(
self,
model: Model,
cache_root: bool = False,
sampler: Optional[MCSampler] = None,
sampler: MCSampler | None = None,
) -> None:
r"""Set class attributes and perform compatibility checks.
14 changes: 7 additions & 7 deletions botorch/acquisition/cost_aware.py
@@ -13,7 +13,7 @@

import warnings
from abc import ABC, abstractmethod
from typing import Callable, Optional, Union
from collections.abc import Callable

import torch
from botorch import settings
@@ -35,7 +35,7 @@ class CostAwareUtility(Module, ABC):

@abstractmethod
def forward(
self, X: Tensor, deltas: Tensor, sampler: Optional[MCSampler] = None
self, X: Tensor, deltas: Tensor, sampler: MCSampler | None = None
) -> Tensor:
r"""Evaluate the cost-aware utility on the candidates and improvements.
@@ -67,7 +67,7 @@ def __init__(self, cost: Callable[[Tensor, Tensor], Tensor]) -> None:
self._cost_callable: Callable[[Tensor, Tensor], Tensor] = cost

def forward(
self, X: Tensor, deltas: Tensor, sampler: Optional[MCSampler] = None
self, X: Tensor, deltas: Tensor, sampler: MCSampler | None = None
) -> Tensor:
r"""Evaluate the cost function on the candidates and improvements.
@@ -109,9 +109,9 @@ class InverseCostWeightedUtility(CostAwareUtility):

def __init__(
self,
cost_model: Union[DeterministicModel, GPyTorchModel],
cost_model: DeterministicModel | GPyTorchModel,
use_mean: bool = True,
cost_objective: Optional[MCAcquisitionObjective] = None,
cost_objective: MCAcquisitionObjective | None = None,
min_cost: float = 1e-2,
) -> None:
r"""Cost-aware utility that weights increase in utility by inverse cost.
@@ -153,8 +153,8 @@ def forward(
self,
X: Tensor,
deltas: Tensor,
sampler: Optional[MCSampler] = None,
X_evaluation_mask: Optional[Tensor] = None,
sampler: MCSampler | None = None,
X_evaluation_mask: Tensor | None = None,
) -> Tensor:
r"""Evaluate the cost function on the candidates and improvements. Note
that negative values of `deltas` are instead scaled by the cost, and not
13 changes: 6 additions & 7 deletions botorch/acquisition/decoupled.py
@@ -10,7 +10,6 @@

import warnings
from abc import ABC
from typing import Optional

import torch
from botorch.acquisition.acquisition import AcquisitionFunction
@@ -52,7 +51,7 @@ class DecoupledAcquisitionFunction(AcquisitionFunction, ABC):
"""

def __init__(
self, model: ModelList, X_evaluation_mask: Optional[Tensor] = None, **kwargs
self, model: ModelList, X_evaluation_mask: Tensor | None = None, **kwargs
) -> None:
r"""Initialize.
@@ -71,12 +70,12 @@ def __init__(
self.X_pending = None

@property
def X_evaluation_mask(self) -> Optional[Tensor]:
def X_evaluation_mask(self) -> Tensor | None:
r"""Get the evaluation indices for the new candidate."""
return self._X_evaluation_mask

@X_evaluation_mask.setter
def X_evaluation_mask(self, X_evaluation_mask: Optional[Tensor] = None) -> None:
def X_evaluation_mask(self, X_evaluation_mask: Tensor | None = None) -> None:
r"""Set the evaluation indices for the new candidate."""
if X_evaluation_mask is not None:
# TODO: Add batch support
@@ -92,8 +91,8 @@ def X_evaluation_mask(self, X_evaluation_mask: Optional[Tensor] = None) -> None:

def set_X_pending(
self,
X_pending: Optional[Tensor] = None,
X_pending_evaluation_mask: Optional[Tensor] = None,
X_pending: Tensor | None = None,
X_pending_evaluation_mask: Tensor | None = None,
) -> None:
r"""Informs the AF about pending design points for different outcomes.
@@ -135,7 +134,7 @@ def set_X_pending(
self.X_pending = X_pending
self.X_pending_evaluation_mask = X_pending_evaluation_mask

def construct_evaluation_mask(self, X: Tensor) -> Optional[Tensor]:
def construct_evaluation_mask(self, X: Tensor) -> Tensor | None:
r"""Construct the boolean evaluation mask for X and X_pending
Args: