Fix hard-coded double precision in test_functions to default dtype (#2597)

Summary:
## Motivation

This PR replaces the hard-coded double precision used in the initialization of `test_functions/base.py` with `torch.get_default_dtype()`.

See #2596 for more details.
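
For illustration, here is a minimal sketch of what the change enables, assuming the synthetic test functions forward the new `dtype` argument to the base class as in the diff below:

```python
import torch
from botorch.test_functions.synthetic import Branin

# Before this change, `bounds` was always created with dtype=torch.double.
# With the new `dtype` argument (default torch.double), other precisions work too.
f_double = Branin()                      # default remains double precision
f_single = Branin(dtype=torch.float32)   # assumes Branin passes `dtype` through

print(f_double.bounds.dtype)  # torch.float64
print(f_single.bounds.dtype)  # torch.float32
```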

### Have you read the [Contributing Guidelines on pull requests](https://github.com/pytorch/botorch/blob/main/CONTRIBUTING.md#pull-requests)?

Yes, I have read them.

Pull Request resolved: #2597

Test Plan:
I ran code formatting via `ufmt` and checked the code via `pytest -ra`. All tests related to the changes here were adjusted in the second commit of the branch.
Locally, two tests failed for me, but they do not appear to be related to the fix implemented here. If it turns out they are, I'd be happy to adjust further.

## Related PRs

None, but issue #2596 is related.

Reviewed By: saitcakmak

Differential Revision: D65066231

Pulled By: Balandat

fbshipit-source-id: 4beac1fc9a1e5094fd4806958ac2441a12506eb7
AVHopp authored and facebook-github-bot committed Oct 31, 2024
1 parent 9e44749 commit 10f6342
Showing 6 changed files with 117 additions and 36 deletions.
10 changes: 7 additions & 3 deletions botorch/test_functions/base.py
@@ -29,6 +29,7 @@ def __init__(
         self,
         noise_std: None | float | list[float] = None,
         negate: bool = False,
+        dtype: torch.dtype = torch.double,
     ) -> None:
         r"""Base constructor for test functions.
@@ -37,6 +38,7 @@ def __init__(
                 provided, specifies separate noise standard deviations for each
                 objective in a multiobjective problem.
             negate: If True, negate the function.
+            dtype: The dtype that is used for the bounds of the function.
         """
         super().__init__()
         self.noise_std = noise_std
@@ -47,7 +49,8 @@ def __init__(
                 f"Got {self.dim=} and {len(self._bounds)=}."
             )
         self.register_buffer(
-            "bounds", torch.tensor(self._bounds, dtype=torch.double).transpose(-1, -2)
+            "bounds",
+            torch.tensor(self._bounds, dtype=dtype).transpose(-1, -2),
         )

     def forward(self, X: Tensor, noise: bool = True) -> Tensor:
@@ -166,6 +169,7 @@ def __init__(
         self,
         noise_std: None | float | list[float] = None,
         negate: bool = False,
+        dtype: torch.dtype = torch.double,
     ) -> None:
         r"""Base constructor for multi-objective test functions.
@@ -180,8 +184,8 @@ def __init__(
                 f"If specified as a list, length of noise_std ({len(noise_std)}) "
                 f"must match the number of objectives ({len(self._ref_point)})"
             )
-        super().__init__(noise_std=noise_std, negate=negate)
-        ref_point = torch.tensor(self._ref_point, dtype=torch.get_default_dtype())
+        super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)
+        ref_point = torch.tensor(self._ref_point, dtype=dtype)
         if negate:
             ref_point *= -1
         self.register_buffer("ref_point", ref_point)
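As a usage illustration (not part of the diff), a hypothetical `BaseTestProblem` subclass that passes the new `dtype` argument through to the base constructor could look like this:

```python
import torch
from torch import Tensor
from botorch.test_functions.base import BaseTestProblem


class ToyProblem(BaseTestProblem):
    # Hypothetical example problem; `dim` and `_bounds` are required by the base class.
    dim = 2
    _bounds = [(0.0, 1.0), (0.0, 1.0)]

    def __init__(self, noise_std=None, negate=False, dtype=torch.double):
        super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)

    def evaluate_true(self, X: Tensor) -> Tensor:
        return X.sum(dim=-1)


prob = ToyProblem(dtype=torch.float32)
print(prob.bounds.dtype)  # torch.float32 -- bounds no longer hard-coded to double
```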
19 changes: 15 additions & 4 deletions botorch/test_functions/multi_fidelity.py
@@ -74,13 +74,19 @@ class AugmentedHartmann(SyntheticTestFunction):
     _optimizers = [(0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573, 1.0)]
     _check_grad_at_opt = False

-    def __init__(self, noise_std: float | None = None, negate: bool = False) -> None:
+    def __init__(
+        self,
+        noise_std: float | None = None,
+        negate: bool = False,
+        dtype: torch.dtype = torch.double,
+    ) -> None:
         r"""
         Args:
             noise_std: Standard deviation of the observation noise.
             negate: If True, negate the function.
+            dtype: The dtype that is used for the bounds of the function.
         """
-        super().__init__(noise_std=noise_std, negate=negate)
+        super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)
         self.register_buffer("ALPHA", torch.tensor([1.0, 1.2, 3.0, 3.2]))
         A = [
             [10, 3, 17, 3.5, 1.7, 8],
@@ -126,13 +132,18 @@ class AugmentedRosenbrock(SyntheticTestFunction):
     _optimal_value = 0.0

     def __init__(
-        self, dim=3, noise_std: float | None = None, negate: bool = False
+        self,
+        dim=3,
+        noise_std: float | None = None,
+        negate: bool = False,
+        dtype: torch.dtype = torch.double,
     ) -> None:
         r"""
         Args:
             dim: The (input) dimension. Must be at least 3.
             noise_std: Standard deviation of the observation noise.
             negate: If True, negate the function.
+            dtype: The dtype that is used for the bounds of the function.
         """
         if dim < 3:
             raise ValueError(
@@ -141,7 +152,7 @@ def __init__(
         self.dim = dim
         self._bounds = [(-5.0, 10.0) for _ in range(self.dim)]
         self._optimizers = [tuple(1.0 for _ in range(self.dim))]
-        super().__init__(noise_std=noise_std, negate=negate)
+        super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)

     def evaluate_true(self, X: Tensor) -> Tensor:
         X_curr = X[..., :-3]
28 changes: 21 additions & 7 deletions botorch/test_functions/multi_objective.py
@@ -119,13 +119,15 @@ def __init__(
         self,
         noise_std: None | float | list[float] = None,
         negate: bool = False,
+        dtype: torch.dtype = torch.double,
     ) -> None:
         r"""
         Args:
             noise_std: Standard deviation of the observation noise.
             negate: If True, negate the objectives.
+            dtype: The dtype that is used for the bounds of the function.
         """
-        super().__init__(noise_std=noise_std, negate=negate)
+        super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)
         self._branin = Branin()

     def _rescaled_branin(self, X: Tensor) -> Tensor:
@@ -179,12 +181,14 @@ def __init__(
         dim: int,
         noise_std: None | float | list[float] = None,
         negate: bool = False,
+        dtype: torch.dtype = torch.double,
     ) -> None:
         r"""
         Args:
             dim: The (input) dimension.
             noise_std: Standard deviation of the observation noise.
             negate: If True, negate the function.
+            dtype: The dtype that is used for the bounds of the function.
         """
         if dim < self._min_dim:
             raise ValueError(f"dim must be >= {self._min_dim}, but got dim={dim}!")
@@ -194,7 +198,7 @@ def __init__(
         ]
         # max_hv is the area of the box minus the area of the curve formed by the PF.
         self._max_hv = self._ref_point[0] * self._ref_point[1] - self._area_under_curve
-        super().__init__(noise_std=noise_std, negate=negate)
+        super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)

     @abstractmethod
     def _h(self, X: Tensor) -> Tensor:
@@ -339,13 +343,15 @@ def __init__(
         num_objectives: int = 2,
         noise_std: None | float | list[float] = None,
         negate: bool = False,
+        dtype: torch.dtype = torch.double,
     ) -> None:
         r"""
         Args:
             dim: The (input) dimension of the function.
             num_objectives: Must be less than dim.
             noise_std: Standard deviation of the observation noise.
             negate: If True, negate the function.
+            dtype: The dtype that is used for the bounds of the function.
         """
         if dim <= num_objectives:
             raise ValueError(
@@ -356,7 +362,7 @@ def __init__(
         self.k = self.dim - self.num_objectives + 1
         self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
         self._ref_point = [self._ref_val for _ in range(num_objectives)]
-        super().__init__(noise_std=noise_std, negate=negate)
+        super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)


 class DTLZ1(DTLZ):
@@ -608,12 +614,14 @@ def __init__(
         noise_std: None | float | list[float] = None,
         negate: bool = False,
         num_objectives: int = 2,
+        dtype: torch.dtype = torch.double,
     ) -> None:
         r"""
         Args:
             noise_std: Standard deviation of the observation noise.
             negate: If True, negate the objectives.
             num_objectives: The number of objectives.
+            dtype: The dtype that is used for the bounds of the function.
         """
         if num_objectives not in (2, 3, 4):
             raise UnsupportedError("GMM only currently supports 2 to 4 objectives.")
@@ -623,7 +631,7 @@ def __init__(
         if num_objectives > 3:
             self._ref_point.append(-0.1866)
         self.num_objectives = num_objectives
-        super().__init__(noise_std=noise_std, negate=negate)
+        super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)
         gmm_pos = torch.tensor(
             [
                 [[0.2, 0.2], [0.8, 0.2], [0.5, 0.7]],
@@ -935,13 +943,15 @@ def __init__(
         num_objectives: int = 2,
         noise_std: None | float | list[float] = None,
         negate: bool = False,
+        dtype: torch.dtype = torch.double,
     ) -> None:
         r"""
         Args:
             dim: The (input) dimension of the function.
             num_objectives: Number of objectives. Must not be larger than dim.
             noise_std: Standard deviation of the observation noise.
             negate: If True, negate the function.
+            dtype: The dtype that is used for the bounds of the function.
         """
         if num_objectives != 2:
             raise NotImplementedError(
@@ -954,7 +964,7 @@ def __init__(
         self.num_objectives = num_objectives
         self.dim = dim
         self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
-        super().__init__(noise_std=noise_std, negate=negate)
+        super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)

     @staticmethod
     def _g(X: Tensor) -> Tensor:
@@ -1246,15 +1256,17 @@ def __init__(
         noise_std: None | float | list[float] = None,
         constraint_noise_std: None | float | list[float] = None,
         negate: bool = False,
+        dtype: torch.dtype = torch.double,
     ) -> None:
         r"""
         Args:
             noise_std: Standard deviation of the observation noise of the objectives.
             constraint_noise_std: Standard deviation of the observation noise of the
                 constraint.
             negate: If True, negate the function.
+            dtype: The dtype that is used for the bounds of the function.
         """
-        super().__init__(noise_std=noise_std, negate=negate)
+        super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)
         con_bounds = torch.tensor(self._con_bounds, dtype=self.bounds.dtype).transpose(
             -1, -2
         )
@@ -1357,6 +1369,7 @@ def __init__(
         noise_std: None | float | list[float] = None,
         constraint_noise_std: None | float | list[float] = None,
         negate: bool = False,
+        dtype: torch.dtype = torch.double,
     ) -> None:
         r"""
         Args:
@@ -1365,12 +1378,13 @@ def __init__(
             constraint_noise_std: Standard deviation of the observation noise of the
                 constraints.
             negate: If True, negate the function.
+            dtype: The dtype that is used for the bounds of the function.
         """
         if dim < 2:
             raise ValueError("dim must be greater than or equal to 2.")
         self.dim = dim
         self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
-        super().__init__(noise_std=noise_std, negate=negate)
+        super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)
         self.constraint_noise_std = constraint_noise_std

     def LA2(self, A, B, C, D, theta) -> Tensor:
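For multi-objective problems, the reference point now follows the same dtype as the bounds. A brief example sketch (not part of the diff, assuming `BraninCurrin` forwards `dtype` as shown above):

```python
import torch
from botorch.test_functions.multi_objective import BraninCurrin

problem = BraninCurrin(dtype=torch.float32)
print(problem.bounds.dtype)     # torch.float32
print(problem.ref_point.dtype)  # torch.float32 -- ref_point uses `dtype` as well
```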
25 changes: 19 additions & 6 deletions botorch/test_functions/sensitivity_analysis.py
@@ -24,13 +24,18 @@ class Ishigami(SyntheticTestFunction):
     """

     def __init__(
-        self, b: float = 0.1, noise_std: float | None = None, negate: bool = False
+        self,
+        b: float = 0.1,
+        noise_std: float | None = None,
+        negate: bool = False,
+        dtype: torch.dtype = torch.double,
     ) -> None:
         r"""
         Args:
             b: the b constant, should be 0.1 or 0.05.
             noise_std: Standard deviation of the observation noise.
             negative: If True, negative the objective.
+            dtype: The dtype that is used for the bounds of the function.
         """
         self._optimizers = None
         if b not in (0.1, 0.05):
@@ -52,7 +57,7 @@ def __init__(
         self.dgsm_gradient_square = [2.8, 24.5, 11]
         self._bounds = [(-math.pi, math.pi) for _ in range(self.dim)]
         self.b = b
-        super().__init__(noise_std=noise_std, negate=negate)
+        super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)

     @property
     def _optimal_value(self) -> float:
@@ -127,13 +132,15 @@ def __init__(
         a: list = None,
         noise_std: float | None = None,
         negate: bool = False,
+        dtype: torch.dtype = torch.double,
     ) -> None:
         r"""
         Args:
             dim: Dimensionality of the problem. If 6, 8, or 15, will use standard a.
             a: a parameter, unless dim is 6, 8, or 15.
             noise_std: Standard deviation of observation noise.
-            negate: Return negatie of function.
+            negate: Return negative of function.
+            dtype: The dtype that is used for the bounds of the function.
         """
         self._optimizers = None
         self.dim = dim
@@ -163,7 +170,7 @@ def __init__(
         else:
             self.a = a
         self.optimal_sobol_indicies()
-        super().__init__(noise_std=noise_std, negate=negate)
+        super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)

     @property
     def _optimal_value(self) -> float:
@@ -207,11 +214,17 @@ class Morris(SyntheticTestFunction):
     Proposed to test sensitivity analysis methods
     """

-    def __init__(self, noise_std: float | None = None, negate: bool = False) -> None:
+    def __init__(
+        self,
+        noise_std: float | None = None,
+        negate: bool = False,
+        dtype: torch.dtype = torch.double,
+    ) -> None:
         r"""
         Args:
             noise_std: Standard deviation of observation noise.
             negate: Return negative of function.
+            dtype: The dtype that is used for the bounds of the function.
         """
         self._optimizers = None
         self.dim = 20
@@ -238,7 +251,7 @@ def __init__(self, noise_std: float | None = None, negate: bool = False) -> None
             0,
             0,
         ]
-        super().__init__(noise_std=noise_std, negate=negate)
+        super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)

     @property
     def _optimal_value(self) -> float: