From e7169a0405c70b20e8bcae17b9e7d9526172300b Mon Sep 17 00:00:00 2001
From: Elizabeth Santorella
Date: Wed, 28 Aug 2024 11:04:59 -0700
Subject: [PATCH] Make BaseTestFunction.evaluate_true accept 1d inputs (#2492)

Summary:
Pull Request resolved: https://github.com/pytorch/botorch/pull/2492

Context: Currently, every test function except for `AugmentedBranin` has an
`evaluate_true` method that works with 1d inputs. It is actually surprising
that so many work, since `BaseTestFunction` is currently written so that
`BaseTestFunction.forward` casts inputs to 2d before passing them to
`BaseTestFunction.evaluate_true`. As a result, it is currently unclear whether
`evaluate_true` should be expected to work with 1d inputs, yet downstream code
already relies on it doing so.

This PR:
* Requires `evaluate_true` to work with 1d inputs (see the sketch below)
* Removes the logic in `forward` that unsqueezes unbatched tensors before
  passing them to `evaluate_true` and then squeezes the batch dimension back
  out, in favor of leaving unbatched tensors unbatched everywhere
* Changes `AugmentedBranin` to work with 1d inputs
* Fixes a couple of type errors
* Expands docstrings
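
For illustration only (not part of the diff below), here is a minimal sketch of
the convention this change settles on: an `evaluate_true` written with
`X[..., i]`-style indexing and a reduction over the last dimension handles a
single `d`-dim point and a `batch_shape x d` batch alike, so `forward` can pass
`X` through unchanged. The `sum_of_squares` function is a hypothetical
stand-in for a concrete test function, not part of BoTorch.

```python
import torch
from torch import Tensor


def sum_of_squares(X: Tensor) -> Tensor:
    # Hypothetical stand-in for an `evaluate_true` implementation: indexing
    # with `X[..., i]` / reducing over `dim=-1` works for both an unbatched
    # `d`-dim point and a `batch_shape x d` batch of points.
    return (X**2).sum(dim=-1)


print(sum_of_squares(torch.rand(3)).shape)        # torch.Size([]) -- unbatched
print(sum_of_squares(torch.rand(2, 5, 3)).shape)  # torch.Size([2, 5])
```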
+ """ pass # pragma: no cover diff --git a/botorch/test_functions/multi_fidelity.py b/botorch/test_functions/multi_fidelity.py index d072664a8c..dd2d057f0b 100644 --- a/botorch/test_functions/multi_fidelity.py +++ b/botorch/test_functions/multi_fidelity.py @@ -46,7 +46,7 @@ class AugmentedBranin(SyntheticTestFunction): def evaluate_true(self, X: Tensor) -> Tensor: t1 = ( X[..., 1] - - (5.1 / (4 * math.pi**2) - 0.1 * (1 - X[:, 2])) * X[:, 0].pow(2) + - (5.1 / (4 * math.pi**2) - 0.1 * (1 - X[..., 2])) * X[..., 0].pow(2) + 5 / math.pi * X[..., 0] - 6 ) diff --git a/botorch/utils/testing.py b/botorch/utils/testing.py index bcb41462b6..e0cfc6d035 100644 --- a/botorch/utils/testing.py +++ b/botorch/utils/testing.py @@ -11,6 +11,7 @@ from abc import abstractproperty from collections import OrderedDict from collections.abc import Sequence +from itertools import product from typing import Any, Optional from unittest import mock, TestCase @@ -102,15 +103,22 @@ def assertAllClose( class BaseTestProblemTestCaseMixIn: - def test_forward(self): - for dtype in (torch.float, torch.double): - for batch_shape in (torch.Size(), torch.Size([2]), torch.Size([2, 3])): - for f in self.functions: - f.to(device=self.device, dtype=dtype) - X = torch.rand(*batch_shape, f.dim, device=self.device, dtype=dtype) - X = f.bounds[0] + X * (f.bounds[1] - f.bounds[0]) - res = f(X) - f(X, noise=False) + def test_forward_and_evaluate_true(self): + dtypes = (torch.float, torch.double) + batch_shapes = (torch.Size(), torch.Size([2]), torch.Size([2, 3])) + for dtype, batch_shape, f in product(dtypes, batch_shapes, self.functions): + f.to(device=self.device, dtype=dtype) + X = torch.rand(*batch_shape, f.dim, device=self.device, dtype=dtype) + X = f.bounds[0] + X * (f.bounds[1] - f.bounds[0]) + res_forward = f(X) + res_evaluate_true = f.evaluate_true(X) + for method, res in { + "forward": res_forward, + "evaluate_true": res_evaluate_true, + }.items(): + with self.subTest( + f"{dtype}_{batch_shape}_{f.__class__.__name__}_{method}" + ): self.assertEqual(res.dtype, dtype) self.assertEqual(res.device.type, self.device.type) tail_shape = torch.Size( @@ -340,7 +348,7 @@ def posterior( X: Tensor, output_indices: Optional[list[int]] = None, posterior_transform: Optional[PosteriorTransform] = None, - observation_noise: bool = False, + observation_noise: bool | torch.Tensor = False, ) -> MockPosterior: if posterior_transform is not None: return posterior_transform(self._posterior) @@ -357,7 +365,7 @@ def batch_shape(self) -> torch.Size: extended_shape = self._posterior._extended_shape() return extended_shape[:-2] - def state_dict(self) -> None: + def state_dict(self, *args, **kwargs) -> None: pass def load_state_dict(