From d14cbc77c357471108ec2f8b36407a9804f9dd7c Mon Sep 17 00:00:00 2001
From: Elizabeth Santorella
Date: Thu, 24 Oct 2024 05:35:12 -0700
Subject: [PATCH] Make BoTorch test problems into ParamBasedTestProblems
 (#2944)

Summary:
Context: In the benchmarks, we currently have a zoo of five runners
designed to handle different ways of generating data. This makes it
challenging to support other changes to runners, such as running trials
asynchronously. This diff is a step towards consolidating into one
Runner, abstracting the data-generating process away into a test
problem:

* `BotorchTestProblemRunner` has a BoTorch problem
* `ParamBasedTestProblemRunner` has a `ParamBasedTestProblem`
* `SurrogateRunner` has a `SurrogateProblem`
* `BotorchTestProblemRunner` and `ParamBasedTestProblemRunner` subclass
  `BenchmarkRunner`
* `SurrogateRunner` subclasses `BenchmarkRunner`

By wrapping BoTorch problems in a `ParamBasedTestProblem`, this diff
enables going from five runners to three: `BotorchTestProblemRunner`,
`ParamBasedTestProblemRunner`, and `SyntheticProblemRunner` can be
combined into one.

This PR:
* Introduces `BoTorchTestProblem`, which is a `ParamBasedTestProblem`
  and can therefore be used with `ParamBasedTestProblemRunner`. It
  localizes tensor-related logic, including `modified_bounds`, in the
  BoTorch problem so that the runner no longer needs to handle it.
* Gets rid of `SyntheticProblemRunner`, merging it into
  `ParamBasedTestProblemRunner`. We may want to rename this class after
  more consolidation.
* Makes `BotorchTestProblemRunner` an alias of
  `ParamBasedTestProblemRunner`.

Reviewed By: Balandat

Differential Revision: D63639190
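For reviewers, a minimal sketch of the new construction pattern,
mirroring the updated tests (`Branin` and the outcome name "foo" are
just example values):

    from ax.benchmark.runners.botorch_test import (
        BoTorchTestProblem,
        ParamBasedTestProblemRunner,
    )
    from botorch.test_functions.synthetic import Branin

    # The BoTorch problem is wrapped in a BoTorchTestProblem, which owns
    # tensor conversion and any modified_bounds; noise_std and negate now
    # belong on the runner, not on the test problem.
    runner = ParamBasedTestProblemRunner(
        test_problem=BoTorchTestProblem(botorch_problem=Branin()),
        outcome_names=["foo"],
    )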
---
 ax/benchmark/benchmark_problem.py             |  10 +-
 .../synthetic/discretized/mixed_integer.py    |  12 +-
 .../problems/synthetic/hss/jenatton.py        |   2 +-
 ax/benchmark/runners/botorch_test.py          | 231 ++++++++----------
 .../problems/test_mixed_integer_problems.py   |  29 ++-
 .../runners/test_botorch_test_problem.py      | 127 ++++++----
 ax/benchmark/tests/test_benchmark_problem.py  |  17 +-
 7 files changed, 221 insertions(+), 207 deletions(-)

diff --git a/ax/benchmark/benchmark_problem.py b/ax/benchmark/benchmark_problem.py
index 971becca18c..154191531c2 100644
--- a/ax/benchmark/benchmark_problem.py
+++ b/ax/benchmark/benchmark_problem.py
@@ -13,7 +13,10 @@
 
 from ax.benchmark.benchmark_metric import BenchmarkMetric
 from ax.benchmark.runners.base import BenchmarkRunner
-from ax.benchmark.runners.botorch_test import BotorchTestProblemRunner
+from ax.benchmark.runners.botorch_test import (
+    BoTorchTestProblem,
+    ParamBasedTestProblemRunner,
+)
 from ax.core.data import Data
 from ax.core.experiment import Experiment
 from ax.core.objective import MultiObjective, Objective
@@ -384,9 +387,8 @@ def create_problem_from_botorch(
         name=name,
         search_space=search_space,
         optimization_config=optimization_config,
-        runner=BotorchTestProblemRunner(
-            # pyre-ignore[45]: Can't instantiate abstract class
-            test_problem=test_problem_class(**test_problem_kwargs),
+        runner=ParamBasedTestProblemRunner(
+            test_problem=BoTorchTestProblem(botorch_problem=test_problem),
             outcome_names=outcome_names,
             search_space_digest=extract_search_space_digest(
                 search_space=search_space,
diff --git a/ax/benchmark/problems/synthetic/discretized/mixed_integer.py b/ax/benchmark/problems/synthetic/discretized/mixed_integer.py
index 98d72f97b7d..36359e9076e 100644
--- a/ax/benchmark/problems/synthetic/discretized/mixed_integer.py
+++ b/ax/benchmark/problems/synthetic/discretized/mixed_integer.py
@@ -21,7 +21,10 @@
 
 from ax.benchmark.benchmark_metric import BenchmarkMetric
 from ax.benchmark.benchmark_problem import BenchmarkProblem
-from ax.benchmark.runners.botorch_test import BotorchTestProblemRunner
+from ax.benchmark.runners.botorch_test import (
+    BoTorchTestProblem,
+    ParamBasedTestProblemRunner,
+)
 from ax.core.objective import Objective
 from ax.core.optimization_config import OptimizationConfig
 from ax.core.parameter import ParameterType, RangeParameter
@@ -101,8 +104,11 @@ def _get_problem_from_common_inputs(
         test_problem = test_problem_class(dim=dim)
     else:
         test_problem = test_problem_class(dim=dim, bounds=test_problem_bounds)
-    runner = BotorchTestProblemRunner(
-        test_problem=test_problem, outcome_names=[metric_name], modified_bounds=bounds
+    runner = ParamBasedTestProblemRunner(
+        test_problem=BoTorchTestProblem(
+            botorch_problem=test_problem, modified_bounds=bounds
+        ),
+        outcome_names=[metric_name],
     )
     return BenchmarkProblem(
         name=benchmark_name + ("_observed_noise" if observe_noise_sd else ""),
diff --git a/ax/benchmark/problems/synthetic/hss/jenatton.py b/ax/benchmark/problems/synthetic/hss/jenatton.py
index 3c80b0c6091..664b488ea56 100644
--- a/ax/benchmark/problems/synthetic/hss/jenatton.py
+++ b/ax/benchmark/problems/synthetic/hss/jenatton.py
@@ -63,7 +63,7 @@ def evaluate_true(self, params: Mapping[str, float | int | None]) -> torch.Tensor:
         # `jenatton_test_function`, for 1st positional argument, expected
         # `Optional[float]` but got `Union[None, bool, float, int, str]`.
         value = jenatton_test_function(**params)
-        return torch.tensor(value)
+        return torch.tensor(value, dtype=torch.double)
 
 
 def get_jenatton_benchmark_problem(
diff --git a/ax/benchmark/runners/botorch_test.py b/ax/benchmark/runners/botorch_test.py
index afd4a522ca0..8efefc9fc95 100644
--- a/ax/benchmark/runners/botorch_test.py
+++ b/ax/benchmark/runners/botorch_test.py
@@ -7,14 +7,14 @@
 
 from abc import ABC, abstractmethod
 from collections.abc import Mapping
-from dataclasses import asdict, dataclass
+from dataclasses import dataclass
+from itertools import islice
 
 import torch
 from ax.benchmark.runners.base import BenchmarkRunner
-from ax.core.search_space import SearchSpaceDigest
 from ax.core.types import TParamValue
-from ax.utils.common.base import Base
-from ax.utils.common.equality import equality_typechecker
+from ax.exceptions.core import UnsupportedError
+from botorch.test_functions.multi_objective import MultiObjectiveTestProblem
 from botorch.test_functions.synthetic import BaseTestProblem, ConstrainedBaseTestProblem
 from botorch.utils.transforms import normalize, unnormalize
 from torch import Tensor
@@ -23,8 +23,9 @@
 class ParamBasedTestProblem(ABC):
     """
-    Similar to a BoTorch test problem, but evaluated using an Ax
-    TParameterization rather than a tensor.
+    The basic Ax class for generating deterministic data to benchmark against.
+
+    (Noise - if desired - is added by the runner.)
     """
 
     num_objectives: int
 
     @abstractmethod
     def evaluate_true(self, params: Mapping[str, TParamValue]) -> Tensor: ...
 
@@ -46,17 +47,12 @@ def evaluate_slack_true(self, params: Mapping[str, TParamValue]) -> Tensor:
 
 
 @dataclass(kw_only=True)
-class SyntheticProblemRunner(BenchmarkRunner, ABC):
-    """A Runner for evaluating synthetic problems, either BoTorch
-    `BaseTestProblem`s or Ax benchmarking `ParamBasedTestProblem`s.
-
-    Given a trial, the Runner will evaluate the problem noiselessly for each
-    arm in the trial, as well as return some metadata about the underlying
-    problem such as the noise_std.
+class BoTorchTestProblem(ParamBasedTestProblem):
+    """
+    Class for generating data from a BoTorch ``BaseTestProblem``.
 
     Args:
-        test_problem: A BoTorch `BaseTestProblem` or Ax `ParamBasedTestProblem`.
-        outcome_names: The names of the outcomes returned by the problem.
+        botorch_problem: The BoTorch ``BaseTestProblem``.
         modified_bounds: The bounds that are used by the Ax search space
             while optimizing the problem. If different from the bounds of the
             test problem, we project the parameters into the test problem
             bounds before evaluating the test problem.
             For example, if the test problem is defined on [0, 1] but the Ax
             search space is integers in [0, 10], an Ax parameter value of
             5 will correspond to 0.5 while evaluating the test problem.
             If modified bounds are not provided, the test problem will be
             evaluated using the raw parameter values.
-        search_space_digest: Used to extract target fidelity and task.
+        num_objectives: The number of objectives.
     """
 
@@ -66,18 +62,95 @@
-    test_problem: BaseTestProblem | ParamBasedTestProblem
+    botorch_problem: BaseTestProblem
     modified_bounds: list[tuple[float, float]] | None = None
-    constraint_noise_std: float | list[float] | None = None
+    num_objectives: int = 1
+
+    def __post_init__(self) -> None:
+        if isinstance(self.botorch_problem, MultiObjectiveTestProblem):
+            self.num_objectives = self.botorch_problem.num_objectives
+        if self.botorch_problem.noise_std is not None:
+            raise ValueError(
+                "noise_std should be set on the runner, not the test problem."
+            )
+        if getattr(self.botorch_problem, "constraint_noise_std", None) is not None:
+            raise ValueError(
+                "constraint_noise_std should be set on the runner, not the test "
+                "problem."
+            )
+        if self.botorch_problem.negate:
+            raise ValueError(
+                "negate should be set on the runner, not the test problem."
+            )
+        self.botorch_problem = self.botorch_problem.to(dtype=torch.double)
+
+    def tensorize_params(self, params: Mapping[str, int | float]) -> torch.Tensor:
+        X = torch.tensor(
+            list(islice(params.values(), self.botorch_problem.dim)),
+            dtype=torch.double,
+        )
+
+        if self.modified_bounds is not None:
+            # Normalize from modified bounds to unit cube.
+            unit_X = normalize(
+                X, torch.tensor(self.modified_bounds, dtype=torch.double).T
+            )
+            # Unnormalize from unit cube to original problem bounds.
+            X = unnormalize(unit_X, self.botorch_problem.bounds)
+        return X
+
+    # pyre-fixme [14]: inconsistent override
+    def evaluate_true(self, params: Mapping[str, float | int]) -> torch.Tensor:
+        x = self.tensorize_params(params=params)
+        return self.botorch_problem.evaluate_true(x)
+
+    # pyre-fixme [14]: inconsistent override
+    def evaluate_slack_true(self, params: Mapping[str, float | int]) -> torch.Tensor:
+        if not isinstance(self.botorch_problem, ConstrainedBaseTestProblem):
+            raise UnsupportedError(
+                "`evaluate_slack_true` is only supported when the BoTorch "
+                "problem is a `ConstrainedBaseTestProblem`."
+            )
+        # TODO: This could return `x` to avoid recomputing it, or evaluate
+        # objectives and constraints together, track the indices of the
+        # outcomes, and negate only the non-constraints.
+        x = self.tensorize_params(params=params)
+        return self.botorch_problem.evaluate_slack_true(x)
+
+
+@dataclass(kw_only=True)
+class ParamBasedTestProblemRunner(BenchmarkRunner):
+    """
+    A Runner for evaluating `ParamBasedTestProblem`s.
+
+    Given a trial, the Runner will use its `test_problem` to evaluate the
+    problem noiselessly for each arm in the trial, and then add noise as
+    specified by the `noise_std` and `constraint_noise_std`. It will return
+    metadata including the outcome names and values of metrics.
+
+    Args:
+        outcome_names: The names of the outcomes returned by the problem.
+        search_space_digest: Used to extract target fidelity and task.
+        test_problem: A ``ParamBasedTestProblem`` from which to generate
+            deterministic data before adding noise.
+        noise_std: The standard deviation of the noise added to the data. Can
+            be a list for per-metric noise levels.
+        negate: Whether to negate the outcome.
+    """
+
+    test_problem: ParamBasedTestProblem
     noise_std: float | list[float] | None = None
+    constraint_noise_std: float | list[float] | None = None
     negate: bool = False
 
     @property
     def _is_constrained(self) -> bool:
-        return isinstance(self.test_problem, ConstrainedBaseTestProblem)
+        return isinstance(self.test_problem, BoTorchTestProblem) and isinstance(
+            self.test_problem.botorch_problem, ConstrainedBaseTestProblem
+        )
 
     def get_noise_stds(self) -> None | float | dict[str, float]:
         noise_std = self.noise_std
@@ -116,134 +189,24 @@
 
         return noise_std_dict
 
-
-@dataclass(kw_only=True)
-class BotorchTestProblemRunner(SyntheticProblemRunner):
-    """
-    A `SyntheticProblemRunner` for BoTorch `BaseTestProblem`s.
-
-    Args:
-        test_problem: A BoTorch `BaseTestProblem`.
-        outcome_names: The names of the outcomes returned by the problem.
-        modified_bounds: The bounds that are used by the Ax search space
-            while optimizing the problem. If different from the bounds of the
-            test problem, we project the parameters into the test problem
-            bounds before evaluating the test problem.
-            For example, if the test problem is defined on [0, 1] but the Ax
-            search space is integers in [0, 10], an Ax parameter value of
-            5 will correspond to 0.5 while evaluating the test problem.
-            If modified bounds are not provided, the test problem will be
-            evaluated using the raw parameter values.
-        search_space_digest: Used to extract target fidelity and task.
-    """
-
-    test_problem: BaseTestProblem
-
-    def __post_init__(self, search_space_digest: SearchSpaceDigest | None) -> None:
-        super().__post_init__(search_space_digest=search_space_digest)
-        if self.test_problem.noise_std is not None:
-            raise ValueError(
-                "noise_std should be set on the runner, not the test problem."
-            )
-        if (
-            hasattr(self.test_problem, "constraint_noise_std")
-            and self.test_problem.constraint_noise_std is not None
-        ):
-            raise ValueError(
-                "constraint_noise_std should be set on the runner, not the test "
-                "problem."
-            )
-        if self.test_problem.negate:
-            raise ValueError(
-                "negate should be set on the runner, not the test problem."
-            )
-        self.test_problem = self.test_problem.to(dtype=torch.double)
-
     def get_Y_true(self, params: Mapping[str, TParamValue]) -> Tensor:
-        """
-        Convert the arm to a tensor and evaluate it on the base test problem.
-
-        Convert the tensor to original bounds -- only if modified bounds were
-        provided -- and evaluates the test problem. See the docstring for
-        `modified_bounds` in `BotorchTestProblemRunner.__init__` for details.
-
-        Args:
-            params: Parameterization to evaluate. It will be converted to a
-                `batch_shape x d`-dim tensor of point(s) at which to evaluate the
-                test problem.
+        """Evaluates the test problem.
 
         Returns:
             A `batch_shape x m`-dim tensor of ground truth (noiseless) evaluations.
         """
-        X = torch.tensor(
-            [value for _key, value in [*params.items()][: self.test_problem.dim]],
-            dtype=torch.double,
-        )
-
-        if self.modified_bounds is not None:
-            # Normalize from modified bounds to unit cube.
-            unit_X = normalize(
-                X, torch.tensor(self.modified_bounds, dtype=torch.double).T
-            )
-            # Unnormalize from unit cube to original problem bounds.
-            X = unnormalize(unit_X, self.test_problem.bounds)
-
-        Y_true = self.test_problem.evaluate_true(X).view(-1)
-        # `BaseTestProblem.evaluate_true()` does not negate the outcome
+        Y_true = self.test_problem.evaluate_true(params).view(-1)
+        # `ParamBasedTestProblem.evaluate_true()` does not negate the outcome
         if self.negate:
             Y_true = -Y_true
-
         if self._is_constrained:
             # Convention: Concatenate objective and black box constraints. `view()`
             # makes the inputs 1d, so the resulting `Y_true` are also 1d.
             Y_true = torch.cat(
-                [Y_true, self.test_problem.evaluate_slack_true(X).view(-1)],
+                [Y_true, self.test_problem.evaluate_slack_true(params).view(-1)],
                 dim=-1,
             )
-
         return Y_true
 
-    @equality_typechecker
-    def __eq__(self, other: Base) -> bool:
-        """
-        Compare equality by comparing dicts, except for `test_problem`.
-
-        Dataclasses are compared by comparing the results of calling asdict on
-        them. However, equality checks don't work as needed with BoTorch test
-        problems, e.g. Branin() == Branin() is False. To get around that, the
-        test problem is stripped from the dictionary. This doesn't make the
-        check less sensitive, as long as the problem has not been modified,
-        because the test problem class and keyword arguments will still be
-        compared.
-        """
-        if not isinstance(other, type(self)):
-            return False
-        self_as_dict = asdict(self)
-        other_as_dict = asdict(other)
-        self_as_dict.pop("test_problem")
-        other_as_dict.pop("test_problem")
-        return (self_as_dict == other_as_dict) and (
-            type(self.test_problem) is type(other.test_problem)
-        )
-
-
-@dataclass(kw_only=True)
-class ParamBasedTestProblemRunner(SyntheticProblemRunner):
-    """
-    A `SyntheticProblemRunner` for `ParamBasedTestProblem`s. See
-    `SyntheticProblemRunner` for more information.
-    """
-
-    test_problem: ParamBasedTestProblem
-
-    def get_Y_true(self, params: Mapping[str, TParamValue]) -> Tensor:
-        """Evaluates the test problem.
-
-        Returns:
-            A `batch_shape x m`-dim tensor of ground truth (noiseless) evaluations.
- """ - Y_true = self.test_problem.evaluate_true(params).view(-1) - # `ParamBasedTestProblem.evaluate_true()` does not negate the outcome - if self.negate: - Y_true = -Y_true - return Y_true +BotorchTestProblemRunner = ParamBasedTestProblemRunner diff --git a/ax/benchmark/tests/problems/test_mixed_integer_problems.py b/ax/benchmark/tests/problems/test_mixed_integer_problems.py index 424a9cae608..2a38606542b 100644 --- a/ax/benchmark/tests/problems/test_mixed_integer_problems.py +++ b/ax/benchmark/tests/problems/test_mixed_integer_problems.py @@ -15,13 +15,16 @@ get_discrete_hartmann, get_discrete_rosenbrock, ) -from ax.benchmark.runners.botorch_test import BotorchTestProblemRunner +from ax.benchmark.runners.botorch_test import ( + BoTorchTestProblem, + ParamBasedTestProblemRunner, +) from ax.core.arm import Arm from ax.core.parameter import ParameterType from ax.core.trial import Trial from ax.utils.common.testutils import TestCase -from ax.utils.common.typeutils import checked_cast from botorch.test_functions.synthetic import Ackley, Hartmann, Rosenbrock +from pyre_extensions import assert_is_instance class MixedIntegerProblemsTest(TestCase): @@ -34,10 +37,10 @@ def test_problems(self) -> None: name = problem_cls.__name__ problem = constructor() self.assertEqual(f"Discrete {name}", problem.name) - self.assertIsInstance( - checked_cast(BotorchTestProblemRunner, problem.runner).test_problem, - problem_cls, - ) + runner = assert_is_instance(problem.runner, ParamBasedTestProblemRunner) + test_problem = assert_is_instance(runner.test_problem, BoTorchTestProblem) + botorch_problem = test_problem.botorch_problem + self.assertIsInstance(botorch_problem, problem_cls) self.assertEqual(len(problem.search_space.parameters), dim) self.assertEqual( sum( @@ -51,12 +54,7 @@ def test_problems(self) -> None: expected_bounds = [(-5.0, 10.0) for _ in range(dim)] else: expected_bounds = [(0.0, 1.0) for _ in range(dim)] - self.assertEqual( - checked_cast( - BotorchTestProblemRunner, problem.runner - ).test_problem._bounds, - expected_bounds, - ) + self.assertEqual(botorch_problem._bounds, expected_bounds) self.assertGreaterEqual(problem.optimal_value, problem_cls().optimal_value) # Test that they match correctly to the original problems. 
@@ -101,7 +99,8 @@ def test_problems(self) -> None:
         ]
 
         for problem, params, expected_arg in cases:
-            runner = checked_cast(BotorchTestProblemRunner, problem.runner)
+            runner = assert_is_instance(problem.runner, ParamBasedTestProblemRunner)
+            test_problem = assert_is_instance(runner.test_problem, BoTorchTestProblem)
             trial = Trial(experiment=MagicMock())
             # pyre-fixme: Incompatible parameter type [6]: In call
             # `Arm.__init__`, for argument `parameters`, expected `Dict[str,
@@ -109,9 +108,9 @@ def test_problems(self) -> None:
             arm = Arm(parameters=params, name="--")
             trial.add_arm(arm)
             with patch.object(
-                runner.test_problem,
+                test_problem.botorch_problem,
                 attribute="evaluate_true",
-                wraps=runner.test_problem.evaluate_true,
+                wraps=test_problem.botorch_problem.evaluate_true,
             ) as mock_call:
                 runner.run(trial)
                 actual = mock_call.call_args[0][0]
diff --git a/ax/benchmark/tests/runners/test_botorch_test_problem.py b/ax/benchmark/tests/runners/test_botorch_test_problem.py
index 77e11fe42d5..fa2d9555c29 100644
--- a/ax/benchmark/tests/runners/test_botorch_test_problem.py
+++ b/ax/benchmark/tests/runners/test_botorch_test_problem.py
@@ -14,8 +14,9 @@
 
 import numpy as np
 import torch
+from ax.benchmark.problems.synthetic.hss.jenatton import get_jenatton_benchmark_problem
 from ax.benchmark.runners.botorch_test import (
-    BotorchTestProblemRunner,
+    BoTorchTestProblem,
     ParamBasedTestProblemRunner,
 )
 from ax.core.arm import Arm
@@ -25,7 +26,6 @@
 from ax.utils.common.testutils import TestCase
 from ax.utils.common.typeutils import checked_cast
 from ax.utils.testing.benchmark_stubs import TestParamBasedTestProblem
-from botorch.test_functions.base import BaseTestProblem
 from botorch.test_functions.synthetic import Ackley, ConstrainedHartmann, Hartmann
 from botorch.utils.transforms import normalize
 
@@ -35,32 +35,43 @@ def test_runner_raises_for_botorch_attrs(self) -> None:
         with self.assertRaisesRegex(
             ValueError, "noise_std should be set on the runner, not the test problem."
         ):
-            BotorchTestProblemRunner(
-                test_problem=Hartmann(dim=6, noise_std=0.1),
+            ParamBasedTestProblemRunner(
+                test_problem=BoTorchTestProblem(
+                    botorch_problem=Hartmann(dim=6, noise_std=0.1)
+                ),
                 outcome_names=["objective"],
             )
         with self.assertRaisesRegex(
             ValueError,
             "constraint_noise_std should be set on the runner, not the test problem.",
         ):
-            BotorchTestProblemRunner(
-                test_problem=ConstrainedHartmann(dim=6, constraint_noise_std=0.1),
+            ParamBasedTestProblemRunner(
+                test_problem=BoTorchTestProblem(
+                    botorch_problem=ConstrainedHartmann(dim=6, constraint_noise_std=0.1)
+                ),
                 outcome_names=["objective", "constraint"],
             )
         with self.assertRaisesRegex(
             ValueError, "negate should be set on the runner, not the test problem."
         ):
-            BotorchTestProblemRunner(
-                test_problem=Hartmann(dim=6, negate=True),
+            ParamBasedTestProblemRunner(
+                test_problem=BoTorchTestProblem(
+                    botorch_problem=Hartmann(dim=6, negate=True)
+                ),
                 outcome_names=["objective"],
             )
 
+    def setUp(self) -> None:
+        super().setUp()
+        self.maxDiff = None
+
     def test_synthetic_runner(self) -> None:
         botorch_cases = [
             (
-                BotorchTestProblemRunner,
-                test_problem_class(dim=6),
-                modified_bounds,
+                BoTorchTestProblem(
+                    botorch_problem=test_problem_class(dim=6),
+                    modified_bounds=modified_bounds,
+                ),
                 noise_std,
             )
             for test_problem_class, modified_bounds, noise_std in product(
                 ...
             )
         ]
         param_based_cases = [
             (
-                ParamBasedTestProblemRunner,
                 TestParamBasedTestProblem(num_objectives=num_objectives, dim=6),
-                None,
                 noise_std,
             )
             for num_objectives, noise_std in product((1, 2), (None, 0.0, 1.0))
         ]
-        for (
-            runner_cls,
-            test_problem,
-            modified_bounds,
-            noise_std,
-        ) in botorch_cases + param_based_cases:
+        for test_problem, noise_std in botorch_cases + param_based_cases:
             num_objectives = test_problem.num_objectives
             outcome_names = [f"objective_{i}" for i in range(num_objectives)]
-            if isinstance(test_problem, ConstrainedHartmann):
+            is_constrained = isinstance(
+                test_problem, BoTorchTestProblem
+            ) and isinstance(test_problem.botorch_problem, ConstrainedHartmann)
+            if is_constrained:
                 outcome_names = outcome_names + ["constraint"]
 
-            runner = runner_cls(
-                # pyre-fixme[6]: Incompatible parameter type: In call
-                # `BotorchTestProblemRunner.__init__`, for argument
-                # `test_problem`, expected `BaseTestProblem` but got
-                # `Union[Hartmann, TestParamBasedTestProblem]`.
+            runner = ParamBasedTestProblemRunner(
                 test_problem=test_problem,
                 outcome_names=outcome_names,
-                modified_bounds=modified_bounds,
                 noise_std=noise_std,
             )
+            modified_bounds = (
+                test_problem.modified_bounds
+                if isinstance(test_problem, BoTorchTestProblem)
+                else None
+            )
             test_description: str = (
                 f"test problem: {test_problem.__class__.__name__}, "
                 f"modified_bounds: {modified_bounds}, "
                 f"noise_std: {noise_std}."
             )
+            is_botorch = isinstance(test_problem, BoTorchTestProblem)
 
             with self.subTest(f"Test basic construction, {test_description}"):
                 self.assertIs(runner.test_problem, test_problem)
-                self.assertEqual(
-                    runner._is_constrained,
-                    isinstance(test_problem, ConstrainedHartmann),
-                )
-                self.assertEqual(runner.modified_bounds, modified_bounds)
+                self.assertEqual(runner._is_constrained, is_constrained)
                 self.assertEqual(runner.outcome_names, outcome_names)
                 if noise_std is not None:
                     self.assertEqual(runner.get_noise_stds(), noise_std)
                 else:
                     self.assertIsNone(runner.get_noise_stds())
 
                 # check equality
-                new_runner = replace(runner, test_problem=Ackley())
+                new_runner = replace(
+                    runner, test_problem=BoTorchTestProblem(botorch_problem=Ackley())
+                )
                 self.assertNotEqual(runner, new_runner)
                 self.assertEqual(runner, runner)
-                if isinstance(test_problem, BaseTestProblem):
-                    self.assertEqual(test_problem.bounds.dtype, torch.double)
+                if isinstance(test_problem, BoTorchTestProblem):
+                    self.assertEqual(
+                        test_problem.botorch_problem.bounds.dtype, torch.double
+                    )
 
             with self.subTest(f"test `get_Y_true()`, {test_description}"):
-                X = torch.rand(1, 6, dtype=torch.double)
-                params = {f"x{i}": x.item() for i, x in enumerate(X.unbind(-1))}
+                dim = 6 if is_botorch else 9
+                X = torch.rand(1, dim, dtype=torch.double)
+                param_names = (
+                    [f"x{i}" for i in range(6)]
+                    if is_botorch
+                    else list(
+                        get_jenatton_benchmark_problem().search_space.parameters.keys()
+                    )
+                )
+                params = dict(zip(param_names, (x.item() for x in X.unbind(-1))))
+
                 Y = runner.get_Y_true(params=params)
-                if modified_bounds is not None:
+                if (
+                    isinstance(test_problem, BoTorchTestProblem)
+                    and test_problem.modified_bounds is not None
+                ):
                     X_tf = normalize(
-                        X, torch.tensor(modified_bounds, dtype=torch.double).T
+                        X,
+                        torch.tensor(
+                            test_problem.modified_bounds, dtype=torch.double
+                        ).T,
                     )
                 else:
                     X_tf = X
-                if isinstance(test_problem, BaseTestProblem):
-                    obj = test_problem.evaluate_true(X_tf)
-                    if test_problem.negate:
+                if isinstance(test_problem, BoTorchTestProblem):
+                    botorch_problem = test_problem.botorch_problem
+                    obj = botorch_problem.evaluate_true(X_tf)
+                    if runner.negate:
                         obj = -obj
                     if runner._is_constrained:
                         expected_Y = torch.cat(
-                            [obj.view(-1), test_problem.evaluate_slack(X_tf).view(-1)],
+                            [
+                                obj.view(-1),
+                                botorch_problem.evaluate_slack(X_tf).view(-1),
+                            ],
                             dim=-1,
                         )
                     else:
@@ -183,15 +211,15 @@ def test_synthetic_runner(self) -> None:
         with self.assertRaisesRegex(
             UnsupportedError, "serialize_init_args is not a supported method"
         ):
-            runner_cls.serialize_init_args(obj=runner)
+            ParamBasedTestProblemRunner.serialize_init_args(obj=runner)
         with self.assertRaisesRegex(
             UnsupportedError, "deserialize_init_args is not a supported method"
         ):
-            runner_cls.deserialize_init_args({})
+            ParamBasedTestProblemRunner.deserialize_init_args({})
 
     def test_botorch_test_problem_runner_heterogeneous_noise(self) -> None:
-        runner = BotorchTestProblemRunner(
-            test_problem=ConstrainedHartmann(dim=6),
+        runner = ParamBasedTestProblemRunner(
+            test_problem=BoTorchTestProblem(botorch_problem=ConstrainedHartmann(dim=6)),
             noise_std=0.1,
             constraint_noise_std=0.05,
             outcome_names=["objective", "constraint"],
@@ -215,3 +243,10 @@ def test_botorch_test_problem_runner_heterogeneous_noise(self) -> None:
         self.assertSetEqual(set(res["Ys"].keys()), {"0_0"})
         self.assertEqual(res["Ystds"]["0_0"], [0.1, 0.05])
         self.assertEqual(res["outcome_names"], ["objective", "constraint"])
+
+    def test_unsupported_error(self) -> None:
+        test_function = BoTorchTestProblem(botorch_problem=Hartmann(dim=6))
+        with self.assertRaisesRegex(
+            UnsupportedError, "`evaluate_slack_true` is only supported when"
+        ):
+            test_function.evaluate_slack_true({"a": 3})
diff --git a/ax/benchmark/tests/test_benchmark_problem.py b/ax/benchmark/tests/test_benchmark_problem.py
index f9818f8cae1..4c836ed1d2b 100644
--- a/ax/benchmark/tests/test_benchmark_problem.py
+++ b/ax/benchmark/tests/test_benchmark_problem.py
@@ -14,7 +14,10 @@
 
 from ax.benchmark.benchmark_metric import BenchmarkMetric
 from ax.benchmark.benchmark_problem import BenchmarkProblem, create_problem_from_botorch
-from ax.benchmark.runners.botorch_test import BotorchTestProblemRunner
+from ax.benchmark.runners.botorch_test import (
+    BoTorchTestProblem,
+    ParamBasedTestProblemRunner,
+)
 from ax.core.objective import MultiObjective, Objective
 from ax.core.optimization_config import (
     MultiObjectiveOptimizationConfig,
@@ -51,7 +54,10 @@ def test_inference_value_not_implemented(self) -> None:
             for name in ["Branin", "Currin"]
         ]
         optimization_config = OptimizationConfig(objective=objectives[0])
-        runner = BotorchTestProblemRunner(test_problem=Branin(), outcome_names=["foo"])
+        runner = ParamBasedTestProblemRunner(
+            test_problem=BoTorchTestProblem(botorch_problem=Branin()),
+            outcome_names=["foo"],
+        )
         with self.assertRaisesRegex(NotImplementedError, "Only `n_best_points=1`"):
             BenchmarkProblem(
                 name="foo",
@@ -211,9 +217,12 @@ def _test_constrained_from_botorch(
             noise_std=objective_noise_std,
             constraint_noise_std=constraint_noise_std,
         )
-        runner = checked_cast(BotorchTestProblemRunner, ax_problem.runner)
+        runner = assert_is_instance(ax_problem.runner, ParamBasedTestProblemRunner)
         self.assertTrue(runner._is_constrained)
-        botorch_problem = checked_cast(ConstrainedBaseTestProblem, runner.test_problem)
+        test_problem = assert_is_instance(runner.test_problem, BoTorchTestProblem)
+        botorch_problem = assert_is_instance(
+            test_problem.botorch_problem, ConstrainedBaseTestProblem
+        )
         self.assertEqual(runner.noise_std, objective_noise_std)
         self.assertEqual(runner.constraint_noise_std, constraint_noise_std)
         opt_config = ax_problem.optimization_config