diff --git a/ax/benchmark/benchmark_problem.py b/ax/benchmark/benchmark_problem.py
index d8711f0d2a4..17e118446d2 100644
--- a/ax/benchmark/benchmark_problem.py
+++ b/ax/benchmark/benchmark_problem.py
@@ -301,8 +301,7 @@ def create_problem_from_botorch(
     *,
     test_problem_class: type[BaseTestProblem],
    test_problem_kwargs: dict[str, Any],
-    noise_std: float | list[float] | None = None,
-    constraint_noise_std: float | list[float] | None = None,
+    noise_std: float | list[float] = 0.0,
     num_trials: int,
     lower_is_better: bool = True,
     observe_noise_sd: bool = False,
@@ -321,11 +320,8 @@ def create_problem_from_botorch(
             to define the `search_space`, `optimization_config`, and `runner`.
         test_problem_kwargs: Keyword arguments used to instantiate the
             `test_problem_class`.
-        noise_std: Standard deviation of synthetic noise added to objectives. If
-            `None`, no noise is added. If a float, the same noise level is used
-            for all objectives.
-        constraint_noise_std: Standard deviation of synthetic noise added to
-            constraints.
+        noise_std: Standard deviation of synthetic noise added to outcomes. If a
+            float, the same noise level is used for all outcomes.
         lower_is_better: Whether this is a minimization problem. For MOO, this
             applies to all objectives.
         num_trials: Simply the `num_trials` of the `BenchmarkProblem` created.
@@ -392,7 +388,6 @@ def create_problem_from_botorch(
             param_names=list(search_space.parameters.keys()),
         ),
         noise_std=noise_std,
-        constraint_noise_std=constraint_noise_std,
     ),
     num_trials=num_trials,
     observe_noise_stds=observe_noise_sd,
diff --git a/ax/benchmark/problems/hpo/torchvision.py b/ax/benchmark/problems/hpo/torchvision.py
index 8edf610c5c8..654eb723d64 100644
--- a/ax/benchmark/problems/hpo/torchvision.py
+++ b/ax/benchmark/problems/hpo/torchvision.py
@@ -118,7 +118,6 @@ def train_and_evaluate(
 @dataclass(kw_only=True)
 class PyTorchCNNTorchvisionParamBasedProblem(ParamBasedTestProblem):
     name: str  # The name of the dataset to load -- MNIST or FashionMNIST
-    num_objectives: int = 1
     device: torch.device = field(
         default_factory=lambda: torch.device(
             "cuda" if torch.cuda.is_available() else "cpu"
diff --git a/ax/benchmark/problems/synthetic/hss/jenatton.py b/ax/benchmark/problems/synthetic/hss/jenatton.py
index 664b488ea56..fc2b40e6698 100644
--- a/ax/benchmark/problems/synthetic/hss/jenatton.py
+++ b/ax/benchmark/problems/synthetic/hss/jenatton.py
@@ -55,8 +55,6 @@ def jenatton_test_function(
 class Jenatton(ParamBasedTestProblem):
     """Jenatton test function for hierarchical search spaces."""
 
-    num_objectives: int = 1
-
     # pyre-fixme[14]: Inconsistent override
     def evaluate_true(self, params: Mapping[str, float | int | None]) -> torch.Tensor:
         # pyre-fixme: Incompatible parameter type [6]: In call
diff --git a/ax/benchmark/runners/base.py b/ax/benchmark/runners/base.py
index 56796ac1b17..b00f324bd27 100644
--- a/ax/benchmark/runners/base.py
+++ b/ax/benchmark/runners/base.py
@@ -84,7 +84,7 @@ def evaluate_oracle(self, parameters: Mapping[str, TParamValue]) -> ndarray:
         return self.get_Y_true(params=params).numpy()
 
     @abstractmethod
-    def get_noise_stds(self) -> None | float | dict[str, float]:
+    def get_noise_stds(self) -> dict[str, float]:
         """
         Return the standard errors for the synthetic noise to be applied to the
         observed values.
@@ -110,7 +110,9 @@ def run(self, trial: BaseTrial) -> dict[str, Any]:
         Ys, Ystds = {}, {}
         noise_stds = self.get_noise_stds()
 
-        if noise_stds is not None:
+        noiseless = all(v == 0 for v in noise_stds.values())
+
+        if not noiseless:
             # extract arm weights to adjust noise levels accordingly
             if isinstance(trial, BatchTrial):
                 # normalize arm weights (we assume that the noise level is defined)
@@ -122,22 +124,15 @@ def run(self, trial: BaseTrial) -> dict[str, Any]:
             else:
                 nlzd_arm_weights = {checked_cast(Trial, trial).arm: 1.0}
             # generate a tensor of noise levels that we'll reuse below
-            if isinstance(noise_stds, float):
-                noise_stds_tsr = torch.full(
-                    (len(self.outcome_names),),
-                    noise_stds,
-                    dtype=torch.double,
-                )
-            else:
-                noise_stds_tsr = torch.tensor(
-                    [noise_stds[metric_name] for metric_name in self.outcome_names],
-                    dtype=torch.double,
-                )
+            noise_stds_tsr = torch.tensor(
+                [noise_stds[metric_name] for metric_name in self.outcome_names],
+                dtype=torch.double,
+            )
 
         for arm in trial.arms:
             # Case where we do have a ground truth
             Y_true = self.get_Y_true(arm.parameters)
-            if noise_stds is None:
+            if noiseless:
                 # No noise, so just return the true outcome.
                 Ystds[arm.name] = [0.0] * len(Y_true)
                 Ys[arm.name] = Y_true.tolist()
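With `get_noise_stds()` now always returning a dict, the noise handling in `run()` reduces to the following standalone sketch (values are hypothetical and batch-trial arm weights are omitted; this is an illustration, not code from this PR):

```python
import torch

outcome_names = ["objective", "constraint"]
noise_stds = {"objective": 0.1, "constraint": 0.05}  # as from get_noise_stds()

# A noiseless problem is now "all stds equal zero" rather than "noise_std is None".
noiseless = all(v == 0 for v in noise_stds.values())
Y_true = torch.tensor([0.7, -0.2], dtype=torch.double)  # ground truth per outcome

if noiseless:
    Y, Ystd = Y_true, torch.zeros_like(Y_true)
else:
    # Order the stds to match the outcomes, then add Gaussian noise.
    noise_stds_tsr = torch.tensor(
        [noise_stds[name] for name in outcome_names], dtype=torch.double
    )
    Y = Y_true + noise_stds_tsr * torch.randn_like(Y_true)
    Ystd = noise_stds_tsr
```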
diff --git a/ax/benchmark/runners/botorch_test.py b/ax/benchmark/runners/botorch_test.py
index 35058855726..de18d01b6eb 100644
--- a/ax/benchmark/runners/botorch_test.py
+++ b/ax/benchmark/runners/botorch_test.py
@@ -13,8 +13,6 @@
 import torch
 from ax.benchmark.runners.base import BenchmarkRunner
 from ax.core.types import TParamValue
-from ax.exceptions.core import UnsupportedError
-from botorch.test_functions.multi_objective import MultiObjectiveTestProblem
 from botorch.test_functions.synthetic import BaseTestProblem, ConstrainedBaseTestProblem
 from botorch.utils.transforms import normalize, unnormalize
 from torch import Tensor
@@ -28,17 +26,15 @@ class ParamBasedTestProblem(ABC):
 
     (Noise - if desired - is added by the runner.)
     """
-    num_objectives: int
-
     @abstractmethod
     def evaluate_true(self, params: Mapping[str, TParamValue]) -> Tensor:
-        """Evaluate noiselessly."""
-        ...
+        """
+        Evaluate noiselessly.
 
-    def evaluate_slack_true(self, params: Mapping[str, TParamValue]) -> Tensor:
-        raise NotImplementedError(
-            f"{self.__class__.__name__} does not support constraints."
-        )
+        Returns:
+            1d tensor of shape (num_outcomes,).
+        """
+        ...
 
 
 @dataclass(kw_only=True)
@@ -57,24 +53,18 @@ class BoTorchTestProblem(ParamBasedTestProblem):
             5 will correspond to 0.5 while evaluating the test problem.
             If modified bounds are not provided, the test problem will be
            evaluated using the raw parameter values.
-        num_objectives: The number of objectives.
     """
 
     botorch_problem: BaseTestProblem
     modified_bounds: list[tuple[float, float]] | None = None
-    num_objectives: int = 1
 
     def __post_init__(self) -> None:
-        if isinstance(self.botorch_problem, MultiObjectiveTestProblem):
-            self.num_objectives = self.botorch_problem.num_objectives
-        if self.botorch_problem.noise_std is not None:
-            raise ValueError(
-                "noise_std should be set on the runner, not the test problem."
-            )
-        if getattr(self.botorch_problem, "constraint_noise_std", None) is not None:
+        if (
+            self.botorch_problem.noise_std is not None
+            or getattr(self.botorch_problem, "constraint_noise_std", None) is not None
+        ):
             raise ValueError(
-                "constraint_noise_std should be set on the runner, not the test "
-                "problem."
+                "noise should be set on the `BenchmarkRunner`, not the test function."
             )
         self.botorch_problem = self.botorch_problem.to(dtype=torch.double)
@@ -96,20 +86,11 @@ def tensorize_params(self, params: Mapping[str, int | float]) -> torch.Tensor:
 
     # pyre-fixme [14]: inconsistent override
     def evaluate_true(self, params: Mapping[str, float | int]) -> torch.Tensor:
         x = self.tensorize_params(params=params)
-        return self.botorch_problem(x)
-
-    # pyre-fixme [14]: inconsistent override
-    def evaluate_slack_true(self, params: Mapping[str, float | int]) -> torch.Tensor:
-        if not isinstance(self.botorch_problem, ConstrainedBaseTestProblem):
-            raise UnsupportedError(
-                "`evaluate_slack_true` is only supported when the BoTorch "
-                "problem is a `ConstrainedBaseTestProblem`."
-            )
-        # todo: could return x so as to not recompute
-        # or could do both methods together, track indices of outcomes,
-        # and only negate the non-constraints
-        x = self.tensorize_params(params=params)
-        return self.botorch_problem.evaluate_slack_true(x)
+        objectives = self.botorch_problem(x).view(-1)
+        if isinstance(self.botorch_problem, ConstrainedBaseTestProblem):
+            constraints = self.botorch_problem.evaluate_slack_true(x).view(-1)
+            return torch.cat([objectives, constraints], dim=-1)
+        return objectives
 
 
 @dataclass(kw_only=True)
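Under the new convention, a subclass implements only `evaluate_true` and returns a single 1d tensor with objectives first and constraint slacks last; there is no separate `evaluate_slack_true`. A minimal sketch with an invented toy function (`ToyConstrainedProblem` is not part of this PR):

```python
from collections.abc import Mapping
from dataclasses import dataclass

import torch
from ax.benchmark.runners.botorch_test import ParamBasedTestProblem
from ax.core.types import TParamValue


@dataclass(kw_only=True)
class ToyConstrainedProblem(ParamBasedTestProblem):
    """Hypothetical example of the new single-tensor convention."""

    def evaluate_true(self, params: Mapping[str, TParamValue]) -> torch.Tensor:
        x = torch.tensor([float(v) for v in params.values()], dtype=torch.double)
        objective = (x**2).sum()
        slack = 1.0 - x.abs().sum()  # feasible where positive
        # 1d tensor of shape (num_outcomes,): objectives first, then slacks.
        return torch.stack([objective, slack])
```

A runner would then pair it with matching outcome names, e.g. `outcome_names=["objective", "constraint"]`, in the same order as the returned tensor.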
@@ -119,7 +100,7 @@ class ParamBasedTestProblemRunner(BenchmarkRunner):
 
     Given a trial, the Runner will use its `test_problem` to evaluate the
     problem noiselessly for each arm in the trial, and then add noise as
-    specified by the `noise_std` and `constraint_noise_std`. It will return
+    specified by `noise_std`. It will return
     metadata including the outcome names and values of metrics.
 
     Args:
@@ -132,64 +113,26 @@ class ParamBasedTestProblemRunner(BenchmarkRunner):
     """
 
     test_problem: ParamBasedTestProblem
-    noise_std: float | list[float] | None = None
-    constraint_noise_std: float | list[float] | None = None
+    noise_std: float | list[float] | dict[str, float] = 0.0
 
-    @property
-    def _is_constrained(self) -> bool:
-        return isinstance(self.test_problem, BoTorchTestProblem) and isinstance(
-            self.test_problem.botorch_problem, ConstrainedBaseTestProblem
-        )
-
-    def get_noise_stds(self) -> None | float | dict[str, float]:
+    def get_noise_stds(self) -> dict[str, float]:
         noise_std = self.noise_std
-        noise_std_dict: dict[str, float] = {}
-        num_obj = self.test_problem.num_objectives
-
-        # populate any noise_stds for constraints
-        if self._is_constrained:
-            constraint_noise_std = self.constraint_noise_std
-            if isinstance(constraint_noise_std, list):
-                for i, cns in enumerate(constraint_noise_std, start=num_obj):
-                    if cns is not None:
-                        noise_std_dict[self.outcome_names[i]] = cns
-            elif constraint_noise_std is not None:
-                noise_std_dict[self.outcome_names[num_obj]] = constraint_noise_std
-
-        # if none of the constraints are subject to noise, then we may return
-        # a single float or None for the noise level
-
-        if not noise_std_dict and not isinstance(noise_std, list):
-            return noise_std  # either a float or None
-
-        if isinstance(noise_std, list):
-            if not len(noise_std) == num_obj:
-                # this shouldn't be possible due to validation upon construction
-                # of the multi-objective problem, but better safe than sorry
+        if isinstance(noise_std, float):
+            return {name: noise_std for name in self.outcome_names}
+        elif isinstance(noise_std, dict):
+            if not set(noise_std.keys()) == set(self.outcome_names):
                 raise ValueError(
-                    "Noise std must have length equal to number of objectives."
+                    "Noise std must have keys equal to outcome names if given as "
+                    "a dict."
                 )
-        else:
-            noise_std = [noise_std for _ in range(num_obj)]
-
-        for i, noise_std_ in enumerate(noise_std):
-            if noise_std_ is not None:
-                noise_std_dict[self.outcome_names[i]] = noise_std_
-
-        return noise_std_dict
+            return noise_std
+        # list of floats
+        return dict(zip(self.outcome_names, noise_std, strict=True))
 
     def get_Y_true(self, params: Mapping[str, TParamValue]) -> Tensor:
         """Evaluates the test problem.
 
         Returns:
-            A `batch_shape x m`-dim tensor of ground truth (noiseless) evaluations.
+            An `m`-dim tensor of ground truth (noiseless) evaluations.
         """
-        Y_true = self.test_problem.evaluate_true(params).view(-1)
-        if self._is_constrained:
-            # Convention: Concatenate objective and black box constraints. `view()`
-            # makes the inputs 1d, so the resulting `Y_true` are also 1d.
-            Y_true = torch.cat(
-                [Y_true, self.test_problem.evaluate_slack_true(params).view(-1)],
-                dim=-1,
-            )
-        return Y_true
+        return torch.atleast_1d(self.test_problem.evaluate_true(params=params))
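The three accepted forms of `noise_std` all normalize to the same dict. A simplified standalone restatement of the new `get_noise_stds` logic, for illustration only (`normalize_noise_std` is an invented name; in the runner, `outcome_names` is an attribute):

```python
def normalize_noise_std(
    noise_std: float | list[float] | dict[str, float],
    outcome_names: list[str],
) -> dict[str, float]:
    if isinstance(noise_std, float):
        # One level shared by every outcome, including constraints.
        return {name: noise_std for name in outcome_names}
    if isinstance(noise_std, dict):
        if set(noise_std.keys()) != set(outcome_names):
            raise ValueError(
                "Noise std must have keys equal to outcome names if given as a dict."
            )
        return noise_std
    # List of floats, positionally matched to the outcomes.
    return dict(zip(outcome_names, noise_std, strict=True))


names = ["objective", "constraint"]
assert normalize_noise_std(0.1, names) == {"objective": 0.1, "constraint": 0.1}
assert normalize_noise_std([0.1, 0.05], names) == {"objective": 0.1, "constraint": 0.05}
assert normalize_noise_std({"objective": 0.1, "constraint": 0.05}, names) == {
    "objective": 0.1,
    "constraint": 0.05,
}
```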
diff --git a/ax/benchmark/runners/surrogate.py b/ax/benchmark/runners/surrogate.py
index 1128d961ff5..42c39a1bbbf 100644
--- a/ax/benchmark/runners/surrogate.py
+++ b/ax/benchmark/runners/surrogate.py
@@ -84,8 +84,11 @@ def datasets(self) -> list[SupervisedDataset]:
             self.set_surrogate_and_datasets()
         return none_throws(self._datasets)
 
-    def get_noise_stds(self) -> None | float | dict[str, float]:
-        return self.noise_stds
+    def get_noise_stds(self) -> dict[str, float]:
+        noise_std = self.noise_stds
+        if isinstance(noise_std, float):
+            return {name: noise_std for name in self.outcome_names}
+        return noise_std
 
     # pyre-fixme[14]: Inconsistent override
     def get_Y_true(self, params: Mapping[str, float | int]) -> Tensor:
diff --git a/ax/benchmark/tests/runners/test_botorch_test_problem.py b/ax/benchmark/tests/runners/test_botorch_test_problem.py
index 6f7eff3571c..8f89f2105c1 100644
--- a/ax/benchmark/tests/runners/test_botorch_test_problem.py
+++ b/ax/benchmark/tests/runners/test_botorch_test_problem.py
@@ -26,6 +26,7 @@
 from ax.utils.common.testutils import TestCase
 from ax.utils.common.typeutils import checked_cast
 from ax.utils.testing.benchmark_stubs import TestParamBasedTestProblem
+from botorch.test_functions.multi_objective import BraninCurrin
 from botorch.test_functions.synthetic import Ackley, ConstrainedHartmann, Hartmann
 from botorch.utils.transforms import normalize
 
@@ -51,7 +52,7 @@ def test_negation(self) -> None:
         }
         self.assertEqual(
             evaluate_true_results["base Hartmann"],
-            evaluate_true_results["constrained Hartmann"],
+            evaluate_true_results["constrained Hartmann"][0],
         )
         self.assertEqual(
             evaluate_true_results["base Hartmann"],
@@ -59,38 +60,42 @@ def test_negation(self) -> None:
         )
         self.assertEqual(
             evaluate_true_results["negated Hartmann"],
-            evaluate_true_results["negated constrained Hartmann"],
+            evaluate_true_results["negated constrained Hartmann"][0],
         )
         self.assertEqual(
-            self.botorch_test_problems["constrained Hartmann"].evaluate_slack_true(
-                params
-            ),
-            self.botorch_test_problems[
-                "negated constrained Hartmann"
-            ].evaluate_slack_true(params),
+            evaluate_true_results["constrained Hartmann"][1],
+            evaluate_true_results["negated constrained Hartmann"][1],
         )
 
-    def test_unsupported_error(self) -> None:
-        test_function = BoTorchTestProblem(botorch_problem=Hartmann(dim=6))
-        with self.assertRaisesRegex(
-            UnsupportedError, "`evaluate_slack_true` is only supported when"
-        ):
-            test_function.evaluate_slack_true({"a": 3})
-
     def test_raises_for_botorch_attrs(self) -> None:
-        with self.assertRaisesRegex(
-            ValueError, "noise_std should be set on the runner, not the test problem."
-        ):
+        msg = "noise should be set on the `BenchmarkRunner`, not the test function."
+        with self.assertRaisesRegex(ValueError, msg):
             BoTorchTestProblem(botorch_problem=Hartmann(dim=6, noise_std=0.1))
-        with self.assertRaisesRegex(
-            ValueError,
-            "constraint_noise_std should be set on the runner, not the test problem.",
-        ):
+        with self.assertRaisesRegex(ValueError, msg):
             BoTorchTestProblem(
                 botorch_problem=ConstrainedHartmann(dim=6, constraint_noise_std=0.1)
             )
 
+    def test_tensor_shapes(self) -> None:
+        params = {f"x{i}": 0.5 for i in range(6)}
+        evaluate_true_results = {
+            k: v.evaluate_true(params) for k, v in self.botorch_test_problems.items()
+        }
+        evaluate_true_results["BraninCurrin"] = BoTorchTestProblem(
+            botorch_problem=BraninCurrin()
+        ).evaluate_true(params)
+        expected_len = {
+            "base Hartmann": 1,
+            "constrained Hartmann": 2,
+            "negated Hartmann": 1,
+            "negated constrained Hartmann": 2,
+            "BraninCurrin": 2,
+        }
+        for name, result in evaluate_true_results.items():
+            with self.subTest(name=name):
+                self.assertEqual(result.dtype, torch.double)
+                self.assertEqual(result.shape, torch.Size([expected_len[name]]))
+
 
 class TestSyntheticRunner(TestCase):
     def setUp(self) -> None:
@@ -105,29 +110,34 @@ def test_synthetic_runner(self) -> None:
                     modified_bounds=modified_bounds,
                 ),
                 noise_std,
+                num_outcomes,
+            )
+            for (test_problem_class, num_outcomes) in (
+                (Hartmann, 1),
+                (ConstrainedHartmann, 2),
             )
-            for test_problem_class, modified_bounds, noise_std in product(
-                (Hartmann, ConstrainedHartmann),
+            for modified_bounds, noise_std in product(
                 (None, [(0.0, 2.0)] * 6),
-                (None, 0.1),
+                (0.0, [0.1] * num_outcomes),
             )
         ]
         param_based_cases = [
             (
-                TestParamBasedTestProblem(num_objectives=num_objectives, dim=6),
+                TestParamBasedTestProblem(dim=6, num_outcomes=num_outcomes),
                 noise_std,
+                num_outcomes,
             )
-            for num_objectives, noise_std in product((1, 2), (None, 0.0, 1.0))
+            for num_outcomes in (1, 2)
+            for noise_std in (0.0, [float(i) for i in range(num_outcomes)])
         ]
-        for test_problem, noise_std in botorch_cases + param_based_cases:
-            num_objectives = test_problem.num_objectives
-
-            outcome_names = [f"objective_{i}" for i in range(num_objectives)]
+        for test_problem, noise_std, num_outcomes in botorch_cases + param_based_cases:
             is_constrained = isinstance(
                 test_problem, BoTorchTestProblem
             ) and isinstance(test_problem.botorch_problem, ConstrainedHartmann)
-            if is_constrained:
-                outcome_names = outcome_names + ["constraint"]
+            num_constraints = 1 if is_constrained else 0
+            outcome_names = [
+                f"objective_{i}" for i in range(num_outcomes - num_constraints)
+            ] + ["constraint"] * num_constraints
 
             runner = ParamBasedTestProblemRunner(
                 test_problem=test_problem,
@@ -149,12 +159,17 @@ def test_synthetic_runner(self) -> None:
 
             with self.subTest(f"Test basic construction, {test_description}"):
                 self.assertIs(runner.test_problem, test_problem)
-                self.assertEqual(runner._is_constrained, is_constrained)
                 self.assertEqual(runner.outcome_names, outcome_names)
-                if noise_std is not None:
-                    self.assertEqual(runner.get_noise_stds(), noise_std)
-                else:
-                    self.assertIsNone(runner.get_noise_stds())
+                if isinstance(noise_std, list):
+                    self.assertEqual(
+                        runner.get_noise_stds(),
+                        dict(zip(runner.outcome_names, noise_std)),
+                    )
+                else:  # float
+                    self.assertEqual(
+                        runner.get_noise_stds(),
+                        {name: noise_std for name in runner.outcome_names},
+                    )
 
                 # check equality
                 new_runner = replace(
@@ -196,7 +211,7 @@ def test_synthetic_runner(self) -> None:
                 if isinstance(test_problem, BoTorchTestProblem):
                     botorch_problem = test_problem.botorch_problem
                     obj = botorch_problem.evaluate_true(X_tf)
-                    if runner._is_constrained:
+                    if isinstance(botorch_problem, ConstrainedHartmann):
                         expected_Y = torch.cat(
                             [
                                 obj.view(-1),
@@ -225,11 +240,15 @@ def test_synthetic_runner(self) -> None:
                 res = runner.run(trial=trial)
                 self.assertEqual({"Ys", "Ystds", "outcome_names"}, res.keys())
                 self.assertEqual({"0_0"}, res["Ys"].keys())
-                if noise_std is not None:
+
+                if isinstance(noise_std, list):
+                    self.assertEqual(res["Ystds"]["0_0"], noise_std)
+                    if all((n == 0 for n in noise_std)):
+                        self.assertEqual(res["Ys"]["0_0"], Y.tolist())
+                else:  # float
                     self.assertEqual(res["Ystds"]["0_0"], [noise_std] * len(Y))
-                else:
-                    self.assertEqual(res["Ys"]["0_0"], Y.tolist())
-                    self.assertEqual(res["Ystds"]["0_0"], [0.0] * len(Y))
+                    if noise_std == 0:
+                        self.assertEqual(res["Ys"]["0_0"], Y.tolist())
                 self.assertEqual(res["outcome_names"], outcome_names)
 
             with self.subTest(f"test `poll_trial_status()`, {test_description}"):
@@ -248,28 +267,30 @@ def test_synthetic_runner(self) -> None:
             ParamBasedTestProblemRunner.deserialize_init_args({})
 
     def test_botorch_test_problem_runner_heterogeneous_noise(self) -> None:
-        runner = ParamBasedTestProblemRunner(
-            test_problem=BoTorchTestProblem(botorch_problem=ConstrainedHartmann(dim=6)),
-            noise_std=0.1,
-            constraint_noise_std=0.05,
-            outcome_names=["objective", "constraint"],
-        )
-        self.assertDictEqual(
-            checked_cast(dict, runner.get_noise_stds()),
-            {"objective": 0.1, "constraint": 0.05},
-        )
+        for noise_std in [[0.1, 0.05], {"objective": 0.1, "constraint": 0.05}]:
+            runner = ParamBasedTestProblemRunner(
+                test_problem=BoTorchTestProblem(
+                    botorch_problem=ConstrainedHartmann(dim=6)
+                ),
+                noise_std=noise_std,
+                outcome_names=["objective", "constraint"],
+            )
+            self.assertDictEqual(
+                checked_cast(dict, runner.get_noise_stds()),
+                {"objective": 0.1, "constraint": 0.05},
+            )
 
-        X = torch.rand(1, 6, dtype=torch.double)
-        arm = Arm(
-            name="0_0",
-            parameters={f"x{i}": x.item() for i, x in enumerate(X.unbind(-1))},
-        )
-        trial = Mock(spec=Trial)
-        trial.arms = [arm]
-        trial.arm = arm
-        trial.index = 0
-        res = runner.run(trial=trial)
-        self.assertSetEqual(set(res.keys()), {"Ys", "Ystds", "outcome_names"})
-        self.assertSetEqual(set(res["Ys"].keys()), {"0_0"})
-        self.assertEqual(res["Ystds"]["0_0"], [0.1, 0.05])
-        self.assertEqual(res["outcome_names"], ["objective", "constraint"])
+            X = torch.rand(1, 6, dtype=torch.double)
+            arm = Arm(
+                name="0_0",
+                parameters={f"x{i}": x.item() for i, x in enumerate(X.unbind(-1))},
+            )
+            trial = Mock(spec=Trial)
+            trial.arms = [arm]
+            trial.arm = arm
+            trial.index = 0
+            res = runner.run(trial=trial)
+            self.assertSetEqual(set(res.keys()), {"Ys", "Ystds", "outcome_names"})
+            self.assertSetEqual(set(res["Ys"].keys()), {"0_0"})
+            self.assertEqual(res["Ystds"]["0_0"], [0.1, 0.05])
+            self.assertEqual(res["outcome_names"], ["objective", "constraint"])
diff --git a/ax/benchmark/tests/test_benchmark_problem.py b/ax/benchmark/tests/test_benchmark_problem.py
index 523a0d3444e..886d9f85cb8 100644
--- a/ax/benchmark/tests/test_benchmark_problem.py
+++ b/ax/benchmark/tests/test_benchmark_problem.py
@@ -204,8 +204,7 @@ def test_single_objective_from_botorch(self) -> None:
     def _test_constrained_from_botorch(
         self,
         observe_noise_sd: bool,
-        objective_noise_std: float | None,
-        constraint_noise_std: float | list[float] | None,
+        noise_std: float | list[float],
         test_problem_class: type[ConstrainedBaseTestProblem],
     ) -> None:
         ax_problem = create_problem_from_botorch(
@@ -214,17 +213,14 @@ def _test_constrained_from_botorch(
             lower_is_better=True,
             num_trials=1,
             observe_noise_sd=observe_noise_sd,
-            noise_std=objective_noise_std,
-            constraint_noise_std=constraint_noise_std,
+            noise_std=noise_std,
         )
         runner = assert_is_instance(ax_problem.runner, ParamBasedTestProblemRunner)
-        self.assertTrue(runner._is_constrained)
         test_problem = assert_is_instance(runner.test_problem, BoTorchTestProblem)
         botorch_problem = assert_is_instance(
             test_problem.botorch_problem, ConstrainedBaseTestProblem
         )
-        self.assertEqual(runner.noise_std, objective_noise_std)
-        self.assertEqual(runner.constraint_noise_std, constraint_noise_std)
+        self.assertEqual(runner.noise_std, noise_std)
         opt_config = ax_problem.optimization_config
         outcome_constraints = opt_config.outcome_constraints
         self.assertEqual(
@@ -251,26 +247,21 @@ def _test_constrained_from_botorch(
         )
 
     def test_constrained_soo_from_botorch(self) -> None:
-        for observe_noise_sd, objective_noise_std, constraint_noise_std in product(
-            [False, True], [None, 0.1], [None, 0.2, [0.3, 0.4]]
+        for observe_noise_sd, noise_std in product(
+            [False, True],
+            [0.0, 0.1, [0.1, 0.3, 0.4]],
         ):
-            with self.subTest(
-                observe_noise_sd=observe_noise_sd,
-                objective_noise_std=objective_noise_std,
-                constraint_noise_std=constraint_noise_std,
-            ):
+            with self.subTest(observe_noise_sd=observe_noise_sd, noise_std=noise_std):
                 self._test_constrained_from_botorch(
                     observe_noise_sd=observe_noise_sd,
-                    objective_noise_std=objective_noise_std,
-                    constraint_noise_std=constraint_noise_std,
+                    noise_std=noise_std,
                     test_problem_class=ConstrainedGramacy,
                 )
 
     def test_constrained_moo_from_botorch(self) -> None:
         self._test_constrained_from_botorch(
             observe_noise_sd=False,
-            objective_noise_std=None,
-            constraint_noise_std=None,
+            noise_std=0.0,
             test_problem_class=ConstrainedBraninCurrin,
         )
diff --git a/ax/utils/testing/benchmark_stubs.py b/ax/utils/testing/benchmark_stubs.py
index 0bf7d49c4dd..0c1a37c5a47 100644
--- a/ax/utils/testing/benchmark_stubs.py
+++ b/ax/utils/testing/benchmark_stubs.py
@@ -48,7 +48,7 @@ def get_single_objective_benchmark_problem(
     num_trials: int = 4,
     test_problem_kwargs: dict[str, Any] | None = None,
     report_inference_value_as_trace: bool = False,
-    noise_std: float | list[float] | None = None,
+    noise_std: float | list[float] = 0.0,
 ) -> BenchmarkProblem:
     return create_problem_from_botorch(
         test_problem_class=Branin,
@@ -226,13 +226,14 @@ def get_aggregated_benchmark_result() -> AggregatedBenchmarkResult:
 
 @dataclass(kw_only=True)
 class TestParamBasedTestProblem(ParamBasedTestProblem):
+    num_outcomes: int = 1
     dim: int = 6
 
     # pyre-fixme[14]: Inconsistent override, as dict[str, float] is not a
     # `TParameterization`
     def evaluate_true(self, params: dict[str, float]) -> torch.Tensor:
         value = sum(elt**2 for elt in params.values())
-        return value * torch.ones(self.num_objectives, dtype=torch.double)
+        return value * torch.ones(self.num_outcomes, dtype=torch.double)
 
 
 class TestDataset(Dataset):
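Taken together, a caller now configures noise in a single place. A hypothetical usage sketch of the simplified API (the noise values and trial count are illustrative; `ConstrainedHartmann` has one objective and one constraint, so the list has two entries, objective first):

```python
from ax.benchmark.benchmark_problem import create_problem_from_botorch
from botorch.test_functions.synthetic import ConstrainedHartmann

problem = create_problem_from_botorch(
    test_problem_class=ConstrainedHartmann,
    test_problem_kwargs={"dim": 6},
    noise_std=[0.1, 0.05],  # [objective, constraint slack]
    num_trials=30,
)
```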