From 880a44406c503047504f4e0ebfc0b84f281efcc7 Mon Sep 17 00:00:00 2001
From: Elizabeth Santorella
Date: Wed, 30 Oct 2024 15:05:27 -0700
Subject: [PATCH 1/3] Rename and reorganize classes (#2977)

Summary:
Pull Request resolved: https://github.com/facebook/Ax/pull/2977

**Context**

The current structure of the code is as follows:
* Each `BenchmarkProblem` has a `BenchmarkRunner`
* `BenchmarkRunner` is the only runner
* A `BenchmarkRunner` has a `ParamBasedTestProblem`, which is either a
  `BoTorchTestProblem`, a `SurrogateTestFunction`, or a special subclass
  such as `Jenatton`.

The directory structure and names have fallen out of step with the code.

**New class names**
* `ParamBasedTestProblem` -> `BenchmarkTestFunction`
* `BoTorchTestProblem` -> `BoTorchTestFunction`

**New directory structure**
| benchmark_problem.py
| problems/
| | synthetic/hss/jenatton.py
| | ...
| benchmark_runner.py
| benchmark_test_function.py
| benchmark_test_functions/
| | botorch_test.py
| | surrogate.py

Future diffs:
* rename `BenchmarkRunner.test_problem` to `BenchmarkRunner.test_function` (D65088791)

Differential Revision: D64969707

Reviewed By: saitcakmak, Balandat
---
 ax/benchmark/benchmark_problem.py             |  8 ++--
 .../{runners/base.py => benchmark_runner.py}  |  6 +--
 ax/benchmark/benchmark_test_function.py       | 32 ++++++++++++++
 .../__init__.py                               |  0
 .../botorch_test.py                           | 25 +----------
 .../surrogate.py                              |  4 +-
 ax/benchmark/problems/hpo/torchvision.py      | 14 +++---
 .../synthetic/discretized/mixed_integer.py    |  8 ++--
 .../problems/synthetic/hss/jenatton.py        |  6 +--
 .../problems/test_mixed_integer_problems.py   |  6 +--
 .../runners/test_botorch_test_problem.py      | 42 +++++++++---------
 .../tests/runners/test_surrogate_runner.py    |  2 +-
 ax/benchmark/tests/test_benchmark_problem.py  |  8 ++--
 ax/utils/testing/benchmark_stubs.py           |  8 ++--
 sphinx/source/benchmark.rst                   | 44 +++++++++----------
 15 files changed, 113 insertions(+), 100 deletions(-)
 rename ax/benchmark/{runners/base.py => benchmark_runner.py} (98%)
 create mode 100644 ax/benchmark/benchmark_test_function.py
 rename ax/benchmark/{runners => benchmark_test_functions}/__init__.py (100%)
 rename ax/benchmark/{runners => benchmark_test_functions}/botorch_test.py (82%)
 rename ax/benchmark/{runners => benchmark_test_functions}/surrogate.py (96%)

diff --git a/ax/benchmark/benchmark_problem.py b/ax/benchmark/benchmark_problem.py
index 234fe69a154..e970404e112 100644
--- a/ax/benchmark/benchmark_problem.py
+++ b/ax/benchmark/benchmark_problem.py
@@ -12,8 +12,8 @@
 import pandas as pd
 
 from ax.benchmark.benchmark_metric import BenchmarkMetric
-from ax.benchmark.runners.base import BenchmarkRunner
-from ax.benchmark.runners.botorch_test import BoTorchTestProblem
+from ax.benchmark.benchmark_runner import BenchmarkRunner
+from ax.benchmark.benchmark_test_functions.botorch_test import BoTorchTestFunction
 from ax.core.data import Data
 from ax.core.experiment import Experiment
 from ax.core.objective import MultiObjective, Objective
@@ -309,7 +309,7 @@ def create_problem_from_botorch(
     Create a `BenchmarkProblem` from a BoTorch `BaseTestProblem`.
 
     Uses specialized Metrics and Runners for benchmarking. The test problem's
-    result will be computed by the Runner, `BoTorchTestProblemRunner`, and
+    result will be computed by the Runner, `BenchmarkRunner`, and
     retrieved by the Metric(s), which are `BenchmarkMetric`s.
Args: @@ -378,7 +378,7 @@ def create_problem_from_botorch( search_space=search_space, optimization_config=optimization_config, runner=BenchmarkRunner( - test_problem=BoTorchTestProblem(botorch_problem=test_problem), + test_problem=BoTorchTestFunction(botorch_problem=test_problem), outcome_names=outcome_names, search_space_digest=extract_search_space_digest( search_space=search_space, diff --git a/ax/benchmark/runners/base.py b/ax/benchmark/benchmark_runner.py similarity index 98% rename from ax/benchmark/runners/base.py rename to ax/benchmark/benchmark_runner.py index 3471231ebf2..55e934780dd 100644 --- a/ax/benchmark/runners/base.py +++ b/ax/benchmark/benchmark_runner.py @@ -13,7 +13,7 @@ import numpy.typing as npt import torch -from ax.benchmark.runners.botorch_test import ParamBasedTestProblem +from ax.benchmark.benchmark_test_function import BenchmarkTestFunction from ax.core.base_trial import BaseTrial, TrialStatus from ax.core.batch_trial import BatchTrial from ax.core.runner import Runner @@ -48,7 +48,7 @@ class BenchmarkRunner(Runner): Args: outcome_names: The names of the outcomes returned by the problem. - test_problem: A ``ParamBasedTestProblem`` from which to generate + test_problem: A ``BenchmarkTestFunction`` from which to generate deterministic data before adding noise. noise_std: The standard deviation of the noise added to the data. Can be a list or dict to be per-metric. @@ -56,7 +56,7 @@ class BenchmarkRunner(Runner): """ outcome_names: list[str] - test_problem: ParamBasedTestProblem + test_problem: BenchmarkTestFunction noise_std: float | list[float] | dict[str, float] = 0.0 # pyre-fixme[16]: Pyre doesn't understand InitVars search_space_digest: InitVar[SearchSpaceDigest | None] = None diff --git a/ax/benchmark/benchmark_test_function.py b/ax/benchmark/benchmark_test_function.py new file mode 100644 index 00000000000..f7546961156 --- /dev/null +++ b/ax/benchmark/benchmark_test_function.py @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-strict + +from abc import ABC, abstractmethod +from collections.abc import Mapping +from dataclasses import dataclass + +from ax.core.types import TParamValue +from torch import Tensor + + +@dataclass(kw_only=True) +class BenchmarkTestFunction(ABC): + """ + The basic Ax class for generating deterministic data to benchmark against. + + (Noise - if desired - is added by the runner.) + """ + + @abstractmethod + def evaluate_true(self, params: Mapping[str, TParamValue]) -> Tensor: + """ + Evaluate noiselessly. + + Returns: + 1d tensor of shape (num_outcomes,). + """ + ... 
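As an aside (not part of the patch), a minimal sketch of how the new `BenchmarkTestFunction` ABC is meant to be subclassed. The `QuadraticTestFunction` class and its `dim` field are hypothetical; the base class, the `@dataclass(kw_only=True)` decorator, and the `evaluate_true` signature are taken from the file added above.

    from collections.abc import Mapping
    from dataclasses import dataclass

    import torch
    from ax.benchmark.benchmark_test_function import BenchmarkTestFunction
    from ax.core.types import TParamValue
    from torch import Tensor


    @dataclass(kw_only=True)
    class QuadraticTestFunction(BenchmarkTestFunction):
        """Hypothetical single-outcome function: f(x) = sum_i x_i^2."""

        dim: int = 2

        def evaluate_true(self, params: Mapping[str, TParamValue]) -> Tensor:
            # Deterministic evaluation; noise, if desired, is added by the
            # BenchmarkRunner, not here.
            x = torch.tensor(
                [float(params[f"x{i}"]) for i in range(self.dim)],
                dtype=torch.double,  # double precision, as in the tests below
            )
            # 1d tensor of shape (num_outcomes,) -- here (1,).
            return torch.atleast_1d((x**2).sum())

`DummyTestFunction` in `ax/utils/testing/benchmark_stubs.py` (updated later in this patch) follows the same pattern.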
diff --git a/ax/benchmark/runners/__init__.py b/ax/benchmark/benchmark_test_functions/__init__.py similarity index 100% rename from ax/benchmark/runners/__init__.py rename to ax/benchmark/benchmark_test_functions/__init__.py diff --git a/ax/benchmark/runners/botorch_test.py b/ax/benchmark/benchmark_test_functions/botorch_test.py similarity index 82% rename from ax/benchmark/runners/botorch_test.py rename to ax/benchmark/benchmark_test_functions/botorch_test.py index 321675afab3..af29f6166a1 100644 --- a/ax/benchmark/runners/botorch_test.py +++ b/ax/benchmark/benchmark_test_functions/botorch_test.py @@ -5,39 +5,18 @@ # pyre-strict -from abc import ABC, abstractmethod from collections.abc import Mapping from dataclasses import dataclass from itertools import islice import torch -from ax.core.types import TParamValue +from ax.benchmark.benchmark_test_function import BenchmarkTestFunction from botorch.test_functions.synthetic import BaseTestProblem, ConstrainedBaseTestProblem from botorch.utils.transforms import normalize, unnormalize -from torch import Tensor @dataclass(kw_only=True) -class ParamBasedTestProblem(ABC): - """ - The basic Ax class for generating deterministic data to benchmark against. - - (Noise - if desired - is added by the runner.) - """ - - @abstractmethod - def evaluate_true(self, params: Mapping[str, TParamValue]) -> Tensor: - """ - Evaluate noiselessly. - - Returns: - 1d tensor of shape (num_outcomes,). - """ - ... - - -@dataclass(kw_only=True) -class BoTorchTestProblem(ParamBasedTestProblem): +class BoTorchTestFunction(BenchmarkTestFunction): """ Class for generating data from a BoTorch ``BaseTestProblem``. diff --git a/ax/benchmark/runners/surrogate.py b/ax/benchmark/benchmark_test_functions/surrogate.py similarity index 96% rename from ax/benchmark/runners/surrogate.py rename to ax/benchmark/benchmark_test_functions/surrogate.py index ac562506d5d..381b013fd86 100644 --- a/ax/benchmark/runners/surrogate.py +++ b/ax/benchmark/benchmark_test_functions/surrogate.py @@ -9,7 +9,7 @@ from dataclasses import dataclass import torch -from ax.benchmark.runners.botorch_test import ParamBasedTestProblem +from ax.benchmark.benchmark_test_function import BenchmarkTestFunction from ax.core.observation import ObservationFeatures from ax.core.types import TParamValue from ax.modelbridge.torch import TorchModelBridge @@ -21,7 +21,7 @@ @dataclass(kw_only=True) -class SurrogateTestFunction(ParamBasedTestProblem): +class SurrogateTestFunction(BenchmarkTestFunction): """ Data-generating function for surrogate benchmark problems. 
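To make the rename concrete, here is a short usage sketch of the renamed wrapper (illustration only, not part of the patch); the expected dtype and shape mirror the assertions in `test_tensor_shapes` further down this series:

    import torch
    from ax.benchmark.benchmark_test_functions.botorch_test import BoTorchTestFunction
    from botorch.test_functions.synthetic import Hartmann

    # Wrap a BoTorch problem; noise is configured on the BenchmarkRunner,
    # never on the wrapped problem itself.
    test_function = BoTorchTestFunction(botorch_problem=Hartmann(dim=6))
    params = {f"x{i}": 0.5 for i in range(6)}
    result = test_function.evaluate_true(params)
    assert result.dtype == torch.double
    assert result.shape == torch.Size([1])  # one outcome for plain Hartmann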
diff --git a/ax/benchmark/problems/hpo/torchvision.py b/ax/benchmark/problems/hpo/torchvision.py index 3f7f7008537..8616dab4b47 100644 --- a/ax/benchmark/problems/hpo/torchvision.py +++ b/ax/benchmark/problems/hpo/torchvision.py @@ -14,8 +14,8 @@ BenchmarkProblem, get_soo_config_and_outcome_names, ) -from ax.benchmark.runners.base import BenchmarkRunner -from ax.benchmark.runners.botorch_test import ParamBasedTestProblem +from ax.benchmark.benchmark_runner import BenchmarkRunner +from ax.benchmark.benchmark_test_function import BenchmarkTestFunction from ax.core.parameter import ParameterType, RangeParameter from ax.core.search_space import SearchSpace from ax.exceptions.core import UserInputError @@ -113,7 +113,7 @@ def train_and_evaluate( @dataclass(kw_only=True) -class PyTorchCNNTorchvisionParamBasedProblem(ParamBasedTestProblem): +class PyTorchCNNTorchvisionBenchmarkTestFunction(BenchmarkTestFunction): name: str # The name of the dataset to load -- MNIST or FashionMNIST device: torch.device = field( default_factory=lambda: torch.device( @@ -151,7 +151,7 @@ def __post_init__(self, train_loader: None, test_loader: None) -> None: transform=transforms.ToTensor(), ) # pyre-fixme: Undefined attribute [16]: - # `PyTorchCNNTorchvisionParamBasedProblem` has no attribute + # `PyTorchCNNTorchvisionBenchmarkTestFunction` has no attribute # `train_loader`. self.train_loader = DataLoader(train_set, num_workers=1) # pyre-fixme @@ -163,10 +163,10 @@ def evaluate_true(self, params: Mapping[str, int | float]) -> Tensor: frac_correct = train_and_evaluate( **params, device=self.device, - # pyre-fixme[16]: `PyTorchCNNTorchvisionParamBasedProblem` has no + # pyre-fixme[16]: `PyTorchCNNTorchvisionBenchmarkTestFunction` has no # attribute `train_loader`. train_loader=self.train_loader, - # pyre-fixme[16]: `PyTorchCNNTorchvisionParamBasedProblem` has no + # pyre-fixme[16]: `PyTorchCNNTorchvisionBenchmarkTestFunction` has no # attribute `test_loader`. test_loader=self.test_loader, ) @@ -215,7 +215,7 @@ def get_pytorch_cnn_torchvision_benchmark_problem( objective_name="accuracy", ) runner = BenchmarkRunner( - test_problem=PyTorchCNNTorchvisionParamBasedProblem(name=name), + test_problem=PyTorchCNNTorchvisionBenchmarkTestFunction(name=name), outcome_names=outcome_names, ) return BenchmarkProblem( diff --git a/ax/benchmark/problems/synthetic/discretized/mixed_integer.py b/ax/benchmark/problems/synthetic/discretized/mixed_integer.py index b3d66ef062b..38659846f17 100644 --- a/ax/benchmark/problems/synthetic/discretized/mixed_integer.py +++ b/ax/benchmark/problems/synthetic/discretized/mixed_integer.py @@ -21,8 +21,8 @@ from ax.benchmark.benchmark_metric import BenchmarkMetric from ax.benchmark.benchmark_problem import BenchmarkProblem -from ax.benchmark.runners.base import BenchmarkRunner -from ax.benchmark.runners.botorch_test import BoTorchTestProblem +from ax.benchmark.benchmark_runner import BenchmarkRunner +from ax.benchmark.benchmark_test_functions.botorch_test import BoTorchTestFunction from ax.core.objective import Objective from ax.core.optimization_config import OptimizationConfig from ax.core.parameter import ParameterType, RangeParameter @@ -47,7 +47,7 @@ def _get_problem_from_common_inputs( Args: bounds: The parameter bounds. These will be passed to - `BotorchTestProblemRunner` as `modified_bounds`, and the parameters + `BotorchTestFunction` as `modified_bounds`, and the parameters will be renormalized from these bounds to the bounds of the original problem. 
For example, if `bounds` are [(0, 3)] and the test problem's original bounds are [(0, 2)], then the original problem @@ -103,7 +103,7 @@ def _get_problem_from_common_inputs( else: test_problem = test_problem_class(dim=dim, bounds=test_problem_bounds) runner = BenchmarkRunner( - test_problem=BoTorchTestProblem( + test_problem=BoTorchTestFunction( botorch_problem=test_problem, modified_bounds=bounds ), outcome_names=[metric_name], diff --git a/ax/benchmark/problems/synthetic/hss/jenatton.py b/ax/benchmark/problems/synthetic/hss/jenatton.py index 69cd824759d..3e0e5fb2f53 100644 --- a/ax/benchmark/problems/synthetic/hss/jenatton.py +++ b/ax/benchmark/problems/synthetic/hss/jenatton.py @@ -11,8 +11,8 @@ import torch from ax.benchmark.benchmark_metric import BenchmarkMetric from ax.benchmark.benchmark_problem import BenchmarkProblem -from ax.benchmark.runners.base import BenchmarkRunner -from ax.benchmark.runners.botorch_test import ParamBasedTestProblem +from ax.benchmark.benchmark_runner import BenchmarkRunner +from ax.benchmark.benchmark_test_function import BenchmarkTestFunction from ax.core.objective import Objective from ax.core.optimization_config import OptimizationConfig from ax.core.parameter import ChoiceParameter, ParameterType, RangeParameter @@ -50,7 +50,7 @@ def jenatton_test_function( @dataclass(kw_only=True) -class Jenatton(ParamBasedTestProblem): +class Jenatton(BenchmarkTestFunction): """Jenatton test function for hierarchical search spaces.""" # pyre-fixme[14]: Inconsistent override diff --git a/ax/benchmark/tests/problems/test_mixed_integer_problems.py b/ax/benchmark/tests/problems/test_mixed_integer_problems.py index c744c50f516..90676a343a0 100644 --- a/ax/benchmark/tests/problems/test_mixed_integer_problems.py +++ b/ax/benchmark/tests/problems/test_mixed_integer_problems.py @@ -9,13 +9,13 @@ import torch from ax.benchmark.benchmark_problem import BenchmarkProblem +from ax.benchmark.benchmark_test_functions.botorch_test import BoTorchTestFunction from ax.benchmark.problems.synthetic.discretized.mixed_integer import ( get_discrete_ackley, get_discrete_hartmann, get_discrete_rosenbrock, ) -from ax.benchmark.runners.botorch_test import BoTorchTestProblem from ax.core.arm import Arm from ax.core.parameter import ParameterType from ax.core.trial import Trial @@ -35,7 +35,7 @@ def test_problems(self) -> None: problem = constructor() self.assertEqual(f"Discrete {name}", problem.name) runner = problem.runner - test_problem = assert_is_instance(runner.test_problem, BoTorchTestProblem) + test_problem = assert_is_instance(runner.test_problem, BoTorchTestFunction) botorch_problem = test_problem.botorch_problem self.assertIsInstance(botorch_problem, problem_cls) self.assertEqual(len(problem.search_space.parameters), dim) @@ -97,7 +97,7 @@ def test_problems(self) -> None: for problem, params, expected_arg in cases: runner = problem.runner - test_problem = assert_is_instance(runner.test_problem, BoTorchTestProblem) + test_problem = assert_is_instance(runner.test_problem, BoTorchTestFunction) trial = Trial(experiment=MagicMock()) # pyre-fixme: Incompatible parameter type [6]: In call # `Arm.__init__`, for argument `parameters`, expected `Dict[str, diff --git a/ax/benchmark/tests/runners/test_botorch_test_problem.py b/ax/benchmark/tests/runners/test_botorch_test_problem.py index 67ed96f4095..2753221b9df 100644 --- a/ax/benchmark/tests/runners/test_botorch_test_problem.py +++ b/ax/benchmark/tests/runners/test_botorch_test_problem.py @@ -15,10 +15,10 @@ import numpy as np import torch +from 
ax.benchmark.benchmark_runner import BenchmarkRunner +from ax.benchmark.benchmark_test_functions.botorch_test import BoTorchTestFunction +from ax.benchmark.benchmark_test_functions.surrogate import SurrogateTestFunction from ax.benchmark.problems.synthetic.hss.jenatton import get_jenatton_benchmark_problem -from ax.benchmark.runners.base import BenchmarkRunner -from ax.benchmark.runners.botorch_test import BoTorchTestProblem -from ax.benchmark.runners.surrogate import SurrogateTestFunction from ax.core.arm import Arm from ax.core.base_trial import TrialStatus from ax.core.trial import Trial @@ -26,15 +26,15 @@ from ax.utils.common.testutils import TestCase from ax.utils.common.typeutils import checked_cast from ax.utils.testing.benchmark_stubs import ( + DummyTestFunction, get_soo_surrogate_test_function, - TestParamBasedTestProblem, ) from botorch.test_functions.multi_objective import BraninCurrin from botorch.test_functions.synthetic import Ackley, ConstrainedHartmann, Hartmann from botorch.utils.transforms import normalize -class TestBoTorchTestProblem(TestCase): +class TestBoTorchTestFunction(TestCase): def setUp(self) -> None: super().setUp() botorch_base_test_functions = { @@ -44,7 +44,7 @@ def setUp(self) -> None: "negated constrained Hartmann": ConstrainedHartmann(dim=6, negate=True), } self.botorch_test_problems = { - k: BoTorchTestProblem(botorch_problem=v) + k: BoTorchTestFunction(botorch_problem=v) for k, v in botorch_base_test_functions.items() } @@ -73,9 +73,9 @@ def test_negation(self) -> None: def test_raises_for_botorch_attrs(self) -> None: msg = "noise should be set on the `BenchmarkRunner`, not the test function." with self.assertRaisesRegex(ValueError, msg): - BoTorchTestProblem(botorch_problem=Hartmann(dim=6, noise_std=0.1)) + BoTorchTestFunction(botorch_problem=Hartmann(dim=6, noise_std=0.1)) with self.assertRaisesRegex(ValueError, msg): - BoTorchTestProblem( + BoTorchTestFunction( botorch_problem=ConstrainedHartmann(dim=6, constraint_noise_std=0.1) ) @@ -84,7 +84,7 @@ def test_tensor_shapes(self) -> None: evaluate_true_results = { k: v.evaluate_true(params) for k, v in self.botorch_test_problems.items() } - evaluate_true_results["BraninCurrin"] = BoTorchTestProblem( + evaluate_true_results["BraninCurrin"] = BoTorchTestFunction( botorch_problem=BraninCurrin() ).evaluate_true(params) expected_len = { @@ -108,7 +108,7 @@ def setUp(self) -> None: def test_synthetic_runner(self) -> None: botorch_cases = [ ( - BoTorchTestProblem( + BoTorchTestFunction( botorch_problem=test_problem_class(dim=6), modified_bounds=modified_bounds, ), @@ -126,7 +126,7 @@ def test_synthetic_runner(self) -> None: ] param_based_cases = [ ( - TestParamBasedTestProblem(dim=6, num_outcomes=num_outcomes), + DummyTestFunction(dim=6, num_outcomes=num_outcomes), noise_std, num_outcomes, ) @@ -141,12 +141,12 @@ def test_synthetic_runner(self) -> None: botorch_cases + param_based_cases + surrogate_cases ): # Set up outcome names - if isinstance(test_problem, BoTorchTestProblem): + if isinstance(test_problem, BoTorchTestFunction): if isinstance(test_problem.botorch_problem, ConstrainedHartmann): outcome_names = ["objective_0", "constraint"] else: outcome_names = ["objective_0"] - elif isinstance(test_problem, TestParamBasedTestProblem): + elif isinstance(test_problem, DummyTestFunction): outcome_names = [f"objective_{i}" for i in range(num_outcomes)] else: # SurrogateTestFunction outcome_names = ["branin"] @@ -177,17 +177,17 @@ def test_synthetic_runner(self) -> None: # check equality new_runner = replace( 
- runner, test_problem=BoTorchTestProblem(botorch_problem=Ackley()) + runner, test_problem=BoTorchTestFunction(botorch_problem=Ackley()) ) self.assertNotEqual(runner, new_runner) self.assertEqual(runner, runner) - if isinstance(test_problem, BoTorchTestProblem): + if isinstance(test_problem, BoTorchTestFunction): self.assertEqual( test_problem.botorch_problem.bounds.dtype, torch.double ) - is_botorch = isinstance(test_problem, BoTorchTestProblem) + is_botorch = isinstance(test_problem, BoTorchTestFunction) with self.subTest(f"test `get_Y_true()`, {test_description}"): dim = 6 if is_botorch else 9 X = torch.rand(1, dim, dtype=torch.double) @@ -204,7 +204,7 @@ def test_synthetic_runner(self) -> None: nullcontext() if not isinstance(test_problem, SurrogateTestFunction) else patch.object( - # pyre-fixme: ParamBasedTestProblem` has no attribute + # pyre-fixme: BenchmarkTestFunction` has no attribute # `_surrogate`. runner.test_problem._surrogate, "predict", @@ -215,7 +215,7 @@ def test_synthetic_runner(self) -> None: oracle = runner.evaluate_oracle(parameters=params) if ( - isinstance(test_problem, BoTorchTestProblem) + isinstance(test_problem, BoTorchTestFunction) and test_problem.modified_bounds is not None ): X_tf = normalize( @@ -226,7 +226,7 @@ def test_synthetic_runner(self) -> None: ) else: X_tf = X - if isinstance(test_problem, BoTorchTestProblem): + if isinstance(test_problem, BoTorchTestFunction): botorch_problem = test_problem.botorch_problem obj = botorch_problem.evaluate_true(X_tf) if isinstance(botorch_problem, ConstrainedHartmann): @@ -261,6 +261,8 @@ def test_synthetic_runner(self) -> None: nullcontext() if not isinstance(test_problem, SurrogateTestFunction) else patch.object( + # pyre-fixme: BenchmarkTestFunction` has no attribute + # `_surrogate`. 
runner.test_problem._surrogate, "predict", return_value=({"branin": [4.2]}, None), @@ -298,7 +300,7 @@ def test_synthetic_runner(self) -> None: def test_botorch_test_problem_runner_heterogeneous_noise(self) -> None: for noise_std in [[0.1, 0.05], {"objective": 0.1, "constraint": 0.05}]: runner = BenchmarkRunner( - test_problem=BoTorchTestProblem( + test_problem=BoTorchTestFunction( botorch_problem=ConstrainedHartmann(dim=6) ), noise_std=noise_std, diff --git a/ax/benchmark/tests/runners/test_surrogate_runner.py b/ax/benchmark/tests/runners/test_surrogate_runner.py index 9ad256f7f90..6a8abdcb113 100644 --- a/ax/benchmark/tests/runners/test_surrogate_runner.py +++ b/ax/benchmark/tests/runners/test_surrogate_runner.py @@ -8,7 +8,7 @@ from unittest.mock import MagicMock, patch import torch -from ax.benchmark.runners.surrogate import SurrogateTestFunction +from ax.benchmark.benchmark_test_functions.surrogate import SurrogateTestFunction from ax.modelbridge.torch import TorchModelBridge from ax.utils.common.testutils import TestCase from ax.utils.testing.benchmark_stubs import get_soo_surrogate_test_function diff --git a/ax/benchmark/tests/test_benchmark_problem.py b/ax/benchmark/tests/test_benchmark_problem.py index b8f948a1607..1b3d649d184 100644 --- a/ax/benchmark/tests/test_benchmark_problem.py +++ b/ax/benchmark/tests/test_benchmark_problem.py @@ -14,8 +14,8 @@ from ax.benchmark.benchmark_metric import BenchmarkMetric from ax.benchmark.benchmark_problem import BenchmarkProblem, create_problem_from_botorch -from ax.benchmark.runners.base import BenchmarkRunner -from ax.benchmark.runners.botorch_test import BoTorchTestProblem +from ax.benchmark.benchmark_runner import BenchmarkRunner +from ax.benchmark.benchmark_test_functions.botorch_test import BoTorchTestFunction from ax.core.objective import MultiObjective, Objective from ax.core.optimization_config import ( MultiObjectiveOptimizationConfig, @@ -53,7 +53,7 @@ def test_inference_value_not_implemented(self) -> None: ] optimization_config = OptimizationConfig(objective=objectives[0]) runner = BenchmarkRunner( - test_problem=BoTorchTestProblem(botorch_problem=Branin()), + test_problem=BoTorchTestFunction(botorch_problem=Branin()), outcome_names=["foo"], ) with self.assertRaisesRegex(NotImplementedError, "Only `n_best_points=1`"): @@ -214,7 +214,7 @@ def _test_constrained_from_botorch( noise_std=noise_std, ) runner = ax_problem.runner - test_problem = assert_is_instance(runner.test_problem, BoTorchTestProblem) + test_problem = assert_is_instance(runner.test_problem, BoTorchTestFunction) botorch_problem = assert_is_instance( test_problem.botorch_problem, ConstrainedBaseTestProblem ) diff --git a/ax/utils/testing/benchmark_stubs.py b/ax/utils/testing/benchmark_stubs.py index 0c462ac32eb..46ce4c089a7 100644 --- a/ax/utils/testing/benchmark_stubs.py +++ b/ax/utils/testing/benchmark_stubs.py @@ -15,9 +15,9 @@ from ax.benchmark.benchmark_metric import BenchmarkMetric from ax.benchmark.benchmark_problem import BenchmarkProblem, create_problem_from_botorch from ax.benchmark.benchmark_result import AggregatedBenchmarkResult, BenchmarkResult -from ax.benchmark.runners.base import BenchmarkRunner -from ax.benchmark.runners.botorch_test import ParamBasedTestProblem -from ax.benchmark.runners.surrogate import SurrogateTestFunction +from ax.benchmark.benchmark_runner import BenchmarkRunner +from ax.benchmark.benchmark_test_function import BenchmarkTestFunction +from ax.benchmark.benchmark_test_functions.surrogate import SurrogateTestFunction from 
ax.core.experiment import Experiment from ax.core.objective import MultiObjective, Objective from ax.core.optimization_config import ( @@ -242,7 +242,7 @@ def get_aggregated_benchmark_result() -> AggregatedBenchmarkResult: @dataclass(kw_only=True) -class TestParamBasedTestProblem(ParamBasedTestProblem): +class DummyTestFunction(BenchmarkTestFunction): num_outcomes: int = 1 dim: int = 6 diff --git a/sphinx/source/benchmark.rst b/sphinx/source/benchmark.rst index 50214b268ca..9cab15b40d7 100644 --- a/sphinx/source/benchmark.rst +++ b/sphinx/source/benchmark.rst @@ -34,7 +34,6 @@ Benchmark Problem :undoc-members: :show-inheritance: - Benchmark Result ~~~~~~~~~~~~~~~~ @@ -51,6 +50,22 @@ Benchmark :undoc-members: :show-inheritance: +Benchmark Runner +~~~~~~~~~~~~~~~~ + +.. automodule:: ax.benchmark.benchmark_runner + :members: + :undoc-members: + :show-inheritance: + +Benchmark Test Function +~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: ax.benchmark.benchmark_test_function + :members: + :undoc-members: + :show-inheritance: + Benchmark Methods Modular BoTorch ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -83,14 +98,6 @@ Benchmark Problems High Dimensional Embedding :undoc-members: :show-inheritance: -Benchmark Problems Surrogate -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: ax.benchmark.problems.surrogate - :members: - :undoc-members: - :show-inheritance: - Benchmark Problems Mixed Integer Synthetic ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -123,26 +130,19 @@ Benchmark Problems PyTorchCNN TorchVision :undoc-members: :show-inheritance: -Benchmark Runners Base -~~~~~~~~~~~~~~~~~~~~~~ -.. automodule:: ax.benchmark.runners.base - :members: - :undoc-members: - :show-inheritance: +Benchmark Test Functions: BoTorch Test +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Benchmark Runners BoTorch Test -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: ax.benchmark.runners.botorch_test +.. automodule:: ax.benchmark.benchmark_test_functions.botorch_test :members: :undoc-members: :show-inheritance: -Benchmark Runners Surrogate -~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Benchmark Test Functions: Surrogate +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automodule:: ax.benchmark.runners.surrogate +.. 
automodule:: ax.benchmark.benchmark_test_functions.surrogate :members: :undoc-members: :show-inheritance: From 556c02f0a7efad3bcd8ade7672b2146fe3adcf8b Mon Sep 17 00:00:00 2001 From: Elizabeth Santorella Date: Wed, 30 Oct 2024 15:05:27 -0700 Subject: [PATCH 2/3] Rename `test_problem` attribute of `BenchmarkRunner` to `test_function`, because it is a `BenchmarkTestFunction` Summary: Rename `test_problem` attribute of `BenchmarkRunner` to `test_function`, because it is a `BenchmarkTestFunction` Differential Revision: D65088791 --- ax/benchmark/benchmark_problem.py | 2 +- ax/benchmark/benchmark_runner.py | 6 +-- ax/benchmark/problems/hpo/torchvision.py | 2 +- .../synthetic/discretized/mixed_integer.py | 2 +- .../problems/synthetic/hss/jenatton.py | 2 +- .../problems/test_mixed_integer_problems.py | 14 ++++-- .../runners/test_botorch_test_problem.py | 46 +++++++++---------- ax/benchmark/tests/test_benchmark_problem.py | 4 +- ax/utils/testing/benchmark_stubs.py | 4 +- 9 files changed, 43 insertions(+), 39 deletions(-) diff --git a/ax/benchmark/benchmark_problem.py b/ax/benchmark/benchmark_problem.py index e970404e112..52c658ec7d4 100644 --- a/ax/benchmark/benchmark_problem.py +++ b/ax/benchmark/benchmark_problem.py @@ -378,7 +378,7 @@ def create_problem_from_botorch( search_space=search_space, optimization_config=optimization_config, runner=BenchmarkRunner( - test_problem=BoTorchTestFunction(botorch_problem=test_problem), + test_function=BoTorchTestFunction(botorch_problem=test_problem), outcome_names=outcome_names, search_space_digest=extract_search_space_digest( search_space=search_space, diff --git a/ax/benchmark/benchmark_runner.py b/ax/benchmark/benchmark_runner.py index 55e934780dd..3e8ee1c35ec 100644 --- a/ax/benchmark/benchmark_runner.py +++ b/ax/benchmark/benchmark_runner.py @@ -48,7 +48,7 @@ class BenchmarkRunner(Runner): Args: outcome_names: The names of the outcomes returned by the problem. - test_problem: A ``BenchmarkTestFunction`` from which to generate + test_function: A ``BenchmarkTestFunction`` from which to generate deterministic data before adding noise. noise_std: The standard deviation of the noise added to the data. Can be a list or dict to be per-metric. @@ -56,7 +56,7 @@ class BenchmarkRunner(Runner): """ outcome_names: list[str] - test_problem: BenchmarkTestFunction + test_function: BenchmarkTestFunction noise_std: float | list[float] | dict[str, float] = 0.0 # pyre-fixme[16]: Pyre doesn't understand InitVars search_space_digest: InitVar[SearchSpaceDigest | None] = None @@ -77,7 +77,7 @@ def get_Y_true(self, params: Mapping[str, TParamValue]) -> Tensor: Returns: An `m`-dim tensor of ground truth (noiseless) evaluations. 
""" - return torch.atleast_1d(self.test_problem.evaluate_true(params=params)) + return torch.atleast_1d(self.test_function.evaluate_true(params=params)) def evaluate_oracle(self, parameters: Mapping[str, TParamValue]) -> npt.NDArray: """ diff --git a/ax/benchmark/problems/hpo/torchvision.py b/ax/benchmark/problems/hpo/torchvision.py index 8616dab4b47..ccb68d9dc53 100644 --- a/ax/benchmark/problems/hpo/torchvision.py +++ b/ax/benchmark/problems/hpo/torchvision.py @@ -215,7 +215,7 @@ def get_pytorch_cnn_torchvision_benchmark_problem( objective_name="accuracy", ) runner = BenchmarkRunner( - test_problem=PyTorchCNNTorchvisionBenchmarkTestFunction(name=name), + test_function=PyTorchCNNTorchvisionBenchmarkTestFunction(name=name), outcome_names=outcome_names, ) return BenchmarkProblem( diff --git a/ax/benchmark/problems/synthetic/discretized/mixed_integer.py b/ax/benchmark/problems/synthetic/discretized/mixed_integer.py index 38659846f17..8d797e1cdd2 100644 --- a/ax/benchmark/problems/synthetic/discretized/mixed_integer.py +++ b/ax/benchmark/problems/synthetic/discretized/mixed_integer.py @@ -103,7 +103,7 @@ def _get_problem_from_common_inputs( else: test_problem = test_problem_class(dim=dim, bounds=test_problem_bounds) runner = BenchmarkRunner( - test_problem=BoTorchTestFunction( + test_function=BoTorchTestFunction( botorch_problem=test_problem, modified_bounds=bounds ), outcome_names=[metric_name], diff --git a/ax/benchmark/problems/synthetic/hss/jenatton.py b/ax/benchmark/problems/synthetic/hss/jenatton.py index 3e0e5fb2f53..e97784adda3 100644 --- a/ax/benchmark/problems/synthetic/hss/jenatton.py +++ b/ax/benchmark/problems/synthetic/hss/jenatton.py @@ -119,7 +119,7 @@ def get_jenatton_benchmark_problem( search_space=search_space, optimization_config=optimization_config, runner=BenchmarkRunner( - test_problem=Jenatton(), outcome_names=[name], noise_std=noise_std + test_function=Jenatton(), outcome_names=[name], noise_std=noise_std ), num_trials=num_trials, observe_noise_stds=observe_noise_sd, diff --git a/ax/benchmark/tests/problems/test_mixed_integer_problems.py b/ax/benchmark/tests/problems/test_mixed_integer_problems.py index 90676a343a0..2c433ff55be 100644 --- a/ax/benchmark/tests/problems/test_mixed_integer_problems.py +++ b/ax/benchmark/tests/problems/test_mixed_integer_problems.py @@ -35,8 +35,10 @@ def test_problems(self) -> None: problem = constructor() self.assertEqual(f"Discrete {name}", problem.name) runner = problem.runner - test_problem = assert_is_instance(runner.test_problem, BoTorchTestFunction) - botorch_problem = test_problem.botorch_problem + test_function = assert_is_instance( + runner.test_function, BoTorchTestFunction + ) + botorch_problem = test_function.botorch_problem self.assertIsInstance(botorch_problem, problem_cls) self.assertEqual(len(problem.search_space.parameters), dim) self.assertEqual( @@ -97,7 +99,9 @@ def test_problems(self) -> None: for problem, params, expected_arg in cases: runner = problem.runner - test_problem = assert_is_instance(runner.test_problem, BoTorchTestFunction) + test_function = assert_is_instance( + runner.test_function, BoTorchTestFunction + ) trial = Trial(experiment=MagicMock()) # pyre-fixme: Incompatible parameter type [6]: In call # `Arm.__init__`, for argument `parameters`, expected `Dict[str, @@ -105,9 +109,9 @@ def test_problems(self) -> None: arm = Arm(parameters=params, name="--") trial.add_arm(arm) with patch.object( - test_problem.botorch_problem, + test_function.botorch_problem, attribute="evaluate_true", - 
wraps=test_problem.botorch_problem.evaluate_true, + wraps=test_function.botorch_problem.evaluate_true, ) as mock_call: runner.run(trial) actual = mock_call.call_args.kwargs["X"] diff --git a/ax/benchmark/tests/runners/test_botorch_test_problem.py b/ax/benchmark/tests/runners/test_botorch_test_problem.py index 2753221b9df..1a88099ea97 100644 --- a/ax/benchmark/tests/runners/test_botorch_test_problem.py +++ b/ax/benchmark/tests/runners/test_botorch_test_problem.py @@ -137,32 +137,32 @@ def test_synthetic_runner(self) -> None: (get_soo_surrogate_test_function(lazy=False), noise_std, 1) for noise_std in (0.0, 1.0, [0.0], [1.0]) ] - for test_problem, noise_std, num_outcomes in ( + for test_function, noise_std, num_outcomes in ( botorch_cases + param_based_cases + surrogate_cases ): # Set up outcome names - if isinstance(test_problem, BoTorchTestFunction): - if isinstance(test_problem.botorch_problem, ConstrainedHartmann): + if isinstance(test_function, BoTorchTestFunction): + if isinstance(test_function.botorch_problem, ConstrainedHartmann): outcome_names = ["objective_0", "constraint"] else: outcome_names = ["objective_0"] - elif isinstance(test_problem, DummyTestFunction): + elif isinstance(test_function, DummyTestFunction): outcome_names = [f"objective_{i}" for i in range(num_outcomes)] else: # SurrogateTestFunction outcome_names = ["branin"] # Set up runner runner = BenchmarkRunner( - test_problem=test_problem, + test_function=test_function, outcome_names=outcome_names, noise_std=noise_std, ) - test_description = f"{test_problem=}, {noise_std=}" + test_description = f"{test_function=}, {noise_std=}" with self.subTest( - f"Test basic construction, {test_problem=}, {noise_std=}" + f"Test basic construction, {test_function=}, {noise_std=}" ): - self.assertIs(runner.test_problem, test_problem) + self.assertIs(runner.test_function, test_function) self.assertEqual(runner.outcome_names, outcome_names) if isinstance(noise_std, list): self.assertEqual( @@ -177,17 +177,17 @@ def test_synthetic_runner(self) -> None: # check equality new_runner = replace( - runner, test_problem=BoTorchTestFunction(botorch_problem=Ackley()) + runner, test_function=BoTorchTestFunction(botorch_problem=Ackley()) ) self.assertNotEqual(runner, new_runner) self.assertEqual(runner, runner) - if isinstance(test_problem, BoTorchTestFunction): + if isinstance(test_function, BoTorchTestFunction): self.assertEqual( - test_problem.botorch_problem.bounds.dtype, torch.double + test_function.botorch_problem.bounds.dtype, torch.double ) - is_botorch = isinstance(test_problem, BoTorchTestFunction) + is_botorch = isinstance(test_function, BoTorchTestFunction) with self.subTest(f"test `get_Y_true()`, {test_description}"): dim = 6 if is_botorch else 9 X = torch.rand(1, dim, dtype=torch.double) @@ -202,11 +202,11 @@ def test_synthetic_runner(self) -> None: with ( nullcontext() - if not isinstance(test_problem, SurrogateTestFunction) + if not isinstance(test_function, SurrogateTestFunction) else patch.object( # pyre-fixme: BenchmarkTestFunction` has no attribute # `_surrogate`. 
- runner.test_problem._surrogate, + runner.test_function._surrogate, "predict", return_value=({"branin": [4.2]}, None), ) @@ -215,19 +215,19 @@ def test_synthetic_runner(self) -> None: oracle = runner.evaluate_oracle(parameters=params) if ( - isinstance(test_problem, BoTorchTestFunction) - and test_problem.modified_bounds is not None + isinstance(test_function, BoTorchTestFunction) + and test_function.modified_bounds is not None ): X_tf = normalize( X, torch.tensor( - test_problem.modified_bounds, dtype=torch.double + test_function.modified_bounds, dtype=torch.double ).T, ) else: X_tf = X - if isinstance(test_problem, BoTorchTestFunction): - botorch_problem = test_problem.botorch_problem + if isinstance(test_function, BoTorchTestFunction): + botorch_problem = test_function.botorch_problem obj = botorch_problem.evaluate_true(X_tf) if isinstance(botorch_problem, ConstrainedHartmann): expected_Y = torch.cat( @@ -239,7 +239,7 @@ def test_synthetic_runner(self) -> None: ) else: expected_Y = obj - elif isinstance(test_problem, SurrogateTestFunction): + elif isinstance(test_function, SurrogateTestFunction): expected_Y = torch.tensor([4.2], dtype=torch.double) else: expected_Y = torch.full( @@ -259,11 +259,11 @@ def test_synthetic_runner(self) -> None: with ( nullcontext() - if not isinstance(test_problem, SurrogateTestFunction) + if not isinstance(test_function, SurrogateTestFunction) else patch.object( # pyre-fixme: BenchmarkTestFunction` has no attribute # `_surrogate`. - runner.test_problem._surrogate, + runner.test_function._surrogate, "predict", return_value=({"branin": [4.2]}, None), ) @@ -300,7 +300,7 @@ def test_synthetic_runner(self) -> None: def test_botorch_test_problem_runner_heterogeneous_noise(self) -> None: for noise_std in [[0.1, 0.05], {"objective": 0.1, "constraint": 0.05}]: runner = BenchmarkRunner( - test_problem=BoTorchTestFunction( + test_function=BoTorchTestFunction( botorch_problem=ConstrainedHartmann(dim=6) ), noise_std=noise_std, diff --git a/ax/benchmark/tests/test_benchmark_problem.py b/ax/benchmark/tests/test_benchmark_problem.py index 1b3d649d184..3e388583155 100644 --- a/ax/benchmark/tests/test_benchmark_problem.py +++ b/ax/benchmark/tests/test_benchmark_problem.py @@ -53,7 +53,7 @@ def test_inference_value_not_implemented(self) -> None: ] optimization_config = OptimizationConfig(objective=objectives[0]) runner = BenchmarkRunner( - test_problem=BoTorchTestFunction(botorch_problem=Branin()), + test_function=BoTorchTestFunction(botorch_problem=Branin()), outcome_names=["foo"], ) with self.assertRaisesRegex(NotImplementedError, "Only `n_best_points=1`"): @@ -214,7 +214,7 @@ def _test_constrained_from_botorch( noise_std=noise_std, ) runner = ax_problem.runner - test_problem = assert_is_instance(runner.test_problem, BoTorchTestFunction) + test_problem = assert_is_instance(runner.test_function, BoTorchTestFunction) botorch_problem = assert_is_instance( test_problem.botorch_problem, ConstrainedBaseTestProblem ) diff --git a/ax/utils/testing/benchmark_stubs.py b/ax/utils/testing/benchmark_stubs.py index 46ce4c089a7..6aaf96ab9de 100644 --- a/ax/utils/testing/benchmark_stubs.py +++ b/ax/utils/testing/benchmark_stubs.py @@ -103,7 +103,7 @@ def get_soo_surrogate_test_function(lazy: bool = True) -> SurrogateTestFunction: def get_soo_surrogate() -> BenchmarkProblem: experiment = get_branin_experiment(with_completed_trial=True) test_function = get_soo_surrogate_test_function() - runner = BenchmarkRunner(test_problem=test_function, outcome_names=["branin"]) + runner = 
BenchmarkRunner(test_function=test_function, outcome_names=["branin"]) observe_noise_sd = True objective = Objective( @@ -140,7 +140,7 @@ def get_moo_surrogate() -> BenchmarkProblem: outcome_names=outcome_names, get_surrogate_and_datasets=lambda: (surrogate, []), ) - runner = BenchmarkRunner(test_problem=test_function, outcome_names=outcome_names) + runner = BenchmarkRunner(test_function=test_function, outcome_names=outcome_names) observe_noise_sd = True optimization_config = MultiObjectiveOptimizationConfig( objective=MultiObjective( From 05c52e24a5e093dd8a3f661cf49f71b6fece15b3 Mon Sep 17 00:00:00 2001 From: Elizabeth Santorella Date: Wed, 30 Oct 2024 15:08:52 -0700 Subject: [PATCH 3/3] Reorganize runner tests following previous class renaming (#2991) Summary: Pull Request resolved: https://github.com/facebook/Ax/pull/2991 See D64969707 for context. This diff: * Moves the parts of `tests/runners/test_botorch_test_problem` that pertain to the runner into `tests/test_benchmark_runner.py` * Moves the parts that pertain to `BoTorchTestFunction` to `tests/benchmark_test_functions/test_botorch_test_function.py` * Moves `tests/runners/test_surrogate_runner.py` to `tests/benchmark_test_functions/test_surrogate_test_function.py` Reviewed By: Balandat Differential Revision: D65090663 --- .../test_botorch_test_function.py | 80 +++++++++++++++++++ .../test_surrogate_test_function.py} | 0 ax/benchmark/tests/runners/__init__.py | 7 -- ...st_problem.py => test_benchmark_runner.py} | 73 +---------------- 4 files changed, 83 insertions(+), 77 deletions(-) create mode 100644 ax/benchmark/tests/benchmark_test_functions/test_botorch_test_function.py rename ax/benchmark/tests/{runners/test_surrogate_runner.py => benchmark_test_functions/test_surrogate_test_function.py} (100%) delete mode 100644 ax/benchmark/tests/runners/__init__.py rename ax/benchmark/tests/{runners/test_botorch_test_problem.py => test_benchmark_runner.py} (78%) diff --git a/ax/benchmark/tests/benchmark_test_functions/test_botorch_test_function.py b/ax/benchmark/tests/benchmark_test_functions/test_botorch_test_function.py new file mode 100644 index 00000000000..f3c0136e713 --- /dev/null +++ b/ax/benchmark/tests/benchmark_test_functions/test_botorch_test_function.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-strict + + +import torch +from ax.benchmark.benchmark_test_functions.botorch_test import BoTorchTestFunction +from ax.utils.common.testutils import TestCase +from botorch.test_functions.multi_objective import BraninCurrin +from botorch.test_functions.synthetic import ConstrainedHartmann, Hartmann + + +class TestBoTorchTestFunction(TestCase): + def setUp(self) -> None: + super().setUp() + botorch_base_test_functions = { + "base Hartmann": Hartmann(dim=6), + "negated Hartmann": Hartmann(dim=6, negate=True), + "constrained Hartmann": ConstrainedHartmann(dim=6), + "negated constrained Hartmann": ConstrainedHartmann(dim=6, negate=True), + } + self.botorch_test_problems = { + k: BoTorchTestFunction(botorch_problem=v) + for k, v in botorch_base_test_functions.items() + } + + def test_negation(self) -> None: + params = {f"x{i}": 0.5 for i in range(6)} + evaluate_true_results = { + k: v.evaluate_true(params) for k, v in self.botorch_test_problems.items() + } + self.assertEqual( + evaluate_true_results["base Hartmann"], + evaluate_true_results["constrained Hartmann"][0], + ) + self.assertEqual( + evaluate_true_results["base Hartmann"], + -evaluate_true_results["negated Hartmann"], + ) + self.assertEqual( + evaluate_true_results["negated Hartmann"], + evaluate_true_results["negated constrained Hartmann"][0], + ) + self.assertEqual( + evaluate_true_results["constrained Hartmann"][1], + evaluate_true_results["negated constrained Hartmann"][1], + ) + + def test_raises_for_botorch_attrs(self) -> None: + msg = "noise should be set on the `BenchmarkRunner`, not the test function." + with self.assertRaisesRegex(ValueError, msg): + BoTorchTestFunction(botorch_problem=Hartmann(dim=6, noise_std=0.1)) + with self.assertRaisesRegex(ValueError, msg): + BoTorchTestFunction( + botorch_problem=ConstrainedHartmann(dim=6, constraint_noise_std=0.1) + ) + + def test_tensor_shapes(self) -> None: + params = {f"x{i}": 0.5 for i in range(6)} + evaluate_true_results = { + k: v.evaluate_true(params) for k, v in self.botorch_test_problems.items() + } + evaluate_true_results["BraninCurrin"] = BoTorchTestFunction( + botorch_problem=BraninCurrin() + ).evaluate_true(params) + expected_len = { + "base Hartmann": 1, + "constrained Hartmann": 2, + "negated Hartmann": 1, + "negated constrained Hartmann": 2, + "BraninCurrin": 2, + } + for name, result in evaluate_true_results.items(): + with self.subTest(name=name): + self.assertEqual(result.dtype, torch.double) + self.assertEqual(result.shape, torch.Size([expected_len[name]])) diff --git a/ax/benchmark/tests/runners/test_surrogate_runner.py b/ax/benchmark/tests/benchmark_test_functions/test_surrogate_test_function.py similarity index 100% rename from ax/benchmark/tests/runners/test_surrogate_runner.py rename to ax/benchmark/tests/benchmark_test_functions/test_surrogate_test_function.py diff --git a/ax/benchmark/tests/runners/__init__.py b/ax/benchmark/tests/runners/__init__.py deleted file mode 100644 index c412c1b3b4f..00000000000 --- a/ax/benchmark/tests/runners/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -# pyre-strict diff --git a/ax/benchmark/tests/runners/test_botorch_test_problem.py b/ax/benchmark/tests/test_benchmark_runner.py similarity index 78% rename from ax/benchmark/tests/runners/test_botorch_test_problem.py rename to ax/benchmark/tests/test_benchmark_runner.py index 1a88099ea97..f4e13aa6dbc 100644 --- a/ax/benchmark/tests/runners/test_botorch_test_problem.py +++ b/ax/benchmark/tests/test_benchmark_runner.py @@ -29,83 +29,16 @@ DummyTestFunction, get_soo_surrogate_test_function, ) -from botorch.test_functions.multi_objective import BraninCurrin from botorch.test_functions.synthetic import Ackley, ConstrainedHartmann, Hartmann from botorch.utils.transforms import normalize -class TestBoTorchTestFunction(TestCase): - def setUp(self) -> None: - super().setUp() - botorch_base_test_functions = { - "base Hartmann": Hartmann(dim=6), - "negated Hartmann": Hartmann(dim=6, negate=True), - "constrained Hartmann": ConstrainedHartmann(dim=6), - "negated constrained Hartmann": ConstrainedHartmann(dim=6, negate=True), - } - self.botorch_test_problems = { - k: BoTorchTestFunction(botorch_problem=v) - for k, v in botorch_base_test_functions.items() - } - - def test_negation(self) -> None: - params = {f"x{i}": 0.5 for i in range(6)} - evaluate_true_results = { - k: v.evaluate_true(params) for k, v in self.botorch_test_problems.items() - } - self.assertEqual( - evaluate_true_results["base Hartmann"], - evaluate_true_results["constrained Hartmann"][0], - ) - self.assertEqual( - evaluate_true_results["base Hartmann"], - -evaluate_true_results["negated Hartmann"], - ) - self.assertEqual( - evaluate_true_results["negated Hartmann"], - evaluate_true_results["negated constrained Hartmann"][0], - ) - self.assertEqual( - evaluate_true_results["constrained Hartmann"][1], - evaluate_true_results["negated constrained Hartmann"][1], - ) - - def test_raises_for_botorch_attrs(self) -> None: - msg = "noise should be set on the `BenchmarkRunner`, not the test function." - with self.assertRaisesRegex(ValueError, msg): - BoTorchTestFunction(botorch_problem=Hartmann(dim=6, noise_std=0.1)) - with self.assertRaisesRegex(ValueError, msg): - BoTorchTestFunction( - botorch_problem=ConstrainedHartmann(dim=6, constraint_noise_std=0.1) - ) - - def test_tensor_shapes(self) -> None: - params = {f"x{i}": 0.5 for i in range(6)} - evaluate_true_results = { - k: v.evaluate_true(params) for k, v in self.botorch_test_problems.items() - } - evaluate_true_results["BraninCurrin"] = BoTorchTestFunction( - botorch_problem=BraninCurrin() - ).evaluate_true(params) - expected_len = { - "base Hartmann": 1, - "constrained Hartmann": 2, - "negated Hartmann": 1, - "negated constrained Hartmann": 2, - "BraninCurrin": 2, - } - for name, result in evaluate_true_results.items(): - with self.subTest(name=name): - self.assertEqual(result.dtype, torch.double) - self.assertEqual(result.shape, torch.Size([expected_len[name]])) - - -class TestSyntheticRunner(TestCase): +class TestBenchmarkRunner(TestCase): def setUp(self) -> None: super().setUp() self.maxDiff = None - def test_synthetic_runner(self) -> None: + def test_runner(self) -> None: botorch_cases = [ ( BoTorchTestFunction( @@ -297,7 +230,7 @@ def test_synthetic_runner(self) -> None: ): BenchmarkRunner.deserialize_init_args({}) - def test_botorch_test_problem_runner_heterogeneous_noise(self) -> None: + def test_heterogeneous_noise(self) -> None: for noise_std in [[0.1, 0.05], {"objective": 0.1, "constraint": 0.05}]: runner = BenchmarkRunner( test_function=BoTorchTestFunction(