diff --git a/ax/benchmark/problems/surrogate.py b/ax/benchmark/problems/surrogate.py
deleted file mode 100644
index 6c188540801..00000000000
--- a/ax/benchmark/problems/surrogate.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-# pyre-strict
-"""
-Benchmark problem based on surrogate.
-
-This problem class might appear to function identically to its non-surrogate
-counterpart, `BenchmarkProblem`, aside from the restriction that its runners is
-of type `SurrogateRunner`. However, it is treated specially within JSON storage
-because surrogates cannot be easily serialized.
-"""
-
-from dataclasses import dataclass, field
-
-from ax.benchmark.benchmark_problem import BenchmarkProblem
-from ax.benchmark.runners.surrogate import SurrogateRunner
-
-
-@dataclass(kw_only=True)
-class SurrogateBenchmarkProblem(BenchmarkProblem):
-    """
-    Benchmark problem whose `runner` is a `SurrogateRunner`.
-
-    `SurrogateRunner` allows for the surrogate to be constructed lazily and for
-    datasets to be downloaded lazily.
-
-    For argument descriptions, see `BenchmarkProblem`.
-    """
-
-    runner: SurrogateRunner = field(repr=False)
diff --git a/ax/benchmark/tests/runners/test_botorch_test_problem.py b/ax/benchmark/tests/runners/test_botorch_test_problem.py
index 02d183722a8..c7d4e11c248 100644
--- a/ax/benchmark/tests/runners/test_botorch_test_problem.py
+++ b/ax/benchmark/tests/runners/test_botorch_test_problem.py
@@ -150,7 +150,7 @@ def test_synthetic_runner(self) -> None:
                 outcome_names = ["objective_0"]
             elif isinstance(test_problem, TestParamBasedTestProblem):
                 outcome_names = [f"objective_{i}" for i in range(num_outcomes)]
-            elif isinstance(test_problem, SurrogateTestFunction):
+            else:
                 outcome_names = ["branin"]

             # Set up runner
diff --git a/ax/benchmark/tests/runners/test_surrogate_runner.py b/ax/benchmark/tests/runners/test_surrogate_runner.py
index 099d09f2d5e..9ad256f7f90 100644
--- a/ax/benchmark/tests/runners/test_surrogate_runner.py
+++ b/ax/benchmark/tests/runners/test_surrogate_runner.py
@@ -8,15 +8,10 @@
 from unittest.mock import MagicMock, patch

 import torch
-from ax.benchmark.runners.surrogate import SurrogateRunner, SurrogateTestFunction
-from ax.core.parameter import ParameterType, RangeParameter
-from ax.core.search_space import SearchSpace
+from ax.benchmark.runners.surrogate import SurrogateTestFunction
 from ax.modelbridge.torch import TorchModelBridge
 from ax.utils.common.testutils import TestCase
-from ax.utils.testing.benchmark_stubs import (
-    get_soo_surrogate_legacy,
-    get_soo_surrogate_test_function,
-)
+from ax.utils.testing.benchmark_stubs import get_soo_surrogate_test_function


 class TestSurrogateTestFunction(TestCase):
@@ -84,84 +79,3 @@ def _construct_test_function(name: str) -> SurrogateTestFunction:
         self.assertEqual(runner_1, runner_1a)
         self.assertNotEqual(runner_1, runner_2)
         self.assertNotEqual(runner_1, 1)
-
-
-class TestSurrogateRunner(TestCase):
-    def setUp(self) -> None:
-        super().setUp()
-        self.search_space = SearchSpace(
-            parameters=[
-                RangeParameter("x", ParameterType.FLOAT, 0.0, 5.0),
-                RangeParameter("y", ParameterType.FLOAT, 1.0, 10.0, log_scale=True),
-                RangeParameter("z", ParameterType.INT, 1.0, 5.0, log_scale=True),
-            ]
-        )
-
-    def test_surrogate_runner(self) -> None:
-        # Construct a search space with log-scale parameters.
-        for noise_std in (0.0, 0.1, {"dummy_metric": 0.2}):
-            with self.subTest(noise_std=noise_std):
-                surrogate = MagicMock()
-                mock_mean = torch.tensor([[0.1234]], dtype=torch.double)
-                surrogate.predict = MagicMock(return_value=(mock_mean, 0))
-                surrogate.device = torch.device("cpu")
-                surrogate.dtype = torch.double
-                runner = SurrogateRunner(
-                    name="test runner",
-                    _surrogate=surrogate,
-                    _datasets=[],
-                    outcome_names=["dummy_metric"],
-                    noise_stds=noise_std,
-                )
-                self.assertEqual(runner.name, "test runner")
-                self.assertIs(runner.surrogate, surrogate)
-                self.assertEqual(runner.outcome_names, ["dummy_metric"])
-                self.assertEqual(runner.noise_stds, noise_std)
-
-    def test_lazy_instantiation(self) -> None:
-        runner = get_soo_surrogate_legacy().runner
-
-        self.assertIsNone(runner._surrogate)
-        self.assertIsNone(runner._datasets)
-
-        # Accessing `surrogate` sets datasets and surrogate
-        self.assertIsInstance(runner.surrogate, TorchModelBridge)
-        self.assertIsInstance(runner._surrogate, TorchModelBridge)
-        self.assertIsInstance(runner._datasets, list)
-
-        # Accessing `datasets` also sets datasets and surrogate
-        runner = get_soo_surrogate_legacy().runner
-        self.assertIsInstance(runner.datasets, list)
-        self.assertIsInstance(runner._surrogate, TorchModelBridge)
-        self.assertIsInstance(runner._datasets, list)
-
-        with patch.object(
-            runner,
-            "get_surrogate_and_datasets",
-            wraps=runner.get_surrogate_and_datasets,
-        ) as mock_get_surrogate_and_datasets:
-            runner.surrogate
-            mock_get_surrogate_and_datasets.assert_not_called()
-
-    def test_instantiation_raises_with_missing_args(self) -> None:
-        with self.assertRaisesRegex(
-            ValueError, "If `get_surrogate_and_datasets` is None, `_surrogate` and "
-        ):
-            SurrogateRunner(name="test runner", outcome_names=[], noise_stds=0.0)
-
-    def test_equality(self) -> None:
-        def _construct_runner(name: str) -> SurrogateRunner:
-            return SurrogateRunner(
-                name=name,
-                _surrogate=MagicMock(),
-                _datasets=[],
-                outcome_names=["dummy_metric"],
-                noise_stds=0.0,
-            )
-
-        runner_1 = _construct_runner("test 1")
-        runner_2 = _construct_runner("test 2")
-        runner_1a = _construct_runner("test 1")
-        self.assertEqual(runner_1, runner_1a)
-        self.assertNotEqual(runner_1, runner_2)
-        self.assertNotEqual(runner_1, 1)
diff --git a/ax/storage/json_store/registry.py b/ax/storage/json_store/registry.py
index 0cdb49c32b6..a798c30b1fe 100644
--- a/ax/storage/json_store/registry.py
+++ b/ax/storage/json_store/registry.py
@@ -370,8 +370,6 @@
     "SumConstraint": SumConstraint,
     "Surrogate": Surrogate,
     "SurrogateMetric": BenchmarkMetric,  # backward-compatiblity
-    # NOTE: SurrogateRunners -> SyntheticRunner on load due to complications
-    "SurrogateRunner": SyntheticRunner,
     "SobolQMCNormalSampler": SobolQMCNormalSampler,
     "SyntheticRunner": SyntheticRunner,
     "SurrogateSpec": SurrogateSpec,
diff --git a/ax/utils/testing/benchmark_stubs.py b/ax/utils/testing/benchmark_stubs.py
index 60244511b6b..f5a7a72b717 100644
--- a/ax/utils/testing/benchmark_stubs.py
+++ b/ax/utils/testing/benchmark_stubs.py
@@ -15,12 +15,11 @@
 from ax.benchmark.benchmark_metric import BenchmarkMetric
 from ax.benchmark.benchmark_problem import BenchmarkProblem, create_problem_from_botorch
 from ax.benchmark.benchmark_result import AggregatedBenchmarkResult, BenchmarkResult
-from ax.benchmark.problems.surrogate import SurrogateBenchmarkProblem
 from ax.benchmark.runners.botorch_test import (
     ParamBasedTestProblem,
     ParamBasedTestProblemRunner,
 )
-from ax.benchmark.runners.surrogate import SurrogateRunner, SurrogateTestFunction
+from ax.benchmark.runners.surrogate import SurrogateTestFunction
 from ax.core.experiment import Experiment
 from ax.core.objective import MultiObjective, Objective
 from ax.core.optimization_config import (
@@ -129,41 +128,7 @@ def get_soo_surrogate() -> BenchmarkProblem:
     )


-def get_soo_surrogate_legacy() -> SurrogateBenchmarkProblem:
-    experiment = get_branin_experiment(with_completed_trial=True)
-    surrogate = TorchModelBridge(
-        experiment=experiment,
-        search_space=experiment.search_space,
-        model=BoTorchModel(surrogate=Surrogate(botorch_model_class=SingleTaskGP)),
-        data=experiment.lookup_data(),
-        transforms=[],
-    )
-    runner = SurrogateRunner(
-        name="test",
-        outcome_names=["branin"],
-        get_surrogate_and_datasets=lambda: (surrogate, []),
-    )
-
-    observe_noise_sd = True
-    objective = Objective(
-        metric=BenchmarkMetric(
-            name="branin", lower_is_better=True, observe_noise_sd=observe_noise_sd
-        ),
-    )
-    optimization_config = OptimizationConfig(objective=objective)
-
-    return SurrogateBenchmarkProblem(
-        name="test",
-        search_space=experiment.search_space,
-        optimization_config=optimization_config,
-        num_trials=6,
-        observe_noise_stds=observe_noise_sd,
-        optimal_value=0.0,
-        runner=runner,
-    )
-
-
-def get_moo_surrogate() -> SurrogateBenchmarkProblem:
+def get_moo_surrogate() -> BenchmarkProblem:
     experiment = get_branin_experiment_with_multi_objective(with_completed_trial=True)
     surrogate = TorchModelBridge(
         experiment=experiment,
@@ -173,11 +138,15 @@ def get_moo_surrogate() -> SurrogateBenchmarkProblem:
         transforms=[],
     )
-    runner = SurrogateRunner(
+    outcome_names = ["branin_a", "branin_b"]
+    test_function = SurrogateTestFunction(
         name="test",
-        outcome_names=["branin_a", "branin_b"],
+        outcome_names=outcome_names,
         get_surrogate_and_datasets=lambda: (surrogate, []),
     )
+    runner = ParamBasedTestProblemRunner(
+        test_problem=test_function, outcome_names=outcome_names
+    )

     observe_noise_sd = True
     optimization_config = MultiObjectiveOptimizationConfig(
         objective=MultiObjective(
@@ -199,7 +168,7 @@ def get_moo_surrogate() -> SurrogateBenchmarkProblem:
             ],
         )
     )
-    return SurrogateBenchmarkProblem(
+    return BenchmarkProblem(
         name="test",
         search_space=experiment.search_space,
         optimization_config=optimization_config,
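Note: the benchmark_stubs.py hunks above show the pattern that replaces the deleted SurrogateRunner / SurrogateBenchmarkProblem pair: a SurrogateTestFunction wrapped in a ParamBasedTestProblemRunner inside a plain BenchmarkProblem. The sketch below restates that pattern in one place, mirroring the removed get_soo_surrogate_legacy stub and the new get_moo_surrogate body. It is a minimal sketch assembled only from code appearing in this patch; the import paths marked "assumed" are not taken from the patch and may differ from the actual modules.

# A minimal sketch of the replacement pattern, based on the removed
# get_soo_surrogate_legacy stub and the updated get_moo_surrogate above.
from botorch.models import SingleTaskGP  # assumed import path

from ax.benchmark.benchmark_metric import BenchmarkMetric
from ax.benchmark.benchmark_problem import BenchmarkProblem
from ax.benchmark.runners.botorch_test import ParamBasedTestProblemRunner
from ax.benchmark.runners.surrogate import SurrogateTestFunction
from ax.core.objective import Objective
from ax.core.optimization_config import OptimizationConfig
from ax.modelbridge.torch import TorchModelBridge
from ax.models.torch.botorch_modular.model import BoTorchModel  # assumed import path
from ax.models.torch.botorch_modular.surrogate import Surrogate  # assumed import path
from ax.utils.testing.core_stubs import get_branin_experiment  # assumed import path

# Fit a surrogate to a completed Branin experiment, as the removed stub did.
experiment = get_branin_experiment(with_completed_trial=True)
surrogate = TorchModelBridge(
    experiment=experiment,
    search_space=experiment.search_space,
    model=BoTorchModel(surrogate=Surrogate(botorch_model_class=SingleTaskGP)),
    data=experiment.lookup_data(),
    transforms=[],
)

# New pattern: SurrogateTestFunction + ParamBasedTestProblemRunner replace
# SurrogateRunner; the surrogate and datasets are supplied through the
# get_surrogate_and_datasets callable, as in the stubs above.
outcome_names = ["branin"]
test_function = SurrogateTestFunction(
    name="test",
    outcome_names=outcome_names,
    get_surrogate_and_datasets=lambda: (surrogate, []),
)
runner = ParamBasedTestProblemRunner(
    test_problem=test_function, outcome_names=outcome_names
)

observe_noise_sd = True
optimization_config = OptimizationConfig(
    objective=Objective(
        metric=BenchmarkMetric(
            name="branin", lower_is_better=True, observe_noise_sd=observe_noise_sd
        ),
    )
)

# A plain BenchmarkProblem now plays the role of SurrogateBenchmarkProblem.
problem = BenchmarkProblem(
    name="test",
    search_space=experiment.search_space,
    optimization_config=optimization_config,
    num_trials=6,
    observe_noise_stds=observe_noise_sd,
    optimal_value=0.0,
    runner=runner,
)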