From 405dfad15a72dcf299d60b57e0c520c6ef8a3b32 Mon Sep 17 00:00:00 2001
From: Elizabeth Santorella
Date: Wed, 30 Oct 2024 07:06:51 -0700
Subject: [PATCH] Rename `test_problem` attribute of `BenchmarkRunner` to
 `test_function`, because it is a `BenchmarkTestFunction`

Summary: Rename `test_problem` attribute of `BenchmarkRunner` to
`test_function`, because it is a `BenchmarkTestFunction`

Differential Revision: D65088791
---
 ax/benchmark/benchmark_problem.py             |  2 +-
 ax/benchmark/benchmark_runner.py              |  6 +--
 ax/benchmark/problems/hpo/torchvision.py      |  2 +-
 .../synthetic/discretized/mixed_integer.py    |  2 +-
 .../problems/synthetic/hss/jenatton.py        |  2 +-
 .../problems/test_mixed_integer_problems.py   | 14 ++++--
 .../runners/test_botorch_test_problem.py      | 46 +++++++++----------
 ax/benchmark/tests/test_benchmark_problem.py  |  4 +-
 ax/utils/testing/benchmark_stubs.py           |  4 +-
 9 files changed, 43 insertions(+), 39 deletions(-)

diff --git a/ax/benchmark/benchmark_problem.py b/ax/benchmark/benchmark_problem.py
index e970404e112..52c658ec7d4 100644
--- a/ax/benchmark/benchmark_problem.py
+++ b/ax/benchmark/benchmark_problem.py
@@ -378,7 +378,7 @@ def create_problem_from_botorch(
         search_space=search_space,
         optimization_config=optimization_config,
         runner=BenchmarkRunner(
-            test_problem=BoTorchTestFunction(botorch_problem=test_problem),
+            test_function=BoTorchTestFunction(botorch_problem=test_problem),
             outcome_names=outcome_names,
             search_space_digest=extract_search_space_digest(
                 search_space=search_space,
diff --git a/ax/benchmark/benchmark_runner.py b/ax/benchmark/benchmark_runner.py
index 55e934780dd..3e8ee1c35ec 100644
--- a/ax/benchmark/benchmark_runner.py
+++ b/ax/benchmark/benchmark_runner.py
@@ -48,7 +48,7 @@ class BenchmarkRunner(Runner):
 
     Args:
         outcome_names: The names of the outcomes returned by the problem.
-        test_problem: A ``BenchmarkTestFunction`` from which to generate
+        test_function: A ``BenchmarkTestFunction`` from which to generate
             deterministic data before adding noise.
         noise_std: The standard deviation of the noise added to the data. Can be
             a list or dict to be per-metric.
@@ -56,7 +56,7 @@ class BenchmarkRunner(Runner):
     """
 
     outcome_names: list[str]
-    test_problem: BenchmarkTestFunction
+    test_function: BenchmarkTestFunction
     noise_std: float | list[float] | dict[str, float] = 0.0
     # pyre-fixme[16]: Pyre doesn't understand InitVars
     search_space_digest: InitVar[SearchSpaceDigest | None] = None
@@ -77,7 +77,7 @@ def get_Y_true(self, params: Mapping[str, TParamValue]) -> Tensor:
         Returns:
             An `m`-dim tensor of ground truth (noiseless) evaluations.
         """
-        return torch.atleast_1d(self.test_problem.evaluate_true(params=params))
+        return torch.atleast_1d(self.test_function.evaluate_true(params=params))
 
     def evaluate_oracle(self, parameters: Mapping[str, TParamValue]) -> npt.NDArray:
         """
diff --git a/ax/benchmark/problems/hpo/torchvision.py b/ax/benchmark/problems/hpo/torchvision.py
index 8616dab4b47..ccb68d9dc53 100644
--- a/ax/benchmark/problems/hpo/torchvision.py
+++ b/ax/benchmark/problems/hpo/torchvision.py
@@ -215,7 +215,7 @@ def get_pytorch_cnn_torchvision_benchmark_problem(
         objective_name="accuracy",
     )
     runner = BenchmarkRunner(
-        test_problem=PyTorchCNNTorchvisionBenchmarkTestFunction(name=name),
+        test_function=PyTorchCNNTorchvisionBenchmarkTestFunction(name=name),
         outcome_names=outcome_names,
     )
     return BenchmarkProblem(
diff --git a/ax/benchmark/problems/synthetic/discretized/mixed_integer.py b/ax/benchmark/problems/synthetic/discretized/mixed_integer.py
index 38659846f17..8d797e1cdd2 100644
--- a/ax/benchmark/problems/synthetic/discretized/mixed_integer.py
+++ b/ax/benchmark/problems/synthetic/discretized/mixed_integer.py
@@ -103,7 +103,7 @@ def _get_problem_from_common_inputs(
     else:
         test_problem = test_problem_class(dim=dim, bounds=test_problem_bounds)
     runner = BenchmarkRunner(
-        test_problem=BoTorchTestFunction(
+        test_function=BoTorchTestFunction(
             botorch_problem=test_problem, modified_bounds=bounds
         ),
         outcome_names=[metric_name],
diff --git a/ax/benchmark/problems/synthetic/hss/jenatton.py b/ax/benchmark/problems/synthetic/hss/jenatton.py
index 3e0e5fb2f53..e97784adda3 100644
--- a/ax/benchmark/problems/synthetic/hss/jenatton.py
+++ b/ax/benchmark/problems/synthetic/hss/jenatton.py
@@ -119,7 +119,7 @@ def get_jenatton_benchmark_problem(
         search_space=search_space,
         optimization_config=optimization_config,
         runner=BenchmarkRunner(
-            test_problem=Jenatton(), outcome_names=[name], noise_std=noise_std
+            test_function=Jenatton(), outcome_names=[name], noise_std=noise_std
         ),
         num_trials=num_trials,
         observe_noise_stds=observe_noise_sd,
diff --git a/ax/benchmark/tests/problems/test_mixed_integer_problems.py b/ax/benchmark/tests/problems/test_mixed_integer_problems.py
index 90676a343a0..2c433ff55be 100644
--- a/ax/benchmark/tests/problems/test_mixed_integer_problems.py
+++ b/ax/benchmark/tests/problems/test_mixed_integer_problems.py
@@ -35,8 +35,10 @@ def test_problems(self) -> None:
             problem = constructor()
             self.assertEqual(f"Discrete {name}", problem.name)
             runner = problem.runner
-            test_problem = assert_is_instance(runner.test_problem, BoTorchTestFunction)
-            botorch_problem = test_problem.botorch_problem
+            test_function = assert_is_instance(
+                runner.test_function, BoTorchTestFunction
+            )
+            botorch_problem = test_function.botorch_problem
             self.assertIsInstance(botorch_problem, problem_cls)
             self.assertEqual(len(problem.search_space.parameters), dim)
             self.assertEqual(
@@ -97,7 +99,9 @@ def test_problems(self) -> None:
 
         for problem, params, expected_arg in cases:
             runner = problem.runner
-            test_problem = assert_is_instance(runner.test_problem, BoTorchTestFunction)
+            test_function = assert_is_instance(
+                runner.test_function, BoTorchTestFunction
+            )
             trial = Trial(experiment=MagicMock())
             # pyre-fixme: Incompatible parameter type [6]: In call
             # `Arm.__init__`, for argument `parameters`, expected `Dict[str,
@@ -105,9 +109,9 @@ def test_problems(self) -> None:
             arm = Arm(parameters=params, name="--")
             trial.add_arm(arm)
             with patch.object(
-                test_problem.botorch_problem,
+                test_function.botorch_problem,
                 attribute="evaluate_true",
-                wraps=test_problem.botorch_problem.evaluate_true,
+                wraps=test_function.botorch_problem.evaluate_true,
             ) as mock_call:
                 runner.run(trial)
             actual = mock_call.call_args.kwargs["X"]
diff --git a/ax/benchmark/tests/runners/test_botorch_test_problem.py b/ax/benchmark/tests/runners/test_botorch_test_problem.py
index 2753221b9df..1a88099ea97 100644
--- a/ax/benchmark/tests/runners/test_botorch_test_problem.py
+++ b/ax/benchmark/tests/runners/test_botorch_test_problem.py
@@ -137,32 +137,32 @@ def test_synthetic_runner(self) -> None:
             (get_soo_surrogate_test_function(lazy=False), noise_std, 1)
             for noise_std in (0.0, 1.0, [0.0], [1.0])
         ]
-        for test_problem, noise_std, num_outcomes in (
+        for test_function, noise_std, num_outcomes in (
            botorch_cases + param_based_cases + surrogate_cases
         ):
             # Set up outcome names
-            if isinstance(test_problem, BoTorchTestFunction):
-                if isinstance(test_problem.botorch_problem, ConstrainedHartmann):
+            if isinstance(test_function, BoTorchTestFunction):
+                if isinstance(test_function.botorch_problem, ConstrainedHartmann):
                     outcome_names = ["objective_0", "constraint"]
                 else:
                     outcome_names = ["objective_0"]
-            elif isinstance(test_problem, DummyTestFunction):
+            elif isinstance(test_function, DummyTestFunction):
                 outcome_names = [f"objective_{i}" for i in range(num_outcomes)]
             else:  # SurrogateTestFunction
                 outcome_names = ["branin"]
 
             # Set up runner
             runner = BenchmarkRunner(
-                test_problem=test_problem,
+                test_function=test_function,
                 outcome_names=outcome_names,
                 noise_std=noise_std,
             )
-            test_description = f"{test_problem=}, {noise_std=}"
+            test_description = f"{test_function=}, {noise_std=}"
             with self.subTest(
-                f"Test basic construction, {test_problem=}, {noise_std=}"
+                f"Test basic construction, {test_function=}, {noise_std=}"
             ):
-                self.assertIs(runner.test_problem, test_problem)
+                self.assertIs(runner.test_function, test_function)
                 self.assertEqual(runner.outcome_names, outcome_names)
                 if isinstance(noise_std, list):
                     self.assertEqual(
@@ -177,17 +177,17 @@ def test_synthetic_runner(self) -> None:
 
             # check equality
             new_runner = replace(
-                runner, test_problem=BoTorchTestFunction(botorch_problem=Ackley())
+                runner, test_function=BoTorchTestFunction(botorch_problem=Ackley())
             )
             self.assertNotEqual(runner, new_runner)
             self.assertEqual(runner, runner)
 
-            if isinstance(test_problem, BoTorchTestFunction):
+            if isinstance(test_function, BoTorchTestFunction):
                 self.assertEqual(
-                    test_problem.botorch_problem.bounds.dtype, torch.double
+                    test_function.botorch_problem.bounds.dtype, torch.double
                 )
 
-            is_botorch = isinstance(test_problem, BoTorchTestFunction)
+            is_botorch = isinstance(test_function, BoTorchTestFunction)
             with self.subTest(f"test `get_Y_true()`, {test_description}"):
                 dim = 6 if is_botorch else 9
                 X = torch.rand(1, dim, dtype=torch.double)
@@ -202,11 +202,11 @@ def test_synthetic_runner(self) -> None:
                 with (
                     nullcontext()
-                    if not isinstance(test_problem, SurrogateTestFunction)
+                    if not isinstance(test_function, SurrogateTestFunction)
                     else patch.object(
                         # pyre-fixme: BenchmarkTestFunction` has no attribute
                         # `_surrogate`.
-                        runner.test_problem._surrogate,
+                        runner.test_function._surrogate,
                         "predict",
                         return_value=({"branin": [4.2]}, None),
                     )
                 ):
@@ -215,19 +215,19 @@ def test_synthetic_runner(self) -> None:
                     oracle = runner.evaluate_oracle(parameters=params)
 
                 if (
-                    isinstance(test_problem, BoTorchTestFunction)
-                    and test_problem.modified_bounds is not None
+                    isinstance(test_function, BoTorchTestFunction)
+                    and test_function.modified_bounds is not None
                 ):
                     X_tf = normalize(
                         X,
                         torch.tensor(
-                            test_problem.modified_bounds, dtype=torch.double
+                            test_function.modified_bounds, dtype=torch.double
                         ).T,
                     )
                 else:
                     X_tf = X
-                if isinstance(test_problem, BoTorchTestFunction):
-                    botorch_problem = test_problem.botorch_problem
+                if isinstance(test_function, BoTorchTestFunction):
+                    botorch_problem = test_function.botorch_problem
                     obj = botorch_problem.evaluate_true(X_tf)
                     if isinstance(botorch_problem, ConstrainedHartmann):
                         expected_Y = torch.cat(
@@ -239,7 +239,7 @@ def test_synthetic_runner(self) -> None:
                         )
                     else:
                         expected_Y = obj
-                elif isinstance(test_problem, SurrogateTestFunction):
+                elif isinstance(test_function, SurrogateTestFunction):
                     expected_Y = torch.tensor([4.2], dtype=torch.double)
                 else:
                     expected_Y = torch.full(
@@ -259,11 +259,11 @@ def test_synthetic_runner(self) -> None:
                 with (
                     nullcontext()
-                    if not isinstance(test_problem, SurrogateTestFunction)
+                    if not isinstance(test_function, SurrogateTestFunction)
                     else patch.object(
                         # pyre-fixme: BenchmarkTestFunction` has no attribute
                         # `_surrogate`.
-                        runner.test_problem._surrogate,
+                        runner.test_function._surrogate,
                         "predict",
                         return_value=({"branin": [4.2]}, None),
                     )
                 ):
@@ -300,7 +300,7 @@ def test_synthetic_runner(self) -> None:
     def test_botorch_test_problem_runner_heterogeneous_noise(self) -> None:
         for noise_std in [[0.1, 0.05], {"objective": 0.1, "constraint": 0.05}]:
             runner = BenchmarkRunner(
-                test_problem=BoTorchTestFunction(
+                test_function=BoTorchTestFunction(
                     botorch_problem=ConstrainedHartmann(dim=6)
                 ),
                 noise_std=noise_std,
diff --git a/ax/benchmark/tests/test_benchmark_problem.py b/ax/benchmark/tests/test_benchmark_problem.py
index 1b3d649d184..3e388583155 100644
--- a/ax/benchmark/tests/test_benchmark_problem.py
+++ b/ax/benchmark/tests/test_benchmark_problem.py
@@ -53,7 +53,7 @@ def test_inference_value_not_implemented(self) -> None:
         ]
         optimization_config = OptimizationConfig(objective=objectives[0])
         runner = BenchmarkRunner(
-            test_problem=BoTorchTestFunction(botorch_problem=Branin()),
+            test_function=BoTorchTestFunction(botorch_problem=Branin()),
             outcome_names=["foo"],
         )
         with self.assertRaisesRegex(NotImplementedError, "Only `n_best_points=1`"):
@@ -214,7 +214,7 @@ def _test_constrained_from_botorch(
             noise_std=noise_std,
         )
         runner = ax_problem.runner
-        test_problem = assert_is_instance(runner.test_problem, BoTorchTestFunction)
+        test_problem = assert_is_instance(runner.test_function, BoTorchTestFunction)
         botorch_problem = assert_is_instance(
             test_problem.botorch_problem, ConstrainedBaseTestProblem
         )
diff --git a/ax/utils/testing/benchmark_stubs.py b/ax/utils/testing/benchmark_stubs.py
index 46ce4c089a7..6aaf96ab9de 100644
--- a/ax/utils/testing/benchmark_stubs.py
+++ b/ax/utils/testing/benchmark_stubs.py
@@ -103,7 +103,7 @@ def get_soo_surrogate_test_function(lazy: bool = True) -> SurrogateTestFunction
 def get_soo_surrogate() -> BenchmarkProblem:
     experiment = get_branin_experiment(with_completed_trial=True)
     test_function = get_soo_surrogate_test_function()
-    runner = BenchmarkRunner(test_problem=test_function, outcome_names=["branin"])
+    runner = BenchmarkRunner(test_function=test_function, outcome_names=["branin"])
     observe_noise_sd = True
 
     objective = Objective(
@@ -140,7 +140,7 @@ def get_moo_surrogate() -> BenchmarkProblem:
         outcome_names=outcome_names,
         get_surrogate_and_datasets=lambda: (surrogate, []),
     )
-    runner = BenchmarkRunner(test_problem=test_function, outcome_names=outcome_names)
+    runner = BenchmarkRunner(test_function=test_function, outcome_names=outcome_names)
     observe_noise_sd = True
     optimization_config = MultiObjectiveOptimizationConfig(
         objective=MultiObjective(
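--
Example of the renamed call site (an illustrative sketch appended for
reviewers, not part of the diff; the import path for `BoTorchTestFunction`
below is an assumption, since its module is not touched by this patch):

    from botorch.test_functions import Branin

    from ax.benchmark.benchmark_runner import BenchmarkRunner
    # Assumed location of BoTorchTestFunction; check the repo for the actual
    # module, which this patch does not modify.
    from ax.benchmark.benchmark_test_functions.botorch_test import BoTorchTestFunction

    # Before this patch, the keyword (and attribute) was `test_problem`:
    #     BenchmarkRunner(test_problem=..., outcome_names=["branin"])
    # After this patch, it matches the attribute's type, BenchmarkTestFunction:
    runner = BenchmarkRunner(
        test_function=BoTorchTestFunction(botorch_problem=Branin()),
        outcome_names=["branin"],
        noise_std=0.1,  # per the docstring: a float, list, or per-metric dict
    )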