From 32b50914a5c294fa2129408f5126301be688d43d Mon Sep 17 00:00:00 2001
From: Elizabeth Santorella
Date: Thu, 24 Oct 2024 13:36:48 -0700
Subject: [PATCH] Only support negation implicitly, through BoTorch test
 problems (#2962)

Summary:
Context: Negation is handled quite strangely in benchmark runners, which
negate only objectives and not constraints (when `negate=True`). This
happens for historical reasons: BoTorch test problems are set up that way.
In theory, the `BenchmarkRunner` and its test function should not even
need to know what is a constraint vs. an objective; they should just
collect the data (D64909689). The `OptimizationConfig` should decide what
is an objective and what is a constraint.

This diff makes negation the responsibility of the `ParamBasedTestProblem`
(soon to be renamed `TestFunction`). Currently, negation is only supported
for `BoTorchTestProblem`s, but it could easily be added to other problems
in the future, and each problem can choose to handle negation as it sees
fit. Yes, this partly reverses a recent change that moved the `negate`
argument from the `ParamBasedTestProblem` to the `Runner`, but this is the
first time that negation *behavior* is handled within the test function.

This diff:
* Removes the `negate` argument from `create_problem_from_botorch` and
  from `ParamBasedTestProblemRunner`, instead implicitly requiring it to
  be set on the BoTorch `BaseTestProblem`.
* Removes a stray instance of `negate` in
  `PyTorchCNNTorchvisionParamBasedProblem`, which was already not doing
  anything.
* Removes an error complaining that `negate` should be set on the runner
  and not on a BoTorch test function.
* **Important**: Changes a call to `botorch_problem.evaluate_true(x)` to
  `self.botorch_problem(x)`, which ensures that negation is applied in the
  same way the runner was applying it.

Differential Revision: D64909689
---
 ax/benchmark/benchmark_problem.py             |  4 -
 ax/benchmark/problems/hpo/torchvision.py      |  1 -
 ax/benchmark/runners/botorch_test.py          | 18 +---
 .../problems/test_mixed_integer_problems.py   |  2 +-
 .../runners/test_botorch_test_problem.py      | 85 ++++++++++++-------
 ax/benchmark/tests/test_benchmark_problem.py  |  1 -
 6 files changed, 57 insertions(+), 54 deletions(-)

diff --git a/ax/benchmark/benchmark_problem.py b/ax/benchmark/benchmark_problem.py
index 154191531c2..d8711f0d2a4 100644
--- a/ax/benchmark/benchmark_problem.py
+++ b/ax/benchmark/benchmark_problem.py
@@ -303,7 +303,6 @@ def create_problem_from_botorch(
     test_problem_kwargs: dict[str, Any],
     noise_std: float | list[float] | None = None,
     constraint_noise_std: float | list[float] | None = None,
-    negate: bool = False,
     num_trials: int,
     lower_is_better: bool = True,
     observe_noise_sd: bool = False,
@@ -327,8 +326,6 @@ def create_problem_from_botorch(
             for all objectives.
         constraint_noise_std: Standard deviation of synthetic noise added to
             constraints.
-        negate: Whether the values produced by the test function should be
-            negated. Does not apply to constraints.
         lower_is_better: Whether this is a minimization problem. For MOO, this
             applies to all objectives.
         num_trials: Simply the `num_trials` of the `BenchmarkProblem` created.
@@ -395,7 +392,6 @@ def create_problem_from_botorch(
                 param_names=list(search_space.parameters.keys()),
             ),
             noise_std=noise_std,
-            negate=negate,
             constraint_noise_std=constraint_noise_std,
         ),
         num_trials=num_trials,
diff --git a/ax/benchmark/problems/hpo/torchvision.py b/ax/benchmark/problems/hpo/torchvision.py
index 2245423d2e2..8edf610c5c8 100644
--- a/ax/benchmark/problems/hpo/torchvision.py
+++ b/ax/benchmark/problems/hpo/torchvision.py
@@ -124,7 +124,6 @@ class PyTorchCNNTorchvisionParamBasedProblem(ParamBasedTestProblem):
             "cuda" if torch.cuda.is_available() else "cpu"
         )
     )
-    negate: bool = False
    # Using `InitVar` prevents the DataLoaders from being serialized; instead
    # they are reconstructed upon deserialization.
    # Pyre doesn't understand InitVars.
diff --git a/ax/benchmark/runners/botorch_test.py b/ax/benchmark/runners/botorch_test.py
index a1a96e08147..35058855726 100644
--- a/ax/benchmark/runners/botorch_test.py
+++ b/ax/benchmark/runners/botorch_test.py
@@ -32,12 +32,7 @@ class ParamBasedTestProblem(ABC):
 
     @abstractmethod
     def evaluate_true(self, params: Mapping[str, TParamValue]) -> Tensor:
-        """
-        Evaluate noiselessly.
-
-        This method should not depend on the value of `negate`, as negation is
-        handled by the runner.
-        """
+        """Evaluate noiselessly."""
         ...
 
     def evaluate_slack_true(self, params: Mapping[str, TParamValue]) -> Tensor:
@@ -81,10 +76,6 @@ def __post_init__(self) -> None:
                 "constraint_noise_std should be set on the runner, not the test "
                 "problem."
             )
-        if self.botorch_problem.negate:
-            raise ValueError(
-                "negate should be set on the runner, not the test problem."
-            )
         self.botorch_problem = self.botorch_problem.to(dtype=torch.double)
 
     def tensorize_params(self, params: Mapping[str, int | float]) -> torch.Tensor:
@@ -105,7 +96,7 @@ def tensorize_params(self, params: Mapping[str, int | float]) -> torch.Tensor:
     # pyre-fixme [14]: inconsistent override
     def evaluate_true(self, params: Mapping[str, float | int]) -> torch.Tensor:
         x = self.tensorize_params(params=params)
-        return self.botorch_problem.evaluate_true(x)
+        return self.botorch_problem(x)
 
     # pyre-fixme [14]: inconsistent override
     def evaluate_slack_true(self, params: Mapping[str, float | int]) -> torch.Tensor:
@@ -138,13 +129,11 @@ class ParamBasedTestProblemRunner(BenchmarkRunner):
             deterministic data before adding noise.
         noise_std: The standard deviation of the noise added to the data. Can be
             a list to be per-metric.
-        negate: Whether to negate the outcome.
     """
 
     test_problem: ParamBasedTestProblem
     noise_std: float | list[float] | None = None
     constraint_noise_std: float | list[float] | None = None
-    negate: bool = False
 
     @property
     def _is_constrained(self) -> bool:
@@ -196,9 +185,6 @@ def get_Y_true(self, params: Mapping[str, TParamValue]) -> Tensor:
             A `batch_shape x m`-dim tensor of ground truth (noiseless) evaluations.
         """
         Y_true = self.test_problem.evaluate_true(params).view(-1)
-        # `ParamBasedTestProblem.evaluate_true()` does not negate the outcome
-        if self.negate:
-            Y_true = -Y_true
         if self._is_constrained:
             # Convention: Concatenate objective and black box constraints. `view()`
             # makes the inputs 1d, so the resulting `Y_true` are also 1d.
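Note on the **Important** change above: calling a BoTorch `BaseTestProblem`
object dispatches through `forward`, which applies `negate` (and would add
noise if `noise_std` were set, but `__post_init__` enforces that it is
unset here), whereas `evaluate_true` never negates. A minimal sketch of the
difference, assuming BoTorch's synthetic `Hartmann`; this snippet is
illustrative and not part of the patch:

    import torch
    from botorch.test_functions.synthetic import Hartmann

    problem = Hartmann(dim=6, negate=True)
    x = torch.full((6,), 0.5)

    # `evaluate_true` returns the raw ground-truth value; `negate` is ignored.
    raw = problem.evaluate_true(x)
    # `__call__` goes through `forward`, which applies `negate`; no noise is
    # added because `noise_std` is None.
    negated = problem(x)

    assert torch.allclose(negated, -raw)

This is why `ParamBasedTestProblemRunner` no longer needs its own `negate`
flag, and why constraint slacks (via `evaluate_slack_true`) stay
un-negated, matching the runner's old behavior.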
diff --git a/ax/benchmark/tests/problems/test_mixed_integer_problems.py b/ax/benchmark/tests/problems/test_mixed_integer_problems.py
index 2a38606542b..694dfd63a66 100644
--- a/ax/benchmark/tests/problems/test_mixed_integer_problems.py
+++ b/ax/benchmark/tests/problems/test_mixed_integer_problems.py
@@ -113,5 +113,5 @@ def test_problems(self) -> None:
                 wraps=test_problem.botorch_problem.evaluate_true,
             ) as mock_call:
                 runner.run(trial)
-            actual = mock_call.call_args[0][0]
+            actual = mock_call.call_args.kwargs["X"]
             self.assertTrue(torch.allclose(actual, expected_arg))
diff --git a/ax/benchmark/tests/runners/test_botorch_test_problem.py b/ax/benchmark/tests/runners/test_botorch_test_problem.py
index fa2d9555c29..6f7eff3571c 100644
--- a/ax/benchmark/tests/runners/test_botorch_test_problem.py
+++ b/ax/benchmark/tests/runners/test_botorch_test_problem.py
@@ -30,37 +30,69 @@
 from botorch.utils.transforms import normalize
 
 
-class TestSyntheticRunner(TestCase):
-    def test_runner_raises_for_botorch_attrs(self) -> None:
+class TestBoTorchTestProblem(TestCase):
+    def setUp(self) -> None:
+        super().setUp()
+        botorch_base_test_functions = {
+            "base Hartmann": Hartmann(dim=6),
+            "negated Hartmann": Hartmann(dim=6, negate=True),
+            "constrained Hartmann": ConstrainedHartmann(dim=6),
+            "negated constrained Hartmann": ConstrainedHartmann(dim=6, negate=True),
+        }
+        self.botorch_test_problems = {
+            k: BoTorchTestProblem(botorch_problem=v)
+            for k, v in botorch_base_test_functions.items()
+        }
+
+    def test_negation(self) -> None:
+        params = {f"x{i}": 0.5 for i in range(6)}
+        evaluate_true_results = {
+            k: v.evaluate_true(params) for k, v in self.botorch_test_problems.items()
+        }
+        self.assertEqual(
+            evaluate_true_results["base Hartmann"],
+            evaluate_true_results["constrained Hartmann"],
+        )
+        self.assertEqual(
+            evaluate_true_results["base Hartmann"],
+            -evaluate_true_results["negated Hartmann"],
+        )
+        self.assertEqual(
+            evaluate_true_results["negated Hartmann"],
+            evaluate_true_results["negated constrained Hartmann"],
+        )
+
+        self.assertEqual(
+            self.botorch_test_problems["constrained Hartmann"].evaluate_slack_true(
+                params
+            ),
+            self.botorch_test_problems[
+                "negated constrained Hartmann"
+            ].evaluate_slack_true(params),
+        )
+
+    def test_unsupported_error(self) -> None:
+        test_function = BoTorchTestProblem(botorch_problem=Hartmann(dim=6))
+        with self.assertRaisesRegex(
+            UnsupportedError, "`evaluate_slack_true` is only supported when"
+        ):
+            test_function.evaluate_slack_true({"a": 3})
+
+    def test_raises_for_botorch_attrs(self) -> None:
         with self.assertRaisesRegex(
             ValueError, "noise_std should be set on the runner, not the test problem."
         ):
-            ParamBasedTestProblemRunner(
-                test_problem=BoTorchTestProblem(
-                    botorch_problem=Hartmann(dim=6, noise_std=0.1)
-                ),
-                outcome_names=["objective"],
-            )
+            BoTorchTestProblem(botorch_problem=Hartmann(dim=6, noise_std=0.1))
         with self.assertRaisesRegex(
             ValueError,
             "constraint_noise_std should be set on the runner, not the test problem.",
         ):
-            ParamBasedTestProblemRunner(
-                test_problem=BoTorchTestProblem(
-                    botorch_problem=ConstrainedHartmann(dim=6, constraint_noise_std=0.1)
-                ),
-                outcome_names=["objective", "constraint"],
-            )
-        with self.assertRaisesRegex(
-            ValueError, "negate should be set on the runner, not the test problem."
-        ):
-            ParamBasedTestProblemRunner(
-                test_problem=BoTorchTestProblem(
-                    botorch_problem=Hartmann(dim=6, negate=True)
-                ),
-                outcome_names=["objective"],
+            BoTorchTestProblem(
+                botorch_problem=ConstrainedHartmann(dim=6, constraint_noise_std=0.1)
             )
 
+
+class TestSyntheticRunner(TestCase):
     def setUp(self) -> None:
         super().setUp()
         self.maxDiff = None
@@ -164,8 +196,6 @@ def test_synthetic_runner(self) -> None:
                 if isinstance(test_problem, BoTorchTestProblem):
                     botorch_problem = test_problem.botorch_problem
                     obj = botorch_problem.evaluate_true(X_tf)
-                    if runner.negate:
-                        obj = -obj
                     if runner._is_constrained:
                         expected_Y = torch.cat(
                             [
@@ -243,10 +273,3 @@ def test_botorch_test_problem_runner_heterogeneous_noise(self) -> None:
         self.assertSetEqual(set(res["Ys"].keys()), {"0_0"})
         self.assertEqual(res["Ystds"]["0_0"], [0.1, 0.05])
         self.assertEqual(res["outcome_names"], ["objective", "constraint"])
-
-    def test_unsupported_error(self) -> None:
-        test_function = BoTorchTestProblem(botorch_problem=Hartmann(dim=6))
-        with self.assertRaisesRegex(
-            UnsupportedError, "`evaluate_slack_true` is only supported when"
-        ):
-            test_function.evaluate_slack_true({"a": 3})
diff --git a/ax/benchmark/tests/test_benchmark_problem.py b/ax/benchmark/tests/test_benchmark_problem.py
index 4c836ed1d2b..523a0d3444e 100644
--- a/ax/benchmark/tests/test_benchmark_problem.py
+++ b/ax/benchmark/tests/test_benchmark_problem.py
@@ -386,7 +386,6 @@ def test_get_oracle_experiment_from_experiment(self) -> None:
             test_problem_class=Branin,
             test_problem_kwargs={},
             num_trials=5,
-            negate=True,
         )
 
         # empty experiment
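Call sites migrate by moving `negate` into the kwargs forwarded to the
BoTorch problem's constructor, since `negate` is a standard
`BaseTestProblem` argument. A hedged sketch of the migration (argument
values are illustrative, mirroring the test updated above):

    from ax.benchmark.benchmark_problem import create_problem_from_botorch
    from botorch.test_functions.synthetic import Branin

    # Before this diff:
    # problem = create_problem_from_botorch(
    #     test_problem_class=Branin,
    #     test_problem_kwargs={},
    #     num_trials=5,
    #     negate=True,
    # )

    # After this diff, negation is configured on the BoTorch problem itself:
    problem = create_problem_from_botorch(
        test_problem_class=Branin,
        test_problem_kwargs={"negate": True},
        num_trials=5,
    )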