diff --git a/botorch/acquisition/input_constructors.py b/botorch/acquisition/input_constructors.py
index e4472a76c2..f145781da5 100644
--- a/botorch/acquisition/input_constructors.py
+++ b/botorch/acquisition/input_constructors.py
@@ -65,6 +65,7 @@
 from botorch.acquisition.multi_objective.hypervolume_knowledge_gradient import (
     _get_hv_value_function,
     qHypervolumeKnowledgeGradient,
+    qMultiFidelityHypervolumeKnowledgeGradient,
 )
 from botorch.acquisition.multi_objective.logei import (
     qLogExpectedHypervolumeImprovement,
@@ -1274,21 +1275,6 @@ def construct_inputs_qKG(
     return inputs_qkg
 
 
-def _get_ref_point(
-    objective_thresholds: Tensor,
-    objective: Optional[MCMultiOutputObjective] = None,
-) -> Tensor:
-
-    if objective is None:
-        ref_point = objective_thresholds
-    elif isinstance(objective, RiskMeasureMCObjective):
-        ref_point = objective.preprocessing_function(objective_thresholds)
-    else:
-        ref_point = objective(objective_thresholds)
-
-    return ref_point
-
-
 @acqf_input_constructor(qHypervolumeKnowledgeGradient)
 def construct_inputs_qHVKG(
     model: Model,
@@ -1381,6 +1367,76 @@ def construct_inputs_qMFKG(
     }
 
 
+@acqf_input_constructor(qMultiFidelityHypervolumeKnowledgeGradient)
+def construct_inputs_qMFHVKG(
+    model: Model,
+    training_data: MaybeDict[SupervisedDataset],
+    bounds: list[tuple[float, float]],
+    target_fidelities: dict[int, Union[int, float]],
+    objective_thresholds: Tensor,
+    objective: Optional[MCMultiOutputObjective] = None,
+    posterior_transform: Optional[PosteriorTransform] = None,
+    fidelity_weights: Optional[dict[int, float]] = None,
+    cost_intercept: float = 1.0,
+    num_trace_observations: int = 0,
+    num_fantasies: int = 8,
+    num_pareto: int = 10,
+    **optimize_objective_kwargs: TOptimizeObjectiveKwargs,
+) -> dict[str, Any]:
+    r"""
+    Construct kwargs for `qMultiFidelityHypervolumeKnowledgeGradient` constructor.
+    """
+
+    inputs_mf = construct_inputs_mf_base(
+        target_fidelities=target_fidelities,
+        fidelity_weights=fidelity_weights,
+        cost_intercept=cost_intercept,
+        num_trace_observations=num_trace_observations,
+    )
+
+    if num_trace_observations > 0:
+        raise NotImplementedError(
+            "Trace observations are not currently supported "
+            "by `qMultiFidelityHypervolumeKnowledgeGradient`."
+        )
+
+    del inputs_mf["expand"]
+
+    X = _get_dataset_field(training_data, "X", first_only=True)
+    _bounds = torch.as_tensor(bounds, dtype=X.dtype, device=X.device)
+
+    ref_point = _get_ref_point(
+        objective_thresholds=objective_thresholds, objective=objective
+    )
+
+    acq_function = _get_hv_value_function(
+        model=model,
+        ref_point=ref_point,
+        use_posterior_mean=True,
+        objective=objective,
+    )
+
+    _, current_value = optimize_objective(
+        model=model,
+        bounds=_bounds.t(),
+        q=num_pareto,
+        acq_function=acq_function,
+        fixed_features=target_fidelities,
+        **optimize_objective_kwargs,
+    )
+
+    return {
+        "model": model,
+        "objective": objective,
+        "ref_point": ref_point,
+        "num_fantasies": num_fantasies,
+        "num_pareto": num_pareto,
+        "current_value": current_value.detach().cpu().max(),
+        "target_fidelities": target_fidelities,
+        **inputs_mf,
+    }
+
+
 @acqf_input_constructor(qMultiFidelityMaxValueEntropy)
 def construct_inputs_qMFMES(
     model: Model,
@@ -1806,3 +1862,18 @@ def construct_inputs_NIPV(
         "posterior_transform": posterior_transform,
     }
     return inputs
+
+
+def _get_ref_point(
+    objective_thresholds: Tensor,
+    objective: Optional[MCMultiOutputObjective] = None,
+) -> Tensor:
+
+    if objective is None:
+        ref_point = objective_thresholds
+    elif isinstance(objective, RiskMeasureMCObjective):
+        ref_point = objective.preprocessing_function(objective_thresholds)
+    else:
+        ref_point = objective(objective_thresholds)
+
+    return ref_point
diff --git a/botorch/acquisition/multi_objective/hypervolume_knowledge_gradient.py b/botorch/acquisition/multi_objective/hypervolume_knowledge_gradient.py
index 260085df77..df62947074 100644
--- a/botorch/acquisition/multi_objective/hypervolume_knowledge_gradient.py
+++ b/botorch/acquisition/multi_objective/hypervolume_knowledge_gradient.py
@@ -389,7 +389,10 @@ def __init__(
         )
         self.project = project
         if kwargs.get("expand") is not None:
-            raise NotImplementedError("Trace observations are not currently supported.")
+            raise NotImplementedError(
+                "Trace observations are not currently supported "
+                "by `qMultiFidelityHypervolumeKnowledgeGradient`."
+            )
         self.expand = lambda X: X
         self.valfunc_cls = valfunc_cls
         self.valfunc_argfac = valfunc_argfac
diff --git a/test/acquisition/test_input_constructors.py b/test/acquisition/test_input_constructors.py
index f65e433786..ecab92375d 100644
--- a/test/acquisition/test_input_constructors.py
+++ b/test/acquisition/test_input_constructors.py
@@ -74,6 +74,7 @@
 )
 from botorch.acquisition.multi_objective.hypervolume_knowledge_gradient import (
     qHypervolumeKnowledgeGradient,
+    qMultiFidelityHypervolumeKnowledgeGradient,
 )
 from botorch.acquisition.multi_objective.logei import (
@@ -1296,26 +1297,20 @@ def test_construct_inputs_kg(self) -> None:
         )
         self.assertNotIn("current_value", kwargs)
 
-    def test_construct_inputs_hvkg(self) -> None:
-        model = mock.Mock()
-        current_value = torch.tensor(1.23)
+    def test_construct_inputs_mfhvkg(self) -> None:
 
-        objective_thresholds = torch.rand(2)
-        objective = IdentityMCMultiOutputObjective()
+        get_kwargs = get_acqf_input_constructor(
+            qMultiFidelityHypervolumeKnowledgeGradient
+        )
 
-        get_kwargs = get_acqf_input_constructor(qHypervolumeKnowledgeGradient)
+        model = mock.Mock()
+        objective = IdentityMCMultiOutputObjective()
+        objective_thresholds = torch.rand(2)
 
-        with (
-            mock.patch(
-                target="botorch.acquisition.input_constructors._get_hv_value_function",
-            ) as mock_get_hv_value_function,
-            mock.patch(
-                target="botorch.acquisition.input_constructors.optimize_acqf",
-                return_value=(None, current_value),
-            ) as mock_optimize_acqf,
+        with self.assertRaisesRegex(
+            NotImplementedError, "Trace observations are not currently supported"
         ):
-
-            kwargs = get_kwargs(
+            get_kwargs(
                 model=model,
                 training_data=self.blockX_blockY,
                 objective_thresholds=objective_thresholds,
@@ -1323,50 +1318,41 @@ def test_construct_inputs_hvkg(self) -> None:
                 bounds=self.bounds,
                 num_fantasies=33,
                 num_pareto=11,
+                target_fidelities={0: 0.987},
+                fidelity_weights={0: 0.654},
+                cost_intercept=0.321,
+                num_trace_observations=5,
             )
 
-        self.assertEqual(
-            mock_get_hv_value_function.call_args.kwargs["model"], model
-        )
-        self.assertEqual(
-            mock_get_hv_value_function.call_args.kwargs["objective"], objective
-        )
-        self.assertTrue(
-            torch.equal(
-                mock_get_hv_value_function.call_args.kwargs["ref_point"],
-                objective_thresholds,
-            )
-        )
+    @mock.patch("botorch.acquisition.input_constructors._get_hv_value_function")
+    def test_construct_inputs_hvkg(self, mock_get_hv_value_function) -> None:
 
-        # check that `optimize_acqf` is called with the desired value function
-        self.assertEqual(
-            mock_optimize_acqf.call_args.kwargs["acq_function"],
-            mock_get_hv_value_function(),
-        )
+        current_value = torch.tensor(1.23)
+        objective_thresholds = torch.rand(2)
 
-        self.assertLessEqual(
-            {
-                "model",
-                "ref_point",
-                "num_fantasies",
-                "num_pareto",
-                "objective",
-                "current_value",
-            },
-            set(kwargs.keys()),
-        )
-        self.assertEqual(kwargs["num_fantasies"], 33)
-        self.assertEqual(kwargs["num_pareto"], 11)
-        self.assertEqual(kwargs["current_value"], current_value)
-        self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds))
+        for acqf_cls in (
+            qHypervolumeKnowledgeGradient,
+            qMultiFidelityHypervolumeKnowledgeGradient,
+        ):
+
+            get_kwargs = get_acqf_input_constructor(acqf_cls)
+
+            model = mock.Mock()
+            objective = IdentityMCMultiOutputObjective()
+
+            input_constructor_extra_kwargs = {}
+            if acqf_cls == qMultiFidelityHypervolumeKnowledgeGradient:
+                input_constructor_extra_kwargs.update(
+                    target_fidelities={0: 0.987},
+                    fidelity_weights={0: 0.654},
+                    cost_intercept=0.321,
+                )
 
-        with self.subTest("custom objective"):
-            weights = torch.rand(2)
-            objective = WeightedMCMultiOutputObjective(weights=weights)
             with mock.patch(
                 target="botorch.acquisition.input_constructors.optimize_acqf",
                 return_value=(None, current_value),
             ) as mock_optimize_acqf:
+
                 kwargs = get_kwargs(
                     model=model,
                     training_data=self.blockX_blockY,
                     objective_thresholds=objective_thresholds,
@@ -1375,22 +1361,53 @@ def test_construct_inputs_hvkg(self) -> None:
                     bounds=self.bounds,
                     num_fantasies=33,
                     num_pareto=11,
+                    **input_constructor_extra_kwargs,
                 )
-            self.assertIsInstance(kwargs["objective"], WeightedMCMultiOutputObjective)
-            self.assertTrue(
-                torch.equal(kwargs["ref_point"], objective_thresholds * weights)
-            )
-        with self.subTest("risk measures"):
-            for use_preprocessing in (True, False):
-                objective = MultiOutputExpectation(
-                    n_w=3,
-                    preprocessing_function=(
-                        WeightedMCMultiOutputObjective(torch.tensor([-1.0, -1.0]))
-                        if use_preprocessing
-                        else None
-                    ),
+            self.assertEqual(
+                mock_get_hv_value_function.call_args.kwargs["model"], model
+            )
+            self.assertEqual(
+                mock_get_hv_value_function.call_args.kwargs["objective"], objective
+            )
+            self.assertTrue(
+                torch.equal(
+                    mock_get_hv_value_function.call_args.kwargs["ref_point"],
+                    objective_thresholds,
+                )
             )
+
+            # check that `optimize_acqf` is called with the desired value function
+            if acqf_cls == qMultiFidelityHypervolumeKnowledgeGradient:
+                self.assertIsInstance(
+                    mock_optimize_acqf.call_args.kwargs["acq_function"],
+                    FixedFeatureAcquisitionFunction,
+                )
+            else:
+                self.assertEqual(
+                    mock_optimize_acqf.call_args.kwargs["acq_function"],
+                    mock_get_hv_value_function(),
+                )
+
+            self.assertLessEqual(
+                {
+                    "model",
+                    "ref_point",
+                    "num_fantasies",
+                    "num_pareto",
+                    "objective",
+                    "current_value",
+                },
+                set(kwargs.keys()),
+            )
+            self.assertEqual(kwargs["num_fantasies"], 33)
+            self.assertEqual(kwargs["num_pareto"], 11)
+            self.assertEqual(kwargs["current_value"], current_value)
+            self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds))
+
+        with self.subTest("custom objective"):
+            weights = torch.rand(2)
+            objective = WeightedMCMultiOutputObjective(weights=weights)
             with mock.patch(
                 target="botorch.acquisition.input_constructors.optimize_acqf",
                 return_value=(None, current_value),
             ) as mock_optimize_acqf:
@@ -1403,11 +1420,45 @@ def test_construct_inputs_hvkg(self) -> None:
                     bounds=self.bounds,
                     num_fantasies=33,
                     num_pareto=11,
+                    **input_constructor_extra_kwargs,
                 )
-            expected_obj_t = (
-                -objective_thresholds if use_preprocessing else objective_thresholds
+            self.assertIsInstance(
+                kwargs["objective"], WeightedMCMultiOutputObjective
             )
-            self.assertTrue(torch.equal(kwargs["ref_point"], expected_obj_t))
+            self.assertTrue(
+                torch.equal(kwargs["ref_point"], objective_thresholds * weights)
+            )
+
+        with self.subTest("risk measures"):
+            for use_preprocessing in (True, False):
+                objective = MultiOutputExpectation(
+                    n_w=3,
+                    preprocessing_function=(
+                        WeightedMCMultiOutputObjective(torch.tensor([-1.0, -1.0]))
+                        if use_preprocessing
+                        else None
+                    ),
+                )
+                with mock.patch(
+                    target="botorch.acquisition.input_constructors.optimize_acqf",
+                    return_value=(None, current_value),
+                ) as mock_optimize_acqf:
+                    kwargs = get_kwargs(
+                        model=model,
+                        training_data=self.blockX_blockY,
+                        objective_thresholds=objective_thresholds,
+                        objective=objective,
+                        bounds=self.bounds,
+                        num_fantasies=33,
+                        num_pareto=11,
+                        **input_constructor_extra_kwargs,
+                    )
+                    expected_obj_t = (
+                        -objective_thresholds
+                        if use_preprocessing
+                        else objective_thresholds
+                    )
+                    self.assertTrue(torch.equal(kwargs["ref_point"], expected_obj_t))
 
     def test_construct_inputs_mes(self) -> None:
         func = get_acqf_input_constructor(qMaxValueEntropy)
@@ -1733,7 +1784,6 @@ def setUp(self, suppress_input_warnings: bool = True) -> None:
         m1 = SingleTaskGP(X, Y1)
         m2 = SingleTaskGP(X, Y2)
         model_list = ModelListGP(m1, m2)
-
         self.cases["HV Look-ahead"] = (
             [qHypervolumeKnowledgeGradient],
             {
@@ -1743,6 +1793,18 @@ def setUp(self, suppress_input_warnings: bool = True) -> None:
                 "objective_thresholds": objective_thresholds,
             },
        )
+        self.cases["MF HV Look-ahead"] = (
+            [qMultiFidelityHypervolumeKnowledgeGradient],
+            {
+                "model": model_list,
+                "training_data": self.blockX_blockY,
+                "bounds": bounds,
+                "target_fidelities": {0: 0.987},
+                "num_fantasies": 30,
+                "objective_thresholds": objective_thresholds,
+            },
+        )
+
         pref_model = self.mock_model
         pref_model.dim = 2
         pref_model.datapoints = torch.tensor([])
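
A minimal usage sketch of the input constructor registered above, resolved through the `get_acqf_input_constructor` dispatcher. The toy model, dataset, bounds, fidelity column, and objective thresholds below are illustrative assumptions, not part of the patch:

import torch

from botorch.acquisition.input_constructors import get_acqf_input_constructor
from botorch.acquisition.multi_objective.hypervolume_knowledge_gradient import (
    qMultiFidelityHypervolumeKnowledgeGradient,
)
from botorch.models import ModelListGP, SingleTaskGP
from botorch.utils.datasets import SupervisedDataset

# Toy two-objective, three-input problem; the last column is the fidelity.
X = torch.rand(8, 3, dtype=torch.double)
Y1 = torch.rand(8, 1, dtype=torch.double)
Y2 = torch.rand(8, 1, dtype=torch.double)
model = ModelListGP(SingleTaskGP(X, Y1), SingleTaskGP(X, Y2))
training_data = SupervisedDataset(
    X,
    torch.cat([Y1, Y2], dim=-1),
    feature_names=["x1", "x2", "s"],
    outcome_names=["y1", "y2"],
)

# Look up the constructor registered by the `@acqf_input_constructor` decorator.
constructor = get_acqf_input_constructor(qMultiFidelityHypervolumeKnowledgeGradient)
kwargs = constructor(
    model=model,
    training_data=training_data,
    bounds=[(0.0, 1.0)] * 3,
    target_fidelities={2: 1.0},  # evaluate hypervolume at the highest fidelity
    objective_thresholds=torch.zeros(2, dtype=torch.double),
)
acqf = qMultiFidelityHypervolumeKnowledgeGradient(**kwargs)

Note that building `kwargs` is not free: the constructor computes `current_value` by maximizing the hypervolume value function with the fidelity feature fixed to its target via `optimize_objective`, so a small inner optimization runs before the acquisition function is instantiated.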