Commit d841f2b

Fix input constructor for qMultiFidelityKnowledgeGradient by fixing fidelity dimensions when computing `current_value` (#2519)

Summary:
Pull Request resolved: #2519

The input constructor for `qMultiFidelityKnowledgeGradient` computed `current_value` in a way that was effectively not fidelity-aware: the fidelity dimensions were not fixed in the optimization used to obtain `current_value`, because the constructor simply re-used `construct_inputs_qKG`, the input constructor for the single-fidelity `qKnowledgeGradient`.
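
For illustration, a minimal sketch of the old, non-fidelity-aware behavior (a hypothetical 3-d problem with the fidelity parameter in column 2; toy data, not code from this diff): the posterior mean is optimized over the full space, so the maximizer may sit at an arbitrary, non-target fidelity.

import torch
from botorch.acquisition import PosteriorMean
from botorch.models import SingleTaskGP
from botorch.optim import optimize_acqf

# Toy model on a 3-d space whose last column is the fidelity parameter.
model = SingleTaskGP(train_X=torch.rand(8, 3), train_Y=torch.rand(8, 1))

# The fidelity dimension is NOT fixed here, so the maximizer of the
# posterior mean may be located at any (non-target) fidelity.
_, current_value = optimize_acqf(
    acq_function=PosteriorMean(model),
    bounds=torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]),
    q=1,
    num_restarts=4,
    raw_samples=16,
)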

This diff addresses the issue by passing `target_fidelities` as the `fixed_features` argument to `optimize_objective` (see D62380369), so that the fidelity dimensions are held at their target values while optimizing for `current_value`.
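
Continuing the sketch above: fixing the fidelity column to its target value before optimizing, which is what passing `fixed_features=target_fidelities` to `optimize_objective` accomplishes under the hood (the updated test below asserts that a `FixedFeatureAcquisitionFunction` reaches `optimize_acqf`).

from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction

target_fidelities = {2: 1.0}

# Pin the fidelity column to its target value; the wrapped acquisition
# function is then optimized only over the remaining two dimensions.
acqf = FixedFeatureAcquisitionFunction(
    acq_function=PosteriorMean(model),
    d=3,  # dimension of the full input space
    columns=list(target_fidelities.keys()),
    values=list(target_fidelities.values()),
)
_, current_value = optimize_acqf(
    acq_function=acqf,
    bounds=torch.tensor([[0.0, 0.0], [1.0, 1.0]]),  # non-fidelity dims only
    q=1,
    num_restarts=4,
    raw_samples=16,
)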

Reviewed By: Balandat

Differential Revision: D62391106

fbshipit-source-id: acf4f4a9a3644bb467decac70ce38a39c1c9ffc8
ltiao authored and facebook-github-bot committed Sep 10, 2024
1 parent 33e11f4 commit d841f2b
Showing 2 changed files with 62 additions and 17 deletions.
22 changes: 17 additions & 5 deletions botorch/acquisition/input_constructors.py
@@ -1255,26 +1255,38 @@ def construct_inputs_qMFKG(
     cost_intercept: float = 1.0,
     num_trace_observations: int = 0,
     num_fantasies: int = 64,
+    **optimize_objective_kwargs: TOptimizeObjectiveKwargs,
 ) -> dict[str, Any]:
     r"""Construct kwargs for `qMultiFidelityKnowledgeGradient` constructor."""
 
+    X = _get_dataset_field(training_data, "X", first_only=True)
+    _bounds = torch.as_tensor(bounds, dtype=X.dtype, device=X.device)
+
     inputs_mf = construct_inputs_mf_base(
         target_fidelities=target_fidelities,
         fidelity_weights=fidelity_weights,
         cost_intercept=cost_intercept,
         num_trace_observations=num_trace_observations,
     )
 
-    inputs_kg = construct_inputs_qKG(
+    _, current_value = optimize_objective(
         model=model,
-        training_data=training_data,
-        bounds=bounds,
+        bounds=_bounds.t(),
+        q=1,
         objective=objective,
         posterior_transform=posterior_transform,
-        num_fantasies=num_fantasies,
+        fixed_features=target_fidelities,
+        **optimize_objective_kwargs,
     )
 
-    return {**inputs_mf, **inputs_kg}
+    return {
+        "model": model,
+        "objective": objective,
+        "posterior_transform": posterior_transform,
+        "num_fantasies": num_fantasies,
+        "current_value": current_value.detach().cpu().max(),
+        **inputs_mf,
+    }
 
 
 @acqf_input_constructor(qMultiFidelityMaxValueEntropy)
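
For context, a hedged usage sketch of the updated constructor (toy model, dataset, and hyperparameters are illustrative, not from this diff): the kwargs it returns, including the now fidelity-aware `current_value`, feed directly into `qMultiFidelityKnowledgeGradient`.

import torch
from botorch.acquisition.input_constructors import get_acqf_input_constructor
from botorch.acquisition.knowledge_gradient import qMultiFidelityKnowledgeGradient
from botorch.models import SingleTaskGP
from botorch.utils.datasets import SupervisedDataset

train_X = torch.rand(8, 3)
train_Y = torch.rand(8, 1)
model = SingleTaskGP(train_X=train_X, train_Y=train_Y)
dataset = SupervisedDataset(
    X=train_X,
    Y=train_Y,
    feature_names=["x0", "x1", "fidelity"],
    outcome_names=["y"],
)

# The constructor now fixes the fidelity dimension (column 2 here) to its
# target value when optimizing for `current_value`.
input_constructor = get_acqf_input_constructor(qMultiFidelityKnowledgeGradient)
kwargs = input_constructor(
    model=model,
    training_data=dataset,
    bounds=[(0.0, 1.0)] * 3,
    target_fidelities={2: 1.0},
    num_fantasies=8,
)
acqf = qMultiFidelityKnowledgeGradient(**kwargs)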
57 changes: 45 additions & 12 deletions test/acquisition/test_input_constructors.py
@@ -1304,28 +1304,49 @@ def test_construct_inputs_mf_base(self) -> None:
         )
 
     def test_construct_inputs_mfkg(self) -> None:
+        current_value = torch.tensor(1.23)
+
         constructor_args = {
-            "model": None,
+            "model": self.mock_model,
             "training_data": self.blockX_blockY,
-            "objective": None,
             "bounds": self.bounds,
-            "num_fantasies": 123,
             "target_fidelities": {0: 0.987},
+            "objective": None,
             "fidelity_weights": {0: 0.654},
             "cost_intercept": 0.321,
+            "num_fantasies": 123,
         }
 
         input_constructor = get_acqf_input_constructor(qMultiFidelityKnowledgeGradient)
         with mock.patch(
-            target="botorch.acquisition.input_constructors.construct_inputs_mf_base",
-            return_value={"foo": 0},
-        ), mock.patch(
-            target="botorch.acquisition.input_constructors.construct_inputs_qKG",
-            return_value={"bar": 1},
-        ):
+            target="botorch.acquisition.input_constructors.optimize_acqf",
+            return_value=(None, current_value),
+        ) as mock_optimize_acqf:
             inputs_mfkg = input_constructor(**constructor_args)
-        inputs_test = {"foo": 0, "bar": 1}
-        self.assertEqual(inputs_mfkg, inputs_test)
+
+        mock_optimize_acqf_kwargs = mock_optimize_acqf.call_args.kwargs
+
+        self.assertIsInstance(
+            mock_optimize_acqf_kwargs["acq_function"],
+            FixedFeatureAcquisitionFunction,
+        )
+        self.assertLessEqual(
+            {
+                "model",
+                "objective",
+                "current_value",
+                "project",
+                "expand",
+                "cost_aware_utility",
+                "posterior_transform",
+                "num_fantasies",
+            },
+            set(inputs_mfkg.keys()),
+        )
+        self.assertEqual(
+            inputs_mfkg["num_fantasies"], constructor_args["num_fantasies"]
+        )
+        self.assertEqual(inputs_mfkg["current_value"], current_value)
 
     def test_construct_inputs_mfmes(self) -> None:
         target_fidelities = {0: 0.987}
@@ -1467,7 +1488,19 @@ def setUp(self, suppress_input_warnings: bool = True) -> None:
             },
         )
         self.cases["MF look-ahead"] = (
-            [qMultiFidelityKnowledgeGradient, qMultiFidelityMaxValueEntropy],
+            [qMultiFidelityMaxValueEntropy],
             {
                 "model": kg_model,
                 "training_data": self.blockX_blockY,
+                "bounds": bounds,
+                "target_fidelities": {0: 0.987},
+                "num_fantasies": 30,
+            },
+        )
+        bounds = torch.ones((2, 2))
+        kg_model = SingleTaskGP(train_X=torch.rand((3, 2)), train_Y=torch.rand((3, 1)))
+        self.cases["MF look-ahead (KG)"] = (
+            [qMultiFidelityKnowledgeGradient],
+            {
+                "model": kg_model,
+                "training_data": self.blockX_blockY,
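
To exercise just the updated constructor test locally, something like the following should work under a standard pytest setup (path as in the file header above):

python -m pytest test/acquisition/test_input_constructors.py -k test_construct_inputs_mfkg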
