Add input constructor for qMultiFidelityHypervolumeKnowledgeGradient (pytorch#2524)

Summary:
Pull Request resolved: pytorch#2524

Adds a new input constructor for qMultiFidelityHypervolumeKnowledgeGradient.

Differential Revision: D62459735
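
For context, a minimal sketch (not part of this diff) of how the new constructor is looked up through the input-constructor registry and used to build the acquisition function; the toy model, dataset, and fidelity settings below are illustrative assumptions:

import torch
from botorch.acquisition.input_constructors import get_acqf_input_constructor
from botorch.acquisition.multi_objective.hypervolume_knowledge_gradient import (
    qMultiFidelityHypervolumeKnowledgeGradient,
)
from botorch.models import ModelListGP, SingleTaskGP
from botorch.utils.datasets import SupervisedDataset

# Toy two-objective problem; column 1 plays the role of the fidelity parameter.
X = torch.rand(8, 2, dtype=torch.float64)
Y = torch.rand(8, 2, dtype=torch.float64)
model = ModelListGP(SingleTaskGP(X, Y[:, :1]), SingleTaskGP(X, Y[:, 1:]))
training_data = SupervisedDataset(
    X, Y, feature_names=["x0", "fid"], outcome_names=["y0", "y1"]
)

# The registry maps the acquisition class to construct_inputs_qMFHVKG below.
constructor = get_acqf_input_constructor(qMultiFidelityHypervolumeKnowledgeGradient)
kwargs = constructor(
    model=model,
    training_data=training_data,
    bounds=[(0.0, 1.0), (0.0, 1.0)],
    target_fidelities={1: 1.0},
    objective_thresholds=torch.zeros(2, dtype=torch.float64),
    num_fantasies=2,  # small values keep the sketch cheap
    num_pareto=2,
)
acqf = qMultiFidelityHypervolumeKnowledgeGradient(**kwargs)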
ltiao authored and facebook-github-bot committed Sep 11, 2024
1 parent a7db82b commit dfa6494
Showing 2 changed files with 200 additions and 97 deletions.
110 changes: 89 additions & 21 deletions botorch/acquisition/input_constructors.py
@@ -65,6 +65,7 @@
 from botorch.acquisition.multi_objective.hypervolume_knowledge_gradient import (
     _get_hv_value_function,
     qHypervolumeKnowledgeGradient,
+    qMultiFidelityHypervolumeKnowledgeGradient,
 )
 from botorch.acquisition.multi_objective.logei import (
     qLogExpectedHypervolumeImprovement,
@@ -1223,18 +1224,25 @@ def construct_inputs_mf_base(
         )
     )
 
-    return {
+    inputs_mf = {
         "cost_aware_utility": cost_aware_utility,
-        "expand": lambda X: expand_trace_observations(
-            X=X,
-            fidelity_dims=sorted(target_fidelities),
-            num_trace_obs=num_trace_observations,
-        ),
         "project": lambda X: project_to_target_fidelity(
            X=X, target_fidelities=target_fidelities
         ),
     }
+
+    def expand(X):
+        return expand_trace_observations(
+            X=X,
+            fidelity_dims=sorted(target_fidelities),
+            num_trace_obs=num_trace_observations,
+        )
+
+    if num_trace_observations > 0:
+        inputs_mf["expand"] = expand
+
+    return inputs_mf
 
 
 @acqf_input_constructor(qKnowledgeGradient)
 def construct_inputs_qKG(
@@ -1274,21 +1282,6 @@ def construct_inputs_qKG(
     return inputs_qkg
 
 
-def _get_ref_point(
-    objective_thresholds: Tensor,
-    objective: Optional[MCMultiOutputObjective] = None,
-) -> Tensor:
-
-    if objective is None:
-        ref_point = objective_thresholds
-    elif isinstance(objective, RiskMeasureMCObjective):
-        ref_point = objective.preprocessing_function(objective_thresholds)
-    else:
-        ref_point = objective(objective_thresholds)
-
-    return ref_point
-
-
 @acqf_input_constructor(qHypervolumeKnowledgeGradient)
 def construct_inputs_qHVKG(
     model: Model,
@@ -1381,6 +1374,66 @@ def construct_inputs_qMFKG(
     }
 
 
+@acqf_input_constructor(qMultiFidelityHypervolumeKnowledgeGradient)
+def construct_inputs_qMFHVKG(
+    model: Model,
+    training_data: MaybeDict[SupervisedDataset],
+    bounds: list[tuple[float, float]],
+    target_fidelities: dict[int, Union[int, float]],
+    objective_thresholds: Tensor,
+    objective: Optional[MCMultiOutputObjective] = None,
+    posterior_transform: Optional[PosteriorTransform] = None,
+    fidelity_weights: Optional[dict[int, float]] = None,
+    cost_intercept: float = 1.0,
+    num_trace_observations: int = 0,
+    num_fantasies: int = 8,
+    num_pareto: int = 10,
+    **optimize_objective_kwargs: TOptimizeObjectiveKwargs,
+) -> dict[str, Any]:
+    r"""Construct kwargs for `qMultiFidelityHypervolumeKnowledgeGradient` constructor."""
+
+    inputs_mf = construct_inputs_mf_base(
+        target_fidelities=target_fidelities,
+        fidelity_weights=fidelity_weights,
+        cost_intercept=cost_intercept,
+        num_trace_observations=num_trace_observations,
+    )
+
+    X = _get_dataset_field(training_data, "X", first_only=True)
+    _bounds = torch.as_tensor(bounds, dtype=X.dtype, device=X.device)
+
+    ref_point = _get_ref_point(
+        objective_thresholds=objective_thresholds, objective=objective
+    )
+
+    acq_function = _get_hv_value_function(
+        model=model,
+        ref_point=ref_point,
+        use_posterior_mean=True,
+        objective=objective,
+    )
+
+    _, current_value = optimize_objective(
+        model=model,
+        bounds=_bounds.t(),
+        q=num_pareto,
+        acq_function=acq_function,
+        fixed_features=target_fidelities,
+        **optimize_objective_kwargs,
+    )
+
+    return {
+        "model": model,
+        "objective": objective,
+        "ref_point": ref_point,
+        "num_fantasies": num_fantasies,
+        "num_pareto": num_pareto,
+        "current_value": current_value.detach().cpu().max(),
+        "target_fidelities": target_fidelities,
+        **inputs_mf,
+    }
+
+
 @acqf_input_constructor(qMultiFidelityMaxValueEntropy)
 def construct_inputs_qMFMES(
     model: Model,
@@ -1806,3 +1859,18 @@ def construct_inputs_NIPV(
"posterior_transform": posterior_transform,
}
return inputs


def _get_ref_point(
objective_thresholds: Tensor,
objective: Optional[MCMultiOutputObjective] = None,
) -> Tensor:

if objective is None:
ref_point = objective_thresholds
elif isinstance(objective, RiskMeasureMCObjective):
ref_point = objective.preprocessing_function(objective_thresholds)
else:
ref_point = objective(objective_thresholds)

return ref_point
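
Behavioral note on the `construct_inputs_mf_base` change above: the `expand` callback is now attached only when trace observations are requested. A quick sketch of the resulting contract (toy fidelity settings assumed, not from this diff):

from botorch.acquisition.input_constructors import construct_inputs_mf_base

inputs = construct_inputs_mf_base(
    target_fidelities={2: 1.0},
    fidelity_weights={2: 1.0},
    cost_intercept=1.0,
    num_trace_observations=0,
)
# Without trace observations, only the cost model and projection are returned.
assert set(inputs) == {"cost_aware_utility", "project"}

inputs = construct_inputs_mf_base(
    target_fidelities={2: 1.0},
    fidelity_weights={2: 1.0},
    cost_intercept=1.0,
    num_trace_observations=2,
)
# With trace observations requested, the "expand" callback is included as well.
assert "expand" in inputs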
187 changes: 111 additions & 76 deletions test/acquisition/test_input_constructors.py
@@ -74,6 +74,7 @@
 )
 from botorch.acquisition.multi_objective.hypervolume_knowledge_gradient import (
     qHypervolumeKnowledgeGradient,
+    qMultiFidelityHypervolumeKnowledgeGradient,
 )
 
 from botorch.acquisition.multi_objective.logei import (
@@ -1296,77 +1297,35 @@ def test_construct_inputs_kg(self) -> None:
         )
         self.assertNotIn("current_value", kwargs)
 
-    def test_construct_inputs_hvkg(self) -> None:
-        model = mock.Mock()
-        current_value = torch.tensor(1.23)
+    @mock.patch("botorch.acquisition.input_constructors._get_hv_value_function")
+    def test_construct_inputs_hvkg(self, mock_get_hv_value_function) -> None:
+
+        current_value = torch.tensor(1.23)
         objective_thresholds = torch.rand(2)
-        objective = IdentityMCMultiOutputObjective()
 
-        get_kwargs = get_acqf_input_constructor(qHypervolumeKnowledgeGradient)
-
-        with (
-            mock.patch(
-                target="botorch.acquisition.input_constructors._get_hv_value_function",
-            ) as mock_get_hv_value_function,
-            mock.patch(
-                target="botorch.acquisition.input_constructors.optimize_acqf",
-                return_value=(None, current_value),
-            ) as mock_optimize_acqf,
-        ):
-            kwargs = get_kwargs(
-                model=model,
-                training_data=self.blockX_blockY,
-                objective_thresholds=objective_thresholds,
-                objective=objective,
-                bounds=self.bounds,
-                num_fantasies=33,
-                num_pareto=11,
-            )
-
-            self.assertEqual(
-                mock_get_hv_value_function.call_args.kwargs["model"], model
-            )
-            self.assertEqual(
-                mock_get_hv_value_function.call_args.kwargs["objective"], objective
-            )
-            self.assertTrue(
-                torch.equal(
-                    mock_get_hv_value_function.call_args.kwargs["ref_point"],
-                    objective_thresholds,
-                )
-            )
-
-            # check that `optimize_acqf` is called with the desired value function
-            self.assertEqual(
-                mock_optimize_acqf.call_args.kwargs["acq_function"],
-                mock_get_hv_value_function(),
-            )
-
-        self.assertLessEqual(
-            {
-                "model",
-                "ref_point",
-                "num_fantasies",
-                "num_pareto",
-                "objective",
-                "current_value",
-            },
-            set(kwargs.keys()),
-        )
-        self.assertEqual(kwargs["num_fantasies"], 33)
-        self.assertEqual(kwargs["num_pareto"], 11)
-        self.assertEqual(kwargs["current_value"], current_value)
-        self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds))
+        for acqf_cls in (
+            qHypervolumeKnowledgeGradient,
+            qMultiFidelityHypervolumeKnowledgeGradient,
+        ):
+            get_kwargs = get_acqf_input_constructor(acqf_cls)
+
+            model = mock.Mock()
+            objective = IdentityMCMultiOutputObjective()
+
+            input_constructor_extra_kwargs = {}
+            if acqf_cls == qMultiFidelityHypervolumeKnowledgeGradient:
+                input_constructor_extra_kwargs.update(
+                    target_fidelities={0: 0.987},
+                    fidelity_weights={0: 0.654},
+                    cost_intercept=0.321,
+                )
 
-        with self.subTest("custom objective"):
-            weights = torch.rand(2)
-            objective = WeightedMCMultiOutputObjective(weights=weights)
             with mock.patch(
                 target="botorch.acquisition.input_constructors.optimize_acqf",
                 return_value=(None, current_value),
             ) as mock_optimize_acqf:
+
                 kwargs = get_kwargs(
                     model=model,
                     training_data=self.blockX_blockY,
@@ -1375,22 +1334,53 @@ def test_construct_inputs_hvkg(self) -> None:
                     bounds=self.bounds,
                     num_fantasies=33,
                     num_pareto=11,
+                    **input_constructor_extra_kwargs,
                 )
-            self.assertIsInstance(kwargs["objective"], WeightedMCMultiOutputObjective)
-            self.assertTrue(
-                torch.equal(kwargs["ref_point"], objective_thresholds * weights)
-            )
 
-        with self.subTest("risk measures"):
-            for use_preprocessing in (True, False):
-                objective = MultiOutputExpectation(
-                    n_w=3,
-                    preprocessing_function=(
-                        WeightedMCMultiOutputObjective(torch.tensor([-1.0, -1.0]))
-                        if use_preprocessing
-                        else None
-                    ),
-                )
+            self.assertEqual(
+                mock_get_hv_value_function.call_args.kwargs["model"], model
+            )
+            self.assertEqual(
+                mock_get_hv_value_function.call_args.kwargs["objective"], objective
+            )
+            self.assertTrue(
+                torch.equal(
+                    mock_get_hv_value_function.call_args.kwargs["ref_point"],
+                    objective_thresholds,
+                )
+            )
+
+            # check that `optimize_acqf` is called with the desired value function
+            if acqf_cls == qMultiFidelityHypervolumeKnowledgeGradient:
+                self.assertIsInstance(
+                    mock_optimize_acqf.call_args.kwargs["acq_function"],
+                    FixedFeatureAcquisitionFunction,
+                )
+            else:
+                self.assertEqual(
+                    mock_optimize_acqf.call_args.kwargs["acq_function"],
+                    mock_get_hv_value_function(),
+                )
+
+            self.assertLessEqual(
+                {
+                    "model",
+                    "ref_point",
+                    "num_fantasies",
+                    "num_pareto",
+                    "objective",
+                    "current_value",
+                },
+                set(kwargs.keys()),
+            )
+            self.assertEqual(kwargs["num_fantasies"], 33)
+            self.assertEqual(kwargs["num_pareto"], 11)
+            self.assertEqual(kwargs["current_value"], current_value)
+            self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds))
+
+            with self.subTest("custom objective"):
+                weights = torch.rand(2)
+                objective = WeightedMCMultiOutputObjective(weights=weights)
                 with mock.patch(
                     target="botorch.acquisition.input_constructors.optimize_acqf",
                     return_value=(None, current_value),
@@ -1403,11 +1393,45 @@
                         bounds=self.bounds,
                         num_fantasies=33,
                         num_pareto=11,
+                        **input_constructor_extra_kwargs,
                     )
-                expected_obj_t = (
-                    -objective_thresholds if use_preprocessing else objective_thresholds
+                self.assertIsInstance(
+                    kwargs["objective"], WeightedMCMultiOutputObjective
+                )
+                self.assertTrue(
+                    torch.equal(kwargs["ref_point"], objective_thresholds * weights)
                 )
-                self.assertTrue(torch.equal(kwargs["ref_point"], expected_obj_t))
+
+            with self.subTest("risk measures"):
+                for use_preprocessing in (True, False):
+                    objective = MultiOutputExpectation(
+                        n_w=3,
+                        preprocessing_function=(
+                            WeightedMCMultiOutputObjective(torch.tensor([-1.0, -1.0]))
+                            if use_preprocessing
+                            else None
+                        ),
+                    )
+                    with mock.patch(
+                        target="botorch.acquisition.input_constructors.optimize_acqf",
+                        return_value=(None, current_value),
+                    ) as mock_optimize_acqf:
+                        kwargs = get_kwargs(
+                            model=model,
+                            training_data=self.blockX_blockY,
+                            objective_thresholds=objective_thresholds,
+                            objective=objective,
+                            bounds=self.bounds,
+                            num_fantasies=33,
+                            num_pareto=11,
+                            **input_constructor_extra_kwargs,
+                        )
+                    expected_obj_t = (
+                        -objective_thresholds
+                        if use_preprocessing
+                        else objective_thresholds
+                    )
+                    self.assertTrue(torch.equal(kwargs["ref_point"], expected_obj_t))
 
     def test_construct_inputs_mes(self) -> None:
         func = get_acqf_input_constructor(qMaxValueEntropy)
@@ -1733,7 +1757,6 @@ def setUp(self, suppress_input_warnings: bool = True) -> None:
         m1 = SingleTaskGP(X, Y1)
         m2 = SingleTaskGP(X, Y2)
         model_list = ModelListGP(m1, m2)
-
         self.cases["HV Look-ahead"] = (
             [qHypervolumeKnowledgeGradient],
             {
@@ -1743,6 +1766,18 @@
                 "objective_thresholds": objective_thresholds,
             },
         )
+        self.cases["MF HV Look-ahead"] = (
+            [qMultiFidelityHypervolumeKnowledgeGradient],
+            {
+                "model": model_list,
+                "training_data": self.blockX_blockY,
+                "bounds": bounds,
+                "target_fidelities": {0: 0.987},
+                "num_fantasies": 30,
+                "objective_thresholds": objective_thresholds,
+            },
+        )
+
         pref_model = self.mock_model
         pref_model.dim = 2
         pref_model.datapoints = torch.tensor([])
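
Aside on the `FixedFeatureAcquisitionFunction` assertion in the test above: `construct_inputs_qMFHVKG` passes `fixed_features=target_fidelities` to `optimize_objective`, which wraps the hypervolume value function so the fidelity column is pinned to its target during the current-value optimization. A self-contained sketch of that wrapping (the `UpperConfidenceBound` inner function and toy model are stand-ins for illustration, not the code under test):

import torch
from botorch.acquisition import UpperConfidenceBound
from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
from botorch.models import SingleTaskGP

# Toy single-output model over d=2 inputs; column 1 plays the fidelity role.
model = SingleTaskGP(
    torch.rand(8, 2, dtype=torch.float64), torch.rand(8, 1, dtype=torch.float64)
)
inner = UpperConfidenceBound(model, beta=2.0)

# Pin column 1 to its target value 1.0; the wrapper then accepts candidates
# over the remaining d - 1 = 1 free column only.
acqf = FixedFeatureAcquisitionFunction(
    acq_function=inner, d=2, columns=[1], values=[1.0]
)
X = torch.rand(4, 1, 1, dtype=torch.float64)  # 4 candidates, q=1, one free column
vals = acqf(X)  # evaluates the inner acquisition at [x, 1.0]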