Commit 127fc08

ltiao authored and facebook-github-bot committed
Add input constructor for qHypervolumeKnowledgeGradient (#2501)
Summary:
Pull Request resolved: #2501

Adds new input constructors for `qHypervolumeKnowledgeGradient`.

Reviewed By: Balandat

Differential Revision: D62046832

fbshipit-source-id: acbb823c37774ec104fa86688ddbc3fed6996b8c
1 parent 4d49bf7 commit 127fc08
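Editor's note (not part of the commit): the `@acqf_input_constructor` decorator added in this diff registers the new constructor in BoTorch's acquisition-input-constructor registry, so callers can resolve it by acquisition class. A minimal sketch of that lookup, using the public `get_acqf_input_constructor` helper that the tests below also rely on:

# Illustrative sketch, not part of the diff: the decorator added below makes
# this lookup resolve to the new construct_inputs_qHVKG function.
from botorch.acquisition.input_constructors import get_acqf_input_constructor
from botorch.acquisition.multi_objective.hypervolume_knowledge_gradient import (
    qHypervolumeKnowledgeGradient,
)

construct_inputs = get_acqf_input_constructor(qHypervolumeKnowledgeGradient)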

2 files changed: +214 additions, −9 deletions


botorch/acquisition/input_constructors.py

Lines changed: 65 additions & 0 deletions
@@ -62,6 +62,10 @@
     qExpectedHypervolumeImprovement,
     qNoisyExpectedHypervolumeImprovement,
 )
+from botorch.acquisition.multi_objective.hypervolume_knowledge_gradient import (
+    _get_hv_value_function,
+    qHypervolumeKnowledgeGradient,
+)
 from botorch.acquisition.multi_objective.logei import (
     qLogExpectedHypervolumeImprovement,
     qLogNoisyExpectedHypervolumeImprovement,
@@ -1270,6 +1274,67 @@ def construct_inputs_qKG(
     return inputs_qkg


+def _get_ref_point(
+    objective_thresholds: Tensor,
+    objective: Optional[MCMultiOutputObjective] = None,
+) -> Tensor:
+
+    if objective is None:
+        ref_point = objective_thresholds
+    elif isinstance(objective, RiskMeasureMCObjective):
+        ref_point = objective.preprocessing_function(objective_thresholds)
+    else:
+        ref_point = objective(objective_thresholds)
+
+    return ref_point
+
+
+@acqf_input_constructor(qHypervolumeKnowledgeGradient)
+def construct_inputs_qHVKG(
+    model: Model,
+    training_data: MaybeDict[SupervisedDataset],
+    bounds: list[tuple[float, float]],
+    objective_thresholds: Tensor,
+    objective: Optional[MCMultiOutputObjective] = None,
+    posterior_transform: Optional[PosteriorTransform] = None,
+    num_fantasies: int = 8,
+    num_pareto: int = 10,
+    **optimize_objective_kwargs: TOptimizeObjectiveKwargs,
+) -> dict[str, Any]:
+    r"""Construct kwargs for the `qHypervolumeKnowledgeGradient` constructor."""
+
+    X = _get_dataset_field(training_data, "X", first_only=True)
+    _bounds = torch.as_tensor(bounds, dtype=X.dtype, device=X.device)
+
+    ref_point = _get_ref_point(
+        objective_thresholds=objective_thresholds, objective=objective
+    )
+
+    acq_function = _get_hv_value_function(
+        model=model,
+        ref_point=ref_point,
+        use_posterior_mean=True,
+        objective=objective,
+    )
+
+    _, current_value = optimize_objective(
+        model=model,
+        bounds=_bounds.t(),
+        q=num_pareto,
+        acq_function=acq_function,
+        **optimize_objective_kwargs,
+    )
+
+    return {
+        "model": model,
+        "objective": objective,
+        "ref_point": ref_point,
+        "num_fantasies": num_fantasies,
+        "num_pareto": num_pareto,
+        "current_value": current_value.detach().cpu().max(),
+    }
+
+
 @acqf_input_constructor(qMultiFidelityKnowledgeGradient)
 def construct_inputs_qMFKG(
     model: Model,
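Editor's note: a minimal usage sketch of the new constructor (illustrative, not part of the diff). The model, dataset, bounds, and thresholds are made-up placeholders; the kwargs keys mirror the dictionary returned above, and building them triggers an inner optimization of the hypervolume value function to compute `current_value`, so this step is not free.

# Illustrative end-to-end sketch (assumed setup, not from the commit).
import torch

from botorch.acquisition.input_constructors import get_acqf_input_constructor
from botorch.acquisition.multi_objective.hypervolume_knowledge_gradient import (
    qHypervolumeKnowledgeGradient,
)
from botorch.models import ModelListGP, SingleTaskGP
from botorch.utils.datasets import SupervisedDataset

# Two-objective toy problem on the unit square.
X = torch.rand(8, 2, dtype=torch.double)
Y1 = torch.rand(8, 1, dtype=torch.double)
Y2 = torch.rand(8, 1, dtype=torch.double)
model = ModelListGP(SingleTaskGP(X, Y1), SingleTaskGP(X, Y2))

training_data = SupervisedDataset(
    X,
    torch.cat([Y1, Y2], dim=-1),
    feature_names=["x1", "x2"],
    outcome_names=["y1", "y2"],
)

# Resolve the constructor registered by @acqf_input_constructor above and
# build the kwargs, then instantiate the acquisition function from them.
construct_inputs = get_acqf_input_constructor(qHypervolumeKnowledgeGradient)
kwargs = construct_inputs(
    model=model,
    training_data=training_data,
    bounds=[(0.0, 1.0), (0.0, 1.0)],
    objective_thresholds=torch.zeros(2, dtype=torch.double),
    num_fantasies=8,
    num_pareto=10,
)
acqf = qHypervolumeKnowledgeGradient(**kwargs)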

test/acquisition/test_input_constructors.py

Lines changed: 149 additions & 9 deletions
@@ -72,6 +72,10 @@
     qExpectedHypervolumeImprovement,
     qNoisyExpectedHypervolumeImprovement,
 )
+from botorch.acquisition.multi_objective.hypervolume_knowledge_gradient import (
+    qHypervolumeKnowledgeGradient,
+)
+
 from botorch.acquisition.multi_objective.logei import (
     qLogExpectedHypervolumeImprovement,
     qLogNoisyExpectedHypervolumeImprovement,
@@ -222,7 +226,7 @@ def test_optimize_objective(self, mock_optimize_acqf):
         mock_model = self.mock_model
         bounds = torch.rand(2, len(self.bounds))

-        with self.subTest("scalarObjective_acqusitionFunction"):
+        with self.subTest("scalarObjective_acquisitionFunction"):
             optimize_objective(
                 model=mock_model,
                 bounds=bounds,
@@ -1072,8 +1076,9 @@ def test_construct_inputs_qEHVI(self) -> None:
             self.assertIs(kwargs["constraints"], constraints)
             self.assertEqual(kwargs["eta"], 1e-2)

-        with self.subTest("block designs"), self.assertRaisesRegex(
-            ValueError, "Field `X` must be shared"
+        with (
+            self.subTest("block designs"),
+            self.assertRaisesRegex(ValueError, "Field `X` must be shared"),
         ):
             c(
                 model=mm,
@@ -1291,6 +1296,119 @@ def test_construct_inputs_kg(self) -> None:
             )
             self.assertNotIn("current_value", kwargs)

+    def test_construct_inputs_hvkg(self) -> None:
+        model = mock.Mock()
+        current_value = torch.tensor(1.23)
+
+        objective_thresholds = torch.rand(2)
+        objective = IdentityMCMultiOutputObjective()
+
+        get_kwargs = get_acqf_input_constructor(qHypervolumeKnowledgeGradient)
+
+        with (
+            mock.patch(
+                target="botorch.acquisition.input_constructors._get_hv_value_function",
+            ) as mock_get_hv_value_function,
+            mock.patch(
+                target="botorch.acquisition.input_constructors.optimize_acqf",
+                return_value=(None, current_value),
+            ) as mock_optimize_acqf,
+        ):
+
+            kwargs = get_kwargs(
+                model=model,
+                training_data=self.blockX_blockY,
+                objective_thresholds=objective_thresholds,
+                objective=objective,
+                bounds=self.bounds,
+                num_fantasies=33,
+                num_pareto=11,
+            )
+
+            self.assertEqual(
+                mock_get_hv_value_function.call_args.kwargs["model"], model
+            )
+            self.assertEqual(
+                mock_get_hv_value_function.call_args.kwargs["objective"], objective
+            )
+            self.assertTrue(
+                torch.equal(
+                    mock_get_hv_value_function.call_args.kwargs["ref_point"],
+                    objective_thresholds,
+                )
+            )
+
+            # check that `optimize_acqf` is called with the desired value function
+            self.assertEqual(
+                mock_optimize_acqf.call_args.kwargs["acq_function"],
+                mock_get_hv_value_function(),
+            )
+
+        self.assertLessEqual(
+            {
+                "model",
+                "ref_point",
+                "num_fantasies",
+                "num_pareto",
+                "objective",
+                "current_value",
+            },
+            set(kwargs.keys()),
+        )
+        self.assertEqual(kwargs["num_fantasies"], 33)
+        self.assertEqual(kwargs["num_pareto"], 11)
+        self.assertEqual(kwargs["current_value"], current_value)
+        self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds))
+
+        with self.subTest("custom objective"):
+            weights = torch.rand(2)
+            objective = WeightedMCMultiOutputObjective(weights=weights)
+            with mock.patch(
+                target="botorch.acquisition.input_constructors.optimize_acqf",
+                return_value=(None, current_value),
+            ) as mock_optimize_acqf:
+                kwargs = get_kwargs(
+                    model=model,
+                    training_data=self.blockX_blockY,
+                    objective_thresholds=objective_thresholds,
+                    objective=objective,
+                    bounds=self.bounds,
+                    num_fantasies=33,
+                    num_pareto=11,
+                )
+            self.assertIsInstance(kwargs["objective"], WeightedMCMultiOutputObjective)
+            self.assertTrue(
+                torch.equal(kwargs["ref_point"], objective_thresholds * weights)
+            )
+
+        with self.subTest("risk measures"):
+            for use_preprocessing in (True, False):
+                objective = MultiOutputExpectation(
+                    n_w=3,
+                    preprocessing_function=(
+                        WeightedMCMultiOutputObjective(torch.tensor([-1.0, -1.0]))
+                        if use_preprocessing
+                        else None
+                    ),
+                )
+                with mock.patch(
+                    target="botorch.acquisition.input_constructors.optimize_acqf",
+                    return_value=(None, current_value),
+                ) as mock_optimize_acqf:
+                    kwargs = get_kwargs(
+                        model=model,
+                        training_data=self.blockX_blockY,
+                        objective_thresholds=objective_thresholds,
+                        objective=objective,
+                        bounds=self.bounds,
+                        num_fantasies=33,
+                        num_pareto=11,
+                    )
+                expected_obj_t = (
+                    -objective_thresholds if use_preprocessing else objective_thresholds
+                )
+                self.assertTrue(torch.equal(kwargs["ref_point"], expected_obj_t))
+
     def test_construct_inputs_mes(self) -> None:
         func = get_acqf_input_constructor(qMaxValueEntropy)
         n, d, m = 5, 2, 1
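Editor's note: the custom-objective and risk-measure subtests above pin down how `_get_ref_point` transforms the thresholds. A small standalone illustration of the same behavior (illustrative tensors, not part of the commit), using only the public multi-output objectives:

# Illustrative only: mirrors the ref_point expectations asserted in the test.
import torch

from botorch.acquisition.multi_objective.objective import (
    IdentityMCMultiOutputObjective,
    WeightedMCMultiOutputObjective,
)

objective_thresholds = torch.tensor([0.5, 1.0])

# Identity objective: the thresholds pass through unchanged.
assert torch.equal(
    IdentityMCMultiOutputObjective()(objective_thresholds), objective_thresholds
)

# Weighted objective: the thresholds are rescaled elementwise by the weights,
# matching the `objective_thresholds * weights` check above.
weights = torch.tensor([2.0, -1.0])
assert torch.equal(
    WeightedMCMultiOutputObjective(weights=weights)(objective_thresholds),
    objective_thresholds * weights,
)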
@@ -1429,12 +1547,17 @@ def test_construct_inputs_mfmes(self) -> None:
             "cost_intercept": 0.321,
         }
         input_constructor = get_acqf_input_constructor(qMultiFidelityMaxValueEntropy)
-        with mock.patch(
-            target="botorch.acquisition.input_constructors.construct_inputs_mf_base",
-            return_value={"foo": 0},
-        ), mock.patch(
-            target="botorch.acquisition.input_constructors.construct_inputs_qMES",
-            return_value={"bar": 1},
+        with (
+            mock.patch(
+                target=(
+                    "botorch.acquisition.input_constructors.construct_inputs_mf_base"
+                ),
+                return_value={"foo": 0},
+            ),
+            mock.patch(
+                target="botorch.acquisition.input_constructors.construct_inputs_qMES",
+                return_value={"bar": 1},
+            ),
         ):
             inputs_mfmes = input_constructor(**constructor_args)
             inputs_test = {"foo": 0, "bar": 1, "num_fantasies": 64}
@@ -1603,6 +1726,23 @@ def setUp(self, suppress_input_warnings: bool = True) -> None:
                 "training_data": self.blockX_blockY,
             },
         )
+
+        X = torch.rand(3, 2)
+        Y1 = torch.rand(3, 1)
+        Y2 = torch.rand(3, 1)
+        m1 = SingleTaskGP(X, Y1)
+        m2 = SingleTaskGP(X, Y2)
+        model_list = ModelListGP(m1, m2)
+
+        self.cases["HV Look-ahead"] = (
+            [qHypervolumeKnowledgeGradient],
+            {
+                "model": model_list,
+                "training_data": self.blockX_blockY,
+                "bounds": bounds,
+                "objective_thresholds": objective_thresholds,
+            },
+        )
         pref_model = self.mock_model
         pref_model.dim = 2
         pref_model.datapoints = torch.tensor([])
