diff --git a/ax/models/tests/test_botorch_kg.py b/ax/models/tests/test_botorch_kg.py
deleted file mode 100644
index 1f0e0f4edfd..00000000000
--- a/ax/models/tests/test_botorch_kg.py
+++ /dev/null
@@ -1,452 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-# pyre-strict
-
-import dataclasses
-from typing import Any
-from unittest import mock
-
-import torch
-from ax.core.search_space import SearchSpaceDigest
-from ax.models.torch.botorch_kg import _instantiate_KG, KnowledgeGradient
-from ax.models.torch_base import TorchOptConfig
-from ax.utils.common.testutils import TestCase
-from ax.utils.testing.mock import fast_botorch_optimize
-from botorch.acquisition.analytic import PosteriorMean
-from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
-from botorch.acquisition.knowledge_gradient import qMultiFidelityKnowledgeGradient
-from botorch.acquisition.monte_carlo import qSimpleRegret
-from botorch.acquisition.objective import (
-    LinearMCObjective,
-    ScalarizedPosteriorTransform,
-)
-from botorch.exceptions.errors import UnsupportedError
-from botorch.models.transforms.input import Warp
-from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
-from botorch.utils.datasets import SupervisedDataset
-
-
-def dummy_func(X: torch.Tensor) -> torch.Tensor:
-    return X
-
-
-class KnowledgeGradientTest(TestCase):
-    def setUp(self) -> None:
-        super().setUp()
-        self.tkwargs: dict[str, Any] = {
-            "device": torch.device("cpu"),
-            "dtype": torch.double,
-        }
-        self.feature_names = ["x1", "x2", "x3"]
-        self.metric_names = ["y"]
-        self.dataset = SupervisedDataset(
-            X=torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]], **self.tkwargs),
-            Y=torch.tensor([[3.0], [4.0]], **self.tkwargs),
-            Yvar=torch.tensor([[0.0], [2.0]], **self.tkwargs),
-            feature_names=self.feature_names,
-            outcome_names=self.metric_names,
-        )
-        self.bounds = [(0.0, 1.0), (1.0, 4.0), (2.0, 5.0)]
-        self.acq_options = {"num_fantasies": 30, "mc_samples": 30}
-        self.objective_weights = torch.tensor([1.0], **self.tkwargs)
-        self.optimizer_options = {
-            "num_restarts": 12,
-            "raw_samples": 12,
-            "options": {
-                "maxiter": 5,
-                "batch_limit": 1,
-            },
-        }
-        self.optimize_acqf = "ax.models.torch.botorch_kg.optimize_acqf"
-        self.X_dummy = torch.ones(1, 3, **self.tkwargs)
-        self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
-        self.objective_weights = torch.ones(1, **self.tkwargs)
-        self.moo_objective_weights = torch.ones(2, **self.tkwargs)
-        self.objective_thresholds = torch.tensor([0.5, 1.5])
-        self.search_space_digest = SearchSpaceDigest(
-            feature_names=self.feature_names,
-            bounds=self.bounds,
-        )
-
-    @fast_botorch_optimize
-    def test_KnowledgeGradient(self) -> None:
-        model = KnowledgeGradient()
-        model.fit(
-            datasets=[self.dataset],
-            search_space_digest=self.search_space_digest,
-        )
-
-        n = 2
-
-        X_dummy = torch.rand(1, n, 4, **self.tkwargs)
-        acq_dummy = torch.tensor(0.0, **self.tkwargs)
-
-        torch_opt_config = TorchOptConfig(
-            objective_weights=self.objective_weights,
-            model_gen_options={
-                "acquisition_function_kwargs": self.acq_options,
-                "optimizer_kwargs": self.optimizer_options,
-            },
-        )
-
-        with mock.patch(self.optimize_acqf) as mock_optimize_acqf:
-            mock_optimize_acqf.side_effect = [(X_dummy, acq_dummy)]
-            gen_results = model.gen(
-                n=n,
-                search_space_digest=self.search_space_digest,
-                torch_opt_config=torch_opt_config,
-            )
-        self.assertTrue(torch.equal(gen_results.points, X_dummy.cpu()))
-        self.assertTrue(
-            torch.equal(
-                gen_results.weights, torch.ones(n, dtype=self.tkwargs["dtype"])
-            )
-        )
-
-        # called once, the best point call is not caught by mock
-        mock_optimize_acqf.assert_called_once()
-
-        ini_dummy = torch.rand(10, 32, 3, **self.tkwargs)
-        optimizer_options2 = {
-            "num_restarts": 1,
-            "raw_samples": 1,
-            "options": {
-                "maxiter": 5,
-                "batch_limit": 1,
-            },
-            "partial_restarts": 2,
-        }
-        torch_opt_config.model_gen_options["optimizer_kwargs"] = optimizer_options2
-        with mock.patch(
-            "ax.models.torch.botorch_kg.gen_one_shot_kg_initial_conditions",
-            return_value=ini_dummy,
-        ) as mock_warmstart_initialization:
-            gen_results = model.gen(
-                n=n,
-                search_space_digest=self.search_space_digest,
-                torch_opt_config=torch_opt_config,
-            )
-            mock_warmstart_initialization.assert_called_once()
-
-        posterior_tf = ScalarizedPosteriorTransform(weights=self.objective_weights)
-        dummy_acq = PosteriorMean(model=model.model, posterior_transform=posterior_tf)
-        with mock.patch(
-            "ax.models.torch.utils.PosteriorMean", return_value=dummy_acq
-        ) as mock_posterior_mean, mock.patch(
-            "ax.models.torch.utils.get_botorch_objective_and_transform",
-            return_value=(
-                None,
-                ScalarizedPosteriorTransform(
-                    weights=torch.tensor([1.0], dtype=torch.double)
-                ),
-            ),
-        ):
-            gen_results = model.gen(
-                n=n,
-                search_space_digest=self.search_space_digest,
-                torch_opt_config=torch_opt_config,
-            )
-            self.assertEqual(mock_posterior_mean.call_count, 2)
-
-        # Check best point selection within bounds (some numerical tolerance)
-        xbest = model.best_point(
-            search_space_digest=self.search_space_digest,
-            torch_opt_config=torch_opt_config,
-        )
-        lb = torch.tensor([b[0] for b in self.bounds]) - 1e-5
-        ub = torch.tensor([b[1] for b in self.bounds]) + 1e-5
-        self.assertTrue(torch.all(xbest <= ub))
-        self.assertTrue(torch.all(xbest >= lb))
-
-        # test error message
-        torch_opt_config = dataclasses.replace(
-            torch_opt_config,
-            linear_constraints=(
-                torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
-                torch.tensor([[0.5], [1.0]]),
-            ),
-        )
-        with self.assertRaises(UnsupportedError):
-            gen_results = model.gen(
-                n=n,
-                search_space_digest=self.search_space_digest,
-                torch_opt_config=torch_opt_config,
-            )
-
-        # test input warping
-        self.assertFalse(model.use_input_warping)
-        model = KnowledgeGradient(use_input_warping=True)
-        model.fit(
-            datasets=[self.dataset],
-            search_space_digest=self.search_space_digest,
-        )
-        self.assertTrue(model.use_input_warping)
-        self.assertTrue(hasattr(model.model, "input_transform"))
-        self.assertIsInstance(model.model.input_transform, Warp)
-
-        # test loocv pseudo likelihood
-        self.assertFalse(model.use_loocv_pseudo_likelihood)
-        model = KnowledgeGradient(use_loocv_pseudo_likelihood=True)
-        model.fit(
-            datasets=[self.dataset],
-            search_space_digest=self.search_space_digest,
-        )
-        self.assertTrue(model.use_loocv_pseudo_likelihood)
-
-    @fast_botorch_optimize
-    def test_KnowledgeGradient_multifidelity(self) -> None:
-        search_space_digest = SearchSpaceDigest(
-            feature_names=self.feature_names,
-            bounds=self.bounds,
-            fidelity_features=[2],
-            target_values={2: 5.0},
-        )
-        model = KnowledgeGradient()
-        model.fit(
-            datasets=[self.dataset],
-            search_space_digest=search_space_digest,
-        )
-
-        torch_opt_config = TorchOptConfig(
-            objective_weights=self.objective_weights,
-            model_gen_options={
-                "acquisition_function_kwargs": self.acq_options,
-                "optimizer_kwargs": self.optimizer_options,
-            },
-        )
-        # Check best point selection within bounds (some numerical tolerance)
-        xbest = model.best_point(
-            search_space_digest=search_space_digest,
-            torch_opt_config=torch_opt_config,
-        )
-        lb = torch.tensor([b[0] for b in self.bounds]) - 1e-5
-        ub = torch.tensor([b[1] for b in self.bounds]) + 1e-5
-        self.assertTrue(torch.all(xbest <= ub))
-        self.assertTrue(torch.all(xbest >= lb))
-
-        # check error when no target fidelities are specified
-        with self.assertRaises(RuntimeError):
-            model.best_point(
-                search_space_digest=dataclasses.replace(
-                    search_space_digest,
-                    target_values={},
-                ),
-                torch_opt_config=torch_opt_config,
-            )
-
-        # check generation
-        n = 2
-        X_dummy = torch.zeros(1, n, 3, **self.tkwargs)
-        acq_dummy = torch.tensor(0.0, **self.tkwargs)
-        dummy = (X_dummy, acq_dummy)
-        with mock.patch(self.optimize_acqf, side_effect=[dummy]) as mock_optimize_acqf:
-            gen_results = model.gen(
-                n=n,
-                search_space_digest=search_space_digest,
-                torch_opt_config=torch_opt_config,
-            )
-            self.assertTrue(torch.equal(gen_results.points, X_dummy.cpu()))
-            self.assertTrue(
-                torch.equal(
-                    gen_results.weights, torch.ones(n, dtype=self.tkwargs["dtype"])
-                )
-            )
-            mock_optimize_acqf.assert_called()  # called twice, once for best_point
-
-        # test error message
-        with self.assertRaises(UnsupportedError):
-            xbest = model.best_point(
-                search_space_digest=search_space_digest,
-                torch_opt_config=dataclasses.replace(
-                    torch_opt_config,
-                    linear_constraints=(
-                        torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
-                        torch.tensor([[0.5], [1.0]]),
-                    ),
-                ),
-            )
-
-        # test input warping
-        self.assertFalse(model.use_input_warping)
-        model = KnowledgeGradient(use_input_warping=True)
-        model.fit(
-            datasets=[self.dataset],
-            search_space_digest=search_space_digest,
-        )
-        self.assertTrue(model.use_input_warping)
-        self.assertTrue(hasattr(model.model, "input_transform"))
-        self.assertIsInstance(model.model.input_transform, Warp)
-
-        # test loocv pseudo likelihood
-        self.assertFalse(model.use_loocv_pseudo_likelihood)
-        model = KnowledgeGradient(use_loocv_pseudo_likelihood=True)
-        model.fit(
-            datasets=[self.dataset],
-            search_space_digest=search_space_digest,
-        )
-        self.assertTrue(model.use_loocv_pseudo_likelihood)
-
-    @fast_botorch_optimize
-    def test_KnowledgeGradient_helpers(self) -> None:
-        model = KnowledgeGradient()
-        model.fit(
-            datasets=[self.dataset],
-            search_space_digest=SearchSpaceDigest(
-                feature_names=self.feature_names,
-                bounds=self.bounds,
-            ),
-        )
-
-        # test _instantiate_KG
-        posterior_tf = ScalarizedPosteriorTransform(weights=self.objective_weights)
-
-        # test acquisition setting
-        acq_function = _instantiate_KG(
-            model=model.model,
-            posterior_transform=posterior_tf,
-            n_fantasies=10,
-            qmc=True,
-        )
-        self.assertIsInstance(acq_function.sampler, SobolQMCNormalSampler)
-        self.assertIsInstance(
-            acq_function.posterior_transform, ScalarizedPosteriorTransform
-        )
-        self.assertEqual(acq_function.num_fantasies, 10)
-
-        acq_function = _instantiate_KG(
-            model=model.model,
-            posterior_transform=posterior_tf,
-            n_fantasies=10,
-            qmc=False,
-        )
-        self.assertIsInstance(acq_function.sampler, IIDNormalSampler)
-
-        acq_function = _instantiate_KG(
-            model=model.model,
-            posterior_transform=posterior_tf,
-            qmc=False,
-        )
-        self.assertIsNone(acq_function.inner_sampler)
-
-        acq_function = _instantiate_KG(
-            model=model.model,
-            posterior_transform=posterior_tf,
-            qmc=True,
-            X_pending=self.X_dummy,
-        )
-        self.assertIsNone(acq_function.inner_sampler)
-        self.assertTrue(torch.equal(acq_function.X_pending, self.X_dummy))
-
-        # test _get_best_point_acqf
-        acq_function, non_fixed_idcs = model._get_best_point_acqf(
-            objective_weights=self.objective_weights,
-            outcome_constraints=self.outcome_constraints,
-            X_observed=self.X_dummy,
-        )
-        self.assertIsInstance(acq_function, qSimpleRegret)
-        self.assertIsInstance(acq_function.sampler, SobolQMCNormalSampler)
-        self.assertIsNone(non_fixed_idcs)
-
-        acq_function, non_fixed_idcs = model._get_best_point_acqf(
-            objective_weights=self.objective_weights,
-            outcome_constraints=self.outcome_constraints,
-            X_observed=self.X_dummy,
-            qmc=False,
-        )
-        self.assertIsInstance(acq_function.sampler, IIDNormalSampler)
-        self.assertIsNone(non_fixed_idcs)
-
-        with self.assertRaises(RuntimeError):
-            model._get_best_point_acqf(
-                objective_weights=self.objective_weights,
-                outcome_constraints=self.outcome_constraints,
-                X_observed=self.X_dummy,
-                target_fidelities={1: 1.0},
-            )
-
-        # multi-fidelity tests
-
-        model = KnowledgeGradient()
-        model.fit(
-            datasets=[self.dataset],
-            search_space_digest=SearchSpaceDigest(
-                feature_names=self.feature_names,
-                bounds=self.bounds,
-                fidelity_features=[-1],
-            ),
-        )
-
-        acq_function = _instantiate_KG(
-            model=model.model,
-            posterior_transform=posterior_tf,
-            target_fidelities={2: 1.0},
-            # pyre-fixme[6]: For 4th param expected `Optional[Tensor]` but got `int`.
-            current_value=0,
-        )
-        self.assertIsInstance(acq_function, qMultiFidelityKnowledgeGradient)
-
-        acq_function = _instantiate_KG(
-            model=model.model,
-            objective=LinearMCObjective(weights=self.objective_weights),
-        )
-        self.assertIsInstance(acq_function.inner_sampler, SobolQMCNormalSampler)
-
-        # test error that target fidelity and fidelity weight indices must match
-        with self.assertRaises(RuntimeError):
-            _instantiate_KG(
-                model=model.model,
-                posterior_transform=posterior_tf,
-                target_fidelities={1: 1.0},
-                fidelity_weights={2: 1.0},
-                # pyre-fixme[6]: For 5th param expected `Optional[Tensor]` but got
-                #  `int`.
-                current_value=0,
-            )
-
-        # test _get_best_point_acqf
-        acq_function, non_fixed_idcs = model._get_best_point_acqf(
-            objective_weights=self.objective_weights,
-            outcome_constraints=self.outcome_constraints,
-            X_observed=self.X_dummy,
-            target_fidelities={2: 1.0},
-        )
-        self.assertIsInstance(acq_function, FixedFeatureAcquisitionFunction)
-        self.assertIsInstance(acq_function.acq_func.sampler, SobolQMCNormalSampler)
-        self.assertEqual(non_fixed_idcs, [0, 1])
-
-        acq_function, non_fixed_idcs = model._get_best_point_acqf(
-            objective_weights=self.objective_weights,
-            outcome_constraints=self.outcome_constraints,
-            X_observed=self.X_dummy,
-            target_fidelities={2: 1.0},
-            qmc=False,
-        )
-        self.assertIsInstance(acq_function, FixedFeatureAcquisitionFunction)
-        self.assertIsInstance(acq_function.acq_func.sampler, IIDNormalSampler)
-        self.assertEqual(non_fixed_idcs, [0, 1])
-
-        # test error that fixed features are provided
-        with self.assertRaises(RuntimeError):
-            model._get_best_point_acqf(
-                objective_weights=self.objective_weights,
-                outcome_constraints=self.outcome_constraints,
-                X_observed=self.X_dummy,
-                qmc=False,
-            )
-
-        # test error if fixed features are also fidelity features
-        with self.assertRaises(RuntimeError):
-            model._get_best_point_acqf(
-                objective_weights=self.objective_weights,
-                outcome_constraints=self.outcome_constraints,
-                X_observed=self.X_dummy,
-                fixed_features={2: 2.0},
-                target_fidelities={2: 1.0},
-                qmc=False,
-            )
-
-        # TODO: Test subsetting multi-output model
diff --git a/ax/models/torch/botorch_kg.py b/ax/models/torch/botorch_kg.py
deleted file mode 100644
index 99905d788af..00000000000
--- a/ax/models/torch/botorch_kg.py
+++ /dev/null
@@ -1,413 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-# pyre-strict
-
-import dataclasses
-from collections.abc import Callable
-from typing import Any
-
-import torch
-from ax.core.search_space import SearchSpaceDigest
-from ax.models.torch.botorch import BotorchModel, get_rounding_func
-from ax.models.torch.botorch_defaults import recommend_best_out_of_sample_point
-from ax.models.torch.utils import (
-    _get_X_pending_and_observed,
-    _to_inequality_constraints,
-    get_botorch_objective_and_transform,
-    get_out_of_sample_best_point_acqf,
-    subset_model,
-)
-from ax.models.torch_base import TorchGenResults, TorchOptConfig
-from ax.utils.common.typeutils import not_none
-from botorch.acquisition.acquisition import AcquisitionFunction
-from botorch.acquisition.cost_aware import InverseCostWeightedUtility
-from botorch.acquisition.knowledge_gradient import (
-    qKnowledgeGradient,
-    qMultiFidelityKnowledgeGradient,
-)
-from botorch.acquisition.objective import MCAcquisitionObjective, PosteriorTransform
-from botorch.acquisition.utils import (
-    expand_trace_observations,
-    project_to_target_fidelity,
-)
-from botorch.exceptions.errors import UnsupportedError
-from botorch.models.cost import AffineFidelityCostModel
-from botorch.models.model import Model
-from botorch.optim.initializers import gen_one_shot_kg_initial_conditions
-from botorch.optim.optimize import optimize_acqf
-from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
-from torch import Tensor
-
-
-class KnowledgeGradient(BotorchModel):
-    r"""The Knowledge Gradient with one shot optimization.
-
-    Args:
-        cost_intercept: The cost intercept for the affine cost of the form
-            `cost_intercept + n`, where `n` is the number of generated points.
-            Only used for multi-fidelity optimization (i.e., if fidelity_features
-            are present).
-        linear_truncated: If `False`, use an alternate downsampling + exponential
-            decay Kernel instead of the default `LinearTruncatedFidelityKernel`
-            (only relevant for multi-fidelity optimization).
-        kwargs: Model-specific kwargs.
-    """
-
-    def __init__(
-        self,
-        cost_intercept: float = 1.0,
-        linear_truncated: bool = True,
-        use_input_warping: bool = False,
-        **kwargs: Any,
-    ) -> None:
-        super().__init__(
-            best_point_recommender=recommend_best_out_of_sample_point,
-            linear_truncated=linear_truncated,
-            use_input_warping=use_input_warping,
-            **kwargs,
-        )
-        self.cost_intercept = cost_intercept
-
-    def gen(
-        self,
-        n: int,
-        search_space_digest: SearchSpaceDigest,
-        torch_opt_config: TorchOptConfig,
-    ) -> TorchGenResults:
-        r"""Generate new candidates.
-
-        Args:
-            n: Number of candidates to generate.
-            search_space_digest: A SearchSpaceDigest object containing metadata
-                about the search space (e.g. bounds, parameter types).
-            torch_opt_config: A TorchOptConfig object containing optimization
-                arguments (e.g., objective weights, constraints).
-
-        Returns:
-            A TorchGenResults container, containing
-
-            - (n x d) tensor of generated points.
-            - n-tensor of weights for each point.
-            - Dictionary of model-specific metadata for the given
-              generation candidates.
-        """
-        options = torch_opt_config.model_gen_options or {}
-        acf_options = options.get("acquisition_function_kwargs", {})
-        optimizer_options = options.get("optimizer_kwargs", {})
-
-        X_pending, X_observed = _get_X_pending_and_observed(
-            Xs=self.Xs,
-            objective_weights=torch_opt_config.objective_weights,
-            bounds=search_space_digest.bounds,
-            pending_observations=torch_opt_config.pending_observations,
-            outcome_constraints=torch_opt_config.outcome_constraints,
-            linear_constraints=torch_opt_config.linear_constraints,
-            fixed_features=torch_opt_config.fixed_features,
-        )
-
-        # subset model only to the outcomes we need for the optimization
-        model = not_none(self.model)
-        if options.get("subset_model", True):
-            subset_model_results = subset_model(
-                model=model,
-                objective_weights=torch_opt_config.objective_weights,
-                outcome_constraints=torch_opt_config.outcome_constraints,
-            )
-            model = subset_model_results.model
-            objective_weights = subset_model_results.objective_weights
-            outcome_constraints = subset_model_results.outcome_constraints
-        else:
-            objective_weights = torch_opt_config.objective_weights
-            outcome_constraints = torch_opt_config.outcome_constraints
-
-        objective, posterior_transform = get_botorch_objective_and_transform(
-            botorch_acqf_class=qKnowledgeGradient,
-            model=model,
-            objective_weights=objective_weights,
-            outcome_constraints=outcome_constraints,
-            X_observed=X_observed,
-        )
-
-        inequality_constraints = _to_inequality_constraints(
-            torch_opt_config.linear_constraints
-        )
-        # TODO: update optimizers to handle inequality_constraints
-        if inequality_constraints is not None:
-            raise UnsupportedError(
-                "Inequality constraints are not yet supported for KnowledgeGradient!"
-            )
-
-        # extract a few options
-        n_fantasies = acf_options.get("num_fantasies", 64)
-        qmc = acf_options.get("qmc", True)
-        seed_inner = acf_options.get("seed_inner", None)
-        num_restarts = optimizer_options.get("num_restarts", 40)
-        raw_samples = optimizer_options.get("raw_samples", 1024)
-
-        # get current value
-        current_value = self._get_current_value(
-            model=model,
-            search_space_digest=search_space_digest,
-            torch_opt_config=dataclasses.replace(
-                torch_opt_config,
-                objective_weights=objective_weights,
-                outcome_constraints=outcome_constraints,
-            ),
-            X_observed=not_none(X_observed),
-            seed_inner=seed_inner,
-            qmc=qmc,
-        )
-
-        bounds_ = torch.tensor(
-            search_space_digest.bounds, dtype=self.dtype, device=self.device
-        )
-        bounds_ = bounds_.transpose(0, 1)
-
-        target_fidelities = {
-            k: v
-            for k, v in search_space_digest.target_values.items()
-            if k in search_space_digest.fidelity_features
-        }
-        # get acquisition function
-        acq_function = _instantiate_KG(
-            model=model,
-            objective=objective,
-            posterior_transform=posterior_transform,
-            qmc=qmc,
-            n_fantasies=n_fantasies,
-            num_trace_observations=options.get("num_trace_observations", 0),
-            mc_samples=acf_options.get("mc_samples", 256),
-            seed_inner=seed_inner,
-            seed_outer=acf_options.get("seed_outer", None),
-            X_pending=X_pending,
-            target_fidelities=target_fidelities,
-            fidelity_weights=options.get("fidelity_weights"),
-            current_value=current_value,
-            cost_intercept=self.cost_intercept,
-        )
-
-        # optimize and get new points
-        new_x = _optimize_and_get_candidates(
-            acq_function=acq_function,
-            bounds_=bounds_,
-            n=n,
-            num_restarts=num_restarts,
-            raw_samples=raw_samples,
-            optimizer_options=optimizer_options,
-            rounding_func=torch_opt_config.rounding_func,
-            inequality_constraints=inequality_constraints,
-            fixed_features=torch_opt_config.fixed_features,
-        )
-
-        return TorchGenResults(points=new_x, weights=torch.ones(n, dtype=self.dtype))
-
-    def _get_best_point_acqf(
-        self,
-        X_observed: Tensor,
-        objective_weights: Tensor,
-        mc_samples: int = 512,
-        fixed_features: dict[int, float] | None = None,
-        target_fidelities: dict[int, float] | None = None,
-        outcome_constraints: tuple[Tensor, Tensor] | None = None,
-        seed_inner: int | None = None,
-        qmc: bool = True,
-        **kwargs: Any,
-    ) -> tuple[AcquisitionFunction, list[int] | None]:
-        return get_out_of_sample_best_point_acqf(
-            model=not_none(self.model),
-            Xs=self.Xs,
-            objective_weights=objective_weights,
-            outcome_constraints=outcome_constraints,
-            X_observed=not_none(X_observed),
-            seed_inner=seed_inner,
-            fixed_features=fixed_features,
-            fidelity_features=self.fidelity_features,
-            target_fidelities=target_fidelities,
-            qmc=qmc,
-        )
-
-    def _get_current_value(
-        self,
-        model: Model,
-        search_space_digest: SearchSpaceDigest,
-        torch_opt_config: TorchOptConfig,
-        X_observed: Tensor,
-        seed_inner: int | None,
-        qmc: bool,
-    ) -> Tensor:
-        r"""Computes the value of the current best point. This is the current_value
-        passed to KG.
-
-        NOTE: The current value is computed as the current value of the 'best point
-        acquisition function' (typically `PosteriorMean` or `qSimpleRegret`), not of
-        the Knowledge Gradient acquisition function.
- """ - target_fidelities = { - k: v - for k, v in search_space_digest.target_values.items() - if k in search_space_digest.fidelity_features - } - best_point_acqf, non_fixed_idcs = get_out_of_sample_best_point_acqf( - model=model, - Xs=self.Xs, - objective_weights=torch_opt_config.objective_weights, - outcome_constraints=torch_opt_config.outcome_constraints, - X_observed=X_observed, - seed_inner=seed_inner, - fixed_features=torch_opt_config.fixed_features, - fidelity_features=self.fidelity_features, - target_fidelities=target_fidelities, - qmc=qmc, - ) - - # solution from previous iteration - recommended_point = self.best_point( - search_space_digest=search_space_digest, - torch_opt_config=torch_opt_config, - ) - # pyre-fixme[16]: `Optional` has no attribute `detach`. - recommended_point = recommended_point.detach().unsqueeze(0) - # ensure correct device (`best_point` always returns a CPU tensor) - recommended_point = recommended_point.to(device=self.device) - # Extract acquisition value (TODO: Make this less painful and repetitive) - if non_fixed_idcs is not None: - recommended_point = recommended_point[..., non_fixed_idcs] - current_value = best_point_acqf(recommended_point).max() - return current_value - - -def _instantiate_KG( - model: Model, - objective: MCAcquisitionObjective | None = None, - posterior_transform: PosteriorTransform | None = None, - qmc: bool = True, - n_fantasies: int = 64, - mc_samples: int = 256, - num_trace_observations: int = 0, - seed_inner: int | None = None, - seed_outer: int | None = None, - X_pending: Tensor | None = None, - current_value: Tensor | None = None, - target_fidelities: dict[int, float] | None = None, - fidelity_weights: dict[int, float] | None = None, - cost_intercept: float = 1.0, -) -> qKnowledgeGradient: - r"""Instantiate either a `qKnowledgeGradient` or `qMultiFidelityKnowledgeGradient` - acquisition function depending on whether `target_fidelities` is defined. - """ - sampler_cls = SobolQMCNormalSampler if qmc else IIDNormalSampler - fantasy_sampler = sampler_cls( - sample_shape=torch.Size([n_fantasies]), seed=seed_outer - ) - if isinstance(objective, MCAcquisitionObjective): - inner_sampler = sampler_cls( - sample_shape=torch.Size([mc_samples]), seed=seed_inner - ) - else: - inner_sampler = None - if target_fidelities: - if fidelity_weights is None: - fidelity_weights = {f: 1.0 for f in target_fidelities} - if not set(target_fidelities) == set(fidelity_weights): - raise RuntimeError( - "Must provide the same indices for target_fidelities " - f"({set(target_fidelities)}) and fidelity_weights " - f" ({set(fidelity_weights)})." 
-            )
-        cost_model = AffineFidelityCostModel(
-            fidelity_weights=fidelity_weights, fixed_cost=cost_intercept
-        )
-        cost_aware_utility = InverseCostWeightedUtility(cost_model=cost_model)
-
-        def project(X: Tensor) -> Tensor:
-            return project_to_target_fidelity(X=X, target_fidelities=target_fidelities)
-
-        def expand(X: Tensor) -> Tensor:
-            return expand_trace_observations(
-                X=X,
-                fidelity_dims=sorted(target_fidelities),  # pyre-ignore: [6]
-                num_trace_obs=num_trace_observations,
-            )
-
-        return qMultiFidelityKnowledgeGradient(
-            model=model,
-            num_fantasies=n_fantasies,
-            sampler=fantasy_sampler,
-            objective=objective,
-            posterior_transform=posterior_transform,
-            inner_sampler=inner_sampler,
-            X_pending=X_pending,
-            current_value=current_value,
-            cost_aware_utility=cost_aware_utility,
-            project=project,
-            expand=expand,
-        )
-
-    return qKnowledgeGradient(
-        model=model,
-        num_fantasies=n_fantasies,
-        sampler=fantasy_sampler,
-        objective=objective,
-        posterior_transform=posterior_transform,
-        inner_sampler=inner_sampler,
-        X_pending=X_pending,
-        current_value=current_value,
-    )
-
-
-def _optimize_and_get_candidates(
-    acq_function: qKnowledgeGradient,
-    bounds_: Tensor,
-    n: int,
-    num_restarts: int,
-    raw_samples: int,
-    # pyre-fixme[24]: Generic type `dict` expects 2 type parameters, use
-    #  `typing.Dict` to avoid runtime subscripting errors.
-    optimizer_options: dict,
-    rounding_func: Callable[[Tensor], Tensor] | None,
-    inequality_constraints: list[tuple[Tensor, Tensor, float]] | None,
-    fixed_features: dict[int, float] | None,
-) -> Tensor:
-    r"""Generate initial conditions for optimization, optimize the acquisition
-    function, and return the candidates.
-    """
-    batch_initial_conditions = gen_one_shot_kg_initial_conditions(
-        acq_function=acq_function,
-        bounds=bounds_,
-        q=n,
-        num_restarts=num_restarts,
-        raw_samples=raw_samples,
-        options={
-            "frac_random": optimizer_options.get("frac_random", 0.1),
-            "num_inner_restarts": num_restarts,
-            "raw_inner_samples": raw_samples,
-        },
-    )
-
-    botorch_rounding_func = get_rounding_func(rounding_func)
-
-    opt_options: dict[str, bool | float | int | str] = {
-        "batch_limit": 8,
-        "maxiter": 200,
-        "method": "L-BFGS-B",
-        "nonnegative": False,
-    }
-    opt_options.update(optimizer_options.get("options", {}))
-    candidates, _ = optimize_acqf(
-        acq_function=acq_function,
-        bounds=bounds_,
-        q=n,
-        inequality_constraints=inequality_constraints,
-        fixed_features=fixed_features,
-        post_processing_func=botorch_rounding_func,
-        num_restarts=num_restarts,
-        raw_samples=raw_samples,
-        options=opt_options,
-        batch_initial_conditions=batch_initial_conditions,
    )
-    new_x = candidates.detach().cpu()
-    return new_x
diff --git a/sphinx/source/models.rst b/sphinx/source/models.rst
index 79d1562ed2b..04f9386b26f 100644
--- a/sphinx/source/models.rst
+++ b/sphinx/source/models.rst
@@ -135,14 +135,6 @@ ax.models.torch.botorch_defaults module
    :undoc-members:
    :show-inheritance:
 
-ax.models.torch.botorch_kg module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: ax.models.torch.botorch_kg
-   :members:
-   :undoc-members:
-   :show-inheritance:
-
 ax.models.torch.botorch_moo module
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
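
Note for context: the flow that the deleted `gen` implemented (build a KG acquisition via `_instantiate_KG`, warm-start it with `gen_one_shot_kg_initial_conditions`, then call `optimize_acqf`) remains available directly in BoTorch. Below is a minimal sketch of the single-fidelity path, assuming current BoTorch model-fitting APIs (`SingleTaskGP`, `fit_gpytorch_mll`) and toy data; the fantasy, restart, and sample counts are illustrative, not the deleted defaults (64 fantasies, 40 restarts, 1024 raw samples):

    import torch
    from botorch.acquisition.knowledge_gradient import qKnowledgeGradient
    from botorch.fit import fit_gpytorch_mll
    from botorch.models import SingleTaskGP
    from botorch.optim.initializers import gen_one_shot_kg_initial_conditions
    from botorch.optim.optimize import optimize_acqf
    from gpytorch.mlls import ExactMarginalLogLikelihood

    # Toy training data; any fitted BoTorch model would work here.
    train_X = torch.rand(10, 3, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True).sin()
    model = SingleTaskGP(train_X, train_Y)
    fit_gpytorch_mll(ExactMarginalLogLikelihood(model.likelihood, model))

    bounds = torch.tensor([[0.0] * 3, [1.0] * 3], dtype=torch.double)
    acqf = qKnowledgeGradient(model=model, num_fantasies=32)

    # Warm-started one-shot initial conditions, as _optimize_and_get_candidates did.
    ics = gen_one_shot_kg_initial_conditions(
        acq_function=acqf,
        bounds=bounds,
        q=2,
        num_restarts=10,
        raw_samples=256,
        options={"frac_random": 0.1},
    )
    # optimize_acqf extracts the actual q candidates from the one-shot solution.
    candidates, _ = optimize_acqf(
        acq_function=acqf,
        bounds=bounds,
        q=2,
        num_restarts=10,
        raw_samples=256,
        batch_initial_conditions=ics,
    )

The multi-fidelity branch of the deleted `_instantiate_KG` additionally wired a cost-aware utility (`InverseCostWeightedUtility` over an `AffineFidelityCostModel`), fidelity projection, and trace-observation expansion into `qMultiFidelityKnowledgeGradient`; those components likewise live in BoTorch.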