Set deprecation for sooner (warning has been around for a year) #2351

Closed · wants to merge 2 commits
78 changes: 11 additions & 67 deletions ax/models/torch/alebo.py
@@ -34,14 +34,14 @@
from ax.core.types import TCandidateMetadata
from ax.models.random.alebo_initializer import ALEBOInitializer
from ax.models.torch.botorch import BotorchModel
from ax.models.torch.botorch_defaults import get_qLogNEI
from ax.models.torch.botorch_defaults import ei_or_nei
from ax.models.torch.utils import _datasets_to_legacy_inputs

from ax.models.torch_base import TorchGenResults, TorchModel, TorchOptConfig
from ax.utils.common.docutils import copy_doc
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.analytic import ExpectedImprovement
from botorch.acquisition.objective import PosteriorTransform
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.gpytorch import GPyTorchModel
@@ -264,7 +264,7 @@ def __init__(self, B: Tensor, batch_shape: torch.Size) -> None:
has_lengthscale=False, ard_num_dims=None, eps=0.0, batch_shape=batch_shape
)
warn(
"ALEBOKernel is deprecated and should be removed in Ax 0.5.0.",
"ALEBOKernel is deprecated and should be removed in Ax 0.3.9.",
DeprecationWarning,
)
# pyre-fixme[4]: Attribute must be annotated.
@@ -337,7 +337,7 @@ def __init__(
self, B: Tensor, train_X: Tensor, train_Y: Tensor, train_Yvar: Tensor
) -> None:
warn(
"ALEBOGP is deprecated and should be removed in Ax 0.5.0. SAASBO "
"ALEBOGP is deprecated and should be removed in Ax 0.3.9. SAASBO "
"(Models.SAASBO from ax.modelbridge.registry) likely provides better "
"performance.",
DeprecationWarning,
@@ -433,7 +433,7 @@ def get_fitted_model(
"""
warn(
"`get_fitted_model` from ax.models.torch.alebo.py is deprecated and "
"should be removed in Ax 0.5.0.",
"should be removed in Ax 0.3.9.",
DeprecationWarning,
)
# Get MAP estimate.
@@ -484,7 +484,7 @@ def get_map_model(
"""
warn(
"`get_map_model` from ax.models.torch.alebo.py is deprecated and should "
"be removed in Ax 0.5.0.",
"be removed in Ax 0.3.9.",
DeprecationWarning,
)
f_best = 1e8
@@ -530,7 +530,7 @@ def laplace_sample_U(
and output scale.
"""
warn(
"laplace_sample_U is deprecated and should be removed in Ax 0.5.0.",
"laplace_sample_U is deprecated and should be removed in Ax 0.3.9.",
DeprecationWarning,
)
# Estimate diagonal of the Hessian
@@ -600,7 +600,7 @@ def get_batch_model(
"""
warn(
"`get_batch_model` from ax.models.torch.alebo.py is deprecated and "
"should be removed in Ax 0.5.0.",
"should be removed in Ax 0.3.9.",
DeprecationWarning,
)
b = Uvec_batch.size(0)
@@ -641,7 +641,7 @@ def extract_map_statedict(
"""
warn(
"`extract_map_statedict` from ax.models.torch.alebo.py is deprecated and "
"should be removed in Ax 0.5.0.",
"should be removed in Ax 0.3.9.",
DeprecationWarning,
)
is_modellist = num_outputs > 1
@@ -666,62 +666,6 @@ def extract_map_statedict(
return map_sds


def ei_or_nei(
model: Union[ALEBOGP, ModelListGP],
objective_weights: Tensor,
outcome_constraints: Optional[Tuple[Tensor, Tensor]],
X_observed: Tensor,
X_pending: Optional[Tensor],
q: int,
noiseless: bool,
) -> AcquisitionFunction:
"""Use analytic EI if appropriate, otherwise Monte Carlo NEI.

Analytic EI can be used if: Single outcome, no constraints, no pending
points, not batch, and no noise.

Args:
model: GP.
objective_weights: Weights on each outcome for the objective.
outcome_constraints: Outcome constraints.
X_observed: Observed points for NEI.
X_pending: Pending points.
q: Batch size.
noiseless: True if evaluations are noiseless.

Returns: An AcquisitionFunction, either analytic EI or MC NEI.
"""
warn(
"`ei_or_nei` from ax.models.torch.alebo.py is deprecated and should be "
"removed in Ax 0.5.0.",
DeprecationWarning,
)
if (
len(objective_weights) == 1
and outcome_constraints is None
and X_pending is None
and q == 1
and noiseless
):
maximize = objective_weights[0] > 0
if maximize:
best_f = model.train_targets.max()
else:
best_f = model.train_targets.min()
# pyre-fixme[6]: For 3rd param expected `bool` but got `Tensor`.
return ExpectedImprovement(model=model, best_f=best_f, maximize=maximize)
else:
with gpytorch.settings.max_cholesky_size(2000):
acq = get_qLogNEI(
model=model,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
X_observed=X_observed,
X_pending=X_pending,
)
return acq


def alebo_acqf_optimizer(
acq_function: AcquisitionFunction,
bounds: Tensor,
@@ -741,7 +685,7 @@ def alebo_acqf_optimizer(
lie within that polytope.
"""
warn(
"`alebo_acqf_optimizer` is deprecated and should be removed in Ax 0.5.0.",
"`alebo_acqf_optimizer` is deprecated and should be removed in Ax 0.3.9.",
DeprecationWarning,
)
candidate_list, acq_value_list = [], []
@@ -819,7 +763,7 @@ def __init__(
self, B: Tensor, laplace_nsamp: int = 25, fit_restarts: int = 10
) -> None:
warn(
"ALEBO is deprecated and should be removed in Ax 0.5.0.",
"ALEBO is deprecated and should be removed in Ax 0.3.9.",
DeprecationWarning,
)
self.B = B
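The hunks above all follow the same pattern: each deprecated ALEBO entry point calls `warn(..., DeprecationWarning)` naming the planned removal release, and this PR only moves that release from 0.5.0 to 0.3.9. Below is a minimal, hypothetical sketch of that pattern plus a test that pins the release string so it cannot drift silently; `old_helper` and the test are illustrative stand-ins, not code from this PR.

```python
# Illustrative sketch only, not code from this PR. It mirrors the pattern used
# throughout alebo.py above: emit a DeprecationWarning that names the planned
# removal release, and pin that release in a test so it cannot drift silently.
# `old_helper` is a hypothetical stand-in for the deprecated Ax callables.
from warnings import warn

import pytest


def old_helper() -> int:
    warn(
        "`old_helper` is deprecated and should be removed in Ax 0.3.9.",
        DeprecationWarning,
        stacklevel=2,
    )
    return 42


def test_old_helper_warns_with_pinned_removal_release() -> None:
    # pytest.warns fails the test if no matching DeprecationWarning is raised.
    with pytest.warns(DeprecationWarning, match=r"Ax 0\.3\.9"):
        assert old_helper() == 42
```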
60 changes: 60 additions & 0 deletions ax/models/torch/botorch_defaults.py
@@ -10,6 +10,9 @@
from copy import deepcopy
from random import randint
from typing import Any, Callable, Dict, List, Optional, Protocol, Tuple, Type, Union
from warnings import warn

import gpytorch

import torch
from ax.models.model_utils import best_observed_point, get_observed
@@ -18,6 +21,7 @@
from ax.models.types import TConfig
from botorch.acquisition import get_acquisition_function
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.analytic import ExpectedImprovement
from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
from botorch.acquisition.objective import ConstrainedMCObjective, GenericMCObjective
from botorch.acquisition.utils import get_infeasible_cost
@@ -892,3 +896,59 @@ def get_warping_transform(
batch_shape=batch_shape,
)
return tf


def ei_or_nei(
model: ModelListGP,
objective_weights: Tensor,
outcome_constraints: Optional[Tuple[Tensor, Tensor]],
X_observed: Tensor,
X_pending: Optional[Tensor],
q: int,
noiseless: bool,
) -> AcquisitionFunction:
"""Use analytic EI if appropriate, otherwise Monte Carlo NEI.

Analytic EI can be used if: Single outcome, no constraints, no pending
points, not batch, and no noise.

Args:
model: GP.
objective_weights: Weights on each outcome for the objective.
outcome_constraints: Outcome constraints.
X_observed: Observed points for NEI.
X_pending: Pending points.
q: Batch size.
noiseless: True if evaluations are noiseless.

Returns: An AcquisitionFunction, either analytic EI or MC NEI.
"""
warn(
"`ei_or_nei` from ax.models.torch.alebo.py is deprecated and should be "
"removed in Ax 0.3.9.",
DeprecationWarning,
)
if (
len(objective_weights) == 1
and outcome_constraints is None
and X_pending is None
and q == 1
and noiseless
):
# TODO: Check if this is correct @no-commit
maximize = bool(objective_weights[0] > 0)
if maximize:
best_f = model.train_targets.max()
else:
best_f = model.train_targets.min()
return ExpectedImprovement(model=model, best_f=best_f, maximize=maximize)
else:
with gpytorch.settings.max_cholesky_size(2000):
acq = get_qLogNEI(
model=model,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
X_observed=X_observed,
X_pending=X_pending,
)
return acq
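For context on the function relocated in the hunk above: `ei_or_nei` returns analytic `ExpectedImprovement` only when there is a single noiseless outcome, no constraints, no pending points, and q=1; otherwise it falls back to the Monte Carlo `qLogNEI` path. A rough usage sketch against a toy `SingleTaskGP` follows (the data and the direct call are assumptions for illustration; Ax normally constructs these inputs internally).

```python
# Rough usage sketch, not part of this PR. The toy data and the direct call are
# assumptions for illustration; Ax normally builds these inputs internally.
import torch
from botorch.models.gp_regression import SingleTaskGP

from ax.models.torch.botorch_defaults import ei_or_nei  # new location after this PR

train_X = torch.rand(8, 3, dtype=torch.double)   # 8 observed points in 3 dims
train_Y = train_X.sum(dim=-1, keepdim=True)      # single noiseless outcome
model = SingleTaskGP(train_X, train_Y)

# Single outcome, no constraints, no pending points, q=1, noiseless:
# the analytic ExpectedImprovement branch is taken.
acqf = ei_or_nei(
    model=model,
    objective_weights=torch.tensor([1.0], dtype=torch.double),
    outcome_constraints=None,
    X_observed=train_X,
    X_pending=None,
    q=1,
    noiseless=True,
)
print(type(acqf).__name__)  # ExpectedImprovement

# Any batch size q > 1 (or noise, constraints, pending points) falls back to
# the Monte Carlo path built by get_qLogNEI.
acqf_mc = ei_or_nei(
    model=model,
    objective_weights=torch.tensor([1.0], dtype=torch.double),
    outcome_constraints=None,
    X_observed=train_X,
    X_pending=None,
    q=2,
    noiseless=True,
)
print(type(acqf_mc).__name__)  # an MC log-NEI acquisition
```

Both calls also emit the function's own `DeprecationWarning`, consistent with the rest of this diff.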
2 changes: 1 addition & 1 deletion ax/models/torch/cbo_lcea.py
@@ -11,8 +11,8 @@

from ax.core.search_space import SearchSpaceDigest
from ax.core.types import TCandidateMetadata
from ax.models.torch.alebo import ei_or_nei
from ax.models.torch.botorch import BotorchModel
from ax.models.torch.botorch_defaults import ei_or_nei
from ax.models.torch.cbo_sac import generate_model_space_decomposition
from ax.models.torch_base import TorchModel, TorchOptConfig
from ax.utils.common.docutils import copy_doc
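Since this hunk only changes the import location of `ei_or_nei`, downstream code that must work across Ax releases on both sides of this move can use a guarded import. A small compatibility sketch, an assumption rather than something this PR adds:

```python
# Compatibility sketch, an assumption rather than something this PR adds:
# try the new module first and fall back to the old one for older Ax releases.
try:
    from ax.models.torch.botorch_defaults import ei_or_nei  # location after this PR
except ImportError:
    from ax.models.torch.alebo import ei_or_nei  # location in older Ax releases
```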
2 changes: 1 addition & 1 deletion ax/models/torch/rembo.py
@@ -49,7 +49,7 @@ def __init__(
) -> None:
warn(
"REMBO is deprecated and does not guarantee correctness. "
"It will be removed in Ax 0.5.0.",
"It will be removed in Ax 0.3.9.",
DeprecationWarning,
)
self.A = A
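One practical note on the warnings touched across all four files: Python ignores `DeprecationWarning` by default outside `__main__`, so callers who still depend on ALEBO or REMBO may never see the revised 0.3.9 removal target. A hedged migration sketch for surfacing or escalating these warnings (the filter patterns are assumptions based on the message text in this diff):

```python
# Migration sketch, an assumption rather than part of this PR. Python hides
# DeprecationWarning by default outside __main__, so opt in explicitly to see
# (or fail on) the warnings whose removal target this PR moves to Ax 0.3.9.
import warnings

# Surface the warnings so the revised removal release shows up in logs.
warnings.filterwarnings("default", category=DeprecationWarning)

# Or, in CI, turn exactly these messages into errors to force the migration
# (e.g. to Models.SAASBO, as suggested by the ALEBOGP warning above).
warnings.filterwarnings(
    "error",
    category=DeprecationWarning,
    message=r".*removed in Ax 0\.3\.9.*",
)
```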