Make BoTorch test problems into ParamBasedTestProblems (#2944)
Summary:

Context:

In the benchmarks, we currently have a zoo of five runners designed to handle different ways of generating data. This makes it challenging to support other changes to runners, such as running trials asynchronously. This diff is a step toward consolidating them into one runner by abstracting the data-generating process into a test problem.

* `BotorchTestProblemRunner` has a BoTorch problem
* `ParamBasedTestProblemRunner` has a `ParamBasedTestProblem`
* `SurrogateRunner` has a `SurrogateProblem`
* `BotorchTestProblemRunner` and `ParamBasedTestProblemRunner` subclass `BenchmarkRunner`
* `SurrogateRunner` subclasses `BenchmarkRunner`

This diff enables going from five runners to three by wrapping BoTorch problems in a `ParamBasedTestProblem`. That will allow `BotorchTestProblemRunner`, `ParamBasedTestProblemRunner`, and `SyntheticProblemRunner` to be combined.
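
For reference, a condensed sketch of the `ParamBasedTestProblem` base class from the `botorch_test.py` diff below (the full class also declares `evaluate_slack_true` for constrained problems):

```python
from abc import ABC, abstractmethod
from collections.abc import Mapping
from dataclasses import dataclass

from ax.core.types import TParamValue
from torch import Tensor


@dataclass(kw_only=True)
class ParamBasedTestProblem(ABC):
    """Generates deterministic benchmark data from an Ax parameterization.

    Noise, if desired, is added by the runner.
    """

    num_objectives: int

    @abstractmethod
    def evaluate_true(self, params: Mapping[str, TParamValue]) -> Tensor:
        """Return noiseless objective value(s) for one parameterization."""
        ...
```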

This PR:
* Introduces `BoTorchTestProblem`, which is a `ParamBasedTestProblem` and can therefore be used with `ParamBasedTestProblemRunner`. It localizes tensor-related logic, including the `modified_bounds` projection, in the BoTorch problem so that the runner does not need to handle it (see the usage sketch after this list).
* Gets rid of `SyntheticProblemRunner`, merging it with `ParamBasedTestProblemRunner`. We may want to rename this class after more consolidation.
* Makes `BotorchTestProblemRunner` an alias of `ParamBasedTestProblemRunner`, preserving the old name.
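
A usage sketch of the new pieces, mirroring how `create_problem_from_botorch` and the mixed-integer problems construct runners in this diff (the `Branin` instance and the outcome name are illustrative):

```python
from ax.benchmark.runners.botorch_test import (
    BoTorchTestProblem,
    ParamBasedTestProblemRunner,
)
from botorch.test_functions.synthetic import Branin

# Wrap the BoTorch problem; tensor handling, including any
# `modified_bounds` projection, now lives in the wrapper.
test_problem = BoTorchTestProblem(botorch_problem=Branin())

# The runner is agnostic to how the deterministic data are generated.
runner = ParamBasedTestProblemRunner(
    test_problem=test_problem,
    outcome_names=["branin"],
)

# Noiseless evaluation of a single parameterization.
y = test_problem.evaluate_true({"x1": 1.0, "x2": 1.0})
```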

Reviewed By: Balandat

Differential Revision: D63639190
esantorella authored and facebook-github-bot committed Oct 24, 2024
1 parent e63b462 commit d14cbc7
Showing 7 changed files with 221 additions and 207 deletions.
10 changes: 6 additions & 4 deletions ax/benchmark/benchmark_problem.py
@@ -13,7 +13,10 @@

from ax.benchmark.benchmark_metric import BenchmarkMetric
from ax.benchmark.runners.base import BenchmarkRunner
from ax.benchmark.runners.botorch_test import BotorchTestProblemRunner
from ax.benchmark.runners.botorch_test import (
BoTorchTestProblem,
ParamBasedTestProblemRunner,
)
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.objective import MultiObjective, Objective
@@ -384,9 +387,8 @@ def create_problem_from_botorch(
name=name,
search_space=search_space,
optimization_config=optimization_config,
runner=BotorchTestProblemRunner(
# pyre-ignore[45]: Can't instantiate abstract class
test_problem=test_problem_class(**test_problem_kwargs),
runner=ParamBasedTestProblemRunner(
test_problem=BoTorchTestProblem(botorch_problem=test_problem),
outcome_names=outcome_names,
search_space_digest=extract_search_space_digest(
search_space=search_space,
12 changes: 9 additions & 3 deletions ax/benchmark/problems/synthetic/discretized/mixed_integer.py
@@ -21,7 +21,10 @@
from ax.benchmark.benchmark_metric import BenchmarkMetric

from ax.benchmark.benchmark_problem import BenchmarkProblem
from ax.benchmark.runners.botorch_test import BotorchTestProblemRunner
from ax.benchmark.runners.botorch_test import (
BoTorchTestProblem,
ParamBasedTestProblemRunner,
)
from ax.core.objective import Objective
from ax.core.optimization_config import OptimizationConfig
from ax.core.parameter import ParameterType, RangeParameter
@@ -101,8 +104,11 @@ def _get_problem_from_common_inputs(
test_problem = test_problem_class(dim=dim)
else:
test_problem = test_problem_class(dim=dim, bounds=test_problem_bounds)
runner = BotorchTestProblemRunner(
test_problem=test_problem, outcome_names=[metric_name], modified_bounds=bounds
runner = ParamBasedTestProblemRunner(
test_problem=BoTorchTestProblem(
botorch_problem=test_problem, modified_bounds=bounds
),
outcome_names=[metric_name],
)
return BenchmarkProblem(
name=benchmark_name + ("_observed_noise" if observe_noise_sd else ""),
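
To make the `modified_bounds` projection concrete, here is a minimal sketch of the transform that `BoTorchTestProblem.tensorize_params` applies, using illustrative one-dimensional bounds:

```python
import torch
from botorch.utils.transforms import normalize, unnormalize

# The Ax search space uses integers in [0, 10]; the test problem
# is defined on [0, 1].
modified_bounds = [(0.0, 10.0)]
problem_bounds = torch.tensor([[0.0], [1.0]], dtype=torch.double)  # 2 x d

X = torch.tensor([5.0], dtype=torch.double)
# Normalize from the modified bounds to the unit cube ...
unit_X = normalize(X, torch.tensor(modified_bounds, dtype=torch.double).T)
# ... then unnormalize to the problem's own bounds.
X_eval = unnormalize(unit_X, problem_bounds)  # tensor([0.5000])
```

An Ax parameter value of 5 is thus evaluated at 0.5, as described in the `modified_bounds` docstring in the `botorch_test.py` diff.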
2 changes: 1 addition & 1 deletion ax/benchmark/problems/synthetic/hss/jenatton.py
@@ -63,7 +63,7 @@ def evaluate_true(self, params: Mapping[str, float | int | None]) -> torch.Tensor:
# `jenatton_test_function`, for 1st positional argument, expected
# `Optional[float]` but got `Union[None, bool, float, int, str]`.
value = jenatton_test_function(**params)
return torch.tensor(value)
return torch.tensor(value, dtype=torch.double)


def get_jenatton_benchmark_problem(
231 changes: 97 additions & 134 deletions ax/benchmark/runners/botorch_test.py
@@ -7,14 +7,14 @@

from abc import ABC, abstractmethod
from collections.abc import Mapping
from dataclasses import asdict, dataclass
from dataclasses import dataclass
from itertools import islice

import torch
from ax.benchmark.runners.base import BenchmarkRunner
from ax.core.search_space import SearchSpaceDigest
from ax.core.types import TParamValue
from ax.utils.common.base import Base
from ax.utils.common.equality import equality_typechecker
from ax.exceptions.core import UnsupportedError
from botorch.test_functions.multi_objective import MultiObjectiveTestProblem
from botorch.test_functions.synthetic import BaseTestProblem, ConstrainedBaseTestProblem
from botorch.utils.transforms import normalize, unnormalize
from torch import Tensor
@@ -23,8 +23,9 @@
@dataclass(kw_only=True)
class ParamBasedTestProblem(ABC):
"""
Similar to a BoTorch test problem, but evaluated using an Ax
TParameterization rather than a tensor.
The basic Ax class for generating deterministic data to benchmark against.
(Noise - if desired - is added by the runner.)
"""

num_objectives: int
@@ -46,17 +46,12 @@ def evaluate_slack_true(self, params: Mapping[str, TParamValue]) -> Tensor:


@dataclass(kw_only=True)
class SyntheticProblemRunner(BenchmarkRunner, ABC):
"""A Runner for evaluating synthetic problems, either BoTorch
`BaseTestProblem`s or Ax benchmarking `ParamBasedTestProblem`s.
Given a trial, the Runner will evaluate the problem noiselessly for each
arm in the trial, as well as return some metadata about the underlying
problem such as the noise_std.
class BoTorchTestProblem(ParamBasedTestProblem):
"""
Class for generating data from a BoTorch ``BaseTestProblem``.
Args:
test_problem: A BoTorch `BaseTestProblem` or Ax `ParamBasedTestProblem`.
outcome_names: The names of the outcomes returned by the problem.
botorch_problem: The BoTorch ``BaseTestProblem``.
modified_bounds: The bounds that are used by the Ax search space
while optimizing the problem. If different from the bounds of the
test problem, we project the parameters into the test problem
@@ -66,18 +62,95 @@ class SyntheticProblemRunner(BenchmarkRunner, ABC):
5 will correspond to 0.5 while evaluating the test problem.
If modified bounds are not provided, the test problem will be
evaluated using the raw parameter values.
search_space_digest: Used to extract target fidelity and task.
num_objectives: The number of objectives.
"""

test_problem: BaseTestProblem | ParamBasedTestProblem
botorch_problem: BaseTestProblem
modified_bounds: list[tuple[float, float]] | None = None
constraint_noise_std: float | list[float] | None = None
num_objectives: int = 1

def __post_init__(self) -> None:
if isinstance(self.botorch_problem, MultiObjectiveTestProblem):
self.num_objectives = self.botorch_problem.num_objectives
if self.botorch_problem.noise_std is not None:
raise ValueError(
"noise_std should be set on the runner, not the test problem."
)
if getattr(self.botorch_problem, "constraint_noise_std", None) is not None:
raise ValueError(
"constraint_noise_std should be set on the runner, not the test "
"problem."
)
if self.botorch_problem.negate:
raise ValueError(
"negate should be set on the runner, not the test problem."
)
self.botorch_problem = self.botorch_problem.to(dtype=torch.double)

def tensorize_params(self, params: Mapping[str, int | float]) -> torch.Tensor:
X = torch.tensor(
list(islice(params.values(), self.botorch_problem.dim)),
dtype=torch.double,
)

if self.modified_bounds is not None:
# Normalize from modified bounds to unit cube.
unit_X = normalize(
X, torch.tensor(self.modified_bounds, dtype=torch.double).T
)
# Unnormalize from unit cube to original problem bounds.
X = unnormalize(unit_X, self.botorch_problem.bounds)
return X

# pyre-fixme [14]: inconsistent override
def evaluate_true(self, params: Mapping[str, float | int]) -> torch.Tensor:
x = self.tensorize_params(params=params)
return self.botorch_problem.evaluate_true(x)

# pyre-fixme [14]: inconsistent override
def evaluate_slack_true(self, params: Mapping[str, float | int]) -> torch.Tensor:
if not isinstance(self.botorch_problem, ConstrainedBaseTestProblem):
raise UnsupportedError(
"`evaluate_slack_true` is only supported when the BoTorch "
"problem is a `ConstrainedBaseTestProblem`."
)
# todo: could return x so as to not recompute
# or could do both methods together, track indices of outcomes,
# and only negate the non-constraints
x = self.tensorize_params(params=params)
return self.botorch_problem.evaluate_slack_true(x)


@dataclass(kw_only=True)
class ParamBasedTestProblemRunner(BenchmarkRunner):
"""
A Runner for evaluating `ParamBasedTestProblem`s.
Given a trial, the Runner will use its `test_problem` to evaluate the
problem noiselessly for each arm in the trial, and then add noise as
specified by the `noise_std` and `constraint_noise_std`. It will return
metadata including the outcome names and values of metrics.
Args:
outcome_names: The names of the outcomes returned by the problem.
search_space_digest: Used to extract target fidelity and task.
test_problem: A ``ParamBasedTestProblem`` from which to generate
deterministic data before adding noise.
noise_std: The standard deviation of the noise added to the data. Can be
a list to be per-metric.
negate: Whether to negate the outcome.
"""

test_problem: ParamBasedTestProblem
noise_std: float | list[float] | None = None
constraint_noise_std: float | list[float] | None = None
negate: bool = False

@property
def _is_constrained(self) -> bool:
return isinstance(self.test_problem, ConstrainedBaseTestProblem)
return isinstance(self.test_problem, BoTorchTestProblem) and isinstance(
self.test_problem.botorch_problem, ConstrainedBaseTestProblem
)

def get_noise_stds(self) -> None | float | dict[str, float]:
noise_std = self.noise_std
@@ -116,134 +189,24 @@ def get_noise_stds(self) -> None | float | dict[str, float]:

return noise_std_dict


@dataclass(kw_only=True)
class BotorchTestProblemRunner(SyntheticProblemRunner):
"""
A `SyntheticProblemRunner` for BoTorch `BaseTestProblem`s.
Args:
test_problem: A BoTorch `BaseTestProblem`.
outcome_names: The names of the outcomes returned by the problem.
modified_bounds: The bounds that are used by the Ax search space
while optimizing the problem. If different from the bounds of the
test problem, we project the parameters into the test problem
bounds before evaluating the test problem.
For example, if the test problem is defined on [0, 1] but the Ax
search space is integers in [0, 10], an Ax parameter value of
5 will correspond to 0.5 while evaluating the test problem.
If modified bounds are not provided, the test problem will be
evaluated using the raw parameter values.
search_space_digest: Used to extract target fidelity and task.
"""

test_problem: BaseTestProblem

def __post_init__(self, search_space_digest: SearchSpaceDigest | None) -> None:
super().__post_init__(search_space_digest=search_space_digest)
if self.test_problem.noise_std is not None:
raise ValueError(
"noise_std should be set on the runner, not the test problem."
)
if (
hasattr(self.test_problem, "constraint_noise_std")
and self.test_problem.constraint_noise_std is not None
):
raise ValueError(
"constraint_noise_std should be set on the runner, not the test "
"problem."
)
if self.test_problem.negate:
raise ValueError(
"negate should be set on the runner, not the test problem."
)
self.test_problem = self.test_problem.to(dtype=torch.double)

def get_Y_true(self, params: Mapping[str, TParamValue]) -> Tensor:
"""
Convert the arm to a tensor and evaluate it on the base test problem.
Convert the tensor to original bounds -- only if modified bounds were
provided -- and evaluates the test problem. See the docstring for
`modified_bounds` in `BotorchTestProblemRunner.__init__` for details.
Args:
params: Parameterization to evaluate. It will be converted to a
`batch_shape x d`-dim tensor of point(s) at which to evaluate the
test problem.
"""Evaluates the test problem.
Returns:
A `batch_shape x m`-dim tensor of ground truth (noiseless) evaluations.
"""
X = torch.tensor(
[value for _key, value in [*params.items()][: self.test_problem.dim]],
dtype=torch.double,
)

if self.modified_bounds is not None:
# Normalize from modified bounds to unit cube.
unit_X = normalize(
X, torch.tensor(self.modified_bounds, dtype=torch.double).T
)
# Unnormalize from unit cube to original problem bounds.
X = unnormalize(unit_X, self.test_problem.bounds)

Y_true = self.test_problem.evaluate_true(X).view(-1)
# `BaseTestProblem.evaluate_true()` does not negate the outcome
Y_true = self.test_problem.evaluate_true(params).view(-1)
# `ParamBasedTestProblem.evaluate_true()` does not negate the outcome
if self.negate:
Y_true = -Y_true

if self._is_constrained:
# Convention: Concatenate objective and black box constraints. `view()`
# makes the inputs 1d, so the resulting `Y_true` are also 1d.
Y_true = torch.cat(
[Y_true, self.test_problem.evaluate_slack_true(X).view(-1)],
[Y_true, self.test_problem.evaluate_slack_true(params).view(-1)],
dim=-1,
)

return Y_true

@equality_typechecker
def __eq__(self, other: Base) -> bool:
"""
Compare equality by comparing dicts, except for `test_problem`.
Dataclasses are compared by comparing the results of calling asdict on
them. However, equality checks don't work as needed with BoTorch test
problems, e.g. Branin() == Branin() is False. To get around that, the
test problem is stripped from the dictionary. This doesn't make the
check less sensitive, as long as the problem has not been modified,
because the test problem class and keyword arguments will still be
compared.
"""
if not isinstance(other, type(self)):
return False
self_as_dict = asdict(self)
other_as_dict = asdict(other)
self_as_dict.pop("test_problem")
other_as_dict.pop("test_problem")
return (self_as_dict == other_as_dict) and (
type(self.test_problem) is type(other.test_problem)
)


@dataclass(kw_only=True)
class ParamBasedTestProblemRunner(SyntheticProblemRunner):
"""
A `SyntheticProblemRunner` for `ParamBasedTestProblem`s. See
`SyntheticProblemRunner` for more information.
"""

test_problem: ParamBasedTestProblem

def get_Y_true(self, params: Mapping[str, TParamValue]) -> Tensor:
"""Evaluates the test problem.
Returns:
A `batch_shape x m`-dim tensor of ground truth (noiseless) evaluations.
"""
Y_true = self.test_problem.evaluate_true(params).view(-1)
# `ParamBasedTestProblem.evaluate_true()` does not negate the outcome
if self.negate:
Y_true = -Y_true
return Y_true
BotorchTestProblemRunner = ParamBasedTestProblemRunner