Rename test_problem attribute of BenchmarkRunner to `test_function`, because it is a `BenchmarkTestFunction`

Summary: Rename `test_problem` attribute of `BenchmarkRunner` to `test_function`, because it is a `BenchmarkTestFunction`

Differential Revision: D65088791
esantorella authored and facebook-github-bot committed Oct 30, 2024
1 parent 69e877f commit d5510c6
Showing 7 changed files with 40 additions and 36 deletions.
6 changes: 3 additions & 3 deletions ax/benchmark/benchmark_runner.py
@@ -48,15 +48,15 @@ class BenchmarkRunner(Runner):
Args:
outcome_names: The names of the outcomes returned by the problem.
- test_problem: A ``BenchmarkTestFunction`` from which to generate
+ test_function: A ``BenchmarkTestFunction`` from which to generate
deterministic data before adding noise.
noise_std: The standard deviation of the noise added to the data. Can be
a list or dict to be per-metric.
search_space_digest: Used to extract target fidelity and task.
"""

outcome_names: list[str]
- test_problem: BenchmarkTestFunction
+ test_function: BenchmarkTestFunction
noise_std: float | list[float] | dict[str, float] = 0.0
# pyre-fixme[16]: Pyre doesn't understand InitVars
search_space_digest: InitVar[SearchSpaceDigest | None] = None
@@ -77,7 +77,7 @@ def get_Y_true(self, params: Mapping[str, TParamValue]) -> Tensor:
Returns:
An `m`-dim tensor of ground truth (noiseless) evaluations.
"""
- return torch.atleast_1d(self.test_problem.evaluate_true(params=params))
+ return torch.atleast_1d(self.test_function.evaluate_true(params=params))

def evaluate_oracle(self, parameters: Mapping[str, TParamValue]) -> npt.NDArray:
"""
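For context, here is a minimal sketch of constructing a runner with the renamed keyword, mirroring the signature shown in the diff above. The `BoTorchTestFunction` import path and the `Hartmann` setup are assumptions for illustration and may differ from the actual module layout at this revision.

```python
# Illustrative sketch only: the BoTorchTestFunction import path is an assumption;
# BenchmarkRunner's path matches ax/benchmark/benchmark_runner.py from this diff.
from ax.benchmark.benchmark_runner import BenchmarkRunner
from ax.benchmark.runners.botorch_test import BoTorchTestFunction  # assumed path
from botorch.test_functions.synthetic import Hartmann

runner = BenchmarkRunner(
    # Keyword renamed in this commit: `test_problem` -> `test_function`.
    test_function=BoTorchTestFunction(botorch_problem=Hartmann(dim=6)),
    outcome_names=["objective_0"],
    noise_std=0.1,  # standard deviation of the noise added to the noiseless values
)
# Ground-truth evaluation now goes through the renamed attribute:
# runner.get_Y_true(params) calls runner.test_function.evaluate_true(params=params).
```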
2 changes: 1 addition & 1 deletion ax/benchmark/problems/hpo/torchvision.py
@@ -215,7 +215,7 @@ def get_pytorch_cnn_torchvision_benchmark_problem(
objective_name="accuracy",
)
runner = BenchmarkRunner(
- test_problem=PyTorchCNNTorchvisionBenchmarkTestFunction(name=name),
+ test_function=PyTorchCNNTorchvisionBenchmarkTestFunction(name=name),
outcome_names=outcome_names,
)
return BenchmarkProblem(
2 changes: 1 addition & 1 deletion ax/benchmark/problems/synthetic/hss/jenatton.py
@@ -119,7 +119,7 @@ def get_jenatton_benchmark_problem(
search_space=search_space,
optimization_config=optimization_config,
runner=BenchmarkRunner(
- test_problem=Jenatton(), outcome_names=[name], noise_std=noise_std
+ test_function=Jenatton(), outcome_names=[name], noise_std=noise_std
),
num_trials=num_trials,
observe_noise_stds=observe_noise_sd,
14 changes: 9 additions & 5 deletions ax/benchmark/tests/problems/test_mixed_integer_problems.py
@@ -35,8 +35,10 @@ def test_problems(self) -> None:
problem = constructor()
self.assertEqual(f"Discrete {name}", problem.name)
runner = problem.runner
- test_problem = assert_is_instance(runner.test_problem, BoTorchTestFunction)
- botorch_problem = test_problem.botorch_problem
+ test_function = assert_is_instance(
+     runner.test_function, BoTorchTestFunction
+ )
+ botorch_problem = test_function.botorch_problem
self.assertIsInstance(botorch_problem, problem_cls)
self.assertEqual(len(problem.search_space.parameters), dim)
self.assertEqual(
@@ -97,17 +99,19 @@ def test_problems(self) -> None:

for problem, params, expected_arg in cases:
runner = problem.runner
- test_problem = assert_is_instance(runner.test_problem, BoTorchTestFunction)
+ test_function = assert_is_instance(
+     runner.test_function, BoTorchTestFunction
+ )
trial = Trial(experiment=MagicMock())
# pyre-fixme: Incompatible parameter type [6]: In call
# `Arm.__init__`, for argument `parameters`, expected `Dict[str,
# Union[None, bool, float, int, str]]` but got `dict[str, float]`.
arm = Arm(parameters=params, name="--")
trial.add_arm(arm)
with patch.object(
- test_problem.botorch_problem,
+ test_function.botorch_problem,
attribute="evaluate_true",
- wraps=test_problem.botorch_problem.evaluate_true,
+ wraps=test_function.botorch_problem.evaluate_true,
) as mock_call:
runner.run(trial)
actual = mock_call.call_args.kwargs["X"]
46 changes: 23 additions & 23 deletions ax/benchmark/tests/runners/test_botorch_test_problem.py
@@ -137,32 +137,32 @@ def test_synthetic_runner(self) -> None:
(get_soo_surrogate_test_function(lazy=False), noise_std, 1)
for noise_std in (0.0, 1.0, [0.0], [1.0])
]
- for test_problem, noise_std, num_outcomes in (
+ for test_function, noise_std, num_outcomes in (
botorch_cases + param_based_cases + surrogate_cases
):
# Set up outcome names
- if isinstance(test_problem, BoTorchTestFunction):
-     if isinstance(test_problem.botorch_problem, ConstrainedHartmann):
+ if isinstance(test_function, BoTorchTestFunction):
+     if isinstance(test_function.botorch_problem, ConstrainedHartmann):
outcome_names = ["objective_0", "constraint"]
else:
outcome_names = ["objective_0"]
- elif isinstance(test_problem, DummyTestFunction):
+ elif isinstance(test_function, DummyTestFunction):
outcome_names = [f"objective_{i}" for i in range(num_outcomes)]
else: # SurrogateTestFunction
outcome_names = ["branin"]

# Set up runner
runner = BenchmarkRunner(
- test_problem=test_problem,
+ test_function=test_function,
outcome_names=outcome_names,
noise_std=noise_std,
)

- test_description = f"{test_problem=}, {noise_std=}"
+ test_description = f"{test_function=}, {noise_std=}"
with self.subTest(
f"Test basic construction, {test_problem=}, {noise_std=}"
f"Test basic construction, {test_function=}, {noise_std=}"
):
- self.assertIs(runner.test_problem, test_problem)
+ self.assertIs(runner.test_function, test_function)
self.assertEqual(runner.outcome_names, outcome_names)
if isinstance(noise_std, list):
self.assertEqual(
@@ -177,17 +177,17 @@ def test_synthetic_runner(self) -> None:

# check equality
new_runner = replace(
- runner, test_problem=BoTorchTestFunction(botorch_problem=Ackley())
+ runner, test_function=BoTorchTestFunction(botorch_problem=Ackley())
)
self.assertNotEqual(runner, new_runner)

self.assertEqual(runner, runner)
- if isinstance(test_problem, BoTorchTestFunction):
+ if isinstance(test_function, BoTorchTestFunction):
self.assertEqual(
- test_problem.botorch_problem.bounds.dtype, torch.double
+ test_function.botorch_problem.bounds.dtype, torch.double
)

- is_botorch = isinstance(test_problem, BoTorchTestFunction)
+ is_botorch = isinstance(test_function, BoTorchTestFunction)
with self.subTest(f"test `get_Y_true()`, {test_description}"):
dim = 6 if is_botorch else 9
X = torch.rand(1, dim, dtype=torch.double)
@@ -202,11 +202,11 @@ def test_synthetic_runner(self) -> None:

with (
nullcontext()
- if not isinstance(test_problem, SurrogateTestFunction)
+ if not isinstance(test_function, SurrogateTestFunction)
else patch.object(
# pyre-fixme: BenchmarkTestFunction` has no attribute
# `_surrogate`.
- runner.test_problem._surrogate,
+ runner.test_function._surrogate,
"predict",
return_value=({"branin": [4.2]}, None),
)
@@ -215,19 +215,19 @@ def test_synthetic_runner(self) -> None:
oracle = runner.evaluate_oracle(parameters=params)

if (
- isinstance(test_problem, BoTorchTestFunction)
- and test_problem.modified_bounds is not None
+ isinstance(test_function, BoTorchTestFunction)
+ and test_function.modified_bounds is not None
):
X_tf = normalize(
X,
torch.tensor(
- test_problem.modified_bounds, dtype=torch.double
+ test_function.modified_bounds, dtype=torch.double
).T,
)
else:
X_tf = X
- if isinstance(test_problem, BoTorchTestFunction):
-     botorch_problem = test_problem.botorch_problem
+ if isinstance(test_function, BoTorchTestFunction):
+     botorch_problem = test_function.botorch_problem
obj = botorch_problem.evaluate_true(X_tf)
if isinstance(botorch_problem, ConstrainedHartmann):
expected_Y = torch.cat(
@@ -239,7 +239,7 @@ def test_synthetic_runner(self) -> None:
)
else:
expected_Y = obj
- elif isinstance(test_problem, SurrogateTestFunction):
+ elif isinstance(test_function, SurrogateTestFunction):
expected_Y = torch.tensor([4.2], dtype=torch.double)
else:
expected_Y = torch.full(
@@ -259,11 +259,11 @@ def test_synthetic_runner(self) -> None:

with (
nullcontext()
- if not isinstance(test_problem, SurrogateTestFunction)
+ if not isinstance(test_function, SurrogateTestFunction)
else patch.object(
# pyre-fixme: BenchmarkTestFunction` has no attribute
# `_surrogate`.
- runner.test_problem._surrogate,
+ runner.test_function._surrogate,
"predict",
return_value=({"branin": [4.2]}, None),
)
@@ -300,7 +300,7 @@ def test_synthetic_runner(self) -> None:
def test_botorch_test_problem_runner_heterogeneous_noise(self) -> None:
for noise_std in [[0.1, 0.05], {"objective": 0.1, "constraint": 0.05}]:
runner = BenchmarkRunner(
- test_problem=BoTorchTestFunction(
+ test_function=BoTorchTestFunction(
botorch_problem=ConstrainedHartmann(dim=6)
),
noise_std=noise_std,
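As a companion to the heterogeneous-noise test above, a hedged sketch of per-metric noise with the renamed keyword follows; the `BoTorchTestFunction` import path and the exact outcome names are assumptions, chosen to match the dict keys used in the test.

```python
# Sketch under assumptions: the BoTorchTestFunction import path and the
# outcome_names pairing are illustrative, not taken verbatim from this diff.
from ax.benchmark.benchmark_runner import BenchmarkRunner
from ax.benchmark.runners.botorch_test import BoTorchTestFunction  # assumed path
from botorch.test_functions.synthetic import ConstrainedHartmann

runner = BenchmarkRunner(
    test_function=BoTorchTestFunction(botorch_problem=ConstrainedHartmann(dim=6)),
    outcome_names=["objective", "constraint"],  # assumed to match the noise_std keys
    # Per the BenchmarkRunner docstring, noise_std can be a single float,
    # a list, or a dict keyed by metric name.
    noise_std={"objective": 0.1, "constraint": 0.05},
)
```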
2 changes: 1 addition & 1 deletion ax/benchmark/tests/test_benchmark_problem.py
@@ -53,7 +53,7 @@ def test_inference_value_not_implemented(self) -> None:
]
optimization_config = OptimizationConfig(objective=objectives[0])
runner = BenchmarkRunner(
- test_problem=BoTorchTestFunction(botorch_problem=Branin()),
+ test_function=BoTorchTestFunction(botorch_problem=Branin()),
outcome_names=["foo"],
)
with self.assertRaisesRegex(NotImplementedError, "Only `n_best_points=1`"):
4 changes: 2 additions & 2 deletions ax/utils/testing/benchmark_stubs.py
@@ -103,7 +103,7 @@ def get_soo_surrogate_test_function(lazy: bool = True) -> SurrogateTestFunction:
def get_soo_surrogate() -> BenchmarkProblem:
experiment = get_branin_experiment(with_completed_trial=True)
test_function = get_soo_surrogate_test_function()
- runner = BenchmarkRunner(test_problem=test_function, outcome_names=["branin"])
+ runner = BenchmarkRunner(test_function=test_function, outcome_names=["branin"])

observe_noise_sd = True
objective = Objective(
@@ -140,7 +140,7 @@ def get_moo_surrogate() -> BenchmarkProblem:
outcome_names=outcome_names,
get_surrogate_and_datasets=lambda: (surrogate, []),
)
- runner = BenchmarkRunner(test_problem=test_function, outcome_names=outcome_names)
+ runner = BenchmarkRunner(test_function=test_function, outcome_names=outcome_names)
observe_noise_sd = True
optimization_config = MultiObjectiveOptimizationConfig(
objective=MultiObjective(
