Skip to content

Commit

Permalink
Update some test helpers in core_stubs (#2319)
Browse files Browse the repository at this point in the history
Summary:
Pull Request resolved: #2319

I tried using `get_experiment_with_custom_runner_and_metric` and ran into issues with model fitting because its metrics and data were not consistent. This diff adds a couple of options to the helper to make it more flexible for future use, and ensures that the attached data corresponds to the metrics on the experiment.

Reviewed By: mgarrard

Differential Revision: D55727190

fbshipit-source-id: 5a96230db59842ccc9efe96b5bd169c28282f151
  • Loading branch information
saitcakmak authored and facebook-github-bot committed Apr 4, 2024
1 parent 1b29a78 commit c7601b4
Show file tree
Hide file tree
Showing 3 changed files with 56 additions and 26 deletions.
1 change: 1 addition & 0 deletions ax/storage/sqa_store/tests/test_sqa_store.py
Original file line number Diff line number Diff line change
Expand Up @@ -253,6 +253,7 @@ def test_LoadExperimentSkipMetricsAndRunners(self) -> None:
constrain_search_space=False,
immutable=immutable,
multi_objective=multi_objective,
num_trials=1,
)
if multi_objective:
custom_metric_names.extend(["m1", "m3"])
Expand Down
9 changes: 6 additions & 3 deletions ax/telemetry/tests/test_experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@

class TestExperiment(TestCase):
def test_experiment_created_record_from_experiment(self) -> None:
experiment = get_experiment_with_custom_runner_and_metric()
experiment = get_experiment_with_custom_runner_and_metric(
has_outcome_constraint=True
)

record = ExperimentCreatedRecord.from_experiment(experiment=experiment)
expected = ExperimentCreatedRecord(
Expand Down Expand Up @@ -42,8 +44,9 @@ def test_experiment_created_record_from_experiment(self) -> None:
self.assertEqual(record, expected)

def test_experiment_completed_record_from_experiment(self) -> None:
experiment = get_experiment_with_custom_runner_and_metric()

experiment = get_experiment_with_custom_runner_and_metric(
has_outcome_constraint=True, num_trials=1
)
record = ExperimentCompletedRecord.from_experiment(experiment=experiment)

# Calculate these here, may change from run to run
Expand Down
72 changes: 49 additions & 23 deletions ax/utils/testing/core_stubs.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@

from collections import OrderedDict
from datetime import datetime, timedelta

from logging import Logger
from pathlib import Path
from typing import (
Expand Down Expand Up @@ -158,18 +157,23 @@ def get_experiment_with_custom_runner_and_metric(
constrain_search_space: bool = True,
immutable: bool = False,
multi_objective: bool = False,
num_trials: int = 3,
has_outcome_constraint: bool = False,
) -> Experiment:

# Create experiment with custom runner and metric
experiment = Experiment(
name="test",
# Omit constraints to prevent Sobol rejection sampling below,
# which floods logs with "Unable to round" warnings.
search_space=get_search_space(constrain_search_space=constrain_search_space),
optimization_config=(
get_multi_objective_optimization_config(custom_metric=True)
get_multi_objective_optimization_config(
custom_metric=True,
outcome_constraint=has_outcome_constraint,
relative=False,
)
if multi_objective
else get_optimization_config()
else get_optimization_config(
outcome_constraint=has_outcome_constraint, relative=False
)
),
description="test description",
tracking_metrics=[
Expand All @@ -180,18 +184,30 @@ def get_experiment_with_custom_runner_and_metric(
)

# Create a trial, set its runner and complete it.
sobol_generator = get_sobol(search_space=experiment.search_space)
sobol_run = sobol_generator.gen(
n=1,
optimization_config=experiment.optimization_config if not immutable else None,
)
trial = experiment.new_trial(generator_run=sobol_run)
trial.runner = experiment.runner
trial.mark_running()
experiment.attach_data(get_data(metric_name="custom_test_metric"))
experiment.attach_data(get_data(metric_name="m1"))
experiment.attach_data(get_data(metric_name="m3"))
trial.mark_completed()
for _ in range(num_trials):
sobol_generator = get_sobol(
search_space=experiment.search_space,
)
sobol_run = sobol_generator.gen(
n=1,
optimization_config=(
experiment.optimization_config if not immutable else None
),
)
trial = experiment.new_trial(generator_run=sobol_run)
trial.runner = experiment.runner
trial.mark_running()
data = Data.from_multiple_data(
get_data(
metric_name=metric_name,
trial_index=trial.index,
num_non_sq_arms=len(trial.arms),
include_sq=False,
)
for metric_name in experiment.metrics
)
experiment.attach_data(data)
trial.mark_completed()

if immutable:
experiment._properties = {Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True}
Expand Down Expand Up @@ -1494,8 +1510,10 @@ def get_objective_threshold(
)


def get_outcome_constraint() -> OutcomeConstraint:
return OutcomeConstraint(metric=Metric(name="m2"), op=ComparisonOp.GEQ, bound=-0.25)
def get_outcome_constraint(relative: bool = True) -> OutcomeConstraint:
    """Return a stub ``OutcomeConstraint`` on metric ``m2`` (``m2 >= -0.25``).

    Args:
        relative: Whether the constraint bound is interpreted relative to the
            status quo arm (``True``) or as an absolute value (``False``).
    """
    constraint_metric = Metric(name="m2")
    return OutcomeConstraint(
        metric=constraint_metric,
        op=ComparisonOp.GEQ,
        bound=-0.25,
        relative=relative,
    )


def get_scalarized_outcome_constraint() -> ScalarizedOutcomeConstraint:
Expand Down Expand Up @@ -1599,9 +1617,13 @@ def get_augmented_hartmann_objective() -> Objective:
##############################


def get_optimization_config() -> OptimizationConfig:
def get_optimization_config(
    outcome_constraint: bool = True, relative: bool = True
) -> OptimizationConfig:
    """Return a stub single-objective ``OptimizationConfig``.

    Args:
        outcome_constraint: If ``True``, attach the stub outcome constraint
            from ``get_outcome_constraint``; otherwise use no constraints.
        relative: Passed through to ``get_outcome_constraint`` to control
            whether the constraint bound is relative to the status quo.
    """
    if outcome_constraint:
        constraints = [get_outcome_constraint(relative=relative)]
    else:
        constraints = []
    return OptimizationConfig(
        objective=get_objective(), outcome_constraints=constraints
    )
Expand All @@ -1614,10 +1636,14 @@ def get_map_optimization_config() -> OptimizationConfig:

def get_multi_objective_optimization_config(
custom_metric: bool = False,
relative: bool = True,
outcome_constraint: bool = True,
) -> MultiObjectiveOptimizationConfig:

objective = get_custom_multi_objective() if custom_metric else get_multi_objective()
outcome_constraints = [get_outcome_constraint()]
outcome_constraints = (
[get_outcome_constraint(relative=relative)] if outcome_constraint else []
)
objective_thresholds = [
get_objective_threshold(metric_name="m1"),
get_objective_threshold(metric_name="m3", comparison_op=ComparisonOp.LEQ),
Expand Down

0 comments on commit c7601b4

Please sign in to comment.