Convert directory fbcode/ax to use the Ruff Formatter
Summary:
Converts the directory specified to use the Ruff formatter in pyfmt


If this diff causes merge conflicts when rebasing, please run
`hg status -n -0 --change . -I '**/*.{py,pyi}' | xargs -0 arc pyfmt`
on your diff, and amend any changes before rebasing onto latest.
That should help reduce or eliminate any merge conflicts.
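For reference, that pipeline works roughly as follows: `hg status -n -0 --change .` prints only the names of files touched by the current commit, NUL-separated; `-I '**/*.{py,pyi}'` restricts the listing to Python sources; and `xargs -0 arc pyfmt` hands those paths to pyfmt, which now runs Ruff for this directory.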

allow-large-files
bypass-github-export-checks

Reviewed By: amyreese

Differential Revision: D64265331

fbshipit-source-id: 710ebc894795eae3c73f836c378153da3d528cd8
Thomas Polasek authored and facebook-github-bot committed Oct 16, 2024
1 parent e36bbc4 commit dacedfc
Showing 74 changed files with 109 additions and 177 deletions.
1 change: 0 additions & 1 deletion ax/analysis/old/helpers/cross_validation_helpers.py

@@ -78,7 +78,6 @@ def cv_results_to_df(
     records = []

     for i in range(len(arm_names)):
-
         records.append(
             {
                 "arm_name": arm_names[i],
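Most of the deletions in this commit follow the pattern above: the Ruff formatter strips blank lines that open an indented block, matching Black's current style. A minimal sketch of the rule, with hypothetical names:

    def make_records(arm_names: list[str]) -> list[dict[str, str]]:

        records = []  # `ruff format` deletes the blank line above this one
        for name in arm_names:
            records.append({"arm_name": name})
        return records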
(file name not shown)

@@ -60,7 +60,6 @@ def test_get_min_max_with_errors(self) -> None:
         self.assertAlmostEqual(max_, expected_max, delta=1e-4)

     def test_obs_vs_pred_dropdown_plot(self) -> None:
-
         cross_validation_plot = CrossValidationPlot(
             experiment=self.exp, model=self.model
         )
(file name not shown)

@@ -109,7 +109,6 @@ def test_error_scatter_trace_branin(self) -> None:
         self.assertEqual(result_analysis, result_plot)

     def test_obs_vs_pred_dropdown_plot_branin(self) -> None:
-
         label_dict = {"branin": "BrAnIn"}

         cross_validation_plot = CrossValidationPlot(
1 change: 0 additions & 1 deletion ax/analysis/old/tests/test_analysis_report.py

@@ -22,7 +22,6 @@


 class TestCrossValidationPlot(TestCase):
-
     class TestAnalysis(BaseAnalysis):
         def get_df(self) -> pd.DataFrame:
             return pd.DataFrame()
1 change: 0 additions & 1 deletion ax/analysis/plotly/parallel_coordinates.py

@@ -85,7 +85,6 @@ def _prepare_data(experiment: Experiment, metric: str) -> pd.DataFrame:


 def _prepare_plot(df: pd.DataFrame, metric_name: str) -> go.Figure:
-
     # ParCoords requires that the dimensions are specified on continuous scales, so
     # ChoiceParameters and FixedParameters must be preprocessed to allow for
     # appropriate plotting.
9 changes: 2 additions & 7 deletions ax/analysis/plotly/tests/test_insample_effects.py

@@ -29,7 +29,6 @@


 class TestInsampleEffectsPlot(TestCase):
-
     def setUp(self) -> None:
         super().setUp()
         self.generation_strategy = GenerationStrategy(
@@ -93,9 +92,7 @@ def test_compute_uses_gs_model_if_possible(self) -> None:
             )
         ).set_status_quo_with_weight(
             status_quo=experiment.status_quo, weight=1.0
-        ).mark_completed(
-            unsafe=True
-        )
+        ).mark_completed(unsafe=True)
         experiment.fetch_data()
         generation_strategy.gen_with_multiple_nodes(experiment=experiment, n=10)
         # Ensure the current model is Botorch
@@ -323,9 +320,7 @@ def test_compute_requires_data_for_the_metric_on_the_trial_with_a_model(
             )
         ).set_status_quo_with_weight(
             status_quo=experiment.status_quo, weight=1.0
-        ).mark_completed(
-            unsafe=True
-        )
+        ).mark_completed(unsafe=True)
         experiment.fetch_data()
         # AND GIVEN the experiment has a trial with no data
         empty_trial = experiment.new_batch_trial(
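The `mark_completed` hunks above show a second recurring rewrite: when a chained call's arguments fit within the formatter's line limit (88 characters by default), Ruff collapses the call onto one line. A rough sketch with a made-up class:

    class Trial:
        def mark_completed(self, unsafe: bool = False) -> "Trial":
            self.completed = True
            return self

    # Formerly split over three lines; after Ruff the short call is joined
    # because it fits on a single line:
    trial = Trial().mark_completed(unsafe=True)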
12 changes: 3 additions & 9 deletions ax/analysis/plotly/tests/test_predicted_effects.py

@@ -131,9 +131,7 @@ def test_compute(self) -> None:
             )
         ).set_status_quo_with_weight(
             status_quo=experiment.status_quo, weight=1.0
-        ).mark_completed(
-            unsafe=True
-        )
+        ).mark_completed(unsafe=True)
         experiment.fetch_data()
         experiment.new_batch_trial(
             generator_runs=generation_strategy.gen_with_multiple_nodes(
@@ -200,17 +198,13 @@ def test_compute_multitask(self) -> None:
             generator_run=generation_strategy.gen(experiment=experiment, n=10)
         ).set_status_quo_with_weight(
             status_quo=experiment.status_quo, weight=1
-        ).mark_completed(
-            unsafe=True
-        )
+        ).mark_completed(unsafe=True)
         experiment.fetch_data()
         experiment.new_batch_trial(
             generator_run=generation_strategy.gen(experiment=experiment, n=10)
         ).set_status_quo_with_weight(
             status_quo=experiment.status_quo, weight=1
-        ).mark_completed(
-            unsafe=True
-        )
+        ).mark_completed(unsafe=True)
         experiment.fetch_data()
         # leave as a candidate
         experiment.new_batch_trial(
2 changes: 1 addition & 1 deletion ax/analysis/plotly/utils.py

@@ -95,7 +95,7 @@ def get_constraint_violated_probabilities(


 def format_constraint_violated_probabilities(
-    constraints_violated: dict[str, float]
+    constraints_violated: dict[str, float],
 ) -> str:
     """Format the constraints violated for the tooltip."""
     max_metric_length = 70
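The only change to this file is a trailing comma: like Black, Ruff applies the "magic trailing comma" convention, adding one after the final argument whenever a parameter list remains split across lines. A small sketch with hypothetical names:

    def format_items(
        items: dict[str, float],  # comma added because the signature is multi-line
    ) -> str:
        return ", ".join(f"{k}={v}" for k, v in items.items())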
1 change: 0 additions & 1 deletion ax/benchmark/benchmark_problem.py

@@ -217,7 +217,6 @@ def get_soo_config_and_outcome_names(
     observe_noise_sd: bool,
     objective_name: str,
 ) -> tuple[OptimizationConfig, list[str]]:
-
     objective = Objective(
         metric=BenchmarkMetric(
             name=objective_name,
2 changes: 1 addition & 1 deletion ax/benchmark/problems/registry.py

@@ -239,7 +239,7 @@ class BenchmarkProblemRegistryEntry:
 def get_problem(
     problem_key: str,
     registry: Mapping[str, BenchmarkProblemRegistryEntry] | None = None,
-    **additional_kwargs: Any
+    **additional_kwargs: Any,
 ) -> BenchmarkProblem:
     """
     Generate a benchmark problem from a key, registry, and additional arguments.
2 changes: 1 addition & 1 deletion ax/benchmark/runners/base.py

@@ -47,7 +47,7 @@ def __init__(
         self,
         *,
         outcome_names: list[str],
-        search_space_digest: SearchSpaceDigest | None = None
+        search_space_digest: SearchSpaceDigest | None = None,
     ) -> None:
         """
         Args:
5 changes: 2 additions & 3 deletions ax/benchmark/runners/surrogate.py

@@ -32,9 +32,8 @@ def __init__(
         surrogate: TorchModelBridge | None = None,
         datasets: list[SupervisedDataset] | None = None,
         noise_stds: float | dict[str, float] = 0.0,
-        get_surrogate_and_datasets: None | (
-            Callable[[], tuple[TorchModelBridge, list[SupervisedDataset]]]
-        ) = None,
+        get_surrogate_and_datasets: None
+        | (Callable[[], tuple[TorchModelBridge, list[SupervisedDataset]]]) = None,
         search_space_digest: SearchSpaceDigest | None = None,
     ) -> None:
         """Runner for surrogate benchmark problems.
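The `get_surrogate_and_datasets` hunk shows how Ruff re-wraps long union annotations: rather than opening a parenthesized group after `None | (`, it breaks before the `|` operator so the continuation line starts with the alternative. A hedged sketch of the same shape, with placeholder types:

    from collections.abc import Callable

    def run_benchmark(
        get_surrogate_and_datasets: None
        | (Callable[[], tuple[str, list[int]]]) = None,
    ) -> None: ...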
1 change: 0 additions & 1 deletion ax/benchmark/tests/problems/hpo/test_torchvision.py

@@ -36,7 +36,6 @@ def test_problem_properties(self) -> None:
             "ax.benchmark.problems.hpo.torchvision._REGISTRY",
             {"MNIST": TestDataset, "FashionMNIST": TestDataset},
         ):
-
             self.assertEqual(
                 get_problem(problem_key="hpo_pytorch_cnn_MNIST").name,
                 "HPO_PyTorchCNN_Torchvision::MNIST",
1 change: 0 additions & 1 deletion ax/benchmark/tests/problems/synthetic/hss/test_jenatton.py

@@ -25,7 +25,6 @@


 class JenattonTest(TestCase):
-
     def test_jenatton_test_function(self) -> None:
         benchmark_problem = get_jenatton_benchmark_problem()

1 change: 0 additions & 1 deletion ax/benchmark/tests/problems/test_mixed_integer_problems.py

@@ -103,7 +103,6 @@ def test_problems(self) -> None:
         ]

         for problem, params, expected_arg in cases:
-
             runner = checked_cast(BotorchTestProblemRunner, problem.runner)
             trial = Trial(experiment=MagicMock())
             # pyre-fixme: Incompatible parameter type [6]: In call
1 change: 0 additions & 1 deletion ax/benchmark/tests/problems/test_problems.py

@@ -11,7 +11,6 @@

 class TestProblems(TestCase):
     def test_load_problems(self) -> None:
-
         # Make sure problem construction succeeds
         for name in BENCHMARK_PROBLEM_REGISTRY.keys():
             if "MNIST" in name:
4 changes: 1 addition & 3 deletions ax/benchmark/tests/runners/test_botorch_test_problem.py

@@ -61,9 +61,7 @@ def test_synthetic_runner(self) -> None:
             test_problem_kwargs,
             modified_bounds,
             noise_std,
-        ) in (
-            botorch_cases + param_based_cases
-        ):
+        ) in botorch_cases + param_based_cases:
             if noise_std is not None:
                 # pyre-fixme[6]: Incompatible parameter type: Expected int, got float
                 test_problem_kwargs["noise_std"] = noise_std
1 change: 0 additions & 1 deletion ax/benchmark/tests/runners/test_surrogate_runner.py

@@ -84,7 +84,6 @@ def test_instantiation_raises_with_missing_args(self) -> None:
         )

     def test_equality(self) -> None:
-
         def _construct_runner(name: str) -> SurrogateRunner:
             return SurrogateRunner(
                 name=name,
1 change: 0 additions & 1 deletion ax/benchmark/tests/test_benchmark_metric.py

@@ -47,7 +47,6 @@ def get_test_batch_trial() -> BatchTrial:


 class BenchmarkMetricTest(TestCase):
-
     def test_fetch_trial_data(self) -> None:
         metric1 = BenchmarkMetric(name="test_metric1", lower_is_better=True)
         metric2 = BenchmarkMetric(name="test_metric2", lower_is_better=True)
23 changes: 10 additions & 13 deletions ax/core/experiment.py

@@ -86,9 +86,8 @@ def __init__(
         experiment_type: str | None = None,
         properties: dict[str, Any] | None = None,
         default_data_type: DataType | None = None,
-        auxiliary_experiments_by_purpose: None | (
-            dict[AuxiliaryExperimentPurpose, list[AuxiliaryExperiment]]
-        ) = None,
+        auxiliary_experiments_by_purpose: None
+        | (dict[AuxiliaryExperimentPurpose, list[AuxiliaryExperiment]]) = None,
     ) -> None:
         """Inits Experiment.
@@ -141,7 +140,7 @@ def __init__(

         self.auxiliary_experiments_by_purpose: dict[
             AuxiliaryExperimentPurpose, list[AuxiliaryExperiment]
-        ] = (auxiliary_experiments_by_purpose or {})
+        ] = auxiliary_experiments_by_purpose or {}

         self.add_tracking_metrics(tracking_metrics or [])

@@ -852,9 +851,7 @@ def _get_last_data_without_similar_rows(
         last_data_type = type(last_data)
         merge_keys = ["trial_index", "metric_name", "arm_name"] + (
             # pyre-ignore[16]
-            last_data.map_keys
-            if issubclass(last_data_type, MapData)
-            else []
+            last_data.map_keys if issubclass(last_data_type, MapData) else []
         )
         # this merge is like a SQL left join on merge keys
         # it will return a dataframe with the columns in merge_keys
@@ -1742,14 +1739,14 @@ def metric_config_summary_df(self) -> pd.DataFrame:

         for constraint in opt_config.all_constraints:
             if not isinstance(constraint, ObjectiveThreshold):
-                records[constraint.metric.name][
-                    METRIC_DF_COLNAMES["goal"]
-                ] = "constrain"
+                records[constraint.metric.name][METRIC_DF_COLNAMES["goal"]] = (
+                    "constrain"
+                )
                 op = ">= " if constraint.op == ComparisonOp.GEQ else "<= "
                 relative = "%" if constraint.relative else ""
-                records[constraint.metric.name][
-                    METRIC_DF_COLNAMES["bound"]
-                ] = f"{op}{constraint.bound}{relative}"
+                records[constraint.metric.name][METRIC_DF_COLNAMES["bound"]] = (
+                    f"{op}{constraint.bound}{relative}"
+                )

         for metric in self.tracking_metrics or []:
             records[metric.name][METRIC_DF_COLNAMES["goal"]] = "track"
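The `metric_config_summary_df` hunk illustrates a third pattern: for an over-long assignment to a subscripted target, Ruff keeps the whole left-hand side on one line and parenthesizes the value instead of splitting inside the square brackets. A sketch with invented names (the wrapping only appears when the joined line would exceed the limit):

    COLNAMES = {"goal": "Goal", "bound": "Bound"}
    records: dict[str, dict[str, str]] = {"some_metric_with_a_long_name": {}}

    records["some_metric_with_a_long_name"][COLNAMES["goal"]] = (
        "constrain"  # value wrapped in parentheses; the subscripts stay intact
    )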
5 changes: 2 additions & 3 deletions ax/core/generator_run.py

@@ -103,9 +103,8 @@ def __init__(
         gen_metadata: TGenMetadata | None = None,
         model_state_after_gen: dict[str, Any] | None = None,
         generation_step_index: int | None = None,
-        candidate_metadata_by_arm_signature: None | (
-            dict[str, TCandidateMetadata]
-        ) = None,
+        candidate_metadata_by_arm_signature: None
+        | (dict[str, TCandidateMetadata]) = None,
         generation_node_name: str | None = None,
     ) -> None:
         """
1 change: 0 additions & 1 deletion ax/core/map_data.py

@@ -295,7 +295,6 @@ def filter(
         trial_indices: Iterable[int] | None = None,
         metric_names: Iterable[str] | None = None,
     ) -> MapData:
-
         return MapData(
             df=self._filter_df(
                 df=self.map_df, trial_indices=trial_indices, metric_names=metric_names
5 changes: 2 additions & 3 deletions ax/core/optimization_config.py

@@ -298,9 +298,8 @@ def clone_with_args(
         self,
         objective: MultiObjective | ScalarizedObjective | None = None,
         outcome_constraints: None | (list[OutcomeConstraint]) = _NO_OUTCOME_CONSTRAINTS,
-        objective_thresholds: None | (
-            list[ObjectiveThreshold]
-        ) = _NO_OBJECTIVE_THRESHOLDS,
+        objective_thresholds: None
+        | (list[ObjectiveThreshold]) = _NO_OBJECTIVE_THRESHOLDS,
         risk_measure: RiskMeasure | None = _NO_RISK_MEASURE,
     ) -> "MultiObjectiveOptimizationConfig":
         """Make a copy of this optimization config."""
1 change: 0 additions & 1 deletion ax/core/parameter.py

@@ -190,7 +190,6 @@ def available_flags(self) -> list[str]:
     def summary_dict(
         self,
     ) -> dict[str, TParamValueList | TParamValue | str | list[str]]:
-
         # Assemble dict.
         summary_dict = {
             "name": self.name,
1 change: 0 additions & 1 deletion ax/core/tests/test_multi_type_experiment.py

@@ -180,7 +180,6 @@ def setUp(self) -> None:
         self.experiment.new_batch_trial(trial_type="type2")

     def test_filter_trials_by_type(self) -> None:
-
         trials = self.experiment.trials.values()
         self.assertEqual(len(trials), 2)
         filtered = filter_trials_by_type(trials, trial_type="type1")
2 changes: 1 addition & 1 deletion ax/core/tests/test_objective.py

@@ -39,7 +39,7 @@ def setUp(self) -> None:

     def test_Init(self) -> None:
         with self.assertRaisesRegex(UserInputError, "does not specify"):
-            Objective(metric=self.metrics["m1"]),
+            (Objective(metric=self.metrics["m1"]),)
         with self.assertRaisesRegex(
             UserInputError, "doesn't match the specified optimization direction"
         ):
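One hunk here looks behavioral but is still pure formatting: the statement already ended in a stray comma, which makes it a one-element tuple, and Ruff adds parentheses to make that accidental tuple explicit, so the constructor still runs and the asserted exception is unchanged. A tiny illustration:

    value = 1,      # easy to misread: the trailing comma makes this a 1-tuple
    value = (1,)    # Ruff's rewrite states the tuple outright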
1 change: 0 additions & 1 deletion ax/core/tests/test_outcome_constraint.py

@@ -82,7 +82,6 @@ def test_Sortable(self) -> None:
         self.assertTrue(constraint1 < constraint2)

     def test_validate_constraint(self) -> None:
-
         metric = Metric(name="metric0", lower_is_better=False)
         oc = OutcomeConstraint(metric, bound=-3, relative=True, op=ComparisonOp.GEQ)
         self.assertTrue(oc._validate_constraint()[0])
1 change: 0 additions & 1 deletion ax/early_stopping/strategies/logical.py

@@ -36,7 +36,6 @@ def should_stop_trials_early(
         experiment: Experiment,
         **kwargs: dict[str, Any],
     ) -> dict[int, str | None]:
-
         left = self.left.should_stop_trials_early(
             trial_indices=trial_indices, experiment=experiment, **kwargs
         )
1 change: 0 additions & 1 deletion ax/metrics/branin_map.py

@@ -131,7 +131,6 @@ def f(self, x: np.ndarray, timestamp: int) -> Mapping[str, Any]:


 class BraninFidelityMapMetric(NoisyFunctionMapMetric):
-
     map_key_info: MapKeyInfo[float] = MapKeyInfo(key="fidelity", default_value=0.0)

     def __init__(
22 changes: 10 additions & 12 deletions ax/modelbridge/dispatch_utils.py

@@ -73,9 +73,8 @@ def _make_botorch_step(
     max_parallelism: int | None = None,
     model: ModelRegistryBase = Models.BOTORCH_MODULAR,
     model_kwargs: dict[str, Any] | None = None,
-    winsorization_config: None | (
-        WinsorizationConfig | dict[str, WinsorizationConfig]
-    ) = None,
+    winsorization_config: None
+    | (WinsorizationConfig | dict[str, WinsorizationConfig]) = None,
     no_winsorization: bool = False,
     should_deduplicate: bool = False,
     verbose: bool | None = None,
@@ -97,9 +96,9 @@ def _make_botorch_step(
             "use_raw_status_quo": derelativize_with_raw_status_quo
         }
         model_kwargs["transform_configs"] = model_kwargs.get("transform_configs", {})
-        model_kwargs["transform_configs"][
-            "Derelativize"
-        ] = derelativization_transform_config
+        model_kwargs["transform_configs"]["Derelativize"] = (
+            derelativization_transform_config
+        )
         model_kwargs["fit_out_of_design"] = fit_out_of_design

     if not no_winsorization:
@@ -108,9 +107,9 @@ def _make_botorch_step(
         transforms = model_kwargs.get("transforms", default_transforms)
         model_kwargs["transforms"] = [cast(type[Transform], Winsorize)] + transforms
         if winsorization_transform_config is not None:
-            model_kwargs["transform_configs"][
-                "Winsorize"
-            ] = winsorization_transform_config
+            model_kwargs["transform_configs"]["Winsorize"] = (
+                winsorization_transform_config
+            )

     if MODEL_KEY_TO_MODEL_SETUP[model.value].model_class != ModularBoTorchModel:
         if verbose is not None:
@@ -292,9 +291,8 @@ def choose_generation_strategy(
     random_seed: int | None = None,
     torch_device: torch.device | None = None,
     no_winsorization: bool = False,
-    winsorization_config: None | (
-        WinsorizationConfig | dict[str, WinsorizationConfig]
-    ) = None,
+    winsorization_config: None
+    | (WinsorizationConfig | dict[str, WinsorizationConfig]) = None,
     derelativize_with_raw_status_quo: bool = False,
     no_bayesian_optimization: bool | None = None,
     force_random_search: bool = False,
… (diffs for the remaining changed files not shown)
