From cd971a081e267760ef4fcfa7fce0b53e2e6a0389 Mon Sep 17 00:00:00 2001
From: Jason Chow
Date: Fri, 1 Nov 2024 16:05:21 -0700
Subject: [PATCH] Remove normalize_inputs and replace with parameter transform (#431)

Summary:
Pull Request resolved: https://github.com/facebookresearch/aepsych/pull/431

`normalize_inputs` (the one that min-max scales parameters) is confusingly
named (there's another `normalize_inputs` that concatenates data and ensures
they're all the right types) and is a hard-coded transformation that is
applied to all parameters. This means that there's no way to turn the behavior
off selectively, nor is it obvious that it is happening.

This diff removes the normalize_inputs method and replaces it with a parameter
transform that will also allow selective application of the transform via an
index.

Differential Revision: D65069497
---
 aepsych/config.py                          | 45 +++++++-----
 aepsych/models/base.py                     |  9 +--
 aepsych/models/monotonic_rejection_gp.py   |  8 +--
 aepsych/models/multitask_regression.py     |  9 +--
 aepsych/models/semi_p.py                   | 11 ++-
 aepsych/models/utils.py                    |  3 +
 aepsych/strategy.py                        |  4 +-
 aepsych/transforms/parameters.py           | 56 +++++++++++----
 docs/parameters.md                         | 29 +++++++-
 tests/generators/test_manual_generator.py  |  4 +-
 tests/models/test_gp_classification.py     | 20 ++++--
 tests/models/test_gp_regression.py         |  4 +-
 tests/models/test_pairwise_probit.py       | 39 +++++++---
 tests/test_config.py                       | 88 +++++++++++++++++++----
 tests/test_strategy.py                     | 23 ++++--
 tests/test_transforms.py                   | 49 +++++++++++--
 16 files changed, 304 insertions(+), 97 deletions(-)

diff --git a/aepsych/config.py b/aepsych/config.py
index c10302648..73bc339e6 100644
--- a/aepsych/config.py
+++ b/aepsych/config.py
@@ -8,6 +8,7 @@
 import ast
 import configparser
 import json
+import logging
 import re
 import warnings
 from types import ModuleType
@@ -168,24 +169,36 @@ def update(
         # Warn if ub/lb is defined in common section
         if "ub" in self["common"] and "lb" in self["common"]:
-            warnings.warn(
-                "ub and lb have been defined in common section, ignoring parameter specific blocks, be very careful!"
-            )
-        elif "parnames" in self["common"]: # it's possible to pass no parnames
-            par_names = self.getlist(
-                "common", "parnames", element_type=str, fallback=[]
+            logging.warning(
+                "ub and lb have been defined in common section, parameter-specific bounds take precedence over these."
) - lb = [None] * len(par_names) - ub = [None] * len(par_names) - for i, par_name in enumerate(par_names): - # Validate the parameter-specific block - self._check_param_settings(par_name) - lb[i] = self[par_name]["lower_bound"] - ub[i] = self[par_name]["upper_bound"] - - self["common"]["lb"] = f"[{', '.join(lb)}]" - self["common"]["ub"] = f"[{', '.join(ub)}]" + if "parnames" in self["common"]: # it's possible to pass no parnames + try: + par_names = self.getlist( + "common", "parnames", element_type=str, fallback=[] + ) + lb = [None] * len(par_names) + ub = [None] * len(par_names) + for i, par_name in enumerate(par_names): + # Validate the parameter-specific block + self._check_param_settings(par_name) + + lb[i] = self[par_name]["lower_bound"] + ub[i] = self[par_name]["upper_bound"] + + self["common"]["lb"] = f"[{', '.join(lb)}]" + self["common"]["ub"] = f"[{', '.join(ub)}]" + except ValueError: + # Check if ub/lb exists in common + if "ub" in self["common"] and "lb" in self["common"]: + logging.warning( + "Parameter-specific bounds are incomplete, falling back to ub/lb in [common]" + ) + else: + raise ValueError( + "Missing ub or lb in [common] with incomplete parameter-specific bounds, cannot fallback!" + ) # Deprecation warning for "experiment" section if "experiment" in self: diff --git a/aepsych/models/base.py b/aepsych/models/base.py index feef49456..d1b55f3e7 100644 --- a/aepsych/models/base.py +++ b/aepsych/models/base.py @@ -328,10 +328,6 @@ def set_train_data(self, inputs: Optional[torch.Tensor] = None, targets: Optiona if targets is not None: self.train_targets = targets - def normalize_inputs(self, x: torch.Tensor) -> torch.Tensor: - scale = self.ub - self.lb - return (x - self.lb) / scale - def forward(self, x: torch.Tensor) -> gpytorch.distributions.MultivariateNormal: """Evaluate GP @@ -342,9 +338,8 @@ def forward(self, x: torch.Tensor) -> gpytorch.distributions.MultivariateNormal: gpytorch.distributions.MultivariateNormal: Distribution object holding mean and covariance at x. """ - transformed_x = self.normalize_inputs(x) - mean_x = self.mean_module(transformed_x) - covar_x = self.covar_module(transformed_x) + mean_x = self.mean_module(x) + covar_x = self.covar_module(x) pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x) return pred diff --git a/aepsych/models/monotonic_rejection_gp.py b/aepsych/models/monotonic_rejection_gp.py index b35efcb05..de471522e 100644 --- a/aepsych/models/monotonic_rejection_gp.py +++ b/aepsych/models/monotonic_rejection_gp.py @@ -342,11 +342,7 @@ def forward(self, x: torch.Tensor) -> gpytorch.distributions.MultivariateNormal: gpytorch.distributions.MultivariateNormal: Distribution object holding mean and covariance at x. 
""" - - # final dim is deriv index, we only normalize the "real" dims - transformed_x = x.clone() - transformed_x[..., :-1] = self.normalize_inputs(transformed_x[..., :-1]) - mean_x = self.mean_module(transformed_x) - covar_x = self.covar_module(transformed_x) + mean_x = self.mean_module(x) + covar_x = self.covar_module(x) latent_pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x) return latent_pred diff --git a/aepsych/models/multitask_regression.py b/aepsych/models/multitask_regression.py index e1b683678..7fae06dc4 100644 --- a/aepsych/models/multitask_regression.py +++ b/aepsych/models/multitask_regression.py @@ -78,10 +78,11 @@ def __init__( self.covar_module, num_tasks=num_outputs, rank=rank ) - def forward(self, x: torch.Tensor) -> gpytorch.distributions.MultitaskMultivariateNormal: - transformed_x = self.normalize_inputs(x) - mean_x = self.mean_module(transformed_x) - covar_x = self.covar_module(transformed_x) + def forward( + self, x: torch.Tensor + ) -> gpytorch.distributions.MultitaskMultivariateNormal: + mean_x = self.mean_module(x) + covar_x = self.covar_module(x) return gpytorch.distributions.MultitaskMultivariateNormal(mean_x, covar_x) @classmethod diff --git a/aepsych/models/semi_p.py b/aepsych/models/semi_p.py index fd3821188..53c9e7e20 100644 --- a/aepsych/models/semi_p.py +++ b/aepsych/models/semi_p.py @@ -521,18 +521,17 @@ def forward(self, x: torch.Tensor) -> MultivariateNormal: Returns: MVN object evaluated at samples """ - transformed_x = self.normalize_inputs(x) # TODO: make slope prop to intensity width. - slope_mean = self.slope_mean_module(transformed_x) + slope_mean = self.slope_mean_module(x) # kc mvn - offset_mean = self.offset_mean_module(transformed_x) + offset_mean = self.offset_mean_module(x) - slope_cov = self.slope_covar_module(transformed_x) - offset_cov = self.offset_covar_module(transformed_x) + slope_cov = self.slope_covar_module(x) + offset_cov = self.offset_covar_module(x) mean_x, cov_x = _hadamard_mvn_approx( - x_intensity=transformed_x[..., self.stim_dim], + x_intensity=x[..., self.stim_dim], slope_mean=slope_mean, slope_cov=slope_cov, offset_mean=offset_mean, diff --git a/aepsych/models/utils.py b/aepsych/models/utils.py index 64998ce43..d55bc86d7 100644 --- a/aepsych/models/utils.py +++ b/aepsych/models/utils.py @@ -172,6 +172,9 @@ def get_extremum( timeout_sec=max_time, ) + if hasattr(model, "transforms"): + best_point = model.transforms.untransform(best_point) + # PosteriorMean flips the sign on minimize, we flip it back if extremum_type == "min": best_val = -best_val diff --git a/aepsych/strategy.py b/aepsych/strategy.py index 7c9bcab26..28ce61a2d 100644 --- a/aepsych/strategy.py +++ b/aepsych/strategy.py @@ -138,8 +138,8 @@ def __init__( self.transforms = transforms if self.transforms is not None: - self.lb = self.transforms.transform(self.lb) - self.ub = self.transforms.transform(self.ub) + self.lb = self.transforms.transform(self.lb.unsqueeze(0))[0] + self.ub = self.transforms.transform(self.ub.unsqueeze(0))[0] self.min_post_range = min_post_range if self.min_post_range is not None: diff --git a/aepsych/transforms/parameters.py b/aepsych/transforms/parameters.py index 4e9b86967..47a6dfc1f 100644 --- a/aepsych/transforms/parameters.py +++ b/aepsych/transforms/parameters.py @@ -6,6 +6,7 @@ # LICENSE file in the root directory of this source tree. 
import ast from abc import ABC, abstractmethod +from configparser import NoOptionError from copy import deepcopy from typing import Any, Callable, List, Optional, Tuple, Type @@ -16,7 +17,7 @@ from aepsych.generators.base import AEPsychGenerator from aepsych.models.base import AEPsychMixin, ModelProtocol from botorch.acquisition import AcquisitionFunction -from botorch.models.transforms.input import ChainedInputTransform, Log10 +from botorch.models.transforms.input import ChainedInputTransform, Log10, Normalize from botorch.models.transforms.utils import subset_transform from botorch.posteriors import Posterior from torch import Tensor @@ -107,23 +108,52 @@ def from_config(cls, config: Config) -> "ParameterTransforms": ParameterTransforms: A configured ParamaterTransform for the config. """ parnames: List[str] = config.getlist("common", "parnames", element_type=str) + + # Try to build a full array of bounds based on parameter-specific bounds + try: + _lower_bounds = torch.tensor( + [config.getfloat(par, "lower_bound") for par in parnames] + ) + _upper_bounds = torch.tensor( + [config.getfloat(par, "upper_bound") for par in parnames] + ) + + bounds = torch.stack((_lower_bounds, _upper_bounds)) + + except NoOptionError: # Look for general lb/ub array + _lb = config.gettensor("common", "lb") + _ub = config.gettensor("common", "ub") + bounds = torch.stack((_lb, _ub)) + transformDict = {} for i, par in enumerate(parnames): # This is the order that transforms are potentially applied, order matters # Log scale if config.getboolean(par, "log_scale", fallback=False): - lb = config.getfloat(par, "lower_bound") + lb = bounds[0, i].numpy() if lb < 0.0: - transformDict[f"{par}_Log10Plus"] = Log10Plus( - indices=[i], constant=np.abs(lb) + 1.0 - ) + xform = Log10Plus(indices=[i], constant=np.abs(lb) + 1.0) + transformDict[f"{par}_Log10Plus"] = xform + elif lb < 1.0: - transformDict[f"{par}_Log10Plus"] = Log10Plus( - indices=[i], constant=1.0 - ) + xform = Log10Plus(indices=[i], constant=1.0) + transformDict[f"{par}_Log10Plus"] = xform + else: - transformDict[f"{par}_Log10"] = Log10(indices=[i]) + xform = Log10(indices=[i]) + transformDict[f"{par}_Log10"] = xform + + # Transform bounds + bounds = xform.transform(bounds) + + # Normalize scale (defaults true) + if config.getboolean(par, "normalize_scale", fallback=True): + xform = Normalize(d=len(parnames), indices=[i], bounds=bounds) + transformDict[f"{par}_Normalize"] = xform + + # Transform bounds + bounds = xform.transform(bounds) return cls(**transformDict) @@ -192,9 +222,9 @@ def __init__( # Figure out what we need to do with generator if isinstance(generator, type): if "lb" in kwargs: - kwargs["lb"] = transforms.transform(kwargs["lb"].float()) + kwargs["lb"] = transforms.transform(kwargs["lb"].to(torch.float64)) if "ub" in kwargs: - kwargs["ub"] = transforms.transform(kwargs["ub"].float()) + kwargs["ub"] = transforms.transform(kwargs["ub"].to(torch.float64)) _base_obj = generator(**kwargs) else: _base_obj = generator @@ -326,9 +356,9 @@ def __init__( # Alternative instantiation method for analysis (and not live) if isinstance(model, type): if "lb" in kwargs: - kwargs["lb"] = transforms.transform(kwargs["lb"].float()) + kwargs["lb"] = transforms.transform(kwargs["lb"].to(torch.float64)) if "ub" in kwargs: - kwargs["ub"] = transforms.transform(kwargs["ub"].float()) + kwargs["ub"] = transforms.transform(kwargs["ub"].to(torch.float64)) _base_obj = model(**kwargs) else: _base_obj = model diff --git a/docs/parameters.md b/docs/parameters.md index 
4aa0157d1..ef7dfaf14 100644 --- a/docs/parameters.md +++ b/docs/parameters.md @@ -15,7 +15,6 @@ what parameter types are used and whatever transformations are used. Currently, we only support continuous parameters. More parameter types soon to come!

### Continuous

-
 ```ini
 [parameter]
 par_type = continuous
@@ -58,3 +57,31 @@ For parameters with lower bounds that are positive but still less than 1, we will always use
a constant value of 1 (i.e., `Log10(x + 1)` and `10 ^ (x - 1)`). For parameters
with lower bounds that are negative, we will use a constant value of the
absolute value of the lower bound + 1 (i.e., `Log10(x + |lb| + 1)` and
`10 ^ (x - |lb| - 1)`).
+
+
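The constant-selection rule above is easy to misread, so here is a minimal illustrative sketch (not part of this diff) of how a negative lower bound maps onto `Log10Plus`; it assumes `Log10Plus(indices, constant)` computes `log10(x + constant)`, matching the `Log10(x + |lb| + 1)` description, and uses the `ParameterTransforms` container exercised in the tests below.

```python
# Sketch: log-scaling a parameter whose lower bound is negative (lb = -10),
# assuming Log10Plus(indices, constant) applies log10(x + constant).
import torch

from aepsych.transforms import ParameterTransforms
from aepsych.transforms.parameters import Log10Plus

lb = -10.0
constant = abs(lb) + 1.0  # |lb| + 1 = 11, as described above

transforms = ParameterTransforms(
    par_Log10Plus=Log10Plus(indices=[0], constant=constant)
)

x = torch.tensor([[-10.0], [0.0], [89.0]])
x_log = transforms.transform(x)         # expected ~log10(x + 11): [[0.0], [~1.04], [2.0]]
x_back = transforms.untransform(x_log)  # expected to round-trip back to x
```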

+### Normalize scale

+By default, all parameters will have their scale min-max normalized to the range of
+[0, 1]. This prevents any particular parameter with a large scale from completely
+dominating the other parameters. Very rarely, this behavior may not be desired, and
+it can be turned off for specific parameters.
+
+```ini
+[parameter]
+par_type = continuous
+lower_bound = 1
+upper_bound = 100
+normalize_scale = False # turn it on with any of true/yes/on; turn it off with any of false/no/off; case insensitive
+```
+
+By setting the `normalize_scale` option to False, this parameter will not be scaled
+before being passed to the model and will therefore keep its original magnitude. This
+is very rarely necessary and should be used with caution.
+
+
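A minimal sketch of the selective behavior described above (not part of this diff), mirroring the `TransformsNormalize` test added at the bottom of this patch; the bounds and values are made up for illustration, and the `Normalize(d, indices, bounds)` call is the same pattern the new `from_config` uses.

```python
# Sketch: normalize only the second parameter; the first one keeps its original
# scale, as if it had normalize_scale = False in the config.
import torch

from aepsych.transforms import ParameterTransforms
from aepsych.transforms.parameters import Normalize

lb = torch.tensor([-10.0, 0.0])
ub = torch.tensor([10.0, 100.0])

transforms = ParameterTransforms(
    par2_Normalize=Normalize(d=2, indices=[1], bounds=torch.stack((lb, ub)))
)

x = torch.tensor([[-5.0, 20.0], [5.0, 50.0]])
x_scaled = transforms.transform(x)
# Expected: par1 untouched, par2 min-max scaled to [0, 1]:
# tensor([[-5.0000, 0.2000],
#         [ 5.0000, 0.5000]])
```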

+## Order of operations

+Parameter types and parameter-specific transforms are all handled by the +`ParameterTransform` API. Transforms built from config files will have a specific order +of operation, regardless of how the options were set in the config file. Each parameter +is transformed entirely separately. + +Currently, the order is as follows: +* Log scale +* Normalize scale \ No newline at end of file diff --git a/tests/generators/test_manual_generator.py b/tests/generators/test_manual_generator.py index dc895fd44..9126ff795 100644 --- a/tests/generators/test_manual_generator.py +++ b/tests/generators/test_manual_generator.py @@ -54,8 +54,8 @@ def test_manual_generator(self): config.update(config_str=config_str) # gen = ManualGenerator.from_config(config) gen = GeneratorWrapper.from_config("init_strat", config) - npt.assert_equal(gen.lb.numpy(), np.array([10, 10])) - npt.assert_equal(gen.ub.numpy(), np.array([11, 11])) + npt.assert_equal(gen.lb.numpy(), np.array([0, 0])) + npt.assert_equal(gen.ub.numpy(), np.array([1, 1])) self.assertFalse(gen.finished) p1 = list(gen.gen()[0]) diff --git a/tests/models/test_gp_classification.py b/tests/models/test_gp_classification.py index 90a022258..a59303147 100644 --- a/tests/models/test_gp_classification.py +++ b/tests/models/test_gp_classification.py @@ -23,6 +23,8 @@ from aepsych.generators import OptimizeAcqfGenerator, SobolGenerator from aepsych.models import GPClassificationModel from aepsych.strategy import SequentialStrategy, Strategy +from aepsych.transforms import ModelWrapper, ParameterTransforms +from aepsych.transforms.parameters import Normalize from botorch.acquisition import qUpperConfidenceBound from botorch.optim.fit import fit_gpytorch_mll_torch from botorch.optim.stopping import ExpMAStoppingCriterion @@ -208,11 +210,21 @@ def test_1d_classification_different_scales(self): X, y = torch.Tensor(X), torch.Tensor(y) X[:, 0] = X[:, 0] * 1000 X[:, 1] = X[:, 1] / 1000 - lb = [-3000, -0.003] - ub = [3000, 0.003] - - model = GPClassificationModel(lb=lb, ub=ub, inducing_size=20) + lb = torch.tensor([-3000, -0.003]) + ub = torch.tensor([3000, 0.003]) + transforms = ParameterTransforms( + normalize=Normalize( + 2, bounds=torch.stack((lb, ub)) + ) + ) + model = ModelWrapper( + model=GPClassificationModel, + lb=lb, + ub=ub, + inducing_size=20, + transforms=transforms, + ) model.fit(X[:50], y[:50]) # pspace diff --git a/tests/models/test_gp_regression.py b/tests/models/test_gp_regression.py index fa47df7b3..9b096df6a 100644 --- a/tests/models/test_gp_regression.py +++ b/tests/models/test_gp_regression.py @@ -89,8 +89,8 @@ def test_extremum(self): def test_from_config(self): model = self.server.strat.model - npt.assert_allclose(model.lb, [-1.0]) - npt.assert_allclose(model.ub, [3.0]) + npt.assert_allclose(model.transforms.untransform(model.lb), [-1.0]) + npt.assert_allclose(model.transforms.untransform(model.ub), [3.0]) self.assertEqual(model.dim, 1) self.assertIsInstance(model.likelihood, GaussianLikelihood) self.assertEqual(model.max_fit_time, 1) diff --git a/tests/models/test_pairwise_probit.py b/tests/models/test_pairwise_probit.py index 4b4f2eedf..9dfde3b79 100644 --- a/tests/models/test_pairwise_probit.py +++ b/tests/models/test_pairwise_probit.py @@ -22,6 +22,8 @@ from aepsych.server.message_handlers.handle_setup import configure from aepsych.server.message_handlers.handle_tell import tell from aepsych.strategy import SequentialStrategy, Strategy +from aepsych.transforms import GeneratorWrapper, ModelWrapper, ParameterTransforms +from 
aepsych.transforms.parameters import Normalize from botorch.acquisition import qUpperConfidenceBound from botorch.acquisition.active_learning import PairwiseMCPosteriorVariance from scipy.stats import bernoulli, norm, pearsonr @@ -192,30 +194,49 @@ def test_1d_pairwise_probit(self): np.random.seed(seed) n_init = 50 n_opt = 1 - lb = -4.0 - ub = 4.0 + lb = torch.tensor([-4.0]) + ub = torch.tensor([4.0]) extra_acqf_args = {"beta": 3.84} + transforms = ParameterTransforms( + normalize=Normalize(d=1, bounds=torch.stack([lb, ub])) + ) + sobol_gen = GeneratorWrapper( + generator=SobolGenerator, + lb=lb, + ub=ub, + seed=seed, + stimuli_per_trial=2, + transforms=transforms, + ) + acqf_gen = GeneratorWrapper( + generator=OptimizeAcqfGenerator, + acqf=qUpperConfidenceBound, + acqf_kwargs=extra_acqf_args, + stimuli_per_trial=2, + transforms=transforms, + ) + probit_model = ModelWrapper( + model=PairwiseProbitModel, lb=lb, ub=ub, transforms=transforms + ) model_list = [ Strategy( lb=lb, ub=ub, - generator=SobolGenerator(lb=lb, ub=ub, seed=seed, stimuli_per_trial=2), + generator=sobol_gen, min_asks=n_init, stimuli_per_trial=2, outcome_types=["binary"], + transforms=transforms, ), Strategy( lb=lb, ub=ub, - model=PairwiseProbitModel(lb=lb, ub=ub), - generator=OptimizeAcqfGenerator( - acqf=qUpperConfidenceBound, - acqf_kwargs=extra_acqf_args, - stimuli_per_trial=2, - ), + model=probit_model, + generator=acqf_gen, min_asks=n_opt, stimuli_per_trial=2, outcome_types=["binary"], + transforms=transforms, ), ] diff --git a/tests/test_config.py b/tests/test_config.py index 8a7df9a95..8be58dc38 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -32,7 +32,7 @@ from aepsych.server.message_handlers.handle_setup import configure from aepsych.strategy import SequentialStrategy, Strategy from aepsych.transforms import ParameterTransforms, transform_options -from aepsych.transforms.parameters import Log10 +from aepsych.transforms.parameters import Log10, Normalize from aepsych.version import __version__ from botorch.acquisition import qLogNoisyExpectedImprovement from botorch.acquisition.active_learning import PairwiseMCPosteriorVariance @@ -51,12 +51,12 @@ def test_single_probit_config(self): [par1] par_type = continuous - lower_bound = 0 - upper_bound = 1 + lower_bound = 1 + upper_bound = 10 [par2] par_type = continuous - lower_bound = 0 + lower_bound = -1 upper_bound = 1 [init_strat] @@ -124,9 +124,19 @@ def test_single_probit_config(self): self.assertTrue(strat.strat_list[0].outcome_types == ["binary"]) self.assertTrue(strat.strat_list[1].min_asks == 20) self.assertTrue(torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb)) - self.assertTrue(torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0]))) + self.assertTrue( + torch.all( + strat.transforms.untransform(strat.strat_list[1].model.lb) + == torch.Tensor([1, -1]) + ) + ) self.assertTrue(torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub)) - self.assertTrue(torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1]))) + self.assertTrue( + torch.all( + strat.transforms.untransform(strat.strat_list[1].model.ub) + == torch.Tensor([10, 1]) + ) + ) self.assertEqual(strat.strat_list[0].min_total_outcome_occurrences, 5) self.assertEqual(strat.strat_list[0].min_post_range, None) @@ -1041,6 +1051,7 @@ def test_derived_bounds(self): par_type = continuous lower_bound = -10 upper_bound = 10 + normalize_scale = False [init_strat] min_total_tells = 10 @@ -1064,7 +1075,7 @@ def test_derived_bounds(self): self.assertTrue(torch.all(model.lb == 
torch.Tensor([0, -10]))) self.assertTrue(torch.all(model.ub == torch.Tensor([1, 10]))) - def test_ignore_specific_bounds(self): + def test_ignore_common_bounds(self): config_str = """ [common] parnames = [par1, par2] @@ -1084,6 +1095,7 @@ def test_ignore_specific_bounds(self): par_type = continuous lower_bound = -5 upper_bound = 1 + normalize_scale = False [init_strat] min_total_tells = 10 @@ -1104,9 +1116,53 @@ def test_ignore_specific_bounds(self): opt_strat = strat.strat_list[1] model = opt_strat.model - self.assertTrue(torch.all(model.lb == torch.Tensor([0, 0]))) + self.assertTrue(torch.all(model.lb == torch.Tensor([0, -5]))) self.assertTrue(torch.all(model.ub == torch.Tensor([1, 1]))) + def test_common_fallback_bounds(self): + config_str = """ + [common] + parnames = [par1, par2] + lb = [0, 0] + ub = [1, 100] + stimuli_per_trial = 1 + outcome_types = [binary] + target = 0.75 + strategy_names = [init_strat, opt_strat] + + [par1] + par_type = continuous + lower_bound = 1 + upper_bound = 100 + + [par2] + par_type = continuous + # lower_bound = -5 + # upper_bound = 1 + normalize_scale = False + + [init_strat] + min_total_tells = 10 + generator = SobolGenerator + + [opt_strat] + min_total_tells = 20 + refit_every = 5 + generator = OptimizeAcqfGenerator + acqf = MCLevelSetEstimation + model = GPClassificationModel + """ + + config = Config() + config.update(config_str=config_str) + + strat = SequentialStrategy.from_config(config) + opt_strat = strat.strat_list[1] + model = opt_strat.model + + self.assertTrue(torch.all(model.lb == torch.Tensor([0, 0]))) + self.assertTrue(torch.all(model.ub == torch.Tensor([1, 100]))) + def test_parameter_setting_block_validation(self): config_str = """ [common] @@ -1216,8 +1272,8 @@ def test_clone_transform_options(self): self.assertFalse(torch.all(config_points == xformed_points)) self.assertFalse(torch.all(config_window == xformed_window)) - self.assertTrue(torch.allclose(xformed_lb, torch.tensor([0.0, 1.0]))) - self.assertTrue(torch.allclose(xformed_ub, torch.tensor([1.0, 2.0]))) + self.assertTrue(torch.allclose(xformed_lb, torch.tensor([0.0, 0.0]))) + self.assertTrue(torch.allclose(xformed_ub, torch.tensor([1.0, 1.0]))) transforms = ParameterTransforms.from_config(config) reversed_points = transforms.untransform(xformed_points) @@ -1248,11 +1304,15 @@ def test_build_transform(self): transforms = ParameterTransforms.from_config(config) - self.assertTrue(len(transforms.values()) == 1) + self.assertTrue(len(transforms.values()) == 3) - tf = list(transforms.items())[0] - self.assertTrue(tf[0] == "signal2_Log10") - self.assertTrue(isinstance(tf[1], Log10)) + expected_names = ["signal1_Normalize", "signal2_Log10", "signal2_Normalize"] + expected_transforms = [Normalize, Log10, Normalize] + for tf, name, transform in zip( + transforms.items(), expected_names, expected_transforms + ): + self.assertTrue(tf[0] == name) + self.assertTrue(isinstance(tf[1], transform)) if __name__ == "__main__": diff --git a/tests/test_strategy.py b/tests/test_strategy.py index 2bbb12adc..5bc6308da 100644 --- a/tests/test_strategy.py +++ b/tests/test_strategy.py @@ -15,6 +15,8 @@ from aepsych.models.gp_classification import GPClassificationModel from aepsych.models.monotonic_rejection_gp import MonotonicRejectionGP from aepsych.strategy import SequentialStrategy, Strategy +from aepsych.transforms import GeneratorWrapper, ModelWrapper, ParameterTransforms +from aepsych.transforms.parameters import Normalize class TestSequenceGenerators(unittest.TestCase): @@ -22,20 +24,29 @@ def 
setUp(self): seed = 1 torch.manual_seed(seed) np.random.seed(seed) - lb = [-1, -1] - ub = [1, 1] + lb = torch.tensor([-1, -1]) + ub = torch.tensor([1, 1]) extra_acqf_args = {"target": 0.75, "beta": 1.96} + transforms = ParameterTransforms( + normalize=Normalize(d=2, bounds=torch.stack([lb, ub])) + ) + self.strat = Strategy( - model=MonotonicRejectionGP( + model=ModelWrapper( + MonotonicRejectionGP, + transforms=transforms, + dim=2, lb=lb, ub=ub, - dim=2, monotonic_idxs=[1], ), - generator=MonotonicRejectionGenerator( - acqf=MonotonicMCLSE, acqf_kwargs=extra_acqf_args + generator=GeneratorWrapper( + MonotonicRejectionGenerator, + transforms=transforms, + acqf=MonotonicMCLSE, + acqf_kwargs=extra_acqf_args, ), min_asks=50, lb=lb, diff --git a/tests/test_transforms.py b/tests/test_transforms.py index 40caf71a2..ce778c088 100644 --- a/tests/test_transforms.py +++ b/tests/test_transforms.py @@ -13,7 +13,7 @@ from aepsych.models import GPClassificationModel from aepsych.strategy import SequentialStrategy from aepsych.transforms import GeneratorWrapper, ModelWrapper, ParameterTransforms -from aepsych.transforms.parameters import Log10 +from aepsych.transforms.parameters import Log10Plus, Normalize class TransformsConfigTest(unittest.TestCase): @@ -117,10 +117,15 @@ def test_transforms_in_strategy(self): class TransformsLog10Test(unittest.TestCase): - def test_transform_reshape(self): - x = torch.rand(4, 3, 2) + 1.0 - - transforms = ParameterTransforms(log10=Log10(indices=[0, 1, 2])) + def test_transform_reshape3D(self): + lb = torch.tensor([-1, 0, 10]) + ub = torch.tensor([-1e-6, 9, 99]) + x = SobolGenerator(lb=lb, ub=ub, stimuli_per_trial=2).gen(4) + + transforms = ParameterTransforms( + log10=Log10Plus(indices=[0, 1, 2], constant=2), + normalize=Normalize(d=3, bounds=torch.stack([lb, ub])), + ) transformed_x = transforms.transform(x) untransformed_x = transforms.untransform(transformed_x) @@ -139,12 +144,14 @@ def test_log_transform(self): lower_bound = -10 upper_bound = 10 log_scale = false + normalize_scale = no [signal2] par_type = continuous lower_bound = 1 upper_bound = 100 log_scale = true + normalize_scale = off """ config = Config() config.update(config_str=config_str) @@ -235,3 +242,35 @@ def test_log_model(self): est_max = x[np.argmin((zhat - target) ** 2)] diff = np.abs(est_max / 100 - target) self.assertTrue(diff < 0.15, f"Diff = {diff}") + + +class TransformsNormalize(unittest.TestCase): + def test_normalize_scale(self): + config_str = """ + [common] + parnames = [signal1, signal2] + stimuli_per_trial = 1 + outcome_types = [binary] + + [signal1] + par_type = continuous + lower_bound = -10 + upper_bound = 10 + normalize_scale = false + + [signal2] + par_type = continuous + lower_bound = 0 + upper_bound = 100 + """ + config = Config() + config.update(config_str=config_str) + + transforms = ParameterTransforms.from_config(config) + + values = torch.tensor([[-5.0, 20.0], [20.0, 1.0]]) + expected = torch.tensor([[-5.0, 0.2], [20.0, 0.01]]) + transformed = transforms.transform(values) + + self.assertTrue(torch.allclose(transformed, expected)) + self.assertTrue(torch.allclose(transforms.untransform(transformed), values))
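The config-driven path that the tests above exercise can be summarized in one short sketch (illustrative only); the expected key names assume the `{par}_Log10` / `{par}_Normalize` naming used in `ParameterTransforms.from_config` in this diff.

```python
# Sketch: build transforms from a config string and round-trip some points.
import torch

from aepsych.config import Config
from aepsych.transforms import ParameterTransforms

config_str = """
[common]
parnames = [signal1, signal2]
stimuli_per_trial = 1
outcome_types = [binary]

[signal1]
par_type = continuous
lower_bound = -10
upper_bound = 10
normalize_scale = false

[signal2]
par_type = continuous
lower_bound = 1
upper_bound = 100
log_scale = true
"""

config = Config()
config.update(config_str=config_str)

transforms = ParameterTransforms.from_config(config)
# signal1 gets no transforms (normalize off, no log scale); signal2 is log-scaled
# first and then normalized, so the expected keys are:
# ["signal2_Log10", "signal2_Normalize"]
print(list(transforms.keys()))

x = torch.tensor([[-5.0, 10.0]])
x_t = transforms.transform(x)
x_back = transforms.untransform(x_t)
print(torch.allclose(x_back, x, atol=1e-5))  # True: the chain is invertible
```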