From be441fae0e42a491b6c2657ccbc861696eec21ac Mon Sep 17 00:00:00 2001 From: I-Bouros Date: Mon, 15 Nov 2021 16:59:54 +0000 Subject: [PATCH 1/7] Binomial log prior class --- docs/source/log_priors.rst | 3 ++ pints/__init__.py | 1 + pints/_log_priors.py | 89 +++++++++++++++++++++++++++++++++- pints/tests/test_log_priors.py | 79 ++++++++++++++++++++++++++++++ 4 files changed, 171 insertions(+), 1 deletion(-) diff --git a/docs/source/log_priors.rst b/docs/source/log_priors.rst index 03a5291ce5..d06e67de74 100644 --- a/docs/source/log_priors.rst +++ b/docs/source/log_priors.rst @@ -15,6 +15,7 @@ Example:: Overview: - :class:`BetaLogPrior` +- :class:`BinomialLogPrior` - :class:`CauchyLogPrior` - :class:`ComposedLogPrior` - :class:`ExponentialLogPrior` @@ -32,6 +33,8 @@ Overview: .. autoclass:: BetaLogPrior +.. autoclass:: BinomialLogPrior + .. autoclass:: CauchyLogPrior .. autoclass:: ComposedLogPrior diff --git a/pints/__init__.py b/pints/__init__.py index df559710be..2a2cab203f 100644 --- a/pints/__init__.py +++ b/pints/__init__.py @@ -92,6 +92,7 @@ def version(formatted=False): # from ._log_priors import ( BetaLogPrior, + BinomialLogPrior, CauchyLogPrior, ComposedLogPrior, ExponentialLogPrior, diff --git a/pints/_log_priors.py b/pints/_log_priors.py index 99311fb7c9..5a8964d326 100644 --- a/pints/_log_priors.py +++ b/pints/_log_priors.py @@ -5,12 +5,14 @@ # released under the BSD 3-clause license. See accompanying LICENSE.md for # copyright notice and full license details. # -import pints + import numpy as np import scipy import scipy.special import scipy.stats +import pints + class BetaLogPrior(pints.LogPrior): r""" @@ -95,6 +97,91 @@ def sample(self, n=1): return np.random.beta(self._a, self._b, size=(n, 1)) +class BinomialLogPrior(pints.LogPrior): + r""" + Defines an binomial (log) prior with given number of trials parameter + ``trials`` and trial success probability parameter ``prob`` with + pdf + + .. math:: + f(x|\text{trials}, \text{prob}) = {\text{trials} \choose x}\; + \text{prob}^{x}\;(1-\text{prob})^(\text{trials}-x). + + A random variable :math:`X` distributed according to this pdf has + expectation + + .. math:: + \mathrm{E}(X)=\text{trials} \; \text{prob}. + + For example, to create a prior with ``trials=10`` and ``prob=0.5`` + use:: + + p = pints.BinomialLogPrior(10, 0.5) + + Extends :class:`LogPrior`. + """ + + def __init__(self, trials, prob): + # Parse input arguments + self._trials = float(trials) + self._prob = float(prob) + + # Validate inputs + if not self._trials.is_integer(): + raise TypeError('Count parameter "trials" must be \ + integer.') + if self._trials <= 0: + raise ValueError('Count parameter "trials" must be \ + positive.') + if not ((self._prob > 0) and (self._prob < 1)): + raise ValueError('Probability parameter "prob" must be \ + between 0 and 1.') + + # Cache constant + self._log_prob = np.log(self._prob) + self._log_opo_prob = np.log(1-self._prob) + + def __call__(self, x): + if x[0] < 0.0 or x[0] > 1.0 or not float(x[0]).is_integer(): + return -np.inf + else: + return scipy.special.comb(self._trials, x[0]) + self._log_opo_prob\ + * (self._trials - x[0]) + self._log_prob * x[0] + + def cdf(self, x): + """ See :meth:`LogPrior.cdf()`. """ + return scipy.stats.binom.cdf(x, n=self._trials, p=self._prob, loc=0) + + def icdf(self, q): + """ See :meth:`LogPrior.icdf()`. """ + return scipy.stats.binom.ppf(q, n=self._trials, p=self._prob, loc=0) + + def evaluateS1(self, x): + """ See :meth:`LogPDF.evaluateS1()`. 
""" + value = self(x) + _x = x[0] + + if _x < 0.0 or _x > 1.0: + return value, np.asarray([0.]) + else: + return value, np.asarray( + scipy.special.polygamma(0, self._trials - x[0] + 1) - + scipy.special.polygamma(0, x[0] + 1) + self._log_prob - + self._log_opo_prob) + + def mean(self): + """ See :meth:`LogPrior.mean()`. """ + return self._trials * self._prob + + def n_parameters(self): + """ See :meth:`LogPrior.n_parameters()`. """ + return 2 + + def sample(self, n=1): + """ See :meth:`LogPrior.sample()`. """ + return np.random.binomial(n=self._trials, p=self._prob, size=(n, 1)) + + class CauchyLogPrior(pints.LogPrior): r""" Defines a 1-d Cauchy (log) prior with a given ``location``, and ``scale``, diff --git a/pints/tests/test_log_priors.py b/pints/tests/test_log_priors.py index 82e1675bff..c749d55f82 100755 --- a/pints/tests/test_log_priors.py +++ b/pints/tests/test_log_priors.py @@ -102,6 +102,85 @@ def test_beta_prior_sampling(self): samples = p1.sample(n) self.assertTrue(np.abs(np.mean(samples) - 0.4) < 0.01) + def test_binomial_prior(self): + + # Test input parameters + self.assertRaises(TypeError, pints.BinomialLogPrior, 3.5, 0.5) + self.assertRaises(ValueError, pints.BinomialLogPrior, 0, 0.5) + self.assertRaises(ValueError, pints.BinomialLogPrior, 2, -2) + self.assertRaises(ValueError, pints.BinomialLogPrior, 2, 2) + + p1 = pints.BinomialLogPrior(5, 0.34) + p2 = pints.BinomialLogPrior(10.0, 0.56) + + points = [0, 1., 2, 3, 4, 5.] + + # Test means + self.assertAlmostEqual(p1.mean(), 1.7) + self.assertAlmostEqual(p2.mean(), 5.6) + + # Test CDFs + self.assertAlmostEqual(p1.cdf(2), 0.7801491456) + self.assertAlmostEqual(p1.cdf(3), 0.9513573696) + self.assertAlmostEqual(p2.cdf(5), 0.46958126185472589824) + self.assertAlmostEqual(p2.cdf(7), 0.88887567769391857664) + + # Test inverse-CDFs + self.assertAlmostEqual(p1.icdf(0.9), 3) + self.assertAlmostEqual(p1.icdf(0.996), 5) + self.assertAlmostEqual(p2.icdf(0.5), 6) + self.assertAlmostEqual(p2.icdf(0.9), 8) + + # Test n_parameters + self.assertEqual(p1.n_parameters(), 2) + + # Test specific points + for point in points: + to_test = [point] + self.assertAlmostEqual( + scipy.stats.binom.logpmf(to_test[0], 5, 0.34), + p1(to_test), + places=9) + self.assertAlmostEqual( + scipy.stats.binom.logpmf(to_test[0], 10.0, 0.56), + p2(to_test), + places=9) + + # Test derivatives + p1_derivs = [1.620039115923069, 0.420039115923069, + -0.329960884076930, -0.996627550743597, + -1.746627550743597, -2.946627550743597] + + p2_derivs = [3.170130310785142, 2.070130310785142, + 1.459019199674030, 1.000685866340697, + 0.607828723483554, 0.241162056816888] + + for point, deriv in zip(points, p1_derivs): + calc_val, calc_deriv = p1.evaluateS1([point]) + self.assertAlmostEqual(calc_deriv[0], deriv) + + for point, deriv in zip(points, p2_derivs): + calc_val, calc_deriv = p2.evaluateS1([point]) + self.assertAlmostEqual(calc_deriv[0], deriv) + + def test_binomial_prior_sampling(self): + p1 = pints.BinomialLogPrior(5, 0.34) + self.assertEqual(len(p1.sample()), 1) + + n = 100 + samples1 = p1.sample(n) + self.assertEqual(len(samples1), n) + + n = 10000 + p1 = pints.BinomialLogPrior(5, 0.34) + samples = p1.sample(n) + print(np.mean(samples)) + self.assertTrue(np.abs(np.mean(samples) - 1.7) < 0.01) + + p1 = pints.BinomialLogPrior(10, 0.56) + samples = p1.sample(n) + self.assertTrue(np.abs(np.mean(samples) - 5.6) < 0.01) + def test_cauchy_prior(self): # Test two specific function values p1 = pints.CauchyLogPrior(0, 10) From f214dfeaf87c9a21f877736e5314aeba96300c33 Mon Sep 17 
00:00:00 2001 From: I-Bouros Date: Mon, 15 Nov 2021 22:49:43 +0000 Subject: [PATCH 2/7] Correct test for BinomialLogPrior class --- pints/_log_priors.py | 10 ++++++---- pints/tests/test_log_priors.py | 8 +++++--- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/pints/_log_priors.py b/pints/_log_priors.py index 5a8964d326..f2f9510ef4 100644 --- a/pints/_log_priors.py +++ b/pints/_log_priors.py @@ -142,11 +142,13 @@ def __init__(self, trials, prob): self._log_opo_prob = np.log(1-self._prob) def __call__(self, x): - if x[0] < 0.0 or x[0] > 1.0 or not float(x[0]).is_integer(): + if x[0] < 0.0 or x[0] > self._trials or not float(x[0]).is_integer(): return -np.inf else: - return scipy.special.comb(self._trials, x[0]) + self._log_opo_prob\ - * (self._trials - x[0]) + self._log_prob * x[0] + return np.log( + scipy.special.comb(self._trials, x[0])) +\ + self._log_opo_prob * (self._trials - x[0]) +\ + self._log_prob * x[0] def cdf(self, x): """ See :meth:`LogPrior.cdf()`. """ @@ -161,7 +163,7 @@ def evaluateS1(self, x): value = self(x) _x = x[0] - if _x < 0.0 or _x > 1.0: + if _x < 0.0 or _x > self._trials: return value, np.asarray([0.]) else: return value, np.asarray( diff --git a/pints/tests/test_log_priors.py b/pints/tests/test_log_priors.py index c749d55f82..e801172f31 100755 --- a/pints/tests/test_log_priors.py +++ b/pints/tests/test_log_priors.py @@ -138,7 +138,7 @@ def test_binomial_prior(self): for point in points: to_test = [point] self.assertAlmostEqual( - scipy.stats.binom.logpmf(to_test[0], 5, 0.34), + scipy.stats.binom.logpmf(to_test[0], 5.0, 0.34), p1(to_test), places=9) self.assertAlmostEqual( @@ -157,11 +157,13 @@ def test_binomial_prior(self): for point, deriv in zip(points, p1_derivs): calc_val, calc_deriv = p1.evaluateS1([point]) - self.assertAlmostEqual(calc_deriv[0], deriv) + self.assertAlmostEqual( + calc_deriv.item(), deriv) for point, deriv in zip(points, p2_derivs): calc_val, calc_deriv = p2.evaluateS1([point]) - self.assertAlmostEqual(calc_deriv[0], deriv) + self.assertAlmostEqual( + calc_deriv.item(), deriv) def test_binomial_prior_sampling(self): p1 = pints.BinomialLogPrior(5, 0.34) From 5394a792017a0cdbc1aa2e0850182023b3aaaf3f Mon Sep 17 00:00:00 2001 From: I-Bouros Date: Tue, 16 Nov 2021 00:23:01 +0000 Subject: [PATCH 3/7] Initial NegBinomialLogPrior class --- docs/source/log_priors.rst | 3 + pints/__init__.py | 103 +++++++++++++++++---------------- pints/_log_priors.py | 92 ++++++++++++++++++++++++++++- pints/tests/test_log_priors.py | 90 ++++++++++++++++++++++++++-- 4 files changed, 230 insertions(+), 58 deletions(-) diff --git a/docs/source/log_priors.rst b/docs/source/log_priors.rst index d06e67de74..df947d11cd 100644 --- a/docs/source/log_priors.rst +++ b/docs/source/log_priors.rst @@ -25,6 +25,7 @@ Overview: - :class:`InverseGammaLogPrior` - :class:`LogNormalLogPrior` - :class:`MultivariateGaussianLogPrior` +- :class:`NegBinomialLogPrior` - :class:`NormalLogPrior` - :class:`StudentTLogPrior` - :class:`TruncatedGaussianLogPrior` @@ -53,6 +54,8 @@ Overview: .. autoclass:: MultivariateGaussianLogPrior +.. autoclass:: NegBinomialLogPrior + .. autoclass:: NormalLogPrior .. 
autoclass:: StudentTLogPrior diff --git a/pints/__init__.py b/pints/__init__.py index 2a2cab203f..ef16ee4a2a 100644 --- a/pints/__init__.py +++ b/pints/__init__.py @@ -36,9 +36,11 @@ def _load_version_int(): except Exception as e: # pragma: no cover raise RuntimeError('Unable to read version number (' + str(e) + ').') + __version_int__ = _load_version_int() __version__ = '.'.join([str(x) for x in __version_int__]) + # # Expose pints version # @@ -64,21 +66,21 @@ def version(formatted=False): # # Core classes # -from ._core import ForwardModel, ForwardModelS1 -from ._core import TunableMethod -from ._core import SingleOutputProblem, MultiOutputProblem +from ._core import ForwardModel, ForwardModelS1 # noqa +from ._core import TunableMethod # noqa +from ._core import SingleOutputProblem, MultiOutputProblem # noqa # # Utility classes and methods # -from ._util import strfloat, vector, matrix2d -from ._util import Timer -from ._logger import Logger, Loggable +from ._util import strfloat, vector, matrix2d # noqa +from ._util import Timer # noqa +from ._logger import Logger, Loggable # noqa # # Logs of probability density functions (not necessarily normalised) # -from ._log_pdfs import ( +from ._log_pdfs import ( # noqa LogPDF, LogPrior, LogPosterior, @@ -90,7 +92,7 @@ def version(formatted=False): # # Log-priors # -from ._log_priors import ( +from ._log_priors import ( # noqa BetaLogPrior, BinomialLogPrior, CauchyLogPrior, @@ -102,6 +104,7 @@ def version(formatted=False): InverseGammaLogPrior, LogNormalLogPrior, MultivariateGaussianLogPrior, + NegBinomialLogPrior, NormalLogPrior, StudentTLogPrior, TruncatedGaussianLogPrior, @@ -111,7 +114,7 @@ def version(formatted=False): # # Log-likelihoods # -from ._log_likelihoods import ( +from ._log_likelihoods import ( # noqa AR1LogLikelihood, ARMA11LogLikelihood, CauchyLogLikelihood, @@ -129,7 +132,7 @@ def version(formatted=False): # # Boundaries # -from ._boundaries import ( +from ._boundaries import ( # noqa Boundaries, LogPDFBoundaries, RectangularBoundaries, @@ -138,7 +141,7 @@ def version(formatted=False): # # Error measures # -from ._error_measures import ( +from ._error_measures import ( # noqa ErrorMeasure, MeanSquaredError, NormalisedRootMeanSquaredError, @@ -152,7 +155,7 @@ def version(formatted=False): # # Parallel function evaluation # -from ._evaluation import ( +from ._evaluation import ( # noqa evaluate, Evaluator, ParallelEvaluator, @@ -163,7 +166,7 @@ def version(formatted=False): # # Optimisation # -from ._optimisers import ( +from ._optimisers import ( # noqa curve_fit, fmin, Optimisation, @@ -173,19 +176,19 @@ def version(formatted=False): PopulationBasedOptimiser, TriangleWaveTransform, ) -from ._optimisers._cmaes import CMAES -from ._optimisers._cmaes_bare import BareCMAES -from ._optimisers._gradient_descent import GradientDescent -from ._optimisers._nelder_mead import NelderMead -from ._optimisers._pso import PSO -from ._optimisers._snes import SNES -from ._optimisers._xnes import XNES +from ._optimisers._cmaes import CMAES # noqa +from ._optimisers._cmaes_bare import BareCMAES # noqa +from ._optimisers._gradient_descent import GradientDescent # noqa +from ._optimisers._nelder_mead import NelderMead # noqa +from ._optimisers._pso import PSO # noqa +from ._optimisers._snes import SNES # noqa +from ._optimisers._xnes import XNES # noqa # # Diagnostics # -from ._diagnostics import ( +from ._diagnostics import ( # noqa effective_sample_size, rhat, rhat_all_params, @@ -195,7 +198,7 @@ def version(formatted=False): # # MCMC # -from 
._mcmc import ( +from ._mcmc import ( # noqa mcmc_sample, MCMCController, MCMCSampler, @@ -204,49 +207,49 @@ def version(formatted=False): SingleChainMCMC, ) # base classes first -from ._mcmc._adaptive_covariance import AdaptiveCovarianceMC +from ._mcmc._adaptive_covariance import AdaptiveCovarianceMC # noqa # methods -from ._mcmc._differential_evolution import DifferentialEvolutionMCMC -from ._mcmc._dram_ac import DramACMC -from ._mcmc._dream import DreamMCMC -from ._mcmc._dual_averaging import DualAveragingAdaption -from ._mcmc._emcee_hammer import EmceeHammerMCMC -from ._mcmc._haario_ac import HaarioACMC -from ._mcmc._haario_bardenet_ac import HaarioBardenetACMC -from ._mcmc._haario_bardenet_ac import AdaptiveCovarianceMCMC -from ._mcmc._hamiltonian import HamiltonianMCMC -from ._mcmc._mala import MALAMCMC -from ._mcmc._metropolis import MetropolisRandomWalkMCMC -from ._mcmc._monomial_gamma_hamiltonian import MonomialGammaHamiltonianMCMC -from ._mcmc._nuts import NoUTurnMCMC -from ._mcmc._population import PopulationMCMC -from ._mcmc._rao_blackwell_ac import RaoBlackwellACMC -from ._mcmc._relativistic import RelativisticMCMC -from ._mcmc._slice_doubling import SliceDoublingMCMC -from ._mcmc._slice_rank_shrinking import SliceRankShrinkingMCMC -from ._mcmc._slice_stepout import SliceStepoutMCMC -from ._mcmc._summary import MCMCSummary +from ._mcmc._differential_evolution import DifferentialEvolutionMCMC # noqa +from ._mcmc._dram_ac import DramACMC # noqa +from ._mcmc._dream import DreamMCMC # noqa +from ._mcmc._dual_averaging import DualAveragingAdaption # noqa +from ._mcmc._emcee_hammer import EmceeHammerMCMC # noqa +from ._mcmc._haario_ac import HaarioACMC # noqa +from ._mcmc._haario_bardenet_ac import HaarioBardenetACMC # noqa +from ._mcmc._haario_bardenet_ac import AdaptiveCovarianceMCMC # noqa +from ._mcmc._hamiltonian import HamiltonianMCMC # noqa +from ._mcmc._mala import MALAMCMC # noqa +from ._mcmc._metropolis import MetropolisRandomWalkMCMC # noqa +from ._mcmc._monomial_gamma_hamiltonian import MonomialGammaHamiltonianMCMC # noqa +from ._mcmc._nuts import NoUTurnMCMC # noqa +from ._mcmc._population import PopulationMCMC # noqa +from ._mcmc._rao_blackwell_ac import RaoBlackwellACMC # noqa +from ._mcmc._relativistic import RelativisticMCMC # noqa +from ._mcmc._slice_doubling import SliceDoublingMCMC # noqa +from ._mcmc._slice_rank_shrinking import SliceRankShrinkingMCMC # noqa +from ._mcmc._slice_stepout import SliceStepoutMCMC # noqa +from ._mcmc._summary import MCMCSummary # noqa # # Nested samplers # -from ._nested import NestedSampler -from ._nested import NestedController -from ._nested._rejection import NestedRejectionSampler -from ._nested._ellipsoid import NestedEllipsoidSampler +from ._nested import NestedSampler # noqa +from ._nested import NestedController # noqa +from ._nested._rejection import NestedRejectionSampler # noqa +from ._nested._ellipsoid import NestedEllipsoidSampler # noqa # # Sampling initialising # -from ._sample_initial_points import sample_initial_points +from ._sample_initial_points import sample_initial_points # noqa # # Transformations # -from ._transformation import ( +from ._transformation import ( # noqa ComposedTransformation, IdentityTransformation, LogitTransformation, @@ -264,7 +267,7 @@ def version(formatted=False): # # Noise generators (always import!) # -from . import noise +from . 
import noise # noqa # # Remove any imported modules, so we don't expose them as part of pints diff --git a/pints/_log_priors.py b/pints/_log_priors.py index f2f9510ef4..b15a982820 100644 --- a/pints/_log_priors.py +++ b/pints/_log_priors.py @@ -99,7 +99,7 @@ def sample(self, n=1): class BinomialLogPrior(pints.LogPrior): r""" - Defines an binomial (log) prior with given number of trials parameter + Defines a binomial (log) prior with given number of trials parameter ``trials`` and trial success probability parameter ``prob`` with pdf @@ -167,8 +167,8 @@ def evaluateS1(self, x): return value, np.asarray([0.]) else: return value, np.asarray( - scipy.special.polygamma(0, self._trials - x[0] + 1) - - scipy.special.polygamma(0, x[0] + 1) + self._log_prob - + scipy.special.polygamma(0, self._trials - _x + 1) - + scipy.special.polygamma(0, _x + 1) + self._log_prob - self._log_opo_prob) def mean(self): @@ -1066,6 +1066,92 @@ def sample(self, n=1): self._mean, self._cov, size=n) +class NegBinomialLogPrior(pints.LogPrior): + r""" + Defines a negative binomial (log) prior with given number of failures + parameter ``fails`` and trial success probability parameter ``prob`` with + pdf + + .. math:: + f(x|\text{fails}, \text{prob}) = {\text{fails}+x-1 \choose x}\; + \text{prob}^{x}\;(1-\text{prob})^(\text{fails}). + + A random variable :math:`X` distributed according to this pdf has + expectation + + .. math:: + \mathrm{E}(X)=\frac{\text{fails} \; \text{prob}}{1-\text{prob}}. + + For example, to create a prior with ``fails=10`` and ``prob=0.5`` + use:: + + p = pints.NegBinomialLogPrior(10, 0.5) + + Extends :class:`LogPrior`. + """ + + def __init__(self, fails, prob): + # Parse input arguments + self._fails = float(fails) + self._prob = float(prob) + + # Validate inputs + if not self._fails.is_integer(): + raise TypeError('Count parameter "fails" must be \ + integer.') + if self._fails <= 0: + raise ValueError('Count parameter "fails" must be \ + positive.') + if not ((self._prob > 0) and (self._prob < 1)): + raise ValueError('Probability parameter "prob" must be \ + between 0 and 1.') + + # Cache constant + self._log_prob = np.log(self._prob) + self._log_opo_prob = np.log(1-self._prob) + + def __call__(self, x): + if x[0] < 0.0 or not float(x[0]).is_integer(): + return -np.inf + else: + return np.log( + scipy.special.comb(self._fails + x[0] - 1, x[0]-1)) +\ + self._log_opo_prob * (self._fails) + self._log_prob * x[0] + + def cdf(self, x): + """ See :meth:`LogPrior.cdf()`. """ + return scipy.stats.nbinom.cdf(x, n=self._fails, p=self._prob, loc=0) + + def icdf(self, q): + """ See :meth:`LogPrior.icdf()`. """ + return scipy.stats.nbinom.ppf(q, n=self._fails, p=self._prob, loc=0) + + def evaluateS1(self, x): + """ See :meth:`LogPDF.evaluateS1()`. """ + value = self(x) + _x = x[0] + + if _x < 0.0: + return value, np.asarray([0.]) + else: + return value, np.asarray( + scipy.special.polygamma(0, self._fails + _x) - + scipy.special.polygamma(0, _x + 1) + self._log_prob) + + def mean(self): + """ See :meth:`LogPrior.mean()`. """ + return self._fails * self._prob / (1-self._prob) + + def n_parameters(self): + """ See :meth:`LogPrior.n_parameters()`. """ + return 2 + + def sample(self, n=1): + """ See :meth:`LogPrior.sample()`. """ + return np.random.negative_binomial( + n=self._fails, p=self._prob, size=(n, 1)) + + class NormalLogPrior(GaussianLogPrior): r""" Deprecated alias of :class:`GaussianLogPrior`. 
""" diff --git a/pints/tests/test_log_priors.py b/pints/tests/test_log_priors.py index e801172f31..547e68be2e 100755 --- a/pints/tests/test_log_priors.py +++ b/pints/tests/test_log_priors.py @@ -173,15 +173,15 @@ def test_binomial_prior_sampling(self): samples1 = p1.sample(n) self.assertEqual(len(samples1), n) - n = 10000 - p1 = pints.BinomialLogPrior(5, 0.34) + n = 1000000 + p1 = pints.BinomialLogPrior(50, 0.34) samples = p1.sample(n) print(np.mean(samples)) - self.assertTrue(np.abs(np.mean(samples) - 1.7) < 0.01) + self.assertTrue(np.abs(np.mean(samples) - 17) < 0.01) - p1 = pints.BinomialLogPrior(10, 0.56) + p1 = pints.BinomialLogPrior(100, 0.56) samples = p1.sample(n) - self.assertTrue(np.abs(np.mean(samples) - 5.6) < 0.01) + self.assertTrue(np.abs(np.mean(samples) - 56) < 0.01) def test_cauchy_prior(self): # Test two specific function values @@ -902,6 +902,86 @@ def test_multivariate_normal_sampling(self): self.assertTrue(np.all( np.abs(np.diag(covariance) - x.std(axis=0)**2) < 0.1)) + def test_neg_binomial_prior(self): + # Test input parameters + self.assertRaises(TypeError, pints.NegBinomialLogPrior, 3.5, 0.5) + self.assertRaises(ValueError, pints.NegBinomialLogPrior, 0, 0.5) + self.assertRaises(ValueError, pints.NegBinomialLogPrior, 2, -2) + self.assertRaises(ValueError, pints.NegBinomialLogPrior, 2, 2) + + p1 = pints.NegBinomialLogPrior(5, 0.34) + p2 = pints.NegBinomialLogPrior(10.0, 0.56) + + points = [0, 1, 3, 6., 10, 50.] + + # Test means + self.assertAlmostEqual(p1.mean(), 2.5757575757) + self.assertAlmostEqual(p2.mean(), 12.7272727272) + + # Test CDFs + self.assertAlmostEqual(p1.cdf(2), 0.5552842641984) + self.assertAlmostEqual(p1.cdf(13), 0.9998155602046) + self.assertAlmostEqual(p2.cdf(5), 0.066104999268668) + self.assertAlmostEqual(p2.cdf(30), 0.99602550697655) + + # Test inverse-CDFs + self.assertAlmostEqual(p1.icdf(0.9), 5) + self.assertAlmostEqual(p1.icdf(0.996), 10) + self.assertAlmostEqual(p2.icdf(0.5), 12) + self.assertAlmostEqual(p2.icdf(0.9), 20) + + # Test n_parameters + self.assertEqual(p1.n_parameters(), 2) + + # Test specific points + for point in points: + to_test = [point] + self.assertAlmostEqual( + scipy.stats.nbinom.logpmf(to_test[0], 5.0, 0.34), + p1(to_test), + places=9) + self.assertAlmostEqual( + scipy.stats.nbinom.logpmf(to_test[0], 10.0, 0.56), + p2(to_test), + places=9) + + # Test derivatives + p1_derivs = [1.620039115923069, 0.420039115923069, + -0.329960884076930, -0.996627550743597, + -1.746627550743597, -2.946627550743597] + + p2_derivs = [3.170130310785142, 2.070130310785142, + 1.459019199674030, 1.000685866340697, + 0.607828723483554, 0.241162056816888] + + for point, deriv in zip(points, p1_derivs): + calc_val, calc_deriv = p1.evaluateS1([point]) + self.assertAlmostEqual( + calc_deriv.item(), deriv) + + for point, deriv in zip(points, p2_derivs): + calc_val, calc_deriv = p2.evaluateS1([point]) + self.assertAlmostEqual( + calc_deriv.item(), deriv) + + def test_neg_binomial_prior_sampling(self): + p1 = pints.NegBinomialLogPrior(5, 0.34) + self.assertEqual(len(p1.sample()), 1) + + n = 100 + samples1 = p1.sample(n) + self.assertEqual(len(samples1), n) + + n = 1000000 + p1 = pints.NegBinomialLogPrior(5, 0.34) + samples = p1.sample(n) + print(np.mean(samples)) + self.assertTrue(np.abs(np.mean(samples) - 2.5757575757) < 0.01) + + p1 = pints.NegBinomialLogPrior(10., 0.56) + samples = p1.sample(n) + self.assertTrue(np.abs(np.mean(samples) - 12.7272727272) < 0.01) + def test_student_t_prior(self): # Test two specific function values p1 = 
pints.StudentTLogPrior(0, 2, 10) From 413d403a583f9bf18ff2325f1d73425b2ce239bc Mon Sep 17 00:00:00 2001 From: I-Bouros Date: Tue, 16 Nov 2021 11:28:35 +0000 Subject: [PATCH 4/7] Finish test class for NegBinomialLogPrior --- pints/_log_priors.py | 42 ++++++++++++++++----------------- pints/tests/test_log_priors.py | 43 +++++++++++++++++----------------- 2 files changed, 42 insertions(+), 43 deletions(-) diff --git a/pints/_log_priors.py b/pints/_log_priors.py index b15a982820..3c99171b04 100644 --- a/pints/_log_priors.py +++ b/pints/_log_priors.py @@ -1068,21 +1068,21 @@ def sample(self, n=1): class NegBinomialLogPrior(pints.LogPrior): r""" - Defines a negative binomial (log) prior with given number of failures - parameter ``fails`` and trial success probability parameter ``prob`` with - pdf + Defines a negative binomial (log) prior with given number of successes + parameter ``succs`` and trial success probability parameter ``prob`` + with pdf .. math:: - f(x|\text{fails}, \text{prob}) = {\text{fails}+x-1 \choose x}\; - \text{prob}^{x}\;(1-\text{prob})^(\text{fails}). + f(x|\text{succs}, \text{prob}) = {\text{succs}+x-1 \choose + \text{succs}-1}\;\text{prob}^{x}\;(1-\text{prob})^(\text{fails}). A random variable :math:`X` distributed according to this pdf has expectation .. math:: - \mathrm{E}(X)=\frac{\text{fails} \; \text{prob}}{1-\text{prob}}. + \mathrm{E}(X)=\frac{\text{succs} \; 1-\text{prob}}{\text{prob}}. - For example, to create a prior with ``fails=10`` and ``prob=0.5`` + For example, to create a prior with ``succs=10`` and ``prob=0.5`` use:: p = pints.NegBinomialLogPrior(10, 0.5) @@ -1090,17 +1090,17 @@ class NegBinomialLogPrior(pints.LogPrior): Extends :class:`LogPrior`. """ - def __init__(self, fails, prob): + def __init__(self, succs, prob): # Parse input arguments - self._fails = float(fails) + self._succs = float(succs) self._prob = float(prob) # Validate inputs - if not self._fails.is_integer(): - raise TypeError('Count parameter "fails" must be \ + if not self._succs.is_integer(): + raise TypeError('Count parameter "succs" must be \ integer.') - if self._fails <= 0: - raise ValueError('Count parameter "fails" must be \ + if self._succs <= 0: + raise ValueError('Count parameter "succs" must be \ positive.') if not ((self._prob > 0) and (self._prob < 1)): raise ValueError('Probability parameter "prob" must be \ @@ -1115,16 +1115,16 @@ def __call__(self, x): return -np.inf else: return np.log( - scipy.special.comb(self._fails + x[0] - 1, x[0]-1)) +\ - self._log_opo_prob * (self._fails) + self._log_prob * x[0] + scipy.special.comb(self._succs + x[0] - 1, self._succs - 1)) +\ + self._log_prob * self._succs + self._log_opo_prob * x[0] def cdf(self, x): """ See :meth:`LogPrior.cdf()`. """ - return scipy.stats.nbinom.cdf(x, n=self._fails, p=self._prob, loc=0) + return scipy.stats.nbinom.cdf(x, n=self._succs, p=self._prob, loc=0) def icdf(self, q): """ See :meth:`LogPrior.icdf()`. """ - return scipy.stats.nbinom.ppf(q, n=self._fails, p=self._prob, loc=0) + return scipy.stats.nbinom.ppf(q, n=self._succs, p=self._prob, loc=0) def evaluateS1(self, x): """ See :meth:`LogPDF.evaluateS1()`. """ @@ -1135,12 +1135,12 @@ def evaluateS1(self, x): return value, np.asarray([0.]) else: return value, np.asarray( - scipy.special.polygamma(0, self._fails + _x) - - scipy.special.polygamma(0, _x + 1) + self._log_prob) + scipy.special.polygamma(0, self._succs + _x) - + scipy.special.polygamma(0, _x + 1) + self._log_opo_prob) def mean(self): """ See :meth:`LogPrior.mean()`. 
""" - return self._fails * self._prob / (1-self._prob) + return self._succs * (1-self._prob) / self._prob def n_parameters(self): """ See :meth:`LogPrior.n_parameters()`. """ @@ -1149,7 +1149,7 @@ def n_parameters(self): def sample(self, n=1): """ See :meth:`LogPrior.sample()`. """ return np.random.negative_binomial( - n=self._fails, p=self._prob, size=(n, 1)) + n=self._succs, p=self._prob, size=(n, 1)) class NormalLogPrior(GaussianLogPrior): diff --git a/pints/tests/test_log_priors.py b/pints/tests/test_log_priors.py index 547e68be2e..e1df526cd8 100755 --- a/pints/tests/test_log_priors.py +++ b/pints/tests/test_log_priors.py @@ -173,15 +173,14 @@ def test_binomial_prior_sampling(self): samples1 = p1.sample(n) self.assertEqual(len(samples1), n) - n = 1000000 + n = 10000000 p1 = pints.BinomialLogPrior(50, 0.34) samples = p1.sample(n) - print(np.mean(samples)) self.assertTrue(np.abs(np.mean(samples) - 17) < 0.01) - p1 = pints.BinomialLogPrior(100, 0.56) + p1 = pints.BinomialLogPrior(20, 0.56) samples = p1.sample(n) - self.assertTrue(np.abs(np.mean(samples) - 56) < 0.01) + self.assertTrue(np.abs(np.mean(samples) - 11.2) < 0.01) def test_cauchy_prior(self): # Test two specific function values @@ -915,20 +914,20 @@ def test_neg_binomial_prior(self): points = [0, 1, 3, 6., 10, 50.] # Test means - self.assertAlmostEqual(p1.mean(), 2.5757575757) - self.assertAlmostEqual(p2.mean(), 12.7272727272) + self.assertAlmostEqual(p1.mean(), 9.70588235294) + self.assertAlmostEqual(p2.mean(), 7.85714285714) # Test CDFs - self.assertAlmostEqual(p1.cdf(2), 0.5552842641984) - self.assertAlmostEqual(p1.cdf(13), 0.9998155602046) - self.assertAlmostEqual(p2.cdf(5), 0.066104999268668) - self.assertAlmostEqual(p2.cdf(30), 0.99602550697655) + self.assertAlmostEqual(p1.cdf(2), 0.0492247383616) + self.assertAlmostEqual(p1.cdf(13), 0.7865907638488) + self.assertAlmostEqual(p2.cdf(5), 0.2869378948333) + self.assertAlmostEqual(p2.cdf(30), 0.9999833390404) # Test inverse-CDFs - self.assertAlmostEqual(p1.icdf(0.9), 5) - self.assertAlmostEqual(p1.icdf(0.996), 10) - self.assertAlmostEqual(p2.icdf(0.5), 12) - self.assertAlmostEqual(p2.icdf(0.9), 20) + self.assertAlmostEqual(p1.icdf(0.9), 17) + self.assertAlmostEqual(p1.icdf(0.996), 29) + self.assertAlmostEqual(p2.icdf(0.5), 7) + self.assertAlmostEqual(p2.icdf(0.9), 13) # Test n_parameters self.assertEqual(p1.n_parameters(), 2) @@ -946,13 +945,13 @@ def test_neg_binomial_prior(self): places=9) # Test derivatives - p1_derivs = [1.620039115923069, 0.420039115923069, - -0.329960884076930, -0.996627550743597, - -1.746627550743597, -2.946627550743597] + p1_derivs = [1.667817889371667, 0.867817889371667, + 0.344008365562143, 0.063452810006588, + -0.092921371367593, -0.339290388546821] - p2_derivs = [3.170130310785142, 2.070130310785142, - 1.459019199674030, 1.000685866340697, - 0.607828723483554, 0.241162056816888] + p2_derivs = [2.007987701898423, 1.107987701898423, + 0.448896792807514, 0.047248441159163, + -0.202209148894402, -0.656982144114184] for point, deriv in zip(points, p1_derivs): calc_val, calc_deriv = p1.evaluateS1([point]) @@ -976,11 +975,11 @@ def test_neg_binomial_prior_sampling(self): p1 = pints.NegBinomialLogPrior(5, 0.34) samples = p1.sample(n) print(np.mean(samples)) - self.assertTrue(np.abs(np.mean(samples) - 2.5757575757) < 0.01) + self.assertTrue(np.abs(np.mean(samples) - 9.70588235294) < 0.01) p1 = pints.NegBinomialLogPrior(10., 0.56) samples = p1.sample(n) - self.assertTrue(np.abs(np.mean(samples) - 12.7272727272) < 0.01) + 
self.assertTrue(np.abs(np.mean(samples) - 7.85714285714) < 0.01) def test_student_t_prior(self): # Test two specific function values From 74c1d44b7a352a81a71475ea26ad4606b15d3df3 Mon Sep 17 00:00:00 2001 From: I-Bouros Date: Tue, 16 Nov 2021 11:34:00 +0000 Subject: [PATCH 5/7] Run all tests --- pints/_log_priors.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pints/_log_priors.py b/pints/_log_priors.py index 3c99171b04..3579ff490f 100644 --- a/pints/_log_priors.py +++ b/pints/_log_priors.py @@ -139,7 +139,7 @@ def __init__(self, trials, prob): # Cache constant self._log_prob = np.log(self._prob) - self._log_opo_prob = np.log(1-self._prob) + self._log_opo_prob = np.log(1 - self._prob) def __call__(self, x): if x[0] < 0.0 or x[0] > self._trials or not float(x[0]).is_integer(): @@ -1108,7 +1108,7 @@ def __init__(self, succs, prob): # Cache constant self._log_prob = np.log(self._prob) - self._log_opo_prob = np.log(1-self._prob) + self._log_opo_prob = np.log(1 - self._prob) def __call__(self, x): if x[0] < 0.0 or not float(x[0]).is_integer(): @@ -1140,7 +1140,7 @@ def evaluateS1(self, x): def mean(self): """ See :meth:`LogPrior.mean()`. """ - return self._succs * (1-self._prob) / self._prob + return self._succs * (1 - self._prob) / self._prob def n_parameters(self): """ See :meth:`LogPrior.n_parameters()`. """ From 3b051bf6355ff47be2a02189ecbe761612a85518 Mon Sep 17 00:00:00 2001 From: I-Bouros Date: Wed, 9 Nov 2022 10:58:52 +0000 Subject: [PATCH 6/7] Add tests to improve codecov coverage --- pints/_log_priors.py | 4 ++-- pints/tests/test_log_priors.py | 22 ++++++++++++---------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/pints/_log_priors.py b/pints/_log_priors.py index 3579ff490f..74550ac484 100644 --- a/pints/_log_priors.py +++ b/pints/_log_priors.py @@ -163,7 +163,7 @@ def evaluateS1(self, x): value = self(x) _x = x[0] - if _x < 0.0 or _x > self._trials: + if _x < 0.0 or _x > self._trials or not float(_x).is_integer(): return value, np.asarray([0.]) else: return value, np.asarray( @@ -1131,7 +1131,7 @@ def evaluateS1(self, x): value = self(x) _x = x[0] - if _x < 0.0: + if _x < 0.0 or not float(_x).is_integer(): return value, np.asarray([0.]) else: return value, np.asarray( diff --git a/pints/tests/test_log_priors.py b/pints/tests/test_log_priors.py index e1df526cd8..65d6dc859a 100755 --- a/pints/tests/test_log_priors.py +++ b/pints/tests/test_log_priors.py @@ -113,7 +113,7 @@ def test_binomial_prior(self): p1 = pints.BinomialLogPrior(5, 0.34) p2 = pints.BinomialLogPrior(10.0, 0.56) - points = [0, 1., 2, 3, 4, 5.] + points = [-2, 0, 1., 2, 3, 4, 5., 12., 6.3] # Test means self.assertAlmostEqual(p1.mean(), 1.7) @@ -147,13 +147,15 @@ def test_binomial_prior(self): places=9) # Test derivatives - p1_derivs = [1.620039115923069, 0.420039115923069, + p1_derivs = [0., 1.620039115923069, 0.420039115923069, -0.329960884076930, -0.996627550743597, - -1.746627550743597, -2.946627550743597] + -1.746627550743597, -2.946627550743597, + 0., 0.] - p2_derivs = [3.170130310785142, 2.070130310785142, + p2_derivs = [0., 3.170130310785142, 2.070130310785142, 1.459019199674030, 1.000685866340697, - 0.607828723483554, 0.241162056816888] + 0.607828723483554, 0.241162056816888, + 0., 0.] for point, deriv in zip(points, p1_derivs): calc_val, calc_deriv = p1.evaluateS1([point]) @@ -911,7 +913,7 @@ def test_neg_binomial_prior(self): p1 = pints.NegBinomialLogPrior(5, 0.34) p2 = pints.NegBinomialLogPrior(10.0, 0.56) - points = [0, 1, 3, 6., 10, 50.] 
+ points = [-4, 0, 1, 3, 6., 10, 50., 4.5] # Test means self.assertAlmostEqual(p1.mean(), 9.70588235294) @@ -945,13 +947,13 @@ def test_neg_binomial_prior(self): places=9) # Test derivatives - p1_derivs = [1.667817889371667, 0.867817889371667, + p1_derivs = [0, 1.667817889371667, 0.867817889371667, 0.344008365562143, 0.063452810006588, - -0.092921371367593, -0.339290388546821] + -0.092921371367593, -0.339290388546821, 0] - p2_derivs = [2.007987701898423, 1.107987701898423, + p2_derivs = [0, 2.007987701898423, 1.107987701898423, 0.448896792807514, 0.047248441159163, - -0.202209148894402, -0.656982144114184] + -0.202209148894402, -0.656982144114184, 0] for point, deriv in zip(points, p1_derivs): calc_val, calc_deriv = p1.evaluateS1([point]) From cd95c71626a334c49f67bdc55dcf719c8341fd5b Mon Sep 17 00:00:00 2001 From: Michael Clerx Date: Tue, 7 Nov 2023 09:36:45 +0000 Subject: [PATCH 7/7] Undoing accidental change to init --- pints/__init__.py | 86 +++++++++++++++++++++++------------------------ 1 file changed, 43 insertions(+), 43 deletions(-) diff --git a/pints/__init__.py b/pints/__init__.py index 615b9d6842..0726b36b1a 100644 --- a/pints/__init__.py +++ b/pints/__init__.py @@ -66,21 +66,21 @@ def version(formatted=False): # # Core classes # -from ._core import ForwardModel, ForwardModelS1 # noqa -from ._core import TunableMethod # noqa -from ._core import SingleOutputProblem, MultiOutputProblem # noqa +from ._core import ForwardModel, ForwardModelS1 +from ._core import TunableMethod +from ._core import SingleOutputProblem, MultiOutputProblem # # Utility classes and methods # -from ._util import strfloat, vector, matrix2d # noqa -from ._util import Timer # noqa -from ._logger import Logger, Loggable # noqa +from ._util import strfloat, vector, matrix2d +from ._util import Timer +from ._logger import Logger, Loggable # # Logs of probability density functions (not necessarily normalised) # -from ._log_pdfs import ( # noqa +from ._log_pdfs import ( LogPDF, LogPrior, LogPosterior, @@ -92,7 +92,7 @@ def version(formatted=False): # # Log-priors # -from ._log_priors import ( # noqa +from ._log_priors import ( BetaLogPrior, BinomialLogPrior, CauchyLogPrior, @@ -114,7 +114,7 @@ def version(formatted=False): # # Log-likelihoods # -from ._log_likelihoods import ( # noqa +from ._log_likelihoods import ( AR1LogLikelihood, ARMA11LogLikelihood, CauchyLogLikelihood, @@ -133,7 +133,7 @@ def version(formatted=False): # # Boundaries # -from ._boundaries import ( # noqa +from ._boundaries import ( Boundaries, LogPDFBoundaries, RectangularBoundaries, @@ -142,7 +142,7 @@ def version(formatted=False): # # Error measures # -from ._error_measures import ( # noqa +from ._error_measures import ( ErrorMeasure, MeanSquaredError, NormalisedRootMeanSquaredError, @@ -156,7 +156,7 @@ def version(formatted=False): # # Parallel function evaluation # -from ._evaluation import ( # noqa +from ._evaluation import ( evaluate, Evaluator, ParallelEvaluator, @@ -168,7 +168,7 @@ def version(formatted=False): # # Optimisation # -from ._optimisers import ( # noqa +from ._optimisers import ( curve_fit, fmin, Optimisation, @@ -192,7 +192,7 @@ def version(formatted=False): # # Diagnostics # -from ._diagnostics import ( # noqa +from ._diagnostics import ( effective_sample_size, rhat, rhat_all_params, @@ -202,7 +202,7 @@ def version(formatted=False): # # MCMC # -from ._mcmc import ( # noqa +from ._mcmc import ( mcmc_sample, MCMCController, MCMCSampler, @@ -211,38 +211,38 @@ def version(formatted=False): SingleChainMCMC, ) # base classes 
first -from ._mcmc._adaptive_covariance import AdaptiveCovarianceMC # noqa +from ._mcmc._adaptive_covariance import AdaptiveCovarianceMC # methods -from ._mcmc._differential_evolution import DifferentialEvolutionMCMC # noqa -from ._mcmc._dram_ac import DramACMC # noqa -from ._mcmc._dream import DreamMCMC # noqa -from ._mcmc._dual_averaging import DualAveragingAdaption # noqa -from ._mcmc._emcee_hammer import EmceeHammerMCMC # noqa -from ._mcmc._haario_ac import HaarioACMC # noqa -from ._mcmc._haario_bardenet_ac import HaarioBardenetACMC # noqa -from ._mcmc._haario_bardenet_ac import AdaptiveCovarianceMCMC # noqa -from ._mcmc._hamiltonian import HamiltonianMCMC # noqa -from ._mcmc._mala import MALAMCMC # noqa -from ._mcmc._metropolis import MetropolisRandomWalkMCMC # noqa -from ._mcmc._monomial_gamma_hamiltonian import MonomialGammaHamiltonianMCMC # noqa -from ._mcmc._nuts import NoUTurnMCMC # noqa -from ._mcmc._population import PopulationMCMC # noqa -from ._mcmc._rao_blackwell_ac import RaoBlackwellACMC # noqa -from ._mcmc._relativistic import RelativisticMCMC # noqa -from ._mcmc._slice_doubling import SliceDoublingMCMC # noqa -from ._mcmc._slice_rank_shrinking import SliceRankShrinkingMCMC # noqa -from ._mcmc._slice_stepout import SliceStepoutMCMC # noqa -from ._mcmc._summary import MCMCSummary # noqa +from ._mcmc._differential_evolution import DifferentialEvolutionMCMC +from ._mcmc._dram_ac import DramACMC +from ._mcmc._dream import DreamMCMC +from ._mcmc._dual_averaging import DualAveragingAdaption +from ._mcmc._emcee_hammer import EmceeHammerMCMC +from ._mcmc._haario_ac import HaarioACMC +from ._mcmc._haario_bardenet_ac import HaarioBardenetACMC +from ._mcmc._haario_bardenet_ac import AdaptiveCovarianceMCMC +from ._mcmc._hamiltonian import HamiltonianMCMC +from ._mcmc._mala import MALAMCMC +from ._mcmc._metropolis import MetropolisRandomWalkMCMC +from ._mcmc._monomial_gamma_hamiltonian import MonomialGammaHamiltonianMCMC +from ._mcmc._nuts import NoUTurnMCMC +from ._mcmc._population import PopulationMCMC +from ._mcmc._rao_blackwell_ac import RaoBlackwellACMC +from ._mcmc._relativistic import RelativisticMCMC +from ._mcmc._slice_doubling import SliceDoublingMCMC +from ._mcmc._slice_rank_shrinking import SliceRankShrinkingMCMC +from ._mcmc._slice_stepout import SliceStepoutMCMC +from ._mcmc._summary import MCMCSummary # # Nested samplers # -from ._nested import NestedSampler # noqa -from ._nested import NestedController # noqa -from ._nested._rejection import NestedRejectionSampler # noqa -from ._nested._ellipsoid import NestedEllipsoidSampler # noqa +from ._nested import NestedSampler +from ._nested import NestedController +from ._nested._rejection import NestedRejectionSampler +from ._nested._ellipsoid import NestedEllipsoidSampler # @@ -257,13 +257,13 @@ def version(formatted=False): # # Sampling initialising # -from ._sample_initial_points import sample_initial_points # noqa +from ._sample_initial_points import sample_initial_points # # Transformations # -from ._transformation import ( # noqa +from ._transformation import ( ComposedTransformation, IdentityTransformation, LogitTransformation, @@ -283,7 +283,7 @@ def version(formatted=False): # # Noise generators (always import!) # -from . import noise # noqa +from . import noise # # Remove any imported modules, so we don't expose them as part of pints
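
The sketch below exercises the finished ``BinomialLogPrior`` as it stands after patch 6. It is an illustrative sketch only, not part of the patch series: it assumes a PINTS build that includes this branch, and checks the log-pmf against ``scipy.stats.binom``, which the class already uses for ``cdf`` and ``icdf``::

    import numpy as np
    import scipy.stats
    import pints

    prior = pints.BinomialLogPrior(10, 0.5)  # 10 trials, success prob 0.5

    # Support is the integers 0..trials; anything else maps to -inf.
    print(prior([3]))                            # log-pmf at x = 3
    print(prior([3.5]), prior([-1]))             # -inf -inf
    print(scipy.stats.binom.logpmf(3, 10, 0.5))  # should match prior([3])

    # The sample mean approaches trials * prob for large n.
    samples = prior.sample(100000)
    print(np.mean(samples), prior.mean())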
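
``NegBinomialLogPrior``, after patch 4's re-parameterisation, follows the same (n, p) convention as ``scipy.stats.nbinom``: ``succs`` is the target number of successes, ``prob`` the per-trial success probability, and the support is the number of failures x observed before the ``succs``-th success. Another sketch under the same assumption (a PINTS build from this branch)::

    import numpy as np
    import scipy.stats
    import pints

    prior = pints.NegBinomialLogPrior(10, 0.5)

    # Log-pmf agrees with scipy's nbinom under the (n, p) convention.
    print(prior([4]))
    print(scipy.stats.nbinom.logpmf(4, 10, 0.5))

    # The mean is succs * (1 - prob) / prob.
    print(prior.mean())                   # 10 * 0.5 / 0.5 = 10.0
    print(np.mean(prior.sample(100000)))  # should be close to 10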
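
Both classes make ``evaluateS1`` differentiable by continuing the binomial coefficient through the gamma function, which is where the ``scipy.special.polygamma(0, ...)`` (digamma) terms in the derivatives come from. The final sketch, again assuming this branch, checks the closed-form derivative of the binomial prior against a central finite difference of that gamma-relaxed log-pmf; ``relaxed_logpmf`` is a hypothetical helper written here for the check, not part of the patches::

    import numpy as np
    from scipy.special import gammaln
    import pints

    trials, p = 10, 0.5
    prior = pints.BinomialLogPrior(trials, p)

    def relaxed_logpmf(x):
        # Binomial log-pmf with log C(trials, x) written via log-gamma,
        # so that it is differentiable in x.
        return (gammaln(trials + 1) - gammaln(x + 1)
                - gammaln(trials - x + 1)
                + x * np.log(p) + (trials - x) * np.log(1 - p))

    x, h = 3.0, 1e-6
    fd = (relaxed_logpmf(x + h) - relaxed_logpmf(x - h)) / (2 * h)
    value, deriv = prior.evaluateS1([x])
    print(fd, deriv.item())  # the two should agree to roughly 1e-6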