Merge pull request #396 from elfi-dev/dev
Release v0.8.3
hpesonen authored Feb 17, 2022
2 parents 1e43884 + 6d13c7a commit b4663c8
Showing 20 changed files with 2,305 additions and 604 deletions.
8 changes: 7 additions & 1 deletion CHANGELOG.rst
@@ -1,6 +1,12 @@
Changelog
=========


0.8.3 (2022-02-17)
------------------
- Add a new inference method: BOLFIRE
- Fix the Hessian approximation, visualizations and the line search algorithm in ROMC
- Add tests for all ROMC parts

0.8.2 (2021-10-13)
------------------
- Relax tightly pinned dependency on a version of dask[distributed]
5 changes: 3 additions & 2 deletions README.md
@@ -1,4 +1,4 @@
**Version 0.8.2 released!** See the [CHANGELOG](CHANGELOG.rst) and [notebooks](https://github.com/elfi-dev/notebooks).
**Version 0.8.3 released!** See the [CHANGELOG](CHANGELOG.rst) and [notebooks](https://github.com/elfi-dev/notebooks).

<img src="https://raw.githubusercontent.com/elfi-dev/elfi/dev/docs/logos/elfi_logo_text_nobg.png" width="200" />

@@ -24,7 +24,8 @@ Currently implemented LFI methods:
- SMC-ABC sampler with [adaptive threshold selection](https://projecteuclid.org/journals/bayesian-analysis/advance-publication/Adaptive-Approximate-Bayesian-Computation-Tolerance-Selection/10.1214/20-BA1211.full)
- SMC-ABC sampler with [adaptive distance](https://projecteuclid.org/euclid.ba/1460641065)
- [Bayesian Optimization for Likelihood-Free Inference (BOLFI)](http://jmlr.csail.mit.edu/papers/v17/15-017.html)
- [Robust Optimisation Monte Carlo](https://arxiv.org/abs/1904.00670)
- [Robust Optimisation Monte Carlo (ROMC)](https://arxiv.org/abs/1904.00670)
- [Bayesian Optimization for Likelihood-Free Inference by Ratio Estimation (BOLFIRE)](https://helda.helsinki.fi/handle/10138/305039)

Other notable included algorithms and methods:
- Bayesian Optimization
2 changes: 1 addition & 1 deletion docs/conf.py
@@ -33,7 +33,7 @@ def __getattr__(cls, name):
'dask.delayed', 'scipy.linalg', 'scipy.optimize', 'scipy.stats', 'scipy.spatial',
'scipy.sparse', 'scipy.special', 'matplotlib.pyplot', 'numpy.random', 'networkx',
'ipyparallel', 'numpy.lib', 'numpy.lib.format', 'sklearn.linear_model',
'sklearn.pipeline', 'sklearn.preprocessing'
'sklearn.pipeline', 'sklearn.preprocessing', 'numdifftools'
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)

3 changes: 2 additions & 1 deletion elfi/__init__.py
@@ -14,6 +14,7 @@
from elfi.methods.diagnostics import TwoStageSelection
from elfi.methods.model_selection import *
from elfi.methods.inference.bolfi import *
from elfi.methods.inference.bolfire import *
from elfi.methods.inference.romc import *
from elfi.methods.inference.samplers import *
from elfi.methods.post_processing import adjust_posterior
@@ -30,4 +31,4 @@
__email__ = '[email protected]'

# make sure __version__ is on the last non-empty line (read by setup.py)
__version__ = '0.8.2'
__version__ = '0.8.3'
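The new top-level import makes BOLFIRE available as `elfi.BOLFIRE`. A minimal usage sketch, assuming the entry point follows the fit/sample pattern of `elfi.BOLFI`; the argument names (`n_training_data`, `n_evidence`) are assumptions for illustration and are not confirmed by this diff:

```python
import elfi
from elfi.examples import arch

m = arch.get_model(seed_obs=42)

# Assumed BOLFI-style interface; consult the BOLFIRE docstrings for the
# actual signature and defaults.
bolfire = elfi.BOLFIRE(m, n_training_data=500)
bolfire.fit(n_evidence=200)
result = bolfire.sample(1000)
result.summary()
```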
1 change: 1 addition & 0 deletions elfi/classifiers/__init__.py
@@ -0,0 +1 @@
# noqa: D104
119 changes: 119 additions & 0 deletions elfi/classifiers/classifier.py
@@ -0,0 +1,119 @@
"""Implementations for ratio estimation classifiers."""

import abc

import numpy as np
from sklearn.linear_model import LogisticRegression as LogReg
from sklearn.preprocessing import StandardScaler


class Classifier(abc.ABC):
"""An abstract base class for a ratio estimation classifier."""

@abc.abstractmethod
def __init__(self):
"""Initialize a classifier."""
raise NotImplementedError

@abc.abstractmethod
def fit(self, X, y):
"""Fit a classifier.
Parameters
----------
X: np.ndarray (n_samples, n_features)
Feature vectors of data.
y: np.ndarray (n_samples, )
Target values, must be binary.
"""
raise NotImplementedError

@abc.abstractmethod
def predict_log_likelihood_ratio(self, X):
"""Predict a log-likelihood ratio.
Parameters
----------
X: np.ndarray (n_samples, n_features)
Feature vectors of data.
Returns
-------
np.ndarray
"""
raise NotImplementedError

def predict_likelihood_ratio(self, X):
"""Predict a likelihood ratio.
Parameters
----------
X: np.ndarray (n_samples, n_features)
Feature vectors of data.
Returns
-------
np.ndarray
"""
return np.exp(self.predict_log_likelihood_ratio(X))

@property
@abc.abstractmethod
def attributes(self):
"""Return attributes dictionary."""
raise NotImplementedError


class LogisticRegression(Classifier):
"""A logistic regression classifier for ratio estimation."""

def __init__(self, config=None, class_min=0):
"""Initialize a logistic regression classifier."""
self.config = self._resolve_config(config)
self.class_min = self._resolve_class_min(class_min)
self.model = LogReg(**self.config)
self.scaler = StandardScaler()

def fit(self, X, y):
"""Fit a logistic regression classifier."""
Xs = self.scaler.fit_transform(X)
self.model.fit(Xs, y)

def predict_log_likelihood_ratio(self, X):
"""Predict a log-likelihood ratio."""
Xs = self.scaler.transform(X)
class_probs = np.maximum(self.model.predict_proba(Xs)[:, 1], self.class_min)
return np.log(class_probs / (1 - class_probs))

@property
def attributes(self):
"""Return an attributes dictionary."""
return {
'parameters': {
'coef_': self.model.coef_.tolist(),
'intercept_': self.model.intercept_.tolist(),
'n_iter': self.model.n_iter_.tolist()
}
}

def _default_config(self):
"""Return a default config."""
return {
'penalty': 'l1',
'solver': 'liblinear'
}

def _resolve_config(self, config):
"""Resolve a config for logistic regression classifier."""
if not isinstance(config, dict):
config = self._default_config()
return config

def _resolve_class_min(self, class_min):
"""Resolve a class min parameter that prevents negative inf values."""
if isinstance(class_min, int) or isinstance(class_min, float):
return class_min
raise TypeError('class_min has to be either non-negative int or float')
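For orientation, a minimal sketch of the new classifier on synthetic data; the toy arrays and numbers below are illustrative and not part of the release:

```python
import numpy as np

from elfi.classifiers.classifier import LogisticRegression

# Two overlapping Gaussian classes: label 0 ~ N(0, 1), label 1 ~ N(1, 1).
rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0.0, 1.0, size=(100, 2)),
               rng.normal(1.0, 1.0, size=(100, 2))])
y = np.concatenate([np.zeros(100), np.ones(100)])

clf = LogisticRegression()
clf.fit(X, y)

# Log of p(class 1) / p(class 0) at two query points; positive values
# favour class 1.
queries = np.array([[1.0, 1.0], [-1.0, -1.0]])
print(clf.predict_log_likelihood_ratio(queries))
```

Note that with the default `class_min=0`, a predicted class-1 probability of exactly 0 still yields a -inf log-ratio; passing a small positive `class_min` guards against that.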
193 changes: 193 additions & 0 deletions elfi/examples/arch.py
@@ -0,0 +1,193 @@
"""Example implementation of the ARCH(1) model."""

import logging
from itertools import combinations

import numpy as np

import elfi

logger = logging.getLogger(__name__)


def get_model(n_obs=100, true_params=None, seed_obs=None, n_lags=5):
"""Return a complete ARCH(1) model.
Parameters
----------
n_obs: int
Observation length of the ARCH(1) process.
true_params: list, optinal
Parameters with which the observed data are generated.
seed_obs: int, optional
Seed for the observed data generation.
n_lags: int, optional
Number of lags in summary statistics.
Returns
-------
elfi.ElfiModel
"""
if true_params is None:
true_params = [0.3, 0.7]
logger.info(f'true_params were not given. Now using [t1, t2] = {true_params}.')

# elfi model
m = elfi.ElfiModel()

# priors
t1 = elfi.Prior('uniform', -1, 2, model=m)
t2 = elfi.Prior('uniform', 0, 1, model=m)
priors = [t1, t2]

# observations
y_obs = arch(*true_params, n_obs=n_obs, random_state=np.random.RandomState(seed_obs))

# simulator
Y = elfi.Simulator(arch, *priors, observed=y_obs)

# summary statistics
ss = []
ss.append(elfi.Summary(sample_mean, Y, name='MU', model=m))
ss.append(elfi.Summary(sample_variance, Y, name='VAR', model=m))
for i in range(1, n_lags + 1):
ss.append(elfi.Summary(autocorr, Y, i, name=f'AC_{i}', model=m))
for i, j in combinations(range(1, n_lags + 1), 2):
ss.append(elfi.Summary(pairwise_autocorr, Y, i, j, name=f'PW_{i}_{j}', model=m))

# distance
elfi.Distance('euclidean', *ss, name='d', model=m)

return m


def arch(t1, t2, n_obs=100, batch_size=1, random_state=None):
"""Generate a sequence of samples from the ARCH(1) model.
Parameters
----------
t1: float
Mean process parameter in the ARCH(1) process.
t2: float
Variance process parameter in the ARCH(1) process.
n_obs: int, optional
Observation length of the ARCH(1) process.
batch_size: int, optional
Number of simulations.
random_state: np.random.RandomState, optional
Returns
-------
np.ndarray
"""
random_state = random_state or np.random
y = np.zeros((batch_size, n_obs + 1))
e = E(t2, n_obs, batch_size, random_state)
for i in range(1, n_obs + 1):
y[:, i] = t1 * y[:, i - 1] + e[:, i]
return y[:, 1:]


def E(t2, n_obs=100, batch_size=1, random_state=None):
"""Variance process function in the ARCH(1) model.
Parameters
----------
t2: float
Variance process parameter in the ARCH(1) process.
n_obs: int, optional
Observation length of the ARCH(1) process.
batch_size: int, optional
Number of simulations.
random_state: np.random.RandomState
Returns
-------
np.ndarray
"""
random_state = random_state or np.random
xi = random_state.normal(size=(batch_size, n_obs + 1))
e = np.zeros((batch_size, n_obs + 1))
e[:, 0] = random_state.normal(size=batch_size)
for i in range(1, n_obs + 1):
e[:, i] = xi[:, i] * np.sqrt(0.2 + t2 * np.power(e[:, i - 1], 2))
return e


def sample_mean(x):
"""Calculate the sample mean.
Parameters
----------
x: np.ndarray
Simulated/observed data.
Returns
-------
np.ndarray
"""
return np.mean(x, axis=1)


def sample_variance(x):
"""Calculate the sample variance.
Parameters
----------
x: np.ndarray
Simulated/observed data.
Returns
-------
np.ndarray
"""
return np.var(x, axis=1, ddof=1)


def autocorr(x, lag=1):
"""Calculate the autocorrelation.
Parameters
----------
x: np.ndarray
Simulated/observed data.
lag: int, optional
Lag in autocorrelation.
Returns
-------
np.ndarray
"""
n = x.shape[1]
x_mu = np.mean(x, axis=1)
x_std = np.std(x, axis=1, ddof=1)
sc_x = ((x.T - x_mu) / x_std).T
C = np.sum(sc_x[:, lag:] * sc_x[:, :-lag], axis=1) / (n - lag)
return C


def pairwise_autocorr(x, lag_i=1, lag_j=1):
"""Calculate the pairwise autocorrelation.
Parameters
x: np.ndarray
Simulated/observed data.
lag_i: int, optional
Lag in autocorrelation.
lag_j: int, optinal
Lag in autocorrelation.
Returns
-------
np.ndarray
"""
ac_i = autocorr(x, lag_i)
ac_j = autocorr(x, lag_j)
return ac_i * ac_j
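A quick usage sketch of the new example model; rejection ABC is used here only as a smoke test, and the sampler settings are illustrative:

```python
import elfi
from elfi.examples import arch

# Build the ARCH(1) model with the default true parameters [0.3, 0.7].
m = arch.get_model(n_obs=100, seed_obs=42)

# Attach any ELFI inference method to the distance node 'd'.
rej = elfi.Rejection(m['d'], batch_size=1000, seed=0)
result = rej.sample(500, quantile=0.01)
result.summary()
```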