Merge pull request #396 from elfi-dev/dev
Release v0.8.3
Showing 20 changed files with 2,305 additions and 604 deletions.
@@ -14,6 +14,7 @@
from elfi.methods.diagnostics import TwoStageSelection
from elfi.methods.model_selection import *
from elfi.methods.inference.bolfi import *
+from elfi.methods.inference.bolfire import *
from elfi.methods.inference.romc import *
from elfi.methods.inference.samplers import *
from elfi.methods.post_processing import adjust_posterior

@@ -30,4 +31,4 @@
__email__ = '[email protected]'

# make sure __version__ is on the last non-empty line (read by setup.py)
-__version__ = '0.8.2'
+__version__ = '0.8.3'
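The new wildcard import is what surfaces the BOLFIRE inference method at the package top level, and the version bump is what setup.py reads from the last non-empty line of the package __init__. A minimal sanity check, assuming an installed 0.8.3 build (the exported name BOLFIRE is inferred from this diff, not shown in it):

import elfi

# setup.py parses the version from the last non-empty line of the package __init__.
print(elfi.__version__)  # expected: '0.8.3'

# The new bolfire import should expose the method directly under the elfi namespace.
print(hasattr(elfi, 'BOLFIRE'))  # expected: True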
@@ -0,0 +1 @@
# noqa: D104
@@ -0,0 +1,119 @@
"""Implementations for ratio estimation classifiers."""

import abc

import numpy as np
from sklearn.linear_model import LogisticRegression as LogReg
from sklearn.preprocessing import StandardScaler


class Classifier(abc.ABC):
    """An abstract base class for a ratio estimation classifier."""

    @abc.abstractmethod
    def __init__(self):
        """Initialize a classifier."""
        raise NotImplementedError

    @abc.abstractmethod
    def fit(self, X, y):
        """Fit a classifier.

        Parameters
        ----------
        X: np.ndarray (n_samples, n_features)
            Feature vectors of data.
        y: np.ndarray (n_samples, )
            Target values, must be binary.

        """
        raise NotImplementedError

    @abc.abstractmethod
    def predict_log_likelihood_ratio(self, X):
        """Predict a log-likelihood ratio.

        Parameters
        ----------
        X: np.ndarray (n_samples, n_features)
            Feature vectors of data.

        Returns
        -------
        np.ndarray

        """
        raise NotImplementedError

    def predict_likelihood_ratio(self, X):
        """Predict a likelihood ratio.

        Parameters
        ----------
        X: np.ndarray (n_samples, n_features)
            Feature vectors of data.

        Returns
        -------
        np.ndarray

        """
        return np.exp(self.predict_log_likelihood_ratio(X))

    @property
    @abc.abstractmethod
    def attributes(self):
        """Return attributes dictionary."""
        raise NotImplementedError


class LogisticRegression(Classifier):
    """A logistic regression classifier for ratio estimation."""

    def __init__(self, config=None, class_min=0):
        """Initialize a logistic regression classifier."""
        self.config = self._resolve_config(config)
        self.class_min = self._resolve_class_min(class_min)
        self.model = LogReg(**self.config)
        self.scaler = StandardScaler()

    def fit(self, X, y):
        """Fit a logistic regression classifier."""
        Xs = self.scaler.fit_transform(X)
        self.model.fit(Xs, y)

    def predict_log_likelihood_ratio(self, X):
        """Predict a log-likelihood ratio."""
        Xs = self.scaler.transform(X)
        class_probs = np.maximum(self.model.predict_proba(Xs)[:, 1], self.class_min)
        return np.log(class_probs / (1 - class_probs))

    @property
    def attributes(self):
        """Return an attributes dictionary."""
        return {
            'parameters': {
                'coef_': self.model.coef_.tolist(),
                'intercept_': self.model.intercept_.tolist(),
                'n_iter': self.model.n_iter_.tolist()
            }
        }

    def _default_config(self):
        """Return a default config."""
        return {
            'penalty': 'l1',
            'solver': 'liblinear'
        }

    def _resolve_config(self, config):
        """Resolve a config for logistic regression classifier."""
        if not isinstance(config, dict):
            config = self._default_config()
        return config

    def _resolve_class_min(self, class_min):
        """Resolve a class min parameter that prevents negative inf values."""
        if isinstance(class_min, (int, float)):
            return class_min
        raise TypeError('class_min has to be either non-negative int or float')
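As a usage sketch for the new LogisticRegression classifier; the import path and the synthetic two-class data are assumptions for illustration, not part of the commit:

import numpy as np
from elfi.classifiers.classifiers import LogisticRegression  # assumed module path

rng = np.random.RandomState(0)
# Two overlapping Gaussian classes; fit() requires binary targets.
X = np.vstack([rng.normal(0, 1, size=(100, 2)), rng.normal(1, 1, size=(100, 2))])
y = np.concatenate([np.zeros(100), np.ones(100)])

clf = LogisticRegression(class_min=1e-6)  # class_min floors p to avoid log(0)
clf.fit(X, y)
ratios = clf.predict_likelihood_ratio(X[:5])  # exp of the predicted log-ratio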
@@ -0,0 +1,193 @@
"""Example implementation of the ARCH(1) model."""

import logging
from itertools import combinations

import numpy as np

import elfi

logger = logging.getLogger(__name__)


def get_model(n_obs=100, true_params=None, seed_obs=None, n_lags=5):
    """Return a complete ARCH(1) model.

    Parameters
    ----------
    n_obs: int
        Observation length of the ARCH(1) process.
    true_params: list, optional
        Parameters with which the observed data are generated.
    seed_obs: int, optional
        Seed for the observed data generation.
    n_lags: int, optional
        Number of lags in summary statistics.

    Returns
    -------
    elfi.ElfiModel

    """
    if true_params is None:
        true_params = [0.3, 0.7]
        logger.info(f'true_params were not given. Now using [t1, t2] = {true_params}.')

    # elfi model
    m = elfi.ElfiModel()

    # priors
    t1 = elfi.Prior('uniform', -1, 2, model=m)
    t2 = elfi.Prior('uniform', 0, 1, model=m)
    priors = [t1, t2]

    # observations
    y_obs = arch(*true_params, n_obs=n_obs, random_state=np.random.RandomState(seed_obs))

    # simulator
    Y = elfi.Simulator(arch, *priors, observed=y_obs)

    # summary statistics
    ss = []
    ss.append(elfi.Summary(sample_mean, Y, name='MU', model=m))
    ss.append(elfi.Summary(sample_variance, Y, name='VAR', model=m))
    for i in range(1, n_lags + 1):
        ss.append(elfi.Summary(autocorr, Y, i, name=f'AC_{i}', model=m))
    for i, j in combinations(range(1, n_lags + 1), 2):
        ss.append(elfi.Summary(pairwise_autocorr, Y, i, j, name=f'PW_{i}_{j}', model=m))

    # distance
    elfi.Distance('euclidean', *ss, name='d', model=m)

    return m


def arch(t1, t2, n_obs=100, batch_size=1, random_state=None):
    """Generate a sequence of samples from the ARCH(1) model.

    Parameters
    ----------
    t1: float
        Mean process parameter in the ARCH(1) process.
    t2: float
        Variance process parameter in the ARCH(1) process.
    n_obs: int, optional
        Observation length of the ARCH(1) process.
    batch_size: int, optional
        Number of simulations.
    random_state: np.random.RandomState, optional

    Returns
    -------
    np.ndarray

    """
    random_state = random_state or np.random
    y = np.zeros((batch_size, n_obs + 1))
    e = E(t2, n_obs, batch_size, random_state)
    for i in range(1, n_obs + 1):
        y[:, i] = t1 * y[:, i - 1] + e[:, i]
    return y[:, 1:]


def E(t2, n_obs=100, batch_size=1, random_state=None):
    """Variance process function in the ARCH(1) model.

    Parameters
    ----------
    t2: float
        Variance process parameter in the ARCH(1) process.
    n_obs: int, optional
        Observation length of the ARCH(1) process.
    batch_size: int, optional
        Number of simulations.
    random_state: np.random.RandomState

    Returns
    -------
    np.ndarray

    """
    random_state = random_state or np.random
    xi = random_state.normal(size=(batch_size, n_obs + 1))
    e = np.zeros((batch_size, n_obs + 1))
    e[:, 0] = random_state.normal(size=batch_size)
    for i in range(1, n_obs + 1):
        e[:, i] = xi[:, i] * np.sqrt(0.2 + t2 * np.power(e[:, i - 1], 2))
    return e


def sample_mean(x):
    """Calculate the sample mean.

    Parameters
    ----------
    x: np.ndarray
        Simulated/observed data.

    Returns
    -------
    np.ndarray

    """
    return np.mean(x, axis=1)


def sample_variance(x):
    """Calculate the sample variance.

    Parameters
    ----------
    x: np.ndarray
        Simulated/observed data.

    Returns
    -------
    np.ndarray

    """
    return np.var(x, axis=1, ddof=1)


def autocorr(x, lag=1):
    """Calculate the autocorrelation.

    Parameters
    ----------
    x: np.ndarray
        Simulated/observed data.
    lag: int, optional
        Lag in autocorrelation.

    Returns
    -------
    np.ndarray

    """
    n = x.shape[1]
    x_mu = np.mean(x, axis=1)
    x_std = np.std(x, axis=1, ddof=1)
    sc_x = ((x.T - x_mu) / x_std).T
    C = np.sum(sc_x[:, lag:] * sc_x[:, :-lag], axis=1) / (n - lag)
    return C


def pairwise_autocorr(x, lag_i=1, lag_j=1):
    """Calculate the pairwise autocorrelation.

    Parameters
    ----------
    x: np.ndarray
        Simulated/observed data.
    lag_i: int, optional
        Lag in autocorrelation.
    lag_j: int, optional
        Lag in autocorrelation.

    Returns
    -------
    np.ndarray

    """
    ac_i = autocorr(x, lag_i)
    ac_j = autocorr(x, lag_j)
    return ac_i * ac_j
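A short end-to-end sketch using the new example with ELFI's rejection sampler; the sampler settings here are illustrative, not part of the commit:

import elfi
from elfi.examples import arch  # assumed module path for the new example

m = arch.get_model(n_obs=100, seed_obs=42)
rej = elfi.Rejection(m['d'], batch_size=1000, seed=1)
result = rej.sample(500, quantile=0.01)  # keep the best 1% of simulations
print(result.sample_means)  # posterior means for t1 and t2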