docstrings and comments
ces42 committed Aug 1, 2019
1 parent 77c1037 commit e87eb43
Showing 1 changed file with 47 additions and 42 deletions.
89 changes: 47 additions & 42 deletions uncertainties/core.py
@@ -157,30 +157,36 @@ def correlated_values(nom_values, covariance_mat, tags=None):
 
     # We perform a cholesky decomposition of the covariance matrix.
     # If the matrix is only positive semidefinite numpy will refuse to
-    # perform a cholesky decomposition so we 'manually' do a LDL
+    # perform a cholesky decomposition, so we 'manually' do an LDL
     # decomposition
     try:
         L = numpy.linalg.cholesky(covariance_mat)
     except numpy.linalg.LinAlgError:
         L0, D = ldl(covariance_mat)
         L = L0 * numpy.sqrt(D)
 
     # Creation of new, independent variables:
     if tags is None:
         tags = (None, ) * len(nom_values)
-    variables = [Variable(0, 1, tag) for tag in tags]
+    variables = tuple(Variable(0, 1, tag) for tag in tags)
 
     return nom_values + numpy.dot(L, variables)
 
 __all__.append('correlated_values')
 
 def ldl(A):
     """
     Return the LDL factorisation of a symmetric, positive semidefinite
-    matrix. If the matrix is not square, symmetric or positive
+    matrix: a lower triangular matrix L and an array representing
+    the diagonal matrix D. If the matrix is not square or positive
     semi-definite, an error is raised.
 
-    A -- a square symmetric positive semi-definite matrix
+    A -- a square (symmetric) positive semi-definite matrix. Only the
+        lower half of A is read.
     """
-    TOL = 1.49e-8 # square root of float64-accuracy
+    # square root of float64 accuracy. In places where there should be
+    # a positive number we will only accept numbers larger than -TOL
+    TOL = 1.49e-8
 
     n, n_ = numpy.shape(A)
     if n != n_:
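The try/except pattern above appears twice in this commit: attempt a Cholesky decomposition first, and fall back to the manual LDL factorisation when the matrix is only positive semidefinite. A minimal sketch of that fallback on a deliberately singular matrix, assuming the ldl() helper from this diff is in scope:

    import numpy

    # Rank-deficient covariance matrix: the second variable is exactly
    # twice the first, so the matrix is singular (positive semidefinite
    # but not definite) and numpy.linalg.cholesky rejects it.
    cov = numpy.array([[1.0, 2.0],
                       [2.0, 4.0]])

    try:
        L = numpy.linalg.cholesky(cov)
    except numpy.linalg.LinAlgError:
        L0, D = ldl(cov)         # ldl() as defined in this file
        L = L0 * numpy.sqrt(D)   # scale columns so that L @ L.T == cov

    assert numpy.allclose(L.dot(L.T), cov)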
@@ -195,7 +201,7 @@ def ldl(A):
 
         a = A[i, i]
         l = A[i+1:, i]
-        if a < -EPS or (a <= 0 and len(l) > 0 and abs(l).max() >= EPS):
+        if a < -TOL or (a <= 0 and len(l) > 0 and abs(l).max() >= TOL):
             raise numpy.linalg.LinAlgError('matrix must be positive '
                 'semidefinite (failed on %s-th diagonal entry)' % i)
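The rename from EPS to TOL does not change the check itself: a pivot may be slightly negative from rounding noise, but a (near-)zero pivot with non-negligible entries below it cannot occur in a positive semidefinite matrix. A small sanity check of that second clause, again assuming the ldl() defined in this diff:

    import numpy

    # Not positive semidefinite: the first pivot is 0 while the entry
    # below it is 1, so the factorisation must fail on column 0.
    bad = numpy.array([[0.0, 1.0],
                       [1.0, 1.0]])
    try:
        ldl(bad)
    except numpy.linalg.LinAlgError as exc:
        print(exc)  # matrix must be positive semidefinite (failed on 0-th diagonal entry)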

@@ -224,10 +230,8 @@ def correlated_values_norm(values_with_std_dev, correlation_mat,
     deviation) pairs. The returned, correlated values have these
     nominal values and standard deviations.
 
-    correlation_mat -- correlation matrix between the given values, except
-    that any value with a 0 standard deviation must have its correlations
-    set to 0, with a diagonal element set to an arbitrary value (something
-    close to 0-1 is recommended, for a better numerical precision). When
+    correlation_mat -- correlation matrix between the given values. The
+    entries corresponding to values with 0 variance are ignored. When
     no value has a 0 variance, this is the covariance matrix normalized by
     standard deviations, and thus a symmetric matrix with ones on its
     diagonal. This matrix must be a NumPy array-like (list of lists,
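With the new docstring, exact values can simply be passed along with the others; their rows and columns of the correlation matrix are ignored. A usage sketch (values made up for illustration), assuming the behavior introduced by this branch:

    import numpy
    from uncertainties import correlated_values_norm

    # One exact value (std dev 0) between two correlated ones; its row
    # and column in the correlation matrix are ignored.
    values = [(1.0, 0.1), (2.0, 0.0), (3.0, 0.2)]
    corr = numpy.array([[1.0, 0.0, 0.5],
                        [0.0, 1.0, 0.0],
                        [0.5, 0.0, 1.0]])
    x, y, z = correlated_values_norm(values, corr)
    print(y.std_dev)  # 0.0: exact, and uncorrelated with x and z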
@@ -243,42 +247,43 @@
 
     (nominal_values, std_devs) = numpy.transpose(values_with_std_dev)
 
-    # We diagonalize the correlation matrix instead of the
-    # covariance matrix, because this is generally more stable
-    # numerically. In fact, the covariance matrix can have
-    # coefficients with arbitrary values, through changes of units
-    # of its input variables. This creates numerical instabilities.
-    #
-    # The covariance matrix is diagonalized in order to define
-    # the independent variables that model the given values:
-    (variances, transform) = numpy.linalg.eigh(correlation_mat)
+    # For values with zero uncertainty we ignore the corresponding entries
+    # in the correlation matrix
+    zero_stdev = numpy.where(std_devs == 0)[0]
+    eff_corr_mat = numpy.delete(
+        numpy.delete(correlation_mat, zero_stdev, axis=0),
+        zero_stdev,
+        axis=1
+    )
 
-    # Numerical errors might make some variances negative: we set
-    # them to zero:
-    variances[variances < 0] = 0.
+    # We perform a cholesky decomposition of the correlation matrix.
+    # If the matrix is only positive semidefinite numpy will refuse to
+    # perform a cholesky decomposition, so we 'manually' do an LDL
+    # decomposition
+    try:
+        L = numpy.linalg.cholesky(eff_corr_mat)
+    except numpy.linalg.LinAlgError:
+        L0, D = ldl(eff_corr_mat)
+        L = L0 * numpy.sqrt(D)
 
     # Creation of new, independent variables:
-
-    # We use the fact that the eigenvectors in 'transform' are
-    # special: 'transform' is unitary: its inverse is its transpose:
-
-    variables = tuple(
-        # The variables represent "pure" uncertainties:
-        Variable(0, sqrt(variance), tag)
-        for (variance, tag) in zip(variances, tags))
+    eff_variables = tuple(
+        Variable(0, 1, tag) for i, tag in enumerate(tags)
+        if std_devs[i] != 0
+    )
+    zero_stdev_variables = tuple(
+        Variable(0, 0, tag) for i, tag in enumerate(tags)
+        if std_devs[i] == 0
+    )
 
-    # The coordinates of each new uncertainty as a function of the
-    # new variables must include the variable scale (standard deviation):
-    transform *= std_devs[:, numpy.newaxis]
-
-    # Representation of the initial correlated values:
-    values_funcs = tuple(
-        AffineScalarFunc(
-            value,
-            LinearCombination(dict(zip(variables, coords))))
-        for (coords, value) in zip(transform, nominal_values))
-
-    return values_funcs
+    uncert = std_devs[std_devs != 0] * numpy.dot(L, eff_variables)
+    # We need to subtract arange(len(zero_stdev)) because the indices in
+    # zero_stdev refer to the original array, before the zero-variance
+    # entries were removed. numpy.insert returns a new array, so the
+    # result must be assigned back.
+    uncert = numpy.insert(uncert,
+                          zero_stdev - numpy.arange(len(zero_stdev)),
+                          zero_stdev_variables)
+
+    return nominal_values + uncert
 
 __all__.append('correlated_values_norm')
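The index arithmetic in the numpy.insert call is worth a toy illustration: the positions stored in zero_stdev refer to the original array, while numpy.insert interprets its indices relative to the array it is given (here, the one with the zero-variance entries already removed), so each index must be shifted down by the number of deletions that precede it. With made-up numbers:

    import numpy

    zero_stdev = numpy.array([1, 3])          # original positions of the exact values
    uncert = numpy.array([10.0, 30.0, 50.0])  # entries for the remaining values
    restored = numpy.insert(uncert,
                            zero_stdev - numpy.arange(len(zero_stdev)),
                            [0.0, 0.0])
    print(restored)  # [10.  0. 30.  0. 50.]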

