@@ -86,7 +86,19 @@ def __repr__(self):
         msg = 'MaxLikeFilter:\n '
         return msg
 
-    def maxlike(self, data=None, model=None, n_parameters=None, likelihood=None, solver=None, mask=None, n_components=None, n_currents=None, guess_runave_window=50, omega_fixed=None, write_log=True):
+    def maxlike(self,
+                data=None,
+                model=None,
+                n_parameters=None,
+                likelihood=None,
+                solver=None,
+                mask=None,
+                n_components=None,
+                n_currents=None,
+                guess_runave_window=50,
+                omega_fixed=None,
+                write_log=True,
+                minimize_kwargs=None):
         """
         Perform the maximum-likelihood estimation.
         """
@@ -160,10 +172,13 @@ def maxlike(self, data=None, model=None, n_parameters=None, likelihood=None, sol
 
         # Minimize the negative log-likelihood
         self._guess_data = guess_data
+        guess_par = guess_data  # normalize_parameters(guess_data, guess_data)
+        # print(guess_data - denormalize_parameters(guess_par, guess_data))
+        minimize_kwargs = minimize_kwargs or {}  # guard: the default None cannot be **-unpacked
         res = minimize(fun=self.log_like,
-                       x0=guess_data,
+                       x0=guess_par,
                        args=(self.model, omega, self.omega_fixed, self.data, nu, ell),
-                       method=self.solver)
+                       method=self.solver,
+                       **minimize_kwargs)
 
         # Covariance of the parameters
         try:
@@ -176,6 +191,7 @@ def maxlike(self, data=None, model=None, n_parameters=None, likelihood=None, sol
             cov = None
 
         self.parameters_mean = res.x
+        # self.parameters_mean = denormalize_parameters(res.x, self._guess_data)
         if cov is not None:
             self.parameters_std = np.sqrt(cov.diagonal())
 
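
After the fit, the estimate and its uncertainty are exposed as attributes; a small sketch of reading them (names taken from the lines above; parameters_std is only set when the covariance is available):

    theta = filt.parameters_mean                 # optimal parameters (res.x)
    err = getattr(filt, 'parameters_std', None)  # sqrt of the covariance diagonal, or None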
@@ -199,56 +215,110 @@ def guess_data(self, data, omega, omega_fixed, ell, nu, window=10, loglike='wish
 
         guess_data = np.array([guess_data[..., j] for j in [np.argmin(np.abs(omega - omega_fixed[i])) for i in range(len(omega_fixed))]])
         print(guess_data.shape)
-
         if loglike == 'wishart':
-            guess_data = np.array([cholesky(g, lower=False) for g in guess_data]) / np.sqrt(ell)
-
+            guess_data = np.array([cholesky(g, lower=False) for g in guess_data])  # / np.sqrt(ell)
             upper_triangle_indices = np.triu_indices(nu)
-            guess_data = guess_data[:, upper_triangle_indices[0], upper_triangle_indices[1]].T.reshape(-1)
 
-            # guess_data = np.array([guess_data[:, 0, 0], guess_data[:, 0, 1], guess_data[:, 1, 1]]).reshape(-1)
-
+            nw = omega_fixed.shape[0]
+            ie = 0
+            if guess_data.dtype == np.complex128:
+                guess_params = np.zeros((nw, nu**2))
+                for i, j in zip(*upper_triangle_indices):
+                    if i == j:
+                        guess_params[:, ie] = guess_data[:, i, j].real
+                        ie += 1
+                    else:
+                        guess_params[:, ie] = guess_data[:, i, j].real
+                        ie += 1
+                        guess_params[:, ie] = guess_data[:, i, j].imag
+                        ie += 1
+                guess_data = guess_params.flatten()
+            else:
+                guess_params = np.zeros((nw, nu*(nu + 1)//2))
+                for i, j in zip(*upper_triangle_indices):
+                    guess_params[:, ie] = guess_data[:, i, j]
+                    ie += 1
+                guess_data = guess_params.flatten()
+
         return guess_data
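
A note on the packing above: with a complex Hermitian Cholesky factor, each omega_fixed node carries nu**2 real parameters (one real number per diagonal element, a real/imaginary pair per off-diagonal element), while the real symmetric case carries nu*(nu + 1)//2. A self-contained check of that counting (a sketch, independent of the class):

    import numpy as np

    nu = 3
    iu = np.triu_indices(nu)
    n_complex = sum(1 if i == j else 2 for i, j in zip(*iu))
    n_real = len(iu[0])
    assert n_complex == nu**2          # 3 diagonal + 2*3 off-diagonal = 9
    assert n_real == nu*(nu + 1)//2    # 6 upper-triangular entries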
-
-    def log_likelihood_wishart(self, w, model, omega, omega_fixed, data_, nu, ell, eps=1e-9):
+
+    def log_likelihood_wishart(self, w, model, omega, omega_fixed, data_, nu, ell, eps=1e-3):
         """
         Logarithm of the Wishart probability density function.
         """
         n = ell
         p = nu
 
         # Compute scale matrix from the model (symmetrize to ensure positive definiteness)
-        spline = model(omega_fixed, w)
-        V = spline(omega)
-        V = opt_einsum.contract('wba,wbc->wac', V, V) / n
-
+        V = scale_matrix(model, w, omega, omega_fixed, p)
         X = data_
+
         if n < p:
-            # Singular Wishart
-            multig = multigammaln(0.5 * n, n)
-            S = np.linalg.svd(X, hermitian=True, compute_uv=False)
+            S = np.linalg.svd(X, hermitian=True, compute_uv=False)
             detX = np.array([np.prod(s[abs(s) > eps]) for s in S])
 
         else:
-            multig = multigammaln(0.5 * n, p)
             detX = np.linalg.det(X)
 
         invV = np.linalg.inv(V)
         detV = np.linalg.det(V)
 
         trinvV_X = opt_einsum.contract('wab,wba->w', invV, X)
 
-        # log_pdf = -(0.5 * (-n * p * LOG2 - n * np.log(detV) + (n - p - 1) * np.log(detX) - trinvV_X) - multig)
         coeff_detV = -n
         coeff_detX = n - p - 1
-        log_pdf = coeff_detV * np.log(detV) + coeff_detX * np.log(detX) - trinvV_X
-        # print(-np.sum(log_pdf))
+
+        log_pdf = coeff_detV * np.log(detV + eps) + coeff_detX * np.log(detX + eps) - trinvV_X
+        tot = -np.sum(log_pdf)
+        return tot
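
For reference, the commented-out line removed above encodes the full Wishart log-density,

    \log f(X \mid V, n) = \tfrac{1}{2}\left[(n - p - 1)\log\det X - \operatorname{tr}(V^{-1}X) - n\log\det V - np\log 2\right] - \log\Gamma_p\!\left(\tfrac{n}{2}\right),

of which the new code keeps only the parameter-dependent terms: the overall factor 1/2, the log 2 term, and the multivariate gamma function do not affect the minimizer, while eps regularizes the logarithms when a determinant underflows.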
+
+    def log_likelihood_complex_wishart(self, w, model, omega, omega_fixed, data_, nu, ell, eps=1e-9):
+        """
+        Negative logarithm of the Complex Wishart probability density function.
+        """
+        n = ell
+        p = nu
+
+        # Compute the scale matrix from the model
+        S = scale_matrix(model, w, omega, omega_fixed, p)
+        # Alternative parametrization, kept for reference:
+        # nw = w.shape[0]//2
+        # real_part = model(omega_fixed, w[:nw])
+        # imag_part = model(omega_fixed, w[nw:])
+        # # Lower Cholesky factor of S
+        # L = (real_part(omega) + 1j*imag_part(omega))
+        # S = opt_einsum.contract('wba,wbc->wac', L.conj(), L)
+
+        # The distribution refers to the sample covariance matrix of the
+        # (complex) multinormal vectors, not to their average.
+        X = data_
+
+        if n < p:
+            # The singular (n < p) complex Wishart case is not implemented.
+            raise ValueError('n must be greater than or equal to p')
+
+        logdetX = np.log(np.abs(np.linalg.det(X).real))
+
+        invS = np.linalg.inv(S)
+        logdetS = np.log(np.abs(np.linalg.det(S).real))
+
+        trinvS_X = opt_einsum.contract('wab,wba->w', invS, X).real
+
+        log_pdf = (n - p)*logdetX - trinvS_X - n*logdetS
         return -np.sum(log_pdf)
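
Up to terms that do not depend on the parameters (the complex multivariate gamma function), the expression above is the standard complex Wishart log-density for X ~ CW_p(n, S):

    \log f(X \mid S, n) = (n - p)\log\det X - \operatorname{tr}(S^{-1}X) - n\log\det S + \text{const}.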
 
     def log_likelihood_offdiag(self, w, model, omega, omega_fixed, data_, nu, ell):
         """
         Negative of the logarithm of the Variance-Gamma probability density function.
         """
+        # print(w)  # debug leftover, disabled
         spline = model(omega_fixed, w)
         rho = np.clip(spline(omega), -0.98, 0.98)
         _alpha = 1 / (1 - rho**2)
@@ -257,12 +327,18 @@ def log_likelihood_offdiag(self, w, model, omega, omega_fixed, data_, nu, ell):
         _gamma2 = _alpha**2 - _beta**2
         _lambda_minus_half = _lambda - 0.5
 
-        # I am no longer sure this definition of z makes sense. Isn't it simply data_? Ah! It is probably the same thing that happens with the Chi2: it has to be multiplied by the number of dof. Understand this better and run some tests.
-        z = data_ * ell * nu
+        # I am no longer sure this definition of z makes sense.
+        # Isn't it simply data_? Ah! It is probably the same thing that happens with the Chi2: it has to be multiplied by the number of dof. Understand this better and run some tests.
+        z = data_ * nu * ell
         absz = np.abs(z)
-        log_pdf = _lambda * np.log(_gamma2) + _lambda_minus_half * np.log(absz) + np.log(sp.kv(_lambda_minus_half, _alpha * absz)) + \
-            _beta * z - 0.5 * np.log(np.pi) - np.log(sp.gamma(_lambda)) - _lambda_minus_half * np.log(2 * _alpha)
-
+        term1 = _lambda * np.log(_gamma2)
+        term2 = _lambda_minus_half * np.log(absz)
+        term3 = np.log(sp.kv(_lambda_minus_half, _alpha * absz))
+        term4 = _beta * z
+        term5 = -_lambda_minus_half * np.log(2 * _alpha)
+        # log_pdf = term1 + term2 + term3 + term4 + term5 - 0.5*np.log(np.pi) - np.log(sp.gamma(_lambda))  # with constants
+        log_pdf = term1 + term2 + term3 + term4 + term5
         return -np.sum(log_pdf)
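
The five terms above are the parameter-dependent part of the Variance-Gamma log-density; the two constant terms of the deleted one-liner, -0.5*log(pi) and -log(Gamma(lambda)), were dropped because they do not depend on the fitted parameters:

    \log f(z) = \lambda\log\gamma^2 + \left(\lambda - \tfrac{1}{2}\right)\log|z| + \log K_{\lambda - \frac{1}{2}}(\alpha|z|) + \beta z - \left(\lambda - \tfrac{1}{2}\right)\log(2\alpha) - \tfrac{1}{2}\log\pi - \log\Gamma(\lambda).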
 
     def log_likelihood_diag(self, w, model, omega, omega_fixed, data_, nu, ell):
@@ -327,6 +403,38 @@ def log_posterior_normal(self, w, omega, omega_fixed, data, nu=6, ell=3):
         return self.log_prior_offdiag(w) + self.log_likelihood_normal(w, omega, omega_fixed, data, nu, ell)
 
 
+def scale_matrix(model, w, omega, omega_fixed, n):
+    '''
+    Build the scale matrix S(omega) from the parameters w of the spline model
+    of its upper-triangular Cholesky factor L, as S = L^H L at each frequency.
+    '''
+    elements = model(omega_fixed, w)(omega)
+    ie = 0
+    if elements.dtype == np.complex128:
+        L = np.zeros((n, n, omega.shape[0]), dtype=np.complex128)
+        for i, j in zip(*np.triu_indices(n)):
+            if i == j:
+                L[i, j] = elements[:, ie]
+                ie += 1
+            else:
+                L[i, j] = elements[:, ie] + 1j*elements[:, ie + 1]
+                ie += 2
+
+        S = np.einsum('jiw,jkw->wik', L.conj(), L)
+    else:
+        L = np.zeros((n, n, omega.shape[0]))
+        for i, j in zip(*np.triu_indices(n)):
+            L[i, j] = elements[:, ie]
+            ie += 1
+
+        S = np.einsum('jiw,jkw->wik', L, L)
+    return S
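
The einsum above builds S(omega) = L^H L frequency by frequency from the upper-triangular factor L. A standalone check of the contraction (a sketch):

    import numpy as np

    n, nw = 2, 5
    L = np.random.rand(n, n, nw) + 1j*np.random.rand(n, n, nw)
    S = np.einsum('jiw,jkw->wik', L.conj(), L)
    ref = np.stack([L[:, :, w].conj().T @ L[:, :, w] for w in range(nw)])
    assert np.allclose(S, ref)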
+
+def normalize_parameters(p, guess):
+    mini, maxi = -1.2*np.abs(guess), 1.2*np.abs(guess)
+    return (p - mini) / (maxi - mini)
+
+def denormalize_parameters(p, guess):
+    mini, maxi = -1.2*np.abs(guess), 1.2*np.abs(guess)
+    return p*(maxi - mini) + mini
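
These helpers (referenced only in commented-out lines above) rescale the parameters to the window [-1.2*|guess|, 1.2*|guess|], mapped to [0, 1]; the two transformations are exact inverses of each other. A round-trip sanity check (a sketch):

    import numpy as np

    guess = np.array([1.0, -2.0, 0.5])
    p = np.array([0.3, -1.0, 0.1])
    q = denormalize_parameters(normalize_parameters(p, guess), guess)
    assert np.allclose(p, q)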
 
     # # Methods to perform a bayesian estimation of the transport coefficients
 