From 20f0e20b313c2a20efa4abd34ed7879635d02bd8 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Fri, 21 Apr 2023 17:55:05 +0200 Subject: [PATCH 01/80] First addition of FastShermanMorrison for testing --- enterprise/fastshermanmorrison/__init__.py | 1 + .../cython_fastshermanmorrison.pyx | 945 ++++++++++++++++++ .../fastshermanmorrison/fastshermanmorrison.c | 233 +++++ .../fastshermanmorrison.py | 163 +++ setup.py | 12 +- 5 files changed, 1353 insertions(+), 1 deletion(-) create mode 100644 enterprise/fastshermanmorrison/__init__.py create mode 100644 enterprise/fastshermanmorrison/cython_fastshermanmorrison.pyx create mode 100644 enterprise/fastshermanmorrison/fastshermanmorrison.c create mode 100644 enterprise/fastshermanmorrison/fastshermanmorrison.py diff --git a/enterprise/fastshermanmorrison/__init__.py b/enterprise/fastshermanmorrison/__init__.py new file mode 100644 index 00000000..3037395f --- /dev/null +++ b/enterprise/fastshermanmorrison/__init__.py @@ -0,0 +1 @@ +from . import fastshermanmorrison diff --git a/enterprise/fastshermanmorrison/cython_fastshermanmorrison.pyx b/enterprise/fastshermanmorrison/cython_fastshermanmorrison.pyx new file mode 100644 index 00000000..c8a3b8c2 --- /dev/null +++ b/enterprise/fastshermanmorrison/cython_fastshermanmorrison.pyx @@ -0,0 +1,945 @@ +cimport numpy as np +import numpy as np +np.import_array() + +from libc.math cimport log, sqrt +import cython +from scipy.linalg.cython_blas cimport dgemm, dger, dgemv + +cdef public void dgemm_(char *transa, char *transb, int *m, int *n, int *k, + double *alpha, double *a, int *lda, double *b, + int *ldb, double *beta, double *c, int *ldc): + """Public dgemm that can be used in the external C code""" + dgemm(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc) + +cdef public void dgemv_(char *trans, int *m, int *n, double *alpha, double *a, + int *lda, double *x, int *incx, double *beta, + double *y, int *incy): + """Public dgemv that can be used in the external C code""" + dgemv(trans, m, n, alpha, a, lda, x, incx, beta, y, incy) + +cdef public void dger_(int *m, int *n, double *alpha, double *x, int *incx, + double *y, int *incy, double *a, int *lda): + """Public dger that can be used in the external C code""" + dger(m, n, alpha, x, incx, y, incy, a, lda) + + +cdef extern from "fastshermanmorrison.c": + void blas_block_shermor_2D_asym( + int n_Z1_rows, + int n_Z1_cols, + int n_Z1_row_major, + double *pd_Z1, + int n_Z2_cols, + int n_Z2_row_major, + double *pd_Z2, + double *pd_Nvec, + int n_J_rows, + double *pd_Jvec, + int *pn_Uinds, + double *pd_ZNZ, + double *pd_Jldet + ) + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_block_shermor_0D( \ + np.ndarray[np.double_t,ndim=1] r, \ + np.ndarray[np.double_t,ndim=1] Nvec, \ + np.ndarray[np.double_t,ndim=1] Jvec, \ + np.ndarray[np.int_t,ndim=2] Uinds): + """ + Sherman-Morrison block-inversion for Jitter (Cythonized) + + @param r: The timing residuals, array (n) + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + """ + cdef unsigned int cc, ii, cols = len(Jvec) + cdef double Jldet=0.0, ji, beta, nir, nisum + cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(len(r), 'd') + cdef np.ndarray[np.double_t,ndim=1] Nx = r / Nvec + + ni = 1.0 / Nvec + + for cc in range(cols): + if Jvec[cc] > 0.0: + ji = 1.0 / Jvec[cc] + + nir = 0.0 + nisum = 0.0 + for ii in range(Uinds[cc,0],Uinds[cc,1]): + nisum += ni[ii] + nir += r[ii]*ni[ii] + + beta = 1.0 / (nisum + ji) + + for ii in range(Uinds[cc,0],Uinds[cc,1]): + Nx[ii] -= beta * nir * ni[ii] + + return Nx + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_block_shermor_0D_ld( \ + np.ndarray[np.double_t,ndim=1] r, \ + np.ndarray[np.double_t,ndim=1] Nvec, \ + np.ndarray[np.double_t,ndim=1] Jvec, \ + np.ndarray[np.int_t,ndim=2] Uinds): + """ + Sherman-Morrison block-inversion for Jitter (Cythonized) + + @param r: The timing residuals, array (n) + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + """ + cdef unsigned int cc, ii, rows = len(r), cols = len(Jvec) + cdef double Jldet=0.0, ji, beta, nir, nisum + cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(len(r), 'd') + cdef np.ndarray[np.double_t,ndim=1] Nx = r / Nvec + + ni = 1.0 / Nvec + + for cc in range(rows): + Jldet += log(Nvec[cc]) + + for cc in range(cols): + if Jvec[cc] > 0.0: + ji = 1.0 / Jvec[cc] + + nir = 0.0 + nisum = 0.0 + for ii in range(Uinds[cc,0],Uinds[cc,1]): + nisum += ni[ii] + nir += r[ii]*ni[ii] + + beta = 1.0 / (nisum + ji) + Jldet += log(Jvec[cc]) - log(beta) + + for ii in range(Uinds[cc,0],Uinds[cc,1]): + Nx[ii] -= beta * nir * ni[ii] + + return Jldet, Nx + + +def python_block_shermor_1D(r, Nvec, Jvec, Uinds): + """ + Sherman-Morrison block-inversion for Jitter + + @param r: The timing residuals, array (n) + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + """ + ni = 1.0 / Nvec + Jldet = np.einsum('i->', np.log(Nvec)) + xNx = np.dot(r, r * ni) + + for cc, jv in enumerate(Jvec): + if jv > 0.0: + rblock = r[Uinds[cc,0]:Uinds[cc,1]] + niblock = ni[Uinds[cc,0]:Uinds[cc,1]] + + beta = 1.0 / (np.einsum('i->', niblock)+1.0/jv) + xNx -= beta * np.dot(rblock, niblock)**2 + Jldet += np.log(jv) - np.log(beta) + + return Jldet, xNx + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_block_shermor_1D( \ + np.ndarray[np.double_t,ndim=1] r, \ + np.ndarray[np.double_t,ndim=1] Nvec, \ + np.ndarray[np.double_t,ndim=1] Jvec, \ + np.ndarray[np.int_t,ndim=2] Uinds): + """ + Sherman-Morrison block-inversion for Jitter (Cythonized) + + @param r: The timing residuals, array (n) + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + """ + cdef unsigned int cc, ii, rows = len(r), cols = len(Jvec) + cdef double Jldet=0.0, ji, beta, xNx=0.0, nir, nisum + cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') + + ni = 1.0 / Nvec + + for cc in range(rows): + Jldet += log(Nvec[cc]) + xNx += r[cc]*r[cc]*ni[cc] + + for cc in range(cols): + if Jvec[cc] > 0.0: + ji = 1.0 / Jvec[cc] + + nir = 0.0 + nisum = 0.0 + for ii in range(Uinds[cc,0],Uinds[cc,1]): + nisum += ni[ii] + nir += r[ii]*ni[ii] + + beta = 1.0 / (nisum + ji) + Jldet += log(Jvec[cc]) - log(beta) + xNx -= beta * nir * nir + + return Jldet, xNx + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_block_shermor_1D1( \ + np.ndarray[np.double_t,ndim=1] x, \ + np.ndarray[np.double_t,ndim=1] y, \ + np.ndarray[np.double_t,ndim=1] Nvec, \ + np.ndarray[np.double_t,ndim=1] Jvec, \ + np.ndarray[np.int_t,ndim=2] Uinds): + """ + Sherman-Morrison block-inversion for Jitter (Cythonized) + + @param r: The timing residuals, array (n) + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + """ + cdef unsigned int cc, ii, rows = len(x), cols = len(Jvec) + cdef double Jldet=0.0, ji, beta, yNx=0.0, nix, niy, nisum + cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') + + ni = 1.0 / Nvec + + for cc in range(rows): + Jldet += log(Nvec[cc]) + yNx += y[cc]*x[cc]*ni[cc] + + for cc in range(cols): + if Jvec[cc] > 0.0: + ji = 1.0 / Jvec[cc] + + nix = 0.0 + niy = 0.0 + nisum = 0.0 + for ii in range(Uinds[cc,0],Uinds[cc,1]): + nisum += ni[ii] + nix += x[ii]*ni[ii] + niy += y[ii]*ni[ii] + + beta = 1.0 / (nisum + ji) + Jldet += log(Jvec[cc]) - log(beta) + yNx -= beta * nix * niy + + return Jldet, yNx + + +def python_block_shermor_2D(Z, Nvec, Jvec, Uinds): + """ + Sherman-Morrison block-inversion for Jitter, ZNiZ + + @param Z: The design matrix, array (n x m) + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + + N = D + U*J*U.T + calculate: log(det(N)), Z.T * N^-1 * Z + """ + ni = 1.0 / Nvec + Jldet = np.einsum('i->', np.log(Nvec)) + zNz = np.dot(Z.T*ni, Z) + + for cc, jv in enumerate(Jvec): + if jv > 0.0: + Zblock = Z[Uinds[cc,0]:Uinds[cc,1], :] + niblock = ni[Uinds[cc,0]:Uinds[cc,1]] + + beta = 1.0 / (np.einsum('i->', niblock)+1.0/jv) + zn = np.dot(niblock, Zblock) + zNz -= beta * np.outer(zn.T, zn) + Jldet += np.log(jv) - np.log(beta) + + return Jldet, zNz + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_block_shermor_2D( \ + np.ndarray[np.double_t,ndim=2] Z, \ + np.ndarray[np.double_t,ndim=1] Nvec, \ + np.ndarray[np.double_t,ndim=1] Jvec, \ + np.ndarray[np.int_t,ndim=2] Uinds): + """ + Sherman-Morrison block-inversion for Jitter (Cythonized) + + @param Z: The design matrix, array (n x m) + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + + N = D + U*J*U.T + calculate: log(det(N)), Z.T * N^-1 * Z + """ + cdef unsigned int cc, ii, rows = len(Nvec), cols = len(Jvec) + cdef double Jldet=0.0, ji, beta, nir, nisum + cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(len(Nvec), 'd') + cdef np.ndarray[np.double_t,ndim=2] zNz + + ni = 1.0 / Nvec + zNz = np.dot(Z.T*ni, Z) + + for cc in range(rows): + Jldet += log(Nvec[cc]) + + for cc in range(cols): + if Jvec[cc] > 0.0: + Zblock = Z[Uinds[cc,0]:Uinds[cc,1], :] + niblock = ni[Uinds[cc,0]:Uinds[cc,1]] + + nisum = 0.0 + for ii in range(len(niblock)): + nisum += niblock[ii] + + beta = 1.0 / (nisum+1.0/Jvec[cc]) + Jldet += log(Jvec[cc]) - log(beta) + zn = np.dot(niblock, Zblock) + zNz -= beta * np.outer(zn.T, zn) + + return Jldet, zNz + +def python_block_shermor_2D_asymm(Z1, Z2, Nvec, Jvec, Uinds): + """ + Sherman-Morrison block-inversion for Jitter, ZNiZ + + @param Z: The design matrix, array (n x m) + @param Z2: The second design matrix, array (n x m2) + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + + N = D + U*J*U.T + calculate: log(det(N)), Z.T * N^-1 * Z + """ + ni = 1.0 / Nvec + Jldet = np.einsum('i->', np.log(Nvec)) + zNz = np.dot(Z1.T*ni, Z2) + + for cc, jv in enumerate(Jvec): + if jv > 0.0: + Zblock1 = Z1[Uinds[cc,0]:Uinds[cc,1], :] + Zblock2 = Z2[Uinds[cc,0]:Uinds[cc,1], :] + niblock = ni[Uinds[cc,0]:Uinds[cc,1]] + + beta = 1.0 / (np.einsum('i->', niblock)+1.0/jv) + zn1 = np.dot(niblock, Zblock1) + zn2 = np.dot(niblock, Zblock2) + zNz -= beta * np.outer(zn1.T, zn2) + Jldet += np.log(jv) - np.log(beta) + + return Jldet, zNz + + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_block_shermor_2D_asymm( + np.ndarray[np.double_t,ndim=2] Z1, + np.ndarray[np.double_t,ndim=2] Z2, + np.ndarray[np.double_t,ndim=1] Nvec, + np.ndarray[np.double_t,ndim=1] Jvec, + np.ndarray[np.int_t,ndim=2] Uinds): + """ + Sherman-Morrison block-inversion for Jitter, ZNiZ + + @param Z: The design matrix, array (n x m) + @param Z2: The second design matrix, array (n x m2) + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + + N = D + U*J*U.T + calculate: log(det(N)), Z.T * N^-1 * Z + """ + cdef unsigned int cc, ii, rows = len(Nvec), cols = len(Jvec) + cdef double Jldet=0.0, ji, beta, nir, nisum + cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(len(Nvec), 'd') + cdef np.ndarray[np.double_t,ndim=2] zNz + + print("WARNING: cython_block_shermor_2D_asymm is deprecated.") + print(" use cython_blas_block_shermor_2D_asymm") + + ni = 1.0 / Nvec + for cc in range(rows): + Jldet += log(Nvec[cc]) + zNz = np.dot(Z1.T*ni, Z2) + + for cc in range(cols): + if Jvec[cc] > 0.0: + Zblock1 = Z1[Uinds[cc,0]:Uinds[cc,1], :] + Zblock2 = Z2[Uinds[cc,0]:Uinds[cc,1], :] + niblock = ni[Uinds[cc,0]:Uinds[cc,1]] + + nisum = 0.0 + for ii in range(len(niblock)): + nisum += niblock[ii] + + beta = 1.0 / (nisum+1.0/Jvec[cc]) + zn1 = np.dot(niblock, Zblock1) + zn2 = np.dot(niblock, Zblock2) + + zNz -= beta * np.outer(zn1.T, zn2) + Jldet += log(Jvec[cc]) - log(beta) + + return Jldet, zNz + + +def python_draw_ecor(r, Nvec, Jvec, Uinds): + """ + Given Jvec, draw new epoch-averaged residuals + + @param r: The timing residuals, array (n) + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + + N = D + U*J*U.T + calculate: Norm(0, sqrt(J)) + (U^T * D^{-1} * U)^{-1}U.T D^{-1} r + """ + + rv = np.random.randn(len(Jvec)) * np.sqrt(Jvec) + ni = 1.0 / Nvec + + for cc in range(len(Jvec)): + rblock = r[Uinds[cc,0]:Uinds[cc,1]] + niblock = ni[Uinds[cc,0]:Uinds[cc,1]] + beta = 1.0 / np.einsum('i->', niblock) + + rv[cc] += beta * np.dot(rblock, niblock) + + return rv + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_draw_ecor( \ + np.ndarray[np.double_t,ndim=1] r, \ + np.ndarray[np.double_t,ndim=1] Nvec, \ + np.ndarray[np.double_t,ndim=1] Jvec, \ + np.ndarray[np.int_t,ndim=2] Uinds): + """ + Given Jvec, draw new epoch-averaged residuals + + @param r: The timing residuals, array (n) + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + + N = D + U*J*U.T + calculate: Norm(0, sqrt(J)) + (U^T * D^{-1} * U)^{-1}U.T D^{-1} r + """ + cdef unsigned int cc, ii, rows = len(r), cols = len(Jvec) + cdef double ji, nir, nisum + cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') + cdef np.ndarray[np.double_t,ndim=1] rv = np.random.randn(cols) + + for cc in range(cols): + rv[cc] *= sqrt(Jvec[cc]) + + ni = 1.0 / Nvec + + for cc in range(cols): + ji = 1.0 / Jvec[cc] + + nir = 0.0 + nisum = 0.0 + for ii in range(Uinds[cc,0],Uinds[cc,1]): + nisum += ni[ii] + nir += r[ii]*ni[ii] + + rv[cc] += nir / nisum + + return rv + + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_shermor_draw_ecor( \ + np.ndarray[np.double_t,ndim=1] r, \ + np.ndarray[np.double_t,ndim=1] Nvec, \ + np.ndarray[np.double_t,ndim=1] Jvec, \ + np.ndarray[np.int_t,ndim=2] Uinds): + """ + Do both the Sherman-Morrison block-inversion for Jitter, + and the draw of the ecor parameters together (Cythonized) + + @param r: The timing residuals, array (n) + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + + N = D + U*J*U.T + calculate: r.T * N^-1 * r, log(det(N)), Norm(0, sqrt(J)) + (U^T * D^{-1} * U)^{-1}U.T D^{-1} r + """ + cdef unsigned int cc, ii, rows = len(r), cols = len(Jvec) + cdef double Jldet=0.0, ji, beta, xNx=0.0, nir, nisum + cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') + cdef np.ndarray[np.double_t,ndim=1] rv = np.random.randn(cols) + + ni = 1.0 / Nvec + + for cc in range(cols): + rv[cc] *= sqrt(Jvec[cc]) + + for cc in range(rows): + Jldet += log(Nvec[cc]) + xNx += r[cc]*r[cc]*ni[cc] + + for cc in range(cols): + nir = 0.0 + nisum = 0.0 + for ii in range(Uinds[cc,0],Uinds[cc,1]): + nisum += ni[ii] + nir += r[ii]*ni[ii] + + rv[cc] += nir / nisum + + if Jvec[cc] > 0.0: + ji = 1.0 / Jvec[cc] + + beta = 1.0 / (nisum + ji) + Jldet += log(Jvec[cc]) - log(beta) + xNx -= beta * nir * nir + + return Jldet, xNx, rv + + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_update_ea_residuals( \ + np.ndarray[np.double_t,ndim=1] gibbsresiduals, \ + np.ndarray[np.double_t,ndim=1] gibbssubresiduals, \ + np.ndarray[np.double_t,ndim=1] eat, \ + np.ndarray[np.int_t,ndim=2] Uinds): + """ + Given epoch-averaged residuals, update the residuals, and the subtracted + residuals, so that these can be further processed by the other conditional + probability density functions. + + @param gibbsresiduals: The timing residuals, array (n) + @param gibbssubresiduals: The white noise amplitude, array (n) + @param eat: epoch averaged residuals (k) + @param Uinds: The start/finish indices for the jitter blocks + (k x 2) + + """ + cdef unsigned int k = Uinds.shape[0], ii, cc + + for cc in range(Uinds.shape[0]): + for ii in range(Uinds[cc,0],Uinds[cc,1]): + gibbssubresiduals[ii] += eat[cc] + gibbsresiduals[ii] -= eat[cc] + + return gibbsresiduals, gibbssubresiduals + + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_Uj(np.ndarray[np.double_t,ndim=1] j, \ + np.ndarray[np.int_t,ndim=2] Uinds, nobs): + """ + Given epoch-averaged residuals (j), get the residuals. + Used in 'updateDetSources' + + @param j: epoch averaged residuals (k) + @param Uinds: The start/finish indices for the jitter blocks + (k x 2) + @param nobs: Number of observations (length return vector) + + """ + cdef unsigned int k = Uinds.shape[0], ii, cc + cdef np.ndarray[np.double_t,ndim=1] Uj = np.zeros(nobs, 'd') + + for cc in range(k): + for ii in range(Uinds[cc,0],Uinds[cc,1]): + Uj[ii] += j[cc] + + return Uj + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_UTx(np.ndarray[np.double_t,ndim=1] x, \ + np.ndarray[np.int_t,ndim=2] Uinds): + """ + Given residuals (x), get np.dot(U.T, x) + Used in 'updateDetSources' + + @param j: epoch averaged residuals (k) + @param Uinds: The start/finish indices for the jitter blocks + (k x 2) + + """ + cdef unsigned int k = Uinds.shape[0], ii, cc + cdef np.ndarray[np.double_t,ndim=1] UTx = np.zeros(k, 'd') + + for cc in range(k): + for ii in range(Uinds[cc,0],Uinds[cc,1]): + UTx[cc] += x[ii] + + return UTx + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_logdet_dN( \ + np.ndarray[np.double_t,ndim=1] Nvec, \ + np.ndarray[np.double_t,ndim=1] Jvec, \ + np.ndarray[np.double_t,ndim=1] dNvec, \ + np.ndarray[np.int_t,ndim=2] Uinds): + """ + Sherman-Morrison block-inversion for Jitter (Cythonized) + + Calculates Trace(N^{-1} dN/dNp), where: + - N^{-1} is the ecorr-include N inverse + - dN/dNp is the diagonal derivate of N wrt Np + + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param dNvec: The white noise derivative, array (n) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + """ + cdef unsigned int cc, ii, rows = len(Nvec), cols = len(Jvec) + cdef double tr=0.0, ji, nisum, Nnisum + cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') + cdef np.ndarray[np.double_t,ndim=1] Nni = np.empty(rows, 'd') + + ni = 1.0 / Nvec + Nni = dNvec / Nvec**2 + + for cc in range(rows): + tr += dNvec[cc] * ni[cc] + + for cc in range(cols): + if Jvec[cc] > 0.0: + ji = 1.0 / Jvec[cc] + + nisum = 0.0 + Nnisum = 0.0 + for ii in range(Uinds[cc,0],Uinds[cc,1]): + nisum += ni[ii] + Nnisum += Nni[ii] + + tr -= Nnisum / (nisum + ji) + + return tr + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_logdet_dJ( \ + np.ndarray[np.double_t,ndim=1] Nvec, \ + np.ndarray[np.double_t,ndim=1] Jvec, \ + np.ndarray[np.double_t,ndim=1] dJvec, \ + np.ndarray[np.int_t,ndim=2] Uinds): + """ + Sherman-Morrison block-inversion for Jitter (Cythonized) + + Calculates Trace(N^{-1} dN/dJp), where: + - N^{-1} is the ecorr-include N inverse + - dN/dJp = U dJ/dJp U^{T}, with dJ/dJp the diagnal derivative of J wrt + Jp + + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param dJvec: The jitter derivative, array (k) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + """ + cdef unsigned int cc, ii, rows = len(Nvec), cols = len(Jvec) + cdef double dJldet=0.0, ji, beta, nisum + cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') + + ni = 1.0 / Nvec + + for cc in range(cols): + if Jvec[cc] > 0.0: + ji = 1.0 / Jvec[cc] + + nisum = 0.0 + for ii in range(Uinds[cc,0],Uinds[cc,1]): + nisum += ni[ii] + + beta = 1.0 / (nisum + ji) + + dJldet += dJvec[cc]*(nisum - beta*nisum**2) + + return dJldet + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_logdet_dN_dN( \ + np.ndarray[np.double_t,ndim=1] Nvec, \ + np.ndarray[np.double_t,ndim=1] Jvec, \ + np.ndarray[np.double_t,ndim=1] dNvec1, \ + np.ndarray[np.double_t,ndim=1] dNvec2, \ + np.ndarray[np.int_t,ndim=2] Uinds): + """ + Sherman-Morrison block-inversion for Jitter (Cythonized) + + Calculates Trace(N^{-1} dN/dNp1 N^{-1} dN/dNp2), where: + - N^{-1} is the ecorr-include N inverse + - dN/dNpx is the diagonal derivate of N wrt Npx + + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param dNvec1: The white noise derivative, array (n) + @param dNvec2: The white noise derivative, array (n) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + """ + cdef unsigned int cc, ii, rows = len(Nvec), cols = len(Jvec) + cdef double tr=0.0, ji, nisum, Nnisum1, Nnisum2, NniNnisum, beta + cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') + cdef np.ndarray[np.double_t,ndim=1] Nni1 = np.empty(rows, 'd') + cdef np.ndarray[np.double_t,ndim=1] Nni2 = np.empty(rows, 'd') + + ni = 1.0 / Nvec + Nni1 = dNvec1 / Nvec**2 + Nni2 = dNvec2 / Nvec**2 + + for cc in range(rows): + tr += dNvec1[cc] * dNvec2[cc] * ni[cc]**2 + + for cc in range(cols): + if Jvec[cc] > 0.0: + ji = 1.0 / Jvec[cc] + + nisum = 0.0 + Nnisum1 = 0.0 + Nnisum2 = 0.0 + NniNnisum = 0.0 + for ii in range(Uinds[cc,0],Uinds[cc,1]): + nisum += ni[ii] + Nnisum1 += Nni1[ii] + Nnisum2 += Nni2[ii] + NniNnisum += Nni1[ii]*Nni2[ii]*Nvec[ii] + + beta = 1.0 / (nisum + ji) + + tr += Nnisum1 * Nnisum2 * beta**2 + tr -= 2 * NniNnisum * beta + + return tr + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_logdet_dN_dJ( \ + np.ndarray[np.double_t,ndim=1] Nvec, \ + np.ndarray[np.double_t,ndim=1] Jvec, \ + np.ndarray[np.double_t,ndim=1] dNvec, \ + np.ndarray[np.double_t,ndim=1] dJvec, \ + np.ndarray[np.int_t,ndim=2] Uinds): + """ + Sherman-Morrison block-inversion for Jitter (Cythonized) + + Calculates Trace(N^{-1} dN/dNp N^{-1} dN/dJp), where: + - N^{-1} is the ecorr-include N inverse + - dN/dNp is the diagonal derivate of N wrt Np + - dN/dJp = U dJ/dJp U^{T}, with dJ/dJp the diagnal derivative of J wrt + Jp + + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param dNvec: The white noise derivative, array (n) + @param dJvec: The white noise ecor derivative, array (k) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + """ + cdef unsigned int cc, ii, rows = len(Nvec), cols = len(Jvec) + cdef double tr=0.0, ji, nisum, Nnisum, beta + cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') + cdef np.ndarray[np.double_t,ndim=1] Nni = np.empty(rows, 'd') + + ni = 1.0 / Nvec + Nni = dNvec / Nvec**2 + + for cc in range(cols): + if Jvec[cc] > 0.0: + ji = 1.0 / Jvec[cc] + + nisum = 0.0 + Nnisum = 0.0 + for ii in range(Uinds[cc,0],Uinds[cc,1]): + nisum += ni[ii] + Nnisum += Nni[ii] + + beta = 1.0 / (nisum + ji) + + tr += Nnisum * dJvec[cc] + tr -= 2 * nisum * dJvec[cc] * Nnisum * beta + tr += Nnisum * nisum**2 * dJvec[cc] *beta**2 + + return tr + +@cython.boundscheck(False) +@cython.wraparound(False) +def cython_logdet_dJ_dJ( \ + np.ndarray[np.double_t,ndim=1] Nvec, \ + np.ndarray[np.double_t,ndim=1] Jvec, \ + np.ndarray[np.double_t,ndim=1] dJvec1, \ + np.ndarray[np.double_t,ndim=1] dJvec2, \ + np.ndarray[np.int_t,ndim=2] Uinds): + """ + Sherman-Morrison block-inversion for Jitter (Cythonized) + + Calculates Trace(N^{-1} dN/dJp1 N^{-1} dN/dJp2), where: + - N^{-1} is the ecorr-include N inverse + - dN/dJpx = U dJ/dJpx U^{T}, with dJ/dJpx the diagnal derivative of J wrt + Jpx + + @param Nvec: The white noise amplitude, array (n) + @param Jvec: The jitter amplitude, array (k) + @param dJvec1: The white noise derivative, array (k) + @param dJvec2: The white noise derivative, array (k) + @param Uinds: The start/finish indices for the jitter blocks (k x 2) + + For this version, the residuals need to be sorted properly so that all the + blocks are continuous in memory. Here, there are n residuals, and k jitter + parameters. + """ + cdef unsigned int cc, ii, rows = len(Nvec), cols = len(Jvec) + cdef double tr=0.0, ji, nisum, beta + cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') + + ni = 1.0 / Nvec + + for cc in range(cols): + if Jvec[cc] > 0.0: + ji = 1.0 / Jvec[cc] + + nisum = 0.0 + for ii in range(Uinds[cc,0],Uinds[cc,1]): + nisum += ni[ii] + + beta = 1.0 / (nisum + ji) + + tr += dJvec1[cc] * dJvec2[cc] * nisum**2 + tr -= 2 * dJvec1[cc] * dJvec2[cc] * beta * nisum**3 + tr += dJvec1[cc] * dJvec2[cc] * beta**2 * nisum**4 + + return tr + + +@cython.boundscheck(False) +@cython.wraparound(False) +cpdef double c_blas_block_shermor_2D_asymm( + np.ndarray[np.double_t,ndim=2] Z1, + np.ndarray[np.double_t,ndim=2] Z2, + np.ndarray[np.double_t,ndim=1] Nvec, + np.ndarray[np.double_t,ndim=1] Jvec, + Uinds, #Need to copy, because Cython can't always use numpy integer arrays. Bah + np.ndarray[np.double_t,ndim=2] ZNZ, + ): + cdef double d_Jldet + + cdef int n_Z1_rows = Z1.shape[0] + cdef int n_Z1_cols = Z1.shape[1] + cdef int n_Z2_cols = Z2.shape[1] + cdef int n_J_rows = len(Jvec) + cdef int n_Z1_row_major, n_Z2_row_major + + n_Z1_row_major = 0 if Z1.flags['F_CONTIGUOUS'] else 1 + n_Z2_row_major = 0 if Z2.flags['F_CONTIGUOUS'] else 1 + + # Hack, because somehow we can't pass integer arrays? + # This is a tiny bit of overhead right here + cdef int [:] Uinds_new + + # This makes it into a C-ordered (Row-Major) array we can pass + Uinds_new = np.ascontiguousarray(Uinds.flatten(), dtype=np.dtype("i")) + + assert Z1.shape[0] == Z2.shape[0] + assert Z1.shape[0] == len(Nvec) + assert Uinds.shape[0] == len(Jvec) + + blas_block_shermor_2D_asym( + n_Z1_rows, + n_Z1_cols, + n_Z1_row_major, + &Z1[0,0], + n_Z2_cols, + n_Z2_row_major, + &Z2[0,0], + &Nvec[0], + n_J_rows, + &Jvec[0], + &Uinds_new[0], + &ZNZ[0,0], + &d_Jldet) + + return d_Jldet + +def cython_blas_block_shermor_2D_asymm(Z1, Z2, Nvec, Jvec, Uinds): + """Wrapper for the C/Cython code""" + + ZNZ = np.zeros((Z1.shape[1], Z2.shape[1]), order='F') + Jldet = c_blas_block_shermor_2D_asymm(Z1, Z2, Nvec, Jvec, np.array(Uinds, order="C"), ZNZ) + + return Jldet, ZNZ + diff --git a/enterprise/fastshermanmorrison/fastshermanmorrison.c b/enterprise/fastshermanmorrison/fastshermanmorrison.c new file mode 100644 index 00000000..f6f13c9d --- /dev/null +++ b/enterprise/fastshermanmorrison/fastshermanmorrison.c @@ -0,0 +1,233 @@ +/* cython_fastshermor.c + * + * Rutger van Haasteren, April 19 2023, Hannover + * + */ + +#include +#include +#include + + +extern void dgemm_(char *transa, char *transb, int *m, int *n, int *k, + double *alpha, double *a, int *lda, double *b, + int *ldb, double *beta, double *c, int *ldc); +extern void dgemv_(char *trans, int *m, int *n, double *alpha, double *a, + int *lda, double *x, int *incx, double *beta, + double *y, int *incy); +extern void dger_(int *m, int *n, double *alpha, double *x, int *incx, + double *y, int *incy, double *a, int *lda); + +static void blas_block_shermor_2D_asym( + int n_Z1_rows, + int n_Z1_cols, + int n_Z1_row_major, + double *pd_Z1, + int n_Z2_cols, + int n_Z2_row_major, + double *pd_Z2, + double *pd_Nvec, + int n_J_rows, + double *pd_Jvec, + int *pn_Uinds, + double *pd_ZNZ, + double *pd_Jldet + ) { + /* C implementation of python_block_shermor_2D_asym, because the python + * overhead is large + * + * parameters + * ---------- + * + * :param n_Z1_rows: Number of rows of Z1 + * :param n_Z1_cols: Number of columns of Z1 + * :param n_Z1_row_major: 1 if Z1 is Row-Major, 0 if Column-Major + * :param pd_Z1: The Z1 matrix + * :param n_Z2_cols: Number of columns of Z2 + * :param n_Z2_row_major: 1 if Z2 is Row-Major, 0 if Column-Major + * :param pd_Z2: The Z2 matrix + * :param pd_Nvec: The Nvec vector + * :param n_J_rows: The number of Jvec elements + * :param pd_Jvec: The Jvec vector + * :param pn_Uinds: The matrix of quantization indices (Row-Major) + * :param pd_ZNZ: The return value of ZNZ (Column-Major) + * :param pd_Jldet: The return value of log(det(J)) + */ + + double d_galpha=1.0, d_gbeta=0.0, d_nisum=0.0, d_beta; + double *pd_Z1ni, *pd_ZNZ_add, *pd_ni, *pd_zn1, *pd_zn2; + int cc, i, j, m, n, k, lda, ldb, ldc, n_jblock, n_jblock_i, n_index; + char *transa, *transb; + + pd_Z1ni = malloc(n_Z1_rows*n_Z1_cols * sizeof(double)); + pd_ZNZ_add = calloc(n_Z1_rows*n_Z1_cols, sizeof(double)); + pd_ni = malloc(n_Z1_rows * sizeof(double)); + pd_zn1 = calloc(n_Z1_cols, sizeof(double)); + pd_zn2 = calloc(n_Z2_cols, sizeof(double)); + + /* openmp this? */ + for(i=0; i 0.0) { + + /* Note: pn_Uinds is row-major */ + d_nisum = 0.0; + n_jblock_i = pn_Uinds[2*cc]; + n_jblock = pn_Uinds[2*cc+1] - pn_Uinds[2*cc]; + for(i=pn_Uinds[2*cc]; i 1: + rblock = x[slc] + niblock = 1 / self._nvec[slc] + beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) + Nx[slc] -= beta * np.dot(niblock, rblock) * niblock + return Nx + + def _solve_1D1(self, x, y): + """Solves :math:`y^T N^{-1}x`, where :math:`x` and + :math:`y` are vectors. + """ + + Nx = x / self._nvec + yNx = np.dot(y, Nx) + for slc, jv in zip(self._slices, self._jvec): + if slc.stop - slc.start > 1: + xblock = x[slc] + yblock = y[slc] + niblock = 1 / self._nvec[slc] + beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) + yNx -= beta * np.dot(niblock, xblock) * np.dot(niblock, yblock) + return yNx + + def _solve_2D2(self, X, Z): + """Solves :math:`Z^T N^{-1}X`, where :math:`X` + and :math:`Z` are 2-d arrays. + """ + + ZNX = np.dot(Z.T / self._nvec, X) + for slc, jv in zip(self._slices, self._jvec): + if slc.stop - slc.start > 1: + Zblock = Z[slc, :] + Xblock = X[slc, :] + niblock = 1 / self._nvec[slc] + beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) + zn = np.dot(niblock, Zblock) + xn = np.dot(niblock, Xblock) + ZNX -= beta * np.outer(zn.T, xn) + return ZNX + + def _get_logdet(self): + """Returns log determinant of :math:`N+UJU^{T}` where :math:`U` + is a quantization matrix. + """ + logdet = np.einsum("i->", np.log(self._nvec)) + for slc, jv in zip(self._slices, self._jvec): + if slc.stop - slc.start > 1: + niblock = 1 / self._nvec[slc] + beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) + logdet += np.log(jv) - np.log(beta) + return logdet + + def solve(self, other, left_array=None, logdet=False): + if other.ndim == 1: + if left_array is None: + ret = self._solve_D1(other) + elif left_array is not None and left_array.ndim == 1: + ret = self._solve_1D1(other, left_array) + elif left_array is not None and left_array.ndim == 2: + ret = np.dot(left_array.T, self._solve_D1(other)) + else: + raise TypeError + elif other.ndim == 2: + if left_array is None: + raise NotImplementedError("ShermanMorrison does not implement _solve_D2") + elif left_array is not None and left_array.ndim == 2: + ret = self._solve_2D2(other, left_array) + elif left_array is not None and left_array.ndim == 1: + ret = np.dot(other.T, self._solve_D1(left_array)) + else: + raise TypeError + else: + raise TypeError + + return (ret, self._get_logdet()) if logdet else ret + +class FastShermanMorrison(ShermanMorrison): + """Custom container class for Sherman-morrison array inversion.""" + + def __init__(self, jvec, slices, nvec=0.0): + self._uinds = np.vstack([[slc.start, slc.stop] for slc in slices]) + super().__init__(jvec, slices, nvec=nvec) + + def __add__(self, other): + nvec = self._nvec + other + return FastShermanMorrison(self._jvec, self._slices, nvec) + + def _solve_D1(self, x): + """Solves :math:`N^{-1}x` where :math:`x` is a vector.""" + return cfsm.cython_block_shermor_0D(x, self._nvec, self._jvec, self._uinds) + + def _solve_1D1(self, x, y): + """Solves :math:`y^T N^{-1}x`, where :math:`x` and + :math:`y` are vectors. + """ + logJdet, yNx = cfsm.cython_block_shermor_1D1(x, y, self._nvec, self._jvec, self._uinds) + return yNx + + def _solve_2D2(self, X, Z): + """Solves :math:`Z^T N^{-1}X`, where :math:`X` + and :math:`Z` are 2-d arrays. + """ + logJdet, ZNX = cfsm.cython_blas_block_shermor_2D_asymm(Z, X, self._nvec, self._jvec, self._uinds) + return ZNX + + def _get_logdet(self): + """Returns log determinant of :math:`N+UJU^{T}` where :math:`U` + is a quantization matrix. + """ + logJdet, xNx = cfsm.cython_block_shermor_1D(np.zeros_like(self._nvec), self._nvec, self._jvec, self._uinds) + return logJdet + + def solve(self, other, left_array=None, logdet=False): + if other.ndim == 1: + if left_array is None: + ret = self._solve_D1(other) + elif left_array is not None and left_array.ndim == 1: + ret = self._solve_1D1(other, left_array) + elif left_array is not None and left_array.ndim == 2: + ret = np.dot(left_array.T, self._solve_D1(other)) + else: + raise TypeError + elif other.ndim == 2: + if left_array is None: + raise NotImplementedError("ShermanMorrison does not implement _solve_D2") + elif left_array is not None and left_array.ndim == 2: + ret = self._solve_2D2(other, left_array) + elif left_array is not None and left_array.ndim == 1: + ret = np.dot(other.T, self._solve_D1(left_array)) + else: + raise TypeError + else: + raise TypeError + + return (ret, self._get_logdet()) if logdet else ret + diff --git a/setup.py b/setup.py index 2e5993a3..a04085da 100644 --- a/setup.py +++ b/setup.py @@ -2,6 +2,8 @@ # -*- coding: utf-8 -*- from setuptools import setup +from setuptools import Extension +from Cython.Build import cythonize with open("README.md", encoding="utf-8") as readme_file: readme = readme_file.read() @@ -14,6 +16,14 @@ "scikit-sparse>=0.4.5", "pint-pulsar>=0.8.3", "libstempo>=2.4.4", + "cython>=0.29.34", +] + +ext_modules=[ + Extension('enterprise.fastshermanmorrison.cython_fastshermanmorrison', + ['enterprise/fastshermanmorrison/cython_fastshermanmorrison.pyx'], + include_dirs = [numpy.get_include(), 'fastshermanmorrison/'], + extra_compile_args=["-O2", "-fno-wrapv"]) # 50% more efficient! ] test_requirements = [] @@ -26,7 +36,7 @@ author="Justin A. Ellis", author_email="justin.ellis18@gmail.com", url="https://github.com/nanograv/enterprise", - packages=["enterprise", "enterprise.signals"], + packages=["enterprise", "enterprise.signals", "enterprise.fastshermanmorrison"], package_dir={"enterprise": "enterprise"}, include_package_data=True, package_data={"enterprise": ["datafiles/*", "datafiles/ephemeris/*", "datafiles/ng9/*", "datafiles/mdc_open1/*"]}, From 312093b7128c9591b4ca0378603e6a3bb8853ec5 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Fri, 21 Apr 2023 18:11:19 +0200 Subject: [PATCH 02/80] Added numpy and cython extensions to setup.py --- setup.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.py b/setup.py index a04085da..fb2efe85 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +import numpy from setuptools import setup from setuptools import Extension from Cython.Build import cythonize @@ -61,4 +62,5 @@ ], test_suite="tests", tests_require=test_requirements, + ext_modules = cythonize(ext_modules) ) From 1b536bc39f3068fed0fa83a38f7f3429a9984669 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Mon, 24 Apr 2023 10:40:44 +0200 Subject: [PATCH 03/80] Updated docstrings and method selector for fast-sherman-morrison --- .../fastshermanmorrison.py | 107 +----------------- enterprise/signals/white_signals.py | 20 +++- 2 files changed, 21 insertions(+), 106 deletions(-) diff --git a/enterprise/fastshermanmorrison/fastshermanmorrison.py b/enterprise/fastshermanmorrison/fastshermanmorrison.py index db558403..5a374e24 100644 --- a/enterprise/fastshermanmorrison/fastshermanmorrison.py +++ b/enterprise/fastshermanmorrison/fastshermanmorrison.py @@ -1,108 +1,9 @@ import numpy as np from . import cython_fastshermanmorrison as cfsm +from enterprise.signals import signal_base -class ShermanMorrison(object): - """Custom container class for Sherman-morrison array inversion.""" - - def __init__(self, jvec, slices, nvec=0.0): - self._jvec = jvec - self._slices = slices - self._nvec = nvec - - def __add__(self, other): - nvec = self._nvec + other - return ShermanMorrison(self._jvec, self._slices, nvec) - - # hacky way to fix adding 0 - def __radd__(self, other): - if other == 0: - return self.__add__(other) - else: - raise TypeError - - def _solve_D1(self, x): - """Solves :math:`N^{-1}x` where :math:`x` is a vector.""" - - Nx = x / self._nvec - for slc, jv in zip(self._slices, self._jvec): - if slc.stop - slc.start > 1: - rblock = x[slc] - niblock = 1 / self._nvec[slc] - beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) - Nx[slc] -= beta * np.dot(niblock, rblock) * niblock - return Nx - - def _solve_1D1(self, x, y): - """Solves :math:`y^T N^{-1}x`, where :math:`x` and - :math:`y` are vectors. - """ - - Nx = x / self._nvec - yNx = np.dot(y, Nx) - for slc, jv in zip(self._slices, self._jvec): - if slc.stop - slc.start > 1: - xblock = x[slc] - yblock = y[slc] - niblock = 1 / self._nvec[slc] - beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) - yNx -= beta * np.dot(niblock, xblock) * np.dot(niblock, yblock) - return yNx - - def _solve_2D2(self, X, Z): - """Solves :math:`Z^T N^{-1}X`, where :math:`X` - and :math:`Z` are 2-d arrays. - """ - - ZNX = np.dot(Z.T / self._nvec, X) - for slc, jv in zip(self._slices, self._jvec): - if slc.stop - slc.start > 1: - Zblock = Z[slc, :] - Xblock = X[slc, :] - niblock = 1 / self._nvec[slc] - beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) - zn = np.dot(niblock, Zblock) - xn = np.dot(niblock, Xblock) - ZNX -= beta * np.outer(zn.T, xn) - return ZNX - - def _get_logdet(self): - """Returns log determinant of :math:`N+UJU^{T}` where :math:`U` - is a quantization matrix. - """ - logdet = np.einsum("i->", np.log(self._nvec)) - for slc, jv in zip(self._slices, self._jvec): - if slc.stop - slc.start > 1: - niblock = 1 / self._nvec[slc] - beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) - logdet += np.log(jv) - np.log(beta) - return logdet - - def solve(self, other, left_array=None, logdet=False): - if other.ndim == 1: - if left_array is None: - ret = self._solve_D1(other) - elif left_array is not None and left_array.ndim == 1: - ret = self._solve_1D1(other, left_array) - elif left_array is not None and left_array.ndim == 2: - ret = np.dot(left_array.T, self._solve_D1(other)) - else: - raise TypeError - elif other.ndim == 2: - if left_array is None: - raise NotImplementedError("ShermanMorrison does not implement _solve_D2") - elif left_array is not None and left_array.ndim == 2: - ret = self._solve_2D2(other, left_array) - elif left_array is not None and left_array.ndim == 1: - ret = np.dot(other.T, self._solve_D1(left_array)) - else: - raise TypeError - else: - raise TypeError - - return (ret, self._get_logdet()) if logdet else ret - -class FastShermanMorrison(ShermanMorrison): - """Custom container class for Sherman-morrison array inversion.""" +class FastShermanMorrison(signal_base.ShermanMorrison): + """Custom container class for Fast-Sherman-morrison array inversion.""" def __init__(self, jvec, slices, nvec=0.0): self._uinds = np.vstack([[slc.start, slc.stop] for slc in slices]) @@ -149,7 +50,7 @@ def solve(self, other, left_array=None, logdet=False): raise TypeError elif other.ndim == 2: if left_array is None: - raise NotImplementedError("ShermanMorrison does not implement _solve_D2") + raise NotImplementedError("FastShermanMorrison does not implement _solve_D2") elif left_array is not None and left_array.ndim == 2: ret = self._solve_2D2(other, left_array) elif left_array is not None and left_array.ndim == 1: diff --git a/enterprise/signals/white_signals.py b/enterprise/signals/white_signals.py index a7d794b0..77e077fb 100644 --- a/enterprise/signals/white_signals.py +++ b/enterprise/signals/white_signals.py @@ -8,6 +8,7 @@ import scipy.sparse from enterprise.signals import parameter, selections, signal_base, utils +from enterprise.fastshermanmorrison import fastshermanmorrison from enterprise.signals.parameter import function from enterprise.signals.selections import Selection @@ -114,7 +115,7 @@ def EquadNoise(*args, **kwargs): def EcorrKernelNoise( log10_ecorr=parameter.Uniform(-10, -5), selection=Selection(selections.no_selection), - method="sherman-morrison", + method="fast-sherman-morrison", name="", ): r"""Class factory for ECORR type noise. @@ -123,7 +124,8 @@ def EcorrKernelNoise( :param selection: ``Selection`` object specifying masks for backends, time segments, etc. :param method: Method for computing noise covariance matrix. - Options include `sherman-morrison`, `sparse`, and `block` + Options include `fast-sherman-morrison`, `sherman-morrison`, `sparse`, + and `block` :return: ``EcorrKernelNoise`` class. @@ -140,6 +142,12 @@ def EcorrKernelNoise( In this signal implementation we offer three methods of performing these matrix operations: + fast-sherman-morrison + Uses the `Sherman-Morrison`_ forumla to compute the matrix + inverse and other matrix operations. **Note:** This method can only + be used for covariances that make up ECorrKernelNoise, :math:`uv^T`. + This version is Cython optimized. + sherman-morrison Uses the `Sherman-Morrison`_ forumla to compute the matrix inverse and other matrix operations. **Note:** This method can only @@ -166,7 +174,7 @@ def EcorrKernelNoise( """ - if method not in ["sherman-morrison", "block", "sparse"]: + if method not in ["fast-sherman-morrison", "sherman-morrison", "block", "sparse"]: msg = "EcorrKernelNoise does not support method: {}".format(method) raise TypeError(msg) @@ -210,6 +218,8 @@ def ndiag_params(self): def get_ndiag(self, params): if method == "sherman-morrison": return self._get_ndiag_sherman_morrison(params) + elif method == "fast-sherman-morrison": + return self._get_ndiag_sparse(params) elif method == "sparse": return self._get_ndiag_sparse(params) elif method == "block": @@ -238,6 +248,10 @@ def _get_ndiag_sherman_morrison(self, params): slices, jvec = self._get_jvecs(params) return signal_base.ShermanMorrison(jvec, slices) + def _get_ndiag_fast_sherman_morrison(self, params): + slices, jvec = self._get_jvecs(params) + return fastshermanmorrison.FastShermanMorrison(jvec, slices) + def _get_ndiag_block(self, params): slices, jvec = self._get_jvecs(params) blocks = [] From db81f64e77eba74f633d35315831ed803da3e91a Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Mon, 24 Apr 2023 16:28:20 +0200 Subject: [PATCH 04/80] Added numpy and cython to requirements_dev.txt --- requirements_dev.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/requirements_dev.txt b/requirements_dev.txt index 73ae670a..86d0cbb2 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -14,4 +14,6 @@ sphinx-rtd-theme>=0.4.0 pytest-cov>=2.7.0 coverage-conditional-plugin>=0.4.0 jupyter>=1.0.0 -build==0.3.1.post1 \ No newline at end of file +build==0.3.1.post1 +numpy>=1.16.3 +cython>=0.29.34 From d8e1a3559ccaedc47aa140a1dd6243d713e717ee Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Mon, 24 Apr 2023 17:44:02 +0200 Subject: [PATCH 05/80] Moved FastShermanMorrison to external package --- enterprise/fastshermanmorrison/__init__.py | 1 - .../cython_fastshermanmorrison.pyx | 945 ------------------ .../fastshermanmorrison/fastshermanmorrison.c | 233 ----- .../fastshermanmorrison.py | 64 -- enterprise/signals/white_signals.py | 18 +- requirements_dev.txt | 2 - setup.py | 12 - 7 files changed, 16 insertions(+), 1259 deletions(-) delete mode 100644 enterprise/fastshermanmorrison/__init__.py delete mode 100644 enterprise/fastshermanmorrison/cython_fastshermanmorrison.pyx delete mode 100644 enterprise/fastshermanmorrison/fastshermanmorrison.c delete mode 100644 enterprise/fastshermanmorrison/fastshermanmorrison.py diff --git a/enterprise/fastshermanmorrison/__init__.py b/enterprise/fastshermanmorrison/__init__.py deleted file mode 100644 index 3037395f..00000000 --- a/enterprise/fastshermanmorrison/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from . import fastshermanmorrison diff --git a/enterprise/fastshermanmorrison/cython_fastshermanmorrison.pyx b/enterprise/fastshermanmorrison/cython_fastshermanmorrison.pyx deleted file mode 100644 index c8a3b8c2..00000000 --- a/enterprise/fastshermanmorrison/cython_fastshermanmorrison.pyx +++ /dev/null @@ -1,945 +0,0 @@ -cimport numpy as np -import numpy as np -np.import_array() - -from libc.math cimport log, sqrt -import cython -from scipy.linalg.cython_blas cimport dgemm, dger, dgemv - -cdef public void dgemm_(char *transa, char *transb, int *m, int *n, int *k, - double *alpha, double *a, int *lda, double *b, - int *ldb, double *beta, double *c, int *ldc): - """Public dgemm that can be used in the external C code""" - dgemm(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc) - -cdef public void dgemv_(char *trans, int *m, int *n, double *alpha, double *a, - int *lda, double *x, int *incx, double *beta, - double *y, int *incy): - """Public dgemv that can be used in the external C code""" - dgemv(trans, m, n, alpha, a, lda, x, incx, beta, y, incy) - -cdef public void dger_(int *m, int *n, double *alpha, double *x, int *incx, - double *y, int *incy, double *a, int *lda): - """Public dger that can be used in the external C code""" - dger(m, n, alpha, x, incx, y, incy, a, lda) - - -cdef extern from "fastshermanmorrison.c": - void blas_block_shermor_2D_asym( - int n_Z1_rows, - int n_Z1_cols, - int n_Z1_row_major, - double *pd_Z1, - int n_Z2_cols, - int n_Z2_row_major, - double *pd_Z2, - double *pd_Nvec, - int n_J_rows, - double *pd_Jvec, - int *pn_Uinds, - double *pd_ZNZ, - double *pd_Jldet - ) - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_block_shermor_0D( \ - np.ndarray[np.double_t,ndim=1] r, \ - np.ndarray[np.double_t,ndim=1] Nvec, \ - np.ndarray[np.double_t,ndim=1] Jvec, \ - np.ndarray[np.int_t,ndim=2] Uinds): - """ - Sherman-Morrison block-inversion for Jitter (Cythonized) - - @param r: The timing residuals, array (n) - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - """ - cdef unsigned int cc, ii, cols = len(Jvec) - cdef double Jldet=0.0, ji, beta, nir, nisum - cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(len(r), 'd') - cdef np.ndarray[np.double_t,ndim=1] Nx = r / Nvec - - ni = 1.0 / Nvec - - for cc in range(cols): - if Jvec[cc] > 0.0: - ji = 1.0 / Jvec[cc] - - nir = 0.0 - nisum = 0.0 - for ii in range(Uinds[cc,0],Uinds[cc,1]): - nisum += ni[ii] - nir += r[ii]*ni[ii] - - beta = 1.0 / (nisum + ji) - - for ii in range(Uinds[cc,0],Uinds[cc,1]): - Nx[ii] -= beta * nir * ni[ii] - - return Nx - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_block_shermor_0D_ld( \ - np.ndarray[np.double_t,ndim=1] r, \ - np.ndarray[np.double_t,ndim=1] Nvec, \ - np.ndarray[np.double_t,ndim=1] Jvec, \ - np.ndarray[np.int_t,ndim=2] Uinds): - """ - Sherman-Morrison block-inversion for Jitter (Cythonized) - - @param r: The timing residuals, array (n) - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - """ - cdef unsigned int cc, ii, rows = len(r), cols = len(Jvec) - cdef double Jldet=0.0, ji, beta, nir, nisum - cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(len(r), 'd') - cdef np.ndarray[np.double_t,ndim=1] Nx = r / Nvec - - ni = 1.0 / Nvec - - for cc in range(rows): - Jldet += log(Nvec[cc]) - - for cc in range(cols): - if Jvec[cc] > 0.0: - ji = 1.0 / Jvec[cc] - - nir = 0.0 - nisum = 0.0 - for ii in range(Uinds[cc,0],Uinds[cc,1]): - nisum += ni[ii] - nir += r[ii]*ni[ii] - - beta = 1.0 / (nisum + ji) - Jldet += log(Jvec[cc]) - log(beta) - - for ii in range(Uinds[cc,0],Uinds[cc,1]): - Nx[ii] -= beta * nir * ni[ii] - - return Jldet, Nx - - -def python_block_shermor_1D(r, Nvec, Jvec, Uinds): - """ - Sherman-Morrison block-inversion for Jitter - - @param r: The timing residuals, array (n) - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - """ - ni = 1.0 / Nvec - Jldet = np.einsum('i->', np.log(Nvec)) - xNx = np.dot(r, r * ni) - - for cc, jv in enumerate(Jvec): - if jv > 0.0: - rblock = r[Uinds[cc,0]:Uinds[cc,1]] - niblock = ni[Uinds[cc,0]:Uinds[cc,1]] - - beta = 1.0 / (np.einsum('i->', niblock)+1.0/jv) - xNx -= beta * np.dot(rblock, niblock)**2 - Jldet += np.log(jv) - np.log(beta) - - return Jldet, xNx - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_block_shermor_1D( \ - np.ndarray[np.double_t,ndim=1] r, \ - np.ndarray[np.double_t,ndim=1] Nvec, \ - np.ndarray[np.double_t,ndim=1] Jvec, \ - np.ndarray[np.int_t,ndim=2] Uinds): - """ - Sherman-Morrison block-inversion for Jitter (Cythonized) - - @param r: The timing residuals, array (n) - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - """ - cdef unsigned int cc, ii, rows = len(r), cols = len(Jvec) - cdef double Jldet=0.0, ji, beta, xNx=0.0, nir, nisum - cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') - - ni = 1.0 / Nvec - - for cc in range(rows): - Jldet += log(Nvec[cc]) - xNx += r[cc]*r[cc]*ni[cc] - - for cc in range(cols): - if Jvec[cc] > 0.0: - ji = 1.0 / Jvec[cc] - - nir = 0.0 - nisum = 0.0 - for ii in range(Uinds[cc,0],Uinds[cc,1]): - nisum += ni[ii] - nir += r[ii]*ni[ii] - - beta = 1.0 / (nisum + ji) - Jldet += log(Jvec[cc]) - log(beta) - xNx -= beta * nir * nir - - return Jldet, xNx - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_block_shermor_1D1( \ - np.ndarray[np.double_t,ndim=1] x, \ - np.ndarray[np.double_t,ndim=1] y, \ - np.ndarray[np.double_t,ndim=1] Nvec, \ - np.ndarray[np.double_t,ndim=1] Jvec, \ - np.ndarray[np.int_t,ndim=2] Uinds): - """ - Sherman-Morrison block-inversion for Jitter (Cythonized) - - @param r: The timing residuals, array (n) - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - """ - cdef unsigned int cc, ii, rows = len(x), cols = len(Jvec) - cdef double Jldet=0.0, ji, beta, yNx=0.0, nix, niy, nisum - cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') - - ni = 1.0 / Nvec - - for cc in range(rows): - Jldet += log(Nvec[cc]) - yNx += y[cc]*x[cc]*ni[cc] - - for cc in range(cols): - if Jvec[cc] > 0.0: - ji = 1.0 / Jvec[cc] - - nix = 0.0 - niy = 0.0 - nisum = 0.0 - for ii in range(Uinds[cc,0],Uinds[cc,1]): - nisum += ni[ii] - nix += x[ii]*ni[ii] - niy += y[ii]*ni[ii] - - beta = 1.0 / (nisum + ji) - Jldet += log(Jvec[cc]) - log(beta) - yNx -= beta * nix * niy - - return Jldet, yNx - - -def python_block_shermor_2D(Z, Nvec, Jvec, Uinds): - """ - Sherman-Morrison block-inversion for Jitter, ZNiZ - - @param Z: The design matrix, array (n x m) - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - - N = D + U*J*U.T - calculate: log(det(N)), Z.T * N^-1 * Z - """ - ni = 1.0 / Nvec - Jldet = np.einsum('i->', np.log(Nvec)) - zNz = np.dot(Z.T*ni, Z) - - for cc, jv in enumerate(Jvec): - if jv > 0.0: - Zblock = Z[Uinds[cc,0]:Uinds[cc,1], :] - niblock = ni[Uinds[cc,0]:Uinds[cc,1]] - - beta = 1.0 / (np.einsum('i->', niblock)+1.0/jv) - zn = np.dot(niblock, Zblock) - zNz -= beta * np.outer(zn.T, zn) - Jldet += np.log(jv) - np.log(beta) - - return Jldet, zNz - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_block_shermor_2D( \ - np.ndarray[np.double_t,ndim=2] Z, \ - np.ndarray[np.double_t,ndim=1] Nvec, \ - np.ndarray[np.double_t,ndim=1] Jvec, \ - np.ndarray[np.int_t,ndim=2] Uinds): - """ - Sherman-Morrison block-inversion for Jitter (Cythonized) - - @param Z: The design matrix, array (n x m) - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - - N = D + U*J*U.T - calculate: log(det(N)), Z.T * N^-1 * Z - """ - cdef unsigned int cc, ii, rows = len(Nvec), cols = len(Jvec) - cdef double Jldet=0.0, ji, beta, nir, nisum - cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(len(Nvec), 'd') - cdef np.ndarray[np.double_t,ndim=2] zNz - - ni = 1.0 / Nvec - zNz = np.dot(Z.T*ni, Z) - - for cc in range(rows): - Jldet += log(Nvec[cc]) - - for cc in range(cols): - if Jvec[cc] > 0.0: - Zblock = Z[Uinds[cc,0]:Uinds[cc,1], :] - niblock = ni[Uinds[cc,0]:Uinds[cc,1]] - - nisum = 0.0 - for ii in range(len(niblock)): - nisum += niblock[ii] - - beta = 1.0 / (nisum+1.0/Jvec[cc]) - Jldet += log(Jvec[cc]) - log(beta) - zn = np.dot(niblock, Zblock) - zNz -= beta * np.outer(zn.T, zn) - - return Jldet, zNz - -def python_block_shermor_2D_asymm(Z1, Z2, Nvec, Jvec, Uinds): - """ - Sherman-Morrison block-inversion for Jitter, ZNiZ - - @param Z: The design matrix, array (n x m) - @param Z2: The second design matrix, array (n x m2) - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - - N = D + U*J*U.T - calculate: log(det(N)), Z.T * N^-1 * Z - """ - ni = 1.0 / Nvec - Jldet = np.einsum('i->', np.log(Nvec)) - zNz = np.dot(Z1.T*ni, Z2) - - for cc, jv in enumerate(Jvec): - if jv > 0.0: - Zblock1 = Z1[Uinds[cc,0]:Uinds[cc,1], :] - Zblock2 = Z2[Uinds[cc,0]:Uinds[cc,1], :] - niblock = ni[Uinds[cc,0]:Uinds[cc,1]] - - beta = 1.0 / (np.einsum('i->', niblock)+1.0/jv) - zn1 = np.dot(niblock, Zblock1) - zn2 = np.dot(niblock, Zblock2) - zNz -= beta * np.outer(zn1.T, zn2) - Jldet += np.log(jv) - np.log(beta) - - return Jldet, zNz - - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_block_shermor_2D_asymm( - np.ndarray[np.double_t,ndim=2] Z1, - np.ndarray[np.double_t,ndim=2] Z2, - np.ndarray[np.double_t,ndim=1] Nvec, - np.ndarray[np.double_t,ndim=1] Jvec, - np.ndarray[np.int_t,ndim=2] Uinds): - """ - Sherman-Morrison block-inversion for Jitter, ZNiZ - - @param Z: The design matrix, array (n x m) - @param Z2: The second design matrix, array (n x m2) - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - - N = D + U*J*U.T - calculate: log(det(N)), Z.T * N^-1 * Z - """ - cdef unsigned int cc, ii, rows = len(Nvec), cols = len(Jvec) - cdef double Jldet=0.0, ji, beta, nir, nisum - cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(len(Nvec), 'd') - cdef np.ndarray[np.double_t,ndim=2] zNz - - print("WARNING: cython_block_shermor_2D_asymm is deprecated.") - print(" use cython_blas_block_shermor_2D_asymm") - - ni = 1.0 / Nvec - for cc in range(rows): - Jldet += log(Nvec[cc]) - zNz = np.dot(Z1.T*ni, Z2) - - for cc in range(cols): - if Jvec[cc] > 0.0: - Zblock1 = Z1[Uinds[cc,0]:Uinds[cc,1], :] - Zblock2 = Z2[Uinds[cc,0]:Uinds[cc,1], :] - niblock = ni[Uinds[cc,0]:Uinds[cc,1]] - - nisum = 0.0 - for ii in range(len(niblock)): - nisum += niblock[ii] - - beta = 1.0 / (nisum+1.0/Jvec[cc]) - zn1 = np.dot(niblock, Zblock1) - zn2 = np.dot(niblock, Zblock2) - - zNz -= beta * np.outer(zn1.T, zn2) - Jldet += log(Jvec[cc]) - log(beta) - - return Jldet, zNz - - -def python_draw_ecor(r, Nvec, Jvec, Uinds): - """ - Given Jvec, draw new epoch-averaged residuals - - @param r: The timing residuals, array (n) - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - - N = D + U*J*U.T - calculate: Norm(0, sqrt(J)) + (U^T * D^{-1} * U)^{-1}U.T D^{-1} r - """ - - rv = np.random.randn(len(Jvec)) * np.sqrt(Jvec) - ni = 1.0 / Nvec - - for cc in range(len(Jvec)): - rblock = r[Uinds[cc,0]:Uinds[cc,1]] - niblock = ni[Uinds[cc,0]:Uinds[cc,1]] - beta = 1.0 / np.einsum('i->', niblock) - - rv[cc] += beta * np.dot(rblock, niblock) - - return rv - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_draw_ecor( \ - np.ndarray[np.double_t,ndim=1] r, \ - np.ndarray[np.double_t,ndim=1] Nvec, \ - np.ndarray[np.double_t,ndim=1] Jvec, \ - np.ndarray[np.int_t,ndim=2] Uinds): - """ - Given Jvec, draw new epoch-averaged residuals - - @param r: The timing residuals, array (n) - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - - N = D + U*J*U.T - calculate: Norm(0, sqrt(J)) + (U^T * D^{-1} * U)^{-1}U.T D^{-1} r - """ - cdef unsigned int cc, ii, rows = len(r), cols = len(Jvec) - cdef double ji, nir, nisum - cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') - cdef np.ndarray[np.double_t,ndim=1] rv = np.random.randn(cols) - - for cc in range(cols): - rv[cc] *= sqrt(Jvec[cc]) - - ni = 1.0 / Nvec - - for cc in range(cols): - ji = 1.0 / Jvec[cc] - - nir = 0.0 - nisum = 0.0 - for ii in range(Uinds[cc,0],Uinds[cc,1]): - nisum += ni[ii] - nir += r[ii]*ni[ii] - - rv[cc] += nir / nisum - - return rv - - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_shermor_draw_ecor( \ - np.ndarray[np.double_t,ndim=1] r, \ - np.ndarray[np.double_t,ndim=1] Nvec, \ - np.ndarray[np.double_t,ndim=1] Jvec, \ - np.ndarray[np.int_t,ndim=2] Uinds): - """ - Do both the Sherman-Morrison block-inversion for Jitter, - and the draw of the ecor parameters together (Cythonized) - - @param r: The timing residuals, array (n) - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - - N = D + U*J*U.T - calculate: r.T * N^-1 * r, log(det(N)), Norm(0, sqrt(J)) + (U^T * D^{-1} * U)^{-1}U.T D^{-1} r - """ - cdef unsigned int cc, ii, rows = len(r), cols = len(Jvec) - cdef double Jldet=0.0, ji, beta, xNx=0.0, nir, nisum - cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') - cdef np.ndarray[np.double_t,ndim=1] rv = np.random.randn(cols) - - ni = 1.0 / Nvec - - for cc in range(cols): - rv[cc] *= sqrt(Jvec[cc]) - - for cc in range(rows): - Jldet += log(Nvec[cc]) - xNx += r[cc]*r[cc]*ni[cc] - - for cc in range(cols): - nir = 0.0 - nisum = 0.0 - for ii in range(Uinds[cc,0],Uinds[cc,1]): - nisum += ni[ii] - nir += r[ii]*ni[ii] - - rv[cc] += nir / nisum - - if Jvec[cc] > 0.0: - ji = 1.0 / Jvec[cc] - - beta = 1.0 / (nisum + ji) - Jldet += log(Jvec[cc]) - log(beta) - xNx -= beta * nir * nir - - return Jldet, xNx, rv - - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_update_ea_residuals( \ - np.ndarray[np.double_t,ndim=1] gibbsresiduals, \ - np.ndarray[np.double_t,ndim=1] gibbssubresiduals, \ - np.ndarray[np.double_t,ndim=1] eat, \ - np.ndarray[np.int_t,ndim=2] Uinds): - """ - Given epoch-averaged residuals, update the residuals, and the subtracted - residuals, so that these can be further processed by the other conditional - probability density functions. - - @param gibbsresiduals: The timing residuals, array (n) - @param gibbssubresiduals: The white noise amplitude, array (n) - @param eat: epoch averaged residuals (k) - @param Uinds: The start/finish indices for the jitter blocks - (k x 2) - - """ - cdef unsigned int k = Uinds.shape[0], ii, cc - - for cc in range(Uinds.shape[0]): - for ii in range(Uinds[cc,0],Uinds[cc,1]): - gibbssubresiduals[ii] += eat[cc] - gibbsresiduals[ii] -= eat[cc] - - return gibbsresiduals, gibbssubresiduals - - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_Uj(np.ndarray[np.double_t,ndim=1] j, \ - np.ndarray[np.int_t,ndim=2] Uinds, nobs): - """ - Given epoch-averaged residuals (j), get the residuals. - Used in 'updateDetSources' - - @param j: epoch averaged residuals (k) - @param Uinds: The start/finish indices for the jitter blocks - (k x 2) - @param nobs: Number of observations (length return vector) - - """ - cdef unsigned int k = Uinds.shape[0], ii, cc - cdef np.ndarray[np.double_t,ndim=1] Uj = np.zeros(nobs, 'd') - - for cc in range(k): - for ii in range(Uinds[cc,0],Uinds[cc,1]): - Uj[ii] += j[cc] - - return Uj - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_UTx(np.ndarray[np.double_t,ndim=1] x, \ - np.ndarray[np.int_t,ndim=2] Uinds): - """ - Given residuals (x), get np.dot(U.T, x) - Used in 'updateDetSources' - - @param j: epoch averaged residuals (k) - @param Uinds: The start/finish indices for the jitter blocks - (k x 2) - - """ - cdef unsigned int k = Uinds.shape[0], ii, cc - cdef np.ndarray[np.double_t,ndim=1] UTx = np.zeros(k, 'd') - - for cc in range(k): - for ii in range(Uinds[cc,0],Uinds[cc,1]): - UTx[cc] += x[ii] - - return UTx - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_logdet_dN( \ - np.ndarray[np.double_t,ndim=1] Nvec, \ - np.ndarray[np.double_t,ndim=1] Jvec, \ - np.ndarray[np.double_t,ndim=1] dNvec, \ - np.ndarray[np.int_t,ndim=2] Uinds): - """ - Sherman-Morrison block-inversion for Jitter (Cythonized) - - Calculates Trace(N^{-1} dN/dNp), where: - - N^{-1} is the ecorr-include N inverse - - dN/dNp is the diagonal derivate of N wrt Np - - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param dNvec: The white noise derivative, array (n) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - """ - cdef unsigned int cc, ii, rows = len(Nvec), cols = len(Jvec) - cdef double tr=0.0, ji, nisum, Nnisum - cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') - cdef np.ndarray[np.double_t,ndim=1] Nni = np.empty(rows, 'd') - - ni = 1.0 / Nvec - Nni = dNvec / Nvec**2 - - for cc in range(rows): - tr += dNvec[cc] * ni[cc] - - for cc in range(cols): - if Jvec[cc] > 0.0: - ji = 1.0 / Jvec[cc] - - nisum = 0.0 - Nnisum = 0.0 - for ii in range(Uinds[cc,0],Uinds[cc,1]): - nisum += ni[ii] - Nnisum += Nni[ii] - - tr -= Nnisum / (nisum + ji) - - return tr - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_logdet_dJ( \ - np.ndarray[np.double_t,ndim=1] Nvec, \ - np.ndarray[np.double_t,ndim=1] Jvec, \ - np.ndarray[np.double_t,ndim=1] dJvec, \ - np.ndarray[np.int_t,ndim=2] Uinds): - """ - Sherman-Morrison block-inversion for Jitter (Cythonized) - - Calculates Trace(N^{-1} dN/dJp), where: - - N^{-1} is the ecorr-include N inverse - - dN/dJp = U dJ/dJp U^{T}, with dJ/dJp the diagnal derivative of J wrt - Jp - - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param dJvec: The jitter derivative, array (k) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - """ - cdef unsigned int cc, ii, rows = len(Nvec), cols = len(Jvec) - cdef double dJldet=0.0, ji, beta, nisum - cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') - - ni = 1.0 / Nvec - - for cc in range(cols): - if Jvec[cc] > 0.0: - ji = 1.0 / Jvec[cc] - - nisum = 0.0 - for ii in range(Uinds[cc,0],Uinds[cc,1]): - nisum += ni[ii] - - beta = 1.0 / (nisum + ji) - - dJldet += dJvec[cc]*(nisum - beta*nisum**2) - - return dJldet - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_logdet_dN_dN( \ - np.ndarray[np.double_t,ndim=1] Nvec, \ - np.ndarray[np.double_t,ndim=1] Jvec, \ - np.ndarray[np.double_t,ndim=1] dNvec1, \ - np.ndarray[np.double_t,ndim=1] dNvec2, \ - np.ndarray[np.int_t,ndim=2] Uinds): - """ - Sherman-Morrison block-inversion for Jitter (Cythonized) - - Calculates Trace(N^{-1} dN/dNp1 N^{-1} dN/dNp2), where: - - N^{-1} is the ecorr-include N inverse - - dN/dNpx is the diagonal derivate of N wrt Npx - - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param dNvec1: The white noise derivative, array (n) - @param dNvec2: The white noise derivative, array (n) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - """ - cdef unsigned int cc, ii, rows = len(Nvec), cols = len(Jvec) - cdef double tr=0.0, ji, nisum, Nnisum1, Nnisum2, NniNnisum, beta - cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') - cdef np.ndarray[np.double_t,ndim=1] Nni1 = np.empty(rows, 'd') - cdef np.ndarray[np.double_t,ndim=1] Nni2 = np.empty(rows, 'd') - - ni = 1.0 / Nvec - Nni1 = dNvec1 / Nvec**2 - Nni2 = dNvec2 / Nvec**2 - - for cc in range(rows): - tr += dNvec1[cc] * dNvec2[cc] * ni[cc]**2 - - for cc in range(cols): - if Jvec[cc] > 0.0: - ji = 1.0 / Jvec[cc] - - nisum = 0.0 - Nnisum1 = 0.0 - Nnisum2 = 0.0 - NniNnisum = 0.0 - for ii in range(Uinds[cc,0],Uinds[cc,1]): - nisum += ni[ii] - Nnisum1 += Nni1[ii] - Nnisum2 += Nni2[ii] - NniNnisum += Nni1[ii]*Nni2[ii]*Nvec[ii] - - beta = 1.0 / (nisum + ji) - - tr += Nnisum1 * Nnisum2 * beta**2 - tr -= 2 * NniNnisum * beta - - return tr - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_logdet_dN_dJ( \ - np.ndarray[np.double_t,ndim=1] Nvec, \ - np.ndarray[np.double_t,ndim=1] Jvec, \ - np.ndarray[np.double_t,ndim=1] dNvec, \ - np.ndarray[np.double_t,ndim=1] dJvec, \ - np.ndarray[np.int_t,ndim=2] Uinds): - """ - Sherman-Morrison block-inversion for Jitter (Cythonized) - - Calculates Trace(N^{-1} dN/dNp N^{-1} dN/dJp), where: - - N^{-1} is the ecorr-include N inverse - - dN/dNp is the diagonal derivate of N wrt Np - - dN/dJp = U dJ/dJp U^{T}, with dJ/dJp the diagnal derivative of J wrt - Jp - - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param dNvec: The white noise derivative, array (n) - @param dJvec: The white noise ecor derivative, array (k) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - """ - cdef unsigned int cc, ii, rows = len(Nvec), cols = len(Jvec) - cdef double tr=0.0, ji, nisum, Nnisum, beta - cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') - cdef np.ndarray[np.double_t,ndim=1] Nni = np.empty(rows, 'd') - - ni = 1.0 / Nvec - Nni = dNvec / Nvec**2 - - for cc in range(cols): - if Jvec[cc] > 0.0: - ji = 1.0 / Jvec[cc] - - nisum = 0.0 - Nnisum = 0.0 - for ii in range(Uinds[cc,0],Uinds[cc,1]): - nisum += ni[ii] - Nnisum += Nni[ii] - - beta = 1.0 / (nisum + ji) - - tr += Nnisum * dJvec[cc] - tr -= 2 * nisum * dJvec[cc] * Nnisum * beta - tr += Nnisum * nisum**2 * dJvec[cc] *beta**2 - - return tr - -@cython.boundscheck(False) -@cython.wraparound(False) -def cython_logdet_dJ_dJ( \ - np.ndarray[np.double_t,ndim=1] Nvec, \ - np.ndarray[np.double_t,ndim=1] Jvec, \ - np.ndarray[np.double_t,ndim=1] dJvec1, \ - np.ndarray[np.double_t,ndim=1] dJvec2, \ - np.ndarray[np.int_t,ndim=2] Uinds): - """ - Sherman-Morrison block-inversion for Jitter (Cythonized) - - Calculates Trace(N^{-1} dN/dJp1 N^{-1} dN/dJp2), where: - - N^{-1} is the ecorr-include N inverse - - dN/dJpx = U dJ/dJpx U^{T}, with dJ/dJpx the diagnal derivative of J wrt - Jpx - - @param Nvec: The white noise amplitude, array (n) - @param Jvec: The jitter amplitude, array (k) - @param dJvec1: The white noise derivative, array (k) - @param dJvec2: The white noise derivative, array (k) - @param Uinds: The start/finish indices for the jitter blocks (k x 2) - - For this version, the residuals need to be sorted properly so that all the - blocks are continuous in memory. Here, there are n residuals, and k jitter - parameters. - """ - cdef unsigned int cc, ii, rows = len(Nvec), cols = len(Jvec) - cdef double tr=0.0, ji, nisum, beta - cdef np.ndarray[np.double_t,ndim=1] ni = np.empty(rows, 'd') - - ni = 1.0 / Nvec - - for cc in range(cols): - if Jvec[cc] > 0.0: - ji = 1.0 / Jvec[cc] - - nisum = 0.0 - for ii in range(Uinds[cc,0],Uinds[cc,1]): - nisum += ni[ii] - - beta = 1.0 / (nisum + ji) - - tr += dJvec1[cc] * dJvec2[cc] * nisum**2 - tr -= 2 * dJvec1[cc] * dJvec2[cc] * beta * nisum**3 - tr += dJvec1[cc] * dJvec2[cc] * beta**2 * nisum**4 - - return tr - - -@cython.boundscheck(False) -@cython.wraparound(False) -cpdef double c_blas_block_shermor_2D_asymm( - np.ndarray[np.double_t,ndim=2] Z1, - np.ndarray[np.double_t,ndim=2] Z2, - np.ndarray[np.double_t,ndim=1] Nvec, - np.ndarray[np.double_t,ndim=1] Jvec, - Uinds, #Need to copy, because Cython can't always use numpy integer arrays. Bah - np.ndarray[np.double_t,ndim=2] ZNZ, - ): - cdef double d_Jldet - - cdef int n_Z1_rows = Z1.shape[0] - cdef int n_Z1_cols = Z1.shape[1] - cdef int n_Z2_cols = Z2.shape[1] - cdef int n_J_rows = len(Jvec) - cdef int n_Z1_row_major, n_Z2_row_major - - n_Z1_row_major = 0 if Z1.flags['F_CONTIGUOUS'] else 1 - n_Z2_row_major = 0 if Z2.flags['F_CONTIGUOUS'] else 1 - - # Hack, because somehow we can't pass integer arrays? - # This is a tiny bit of overhead right here - cdef int [:] Uinds_new - - # This makes it into a C-ordered (Row-Major) array we can pass - Uinds_new = np.ascontiguousarray(Uinds.flatten(), dtype=np.dtype("i")) - - assert Z1.shape[0] == Z2.shape[0] - assert Z1.shape[0] == len(Nvec) - assert Uinds.shape[0] == len(Jvec) - - blas_block_shermor_2D_asym( - n_Z1_rows, - n_Z1_cols, - n_Z1_row_major, - &Z1[0,0], - n_Z2_cols, - n_Z2_row_major, - &Z2[0,0], - &Nvec[0], - n_J_rows, - &Jvec[0], - &Uinds_new[0], - &ZNZ[0,0], - &d_Jldet) - - return d_Jldet - -def cython_blas_block_shermor_2D_asymm(Z1, Z2, Nvec, Jvec, Uinds): - """Wrapper for the C/Cython code""" - - ZNZ = np.zeros((Z1.shape[1], Z2.shape[1]), order='F') - Jldet = c_blas_block_shermor_2D_asymm(Z1, Z2, Nvec, Jvec, np.array(Uinds, order="C"), ZNZ) - - return Jldet, ZNZ - diff --git a/enterprise/fastshermanmorrison/fastshermanmorrison.c b/enterprise/fastshermanmorrison/fastshermanmorrison.c deleted file mode 100644 index f6f13c9d..00000000 --- a/enterprise/fastshermanmorrison/fastshermanmorrison.c +++ /dev/null @@ -1,233 +0,0 @@ -/* cython_fastshermor.c - * - * Rutger van Haasteren, April 19 2023, Hannover - * - */ - -#include -#include -#include - - -extern void dgemm_(char *transa, char *transb, int *m, int *n, int *k, - double *alpha, double *a, int *lda, double *b, - int *ldb, double *beta, double *c, int *ldc); -extern void dgemv_(char *trans, int *m, int *n, double *alpha, double *a, - int *lda, double *x, int *incx, double *beta, - double *y, int *incy); -extern void dger_(int *m, int *n, double *alpha, double *x, int *incx, - double *y, int *incy, double *a, int *lda); - -static void blas_block_shermor_2D_asym( - int n_Z1_rows, - int n_Z1_cols, - int n_Z1_row_major, - double *pd_Z1, - int n_Z2_cols, - int n_Z2_row_major, - double *pd_Z2, - double *pd_Nvec, - int n_J_rows, - double *pd_Jvec, - int *pn_Uinds, - double *pd_ZNZ, - double *pd_Jldet - ) { - /* C implementation of python_block_shermor_2D_asym, because the python - * overhead is large - * - * parameters - * ---------- - * - * :param n_Z1_rows: Number of rows of Z1 - * :param n_Z1_cols: Number of columns of Z1 - * :param n_Z1_row_major: 1 if Z1 is Row-Major, 0 if Column-Major - * :param pd_Z1: The Z1 matrix - * :param n_Z2_cols: Number of columns of Z2 - * :param n_Z2_row_major: 1 if Z2 is Row-Major, 0 if Column-Major - * :param pd_Z2: The Z2 matrix - * :param pd_Nvec: The Nvec vector - * :param n_J_rows: The number of Jvec elements - * :param pd_Jvec: The Jvec vector - * :param pn_Uinds: The matrix of quantization indices (Row-Major) - * :param pd_ZNZ: The return value of ZNZ (Column-Major) - * :param pd_Jldet: The return value of log(det(J)) - */ - - double d_galpha=1.0, d_gbeta=0.0, d_nisum=0.0, d_beta; - double *pd_Z1ni, *pd_ZNZ_add, *pd_ni, *pd_zn1, *pd_zn2; - int cc, i, j, m, n, k, lda, ldb, ldc, n_jblock, n_jblock_i, n_index; - char *transa, *transb; - - pd_Z1ni = malloc(n_Z1_rows*n_Z1_cols * sizeof(double)); - pd_ZNZ_add = calloc(n_Z1_rows*n_Z1_cols, sizeof(double)); - pd_ni = malloc(n_Z1_rows * sizeof(double)); - pd_zn1 = calloc(n_Z1_cols, sizeof(double)); - pd_zn2 = calloc(n_Z2_cols, sizeof(double)); - - /* openmp this? */ - for(i=0; i 0.0) { - - /* Note: pn_Uinds is row-major */ - d_nisum = 0.0; - n_jblock_i = pn_Uinds[2*cc]; - n_jblock = pn_Uinds[2*cc+1] - pn_Uinds[2*cc]; - for(i=pn_Uinds[2*cc]; i=2.7.0 coverage-conditional-plugin>=0.4.0 jupyter>=1.0.0 build==0.3.1.post1 -numpy>=1.16.3 -cython>=0.29.34 diff --git a/setup.py b/setup.py index fb2efe85..45970a9b 100644 --- a/setup.py +++ b/setup.py @@ -1,10 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -import numpy from setuptools import setup -from setuptools import Extension -from Cython.Build import cythonize with open("README.md", encoding="utf-8") as readme_file: readme = readme_file.read() @@ -17,14 +14,6 @@ "scikit-sparse>=0.4.5", "pint-pulsar>=0.8.3", "libstempo>=2.4.4", - "cython>=0.29.34", -] - -ext_modules=[ - Extension('enterprise.fastshermanmorrison.cython_fastshermanmorrison', - ['enterprise/fastshermanmorrison/cython_fastshermanmorrison.pyx'], - include_dirs = [numpy.get_include(), 'fastshermanmorrison/'], - extra_compile_args=["-O2", "-fno-wrapv"]) # 50% more efficient! ] test_requirements = [] @@ -62,5 +51,4 @@ ], test_suite="tests", tests_require=test_requirements, - ext_modules = cythonize(ext_modules) ) From fb7f533bd71d528c4d8c41c9e997243691259f9c Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Mon, 24 Apr 2023 17:49:37 +0200 Subject: [PATCH 06/80] Removed fastshermanmorrison from setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 45970a9b..2e5993a3 100644 --- a/setup.py +++ b/setup.py @@ -26,7 +26,7 @@ author="Justin A. Ellis", author_email="justin.ellis18@gmail.com", url="https://github.com/nanograv/enterprise", - packages=["enterprise", "enterprise.signals", "enterprise.fastshermanmorrison"], + packages=["enterprise", "enterprise.signals"], package_dir={"enterprise": "enterprise"}, include_package_data=True, package_data={"enterprise": ["datafiles/*", "datafiles/ephemeris/*", "datafiles/ng9/*", "datafiles/mdc_open1/*"]}, From 50cc052626824bd0d607006c0a68318409ed8de9 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Mon, 24 Apr 2023 18:05:01 +0200 Subject: [PATCH 07/80] Conform to black linter --- enterprise/signals/white_signals.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/enterprise/signals/white_signals.py b/enterprise/signals/white_signals.py index ac319820..cd740648 100644 --- a/enterprise/signals/white_signals.py +++ b/enterprise/signals/white_signals.py @@ -20,6 +20,7 @@ # logging.basicConfig(format="%(levelname)s: %(name)s: %(message)s", level=logging.INFO) logger = logging.getLogger(__name__) + def WhiteNoise(varianceFunction, selection=Selection(selections.no_selection), name=""): """Class factory for generic white noise signals.""" @@ -185,7 +186,7 @@ def EcorrKernelNoise( msg = "EcorrKernelNoise does not support method: {}".format(method) raise TypeError(msg) - if method == 'fast-sherman-morrison' and fastshermanmorrison is None: + if method == "fast-sherman-morrison" and fastshermanmorrison is None: msg = "Package `fastshermanmorrison` not installed. Fallback to sherman-morrison" logger.warning(msg) From 39b6b0e59393741b52bdac4b13be28ecda5d1381 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 26 Apr 2023 13:55:46 +0200 Subject: [PATCH 08/80] Added test for fast-sherman-morrison --- tests/test_white_signals.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/test_white_signals.py b/tests/test_white_signals.py index 0736bf71..7f67d272 100644 --- a/tests/test_white_signals.py +++ b/tests/test_white_signals.py @@ -470,6 +470,10 @@ def test_ecorr_sherman_morrison(self): """Test of sherman-morrison ecorr signal and solve methods.""" self._ecorr_test(method="sherman-morrison") + def test_ecorr_fast_sherman_morrison(self): + """Test of fast-sherman-morrison ecorr signal and solve methods.""" + self._ecorr_test(method="fast-sherman-morrison") + def test_ecorr_block(self): """Test of block matrix ecorr signal and solve methods.""" self._ecorr_test(method="block") From 0e3435c370fcef368201a90818da986774b27b26 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 26 Apr 2023 13:59:21 +0200 Subject: [PATCH 09/80] Added requirements_tests.txt, so that the extensions can be loaded for testing. --- Makefile | 1 + requirements_tests.txt | 1 + 2 files changed, 2 insertions(+) create mode 100644 requirements_tests.txt diff --git a/Makefile b/Makefile index 70a5a1ed..43a249c3 100644 --- a/Makefile +++ b/Makefile @@ -31,6 +31,7 @@ init: @./.enterprise/bin/python3 -m pip install -U pip setuptools wheel @./.enterprise/bin/python3 -m pip install -r requirements.txt -U @./.enterprise/bin/python3 -m pip install -r requirements_dev.txt -U + @./.enterprise/bin/python3 -m pip install -r requirements_tests.txt -U @./.enterprise/bin/python3 -m pre_commit install --install-hooks --overwrite @./.enterprise/bin/python3 -m pip install -e . @echo "run source .enterprise/bin/activate to activate environment" diff --git a/requirements_tests.txt b/requirements_tests.txt new file mode 100644 index 00000000..7cfe1708 --- /dev/null +++ b/requirements_tests.txt @@ -0,0 +1 @@ +fastshermanmorrison-pulsar>=0.0.3 From 60a117ee2b3386e71524d39f5c43f7e5c9ee6314 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 26 Apr 2023 15:08:44 +0200 Subject: [PATCH 10/80] Added decorator to show fastshermanmorrison-pulsar warning only once --- enterprise/signals/utils.py | 12 ++++++++++++ enterprise/signals/white_signals.py | 6 ++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/enterprise/signals/utils.py b/enterprise/signals/utils.py index df949af0..9efe6d4c 100644 --- a/enterprise/signals/utils.py +++ b/enterprise/signals/utils.py @@ -31,6 +31,18 @@ logger = logging.getLogger(__name__) +def static_vars(**kwargs): + """This decorator allows a class factory to have a static variable, for + things such as checking whether a python package exists. If the package + does not exist, a warning is shown only once + """ + def decorate(func): + for k in kwargs: + setattr(func, k, kwargs[k]) + return func + return decorate + + class ConditionalGP: def __init__(self, pta, phiinv_method="cliques"): """This class allows the computation of conditional means and diff --git a/enterprise/signals/white_signals.py b/enterprise/signals/white_signals.py index cd740648..fe8c1317 100644 --- a/enterprise/signals/white_signals.py +++ b/enterprise/signals/white_signals.py @@ -119,7 +119,7 @@ def EquadNoise(*args, **kwargs): " or TNEquadNoise to obtain legacy enterprise definition for EQUAD only [tnequad^2]." ) - +@utils.static_vars(shown_fastshermanmorrison_warning=False) def EcorrKernelNoise( log10_ecorr=parameter.Uniform(-10, -5), selection=Selection(selections.no_selection), @@ -186,9 +186,11 @@ def EcorrKernelNoise( msg = "EcorrKernelNoise does not support method: {}".format(method) raise TypeError(msg) - if method == "fast-sherman-morrison" and fastshermanmorrison is None: + if method == "fast-sherman-morrison" and fastshermanmorrison is None \ + and not EcorrKernelNoise.shown_fastshermanmorrison_warning: msg = "Package `fastshermanmorrison` not installed. Fallback to sherman-morrison" logger.warning(msg) + EcorrKernelNoise.shown_fastshermanmorrison_warning = True class EcorrKernelNoise(signal_base.Signal): signal_type = "white noise" From 851b13ef1096693fe2b2ce25c56d0e200ddba2f7 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 26 Apr 2023 15:18:08 +0200 Subject: [PATCH 11/80] Proper linting corrections --- enterprise/signals/utils.py | 2 ++ enterprise/signals/white_signals.py | 8 ++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/enterprise/signals/utils.py b/enterprise/signals/utils.py index 9efe6d4c..ee57f2f6 100644 --- a/enterprise/signals/utils.py +++ b/enterprise/signals/utils.py @@ -36,10 +36,12 @@ def static_vars(**kwargs): things such as checking whether a python package exists. If the package does not exist, a warning is shown only once """ + def decorate(func): for k in kwargs: setattr(func, k, kwargs[k]) return func + return decorate diff --git a/enterprise/signals/white_signals.py b/enterprise/signals/white_signals.py index fe8c1317..deb10c25 100644 --- a/enterprise/signals/white_signals.py +++ b/enterprise/signals/white_signals.py @@ -119,6 +119,7 @@ def EquadNoise(*args, **kwargs): " or TNEquadNoise to obtain legacy enterprise definition for EQUAD only [tnequad^2]." ) + @utils.static_vars(shown_fastshermanmorrison_warning=False) def EcorrKernelNoise( log10_ecorr=parameter.Uniform(-10, -5), @@ -186,8 +187,11 @@ def EcorrKernelNoise( msg = "EcorrKernelNoise does not support method: {}".format(method) raise TypeError(msg) - if method == "fast-sherman-morrison" and fastshermanmorrison is None \ - and not EcorrKernelNoise.shown_fastshermanmorrison_warning: + if ( + method == "fast-sherman-morrison" + and fastshermanmorrison is None + and not EcorrKernelNoise.shown_fastshermanmorrison_warning + ): msg = "Package `fastshermanmorrison` not installed. Fallback to sherman-morrison" logger.warning(msg) EcorrKernelNoise.shown_fastshermanmorrison_warning = True From bf1705e57042fae00fa62162f1b5fcfca22b2843 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 26 Apr 2023 15:48:48 +0200 Subject: [PATCH 12/80] Changed the EcorrKernelWarning machanism --- enterprise/signals/utils.py | 14 -------------- enterprise/signals/white_signals.py | 6 +++--- 2 files changed, 3 insertions(+), 17 deletions(-) diff --git a/enterprise/signals/utils.py b/enterprise/signals/utils.py index ee57f2f6..df949af0 100644 --- a/enterprise/signals/utils.py +++ b/enterprise/signals/utils.py @@ -31,20 +31,6 @@ logger = logging.getLogger(__name__) -def static_vars(**kwargs): - """This decorator allows a class factory to have a static variable, for - things such as checking whether a python package exists. If the package - does not exist, a warning is shown only once - """ - - def decorate(func): - for k in kwargs: - setattr(func, k, kwargs[k]) - return func - - return decorate - - class ConditionalGP: def __init__(self, pta, phiinv_method="cliques"): """This class allows the computation of conditional means and diff --git a/enterprise/signals/white_signals.py b/enterprise/signals/white_signals.py index deb10c25..5b9d8dd7 100644 --- a/enterprise/signals/white_signals.py +++ b/enterprise/signals/white_signals.py @@ -16,6 +16,7 @@ import fastshermanmorrison.fastshermanmorrison as fastshermanmorrison except ImportError: fastshermanmorrison = None + shown_fastshermanmorrison_warning = False # logging.basicConfig(format="%(levelname)s: %(name)s: %(message)s", level=logging.INFO) logger = logging.getLogger(__name__) @@ -120,7 +121,6 @@ def EquadNoise(*args, **kwargs): ) -@utils.static_vars(shown_fastshermanmorrison_warning=False) def EcorrKernelNoise( log10_ecorr=parameter.Uniform(-10, -5), selection=Selection(selections.no_selection), @@ -190,11 +190,11 @@ def EcorrKernelNoise( if ( method == "fast-sherman-morrison" and fastshermanmorrison is None - and not EcorrKernelNoise.shown_fastshermanmorrison_warning + and not shown_fastshermanmorrison_warning ): msg = "Package `fastshermanmorrison` not installed. Fallback to sherman-morrison" logger.warning(msg) - EcorrKernelNoise.shown_fastshermanmorrison_warning = True + shown_fastshermanmorrison_warning = True class EcorrKernelNoise(signal_base.Signal): signal_type = "white noise" From f8d71f5dd9c676ae7bc9943996bcf39de39d5423 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 26 Apr 2023 16:01:54 +0200 Subject: [PATCH 13/80] Blacking --- enterprise/signals/white_signals.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/enterprise/signals/white_signals.py b/enterprise/signals/white_signals.py index 5b9d8dd7..fd396840 100644 --- a/enterprise/signals/white_signals.py +++ b/enterprise/signals/white_signals.py @@ -187,11 +187,7 @@ def EcorrKernelNoise( msg = "EcorrKernelNoise does not support method: {}".format(method) raise TypeError(msg) - if ( - method == "fast-sherman-morrison" - and fastshermanmorrison is None - and not shown_fastshermanmorrison_warning - ): + if method == "fast-sherman-morrison" and fastshermanmorrison is None and not shown_fastshermanmorrison_warning: msg = "Package `fastshermanmorrison` not installed. Fallback to sherman-morrison" logger.warning(msg) shown_fastshermanmorrison_warning = True From 8c4bfa8a148725bd660a25ec444eb29ca71e56f4 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 26 Apr 2023 16:15:49 +0200 Subject: [PATCH 14/80] Lint update --- enterprise/signals/white_signals.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise/signals/white_signals.py b/enterprise/signals/white_signals.py index fd396840..ab28881b 100644 --- a/enterprise/signals/white_signals.py +++ b/enterprise/signals/white_signals.py @@ -16,7 +16,7 @@ import fastshermanmorrison.fastshermanmorrison as fastshermanmorrison except ImportError: fastshermanmorrison = None - shown_fastshermanmorrison_warning = False +shown_fastshermanmorrison_warning = False # logging.basicConfig(format="%(levelname)s: %(name)s: %(message)s", level=logging.INFO) logger = logging.getLogger(__name__) From fb01136445da5edd0c6e7cd1da51d0a98ab4c032 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 26 Apr 2023 18:35:32 +0200 Subject: [PATCH 15/80] Removed the have_shown stuff for now --- enterprise/signals/white_signals.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/enterprise/signals/white_signals.py b/enterprise/signals/white_signals.py index ab28881b..cd740648 100644 --- a/enterprise/signals/white_signals.py +++ b/enterprise/signals/white_signals.py @@ -16,7 +16,6 @@ import fastshermanmorrison.fastshermanmorrison as fastshermanmorrison except ImportError: fastshermanmorrison = None -shown_fastshermanmorrison_warning = False # logging.basicConfig(format="%(levelname)s: %(name)s: %(message)s", level=logging.INFO) logger = logging.getLogger(__name__) @@ -187,10 +186,9 @@ def EcorrKernelNoise( msg = "EcorrKernelNoise does not support method: {}".format(method) raise TypeError(msg) - if method == "fast-sherman-morrison" and fastshermanmorrison is None and not shown_fastshermanmorrison_warning: + if method == "fast-sherman-morrison" and fastshermanmorrison is None: msg = "Package `fastshermanmorrison` not installed. Fallback to sherman-morrison" logger.warning(msg) - shown_fastshermanmorrison_warning = True class EcorrKernelNoise(signal_base.Signal): signal_type = "white noise" From 3ff9921c8046b1a102dfceb72ae507d5571e3696 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Thu, 4 May 2023 17:21:25 +0200 Subject: [PATCH 16/80] Placed fastshermanmorrison into requirements_dev.txt --- Makefile | 1 - requirements_dev.txt | 1 + requirements_tests.txt | 1 - 3 files changed, 1 insertion(+), 2 deletions(-) delete mode 100644 requirements_tests.txt diff --git a/Makefile b/Makefile index 43a249c3..70a5a1ed 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,6 @@ init: @./.enterprise/bin/python3 -m pip install -U pip setuptools wheel @./.enterprise/bin/python3 -m pip install -r requirements.txt -U @./.enterprise/bin/python3 -m pip install -r requirements_dev.txt -U - @./.enterprise/bin/python3 -m pip install -r requirements_tests.txt -U @./.enterprise/bin/python3 -m pre_commit install --install-hooks --overwrite @./.enterprise/bin/python3 -m pip install -e . @echo "run source .enterprise/bin/activate to activate environment" diff --git a/requirements_dev.txt b/requirements_dev.txt index 6d103434..b784df3d 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -15,3 +15,4 @@ pytest-cov>=2.7.0 coverage-conditional-plugin>=0.4.0 jupyter>=1.0.0 build==0.3.1.post1 +fastshermanmorrison-pulsar>=0.0.3 diff --git a/requirements_tests.txt b/requirements_tests.txt deleted file mode 100644 index 7cfe1708..00000000 --- a/requirements_tests.txt +++ /dev/null @@ -1 +0,0 @@ -fastshermanmorrison-pulsar>=0.0.3 From ef663fa5cadcc721c98b609c0f36375b5a14b850 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Thu, 21 Sep 2023 20:06:52 +0200 Subject: [PATCH 17/80] Added Percentage Point Function for all parameters --- enterprise/signals/parameter.py | 47 ++++++++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/enterprise/signals/parameter.py b/enterprise/signals/parameter.py index 38e4a4e2..be9b9466 100644 --- a/enterprise/signals/parameter.py +++ b/enterprise/signals/parameter.py @@ -6,6 +6,7 @@ import numpy as np from scipy.special import erf as _erf +import scipy.stats as sstats from enterprise.signals.selections import selection_func @@ -49,6 +50,11 @@ def __init__(self, name): msg = "Parameter classes need to define _prior, or _logprior." raise AttributeError(msg) + if hasattr(self, "_ppf"): + self.ppf = self._ppf(name) + else: + self.ppf = None + self.type = self.__class__.__name__.lower() def get_logpdf(self, value=None, **kwargs): @@ -94,6 +100,18 @@ def sample(self, **kwargs): else: return self.logprior(func=self._sampler, size=self._size, **kwargs) + def get_ppf(self, value=None, **kwargs): + if not isinstance(self, Parameter): + raise TypeError("You can only call get_pdf() on an " "instantiated (named) Parameter.") + + if self.ppf is None: + raise NotImplementedError("No ppf was implemented for this Parameter.") + + if value is None and "params" in kwargs: + value = kwargs["params"][self.name] + + return self.ppf(value, **kwargs) + @property def size(self): return self._size @@ -136,7 +154,7 @@ class GPCoefficients(Parameter): return GPCoefficients -def UserParameter(prior=None, logprior=None, sampler=None, size=None): +def UserParameter(prior=None, logprior=None, sampler=None, ppf=None, size=None): """Class factory for UserParameter, implementing Enterprise parameters with arbitrary priors. The prior is specified by way of an Enterprise ``Function`` of the form ``prior(value, [par1, par2])``. Optionally, @@ -147,6 +165,7 @@ def UserParameter(prior=None, logprior=None, sampler=None, size=None): :param prior: parameter prior pdf, given as Enterprise ``Function`` :param sampler: function returning a randomly sampled parameter according to prior + :param ppf: percentage point function (inverse cdf), for this parameter :param size: length for vector parameter :return: ``UserParameter`` class """ @@ -157,6 +176,8 @@ class UserParameter(Parameter): _prior = prior if logprior is not None: _logprior = logprior + if ppf is not None: + _ppf = ppf _sampler = None if sampler is None else staticmethod(sampler) _typename = "UserParameter" @@ -189,6 +210,12 @@ def UniformSampler(pmin, pmax, size=None): return np.random.uniform(pmin, pmax, size=size) +def UniformPPF(value, pmin, pmax): + """Percentage Point function for Uniform paramters.""" + + return sstats.uniform.ppf(value, loc=pmin, scale=pmax-pmin) + + def Uniform(pmin, pmax, size=None): """Class factory for Uniform parameters (with pdf(x) ~ 1/[pmax - pmin] inside [pmin,pmax], 0 outside. Handles vectors correctly, @@ -204,6 +231,7 @@ def Uniform(pmin, pmax, size=None): class Uniform(Parameter): _size = size _prior = Function(UniformPrior, pmin=pmin, pmax=pmax) + _ppf = Function(UniformPPF, pmin=pmin, pmax=pmax) _sampler = staticmethod(UniformSampler) _typename = _argrepr("Uniform", pmin=pmin, pmax=pmax) @@ -233,6 +261,14 @@ def NormalSampler(mu, sigma, size=None): return np.random.normal(mu, sigma, size=size) +def NormalPPF(value, mu, sigma): + """Prior function for Normal parameters. + Handles scalar mu and sigma, compatible vector value/mu/sigma, + vector value/mu and compatible covariance matrix sigma.""" + + return sstats.norm.ppf(value, loc=mu, scale=sigma) + + def Normal(mu=0, sigma=1, size=None): """Class factory for Normal parameters (with pdf(x) ~ N(``mu``,``sigma``)). Handles vectors correctly if ``size == len(mu) == len(sigma)``, @@ -249,6 +285,7 @@ def Normal(mu=0, sigma=1, size=None): class Normal(Parameter): _size = size _prior = Function(NormalPrior, mu=mu, sigma=sigma) + _ppf = Function(NormaPPF, mu=mu, sigma=sigma) _sampler = staticmethod(NormalSampler) _typename = _argrepr("Normal", mu=mu, sigma=sigma) @@ -346,6 +383,13 @@ def LinearExpSampler(pmin, pmax, size=None): return np.log10(np.random.uniform(10**pmin, 10**pmax, size)) +def LinearExpPPF(value, pmin, pmax): + """Percentage Point function for Uniform paramters.""" + + ev = sstats.uniform.ppf(value, loc=10**pmin, scale=10**pmax-10**pmin) + return np.log10(ev) + + def LinearExp(pmin, pmax, size=None): """Class factory for LinearExp parameters (with pdf(x) ~ 10^x, and 0 outside [``pmin``,``max``]). Handles vectors correctly @@ -361,6 +405,7 @@ def LinearExp(pmin, pmax, size=None): class LinearExp(Parameter): _size = size _prior = Function(LinearExpPrior, pmin=pmin, pmax=pmax) + _ppf = Function(LinearExpPPF, pmin=pmin, pmax=pmax) _sampler = staticmethod(LinearExpSampler) _typename = _argrepr("LinearExp", pmin=pmin, pmax=pmax) From 1fa8dd9dc8479edf08e0d0ab093a3646c503607d Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Thu, 21 Sep 2023 20:49:25 +0200 Subject: [PATCH 18/80] Added PPF tests --- enterprise/signals/parameter.py | 5 +++- tests/test_parameter.py | 43 ++++++++++++++++++++++++++++++--- 2 files changed, 44 insertions(+), 4 deletions(-) diff --git a/enterprise/signals/parameter.py b/enterprise/signals/parameter.py index be9b9466..b03c8974 100644 --- a/enterprise/signals/parameter.py +++ b/enterprise/signals/parameter.py @@ -266,6 +266,9 @@ def NormalPPF(value, mu, sigma): Handles scalar mu and sigma, compatible vector value/mu/sigma, vector value/mu and compatible covariance matrix sigma.""" + if np.ndim(sigma) == 2: + raise NotImplementedError("PPF not implemented when sigma is 2D") + return sstats.norm.ppf(value, loc=mu, scale=sigma) @@ -285,7 +288,7 @@ def Normal(mu=0, sigma=1, size=None): class Normal(Parameter): _size = size _prior = Function(NormalPrior, mu=mu, sigma=sigma) - _ppf = Function(NormaPPF, mu=mu, sigma=sigma) + _ppf = Function(NormalPPF, mu=mu, sigma=sigma) _sampler = staticmethod(NormalSampler) _typename = _argrepr("Normal", mu=mu, sigma=sigma) diff --git a/tests/test_parameter.py b/tests/test_parameter.py index b706b834..a299e28f 100644 --- a/tests/test_parameter.py +++ b/tests/test_parameter.py @@ -13,10 +13,10 @@ import numpy as np import scipy.stats -from enterprise.signals.parameter import UniformPrior, UniformSampler, Uniform -from enterprise.signals.parameter import NormalPrior, NormalSampler, Normal +from enterprise.signals.parameter import UniformPrior, UniformSampler, Uniform, UniformPPF +from enterprise.signals.parameter import NormalPrior, NormalSampler, Normal, NormalPPF from enterprise.signals.parameter import TruncNormalPrior, TruncNormalSampler, TruncNormal -from enterprise.signals.parameter import LinearExpPrior, LinearExpSampler +from enterprise.signals.parameter import LinearExpPrior, LinearExpSampler, LinearExpPPF class TestParameter(unittest.TestCase): @@ -35,6 +35,9 @@ def test_uniform(self): assert p_min < x1 < p_max, msg2 assert type(x1) == float, msg2 + msg3 = "Enterprise and scipy PPF do not match" + assert np.allclose(UniformPPF(x, p_min, p_max), scipy.stats.uniform.ppf(x, p_min, p_max - p_min)), msg3 + # vector argument x = np.array([0.5, 0.1]) assert np.allclose(UniformPrior(x, p_min, p_max), scipy.stats.uniform.pdf(x, p_min, p_max - p_min)), msg1 @@ -43,9 +46,13 @@ def test_uniform(self): assert np.all((p_min < x1) & (x1 < p_max)), msg2 assert x1.shape == (3,), msg2 + # vector argument + assert np.allclose(UniformPPF(x, p_min, p_max), scipy.stats.uniform.ppf(x, p_min, p_max - p_min)), msg3 + # vector bounds p_min, p_max = np.array([0.2, 0.3]), np.array([1.1, 1.2]) assert np.allclose(UniformPrior(x, p_min, p_max), scipy.stats.uniform.pdf(x, p_min, p_max - p_min)), msg1 + assert np.allclose(UniformPPF(x, p_min, p_max), scipy.stats.uniform.ppf(x, p_min, p_max - p_min)), msg3 x1 = UniformSampler(p_min, p_max) assert np.all((p_min < x1) & (x1 < p_max)), msg2 @@ -68,6 +75,10 @@ def test_linearexp(self): msg1b = "Scalar sampler out of range" assert p_min <= x <= p_max, msg1b + msg1c = "Scalar PPF does not match" + x = 0.5 + assert np.allclose(LinearExpPPF(x, p_min, p_max), np.log10(10**p_min + x*(10**p_max-10**p_min))), msg1c + # vector argument x = np.array([0, 1.5, 2.5]) msg2 = "Vector-argument prior does not match" @@ -79,6 +90,13 @@ def test_linearexp(self): msg2b = "Vector-argument sampler out of range" assert np.all((p_min < x) & (x < p_max)), msg2b + x = np.array([0.5, 0.75]) + msg2c = "Vector-argument PPF does not match" + assert np.allclose( + LinearExpPPF(x, p_min, p_max), + np.log10(10**p_min + x * (10**p_max-10**p_min)) + ), msg2c + # vector bounds p_min, p_max = np.array([0, 1]), np.array([2, 3]) x = np.array([1, 2]) @@ -88,6 +106,15 @@ def test_linearexp(self): np.array([10**1 / (10**2 - 10**0), 10**2 / (10**3 - 10**1)]) * np.log(10), ), msg3 + # Vector PPF + x = np.array([0.5, 0.75]) + p_min, p_max = np.array([0, 1]), np.array([2, 3]) + msg3c = "Vector-argument PPF+bounds does not match" + assert np.allclose( + LinearExpPPF(x, p_min, p_max), + np.log10(10**p_min + x * (10**p_max-10**p_min)) + ), msg3c + def test_normal(self): """Test Normal parameter prior and sampler for various combinations of scalar and vector arguments.""" @@ -105,6 +132,11 @@ def test_normal(self): # this should almost never fail assert -5 < (x1 - mu) / sigma < 5, msg2 + msg3 = "Enterprise and scipy PPF do not match" + assert np.allclose( + NormalPPF(x, mu, sigma), scipy.stats.norm.ppf(x, loc=mu, scale=sigma) + ), msg3 + # vector argument x = np.array([-0.2, 0.1, 0.5]) @@ -118,6 +150,11 @@ def test_normal(self): ) assert x1.shape == x2.shape, msg2 + x = np.array([0.1, 0.25, 0.65]) + assert np.allclose( + NormalPPF(x, mu, sigma), scipy.stats.norm.ppf(x, loc=mu, scale=sigma) + ), msg3 + # vector bounds; note the different semantics from `NormalPrior`, # which returns a vector consistently with `UniformPrior` mu, sigma = np.array([0.1, 0.15, 0.2]), np.array([2, 1, 2]) From 61599a802317d3bb952357774b92457c03273d71 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Thu, 21 Sep 2023 21:13:56 +0200 Subject: [PATCH 19/80] Done linting on parameter.py and test_parameter.py --- enterprise/signals/parameter.py | 6 ++---- tests/test_parameter.py | 18 +++++++----------- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/enterprise/signals/parameter.py b/enterprise/signals/parameter.py index b03c8974..46ac90a6 100644 --- a/enterprise/signals/parameter.py +++ b/enterprise/signals/parameter.py @@ -213,7 +213,7 @@ def UniformSampler(pmin, pmax, size=None): def UniformPPF(value, pmin, pmax): """Percentage Point function for Uniform paramters.""" - return sstats.uniform.ppf(value, loc=pmin, scale=pmax-pmin) + return sstats.uniform.ppf(value, loc=pmin, scale=pmax - pmin) def Uniform(pmin, pmax, size=None): @@ -389,7 +389,7 @@ def LinearExpSampler(pmin, pmax, size=None): def LinearExpPPF(value, pmin, pmax): """Percentage Point function for Uniform paramters.""" - ev = sstats.uniform.ppf(value, loc=10**pmin, scale=10**pmax-10**pmin) + ev = sstats.uniform.ppf(value, loc=10**pmin, scale=10**pmax - 10**pmin) return np.log10(ev) @@ -487,7 +487,6 @@ def __init__(self, name, psr=None): for kw, arg in self.func_kwargs.items(): if isinstance(arg, type) and issubclass(arg, (Parameter, ConstantParameter)): - # parameter name template: # pname_[signalname_][fname_]parname pnames = [name, fname, kw] @@ -630,7 +629,6 @@ def wrapper(*args, **kwargs): and issubclass(arg, FunctionBase) or isinstance(arg, FunctionBase) ): - return Function(func, **kwargs) # otherwise, we simply call the function diff --git a/tests/test_parameter.py b/tests/test_parameter.py index a299e28f..9b60b36b 100644 --- a/tests/test_parameter.py +++ b/tests/test_parameter.py @@ -77,7 +77,9 @@ def test_linearexp(self): msg1c = "Scalar PPF does not match" x = 0.5 - assert np.allclose(LinearExpPPF(x, p_min, p_max), np.log10(10**p_min + x*(10**p_max-10**p_min))), msg1c + assert np.allclose( + LinearExpPPF(x, p_min, p_max), np.log10(10**p_min + x * (10**p_max - 10**p_min)) + ), msg1c # vector argument x = np.array([0, 1.5, 2.5]) @@ -93,8 +95,7 @@ def test_linearexp(self): x = np.array([0.5, 0.75]) msg2c = "Vector-argument PPF does not match" assert np.allclose( - LinearExpPPF(x, p_min, p_max), - np.log10(10**p_min + x * (10**p_max-10**p_min)) + LinearExpPPF(x, p_min, p_max), np.log10(10**p_min + x * (10**p_max - 10**p_min)) ), msg2c # vector bounds @@ -111,8 +112,7 @@ def test_linearexp(self): p_min, p_max = np.array([0, 1]), np.array([2, 3]) msg3c = "Vector-argument PPF+bounds does not match" assert np.allclose( - LinearExpPPF(x, p_min, p_max), - np.log10(10**p_min + x * (10**p_max-10**p_min)) + LinearExpPPF(x, p_min, p_max), np.log10(10**p_min + x * (10**p_max - 10**p_min)) ), msg3c def test_normal(self): @@ -133,9 +133,7 @@ def test_normal(self): assert -5 < (x1 - mu) / sigma < 5, msg2 msg3 = "Enterprise and scipy PPF do not match" - assert np.allclose( - NormalPPF(x, mu, sigma), scipy.stats.norm.ppf(x, loc=mu, scale=sigma) - ), msg3 + assert np.allclose(NormalPPF(x, mu, sigma), scipy.stats.norm.ppf(x, loc=mu, scale=sigma)), msg3 # vector argument x = np.array([-0.2, 0.1, 0.5]) @@ -151,9 +149,7 @@ def test_normal(self): assert x1.shape == x2.shape, msg2 x = np.array([0.1, 0.25, 0.65]) - assert np.allclose( - NormalPPF(x, mu, sigma), scipy.stats.norm.ppf(x, loc=mu, scale=sigma) - ), msg3 + assert np.allclose(NormalPPF(x, mu, sigma), scipy.stats.norm.ppf(x, loc=mu, scale=sigma)), msg3 # vector bounds; note the different semantics from `NormalPrior`, # which returns a vector consistently with `UniformPrior` From f78af6b081b533359d4ede53965c8eb874da2008 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Fri, 22 Sep 2023 22:45:34 +0200 Subject: [PATCH 20/80] Allow for index arrays in ShermanMorrison, so we do not need sorted TOAs for EcorrKernelNoise signals --- enterprise/signals/signal_base.py | 46 +++++++++++++++++++------------ enterprise/signals/utils.py | 16 ++++++----- 2 files changed, 37 insertions(+), 25 deletions(-) diff --git a/enterprise/signals/signal_base.py b/enterprise/signals/signal_base.py index e0fd3d32..9f700a77 100644 --- a/enterprise/signals/signal_base.py +++ b/enterprise/signals/signal_base.py @@ -1212,12 +1212,22 @@ def solve(self, other, left_array=None, logdet=False): return (ret, self._get_logdet()) if logdet else ret +def indices_from_slice(slc): + """Given a slice object, return an index arrays""" + + if isinstance(slc, np.ndarray): + return slc + else: + return np.arange(*slc.indices(slc.stop)) + + class ShermanMorrison(object): """Custom container class for Sherman-morrison array inversion.""" def __init__(self, jvec, slices, nvec=0.0): self._jvec = jvec self._slices = slices + self._idxs = [indices_from_slice(slc) for slc in slices] self._nvec = nvec def __add__(self, other): @@ -1235,12 +1245,12 @@ def _solve_D1(self, x): """Solves :math:`N^{-1}x` where :math:`x` is a vector.""" Nx = x / self._nvec - for slc, jv in zip(self._slices, self._jvec): - if slc.stop - slc.start > 1: - rblock = x[slc] - niblock = 1 / self._nvec[slc] + for idx, jv in zip(self._idxs, self._jvec): + if len(idx) > 1: + rblock = x[idx] + niblock = 1 / self._nvec[idx] beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) - Nx[slc] -= beta * np.dot(niblock, rblock) * niblock + Nx[idx] -= beta * np.dot(niblock, rblock) * niblock return Nx def _solve_1D1(self, x, y): @@ -1250,11 +1260,11 @@ def _solve_1D1(self, x, y): Nx = x / self._nvec yNx = np.dot(y, Nx) - for slc, jv in zip(self._slices, self._jvec): - if slc.stop - slc.start > 1: - xblock = x[slc] - yblock = y[slc] - niblock = 1 / self._nvec[slc] + for idx, jv in zip(self._idxs, self._jvec): + if len(idx) > 1: + xblock = x[idx] + yblock = y[idx] + niblock = 1 / self._nvec[idx] beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) yNx -= beta * np.dot(niblock, xblock) * np.dot(niblock, yblock) return yNx @@ -1265,11 +1275,11 @@ def _solve_2D2(self, X, Z): """ ZNX = np.dot(Z.T / self._nvec, X) - for slc, jv in zip(self._slices, self._jvec): - if slc.stop - slc.start > 1: - Zblock = Z[slc, :] - Xblock = X[slc, :] - niblock = 1 / self._nvec[slc] + for idx, jv in zip(self._idxs, self._jvec): + if len(idx) > 1: + Zblock = Z[idx, :] + Xblock = X[idx, :] + niblock = 1 / self._nvec[idx] beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) zn = np.dot(niblock, Zblock) xn = np.dot(niblock, Xblock) @@ -1281,9 +1291,9 @@ def _get_logdet(self): is a quantization matrix. """ logdet = np.einsum("i->", np.log(self._nvec)) - for slc, jv in zip(self._slices, self._jvec): - if slc.stop - slc.start > 1: - niblock = 1 / self._nvec[slc] + for idx, jv in zip(self._idxs, self._jvec): + if len(idx) > 1: + niblock = 1 / self._nvec[idx] beta = 1.0 / (np.einsum("i->", niblock) + 1.0 / jv) logdet += np.log(jv) - np.log(beta) return logdet diff --git a/enterprise/signals/utils.py b/enterprise/signals/utils.py index df949af0..d99e1b72 100644 --- a/enterprise/signals/utils.py +++ b/enterprise/signals/utils.py @@ -767,23 +767,25 @@ def create_quantization_matrix(toas, dt=1, nmin=2): return U, weights -def quant2ind(U): +def quant2ind(U, as_slice=False): """ - Use quantization matrix to return slices of non-zero elements. + Use quantization matrix to return indices of non-zero elements. :param U: quantization matrix + :param as_slice: whether to return a slice object - :return: list of `slice`s for non-zero elements of U + :return: list of `slice`s or indices for non-zero elements of U - .. note:: This function assumes that the pulsar TOAs were sorted by time. + .. note:: For slice objects the TOAs need to be sorted by time """ inds = [] for cc, col in enumerate(U.T): epinds = np.flatnonzero(col) - if epinds[-1] - epinds[0] + 1 != len(epinds): - raise ValueError("ERROR: TOAs not sorted properly!") - inds.append(slice(epinds[0], epinds[-1] + 1)) + if epinds[-1] - epinds[0] + 1 != len(epinds) or not as_slice: + inds.append(epinds) + else: + inds.append(slice(epinds[0], epinds[-1] + 1)) return inds From 4d678ae7270d6ae3e5bf43f70d0d87cc33d17cdb Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Sun, 24 Sep 2023 22:28:52 +0200 Subject: [PATCH 21/80] Updated all the white signals and block matrix code for index array indexing --- enterprise/signals/signal_base.py | 41 ++++++++++++----------------- enterprise/signals/utils.py | 9 +++++++ enterprise/signals/white_signals.py | 37 ++++++++++++++------------ 3 files changed, 46 insertions(+), 41 deletions(-) diff --git a/enterprise/signals/signal_base.py b/enterprise/signals/signal_base.py index 9f700a77..adc3125e 100644 --- a/enterprise/signals/signal_base.py +++ b/enterprise/signals/signal_base.py @@ -25,6 +25,7 @@ from enterprise.signals.parameter import function # noqa: F401 from enterprise.signals.parameter import ConstantParameter from enterprise.signals.utils import KernelMatrix +from enterprise.signals.utils import indices_from_slice from enterprise import __version__ from sys import version @@ -1118,6 +1119,7 @@ class BlockMatrix(object): def __init__(self, blocks, slices, nvec=0): self._blocks = blocks self._slices = slices + self._idxs = [indices_from_slice(slc) for slc in slices] self._nvec = nvec if np.any(nvec != 0): @@ -1152,15 +1154,15 @@ def _solve_ZNX(self, X, Z): ZNXr = np.dot(Z[self._idx, :].T, X[self._idx, :] / self._nvec[self._idx, None]) else: ZNXr = 0 - for slc, block in zip(self._slices, self._blocks): - Zblock = Z[slc, :] - Xblock = X[slc, :] + for idx, block in zip(self._idxs, self._blocks): + Zblock = Z[idx, :] + Xblock = X[idx, :] - if slc.stop - slc.start > 1: - cf = sl.cho_factor(block + np.diag(self._nvec[slc])) + if len(idx) > 1: + cf = sl.cho_factor(block + np.diag(self._nvec[idx])) bx = sl.cho_solve(cf, Xblock) else: - bx = Xblock / self._nvec[slc][:, None] + bx = Xblock / self._nvec[idx][:, None] ZNX += np.dot(Zblock.T, bx) ZNX += ZNXr return ZNX.squeeze() if len(ZNX) > 1 else float(ZNX) @@ -1173,11 +1175,11 @@ def _solve_NX(self, X): X = X.reshape(X.shape[0], 1) NX = X / self._nvec[:, None] - for slc, block in zip(self._slices, self._blocks): - Xblock = X[slc, :] - if slc.stop - slc.start > 1: - cf = sl.cho_factor(block + np.diag(self._nvec[slc])) - NX[slc] = sl.cho_solve(cf, Xblock) + for idx, block in zip(self._idxs, self._blocks): + Xblock = X[idx, :] + if len(idx) > 1: + cf = sl.cho_factor(block + np.diag(self._nvec[idx])) + NX[idx] = sl.cho_solve(cf, Xblock) return NX.squeeze() def _get_logdet(self): @@ -1188,12 +1190,12 @@ def _get_logdet(self): logdet = np.sum(np.log(self._nvec[self._idx])) else: logdet = 0 - for slc, block in zip(self._slices, self._blocks): - if slc.stop - slc.start > 1: - cf = sl.cho_factor(block + np.diag(self._nvec[slc])) + for idx, block in zip(self._idxs, self._blocks): + if len(idx) > 1: + cf = sl.cho_factor(block + np.diag(self._nvec[idx])) logdet += np.sum(2 * np.log(np.diag(cf[0]))) else: - logdet += np.sum(np.log(self._nvec[slc])) + logdet += np.sum(np.log(self._nvec[idx])) return logdet def solve(self, other, left_array=None, logdet=False): @@ -1212,15 +1214,6 @@ def solve(self, other, left_array=None, logdet=False): return (ret, self._get_logdet()) if logdet else ret -def indices_from_slice(slc): - """Given a slice object, return an index arrays""" - - if isinstance(slc, np.ndarray): - return slc - else: - return np.arange(*slc.indices(slc.stop)) - - class ShermanMorrison(object): """Custom container class for Sherman-morrison array inversion.""" diff --git a/enterprise/signals/utils.py b/enterprise/signals/utils.py index d99e1b72..eff440e1 100644 --- a/enterprise/signals/utils.py +++ b/enterprise/signals/utils.py @@ -789,6 +789,15 @@ def quant2ind(U, as_slice=False): return inds +def indices_from_slice(slc): + """Given a slice object, return an index arrays""" + + if isinstance(slc, np.ndarray): + return slc + else: + return np.arange(*slc.indices(slc.stop)) + + def linear_interp_basis(toas, dt=30 * 86400): """Provides a basis for linear interpolation. diff --git a/enterprise/signals/white_signals.py b/enterprise/signals/white_signals.py index a7d794b0..e68c9023 100644 --- a/enterprise/signals/white_signals.py +++ b/enterprise/signals/white_signals.py @@ -10,6 +10,7 @@ from enterprise.signals import parameter, selections, signal_base, utils from enterprise.signals.parameter import function from enterprise.signals.selections import Selection +from enterprise.signals.utils import indices_from_slice def WhiteNoise(varianceFunction, selection=Selection(selections.no_selection), name=""): @@ -191,6 +192,7 @@ def __init__(self, psr): nepoch = sum(U.shape[1] for U in Umats) U = np.zeros((len(psr.toas), nepoch)) self._slices = {} + self._idxs = {} netot = 0 for ct, (key, mask) in enumerate(zip(keys, masks)): nn = Umats[ct].shape[1] @@ -198,6 +200,10 @@ def __init__(self, psr): self._slices.update({key: utils.quant2ind(U[:, netot : nn + netot])}) netot += nn + self._idxs.update( + {key: [indices_from_slice(slc) for slc in slices] for (key, slices) in self._slices.items()} + ) + # initialize sparse matrix self._setup(psr) @@ -221,17 +227,17 @@ def _setup(self, psr): def _setup_sparse(self, psr): Ns = scipy.sparse.csc_matrix((len(psr.toas), len(psr.toas))) - for key, slices in self._slices.items(): - for slc in slices: - if slc.stop - slc.start > 1: - Ns[slc, slc] = 1.0 + for key, idxs in self._idxs.items(): + for idx in idxs: + if len(idx) > 1: + Ns[np.ix_(idx, idx)] = 1.0 self._Ns = signal_base.csc_matrix_alt(Ns) def _get_ndiag_sparse(self, params): for p in self._params: - for slc in self._slices[p]: - if slc.stop - slc.start > 1: - self._Ns[slc, slc] = 10 ** (2 * self.get(p, params)) + for idx in self._idxs[p]: + if len(idx) > 1: + self._Ns[np.ix_(idx, idx)] = 10 ** (2 * self.get(p, params)) return self._Ns def _get_ndiag_sherman_morrison(self, params): @@ -239,21 +245,18 @@ def _get_ndiag_sherman_morrison(self, params): return signal_base.ShermanMorrison(jvec, slices) def _get_ndiag_block(self, params): - slices, jvec = self._get_jvecs(params) + idxs, jvec = self._get_jvecs(params) blocks = [] - for jv, slc in zip(jvec, slices): - nb = slc.stop - slc.start + for jv, idx in zip(jvec, idxs): + nb = len(idx) blocks.append(np.ones((nb, nb)) * jv) - return signal_base.BlockMatrix(blocks, slices) + return signal_base.BlockMatrix(blocks, idxs) def _get_jvecs(self, params): - slices = sum([self._slices[key] for key in sorted(self._slices.keys())], []) + idxs = sum([self._idxs[key] for key in sorted(self._idxs.keys())], []) jvec = np.concatenate( - [ - np.ones(len(self._slices[key])) * 10 ** (2 * self.get(key, params)) - for key in sorted(self._slices.keys()) - ] + [np.ones(len(self._idxs[key])) * 10 ** (2 * self.get(key, params)) for key in sorted(self._idxs.keys())] ) - return (slices, jvec) + return (idxs, jvec) return EcorrKernelNoise From 37b4c9c2a8bf9020d8503d7ca74f7eee432a2381 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Mon, 25 Sep 2023 13:17:30 +0200 Subject: [PATCH 22/80] Provided mechanism for showing warning message about fastshermanmorrison, and updated fastshermanmorrison-pulsar in requirements_dev.txt --- enterprise/signals/white_signals.py | 8 +++++++- requirements_dev.txt | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/enterprise/signals/white_signals.py b/enterprise/signals/white_signals.py index cd740648..72d079d4 100644 --- a/enterprise/signals/white_signals.py +++ b/enterprise/signals/white_signals.py @@ -14,8 +14,11 @@ try: import fastshermanmorrison.fastshermanmorrison as fastshermanmorrison + + fsm_warning_issued = False except ImportError: fastshermanmorrison = None + fsm_warning_issued = False # logging.basicConfig(format="%(levelname)s: %(name)s: %(message)s", level=logging.INFO) logger = logging.getLogger(__name__) @@ -182,13 +185,16 @@ def EcorrKernelNoise( """ + global fsm_warning_issued + if method not in ["fast-sherman-morrison", "sherman-morrison", "block", "sparse"]: msg = "EcorrKernelNoise does not support method: {}".format(method) raise TypeError(msg) - if method == "fast-sherman-morrison" and fastshermanmorrison is None: + if method == "fast-sherman-morrison" and fastshermanmorrison is None and not fsm_warning_issued: msg = "Package `fastshermanmorrison` not installed. Fallback to sherman-morrison" logger.warning(msg) + fsm_warning_issued = True class EcorrKernelNoise(signal_base.Signal): signal_type = "white noise" diff --git a/requirements_dev.txt b/requirements_dev.txt index b784df3d..d173ce59 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -15,4 +15,4 @@ pytest-cov>=2.7.0 coverage-conditional-plugin>=0.4.0 jupyter>=1.0.0 build==0.3.1.post1 -fastshermanmorrison-pulsar>=0.0.3 +fastshermanmorrison-pulsar>=0.1.0 From 1df974af108703e76c06c40073896d1947a07c6b Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Tue, 26 Sep 2023 18:34:10 +0200 Subject: [PATCH 23/80] Added pragmas for no coverage of exception results --- enterprise/signals/white_signals.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/enterprise/signals/white_signals.py b/enterprise/signals/white_signals.py index 72d079d4..f74f3d10 100644 --- a/enterprise/signals/white_signals.py +++ b/enterprise/signals/white_signals.py @@ -16,7 +16,7 @@ import fastshermanmorrison.fastshermanmorrison as fastshermanmorrison fsm_warning_issued = False -except ImportError: +except ImportError: #pragma: no cover fastshermanmorrison = None fsm_warning_issued = False @@ -191,7 +191,7 @@ def EcorrKernelNoise( msg = "EcorrKernelNoise does not support method: {}".format(method) raise TypeError(msg) - if method == "fast-sherman-morrison" and fastshermanmorrison is None and not fsm_warning_issued: + if method == "fast-sherman-morrison" and fastshermanmorrison is None and not fsm_warning_issued: # pragma: no cover msg = "Package `fastshermanmorrison` not installed. Fallback to sherman-morrison" logger.warning(msg) fsm_warning_issued = True @@ -239,7 +239,7 @@ def get_ndiag(self, params): elif method == "fast-sherman-morrison": if fastshermanmorrison: return self._get_ndiag_fast_sherman_morrison(params) - else: + else: # pragma: no cover return self._get_ndiag_sherman_morrison(params) elif method == "sparse": return self._get_ndiag_sparse(params) From 3698716cef980ffce1124149632e46b5f500a23d Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Tue, 26 Sep 2023 20:45:13 +0200 Subject: [PATCH 24/80] Linting: forgot a space in a pragma --- enterprise/signals/white_signals.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise/signals/white_signals.py b/enterprise/signals/white_signals.py index f74f3d10..a1df6f86 100644 --- a/enterprise/signals/white_signals.py +++ b/enterprise/signals/white_signals.py @@ -16,7 +16,7 @@ import fastshermanmorrison.fastshermanmorrison as fastshermanmorrison fsm_warning_issued = False -except ImportError: #pragma: no cover +except ImportError: # pragma: no cover fastshermanmorrison = None fsm_warning_issued = False From ca9a5a97add6be92a5005599e6f338bdafb3a553 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 27 Sep 2023 11:32:20 +0200 Subject: [PATCH 25/80] Modified pulsar.py to allow for additional pulsar types from external packages --- enterprise/pulsar.py | 129 ++++++++++++++++++++++++++++++++----------- 1 file changed, 98 insertions(+), 31 deletions(-) diff --git a/enterprise/pulsar.py b/enterprise/pulsar.py index b8f23cd5..7929182c 100644 --- a/enterprise/pulsar.py +++ b/enterprise/pulsar.py @@ -2,10 +2,12 @@ """Class containing pulsar data from timing package [tempo2/PINT]. """ +import contextlib import json import logging import os import pickle +from io import StringIO import astropy.constants as const import astropy.units as u @@ -149,9 +151,18 @@ def filter_data(self, start_time=None, end_time=None): self.sort_data() + def drop_not_picklable(self): + """Drop all attributes that cannot be pickled. + + Derived classes should implement this if they have + any such attributes. + """ + pass + def to_pickle(self, outdir=None): """Save object to pickle file.""" + self.drop_not_picklable() # drop t2pulsar object if hasattr(self, "t2pulsar"): del self.t2pulsar @@ -318,7 +329,11 @@ def __init__(self, toas, model, sort=True, drop_pintpsr=True, planets=True): if not drop_pintpsr: self.model = model + self.parfile = model.as_parfile() self.pint_toas = toas + with StringIO() as tim: + toas.write_TOA_file(tim) + self.timfile = tim.getvalue() # these are TDB but not barycentered # self._toas = np.array(toas.table["tdbld"], dtype="float64") * 86400 @@ -327,20 +342,17 @@ def __init__(self, toas, model, sort=True, drop_pintpsr=True, planets=True): self._stoas = np.array(toas.get_mjds().value, dtype="float64") * 86400 self._residuals = np.array(resids(toas, model).time_resids.to(u.s), dtype="float64") self._toaerrs = np.array(toas.get_errors().to(u.s), dtype="float64") - self._designmatrix = model.designmatrix(toas)[0] + self._designmatrix, self.fitpars, self.designmatrix_units = model.designmatrix(toas) self._ssbfreqs = np.array(model.barycentric_radio_freq(toas), dtype="float64") self._telescope = np.array(toas.get_obss()) - # fitted parameters - self.fitpars = ["Offset"] + [par for par in model.params if not getattr(model, par).frozen] - # gather DM/DMX information if available self._set_dm(model) # set parameters - spars = [par for par in model.params] - self.setpars = [sp for sp in spars if sp not in self.fitpars] + self.setpars = [sp for sp in model.params if sp not in self.fitpars] + # FIXME: this can be done more cleanly using PINT self._flags = {} for ii, obsflags in enumerate(toas.get_flags()): for jj, flag in enumerate(obsflags): @@ -351,6 +363,7 @@ def __init__(self, toas, model, sort=True, drop_pintpsr=True, planets=True): # convert flags to arrays # TODO probably better way to do this + # -- PINT always stores flags as strings for key, val in self._flags.items(): if isinstance(val[0], u.quantity.Quantity): self._flags[key] = np.array([v.value for v in val]) @@ -371,6 +384,21 @@ def __init__(self, toas, model, sort=True, drop_pintpsr=True, planets=True): self.sort_data() + def drop_pintpsr(self): + with contextlib.suppress(NameError): + del self.model + del self.parfile + del self.pint_toas + del self.timfile + + def drop_not_picklable(self): + with contextlib.suppress(AttributeError): + del self.model + del self.pint_toas + logger.warning("pint_toas and model objects cannot be pickled and have been removed.") + + return super().drop_not_picklable() + def _set_dm(self, model): pars = [par for par in model.params if not getattr(model, par).frozen] @@ -447,7 +475,15 @@ def _get_sunssb(self, toas, model): class Tempo2Pulsar(BasePulsar): - def __init__(self, t2pulsar, sort=True, drop_t2pulsar=True, planets=True): + def __init__( + self, + t2pulsar, + sort=True, + drop_t2pulsar=True, + planets=True, + par_name=None, + tim_name=None, + ): self._sort = sort self.t2pulsar = t2pulsar @@ -496,6 +532,11 @@ def __init__(self, t2pulsar, sort=True, drop_t2pulsar=True, planets=True): if drop_t2pulsar: del self.t2pulsar + else: + if par_name is not None and os.path.exists(par_name): + self.parfile = open(par_name).read() + if tim_name is not None and os.path.exists(tim_name): + self.timfile = open(tim_name).read() # gather DM/DMX information if available def _set_dm(self, t2pulsar): @@ -557,7 +598,7 @@ def _get_sunssb(self, t2pulsar): sunssb = None if self.planets: # for ii in range(1, 10): - # tag = 'DMASSPLANET' + str(ii) + # tag = 'DMASSPLANET' + str(ii)@pytest.mark.skipif(t2 is None, reason="TEMPO2/libstempo not available") # self.t2pulsar[tag].val = 0.0 self.t2pulsar.formbats() sunssb = np.zeros((len(self._toas), 6)) @@ -574,6 +615,12 @@ def _get_sunssb(self, t2pulsar): # then replace them with pickleable objects that can be inflated # to numpy arrays with SharedMemory storage + def drop_not_picklable(self): + with contextlib.suppress(AttributeError): + del self.t2pulsar + logger.warning("t2pulsar object cannot be pickled and has been removed.") + return super().drop_not_picklable() + _todeflate = ["_designmatrix", "_planetssb", "_sunssb", "_flags"] _deflated = "pristine" @@ -610,7 +657,9 @@ def Pulsar(*args, **kwargs): sort = kwargs.get("sort", True) drop_t2pulsar = kwargs.get("drop_t2pulsar", True) drop_pintpsr = kwargs.get("drop_pintpsr", True) - timing_package = kwargs.get("timing_package", "tempo2") + timing_package = kwargs.get("timing_package", None) + if timing_package is not None: + timing_package = timing_package.lower() if pint is not None: toas = [x for x in args if isinstance(x, TOAs)] @@ -638,28 +687,46 @@ def Pulsar(*args, **kwargs): reltimfile = timfiletup[-1] relparfile = os.path.relpath(parfile[0], dirname) + if timing_package is None: + if t2 is not None: + timing_package = "tempo2" + elif pint is not None: + timing_package = "pint" + else: + raise ValueError("No timing package available with which to load a pulsar") + # get current directory cwd = os.getcwd() - - # Change directory to the base directory of the tim-file to deal with - # INCLUDE statements in the tim-file - os.chdir(dirname) - - if timing_package.lower() == "pint": - if (clk is not None) and (bipm_version is None): - bipm_version = clk.split("(")[1][:-1] - model, toas = get_model_and_toas( - relparfile, reltimfile, ephem=ephem, bipm_version=bipm_version, planets=planets - ) - os.chdir(cwd) - return PintPulsar(toas, model, sort=sort, drop_pintpsr=drop_pintpsr, planets=planets) - - elif timing_package.lower() == "tempo2": - - # hack to set maxobs - maxobs = get_maxobs(reltimfile) + 100 - t2pulsar = t2.tempopulsar(relparfile, reltimfile, maxobs=maxobs, ephem=ephem, clk=clk) + try: + # Change directory to the base directory of the tim-file to deal with + # INCLUDE statements in the tim-file + os.chdir(dirname) + if timing_package.lower == "tempo2": + if t2 is None: + raise ValueError("tempo2 requested but tempo2 is not available") + # hack to set maxobs + maxobs = get_maxobs(reltimfile) + 100 + t2pulsar = t2.tempopulsar(relparfile, reltimfile, maxobs=maxobs, ephem=ephem, clk=clk) + return Tempo2Pulsar( + t2pulsar, + sort=sort, + drop_t2pulsar=drop_t2pulsar, + planets=planets, + par_name=relparfile, + tim_name=reltimfile, + ) + elif timing_package.lower() == "pint": + if pint is None: + raise ValueError("PINT requested but PINT is not available") + if (clk is not None) and (bipm_version is None): + bipm_version = clk.split("(")[1][:-1] + model, toas = get_model_and_toas( + relparfile, reltimfile, ephem=ephem, bipm_version=bipm_version, planets=planets + ) + os.chdir(cwd) + return PintPulsar(toas, model, sort=sort, drop_pintpsr=drop_pintpsr, planets=planets) + else: + raise ValueError(f"Unknown timing package {timing_package}") + finally: os.chdir(cwd) - return Tempo2Pulsar(t2pulsar, sort=sort, drop_t2pulsar=drop_t2pulsar, planets=planets) - - raise ValueError("Unknown arguments {}".format(args)) + raise ValueError("Pulsar (par/tim) not specified in {args} or {kwargs}") From 6703327454403e5a9eb2fa82fff26bec83759feb Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 27 Sep 2023 14:54:55 +0200 Subject: [PATCH 26/80] Moved drop t2pulsar code --- enterprise/pulsar.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/enterprise/pulsar.py b/enterprise/pulsar.py index 7929182c..f8589db4 100644 --- a/enterprise/pulsar.py +++ b/enterprise/pulsar.py @@ -163,17 +163,6 @@ def to_pickle(self, outdir=None): """Save object to pickle file.""" self.drop_not_picklable() - # drop t2pulsar object - if hasattr(self, "t2pulsar"): - del self.t2pulsar - msg = "t2pulsar object cannot be pickled and has been removed." - logger.warning(msg) - - if hasattr(self, "pint_toas"): - del self.pint_toas - del self.model - msg = "pint_toas and model objects cannot be pickled and have been removed." - logger.warning(msg) if outdir is None: outdir = os.getcwd() @@ -316,7 +305,7 @@ def sunssb(self): @property def telescope(self): - """Return telescope vector at all timestamps""" + """Return telescope name at all timestamps""" return self._telescope[self._isort] From d959b7a510396a0270798e0b28aabce08aeaf4d0 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Thu, 28 Sep 2023 09:00:26 +0200 Subject: [PATCH 27/80] Bugfix for bug #358 --- enterprise/signals/gp_signals.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/enterprise/signals/gp_signals.py b/enterprise/signals/gp_signals.py index 443927d9..5634eedd 100644 --- a/enterprise/signals/gp_signals.py +++ b/enterprise/signals/gp_signals.py @@ -851,7 +851,10 @@ def solve(self, right, left_array=None, logdet=False): if right.ndim == 1 and left_array is right: res = right - rNr, logdet_N = self.Nmat.solve(res, left_array=res, logdet=logdet) + if logdet: + rNr, logdet_N = self.Nmat.solve(res, left_array=res, logdet=logdet) + else: + rNr = self.Nmat.solve(res, left_array=res, logdet=logdet) MNr = self.MNr(res) ret = rNr - np.dot(MNr, self.cf(MNr)) From 860e2b6223b1caa51001524a6d9ec5d58b3abd17 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Thu, 28 Sep 2023 13:41:28 +0200 Subject: [PATCH 28/80] Linting --- enterprise/pulsar.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/enterprise/pulsar.py b/enterprise/pulsar.py index f8589db4..9217e5c4 100644 --- a/enterprise/pulsar.py +++ b/enterprise/pulsar.py @@ -311,7 +311,6 @@ def telescope(self): class PintPulsar(BasePulsar): def __init__(self, toas, model, sort=True, drop_pintpsr=True, planets=True): - self._sort = sort self.planets = planets self.name = model.PSR.value @@ -473,7 +472,6 @@ def __init__( par_name=None, tim_name=None, ): - self._sort = sort self.t2pulsar = t2pulsar self.planets = planets From 96af0baac1f402e16839838d48415b37b5bd9257 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Thu, 28 Sep 2023 14:55:26 +0200 Subject: [PATCH 29/80] Added tests for scrambled/unsorted IPTA datasets for EcorrKernelNoise --- tests/test_white_signals.py | 56 ++++++++++++++++++++++++++----------- 1 file changed, 40 insertions(+), 16 deletions(-) diff --git a/tests/test_white_signals.py b/tests/test_white_signals.py index 0736bf71..34bd331c 100644 --- a/tests/test_white_signals.py +++ b/tests/test_white_signals.py @@ -58,7 +58,14 @@ def setUpClass(cls): cls.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim") # IPTA-like pulsar - cls.ipsr = Pulsar(datadir + "/1713.Sep.T2.par", datadir + "/1713.Sep.T2.tim") + cls.ipsr = Pulsar(datadir + "/1713.Sep.T2.par", datadir + "/1713.Sep.T2.tim", sort=True) + + # Same pulsar, but with TOAs shuffled + cls.ipsr_shuffled = Pulsar(datadir + "/1713.Sep.T2.par", datadir + "/1713.Sep.T2.tim", sort=True) + rng = np.random.default_rng(seed=123) + rng.shuffle(cls.ipsr_shuffled._isort) + for ii, p in enumerate(cls.ipsr_shuffled._isort): + cls.ipsr_shuffled._iisort[p] = ii def test_efac(self): """Test that efac signal returns correct covariance.""" @@ -384,8 +391,13 @@ def _ecorr_test(self, method="sparse"): msg = "EFAC/ECORR {} 2D2 solve incorrect.".format(method) assert np.allclose(N.solve(T, left_array=T), np.dot(T.T, wd.solve(T)), rtol=1e-10), msg - def _ecorr_test_ipta(self, method="sparse"): + def _ecorr_test_ipta(self, method="sparse", shuffled=False): """Test of sparse/sherman-morrison ecorr signal and solve methods.""" + if shuffled: + ipsr = self.ipsr_shuffled + else: + ipsr = self.ipsr + selection = Selection(selections.nanograv_backends) efac = parameter.Uniform(0.1, 5) @@ -394,7 +406,7 @@ def _ecorr_test_ipta(self, method="sparse"): ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr, selection=selection, method=method) tm = gp_signals.TimingModel() s = ef + ec + tm - m = s(self.ipsr) + m = s(ipsr) # set parameters efacs = [1.3] @@ -412,18 +424,18 @@ def _ecorr_test_ipta(self, method="sparse"): } # get EFAC Nvec - nvec0 = efacs[0] ** 2 * self.ipsr.toaerrs**2 + nvec0 = efacs[0] ** 2 * ipsr.toaerrs**2 # get the basis flags = ["ASP-L", "ASP-S", "GASP-8", "GASP-L", "GUPPI-8", "GUPPI-L", "PUPPI-L", "PUPPI-S"] - bflags = self.ipsr.backend_flags + bflags = ipsr.backend_flags Umats = [] for flag in np.unique(bflags): if flag in flags: mask = bflags == flag - Umats.append(utils.create_quantization_matrix(self.ipsr.toas[mask], nmin=2)[0]) + Umats.append(utils.create_quantization_matrix(ipsr.toas[mask], nmin=2)[0]) nepoch = sum(U.shape[1] for U in Umats) - U = np.zeros((len(self.ipsr.toas), nepoch)) + U = np.zeros((len(ipsr.toas), nepoch)) jvec = np.zeros(nepoch) netot, ct = 0, 0 for flag in np.unique(bflags): @@ -441,22 +453,22 @@ def _ecorr_test_ipta(self, method="sparse"): # test msg = "EFAC/ECORR {} logdet incorrect.".format(method) N = m.get_ndiag(params) - assert np.allclose(N.solve(self.ipsr.residuals, logdet=True)[1], wd.logdet(), rtol=1e-8), msg + assert np.allclose(N.solve(ipsr.residuals, logdet=True)[1], wd.logdet(), rtol=1e-8), msg msg = "EFAC/ECORR {} D1 solve incorrect.".format(method) - assert np.allclose(N.solve(self.ipsr.residuals), wd.solve(self.ipsr.residuals), rtol=1e-8), msg + assert np.allclose(N.solve(ipsr.residuals), wd.solve(ipsr.residuals), rtol=1e-8), msg msg = "EFAC/ECORR {} 1D1 solve incorrect.".format(method) assert np.allclose( - N.solve(self.ipsr.residuals, left_array=self.ipsr.residuals), - np.dot(self.ipsr.residuals, wd.solve(self.ipsr.residuals)), + N.solve(ipsr.residuals, left_array=ipsr.residuals), + np.dot(ipsr.residuals, wd.solve(ipsr.residuals)), rtol=1e-8, ), msg msg = "EFAC/ECORR {} 2D1 solve incorrect.".format(method) T = m.get_basis() assert np.allclose( - N.solve(self.ipsr.residuals, left_array=T), np.dot(T.T, wd.solve(self.ipsr.residuals)), rtol=1e-8 + N.solve(ipsr.residuals, left_array=T), np.dot(T.T, wd.solve(ipsr.residuals)), rtol=1e-8 ), msg msg = "EFAC/ECORR {} 2D2 solve incorrect.".format(method) @@ -476,15 +488,18 @@ def test_ecorr_block(self): def test_ecorr_sparse_ipta(self): """Test of sparse ecorr signal and solve methods.""" - self._ecorr_test_ipta(method="sparse") + self._ecorr_test_ipta(method="sparse", shuffled=False) + self._ecorr_test_ipta(method="sparse", shuffled=True) def test_ecorr_sherman_morrison_ipta(self): """Test of sherman-morrison ecorr signal and solve methods.""" - self._ecorr_test_ipta(method="sherman-morrison") + self._ecorr_test_ipta(method="sherman-morrison", shuffled=False) + self._ecorr_test_ipta(method="sherman-morrison", shuffled=True) def test_ecorr_block_ipta(self): """Test of block matrix ecorr signal and solve methods.""" - self._ecorr_test_ipta(method="block") + self._ecorr_test_ipta(method="block", shuffled=False) + self._ecorr_test_ipta(method="block", shuffled=True) class TestWhiteSignalsPint(TestWhiteSignals): @@ -502,5 +517,14 @@ def setUpClass(cls): # IPTA-like pulsar cls.ipsr = Pulsar( - datadir + "/1713.Sep.T2.par", datadir + "/1713.Sep.T2.tim", ephem="DE421", timint_package="pint" + datadir + "/1713.Sep.T2.par", datadir + "/1713.Sep.T2.tim", ephem="DE421", timint_package="pint", sort=True + ) + + # Same pulsar, but with TOAs shuffled + cls.ipsr_shuffled = Pulsar( + datadir + "/1713.Sep.T2.par", datadir + "/1713.Sep.T2.tim", ephem="DE421", timint_package="pint", sort=True ) + rng = np.random.default_rng(seed=123) + rng.shuffle(cls.ipsr_shuffled._isort) + for ii, p in enumerate(cls.ipsr_shuffled._isort): + cls.ipsr_shuffled._iisort[p] = ii \ No newline at end of file From e1f09903e5c6b03e9012caac3413a78e33351e51 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Fri, 29 Sep 2023 07:35:33 +0200 Subject: [PATCH 30/80] Changed logdet=True in one call in MarginalizingNmat, so we don't need to include cumbersome tests to not decrease coverage. Is never called anyway, and it doesn't slow things down. --- enterprise/signals/gp_signals.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/enterprise/signals/gp_signals.py b/enterprise/signals/gp_signals.py index 5634eedd..c6f03126 100644 --- a/enterprise/signals/gp_signals.py +++ b/enterprise/signals/gp_signals.py @@ -851,10 +851,7 @@ def solve(self, right, left_array=None, logdet=False): if right.ndim == 1 and left_array is right: res = right - if logdet: - rNr, logdet_N = self.Nmat.solve(res, left_array=res, logdet=logdet) - else: - rNr = self.Nmat.solve(res, left_array=res, logdet=logdet) + rNr, logdet_N = self.Nmat.solve(res, left_array=res, logdet=True) MNr = self.MNr(res) ret = rNr - np.dot(MNr, self.cf(MNr)) From baf1f8df920bab5cca5b725ac651f421c773a6bb Mon Sep 17 00:00:00 2001 From: Aaron Date: Sun, 1 Oct 2023 08:31:06 -0700 Subject: [PATCH 31/80] Add Python 3.11, Remove Python 3.7 --- .github/workflows/ci_test.yml | 6 +++--- CONTRIBUTING.rst | 4 ++-- README.md | 4 ++-- docs/index.rst | 2 +- setup.py | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 0cece1bd..d4cfc02a 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -17,7 +17,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest] - python-version: ['3.7', '3.8', '3.9', '3.10'] + python-version: ['3.8', '3.9', '3.10', '3.11'] steps: - name: Checkout repository @@ -69,7 +69,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: "3.7" + python-version: "3.8" - name: Install non-python dependencies on linux run: | sudo apt-get install libsuitesparse-dev @@ -115,7 +115,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.8' - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index ad6c38ec..801e4f37 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -114,8 +114,8 @@ Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring. -3. The pull request should work for Python 2.6, 2.7, 3.3, 3.4 and 3.5, and for PyPy. Check - https://travis-ci.org/nanograv/enterprise/pull_requests +3. The pull request should work for Python 3.8, 3.9, 3.10, and 3.11. Check + https://github.com/nanograv/enterprise/pulls and make sure that the tests pass for all supported Python versions. Tips diff --git a/README.md b/README.md index 6c71c471..965396c1 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ [![Build Status](https://github.com/nanograv/enterprise/workflows/CI-Tests/badge.svg)](https://github.com/nanograv/enterprise/actions) [![Documentation Status](https://readthedocs.org/projects/enterprise/badge/?version=latest)](https://enterprise.readthedocs.io/en/latest/?badge=latest) [![Test Coverage](https://codecov.io/gh/nanograv/enterprise/branch/master/graph/badge.svg?token=YXSX3293VF)](https://codecov.io/gh/nanograv/enterprise) -![Python Versions](https://img.shields.io/badge/python-3.7%2C%203.8%2C%203.9%2C%203.10-blue.svg) +![Python Versions](https://img.shields.io/badge/python-3.8%2C%203.9%2C%203.10%2C%203.11-blue.svg) [![Zenodo DOI 4059815](https://zenodo.org/badge/DOI/10.5281/zenodo.4059815.svg)](https://doi.org/10.5281/zenodo.4059815) @@ -14,7 +14,7 @@ Inference SuitE) is a pulsar timing analysis code, aimed at noise analysis, gravitational-wave searches, and timing model analysis. - Note: `enterprise>=3.0` does not support Python2.7. You must use - Python \>= 3.7. + Python \>= 3.8. - Free software: MIT license - Documentation: . diff --git a/docs/index.rst b/docs/index.rst index 223d63dd..c778e692 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -15,7 +15,7 @@ Welcome to enterprise's documentation! .. image:: https://codecov.io/gh/nanograv/enterprise/branch/master/graph/badge.svg?token=YXSX3293VF :target: https://codecov.io/gh/nanograv/enterprise :alt: Test Coverage -.. image:: https://img.shields.io/badge/python-3.6%2C%203.7%2C%203.8-blue.svg +.. image:: https://img.shields.io/badge/python-3.8%2C%203.9%2C%203.10%2C%203.11-blue.svg :alt: Python Versions .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.4059815.svg diff --git a/setup.py b/setup.py index 2e5993a3..1e068864 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ package_dir={"enterprise": "enterprise"}, include_package_data=True, package_data={"enterprise": ["datafiles/*", "datafiles/ephemeris/*", "datafiles/ng9/*", "datafiles/mdc_open1/*"]}, - python_requires=">=3.7, <3.11", + python_requires=">=3.8, <3.12", install_requires=requirements, license="MIT license", zip_safe=False, @@ -42,10 +42,10 @@ "License :: OSI Approved :: MIT License", "Natural Language :: English", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", "Topic :: Scientific/Engineering :: Astronomy", "Topic :: Scientific/Engineering :: Physics", ], From b852ee7523f87fca9db1a98da1a3d19b5cad7d15 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Mon, 6 Nov 2023 21:06:16 +0100 Subject: [PATCH 32/80] Linting of test_white_signals.py --- tests/test_white_signals.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/test_white_signals.py b/tests/test_white_signals.py index 34bd331c..6d15aeb1 100644 --- a/tests/test_white_signals.py +++ b/tests/test_white_signals.py @@ -467,9 +467,7 @@ def _ecorr_test_ipta(self, method="sparse", shuffled=False): msg = "EFAC/ECORR {} 2D1 solve incorrect.".format(method) T = m.get_basis() - assert np.allclose( - N.solve(ipsr.residuals, left_array=T), np.dot(T.T, wd.solve(ipsr.residuals)), rtol=1e-8 - ), msg + assert np.allclose(N.solve(ipsr.residuals, left_array=T), np.dot(T.T, wd.solve(ipsr.residuals)), rtol=1e-8), msg msg = "EFAC/ECORR {} 2D2 solve incorrect.".format(method) assert np.allclose(N.solve(T, left_array=T), np.dot(T.T, wd.solve(T)), rtol=1e-8), msg @@ -527,4 +525,4 @@ def setUpClass(cls): rng = np.random.default_rng(seed=123) rng.shuffle(cls.ipsr_shuffled._isort) for ii, p in enumerate(cls.ipsr_shuffled._isort): - cls.ipsr_shuffled._iisort[p] = ii \ No newline at end of file + cls.ipsr_shuffled._iisort[p] = ii From 15ec4e0ad75052f123781835acf665f9cfc5f05e Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Tue, 7 Nov 2023 09:24:08 +0100 Subject: [PATCH 33/80] Provided extra unit tests for the PPF functionality and parameters in general --- enterprise/signals/parameter.py | 6 ++--- tests/test_parameter.py | 42 ++++++++++++++++++++++++++++++++- 2 files changed, 44 insertions(+), 4 deletions(-) diff --git a/enterprise/signals/parameter.py b/enterprise/signals/parameter.py index 46ac90a6..c1c835fc 100644 --- a/enterprise/signals/parameter.py +++ b/enterprise/signals/parameter.py @@ -58,6 +58,7 @@ def __init__(self, name): self.type = self.__class__.__name__.lower() def get_logpdf(self, value=None, **kwargs): + # RvH: This exception cannot be triggered if not isinstance(self, Parameter): raise TypeError("You can only call get_logpdf() on an " "instantiated (named) Parameter.") @@ -72,6 +73,7 @@ def get_logpdf(self, value=None, **kwargs): return logpdf if self._size is None else np.sum(logpdf) def get_pdf(self, value=None, **kwargs): + # RvH: This exception cannot be triggered if not isinstance(self, Parameter): raise TypeError("You can only call get_pdf() on an " "instantiated (named) Parameter.") @@ -93,6 +95,7 @@ def sample(self, **kwargs): raise AttributeError("No sampler was provided for this Parameter.") else: if self.name in kwargs: + # RvH: This exception cannot be triggered raise ValueError("You shouldn't give me my value when you're sampling me.!") if hasattr(self, "prior"): @@ -101,9 +104,6 @@ def sample(self, **kwargs): return self.logprior(func=self._sampler, size=self._size, **kwargs) def get_ppf(self, value=None, **kwargs): - if not isinstance(self, Parameter): - raise TypeError("You can only call get_pdf() on an " "instantiated (named) Parameter.") - if self.ppf is None: raise NotImplementedError("No ppf was implemented for this Parameter.") diff --git a/tests/test_parameter.py b/tests/test_parameter.py index 9b60b36b..b193798e 100644 --- a/tests/test_parameter.py +++ b/tests/test_parameter.py @@ -13,12 +13,52 @@ import numpy as np import scipy.stats +from enterprise.signals.parameter import Parameter from enterprise.signals.parameter import UniformPrior, UniformSampler, Uniform, UniformPPF from enterprise.signals.parameter import NormalPrior, NormalSampler, Normal, NormalPPF from enterprise.signals.parameter import TruncNormalPrior, TruncNormalSampler, TruncNormal from enterprise.signals.parameter import LinearExpPrior, LinearExpSampler, LinearExpPPF +class TestParameterExceptions(unittest.TestCase): + + def test_missing_prior_attribute_error(self): + class MissingPriorParameter(Parameter): + pass # Do not define _prior or _logprior + + with self.assertRaises(AttributeError): + MissingPriorParameter("test") + + def test_methods_called_on_class_type_error(self): + UniformClass = Uniform(pmin=0, pmax=1) + with self.assertRaises(TypeError): + UniformClass.get_logpdf() + + def test_missing_sampler_attribute_error(self): + class MissingSamplerParameter(Parameter): + _prior = staticmethod(lambda x: x) + + def __init__(self, name): + super().__init__(name) + self._sampler = None + + missing_sampler_param = MissingSamplerParameter("test") + with self.assertRaises(AttributeError): + missing_sampler_param.sample() + + def test_missing_ppf_not_implemented_error(self): + class MissingPPFParameter(Parameter): + _prior = staticmethod(lambda x: x) + + def __init__(self, name): + super().__init__(name) + self.ppf = None + + missing_ppf_param = MissingPPFParameter("test") + with self.assertRaises(NotImplementedError): + missing_ppf_param.get_ppf() + + class TestParameter(unittest.TestCase): def test_uniform(self): """Test Uniform parameter prior and sampler for various combinations of scalar and vector arguments.""" @@ -232,4 +272,4 @@ def test_metaparam(self): paramA = TruncNormal(mu, sigma, pmin, pmax)("A") xs = np.array([-3.5, 3.5]) - assert np.alltrue(paramA.get_pdf(xs, mu=mu.sample()) == zeros), msg4 + assert np.all(paramA.get_pdf(xs, mu=mu.sample()) == zeros), msg4 From 11ffcc09feed715aab2780a748bb9e8925a4a950 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Tue, 7 Nov 2023 09:33:25 +0100 Subject: [PATCH 34/80] Linting: extra line of whitespace --- tests/test_parameter.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test_parameter.py b/tests/test_parameter.py index b193798e..85f72904 100644 --- a/tests/test_parameter.py +++ b/tests/test_parameter.py @@ -21,7 +21,6 @@ class TestParameterExceptions(unittest.TestCase): - def test_missing_prior_attribute_error(self): class MissingPriorParameter(Parameter): pass # Do not define _prior or _logprior From dd2628d2fe3befeb412eb62becaa2f680b02ccd5 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Tue, 7 Nov 2023 09:49:14 +0100 Subject: [PATCH 35/80] Updated the ci_tests.yml to include the dev branch --- .github/workflows/ci_test.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 0cece1bd..68008585 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -2,9 +2,9 @@ name: enterprise CI targets on: push: - branches: [ master ] + branches: [ master, dev ] pull_request: - branches: [ master ] + branches: [ master, dev ] release: types: - published @@ -130,4 +130,4 @@ jobs: TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} run: | - twine upload dist/* \ No newline at end of file + twine upload dist/* From c25f0a2e12ab17ecc7919855cdbbb2d695884580 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Tue, 7 Nov 2023 10:36:01 +0100 Subject: [PATCH 36/80] Added more unit tests for the parameter module --- tests/test_parameter.py | 49 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 47 insertions(+), 2 deletions(-) diff --git a/tests/test_parameter.py b/tests/test_parameter.py index 85f72904..c549ea27 100644 --- a/tests/test_parameter.py +++ b/tests/test_parameter.py @@ -13,11 +13,11 @@ import numpy as np import scipy.stats -from enterprise.signals.parameter import Parameter +from enterprise.signals.parameter import Parameter, UserParameter, Function from enterprise.signals.parameter import UniformPrior, UniformSampler, Uniform, UniformPPF from enterprise.signals.parameter import NormalPrior, NormalSampler, Normal, NormalPPF from enterprise.signals.parameter import TruncNormalPrior, TruncNormalSampler, TruncNormal -from enterprise.signals.parameter import LinearExpPrior, LinearExpSampler, LinearExpPPF +from enterprise.signals.parameter import LinearExpPrior, LinearExpSampler, LinearExpPPF, LinearExp class TestParameterExceptions(unittest.TestCase): @@ -57,6 +57,10 @@ def __init__(self, name): with self.assertRaises(NotImplementedError): missing_ppf_param.get_ppf() + def test_2D_NormalPPF_error(self): + with self.assertRaises(NotImplementedError): + NormalPPF(0.0, 1.0, np.array([[1.0, 1.0], [1.0, 1.0]])) + class TestParameter(unittest.TestCase): def test_uniform(self): @@ -77,6 +81,13 @@ def test_uniform(self): msg3 = "Enterprise and scipy PPF do not match" assert np.allclose(UniformPPF(x, p_min, p_max), scipy.stats.uniform.ppf(x, p_min, p_max - p_min)), msg3 + # As parameter dictionary or value for Uniform instantiated object + unipar = Uniform(pmin=p_min, pmax=p_max)("testpar") + assert np.allclose( + unipar.get_ppf(params=dict(testpar=x)), scipy.stats.uniform.ppf(x, p_min, p_max - p_min) + ), msg3 + assert np.allclose(unipar.get_ppf(x), scipy.stats.uniform.ppf(x, p_min, p_max - p_min)), msg3 + # vector argument x = np.array([0.5, 0.1]) assert np.allclose(UniformPrior(x, p_min, p_max), scipy.stats.uniform.pdf(x, p_min, p_max - p_min)), msg1 @@ -101,6 +112,33 @@ def test_uniform(self): assert np.all((p_min < x1) & (x1 < p_max)), msg2 assert x1.shape == (3, 2), msg2 + def test_userparameter(self): + """Test User-defined parameter prior, sampler, and ppf""" + + # scalar + p_min, p_max = 0.2, 1.1 + x = 0.5 + + # As parameter dictionary or value for Uniform instantiated object + unipar = Uniform(pmin=p_min, pmax=p_max)("testpar") + unipar = UserParameter( + prior=Function(UniformPrior, pmin=p_min, pmax=p_max), + sampler=staticmethod(UniformSampler), + ppf=Function(UniformPPF, pmin=p_min, pmax=p_max), + )("testpar") + + msg1 = "Enterprise and scipy prior do not match" + assert np.allclose( + unipar.get_pdf(params=dict(testpar=x)), scipy.stats.uniform.pdf(x, p_min, p_max - p_min) + ), msg1 + assert np.allclose(unipar.get_pdf(x), scipy.stats.uniform.pdf(x, p_min, p_max - p_min)), msg1 + + msg2 = "Enterprise and scipy PPF do not match" + assert np.allclose( + unipar.get_ppf(params=dict(testpar=x)), scipy.stats.uniform.ppf(x, p_min, p_max - p_min) + ), msg2 + assert np.allclose(unipar.get_ppf(x), scipy.stats.uniform.ppf(x, p_min, p_max - p_min)), msg2 + def test_linearexp(self): """Test LinearExp parameter prior and sampler.""" @@ -120,6 +158,13 @@ def test_linearexp(self): LinearExpPPF(x, p_min, p_max), np.log10(10**p_min + x * (10**p_max - 10**p_min)) ), msg1c + # As parameter dictionary or value for Uniform instantiated object + lepar = LinearExp(pmin=p_min, pmax=p_max)("testpar") + assert np.allclose( + lepar.get_ppf(params=dict(testpar=x)), np.log10(10**p_min + x * (10**p_max - 10**p_min)) + ), msg1 + assert np.allclose(lepar.get_ppf(x), np.log10(10**p_min + x * (10**p_max - 10**p_min))), msg1 + # vector argument x = np.array([0, 1.5, 2.5]) msg2 = "Vector-argument prior does not match" From 7f6795acac58a1e5340bcb219fbb4cc2a38fb25a Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 8 Nov 2023 20:28:38 +0100 Subject: [PATCH 37/80] Updated version of fastshermanmorrison-pulsar to 0.4.0, which has binary builds --- requirements_dev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements_dev.txt b/requirements_dev.txt index d173ce59..e3ba1966 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -15,4 +15,4 @@ pytest-cov>=2.7.0 coverage-conditional-plugin>=0.4.0 jupyter>=1.0.0 build==0.3.1.post1 -fastshermanmorrison-pulsar>=0.1.0 +fastshermanmorrison-pulsar>=0.4.0 From 863a3887dff3c3fbc54a12a1b1d549504653b8a0 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Thu, 9 Nov 2023 14:17:09 +0100 Subject: [PATCH 38/80] Fixed typo --- enterprise/pulsar.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise/pulsar.py b/enterprise/pulsar.py index 9217e5c4..489a94eb 100644 --- a/enterprise/pulsar.py +++ b/enterprise/pulsar.py @@ -688,7 +688,7 @@ def Pulsar(*args, **kwargs): # Change directory to the base directory of the tim-file to deal with # INCLUDE statements in the tim-file os.chdir(dirname) - if timing_package.lower == "tempo2": + if timing_package.lower() == "tempo2": if t2 is None: raise ValueError("tempo2 requested but tempo2 is not available") # hack to set maxobs From 74ec30f989774ce7deecb872232559829f8bca81 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Thu, 9 Nov 2023 16:29:26 +0100 Subject: [PATCH 39/80] Added the necessary unit tests --- enterprise/pulsar.py | 14 +++++++++----- tests/test_pulsar.py | 43 +++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 50 insertions(+), 7 deletions(-) diff --git a/enterprise/pulsar.py b/enterprise/pulsar.py index 489a94eb..4a79c037 100644 --- a/enterprise/pulsar.py +++ b/enterprise/pulsar.py @@ -518,13 +518,17 @@ def __init__( self.sort_data() if drop_t2pulsar: - del self.t2pulsar + self.drop_tempopsr() else: if par_name is not None and os.path.exists(par_name): self.parfile = open(par_name).read() if tim_name is not None and os.path.exists(tim_name): self.timfile = open(tim_name).read() + def drop_tempopsr(self): + with contextlib.suppress(NameError): + del self.t2pulsar + # gather DM/DMX information if available def _set_dm(self, t2pulsar): pars = t2pulsar.pars(which="set") @@ -677,9 +681,9 @@ def Pulsar(*args, **kwargs): if timing_package is None: if t2 is not None: timing_package = "tempo2" - elif pint is not None: + elif pint is not None: # pragma: no cover timing_package = "pint" - else: + else: # pragma: no cover raise ValueError("No timing package available with which to load a pulsar") # get current directory @@ -689,7 +693,7 @@ def Pulsar(*args, **kwargs): # INCLUDE statements in the tim-file os.chdir(dirname) if timing_package.lower() == "tempo2": - if t2 is None: + if t2 is None: # pragma: no cover raise ValueError("tempo2 requested but tempo2 is not available") # hack to set maxobs maxobs = get_maxobs(reltimfile) + 100 @@ -703,7 +707,7 @@ def Pulsar(*args, **kwargs): tim_name=reltimfile, ) elif timing_package.lower() == "pint": - if pint is None: + if pint is None: # pragma: no cover raise ValueError("PINT requested but PINT is not available") if (clk is not None) and (bipm_version is None): bipm_version = clk.split("(")[1][:-1] diff --git a/tests/test_pulsar.py b/tests/test_pulsar.py index 6ab617e3..9fb1d0fc 100644 --- a/tests/test_pulsar.py +++ b/tests/test_pulsar.py @@ -25,18 +25,34 @@ from pint.models import get_model_and_toas +class TestTimingPackageExceptions(unittest.TestCase): + def test_unkown_timing_package(self): + # initialize Pulsar class + with self.assertRaises(ValueError): + self.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", timing_package='foobar') + + def test_clk_but_no_bipm(self): + self.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", clk='TT(BIPM2020)', timing_package='pint') + class TestPulsar(unittest.TestCase): @classmethod def setUpClass(cls): """Setup the Pulsar object.""" # initialize Pulsar class - cls.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim") + cls.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", drop_t2pulsar=True) + cls.psr_nodrop = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", drop_t2pulsar=False) @classmethod def tearDownClass(cls): shutil.rmtree("pickle_dir", ignore_errors=True) + def test_droppsr(self): + self.psr_nodrop.drop_tempopsr() + + with self.assertRaises(AttributeError): + _ = self.psr.t2pulsar + def test_residuals(self): """Check Residual shape.""" @@ -195,6 +211,14 @@ def setUpClass(cls): # initialize Pulsar class cls.psr = Pulsar( + datadir + "/B1855+09_NANOGrav_9yv1.gls.par", + datadir + "/B1855+09_NANOGrav_9yv1.tim", + ephem="DE430", + drop_pintpsr=True, + timing_package="pint", + ) + + cls.psr_nodrop = Pulsar( datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", ephem="DE430", @@ -202,6 +226,21 @@ def setUpClass(cls): timing_package="pint", ) + def test_droppsr(self): + self.psr_nodrop.drop_pintpsr() + + with self.assertRaises(AttributeError): + _ = self.psr_nodrop.model + + with self.assertRaises(AttributeError): + _ = self.psr_nodrop.parfile + + with self.assertRaises(AttributeError): + _ = self.psr_nodrop.pint_toas + + with self.assertRaises(AttributeError): + _ = self.psr_nodrop.timfile + def test_deflate_inflate(self): pass @@ -225,7 +264,7 @@ def test_no_planet(self): model, toas = get_model_and_toas( datadir + "/J0030+0451_NANOGrav_9yv1.gls.par", datadir + "/J0030+0451_NANOGrav_9yv1.tim", planets=False ) - Pulsar(model, toas, planets=True) + Pulsar(model, toas, planets=True, drop_pintpsr=False) msg = "obs_earth_pos is not in toas.table.colnames. Either " msg += "`planet` flag is not True in `toas` or further Pint " msg += "development to add additional planets is needed." From 8b7565068f14c929947cc7244c553909914fce63 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Thu, 9 Nov 2023 16:37:12 +0100 Subject: [PATCH 40/80] Linting --- enterprise/pulsar.py | 6 +++--- tests/test_pulsar.py | 22 ++++++++++++++++++---- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/enterprise/pulsar.py b/enterprise/pulsar.py index 4a79c037..1af00e18 100644 --- a/enterprise/pulsar.py +++ b/enterprise/pulsar.py @@ -681,9 +681,9 @@ def Pulsar(*args, **kwargs): if timing_package is None: if t2 is not None: timing_package = "tempo2" - elif pint is not None: # pragma: no cover + elif pint is not None: # pragma: no cover timing_package = "pint" - else: # pragma: no cover + else: # pragma: no cover raise ValueError("No timing package available with which to load a pulsar") # get current directory @@ -707,7 +707,7 @@ def Pulsar(*args, **kwargs): tim_name=reltimfile, ) elif timing_package.lower() == "pint": - if pint is None: # pragma: no cover + if pint is None: # pragma: no cover raise ValueError("PINT requested but PINT is not available") if (clk is not None) and (bipm_version is None): bipm_version = clk.split("(")[1][:-1] diff --git a/tests/test_pulsar.py b/tests/test_pulsar.py index 9fb1d0fc..ce8cde49 100644 --- a/tests/test_pulsar.py +++ b/tests/test_pulsar.py @@ -29,10 +29,20 @@ class TestTimingPackageExceptions(unittest.TestCase): def test_unkown_timing_package(self): # initialize Pulsar class with self.assertRaises(ValueError): - self.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", timing_package='foobar') + self.psr = Pulsar( + datadir + "/B1855+09_NANOGrav_9yv1.gls.par", + datadir + "/B1855+09_NANOGrav_9yv1.tim", + timing_package="foobar", + ) def test_clk_but_no_bipm(self): - self.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", clk='TT(BIPM2020)', timing_package='pint') + self.psr = Pulsar( + datadir + "/B1855+09_NANOGrav_9yv1.gls.par", + datadir + "/B1855+09_NANOGrav_9yv1.tim", + clk="TT(BIPM2020)", + timing_package="pint", + ) + class TestPulsar(unittest.TestCase): @classmethod @@ -40,8 +50,12 @@ def setUpClass(cls): """Setup the Pulsar object.""" # initialize Pulsar class - cls.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", drop_t2pulsar=True) - cls.psr_nodrop = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", drop_t2pulsar=False) + cls.psr = Pulsar( + datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", drop_t2pulsar=True + ) + cls.psr_nodrop = Pulsar( + datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", drop_t2pulsar=False + ) @classmethod def tearDownClass(cls): From 4b94571dc3aaca4730909bd807736054b45b7fb9 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Thu, 9 Nov 2023 17:28:04 +0100 Subject: [PATCH 41/80] Added extra unit test --- tests/test_pulsar.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/test_pulsar.py b/tests/test_pulsar.py index ce8cde49..25bfc022 100644 --- a/tests/test_pulsar.py +++ b/tests/test_pulsar.py @@ -255,6 +255,15 @@ def test_droppsr(self): with self.assertRaises(AttributeError): _ = self.psr_nodrop.timfile + def test_drop_not_picklable(self): + self.psr_nodrop.drop_not_picklable() + + with self.assertRaises(AttributeError): + _ = self.psr_nodrop.model + + with self.assertRaises(AttributeError): + _ = self.psr_nodrop.pint_toas + def test_deflate_inflate(self): pass From 87813ee2dd43fabfbcb2a7c4b4d20d4914c019d1 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Thu, 9 Nov 2023 18:52:00 +0100 Subject: [PATCH 42/80] Updated the test_pulsar.py --- tests/test_pulsar.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/tests/test_pulsar.py b/tests/test_pulsar.py index 25bfc022..6b490070 100644 --- a/tests/test_pulsar.py +++ b/tests/test_pulsar.py @@ -53,15 +53,16 @@ def setUpClass(cls): cls.psr = Pulsar( datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", drop_t2pulsar=True ) - cls.psr_nodrop = Pulsar( - datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", drop_t2pulsar=False - ) @classmethod def tearDownClass(cls): shutil.rmtree("pickle_dir", ignore_errors=True) def test_droppsr(self): + self.psr_nodrop = Pulsar( + datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", drop_t2pulsar=False + ) + self.psr_nodrop.drop_tempopsr() with self.assertRaises(AttributeError): @@ -232,7 +233,8 @@ def setUpClass(cls): timing_package="pint", ) - cls.psr_nodrop = Pulsar( + def test_droppsr(self): + self.psr_nodrop = Pulsar( datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim", ephem="DE430", @@ -240,7 +242,6 @@ def setUpClass(cls): timing_package="pint", ) - def test_droppsr(self): self.psr_nodrop.drop_pintpsr() with self.assertRaises(AttributeError): @@ -256,6 +257,14 @@ def test_droppsr(self): _ = self.psr_nodrop.timfile def test_drop_not_picklable(self): + self.psr_nodrop = Pulsar( + datadir + "/B1855+09_NANOGrav_9yv1.gls.par", + datadir + "/B1855+09_NANOGrav_9yv1.tim", + ephem="DE430", + drop_pintpsr=False, + timing_package="pint", + ) + self.psr_nodrop.drop_not_picklable() with self.assertRaises(AttributeError): From 33fc82b9f1cbe1c1e8ff366235d7cfd5f88d7f24 Mon Sep 17 00:00:00 2001 From: "Paul T. Baker" Date: Wed, 15 Nov 2023 12:02:30 -0500 Subject: [PATCH 43/80] add conda stuff to docs (#336) * add conda stuff to docs * add conda stuff to docs * add flake8 to dev build * split out runtime deps from development deps in example * remove make lint from instructions (already run in make test) * link to apple silicon tips * update conda install instructions add test dependencies to conda env initialization following @aarchiba's suggestion --------- Co-authored-by: Rutger van Haasteren <3092444+vhaasteren@users.noreply.github.com> --- CONTRIBUTING.rst | 55 ++++++++++++++++++++++++++++++++++--------- docs/installation.rst | 19 +++++++++++++++ 2 files changed, 63 insertions(+), 11 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 801e4f37..30513938 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -57,12 +57,38 @@ If you are proposing a feature: Get Started! ------------ -Ready to contribute? Here's how to set up `enterprise` for local development. +Ready to contribute? Here's how to set up ``enterprise`` for local development. -1. Fork the `enterprise` repo on GitHub. +Install the dependencies +~~~~~~~~~~~~~~~~~~~~~~~~ + +``enterprise`` relies on a lot of other software to function. +If you use the Anaconda distribution of Python, you can get all of this software using ``conda``. +First, you install the latest stable version of ``enterprise``, which will come with all of the dependencies. +Then you remove ``enterprise`` leaving everything else intact. +This way you can use your development version of ``enterprise`` instead of the stable version. +We will also need some additional software that is required to run the tests. + +Start with a virtual environment with the extra dependencies required for running tests. In this case it is called ``ent_dev``:: + + $ conda create -n ent_dev -y -c conda-forge python=3.9 black=22.3.0 flake8 sphinx_rtd_theme pytest-cov + $ conda activate ent_dev + +Now install everything else by running the commands:: + + $ conda install -c conda-forge enterprise-pulsar + $ conda remove enterprise-pulsar --force + $ pip install coverage-conditional-plugin + + +Get the enterprise source code and get to work! +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +1. Fork the ``enterprise`` repo on GitHub. 2. Clone your fork locally:: $ git clone git@github.com:your_name_here/enterprise.git + $ cd enterprise/ 3. Set `enterprise/master` as upstream remote:: @@ -73,17 +99,25 @@ Ready to contribute? Here's how to set up `enterprise` for local development. $ git pull upstream master 4. This is how you set up your fork for local development: - + .. note:: - You will need to have ``tempo`` and ``suitesparse`` installed before - running the commands below. + You will need to have ``tempo2`` and ``suitesparse`` installed before + running these commands. - :: + If you installed the dependencies via conda, you are good to go! - $ cd enterprise/ + If you set up a ``conda`` virtual environment with the dependencies already, + you can add your local fork of ``enterprise`` to it by running:: + + $ pip install -e . + + If you manually installed the dependencies, this will make and activate a + Python3 virtual env with your local fork of ``enterprise``:: + $ make init $ source .enterprise/bin/activate + 5. Create a branch for local development:: $ git checkout -b name-of-your-bugfix-or-feature @@ -93,7 +127,6 @@ Ready to contribute? Here's how to set up `enterprise` for local development. 6. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox (tox not implemented yet). Also check that any new docs are formatted correctly:: $ make test - $ make lint $ make docs To get flake8 and tox, just pip install them into your virtualenv. @@ -114,9 +147,8 @@ Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring. -3. The pull request should work for Python 3.8, 3.9, 3.10, and 3.11. Check - https://github.com/nanograv/enterprise/pulls - and make sure that the tests pass for all supported Python versions. +3. The pull request should work for all supported versions of Python: 3.8, 3.9, 3.10, and 3.11. You + can see the progress of the tests in the `Checks` tab of your GitHub pull request. Tips ---- @@ -130,3 +162,4 @@ To track and checkout another user's branch:: $ git remote add other-user-username https://github.com/other-user-username/enterprise.git $ git fetch other-user-username $ git checkout --track -b branch-name other-user-username/branch-name + diff --git a/docs/installation.rst b/docs/installation.rst index 775ccaf3..d9716885 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -23,6 +23,17 @@ Installation .. .. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ +Using conda +----------- + +``enterprise`` is available via conda-forge as `enterprise-pulsar `_. +If you use the Anaconda distribution of Python, we strongly recommend installing via the ``conda`` command: + +.. code-block:: console + + $ conda install -c conda-forge enterprise-pulsar + + From sources ------------ @@ -57,3 +68,11 @@ If you want to run tests or do any other development then also run: .. _Github repo: https://github.com/nanograv/enterprise .. _tarball: https://github.com/nanograv/enterprise/tarball/master + + +Tips +---- + +If you have a computer with an Apple Silicon chip, see `these instructions`_ on how to install Apple Intel packages using Rosetta. + +.. _these instructions: https://conda-forge.org/docs/user/tipsandtricks.html#installing-apple-intel-packages-on-apple-silicon From 8496cef7863a8f9adc4f356a82b748157d4e52e0 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Fri, 17 Nov 2023 09:50:36 +0100 Subject: [PATCH 44/80] Added tests for indices_from_slice and quant2ind --- tests/test_utils.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tests/test_utils.py b/tests/test_utils.py index f8268cf3..8832a696 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -128,6 +128,27 @@ def test_quantization_matrix(self): assert U.shape == (4005, 235), msg1 assert all(np.sum(U, axis=0) > 1), msg2 + inds = utils.quant2ind(U, as_slice=False) + slcs = utils.quant2ind(U, as_slice=True) + inds_check = [utils.indices_from_slice(slc) for slc in slcs] + + msg3 = "Quantization Matrix slice not equal to quantization indices" + for ind, ind_c in zip(inds, inds_check): + assert np.all(ind == ind_c), msg3 + + def test_indices_from_slice(self): + """Test conversion of slices to numpy indices""" + ind_np = np.array([2, 4, 6, 8]) + ind_np_check = utils.indices_from_slice(ind_np) + + msg1 = "Numpy indices not left as-is by indices_from_slice" + assert np.all(ind_np == ind_np_check) + + slc = slice(2, 10, 2) + ind_np_check = utils.indices_from_slice(slc) + msg2 = "Slice not converted properly by indices_from_slice" + assert np.all(ind_np == ind_np_check), msg2 + def test_psd(self): """Test PSD functions.""" Tmax = self.psr.toas.max() - self.psr.toas.min() From b9a3bccde277886ab8c42549d729b0a36b781d53 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Fri, 17 Nov 2023 10:17:49 +0100 Subject: [PATCH 45/80] Added error message --- tests/test_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_utils.py b/tests/test_utils.py index 8832a696..a07af86e 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -142,7 +142,7 @@ def test_indices_from_slice(self): ind_np_check = utils.indices_from_slice(ind_np) msg1 = "Numpy indices not left as-is by indices_from_slice" - assert np.all(ind_np == ind_np_check) + assert np.all(ind_np == ind_np_check), msg1 slc = slice(2, 10, 2) ind_np_check = utils.indices_from_slice(slc) From 5b08965861521f2c6f024fa90dc72e5df48ffdd1 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 29 Nov 2023 07:57:11 +0100 Subject: [PATCH 46/80] Make astropy optional in pulsar.py --- enterprise/pulsar.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/enterprise/pulsar.py b/enterprise/pulsar.py index b8f23cd5..3b4b4422 100644 --- a/enterprise/pulsar.py +++ b/enterprise/pulsar.py @@ -7,8 +7,6 @@ import os import pickle -import astropy.constants as const -import astropy.units as u import numpy as np from ephem import Ecliptic, Equatorial @@ -34,6 +32,13 @@ logger.warning("PINT not installed. Will use libstempo instead.") # pragma: no cover pint = None +try: + import astropy.constants as const + import astropy.units as u +except ImportError: + const = None # pragma: no cover + u = None + if pint is None and t2 is None: err_msg = "Must have either PINT or libstempo timing package installed" raise ImportError(err_msg) From 8c82ecb357fd53dd0cf7314887cb0c2ff07f08aa Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 29 Nov 2023 08:12:44 +0100 Subject: [PATCH 47/80] Added space for black --- enterprise/pulsar.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise/pulsar.py b/enterprise/pulsar.py index 3b4b4422..4f392bbe 100644 --- a/enterprise/pulsar.py +++ b/enterprise/pulsar.py @@ -36,7 +36,7 @@ import astropy.constants as const import astropy.units as u except ImportError: - const = None # pragma: no cover + const = None # pragma: no cover u = None if pint is None and t2 is None: From c4fe43e72ba96d8fc862c980b7247721a047fa32 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 29 Nov 2023 08:34:42 +0100 Subject: [PATCH 48/80] Changed the location of the pragma: no cover --- enterprise/pulsar.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/enterprise/pulsar.py b/enterprise/pulsar.py index 4f392bbe..bb1f35a3 100644 --- a/enterprise/pulsar.py +++ b/enterprise/pulsar.py @@ -35,8 +35,8 @@ try: import astropy.constants as const import astropy.units as u -except ImportError: - const = None # pragma: no cover +except ImportError: # pragma: no cover + const = None u = None if pint is None and t2 is None: From e9b452b69e8c7f14cb7ee6551388c7f1e3c188e1 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Thu, 30 Nov 2023 09:27:45 +0100 Subject: [PATCH 49/80] Properly address units in PintPulsar position --- enterprise/pulsar.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/enterprise/pulsar.py b/enterprise/pulsar.py index bb1f35a3..e7c26950 100644 --- a/enterprise/pulsar.py +++ b/enterprise/pulsar.py @@ -399,12 +399,13 @@ def _set_dm(self, model): def _get_radec(self, model): if hasattr(model, "RAJ") and hasattr(model, "DECJ"): - return (model.RAJ.value, model.DECJ.value) + raj = model.RAJ.quantity.to(u.rad).value + decj = model.DECJ.quantity.to(u.rad).value + return raj, decj else: - # TODO: better way of dealing with units - d2r = np.pi / 180 - elong, elat = model.ELONG.value, model.ELAT.value - return self._get_radec_from_ecliptic(elong * d2r, elat * d2r) + elong = model.ELONG.quantity.to(u.rad).value + elat = model.ELAT.quantity.to(u.rad).value + return self._radec_from_ecliptic(elong, elat) def _get_ssb_lsec(self, toas, obs_planet): """Get the planet to SSB vector in lightseconds from Pint table""" From c58f8405ff5ebd83d0ef23adbbe3b6bce7809de7 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Thu, 30 Nov 2023 09:47:09 +0100 Subject: [PATCH 50/80] Fix typo --- enterprise/pulsar.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise/pulsar.py b/enterprise/pulsar.py index e7c26950..257041a7 100644 --- a/enterprise/pulsar.py +++ b/enterprise/pulsar.py @@ -405,7 +405,7 @@ def _get_radec(self, model): else: elong = model.ELONG.quantity.to(u.rad).value elat = model.ELAT.quantity.to(u.rad).value - return self._radec_from_ecliptic(elong, elat) + return self._get_radec_from_ecliptic(elong, elat) def _get_ssb_lsec(self, toas, obs_planet): """Get the planet to SSB vector in lightseconds from Pint table""" From b4b255eab8d7ee4bdeb7f83a7ad67d36c9ee7735 Mon Sep 17 00:00:00 2001 From: Chen Siyuan Date: Sat, 23 Dec 2023 21:06:06 +0800 Subject: [PATCH 51/80] add new EPTA and general selection functions --- enterprise/signals/selections.py | 86 ++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/enterprise/signals/selections.py b/enterprise/signals/selections.py index c701e7b0..9f2fba35 100644 --- a/enterprise/signals/selections.py +++ b/enterprise/signals/selections.py @@ -103,6 +103,24 @@ def by_band(flags): return {val: flags["B"] == val for val in flagvals} +def by_freq_band(bands = None): + def backends(freqs): + """Selection function to split by EPTA and custom frequency band values + bands: dict of bands and ranges + default recovers EPTA freq bands + """ + nonlocal bands + if isinstance(bands, dict): + pass + else: + bands = {"Band.1":[0,1000], "Band.2":[1000,2000], + "Band.3":[2000,3000], "Band.4":[3000,10000]} + return {val: (freqs>=fl) & (freqs=fl) & (freqs Date: Sat, 23 Dec 2023 23:29:25 +0800 Subject: [PATCH 52/80] Update gp_bases.py --- enterprise/signals/gp_bases.py | 112 ++++++++++++++++++++++++++++++++- 1 file changed, 110 insertions(+), 2 deletions(-) diff --git a/enterprise/signals/gp_bases.py b/enterprise/signals/gp_bases.py index 1847e54e..5681a423 100644 --- a/enterprise/signals/gp_bases.py +++ b/enterprise/signals/gp_bases.py @@ -13,9 +13,12 @@ __all__ = [ "createfourierdesignmatrix_red", "createfourierdesignmatrix_dm", + "createfourierdesignmatrix_dm_tn", "createfourierdesignmatrix_env", "createfourierdesignmatrix_ephem", "createfourierdesignmatrix_eph", + "createfourierdesignmatrix_chromatic", + "createfourierdesignmatrix_general", ] @@ -124,6 +127,42 @@ def createfourierdesignmatrix_dm( return F * Dm[:, None], Ffreqs +@function +def createfourierdesignmatrix_dm_tn( + toas, freqs, nmodes=30, Tspan=None, pshift=False, fref=1400, logf=False, fmin=None, fmax=None, idx=2, modes=None +): + """ + Construct DM-variation fourier design matrix. Current + normalization expresses DM signal as a deviation [seconds] + at fref [MHz] + + :param toas: vector of time series in seconds + :param freqs: radio frequencies of observations [MHz] + :param nmodes: number of fourier coefficients to use + :param Tspan: option to some other Tspan + :param pshift: option to add random phase shift + :param fref: reference frequency [MHz] + :param logf: use log frequency spacing + :param fmin: lower sampling frequency + :param fmax: upper sampling frequency + :param modes: option to provide explicit list or array of + sampling frequencies + + :return: F: DM-variation fourier design matrix + :return: f: Sampling frequencies + """ + + # get base fourier design matrix and frequencies + F, Ffreqs = createfourierdesignmatrix_red( + toas, nmodes=nmodes, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax, pshift=pshift, modes=modes + ) + + # compute the DM-variation vectors + Dm = (fref / freqs) ** idx * np.sqrt(12) * np.pi / 1400 / 1400 / 2.41e-4 + + return F * Dm[:, None], Ffreqs + + @function def createfourierdesignmatrix_env( toas, @@ -218,7 +257,9 @@ def createfourierdesignmatrix_eph( @function -def createfourierdesignmatrix_chromatic(toas, freqs, nmodes=30, Tspan=None, logf=False, fmin=None, fmax=None, idx=4): +def createfourierdesignmatrix_chromatic( + toas, freqs, nmodes=30, Tspan=None, logf=False, fmin=None, fmax=None, idx=4, modes=None +): """ Construct Scattering-variation fourier design matrix. @@ -232,15 +273,82 @@ def createfourierdesignmatrix_chromatic(toas, freqs, nmodes=30, Tspan=None, logf :param fmin: lower sampling frequency :param fmax: upper sampling frequency :param idx: Index of chromatic effects + :param modes: option to provide explicit list or array of + sampling frequencies :return: F: Chromatic-variation fourier design matrix :return: f: Sampling frequencies """ # get base fourier design matrix and frequencies - F, Ffreqs = createfourierdesignmatrix_red(toas, nmodes=nmodes, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax) + F, Ffreqs = createfourierdesignmatrix_red( + toas, nmodes=nmodes, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax, modes=modes + ) # compute the DM-variation vectors Dm = (1400 / freqs) ** idx return F * Dm[:, None], Ffreqs + + +@function +def createfourierdesignmatrix_general( + toas, + freqs, + flags, + flagname="group", + flagval=None, + idx=None, + tndm=False, + nmodes=30, + Tspan=None, + psrTspan=True, + logf=False, + fmin=None, + fmax=None, + modes=None, + pshift=None, + pseed=None +): + """ + Construct fourier design matrix with possibility of adding selection and/or chromatic index envelope. + + :param toas: vector of time series in seconds + :param freqs: radio frequencies of observations [MHz] + :param flags: Flags from timfiles + :param nmodes: number of fourier coefficients to use + :param Tspan: option to some other Tspan + :param psrTspan: option to use pulsar time span. Used only if sub-group of ToAs is chosen + :param logf: use log frequency spacing + :param fmin: lower sampling frequency + :param fmax: upper sampling frequency + :param log10_Amp: log10 of the Amplitude [s] + :param idx: Index of chromatic effects + :param modes: option to provide explicit list or array of + sampling frequencies + + :return: F: fourier design matrix + :return: f: Sampling frequencies + """ + if flagval and not psrTspan: + sel_toas = toas[np.where(flags[flagname]==flagval)] + Tspan = sel_toas.max() - sel_toas.min() + + # get base fourier design matrix and frequencies + F, Ffreqs = createfourierdesignmatrix_red( + toas, nmodes=nmodes, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax, modes=modes, pshift=pshift, pseed=pseed + ) + + # compute the chromatic-variation vectors + if idx: + if tndm: + chrom_fac = (1400 / freqs) ** idx * np.sqrt(12) * np.pi / 1400 / 1400 / 2.41e-4 + else: + chrom_fac = (1400 / freqs) ** idx + F *= chrom_fac[:, None] + + # compute the mask for the selection + if flagval: + F *= np.array([flags[flagname]==flagval]*F.shape[1]).T + + return F, Ffreqs From 34737cff8ebf93989546daca6562c9397b4b649d Mon Sep 17 00:00:00 2001 From: siyuan-chen <34459712+siyuan-chen@users.noreply.github.com> Date: Sat, 23 Dec 2023 23:30:21 +0800 Subject: [PATCH 53/80] Update gp_signals.py --- enterprise/signals/gp_signals.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/enterprise/signals/gp_signals.py b/enterprise/signals/gp_signals.py index c6f03126..3d72433c 100644 --- a/enterprise/signals/gp_signals.py +++ b/enterprise/signals/gp_signals.py @@ -192,6 +192,9 @@ def FourierBasisGP( components=20, selection=Selection(selections.no_selection), Tspan=None, + logf=False, + fmin=None, + fmax=None, modes=None, name="red_noise", pshift=False, @@ -200,7 +203,8 @@ def FourierBasisGP( """Convenience function to return a BasisGP class with a fourier basis.""" - basis = utils.createfourierdesignmatrix_red(nmodes=components, Tspan=Tspan, modes=modes, pshift=pshift, pseed=pseed) + basis = utils.createfourierdesignmatrix_red(nmodes=components, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax, + modes=modes, pshift=pshift, pseed=pseed) BaseClass = BasisGP(spectrum, basis, coefficients=coefficients, combine=combine, selection=selection, name=name) class FourierBasisGP(BaseClass): @@ -211,24 +215,24 @@ class FourierBasisGP(BaseClass): return FourierBasisGP -def get_timing_model_basis(use_svd=False, normed=True): +def get_timing_model_basis(use_svd=False, normed=True, idx_exclude=None): if use_svd: if normed is not True: raise ValueError("use_svd == True requires normed == True") - return utils.svd_tm_basis() + return utils.svd_tm_basis(idx_exclude=idx_exclude) elif normed is True: - return utils.normed_tm_basis() + return utils.normed_tm_basis(idx_exclude=idx_exclude) elif normed is not False: - return utils.normed_tm_basis(norm=normed) + return utils.normed_tm_basis(norm=normed, idx_exclude=idx_exclude) else: - return utils.unnormed_tm_basis() + return utils.unnormed_tm_basis(idx_exclude=idx_exclude) -def TimingModel(coefficients=False, name="linear_timing_model", use_svd=False, normed=True): +def TimingModel(coefficients=False, name="linear_timing_model", use_svd=False, normed=True, idx_exclude=None): """Class factory for marginalized linear timing model signals.""" - basis = get_timing_model_basis(use_svd, normed) + basis = get_timing_model_basis(use_svd, normed, idx_exclude) prior = utils.tm_prior() BaseClass = BasisGP(prior, basis, coefficients=coefficients, name=name) @@ -413,6 +417,9 @@ def FourierBasisCommonGP( combine=True, components=20, Tspan=None, + logf=False, + fmin=None, + fmax=None, modes=None, name="common_fourier", pshift=False, @@ -424,7 +431,8 @@ def FourierBasisCommonGP( "With coefficients=True, FourierBasisCommonGP " + "requires that you specify Tspan explicitly." ) - basis = utils.createfourierdesignmatrix_red(nmodes=components, Tspan=Tspan, modes=modes, pshift=pshift, pseed=pseed) + basis = utils.createfourierdesignmatrix_red(nmodes=components, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax, + modes=modes, pshift=pshift, pseed=pseed) BaseClass = BasisCommonGP(spectrum, basis, orf, coefficients=coefficients, combine=combine, name=name) class FourierBasisCommonGP(BaseClass): From 6b6912d7924f194c2e73edf8afbe1a0fc5fa0e0e Mon Sep 17 00:00:00 2001 From: siyuan-chen <34459712+siyuan-chen@users.noreply.github.com> Date: Sat, 23 Dec 2023 23:31:43 +0800 Subject: [PATCH 54/80] Update signal_base.py --- enterprise/signals/signal_base.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/enterprise/signals/signal_base.py b/enterprise/signals/signal_base.py index adc3125e..f092f05a 100644 --- a/enterprise/signals/signal_base.py +++ b/enterprise/signals/signal_base.py @@ -735,6 +735,9 @@ def summary(self, include_params=True, to_stdout=False): cpcount += 1 row = [sig.name, sig.__class__.__name__, len(sig.param_names)] summary += "{: <40} {: <30} {: <20}\n".format(*row) + if "BasisGP" in sig.__class__.__name__: + summary += "\nBasis shape (Ntoas x N basis functions): {}".format(str(sig.get_basis().shape)) + summary += "\nN selected toas: {}\n".format(str(len([i for i in sig._masks[0] if i]))) if include_params: summary += "\n" summary += "params:\n" From 9b69705a94cd7ff945a5c4202cd659b301adf089 Mon Sep 17 00:00:00 2001 From: siyuan-chen <34459712+siyuan-chen@users.noreply.github.com> Date: Sat, 23 Dec 2023 23:32:15 +0800 Subject: [PATCH 55/80] Update utils.py --- enterprise/signals/utils.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/enterprise/signals/utils.py b/enterprise/signals/utils.py index eff440e1..ea42e6b5 100644 --- a/enterprise/signals/utils.py +++ b/enterprise/signals/utils.py @@ -19,11 +19,14 @@ from enterprise import constants as const from enterprise import signals as sigs # noqa: F401 from enterprise.signals.gp_bases import ( # noqa: F401 + createfourierdesignmatrix_red, createfourierdesignmatrix_dm, + createfourierdesignmatrix_dm_tn, createfourierdesignmatrix_env, - createfourierdesignmatrix_eph, createfourierdesignmatrix_ephem, - createfourierdesignmatrix_red, + createfourierdesignmatrix_eph, + createfourierdesignmatrix_chromatic, + createfourierdesignmatrix_general, ) from enterprise.signals.gp_priors import powerlaw, turnover # noqa: F401 from enterprise.signals.parameter import function @@ -874,12 +877,19 @@ def anis_orf(pos1, pos2, params, **kwargs): @function -def unnormed_tm_basis(Mmat): +def unnormed_tm_basis(Mmat, idx_exclude=None): + if idx_exclude: + idxs = np.array([i for i in range(Mmat.shape[1]) if i not in idx_exclude]) + Mmat = Mmat[:,idxs] return Mmat, np.ones_like(Mmat.shape[1]) @function -def normed_tm_basis(Mmat, norm=None): +def normed_tm_basis(Mmat, norm=None, idx_exclude=None): + if idx_exclude: + idxs = np.array([i for i in range(Mmat.shape[1]) if i not in idx_exclude]) + Mmat = Mmat[:,idxs] + if norm is None: norm = np.sqrt(np.sum(Mmat**2, axis=0)) @@ -890,7 +900,11 @@ def normed_tm_basis(Mmat, norm=None): @function -def svd_tm_basis(Mmat): +def svd_tm_basis(Mmat, idx_exclude=None): + if idx_exclude: + idxs = np.array([i for i in range(Mmat.shape[1]) if i not in idx_exclude]) + Mmat = Mmat[:,idxs] + u, s, v = np.linalg.svd(Mmat, full_matrices=False) return u, np.ones_like(s) From 1b897e317f340949953155a5a67515b202fd61de Mon Sep 17 00:00:00 2001 From: Chen Siyuan Date: Tue, 6 Feb 2024 22:21:47 +0800 Subject: [PATCH 56/80] added doc string and explanation --- enterprise/signals/gp_bases.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/enterprise/signals/gp_bases.py b/enterprise/signals/gp_bases.py index 5681a423..93afa3e1 100644 --- a/enterprise/signals/gp_bases.py +++ b/enterprise/signals/gp_bases.py @@ -145,6 +145,7 @@ def createfourierdesignmatrix_dm_tn( :param logf: use log frequency spacing :param fmin: lower sampling frequency :param fmax: upper sampling frequency + :param idx: index of the radio frequency dependence :param modes: option to provide explicit list or array of sampling frequencies @@ -157,7 +158,8 @@ def createfourierdesignmatrix_dm_tn( toas, nmodes=nmodes, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax, pshift=pshift, modes=modes ) - # compute the DM-variation vectors + # compute the DM-variation vectors in the temponest normalization + # amplitude normalization: sqrt(12)*pi, scaling to 1 MHz from 1400 MHz, DM constant: 2.41e-4 Dm = (fref / freqs) ** idx * np.sqrt(12) * np.pi / 1400 / 1400 / 2.41e-4 return F * Dm[:, None], Ffreqs From 219df07f337cbad47e5f1a6a74da1e4121fe7121 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 14 Feb 2024 11:30:15 +0100 Subject: [PATCH 57/80] Only do numerical Cholesky update once the likelihood has been calculated already --- enterprise/signals/signal_base.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/enterprise/signals/signal_base.py b/enterprise/signals/signal_base.py index adc3125e..d5b2323b 100644 --- a/enterprise/signals/signal_base.py +++ b/enterprise/signals/signal_base.py @@ -221,9 +221,17 @@ def __call__(self, xs, phiinv_method="cliques"): if self.cholesky_sparse: try: - cf = cholesky(TNT + sps.csc_matrix(phiinv)) # cf(Sigma) - expval = cf(TNr) - logdet_sigma = cf.logdet() + Sigma_sp = TNT + sps.csc_matrix(phiinv) + + if hasattr(self, 'cf_sp'): + # Have analytical decomposition already. Just do update + self.cf_sp.cholesky_inplace(Sigma_sp) + else: + # Do analytical and numerical Sparse Cholesky + self.cf_sp = cholesky(Sigma_sp) + + expval = self.cf_sp(TNr) + logdet_sigma = self.cf_sp.logdet() except CholmodError: # pragma: no cover return -np.inf else: From 6eceb84a90aa5b53088d35fbbaf80b59cf29c73b Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Wed, 14 Feb 2024 11:31:32 +0100 Subject: [PATCH 58/80] Linting of course --- enterprise/signals/signal_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise/signals/signal_base.py b/enterprise/signals/signal_base.py index d5b2323b..9baf777e 100644 --- a/enterprise/signals/signal_base.py +++ b/enterprise/signals/signal_base.py @@ -223,7 +223,7 @@ def __call__(self, xs, phiinv_method="cliques"): try: Sigma_sp = TNT + sps.csc_matrix(phiinv) - if hasattr(self, 'cf_sp'): + if hasattr(self, "cf_sp"): # Have analytical decomposition already. Just do update self.cf_sp.cholesky_inplace(Sigma_sp) else: From c47b04af2e58aa5b2c78f5feb1dd377823a0cef4 Mon Sep 17 00:00:00 2001 From: Chen Siyuan Date: Sun, 7 Apr 2024 21:43:44 +0800 Subject: [PATCH 59/80] liniting --- enterprise/signals/gp_bases.py | 1 - enterprise/signals/utils.py | 2 -- 2 files changed, 3 deletions(-) diff --git a/enterprise/signals/gp_bases.py b/enterprise/signals/gp_bases.py index 1847e54e..9b621f4e 100644 --- a/enterprise/signals/gp_bases.py +++ b/enterprise/signals/gp_bases.py @@ -219,7 +219,6 @@ def createfourierdesignmatrix_eph( @function def createfourierdesignmatrix_chromatic(toas, freqs, nmodes=30, Tspan=None, logf=False, fmin=None, fmax=None, idx=4): - """ Construct Scattering-variation fourier design matrix. diff --git a/enterprise/signals/utils.py b/enterprise/signals/utils.py index eff440e1..e2986f55 100644 --- a/enterprise/signals/utils.py +++ b/enterprise/signals/utils.py @@ -322,7 +322,6 @@ def create_stabletimingdesignmatrix(designmat, fastDesign=True): def make_ecc_interpolant(): - """ Make interpolation function from eccentricity file to determine number of harmonics to use for a given @@ -339,7 +338,6 @@ def make_ecc_interpolant(): def get_edot(F, mc, e): - """ Compute eccentricity derivative from Taylor et al. (2016) From d105ddd509405ba8800bf4065080a43010c9aa6d Mon Sep 17 00:00:00 2001 From: Chen Siyuan Date: Sun, 7 Apr 2024 21:50:21 +0800 Subject: [PATCH 60/80] more linting --- enterprise/signals/selections.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/enterprise/signals/selections.py b/enterprise/signals/selections.py index 9f2fba35..b0a570b8 100644 --- a/enterprise/signals/selections.py +++ b/enterprise/signals/selections.py @@ -103,7 +103,7 @@ def by_band(flags): return {val: flags["B"] == val for val in flagvals} -def by_freq_band(bands = None): +def by_freq_band(bands=None): def backends(freqs): """Selection function to split by EPTA and custom frequency band values bands: dict of bands and ranges @@ -113,10 +113,10 @@ def backends(freqs): if isinstance(bands, dict): pass else: - bands = {"Band.1":[0,1000], "Band.2":[1000,2000], - "Band.3":[2000,3000], "Band.4":[3000,10000]} - return {val: (freqs>=fl) & (freqs=fl) & (freqs= fl) & (freqs < fh) for val, (fl, fh) in bands.items() if any((freqs >= fl) & (freqs < fh)) + } return backends @@ -151,6 +151,7 @@ def by_index(name, idx): def indexvals(toas): """Selection function to split by ToA index values""" return {name: np.isin(np.arange(len(toas)), idx)} + return indexvals From 7905de239b41f5384f04597428791f4304482148 Mon Sep 17 00:00:00 2001 From: Chen Siyuan Date: Sun, 7 Apr 2024 21:57:15 +0800 Subject: [PATCH 61/80] linting --- enterprise/signals/gp_bases.py | 11 +++++------ enterprise/signals/gp_signals.py | 10 ++++++---- enterprise/signals/utils.py | 14 ++++++-------- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/enterprise/signals/gp_bases.py b/enterprise/signals/gp_bases.py index 93afa3e1..f924be12 100644 --- a/enterprise/signals/gp_bases.py +++ b/enterprise/signals/gp_bases.py @@ -159,7 +159,7 @@ def createfourierdesignmatrix_dm_tn( ) # compute the DM-variation vectors in the temponest normalization - # amplitude normalization: sqrt(12)*pi, scaling to 1 MHz from 1400 MHz, DM constant: 2.41e-4 + # amplitude normalization: sqrt(12)*pi, scaling to 1 MHz from 1400 MHz, DM constant: 2.41e-4 Dm = (fref / freqs) ** idx * np.sqrt(12) * np.pi / 1400 / 1400 / 2.41e-4 return F * Dm[:, None], Ffreqs @@ -262,7 +262,6 @@ def createfourierdesignmatrix_eph( def createfourierdesignmatrix_chromatic( toas, freqs, nmodes=30, Tspan=None, logf=False, fmin=None, fmax=None, idx=4, modes=None ): - """ Construct Scattering-variation fourier design matrix. @@ -310,7 +309,7 @@ def createfourierdesignmatrix_general( fmax=None, modes=None, pshift=None, - pseed=None + pseed=None, ): """ Construct fourier design matrix with possibility of adding selection and/or chromatic index envelope. @@ -333,7 +332,7 @@ def createfourierdesignmatrix_general( :return: f: Sampling frequencies """ if flagval and not psrTspan: - sel_toas = toas[np.where(flags[flagname]==flagval)] + sel_toas = toas[np.where(flags[flagname] == flagval)] Tspan = sel_toas.max() - sel_toas.min() # get base fourier design matrix and frequencies @@ -351,6 +350,6 @@ def createfourierdesignmatrix_general( # compute the mask for the selection if flagval: - F *= np.array([flags[flagname]==flagval]*F.shape[1]).T - + F *= np.array([flags[flagname] == flagval] * F.shape[1]).T + return F, Ffreqs diff --git a/enterprise/signals/gp_signals.py b/enterprise/signals/gp_signals.py index 3d72433c..21ad177d 100644 --- a/enterprise/signals/gp_signals.py +++ b/enterprise/signals/gp_signals.py @@ -203,8 +203,9 @@ def FourierBasisGP( """Convenience function to return a BasisGP class with a fourier basis.""" - basis = utils.createfourierdesignmatrix_red(nmodes=components, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax, - modes=modes, pshift=pshift, pseed=pseed) + basis = utils.createfourierdesignmatrix_red( + nmodes=components, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax, modes=modes, pshift=pshift, pseed=pseed + ) BaseClass = BasisGP(spectrum, basis, coefficients=coefficients, combine=combine, selection=selection, name=name) class FourierBasisGP(BaseClass): @@ -431,8 +432,9 @@ def FourierBasisCommonGP( "With coefficients=True, FourierBasisCommonGP " + "requires that you specify Tspan explicitly." ) - basis = utils.createfourierdesignmatrix_red(nmodes=components, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax, - modes=modes, pshift=pshift, pseed=pseed) + basis = utils.createfourierdesignmatrix_red( + nmodes=components, Tspan=Tspan, logf=logf, fmin=fmin, fmax=fmax, modes=modes, pshift=pshift, pseed=pseed + ) BaseClass = BasisCommonGP(spectrum, basis, orf, coefficients=coefficients, combine=combine, name=name) class FourierBasisCommonGP(BaseClass): diff --git a/enterprise/signals/utils.py b/enterprise/signals/utils.py index ea42e6b5..d896de37 100644 --- a/enterprise/signals/utils.py +++ b/enterprise/signals/utils.py @@ -325,7 +325,6 @@ def create_stabletimingdesignmatrix(designmat, fastDesign=True): def make_ecc_interpolant(): - """ Make interpolation function from eccentricity file to determine number of harmonics to use for a given @@ -342,7 +341,6 @@ def make_ecc_interpolant(): def get_edot(F, mc, e): - """ Compute eccentricity derivative from Taylor et al. (2016) @@ -879,16 +877,16 @@ def anis_orf(pos1, pos2, params, **kwargs): @function def unnormed_tm_basis(Mmat, idx_exclude=None): if idx_exclude: - idxs = np.array([i for i in range(Mmat.shape[1]) if i not in idx_exclude]) - Mmat = Mmat[:,idxs] + idxs = np.array([i for i in range(Mmat.shape[1]) if i not in idx_exclude]) + Mmat = Mmat[:, idxs] return Mmat, np.ones_like(Mmat.shape[1]) @function def normed_tm_basis(Mmat, norm=None, idx_exclude=None): if idx_exclude: - idxs = np.array([i for i in range(Mmat.shape[1]) if i not in idx_exclude]) - Mmat = Mmat[:,idxs] + idxs = np.array([i for i in range(Mmat.shape[1]) if i not in idx_exclude]) + Mmat = Mmat[:, idxs] if norm is None: norm = np.sqrt(np.sum(Mmat**2, axis=0)) @@ -902,8 +900,8 @@ def normed_tm_basis(Mmat, norm=None, idx_exclude=None): @function def svd_tm_basis(Mmat, idx_exclude=None): if idx_exclude: - idxs = np.array([i for i in range(Mmat.shape[1]) if i not in idx_exclude]) - Mmat = Mmat[:,idxs] + idxs = np.array([i for i in range(Mmat.shape[1]) if i not in idx_exclude]) + Mmat = Mmat[:, idxs] u, s, v = np.linalg.svd(Mmat, full_matrices=False) return u, np.ones_like(s) From 7ea2bfdf36bbe99da5cb5cddae5ffe9a405fee50 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Fri, 19 Apr 2024 08:40:47 +0200 Subject: [PATCH 62/80] Added test for the sparse matrix in-place solve --- tests/test_likelihood.py | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/tests/test_likelihood.py b/tests/test_likelihood.py index 10449010..9a4c3059 100644 --- a/tests/test_likelihood.py +++ b/tests/test_likelihood.py @@ -326,6 +326,43 @@ def test_compare_ecorr_likelihood(self): msg = "Likelihood mismatch between ECORR methods" assert np.allclose(l1, l2), msg + def test_like_sparse_cache(self): + """Test likelihood with sparse Cholesky caching""" + + # find the maximum time span to set GW frequency sampling + tmin = [p.toas.min() for p in self.psrs] + tmax = [p.toas.max() for p in self.psrs] + Tspan = np.max(tmax) - np.min(tmin) + + # setup basic model + efac = parameter.Constant(1.0) + log10_A = parameter.Constant(-15.0) + gamma = parameter.Constant(4.33) + + ef = white_signals.MeasurementNoise(efac) + pl = utils.powerlaw(log10_A=log10_A, gamma=gamma) + + orf = utils.hd_orf() + crn = gp_signals.FourierBasisCommonGP(pl, orf, components=20, name="GW", Tspan=Tspan) + + tm = gp_signals.TimingModel() + m = ef + crn + + # Two identical arrays that we'll compare with two sets of parameters + pta1 = signal_base.PTA([m(p) for p in self.psrs]) + pta2 = signal_base.PTA([m(p) for p in self.psrs]) + + params_init = parameter.sample(pta1.params) + params_check = parameter.sample(pta1.params) + + # First call for pta1 only initializes the sparse decomposition. Second one uses it + _ = pta1.get_lnlikelihood(params_init) + l1 = pta1.get_lnlikelihood(params_check) + l2 = pta2.get_lnlikelihood(params_check) + + msg = "Likelihood mismatch between sparse Cholesky full & inplace" + assert np.allclose(l1, l2), msg + class TestLikelihoodPint(TestLikelihood): @classmethod From 1a5d2e435099da08186d811e6507cfd807caa455 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Fri, 19 Apr 2024 09:03:11 +0200 Subject: [PATCH 63/80] Oopsie, timing model addition was not committed enough --- tests/test_likelihood.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_likelihood.py b/tests/test_likelihood.py index 9a4c3059..61eff926 100644 --- a/tests/test_likelihood.py +++ b/tests/test_likelihood.py @@ -346,7 +346,7 @@ def test_like_sparse_cache(self): crn = gp_signals.FourierBasisCommonGP(pl, orf, components=20, name="GW", Tspan=Tspan) tm = gp_signals.TimingModel() - m = ef + crn + m = tm + ef + crn # Two identical arrays that we'll compare with two sets of parameters pta1 = signal_base.PTA([m(p) for p in self.psrs]) From e54c3d203f5a5218a541d21a38e7796a5b03d137 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Fri, 19 Apr 2024 09:48:55 +0200 Subject: [PATCH 64/80] Added test for PINT pulsar with RAJ/DECJ --- tests/test_pulsar.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/test_pulsar.py b/tests/test_pulsar.py index 6ab617e3..6177db6b 100644 --- a/tests/test_pulsar.py +++ b/tests/test_pulsar.py @@ -218,6 +218,20 @@ def test_load_radec_psr(self): timing_package="pint", ) + def test_load_radec_psr_mdc(self): + """Setup the Pulsar object.""" + + # initialize Pulsar class with RAJ DECJ so _get_radec can be covered + Pulsar( + datadir + "/mdc1/J0030+0451.par", + datadir + "/mdc1/J0030+0451.tim", + ephem="DE430", + drop_pintpsr=False, + timing_package="pint", + ) + + assert self.psr.Mmat.shape == (130, 8), msg + def test_no_planet(self): """Test exception when incorrect par(tim) file given.""" From 9f2b6c910389d7342843cb4b42efbaaf9b075662 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Sun, 21 Apr 2024 14:00:54 +0200 Subject: [PATCH 65/80] Changed dimensionality constraint for normal ppf --- enterprise/signals/parameter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise/signals/parameter.py b/enterprise/signals/parameter.py index c1c835fc..34c8f3be 100644 --- a/enterprise/signals/parameter.py +++ b/enterprise/signals/parameter.py @@ -266,7 +266,7 @@ def NormalPPF(value, mu, sigma): Handles scalar mu and sigma, compatible vector value/mu/sigma, vector value/mu and compatible covariance matrix sigma.""" - if np.ndim(sigma) == 2: + if np.ndim(sigma) >= 2: raise NotImplementedError("PPF not implemented when sigma is 2D") return sstats.norm.ppf(value, loc=mu, scale=sigma) From 7b7eb229aca032463b3152f7e1d8dff079b51e75 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Sun, 21 Apr 2024 18:09:20 +0200 Subject: [PATCH 66/80] Fixed extra pulsar test --- tests/test_pulsar.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/test_pulsar.py b/tests/test_pulsar.py index 6177db6b..cbbfb369 100644 --- a/tests/test_pulsar.py +++ b/tests/test_pulsar.py @@ -222,7 +222,7 @@ def test_load_radec_psr_mdc(self): """Setup the Pulsar object.""" # initialize Pulsar class with RAJ DECJ so _get_radec can be covered - Pulsar( + psr = Pulsar( datadir + "/mdc1/J0030+0451.par", datadir + "/mdc1/J0030+0451.tim", ephem="DE430", @@ -230,7 +230,8 @@ def test_load_radec_psr_mdc(self): timing_package="pint", ) - assert self.psr.Mmat.shape == (130, 8), msg + msg = f"Pulsar not loaded properly {self.psr.Mmat.shape}" + assert psr.Mmat.shape == (130, 8), msg def test_no_planet(self): """Test exception when incorrect par(tim) file given.""" From 0c65521aca2af8a2e01015afa18d7898ed983c82 Mon Sep 17 00:00:00 2001 From: Rutger van Haasteren Date: Mon, 12 Aug 2024 19:17:46 +0200 Subject: [PATCH 67/80] Extra comment to make tests re-run --- enterprise/signals/parameter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/enterprise/signals/parameter.py b/enterprise/signals/parameter.py index 34c8f3be..065f7f23 100644 --- a/enterprise/signals/parameter.py +++ b/enterprise/signals/parameter.py @@ -29,6 +29,7 @@ def _sample(parlist, parvalues): for par in parlist: if par not in parvalues: + # RvH: Hyper pars seem to be broken # sample hyperpars for this par, skip parameter itself parvalues.update(sample(par.params[1:])) From 9b41516aaf76b3bda1fced59cad8334f4b07451e Mon Sep 17 00:00:00 2001 From: Aaron Date: Tue, 13 Aug 2024 07:50:46 -0700 Subject: [PATCH 68/80] move to macos-13 --- .github/workflows/ci_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 4c399247..239d6497 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -16,7 +16,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest] + os: [ubuntu-latest, macos-13] python-version: ['3.7', '3.8', '3.9', '3.10'] steps: From 9863f8c5ab45d559a31b3ab0710c5bec67c573b5 Mon Sep 17 00:00:00 2001 From: Aaron Date: Tue, 13 Aug 2024 08:35:17 -0700 Subject: [PATCH 69/80] add 3.12, update codecov --- .github/workflows/ci_test.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 239d6497..5bb5a112 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -17,7 +17,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-13] - python-version: ['3.7', '3.8', '3.9', '3.10'] + python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] steps: - name: Checkout repository @@ -56,7 +56,7 @@ jobs: - name: Test with pytest run: make test - name: Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 #with: # fail_ci_if_error: true From bad8a1a1240b87d0276716a821d35aa249b422fa Mon Sep 17 00:00:00 2001 From: Aaron Date: Tue, 13 Aug 2024 08:56:00 -0700 Subject: [PATCH 70/80] Add Python 3.12 --- CONTRIBUTING.rst | 2 +- README.md | 2 +- docs/index.rst | 2 +- setup.py | 3 ++- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 30513938..841886e5 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -147,7 +147,7 @@ Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring. -3. The pull request should work for all supported versions of Python: 3.8, 3.9, 3.10, and 3.11. You +3. The pull request should work for all supported versions of Python: 3.8, 3.9, 3.10, 3.11, and 3.12. You can see the progress of the tests in the `Checks` tab of your GitHub pull request. Tips diff --git a/README.md b/README.md index 965396c1..34a24a64 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ [![Build Status](https://github.com/nanograv/enterprise/workflows/CI-Tests/badge.svg)](https://github.com/nanograv/enterprise/actions) [![Documentation Status](https://readthedocs.org/projects/enterprise/badge/?version=latest)](https://enterprise.readthedocs.io/en/latest/?badge=latest) [![Test Coverage](https://codecov.io/gh/nanograv/enterprise/branch/master/graph/badge.svg?token=YXSX3293VF)](https://codecov.io/gh/nanograv/enterprise) -![Python Versions](https://img.shields.io/badge/python-3.8%2C%203.9%2C%203.10%2C%203.11-blue.svg) +![Python Versions](https://img.shields.io/badge/python-3.8%2C%203.9%2C%203.10%2C%203.11%2C%203.12-blue.svg) [![Zenodo DOI 4059815](https://zenodo.org/badge/DOI/10.5281/zenodo.4059815.svg)](https://doi.org/10.5281/zenodo.4059815) diff --git a/docs/index.rst b/docs/index.rst index c778e692..f7d95823 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -15,7 +15,7 @@ Welcome to enterprise's documentation! .. image:: https://codecov.io/gh/nanograv/enterprise/branch/master/graph/badge.svg?token=YXSX3293VF :target: https://codecov.io/gh/nanograv/enterprise :alt: Test Coverage -.. image:: https://img.shields.io/badge/python-3.8%2C%203.9%2C%203.10%2C%203.11-blue.svg +.. image:: https://img.shields.io/badge/python-3.8%2C%203.9%2C%203.10%2C%203.11%2C%203.12-blue.svg :alt: Python Versions .. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.4059815.svg diff --git a/setup.py b/setup.py index 1e068864..a1625c05 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ package_dir={"enterprise": "enterprise"}, include_package_data=True, package_data={"enterprise": ["datafiles/*", "datafiles/ephemeris/*", "datafiles/ng9/*", "datafiles/mdc_open1/*"]}, - python_requires=">=3.8, <3.12", + python_requires=">=3.8, <3.13", install_requires=requirements, license="MIT license", zip_safe=False, @@ -46,6 +46,7 @@ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Topic :: Scientific/Engineering :: Astronomy", "Topic :: Scientific/Engineering :: Physics", ], From 894b44117f6decaf570181c6d0c11c6a03c44b3a Mon Sep 17 00:00:00 2001 From: Aaron Date: Tue, 13 Aug 2024 10:05:40 -0700 Subject: [PATCH 71/80] undo codecov upgrade --- .github/workflows/ci_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 861fddc7..7334997c 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -56,7 +56,7 @@ jobs: - name: Test with pytest run: make test - name: Codecov - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v3 #with: # fail_ci_if_error: true From 0a5fea837c123d27f39220c04fa0a8606f8e51cb Mon Sep 17 00:00:00 2001 From: Aaron Date: Tue, 13 Aug 2024 10:32:18 -0700 Subject: [PATCH 72/80] redo upgrade codecov, lower max healpy version --- .github/workflows/ci_test.yml | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 7334997c..861fddc7 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -56,7 +56,7 @@ jobs: - name: Test with pytest run: make test - name: Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 #with: # fail_ci_if_error: true diff --git a/requirements.txt b/requirements.txt index b31fb211..98837685 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ numpy>=1.16.3 scipy>=1.2.0 ephem>=3.7.6.0 -healpy>=1.14.0 +healpy>=1.14.0, <1.17.0 scikit-sparse>=0.4.5 pint-pulsar>=0.8.3 libstempo>=2.4.4 From ac45bbaf32de09b33fd9f256287c15c632db77cf Mon Sep 17 00:00:00 2001 From: Aaron Date: Tue, 13 Aug 2024 10:57:15 -0700 Subject: [PATCH 73/80] spacing is important... --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 98837685..3f19888c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ numpy>=1.16.3 scipy>=1.2.0 ephem>=3.7.6.0 -healpy>=1.14.0, <1.17.0 +healpy>=1.14.0,<1.17.0 scikit-sparse>=0.4.5 pint-pulsar>=0.8.3 libstempo>=2.4.4 From e75c28d7e0378d68af4e2c8b94da0e0a67a88628 Mon Sep 17 00:00:00 2001 From: Aaron Date: Tue, 13 Aug 2024 11:15:37 -0700 Subject: [PATCH 74/80] missed one... --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index a1625c05..7030e9ac 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ "numpy>=1.16.3", "scipy>=1.2.0", "ephem>=3.7.6.0", - "healpy>=1.14.0", + "healpy>=1.14.0,<1.17.0", "scikit-sparse>=0.4.5", "pint-pulsar>=0.8.3", "libstempo>=2.4.4", From ef756ad6626f10aa54b17cfedf14b592dfb84d53 Mon Sep 17 00:00:00 2001 From: Aaron Date: Tue, 13 Aug 2024 11:29:05 -0700 Subject: [PATCH 75/80] undo upper limit on healpy --- requirements.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 3f19888c..b31fb211 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ numpy>=1.16.3 scipy>=1.2.0 ephem>=3.7.6.0 -healpy>=1.14.0,<1.17.0 +healpy>=1.14.0 scikit-sparse>=0.4.5 pint-pulsar>=0.8.3 libstempo>=2.4.4 diff --git a/setup.py b/setup.py index 7030e9ac..a1625c05 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ "numpy>=1.16.3", "scipy>=1.2.0", "ephem>=3.7.6.0", - "healpy>=1.14.0,<1.17.0", + "healpy>=1.14.0", "scikit-sparse>=0.4.5", "pint-pulsar>=0.8.3", "libstempo>=2.4.4", From 2096868943b3055c2971c989fb2945fd9a525f75 Mon Sep 17 00:00:00 2001 From: Alyssa Cassity Date: Thu, 15 Aug 2024 13:53:36 -0700 Subject: [PATCH 76/80] Update selections.py for "CHIME" Add "CHIME" to def nanograv_backends(backend_flags): --- enterprise/signals/selections.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise/signals/selections.py b/enterprise/signals/selections.py index c701e7b0..e579432e 100644 --- a/enterprise/signals/selections.py +++ b/enterprise/signals/selections.py @@ -118,7 +118,7 @@ def by_backend(backend_flags): def nanograv_backends(backend_flags): """Selection function to split by NANOGRav backend flags only.""" flagvals = np.unique(backend_flags) - ngb = ["ASP", "GASP", "GUPPI", "PUPPI", "YUPPI"] + ngb = ["ASP", "GASP", "GUPPI", "PUPPI", "YUPPI", "CHIME"] flagvals = [val for val in flagvals if any([b in val for b in ngb])] return {val: backend_flags == val for val in flagvals} From 25d4747964d1de386e547f472fcd1a5071877873 Mon Sep 17 00:00:00 2001 From: Alyssa Cassity Date: Thu, 22 Aug 2024 12:24:42 -0700 Subject: [PATCH 77/80] Update selections.py added "VEGAS" to list as requested --- enterprise/signals/selections.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/enterprise/signals/selections.py b/enterprise/signals/selections.py index e579432e..10300d35 100644 --- a/enterprise/signals/selections.py +++ b/enterprise/signals/selections.py @@ -118,7 +118,7 @@ def by_backend(backend_flags): def nanograv_backends(backend_flags): """Selection function to split by NANOGRav backend flags only.""" flagvals = np.unique(backend_flags) - ngb = ["ASP", "GASP", "GUPPI", "PUPPI", "YUPPI", "CHIME"] + ngb = ["ASP", "GASP", "GUPPI", "PUPPI", "YUPPI", "CHIME", "VEGAS"] flagvals = [val for val in flagvals if any([b in val for b in ngb])] return {val: backend_flags == val for val in flagvals} From 0cee27c88506b998aeaa8fb7debf7e17e4ffe293 Mon Sep 17 00:00:00 2001 From: Aaron Date: Sat, 24 Aug 2024 16:57:23 -0700 Subject: [PATCH 78/80] try to get verbose output --- .github/workflows/ci_test.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 861fddc7..f2e292db 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -54,7 +54,8 @@ jobs: - name: Run lint run: make lint - name: Test with pytest - run: make test + run: | + pytest --tb=short --maxfail=10 - name: Codecov uses: codecov/codecov-action@v4 #with: From 70df5ec6fb556651b9bc22369b964bace348faa0 Mon Sep 17 00:00:00 2001 From: Aaron Date: Sat, 24 Aug 2024 20:25:08 -0700 Subject: [PATCH 79/80] attempt 7896868 --- .github/workflows/ci_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index f2e292db..5835da55 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -16,7 +16,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-13] + os: [ubuntu-latest, macos-12] python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] steps: From 4f6154ab9dd51bd8639b5cb8b09da94fa6c71d82 Mon Sep 17 00:00:00 2001 From: Aaron Date: Sat, 24 Aug 2024 21:26:43 -0700 Subject: [PATCH 80/80] try to skip the problematic test --- .github/workflows/ci_test.yml | 5 ++--- tests/test_utils.py | 5 +++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci_test.yml b/.github/workflows/ci_test.yml index 5835da55..861fddc7 100644 --- a/.github/workflows/ci_test.yml +++ b/.github/workflows/ci_test.yml @@ -16,7 +16,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-12] + os: [ubuntu-latest, macos-13] python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] steps: @@ -54,8 +54,7 @@ jobs: - name: Run lint run: make lint - name: Test with pytest - run: | - pytest --tb=short --maxfail=10 + run: make test - name: Codecov uses: codecov/codecov-action@v4 #with: diff --git a/tests/test_utils.py b/tests/test_utils.py index a07af86e..b6a8148f 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -8,7 +8,9 @@ Tests for `utils` module. """ +import os import unittest +import pytest import numpy as np @@ -18,6 +20,8 @@ from enterprise.signals import utils from tests.enterprise_test_data import datadir +IN_GITHUB_ACTIONS = os.getenv("GITHUB_ACTIONS") == "true" + class TestUtils(unittest.TestCase): @classmethod @@ -166,6 +170,7 @@ def test_psd(self): assert np.allclose(utils.powerlaw(f, log10_A, gamma), pl), msg assert np.allclose(utils.turnover(f, log10_A, gamma, lf0, kappa, beta), pt), msg + @pytest.mark.skipif(IN_GITHUB_ACTIONS, reason="Test doesn't work in Github Actions due to limited memory.") def test_orf(self): """Test ORF functions.""" p1 = np.array([0.3, 0.648, 0.7])