From 6d8342753ceaf060fff6d0850199c0eeaf9e9d7b Mon Sep 17 00:00:00 2001 From: RyoTerasawa <82135798+RyoTerasawa@users.noreply.github.com> Date: Tue, 27 Sep 2022 17:00:38 +0900 Subject: [PATCH 01/17] Update tk3d.py Add the function Tk3D_SSC_Terasawa22 for computing super-sample covariance. Also add some module to import. --- pyccl/tk3d.py | 156 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 156 insertions(+) diff --git a/pyccl/tk3d.py b/pyccl/tk3d.py index 81d98b13f..af8a6f85c 100644 --- a/pyccl/tk3d.py +++ b/pyccl/tk3d.py @@ -3,6 +3,11 @@ from .pyutils import check, _get_spline2d_arrays, _get_spline3d_arrays import numpy as np +## Terasawa +from . import core #Terasawa +from . import background # Terasawa +## + class Tk3D(object): """A container for \"isotropized\" connected trispectra relevant for @@ -213,3 +218,154 @@ def get_spline_arrays(self): [np.exp(tk, out=tk) for tk in out] return a_arr, lk_arr1, lk_arr2, out + +def Tk3D_SSC_Terasawa22(cosmo,deltah=0.02, + lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False): + """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing + the super-sample covariance trispectrum, given by the tensor + product of the power spectrum responses associated with the + two pairs of quantities being correlated. Currently this + function only applicable to matter power spectrum in flat + cosmology. Each response is calculated using the method + developed in Terasawa et al. 2022 (arXiv:2205.10339v2) as: + + .. math:: + \\frac{\\partial P_{mm}(k)}{\\partial\\delta_L} = + \\left(1 + \\frac{26}{21}T_{h}(k) -\\frac{1}{3}\\frac{d\\log P_{mm}(k)}{d\\log k}\\right) + P_{mm}(k), + + where the :math:`T_{h}(k)` is the normalized growth response to + the Hubble parameter defined as + :math:`T_{h}(k) = \\frac{d\\log P_{mm}(k)}{dh}/(2\\frac{d\\log D}{dh})`. + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + deltah (float): the variation of h to compute T_{h}(k) by + the two-sided numerical derivative method. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. 
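A minimal usage sketch of the new function (the cosmological parameter values below are illustrative assumptions; as documented above, the call yields the SSC Tk3D container):

    import pyccl as ccl

    cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.045, h=0.67,
                          A_s=2.1e-9, n_s=0.96,
                          transfer_function="boltzmann_camb",
                          matter_power_spectrum="camb")
    # k and a sampling default to the spline grids stored in `cosmo`
    # when lk_arr and a_arr are left as None.
    tk_ssc = ccl.tk3d.Tk3D_SSC_Terasawa22(cosmo, deltah=0.02)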
+ """ + + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status, cosmo=cosmo) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status, cosmo=cosmo) + + k_use = np.exp(lk_arr) + + Omega_c = cosmo["Omega_c"] + Omega_b = cosmo["Omega_b"] + h = cosmo["h"] + n_s = cosmo["n_s"] + A_s = cosmo["A_s"] + + extra_parameters = {"camb": {"halofit_version": "original", + }} + #set h-modified cosmology to take finite differencing + hp = h + deltah + Omega_c_p = np.power((h/hp),2) * Omega_c #\Omega_c h^2 is fixed + Omega_b_p = np.power((h/hp),2) * Omega_b #\Omega_b h^2 is fixed + + hm = h - deltah + Omega_c_m = np.power((h/hm),2) * Omega_c #\Omega_c h^2 is fixed + Omega_b_m = np.power((h/hm),2) * Omega_b #\Omega_b h^2 is fixed + + cosmo_hp = core.Cosmology(Omega_c=Omega_c_p,Omega_b=Omega_b_p, + h=hp, n_s=n_s, A_s=A_s, + transfer_function="boltzmann_camb", + matter_power_spectrum="camb", + extra_parameters=extra_parameters) + + cosmo_hm = core.Cosmology(Omega_c=Omega_c_m,Omega_b=Omega_b_m, + h=hm, n_s=n_s, A_s=A_s, + transfer_function="boltzmann_camb", + matter_power_spectrum="camb", + extra_parameters=extra_parameters) + + # Growth factor + Dp = background.growth_factor_unnorm(cosmo_hp,a_arr) + Dm = background.growth_factor_unnorm(cosmo_hm,a_arr) + + # Power spectrum + cosmo.compute_linear_power() + cosmo_hp.compute_linear_power() + cosmo_hm.compute_linear_power() + + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') + + pk2d = cosmo.get_nonlin_power('delta_matter:delta_matter') + pk2d_hp = cosmo_hp.get_nonlin_power('delta_matter:delta_matter') + pk2d_hm = cosmo_hm.get_nonlin_power('delta_matter:delta_matter') + + na = len(a_arr) + nk = len(k_use) + dpk12 = np.zeros([na,nk]) + dpk = np.zeros(nk) + T_h = np.zeros(nk) + + kmin = 1e-2 + for ia, aa in enumerate(a_arr): + + pk = pk2d.eval(k_use, aa, cosmo) + pk_hp_kh = pk2d_hp.eval(k_use, aa, cosmo_hp) + pk_hm_kh = pk2d_hm.eval(k_use, aa, cosmo_hm) + pk_hp = pk2d_hp.eval(k_use, aa, cosmo_hp) + pk_hm = pk2d_hm.eval(k_use, aa, cosmo_hm) + + dpknl = pk2d.eval_dlogpk_dlogk(k_use, aa, cosmo) + dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) + + # use linear theory below kmin + T_h[k_use<=kmin] = 1 + + T_h[k_use>kmin] = (np.log(pk_hp[k_use>kmin])-np.log(pk_hm[k_use>kmin]))/(2*(np.log(Dp[ia])-np.log(Dm[ia]))) # (hp-hm) term is cancelled out + + dpk[k_use<=kmin] = dpklin[k_use<=kmin] + dpk[k_use>kmin] = dpknl[k_use>kmin] + + dpk12[ia, :] = pk * (1. + (26./21.)*T_h -dpk/3.) + #dpk34 = dpk12 + + if use_log: + if np.any(dpk12 <= 0): + warnings.warn( + "Some values were not positive. 
" + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + dpk12 = np.log(dpk12) + + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, + pk1_arr=dpk12, pk2_arr=dpk12, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, is_logt=use_log) + return tk3d, pk, T_h, dpk, dpk12 #pk, T_h, dpk, dpk12 are outputs for debug + From 9dabae08bab6a29da981831d813f54cad6431ab6 Mon Sep 17 00:00:00 2001 From: RyoTerasawa <82135798+RyoTerasawa@users.noreply.github.com> Date: Tue, 27 Sep 2022 17:03:55 +0900 Subject: [PATCH 02/17] Update tk3d.py --- pyccl/tk3d.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/pyccl/tk3d.py b/pyccl/tk3d.py index af8a6f85c..6c9386848 100644 --- a/pyccl/tk3d.py +++ b/pyccl/tk3d.py @@ -3,11 +3,8 @@ from .pyutils import check, _get_spline2d_arrays, _get_spline3d_arrays import numpy as np -## Terasawa -from . import core #Terasawa -from . import background # Terasawa -## - +from . import core +from . import background class Tk3D(object): """A container for \"isotropized\" connected trispectra relevant for @@ -367,5 +364,5 @@ def Tk3D_SSC_Terasawa22(cosmo,deltah=0.02, pk1_arr=dpk12, pk2_arr=dpk12, extrap_order_lok=extrap_order_lok, extrap_order_hik=extrap_order_hik, is_logt=use_log) - return tk3d, pk, T_h, dpk, dpk12 #pk, T_h, dpk, dpk12 are outputs for debug + return tk3d From 24afef7366c5431e56b18177c88337fb2a5e4ef9 Mon Sep 17 00:00:00 2001 From: RyoTerasawa <82135798+RyoTerasawa@users.noreply.github.com> Date: Thu, 29 Sep 2022 13:50:09 +0900 Subject: [PATCH 03/17] Update tk3d.py --- pyccl/tk3d.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/pyccl/tk3d.py b/pyccl/tk3d.py index 6c9386848..855b2aa4d 100644 --- a/pyccl/tk3d.py +++ b/pyccl/tk3d.py @@ -326,7 +326,7 @@ def Tk3D_SSC_Terasawa22(cosmo,deltah=0.02, dpk12 = np.zeros([na,nk]) dpk = np.zeros(nk) T_h = np.zeros(nk) - + kmin = 1e-2 for ia, aa in enumerate(a_arr): @@ -337,7 +337,7 @@ def Tk3D_SSC_Terasawa22(cosmo,deltah=0.02, pk_hm = pk2d_hm.eval(k_use, aa, cosmo_hm) dpknl = pk2d.eval_dlogpk_dlogk(k_use, aa, cosmo) - dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) + dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) # use linear theory below kmin T_h[k_use<=kmin] = 1 @@ -348,8 +348,7 @@ def Tk3D_SSC_Terasawa22(cosmo,deltah=0.02, dpk[k_use>kmin] = dpknl[k_use>kmin] dpk12[ia, :] = pk * (1. + (26./21.)*T_h -dpk/3.) - #dpk34 = dpk12 - + if use_log: if np.any(dpk12 <= 0): warnings.warn( @@ -359,10 +358,9 @@ def Tk3D_SSC_Terasawa22(cosmo,deltah=0.02, use_log = False else: dpk12 = np.log(dpk12) - + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, pk1_arr=dpk12, pk2_arr=dpk12, extrap_order_lok=extrap_order_lok, extrap_order_hik=extrap_order_hik, is_logt=use_log) return tk3d - From ec4054de27154f875d2f4335ed5c5a8cbdaa57e8 Mon Sep 17 00:00:00 2001 From: RyoTerasawa <82135798+RyoTerasawa@users.noreply.github.com> Date: Thu, 29 Sep 2022 14:05:19 +0900 Subject: [PATCH 04/17] Add files via upload --- pyccl/tk3d.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/pyccl/tk3d.py b/pyccl/tk3d.py index 855b2aa4d..c68de212f 100644 --- a/pyccl/tk3d.py +++ b/pyccl/tk3d.py @@ -3,9 +3,11 @@ from .pyutils import check, _get_spline2d_arrays, _get_spline3d_arrays import numpy as np -from . import core +from . import core from . import background + + class Tk3D(object): """A container for \"isotropized\" connected trispectra relevant for covariance matrix calculations. I.e. 
functions of 3 variables of the @@ -326,7 +328,7 @@ def Tk3D_SSC_Terasawa22(cosmo,deltah=0.02, dpk12 = np.zeros([na,nk]) dpk = np.zeros(nk) T_h = np.zeros(nk) - + kmin = 1e-2 for ia, aa in enumerate(a_arr): @@ -337,7 +339,7 @@ def Tk3D_SSC_Terasawa22(cosmo,deltah=0.02, pk_hm = pk2d_hm.eval(k_use, aa, cosmo_hm) dpknl = pk2d.eval_dlogpk_dlogk(k_use, aa, cosmo) - dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) + dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) # use linear theory below kmin T_h[k_use<=kmin] = 1 @@ -348,7 +350,7 @@ def Tk3D_SSC_Terasawa22(cosmo,deltah=0.02, dpk[k_use>kmin] = dpknl[k_use>kmin] dpk12[ia, :] = pk * (1. + (26./21.)*T_h -dpk/3.) - + if use_log: if np.any(dpk12 <= 0): warnings.warn( @@ -358,9 +360,9 @@ def Tk3D_SSC_Terasawa22(cosmo,deltah=0.02, use_log = False else: dpk12 = np.log(dpk12) - + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, pk1_arr=dpk12, pk2_arr=dpk12, extrap_order_lok=extrap_order_lok, extrap_order_hik=extrap_order_hik, is_logt=use_log) - return tk3d + return tk3d From 192a98829a5235dc5533c58efc28f2b456d6bcd9 Mon Sep 17 00:00:00 2001 From: RyoTerasawa <82135798+RyoTerasawa@users.noreply.github.com> Date: Thu, 29 Sep 2022 14:17:00 +0900 Subject: [PATCH 05/17] Update tk3d.py --- pyccl/tk3d.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/pyccl/tk3d.py b/pyccl/tk3d.py index c68de212f..4c58a531a 100644 --- a/pyccl/tk3d.py +++ b/pyccl/tk3d.py @@ -3,8 +3,8 @@ from .pyutils import check, _get_spline2d_arrays, _get_spline3d_arrays import numpy as np -from . import core -from . import background +from . import core +from . import background @@ -218,8 +218,9 @@ def get_spline_arrays(self): return a_arr, lk_arr1, lk_arr2, out -def Tk3D_SSC_Terasawa22(cosmo,deltah=0.02, - lk_arr=None, a_arr=None, + +def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, + lk_arr=None, a_arr=None, extrap_order_lok=1, extrap_order_hik=1, use_log=False): """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing @@ -296,13 +297,13 @@ def Tk3D_SSC_Terasawa22(cosmo,deltah=0.02, Omega_c_m = np.power((h/hm),2) * Omega_c #\Omega_c h^2 is fixed Omega_b_m = np.power((h/hm),2) * Omega_b #\Omega_b h^2 is fixed - cosmo_hp = core.Cosmology(Omega_c=Omega_c_p,Omega_b=Omega_b_p, + cosmo_hp = core.Cosmology(Omega_c=Omega_c_p, Omega_b=Omega_b_p, h=hp, n_s=n_s, A_s=A_s, transfer_function="boltzmann_camb", matter_power_spectrum="camb", extra_parameters=extra_parameters) - cosmo_hm = core.Cosmology(Omega_c=Omega_c_m,Omega_b=Omega_b_m, + cosmo_hm = core.Cosmology(Omega_c=Omega_c_m, Omega_b=Omega_b_m, h=hm, n_s=n_s, A_s=A_s, transfer_function="boltzmann_camb", matter_power_spectrum="camb", From 9c70f42f7a88b026e1efe717fa109c74b3d1e959 Mon Sep 17 00:00:00 2001 From: RyoTerasawa <82135798+RyoTerasawa@users.noreply.github.com> Date: Thu, 29 Sep 2022 14:17:25 +0900 Subject: [PATCH 06/17] Update tk3d.py --- pyccl/tk3d.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pyccl/tk3d.py b/pyccl/tk3d.py index 4c58a531a..bbdf47aa6 100644 --- a/pyccl/tk3d.py +++ b/pyccl/tk3d.py @@ -7,7 +7,6 @@ from . import background - class Tk3D(object): """A container for \"isotropized\" connected trispectra relevant for covariance matrix calculations. I.e. 
functions of 3 variables of the From c0ab9d394f787937171c52279c4c0bf326802a9f Mon Sep 17 00:00:00 2001 From: RyoTerasawa <82135798+RyoTerasawa@users.noreply.github.com> Date: Sun, 2 Oct 2022 04:06:39 +0900 Subject: [PATCH 07/17] remove the background dependency --- pyccl/tk3d.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pyccl/tk3d.py b/pyccl/tk3d.py index bbdf47aa6..70fb600b3 100644 --- a/pyccl/tk3d.py +++ b/pyccl/tk3d.py @@ -4,7 +4,6 @@ import numpy as np from . import core -from . import background class Tk3D(object): @@ -309,8 +308,8 @@ def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, extra_parameters=extra_parameters) # Growth factor - Dp = background.growth_factor_unnorm(cosmo_hp,a_arr) - Dm = background.growth_factor_unnorm(cosmo_hm,a_arr) + Dp = cosmo_hp.growth_factor_unnorm(a_arr) + Dm = cosmo_hm.growth_factor_unnorm(a_arr) # Power spectrum cosmo.compute_linear_power() From 2c5ead9ec1c0e3ee160c9c5259e272292115a76e Mon Sep 17 00:00:00 2001 From: Yue Nan <104014129+YueNan-c@users.noreply.github.com> Date: Wed, 5 Oct 2022 16:51:09 +0900 Subject: [PATCH 08/17] Update tk3d.py --- pyccl/tk3d.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pyccl/tk3d.py b/pyccl/tk3d.py index 70fb600b3..42d86beed 100644 --- a/pyccl/tk3d.py +++ b/pyccl/tk3d.py @@ -215,7 +215,6 @@ def get_spline_arrays(self): [np.exp(tk, out=tk) for tk in out] return a_arr, lk_arr1, lk_arr2, out - def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, lk_arr=None, a_arr=None, @@ -224,18 +223,19 @@ def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing the super-sample covariance trispectrum, given by the tensor product of the power spectrum responses associated with the - two pairs of quantities being correlated. Currently this - function only applicable to matter power spectrum in flat - cosmology. Each response is calculated using the method + two pairs of quantities being correlated. Currently this + function only applicable to matter power spectrum in flat + cosmology. Each response is calculated using the method developed in Terasawa et al. 2022 (arXiv:2205.10339v2) as: .. math:: \\frac{\\partial P_{mm}(k)}{\\partial\\delta_L} = - \\left(1 + \\frac{26}{21}T_{h}(k) -\\frac{1}{3}\\frac{d\\log P_{mm}(k)}{d\\log k}\\right) + \\left(1 + \\frac{26}{21}T_{h}(k) + -\\frac{1}{3}\\frac{d\\log P_{mm}(k)}{d\\log k}\\right) P_{mm}(k), where the :math:`T_{h}(k)` is the normalized growth response to - the Hubble parameter defined as + the Hubble parameter defined as :math:`T_{h}(k) = \\frac{d\\log P_{mm}(k)}{dh}/(2\\frac{d\\log D}{dh})`. 
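A schematic numpy sketch of this two-sided finite difference as it is taken further down in the function (the array names pk_hp, pk_hm, Dp, Dm are assumptions for illustration: the nonlinear spectra and unnormalized growth factors in the h + deltah and h - deltah cosmologies):

    import numpy as np

    def growth_response_Th(pk_hp, pk_hm, Dp, Dm):
        # T_h(k) = (dlnP/dh) / (2 dlnD/dh); the common 2*deltah step
        # cancels, so only the logarithmic differences enter.
        return ((np.log(pk_hp) - np.log(pk_hm))
                / (2.0 * (np.log(Dp) - np.log(Dm))))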
Args: From c5c235276f418ec37600afaffd154506a729394d Mon Sep 17 00:00:00 2001 From: Yue Nan <104014129+YueNan-c@users.noreply.github.com> Date: Wed, 5 Oct 2022 17:03:24 +0900 Subject: [PATCH 09/17] Update tk3d.py --- pyccl/tk3d.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pyccl/tk3d.py b/pyccl/tk3d.py index 42d86beed..ec07892b3 100644 --- a/pyccl/tk3d.py +++ b/pyccl/tk3d.py @@ -216,10 +216,11 @@ def get_spline_arrays(self): return a_arr, lk_arr1, lk_arr2, out + def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, - lk_arr=None, a_arr=None, - extrap_order_lok=1, extrap_order_hik=1, - use_log=False): + lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False): """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing the super-sample covariance trispectrum, given by the tensor product of the power spectrum responses associated with the @@ -230,7 +231,7 @@ def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, .. math:: \\frac{\\partial P_{mm}(k)}{\\partial\\delta_L} = - \\left(1 + \\frac{26}{21}T_{h}(k) + \\left(1 + \\frac{26}{21}T_{h}(k) -\\frac{1}{3}\\frac{d\\log P_{mm}(k)}{d\\log k}\\right) P_{mm}(k), From 5ad223d68979ada8b4724949dc0e30616f90638c Mon Sep 17 00:00:00 2001 From: Yue Nan <104014129+YueNan-c@users.noreply.github.com> Date: Wed, 5 Oct 2022 17:28:04 +0900 Subject: [PATCH 10/17] Update tk3d.py Lint errors correction --- pyccl/tk3d.py | 86 +++++++++++++++++++++++++-------------------------- 1 file changed, 42 insertions(+), 44 deletions(-) diff --git a/pyccl/tk3d.py b/pyccl/tk3d.py index ec07892b3..71cb1429f 100644 --- a/pyccl/tk3d.py +++ b/pyccl/tk3d.py @@ -215,9 +215,9 @@ def get_spline_arrays(self): [np.exp(tk, out=tk) for tk in out] return a_arr, lk_arr1, lk_arr2, out - - -def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, + + +def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, lk_arr=None, a_arr=None, extrap_order_lok=1, extrap_order_hik=1, use_log=False): @@ -238,7 +238,7 @@ def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, where the :math:`T_{h}(k)` is the normalized growth response to the Hubble parameter defined as :math:`T_{h}(k) = \\frac{d\\log P_{mm}(k)}{dh}/(2\\frac{d\\log D}{dh})`. - + Args: cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. 
deltah (float): the variation of h to compute T_{h}(k) by @@ -284,73 +284,71 @@ def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, h = cosmo["h"] n_s = cosmo["n_s"] A_s = cosmo["A_s"] - - extra_parameters = {"camb": {"halofit_version": "original", - }} - #set h-modified cosmology to take finite differencing - hp = h + deltah - Omega_c_p = np.power((h/hp),2) * Omega_c #\Omega_c h^2 is fixed - Omega_b_p = np.power((h/hp),2) * Omega_b #\Omega_b h^2 is fixed - hm = h - deltah - Omega_c_m = np.power((h/hm),2) * Omega_c #\Omega_c h^2 is fixed - Omega_b_m = np.power((h/hm),2) * Omega_b #\Omega_b h^2 is fixed + extra_parameters = {"camb": {"halofit_version": "original", }} + +# set h-modified cosmology to take finite differencing + hp = h + deltah + Omega_c_p = np.power((h/hp), 2) * Omega_c # \Omega_c h^2 is fixed + Omega_b_p = np.power((h/hp), 2) * Omega_b # \Omega_b h^2 is fixed + + hm = h - deltah + Omega_c_m = np.power((h/hm), 2) * Omega_c # \Omega_c h^2 is fixed + Omega_b_m = np.power((h/hm), 2) * Omega_b # \Omega_b h^2 is fixed cosmo_hp = core.Cosmology(Omega_c=Omega_c_p, Omega_b=Omega_b_p, - h=hp, n_s=n_s, A_s=A_s, - transfer_function="boltzmann_camb", - matter_power_spectrum="camb", - extra_parameters=extra_parameters) - + h=hp, n_s=n_s, A_s=A_s, + transfer_function="boltzmann_camb", + matter_power_spectrum="camb", + extra_parameters=extra_parameters) + cosmo_hm = core.Cosmology(Omega_c=Omega_c_m, Omega_b=Omega_b_m, - h=hm, n_s=n_s, A_s=A_s, - transfer_function="boltzmann_camb", - matter_power_spectrum="camb", - extra_parameters=extra_parameters) - + h=hm, n_s=n_s, A_s=A_s, + transfer_function="boltzmann_camb", + matter_power_spectrum="camb", + extra_parameters=extra_parameters) + # Growth factor Dp = cosmo_hp.growth_factor_unnorm(a_arr) Dm = cosmo_hm.growth_factor_unnorm(a_arr) - + # Power spectrum cosmo.compute_linear_power() cosmo_hp.compute_linear_power() cosmo_hm.compute_linear_power() - + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') pk2d = cosmo.get_nonlin_power('delta_matter:delta_matter') pk2d_hp = cosmo_hp.get_nonlin_power('delta_matter:delta_matter') pk2d_hm = cosmo_hm.get_nonlin_power('delta_matter:delta_matter') - + na = len(a_arr) nk = len(k_use) - dpk12 = np.zeros([na,nk]) + dpk12 = np.zeros([na, nk]) dpk = np.zeros(nk) T_h = np.zeros(nk) - + kmin = 1e-2 for ia, aa in enumerate(a_arr): - - pk = pk2d.eval(k_use, aa, cosmo) - pk_hp_kh = pk2d_hp.eval(k_use, aa, cosmo_hp) - pk_hm_kh = pk2d_hm.eval(k_use, aa, cosmo_hm) + + pk = pk2d.eval(k_use, aa, cosmo) pk_hp = pk2d_hp.eval(k_use, aa, cosmo_hp) pk_hm = pk2d_hm.eval(k_use, aa, cosmo_hm) - + dpknl = pk2d.eval_dlogpk_dlogk(k_use, aa, cosmo) dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) - + # use linear theory below kmin - T_h[k_use<=kmin] = 1 - - T_h[k_use>kmin] = (np.log(pk_hp[k_use>kmin])-np.log(pk_hm[k_use>kmin]))/(2*(np.log(Dp[ia])-np.log(Dm[ia]))) # (hp-hm) term is cancelled out - - dpk[k_use<=kmin] = dpklin[k_use<=kmin] - dpk[k_use>kmin] = dpknl[k_use>kmin] - + T_h[k_use <= kmin] = 1 + + T_h[k_use > kmin] = (np.log(pk_hp[k_use > kmin])-np.log(pk_hm[k_use > kmin]))/(2*(np.log(Dp[ia])-np.log(Dm[ia]))) # (hp-hm) term is cancelled out + + dpk[k_use <= kmin] = dpklin[k_use <= kmin] + dpk[k_use > kmin] = dpknl[k_use > kmin] + dpk12[ia, :] = pk * (1. + (26./21.)*T_h -dpk/3.) 
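As a quick cross-check of the response assembled on the line above: 1 + 26/21 = 47/21, so on scales where T_h -> 1 (the linear regime below kmin) the prefactor reduces to the familiar 47/21 - (1/3) dlnP/dlnk of the linear matter response. For an effective slope of -1.5 (an illustrative value):

    T_h_lin, slope = 1.0, -1.5
    prefac = 1.0 + (26.0 / 21.0) * T_h_lin - slope / 3.0   # = 47/21 + 0.5 ~ 2.738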
- + if use_log: if np.any(dpk12 <= 0): warnings.warn( @@ -360,7 +358,7 @@ def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, use_log = False else: dpk12 = np.log(dpk12) - + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, pk1_arr=dpk12, pk2_arr=dpk12, extrap_order_lok=extrap_order_lok, From a9dca0dc9f1fd15a3d670886343444146dc4ac2b Mon Sep 17 00:00:00 2001 From: RyoTerasawa <82135798+RyoTerasawa@users.noreply.github.com> Date: Wed, 5 Oct 2022 17:38:36 +0900 Subject: [PATCH 11/17] Update tk3d.py --- pyccl/tk3d.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/pyccl/tk3d.py b/pyccl/tk3d.py index 71cb1429f..cca059464 100644 --- a/pyccl/tk3d.py +++ b/pyccl/tk3d.py @@ -4,7 +4,8 @@ import numpy as np from . import core - +import warnings +from .errors import CCLWarning class Tk3D(object): """A container for \"isotropized\" connected trispectra relevant for @@ -331,23 +332,24 @@ def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, kmin = 1e-2 for ia, aa in enumerate(a_arr): - - pk = pk2d.eval(k_use, aa, cosmo) + + pk = pk2d.eval(k_use, aa, cosmo) pk_hp = pk2d_hp.eval(k_use, aa, cosmo_hp) pk_hm = pk2d_hm.eval(k_use, aa, cosmo_hm) - dpknl = pk2d.eval_dlogpk_dlogk(k_use, aa, cosmo) - dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) + dpknl = pk2d.eval_dlogpk_dlogk(k_use, aa, cosmo) + dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) # use linear theory below kmin T_h[k_use <= kmin] = 1 - T_h[k_use > kmin] = (np.log(pk_hp[k_use > kmin])-np.log(pk_hm[k_use > kmin]))/(2*(np.log(Dp[ia])-np.log(Dm[ia]))) # (hp-hm) term is cancelled out + T_h[k_use > kmin] = (np.log(pk_hp[k_use > kmin])-np.log(pk_hm[k_use > kmin])) + /(2*(np.log(Dp[ia])-np.log(Dm[ia]))) # (hp-hm) term is cancelled out dpk[k_use <= kmin] = dpklin[k_use <= kmin] dpk[k_use > kmin] = dpknl[k_use > kmin] - dpk12[ia, :] = pk * (1. + (26./21.)*T_h -dpk/3.) + dpk12[ia, :] = pk * (1. + (26./21.) * T_h -dpk/3.) 
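For reference, the h-shifted cosmologies used for this finite difference are built so that the physical densities Omega_c h^2 and Omega_b h^2 stay fixed; a quick standalone check of that construction (parameter values are illustrative):

    h, deltah, Omega_c = 0.67, 0.02, 0.27
    hp = h + deltah
    Omega_c_p = (h / hp)**2 * Omega_c
    # Omega_c_p * hp^2 recovers Omega_c * h^2 by construction
    assert abs(Omega_c_p * hp**2 - Omega_c * h**2) < 1e-12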
if use_log: if np.any(dpk12 <= 0): From 68993c17b34a6b716aafb90b81f5b195fe4f2c3e Mon Sep 17 00:00:00 2001 From: RyoTerasawa <82135798+RyoTerasawa@users.noreply.github.com> Date: Wed, 5 Oct 2022 17:41:49 +0900 Subject: [PATCH 12/17] Update tk3d.py --- pyccl/tk3d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyccl/tk3d.py b/pyccl/tk3d.py index cca059464..257c5104a 100644 --- a/pyccl/tk3d.py +++ b/pyccl/tk3d.py @@ -343,7 +343,7 @@ def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, # use linear theory below kmin T_h[k_use <= kmin] = 1 - T_h[k_use > kmin] = (np.log(pk_hp[k_use > kmin])-np.log(pk_hm[k_use > kmin])) + T_h[k_use > kmin] = (np.log(pk_hp[k_use > kmin])-np.log(pk_hm[k_use > kmin]))\\ /(2*(np.log(Dp[ia])-np.log(Dm[ia]))) # (hp-hm) term is cancelled out dpk[k_use <= kmin] = dpklin[k_use <= kmin] From f2178a324391a330fc2f6f0369da5620585797af Mon Sep 17 00:00:00 2001 From: RyoTerasawa <82135798+RyoTerasawa@users.noreply.github.com> Date: Wed, 5 Oct 2022 17:47:58 +0900 Subject: [PATCH 13/17] Update tk3d.py --- pyccl/tk3d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyccl/tk3d.py b/pyccl/tk3d.py index 257c5104a..78406b77e 100644 --- a/pyccl/tk3d.py +++ b/pyccl/tk3d.py @@ -343,7 +343,7 @@ def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, # use linear theory below kmin T_h[k_use <= kmin] = 1 - T_h[k_use > kmin] = (np.log(pk_hp[k_use > kmin])-np.log(pk_hm[k_use > kmin]))\\ + T_h[k_use > kmin] = (np.log(pk_hp[k_use > kmin])-np.log(pk_hm[k_use > kmin]))\ /(2*(np.log(Dp[ia])-np.log(Dm[ia]))) # (hp-hm) term is cancelled out dpk[k_use <= kmin] = dpklin[k_use <= kmin] From 00cc3f56a3d8e08798ba187942c133900efd55f9 Mon Sep 17 00:00:00 2001 From: Yue Nan <104014129+YueNan-c@users.noreply.github.com> Date: Thu, 6 Oct 2022 17:17:16 +0900 Subject: [PATCH 14/17] Lint-check for SSC-Terasawa --- pyccl/tk3d.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/pyccl/tk3d.py b/pyccl/tk3d.py index 78406b77e..0379af5c0 100644 --- a/pyccl/tk3d.py +++ b/pyccl/tk3d.py @@ -7,6 +7,7 @@ import warnings from .errors import CCLWarning + class Tk3D(object): """A container for \"isotropized\" connected trispectra relevant for covariance matrix calculations. I.e. functions of 3 variables of the @@ -333,7 +334,7 @@ def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, kmin = 1e-2 for ia, aa in enumerate(a_arr): - pk = pk2d.eval(k_use, aa, cosmo) + pk = pk2d.eval(k_use, aa, cosmo) pk_hp = pk2d_hp.eval(k_use, aa, cosmo_hp) pk_hm = pk2d_hm.eval(k_use, aa, cosmo_hm) @@ -343,13 +344,15 @@ def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, # use linear theory below kmin T_h[k_use <= kmin] = 1 - T_h[k_use > kmin] = (np.log(pk_hp[k_use > kmin])-np.log(pk_hm[k_use > kmin]))\ - /(2*(np.log(Dp[ia])-np.log(Dm[ia]))) # (hp-hm) term is cancelled out + T_h[k_use > kmin] = (np.log(pk_hp[k_use > kmin]) - + np.log(pk_hm[k_use > kmin])) / \ + (2 * (np.log(Dp[ia]) - np.log(Dm[ia]))) + # (hp-hm) term is cancelled out dpk[k_use <= kmin] = dpklin[k_use <= kmin] dpk[k_use > kmin] = dpknl[k_use > kmin] - dpk12[ia, :] = pk * (1. + (26./21.) * T_h -dpk/3.) + dpk12[ia, :] = pk * (1. + (26. / 21.) * T_h - dpk / 3.) 
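Because the same response array dpk12 is passed to Tk3D as both pk1_arr and pk2_arr, the stored SSC trispectrum is factorizable, T(k1, k2, a) = dP/ddelta_b(k1, a) * dP/ddelta_b(k2, a); schematically, for one scale factor (illustrative numbers):

    import numpy as np

    resp = np.array([2.7, 2.1, 1.4])   # dP/ddelta_b at three k values
    t_ssc = np.outer(resp, resp)       # T(k_i, k_j) at fixed a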
if use_log: if np.any(dpk12 <= 0): From d9ec7b85a81902d2ca5a3d2ce8de6b8369b6865b Mon Sep 17 00:00:00 2001 From: RyoTerasawa Date: Wed, 16 Nov 2022 15:58:33 +0900 Subject: [PATCH 15/17] add files and modules for SSC of g-g lensing --- pyccl/__init__.py | 5 + pyccl/darkemulator.py | 1132 ++++++++++++++++++++++++++++++++++ pyccl/halos/__init__.py | 5 +- pyccl/halos/concentration.py | 165 ++++- pyccl/halos/halo_model.py | 248 +++++++- pyccl/halos/hmfunc.py | 63 ++ pyccl/tk3d.py | 414 +++++++++++-- 7 files changed, 1968 insertions(+), 64 deletions(-) create mode 100644 pyccl/darkemulator.py diff --git a/pyccl/__init__.py b/pyccl/__init__.py index b57a89545..ff5c2dc22 100644 --- a/pyccl/__init__.py +++ b/pyccl/__init__.py @@ -153,6 +153,11 @@ nfw_profile_2d, ) +from .darkemulator import ( + darkemu_Tk3D_SSC, + b2H17, + b2L16, +) __all__ = ( 'lib', diff --git a/pyccl/darkemulator.py b/pyccl/darkemulator.py new file mode 100644 index 000000000..ea9201a60 --- /dev/null +++ b/pyccl/darkemulator.py @@ -0,0 +1,1132 @@ +from . import ccllib as lib + +from .pyutils import check, _get_spline2d_arrays, _get_spline3d_arrays +import numpy as np + +from . import core +import warnings +from .errors import CCLWarning +from .pk2d import Pk2D +from .tk3d import Tk3D + +from dark_emulator import darkemu +from dark_emulator import model_hod +from scipy import integrate +from scipy.interpolate import InterpolatedUnivariateSpline as ius +from . import halos + +def darkemu_Tk3D_SSC(cosmo, prof1, deltah=0.02, + log10Mh_min=12.0,log10Mh_max=15.9, + normprof1=False, + lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False): + """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing + the super-sample covariance trispectrum, given by the tensor + product of the power spectrum responses associated with the + two pairs of quantities being correlated. Each response is + calculated as: + + .. math:: + \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = + \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) + P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) + P_{u,v}(k) + + where the :math:`I^a_b` are defined in the documentation + of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2` and + :math:`b_{u}` and :math:`b_{v}` are the linear halo biases for quantities + :math:`u` and :math:`v`, respectively (zero if they are not clustering). + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + + p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, + the power spectrum stored within `cosmo` will be used. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. 
+ use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. + """ + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status, cosmo=cosmo) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status, cosmo=cosmo) + + k_use = np.exp(lk_arr) + + # Check inputs + if not isinstance(prof1, halos.profiles.HaloProfile): + raise TypeError("prof1 must be of type `HaloProfile`") + + h = cosmo["h"] + k_emu = k_use / h # [h/Mpc] + + cosmo.compute_linear_power() + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') + + # set cosmology for dark emulator + emu = darkemu_set_cosmology(cosmo) + + # set h-modified cosmology to take finite differencing + hp = h + deltah + hm = h - deltah + cosmo_hp, cosmo_hm = set_hmodified_cosmology(cosmo,deltah) + + emu_p = darkemu_set_cosmology(cosmo_hp) + emu_m = darkemu_set_cosmology(cosmo_hm) + + # Growth factor + Dp = cosmo_hp.growth_factor_unnorm(a_arr) + Dm = cosmo_hm.growth_factor_unnorm(a_arr) + + na = len(a_arr) + nk = len(k_use) + dpk12 = np.zeros([na, nk]) + pk12 = np.zeros([na, nk]) + #dpk34 = np.zeros([na, nk]) + Mfor_hmf = np.linspace(8,17,200) + Mh = np.linspace(log10Mh_min,log10Mh_max,2**5+1) # M_sol/h + M = np.log10(10**Mh/h) + dM = M[1] - M[0] + dlogM = dM + b1_th_tink = np.zeros(len(Mh)) + #b2_th_tink = np.zeros(len(Mh)) + Pth = [0] * len(Mh) + Pnth_hp = [0] * len(Mh) + Pnth_hm = [0] * len(Mh) + Pbin = [0] * len(Mh) + nths = np.zeros(len(Mh)) + + mass_def=halos.MassDef200m() + hmf_DE = halos.MassFuncDarkEmulator(cosmo,mass_def=mass_def) + hbf = halos.hbias.HaloBiasTinker10(cosmo,mass_def=mass_def) + + if np.any(a_arr < 1/(1+1.48)): + hmf = halos.MassFuncTinker10(cosmo,mass_def=mass_def) + + nfw = halos.HaloProfileNFW(halos.ConcentrationDuffy08(mass_def), + fourier_analytic=True) + hmc = halos.HMCalculator(cosmo, hmf, hbf, mass_def) + + halomod_pk_arr = halos.halomod_power_spectrum(cosmo, hmc, k_use, a_arr, + prof=nfw, prof_2pt=None, + prof2=prof1, p_of_k_a=None, + normprof1=True, normprof2=True, + get_1h=True, get_2h=True, + smooth_transition=None, + supress_1h=None) + + halomod_tk3D, dpk12_halomod = halos.halomod_Tk3D_SSC(cosmo=cosmo, hmc=hmc, + prof1=nfw, + prof2=prof1, + prof12_2pt=None, + normprof1=True, normprof2=True, + lk_arr=np.log(k_use), a_arr=a_arr, + use_log=use_log) + + for ia, aa in enumerate(a_arr): + z = 1. 
/ aa - 1 # dark emulator is valid for 0 =< z <= 1.48 + if z > 1.48: + dpk12[ia, :] = dpk12_halomod[ia, :] + pk12[ia, :] = halomod_pk_arr[ia, :] + print("use halo model for z={:.2f}>1.48".format(z)) + else: + # mass function + dndlog10m_emu = ius(Mfor_hmf ,hmf_DE.get_mass_function(cosmo, 10**Mfor_hmf ,aa)) # Mpc^-3 #ius(np.log10(Mlist), dndm_emu * Mlist * np.log(10) * h ** 3) + + if Mh[0] < 12.0: # Msol/h + Pth[0] = emu.get_phm_massthreshold(k_emu,10**12,z) * (1/h)**3 + nths12 = emu.mass_to_dens(10**12,z) * h**3 + Pnth_hp[0] = emu_p.get_phm(k_emu*(h/hp),np.log10(nths12*(1/hp)**3),z)*(1/hp)**3 + Pnth_hm[0] = emu_m.get_phm(k_emu*(h/hm),np.log10(nths12*(1/hm)**3),z)*(1/hm)**3 + Pbin[0] = emu.get_phm_mass(k_emu, 10 ** 12, z) * (1/h)**3 + else: + Pth[0] = emu.get_phm_massthreshold(k_emu,10**Mh[0],z) * (1/h)**3 + nths[0] = emu.mass_to_dens(10**Mh[0],z) * h**3 + Pnth_hp[0] = emu_p.get_phm(k_emu*(h/hp),np.log10(nths[0]*(1/hp)**3),z)*(1/hp)**3 + Pnth_hm[0] = emu_m.get_phm(k_emu*(h/hm),np.log10(nths[0]*(1/hm)**3),z)*(1/hm)**3 + Pbin[0] = emu.get_phm_mass(k_emu, 10 ** Mh[0], z) * (1/h)**3 + + for m in range(1,len(Mh)): + if Mh[m] < 12.0: # Msol/h + Pth[m] = Pth[0] + Pnth_hp[m] = Pnth_hp[0] + Pnth_hm[m] = Pnth_hm[0] + Pbin[m] = Pbin[0] + else: + Pth[m] = emu.get_phm_massthreshold(k_emu,10**Mh[m],z) * (1/h)**3 + nths[m] = emu.mass_to_dens(10**Mh[m],z) * h**3 + Pnth_hp[m] = emu_p.get_phm(k_emu*(h/hp),np.log10(nths[m]*(1/hp)**3),z)*(1/hp)**3 + Pnth_hm[m] = emu_m.get_phm(k_emu*(h/hm),np.log10(nths[m]*(1/hm)**3),z)*(1/hm)**3 + Pbin[m] = emu.get_phm_mass(k_emu, 10 ** Mh[m], z) * (1/h)**3 + + + + M1 = np.linspace(M[m], M[-1], 2**5+1) + dM1 = M[1] - M[0] + b1_th_tink[m] = integrate.romb(dndlog10m_emu(M1) * hbf.get_halo_bias(cosmo,(10 ** M1), aa), dx = dM1)\ + /integrate.romb(dndlog10m_emu(M1), dx = dM1) + + + Nc = prof1._Nc(10 ** M, aa) + Ns = prof1._Ns(10 ** M, aa) + fc = prof1._fc(aa) + Ng = Nc * (fc + Ns) + Mps = M + dlogM + Mms = M - dlogM + + prof_Mp = prof1.fourier(cosmo, k_use, (10 ** Mps), aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): + prof_Mm = prof1.fourier(cosmo, k_use, (10 ** Mms), aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): + prof = prof1.fourier(cosmo, k_use,(10 ** M), aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): + + dprof_dlogM = (prof_Mp - prof_Mm) / (2 * dlogM)#*np.log(10)) + nth_mat = np.tile(nths, (len(k_use), 1)).transpose() + ng = integrate.romb(dndlog10m_emu(M) * Ng, dx = dM, axis = 0) + bgE = integrate.romb(dndlog10m_emu(M) * Ng * \ + (hbf.get_halo_bias(cosmo,(10 ** M), aa)), dx = dM, axis = 0) / ng + + bgE2 = integrate.romb(dndlog10m_emu(M) * Ng * \ + b2H17(hbf.get_halo_bias(cosmo,(10 ** M), aa)), dx = dM, axis = 0) / ng + bgL = bgE - 1 + + dndlog10m_func_mat = np.tile(dndlog10m_emu(M), (len(k_emu), 1)).transpose() # M_sol,Mpc^-3 + b1L_th_mat = np.tile(b1_th_tink -1, (len(k_emu), 1)).transpose() + Pgm = integrate.romb(dprof_dlogM * (nth_mat * np.array(Pth)), \ + dx = dM, axis = 0) / ng + + dPhm_db_nfix = (26. / 21.) * (np.array(Pnth_hp) - np.array(Pnth_hm)) / \ + (2 * (np.log(Dp[ia]) - np.log(Dm[ia]))) # Mpc^3 + + dnP_hm_db_emu = nth_mat * (dPhm_db_nfix + b1L_th_mat * np.array(Pbin)) # Dless + + # stitching + k_switch = 0.08 # [h/Mpc] + kmin = 1e-2 # [h/Mpc] + dnP_gm_db = integrate.romb(dprof_dlogM * (dnP_hm_db_emu), dx = dM, axis = 0) #Dless + + Pgm_growth = dnP_gm_db / ng - bgL * Pgm # Dless + + Pgm_d = -1. / 3. 
* np.gradient(np.log(Pgm)) / np.gradient(np.log(k_use)) * Pgm #Dless + + dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) + + Pgm_lin = bgE * pk2dlin.eval(k_use, aa, cosmo) + dPgm_db_lin = (47/21 + bgE2/bgE - bgE -1/3 * dpklin) * \ + bgE * pk2dlin.eval(k_use, aa, cosmo) + dPgm_db = dPgm_db_lin * np.exp(-k_emu/k_switch) + \ + (Pgm_growth + Pgm_d) * (1 - np.exp(-k_emu/k_switch)) + + Pgm = Pgm_lin * np.exp(-k_emu/k_switch) + \ + Pgm * (1 - np.exp(-k_emu/k_switch)) + + # use linear theory below kmin + dPgm_db[k_emu < kmin] = dPgm_db_lin[k_emu < kmin] + dpk12[ia, :] = dPgm_db + + Pgm[k_emu < kmin] = Pgm_lin[k_emu < kmin] + pk12[ia, :] = Pgm + + + if use_log: + if np.any(dpk12 <= 0): + warnings.warn( + "Some values were not positive. " + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + dpk12 = np.log(dpk12) + + pk2d = Pk2D(a_arr=a_arr, lk_arr=lk_arr, pk_arr=pk12, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, + cosmo=cosmo, is_logp=False) + + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, + pk1_arr=dpk12, pk2_arr=dpk12, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, is_logt=use_log) + return tk3d, pk2d + +def darkemu_pkarr_SSC(cosmo, prof1, deltah=0.02, + log10Mh_min=12.0,log10Mh_max=15.9, + normprof1=False, kmax=2.0, + lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False, highk_HM=True): + """ Returns a 2D array with shape `[na,nk]` describing the + first function :math:`f_1(k,a)` that makes up a factorizable + trispectrum :math:`T(k_1,k_2,a)=f_1(k_1,a)f_2(k_2,a)` The response is + calculated as: + + .. math:: + \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = + \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) + P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) + P_{u,v}(k) + + where the :math:`I^a_b` are defined in the documentation + of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2` and + :math:`b_{u}` and :math:`b_{v}` are the linear halo biases for quantities + :math:`u` and :math:`v`, respectively (zero if they are not clustering). + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + + p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, + the power spectrum stored within `cosmo` will be used. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. 
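The low-k stitching in these functions relies on the perturbation-theory limit of the galaxy-matter response; a standalone sketch of that limit, assuming the Eulerian biases bgE, bgE2 and the slope of the linear spectrum are already in hand:

    def dPgm_ddeltab_lin(bgE, bgE2, pk_lin, dlnpk_dlnk):
        # (47/21 + b2/b1 - b1 - (1/3) dlnP_lin/dlnk) * b1 * P_lin(k)
        return (47.0 / 21.0 + bgE2 / bgE - bgE
                - dlnpk_dlnk / 3.0) * bgE * pk_lin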
+ """ + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status, cosmo=cosmo) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status, cosmo=cosmo) + + k_use = np.exp(lk_arr) + + # Check inputs + if not isinstance(prof1, halos.profiles.HaloProfile): + raise TypeError("prof1 must be of type `HaloProfile`") + + h = cosmo["h"] + k_emu = k_use / h # [h/Mpc] + Omega_m = cosmo["Omega_b"] + cosmo["Omega_c"] + 0.00064/(h**2) + cosmo.compute_linear_power() + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') + + # set cosmology for dark emulator + emu = darkemu_set_cosmology(cosmo) + + # set h-modified cosmology to take finite differencing + hp = h + deltah + hm = h - deltah + cosmo_hp, cosmo_hm = set_hmodified_cosmology(cosmo,deltah) + + emu_p = darkemu_set_cosmology(cosmo_hp) + emu_m = darkemu_set_cosmology(cosmo_hm) + + # Growth factor + Dp = cosmo_hp.growth_factor_unnorm(a_arr) + Dm = cosmo_hm.growth_factor_unnorm(a_arr) + + na = len(a_arr) + nk = len(k_use) + dpk12 = np.zeros([na, nk]) + pk12 = np.zeros([na, nk]) + #dpk34 = np.zeros([na, nk]) + Mfor_hmf = np.linspace(8,17,200) + Mh = np.linspace(log10Mh_min,log10Mh_max,2**5+1) # M_sol/h + M = np.log10(10**Mh/h) + dM = M[1] - M[0] + dlogM = dM + b1_th_tink = np.zeros(len(Mh)) + #b2_th_tink = np.zeros(len(Mh)) + Pth = [0] * len(Mh) + Pnth_hp = [0] * len(Mh) + Pnth_hm = [0] * len(Mh) + Pbin = [0] * len(Mh) + nths = np.zeros(len(Mh)) + + mass_def=halos.MassDef200m() + #mdef_other=halos.MassDef200m() + + hmf_DE = halos.MassFuncDarkEmulator(cosmo,mass_def=mass_def) + hbf = halos.hbias.HaloBiasTinker10(cosmo,mass_def=mass_def) + + #kmax = 2 + if np.any(a_arr < 1/(1+1.48)) or k_use[-1] > kmax: + #hmf = halos.MassFuncTinker10(cosmo,mass_def=mass_def) + nfw = halos.HaloProfileNFW(halos.ConcentrationDuffy08(mass_def), + fourier_analytic=True) + + #nfw = halos.HaloProfileNFW(halos.ConcentrationDiemer15_colossus(mass_def), + # fourier_analytic=True) + hmc = halos.HMCalculator(cosmo, hmf_DE, hbf, mass_def,log10M_min=np.log10(M[0]),log10M_max=np.log10(M[-1])) + + halomod_pk_arr = halos.halomod_power_spectrum(cosmo, hmc, k_use, a_arr, + prof=nfw, prof_2pt=None, + prof2=prof1, p_of_k_a=None, + normprof1=True, normprof2=True, + get_1h=True, get_2h=True, + smooth_transition=None, + supress_1h=None) + + halomod_tk3D, dpk12_halomod = halos.halomod_Tk3D_SSC(cosmo=cosmo, hmc=hmc, + prof1=nfw, + prof2=prof1, + prof12_2pt=None, + normprof1=True, normprof2=True, + lk_arr=np.log(k_use), a_arr=a_arr, + use_log=use_log) + + for ia, aa in enumerate(a_arr): + z = 1. 
/ aa - 1 # dark emulator is valid for 0 =< z <= 1.48 + if z > 1.48: + dpk12[ia, :] = dpk12_halomod[ia, :] + pk12[ia, :] = halomod_pk_arr[ia, :] + print("use halo model for z={:.2f}>1.48".format(z)) + else: + # mass function + dndlog10m_emu = ius(Mfor_hmf ,hmf_DE.get_mass_function(cosmo, 10**Mfor_hmf ,aa)) # Mpc^-3 #ius(np.log10(Mlist), dndm_emu * Mlist * np.log(10) * h ** 3) + + if Mh[0] < 12.0: # Msol/h + Pth12 = emu.get_phm_massthreshold(k_emu,10**12,z) * (1/h)**3 + nths12 = emu.mass_to_dens(10**12,z) * h**3 + Pnth_hp12 = emu_p.get_phm(k_emu*(h/hp),np.log10(nths12*(1/hp)**3),z)*(1/hp)**3 + Pnth_hm12 = emu_m.get_phm(k_emu*(h/hm),np.log10(nths12*(1/hm)**3),z)*(1/hm)**3 + Pbin12 = emu.get_phm_mass(k_emu, 10 ** 12, z) * (1/h)**3 +# else: +# Pth[0] = emu.get_phm_massthreshold(k_emu,10**Mh[0],z) * (1/h)**3 +# nths[0] = emu.mass_to_dens(10**Mh[0],z) * h**3 +# Pnth_hp[0] = emu_p.get_phm(k_emu*(h/hp),np.log10(nths[0]*(1/hp)**3),z)*(1/hp)**3 +# Pnth_hm[0] = emu_m.get_phm(k_emu*(h/hm),np.log10(nths[0]*(1/hm)**3),z)*(1/hm)**3 +# Pbin[0] = emu.get_phm_mass(k_emu, 10 ** Mh[0], z) * (1/h)**3 + + for m in range(0,len(Mh)): + if Mh[m] < 12.0: # Msol/h + Pth[m] = Pth12 * hbf.get_halo_bias(cosmo,(10 ** M1), aa) + Pnth_hp[m] = Pnth_hp[0] + Pnth_hm[m] = Pnth_hm[0] + Pbin[m] = Pbin[0] + else: + Pth[m] = emu.get_phm_massthreshold(k_emu,10**Mh[m],z) * (1/h)**3 + nths[m] = emu.mass_to_dens(10**Mh[m],z) * h**3 + Pnth_hp[m] = emu_p.get_phm(k_emu*(h/hp),np.log10(nths[m]*(1/hp)**3),z)*(1/hp)**3 + Pnth_hm[m] = emu_m.get_phm(k_emu*(h/hm),np.log10(nths[m]*(1/hm)**3),z)*(1/hm)**3 + Pbin[m] = emu.get_phm_mass(k_emu, 10 ** Mh[m], z) * (1/h)**3 + + + + M1 = np.linspace(M[m], M[-1], 2**5+1) + dM1 = M[1] - M[0] + b1_th_tink[m] = integrate.romb(dndlog10m_emu(M1) * hbf.get_halo_bias(cosmo,(10 ** M1), aa), dx = dM1)\ + /integrate.romb(dndlog10m_emu(M1), dx = dM1) + + + Nc = prof1._Nc(10 ** M, aa) + Ns = prof1._Ns(10 ** M, aa) + fc = prof1._fc(aa) + Ng = Nc * (fc + Ns) + Mps = M + dlogM + Mms = M - dlogM + + prof_Mp = prof1.fourier(cosmo, k_use, (10 ** Mps), aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): + prof_Mm = prof1.fourier(cosmo, k_use, (10 ** Mms), aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): + prof = prof1.fourier(cosmo, k_use,(10 ** M), aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): + #uk = prof1._usat_fourier(cosmo, k_use,(10 ** M), aa, mass_def) + #rho_cr = 2.775*h**2*1e11 # M_solMpc^-3 (w/o h in units) + #factor_mat = np.tile(10**M/(Omega_m*rho_cr), (len(k_emu), 1)).transpose() + + dprof_dlogM = (prof_Mp - prof_Mm) / (2 * dlogM)#*np.log(10)) + nth_mat = np.tile(nths, (len(k_use), 1)).transpose() + ng = integrate.romb(dndlog10m_emu(M) * Ng, dx = dM, axis = 0) + bgE = integrate.romb(dndlog10m_emu(M) * Ng * \ + (hbf.get_halo_bias(cosmo,(10 ** M), aa)), dx = dM, axis = 0) / ng + + bgE2 = integrate.romb(dndlog10m_emu(M) * Ng * \ + b2H17(hbf.get_halo_bias(cosmo,(10 ** M), aa)), dx = dM, axis = 0) / ng + bgL = bgE - 1 + + dndlog10m_func_mat = np.tile(dndlog10m_emu(M), (len(k_emu), 1)).transpose() # M_sol,Mpc^-3 + b1E_mat = np.tile((hbf.get_halo_bias(cosmo,(10 ** M), aa)), (len(k_emu), 1)).transpose() + + b1L_th_mat = np.tile(b1_th_tink -1, (len(k_emu), 1)).transpose() + Pgm = integrate.romb(dprof_dlogM * (nth_mat * np.array(Pth)), \ + dx = dM, axis = 0) / ng + + dPhm_db_nfix = (26. / 21.) 
* (np.array(Pnth_hp) - np.array(Pnth_hm)) / \ + (2 * (np.log(Dp[ia]) - np.log(Dm[ia]))) # Mpc^3 + + dnP_hm_db_emu = nth_mat * (dPhm_db_nfix + b1L_th_mat * np.array(Pbin)) # Dless + + dnP_gm_db = integrate.romb(dprof_dlogM * (dnP_hm_db_emu), dx = dM, axis = 0) #Dless + + Pgm_growth = dnP_gm_db / ng - bgL * Pgm # Dless + + Pgm_d = -1. / 3. * np.gradient(np.log(Pgm)) / np.gradient(np.log(k_use)) * Pgm #Dless + + dPgm_db = (Pgm_growth + Pgm_d) + + dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) + + Pgm_lin = bgE * pk2dlin.eval(k_use, aa, cosmo) + dPgm_db_lin = (47/21 + bgE2/bgE - bgE -1/3 * dpklin) * \ + bgE * pk2dlin.eval(k_use, aa, cosmo) + + # stitching + k_switch = 0.08 # [h/Mpc] + kmin = 1e-2 # [h/Mpc] + + dPgm_db = dPgm_db_lin * np.exp(-k_emu/k_switch) + \ + (Pgm_growth + Pgm_d) * (1 - np.exp(-k_emu/k_switch)) + + Pgm = Pgm_lin * np.exp(-k_emu/k_switch) + \ + Pgm * (1 - np.exp(-k_emu/k_switch)) + + # use linear theory below kmin + dPgm_db[k_emu < kmin] = dPgm_db_lin[k_emu < kmin] + dpk12[ia, :] = dPgm_db + + Pgm[k_emu < kmin] = Pgm_lin[k_emu < kmin] + pk12[ia, :] = Pgm + + # use Halo Model above kmax + if k_use[-1] > kmax: + #i12 = integrate.romb(dndlog10m_func_mat * b1E_mat * prof * factor_mat * uk, dx = dM, axis = 0) /ng + + #i02 = integrate.romb(dndlog10m_func_mat * prof * factor_mat * uk, dx = dM, axis = 0) /ng + #HM_1h_resp = i12 - bgE * i02 + k_HM = 1 + dPgm_db1 = dPgm_db * np.exp(-k_use/k_HM) + \ + HM_1h_resp * (1 - np.exp(-k_use/k_HM)) + dPgm_db[k_use > kmax] = dPgm_db1[k_use > kmax] + + + if use_log: + if np.any(dpk12 <= 0): + warnings.warn( + "Some values were not positive. " + "The negative values are substituted by 1e-5.", + category=CCLWarning) + np.where(dpk12 <= 0, 1e-5, dpk12) + + dpk12 = np.log(dpk12) + + pk2d = Pk2D(a_arr=a_arr, lk_arr=lk_arr, pk_arr=pk12, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, + cosmo=cosmo, is_logp=False) + + return dpk12, pk2d + + +def halomod_Tk3D_SSC(cosmo, prof1, + normprof1=False, + lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False): + + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status, cosmo=cosmo) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status, cosmo=cosmo) + + # Check inputs + if not isinstance(prof1, halos.profiles.HaloProfile): + raise TypeError("prof1 must be of type `HaloProfile`") + + k_use = np.exp(lk_arr) + na = len(a_arr) + nk = len(k_use) + dpk12 = np.zeros([na, nk]) + + mass_def=halos.MassDef200m() + hbf = halos.hbias.HaloBiasTinker10(cosmo,mass_def=mass_def) + hmf = halos.MassFuncTinker10(cosmo,mass_def=mass_def) + nfw = halos.HaloProfileNFW(halos.ConcentrationDuffy08(mass_def), + fourier_analytic=True) + hmc = halos.HMCalculator(cosmo, hmf, hbf, mass_def) + + halomod_tk3D, dpk12_halomod = halos.halomod_Tk3D_SSC(cosmo=cosmo, hmc=hmc, + prof1=nfw, + prof2=prof1, + prof12_2pt=None, + normprof1=True, normprof2=True, + lk_arr=np.log(k_use), a_arr=a_arr, + use_log=use_log) + + for ia, aa in enumerate(a_arr): + dpk12[ia, :] = dpk12_halomod[ia, :] #np.sqrt(np.diag(halomod_tk3D.eval(k=k_use, a=aa))) + + if use_log: + if np.any(dpk12 <= 0): + warnings.warn( + "Some values were not positive. 
" + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + dpk12 = np.log(dpk12) + + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, + pk1_arr=dpk12, pk2_arr=dpk12, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, is_logt=use_log) + return tk3d + + + +def b2H17(b1):#H17 + b2 = 0.77 - (2.43 * b1) + ( b1 * b1) + return b2 + + +def b2L16(b1):#L16 + b2 = 0.412 - (2.143 * b1) + (0.929 * b1 * b1) + (0.008 * b1 * b1 * b1) + return b2 + +def darkemu_set_cosmology(cosmo): + Omega_c = cosmo["Omega_c"] + Omega_b = cosmo["Omega_b"] + h = cosmo["h"] + n_s = cosmo["n_s"] + A_s = cosmo["A_s"] + + omega_c = Omega_c * h ** 2 + omega_b = Omega_b * h ** 2 + omega_nu = 0.00064 + Omega_L = 1 - ((omega_c + omega_b + omega_nu) / h **2) + + emu = darkemu.de_interface.base_class() + + #Parameters cparam (numpy array) : Cosmological parameters (𝜔𝑏, 𝜔𝑐, Ω𝑑𝑒, ln(10^10 𝐴𝑠), 𝑛𝑠, 𝑤) + cparam = np.array([omega_b,omega_c,Omega_L,np.log(10 ** 10 * A_s),n_s,-1.]) + emu.set_cosmology(cparam) + + return emu + +def set_hmodified_cosmology(cosmo,deltah): + Omega_c = cosmo["Omega_c"] + Omega_b = cosmo["Omega_b"] + h = cosmo["h"] + n_s = cosmo["n_s"] + A_s = cosmo["A_s"] + + hp = h + deltah + Omega_c_p = np.power((h/hp),2) * Omega_c #\Omega_c h^2 is fixed + Omega_b_p = np.power((h/hp),2) * Omega_b #\Omega_b h^2 is fixed + + hm = h - deltah + Omega_c_m = np.power((h/hm),2) * Omega_c #\Omega_c h^2 is fixed + Omega_b_m = np.power((h/hm),2) * Omega_b #\Omega_b h^2 is fixed + + cosmo_hp = core.Cosmology(Omega_c=Omega_c_p,Omega_b=Omega_b_p, + h=hp, n_s=n_s, A_s=A_s) + + cosmo_hm = core.Cosmology(Omega_c=Omega_c_m,Omega_b=Omega_b_m, + h=hm, n_s=n_s, A_s=A_s) + + return cosmo_hp, cosmo_hm + +def darkemu_Tk3D_SSC_test(cosmo, prof1, deltah=0.02, + log10Mh_min=12.0,log10Mh_max=15.9, + normprof1=False, + lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False): + + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status, cosmo=cosmo) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status, cosmo=cosmo) + + k_use = np.exp(lk_arr) + + # Check inputs + if not isinstance(prof1, halos.profiles.HaloProfile): + raise TypeError("prof1 must be of type `HaloProfile`") + + h = cosmo["h"] + k_emu = k_use / h # [h/Mpc] + + cosmo.compute_linear_power() + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') + + # set cosmology for dark emulator + emu = darkemu_set_cosmology(cosmo) + + # set h-modified cosmology to take finite differencing + hp = h + deltah + hm = h - deltah + cosmo_hp, cosmo_hm = set_hmodified_cosmology(cosmo,deltah) + + emu_p = darkemu_set_cosmology(cosmo_hp) + emu_m = darkemu_set_cosmology(cosmo_hm) + + # Growth factor + Dp = cosmo_hp.growth_factor_unnorm(a_arr) + Dm = cosmo_hm.growth_factor_unnorm(a_arr) + + na = len(a_arr) + nk = len(k_use) + dpk12 = np.zeros([na, nk]) + pk12 = np.zeros([na, nk]) + #dpk34 = np.zeros([na, nk]) + Mfor_hmf = np.linspace(8,16,200) + Mh = np.linspace(log10Mh_min,log10Mh_max,2**5+1)#M_sol/h + dMh = Mh[1] - Mh[0] + dlogM = dMh + b1_th_tink = np.zeros(len(Mh)) + #b2_th_tink = np.zeros(len(Mh)) + Pth = [0] * len(Mh) + Pnth_hp = [0] * len(Mh) + Pnth_hm = [0] * len(Mh) + Pbin = [0] * len(Mh) + nths = np.zeros(len(Mh)) + + mass_def=halos.MassDef200m() + hmf_DE = halos.MassFuncDarkEmulator(cosmo,mass_def=mass_def) + hbf = 
halos.hbias.HaloBiasTinker10(cosmo,mass_def=mass_def) + + if np.any(a_arr < 1/(1+1.48)): + hmf = halos.MassFuncTinker10(cosmo,mass_def=mass_def) + nfw = halos.HaloProfileNFW(halos.ConcentrationDuffy08(mass_def), + fourier_analytic=True) + hmc = halos.HMCalculator(cosmo, hmf, hbf, mass_def) + + halomod_pk_arr = halos.halomod_power_spectrum(cosmo, hmc, k_use, a_arr, + prof=nfw, prof_2pt=None, + prof2=prof1, p_of_k_a=None, + normprof1=True, normprof2=True, + get_1h=True, get_2h=True, + smooth_transition=None, + supress_1h=None) + + halomod_tk3D, dpk12_halomod = halos.halomod_Tk3D_SSC(cosmo=cosmo, hmc=hmc, + prof1=nfw, + prof2=prof1, + prof12_2pt=None, + normprof1=True, normprof2=True, + lk_arr=np.log(k_use), a_arr=a_arr, + use_log=use_log) + + for ia, aa in enumerate(a_arr): + z = 1. / aa - 1 # dark emulator is valid for 0 =< z <= 1.48 + if z > 1.48: + dpk12[ia, :] = dpk12_halomod[ia, :] + pk12[ia, :] = halomod_pk_arr[ia, :] + print("use halo model for z={:.2f}>1.48".format(z)) + else: + # mass function + #dndlog10m_emu = ius(Mfor_hmf ,hmf_DE.get_mass_function(cosmo, 10**Mfor_hmf ,aa)) # Mpc^-3 #ius(np.log10(Mlist), dndm_emu * Mlist * np.log(10) * h ** 3) + Mlist, dndm_emu = emu.get_dndm(z) # Mlist [Msol/h] + dndlog10m_emu = ius(np.log10(Mlist), dndm_emu * Mlist * np.log(10) * h ** 3) + + + for m in range(len(Mh)): + Pth[m] = emu.get_phm_massthreshold(k_emu,10**Mh[m],z) * (1/h)**3 + nths[m] = emu.mass_to_dens(10**Mh[m],z) * h**3 + + Pnth_hp[m] = emu_p.get_phm(k_emu*(h/hp),np.log10(nths[m]*(1/hp)**3),z)*(1/hp)**3 + Pnth_hm[m] = emu_m.get_phm(k_emu*(h/hm),np.log10(nths[m]*(1/hm)**3),z)*(1/hm)**3 + Pbin[m] = emu.get_phm_mass(k_emu, 10 ** Mh[m], z) * (1/h)**3 + + Mh1 = np.linspace(Mh[m],15.9,2**5+1) + dMh1 = Mh[1] - Mh[0] + b1_th_tink[m] = integrate.romb(dndlog10m_emu(Mh1) * hbf.get_halo_bias(cosmo,(10 ** Mh1) / h, aa), dx = dMh1)\ + /integrate.romb(dndlog10m_emu(Mh1), dx = dMh1) + + #b2_th_tink[m] = integrate.romb(dndlog10m_emu(Mh1) * b2H17(hbf.get_halo_bias(cosmo,(10 ** Mh1) / h, aa)), dx = dMh1)\ + # /integrate.romb(dndlog10m_emu(Mh1), dx = dMh1) + + Nc = prof1._Nc(10 ** Mh / h, aa) + Ns = prof1._Ns(10 ** Mh / h, aa) + fc = prof1._fc(aa) + Ng = Nc * (fc + Ns) + Mps = Mh + dlogM + Mms = Mh - dlogM + + prof_Mp = prof1.fourier(cosmo, k_use, (10 ** Mps) / h, aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): + prof_Mm = prof1.fourier(cosmo, k_use, (10 ** Mms) / h, aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): + prof = prof1.fourier(cosmo, k_use,(10 ** Mh) / h, aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): + + dprof_dlogM = (prof_Mp - prof_Mm) / (2 * dlogM)#*np.log(10)) + nth_mat = np.tile(nths, (len(k_use), 1)).transpose() + ng = integrate.romb(dndlog10m_emu(Mh) * Ng, dx = dMh, axis = 0) + bgE = integrate.romb(dndlog10m_emu(Mh) * Ng * \ + (hbf.get_halo_bias(cosmo,(10 ** Mh) / h, aa)), dx = dMh, axis = 0) / ng + + bgE2 = integrate.romb(dndlog10m_emu(Mh) * Ng * \ + b2H17(hbf.get_halo_bias(cosmo,(10 ** Mh) / h, aa)), dx = dMh, axis = 0) / ng + bgL = bgE - 1 + + dndlog10m_func_mat = np.tile(dndlog10m_emu(Mh), (len(k_emu), 1)).transpose() # M_sol,Mpc^-3 + b1L_th_mat = np.tile(b1_th_tink -1, (len(k_emu), 1)).transpose() + Pgm = integrate.romb(dprof_dlogM * (nth_mat * np.array(Pth)), \ + dx = dMh, axis = 0) / ng + + dPhm_db_nfix = (26. / 21.) 
* (np.array(Pnth_hp) - np.array(Pnth_hm)) / \ + (2 * (np.log(Dp[ia]) - np.log(Dm[ia]))) # Mpc^3 + + dnP_hm_db_emu = nth_mat * (dPhm_db_nfix + b1L_th_mat * np.array(Pbin)) # Dless + + # stitching + k_switch = 0.08 # [h/Mpc] + kmin = 1e-2 # [h/Mpc] + dnP_gm_db = integrate.romb(dprof_dlogM * (dnP_hm_db_emu), dx = dMh, axis = 0) #Dless + + Pgm_growth = dnP_gm_db / ng - bgL * Pgm # Dless + + Pgm_d = -1. / 3. * np.gradient(np.log(Pgm)) / np.gradient(np.log(k_emu)) * Pgm #Dless + + dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) + + Pgm_lin = bgE * pk2dlin.eval(k_use, aa, cosmo) + dPgm_db_lin = (47/21 + bgE2/bgE - bgE -1/3 * dpklin) * \ + bgE * pk2dlin.eval(k_use, aa, cosmo) + dPgm_db = dPgm_db_lin * np.exp(-k_emu/k_switch) + \ + (Pgm_growth + Pgm_d) * (1 - np.exp(-k_emu/k_switch)) + + Pgm = Pgm_lin * np.exp(-k_emu/k_switch) + \ + Pgm * (1 - np.exp(-k_emu/k_switch)) + + # use linear theory below kmin + dPgm_db[k_emu < kmin] = dPgm_db_lin[k_emu < kmin] + dpk12[ia, :] = dPgm_db + + Pgm[k_emu < kmin] = Pgm_lin[k_emu < kmin] + pk12[ia, :] = Pgm + + + if use_log: + if np.any(dpk12 <= 0): + warnings.warn( + "Some values were not positive. " + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + dpk12 = np.log(dpk12) + + pk2d = Pk2D(a_arr=a_arr, lk_arr=lk_arr, pk_arr=pk12, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, + cosmo=cosmo, is_logp=False) + + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, + pk1_arr=dpk12, pk2_arr=dpk12, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, is_logt=use_log) + return tk3d, pk2d + +def darkemu_Tk3D_SSC_old(cosmo, prof1, deltah=0.02, + normprof1=False, + lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False): + """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing + the super-sample covariance trispectrum, given by the tensor + product of the power spectrum responses associated with the + two pairs of quantities being correlated. Each response is + calculated as: + + .. math:: + \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = + \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) + P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) + P_{u,v}(k) + + where the :math:`I^a_b` are defined in the documentation + of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2` and + :math:`b_{u}` and :math:`b_{v}` are the linear halo biases for quantities + :math:`u` and :math:`v`, respectively (zero if they are not clustering). + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + prof2 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_2` above. If `None`, + `prof1` will be used as `prof2`. + prof12_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + a profile covariance object returning the the two-point + moment of `prof1` and `prof2`. If `None`, the default + second moment will be used, corresponding to the + products of the means of both profiles. + prof3 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_1` above. If `None`, + `prof1` will be used as `prof3`. + prof4 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_2` above. If `None`, + `prof3` will be used as `prof4`. 
+ prof34_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof12_2pt` for `prof3` and `prof4`. + normprof1 (bool): if `True`, this integral will be + normalized by :math:`I^0_1(k\\rightarrow 0,a|u)` + (see :meth:`~HMCalculator.I_0_1`), where + :math:`u` is the profile represented by `prof1`. + normprof2 (bool): same as `normprof1` for `prof2`. + normprof3 (bool): same as `normprof1` for `prof3`. + normprof4 (bool): same as `normprof1` for `prof4`. + p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, + the power spectrum stored within `cosmo` will be used. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. + """ + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status, cosmo=cosmo) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status, cosmo=cosmo) + + k_use = np.exp(lk_arr) + + # Check inputs + if not isinstance(prof1, halos.profiles.HaloProfile): + raise TypeError("prof1 must be of type `HaloProfile`") + + h = cosmo["h"] + k_emu = k_use / h # [h/Mpc] + + cosmo.compute_linear_power() + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') + + # set cosmology for dark emulator + emu = darkemu_set_cosmology(cosmo) + + # set h-modified cosmology to take finite differencing + hp = h + deltah + hm = h - deltah + cosmo_hp, cosmo_hm = set_hmodified_cosmology(cosmo,deltah) + + emu_p = darkemu_set_cosmology(cosmo_hp) + emu_m = darkemu_set_cosmology(cosmo_hm) + + # Growth factor + Dp = cosmo_hp.growth_factor_unnorm(a_arr) + Dm = cosmo_hm.growth_factor_unnorm(a_arr) + + na = len(a_arr) + nk = len(k_use) + dpk12 = np.zeros([na, nk]) + pk12 = np.zeros([na, nk]) + #dpk34 = np.zeros([na, nk]) + + Mh = np.linspace(12.,15.9,2**5+1)#M_sol/h + dMh = Mh[1] - Mh[0] + dlogM = dMh + b1_th_tink = np.zeros(len(Mh)) + #b2_th_tink = np.zeros(len(Mh)) + Pth = [0] * len(Mh) + Pnth_hp = [0] * len(Mh) + Pnth_hm = [0] * len(Mh) + Pbin = [0] * len(Mh) + nths = np.zeros(len(Mh)) + + mass_def=halos.MassDef200m() + hbf = halos.hbias.HaloBiasTinker10(cosmo,mass_def=mass_def) + hmf = halos.MassFuncTinker10(cosmo,mass_def=mass_def) + nfw = halos.HaloProfileNFW(halos.ConcentrationDuffy08(mass_def), + fourier_analytic=True) + hmc = halos.HMCalculator(cosmo, hmf, hbf, mass_def) + + halomod_pk_arr = halos.halomod_power_spectrum(cosmo, hmc, k_use, a_arr, + prof=nfw, prof_2pt=None, + prof2=prof1, p_of_k_a=None, + normprof1=True, normprof2=True, + get_1h=True, get_2h=True, + smooth_transition=None, + supress_1h=None) + 
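+    # Schematic summary of what the redshift loop below computes: the
+    # emulator-based galaxy-matter response is assembled as
+    #   dP_gm/d(delta_b) = Pgm_growth + Pgm_d,
+    # where Pgm_growth is the number-density-fixed response, built from the
+    # two-sided finite difference of P_hm between the h+deltah and h-deltah
+    # cosmologies (at fixed Omega_c h^2 and Omega_b h^2), minus bgL * Pgm,
+    # and Pgm_d = -(1/3) dlnP_gm/dlnk * P_gm is the dilation term.  Below
+    # k_switch the result is stitched to the linear-theory response
+    #   (47/21 + bgE2/bgE - bgE - (1/3) dlnP_lin/dlnk) * bgE * P_lin,
+    # and pure linear theory is used below kmin.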
+ halomod_tk3D, dpk12_halomod = halos.halomod_Tk3D_SSC(cosmo=cosmo, hmc=hmc, + prof1=nfw, + prof2=prof1, + prof12_2pt=None, + normprof1=True, normprof2=True, + lk_arr=np.log(k_use), a_arr=a_arr, + use_log=use_log) + + for ia, aa in enumerate(a_arr): + z = 1. / aa - 1 # dark emulator is valid for 0 =< z <= 1.48 + if z > 1.48: + dpk12[ia, :] = dpk12_halomod[ia, :] + pk12[ia, :] = halomod_pk_arr[ia, :] + print("use halo model for z={:.2f}>1.48".format(z)) + else: + # mass function + Mlist, dndm_emu = emu.get_dndm(z) # Mlist [Msol/h] + dndlog10m_emu = ius(np.log10(Mlist), dndm_emu * Mlist * np.log(10) * h ** 3) + + for m in range(len(Mh)): + Pth[m] = emu.get_phm_massthreshold(k_emu,10**Mh[m],z) * (1/h)**3 + nths[m] = emu.mass_to_dens(10**Mh[m],z) * h**3 + + Pnth_hp[m] = emu_p.get_phm(k_emu*(h/hp),np.log10(nths[m]*(1/hp)**3),z)*(1/hp)**3 + Pnth_hm[m] = emu_m.get_phm(k_emu*(h/hm),np.log10(nths[m]*(1/hm)**3),z)*(1/hm)**3 + Pbin[m] = emu.get_phm_mass(k_emu, 10 ** Mh[m], z) * (1/h)**3 + + Mh1 = np.linspace(Mh[m],15.9,2**5+1) + dMh1 = Mh[1] - Mh[0] + b1_th_tink[m] = integrate.romb(dndlog10m_emu(Mh1) * hbf.get_halo_bias(cosmo,(10 ** Mh1) / h, aa), dx = dMh1)\ + /integrate.romb(dndlog10m_emu(Mh1), dx = dMh1) + + #b2_th_tink[m] = integrate.romb(dndlog10m_emu(Mh1) * b2H17(hbf.get_halo_bias(cosmo,(10 ** Mh1) / h, aa)), dx = dMh1)\ + # /integrate.romb(dndlog10m_emu(Mh1), dx = dMh1) + + Nc = prof1._Nc(10 ** Mh / h, aa) + Ns = prof1._Ns(10 ** Mh / h, aa) + fc = prof1._fc(aa) + Ng = Nc * (fc + Ns) + Mps = Mh + dlogM + Mms = Mh - dlogM + + prof_Mp = prof1.fourier(cosmo, k_use, (10 ** Mps) / h, aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): + prof_Mm = prof1.fourier(cosmo, k_use, (10 ** Mms) / h, aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): + prof = prof1.fourier(cosmo, k_use,(10 ** Mh) / h, aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): + + dprof_dlogM = (prof_Mp - prof_Mm) / (2 * dlogM)#*np.log(10)) + nth_mat = np.tile(nths, (len(k_use), 1)).transpose() + ng = integrate.romb(dndlog10m_emu(Mh) * Ng, dx = dMh, axis = 0) + bgE = integrate.romb(dndlog10m_emu(Mh) * Ng * \ + (hbf.get_halo_bias(cosmo,(10 ** Mh) / h, aa)), dx = dMh, axis = 0) / ng + + bgE2 = integrate.romb(dndlog10m_emu(Mh) * Ng * \ + b2H17(hbf.get_halo_bias(cosmo,(10 ** Mh) / h, aa)), dx = dMh, axis = 0) / ng + bgL = bgE - 1 + + dndlog10m_func_mat = np.tile(dndlog10m_emu(Mh), (len(k_emu), 1)).transpose() # M_sol,Mpc^-3 + b1L_th_mat = np.tile(b1_th_tink -1, (len(k_emu), 1)).transpose() + Pgm = integrate.romb(dprof_dlogM * (nth_mat * np.array(Pth)), \ + dx = dMh, axis = 0) / ng + + dPhm_db_nfix = (26. / 21.) * (np.array(Pnth_hp) - np.array(Pnth_hm)) / \ + (2 * (np.log(Dp[ia]) - np.log(Dm[ia]))) # Mpc^3 + + dnP_hm_db_emu = nth_mat * (dPhm_db_nfix + b1L_th_mat * np.array(Pbin)) # Dless + + # stitching + k_switch = 0.08 # [h/Mpc] + kmin = 1e-2 # [h/Mpc] + dnP_gm_db = integrate.romb(dprof_dlogM * (dnP_hm_db_emu), dx = dMh, axis = 0) #Dless + + Pgm_growth = dnP_gm_db / ng - bgL * Pgm # Dless + + Pgm_d = -1. / 3. 
* np.gradient(np.log(Pgm)) / np.gradient(np.log(k_emu)) * Pgm #Dless + + dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) + + Pgm_lin = bgE * pk2dlin.eval(k_use, aa, cosmo) + dPgm_db_lin = (47/21 + bgE2/bgE - bgE -1/3 * dpklin) * \ + bgE * pk2dlin.eval(k_use, aa, cosmo) + dPgm_db = dPgm_db_lin * np.exp(-k_emu/k_switch) + \ + (Pgm_growth + Pgm_d) * (1 - np.exp(-k_emu/k_switch)) + + Pgm = Pgm_lin * np.exp(-k_emu/k_switch) + \ + Pgm * (1 - np.exp(-k_emu/k_switch)) + + # use linear theory below kmin + dPgm_db[k_emu < kmin] = dPgm_db_lin[k_emu < kmin] + dpk12[ia, :] = dPgm_db + + Pgm[k_emu < kmin] = Pgm_lin[k_emu < kmin] + pk12[ia, :] = Pgm + + + if use_log: + if np.any(dpk12 <= 0): + warnings.warn( + "Some values were not positive. " + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + dpk12 = np.log(dpk12) + + pk2d = Pk2D(a_arr=a_arr, lk_arr=lk_arr, pk_arr=pk12, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, + cosmo=cosmo, is_logp=False) + + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, + pk1_arr=dpk12, pk2_arr=dpk12, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, is_logt=use_log) + return tk3d, pk2d + + diff --git a/pyccl/halos/__init__.py b/pyccl/halos/__init__.py index 244966eef..16716b385 100644 --- a/pyccl/halos/__init__.py +++ b/pyccl/halos/__init__.py @@ -12,6 +12,8 @@ # Halo mass-concentration relations from .concentration import ( Concentration, + ConcentrationDiemer15_colossus, + ConcentrationDiemer15_ius, ConcentrationDiemer15, ConcentrationBhattacharya13, ConcentrationPrada12, @@ -34,6 +36,7 @@ MassFuncAngulo12, MassFuncDespali16, MassFuncBocquet16, + MassFuncDarkEmulator, mass_function_from_name, ) @@ -95,7 +98,7 @@ 'MassFunc', 'MassFuncPress74', 'MassFuncSheth99', 'MassFuncJenkins01', 'MassFuncTinker08', 'MassFuncTinker10', 'MassFuncWatson13', 'MassFuncAngulo12', 'MassFuncDespali16', 'MassFuncBocquet16', - 'MassFuncBocquet20', 'mass_function_from_name', + 'MassFuncBocquet20', 'MassFuncDarkEmulator', 'mass_function_from_name', 'HaloBias', 'HaloBiasSheth99', 'HaloBiasSheth01', 'HaloBiasTinker10', 'HaloBiasBhattacharya11', 'halo_bias_from_name', 'HaloProfile', 'HaloProfileGaussian', 'HaloProfilePowerLaw', diff --git a/pyccl/halos/concentration.py b/pyccl/halos/concentration.py index d095d460c..ab73a3478 100644 --- a/pyccl/halos/concentration.py +++ b/pyccl/halos/concentration.py @@ -2,11 +2,20 @@ from ..pyutils import check from ..background import growth_factor, growth_rate from .massdef import MassDef, mass2radius_lagrangian -from ..power import linear_matter_power, sigmaM +from ..power import linear_matter_power, sigmaM, sigma8 import numpy as np from scipy.optimize import brentq, root_scalar import functools +# Terasawa +from scipy import optimize +from colossus.cosmology import cosmology as colcosmology +from colossus.halo import concentration as colconcentration +from scipy.interpolate import InterpolatedUnivariateSpline as ius + + +# + class Concentration(object): """ This class enables the calculation of halo concentrations. 
@@ -18,13 +27,15 @@ class Concentration(object): """ name = 'default' - def __init__(self, mass_def=None): + def __init__(self, mass_def=None, mdef_other=None): if mass_def is not None: if self._check_mdef(mass_def): - raise ValueError( - f"Mass definition {mass_def.Delta}-{mass_def.rho_type} " - f"is not compatible with c(M) {self.name} configuration.") + if mdef_other is None: + raise ValueError( + f"Mass definition {mass_def.Delta}-{mass_def.rho_type} " + f"is not compatible with c(M) {self.name} configuration.") self.mdef = mass_def + self.mdef_other = mdef_other else: self._default_mdef() self._setup() @@ -74,10 +85,11 @@ def _get_consistent_mass(self, cosmo, M, a, mdef_other): float or array_like: mass according to this object's mass definition. """ - if mdef_other is not None: - M_use = mdef_other.translate_mass(cosmo, M, a, self.mdef) - else: - M_use = M +# if mdef_other is not None: +# M_use = mdef_other.translate_mass(cosmo, M, a, self.mdef) +# else: +# M_use = M + M_use = M return M_use def get_concentration(self, cosmo, M, a, mdef_other=None): @@ -119,7 +131,7 @@ def from_name(cls, name): raise ValueError(f"Concentration {name} not implemented.") -class ConcentrationDiemer15(Concentration): +class ConcentrationDiemer15_colossus(Concentration): """ Concentration-mass relation by Diemer & Kravtsov 2015 (arXiv:1407.4730). This parametrization is only valid for S.O. masses with Delta = 200-critical. @@ -133,7 +145,137 @@ class ConcentrationDiemer15(Concentration): name = 'Diemer15' def __init__(self, mdef=None): - super(ConcentrationDiemer15, self).__init__(mdef) + super(ConcentrationDiemer15_colossus, self).__init__(mdef) + + def _default_mdef(self): + self.mdef = MassDef(200, 'matter') + + def _setup(self): + self.kappa = 1.0 + self.phi_0 = 6.58 + self.phi_1 = 1.27 + self.eta_0 = 7.28 + self.eta_1 = 1.56 + self.alpha = 1.08 + self.beta = 1.77 + + def _check_mdef(self, mdef): + if isinstance(mdef.Delta, str): + return True + elif not ((int(mdef.Delta) == 200) and + (mdef.rho_type == 'matter')): + return True + return False + + def _concentration(self, cosmo, M, a):#='diemer15' + Oc0 = cosmo["Omega_c"] + Ob0 = cosmo["Omega_b"] + h = cosmo["h"] + n_s = cosmo["n_s"] + if cosmo["sigma8"] is None: + self.sigma8 = sigma8(cosmo) + else: + self.sigma8 = cosmo["sigma8"] + H0 = 100 * h + Om0 = Oc0 + Ob0 + Mh = M * h # Msol/h + Mh_int = np.logspace(12,17) + params = {'flat': True, 'H0': H0, 'Om0': Om0, + 'Ob0': Ob0, 'sigma8': self.sigma8, 'ns': n_s, 'persistence': ''} + colcosmo = colcosmology.setCosmology('myCosmo', params) + c = np.zeros(len(Mh_int)) + for i in range(len(Mh_int)): + c[i] = colconcentration.concentration(Mh_int[i], '200m', z=1./a -1, model="diemer15") + + c_func = ius(Mh_int,c) + c_out = c_func(Mh) + return c_out + + +class ConcentrationDiemer15_ius(Concentration): + """ Concentration-mass relation by Diemer & Kravtsov 2015 + (arXiv:1407.4730). This parametrization is only valid for + S.O. masses with Delta = 200-critical. + + Args: + mdef (:class:`~pyccl.halos.massdef.MassDef`): + a mass definition object that fixes + the mass definition used by this c(M) + parametrization. 
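+
+    In this variant the concentration is evaluated on an internal mass grid
+    and spline-interpolated (via `InterpolatedUnivariateSpline`) to the
+    requested masses; if `mdef_other` is given, the grid masses are first
+    translated from this object's mass definition to `mdef_other` before
+    interpolating.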
+ """ + name = 'Diemer15' + + def __init__(self, mdef=None, mdef_other=None): + super(ConcentrationDiemer15_ius, self).__init__(mdef,mdef_other) + + def _default_mdef(self): + self.mdef = MassDef(200, 'critical') + + def _setup(self): + self.kappa = 1.0 + self.phi_0 = 6.58 + self.phi_1 = 1.27 + self.eta_0 = 7.28 + self.eta_1 = 1.56 + self.alpha = 1.08 + self.beta = 1.77 + + def _check_mdef(self, mdef): + if isinstance(mdef.Delta, str): + return True + elif not ((int(mdef.Delta) == 200) and + (mdef.rho_type == 'critical')): + return True + return False + + def _concentration(self, cosmo, M, a): + M_int = np.logspace(7,17) #np.atleast_1d(M) + + # Compute power spectrum slope + R = mass2radius_lagrangian(cosmo, M_int) + lk_R = np.log(2.0 * np.pi / R * self.kappa) + # Using central finite differences + lk_hi = lk_R + 0.005 + lk_lo = lk_R - 0.005 + dlpk = np.log(linear_matter_power(cosmo, np.exp(lk_hi), a) / + linear_matter_power(cosmo, np.exp(lk_lo), a)) + dlk = lk_hi - lk_lo + n = dlpk / dlk + + sig = sigmaM(cosmo, M_int, a) + delta_c = 1.68647 + nu = delta_c / sig + + floor = self.phi_0 + n * self.phi_1 + nu0 = self.eta_0 + n * self.eta_1 + c = 0.5 * floor * ((nu0 / nu)**self.alpha + + (nu / nu0)**self.beta) + #if np.ndim(M) == 0: + # c = c[0] + + if self.mdef_other is not None: + M_other = self.mdef.translate_mass(cosmo, M_int, a, self.mdef_other) # mdef into m_def_other (:obj:`MassDef`): another mass definition. + + c_func = ius(M_other,c) + c_out = c_func(M) + + return c_out + +class ConcentrationDiemer15(Concentration): + """ Concentration-mass relation by Diemer & Kravtsov 2015 + (arXiv:1407.4730). This parametrization is only valid for + S.O. masses with Delta = 200-critical. + + Args: + mdef (:class:`~pyccl.halos.massdef.MassDef`): + a mass definition object that fixes + the mass definition used by this c(M) + parametrization. + """ + name = 'Diemer15' + + def __init__(self, mdef=None, mdef_other=None): + super(ConcentrationDiemer15, self).__init__(mdef,mdef_other) def _default_mdef(self): self.mdef = MassDef(200, 'critical') @@ -182,7 +324,6 @@ def _concentration(self, cosmo, M, a): return c - class ConcentrationBhattacharya13(Concentration): """ Concentration-mass relation by Bhattacharya et al. 2013 (arXiv:1112.5479). This parametrization is only valid for diff --git a/pyccl/halos/halo_model.py b/pyccl/halos/halo_model.py index bc539b16e..ac2018225 100644 --- a/pyccl/halos/halo_model.py +++ b/pyccl/halos/halo_model.py @@ -1412,4 +1412,250 @@ def get_norm(normprof, prof, sf): pk1_arr=dpk12, pk2_arr=dpk34, extrap_order_lok=extrap_order_lok, extrap_order_hik=extrap_order_hik, is_logt=use_log) - return tk3d + return tk3d, dpk12 + +def halomod_Tk3D_SSC_debug(cosmo, hmc, + prof1, prof2=None, prof12_2pt=None, + prof3=None, prof4=None, prof34_2pt=None, + normprof1=False, normprof2=False, + normprof3=False, normprof4=False, + p_of_k_a=None, lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False): + """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing + the super-sample covariance trispectrum, given by the tensor + product of the power spectrum responses associated with the + two pairs of quantities being correlated. Each response is + calculated as: + + .. 
math:: + \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = + \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) + P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) + P_{u,v}(k) + + where the :math:`I^a_b` are defined in the documentation + of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2` and + :math:`b_{u}` and :math:`b_{v}` are the linear halo biases for quantities + :math:`u` and :math:`v`, respectively (zero if they are not clustering). + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + prof2 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_2` above. If `None`, + `prof1` will be used as `prof2`. + prof12_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + a profile covariance object returning the the two-point + moment of `prof1` and `prof2`. If `None`, the default + second moment will be used, corresponding to the + products of the means of both profiles. + prof3 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_1` above. If `None`, + `prof1` will be used as `prof3`. + prof4 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_2` above. If `None`, + `prof3` will be used as `prof4`. + prof34_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof12_2pt` for `prof3` and `prof4`. + normprof1 (bool): if `True`, this integral will be + normalized by :math:`I^0_1(k\\rightarrow 0,a|u)` + (see :meth:`~HMCalculator.I_0_1`), where + :math:`u` is the profile represented by `prof1`. + normprof2 (bool): same as `normprof1` for `prof2`. + normprof3 (bool): same as `normprof1` for `prof3`. + normprof4 (bool): same as `normprof1` for `prof4`. + p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, + the power spectrum stored within `cosmo` will be used. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. 
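+
+    Example (illustrative sketch only; `cosmo`, `hmc` and the galaxy
+    profile `prof_gal` are assumed to have been constructed elsewhere)::
+
+        >>> tk3d, dpk12 = halomod_Tk3D_SSC_debug(cosmo, hmc, prof_gal,
+        ...                                      normprof1=True)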
+ """ + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status, cosmo=cosmo) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status, cosmo=cosmo) + + k_use = np.exp(lk_arr) + + # Check inputs + if not isinstance(prof1, HaloProfile): + raise TypeError("prof1 must be of type `HaloProfile`") + if (prof2 is not None) and (not isinstance(prof2, HaloProfile)): + raise TypeError("prof2 must be of type `HaloProfile` or `None`") + if (prof3 is not None) and (not isinstance(prof3, HaloProfile)): + raise TypeError("prof3 must be of type `HaloProfile` or `None`") + if (prof4 is not None) and (not isinstance(prof4, HaloProfile)): + raise TypeError("prof4 must be of type `HaloProfile` or `None`") + if prof12_2pt is None: + prof12_2pt = Profile2pt() + elif not isinstance(prof12_2pt, Profile2pt): + raise TypeError("prof12_2pt must be of type " + "`Profile2pt` or `None`") + if (prof34_2pt is not None) and (not isinstance(prof34_2pt, Profile2pt)): + raise TypeError("prof34_2pt must be of type `Profile2pt` or `None`") + + # number counts profiles must be normalized + profs = {prof1: normprof1, prof2: normprof2, + prof3: normprof3, prof4: normprof4} + + for i, (profile, normalization) in enumerate(profs.items()): + if (profile is not None + and profile.is_number_counts + and not normalization): + raise ValueError( + f"normprof{i+1} must be True if prof{i+1} is number counts") + + if prof3 is None: + prof3_bak = prof1 + else: + prof3_bak = prof3 + if prof34_2pt is None: + prof34_2pt_bak = prof12_2pt + else: + prof34_2pt_bak = prof34_2pt + + # Power spectrum + if isinstance(p_of_k_a, Pk2D): + pk2d = p_of_k_a + elif (p_of_k_a is None) or (str(p_of_k_a) == 'linear'): + pk2d = cosmo.get_linear_power('delta_matter:delta_matter') + elif str(p_of_k_a) == 'nonlinear': + pk2d = cosmo.get_nonlin_power('delta_matter:delta_matter') + else: + raise TypeError("p_of_k_a must be `None`, \'linear\', " + "\'nonlinear\' or a `Pk2D` object") + + def get_norm(normprof, prof, sf): + if normprof: + return hmc.profile_norm(cosmo, sf, prof) + else: + return 1 + + na = len(a_arr) + nk = len(k_use) + dpk12 = np.zeros([na, nk]) + dpk34 = np.zeros([na, nk]) + for ia, aa in enumerate(a_arr): + # Compute profile normalizations + norm1 = get_norm(normprof1, prof1, aa) + i11_1 = hmc.I_1_1(cosmo, k_use, aa, prof1) + # Compute second profile normalization + if prof2 is None: + norm2 = norm1 + i11_2 = i11_1 + else: + norm2 = get_norm(normprof2, prof2, aa) + i11_2 = hmc.I_1_1(cosmo, k_use, aa, prof2) + if prof3 is None: + norm3 = norm1 + i11_3 = i11_1 + else: + norm3 = get_norm(normprof3, prof3, aa) + i11_3 = hmc.I_1_1(cosmo, k_use, aa, prof3) + if prof4 is None: + norm4 = norm3 + i11_4 = i11_3 + else: + norm4 = get_norm(normprof4, prof4, aa) + i11_4 = hmc.I_1_1(cosmo, k_use, aa, prof4) + + i12_12 = hmc.I_1_2(cosmo, k_use, aa, prof1, + prof12_2pt, prof2) + if (prof3 is None) and (prof4 is None) and (prof34_2pt is None): + i12_34 = i12_12 + else: + i12_34 = hmc.I_1_2(cosmo, k_use, aa, prof3_bak, + prof34_2pt_bak, prof4) + norm12 = norm1 * norm2 + norm34 = norm3 * norm4 + + pk = pk2d.eval(k_use, aa, cosmo) + dpk = pk2d.eval_dlogpk_dlogk(k_use, aa, cosmo) + # (47/21 - 1/3 dlogPk/dlogk) * I11 * I11 * Pk+I12 + dpk12[ia, :] = norm12*((2.2380952381-dpk/3)*i11_1*i11_2*pk+i12_12) + dpk34[ia, :] = norm34*((2.2380952381-dpk/3)*i11_3*i11_4*pk+i12_34) + + # Counter 
terms for clustering (i.e. - (bA + bB) * PAB + if prof1.is_number_counts or (prof2 is None or prof2.is_number_counts): + b1 = b2 = np.zeros_like(k_use) + i02_12 = hmc.I_0_2(cosmo, k_use, aa, prof1, prof12_2pt, prof2) + P_12 = norm12 * (pk * i11_1 * i11_2 + i02_12) + + if prof1.is_number_counts: + b1 = i11_1 * norm1 + + if prof2 is None: + b2 = b1 + elif prof2.is_number_counts: + b2 = i11_2 * norm2 + + dpk12[ia, :] -= (b1 + b2) * P_12 + + if prof3_bak.is_number_counts or \ + ((prof3_bak.is_number_counts and prof4 is None) or + (prof4 is not None) and prof4.is_number_counts): + b3 = b4 = np.zeros_like(k_use) + if (prof3 is None) and (prof4 is None) and (prof34_2pt is None): + i02_34 = i02_12 + else: + i02_34 = hmc.I_0_2(cosmo, k_use, aa, prof3_bak, prof34_2pt_bak, + prof4) + P_34 = norm34 * (pk * i11_3 * i11_4 + i02_34) + + if prof3 is None: + b3 = b1 + elif prof3.is_number_counts: + b3 = i11_3 * norm3 + + if prof4 is None: + b4 = b3 + elif prof4.is_number_counts: + b4 = i11_4 * norm4 + + dpk34[ia, :] -= (b3 + b4) * P_34 + + if use_log: + if np.any(dpk12 <= 0) or np.any(dpk34 <= 0): + warnings.warn( + "Some values were not positive. " + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + dpk12 = np.log(dpk12) + dpk34 = np.log(dpk34) + + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, + pk1_arr=dpk12, pk2_arr=dpk34, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, is_logt=use_log) + return tk3d, dpk12 + diff --git a/pyccl/halos/hmfunc.py b/pyccl/halos/hmfunc.py index b9441053e..4e738321e 100644 --- a/pyccl/halos/hmfunc.py +++ b/pyccl/halos/hmfunc.py @@ -6,6 +6,7 @@ import numpy as np import functools +from dark_emulator import darkemu class MassFunc(object): """ This class enables the calculation of halo mass functions. @@ -766,7 +767,69 @@ def _get_fsigma(self, cosmo, sigM, a, lnM): return self.A * ((self.a / sigM)**self.b + 1.) * \ np.exp(-self.c / sigM**2) +class MassFuncDarkEmulator(MassFunc): + """ Implements mass function described in 2019ApJ...884..29P. + This parametrization is only valid for '200m' masses. + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): A Cosmology object. + mass_def (:class:`~pyccl.halos.massdef.MassDef`): + a mass definition object. + this parametrization accepts FoF masses only. + If `None`, FoF masses will be used. + mass_def_strict (bool): if False, consistency of the mass + definition will be ignored. 
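+
+    Example (illustrative sketch; assumes the `dark_emulator` package is
+    installed and `cosmo` was created with `A_s` specified)::
+
+        >>> mdef = MassDef200m()
+        >>> hmf = MassFuncDarkEmulator(cosmo, mass_def=mdef)
+        >>> dndlog10M = hmf.get_mass_function(cosmo, 1e13, 1.0)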
+ """ + name = 'DarkEmulator' + + def __init__(self, cosmo, mass_def=None, mass_def_strict=True): + super(MassFuncDarkEmulator, self).__init__(cosmo, + mass_def, + mass_def_strict) + def _default_mdef(self): + self.mdef = MassDef200m() + + def _setup(self, cosmo): + Omega_c = cosmo["Omega_c"] + Omega_b = cosmo["Omega_b"] + h = cosmo["h"] + n_s = cosmo["n_s"] + A_s = cosmo["A_s"] + + omega_c = Omega_c * h ** 2 + omega_b = Omega_b * h ** 2 + omega_nu = 0.00064 + Omega_L = 1 - ((omega_c + omega_b + omega_nu) / h **2) + + emu = darkemu.de_interface.base_class() + + #Parameters cparam (numpy array) : Cosmological parameters (𝜔𝑏, 𝜔𝑐, Ω𝑑𝑒, ln(10^10 𝐴𝑠), 𝑛𝑠, 𝑤) + cparam = np.array([omega_b,omega_c,Omega_L,np.log(10 ** 10 * A_s),n_s,-1.]) + emu.set_cosmology(cparam) + self.emu = emu + + def _check_mdef_strict(self, mdef): + if isinstance(mdef.Delta, str): + return True + elif int(mdef.Delta) == 200: + if (mdef.rho_type != 'matter'): + return True + return False + + def _get_fsigma(self, cosmo, sigM, a, lnM): + + alpha = 10**(-(0.75/(np.log10(200/75.)))**1.2) + + self.A = self.emu.get_A_HMF(1/a - 1) + self.a = self.emu.get_a_HMF(1/a - 1) + self.b = 2.57 * a ** alpha + self.c = 1.19 + + return self.A * ((self.b / sigM)**self.a + 1.) * \ + np.exp(-self.c / sigM**2) + @functools.wraps(MassFunc.from_name) def mass_function_from_name(name): return MassFunc.from_name(name) + diff --git a/pyccl/tk3d.py b/pyccl/tk3d.py index 0379af5c0..6c4a94eb2 100644 --- a/pyccl/tk3d.py +++ b/pyccl/tk3d.py @@ -3,10 +3,19 @@ from .pyutils import check, _get_spline2d_arrays, _get_spline3d_arrays import numpy as np -from . import core +from . import core import warnings from .errors import CCLWarning +#for P_gm +import sys +import os +darkemu_path = os.path.realpath(os.path.join(os.getcwd(),'/Users/terasawaryo_other/OneDrive - The University of Tokyo/Master/code/dark_emulator_public')) +sys.path.insert(0,darkemu_path) +from dark_emulator import darkemu +from dark_emulator import model_hod + + class Tk3D(object): """A container for \"isotropized\" connected trispectra relevant for @@ -218,29 +227,28 @@ def get_spline_arrays(self): return a_arr, lk_arr1, lk_arr2, out - -def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, - lk_arr=None, a_arr=None, - extrap_order_lok=1, extrap_order_hik=1, - use_log=False): +def Tk3D_SSC_Terasawa22(cosmo,deltah=0.02, + extra_parameters={"camb": {"halofit_version": "original",}}, + lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False): """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing the super-sample covariance trispectrum, given by the tensor product of the power spectrum responses associated with the - two pairs of quantities being correlated. Currently this - function only applicable to matter power spectrum in flat - cosmology. Each response is calculated using the method + two pairs of quantities being correlated. Currently this + function only applicable to matter power spectrum in flat + cosmology. Each response is calculated using the method developed in Terasawa et al. 2022 (arXiv:2205.10339v2) as: .. math:: \\frac{\\partial P_{mm}(k)}{\\partial\\delta_L} = - \\left(1 + \\frac{26}{21}T_{h}(k) - -\\frac{1}{3}\\frac{d\\log P_{mm}(k)}{d\\log k}\\right) + \\left(1 + \\frac{26}{21}T_{h}(k) -\\frac{1}{3}\\frac{d\\log P_{mm}(k)}{d\\log k}\\right) P_{mm}(k), where the :math:`T_{h}(k)` is the normalized growth response to - the Hubble parameter defined as + the Hubble parameter defined as :math:`T_{h}(k) = \\frac{d\\log P_{mm}(k)}{dh}/(2\\frac{d\\log D}{dh})`. 
- + Args: cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. deltah (float): the variation of h to compute T_{h}(k) by @@ -286,76 +294,381 @@ def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, h = cosmo["h"] n_s = cosmo["n_s"] A_s = cosmo["A_s"] + + #extra_parameters = {"camb": {"halofit_version": "original",}} + + #set h-modified cosmology to take finite differencing + hp = h + deltah + Omega_c_p = np.power((h/hp),2) * Omega_c #\Omega_c h^2 is fixed + Omega_b_p = np.power((h/hp),2) * Omega_b #\Omega_b h^2 is fixed + + hm = h - deltah + Omega_c_m = np.power((h/hm),2) * Omega_c #\Omega_c h^2 is fixed + Omega_b_m = np.power((h/hm),2) * Omega_b #\Omega_b h^2 is fixed + + cosmo_hp = core.Cosmology(Omega_c=Omega_c_p,Omega_b=Omega_b_p, + h=hp, n_s=n_s, A_s=A_s, + transfer_function="boltzmann_camb", + matter_power_spectrum="camb", + extra_parameters=extra_parameters) + + cosmo_hm = core.Cosmology(Omega_c=Omega_c_m,Omega_b=Omega_b_m, + h=hm, n_s=n_s, A_s=A_s, + transfer_function="boltzmann_camb", + matter_power_spectrum="camb", + extra_parameters=extra_parameters) + + # Growth factor + Dp = cosmo_hp.growth_factor_unnorm(a_arr) + Dm = cosmo_hm.growth_factor_unnorm(a_arr) + + # Power spectrum + cosmo.compute_linear_power() + cosmo_hp.compute_linear_power() + cosmo_hm.compute_linear_power() + + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') + + pk2d = cosmo.get_nonlin_power('delta_matter:delta_matter') + pk2d_hp = cosmo_hp.get_nonlin_power('delta_matter:delta_matter') + pk2d_hm = cosmo_hm.get_nonlin_power('delta_matter:delta_matter') + + na = len(a_arr) + nk = len(k_use) + dpk12 = np.zeros([na,nk]) + dpk = np.zeros(nk) + T_h = np.zeros(nk) + + kmin = 1e-2 + for ia, aa in enumerate(a_arr): + + pk = pk2d.eval(k_use, aa, cosmo) + pk_hp_kh = pk2d_hp.eval(k_use, aa, cosmo_hp) + pk_hm_kh = pk2d_hm.eval(k_use, aa, cosmo_hm) + pk_hp = pk2d_hp.eval(k_use, aa, cosmo_hp) + pk_hm = pk2d_hm.eval(k_use, aa, cosmo_hm) + + dpknl = pk2d.eval_dlogpk_dlogk(k_use, aa, cosmo) + dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) + + # use linear theory below kmin + T_h[k_use<=kmin] = 1 + + T_h[k_use>kmin] = (np.log(pk_hp[k_use>kmin])-np.log(pk_hm[k_use>kmin]))/(2*(np.log(Dp[ia])-np.log(Dm[ia]))) # (hp-hm) term is cancelled out + + dpk[k_use<=kmin] = dpklin[k_use<=kmin] + dpk[k_use>kmin] = dpknl[k_use>kmin] + + dpk12[ia, :] = pk * (1. + (26./21.)*T_h -dpk/3.) + + if use_log: + if np.any(dpk12 <= 0): + warnings.warn( + "Some values were not positive. " + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + dpk12 = np.log(dpk12) + + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, + pk1_arr=dpk12, pk2_arr=dpk12, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, is_logt=use_log) + return tk3d - extra_parameters = {"camb": {"halofit_version": "original", }} -# set h-modified cosmology to take finite differencing - hp = h + deltah - Omega_c_p = np.power((h/hp), 2) * Omega_c # \Omega_c h^2 is fixed - Omega_b_p = np.power((h/hp), 2) * Omega_b # \Omega_b h^2 is fixed +def pkarr_SSC_Terasawa22(cosmo,deltah=0.02, + extra_parameters={"camb": {"halofit_version": "original",}}, + lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False): + """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing + the super-sample covariance trispectrum, given by the tensor + product of the power spectrum responses associated with the + two pairs of quantities being correlated. 
Currently this + function only applicable to matter power spectrum in flat + cosmology. Each response is calculated using the method + developed in Terasawa et al. 2022 (arXiv:2205.10339v2) as: + + .. math:: + \\frac{\\partial P_{mm}(k)}{\\partial\\delta_L} = + \\left(1 + \\frac{26}{21}T_{h}(k) -\\frac{1}{3}\\frac{d\\log P_{mm}(k)}{d\\log k}\\right) + P_{mm}(k), + + where the :math:`T_{h}(k)` is the normalized growth response to + the Hubble parameter defined as + :math:`T_{h}(k) = \\frac{d\\log P_{mm}(k)}{dh}/(2\\frac{d\\log D}{dh})`. + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + deltah (float): the variation of h to compute T_{h}(k) by + the two-sided numerical derivative method. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). - hm = h - deltah - Omega_c_m = np.power((h/hm), 2) * Omega_c # \Omega_c h^2 is fixed - Omega_b_m = np.power((h/hm), 2) * Omega_b # \Omega_b h^2 is fixed + Returns: + :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. + """ - cosmo_hp = core.Cosmology(Omega_c=Omega_c_p, Omega_b=Omega_b_p, - h=hp, n_s=n_s, A_s=A_s, - transfer_function="boltzmann_camb", - matter_power_spectrum="camb", - extra_parameters=extra_parameters) + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status, cosmo=cosmo) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status, cosmo=cosmo) - cosmo_hm = core.Cosmology(Omega_c=Omega_c_m, Omega_b=Omega_b_m, - h=hm, n_s=n_s, A_s=A_s, - transfer_function="boltzmann_camb", - matter_power_spectrum="camb", - extra_parameters=extra_parameters) + k_use = np.exp(lk_arr) + Omega_c = cosmo["Omega_c"] + Omega_b = cosmo["Omega_b"] + h = cosmo["h"] + n_s = cosmo["n_s"] + A_s = cosmo["A_s"] + + #extra_parameters = {"camb": {"halofit_version": "original",}} + + #set h-modified cosmology to take finite differencing + hp = h + deltah + Omega_c_p = np.power((h/hp),2) * Omega_c #\Omega_c h^2 is fixed + Omega_b_p = np.power((h/hp),2) * Omega_b #\Omega_b h^2 is fixed + + hm = h - deltah + Omega_c_m = np.power((h/hm),2) * Omega_c #\Omega_c h^2 is fixed + Omega_b_m = np.power((h/hm),2) * Omega_b #\Omega_b h^2 is fixed + + cosmo_hp = core.Cosmology(Omega_c=Omega_c_p,Omega_b=Omega_b_p, + h=hp, n_s=n_s, A_s=A_s, + transfer_function="boltzmann_camb", + matter_power_spectrum="camb", + extra_parameters=extra_parameters) + + cosmo_hm = core.Cosmology(Omega_c=Omega_c_m,Omega_b=Omega_b_m, + h=hm, n_s=n_s, A_s=A_s, + transfer_function="boltzmann_camb", + matter_power_spectrum="camb", + extra_parameters=extra_parameters) + # Growth factor Dp = cosmo_hp.growth_factor_unnorm(a_arr) Dm = 
cosmo_hm.growth_factor_unnorm(a_arr) - + # Power spectrum cosmo.compute_linear_power() cosmo_hp.compute_linear_power() cosmo_hm.compute_linear_power() - + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') pk2d = cosmo.get_nonlin_power('delta_matter:delta_matter') pk2d_hp = cosmo_hp.get_nonlin_power('delta_matter:delta_matter') pk2d_hm = cosmo_hm.get_nonlin_power('delta_matter:delta_matter') - + na = len(a_arr) nk = len(k_use) - dpk12 = np.zeros([na, nk]) + dpk12 = np.zeros([na,nk]) dpk = np.zeros(nk) T_h = np.zeros(nk) - + kmin = 1e-2 for ia, aa in enumerate(a_arr): - + pk = pk2d.eval(k_use, aa, cosmo) + pk_hp_kh = pk2d_hp.eval(k_use, aa, cosmo_hp) + pk_hm_kh = pk2d_hm.eval(k_use, aa, cosmo_hm) pk_hp = pk2d_hp.eval(k_use, aa, cosmo_hp) pk_hm = pk2d_hm.eval(k_use, aa, cosmo_hm) + + dpknl = pk2d.eval_dlogpk_dlogk(k_use, aa, cosmo) + dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) + + # use linear theory below kmin + T_h[k_use<=kmin] = 1 + + T_h[k_use>kmin] = (np.log(pk_hp[k_use>kmin])-np.log(pk_hm[k_use>kmin]))/(2*(np.log(Dp[ia])-np.log(Dm[ia]))) # (hp-hm) term is cancelled out + + dpk[k_use<=kmin] = dpklin[k_use<=kmin] + dpk[k_use>kmin] = dpknl[k_use>kmin] + + dpk12[ia, :] = pk * (1. + (26./21.)*T_h -dpk/3.) + + if use_log: + if np.any(dpk12 <= 0): + warnings.warn( + "Some values were not positive. " + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + dpk12 = np.log(dpk12) + + return dpk12 + + +def Terasawa_Tk3D_SSC_linear_bias(cosmo, bias1=1, bias2=1, bias3=1, + bias4=1, + is_number_counts1=False, + is_number_counts2=False, + is_number_counts3=False, + is_number_counts4=False, + p_of_k_a=None, lk_arr=None, + a_arr=None, extrap_order_lok=1, + extrap_order_hik=1, use_log=False): + """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing + the super-sample covariance trispectrum, given by the tensor + product of the power spectrum responses associated with the + two pairs of quantities being correlated. Each response is + calculated as: - dpknl = pk2d.eval_dlogpk_dlogk(k_use, aa, cosmo) - dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) + .. math:: + \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = b_u b_v \\left( + \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) + P_L(k)+I^1_2(k|u,v) - (b_{u} + b_{v}) P_{u,v}(k) \\right) - # use linear theory below kmin - T_h[k_use <= kmin] = 1 + where the :math:`I^1_2` is defined in the documentation + :meth:`~HMCalculator.I_1_2` and :math:`b_{}` and :math:`b_{vv}` are the + linear halo biases for quantities :math:`u` and :math:`v`, respectively + (zero if they are not clustering). + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + prof (:class:`~pyccl.halos.profiles.HaloProfile`): halo NFW + profile. + bias1 (float or array): linear galaxy bias for quantity 1. If an array, + it has to have the shape of `a_arr`. + bias2 (float or array): linear galaxy bias for quantity 2. + bias3 (float or array): linear galaxy bias for quantity 3. + bias4 (float or array): linear galaxy bias for quantity 4. + is_number_counts1 (bool): If True, quantity 1 will be considered + number counts and the clustering counter terms computed. Default False. + is_number_counts2 (bool): as is_number_counts1 but for quantity 2. + is_number_counts3 (bool): as is_number_counts1 but for quantity 3. + is_number_counts4 (bool): as is_number_counts1 but for quantity 4. 
+ p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, + the power spectrum stored within `cosmo` will be used. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. + """ + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status, cosmo=cosmo) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status, cosmo=cosmo) - T_h[k_use > kmin] = (np.log(pk_hp[k_use > kmin]) - - np.log(pk_hm[k_use > kmin])) / \ - (2 * (np.log(Dp[ia]) - np.log(Dm[ia]))) - # (hp-hm) term is cancelled out + # Make sure biases are of the form number of a x number of k + ones = np.ones_like(a_arr) + bias1 *= ones + bias2 *= ones + bias3 *= ones + bias4 *= ones + + k_use = np.exp(lk_arr) - dpk[k_use <= kmin] = dpklin[k_use <= kmin] - dpk[k_use > kmin] = dpknl[k_use > kmin] + # Power spectrum + #if isinstance(p_of_k_a, Pk2D): + # pk2d = p_of_k_a +# elif (p_of_k_a is None) or (str(p_of_k_a) == 'linear'): +# pk2d = cosmo.get_linear_power('delta_matter:delta_matter') +# elif str(p_of_k_a) == 'nonlinear': +# pk2d = cosmo.get_nonlin_power('delta_matter:delta_matter') + #else: + cosmo.compute_linear_power() + + pk2d = cosmo.get_nonlin_power('delta_matter:delta_matter') + + #raise TypeError("p_of_k_a must be `None`, \'linear\', " + # "\'nonlinear\' or a `Pk2D` object") - dpk12[ia, :] = pk * (1. + (26. / 21.) * T_h - dpk / 3.) + na = len(a_arr) + nk = len(k_use) + dpk12 = np.zeros([na, nk]) + dpk34 = np.zeros([na, nk]) + + dpk = pkarr_SSC_Terasawa22(cosmo,deltah=0.02, + extra_parameters={"camb": {"halofit_version": "original",}}, + lk_arr=lk_arr, a_arr=a_arr, + extrap_order_lok=extrap_order_lok, extrap_order_hik=extrap_order_hik, + use_log=False) + + for ia, aa in enumerate(a_arr): + + pk = pk2d.eval(k_use, aa, cosmo) + + # ~ [(47/21 - 1/3 dlogPk/dlogk) * Pk+I12] + #dpk12[ia] = ((2.2380952381-dpk/3)*pk + i12) + dpk12[ia] = dpk[ia] + dpk34[ia] = dpk12[ia].copy() # Avoid surprises + + # Counter terms for clustering (i.e. 
- (bA + bB) * PAB + if is_number_counts1 or is_number_counts2 or is_number_counts3 or \ + is_number_counts4: + b1 = b2 = b3 = b4 = 0 + + P_12 = P_34 = pk #+ i02 + + if is_number_counts1: + b1 = bias1[ia] + if is_number_counts2: + b2 = bias2[ia] + if is_number_counts3: + b3 = bias3[ia] + if is_number_counts4: + b4 = bias4[ia] + + dpk12[ia, :] -= (b1 + b2) * P_12 + dpk34[ia, :] -= (b3 + b4) * P_34 + + dpk12[ia] *= bias1[ia] * bias2[ia] + dpk34[ia] *= bias3[ia] * bias4[ia] if use_log: - if np.any(dpk12 <= 0): + if np.any(dpk12 <= 0) or np.any(dpk34 <= 0): warnings.warn( "Some values were not positive. " "Will not interpolate in log-space.", @@ -363,9 +676,10 @@ def Tk3D_SSC_Terasawa22(cosmo, deltah=0.02, use_log = False else: dpk12 = np.log(dpk12) + dpk34 = np.log(dpk34) tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, - pk1_arr=dpk12, pk2_arr=dpk12, + pk1_arr=dpk12, pk2_arr=dpk34, extrap_order_lok=extrap_order_lok, extrap_order_hik=extrap_order_hik, is_logt=use_log) return tk3d From 4e64fc26402acb1078af614ac8caf18ada3396c0 Mon Sep 17 00:00:00 2001 From: RyoTerasawa Date: Mon, 1 May 2023 07:01:02 +0900 Subject: [PATCH 16/17] update darkemulator.py and related files --- pyccl/__init__.py | 1 + pyccl/core.py | 42 + pyccl/darkemulator.py | 1892 +++++++++++++++++++++++++++++++--- pyccl/halos/__init__.py | 34 +- pyccl/halos/concentration.py | 136 ++- pyccl/halos/halo_model.py | 1851 +++++++++++++++++++++++++-------- pyccl/halos/hmfunc.py | 46 +- pyccl/halos/profiles.py | 175 +++- pyccl/halos/profiles_2pt.py | 29 +- 9 files changed, 3576 insertions(+), 630 deletions(-) mode change 100644 => 100755 pyccl/darkemulator.py diff --git a/pyccl/__init__.py b/pyccl/__init__.py index ff5c2dc22..306c4a956 100644 --- a/pyccl/__init__.py +++ b/pyccl/__init__.py @@ -70,6 +70,7 @@ # Generalized connected trispectra from .tk3d import Tk3D +from .tk3d import Tk3D_SSC_Terasawa22 #RT: do we need it for test? 
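+# Illustrative usage (sketch): once imported here, the new function is
+# available at the top level, e.g.
+#   tk3d = pyccl.Tk3D_SSC_Terasawa22(cosmo, deltah=0.02)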
# Power spectrum calculations, sigma8 and kNL from .power import ( diff --git a/pyccl/core.py b/pyccl/core.py index e833264de..de594d786 100644 --- a/pyccl/core.py +++ b/pyccl/core.py @@ -254,6 +254,8 @@ def __init__( self._pk_lin = {} self._pk_nl = {} + self._cM = {} + self.has_cM = False def _build_cosmo(self): """Assemble all of the input data into a valid ccl_cosmology object.""" @@ -841,7 +843,47 @@ def compute_linear_power(self): # Assign self._pk_lin['delta_matter:delta_matter'] = pk + + def compute_ConcentrationDiemer15_200m(self): + if self.has_cM: + return + from .power import sigma8 + from scipy.interpolate import RectBivariateSpline as rbs + from colossus.cosmology import cosmology as colcosmology + from colossus.halo import concentration as colconcentration + from colossus.halo.mass_defs import changeMassDefinition as colchangeMassDefinition + + h = self["h"] + H0 = 100 * h + Om0 = self["Omega_c"] + self["Omega_b"] + + if np.isnan(self["sigma8"]): + sigma8(self) + + Mh = np.logspace(8,17,100) # Msol/h + a = np.linspace(1/(1+1.5),1.,10) + params = {'flat': True, 'H0': H0, 'Om0': Om0, + 'Ob0': self["Omega_b"], 'sigma8': self["sigma8"], 'ns': self["n_s"], 'persistence': ''} + colcosmo = colcosmology.setCosmology('myCosmo', params) + + c200m = np.zeros((len(a),len(Mh))) + c_vir = np.zeros((len(a),len(Mh))) + + for i in range(len(a)): + c200m[i] = colconcentration.concentration(Mh, '200m', 1/a[i] -1, model="diemer15") + M_vir, R_vir, c_vir[i] = colchangeMassDefinition(Mh, c200m[i], 1/a[i] -1, mdef_in='200m', mdef_out='vir', profile='nfw') + + c200m_rbs = rbs(a,np.log10(Mh/h),c200m) + c_vir_rbs = rbs(a,np.log10(Mh/h),c_vir) + + self._cM['200m'] = c200m_rbs + self._cM['vir'] = c_vir_rbs + self.has_cM = True + + def get_ConcentrationDiemer15_200m(self, name='200m'): + return self._cM[name] + def _get_halo_model_nonlin_power(self): from . import halos as hal mdef = hal.MassDef('vir', 'matter') diff --git a/pyccl/darkemulator.py b/pyccl/darkemulator.py old mode 100644 new mode 100755 index ea9201a60..4e81d53bd --- a/pyccl/darkemulator.py +++ b/pyccl/darkemulator.py @@ -10,9 +10,10 @@ from .tk3d import Tk3D from dark_emulator import darkemu -from dark_emulator import model_hod +#from dark_emulator import model_hod from scipy import integrate from scipy.interpolate import InterpolatedUnivariateSpline as ius +from scipy.special import sici from . 
import halos def darkemu_Tk3D_SSC(cosmo, prof1, deltah=0.02, @@ -160,7 +161,11 @@ def darkemu_Tk3D_SSC(cosmo, prof1, deltah=0.02, print("use halo model for z={:.2f}>1.48".format(z)) else: # mass function - dndlog10m_emu = ius(Mfor_hmf ,hmf_DE.get_mass_function(cosmo, 10**Mfor_hmf ,aa)) # Mpc^-3 #ius(np.log10(Mlist), dndm_emu * Mlist * np.log(10) * h ** 3) + Mlist, dndm_emu = emu.get_dndm(z) + dndlog10m_emu = ius(np.log10(Mlist/h), dndm_emu * Mlist * np.log(10) * h ** 3) + + # mass function + #dndlog10m_emu = ius(Mfor_hmf ,hmf_DE.get_mass_function(cosmo, 10**Mfor_hmf ,aa)) # Mpc^-3 #ius(np.log10(Mlist), dndm_emu * Mlist * np.log(10) * h ** 3) if Mh[0] < 12.0: # Msol/h Pth[0] = emu.get_phm_massthreshold(k_emu,10**12,z) * (1/h)**3 @@ -203,9 +208,9 @@ def darkemu_Tk3D_SSC(cosmo, prof1, deltah=0.02, Mps = M + dlogM Mms = M - dlogM - prof_Mp = prof1.fourier(cosmo, k_use, (10 ** Mps), aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): - prof_Mm = prof1.fourier(cosmo, k_use, (10 ** Mms), aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): - prof = prof1.fourier(cosmo, k_use,(10 ** M), aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): + prof_Mp = prof1.fourier(cosmo, k_use, (10 ** Mps), aa, mass_def) + prof_Mm = prof1.fourier(cosmo, k_use, (10 ** Mms), aa, mass_def) + prof = prof1.fourier(cosmo, k_use,(10 ** M), aa, mass_def) dprof_dlogM = (prof_Mp - prof_Mm) / (2 * dlogM)#*np.log(10)) nth_mat = np.tile(nths, (len(k_use), 1)).transpose() @@ -276,12 +281,14 @@ def darkemu_Tk3D_SSC(cosmo, prof1, deltah=0.02, extrap_order_hik=extrap_order_hik, is_logt=use_log) return tk3d, pk2d -def darkemu_pkarr_SSC(cosmo, prof1, deltah=0.02, +def darkemu_pkarr_SSC(cosmo, prof_hod, deltah=0.02, log10Mh_min=12.0,log10Mh_max=15.9, - normprof1=False, kmax=2.0, + log10Mh_pivot=12.5, + normprof_hod=False, k_max=2.0, lk_arr=None, a_arr=None, extrap_order_lok=1, extrap_order_hik=1, - use_log=False, highk_HM=True): + use_log=False, highk_HM=True, surface=False, + highz_HMresp=True): """ Returns a 2D array with shape `[na,nk]` describing the first function :math:`f_1(k,a)` that makes up a factorizable trispectrum :math:`T(k_1,k_2,a)=f_1(k_1,a)f_2(k_2,a)` The response is @@ -301,7 +308,7 @@ def darkemu_pkarr_SSC(cosmo, prof1, deltah=0.02, Args: cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. hmc (:class:`HMCalculator`): a halo model calculator. - prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + prof_hod (:class:`~pyccl.halos.profiles.HaloProfile`): halo profile (corresponding to :math:`u_1` above. 
p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to @@ -343,12 +350,12 @@ def darkemu_pkarr_SSC(cosmo, prof1, deltah=0.02, k_use = np.exp(lk_arr) # Check inputs - if not isinstance(prof1, halos.profiles.HaloProfile): - raise TypeError("prof1 must be of type `HaloProfile`") + if not isinstance(prof_hod, halos.profiles.HaloProfile): + raise TypeError("prof_hod must be of type `HaloProfile`") h = cosmo["h"] k_emu = k_use / h # [h/Mpc] - Omega_m = cosmo["Omega_b"] + cosmo["Omega_c"] + 0.00064/(h**2) + #Omega_m = cosmo["Omega_b"] + cosmo["Omega_c"] + 0.00064/(h**2) cosmo.compute_linear_power() pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') @@ -358,7 +365,7 @@ def darkemu_pkarr_SSC(cosmo, prof1, deltah=0.02, # set h-modified cosmology to take finite differencing hp = h + deltah hm = h - deltah - cosmo_hp, cosmo_hm = set_hmodified_cosmology(cosmo,deltah) + cosmo_hp, cosmo_hm = set_hmodified_cosmology(cosmo, deltah) emu_p = darkemu_set_cosmology(cosmo_hp) emu_m = darkemu_set_cosmology(cosmo_hm) @@ -372,50 +379,61 @@ def darkemu_pkarr_SSC(cosmo, prof1, deltah=0.02, dpk12 = np.zeros([na, nk]) pk12 = np.zeros([na, nk]) #dpk34 = np.zeros([na, nk]) - Mfor_hmf = np.linspace(8,17,200) - Mh = np.linspace(log10Mh_min,log10Mh_max,2**5+1) # M_sol/h - M = np.log10(10**Mh/h) - dM = M[1] - M[0] - dlogM = dM - b1_th_tink = np.zeros(len(Mh)) - #b2_th_tink = np.zeros(len(Mh)) - Pth = [0] * len(Mh) - Pnth_hp = [0] * len(Mh) - Pnth_hm = [0] * len(Mh) - Pbin = [0] * len(Mh) - nths = np.zeros(len(Mh)) + logMfor_hmf = np.linspace(8,17,200) + logMh = np.linspace(log10Mh_min,log10Mh_max,2**5+1) # M_sol/h + logM = np.log10(10**logMh/h) + Mh = 10**logMh + M = 10**logM + nM = len(M) + Mh_pivot = 10**log10Mh_pivot # M_sol/h + M_pivot = 10**log10Mh_pivot/h # M_sol + dlogM = logM[1] - logM[0] + b1_th_tink = np.zeros(nM) + #b2_th_tink = np.zeros(nM) + Pth = np.zeros((nM,nk)) + Pnth_hp = np.zeros((nM,nk)) + Pnth_hm = np.zeros((nM,nk)) + Pbin = np.zeros((nM,nk)) + surface_pgm = np.zeros((nM,nk)) + surface_resp = np.zeros((nM,nk)) + + nths = np.zeros(nM) + mass_hp = np.zeros(nM) + mass_hm = np.zeros(nM) mass_def=halos.MassDef200m() - #mdef_other=halos.MassDef200m() - hmf_DE = halos.MassFuncDarkEmulator(cosmo,mass_def=mass_def) - hbf = halos.hbias.HaloBiasTinker10(cosmo,mass_def=mass_def) + hmf_DE = halos.MassFuncDarkEmulator(cosmo, mass_def=mass_def, darkemulator=emu) + hbf = halos.hbias.HaloBiasTinker10(cosmo, mass_def=mass_def) + cM = halos.ConcentrationDiemer15_colossus200m(mass_def) + #cM_vir = halos.ConcentrationDiemer15_colossus_vir(mass_def) - #kmax = 2 - if np.any(a_arr < 1/(1+1.48)) or k_use[-1] > kmax: - #hmf = halos.MassFuncTinker10(cosmo,mass_def=mass_def) - nfw = halos.HaloProfileNFW(halos.ConcentrationDuffy08(mass_def), - fourier_analytic=True) + if np.any(a_arr < 1/(1+1.48)): - #nfw = halos.HaloProfileNFW(halos.ConcentrationDiemer15_colossus(mass_def), - # fourier_analytic=True) - hmc = halos.HMCalculator(cosmo, hmf_DE, hbf, mass_def,log10M_min=np.log10(M[0]),log10M_max=np.log10(M[-1])) + nfw = halos.HaloProfileNFW(cM, fourier_analytic=True) + hmc = halos.HMCalculator(cosmo, hmf_DE, hbf, mass_def, log10M_min=logM[0], log10M_max=logM[-1]) halomod_pk_arr = halos.halomod_power_spectrum(cosmo, hmc, k_use, a_arr, prof=nfw, prof_2pt=None, - prof2=prof1, p_of_k_a=None, + prof2=prof_hod, p_of_k_a=None, normprof1=True, normprof2=True, get_1h=True, get_2h=True, smooth_transition=None, supress_1h=None) - halomod_tk3D, dpk12_halomod = halos.halomod_Tk3D_SSC(cosmo=cosmo, hmc=hmc, - prof1=nfw, - prof2=prof1, - 
prof12_2pt=None, + if np.any(a_arr < 1/(1+1.48)) or (highk_HM and k_use[-1] > k_max): + + nfw = halos.HaloProfileNFW(cM, fourier_analytic=True) + hmc = halos.HMCalculator(cosmo, hmf_DE, hbf, mass_def, log10M_min=logM[0], log10M_max=logM[-1]) + + halomod_tk3D, dpk12_halomod = halos.halomod_Tk3D_SSC_orig(cosmo=cosmo, hmc=hmc, + prof1=nfw, prof2=prof_hod, + prof3=nfw, prof4=prof_hod, + prof12_2pt=None, prof34_2pt=None, normprof1=True, normprof2=True, + normprof3=True, normprof4=True, lk_arr=np.log(k_use), a_arr=a_arr, - use_log=use_log) + use_log=False) for ia, aa in enumerate(a_arr): z = 1. / aa - 1 # dark emulator is valid for 0 =< z <= 1.48 @@ -425,137 +443,1158 @@ def darkemu_pkarr_SSC(cosmo, prof1, deltah=0.02, print("use halo model for z={:.2f}>1.48".format(z)) else: # mass function - dndlog10m_emu = ius(Mfor_hmf ,hmf_DE.get_mass_function(cosmo, 10**Mfor_hmf ,aa)) # Mpc^-3 #ius(np.log10(Mlist), dndm_emu * Mlist * np.log(10) * h ** 3) - - if Mh[0] < 12.0: # Msol/h - Pth12 = emu.get_phm_massthreshold(k_emu,10**12,z) * (1/h)**3 - nths12 = emu.mass_to_dens(10**12,z) * h**3 - Pnth_hp12 = emu_p.get_phm(k_emu*(h/hp),np.log10(nths12*(1/hp)**3),z)*(1/hp)**3 - Pnth_hm12 = emu_m.get_phm(k_emu*(h/hm),np.log10(nths12*(1/hm)**3),z)*(1/hm)**3 - Pbin12 = emu.get_phm_mass(k_emu, 10 ** 12, z) * (1/h)**3 -# else: -# Pth[0] = emu.get_phm_massthreshold(k_emu,10**Mh[0],z) * (1/h)**3 -# nths[0] = emu.mass_to_dens(10**Mh[0],z) * h**3 -# Pnth_hp[0] = emu_p.get_phm(k_emu*(h/hp),np.log10(nths[0]*(1/hp)**3),z)*(1/hp)**3 -# Pnth_hm[0] = emu_m.get_phm(k_emu*(h/hm),np.log10(nths[0]*(1/hm)**3),z)*(1/hm)**3 -# Pbin[0] = emu.get_phm_mass(k_emu, 10 ** Mh[0], z) * (1/h)**3 - - for m in range(0,len(Mh)): - if Mh[m] < 12.0: # Msol/h - Pth[m] = Pth12 * hbf.get_halo_bias(cosmo,(10 ** M1), aa) - Pnth_hp[m] = Pnth_hp[0] - Pnth_hm[m] = Pnth_hm[0] - Pbin[m] = Pbin[0] + #Mlist, dndm_emu = emu.get_dndm(z) + #dndlog10m_emu = ius(np.log10(Mlist/h), dndm_emu * Mlist * np.log(10) * h ** 3) + + # mass function + dndlog10m_emu = ius(logMfor_hmf ,hmf_DE.get_mass_function(cosmo, 10**logMfor_hmf ,aa)) # Mpc^-3 + if logMh[0] < log10Mh_pivot or highz_HMresp: # Msol/h + nfw = halos.HaloProfileNFW(cM, fourier_analytic=True) + + rho_m = cosmo.rho_x(1, "matter", is_comoving=True) # same for h_plus/minus cosmology + hmf_hp = halos.MassFuncDarkEmulator(cosmo_hp, mass_def=mass_def, darkemulator=emu_p) + dndlog10m_emu_hp = ius(logMfor_hmf, hmf_hp.get_mass_function(cosmo_hp, 10**logMfor_hmf ,aa)) # Mpc^-3 + hbf_hp = halos.hbias.HaloBiasTinker10(cosmo_hp, mass_def=mass_def) + + hmf_hm = halos.MassFuncDarkEmulator(cosmo_hm, mass_def=mass_def, darkemulator=emu_m) + dndlog10m_emu_hm = ius(logMfor_hmf, hmf_DE.get_mass_function(cosmo_hm, 10**logMfor_hmf ,aa)) # Mpc^-3 + hbf_hm = halos.hbias.HaloBiasTinker10(cosmo_hm, mass_def=mass_def) + + for m in range(nM): + if logMh[m] < log10Mh_pivot: # Msol/h + nths[m] = mass_to_dens(dndlog10m_emu, cosmo, M[m]) + mass_hp[m] = dens_to_mass(dndlog10m_emu_hp, cosmo_hp, nths[m]) + mass_hm[m] = dens_to_mass(dndlog10m_emu_hm, cosmo_hm, nths[m]) + else: - Pth[m] = emu.get_phm_massthreshold(k_emu,10**Mh[m],z) * (1/h)**3 - nths[m] = emu.mass_to_dens(10**Mh[m],z) * h**3 - Pnth_hp[m] = emu_p.get_phm(k_emu*(h/hp),np.log10(nths[m]*(1/hp)**3),z)*(1/hp)**3 - Pnth_hm[m] = emu_m.get_phm(k_emu*(h/hm),np.log10(nths[m]*(1/hm)**3),z)*(1/hm)**3 - Pbin[m] = emu.get_phm_mass(k_emu, 10 ** Mh[m], z) * (1/h)**3 - + Pth[m] = emu.get_phm_massthreshold(k_emu, Mh[m], z) * (1/h)**3 + Pbin[m] = emu.get_phm_mass(k_emu, Mh[m], z) * (1/h)**3 - - M1 = 
np.linspace(M[m], M[-1], 2**5+1) - dM1 = M[1] - M[0] - b1_th_tink[m] = integrate.romb(dndlog10m_emu(M1) * hbf.get_halo_bias(cosmo,(10 ** M1), aa), dx = dM1)\ - /integrate.romb(dndlog10m_emu(M1), dx = dM1) + #nths[m] = mass_to_dens(dndlog10m_emu, cosmo, M[m]) + nths[m] = emu.mass_to_dens(Mh[m] ,z) * h**3 - - Nc = prof1._Nc(10 ** M, aa) - Ns = prof1._Ns(10 ** M, aa) - fc = prof1._fc(aa) + if highz_HMresp and z > 0.5: + mass_hp[m] = dens_to_mass(dndlog10m_emu_hp, cosmo_hp, nths[m]) + mass_hm[m] = dens_to_mass(dndlog10m_emu_hm, cosmo_hm, nths[m]) + + Pnth_hp[m] = Pth_hm_HM_linb(k_use, mass_hp[m], cosmo_hp, dndlog10m_emu_hp, nfw, rho_m, hbf_hp, mass_def, aa) + Pnth_hm[m] = Pth_hm_HM_linb(k_use, mass_hm[m], cosmo_hm, dndlog10m_emu_hm, nfw, rho_m, hbf_hm, mass_def, aa) + + else: + Pnth_hp[m] = emu_p.get_phm(k_emu*(h/hp), np.log10(nths[m]*(1/hp)**3), z) * (1/hp)**3 + Pnth_hm[m] = emu_m.get_phm(k_emu*(h/hm), np.log10(nths[m]*(1/hm)**3), z) * (1/hm)**3 + + #logM1 = np.linspace(logM[m], np.log10(10**16./cosmo["h"]), 2**5+1) + logM1 = np.linspace(logM[m], logM[-1], 2**5+1) + dlogM1 = logM[1] - logM[0] + #b1_th_tink[m] = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, (10 ** logM1), aa), \ + # dx = dlogM1) / nths[m] + b1_th_tink[m] = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, (10 ** logM1), aa), \ + dx = dlogM1) / integrate.romb(dndlog10m_emu(logM1), dx = dlogM1) + + if logMh[0] < log10Mh_pivot: # Msol/h + Pth[logMh < log10Mh_pivot] = Pth_hm_lowmass_BMO(k_use, M[logMh < log10Mh_pivot], M_pivot, emu, cosmo, dndlog10m_emu, hbf, cM, cM_vir, \ + mass_def, rho_m, aa, b1_th_tink[logMh < log10Mh_pivot]) + Pnth_hp[logMh < log10Mh_pivot] = Pth_hm_lowmass_BMO(k_use, mass_hp[logMh < log10Mh_pivot], M_pivot, emu_p, cosmo_hp, dndlog10m_emu_hp, hbf_hp, cM, cM_vir, \ + mass_def, rho_m, aa) + Pnth_hm[logMh < log10Mh_pivot] = Pth_hm_lowmass_BMO(k_use, mass_hm[logMh < log10Mh_pivot], M_pivot, emu_m, cosmo_hm, dndlog10m_emu_hm, hbf_hm, cM, cM_vir, \ + mass_def, rho_m, aa) + Pbin[logMh < log10Mh_pivot] = Pbin_hm_lowmass_BMO(k_use, M[logMh < log10Mh_pivot], M_pivot, emu, cosmo, hbf, cM, cM_vir, mass_def, \ + pk2dlin, rho_m ,aa) + + Nc = prof_hod._Nc(M, aa) + Ns = prof_hod._Ns(M, aa) + fc = prof_hod._fc(aa) Ng = Nc * (fc + Ns) - Mps = M + dlogM - Mms = M - dlogM + logMps = logM + dlogM + logMms = logM - dlogM - prof_Mp = prof1.fourier(cosmo, k_use, (10 ** Mps), aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): - prof_Mm = prof1.fourier(cosmo, k_use, (10 ** Mms), aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): - prof = prof1.fourier(cosmo, k_use,(10 ** M), aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): - #uk = prof1._usat_fourier(cosmo, k_use,(10 ** M), aa, mass_def) + prof_Mp = prof_hod.fourier(cosmo, k_use, (10 ** logMps), aa, mass_def) + prof_Mm = prof_hod.fourier(cosmo, k_use, (10 ** logMms), aa, mass_def) + prof = prof_hod.fourier(cosmo, k_use, M, aa, mass_def) + #uk = prof_hod._usat_fourier(cosmo, k_use,(10 ** M), aa, mass_def) #rho_cr = 2.775*h**2*1e11 # M_solMpc^-3 (w/o h in units) #factor_mat = np.tile(10**M/(Omega_m*rho_cr), (len(k_emu), 1)).transpose() dprof_dlogM = (prof_Mp - prof_Mm) / (2 * dlogM)#*np.log(10)) nth_mat = np.tile(nths, (len(k_use), 1)).transpose() - ng = integrate.romb(dndlog10m_emu(M) * Ng, dx = dM, axis = 0) - bgE = integrate.romb(dndlog10m_emu(M) * Ng * \ - (hbf.get_halo_bias(cosmo,(10 ** M), aa)), dx = dM, axis = 0) / ng + ng = integrate.romb(dndlog10m_emu(logM) * Ng, dx = dlogM, axis = 0) + bgE = 
integrate.romb(dndlog10m_emu(logM) * Ng * \ + (hbf.get_halo_bias(cosmo, M, aa)), dx = dlogM, axis = 0) / ng - bgE2 = integrate.romb(dndlog10m_emu(M) * Ng * \ - b2H17(hbf.get_halo_bias(cosmo,(10 ** M), aa)), dx = dM, axis = 0) / ng + bgE2 = integrate.romb(dndlog10m_emu(logM) * Ng * \ + b2H17(hbf.get_halo_bias(cosmo, M, aa)), dx = dlogM, axis = 0) / ng bgL = bgE - 1 - dndlog10m_func_mat = np.tile(dndlog10m_emu(M), (len(k_emu), 1)).transpose() # M_sol,Mpc^-3 - b1E_mat = np.tile((hbf.get_halo_bias(cosmo,(10 ** M), aa)), (len(k_emu), 1)).transpose() + dndlog10m_func_mat = np.tile(dndlog10m_emu(logM), (len(k_emu), 1)).transpose() # M_sol,Mpc^-3 + b1E_mat = np.tile((hbf.get_halo_bias(cosmo, M, aa)), (len(k_emu), 1)).transpose() b1L_th_mat = np.tile(b1_th_tink -1, (len(k_emu), 1)).transpose() - Pgm = integrate.romb(dprof_dlogM * (nth_mat * np.array(Pth)), \ - dx = dM, axis = 0) / ng + + #dPhm_db_nfix = (26. / 21.) * (np.array(Pnth_hp) - np.array(Pnth_hm)) / \ + # (2 * (np.log(Dp[ia]) - np.log(Dm[ia]))) # Mpc^3 - dPhm_db_nfix = (26. / 21.) * (np.array(Pnth_hp) - np.array(Pnth_hm)) / \ + dPhm_db_nfix = (26. / 21.) * np.log(np.array(Pnth_hp) / np.array(Pnth_hm)) * np.array(Pth) / \ (2 * (np.log(Dp[ia]) - np.log(Dm[ia]))) # Mpc^3 dnP_hm_db_emu = nth_mat * (dPhm_db_nfix + b1L_th_mat * np.array(Pbin)) # Dless - dnP_gm_db = integrate.romb(dprof_dlogM * (dnP_hm_db_emu), dx = dM, axis = 0) #Dless + Pgm = integrate.romb(dprof_dlogM * (nth_mat * np.array(Pth)), \ + dx = dlogM, axis = 0) / ng + + dnP_gm_db = integrate.romb(dprof_dlogM * (dnP_hm_db_emu), dx = dlogM, axis = 0) #Dless + if surface: + surface_pgm[ia, :] = ((prof[0] * nth_mat[0] * np.array(Pth)[0]) - (prof[-1] * nth_mat[-1] * np.array(Pth))[-1]) / ng + Pgm += surface_pgm[ia, :] + + surface_resp[ia, :] = (prof[0] * dnP_hm_db_emu[0]) - (prof[-1] * dnP_hm_db_emu[-1]) + dnP_gm_db += surface_resp[ia, :] + Pgm_growth = dnP_gm_db / ng - bgL * Pgm # Dless - Pgm_d = -1. / 3. * np.gradient(np.log(Pgm)) / np.gradient(np.log(k_use)) * Pgm #Dless - - dPgm_db = (Pgm_growth + Pgm_d) - - dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) + Pgm_d = -1. / 3. * np.gradient(np.log(Pgm)) / np.gradient(np.log(k_use)) * Pgm #Dless + + dPgm_db_emu = (Pgm_growth + Pgm_d) + + dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) + + Pgm_lin = bgE * pk2dlin.eval(k_use, aa, cosmo) + dPgm_db_lin = (47/21 + bgE2/bgE - bgE -1/3 * dpklin) * \ + bgE * pk2dlin.eval(k_use, aa, cosmo) + + # stitching + k_switch = 0.08 # [h/Mpc] + + dPgm_db = dPgm_db_lin * np.exp(-k_emu/k_switch) + \ + dPgm_db_emu * (1 - np.exp(-k_emu/k_switch)) + + Pgm = Pgm_lin * np.exp(-k_emu/k_switch) + \ + Pgm * (1 - np.exp(-k_emu/k_switch)) + + # use linear theory below kmin + kmin = 1e-2 # [h/Mpc] + + dPgm_db[k_emu < kmin] = dPgm_db_lin[k_emu < kmin] + dpk12[ia, :] = dPgm_db + + Pgm[k_emu < kmin] = Pgm_lin[k_emu < kmin] + pk12[ia, :] = Pgm + + # use Halo Model above k_max + if highk_HM and k_use[-1] > k_max: + k_HM = 1 # Mpc^-1 + dPgm_db = dPgm_db * np.exp(-k_use/k_HM) + \ + dpk12_halomod[ia, :] * (1 - np.exp(-k_use/k_HM)) + dPgm_db[k_use > k_max] = dpk12_halomod[ia, k_use > k_max] + + #dpk_HM = dpk12_halomod[ia, :] - bgE * Pgm + #dPgm_db = dPgm_db * np.exp(-k_use/k_HM) + \ + # dpk_HM * (1 - np.exp(-k_use/k_HM)) + #dPgm_db[k_use > k_max] = dpk_HM[k_use > k_max] + + dpk12[ia, :] = dPgm_db + + if use_log: + if np.any(dpk12 <= 0): + warnings.warn( + "Some values were not positive. 
" + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + dpk12 = np.log(dpk12) + + #if use_log: + # if np.any(dpk12 <= 0): + ## warnings.warn( + # "Some values were not positive. " + # "The negative values are substituted by 1e-5.", + # category=CCLWarning) + # np.where(dpk12 <= 0, 1e-5, dpk12) + # + # dpk12 = np.log(dpk12) + + pk2d = Pk2D(a_arr=a_arr, lk_arr=lk_arr, pk_arr=pk12, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, + cosmo=cosmo, is_logp=False) + + return dpk12, pk2d, surface_pgm, surface_resp + +def darkemu_Pgg_SSC_zresp(cosmo, prof_hod, deltaz=0.1, + log10Mh_min=12.0,log10Mh_max=15.9, + log10Mh_pivot=12.5, + normprof_hod=False, + lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False, surface=False): + """ Returns a 2D array with shape `[na,nk]` describing the + first function :math:`f_1(k,a)` that makes up a factorizable + trispectrum :math:`T(k_1,k_2,a)=f_1(k_1,a)f_2(k_2,a)` The response is + calculated as: + + .. math:: + \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = + \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) + P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) + P_{u,v}(k) + + where the :math:`I^a_b` are defined in the documentation + of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2` and + :math:`b_{u}` and :math:`b_{v}` are the linear halo biases for quantities + :math:`u` and :math:`v`, respectively (zero if they are not clustering). + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + prof_hod (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + + p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, + the power spectrum stored within `cosmo` will be used. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. 
+ """ + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status, cosmo=cosmo) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status, cosmo=cosmo) + + k_use = np.exp(lk_arr) + + # Check inputs + if not isinstance(prof_hod, halos.profiles.HaloProfile): + raise TypeError("prof_hod must be of type `HaloProfile`") + + h = cosmo["h"] + k_emu = k_use / h # [h/Mpc] + #Omega_m = cosmo["Omega_b"] + cosmo["Omega_c"] + 0.00064/(h**2) + cosmo.compute_linear_power() + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') + + # set cosmology for dark emulator + emu = darkemu_set_cosmology(cosmo) + + na = len(a_arr) + nk = len(k_use) + dpk12 = np.zeros([na, nk]) + pk12 = np.zeros([na, nk]) + #Gresp2h_nfix = np.zeros([na, nk]) + #Gresp2h_thbin = np.zeros([na, nk]) + Gresp2h = np.zeros([na, nk]) + Gresp1h = np.zeros([na, nk]) + Pgg_2h = np.zeros([na, nk]) + Pgg_1h = np.zeros([na, nk]) + surface_pgg = np.zeros([na, nk]) + surface_resp1 = np.zeros([na, nk]) + surface_resp2 = np.zeros([na, nk]) + + #dpk34 = np.zeros([na, nk]) + logMfor_hmf = np.linspace(8,17,200) + logMh = np.linspace(log10Mh_min,log10Mh_max,2**5+1) # M_sol/h + logM = np.log10(10**logMh/h) + Mh = 10**logMh + M = 10**logM + nM = len(M) + Mh_pivot = 10**log10Mh_pivot # M_sol/h + M_pivot = 10**log10Mh_pivot/h # M_sol + dlogM = logM[1] - logM[0] + b1_th_tink = np.zeros(nM) + #b2_th_tink = np.zeros(nM) + Pth = np.zeros((nM,nM,nk)) + Pth_zp = np.zeros((nM,nM,nk)) + Pth_zm = np.zeros((nM,nM,nk)) + + Pth_bin = np.zeros((nM,nM,nk)) + nths = np.zeros(nM) + + mass_def=halos.MassDef200m() + + hmf_DE = halos.MassFuncDarkEmulator(cosmo, mass_def=mass_def, darkemulator=emu) + hbf = halos.hbias.HaloBiasTinker10(cosmo, mass_def=mass_def) + + for ia, aa in enumerate(a_arr): + z = 1. 
/ aa - 1 # dark emulator is valid for 0 =< z <= 1.48 + zp = z + deltaz + zm = z - deltaz + if zm < 0: zm = 0 + ap = 1/(1+zp) + am = 1/(1+zm) + #compute linear growth factor for its derivative of z + D_ap = cosmo.growth_factor_unnorm(ap) + D_am = cosmo.growth_factor_unnorm(am) + + if z > 1.5: + print("dark emulator is valid for z={:.2f}<1.48") + else: + # mass function + dndlog10m_emu = ius(logMfor_hmf ,hmf_DE.get_mass_function(cosmo, 10**logMfor_hmf ,aa)) # Mpc^-3 + + for m in range(nM): + + nths[m] = mass_to_dens(dndlog10m_emu, cosmo, M[m]) + #nths[m] = emu.mass_to_dens(Mh[m] ,z) * h**3 + + #logM1 = np.linspace(logM[m], np.log10(10**16./cosmo["h"]), 2**5+1) + logM1 = np.linspace(logM[m], logM[-1], 2**5+1) + dlogM1 = logM[1] - logM[0] + #b1_th_tink[m] = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, (10 ** logM1), aa), \ + # dx = dlogM1) / nths[m] + b1_th_tink[m] = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, (10 ** logM1), aa), \ + dx = dlogM1) / integrate.romb(dndlog10m_emu(logM1), dx = dlogM1) + + for m in range(nM): + for n in range(nM): + Pth[m,n] = emu.get_phh(k_emu, np.log10(nths[m]/(h**3)), np.log10(nths[n]/(h**3)), z) * (1/h)**3 + Pth_zp[m,n] = emu.get_phh(k_emu, np.log10(nths[m]/(h**3)), np.log10(nths[n]/(h**3)), zp) * (1/h)**3 + Pth_zm[m,n] = emu.get_phh(k_emu, np.log10(nths[m]/(h**3)), np.log10(nths[n]/(h**3)), zm) * (1/h)**3 + Pth_bin[m,n] = emu.get_phh_massthreshold_mass(k_emu, Mh[m], Mh[n], z) * (1/h)**3 + + Nc = prof_hod._Nc(M, aa) + Ns = prof_hod._Ns(M, aa) + fc = prof_hod._fc(aa) + Ng = Nc * (fc + Ns) + logMps = logM + dlogM + logMms = logM - dlogM + + prof_Mp = prof_hod.fourier(cosmo, k_use, (10 ** logMps), aa, mass_def) + prof_Mm = prof_hod.fourier(cosmo, k_use, (10 ** logMms), aa, mass_def) + prof = prof_hod.fourier(cosmo, k_use, M, aa, mass_def) + uk = prof_hod._usat_fourier(cosmo, k_use, M, aa, mass_def) + prof_1h = Nc[:, None] * ((2 * fc * Ns[:, None] * uk) + (Ns[:, None] ** 2 * uk ** 2)) + + dprof_dlogM = (prof_Mp - prof_Mm) / (2 * dlogM)#*np.log(10)) + nth_mat = np.tile(nths, (len(k_use), 1)).transpose() + ng = integrate.romb(dndlog10m_emu(logM) * Ng, dx = dlogM, axis = 0) + b1 = hbf.get_halo_bias(cosmo, M, aa) + bgE = integrate.romb(dndlog10m_emu(logM) * Ng * \ + b1, dx = dlogM, axis = 0) / ng + + bgE2 = integrate.romb(dndlog10m_emu(logM) * Ng * \ + b2H17(b1), dx = dlogM, axis = 0) / ng + bgL = bgE - 1 + + dndlog10m_func_mat = np.tile(dndlog10m_emu(logM), (len(k_emu), 1)).transpose() # M_sol,Mpc^-3 + + b1L_mat = np.tile(b1-1, (len(k_emu), 1)).transpose() + b1L_th_mat = np.tile(b1_th_tink -1, (len(k_emu), 1)).transpose() + + ### P_gg(k) + _Pgg_1h = integrate.romb(dndlog10m_func_mat * prof_1h, \ + dx = dlogM, axis = 0) / (ng ** 2) + + Pgg_2h_int = list() + for m in range(nM): + Pgg_2h_int.append(integrate.romb( + Pth[m] * nth_mat * dprof_dlogM, axis=0, dx=dlogM)) + Pgg_2h_int = np.array(Pgg_2h_int) + _Pgg_2h = integrate.romb( + Pgg_2h_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM)/ (ng ** 2) + + if surface: + surface1 = (((prof[-1] * nth_mat[-1]) ** 2 * Pth[-1,-1]) \ + - ((prof[0] * nth_mat[0]) ** 2 * Pth[0,0]) \ + - 2 * ((prof[0] * nth_mat[0]) * (prof[-1] * nth_mat[-1]) * Pth[0,-1]) \ + ) / (ng ** 2) + + surface2_int = (((prof[-1] * nth_mat[-1]) * Pth[-1]) \ + - ((prof[0] * nth_mat[0]) * Pth[0])) + surface2 = - 2 * integrate.romb( + surface2_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM) / (ng ** 2) + + surface_pgg[ia, :] = surface1 + surface2 + _Pgg_2h += surface1 + surface2 + + Pgg = _Pgg_2h + _Pgg_1h + + ### 2-halo response + 
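+            # Growth response of P_hh at fixed halo number density: the
+            # emulator P_hh evaluated at z + deltaz and z - deltaz is
+            # differenced and divided by twice the corresponding difference
+            # in ln(growth factor), then scaled by 26/21.  The
+            # 2 * b1L * Pth_bin term added below supplies the halo-bias
+            # contribution to the response.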
dPhh_db_nfix = (26. / 21.) * (Pth_zp - Pth_zm)/ \ + (2 * (np.log(D_ap) - np.log(D_am))) # Mpc^3 + + resp_2h_int = list() + for m in range(nM): + dP_hh_db_tot = dPhh_db_nfix[m] + 2 * b1L_th_mat * Pth_bin[m] + resp_2h_int.append(integrate.romb( + dP_hh_db_tot * nth_mat * dprof_dlogM, axis=0, dx=dlogM)) + resp_2h_int = np.array(resp_2h_int) + resp_2h = integrate.romb( + resp_2h_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM)/ (ng ** 2) + + #resp_2h_nfix_int = list() + #resp_2h_thbin_int = list() + #for m in range(nM): + # resp_2h_nfix_int.append(integrate.romb( + # dPhh_db_nfix[m] * nth_mat * dprof_dlogM, axis=0, dx=dlogM)) + # resp_2h_thbin_int.append(integrate.romb( + # (2 * b1L_th_mat * Pth_bin[m]) * nth_mat * dprof_dlogM, axis=0, dx=dlogM)) + #resp_2h_nfix_int = np.array(resp_2h_nfix_int) + #resp_2h_thbin_int = np.array(resp_2h_thbin_int) + + #resp_2h_nfix = integrate.romb( + #resp_2h_nfix_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM)/ (ng ** 2) + + #resp_2h_thbin = integrate.romb( + #resp_2h_thbin_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM)/ (ng ** 2) + + #resp_2h = resp_2h_nfix + resp_2h_thbin + + if surface: + surface1_thbin = (((prof[-1] * nth_mat[-1]) ** 2 * 2 * b1L_th_mat[-1] * Pth_bin[-1,-1]) \ + - ((prof[0] * nth_mat[0]) ** 2 * 2 * b1L_th_mat[0] * Pth_bin[0,0]) \ + - 2 * ((prof[0] * nth_mat[0]) * (prof[-1] * nth_mat[-1]) \ + * ((b1L_th_mat[-1] * Pth_bin[0,-1]) + (b1L_th_mat[0] * Pth_bin[-1,0]))) \ + ) / (ng ** 2) + + surface1_nfix = (((prof[-1] * nth_mat[-1]) ** 2 * dPhh_db_nfix[-1,-1]) \ + - ((prof[0] * nth_mat[0]) ** 2 * dPhh_db_nfix[0,0]) \ + - 2 * ((prof[0] * nth_mat[0]) * (prof[-1] * nth_mat[-1]) * dPhh_db_nfix[0,-1]) \ + ) / (ng ** 2) + surface1 = surface1_nfix + surface1_thbin + + surface2_int = (prof[-1] * nth_mat[-1]) * ((dPhh_db_nfix[-1]) \ + + b1L_th_mat[-1] * Pth_bin[:,-1] + b1L_th_mat * Pth_bin[-1]) \ + - ((prof[0] * nth_mat[0])) * ((dPhh_db_nfix[0]) \ + + b1L_th_mat[0] * Pth_bin[:,0] + b1L_th_mat * Pth_bin[0]) + surface2 = - 2 * integrate.romb( + surface2_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM) / (ng ** 2) + + resp_2h += surface1 + surface2 + surface_resp1[ia, :] = surface1 + surface_resp2[ia, :] = surface2 + + ### 1-halo response + resp_1h = integrate.romb(dndlog10m_func_mat * b1L_mat * prof_1h, \ + dx = dlogM, axis = 0) / (ng ** 2) + + Pgg_growth = (resp_1h + resp_2h) - 2 * bgL * Pgg + + Pgg_d = -1. / 3. * np.gradient(np.log(Pgg)) / np.gradient(np.log(k_use)) * Pgg + + dPgg_db_emu = Pgg_growth + Pgg_d - Pgg + + dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) + + Pgg_lin = bgE **2 * pk2dlin.eval(k_use, aa, cosmo) + dPgg_db_lin = (47/21 + 2 * bgE2/bgE - 2 * bgE -1/3 * dpklin) * \ + Pgg_lin + # stitching + k_switch = 0.08 # [h/Mpc] + + dPgg_db = dPgg_db_lin * np.exp(-k_emu/k_switch) + \ + dPgg_db_emu * (1 - np.exp(-k_emu/k_switch)) + + Pgg = Pgg_lin * np.exp(-k_emu/k_switch) + \ + Pgg * (1 - np.exp(-k_emu/k_switch)) + + # use linear theory below kmin + kmin = 1e-2 # [h/Mpc] + + dPgg_db[k_emu < kmin] = dPgg_db_lin[k_emu < kmin] + dpk12[ia, :] = dPgg_db + + Pgg[k_emu < kmin] = Pgg_lin[k_emu < kmin] + pk12[ia, :] = Pgg + dpk12[ia, :] = dPgg_db + + #Gresp2h_nfix[ia, :] = resp_2h_nfix + #Gresp2h_thbin[ia, :] = resp_2h_thbin + Gresp2h[ia, :] = resp_2h + Gresp1h[ia, :] = resp_1h + Pgg_2h[ia, :] = _Pgg_2h + Pgg_1h[ia, :] = _Pgg_1h + + + if use_log: + if np.any(dpk12 <= 0): + warnings.warn( + "Some values were not positive. 
" + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + dpk12 = np.log(dpk12) + + pk2d = Pk2D(a_arr=a_arr, lk_arr=lk_arr, pk_arr=pk12, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, + cosmo=cosmo, is_logp=False) + + #return dpk12, pk2d, Gresp2h_nfix, Gresp2h_thbin, Gresp1h, Pgg_2h, Pgg_1h + return dpk12, pk2d, Gresp2h, Gresp1h, Pgg_2h, Pgg_1h, surface_pgg, surface_resp1, surface_resp2 + + +def darkemu_Pgg_SSC_Asresp(cosmo, prof_hod, deltalnAs=0.03, + log10Mh_min=12.0,log10Mh_max=15.9, + log10Mh_pivot=12.5, + normprof_hod=False, + lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False, surface=False): + """ Returns a 2D array with shape `[na,nk]` describing the + first function :math:`f_1(k,a)` that makes up a factorizable + trispectrum :math:`T(k_1,k_2,a)=f_1(k_1,a)f_2(k_2,a)` The response is + calculated as: + + .. math:: + \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = + \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) + P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) + P_{u,v}(k) + + where the :math:`I^a_b` are defined in the documentation + of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2` and + :math:`b_{u}` and :math:`b_{v}` are the linear halo biases for quantities + :math:`u` and :math:`v`, respectively (zero if they are not clustering). + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + prof_hod (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + + p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, + the power spectrum stored within `cosmo` will be used. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. 
+ """ + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status, cosmo=cosmo) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status, cosmo=cosmo) + + k_use = np.exp(lk_arr) + + # Check inputs + if not isinstance(prof_hod, halos.profiles.HaloProfile): + raise TypeError("prof_hod must be of type `HaloProfile`") + + h = cosmo["h"] + k_emu = k_use / h # [h/Mpc] + #Omega_m = cosmo["Omega_b"] + cosmo["Omega_c"] + 0.00064/(h**2) + cosmo.compute_linear_power() + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') + + # set cosmology for dark emulator + emu = darkemu_set_cosmology(cosmo) + emu_Ap, emu_Am = darkemu_set_cosmology_forAsresp(cosmo, deltalnAs) + + na = len(a_arr) + nk = len(k_use) + dpk12 = np.zeros([na, nk]) + pk12 = np.zeros([na, nk]) + #Gresp2h_nfix = np.zeros([na, nk]) + #Gresp2h_thbin = np.zeros([na, nk]) + Gresp2h = np.zeros([na, nk]) + Gresp1h = np.zeros([na, nk]) + Pgg_2h = np.zeros([na, nk]) + Pgg_1h = np.zeros([na, nk]) + surface_pgg = np.zeros([na, nk]) + surface_resp = np.zeros([na, nk]) + + #dpk34 = np.zeros([na, nk]) + logMfor_hmf = np.linspace(8,17,200) + logMh = np.linspace(log10Mh_min,log10Mh_max,2**5+1) # M_sol/h + logM = np.log10(10**logMh/h) + Mh = 10**logMh + M = 10**logM + nM = len(M) + Mh_pivot = 10**log10Mh_pivot # M_sol/h + M_pivot = 10**log10Mh_pivot/h # M_sol + dlogM = logM[1] - logM[0] + b1_th_tink = np.zeros(nM) + Pth = np.zeros((nM,nM,nk)) + Pth_Ap = np.zeros((nM,nM,nk)) + Pth_Am = np.zeros((nM,nM,nk)) + Pth_bin = np.zeros((nM,nM,nk)) + nths = np.zeros(nM) + + mass_def=halos.MassDef200m() + + hmf_DE = halos.MassFuncDarkEmulator(cosmo, mass_def=mass_def, darkemulator=emu) + hbf = halos.hbias.HaloBiasTinker10(cosmo, mass_def=mass_def) + + for ia, aa in enumerate(a_arr): + z = 1. 
/ aa - 1 # dark emulator is valid for 0 =< z <= 1.48 + + if z > 1.5: + print("dark emulator is valid for z={:.2f}<1.48") + else: + # mass function + dndlog10m_emu = ius(logMfor_hmf ,hmf_DE.get_mass_function(cosmo, 10**logMfor_hmf ,aa)) # Mpc^-3 + + for m in range(nM): + + nths[m] = mass_to_dens(dndlog10m_emu, cosmo, M[m]) + #nths[m] = emu.mass_to_dens(Mh[m] ,z) * h**3 + + #logM1 = np.linspace(logM[m], np.log10(10**16./cosmo["h"]), 2**5+1) + logM1 = np.linspace(logM[m], logM[-1], 2**5+1) + dlogM1 = logM[1] - logM[0] + #b1_th_tink[m] = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, (10 ** logM1), aa), \ + # dx = dlogM1) / nths[m] + b1_th_tink[m] = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, (10 ** logM1), aa), \ + dx = dlogM1) / integrate.romb(dndlog10m_emu(logM1), dx = dlogM1) + + for m in range(nM): + for n in range(nM): + Pth[m,n] = emu.get_phh(k_emu, np.log10(nths[m]/(h**3)), np.log10(nths[n]/(h**3)), z) * (1/h)**3 + Pth_Ap[m,n] = emu_Ap.get_phh(k_emu, np.log10(nths[m]/(h**3)), np.log10(nths[n]/(h**3)), z) * (1/h)**3 + Pth_Am[m,n] = emu_Am.get_phh(k_emu, np.log10(nths[m]/(h**3)), np.log10(nths[n]/(h**3)), z) * (1/h)**3 + Pth_bin[m,n] = emu.get_phh_massthreshold_mass(k_emu, Mh[m], Mh[n], z) * (1/h)**3 + + Nc = prof_hod._Nc(M, aa) + Ns = prof_hod._Ns(M, aa) + fc = prof_hod._fc(aa) + Ng = Nc * (fc + Ns) + logMps = logM + dlogM + logMms = logM - dlogM + + prof_Mp = prof_hod.fourier(cosmo, k_use, (10 ** logMps), aa, mass_def) + prof_Mm = prof_hod.fourier(cosmo, k_use, (10 ** logMms), aa, mass_def) + prof = prof_hod.fourier(cosmo, k_use, M, aa, mass_def) + uk = prof_hod._usat_fourier(cosmo, k_use, M, aa, mass_def) + prof_1h = Nc[:, None] * ((2 * fc * Ns[:, None] * uk) + (Ns[:, None] ** 2 * uk ** 2)) + + dprof_dlogM = (prof_Mp - prof_Mm) / (2 * dlogM)#*np.log(10)) + nth_mat = np.tile(nths, (len(k_use), 1)).transpose() + ng = integrate.romb(dndlog10m_emu(logM) * Ng, dx = dlogM, axis = 0) + b1 = hbf.get_halo_bias(cosmo, M, aa) + bgE = integrate.romb(dndlog10m_emu(logM) * Ng * \ + b1, dx = dlogM, axis = 0) / ng + + bgE2 = integrate.romb(dndlog10m_emu(logM) * Ng * \ + b2H17(b1), dx = dlogM, axis = 0) / ng + bgL = bgE - 1 + + dndlog10m_func_mat = np.tile(dndlog10m_emu(logM), (len(k_emu), 1)).transpose() # M_sol,Mpc^-3 + + b1L_mat = np.tile(b1-1, (len(k_emu), 1)).transpose() + b1L_th_mat = np.tile(b1_th_tink -1, (len(k_emu), 1)).transpose() + + ### P_gg(k) + _Pgg_1h = integrate.romb(dndlog10m_func_mat * prof_1h, \ + dx = dlogM, axis = 0) / (ng ** 2) + + Pgg_2h_int = list() + for m in range(nM): + Pgg_2h_int.append(integrate.romb( + Pth[m] * nth_mat * dprof_dlogM, axis=0, dx=dlogM)) + Pgg_2h_int = np.array(Pgg_2h_int) + _Pgg_2h = integrate.romb( + Pgg_2h_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM) / (ng ** 2) + + if surface: + surface1 = (((prof[-1] * nth_mat[-1]) ** 2 * Pth[-1,-1]) \ + - ((prof[0] * nth_mat[0]) ** 2 * Pth[0,0]) \ + - 2 * ((prof[0] * nth_mat[0]) * (prof[-1] * nth_mat[-1]) * Pth[0,-1]) \ + ) / (ng ** 2) + + surface2_int = (((prof[-1] * nth_mat[-1]) * Pth[-1]) \ + - ((prof[0] * nth_mat[0]) * Pth[0])) + surface2 = - 2 * integrate.romb( + surface2_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM) / (ng ** 2) + + surface_pgg[ia, :] = surface1 + surface2 + _Pgg_2h += surface1 + surface2 + + + Pgg = _Pgg_2h + _Pgg_1h + + ### 2-halo response + dPhh_db_nfix = (26. / 21.) 
* (Pth_Ap - Pth_Am)/(2 * deltalnAs) + + resp_2h_int = list() + for m in range(nM): + dP_hh_db_tot = dPhh_db_nfix[m] + 2 * b1L_th_mat * Pth_bin[m] + resp_2h_int.append(integrate.romb( + dP_hh_db_tot * nth_mat * dprof_dlogM, axis=0, dx=dlogM)) + resp_2h_int = np.array(resp_2h_int) + resp_2h = integrate.romb( + resp_2h_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM)/ (ng ** 2) + + #resp_2h_nfix_int = list() + #resp_2h_thbin_int = list() + #for m in range(nM): + # resp_2h_nfix_int.append(integrate.romb( + # dPhh_db_nfix[m] * nth_mat * dprof_dlogM, axis=0, dx=dlogM)) + # resp_2h_thbin_int.append(integrate.romb( + # (2 * b1L_th_mat * Pth_bin[m]) * nth_mat * dprof_dlogM, axis=0, dx=dlogM)) + #resp_2h_nfix_int = np.array(resp_2h_nfix_int) + #resp_2h_thbin_int = np.array(resp_2h_thbin_int) + + #resp_2h_nfix = integrate.romb( + #resp_2h_nfix_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM)/ (ng ** 2) + + #resp_2h_thbin = integrate.romb( + #resp_2h_thbin_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM)/ (ng ** 2) + + #resp_2h = resp_2h_nfix + resp_2h_thbin + + if surface: + surface1_thbin = (((prof[-1] * nth_mat[-1]) ** 2 * 2 * b1L_th_mat[-1] * Pth_bin[-1,-1]) \ + - ((prof[0] * nth_mat[0]) ** 2 * 2 * b1L_th_mat[0] * Pth_bin[0,0]) \ + - 2 * ((prof[0] * nth_mat[0]) * (prof[-1] * nth_mat[-1]) \ + * ((b1L_th_mat[-1] * Pth_bin[0,-1]) + (b1L_th_mat[0] * Pth_bin[-1,0]))) \ + ) / (ng ** 2) + + surface1_nfix = (((prof[-1] * nth_mat[-1]) ** 2 * dPhh_db_nfix[-1,-1]) \ + - ((prof[0] * nth_mat[0]) ** 2 * dPhh_db_nfix[0,0]) \ + - 2 * ((prof[0] * nth_mat[0]) * (prof[-1] * nth_mat[-1]) * dPhh_db_nfix[0,-1]) \ + ) / (ng ** 2) + surface1 = surface1_nfix + surface1_thbin + + surface2_int = (prof[-1] * nth_mat[-1]) * ((dPhh_db_nfix[-1]) \ + + b1L_th_mat[-1] * Pth_bin[:,-1] + b1L_th_mat * Pth_bin[-1]) \ + - ((prof[0] * nth_mat[0])) * ((dPhh_db_nfix[0]) \ + + b1L_th_mat[0] * Pth_bin[:,0] + b1L_th_mat * Pth_bin[0]) + surface2 = - 2 * integrate.romb( + surface2_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM) / (ng ** 2) + + resp_2h += surface1 + surface2 + surface_resp[ia, :] = surface1 + surface2 + + ### 1-halo response + resp_1h = integrate.romb(dndlog10m_func_mat * b1L_mat * prof_1h, \ + dx = dlogM, axis = 0) / (ng ** 2) + + + Pgg_growth = (resp_1h + resp_2h) - 2 * bgL * Pgg + + Pgg_d = -1. / 3. * np.gradient(np.log(Pgg)) / np.gradient(np.log(k_use)) * Pgg + + dPgg_db_emu = Pgg_growth + Pgg_d - Pgg + + dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) + + Pgg_lin = bgE **2 * pk2dlin.eval(k_use, aa, cosmo) + dPgg_db_lin = (47/21 + 2 * bgE2/bgE - 2 * bgE -1/3 * dpklin) * \ + Pgg_lin + # stitching + k_switch = 0.08 # [h/Mpc] + + dPgg_db = dPgg_db_lin * np.exp(-k_emu/k_switch) + \ + dPgg_db_emu * (1 - np.exp(-k_emu/k_switch)) + + Pgg = Pgg_lin * np.exp(-k_emu/k_switch) + \ + Pgg * (1 - np.exp(-k_emu/k_switch)) + + # use linear theory below kmin + kmin = 1e-2 # [h/Mpc] + + dPgg_db[k_emu < kmin] = dPgg_db_lin[k_emu < kmin] + dpk12[ia, :] = dPgg_db + + Pgg[k_emu < kmin] = Pgg_lin[k_emu < kmin] + pk12[ia, :] = Pgg + dpk12[ia, :] = dPgg_db + + #Gresp2h_nfix[ia, :] = resp_2h_nfix + #Gresp2h_thbin[ia, :] = resp_2h_thbin + Gresp2h[ia, :] = resp_2h + Gresp1h[ia, :] = resp_1h + Pgg_2h[ia, :] = _Pgg_2h + Pgg_1h[ia, :] = _Pgg_1h + + + if use_log: + if np.any(dpk12 <= 0): + warnings.warn( + "Some values were not positive. 
" + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + dpk12 = np.log(dpk12) + + pk2d = Pk2D(a_arr=a_arr, lk_arr=lk_arr, pk_arr=pk12, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, + cosmo=cosmo, is_logp=False) + + #return dpk12, pk2d, Gresp2h_nfix, Gresp2h_thbin, Gresp1h, Pgg_2h, Pgg_1h + return dpk12, pk2d, Gresp2h, Gresp1h, Pgg_2h, Pgg_1h, surface_pgg, surface_resp + + +def darkemu_pgg(cosmo, prof_hod, + log10Mh_min=12.0,log10Mh_max=15.9, + log10Mh_pivot=12.5, + normprof_hod=False, k_max=2.0, + lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False): + """ Returns a 2D array with shape `[na,nk]` describing the + first function :math:`f_1(k,a)` that makes up a factorizable + trispectrum :math:`T(k_1,k_2,a)=f_1(k_1,a)f_2(k_2,a)` The response is + calculated as: + + .. math:: + \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = + \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) + P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) + P_{u,v}(k) + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + prof_hod (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + + p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, + the power spectrum stored within `cosmo` will be used. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. 
+ """ + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status, cosmo=cosmo) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status, cosmo=cosmo) + + k_use = np.exp(lk_arr) + + # Check inputs + if not isinstance(prof_hod, halos.profiles.HaloProfile): + raise TypeError("prof_hod must be of type `HaloProfile`") + + h = cosmo["h"] + k_emu = k_use / h # [h/Mpc] + #Omega_m = cosmo["Omega_b"] + cosmo["Omega_c"] + 0.00064/(h**2) + cosmo.compute_linear_power() + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') + + # set cosmology for dark emulator + emu = darkemu_set_cosmology(cosmo) + + na = len(a_arr) + nk = len(k_use) + pk12 = np.zeros([na, nk]) + pk12_1h = np.zeros([na, nk]) + pk12_2h = np.zeros([na, nk]) + + logMfor_hmf = np.linspace(8,17,200) + logMh = np.linspace(log10Mh_min,log10Mh_max,2**5+1) # M_sol/h + logM = np.log10(10**logMh/h) + Mh = 10**logMh + M = 10**logM + nM = len(M) + Mh_pivot = 10**log10Mh_pivot # M_sol/h + M_pivot = 10**log10Mh_pivot/h # M_sol + dlogM = logM[1] - logM[0] + Pth = np.zeros((nM,nM,nk)) + #dprof_dlogM_mat = np.zeros((nM,nM,nk)) + nths = np.zeros(nM) + mass_def=halos.MassDef200m() + + hmf_DE = halos.MassFuncDarkEmulator(cosmo, mass_def=mass_def, darkemulator=emu) + hbf = halos.hbias.HaloBiasTinker10(cosmo, mass_def=mass_def) + + for ia, aa in enumerate(a_arr): + z = 1. / aa - 1 # dark emulator is valid for 0 =< z <= 1.48 + # mass function + dndlog10m_emu = ius(logMfor_hmf ,hmf_DE.get_mass_function(cosmo, 10**logMfor_hmf ,aa)) # Mpc^-3 + + for m in range(nM): + #nths[m] = emu.mass_to_dens(Mh[m] ,z) * h**3 + nths[m] = mass_to_dens(dndlog10m_emu, cosmo, M[m]) + + for m in range(nM): + for n in range(nM): + Pth[m,n] = emu.get_phh(k_emu, np.log10(nths[m]/(h**3)), np.log10(nths[n]/(h**3)), z) * (1/h)**3 + + Nc = prof_hod._Nc(M, aa) + Ns = prof_hod._Ns(M, aa) + fc = prof_hod._fc(aa) + Ng = Nc * (fc + Ns) + logMps = logM + dlogM + logMms = logM - dlogM + + prof_Mp = prof_hod.fourier(cosmo, k_use, (10 ** logMps), aa, mass_def) + prof_Mm = prof_hod.fourier(cosmo, k_use, (10 ** logMms), aa, mass_def) + uk = prof_hod._usat_fourier(cosmo, k_use, M, aa, mass_def) + prof_1h = Nc[:, None] * ((2 * fc * Ns[:, None] * uk) + (Ns[:, None] ** 2 * uk ** 2)) + + dprof_dlogM = (prof_Mp - prof_Mm) / (2 * dlogM)#*np.log(10)) + #for m in range(nM): + # dprof_dlogM_mat[m] = dprof_dlogM + nth_mat = np.tile(nths, (len(k_use), 1)).transpose() + ng = integrate.romb(dndlog10m_emu(logM) * Ng, dx = dlogM, axis = 0) + bgE = integrate.romb(dndlog10m_emu(logM) * Ng * \ + (hbf.get_halo_bias(cosmo, M, aa)), dx = dlogM, axis = 0) / ng + + dndlog10m_func_mat = np.tile(dndlog10m_emu(logM), (len(k_emu), 1)).transpose() # M_sol,Mpc^-3 + + Pgg_1h = integrate.romb(dndlog10m_func_mat * prof_1h, \ + dx = dlogM, axis = 0) / (ng ** 2) + + Pgg_2h_int = list() + for m in range(nM): + Pgg_2h_int.append(integrate.romb( + Pth[m] * nth_mat * dprof_dlogM, axis=0, dx=dlogM)) + Pgg_2h_int = np.array(Pgg_2h_int) + Pgg_2h = integrate.romb( + Pgg_2h_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM)/ (ng ** 2) + + Pgg = Pgg_2h + Pgg_1h + pk12_1h[ia, :] = Pgg_1h + pk12_2h[ia, :] = Pgg_2h + + Pgg_lin = bgE**2 * pk2dlin.eval(k_use, aa, cosmo) + + # stitching + k_switch = 0.08 # [h/Mpc] + + Pgg = Pgg_lin * np.exp(-k_emu/k_switch) + \ + Pgg * (1 - np.exp(-k_emu/k_switch)) + + # use linear theory below kmin + kmin = 1e-2 # [h/Mpc] 
+ + Pgg[k_emu < kmin] = Pgg_lin[k_emu < kmin] + pk12[ia, :] = Pgg + + + pk2d = Pk2D(a_arr=a_arr, lk_arr=lk_arr, pk_arr=pk12, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, + cosmo=cosmo, is_logp=False) + + return pk2d, pk12, pk12_1h, pk12_2h, bgE + +def darkemu_pgg_massbin(cosmo, prof_hod, + log10Mh_min=12.0,log10Mh_max=15.9, + log10Mh_pivot=12.5, + normprof_hod=False, k_max=2.0, + lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False): + """ Returns a 2D array with shape `[na,nk]` describing the + first function :math:`f_1(k,a)` that makes up a factorizable + trispectrum :math:`T(k_1,k_2,a)=f_1(k_1,a)f_2(k_2,a)` The response is + calculated as: + + .. math:: + \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = + \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) + P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) + P_{u,v}(k) + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + prof_hod (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + + p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, + the power spectrum stored within `cosmo` will be used. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. 
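+
+    Example (illustrative sketch, reusing the placeholder inputs from the
+    sketch in :func:`darkemu_Pgg_SSC_zresp`; unlike :func:`darkemu_pgg`, the
+    2-halo term here is built from the emulator's mass-bin halo auto spectra
+    rather than from mass-threshold samples)::
+
+        pk2d, pk12, pk12_1h, pk12_2h = darkemu_pgg_massbin(
+            cosmo, prof_hod, lk_arr=lk_arr, a_arr=a_arr)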
+ """ + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status, cosmo=cosmo) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status, cosmo=cosmo) + + k_use = np.exp(lk_arr) + + # Check inputs + if not isinstance(prof_hod, halos.profiles.HaloProfile): + raise TypeError("prof_hod must be of type `HaloProfile`") + + h = cosmo["h"] + k_emu = k_use / h # [h/Mpc] + #Omega_m = cosmo["Omega_b"] + cosmo["Omega_c"] + 0.00064/(h**2) + cosmo.compute_linear_power() + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') + + # set cosmology for dark emulator + emu = darkemu_set_cosmology(cosmo) + + na = len(a_arr) + nk = len(k_use) + pk12 = np.zeros([na, nk]) + pk12_1h = np.zeros([na, nk]) + pk12_2h = np.zeros([na, nk]) + + logMfor_hmf = np.linspace(8,17,200) + logMh = np.linspace(log10Mh_min,log10Mh_max,2**5+1) # M_sol/h + logM = np.log10(10**logMh/h) + Mh = 10**logMh + M = 10**logM + nM = len(M) + Mh_pivot = 10**log10Mh_pivot # M_sol/h + M_pivot = 10**log10Mh_pivot/h # M_sol + dlogM = logM[1] - logM[0] + Pbin = np.zeros((nM,nM,nk)) + mass_def=halos.MassDef200m() + + hmf_DE = halos.MassFuncDarkEmulator(cosmo, mass_def=mass_def, darkemulator=emu) + hbf = halos.hbias.HaloBiasTinker10(cosmo, mass_def=mass_def) + + for ia, aa in enumerate(a_arr): + z = 1. / aa - 1 # dark emulator is valid for 0 =< z <= 1.48 + # mass function + dndlog10m_emu = ius(logMfor_hmf ,hmf_DE.get_mass_function(cosmo, 10**logMfor_hmf ,aa)) # Mpc^-3 + + for m in range(nM): + for n in range(nM): + Pbin[m,n] = emu.get_phh_mass(k_emu, Mh[m], Mh[n], z) * (1/h)**3 + + Nc = prof_hod._Nc(M, aa) + Ns = prof_hod._Ns(M, aa) + fc = prof_hod._fc(aa) + Ng = Nc * (fc + Ns) + uk = prof_hod._usat_fourier(cosmo, k_use, M, aa, mass_def) + prof = prof_hod.fourier(cosmo, k_use, M, aa, mass_def) + + prof_1h = Nc[:, None] * ((2 * fc * Ns[:, None] * uk) + (Ns[:, None] ** 2 * uk ** 2)) + + ng = integrate.romb(dndlog10m_emu(logM) * Ng, dx = dlogM, axis = 0) + bgE = integrate.romb(dndlog10m_emu(logM) * Ng * \ + (hbf.get_halo_bias(cosmo, M, aa)), dx = dlogM, axis = 0) / ng + + dndlog10m_func_mat = np.tile(dndlog10m_emu(logM), (len(k_emu), 1)).transpose() # M_sol,Mpc^-3 + + Pgg_1h = integrate.romb(dndlog10m_func_mat * prof_1h, \ + dx = dlogM, axis = 0) / (ng ** 2) - Pgm_lin = bgE * pk2dlin.eval(k_use, aa, cosmo) - dPgm_db_lin = (47/21 + bgE2/bgE - bgE -1/3 * dpklin) * \ - bgE * pk2dlin.eval(k_use, aa, cosmo) + + Pgg_2h_int = list() + for m in range(nM): + Pgg_2h_int.append(integrate.romb( + Pbin[m] * dndlog10m_func_mat * prof, axis=0, dx=dlogM)) + Pgg_2h_int = np.array(Pgg_2h_int) + Pgg_2h = integrate.romb( + Pgg_2h_int * dndlog10m_func_mat * prof, axis=0, dx=dlogM)/ (ng ** 2) + + Pgg = Pgg_2h + Pgg_1h + pk12_1h[ia, :] = Pgg_1h + pk12_2h[ia, :] = Pgg_2h + + Pgg_lin = bgE**2 * pk2dlin.eval(k_use, aa, cosmo) - # stitching - k_switch = 0.08 # [h/Mpc] - kmin = 1e-2 # [h/Mpc] + # stitching + k_switch = 0.08 # [h/Mpc] - dPgm_db = dPgm_db_lin * np.exp(-k_emu/k_switch) + \ - (Pgm_growth + Pgm_d) * (1 - np.exp(-k_emu/k_switch)) - - Pgm = Pgm_lin * np.exp(-k_emu/k_switch) + \ - Pgm * (1 - np.exp(-k_emu/k_switch)) - - # use linear theory below kmin - dPgm_db[k_emu < kmin] = dPgm_db_lin[k_emu < kmin] - dpk12[ia, :] = dPgm_db + Pgg = Pgg_lin * np.exp(-k_emu/k_switch) + \ + Pgg * (1 - np.exp(-k_emu/k_switch)) - Pgm[k_emu < kmin] = Pgm_lin[k_emu < kmin] - pk12[ia, :] = Pgm + # 
use linear theory below kmin + kmin = 1e-2 # [h/Mpc] - # use Halo Model above kmax - if k_use[-1] > kmax: - #i12 = integrate.romb(dndlog10m_func_mat * b1E_mat * prof * factor_mat * uk, dx = dM, axis = 0) /ng - - #i02 = integrate.romb(dndlog10m_func_mat * prof * factor_mat * uk, dx = dM, axis = 0) /ng - #HM_1h_resp = i12 - bgE * i02 - k_HM = 1 - dPgm_db1 = dPgm_db * np.exp(-k_use/k_HM) + \ - HM_1h_resp * (1 - np.exp(-k_use/k_HM)) - dPgm_db[k_use > kmax] = dPgm_db1[k_use > kmax] - - - if use_log: - if np.any(dpk12 <= 0): - warnings.warn( - "Some values were not positive. " - "The negative values are substituted by 1e-5.", - category=CCLWarning) - np.where(dpk12 <= 0, 1e-5, dpk12) + Pgg[k_emu < kmin] = Pgg_lin[k_emu < kmin] + pk12[ia, :] = Pgg - dpk12 = np.log(dpk12) pk2d = Pk2D(a_arr=a_arr, lk_arr=lk_arr, pk_arr=pk12, extrap_order_lok=extrap_order_lok, extrap_order_hik=extrap_order_hik, cosmo=cosmo, is_logp=False) - return dpk12, pk2d + return pk2d, pk12, pk12_1h, pk12_2h def halomod_Tk3D_SSC(cosmo, prof1, @@ -618,6 +1657,299 @@ def halomod_Tk3D_SSC(cosmo, prof1, extrap_order_hik=extrap_order_hik, is_logt=use_log) return tk3d +def Pth_hm_HM_linb(k, M, cosmo, dndlog10m_emu, nfw, rho_m, hbf, mass_def, a): + + logM1 = np.linspace(np.log10(M), np.log10(10**15.9/cosmo["h"]), 2**5+1) + dlogM1 = logM1[1] - logM1[0] + dens = integrate.romb(dndlog10m_emu(logM1), dx = dlogM1) + dndlog10m_func_mat = np.tile(dndlog10m_emu(logM1), (len(k), 1)).transpose() # M_sol,Mpc^-3 + + # 1 halo term + rho_h = nfw.fourier(cosmo, k, 10**logM1, a, mass_def) + P1h = integrate.romb(dndlog10m_func_mat * rho_h/rho_m, dx = dlogM1, axis=0)/dens + + # 2 halo term + cosmo.compute_linear_power() + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') + pklin = pk2dlin.eval(k,a) + b1_th = integrate.romb(dndlog10m_func_mat * hbf.get_halo_bias(cosmo,(10 ** logM1), a)[:, None] , dx = dlogM1, axis=0)\ + /dens + P2h = b1_th * pklin + + P_HM = P1h + P2h + + return P_HM + + +def Pth_hm_lowmass_HM_linb(k, M, M_pivot, emu, cosmo, hmf, hbf, nfw, mass_def, a): + # pivot mass (Dark Emulator) + z = 1/a -1 + Pth_hm_pivot = emu.get_phm_massthreshold(k/cosmo["h"], M_pivot*cosmo["h"], z) / (cosmo["h"]**3) + + rho_m = ccl.rho_x(cosmo, a, "matter", is_comoving=True) + cosmo.compute_linear_power() + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') + pklin = pk2dlin.eval(k,a) + + Mfor_hmf = np.linspace(8,17,600) # Msol + dndlog10m_emu = ius(Mfor_hmf ,hmf.get_mass_function(cosmo, 10**Mfor_hmf ,a)) # Mpc^-3 + + M1 = np.linspace(np.log10(M), np.log10(10**15.9/cosmo["h"]), 2**5+1) + dM1 = M1[1] - M1[0] + dens = integrate.romb(dndlog10m_emu(M1), dx = dM1) + dndlog10m_func_mat = np.tile(dndlog10m_emu(M1), (len(k), 1)).transpose() # M_sol,Mpc^-3 + + # 1 halo term + rho_h = nfw.fourier(cosmo, k, 10**M1, a, mass_def) + + P1h = integrate.romb(dndlog10m_func_mat * rho_h/rho_m, dx = dM1, axis=0)/dens + + # 2 halo term + b1_th = integrate.romb(dndlog10m_func_mat * hbf.get_halo_bias(cosmo,(10 ** M1), a)[:, None] , dx = dM1, axis=0)\ + /dens + + P2h = b1_th * pklin + + M1 = np.linspace(np.log10(M_pivot), np.log10(10**15.9/cosmo["h"]), 2**5+1) + dM1 = M1[1] - M1[0] + dens = integrate.romb(dndlog10m_emu(M1), dx = dM1) + + dndlog10m_func_mat = np.tile(dndlog10m_emu(M1), (len(k), 1)).transpose() # M_sol,Mpc^-3 + + rho_h_pivot = nfw.fourier(cosmo, k, 10**M1, a, mass_def) + + P1h_pivot = integrate.romb(dndlog10m_func_mat * rho_h_pivot/rho_m, dx = dM1, axis=0)/dens + + # 2 halo term + b1_th_pivot = integrate.romb(dndlog10m_func_mat * 
hbf.get_halo_bias(cosmo,(10 ** M1), a)[:, None] , dx = dM1, axis=0)\ + /dens + + P2h_pivot = b1_th_pivot * pklin + + P_HM = P1h + P2h + P_HM_pivot = P1h_pivot + P2h_pivot + + # rescaling + Pth_hm = Pth_hm_pivot * (P_HM/P_HM_pivot) + + return Pth_hm + +def Pth_hm_lowmass_BMO(k, M, M_pivot, emu, cosmo, dndlog10m_emu, hbf, cM, cM_vir, mass_def, rho_m, a, b1_th_tink=None): + M_use = np.atleast_1d(M) + k_use = np.atleast_1d(k) + P1h = np.zeros((len(M_use),len(k_use))) + if b1_th_tink is None: + b1_th = np.zeros(len(M_use)) + else: + b1_th = b1_th_tink + for i in range(len(M_use)): + logM1 = np.linspace(np.log10(M_use[i]), np.log10(10**15.9/cosmo["h"]), 2**5+1) + dlogM1 = logM1[1] - logM1[0] + M1 = 10**logM1 + dens = integrate.romb(dndlog10m_emu(logM1), dx = dlogM1) + if b1_th_tink is None: + b1_th[i] = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, M1, a), dx = dlogM1)\ + /dens + dndlog10m_func_mat = np.tile(dndlog10m_emu(logM1), (len(k_use), 1)).transpose() # M_sol,Mpc^-3 + + # 1 halo term + P1h_bin = (M1[:, None]/rho_m) * u_M(k_use, M1, cosmo, cM, cM_vir, mass_def, a) + + P1h[i] = integrate.romb(dndlog10m_func_mat * P1h_bin, dx = dlogM1, axis=0)/dens + + # pivot mass + Pth_pivot = emu.get_phm_massthreshold(k_use / cosmo["h"], M_pivot * cosmo["h"], 1./a -1) * (1/cosmo["h"])**3 + + logM1 = np.linspace(np.log10(M_pivot), np.log10(10**15.9/cosmo["h"]), 2**5+1) + dM1 = logM1[1] - logM1[0] + M1 = 10**logM1 + dens = integrate.romb(dndlog10m_emu(logM1), dx = dlogM1) + + b1_th_pivot = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, M1, a), dx = dlogM1)\ + /dens + dndlog10m_func_mat = np.tile(dndlog10m_emu(logM1), (len(k_use), 1)).transpose() # M_sol,Mpc^-3 + + P1h_bin_pivot = (M1[:, None]/rho_m) * u_M(k_use, M1, cosmo, cM, cM_vir, mass_def, a) + + P1h_pivot = integrate.romb(dndlog10m_func_mat * P1h_bin_pivot, dx = dlogM1, axis=0)/dens + + # 2 halo term + cosmo.compute_linear_power() + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') + pklin = pk2dlin.eval(k_use,a) + + P2h = b1_th[:, None] * pklin[None, :] + P2h_pivot = b1_th_pivot * pklin + + P_HM = P1h + P2h + P_HM_pivot = P1h_pivot + P2h_pivot + + # rescaling + Pth = Pth_pivot[None, :] * (P_HM/P_HM_pivot[None, :]) + + if np.ndim(k) == 0: + Pth = np.squeeze(Pth, axis=-1) + if np.ndim(M) == 0: + Pth = np.squeeze(Pth, axis=0) + + return Pth + +def Pbin_hm_lowmass_BMO_Mvector(k, M, M_pivot, emu, cosmo, hbf, cM, mass_def, a, tau_v): + M_use = np.atleast_1d(M) + k_use = np.atleast_1d(k) + + # pivot mass (Dark Emulator) + z = 1/a -1 + Pbin_hm_pivot = emu.get_phm_mass(k/cosmo["h"], M_pivot*cosmo["h"], z) / (cosmo["h"]**3) + + # 1 halo term + rho_m = ccl.rho_x(cosmo, a, "matter", is_comoving=True) + P1h = (M_use[:, None]/rho_m) * u_M(k_use, M_use, cosmo, cM, mass_def, a, tau_v) + P1h_pivot = (M_pivot/rho_m) * u_M(k_use, M_pivot, cosmo, cM, mass_def, a, tau_v) + + # 2 halo term + cosmo.compute_linear_power() + pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') + pklin = pk2dlin.eval(k,a) + b1 = hbf.get_halo_bias(cosmo, M_use, a) + b1_pivot = hbf.get_halo_bias(cosmo, M_pivot, a) + + P2h = b1[:, None] * pklin[None, :] + P2h_pivot = b1_pivot * pklin + + P_HM = P1h + P2h + P_HM_pivot = P1h_pivot + P2h_pivot + + # rescaling + Pbin_hm = Pbin_hm_pivot[None, :] * (P_HM/P_HM_pivot[None, :]) + + return Pbin_hm, P1h, P2h, P_HM, P1h_pivot, P2h_pivot, P_HM_pivot + +def Pbin_hm_lowmass_BMO(k, M, M_pivot, emu, cosmo, hbf, cM, cM_vir, mass_def, pk2dlin, rho_m ,a): + M_use = np.atleast_1d(M) + k_use = np.atleast_1d(k) + + 
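+    # Low-mass extension by rescaling: the emulator prediction is evaluated
+    # only at the pivot mass and multiplied by the ratio of simple halo-model
+    # spectra (truncated-NFW/BMO 1-halo term plus linear-bias 2-halo term)
+    # at the target mass and at the pivot mass,
+    #   P_bin(k, M) ~ P_bin_pivot(k) * P_HM(k, M) / P_HM(k, M_pivot).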
# 1 halo term + P1h = (M_use[:, None]/rho_m) * u_M(k_use, M_use, cosmo, cM, cM_vir, mass_def, a) + P1h_pivot = (M_pivot/rho_m) * u_M(k_use, M_pivot, cosmo, cM, cM_vir, mass_def, a) + + # 2 halo term + pklin = pk2dlin.eval(k_use,a) + b1 = hbf.get_halo_bias(cosmo, M_use, a) + b1_pivot = hbf.get_halo_bias(cosmo, M_pivot, a) + + P2h = b1[:, None] * pklin[None, :] + P2h_pivot = b1_pivot * pklin + + P_HM = P1h + P2h + P_HM_pivot = P1h_pivot + P2h_pivot + + # rescaling + Pbin_pivot = emu.get_phm_mass(k_use / cosmo["h"], M_pivot * cosmo["h"], 1./a -1) * (1/cosmo["h"])**3 + + Pbin = Pbin_pivot[None, :] * (P_HM/P_HM_pivot[None, :]) + + if np.ndim(k) == 0: + Pbin = np.squeeze(Pbin, axis=-1) + if np.ndim(M) == 0: + Pbin = np.squeeze(Pbin, axis=0) + + return Pbin + + +def mass_to_dens(dndlog10m_emu, cosmo, mass_thre): + logM1 = np.linspace(np.log10(mass_thre), np.log10(10**16./cosmo["h"]), 2**6+1) + dlogM1 = logM1[1] - logM1[0] + dens = integrate.romb(dndlog10m_emu(logM1), dx = dlogM1) + + return dens + +def dens_to_mass(dndlog10m_emu, cosmo, dens, nint=60):#:, integration="quad"): + mlist = np.linspace(8, np.log10(10**15.8/cosmo["h"]), nint) + dlist = np.log(np.array([mass_to_dens( + dndlog10m_emu, cosmo, 10**mlist[i]) for i in range(nint)])) + d_to_m_interp = ius(-dlist, mlist) + return 10**d_to_m_interp(-np.log(dens)) + + + +def u_M(k, M, cosmo, cM, cM_vir, mass_def, a): + M_use = np.atleast_1d(M) + k_use = np.atleast_1d(k) + + c = cM.get_concentration(cosmo, M_use, a) + c_vir = cM_vir.get_concentration(cosmo, M_use, a) + R = mass_def.get_radius(cosmo, M_use, a) / a # comoving halo radius + + r_s = R/c # scale radius from R_200m, c_200m + x = k_use[None, :] *r_s[:, None]/a + + tau_v = 2.6 + + tau1 = tau_v * c_vir + tau = tau1[:, None] + m_nfw = np.log(1 + c) - c/(1+c) + prefactor = tau/(4 * m_nfw[:, None] * (1+tau**2)**3 * x) + + Si, Ci = sici(x) + + F1 = 2 * (3 * tau**4 - 6 * tau**2 - 1) * P_fit(tau * x) + + F2 = -2 * tau * (tau**4 - 1) * x * Q_fit(tau * x) + + F3 = -2 * tau**2 * np.pi * np.exp(-tau*x) * ((tau**2 + 1) * x + 4 * tau) + + F4 = 2 * tau**3 * (np.pi - 2 * Si) * (4 * np.cos(x) + (tau**2 + 1) * x * np.sin(x)) + + F5 = 4 * tau**3 * Ci * (4 * np.sin(x) - (tau**2 + 1) * x * np.cos(x)) + + u_M = prefactor * (F1 + F2 + F3 + F4 + F5) + + if np.ndim(k) == 0: + u_M = np.squeeze(u_M, axis=-1) + if np.ndim(M) == 0: + u_M = np.squeeze(u_M, axis=0) + + return u_M + + +def P_fit(x): + a = 1.5652 + b = 3.38723 + c = 6.34891 + d = 0.817677 + e = -0.0895584 + f = 0.877375 + + gamma = 0.57721566 + + F1 = - (1/x + (b * x**e)/(c + (x - d)**2)) + F2 = (x**4/(x**4 + a**4))**f + + F3 = x * (gamma + np.log(x) - 1) + F4 = (a**4/(x**4 + a**4))**f + + return F1*F2 + F3*F4 + +def Q_fit(x): + a = 2.26901 + b = -2839.04 + c = 265.511 + d = -1.12459 + e = -2.90136 + f = 1.86475 + g = 1.52197 + + gamma = 0.57721566 + + F1 = 1/x**2 + (b * x**e)/(c + (x - d)**4) + F2 = (x**4/(x**4 + a**4))**g + + F3 = (gamma + np.log(x)) * (1 + x**2 / 2) - 3/4 * x**2 + F4 = (a**4/(x**4 + a**4))**f + + return F1*F2 + F3*F4 + def b2H17(b1):#H17 @@ -649,6 +1981,32 @@ def darkemu_set_cosmology(cosmo): return emu + +def darkemu_set_cosmology_forAsresp(cosmo, deltalnAs): + Omega_c = cosmo["Omega_c"] + Omega_b = cosmo["Omega_b"] + h = cosmo["h"] + n_s = cosmo["n_s"] + A_s = cosmo["A_s"] + + omega_c = Omega_c * h ** 2 + omega_b = Omega_b * h ** 2 + omega_nu = 0.00064 + Omega_L = 1 - ((omega_c + omega_b + omega_nu) / h **2) + + emu_Ap = darkemu.de_interface.base_class() + #Parameters cparam (numpy array) : Cosmological parameters (𝜔𝑏, 𝜔𝑐, Ω𝑑𝑒, 
ln(10^10 𝐴𝑠), 𝑛𝑠, 𝑤) + cparam = np.array([omega_b,omega_c,Omega_L,np.log(10 ** 10 * A_s) + deltalnAs, n_s, -1.]) + emu_Ap.set_cosmology(cparam) + + emu_Am = darkemu.de_interface.base_class() + #Parameters cparam (numpy array) : Cosmological parameters (𝜔𝑏, 𝜔𝑐, Ω𝑑𝑒, ln(10^10 𝐴𝑠), 𝑛𝑠, 𝑤) + cparam = np.array([omega_b,omega_c,Omega_L,np.log(10 ** 10 * A_s) - deltalnAs, n_s, -1.]) + emu_Am.set_cosmology(cparam) + + return emu_Ap, emu_Am + + def set_hmodified_cosmology(cosmo,deltah): Omega_c = cosmo["Omega_c"] Omega_b = cosmo["Omega_b"] @@ -671,6 +2029,7 @@ def set_hmodified_cosmology(cosmo,deltah): h=hm, n_s=n_s, A_s=A_s) return cosmo_hp, cosmo_hm + def darkemu_Tk3D_SSC_test(cosmo, prof1, deltah=0.02, log10Mh_min=12.0,log10Mh_max=15.9, @@ -796,9 +2155,9 @@ def darkemu_Tk3D_SSC_test(cosmo, prof1, deltah=0.02, Mps = Mh + dlogM Mms = Mh - dlogM - prof_Mp = prof1.fourier(cosmo, k_use, (10 ** Mps) / h, aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): - prof_Mm = prof1.fourier(cosmo, k_use, (10 ** Mms) / h, aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): - prof = prof1.fourier(cosmo, k_use,(10 ** Mh) / h, aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): + prof_Mp = prof1.fourier(cosmo, k_use, (10 ** Mps) / h, aa, mass_def) + prof_Mm = prof1.fourier(cosmo, k_use, (10 ** Mms) / h, aa, mass_def) + prof = prof1.fourier(cosmo, k_use,(10 ** Mh) / h, aa, mass_def) dprof_dlogM = (prof_Mp - prof_Mm) / (2 * dlogM)#*np.log(10)) nth_mat = np.tile(nths, (len(k_use), 1)).transpose() @@ -1056,9 +2415,9 @@ def darkemu_Tk3D_SSC_old(cosmo, prof1, deltah=0.02, Mps = Mh + dlogM Mms = Mh - dlogM - prof_Mp = prof1.fourier(cosmo, k_use, (10 ** Mps) / h, aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): - prof_Mm = prof1.fourier(cosmo, k_use, (10 ** Mms) / h, aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): - prof = prof1.fourier(cosmo, k_use,(10 ** Mh) / h, aa, mass_def) # def _fourier(self, cosmo, k, M, a, mass_def): + prof_Mp = prof1.fourier(cosmo, k_use, (10 ** Mps) / h, aa, mass_def) + prof_Mm = prof1.fourier(cosmo, k_use, (10 ** Mms) / h, aa, mass_def) + prof = prof1.fourier(cosmo, k_use,(10 ** Mh) / h, aa, mass_def) dprof_dlogM = (prof_Mp - prof_Mm) / (2 * dlogM)#*np.log(10)) nth_mat = np.tile(nths, (len(k_use), 1)).transpose() @@ -1129,4 +2488,249 @@ def darkemu_Tk3D_SSC_old(cosmo, prof1, deltah=0.02, extrap_order_hik=extrap_order_hik, is_logt=use_log) return tk3d, pk2d +def halomod_Tk3D_SSC_orig(cosmo, hmc, + prof1, prof2=None, prof12_2pt=None, + prof3=None, prof4=None, prof34_2pt=None, + normprof1=False, normprof2=False, + normprof3=False, normprof4=False, + p_of_k_a=None, lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False): + """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing + the super-sample covariance trispectrum, given by the tensor + product of the power spectrum responses associated with the + two pairs of quantities being correlated. Each response is + calculated as: + + .. math:: + \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = + \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) + P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) + P_{u,v}(k) + + where the :math:`I^a_b` are defined in the documentation + of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2` and + :math:`b_{u}` and :math:`b_{v}` are the linear halo biases for quantities + :math:`u` and :math:`v`, respectively (zero if they are not clustering). 
+ + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + prof2 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_2` above. If `None`, + `prof1` will be used as `prof2`. + prof12_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + a profile covariance object returning the the two-point + moment of `prof1` and `prof2`. If `None`, the default + second moment will be used, corresponding to the + products of the means of both profiles. + prof3 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_1` above. If `None`, + `prof1` will be used as `prof3`. + prof4 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_2` above. If `None`, + `prof3` will be used as `prof4`. + prof34_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof12_2pt` for `prof3` and `prof4`. + normprof1 (bool): if `True`, this integral will be + normalized by :math:`I^0_1(k\\rightarrow 0,a|u)` + (see :meth:`~HMCalculator.I_0_1`), where + :math:`u` is the profile represented by `prof1`. + normprof2 (bool): same as `normprof1` for `prof2`. + normprof3 (bool): same as `normprof1` for `prof3`. + normprof4 (bool): same as `normprof1` for `prof4`. + p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, + the power spectrum stored within `cosmo` will be used. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. 
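+
+    Example (illustrative sketch; the mass function, halo bias, concentration
+    and sampling choices below are placeholders, and the function is assumed
+    to be called from the module in which it is defined).  Note that it
+    returns the :class:`~pyccl.tk3d.Tk3D` together with the first response
+    array::
+
+        import numpy as np
+        import pyccl as ccl
+
+        cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.045, h=0.67,
+                              n_s=0.96, A_s=2.1e-9)
+        mdef = ccl.halos.MassDef200m()
+        hmf = ccl.halos.MassFuncTinker10(cosmo, mass_def=mdef)
+        hbf = ccl.halos.HaloBiasTinker10(cosmo, mass_def=mdef)
+        cM = ccl.halos.ConcentrationDuffy08(mdef)
+        nfw = ccl.halos.HaloProfileNFW(cM, fourier_analytic=True)
+        hmc = ccl.halos.HMCalculator(cosmo, hmf, hbf, mdef)
+
+        a_arr = np.linspace(0.4, 1., 16)
+        lk_arr = np.log(np.geomspace(1e-4, 10., 128))
+        tk3d, dpk12 = halomod_Tk3D_SSC_orig(cosmo, hmc, prof1=nfw,
+                                            normprof1=True,
+                                            lk_arr=lk_arr, a_arr=a_arr)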
+ """ + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status, cosmo=cosmo) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status, cosmo=cosmo) + + k_use = np.exp(lk_arr) + + # Check inputs + if not isinstance(prof1, HaloProfile): + raise TypeError("prof1 must be of type `HaloProfile`") + if (prof2 is not None) and (not isinstance(prof2, HaloProfile)): + raise TypeError("prof2 must be of type `HaloProfile` or `None`") + if (prof3 is not None) and (not isinstance(prof3, HaloProfile)): + raise TypeError("prof3 must be of type `HaloProfile` or `None`") + if (prof4 is not None) and (not isinstance(prof4, HaloProfile)): + raise TypeError("prof4 must be of type `HaloProfile` or `None`") + if prof12_2pt is None: + prof12_2pt = Profile2pt() + elif not isinstance(prof12_2pt, Profile2pt): + raise TypeError("prof12_2pt must be of type " + "`Profile2pt` or `None`") + if (prof34_2pt is not None) and (not isinstance(prof34_2pt, Profile2pt)): + raise TypeError("prof34_2pt must be of type `Profile2pt` or `None`") + + # number counts profiles must be normalized + profs = {prof1: normprof1, prof2: normprof2, + prof3: normprof3, prof4: normprof4} + + for i, (profile, normalization) in enumerate(profs.items()): + if (profile is not None + and profile.is_number_counts + and not normalization): + raise ValueError( + f"normprof{i+1} must be True if prof{i+1} is number counts") + + if prof3 is None: + prof3_bak = prof1 + else: + prof3_bak = prof3 + if prof34_2pt is None: + prof34_2pt_bak = prof12_2pt + else: + prof34_2pt_bak = prof34_2pt + + # Power spectrum + if isinstance(p_of_k_a, Pk2D): + pk2d = p_of_k_a + elif (p_of_k_a is None) or (str(p_of_k_a) == 'linear'): + pk2d = cosmo.get_linear_power('delta_matter:delta_matter') + elif str(p_of_k_a) == 'nonlinear': + pk2d = cosmo.get_nonlin_power('delta_matter:delta_matter') + else: + raise TypeError("p_of_k_a must be `None`, \'linear\', " + "\'nonlinear\' or a `Pk2D` object") + + def get_norm(normprof, prof, sf): + if normprof: + return hmc.profile_norm(cosmo, sf, prof) + else: + return 1 + + na = len(a_arr) + nk = len(k_use) + dpk12 = np.zeros([na, nk]) + dpk34 = np.zeros([na, nk]) + for ia, aa in enumerate(a_arr): + # Compute profile normalizations + norm1 = get_norm(normprof1, prof1, aa) + i11_1 = hmc.I_1_1(cosmo, k_use, aa, prof1) + # Compute second profile normalization + if prof2 is None: + norm2 = norm1 + i11_2 = i11_1 + else: + norm2 = get_norm(normprof2, prof2, aa) + i11_2 = hmc.I_1_1(cosmo, k_use, aa, prof2) + if prof3 is None: + norm3 = norm1 + i11_3 = i11_1 + else: + norm3 = get_norm(normprof3, prof3, aa) + i11_3 = hmc.I_1_1(cosmo, k_use, aa, prof3) + if prof4 is None: + norm4 = norm3 + i11_4 = i11_3 + else: + norm4 = get_norm(normprof4, prof4, aa) + i11_4 = hmc.I_1_1(cosmo, k_use, aa, prof4) + + i12_12 = hmc.I_1_2(cosmo, k_use, aa, prof1, + prof12_2pt, prof2) + if (prof3 is None) and (prof4 is None) and (prof34_2pt is None): + i12_34 = i12_12 + else: + i12_34 = hmc.I_1_2(cosmo, k_use, aa, prof3_bak, + prof34_2pt_bak, prof4) + norm12 = norm1 * norm2 + norm34 = norm3 * norm4 + + pk = pk2d.eval(k_use, aa, cosmo) + dpk = pk2d.eval_dlogpk_dlogk(k_use, aa, cosmo) + # (47/21 - 1/3 dlogPk/dlogk) * I11 * I11 * Pk+I12 + dpk12[ia, :] = norm12*((2.2380952381-dpk/3)*i11_1*i11_2*pk+i12_12) + dpk34[ia, :] = norm34*((2.2380952381-dpk/3)*i11_3*i11_4*pk+i12_34) + + # Counter 
terms for clustering (i.e. - (bA + bB) * PAB + if prof1.is_number_counts or (prof2 is None or prof2.is_number_counts): + b1 = b2 = np.zeros_like(k_use) + i02_12 = hmc.I_0_2(cosmo, k_use, aa, prof1, prof12_2pt, prof2) + P_12 = norm12 * (pk * i11_1 * i11_2 + i02_12) + + if prof1.is_number_counts: + b1 = i11_1 * norm1 + + if prof2 is None: + b2 = b1 + elif prof2.is_number_counts: + b2 = i11_2 * norm2 + + dpk12[ia, :] -= (b1 + b2) * P_12 + + if prof3_bak.is_number_counts or \ + ((prof3_bak.is_number_counts and prof4 is None) or + (prof4 is not None) and prof4.is_number_counts): + b3 = b4 = np.zeros_like(k_use) + if (prof3 is None) and (prof4 is None) and (prof34_2pt is None): + i02_34 = i02_12 + else: + i02_34 = hmc.I_0_2(cosmo, k_use, aa, prof3_bak, prof34_2pt_bak, + prof4) + P_34 = norm34 * (pk * i11_3 * i11_4 + i02_34) + + if prof3 is None: + b3 = b1 + elif prof3.is_number_counts: + b3 = i11_3 * norm3 + + if prof4 is None: + b4 = b3 + elif prof4.is_number_counts: + b4 = i11_4 * norm4 + + dpk34[ia, :] -= (b3 + b4) * P_34 + + if use_log: + if np.any(dpk12 <= 0) or np.any(dpk34 <= 0): + warnings.warn( + "Some values were not positive. " + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + dpk12 = np.log(dpk12) + dpk34 = np.log(dpk34) + + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, + pk1_arr=dpk12, pk2_arr=dpk34, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, is_logt=use_log) + return tk3d, dpk12 + diff --git a/pyccl/halos/__init__.py b/pyccl/halos/__init__.py index 16716b385..f12b8a287 100644 --- a/pyccl/halos/__init__.py +++ b/pyccl/halos/__init__.py @@ -12,7 +12,8 @@ # Halo mass-concentration relations from .concentration import ( Concentration, - ConcentrationDiemer15_colossus, + ConcentrationDiemer15_colossus200m, + ConcentrationDiemer15_colossus_vir, ConcentrationDiemer15_ius, ConcentrationDiemer15, ConcentrationBhattacharya13, @@ -60,6 +61,7 @@ HaloProfileHernquist, HaloProfilePressureGNFW, HaloProfileHOD, + HaloProfileDK14, ) # Halo profile 2-point cumulants @@ -70,15 +72,37 @@ # Halo model power spectrum from .halo_model import ( +# HMCalculator, +# halomod_mean_profile_1pt, +# halomod_bias_1pt, +# halomod_power_spectrum, +# halomod_Pk2D, + halomod_trispectrum_1h, + halomod_trispectrum_2h_13, + halomod_trispectrum_2h_22, + halomod_trispectrum_3h, + halomod_trispectrum_4h, + halomod_Tk3D_1h, + halomod_Tk3D_2h, + halomod_Tk3D_3h, + halomod_Tk3D_4h, + halomod_Tk3D_SSC, + halomod_Tk3D_SSC_linear_bias, + halomod_Tk3D_cNG +) + +from .halo_model_orig import ( HMCalculator, halomod_mean_profile_1pt, halomod_bias_1pt, halomod_power_spectrum, halomod_Pk2D, - halomod_trispectrum_1h, - halomod_Tk3D_1h, - halomod_Tk3D_SSC, - halomod_Tk3D_SSC_linear_bias, +# halomod_trispectrum_1h, +# halomod_Tk3D_1h, + halomod_Tk3D_SSC_orig, + halomod_Tk3D_SSC_debug, + + halomod_Tk3D_SSC_linear_bias_orig, ) # CIB profiles diff --git a/pyccl/halos/concentration.py b/pyccl/halos/concentration.py index ab73a3478..63451ce4b 100644 --- a/pyccl/halos/concentration.py +++ b/pyccl/halos/concentration.py @@ -8,12 +8,11 @@ import functools # Terasawa -from scipy import optimize -from colossus.cosmology import cosmology as colcosmology -from colossus.halo import concentration as colconcentration -from scipy.interpolate import InterpolatedUnivariateSpline as ius - - +#from scipy import optimize +#from colossus.cosmology import cosmology as colcosmology +#from colossus.halo import concentration as colconcentration +#from colossus.halo.mass_defs import 
changeMassDefinition as colchangeMassDefinition +#from scipy.interpolate import InterpolatedUnivariateSpline as ius # @@ -30,14 +29,16 @@ class Concentration(object): def __init__(self, mass_def=None, mdef_other=None): if mass_def is not None: if self._check_mdef(mass_def): - if mdef_other is None: raise ValueError( f"Mass definition {mass_def.Delta}-{mass_def.rho_type} " f"is not compatible with c(M) {self.name} configuration.") self.mdef = mass_def - self.mdef_other = mdef_other else: self._default_mdef() + + if mdef_other is None: + self.mdef_other = mdef_other + self._setup() def _default_mdef(self): @@ -85,11 +86,10 @@ def _get_consistent_mass(self, cosmo, M, a, mdef_other): float or array_like: mass according to this object's mass definition. """ -# if mdef_other is not None: -# M_use = mdef_other.translate_mass(cosmo, M, a, self.mdef) -# else: -# M_use = M - M_use = M + if mdef_other is not None: + M_use = mdef_other.translate_mass(cosmo, M, a, self.mdef) + else: + M_use = M return M_use def get_concentration(self, cosmo, M, a, mdef_other=None): @@ -131,7 +131,7 @@ def from_name(cls, name): raise ValueError(f"Concentration {name} not implemented.") -class ConcentrationDiemer15_colossus(Concentration): +class ConcentrationDiemer15_colossus200m(Concentration): """ Concentration-mass relation by Diemer & Kravtsov 2015 (arXiv:1407.4730). This parametrization is only valid for S.O. masses with Delta = 200-critical. @@ -145,20 +145,14 @@ class ConcentrationDiemer15_colossus(Concentration): name = 'Diemer15' def __init__(self, mdef=None): - super(ConcentrationDiemer15_colossus, self).__init__(mdef) + super(ConcentrationDiemer15_colossus200m, self).__init__(mdef) def _default_mdef(self): self.mdef = MassDef(200, 'matter') def _setup(self): - self.kappa = 1.0 - self.phi_0 = 6.58 - self.phi_1 = 1.27 - self.eta_0 = 7.28 - self.eta_1 = 1.56 - self.alpha = 1.08 - self.beta = 1.77 - + pass + def _check_mdef(self, mdef): if isinstance(mdef.Delta, str): return True @@ -168,29 +162,83 @@ def _check_mdef(self, mdef): return False def _concentration(self, cosmo, M, a):#='diemer15' - Oc0 = cosmo["Omega_c"] - Ob0 = cosmo["Omega_b"] - h = cosmo["h"] - n_s = cosmo["n_s"] - if cosmo["sigma8"] is None: - self.sigma8 = sigma8(cosmo) - else: - self.sigma8 = cosmo["sigma8"] - H0 = 100 * h - Om0 = Oc0 + Ob0 - Mh = M * h # Msol/h - Mh_int = np.logspace(12,17) - params = {'flat': True, 'H0': H0, 'Om0': Om0, - 'Ob0': Ob0, 'sigma8': self.sigma8, 'ns': n_s, 'persistence': ''} - colcosmo = colcosmology.setCosmology('myCosmo', params) - c = np.zeros(len(Mh_int)) - for i in range(len(Mh_int)): - c[i] = colconcentration.concentration(Mh_int[i], '200m', z=1./a -1, model="diemer15") +# Oc0 = cosmo["Omega_c"] +# Ob0 = cosmo["Omega_b"] +# h = cosmo["h"] +# n_s = cosmo["n_s"] +# if np.isnan(cosmo["sigma8"]): +# sigma8(cosmo) + +# H0 = 100 * h +# Om0 = Oc0 + Ob0 +# Mh = M * h # Msol/h - c_func = ius(Mh_int,c) - c_out = c_func(Mh) - return c_out +# params = {'flat': True, 'H0': H0, 'Om0': Om0, +# 'Ob0': Ob0, 'sigma8': cosmo["sigma8"], 'ns': n_s, 'persistence': ''} +# colcosmo = colcosmology.setCosmology('myCosmo', params) + +# c = colconcentration.concentration(Mh, '200m', z=1./a -1, model="diemer15") + cosmo.compute_ConcentrationDiemer15_200m() + c_rbs = cosmo.get_ConcentrationDiemer15_200m('200m') + c = c_rbs(a, np.log10(M))[0] + + return c + +class ConcentrationDiemer15_colossus_vir(Concentration): + """ Concentration-mass relation by Diemer & Kravtsov 2015 + (arXiv:1407.4730). This parametrization is only valid for + S.O. 
masses with Delta = 200-critical. + + Args: + mdef (:class:`~pyccl.halos.massdef.MassDef`): + a mass definition object that fixes + the mass definition used by this c(M) + parametrization. + """ + name = 'Diemer15' + + def __init__(self, mdef=None): + super(ConcentrationDiemer15_colossus_vir, self).__init__(mdef) + + def _default_mdef(self): + self.mdef = MassDef(200, 'matter') + + def _setup(self): + pass + def _check_mdef(self, mdef): + if isinstance(mdef.Delta, str): + return True + elif not ((int(mdef.Delta) == 200) and + (mdef.rho_type == 'matter')): + return True + return False + + + def _concentration(self, cosmo, M, a):#='diemer15' +# Oc0 = cosmo["Omega_c"] +# Ob0 = cosmo["Omega_b"] +# h = cosmo["h"] +# n_s = cosmo["n_s"] +# if np.isnan(cosmo["sigma8"]): +# sigma8(cosmo) + +# H0 = 100 * h +# Om0 = Oc0 + Ob0 +# Mh = M * h # Msol/h + +# params = {'flat': True, 'H0': H0, 'Om0': Om0, +# 'Ob0': Ob0, 'sigma8': cosmo["sigma8"], 'ns': n_s, 'persistence': ''} +# colcosmo = colcosmology.setCosmology('myCosmo', params) + +# c200m = colconcentration.concentration(Mh, '200m', z=1./a -1, model="diemer15") + +# M_vir, R_vir, c_vir = colchangeMassDefinition(Mh, c200m, z=1./a -1, mdef_in='200m', mdef_out='vir', profile='nfw') + cosmo.compute_ConcentrationDiemer15_200m() + c_rbs = cosmo.get_ConcentrationDiemer15_200m("vir") + c_vir = c_rbs(a, np.log10(M))[0] + + return c_vir class ConcentrationDiemer15_ius(Concentration): """ Concentration-mass relation by Diemer & Kravtsov 2015 diff --git a/pyccl/halos/halo_model.py b/pyccl/halos/halo_model.py index ac2018225..2f413db86 100644 --- a/pyccl/halos/halo_model.py +++ b/pyccl/halos/halo_model.py @@ -1,19 +1,21 @@ import warnings from .. import ccllib as lib -from .massdef import MassDef from .hmfunc import MassFunc from .hbias import HaloBias -from .profiles import HaloProfile, HaloProfileNFW +from .profiles import HaloProfile from .profiles_2pt import Profile2pt from ..core import check from ..pk2d import Pk2D from ..tk3d import Tk3D from ..power import linear_matter_power, nonlin_matter_power +from ..background import rho_x from ..pyutils import _spline_integrate from .. 
import background from ..errors import CCLWarning -from ..parameters import physical_constants import numpy as np +import scipy + +physical_constants = lib.cvar.constants class HMCalculator(object): @@ -56,35 +58,14 @@ def __init__(self, cosmo, massfunc, hbias, mass_def, log10M_min=8., log10M_max=16., nlog10M=128, integration_method_M='simpson', k_min=1E-5): - # halo mass definition - if isinstance(mass_def, MassDef): - self._mdef = mass_def - elif isinstance(mass_def, str): - self._mdef = MassDef.from_name(mass_def)() - else: - raise TypeError("mass_def must be of type `MassDef` " - "or a mass definition name string") - - # halo mass function - if isinstance(massfunc, MassFunc): - self._massfunc = massfunc - elif isinstance(massfunc, str): - nMclass = MassFunc.from_name(massfunc) - self._massfunc = nMclass(cosmo, mass_def=self._mdef) - else: - raise TypeError("mass_function must be of type `MassFunc` " - "or a mass function name string") - - # halo bias function - if isinstance(hbias, HaloBias): - self._hbias = hbias - elif isinstance(hbias, str): - bMclass = HaloBias.from_name(hbias) - self._hbias = bMclass(cosmo, mass_def=self._mdef) - else: - raise TypeError("halo_bias must be of type `HaloBias` " - "or a halo bias name string") - + self._rho0 = rho_x(cosmo, 1., 'matter', is_comoving=True) + if not isinstance(massfunc, MassFunc): + raise TypeError("massfunc must be of type `MassFunc`") + self._massfunc = massfunc + if not isinstance(hbias, HaloBias): + raise TypeError("hbias must be of type `HaloBias`") + self._hbias = hbias + self._mdef = mass_def self._prec = {'log10M_min': log10M_min, 'log10M_max': log10M_max, 'nlog10M': nlog10M, @@ -116,23 +97,19 @@ def _integ_spline(self, fM, lM): def _get_ingredients(self, a, cosmo, get_bf): # Compute mass function and bias (if needed) at a new # value of the scale factor. - rho0 = None if a != self._a_current_mf: - rho0 = cosmo.rho_x(1., "matter", is_comoving=True) self.mf = self._massfunc.get_mass_function(cosmo, self._mass, a, mdef_other=self._mdef) - self.mf0 = (rho0 - + self.mf0 = (self._rho0 - self._integrator(self.mf * self._mass, self._lmass)) / self._m0 self._a_current_mf = a if get_bf: if a != self._a_current_bf: - if rho0 is None: - rho0 = cosmo.rho_x(1., "matter", is_comoving=True) self.bf = self._hbias.get_halo_bias(cosmo, self._mass, a, mdef_other=self._mdef) - self.mbf0 = (rho0 - + self.mbf0 = (self._rho0 - self._integrator(self.mf * self.bf * self._mass, self._lmass)) / self._m0 self._a_current_bf = a @@ -286,6 +263,46 @@ def I_1_1(self, cosmo, k, a, prof): i11 = self._integrate_over_mbf(uk) return i11 + def I_1_3(self, cosmo, k, a, prof1, prof_2pt, prof2=None, prof3=None): + """ Solves the integral: + + .. math:: + I^1_3(k,a|u_2, v_1, _v2) = \\int dM\\,n(M,a)\\,b(M,a)\\, + \\langle u_2(k,a|M) v_1(k',a|M) v_2(k',a|M)\\rangle, + + approximated to + .. math:: + I^1_3(k,a|u_2, v_1, _v2) = I^1_1(k,a|u_2) I^1_2(k',a|v_1, v_2) + + where :math:`n(M,a)` is the halo mass function, + :math:`b(M,a)` is the halo bias, and + :math:`\\langle u_2(k,a|M) v_1(k',a|M) v_2(k',a|M)\\rangle` is the + 3pt halo profile as a function of scales `k` and `k'`, scale factor + and halo mass. + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + k (float or array_like): comoving wavenumber in Mpc^-1. + a (float): scale factor. + prof (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile. + + Returns: + float or array_like: integral values evaluated at each + value of `k`. 
Its shape will be `(N_k, N_k)`, with `N_k` the + size of the `k` array. + """ + # Compute mass function and halo bias + # and transpose to move the M-axis last + self._get_ingredients(a, cosmo, True) + uk1 = prof1.fourier(cosmo, k, self._mass, a, mass_def=self._mdef) + uk23 = prof_2pt.fourier_2pt(prof2, cosmo, k, self._mass, a, + prof2=prof3, mass_def=self._mdef) + + uk = uk1[:, :, None] * uk23[:, None, :] + i13 = self._integrate_over_mbf(uk.T) + return i13 + def I_0_2(self, cosmo, k, a, prof1, prof_2pt, prof2=None): """ Solves the integral: @@ -323,7 +340,7 @@ def I_0_2(self, cosmo, k, a, prof1, prof_2pt, prof2=None): i02 = self._integrate_over_mf(uk) return i02 - def I_1_2(self, cosmo, k, a, prof1, prof_2pt, prof2=None): + def I_1_2(self, cosmo, k, a, prof1, prof_2pt, prof2=None, diag=True): """ Solves the integral: .. math:: @@ -348,6 +365,8 @@ def I_1_2(self, cosmo, k, a, prof1, prof_2pt, prof2=None): prof2 (:class:`~pyccl.halos.profiles.HaloProfile`): a second halo profile. If `None`, `prof` will be used as `prof2`. + diag (bool): If True, both halo profiles depend on the same k. If + False, they will depend on k and k', respectively. Default True. Returns: float or array_like: integral values evaluated at each @@ -357,9 +376,13 @@ def I_1_2(self, cosmo, k, a, prof1, prof_2pt, prof2=None): self._get_ingredients(a, cosmo, True) uk = prof_2pt.fourier_2pt(prof1, cosmo, k, self._mass, a, prof2=prof2, - mass_def=self._mdef).T - i02 = self._integrate_over_mbf(uk) - return i02 + mass_def=self._mdef, diag=diag) + if diag is True: + uk = uk.T + else: + uk = np.transpose(uk, axes=[1, 2, 0]) + i12 = self._integrate_over_mbf(uk) + return i12 def I_0_22(self, cosmo, k, a, prof1, prof12_2pt, prof2=None, @@ -915,22 +938,35 @@ def get_norm(normprof, prof, sf): return out -def halomod_Tk3D_1h(cosmo, hmc, - prof1, prof2=None, prof12_2pt=None, - prof3=None, prof4=None, prof34_2pt=None, - normprof1=False, normprof2=False, - normprof3=False, normprof4=False, - lk_arr=None, a_arr=None, - extrap_order_lok=1, extrap_order_hik=1, - use_log=False): - """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing - the 1-halo trispectrum for four quantities defined by - their respective halo profiles. See :meth:`halomod_trispectrum_1h` - for more details about the actual calculation. +def halomod_trispectrum_2h_22(cosmo, hmc, k, a, prof1, prof2=None, + prof3=None, prof4=None, + prof13_2pt=None, prof14_2pt=None, + prof24_2pt=None, prof32_2pt=None, + normprof1=False, normprof2=False, + normprof3=False, normprof4=False, p_of_k_a=None): + """ Computes the isotropized halo model 2-halo trispectrum for four profiles + :math:`u_{1,2}`, :math:`v_{1,2}` as + + .. math:: + \\bar{T}^{2h}_{22}(k_1, k_2, a) = \\int \\frac{d\\varphi_1}{2\\pi} + \\int \\frac{d\\varphi_2}{2\\pi} + T^{2h}_{22}({\\bf k_1},-{\\bf k_1},{\\bf k_2},-{\\bf k_2}), + + with + + .. math:: + T^{2h}_{22}_{u_1,u_2;v_1,v_2}(k_u,k_v,a) = + P_lin(|k_{u_1} + k_{u_2}|)\\, I^1_2(k_{u_1}, k_{u_2}|u})\\, + I^1_2(k_{v_1}, k_{v_2}|v}) + 2 perm + + where :math:`I^1_2` is defined in the documentation + of :math:`~HMCalculator.I_1_2`. Args: cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. hmc (:class:`HMCalculator`): a halo model calculator. + k (float or array_like): comoving wavenumber in Mpc^-1. + a (float or array_like): scale factor. prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo profile (corresponding to :math:`u_1` above. 
prof2 (:class:`~pyccl.halos.profiles.HaloProfile`): halo @@ -947,8 +983,14 @@ def halomod_Tk3D_1h(cosmo, hmc, prof4 (:class:`~pyccl.halos.profiles.HaloProfile`): halo profile (corresponding to :math:`v_2` above. If `None`, `prof3` will be used as `prof4`. - prof34_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): - same as `prof12_2pt` for `prof3` and `prof4`. + prof13_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof12_2pt` for `prof1` and `prof3`. + prof14_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof14_2pt` for `prof1` and `prof4`. + prof24_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof14_2pt` for `prof2` and `prof4`. + prof32_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof14_2pt` for `prof3` and `prof2`. normprof1 (bool): if `True`, this integral will be normalized by :math:`I^0_1(k\\rightarrow 0,a|u)` (see :meth:`~HMCalculator.I_0_1`), where @@ -956,214 +998,1185 @@ def halomod_Tk3D_1h(cosmo, hmc, normprof2 (bool): same as `normprof1` for `prof2`. normprof3 (bool): same as `normprof1` for `prof3`. normprof4 (bool): same as `normprof1` for `prof4`. - a_arr (array): an array holding values of the scale factor - at which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - lk_arr (array): an array holding values of the natural - logarithm of the wavenumber (in units of Mpc^-1) at - which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - extrap_order_lok (int): extrapolation order to be used on - k-values below the minimum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - extrap_order_hik (int): extrapolation order to be used on - k-values above the maximum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - use_log (bool): if `True`, the trispectrum will be - interpolated in log-space (unless negative or - zero values are found). + p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, the power + spectrum stored within `cosmo` will be used. Returns: - :class:`~pyccl.tk3d.Tk3D`: 1-halo trispectrum. + float or array_like: integral values evaluated at each + combination of `k` and `a`. The shape of the output will + be `(N_a, N_k, N_k)` where `N_k` and `N_a` are the sizes of + `k` and `a` respectively. The ordering is such that + `output[ia, ik2, ik1] = T(k[ik1], k[ik2], a[ia])` + If `k` or `a` are scalars, the corresponding dimension will + be squeezed out on output. """ - if lk_arr is None: - status = 0 - nk = lib.get_pk_spline_nk(cosmo.cosmo) - lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) - check(status, cosmo=cosmo) - if a_arr is None: - status = 0 - na = lib.get_pk_spline_na(cosmo.cosmo) - a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) - check(status, cosmo=cosmo) + a_use = np.atleast_1d(a) + k_use = np.atleast_1d(k) - tkk = halomod_trispectrum_1h(cosmo, hmc, np.exp(lk_arr), a_arr, - prof1, prof2=prof2, - prof12_2pt=prof12_2pt, - prof3=prof3, prof4=prof4, - prof34_2pt=prof34_2pt, - normprof1=normprof1, normprof2=normprof2, - normprof3=normprof3, normprof4=normprof4) - if use_log: - if np.any(tkk <= 0): - warnings.warn( - "Some values were not positive. 
" - "Will not interpolate in log-space.", - category=CCLWarning) - use_log = False + # Romberg needs 1 + 2^n points + # Since the functions we average depend only on cos(theta) we can rewrite + # the integrals as \int_0^2pi dtheta f(cos theta) / 2pi as + # \int_0^pi dtheta f(cos theta) / pi + # Exclude theta = pi to avoid k + k' = 0 + theta = np.linspace(0, np.pi - 1e-5, 129) + dtheta = theta[1] - theta[0] + cth = np.cos(theta) + + kr = np.sqrt(k_use[:, None, None] ** 2 + k_use[None, :, None] ** 2 + + 2 * k_use[:, None, None] * k_use[None, :, None] + * cth[None, None, :]) + + # Check inputs + if not isinstance(prof1, HaloProfile): + raise TypeError("prof1 must be of type `HaloProfile`") + if prof2 is None: + prof2 = prof1 + elif not isinstance(prof2, HaloProfile): + raise TypeError("prof2 must be of type `HaloProfile` or `None`") + if prof3 is None: + prof3 = prof1 + elif not isinstance(prof3, HaloProfile): + raise TypeError("prof3 must be of type `HaloProfile` or `None`") + if prof4 is None: + prof4 = prof3 + elif not isinstance(prof4, HaloProfile): + raise TypeError("prof4 must be of type `HaloProfile` or `None`") + + if prof13_2pt is None: + prof13_2pt = Profile2pt() + elif not isinstance(prof13_2pt, Profile2pt): + raise TypeError("prof13_2pt must be of type `Profile2pt` or `None`") + if (prof24_2pt is not None) and (not isinstance(prof24_2pt, Profile2pt)): + raise TypeError("prof13_2pt must be of type `Profile2pt` or `None`") + else: + prof24_2pt = prof13_2pt + if (prof14_2pt is not None) and (not isinstance(prof14_2pt, Profile2pt)): + raise TypeError("prof14_2pt must be of type `Profile2pt` or `None`") + else: + prof14_2pt = prof13_2pt + if (prof32_2pt is not None) and (not isinstance(prof32_2pt, Profile2pt)): + raise TypeError("prof32_2pt must be of type `Profile2pt` or `None`") + else: + prof32_2pt = prof13_2pt + + def get_norm(normprof, prof, sf): + if normprof: + return hmc.profile_norm(cosmo, sf, prof) else: - tkk = np.log(tkk) + return 1 - tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, tkk_arr=tkk, - extrap_order_lok=extrap_order_lok, - extrap_order_hik=extrap_order_hik, is_logt=use_log) - return tk3d + na = len(a_use) + nk = len(k_use) + # Power spectrum + def get_isotropized_pkr(p_of_k_a, aa): + kk = kr.flatten() + # This returns int dphi / 2pi int dphi' / 2pi P(kk) + if isinstance(p_of_k_a, Pk2D): + pk = p_of_k_a.eval(kk, aa, cosmo) + elif (p_of_k_a is None) or (str(p_of_k_a) == 'linear'): + pk = linear_matter_power(cosmo, kk, aa) + elif str(p_of_k_a) == 'nonlinear': + pk = nonlin_matter_power(cosmo, kk, aa) + else: + raise TypeError("p_of_k_a must be `None`, \'linear\', " + "\'nonlinear\' or a `Pk2D` object") -def halomod_Tk3D_SSC_linear_bias(cosmo, hmc, prof, bias1=1, bias2=1, bias3=1, - bias4=1, - is_number_counts1=False, - is_number_counts2=False, - is_number_counts3=False, - is_number_counts4=False, - p_of_k_a=None, lk_arr=None, - a_arr=None, extrap_order_lok=1, - extrap_order_hik=1, use_log=False): - """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing - the super-sample covariance trispectrum, given by the tensor - product of the power spectrum responses associated with the - two pairs of quantities being correlated. 
Each response is + pk = pk.reshape((nk, nk, theta.size)) + int_pk = scipy.integrate.romb(pk, dtheta, axis=-1) + return int_pk / np.pi + + out = np.zeros([na, nk, nk]) + for ia, aa in enumerate(a_use): + # Compute profile normalizations + norm1 = get_norm(normprof1, prof1, aa) + # Compute second profile normalization + if prof2 is None: + norm2 = norm1 + else: + norm2 = get_norm(normprof2, prof2, aa) + if prof3 is None: + norm3 = norm1 + else: + norm3 = get_norm(normprof3, prof3, aa) + if prof4 is None: + norm4 = norm3 + else: + norm4 = get_norm(normprof4, prof4, aa) + + norm = norm1 * norm2 * norm3 * norm4 + p = get_isotropized_pkr(p_of_k_a, aa) + + # Compute trispectrum at this redshift + # P(k1 - k1 = 0) = 0 + # p12 = get_isotropized_pk(p_of_k_a, 0 * kkth, aa) + # i12 = hmc.I_1_2(cosmo, k_use, aa, prof1, prof12_2pt, + # prof2=prof2)[:, None] + # i34 = hmc.I_1_2(cosmo, k_use, aa, prof3, prof34_2pt, + # prof2=prof4)[None, :] + # Permutation 1 + i13 = hmc.I_1_2(cosmo, k_use, aa, prof1, prof13_2pt, prof2=prof3, + diag=False) + i24 = hmc.I_1_2(cosmo, k_use, aa, prof2, prof24_2pt, prof2=prof4, + diag=False) + # Permutation 2 + i14 = hmc.I_1_2(cosmo, k_use, aa, prof1, prof14_2pt, prof2=prof4, + diag=False) + i32 = hmc.I_1_2(cosmo, k_use, aa, prof3, prof32_2pt, prof2=prof2, + diag=False) + + tk_2h_22 = p * (i13 * i24 + i14 * i32) + # Normalize + out[ia, :, :] = tk_2h_22 * norm + + if np.ndim(a) == 0: + out = np.squeeze(out, axis=0) + if np.ndim(k) == 0: + out = np.squeeze(out, axis=-1) + out = np.squeeze(out, axis=-1) + return out + + +def halomod_trispectrum_2h_13(cosmo, hmc, k, a, prof1, + prof2=None, prof3=None, prof4=None, + prof12_2pt=None, prof34_2pt=None, + normprof1=False, normprof2=False, + normprof3=False, normprof4=False, p_of_k_a=None): + """ Computes the isotropized halo model 2-halo trispectrum for four different + quantities defined by their respective halo profiles. The 2-halo + trispectrum for four profiles :math:`u_{1,2}`, :math:`v_{1,2}` is calculated as: .. math:: - \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = b_u b_v \\left( - \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) - P_L(k)+I^1_2(k|u,v) - (b_{u} + b_{v}) P_{u,v}(k) \\right) + T^{2h}_{13}_{u_1,u_2,v_1,v_2}(k_u,k_v,a) = + P_lin(k_u)\\, I^1_1(k_{u_1}|u_1)\\, + I^1_3(k_{u_1}, k_{v_1}, k_{v_2}|u_1, v}) + 3 perm + + where :math:`I^1_1` is defined in the documentation of + :meth:`~HMCalculator.I_1_1` and :math:`I^1_3` is defined in the + documentation of :meth:`~HMCalculator.I_1_3`. Then, this function returns + + .. math:: + \\bar{T}^{2h}_{13}(k_1, k_2, a) = \\int \\frac{d\\varphi_1}{2\\pi} + \\int \\frac{d\\varphi_2}{2\\pi} + T^{1h}_{13}({\\bf k_1},-{\\bf k_1},{\\bf k_2},-{\\bf k_2}), - where the :math:`I^1_2` is defined in the documentation - :meth:`~HMCalculator.I_1_2` and :math:`b_{}` and :math:`b_{vv}` are the - linear halo biases for quantities :math:`u` and :math:`v`, respectively - (zero if they are not clustering). Args: cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. hmc (:class:`HMCalculator`): a halo model calculator. - prof (:class:`~pyccl.halos.profiles.HaloProfile`): halo NFW + k (float or array_like): comoving wavenumber in Mpc^-1. + a (float or array_like): scale factor. + prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + prof2 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_2` above. If `None`, + `prof1` will be used as `prof2`. 
+ prof3 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_1` above. If `None`, + `prof1` will be used as `prof3`. + prof4 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_2` above. If `None`, + `prof3` will be used as `prof4`. + prof12_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + a profile covariance object returning the 2-point + moment of `prof1`, `prof2`. If `None`, the default second moment + will be used, corresponding to the products of the means of each profile. - bias1 (float or array): linear galaxy bias for quantity 1. If an array, - it has to have the shape of `a_arr`. - bias2 (float or array): linear galaxy bias for quantity 2. - bias3 (float or array): linear galaxy bias for quantity 3. - bias4 (float or array): linear galaxy bias for quantity 4. - is_number_counts1 (bool): If True, quantity 1 will be considered - number counts and the clustering counter terms computed. Default False. - is_number_counts2 (bool): as is_number_counts1 but for quantity 2. - is_number_counts3 (bool): as is_number_counts1 but for quantity 3. - is_number_counts4 (bool): as is_number_counts1 but for quantity 4. + prof34_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof34_2pt` for `prof3` and `prof4`. + normprof1 (bool): if `True`, this integral will be + normalized by :math:`I^0_1(k\\rightarrow 0,a|u)` + (see :meth:`~HMCalculator.I_0_1`), where + :math:`u` is the profile represented by `prof1`. + normprof2 (bool): same as `normprof1` for `prof2`. + normprof3 (bool): same as `normprof1` for `prof3`. + normprof4 (bool): same as `normprof1` for `prof4`. p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to - be used as the linear matter power spectrum. If `None`, - the power spectrum stored within `cosmo` will be used. - a_arr (array): an array holding values of the scale factor - at which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - lk_arr (array): an array holding values of the natural - logarithm of the wavenumber (in units of Mpc^-1) at - which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - extrap_order_lok (int): extrapolation order to be used on - k-values below the minimum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - extrap_order_hik (int): extrapolation order to be used on - k-values above the maximum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - use_log (bool): if `True`, the trispectrum will be - interpolated in log-space (unless negative or - zero values are found). + be used as the linear matter power spectrum. If `None`, the power + spectrum stored within `cosmo` will be used. Returns: - :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. + float or array_like: integral values evaluated at each + combination of `k` and `a`. The shape of the output will + be `(N_a, N_k, N_k)` where `N_k` and `N_a` are the sizes of + `k` and `a` respectively. The ordering is such that + `output[ia, ik2, ik1] = T(k[ik1], k[ik2], a[ia])` + If `k` or `a` are scalars, the corresponding dimension will + be squeezed out on output. 
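+
+    Example (an illustrative sketch only; `cosmo`, `hmc` and the NFW
+    `prof` are assumed to be set up as in the SSC example earlier in this
+    patch, and the wavenumber range is arbitrary)::
+
+        k = np.logspace(-2, 0, 16)    # Mpc^-1
+        tk = ccl.halos.halomod_trispectrum_2h_13(
+            cosmo, hmc, k, 0.8, prof,
+            normprof1=True, normprof2=True,
+            normprof3=True, normprof4=True)
+        # A scalar `a` is squeezed out, so tk.shape == (16, 16).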
""" - if lk_arr is None: - status = 0 - nk = lib.get_pk_spline_nk(cosmo.cosmo) - lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) - check(status, cosmo=cosmo) - if a_arr is None: - status = 0 - na = lib.get_pk_spline_na(cosmo.cosmo) - a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) - check(status, cosmo=cosmo) + a_use = np.atleast_1d(a) + k_use = np.atleast_1d(k) - # Make sure biases are of the form number of a x number of k - ones = np.ones_like(a_arr) - bias1 *= ones - bias2 *= ones - bias3 *= ones - bias4 *= ones + # Check inputs + if not isinstance(prof1, HaloProfile): + raise TypeError("prof1 must be of type `HaloProfile`") + if prof2 is None: + prof2 = prof1 + elif not isinstance(prof2, HaloProfile): + raise TypeError("prof2 must be of type `HaloProfile` or `None`") + if prof3 is None: + prof3 = prof1 + elif not isinstance(prof3, HaloProfile): + raise TypeError("prof3 must be of type `HaloProfile` or `None`") + if prof4 is None: + prof4 = prof3 + elif not isinstance(prof4, HaloProfile): + raise TypeError("prof4 must be of type `HaloProfile` or `None`") - k_use = np.exp(lk_arr) + if prof12_2pt is None: + prof12_2pt = Profile2pt() + elif not isinstance(prof12_2pt, Profile2pt): + raise TypeError("prof12_2pt must be of type `Profile2pt` or `None`") + if prof34_2pt is None: + prof34_2pt = prof12_2pt + elif not isinstance(prof12_2pt, Profile2pt): + raise TypeError("prof12_2pt must be of type `Profile2pt` or `None`") - # Check inputs - if not isinstance(prof, HaloProfileNFW): - raise TypeError("prof must be of type `HaloProfileNFW`") - prof_2pt = Profile2pt() + def get_norm(normprof, prof, sf): + if normprof: + return hmc.profile_norm(cosmo, sf, prof) + else: + return 1 # Power spectrum - if isinstance(p_of_k_a, Pk2D): - pk2d = p_of_k_a - elif (p_of_k_a is None) or (str(p_of_k_a) == 'linear'): - pk2d = cosmo.get_linear_power('delta_matter:delta_matter') - elif str(p_of_k_a) == 'nonlinear': - pk2d = cosmo.get_nonlin_power('delta_matter:delta_matter') + def get_pk(p_of_k_a): + if isinstance(p_of_k_a, Pk2D): + def pkf(sf): + return p_of_k_a.eval(k_use, sf, cosmo) + elif (p_of_k_a is None) or (str(p_of_k_a) == 'linear'): + def pkf(sf): + return linear_matter_power(cosmo, k_use, sf) + elif str(p_of_k_a) == 'nonlinear': + def pkf(sf): + return nonlin_matter_power(cosmo, k_use, sf) + else: + raise TypeError("p_of_k_a must be `None`, \'linear\', " + "\'nonlinear\' or a `Pk2D` object") + return pkf + + na = len(a_use) + nk = len(k_use) + out = np.zeros([na, nk, nk]) + for ia, aa in enumerate(a_use): + # Compute profile normalizations + norm1 = get_norm(normprof1, prof1, aa) + # Compute second profile normalization + if prof2 is None: + norm2 = norm1 + else: + norm2 = get_norm(normprof2, prof2, aa) + if prof3 is None: + norm3 = norm1 + else: + norm3 = get_norm(normprof3, prof3, aa) + if prof4 is None: + norm4 = norm3 + else: + norm4 = get_norm(normprof4, prof4, aa) + + norm = norm1 * norm2 * norm3 * norm4 + + # Compute trispectrum at this redshift + p1 = get_pk(p_of_k_a)(aa)[:, None] + i1 = hmc.I_1_1(cosmo, k_use, aa, prof1)[:, None] + i234 = hmc.I_1_3(cosmo, k_use, aa, prof2, prof34_2pt, prof2=prof3, + prof3=prof4) + # Permutation 1 + # p2 = p1 # (because k_a = k_b) + i2 = hmc.I_1_1(cosmo, k_use, aa, prof2)[:, None] + i134 = hmc.I_1_3(cosmo, k_use, aa, prof1, prof34_2pt, prof2=prof3, + prof3=prof4) + # Attention to axis order change! 
+ # Permutation 2 + p3 = p1.T + i3 = hmc.I_1_1(cosmo, k_use, aa, prof3)[None, :] + i124 = hmc.I_1_3(cosmo, k_use, aa, prof4, prof12_2pt, prof2=prof1, + prof3=prof2).T + # Permutation 4 + # p4 = p3 # (because k_c = k_d) + i4 = hmc.I_1_1(cosmo, k_use, aa, prof3)[None, :] + i123 = hmc.I_1_3(cosmo, k_use, aa, prof3, prof12_2pt, prof2=prof1, + prof3=prof2).T + #### + + # print(i1.shape) + # print(i234.shape) + # print(i4.shape) + # print(i123.shape) + tk_2h_13 = p1 * (i1 * i234 + i2 * i134) + p3 * (i3 * i124 + i4 * i123) + + # Normalize + out[ia, :, :] = tk_2h_13 * norm + + if np.ndim(a) == 0: + out = np.squeeze(out, axis=0) + if np.ndim(k) == 0: + out = np.squeeze(out, axis=-1) + out = np.squeeze(out, axis=-1) + return out + + +def halomod_trispectrum_3h(cosmo, hmc, k, a, prof1, prof2=None, + prof3=None, prof4=None, + prof13_2pt=None, prof14_2pt=None, + prof24_2pt=None, prof32_2pt=None, + normprof1=False, normprof2=False, normprof3=False, + normprof4=False, p_of_k_a=None): + """ Computes the isotropized halo model 3-halo trispectrum for four profiles + :math:`u_{1,2}`, :math:`v_{1,2}` as + + .. math:: + \\bar{T}^{3h}(k_1, k_2, a) = \\int \\frac{d\\varphi_1}{2\\pi} + \\int \\frac{d\\varphi_2}{2\\pi} + T^{2h}_{22}({\\bf k_1},-{\\bf k_1},{\\bf k_2},-{\\bf k_2}), + + with + + .. math:: + T^{3h}{u_1,u_2;v_1,v_2}(k_u,k_v,a) = + B^{PT}({\bf k_{u_1}}, {\bf k_{u_2}}, {\bf k_{v_1}} + {\bf k_{v_2}}) \\, + I^1_1(k_{u_1} | u) I^1_1(k_{u_2} | u) I^1_2(k_{v_1}, k_{v_2}|v}) \\, + + 5 perm + + where :math:`I^1_1` and :math:`I^1_2` are defined in the documentation + of :math:`~HMCalculator.I_1_1` and :math:`~HMCalculator.I_1_2`, + respectively; and :math:`B^{PT}` can be found in Eq. 30 of arXiv:1302.6994. + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + k (float or array_like): comoving wavenumber in Mpc^-1. + a (float or array_like): scale factor. + prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + prof2 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_2` above. If `None`, + `prof1` will be used as `prof2`. + prof3 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_1` above. If `None`, + `prof1` will be used as `prof3`. + prof4 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_2` above. If `None`, + `prof3` will be used as `prof4`. + prof13_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + a profile covariance object returning the the two-point + moment of `prof1` and `prof3`. If `None`, the default + second moment will be used, corresponding to the + products of the means of both profiles. + prof14_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof14_2pt` for `prof1` and `prof4`. + prof24_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof14_2pt` for `prof2` and `prof4`. + prof32_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof14_2pt` for `prof3` and `prof2`. + normprof1 (bool): if `True`, this integral will be + normalized by :math:`I^0_1(k\\rightarrow 0,a|u)` + (see :meth:`~HMCalculator.I_0_1`), where + :math:`u` is the profile represented by `prof1`. + normprof2 (bool): same as `normprof1` for `prof2`. + normprof3 (bool): same as `normprof1` for `prof3`. + normprof4 (bool): same as `normprof1` for `prof4`. 
+ p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, the power + spectrum stored within `cosmo` will be used. + + Returns: + float or array_like: integral values evaluated at each + combination of `k` and `a`. The shape of the output will + be `(N_a, N_k, N_k)` where `N_k` and `N_a` are the sizes of + `k` and `a` respectively. The ordering is such that + `output[ia, ik2, ik1] = T(k[ik1], k[ik2], a[ia])` + If `k` or `a` are scalars, the corresponding dimension will + be squeezed out on output. + """ + a_use = np.atleast_1d(a) + k_use = np.atleast_1d(k) + + # Romberg needs 1 + 2^n points + # Since the functions we average depend only on cos(theta) we can rewrite + # the integrals as \int_0^2pi dtheta f(cos theta) / 2pi as + # \int_0^pi dtheta f(cos theta) / pi + # Exclude theta = pi to avoid k + k' = 0 + theta = np.linspace(0, np.pi - 1e-5, 129) + dtheta = theta[1] - theta[0] + cth = np.cos(theta) + + # Check inputs + if not isinstance(prof1, HaloProfile): + raise TypeError("prof1 must be of type `HaloProfile`") + if prof2 is None: + prof2 = prof1 + elif not isinstance(prof2, HaloProfile): + raise TypeError("prof2 must be of type `HaloProfile` or `None`") + if prof3 is None: + prof3 = prof1 + elif not isinstance(prof3, HaloProfile): + raise TypeError("prof3 must be of type `HaloProfile` or `None`") + if prof4 is None: + prof4 = prof3 + elif not isinstance(prof4, HaloProfile): + raise TypeError("prof4 must be of type `HaloProfile` or `None`") + + if prof13_2pt is None: + prof13_2pt = Profile2pt() + elif not isinstance(prof13_2pt, Profile2pt): + raise TypeError("prof13_2pt must be of type `Profile2pt` or `None`") + if (prof14_2pt is not None) and (not isinstance(prof14_2pt, Profile2pt)): + raise TypeError("prof14_2pt must be of type `Profile2pt` or `None`") else: - raise TypeError("p_of_k_a must be `None`, \'linear\', " - "\'nonlinear\' or a `Pk2D` object") + prof14_2pt = prof13_2pt + if (prof24_2pt is not None) and (not isinstance(prof24_2pt, Profile2pt)): + raise TypeError("prof14_2pt must be of type `Profile2pt` or `None`") + else: + prof24_2pt = prof13_2pt + if (prof32_2pt is not None) and (not isinstance(prof32_2pt, Profile2pt)): + raise TypeError("prof32_2pt must be of type `Profile2pt` or `None`") + else: + prof32_2pt = prof13_2pt - na = len(a_arr) + def get_norm(normprof, prof, sf): + if normprof: + return hmc.profile_norm(cosmo, sf, prof) + else: + return 1 + + # Power spectrum + def get_pk(k, a): + if isinstance(p_of_k_a, Pk2D): + pk = p_of_k_a.eval(k, a, cosmo) + elif (p_of_k_a is None) or (str(p_of_k_a) == 'linear'): + pk = linear_matter_power(cosmo, k, a) + elif str(p_of_k_a) == 'nonlinear': + pk = nonlin_matter_power(cosmo, k, a) + else: + raise TypeError("p_of_k_a must be `None`, \'linear\', " + "\'nonlinear\' or a `Pk2D` object") + + return pk + + # Compute bispectrum + # Encapsulate code in a function + def get_kr_and_f2(): + kk = k_use[:, None, None] + kp = k_use[None, :, None] + kr2 = kk ** 2 + kp ** 2 + 2 * kk * kp * cth + kr = np.sqrt(kr2) + + f2 = 5./7. - 0.5 * (1 + kk ** 2 / kr2) * (1 + kp / kk * cth) + \ + 2/7. * kk ** 2 / kr2 * (1 + kp / kk * cth)**2 + # When kr = 0: + # k^2 / kr^2 (1 + k / kr cos) -> k^2/(2k^2 + 2k^2 cos)*(1 + cos) = 1/2 + # k^2 / kr^2 (1 + k / kr cos)^2 -> (1 + cos)/2 = 0 + f2[np.where(kr == 0)] = 13. 
/ 28 + + return kr, f2 + + kr, f2 = get_kr_and_f2() + + def get_Bpt(a): + # We only need to compute the independent k * k * cos(theta) since Pk + # only depends on the module of ki + kj + pk = get_pk(k_use, a)[:, None] + pkr = get_pk(kr.flatten(), a).reshape(kr.shape) + P3 = scipy.integrate.romb(pkr * f2, dtheta, axis=-1) + + Bpt = 6. / 7. * pk * pk.T + 2 * pk * P3 + Bpt += Bpt.T + + return Bpt + + na = len(a_use) nk = len(k_use) - dpk12 = np.zeros([na, nk]) - dpk34 = np.zeros([na, nk]) - for ia, aa in enumerate(a_arr): + + out = np.zeros([na, nk, nk]) + for ia, aa in enumerate(a_use): # Compute profile normalizations - norm = hmc.profile_norm(cosmo, aa, prof) ** 2 - i12 = hmc.I_1_2(cosmo, k_use, aa, prof, prof_2pt, prof) * norm + norm1 = get_norm(normprof1, prof1, aa) + # Compute second profile normalization + if prof2 is None: + norm2 = norm1 + else: + norm2 = get_norm(normprof2, prof2, aa) + if prof3 is None: + norm3 = norm1 + else: + norm3 = get_norm(normprof3, prof3, aa) + if prof4 is None: + norm4 = norm3 + else: + norm4 = get_norm(normprof4, prof4, aa) - pk = pk2d.eval(k_use, aa, cosmo) - dpk = pk2d.eval_dlogpk_dlogk(k_use, aa, cosmo) - # ~ [(47/21 - 1/3 dlogPk/dlogk) * Pk+I12] - dpk12[ia] = ((2.2380952381-dpk/3)*pk + i12) - dpk34[ia] = dpk12[ia].copy() # Avoid surprises + norm = norm1 * norm2 * norm3 * norm4 - # Counter terms for clustering (i.e. - (bA + bB) * PAB - if is_number_counts1 or is_number_counts2 or is_number_counts3 or \ - is_number_counts4: - b1 = b2 = b3 = b4 = 0 + # Permutation 0 + # Bpt_1_2_34 = 0 + # i1 = hmc.I_1_1(cosmo, k_use, aa, prof1)[:, None] + # i2 = hmc.I_1_1(cosmo, k_use, aa, prof2)[:, None] + # i34 = hmc.I_1_2(cosmo, k_use, aa, prof3, prof34_2pt, prof2=prof4) + + i1 = hmc.I_1_1(cosmo, k_use, aa, prof1)[:, None] + i2 = hmc.I_1_1(cosmo, k_use, aa, prof2)[:, None] + i3 = hmc.I_1_1(cosmo, k_use, aa, prof3)[None, :] + i4 = hmc.I_1_1(cosmo, k_use, aa, prof4)[None, :] + + # Permutation 1: 2 <-> 3 + i24 = hmc.I_1_2(cosmo, k_use, aa, prof2, prof24_2pt, prof2=prof4, + diag=False) + # Permutation 2: 2 <-> 4 + i32 = hmc.I_1_2(cosmo, k_use, aa, prof3, prof32_2pt, prof2=prof2, + diag=False) + # Permutation 3: 1 <-> 3 + i14 = hmc.I_1_2(cosmo, k_use, aa, prof1, prof14_2pt, prof2=prof4, + diag=False) + # Permutation 4: 1 <-> 4 + i31 = hmc.I_1_2(cosmo, k_use, aa, prof3, prof13_2pt, prof2=prof1, + diag=False) + + # Permutation 5: 12 <-> 34 + # Bpt_3_4_12 = 0 + # i3 = hmc.I_1_1(cosmo, k_use, aa, prof3)[None, :] + # i4 = hmc.I_1_1(cosmo, k_use, aa, prof4)[None, :] + # i12 = hmc.I_1_2(cosmo, k_use, aa, prof1, prof12_2pt, prof2=prof2) + + Bpt = get_Bpt(aa) + tk_3h = Bpt * (i1 * i3 * i24 + i1 * i4 * i32 + + i3 * i2 * i14 + i4 * i2 * i31) - i02 = hmc.I_0_2(cosmo, k_use, aa, prof, prof_2pt, prof) * norm - P_12 = P_34 = pk + i02 + # Normalize + out[ia, :, :] = tk_3h * norm - if is_number_counts1: - b1 = bias1[ia] - if is_number_counts2: - b2 = bias2[ia] - if is_number_counts3: - b3 = bias3[ia] - if is_number_counts4: - b4 = bias4[ia] + if np.ndim(a) == 0: + out = np.squeeze(out, axis=0) + if np.ndim(k) == 0: + out = np.squeeze(out, axis=-1) + out = np.squeeze(out, axis=-1) + return out - dpk12[ia, :] -= (b1 + b2) * P_12 - dpk34[ia, :] -= (b3 + b4) * P_34 - dpk12[ia] *= bias1[ia] * bias2[ia] - dpk34[ia] *= bias3[ia] * bias4[ia] +def halomod_trispectrum_4h(cosmo, hmc, k, a, prof1, prof2=None, + prof3=None, prof4=None, normprof1=False, + normprof2=False, normprof3=False, normprof4=False, + p_of_k_a=None): + """ Computes the isotropized halo model 4-halo trispectrum for four + profiles 
:math:`u_{1,2}`, :math:`v_{1,2}` as + + .. math:: + \\bar{T}^{4h}(k_1, k_2, a) = \\int \\frac{d\\varphi_1}{2\\pi} + \\int \\frac{d\\varphi_2}{2\\pi} + T^{4h}({\\bf k_1},-{\\bf k_1},{\\bf k_2},-{\\bf k_2}), + + with + + .. math:: + T^{4h}{u_1,u_2;v_1,v_2}(k_u,k_v,a) = + T^{PT}({\bf k_{u_1}}, {\bf k_{u_2}}, {\bf k_{v_1}}, {\bf k_{v_2}}) \\, + I^1_1(k_{u_1} | u) I^1_1(k_{u_2} | u) I^1_1(k_{v_1} | v) \\, + I^1_1(k_{v_2} | v) \\, + + where :math:`I^1_1` is defined in the documentation + of :math:`~HMCalculator.I_1_1` and :math:`P^{PT}` can be found in Eq. 30 + of arXiv:1302.6994. + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + k (float or array_like): comoving wavenumber in Mpc^-1. + a (float or array_like): scale factor. + prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + prof2 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_2` above. If `None`, + `prof1` will be used as `prof2`. + prof3 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_1` above. If `None`, + `prof1` will be used as `prof3`. + prof4 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_2` above. If `None`, + `prof3` will be used as `prof4`. + normprof1 (bool): if `True`, this integral will be + normalized by :math:`I^0_1(k\\rightarrow 0,a|u)` + (see :meth:`~HMCalculator.I_0_1`), where + :math:`u` is the profile represented by `prof1`. + normprof2 (bool): same as `normprof1` for `prof2`. + normprof3 (bool): same as `normprof1` for `prof3`. + normprof4 (bool): same as `normprof1` for `prof4`. + p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, the power + spectrum stored within `cosmo` will be used. + + Returns: + float or array_like: integral values evaluated at each + combination of `k` and `a`. The shape of the output will + be `(N_a, N_k, N_k)` where `N_k` and `N_a` are the sizes of + `k` and `a` respectively. The ordering is such that + `output[ia, ik2, ik1] = T(k[ik1], k[ik2], a[ia])` + If `k` or `a` are scalars, the corresponding dimension will + be squeezed out on output. 
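+
+    Example (an illustrative sketch only; `cosmo`, `hmc` and the NFW
+    `prof` are assumed to be set up as in the SSC example earlier in this
+    patch)::
+
+        k = np.logspace(-2, 0, 16)    # Mpc^-1
+        a = np.array([0.5, 1.0])
+        tk4h = ccl.halos.halomod_trispectrum_4h(
+            cosmo, hmc, k, a, prof,
+            normprof1=True, normprof2=True,
+            normprof3=True, normprof4=True)
+        # tk4h.shape == (2, 16, 16); with identical profiles the result
+        # should be symmetric in its two k indices.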
+ """ + a_use = np.atleast_1d(a) + k_use = np.atleast_1d(k) + + # Check inputs + if not isinstance(prof1, HaloProfile): + raise TypeError("prof1 must be of type `HaloProfile`") + if prof2 is None: + prof2 = prof1 + elif not isinstance(prof2, HaloProfile): + raise TypeError("prof2 must be of type `HaloProfile` or `None`") + if prof3 is None: + prof3 = prof1 + elif not isinstance(prof3, HaloProfile): + raise TypeError("prof3 must be of type `HaloProfile` or `None`") + if prof4 is None: + prof4 = prof3 + elif not isinstance(prof4, HaloProfile): + raise TypeError("prof4 must be of type `HaloProfile` or `None`") + + def get_norm(normprof, prof, sf): + if normprof: + return hmc.profile_norm(cosmo, sf, prof) + else: + return 1 + + na = len(a_use) + nk = len(k_use) + + # Power spectrum + def get_pk(k, a): + # This returns int dphi / 2pi int dphi' / 2pi P(kkth) + if isinstance(p_of_k_a, Pk2D): + pk = p_of_k_a.eval(k, a, cosmo) + elif (p_of_k_a is None) or (str(p_of_k_a) == 'linear'): + pk = linear_matter_power(cosmo, k, a) + elif str(p_of_k_a) == 'nonlinear': + pk = nonlin_matter_power(cosmo, k, a) + else: + raise TypeError("p_of_k_a must be `None`, \'linear\', " + "\'nonlinear\' or a `Pk2D` object") + + return pk + + # Romberg needs 1 + 2^n points + # Since the functions we average depend only on cos(theta) we can rewrite + # the integrals as \int_0^2pi dtheta f(cos theta) / 2pi as + # \int_0^pi dtheta f(cos theta) / pi + # Exclude theta = pi to avoid k + k' = 0 + theta = np.linspace(0, np.pi - 1e-5, 129) + dtheta = theta[1] - theta[0] + cth = np.cos(theta) + + def isotropize(arr): + int_arr = scipy.integrate.romb(arr, dtheta, axis=-1) + return int_arr / (2 * np.pi) + + def get_kr_f2_f2T_X(): + k = k_use[:, None, None] + kp = k_use[None, :, None] + kr2 = k ** 2 + kp ** 2 + 2 * k * kp * cth + kr = np.sqrt(kr2) + + f2 = 5./7. - 0.5 * (1 + k ** 2 / kr2) * (1 + kp / k * cth) + \ + 2/7. * k ** 2 / kr2 * (1 + kp / k * cth)**2 + f2[np.where(kr == 0)] = 13. / 28 + + # k <-> k' + f2T = np.transpose(f2, (1, 0, 2)) + + r = kp / k + intd = (5 * r + (7 - 2*r**2)*cth) / (1 + r**2 + 2*r*cth) * \ + (3/7. * r + 0.5 * (1 + r**2) * cth + 4/7. * r * cth**2) + # When kr = 0, r = 1 and intd = 0 + intd[np.where(kr == 0)] = 0 + X = -7./4. * (1 + r.reshape(nk, nk)**2) + isotropize(intd) + + return kr, f2, f2T, X + + kr, f2, f2T, X = get_kr_f2_f2T_X() + + out = np.zeros([na, nk, nk]) + for ia, aa in enumerate(a_use): + # Compute profile normalizations + norm1 = get_norm(normprof1, prof1, aa) + # Compute second profile normalization + if prof2 is None: + norm2 = norm1 + else: + norm2 = get_norm(normprof2, prof2, aa) + if prof3 is None: + norm3 = norm1 + else: + norm3 = get_norm(normprof3, prof3, aa) + if prof4 is None: + norm4 = norm3 + else: + norm4 = get_norm(normprof4, prof4, aa) + + norm = norm1 * norm2 * norm3 * norm4 + + pk = get_pk(k_use, aa)[:, None] + pkr = get_pk(kr.flatten(), aa).reshape((nk, nk, theta.size)) + + P4A = isotropize(f2 ** 2 * pkr) + P4X = isotropize(f2 * f2T * pkr) + + t1113 = 4/9. 
* pk**2 * pk.T * X + t1113 += t1113.T + + t1122 = 8 * (pk**2 * P4A + pk * pk.T * P4X) + t1122 += t1122.T + + # Now the halo model integrals + i1 = hmc.I_1_1(cosmo, k_use, aa, prof1)[:, None] + i2 = hmc.I_1_1(cosmo, k_use, aa, prof2)[:, None] + i3 = hmc.I_1_1(cosmo, k_use, aa, prof3)[None, :] + i4 = hmc.I_1_1(cosmo, k_use, aa, prof4)[None, :] + + tk_4h = i1 * i2 * i3 * i4 * (t1113 + t1122) + + # Normalize + out[ia, :, :] = tk_4h * norm + + if np.ndim(a) == 0: + out = np.squeeze(out, axis=0) + if np.ndim(k) == 0: + out = np.squeeze(out, axis=-1) + out = np.squeeze(out, axis=-1) + return out + + +def halomod_Tk3D_1h(cosmo, hmc, + prof1, prof2=None, prof12_2pt=None, + prof3=None, prof4=None, prof34_2pt=None, + normprof1=False, normprof2=False, + normprof3=False, normprof4=False, + lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False): + """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing + the 1-halo trispectrum for four quantities defined by + their respective halo profiles. See :meth:`halomod_trispectrum_1h` + for more details about the actual calculation. + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + prof2 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_2` above. If `None`, + `prof1` will be used as `prof2`. + prof12_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + a profile covariance object returning the the two-point + moment of `prof1` and `prof2`. If `None`, the default + second moment will be used, corresponding to the + products of the means of both profiles. + prof3 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_1` above. If `None`, + `prof1` will be used as `prof3`. + prof4 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_2` above. If `None`, + `prof3` will be used as `prof4`. + prof34_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof12_2pt` for `prof3` and `prof4`. + normprof1 (bool): if `True`, this integral will be + normalized by :math:`I^0_1(k\\rightarrow 0,a|u)` + (see :meth:`~HMCalculator.I_0_1`), where + :math:`u` is the profile represented by `prof1`. + normprof2 (bool): same as `normprof1` for `prof2`. + normprof3 (bool): same as `normprof1` for `prof3`. + normprof4 (bool): same as `normprof1` for `prof4`. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: 1-halo trispectrum. 
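+
+    Example (an illustrative sketch only; `cosmo`, `hmc` and the NFW
+    `prof` are assumed to be set up as in the SSC example earlier in this
+    patch)::
+
+        tk3d_1h = ccl.halos.halomod_Tk3D_1h(
+            cosmo, hmc, prof, normprof1=True, normprof2=True,
+            normprof3=True, normprof4=True, use_log=True)
+        # Retrieve the (a, k, k) grid sampled by the interpolator;
+        # the last return value is a list with the sampled array(s).
+        a_s, lk1, lk2, tkk = tk3d_1h.get_spline_arrays()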
+ """ + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status, cosmo=cosmo) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status, cosmo=cosmo) + + tkk = halomod_trispectrum_1h(cosmo, hmc, np.exp(lk_arr), a_arr, + prof1, prof2=prof2, + prof12_2pt=prof12_2pt, + prof3=prof3, prof4=prof4, + prof34_2pt=prof34_2pt, + normprof1=normprof1, normprof2=normprof2, + normprof3=normprof3, normprof4=normprof4) + if use_log: + if np.any(tkk <= 0): + warnings.warn( + "Some values were not positive. " + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + tkk = np.log(tkk) + + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, tkk_arr=tkk, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, is_logt=use_log) + return tk3d + + +def halomod_Tk3D_2h(cosmo, hmc, + prof1, prof2=None, + prof3=None, prof4=None, + prof12_2pt=None, prof13_2pt=None, prof14_2pt=None, + prof24_2pt=None, prof32_2pt=None, prof34_2pt=None, + normprof1=False, normprof2=False, + normprof3=False, normprof4=False, p_of_k_a=None, + lk_arr=None, a_arr=None, + extrap_order_lok=1, extrap_order_hik=1, use_log=False): + """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing the 2-halo + trispectrum for four quantities defined by their respective halo profiles. + See :meth:`halomod_trispectrum_1h` for more details about the actual + calculation. + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + prof2 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_2` above. If `None`, + `prof1` will be used as `prof2`. + prof3 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_1` above. If `None`, + `prof1` will be used as `prof3`. + prof4 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_2` above. If `None`, + `prof3` will be used as `prof4`. + prof12_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + a profile covariance object returning the the two-point + moment of `prof1` and `prof2`. If `None`, the default + second moment will be used, corresponding to the + products of the means of both profiles. + prof13_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof12_2pt` for `prof1` and `prof3`. + prof14_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof14_2pt` for `prof1` and `prof4`. + prof24_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof14_2pt` for `prof2` and `prof4`. + prof32_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof14_2pt` for `prof3` and `prof2`. + prof34_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof34_2pt` for `prof3` and `prof4`. + p13_of_k_a (:class:`~pyccl.pk2d.Pk2D`): same as p12_of_k_a for 13 + p14_of_k_a (:class:`~pyccl.pk2d.Pk2D`): same as p12_of_k_a for 14 + normprof1 (bool): if `True`, this integral will be + normalized by :math:`I^0_1(k\\rightarrow 0,a|u)` + (see :meth:`~HMCalculator.I_0_1`), where + :math:`u` is the profile represented by `prof1`. + normprof2 (bool): same as `normprof1` for `prof2`. + normprof3 (bool): same as `normprof1` for `prof3`. 
+ normprof4 (bool): same as `normprof1` for `prof4`. + p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, the power + spectrum stored within `cosmo` will be used. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: 2-halo trispectrum. + """ + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status) + + tkk_2h_22 = halomod_trispectrum_2h_22(cosmo, hmc, np.exp(lk_arr), a_arr, + prof1, prof2=prof2, + prof3=prof3, prof4=prof4, + prof13_2pt=prof13_2pt, + prof14_2pt=prof14_2pt, + prof24_2pt=prof24_2pt, + prof32_2pt=prof32_2pt, + normprof1=normprof1, + normprof2=normprof2, + normprof3=normprof3, + normprof4=normprof4, + p_of_k_a=p_of_k_a) + + tkk_2h_13 = halomod_trispectrum_2h_13(cosmo, hmc, np.exp(lk_arr), a_arr, + prof1, prof2=prof2, + prof3=prof3, prof4=prof4, + prof12_2pt=prof12_2pt, + prof34_2pt=prof34_2pt, + normprof1=normprof1, + normprof2=normprof2, + normprof3=normprof3, + normprof4=normprof4, + p_of_k_a=p_of_k_a) + + tkk = tkk_2h_22 + tkk_2h_13 + + if use_log: + if np.any(tkk <= 0): + warnings.warn( + "Some values were not positive. " + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + tkk = np.log(tkk) + + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, tkk_arr=tkk, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, is_logt=use_log) + return tk3d + + +def halomod_Tk3D_3h(cosmo, hmc, + prof1, prof2=None, prof3=None, prof4=None, + prof13_2pt=None, prof14_2pt=None, prof24_2pt=None, + prof32_2pt=None, + normprof1=False, normprof2=False, + normprof3=False, normprof4=False, + lk_arr=None, a_arr=None, p_of_k_a=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False): + """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing + the 3-halo trispectrum for four quantities defined by + their respective halo profiles. See :meth:`halomod_trispectrum_3h` + for more details about the actual calculation. + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + prof2 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_2` above. If `None`, + `prof1` will be used as `prof2`. + prof3 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_1` above. 
If `None`, + `prof1` will be used as `prof3`. + prof4 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_2` above. If `None`, + `prof3` will be used as `prof4`. + prof13_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + a profile covariance object returning the the two-point + moment of `prof1` and `prof3`. If `None`, the default + second moment will be used, corresponding to the + products of the means of both profiles. + prof14_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof14_2pt` for `prof1` and `prof4`. + prof24_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof14_2pt` for `prof2` and `prof4`. + prof32_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof14_2pt` for `prof3` and `prof2`. + normprof1 (bool): if `True`, this integral will be + normalized by :math:`I^0_1(k\\rightarrow 0,a|u)` + (see :meth:`~HMCalculator.I_0_1`), where + :math:`u` is the profile represented by `prof1`. + normprof2 (bool): same as `normprof1` for `prof2`. + normprof3 (bool): same as `normprof1` for `prof3`. + normprof4 (bool): same as `normprof1` for `prof4`. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, the power + spectrum stored within `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: 3-halo trispectrum. + """ + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status) + + tkk = halomod_trispectrum_3h(cosmo, hmc, np.exp(lk_arr), a_arr, + prof1=prof1, + prof2=prof2, + prof3=prof3, + prof4=prof4, + prof13_2pt=prof13_2pt, + prof14_2pt=prof14_2pt, + prof24_2pt=prof24_2pt, + prof32_2pt=prof32_2pt, + normprof1=normprof1, + normprof2=normprof2, + normprof3=normprof3, + normprof4=normprof4, + p_of_k_a=p_of_k_a) + + if use_log: + if np.any(tkk <= 0): + warnings.warn( + "Some values were not positive. 
" + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + tkk = np.log(tkk) + + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, tkk_arr=tkk, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, is_logt=use_log) + return tk3d + + +def halomod_Tk3D_4h(cosmo, hmc, + prof1, prof2=None, prof3=None, prof4=None, + normprof1=False, normprof2=False, + normprof3=False, normprof4=False, + lk_arr=None, a_arr=None, p_of_k_a=None, + extrap_order_lok=1, extrap_order_hik=1, + use_log=False): + """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing + the 3-halo trispectrum for four quantities defined by + their respective halo profiles. See :meth:`halomod_trispectrum_4h` + for more details about the actual calculation. + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_1` above. + prof2 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`u_2` above. If `None`, + `prof1` will be used as `prof2`. + prof3 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_1` above. If `None`, + `prof1` will be used as `prof3`. + prof4 (:class:`~pyccl.halos.profiles.HaloProfile`): halo + profile (corresponding to :math:`v_2` above. If `None`, + `prof3` will be used as `prof4`. + normprof1 (bool): if `True`, this integral will be + normalized by :math:`I^0_1(k\\rightarrow 0,a|u)` + (see :meth:`~HMCalculator.I_0_1`), where + :math:`u` is the profile represented by `prof1`. + normprof2 (bool): same as `normprof1` for `prof2`. + normprof3 (bool): same as `normprof1` for `prof3`. + normprof4 (bool): same as `normprof1` for `prof4`. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, the power + spectrum stored within `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: 4-halo trispectrum. 
+ """ + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status) + + tkk = halomod_trispectrum_4h(cosmo, hmc, np.exp(lk_arr), a_arr, + prof1=prof1, + prof2=prof2, + prof3=prof3, + prof4=prof4, + normprof1=normprof1, + normprof2=normprof2, + normprof3=normprof3, + normprof4=normprof4, + p_of_k_a=None) if use_log: - if np.any(dpk12 <= 0) or np.any(dpk34 <= 0): + if np.any(tkk <= 0): warnings.warn( "Some values were not positive. " "Will not interpolate in log-space.", category=CCLWarning) use_log = False else: - dpk12 = np.log(dpk12) - dpk34 = np.log(dpk34) + tkk = np.log(tkk) - tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, - pk1_arr=dpk12, pk2_arr=dpk34, + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, tkk_arr=tkk, extrap_order_lok=extrap_order_lok, extrap_order_hik=extrap_order_hik, is_logt=use_log) return tk3d @@ -1186,13 +2199,10 @@ def halomod_Tk3D_SSC(cosmo, hmc, .. math:: \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) - P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) - P_{u,v}(k) + P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) where the :math:`I^a_b` are defined in the documentation - of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2` and - :math:`b_{u}` and :math:`b_{v}` are the linear halo biases for quantities - :math:`u` and :math:`v`, respectively (zero if they are not clustering). + of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2`. Args: cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. @@ -1277,17 +2287,6 @@ def halomod_Tk3D_SSC(cosmo, hmc, if (prof34_2pt is not None) and (not isinstance(prof34_2pt, Profile2pt)): raise TypeError("prof34_2pt must be of type `Profile2pt` or `None`") - # number counts profiles must be normalized - profs = {prof1: normprof1, prof2: normprof2, - prof3: normprof3, prof4: normprof4} - - for i, (profile, normalization) in enumerate(profs.items()): - if (profile is not None - and profile.is_number_counts - and not normalization): - raise ValueError( - f"normprof{i+1} must be True if prof{i+1} is number counts") - if prof3 is None: prof3_bak = prof1 else: @@ -1358,45 +2357,6 @@ def get_norm(normprof, prof, sf): dpk12[ia, :] = norm12*((2.2380952381-dpk/3)*i11_1*i11_2*pk+i12_12) dpk34[ia, :] = norm34*((2.2380952381-dpk/3)*i11_3*i11_4*pk+i12_34) - # Counter terms for clustering (i.e. 
- (bA + bB) * PAB - if prof1.is_number_counts or (prof2 is None or prof2.is_number_counts): - b1 = b2 = np.zeros_like(k_use) - i02_12 = hmc.I_0_2(cosmo, k_use, aa, prof1, prof12_2pt, prof2) - P_12 = norm12 * (pk * i11_1 * i11_2 + i02_12) - - if prof1.is_number_counts: - b1 = i11_1 * norm1 - - if prof2 is None: - b2 = b1 - elif prof2.is_number_counts: - b2 = i11_2 * norm2 - - dpk12[ia, :] -= (b1 + b2) * P_12 - - if prof3_bak.is_number_counts or \ - ((prof3_bak.is_number_counts and prof4 is None) or - (prof4 is not None) and prof4.is_number_counts): - b3 = b4 = np.zeros_like(k_use) - if (prof3 is None) and (prof4 is None) and (prof34_2pt is None): - i02_34 = i02_12 - else: - i02_34 = hmc.I_0_2(cosmo, k_use, aa, prof3_bak, prof34_2pt_bak, - prof4) - P_34 = norm34 * (pk * i11_3 * i11_4 + i02_34) - - if prof3 is None: - b3 = b1 - elif prof3.is_number_counts: - b3 = i11_3 * norm3 - - if prof4 is None: - b4 = b3 - elif prof4.is_number_counts: - b4 = i11_4 * norm4 - - dpk34[ia, :] -= (b3 + b4) * P_34 - if use_log: if np.any(dpk12 <= 0) or np.any(dpk34 <= 0): warnings.warn( @@ -1414,30 +2374,17 @@ def get_norm(normprof, prof, sf): extrap_order_hik=extrap_order_hik, is_logt=use_log) return tk3d, dpk12 -def halomod_Tk3D_SSC_debug(cosmo, hmc, - prof1, prof2=None, prof12_2pt=None, - prof3=None, prof4=None, prof34_2pt=None, - normprof1=False, normprof2=False, - normprof3=False, normprof4=False, - p_of_k_a=None, lk_arr=None, a_arr=None, - extrap_order_lok=1, extrap_order_hik=1, - use_log=False): - """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing - the super-sample covariance trispectrum, given by the tensor - product of the power spectrum responses associated with the - two pairs of quantities being correlated. Each response is - calculated as: - - .. math:: - \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = - \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) - P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) - P_{u,v}(k) - where the :math:`I^a_b` are defined in the documentation - of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2` and - :math:`b_{u}` and :math:`b_{v}` are the linear halo biases for quantities - :math:`u` and :math:`v`, respectively (zero if they are not clustering). +def halomod_Tk3D_cNG(cosmo, hmc, prof1, prof2=None, prof3=None, prof4=None, + prof12_2pt=None, prof13_2pt=None, prof14_2pt=None, + prof24_2pt=None, prof32_2pt=None, prof34_2pt=None, + normprof1=False, normprof2=False, + normprof3=False, normprof4=False, p_of_k_a=None, + lk_arr=None, a_arr=None, extrap_order_lok=1, + extrap_order_hik=1, use_log=False): + """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing the non-Gaussian + covariance trispectrum for four quantities defined by their respective halo + profiles. This is the sum of the trispectrum terms 1h + 2h + 3h + 4h. Args: cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. @@ -1447,19 +2394,29 @@ def halomod_Tk3D_SSC_debug(cosmo, hmc, prof2 (:class:`~pyccl.halos.profiles.HaloProfile`): halo profile (corresponding to :math:`u_2` above. If `None`, `prof1` will be used as `prof2`. - prof12_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): - a profile covariance object returning the the two-point - moment of `prof1` and `prof2`. If `None`, the default - second moment will be used, corresponding to the - products of the means of both profiles. prof3 (:class:`~pyccl.halos.profiles.HaloProfile`): halo profile (corresponding to :math:`v_1` above. If `None`, `prof1` will be used as `prof3`. 
prof4 (:class:`~pyccl.halos.profiles.HaloProfile`): halo profile (corresponding to :math:`v_2` above. If `None`, `prof3` will be used as `prof4`. + prof12_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + a profile covariance object returning the the two-point + moment of `prof1` and `prof2`. If `None`, the default + second moment will be used, corresponding to the + products of the means of both profiles. + prof13_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof12_2pt` for `prof1` and `prof3`. + prof14_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof12_2pt` for `prof1` and `prof4`. + prof24_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof12_2pt` for `prof2` and `prof4`. + prof32_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): + same as `prof12_2pt` for `prof3` and `prof2`. prof34_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): same as `prof12_2pt` for `prof3` and `prof4`. + p13_of_k_a (:class:`~pyccl.pk2d.Pk2D`): same as p12_of_k_a for 13 + p14_of_k_a (:class:`~pyccl.pk2d.Pk2D`): same as p12_of_k_a for 14 normprof1 (bool): if `True`, this integral will be normalized by :math:`I^0_1(k\\rightarrow 0,a|u)` (see :meth:`~HMCalculator.I_0_1`), where @@ -1467,6 +2424,155 @@ def halomod_Tk3D_SSC_debug(cosmo, hmc, normprof2 (bool): same as `normprof1` for `prof2`. normprof3 (bool): same as `normprof1` for `prof3`. normprof4 (bool): same as `normprof1` for `prof4`. + p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to + be used as the linear matter power spectrum. If `None`, the power + spectrum stored within `cosmo` will be used. + a_arr (array): an array holding values of the scale factor + at which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + lk_arr (array): an array holding values of the natural + logarithm of the wavenumber (in units of Mpc^-1) at + which the trispectrum should be calculated for + interpolation. If `None`, the internal values used + by `cosmo` will be used. + extrap_order_lok (int): extrapolation order to be used on + k-values below the minimum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + extrap_order_hik (int): extrapolation order to be used on + k-values above the maximum of the splines. See + :class:`~pyccl.tk3d.Tk3D`. + use_log (bool): if `True`, the trispectrum will be + interpolated in log-space (unless negative or + zero values are found). + + Returns: + :class:`~pyccl.tk3d.Tk3D`: 2-halo trispectrum. 
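For context, `halomod_Tk3D_cNG` simply sums the 1-halo, 2-halo (22 and 13), 3-halo and 4-halo terms on one (a, ln k) grid. A hedged usage sketch follows, with the same caveats as above about class names and the `pyccl.halos` export path; the evaluation call follows the `Tk3D` interface of this repository and may differ in other CCL versions.

# Editor's sketch: total connected non-Gaussian trispectrum (1h+2h+3h+4h).
import numpy as np
import pyccl as ccl

cosmo = ccl.Cosmology(Omega_c=0.25, Omega_b=0.05, h=0.67,
                      n_s=0.96, sigma8=0.81)
mdef = ccl.halos.MassDef200m()
hmc = ccl.halos.HMCalculator(cosmo,
                             ccl.halos.MassFuncTinker10(cosmo, mass_def=mdef),
                             ccl.halos.HaloBiasTinker10(cosmo, mass_def=mdef),
                             mdef)
prof = ccl.halos.HaloProfileNFW(ccl.halos.ConcentrationDuffy08(mdef))

tk_cng = ccl.halos.halomod_Tk3D_cNG(cosmo, hmc, prof, normprof1=True)

# Tk3D interpolates T(k1, k2, a); evaluating at a single scale factor
# returns an (nk, nk) array (check the Tk3D API of your CCL version).
k = np.geomspace(1e-2, 1.0, 10)   # Mpc^-1
t_of_k = tk_cng.eval(k, 1.0)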
+ """ + if lk_arr is None: + status = 0 + nk = lib.get_pk_spline_nk(cosmo.cosmo) + lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) + check(status) + if a_arr is None: + status = 0 + na = lib.get_pk_spline_na(cosmo.cosmo) + a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) + check(status) + + tkk = halomod_trispectrum_1h(cosmo, hmc, np.exp(lk_arr), a_arr, + prof1, prof2=prof2, + prof12_2pt=prof12_2pt, + prof3=prof3, prof4=prof4, + prof34_2pt=prof34_2pt, + normprof1=normprof1, normprof2=normprof2, + normprof3=normprof3, normprof4=normprof4) + + tkk += halomod_trispectrum_2h_22(cosmo, hmc, np.exp(lk_arr), a_arr, + prof1, prof2=prof2, + prof3=prof3, prof4=prof4, + prof13_2pt=prof13_2pt, + prof14_2pt=prof14_2pt, + prof24_2pt=prof24_2pt, + prof32_2pt=prof32_2pt, + normprof1=normprof1, + normprof2=normprof2, + normprof3=normprof3, + normprof4=normprof4, + p_of_k_a=p_of_k_a) + + tkk += halomod_trispectrum_2h_13(cosmo, hmc, np.exp(lk_arr), a_arr, + prof1, prof2=prof2, + prof3=prof3, prof4=prof4, + prof12_2pt=prof12_2pt, + prof34_2pt=prof34_2pt, + normprof1=normprof1, + normprof2=normprof2, + normprof3=normprof3, + normprof4=normprof4, + p_of_k_a=p_of_k_a) + + tkk += halomod_trispectrum_3h(cosmo, hmc, np.exp(lk_arr), a_arr, + prof1=prof1, + prof2=prof2, + prof3=prof3, + prof4=prof4, + prof13_2pt=prof13_2pt, + prof14_2pt=prof14_2pt, + prof24_2pt=prof24_2pt, + prof32_2pt=prof32_2pt, + normprof1=normprof1, + normprof2=normprof2, + normprof3=normprof3, + normprof4=normprof4, + p_of_k_a=None) + + tkk += halomod_trispectrum_4h(cosmo, hmc, np.exp(lk_arr), a_arr, + prof1=prof1, + prof2=prof2, + prof3=prof3, + prof4=prof4, + normprof1=normprof1, + normprof2=normprof2, + normprof3=normprof3, + normprof4=normprof4, + p_of_k_a=None) + + if use_log: + if np.any(tkk <= 0): + warnings.warn( + "Some values were not positive. " + "Will not interpolate in log-space.", + category=CCLWarning) + use_log = False + else: + tkk = np.log(tkk) + + tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, tkk_arr=tkk, + extrap_order_lok=extrap_order_lok, + extrap_order_hik=extrap_order_hik, is_logt=use_log) + return tk3d + +def halomod_Tk3D_SSC_linear_bias(cosmo, hmc, prof, bias1=1, bias2=1, bias3=1, + bias4=1, + is_number_counts1=False, + is_number_counts2=False, + is_number_counts3=False, + is_number_counts4=False, + p_of_k_a=None, lk_arr=None, + a_arr=None, extrap_order_lok=1, + extrap_order_hik=1, use_log=False): + """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing + the super-sample covariance trispectrum, given by the tensor + product of the power spectrum responses associated with the + two pairs of quantities being correlated. Each response is + calculated as: + + .. math:: + \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = b_u b_v \\left( + \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) + P_L(k)+I^1_2(k|u,v) - (b_{u} + b_{v}) P_{u,v}(k) \\right) + + where the :math:`I^1_2` is defined in the documentation + :meth:`~HMCalculator.I_1_2` and :math:`b_{}` and :math:`b_{vv}` are the + linear halo biases for quantities :math:`u` and :math:`v`, respectively + (zero if they are not clustering). + + Args: + cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. + hmc (:class:`HMCalculator`): a halo model calculator. + prof (:class:`~pyccl.halos.profiles.HaloProfile`): halo NFW + profile. + bias1 (float or array): linear galaxy bias for quantity 1. If an array, + it has to have the shape of `a_arr`. + bias2 (float or array): linear galaxy bias for quantity 2. 
+ bias3 (float or array): linear galaxy bias for quantity 3. + bias4 (float or array): linear galaxy bias for quantity 4. + is_number_counts1 (bool): If True, quantity 1 will be considered + number counts and the clustering counter terms computed. Default False. + is_number_counts2 (bool): as is_number_counts1 but for quantity 2. + is_number_counts3 (bool): as is_number_counts1 but for quantity 3. + is_number_counts4 (bool): as is_number_counts1 but for quantity 4. p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to be used as the linear matter power spectrum. If `None`, the power spectrum stored within `cosmo` will be used. @@ -1503,44 +2609,19 @@ def halomod_Tk3D_SSC_debug(cosmo, hmc, a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) check(status, cosmo=cosmo) + # Make sure biases are of the form number of a x number of k + ones = np.ones_like(a_arr) + bias1 *= ones + bias2 *= ones + bias3 *= ones + bias4 *= ones + k_use = np.exp(lk_arr) # Check inputs - if not isinstance(prof1, HaloProfile): - raise TypeError("prof1 must be of type `HaloProfile`") - if (prof2 is not None) and (not isinstance(prof2, HaloProfile)): - raise TypeError("prof2 must be of type `HaloProfile` or `None`") - if (prof3 is not None) and (not isinstance(prof3, HaloProfile)): - raise TypeError("prof3 must be of type `HaloProfile` or `None`") - if (prof4 is not None) and (not isinstance(prof4, HaloProfile)): - raise TypeError("prof4 must be of type `HaloProfile` or `None`") - if prof12_2pt is None: - prof12_2pt = Profile2pt() - elif not isinstance(prof12_2pt, Profile2pt): - raise TypeError("prof12_2pt must be of type " - "`Profile2pt` or `None`") - if (prof34_2pt is not None) and (not isinstance(prof34_2pt, Profile2pt)): - raise TypeError("prof34_2pt must be of type `Profile2pt` or `None`") - - # number counts profiles must be normalized - profs = {prof1: normprof1, prof2: normprof2, - prof3: normprof3, prof4: normprof4} - - for i, (profile, normalization) in enumerate(profs.items()): - if (profile is not None - and profile.is_number_counts - and not normalization): - raise ValueError( - f"normprof{i+1} must be True if prof{i+1} is number counts") - - if prof3 is None: - prof3_bak = prof1 - else: - prof3_bak = prof3 - if prof34_2pt is None: - prof34_2pt_bak = prof12_2pt - else: - prof34_2pt_bak = prof34_2pt + if not isinstance(prof, HaloProfileNFW): + raise TypeError("prof must be of type `HaloProfileNFW`") + prof_2pt = Profile2pt() # Power spectrum if isinstance(p_of_k_a, Pk2D): @@ -1553,95 +2634,44 @@ def halomod_Tk3D_SSC_debug(cosmo, hmc, raise TypeError("p_of_k_a must be `None`, \'linear\', " "\'nonlinear\' or a `Pk2D` object") - def get_norm(normprof, prof, sf): - if normprof: - return hmc.profile_norm(cosmo, sf, prof) - else: - return 1 - na = len(a_arr) nk = len(k_use) dpk12 = np.zeros([na, nk]) dpk34 = np.zeros([na, nk]) for ia, aa in enumerate(a_arr): # Compute profile normalizations - norm1 = get_norm(normprof1, prof1, aa) - i11_1 = hmc.I_1_1(cosmo, k_use, aa, prof1) - # Compute second profile normalization - if prof2 is None: - norm2 = norm1 - i11_2 = i11_1 - else: - norm2 = get_norm(normprof2, prof2, aa) - i11_2 = hmc.I_1_1(cosmo, k_use, aa, prof2) - if prof3 is None: - norm3 = norm1 - i11_3 = i11_1 - else: - norm3 = get_norm(normprof3, prof3, aa) - i11_3 = hmc.I_1_1(cosmo, k_use, aa, prof3) - if prof4 is None: - norm4 = norm3 - i11_4 = i11_3 - else: - norm4 = get_norm(normprof4, prof4, aa) - i11_4 = hmc.I_1_1(cosmo, k_use, aa, prof4) - - i12_12 = hmc.I_1_2(cosmo, k_use, aa, prof1, - 
prof12_2pt, prof2) - if (prof3 is None) and (prof4 is None) and (prof34_2pt is None): - i12_34 = i12_12 - else: - i12_34 = hmc.I_1_2(cosmo, k_use, aa, prof3_bak, - prof34_2pt_bak, prof4) - norm12 = norm1 * norm2 - norm34 = norm3 * norm4 + norm = hmc.profile_norm(cosmo, aa, prof) ** 2 + i12 = hmc.I_1_2(cosmo, k_use, aa, prof, prof_2pt, prof) * norm pk = pk2d.eval(k_use, aa, cosmo) dpk = pk2d.eval_dlogpk_dlogk(k_use, aa, cosmo) - # (47/21 - 1/3 dlogPk/dlogk) * I11 * I11 * Pk+I12 - dpk12[ia, :] = norm12*((2.2380952381-dpk/3)*i11_1*i11_2*pk+i12_12) - dpk34[ia, :] = norm34*((2.2380952381-dpk/3)*i11_3*i11_4*pk+i12_34) + # ~ [(47/21 - 1/3 dlogPk/dlogk) * Pk+I12] + dpk12[ia] = ((2.2380952381-dpk/3)*pk + i12) + dpk34[ia] = dpk12[ia].copy() # Avoid surprises # Counter terms for clustering (i.e. - (bA + bB) * PAB - if prof1.is_number_counts or (prof2 is None or prof2.is_number_counts): - b1 = b2 = np.zeros_like(k_use) - i02_12 = hmc.I_0_2(cosmo, k_use, aa, prof1, prof12_2pt, prof2) - P_12 = norm12 * (pk * i11_1 * i11_2 + i02_12) + if is_number_counts1 or is_number_counts2 or is_number_counts3 or \ + is_number_counts4: + b1 = b2 = b3 = b4 = 0 - if prof1.is_number_counts: - b1 = i11_1 * norm1 + i02 = hmc.I_0_2(cosmo, k_use, aa, prof, prof_2pt, prof) * norm + P_12 = P_34 = pk + i02 - if prof2 is None: - b2 = b1 - elif prof2.is_number_counts: - b2 = i11_2 * norm2 + if is_number_counts1: + b1 = bias1[ia] + if is_number_counts2: + b2 = bias2[ia] + if is_number_counts3: + b3 = bias3[ia] + if is_number_counts4: + b4 = bias4[ia] dpk12[ia, :] -= (b1 + b2) * P_12 - - if prof3_bak.is_number_counts or \ - ((prof3_bak.is_number_counts and prof4 is None) or - (prof4 is not None) and prof4.is_number_counts): - b3 = b4 = np.zeros_like(k_use) - if (prof3 is None) and (prof4 is None) and (prof34_2pt is None): - i02_34 = i02_12 - else: - i02_34 = hmc.I_0_2(cosmo, k_use, aa, prof3_bak, prof34_2pt_bak, - prof4) - P_34 = norm34 * (pk * i11_3 * i11_4 + i02_34) - - if prof3 is None: - b3 = b1 - elif prof3.is_number_counts: - b3 = i11_3 * norm3 - - if prof4 is None: - b4 = b3 - elif prof4.is_number_counts: - b4 = i11_4 * norm4 - dpk34[ia, :] -= (b3 + b4) * P_34 + dpk12[ia] *= bias1[ia] * bias2[ia] + dpk34[ia] *= bias3[ia] * bias4[ia] + if use_log: if np.any(dpk12 <= 0) or np.any(dpk34 <= 0): warnings.warn( @@ -1657,5 +2687,4 @@ def get_norm(normprof, prof, sf): pk1_arr=dpk12, pk2_arr=dpk34, extrap_order_lok=extrap_order_lok, extrap_order_hik=extrap_order_hik, is_logt=use_log) - return tk3d, dpk12 - + return tk3d diff --git a/pyccl/halos/hmfunc.py b/pyccl/halos/hmfunc.py index 4e738321e..208cc61b8 100644 --- a/pyccl/halos/hmfunc.py +++ b/pyccl/halos/hmfunc.py @@ -33,7 +33,7 @@ class MassFunc(object): """ name = 'default' - def __init__(self, cosmo, mass_def=None, mass_def_strict=True): + def __init__(self, cosmo, mass_def=None, mass_def_strict=True, darkemulator=None): # Initialize sigma(M) splines if needed cosmo.compute_sigma() self.mass_def_strict = mass_def_strict @@ -48,8 +48,10 @@ def __init__(self, cosmo, mass_def=None, mass_def_strict=True): self.mdef = mass_def else: self._default_mdef() + + self.emu = darkemulator self._setup(cosmo) - + def _default_mdef(self): """ Assigns a default mass definition for this object if none is passed at initialization. 
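The `darkemulator` argument added to `MassFunc.__init__` above (stored as `self.emu`) lets an already-configured Dark Emulator instance be shared instead of being rebuilt inside `_setup`, as the `MassFuncDarkEmulator` hunk below shows. A hedged sketch of that usage; the `cparam` ordering mirrors the comment in the patch, and the `ccl.halos` export path and parameter values are assumptions.

# Editor's sketch: reuse one dark_emulator instance across CCL objects.
import numpy as np
import pyccl as ccl
from dark_emulator import darkemu

cosmo = ccl.Cosmology(Omega_c=0.25, Omega_b=0.05, h=0.67,
                      n_s=0.96, A_s=2.1e-9)
h = cosmo["h"]
omega_b = cosmo["Omega_b"] * h**2
omega_c = cosmo["Omega_c"] * h**2
Omega_de = 1 - (omega_c + omega_b + 0.00064) / h**2   # fixed omega_nu, as in the patch

emu = darkemu.de_interface.base_class()
# cparam = (omega_b, omega_c, Omega_de, ln(10^10 A_s), n_s, w)
emu.set_cosmology(np.array([omega_b, omega_c, Omega_de,
                            np.log(1e10 * cosmo["A_s"]), cosmo["n_s"], -1.0]))

# Passing the emulator lets _setup() skip re-initialization.
hmf_de = ccl.halos.MassFuncDarkEmulator(cosmo, darkemulator=emu)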
@@ -782,31 +784,33 @@ class MassFuncDarkEmulator(MassFunc): """ name = 'DarkEmulator' - def __init__(self, cosmo, mass_def=None, mass_def_strict=True): + def __init__(self, cosmo, mass_def=None, mass_def_strict=True, darkemulator=None): super(MassFuncDarkEmulator, self).__init__(cosmo, mass_def, - mass_def_strict) + mass_def_strict, + darkemulator) def _default_mdef(self): self.mdef = MassDef200m() def _setup(self, cosmo): - Omega_c = cosmo["Omega_c"] - Omega_b = cosmo["Omega_b"] - h = cosmo["h"] - n_s = cosmo["n_s"] - A_s = cosmo["A_s"] - - omega_c = Omega_c * h ** 2 - omega_b = Omega_b * h ** 2 - omega_nu = 0.00064 - Omega_L = 1 - ((omega_c + omega_b + omega_nu) / h **2) - - emu = darkemu.de_interface.base_class() - - #Parameters cparam (numpy array) : Cosmological parameters (𝜔𝑏, 𝜔𝑐, Ω𝑑𝑒, ln(10^10 𝐴𝑠), 𝑛𝑠, 𝑤) - cparam = np.array([omega_b,omega_c,Omega_L,np.log(10 ** 10 * A_s),n_s,-1.]) - emu.set_cosmology(cparam) - self.emu = emu + if self.emu == None: + Omega_c = cosmo["Omega_c"] + Omega_b = cosmo["Omega_b"] + h = cosmo["h"] + n_s = cosmo["n_s"] + A_s = cosmo["A_s"] + + omega_c = Omega_c * h ** 2 + omega_b = Omega_b * h ** 2 + omega_nu = 0.00064 + Omega_L = 1 - ((omega_c + omega_b + omega_nu) / h **2) + + emu = darkemu.de_interface.base_class() + + #Parameters cparam (numpy array) : Cosmological parameters (𝜔𝑏, 𝜔𝑐, Ω𝑑𝑒, ln(10^10 𝐴𝑠), 𝑛𝑠, 𝑤) + cparam = np.array([omega_b,omega_c,Omega_L,np.log(10 ** 10 * A_s),n_s,-1.]) + emu.set_cosmology(cparam) + self.emu = emu def _check_mdef_strict(self, mdef): if isinstance(mdef.Delta, str): diff --git a/pyccl/halos/profiles.py b/pyccl/halos/profiles.py index 02c057399..7975f394f 100644 --- a/pyccl/halos/profiles.py +++ b/pyccl/halos/profiles.py @@ -915,7 +915,6 @@ def _real(self, cosmo, r, M, a, mass_def): prof = np.squeeze(prof, axis=0) return prof - class HaloProfileHernquist(HaloProfile): """ Hernquist (1990ApJ...356..359H). @@ -1711,3 +1710,177 @@ def _Ns(self, M, a): M1 = 10.**(self.lM1_0 + self.lM1_p * (a - self.a_pivot)) alpha = self.alpha_0 + self.alpha_p * (a - self.a_pivot) return np.heaviside(M-M0, 1) * (np.fabs(M-M0) / M1)**alpha + + +class HaloProfileDK14(HaloProfile): + """ Einasto profile (1965TrAlm...5...87E). + + .. math:: + \\rho(r) = \\rho_0\\,\\exp(-2 ((r/r_s)^\\alpha-1) / \\alpha) + + where :math:`r_s` is related to the spherical overdensity + halo radius :math:`R_\\Delta(M)` through the concentration + parameter :math:`c(M)` as + + .. math:: + R_\\Delta(M) = c(M)\\,r_s + + and the normalization :math:`\\rho_0` is the mean density + within the :math:`R_\\Delta(M)` of the halo. The index + :math:`\\alpha` depends on halo mass and redshift, and we + use the parameterization of Diemer & Kravtsov + (arXiv:1401.1216). + + By default, this profile is truncated at :math:`r = R_\\Delta(M)`. + + Args: + c_M_relation (:obj:`Concentration`): concentration-mass + relation to use with this profile. + truncated (bool): set to `True` if the profile should be + truncated at :math:`r = R_\\Delta` (i.e. zero at larger + radii. + alpha (float, 'cosmo'): Set the Einasto alpha parameter or set to + 'cosmo' to calculate the value from cosmology. 
Default: 'cosmo' + """ + name = 'DK14' + + def __init__(self, c_M_relation, hbf, pl, adjusted_2hterm=False ,force_to_2hterm=False, M_pivot=None, beta=4, gamma=8, be=1.0, se=1.5): + if not isinstance(c_M_relation, Concentration): + raise TypeError("c_M_relation must be of type `Concentration`)") + + self.cM = c_M_relation + self.hbf = hbf + self.pl = pl + self.force = force_to_2hterm + self.adjusted = adjusted_2hterm + self.M_pivot = M_pivot + self.beta = beta + self.gamma = gamma + self.be = be + self.se = se + super(HaloProfileDK14, self).__init__() + self.update_precision_fftlog(padding_hi_fftlog=1E2, + padding_lo_fftlog=1E-2, + n_per_decade=1000, + plaw_fourier=-2.) + + def update_parameters(self, alpha=None): + """Update any of the parameters associated with this profile. + Any parameter set to ``None`` won't be updated. + + Arguments + --------- + alpha : float, 'cosmo' + Profile shape parameter. Set to + 'cosmo' to calculate the value from cosmology + """ + if alpha is not None and alpha != self.alpha: + self.alpha = alpha + + def _get_cM(self, cosmo, M, a, mdef=None): + return self.cM.get_concentration(cosmo, M, a, mdef_other=mdef) + + def _get_alpha_rt(self, cosmo, M, a, mdef): + if self.alpha == 'cosmo': + mdef_vir = MassDef('vir', 'matter') + Mvir = mdef.translate_mass(cosmo, M, a, mdef_vir) + sM = sigmaM(cosmo, Mvir, a) + nu = 1.686 / sM + alpha = 0.155 + 0.0095 * nu * nu + + mdef_200m = MassDef('200m', 'matter') + M200m = mdef.translate_mass(cosmo, M, a, mdef_200m) + R_200m = mass_def.get_radius(cosmo, M200m, a) / a + r_t = (1.9 - 0.18 *nu) * R_200m + + else: + alpha = np.full_like(M, self.alpha) + return alpha, r_t + + + def _norm(self, M, Rs, c, alpha): + # Einasto normalization from mass, radius, concentration and alpha + return M / (np.pi * Rs**3 * 2**(2-3/alpha) * alpha**(-1+3/alpha) + * np.exp(2/alpha) + * gamma(3/alpha) * gammainc(3/alpha, 2/alpha*c**alpha)) + + def _real(self, cosmo, r, M, a, mass_def): + r_use = np.atleast_1d(r) + M_use = np.atleast_1d(M) + + # Comoving virial radius + R_M = mass_def.get_radius(cosmo, M_use, a) / a + c_M = self._get_cM(cosmo, M_use, a, mdef=mass_def) + R_s = R_M / c_M + + mdef_vir = MassDef('vir', 'matter') + Mvir = mass_def.translate_mass(cosmo, M_use, a, mdef_vir) + Rvir = mdef_vir.get_radius(cosmo, Mvir, a) / a + sM = sigmaM(cosmo, Mvir, a) + nu = 1.686 / sM + alpha = 0.155 + 0.0095 * nu * nu + + mdef_200m = MassDef(200, 'matter') + M200m = mass_def.translate_mass(cosmo, M_use, a, mdef_200m) + R_200m = mdef_200m.get_radius(cosmo, M200m, a) / a + r_t = (1.9 - 0.18 * nu) * R_200m + + #alpha, r_t = self._get_alpha(cosmo, M_use, a, mass_def) + + norm = self._norm(M_use, R_s, c_M, alpha) + + x = r_use[None, :] / R_s[:, None] + prof_in = norm[:, None] * np.exp(-2. 
* (x**alpha[:, None] - 1) / + alpha[:, None]) + + beta = self.beta + gamma = self.gamma + + f_trans = (1 + (r_use[None, :]/r_t[:, None])**beta) **(-gamma/beta) + + rho_m = cosmo.rho_x(1, 'matter', is_comoving=True) + + bias = self.hbf.get_halo_bias(cosmo, M_use, a) + xi_mm = cosmo.correlation_3d(a, r_use, p_of_k_a=None) + prof_corr = rho_m * (bias[:, None] * xi_mm[None, :] + 1) + + if self.pl: + be = self.be + se = self.se + + if self.force: + prof_out = rho_m * (bias[:, None] * xi_mm[None, :] * (1 + be * (r_use[None, :]/(5*R_200m[:, None])) ** (-se)) + 1) + + prof = prof_in * f_trans + prof_out + + + else: + if self.adjusted: + prof_out = rho_m * (bias[:, None] * xi_mm[None, :] * (be * (r_use[None, :]/(5*R_200m[:, None])) ** (-se)) + 1) + + prof = prof_in * f_trans + prof_out + + else: + prof_out = rho_m * (be * (r_use[None, :]/(5*R_200m[:, None])) ** (-se) + 1) + + prof = prof_in * f_trans + prof_out + + # r > 9 R_vir + if self.M_pivot == None: + prof[r_use[None, :] > 9 * Rvir[:, None]] = prof_corr[r_use[None, :] > 9 * Rvir[:, None]] + + else: + M_pivot_vir = mass_def.translate_mass(cosmo, self.M_pivot, a, mdef_vir) + R_pivot = mdef_vir.get_radius(cosmo, M_pivot_vir, a) / a + prof[r_use[None, :] > 9 * R_pivot] = prof_corr[r_use[None, :] > 9 * R_pivot] + + else: + prof = prof_in * f_trans + prof_corr + + if np.ndim(r) == 0: + prof = np.squeeze(prof, axis=-1) + if np.ndim(M) == 0: + prof = np.squeeze(prof, axis=0) + + return prof + diff --git a/pyccl/halos/profiles_2pt.py b/pyccl/halos/profiles_2pt.py index 5731deaa3..a802679f3 100644 --- a/pyccl/halos/profiles_2pt.py +++ b/pyccl/halos/profiles_2pt.py @@ -34,7 +34,7 @@ def update_parameters(self, r_corr=None): self.r_corr = r_corr def fourier_2pt(self, prof, cosmo, k, M, a, - prof2=None, mass_def=None): + prof2=None, mass_def=None, diag=True): """ Return the Fourier-space two-point moment between two profiles. @@ -59,6 +59,8 @@ def fourier_2pt(self, prof, cosmo, k, M, a, an auto-correlation, and `prof` will be used as `prof2`. mass_def (:obj:`~pyccl.halos.massdef.MassDef`): a mass definition object. + diag (bool): If True, both halo profiles depend on the same k. If + False, they will depend on k and k', respectively. Default True. Returns: float or array_like: second-order Fourier-space @@ -81,7 +83,14 @@ def fourier_2pt(self, prof, cosmo, k, M, a, uk2 = prof2.fourier(cosmo, k, M, a, mass_def=mass_def) - return uk1 * uk2 * (1 + self.r_corr) + if (diag is True) or (isinstance(k, float)): + output = uk1 * uk2 * (1 + self.r_corr) + elif isinstance(M, float): + output = uk1[:, None] * uk2[None, :] * (1 + self.r_corr) + else: + output = uk1[:, :, None] * uk2[:, None, :] * (1 + self.r_corr) + + return output class Profile2ptHOD(Profile2pt): @@ -97,7 +106,7 @@ class Profile2ptHOD(Profile2pt): :class:`~pyccl.halos.profiles.HaloProfileHOD`. """ def fourier_2pt(self, prof, cosmo, k, M, a, - prof2=None, mass_def=None): + prof2=None, mass_def=None, diag=True): """ Returns the Fourier-space two-point moment for the HOD profile. @@ -116,6 +125,9 @@ def fourier_2pt(self, prof, cosmo, k, M, a, are allowed in this case. mass_def (:obj:`~pyccl.halos.massdef.MassDef`): a mass definition object. + diag (bool): If True, both halo profiles depend on the same k. If + False, they will depend on k and k', respectively and we will + approximate to . Default True. 
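To make the new `diag` switch concrete: with the default `diag=True` both profiles are evaluated at the same wavenumbers and the product keeps the usual `(nM, nk)` shape, while `diag=False` broadcasts them over k and k' separately, giving `(nM, nk, nk)`. A small shape check, assuming an NFW profile and the base `Profile2pt` defined in this file:

# Editor's sketch: output shapes of Profile2pt.fourier_2pt with diag=True/False.
import numpy as np
import pyccl as ccl

cosmo = ccl.Cosmology(Omega_c=0.25, Omega_b=0.05, h=0.67,
                      n_s=0.96, sigma8=0.81)
mdef = ccl.halos.MassDef200m()
prof = ccl.halos.HaloProfileNFW(ccl.halos.ConcentrationDuffy08(mdef))
p2pt = ccl.halos.Profile2pt()

k = np.geomspace(1e-2, 1.0, 8)      # Mpc^-1
M = np.geomspace(1e12, 1e15, 5)     # Msun

uk2_diag = p2pt.fourier_2pt(prof, cosmo, k, M, 1.0, mass_def=mdef)              # (5, 8)
uk2_full = p2pt.fourier_2pt(prof, cosmo, k, M, 1.0, mass_def=mdef, diag=False)  # (5, 8, 8)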
Returns: float or array_like: second-order Fourier-space @@ -131,4 +143,13 @@ def fourier_2pt(self, prof, cosmo, k, M, a, if prof2 is not prof: raise ValueError("prof2 must be the same as prof") - return prof._fourier_variance(cosmo, k, M, a, mass_def) + if (diag is True) or (isinstance(k, float)): + output = prof._fourier_variance(cosmo, k, M, a, mass_def) + elif isinstance(M, float): + uk1 = prof.fourier(cosmo, k, M, a, mass_def=mass_def) + output = uk1[:, None] * uk1[None, :] * (1 + self.r_corr) + else: + uk1 = prof.fourier(cosmo, k, M, a, mass_def=mass_def) + output = uk1[:, :, None] * uk1[:, None, :] * (1 + self.r_corr) + + return output From d2c9b6a2762193ff870829f57d5c419bedf46374 Mon Sep 17 00:00:00 2001 From: RyoTerasawa Date: Mon, 1 May 2023 21:52:02 +0900 Subject: [PATCH 17/17] update darkemulator.py --- pyccl/darkemulator.py | 1349 +---------------------------------------- 1 file changed, 28 insertions(+), 1321 deletions(-) diff --git a/pyccl/darkemulator.py b/pyccl/darkemulator.py index 4e81d53bd..8e1483acb 100755 --- a/pyccl/darkemulator.py +++ b/pyccl/darkemulator.py @@ -10,13 +10,11 @@ from .tk3d import Tk3D from dark_emulator import darkemu -#from dark_emulator import model_hod from scipy import integrate from scipy.interpolate import InterpolatedUnivariateSpline as ius -from scipy.special import sici from . import halos -def darkemu_Tk3D_SSC(cosmo, prof1, deltah=0.02, +def darkemu_Pgm_Tk3D_SSC(cosmo, prof1, deltah=0.02, log10Mh_min=12.0,log10Mh_max=15.9, normprof1=False, lk_arr=None, a_arr=None, @@ -28,26 +26,11 @@ def darkemu_Tk3D_SSC(cosmo, prof1, deltah=0.02, two pairs of quantities being correlated. Each response is calculated as: - .. math:: - \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = - \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) - P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) - P_{u,v}(k) - - where the :math:`I^a_b` are defined in the documentation - of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2` and - :math:`b_{u}` and :math:`b_{v}` are the linear halo biases for quantities - :math:`u` and :math:`v`, respectively (zero if they are not clustering). - Args: cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. - hmc (:class:`HMCalculator`): a halo model calculator. prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo profile (corresponding to :math:`u_1` above. - p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to - be used as the linear matter power spectrum. If `None`, - the power spectrum stored within `cosmo` will be used. a_arr (array): an array holding values of the scale factor at which the trispectrum should be calculated for interpolation. If `None`, the internal values used @@ -164,9 +147,6 @@ def darkemu_Tk3D_SSC(cosmo, prof1, deltah=0.02, Mlist, dndm_emu = emu.get_dndm(z) dndlog10m_emu = ius(np.log10(Mlist/h), dndm_emu * Mlist * np.log(10) * h ** 3) - # mass function - #dndlog10m_emu = ius(Mfor_hmf ,hmf_DE.get_mass_function(cosmo, 10**Mfor_hmf ,aa)) # Mpc^-3 #ius(np.log10(Mlist), dndm_emu * Mlist * np.log(10) * h ** 3) - if Mh[0] < 12.0: # Msol/h Pth[0] = emu.get_phm_massthreshold(k_emu,10**12,z) * (1/h)**3 nths12 = emu.mass_to_dens(10**12,z) * h**3 @@ -230,16 +210,16 @@ def darkemu_Tk3D_SSC(cosmo, prof1, deltah=0.02, dPhm_db_nfix = (26. / 21.) 
* (np.array(Pnth_hp) - np.array(Pnth_hm)) / \ (2 * (np.log(Dp[ia]) - np.log(Dm[ia]))) # Mpc^3 - dnP_hm_db_emu = nth_mat * (dPhm_db_nfix + b1L_th_mat * np.array(Pbin)) # Dless + dnP_hm_db_emu = nth_mat * (dPhm_db_nfix + b1L_th_mat * np.array(Pbin)) # stitching k_switch = 0.08 # [h/Mpc] kmin = 1e-2 # [h/Mpc] - dnP_gm_db = integrate.romb(dprof_dlogM * (dnP_hm_db_emu), dx = dM, axis = 0) #Dless + dnP_gm_db = integrate.romb(dprof_dlogM * (dnP_hm_db_emu), dx = dM, axis = 0) - Pgm_growth = dnP_gm_db / ng - bgL * Pgm # Dless + Pgm_growth = dnP_gm_db / ng - bgL * Pgm - Pgm_d = -1. / 3. * np.gradient(np.log(Pgm)) / np.gradient(np.log(k_use)) * Pgm #Dless + Pgm_d = -1. / 3. * np.gradient(np.log(Pgm)) / np.gradient(np.log(k_use)) * Pgm dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) @@ -281,7 +261,7 @@ def darkemu_Tk3D_SSC(cosmo, prof1, deltah=0.02, extrap_order_hik=extrap_order_hik, is_logt=use_log) return tk3d, pk2d -def darkemu_pkarr_SSC(cosmo, prof_hod, deltah=0.02, +def darkemu_Pgm_resp(cosmo, prof_hod, deltah=0.02, log10Mh_min=12.0,log10Mh_max=15.9, log10Mh_pivot=12.5, normprof_hod=False, k_max=2.0, @@ -289,53 +269,7 @@ def darkemu_pkarr_SSC(cosmo, prof_hod, deltah=0.02, extrap_order_lok=1, extrap_order_hik=1, use_log=False, highk_HM=True, surface=False, highz_HMresp=True): - """ Returns a 2D array with shape `[na,nk]` describing the - first function :math:`f_1(k,a)` that makes up a factorizable - trispectrum :math:`T(k_1,k_2,a)=f_1(k_1,a)f_2(k_2,a)` The response is - calculated as: - - .. math:: - \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = - \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) - P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) - P_{u,v}(k) - - where the :math:`I^a_b` are defined in the documentation - of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2` and - :math:`b_{u}` and :math:`b_{v}` are the linear halo biases for quantities - :math:`u` and :math:`v`, respectively (zero if they are not clustering). - - Args: - cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. - hmc (:class:`HMCalculator`): a halo model calculator. - prof_hod (:class:`~pyccl.halos.profiles.HaloProfile`): halo - profile (corresponding to :math:`u_1` above. - p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to - be used as the linear matter power spectrum. If `None`, - the power spectrum stored within `cosmo` will be used. - a_arr (array): an array holding values of the scale factor - at which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - lk_arr (array): an array holding values of the natural - logarithm of the wavenumber (in units of Mpc^-1) at - which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - extrap_order_lok (int): extrapolation order to be used on - k-values below the minimum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - extrap_order_hik (int): extrapolation order to be used on - k-values above the maximum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - use_log (bool): if `True`, the trispectrum will be - interpolated in log-space (unless negative or - zero values are found). - - Returns: - :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. 
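Reading the code above, the P_gm response is assembled from a growth part (the n-fixed emulator derivative plus the Lagrangian-bias term, minus b_g^L times P_gm) and a dilation part, -(1/3) dln P_gm / dln k times P_gm, and `darkemu_Pgm_Tk3D_SSC` returns both the SSC trispectrum and the P_gm `Pk2D`. A hedged calling sketch; the import path follows this file's location (`pyccl/darkemulator.py` in this branch), the external `dark_emulator` package must be installed, and the HOD profile stands in for whatever galaxy profile is used in practice.

# Editor's sketch: SSC trispectrum for P_gm built from Dark Emulator responses.
import pyccl as ccl
from pyccl.darkemulator import darkemu_Pgm_Tk3D_SSC

cosmo = ccl.Cosmology(Omega_c=0.25, Omega_b=0.05, h=0.67,
                      n_s=0.96, A_s=2.1e-9)
mdef = ccl.halos.MassDef200m()
prof_hod = ccl.halos.HaloProfileHOD(ccl.halos.ConcentrationDuffy08(mdef))

# Returns the SSC effective trispectrum and the P_gm Pk2D used to build it.
tk_ssc, pk_gm = darkemu_Pgm_Tk3D_SSC(cosmo, prof_hod, deltah=0.02,
                                     normprof1=True)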
- """ if lk_arr is None: status = 0 nk = lib.get_pk_spline_nk(cosmo.cosmo) @@ -355,7 +289,6 @@ def darkemu_pkarr_SSC(cosmo, prof_hod, deltah=0.02, h = cosmo["h"] k_emu = k_use / h # [h/Mpc] - #Omega_m = cosmo["Omega_b"] + cosmo["Omega_c"] + 0.00064/(h**2) cosmo.compute_linear_power() pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') @@ -442,9 +375,6 @@ def darkemu_pkarr_SSC(cosmo, prof_hod, deltah=0.02, pk12[ia, :] = halomod_pk_arr[ia, :] print("use halo model for z={:.2f}>1.48".format(z)) else: - # mass function - #Mlist, dndm_emu = emu.get_dndm(z) - #dndlog10m_emu = ius(np.log10(Mlist/h), dndm_emu * Mlist * np.log(10) * h ** 3) # mass function dndlog10m_emu = ius(logMfor_hmf ,hmf_DE.get_mass_function(cosmo, 10**logMfor_hmf ,aa)) # Mpc^-3 @@ -470,7 +400,6 @@ def darkemu_pkarr_SSC(cosmo, prof_hod, deltah=0.02, Pth[m] = emu.get_phm_massthreshold(k_emu, Mh[m], z) * (1/h)**3 Pbin[m] = emu.get_phm_mass(k_emu, Mh[m], z) * (1/h)**3 - #nths[m] = mass_to_dens(dndlog10m_emu, cosmo, M[m]) nths[m] = emu.mass_to_dens(Mh[m] ,z) * h**3 if highz_HMresp and z > 0.5: @@ -484,11 +413,9 @@ def darkemu_pkarr_SSC(cosmo, prof_hod, deltah=0.02, Pnth_hp[m] = emu_p.get_phm(k_emu*(h/hp), np.log10(nths[m]*(1/hp)**3), z) * (1/hp)**3 Pnth_hm[m] = emu_m.get_phm(k_emu*(h/hm), np.log10(nths[m]*(1/hm)**3), z) * (1/hm)**3 - #logM1 = np.linspace(logM[m], np.log10(10**16./cosmo["h"]), 2**5+1) logM1 = np.linspace(logM[m], logM[-1], 2**5+1) dlogM1 = logM[1] - logM[0] - #b1_th_tink[m] = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, (10 ** logM1), aa), \ - # dx = dlogM1) / nths[m] + b1_th_tink[m] = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, (10 ** logM1), aa), \ dx = dlogM1) / integrate.romb(dndlog10m_emu(logM1), dx = dlogM1) @@ -512,9 +439,7 @@ def darkemu_pkarr_SSC(cosmo, prof_hod, deltah=0.02, prof_Mp = prof_hod.fourier(cosmo, k_use, (10 ** logMps), aa, mass_def) prof_Mm = prof_hod.fourier(cosmo, k_use, (10 ** logMms), aa, mass_def) prof = prof_hod.fourier(cosmo, k_use, M, aa, mass_def) - #uk = prof_hod._usat_fourier(cosmo, k_use,(10 ** M), aa, mass_def) - #rho_cr = 2.775*h**2*1e11 # M_solMpc^-3 (w/o h in units) - #factor_mat = np.tile(10**M/(Omega_m*rho_cr), (len(k_emu), 1)).transpose() + dprof_dlogM = (prof_Mp - prof_Mm) / (2 * dlogM)#*np.log(10)) nth_mat = np.tile(nths, (len(k_use), 1)).transpose() @@ -531,18 +456,15 @@ def darkemu_pkarr_SSC(cosmo, prof_hod, deltah=0.02, b1L_th_mat = np.tile(b1_th_tink -1, (len(k_emu), 1)).transpose() - #dPhm_db_nfix = (26. / 21.) * (np.array(Pnth_hp) - np.array(Pnth_hm)) / \ - # (2 * (np.log(Dp[ia]) - np.log(Dm[ia]))) # Mpc^3 - dPhm_db_nfix = (26. / 21.) 
* np.log(np.array(Pnth_hp) / np.array(Pnth_hm)) * np.array(Pth) / \ (2 * (np.log(Dp[ia]) - np.log(Dm[ia]))) # Mpc^3 - dnP_hm_db_emu = nth_mat * (dPhm_db_nfix + b1L_th_mat * np.array(Pbin)) # Dless + dnP_hm_db_emu = nth_mat * (dPhm_db_nfix + b1L_th_mat * np.array(Pbin)) Pgm = integrate.romb(dprof_dlogM * (nth_mat * np.array(Pth)), \ dx = dlogM, axis = 0) / ng - dnP_gm_db = integrate.romb(dprof_dlogM * (dnP_hm_db_emu), dx = dlogM, axis = 0) #Dless + dnP_gm_db = integrate.romb(dprof_dlogM * (dnP_hm_db_emu), dx = dlogM, axis = 0) if surface: surface_pgm[ia, :] = ((prof[0] * nth_mat[0] * np.array(Pth)[0]) - (prof[-1] * nth_mat[-1] * np.array(Pth))[-1]) / ng @@ -551,9 +473,9 @@ def darkemu_pkarr_SSC(cosmo, prof_hod, deltah=0.02, surface_resp[ia, :] = (prof[0] * dnP_hm_db_emu[0]) - (prof[-1] * dnP_hm_db_emu[-1]) dnP_gm_db += surface_resp[ia, :] - Pgm_growth = dnP_gm_db / ng - bgL * Pgm # Dless + Pgm_growth = dnP_gm_db / ng - bgL * Pgm - Pgm_d = -1. / 3. * np.gradient(np.log(Pgm)) / np.gradient(np.log(k_use)) * Pgm #Dless + Pgm_d = -1. / 3. * np.gradient(np.log(Pgm)) / np.gradient(np.log(k_use)) * Pgm dPgm_db_emu = (Pgm_growth + Pgm_d) @@ -588,11 +510,6 @@ def darkemu_pkarr_SSC(cosmo, prof_hod, deltah=0.02, dpk12_halomod[ia, :] * (1 - np.exp(-k_use/k_HM)) dPgm_db[k_use > k_max] = dpk12_halomod[ia, k_use > k_max] - #dpk_HM = dpk12_halomod[ia, :] - bgE * Pgm - #dPgm_db = dPgm_db * np.exp(-k_use/k_HM) + \ - # dpk_HM * (1 - np.exp(-k_use/k_HM)) - #dPgm_db[k_use > k_max] = dpk_HM[k_use > k_max] - dpk12[ia, :] = dPgm_db if use_log: @@ -604,78 +521,22 @@ def darkemu_pkarr_SSC(cosmo, prof_hod, deltah=0.02, use_log = False else: dpk12 = np.log(dpk12) - - #if use_log: - # if np.any(dpk12 <= 0): - ## warnings.warn( - # "Some values were not positive. " - # "The negative values are substituted by 1e-5.", - # category=CCLWarning) - # np.where(dpk12 <= 0, 1e-5, dpk12) - # - # dpk12 = np.log(dpk12) pk2d = Pk2D(a_arr=a_arr, lk_arr=lk_arr, pk_arr=pk12, extrap_order_lok=extrap_order_lok, extrap_order_hik=extrap_order_hik, cosmo=cosmo, is_logp=False) - return dpk12, pk2d, surface_pgm, surface_resp + return dpk12, pk2d -def darkemu_Pgg_SSC_zresp(cosmo, prof_hod, deltaz=0.1, +def darkemu_Pgg_resp_zresp(cosmo, prof_hod, deltaz=0.1, log10Mh_min=12.0,log10Mh_max=15.9, log10Mh_pivot=12.5, normprof_hod=False, lk_arr=None, a_arr=None, extrap_order_lok=1, extrap_order_hik=1, use_log=False, surface=False): - """ Returns a 2D array with shape `[na,nk]` describing the - first function :math:`f_1(k,a)` that makes up a factorizable - trispectrum :math:`T(k_1,k_2,a)=f_1(k_1,a)f_2(k_2,a)` The response is - calculated as: - - .. math:: - \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = - \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) - P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) - P_{u,v}(k) - - where the :math:`I^a_b` are defined in the documentation - of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2` and - :math:`b_{u}` and :math:`b_{v}` are the linear halo biases for quantities - :math:`u` and :math:`v`, respectively (zero if they are not clustering). - - Args: - cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. - hmc (:class:`HMCalculator`): a halo model calculator. - prof_hod (:class:`~pyccl.halos.profiles.HaloProfile`): halo - profile (corresponding to :math:`u_1` above. - p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to - be used as the linear matter power spectrum. If `None`, - the power spectrum stored within `cosmo` will be used. 
- a_arr (array): an array holding values of the scale factor - at which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - lk_arr (array): an array holding values of the natural - logarithm of the wavenumber (in units of Mpc^-1) at - which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - extrap_order_lok (int): extrapolation order to be used on - k-values below the minimum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - extrap_order_hik (int): extrapolation order to be used on - k-values above the maximum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - use_log (bool): if `True`, the trispectrum will be - interpolated in log-space (unless negative or - zero values are found). - - Returns: - :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. - """ if lk_arr is None: status = 0 nk = lib.get_pk_spline_nk(cosmo.cosmo) @@ -695,7 +556,6 @@ def darkemu_Pgg_SSC_zresp(cosmo, prof_hod, deltaz=0.1, h = cosmo["h"] k_emu = k_use / h # [h/Mpc] - #Omega_m = cosmo["Omega_b"] + cosmo["Omega_c"] + 0.00064/(h**2) cosmo.compute_linear_power() pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') @@ -760,13 +620,8 @@ def darkemu_Pgg_SSC_zresp(cosmo, prof_hod, deltaz=0.1, for m in range(nM): nths[m] = mass_to_dens(dndlog10m_emu, cosmo, M[m]) - #nths[m] = emu.mass_to_dens(Mh[m] ,z) * h**3 - - #logM1 = np.linspace(logM[m], np.log10(10**16./cosmo["h"]), 2**5+1) logM1 = np.linspace(logM[m], logM[-1], 2**5+1) dlogM1 = logM[1] - logM[0] - #b1_th_tink[m] = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, (10 ** logM1), aa), \ - # dx = dlogM1) / nths[m] b1_th_tink[m] = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, (10 ** logM1), aa), \ dx = dlogM1) / integrate.romb(dndlog10m_emu(logM1), dx = dlogM1) @@ -847,24 +702,7 @@ def darkemu_Pgg_SSC_zresp(cosmo, prof_hod, deltaz=0.1, resp_2h = integrate.romb( resp_2h_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM)/ (ng ** 2) - #resp_2h_nfix_int = list() - #resp_2h_thbin_int = list() - #for m in range(nM): - # resp_2h_nfix_int.append(integrate.romb( - # dPhh_db_nfix[m] * nth_mat * dprof_dlogM, axis=0, dx=dlogM)) - # resp_2h_thbin_int.append(integrate.romb( - # (2 * b1L_th_mat * Pth_bin[m]) * nth_mat * dprof_dlogM, axis=0, dx=dlogM)) - #resp_2h_nfix_int = np.array(resp_2h_nfix_int) - #resp_2h_thbin_int = np.array(resp_2h_thbin_int) - #resp_2h_nfix = integrate.romb( - #resp_2h_nfix_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM)/ (ng ** 2) - - #resp_2h_thbin = integrate.romb( - #resp_2h_thbin_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM)/ (ng ** 2) - - #resp_2h = resp_2h_nfix + resp_2h_thbin - if surface: surface1_thbin = (((prof[-1] * nth_mat[-1]) ** 2 * 2 * b1L_th_mat[-1] * Pth_bin[-1,-1]) \ - ((prof[0] * nth_mat[0]) ** 2 * 2 * b1L_th_mat[0] * Pth_bin[0,0]) \ @@ -946,64 +784,17 @@ def darkemu_Pgg_SSC_zresp(cosmo, prof_hod, deltaz=0.1, extrap_order_hik=extrap_order_hik, cosmo=cosmo, is_logp=False) - #return dpk12, pk2d, Gresp2h_nfix, Gresp2h_thbin, Gresp1h, Pgg_2h, Pgg_1h - return dpk12, pk2d, Gresp2h, Gresp1h, Pgg_2h, Pgg_1h, surface_pgg, surface_resp1, surface_resp2 + return dpk12, pk2d -def darkemu_Pgg_SSC_Asresp(cosmo, prof_hod, deltalnAs=0.03, +def darkemu_Pgg_resp_Asresp(cosmo, prof_hod, deltalnAs=0.03, log10Mh_min=12.0,log10Mh_max=15.9, log10Mh_pivot=12.5, normprof_hod=False, lk_arr=None, a_arr=None, extrap_order_lok=1, extrap_order_hik=1, use_log=False, surface=False): - """ Returns a 
2D array with shape `[na,nk]` describing the - first function :math:`f_1(k,a)` that makes up a factorizable - trispectrum :math:`T(k_1,k_2,a)=f_1(k_1,a)f_2(k_2,a)` The response is - calculated as: - - .. math:: - \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = - \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) - P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) - P_{u,v}(k) - - where the :math:`I^a_b` are defined in the documentation - of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2` and - :math:`b_{u}` and :math:`b_{v}` are the linear halo biases for quantities - :math:`u` and :math:`v`, respectively (zero if they are not clustering). - - Args: - cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. - hmc (:class:`HMCalculator`): a halo model calculator. - prof_hod (:class:`~pyccl.halos.profiles.HaloProfile`): halo - profile (corresponding to :math:`u_1` above. - p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to - be used as the linear matter power spectrum. If `None`, - the power spectrum stored within `cosmo` will be used. - a_arr (array): an array holding values of the scale factor - at which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - lk_arr (array): an array holding values of the natural - logarithm of the wavenumber (in units of Mpc^-1) at - which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - extrap_order_lok (int): extrapolation order to be used on - k-values below the minimum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - extrap_order_hik (int): extrapolation order to be used on - k-values above the maximum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - use_log (bool): if `True`, the trispectrum will be - interpolated in log-space (unless negative or - zero values are found). - - Returns: - :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. 
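The two renamed functions in this region compute the same P_gg response through different finite differences: `darkemu_Pgg_resp_zresp` perturbs redshift by `deltaz`, while `darkemu_Pgg_resp_Asresp` perturbs ln A_s by `deltalnAs`; after this patch both return just the response array and the P_gg `Pk2D`. A hedged sketch comparing the two routes, with the same import-path and package assumptions as the previous darkemulator example:

# Editor's sketch: growth response via z-variation vs ln(A_s)-variation.
import numpy as np
import pyccl as ccl
from pyccl.darkemulator import darkemu_Pgg_resp_zresp, darkemu_Pgg_resp_Asresp

cosmo = ccl.Cosmology(Omega_c=0.25, Omega_b=0.05, h=0.67,
                      n_s=0.96, A_s=2.1e-9)
mdef = ccl.halos.MassDef200m()
prof_hod = ccl.halos.HaloProfileHOD(ccl.halos.ConcentrationDuffy08(mdef))

a_arr = np.array([0.5, 0.8, 1.0])
resp_z, pk_gg_z = darkemu_Pgg_resp_zresp(cosmo, prof_hod, deltaz=0.1,
                                         a_arr=a_arr)
resp_As, pk_gg_As = darkemu_Pgg_resp_Asresp(cosmo, prof_hod, deltalnAs=0.03,
                                            a_arr=a_arr)
# Each response has shape (len(a_arr), nk), with nk set by lk_arr or CCL's
# internal spline; comparing the two routes cross-checks the emulator derivatives.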
- """ if lk_arr is None: status = 0 nk = lib.get_pk_spline_nk(cosmo.cosmo) @@ -1078,13 +869,9 @@ def darkemu_Pgg_SSC_Asresp(cosmo, prof_hod, deltalnAs=0.03, for m in range(nM): nths[m] = mass_to_dens(dndlog10m_emu, cosmo, M[m]) - #nths[m] = emu.mass_to_dens(Mh[m] ,z) * h**3 - - #logM1 = np.linspace(logM[m], np.log10(10**16./cosmo["h"]), 2**5+1) logM1 = np.linspace(logM[m], logM[-1], 2**5+1) dlogM1 = logM[1] - logM[0] - #b1_th_tink[m] = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, (10 ** logM1), aa), \ - # dx = dlogM1) / nths[m] + b1_th_tink[m] = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, (10 ** logM1), aa), \ dx = dlogM1) / integrate.romb(dndlog10m_emu(logM1), dx = dlogM1) @@ -1165,23 +952,6 @@ def darkemu_Pgg_SSC_Asresp(cosmo, prof_hod, deltalnAs=0.03, resp_2h = integrate.romb( resp_2h_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM)/ (ng ** 2) - #resp_2h_nfix_int = list() - #resp_2h_thbin_int = list() - #for m in range(nM): - # resp_2h_nfix_int.append(integrate.romb( - # dPhh_db_nfix[m] * nth_mat * dprof_dlogM, axis=0, dx=dlogM)) - # resp_2h_thbin_int.append(integrate.romb( - # (2 * b1L_th_mat * Pth_bin[m]) * nth_mat * dprof_dlogM, axis=0, dx=dlogM)) - #resp_2h_nfix_int = np.array(resp_2h_nfix_int) - #resp_2h_thbin_int = np.array(resp_2h_thbin_int) - - #resp_2h_nfix = integrate.romb( - #resp_2h_nfix_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM)/ (ng ** 2) - - #resp_2h_thbin = integrate.romb( - #resp_2h_thbin_int * nth_mat * dprof_dlogM, axis=0, dx=dlogM)/ (ng ** 2) - - #resp_2h = resp_2h_nfix + resp_2h_thbin if surface: surface1_thbin = (((prof[-1] * nth_mat[-1]) ** 2 * 2 * b1L_th_mat[-1] * Pth_bin[-1,-1]) \ @@ -1264,59 +1034,16 @@ def darkemu_Pgg_SSC_Asresp(cosmo, prof_hod, deltalnAs=0.03, extrap_order_hik=extrap_order_hik, cosmo=cosmo, is_logp=False) - #return dpk12, pk2d, Gresp2h_nfix, Gresp2h_thbin, Gresp1h, Pgg_2h, Pgg_1h - return dpk12, pk2d, Gresp2h, Gresp1h, Pgg_2h, Pgg_1h, surface_pgg, surface_resp - + return dpk12, pk2d -def darkemu_pgg(cosmo, prof_hod, +def darkemu_Pgg(cosmo, prof_hod, log10Mh_min=12.0,log10Mh_max=15.9, log10Mh_pivot=12.5, normprof_hod=False, k_max=2.0, lk_arr=None, a_arr=None, extrap_order_lok=1, extrap_order_hik=1, use_log=False): - """ Returns a 2D array with shape `[na,nk]` describing the - first function :math:`f_1(k,a)` that makes up a factorizable - trispectrum :math:`T(k_1,k_2,a)=f_1(k_1,a)f_2(k_2,a)` The response is - calculated as: - - .. math:: - \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = - \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) - P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) - P_{u,v}(k) - - Args: - cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. - hmc (:class:`HMCalculator`): a halo model calculator. - prof_hod (:class:`~pyccl.halos.profiles.HaloProfile`): halo - profile (corresponding to :math:`u_1` above. - p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to - be used as the linear matter power spectrum. If `None`, - the power spectrum stored within `cosmo` will be used. - a_arr (array): an array holding values of the scale factor - at which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - lk_arr (array): an array holding values of the natural - logarithm of the wavenumber (in units of Mpc^-1) at - which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. 
- extrap_order_lok (int): extrapolation order to be used on - k-values below the minimum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - extrap_order_hik (int): extrapolation order to be used on - k-values above the maximum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - use_log (bool): if `True`, the trispectrum will be - interpolated in log-space (unless negative or - zero values are found). - - Returns: - :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. - """ if lk_arr is None: status = 0 nk = lib.get_pk_spline_nk(cosmo.cosmo) @@ -1436,57 +1163,16 @@ def darkemu_pgg(cosmo, prof_hod, extrap_order_hik=extrap_order_hik, cosmo=cosmo, is_logp=False) - return pk2d, pk12, pk12_1h, pk12_2h, bgE + return pk2d -def darkemu_pgg_massbin(cosmo, prof_hod, +def darkemu_Pgg_massbin(cosmo, prof_hod, log10Mh_min=12.0,log10Mh_max=15.9, log10Mh_pivot=12.5, normprof_hod=False, k_max=2.0, lk_arr=None, a_arr=None, extrap_order_lok=1, extrap_order_hik=1, use_log=False): - """ Returns a 2D array with shape `[na,nk]` describing the - first function :math:`f_1(k,a)` that makes up a factorizable - trispectrum :math:`T(k_1,k_2,a)=f_1(k_1,a)f_2(k_2,a)` The response is - calculated as: - - .. math:: - \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = - \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) - P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) - P_{u,v}(k) - - Args: - cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. - hmc (:class:`HMCalculator`): a halo model calculator. - prof_hod (:class:`~pyccl.halos.profiles.HaloProfile`): halo - profile (corresponding to :math:`u_1` above. - p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to - be used as the linear matter power spectrum. If `None`, - the power spectrum stored within `cosmo` will be used. - a_arr (array): an array holding values of the scale factor - at which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - lk_arr (array): an array holding values of the natural - logarithm of the wavenumber (in units of Mpc^-1) at - which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - extrap_order_lok (int): extrapolation order to be used on - k-values below the minimum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - extrap_order_hik (int): extrapolation order to be used on - k-values above the maximum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - use_log (bool): if `True`, the trispectrum will be - interpolated in log-space (unless negative or - zero values are found). - - Returns: - :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. 
- """ if lk_arr is None: status = 0 nk = lib.get_pk_spline_nk(cosmo.cosmo) @@ -1594,7 +1280,7 @@ def darkemu_pgg_massbin(cosmo, prof_hod, extrap_order_hik=extrap_order_hik, cosmo=cosmo, is_logp=False) - return pk2d, pk12, pk12_1h, pk12_2h + return pk2d def halomod_Tk3D_SSC(cosmo, prof1, @@ -1657,205 +1343,6 @@ def halomod_Tk3D_SSC(cosmo, prof1, extrap_order_hik=extrap_order_hik, is_logt=use_log) return tk3d -def Pth_hm_HM_linb(k, M, cosmo, dndlog10m_emu, nfw, rho_m, hbf, mass_def, a): - - logM1 = np.linspace(np.log10(M), np.log10(10**15.9/cosmo["h"]), 2**5+1) - dlogM1 = logM1[1] - logM1[0] - dens = integrate.romb(dndlog10m_emu(logM1), dx = dlogM1) - dndlog10m_func_mat = np.tile(dndlog10m_emu(logM1), (len(k), 1)).transpose() # M_sol,Mpc^-3 - - # 1 halo term - rho_h = nfw.fourier(cosmo, k, 10**logM1, a, mass_def) - P1h = integrate.romb(dndlog10m_func_mat * rho_h/rho_m, dx = dlogM1, axis=0)/dens - - # 2 halo term - cosmo.compute_linear_power() - pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') - pklin = pk2dlin.eval(k,a) - b1_th = integrate.romb(dndlog10m_func_mat * hbf.get_halo_bias(cosmo,(10 ** logM1), a)[:, None] , dx = dlogM1, axis=0)\ - /dens - P2h = b1_th * pklin - - P_HM = P1h + P2h - - return P_HM - - -def Pth_hm_lowmass_HM_linb(k, M, M_pivot, emu, cosmo, hmf, hbf, nfw, mass_def, a): - # pivot mass (Dark Emulator) - z = 1/a -1 - Pth_hm_pivot = emu.get_phm_massthreshold(k/cosmo["h"], M_pivot*cosmo["h"], z) / (cosmo["h"]**3) - - rho_m = ccl.rho_x(cosmo, a, "matter", is_comoving=True) - cosmo.compute_linear_power() - pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') - pklin = pk2dlin.eval(k,a) - - Mfor_hmf = np.linspace(8,17,600) # Msol - dndlog10m_emu = ius(Mfor_hmf ,hmf.get_mass_function(cosmo, 10**Mfor_hmf ,a)) # Mpc^-3 - - M1 = np.linspace(np.log10(M), np.log10(10**15.9/cosmo["h"]), 2**5+1) - dM1 = M1[1] - M1[0] - dens = integrate.romb(dndlog10m_emu(M1), dx = dM1) - dndlog10m_func_mat = np.tile(dndlog10m_emu(M1), (len(k), 1)).transpose() # M_sol,Mpc^-3 - - # 1 halo term - rho_h = nfw.fourier(cosmo, k, 10**M1, a, mass_def) - - P1h = integrate.romb(dndlog10m_func_mat * rho_h/rho_m, dx = dM1, axis=0)/dens - - # 2 halo term - b1_th = integrate.romb(dndlog10m_func_mat * hbf.get_halo_bias(cosmo,(10 ** M1), a)[:, None] , dx = dM1, axis=0)\ - /dens - - P2h = b1_th * pklin - - M1 = np.linspace(np.log10(M_pivot), np.log10(10**15.9/cosmo["h"]), 2**5+1) - dM1 = M1[1] - M1[0] - dens = integrate.romb(dndlog10m_emu(M1), dx = dM1) - - dndlog10m_func_mat = np.tile(dndlog10m_emu(M1), (len(k), 1)).transpose() # M_sol,Mpc^-3 - - rho_h_pivot = nfw.fourier(cosmo, k, 10**M1, a, mass_def) - - P1h_pivot = integrate.romb(dndlog10m_func_mat * rho_h_pivot/rho_m, dx = dM1, axis=0)/dens - - # 2 halo term - b1_th_pivot = integrate.romb(dndlog10m_func_mat * hbf.get_halo_bias(cosmo,(10 ** M1), a)[:, None] , dx = dM1, axis=0)\ - /dens - - P2h_pivot = b1_th_pivot * pklin - - P_HM = P1h + P2h - P_HM_pivot = P1h_pivot + P2h_pivot - - # rescaling - Pth_hm = Pth_hm_pivot * (P_HM/P_HM_pivot) - - return Pth_hm - -def Pth_hm_lowmass_BMO(k, M, M_pivot, emu, cosmo, dndlog10m_emu, hbf, cM, cM_vir, mass_def, rho_m, a, b1_th_tink=None): - M_use = np.atleast_1d(M) - k_use = np.atleast_1d(k) - P1h = np.zeros((len(M_use),len(k_use))) - if b1_th_tink is None: - b1_th = np.zeros(len(M_use)) - else: - b1_th = b1_th_tink - for i in range(len(M_use)): - logM1 = np.linspace(np.log10(M_use[i]), np.log10(10**15.9/cosmo["h"]), 2**5+1) - dlogM1 = logM1[1] - logM1[0] - M1 = 10**logM1 - dens = 
integrate.romb(dndlog10m_emu(logM1), dx = dlogM1) - if b1_th_tink is None: - b1_th[i] = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, M1, a), dx = dlogM1)\ - /dens - dndlog10m_func_mat = np.tile(dndlog10m_emu(logM1), (len(k_use), 1)).transpose() # M_sol,Mpc^-3 - - # 1 halo term - P1h_bin = (M1[:, None]/rho_m) * u_M(k_use, M1, cosmo, cM, cM_vir, mass_def, a) - - P1h[i] = integrate.romb(dndlog10m_func_mat * P1h_bin, dx = dlogM1, axis=0)/dens - - # pivot mass - Pth_pivot = emu.get_phm_massthreshold(k_use / cosmo["h"], M_pivot * cosmo["h"], 1./a -1) * (1/cosmo["h"])**3 - - logM1 = np.linspace(np.log10(M_pivot), np.log10(10**15.9/cosmo["h"]), 2**5+1) - dM1 = logM1[1] - logM1[0] - M1 = 10**logM1 - dens = integrate.romb(dndlog10m_emu(logM1), dx = dlogM1) - - b1_th_pivot = integrate.romb(dndlog10m_emu(logM1) * hbf.get_halo_bias(cosmo, M1, a), dx = dlogM1)\ - /dens - dndlog10m_func_mat = np.tile(dndlog10m_emu(logM1), (len(k_use), 1)).transpose() # M_sol,Mpc^-3 - - P1h_bin_pivot = (M1[:, None]/rho_m) * u_M(k_use, M1, cosmo, cM, cM_vir, mass_def, a) - - P1h_pivot = integrate.romb(dndlog10m_func_mat * P1h_bin_pivot, dx = dlogM1, axis=0)/dens - - # 2 halo term - cosmo.compute_linear_power() - pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') - pklin = pk2dlin.eval(k_use,a) - - P2h = b1_th[:, None] * pklin[None, :] - P2h_pivot = b1_th_pivot * pklin - - P_HM = P1h + P2h - P_HM_pivot = P1h_pivot + P2h_pivot - - # rescaling - Pth = Pth_pivot[None, :] * (P_HM/P_HM_pivot[None, :]) - - if np.ndim(k) == 0: - Pth = np.squeeze(Pth, axis=-1) - if np.ndim(M) == 0: - Pth = np.squeeze(Pth, axis=0) - - return Pth - -def Pbin_hm_lowmass_BMO_Mvector(k, M, M_pivot, emu, cosmo, hbf, cM, mass_def, a, tau_v): - M_use = np.atleast_1d(M) - k_use = np.atleast_1d(k) - - # pivot mass (Dark Emulator) - z = 1/a -1 - Pbin_hm_pivot = emu.get_phm_mass(k/cosmo["h"], M_pivot*cosmo["h"], z) / (cosmo["h"]**3) - - # 1 halo term - rho_m = ccl.rho_x(cosmo, a, "matter", is_comoving=True) - P1h = (M_use[:, None]/rho_m) * u_M(k_use, M_use, cosmo, cM, mass_def, a, tau_v) - P1h_pivot = (M_pivot/rho_m) * u_M(k_use, M_pivot, cosmo, cM, mass_def, a, tau_v) - - # 2 halo term - cosmo.compute_linear_power() - pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') - pklin = pk2dlin.eval(k,a) - b1 = hbf.get_halo_bias(cosmo, M_use, a) - b1_pivot = hbf.get_halo_bias(cosmo, M_pivot, a) - - P2h = b1[:, None] * pklin[None, :] - P2h_pivot = b1_pivot * pklin - - P_HM = P1h + P2h - P_HM_pivot = P1h_pivot + P2h_pivot - - # rescaling - Pbin_hm = Pbin_hm_pivot[None, :] * (P_HM/P_HM_pivot[None, :]) - - return Pbin_hm, P1h, P2h, P_HM, P1h_pivot, P2h_pivot, P_HM_pivot - -def Pbin_hm_lowmass_BMO(k, M, M_pivot, emu, cosmo, hbf, cM, cM_vir, mass_def, pk2dlin, rho_m ,a): - M_use = np.atleast_1d(M) - k_use = np.atleast_1d(k) - - # 1 halo term - P1h = (M_use[:, None]/rho_m) * u_M(k_use, M_use, cosmo, cM, cM_vir, mass_def, a) - P1h_pivot = (M_pivot/rho_m) * u_M(k_use, M_pivot, cosmo, cM, cM_vir, mass_def, a) - - # 2 halo term - pklin = pk2dlin.eval(k_use,a) - b1 = hbf.get_halo_bias(cosmo, M_use, a) - b1_pivot = hbf.get_halo_bias(cosmo, M_pivot, a) - - P2h = b1[:, None] * pklin[None, :] - P2h_pivot = b1_pivot * pklin - - P_HM = P1h + P2h - P_HM_pivot = P1h_pivot + P2h_pivot - - # rescaling - Pbin_pivot = emu.get_phm_mass(k_use / cosmo["h"], M_pivot * cosmo["h"], 1./a -1) * (1/cosmo["h"])**3 - - Pbin = Pbin_pivot[None, :] * (P_HM/P_HM_pivot[None, :]) - - if np.ndim(k) == 0: - Pbin = np.squeeze(Pbin, axis=-1) - if np.ndim(M) == 0: - Pbin = 
np.squeeze(Pbin, axis=0) - - return Pbin - def mass_to_dens(dndlog10m_emu, cosmo, mass_thre): logM1 = np.linspace(np.log10(mass_thre), np.log10(10**16./cosmo["h"]), 2**6+1) @@ -1872,92 +1359,16 @@ def dens_to_mass(dndlog10m_emu, cosmo, dens, nint=60):#:, integration="quad"): return 10**d_to_m_interp(-np.log(dens)) - -def u_M(k, M, cosmo, cM, cM_vir, mass_def, a): - M_use = np.atleast_1d(M) - k_use = np.atleast_1d(k) - - c = cM.get_concentration(cosmo, M_use, a) - c_vir = cM_vir.get_concentration(cosmo, M_use, a) - R = mass_def.get_radius(cosmo, M_use, a) / a # comoving halo radius - - r_s = R/c # scale radius from R_200m, c_200m - x = k_use[None, :] *r_s[:, None]/a - - tau_v = 2.6 - - tau1 = tau_v * c_vir - tau = tau1[:, None] - m_nfw = np.log(1 + c) - c/(1+c) - prefactor = tau/(4 * m_nfw[:, None] * (1+tau**2)**3 * x) - - Si, Ci = sici(x) - - F1 = 2 * (3 * tau**4 - 6 * tau**2 - 1) * P_fit(tau * x) - - F2 = -2 * tau * (tau**4 - 1) * x * Q_fit(tau * x) - - F3 = -2 * tau**2 * np.pi * np.exp(-tau*x) * ((tau**2 + 1) * x + 4 * tau) - - F4 = 2 * tau**3 * (np.pi - 2 * Si) * (4 * np.cos(x) + (tau**2 + 1) * x * np.sin(x)) - - F5 = 4 * tau**3 * Ci * (4 * np.sin(x) - (tau**2 + 1) * x * np.cos(x)) - - u_M = prefactor * (F1 + F2 + F3 + F4 + F5) - - if np.ndim(k) == 0: - u_M = np.squeeze(u_M, axis=-1) - if np.ndim(M) == 0: - u_M = np.squeeze(u_M, axis=0) - - return u_M - - -def P_fit(x): - a = 1.5652 - b = 3.38723 - c = 6.34891 - d = 0.817677 - e = -0.0895584 - f = 0.877375 - - gamma = 0.57721566 - - F1 = - (1/x + (b * x**e)/(c + (x - d)**2)) - F2 = (x**4/(x**4 + a**4))**f - - F3 = x * (gamma + np.log(x) - 1) - F4 = (a**4/(x**4 + a**4))**f - - return F1*F2 + F3*F4 - -def Q_fit(x): - a = 2.26901 - b = -2839.04 - c = 265.511 - d = -1.12459 - e = -2.90136 - f = 1.86475 - g = 1.52197 - - gamma = 0.57721566 - - F1 = 1/x**2 + (b * x**e)/(c + (x - d)**4) - F2 = (x**4/(x**4 + a**4))**g - - F3 = (gamma + np.log(x)) * (1 + x**2 / 2) - 3/4 * x**2 - F4 = (a**4/(x**4 + a**4))**f - - return F1*F2 + F3*F4 - - - -def b2H17(b1):#H17 +def b2H17(b1): + """ Implements fitting formula for secondary halo bias, b_2, described in arXiv:1607.01024. + """ b2 = 0.77 - (2.43 * b1) + ( b1 * b1) return b2 -def b2L16(b1):#L16 +def b2L16(b1): + """ Implements fitting formula for secondary halo bias, b_2, described in arXiv:1511.01096. 
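The two secondary-bias fits documented here track each other closely for biases of order unity. A minimal numerical cross-check, with the coefficients copied from the functions in this patch:

def b2H17(b1):
    # arXiv:1607.01024 fit
    return 0.77 - 2.43 * b1 + b1 * b1

def b2L16(b1):
    # arXiv:1511.01096 fit
    return 0.412 - 2.143 * b1 + 0.929 * b1**2 + 0.008 * b1**3

print(b2H17(2.0))  # -0.09
print(b2L16(2.0))  # approximately -0.094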
+ """ b2 = 0.412 - (2.143 * b1) + (0.929 * b1 * b1) + (0.008 * b1 * b1 * b1) return b2 @@ -2030,707 +1441,3 @@ def set_hmodified_cosmology(cosmo,deltah): return cosmo_hp, cosmo_hm - -def darkemu_Tk3D_SSC_test(cosmo, prof1, deltah=0.02, - log10Mh_min=12.0,log10Mh_max=15.9, - normprof1=False, - lk_arr=None, a_arr=None, - extrap_order_lok=1, extrap_order_hik=1, - use_log=False): - - if lk_arr is None: - status = 0 - nk = lib.get_pk_spline_nk(cosmo.cosmo) - lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) - check(status, cosmo=cosmo) - if a_arr is None: - status = 0 - na = lib.get_pk_spline_na(cosmo.cosmo) - a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) - check(status, cosmo=cosmo) - - k_use = np.exp(lk_arr) - - # Check inputs - if not isinstance(prof1, halos.profiles.HaloProfile): - raise TypeError("prof1 must be of type `HaloProfile`") - - h = cosmo["h"] - k_emu = k_use / h # [h/Mpc] - - cosmo.compute_linear_power() - pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') - - # set cosmology for dark emulator - emu = darkemu_set_cosmology(cosmo) - - # set h-modified cosmology to take finite differencing - hp = h + deltah - hm = h - deltah - cosmo_hp, cosmo_hm = set_hmodified_cosmology(cosmo,deltah) - - emu_p = darkemu_set_cosmology(cosmo_hp) - emu_m = darkemu_set_cosmology(cosmo_hm) - - # Growth factor - Dp = cosmo_hp.growth_factor_unnorm(a_arr) - Dm = cosmo_hm.growth_factor_unnorm(a_arr) - - na = len(a_arr) - nk = len(k_use) - dpk12 = np.zeros([na, nk]) - pk12 = np.zeros([na, nk]) - #dpk34 = np.zeros([na, nk]) - Mfor_hmf = np.linspace(8,16,200) - Mh = np.linspace(log10Mh_min,log10Mh_max,2**5+1)#M_sol/h - dMh = Mh[1] - Mh[0] - dlogM = dMh - b1_th_tink = np.zeros(len(Mh)) - #b2_th_tink = np.zeros(len(Mh)) - Pth = [0] * len(Mh) - Pnth_hp = [0] * len(Mh) - Pnth_hm = [0] * len(Mh) - Pbin = [0] * len(Mh) - nths = np.zeros(len(Mh)) - - mass_def=halos.MassDef200m() - hmf_DE = halos.MassFuncDarkEmulator(cosmo,mass_def=mass_def) - hbf = halos.hbias.HaloBiasTinker10(cosmo,mass_def=mass_def) - - if np.any(a_arr < 1/(1+1.48)): - hmf = halos.MassFuncTinker10(cosmo,mass_def=mass_def) - nfw = halos.HaloProfileNFW(halos.ConcentrationDuffy08(mass_def), - fourier_analytic=True) - hmc = halos.HMCalculator(cosmo, hmf, hbf, mass_def) - - halomod_pk_arr = halos.halomod_power_spectrum(cosmo, hmc, k_use, a_arr, - prof=nfw, prof_2pt=None, - prof2=prof1, p_of_k_a=None, - normprof1=True, normprof2=True, - get_1h=True, get_2h=True, - smooth_transition=None, - supress_1h=None) - - halomod_tk3D, dpk12_halomod = halos.halomod_Tk3D_SSC(cosmo=cosmo, hmc=hmc, - prof1=nfw, - prof2=prof1, - prof12_2pt=None, - normprof1=True, normprof2=True, - lk_arr=np.log(k_use), a_arr=a_arr, - use_log=use_log) - - for ia, aa in enumerate(a_arr): - z = 1. 
/ aa - 1 # dark emulator is valid for 0 =< z <= 1.48 - if z > 1.48: - dpk12[ia, :] = dpk12_halomod[ia, :] - pk12[ia, :] = halomod_pk_arr[ia, :] - print("use halo model for z={:.2f}>1.48".format(z)) - else: - # mass function - #dndlog10m_emu = ius(Mfor_hmf ,hmf_DE.get_mass_function(cosmo, 10**Mfor_hmf ,aa)) # Mpc^-3 #ius(np.log10(Mlist), dndm_emu * Mlist * np.log(10) * h ** 3) - Mlist, dndm_emu = emu.get_dndm(z) # Mlist [Msol/h] - dndlog10m_emu = ius(np.log10(Mlist), dndm_emu * Mlist * np.log(10) * h ** 3) - - - for m in range(len(Mh)): - Pth[m] = emu.get_phm_massthreshold(k_emu,10**Mh[m],z) * (1/h)**3 - nths[m] = emu.mass_to_dens(10**Mh[m],z) * h**3 - - Pnth_hp[m] = emu_p.get_phm(k_emu*(h/hp),np.log10(nths[m]*(1/hp)**3),z)*(1/hp)**3 - Pnth_hm[m] = emu_m.get_phm(k_emu*(h/hm),np.log10(nths[m]*(1/hm)**3),z)*(1/hm)**3 - Pbin[m] = emu.get_phm_mass(k_emu, 10 ** Mh[m], z) * (1/h)**3 - - Mh1 = np.linspace(Mh[m],15.9,2**5+1) - dMh1 = Mh[1] - Mh[0] - b1_th_tink[m] = integrate.romb(dndlog10m_emu(Mh1) * hbf.get_halo_bias(cosmo,(10 ** Mh1) / h, aa), dx = dMh1)\ - /integrate.romb(dndlog10m_emu(Mh1), dx = dMh1) - - #b2_th_tink[m] = integrate.romb(dndlog10m_emu(Mh1) * b2H17(hbf.get_halo_bias(cosmo,(10 ** Mh1) / h, aa)), dx = dMh1)\ - # /integrate.romb(dndlog10m_emu(Mh1), dx = dMh1) - - Nc = prof1._Nc(10 ** Mh / h, aa) - Ns = prof1._Ns(10 ** Mh / h, aa) - fc = prof1._fc(aa) - Ng = Nc * (fc + Ns) - Mps = Mh + dlogM - Mms = Mh - dlogM - - prof_Mp = prof1.fourier(cosmo, k_use, (10 ** Mps) / h, aa, mass_def) - prof_Mm = prof1.fourier(cosmo, k_use, (10 ** Mms) / h, aa, mass_def) - prof = prof1.fourier(cosmo, k_use,(10 ** Mh) / h, aa, mass_def) - - dprof_dlogM = (prof_Mp - prof_Mm) / (2 * dlogM)#*np.log(10)) - nth_mat = np.tile(nths, (len(k_use), 1)).transpose() - ng = integrate.romb(dndlog10m_emu(Mh) * Ng, dx = dMh, axis = 0) - bgE = integrate.romb(dndlog10m_emu(Mh) * Ng * \ - (hbf.get_halo_bias(cosmo,(10 ** Mh) / h, aa)), dx = dMh, axis = 0) / ng - - bgE2 = integrate.romb(dndlog10m_emu(Mh) * Ng * \ - b2H17(hbf.get_halo_bias(cosmo,(10 ** Mh) / h, aa)), dx = dMh, axis = 0) / ng - bgL = bgE - 1 - - dndlog10m_func_mat = np.tile(dndlog10m_emu(Mh), (len(k_emu), 1)).transpose() # M_sol,Mpc^-3 - b1L_th_mat = np.tile(b1_th_tink -1, (len(k_emu), 1)).transpose() - Pgm = integrate.romb(dprof_dlogM * (nth_mat * np.array(Pth)), \ - dx = dMh, axis = 0) / ng - - dPhm_db_nfix = (26. / 21.) * (np.array(Pnth_hp) - np.array(Pnth_hm)) / \ - (2 * (np.log(Dp[ia]) - np.log(Dm[ia]))) # Mpc^3 - - dnP_hm_db_emu = nth_mat * (dPhm_db_nfix + b1L_th_mat * np.array(Pbin)) # Dless - - # stitching - k_switch = 0.08 # [h/Mpc] - kmin = 1e-2 # [h/Mpc] - dnP_gm_db = integrate.romb(dprof_dlogM * (dnP_hm_db_emu), dx = dMh, axis = 0) #Dless - - Pgm_growth = dnP_gm_db / ng - bgL * Pgm # Dless - - Pgm_d = -1. / 3. * np.gradient(np.log(Pgm)) / np.gradient(np.log(k_emu)) * Pgm #Dless - - dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) - - Pgm_lin = bgE * pk2dlin.eval(k_use, aa, cosmo) - dPgm_db_lin = (47/21 + bgE2/bgE - bgE -1/3 * dpklin) * \ - bgE * pk2dlin.eval(k_use, aa, cosmo) - dPgm_db = dPgm_db_lin * np.exp(-k_emu/k_switch) + \ - (Pgm_growth + Pgm_d) * (1 - np.exp(-k_emu/k_switch)) - - Pgm = Pgm_lin * np.exp(-k_emu/k_switch) + \ - Pgm * (1 - np.exp(-k_emu/k_switch)) - - # use linear theory below kmin - dPgm_db[k_emu < kmin] = dPgm_db_lin[k_emu < kmin] - dpk12[ia, :] = dPgm_db - - Pgm[k_emu < kmin] = Pgm_lin[k_emu < kmin] - pk12[ia, :] = Pgm - - - if use_log: - if np.any(dpk12 <= 0): - warnings.warn( - "Some values were not positive. 
" - "Will not interpolate in log-space.", - category=CCLWarning) - use_log = False - else: - dpk12 = np.log(dpk12) - - pk2d = Pk2D(a_arr=a_arr, lk_arr=lk_arr, pk_arr=pk12, - extrap_order_lok=extrap_order_lok, - extrap_order_hik=extrap_order_hik, - cosmo=cosmo, is_logp=False) - - tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, - pk1_arr=dpk12, pk2_arr=dpk12, - extrap_order_lok=extrap_order_lok, - extrap_order_hik=extrap_order_hik, is_logt=use_log) - return tk3d, pk2d - -def darkemu_Tk3D_SSC_old(cosmo, prof1, deltah=0.02, - normprof1=False, - lk_arr=None, a_arr=None, - extrap_order_lok=1, extrap_order_hik=1, - use_log=False): - """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing - the super-sample covariance trispectrum, given by the tensor - product of the power spectrum responses associated with the - two pairs of quantities being correlated. Each response is - calculated as: - - .. math:: - \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = - \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) - P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) - P_{u,v}(k) - - where the :math:`I^a_b` are defined in the documentation - of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2` and - :math:`b_{u}` and :math:`b_{v}` are the linear halo biases for quantities - :math:`u` and :math:`v`, respectively (zero if they are not clustering). - - Args: - cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. - hmc (:class:`HMCalculator`): a halo model calculator. - prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo - profile (corresponding to :math:`u_1` above. - prof2 (:class:`~pyccl.halos.profiles.HaloProfile`): halo - profile (corresponding to :math:`u_2` above. If `None`, - `prof1` will be used as `prof2`. - prof12_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): - a profile covariance object returning the the two-point - moment of `prof1` and `prof2`. If `None`, the default - second moment will be used, corresponding to the - products of the means of both profiles. - prof3 (:class:`~pyccl.halos.profiles.HaloProfile`): halo - profile (corresponding to :math:`v_1` above. If `None`, - `prof1` will be used as `prof3`. - prof4 (:class:`~pyccl.halos.profiles.HaloProfile`): halo - profile (corresponding to :math:`v_2` above. If `None`, - `prof3` will be used as `prof4`. - prof34_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): - same as `prof12_2pt` for `prof3` and `prof4`. - normprof1 (bool): if `True`, this integral will be - normalized by :math:`I^0_1(k\\rightarrow 0,a|u)` - (see :meth:`~HMCalculator.I_0_1`), where - :math:`u` is the profile represented by `prof1`. - normprof2 (bool): same as `normprof1` for `prof2`. - normprof3 (bool): same as `normprof1` for `prof3`. - normprof4 (bool): same as `normprof1` for `prof4`. - p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to - be used as the linear matter power spectrum. If `None`, - the power spectrum stored within `cosmo` will be used. - a_arr (array): an array holding values of the scale factor - at which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - lk_arr (array): an array holding values of the natural - logarithm of the wavenumber (in units of Mpc^-1) at - which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - extrap_order_lok (int): extrapolation order to be used on - k-values below the minimum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. 
- extrap_order_hik (int): extrapolation order to be used on - k-values above the maximum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - use_log (bool): if `True`, the trispectrum will be - interpolated in log-space (unless negative or - zero values are found). - - Returns: - :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. - """ - if lk_arr is None: - status = 0 - nk = lib.get_pk_spline_nk(cosmo.cosmo) - lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) - check(status, cosmo=cosmo) - if a_arr is None: - status = 0 - na = lib.get_pk_spline_na(cosmo.cosmo) - a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) - check(status, cosmo=cosmo) - - k_use = np.exp(lk_arr) - - # Check inputs - if not isinstance(prof1, halos.profiles.HaloProfile): - raise TypeError("prof1 must be of type `HaloProfile`") - - h = cosmo["h"] - k_emu = k_use / h # [h/Mpc] - - cosmo.compute_linear_power() - pk2dlin = cosmo.get_linear_power('delta_matter:delta_matter') - - # set cosmology for dark emulator - emu = darkemu_set_cosmology(cosmo) - - # set h-modified cosmology to take finite differencing - hp = h + deltah - hm = h - deltah - cosmo_hp, cosmo_hm = set_hmodified_cosmology(cosmo,deltah) - - emu_p = darkemu_set_cosmology(cosmo_hp) - emu_m = darkemu_set_cosmology(cosmo_hm) - - # Growth factor - Dp = cosmo_hp.growth_factor_unnorm(a_arr) - Dm = cosmo_hm.growth_factor_unnorm(a_arr) - - na = len(a_arr) - nk = len(k_use) - dpk12 = np.zeros([na, nk]) - pk12 = np.zeros([na, nk]) - #dpk34 = np.zeros([na, nk]) - - Mh = np.linspace(12.,15.9,2**5+1)#M_sol/h - dMh = Mh[1] - Mh[0] - dlogM = dMh - b1_th_tink = np.zeros(len(Mh)) - #b2_th_tink = np.zeros(len(Mh)) - Pth = [0] * len(Mh) - Pnth_hp = [0] * len(Mh) - Pnth_hm = [0] * len(Mh) - Pbin = [0] * len(Mh) - nths = np.zeros(len(Mh)) - - mass_def=halos.MassDef200m() - hbf = halos.hbias.HaloBiasTinker10(cosmo,mass_def=mass_def) - hmf = halos.MassFuncTinker10(cosmo,mass_def=mass_def) - nfw = halos.HaloProfileNFW(halos.ConcentrationDuffy08(mass_def), - fourier_analytic=True) - hmc = halos.HMCalculator(cosmo, hmf, hbf, mass_def) - - halomod_pk_arr = halos.halomod_power_spectrum(cosmo, hmc, k_use, a_arr, - prof=nfw, prof_2pt=None, - prof2=prof1, p_of_k_a=None, - normprof1=True, normprof2=True, - get_1h=True, get_2h=True, - smooth_transition=None, - supress_1h=None) - - halomod_tk3D, dpk12_halomod = halos.halomod_Tk3D_SSC(cosmo=cosmo, hmc=hmc, - prof1=nfw, - prof2=prof1, - prof12_2pt=None, - normprof1=True, normprof2=True, - lk_arr=np.log(k_use), a_arr=a_arr, - use_log=use_log) - - for ia, aa in enumerate(a_arr): - z = 1. 
/ aa - 1 # dark emulator is valid for 0 =< z <= 1.48 - if z > 1.48: - dpk12[ia, :] = dpk12_halomod[ia, :] - pk12[ia, :] = halomod_pk_arr[ia, :] - print("use halo model for z={:.2f}>1.48".format(z)) - else: - # mass function - Mlist, dndm_emu = emu.get_dndm(z) # Mlist [Msol/h] - dndlog10m_emu = ius(np.log10(Mlist), dndm_emu * Mlist * np.log(10) * h ** 3) - - for m in range(len(Mh)): - Pth[m] = emu.get_phm_massthreshold(k_emu,10**Mh[m],z) * (1/h)**3 - nths[m] = emu.mass_to_dens(10**Mh[m],z) * h**3 - - Pnth_hp[m] = emu_p.get_phm(k_emu*(h/hp),np.log10(nths[m]*(1/hp)**3),z)*(1/hp)**3 - Pnth_hm[m] = emu_m.get_phm(k_emu*(h/hm),np.log10(nths[m]*(1/hm)**3),z)*(1/hm)**3 - Pbin[m] = emu.get_phm_mass(k_emu, 10 ** Mh[m], z) * (1/h)**3 - - Mh1 = np.linspace(Mh[m],15.9,2**5+1) - dMh1 = Mh[1] - Mh[0] - b1_th_tink[m] = integrate.romb(dndlog10m_emu(Mh1) * hbf.get_halo_bias(cosmo,(10 ** Mh1) / h, aa), dx = dMh1)\ - /integrate.romb(dndlog10m_emu(Mh1), dx = dMh1) - - #b2_th_tink[m] = integrate.romb(dndlog10m_emu(Mh1) * b2H17(hbf.get_halo_bias(cosmo,(10 ** Mh1) / h, aa)), dx = dMh1)\ - # /integrate.romb(dndlog10m_emu(Mh1), dx = dMh1) - - Nc = prof1._Nc(10 ** Mh / h, aa) - Ns = prof1._Ns(10 ** Mh / h, aa) - fc = prof1._fc(aa) - Ng = Nc * (fc + Ns) - Mps = Mh + dlogM - Mms = Mh - dlogM - - prof_Mp = prof1.fourier(cosmo, k_use, (10 ** Mps) / h, aa, mass_def) - prof_Mm = prof1.fourier(cosmo, k_use, (10 ** Mms) / h, aa, mass_def) - prof = prof1.fourier(cosmo, k_use,(10 ** Mh) / h, aa, mass_def) - - dprof_dlogM = (prof_Mp - prof_Mm) / (2 * dlogM)#*np.log(10)) - nth_mat = np.tile(nths, (len(k_use), 1)).transpose() - ng = integrate.romb(dndlog10m_emu(Mh) * Ng, dx = dMh, axis = 0) - bgE = integrate.romb(dndlog10m_emu(Mh) * Ng * \ - (hbf.get_halo_bias(cosmo,(10 ** Mh) / h, aa)), dx = dMh, axis = 0) / ng - - bgE2 = integrate.romb(dndlog10m_emu(Mh) * Ng * \ - b2H17(hbf.get_halo_bias(cosmo,(10 ** Mh) / h, aa)), dx = dMh, axis = 0) / ng - bgL = bgE - 1 - - dndlog10m_func_mat = np.tile(dndlog10m_emu(Mh), (len(k_emu), 1)).transpose() # M_sol,Mpc^-3 - b1L_th_mat = np.tile(b1_th_tink -1, (len(k_emu), 1)).transpose() - Pgm = integrate.romb(dprof_dlogM * (nth_mat * np.array(Pth)), \ - dx = dMh, axis = 0) / ng - - dPhm_db_nfix = (26. / 21.) * (np.array(Pnth_hp) - np.array(Pnth_hm)) / \ - (2 * (np.log(Dp[ia]) - np.log(Dm[ia]))) # Mpc^3 - - dnP_hm_db_emu = nth_mat * (dPhm_db_nfix + b1L_th_mat * np.array(Pbin)) # Dless - - # stitching - k_switch = 0.08 # [h/Mpc] - kmin = 1e-2 # [h/Mpc] - dnP_gm_db = integrate.romb(dprof_dlogM * (dnP_hm_db_emu), dx = dMh, axis = 0) #Dless - - Pgm_growth = dnP_gm_db / ng - bgL * Pgm # Dless - - Pgm_d = -1. / 3. * np.gradient(np.log(Pgm)) / np.gradient(np.log(k_emu)) * Pgm #Dless - - dpklin = pk2dlin.eval_dlogpk_dlogk(k_use, aa, cosmo) - - Pgm_lin = bgE * pk2dlin.eval(k_use, aa, cosmo) - dPgm_db_lin = (47/21 + bgE2/bgE - bgE -1/3 * dpklin) * \ - bgE * pk2dlin.eval(k_use, aa, cosmo) - dPgm_db = dPgm_db_lin * np.exp(-k_emu/k_switch) + \ - (Pgm_growth + Pgm_d) * (1 - np.exp(-k_emu/k_switch)) - - Pgm = Pgm_lin * np.exp(-k_emu/k_switch) + \ - Pgm * (1 - np.exp(-k_emu/k_switch)) - - # use linear theory below kmin - dPgm_db[k_emu < kmin] = dPgm_db_lin[k_emu < kmin] - dpk12[ia, :] = dPgm_db - - Pgm[k_emu < kmin] = Pgm_lin[k_emu < kmin] - pk12[ia, :] = Pgm - - - if use_log: - if np.any(dpk12 <= 0): - warnings.warn( - "Some values were not positive. 
" - "Will not interpolate in log-space.", - category=CCLWarning) - use_log = False - else: - dpk12 = np.log(dpk12) - - pk2d = Pk2D(a_arr=a_arr, lk_arr=lk_arr, pk_arr=pk12, - extrap_order_lok=extrap_order_lok, - extrap_order_hik=extrap_order_hik, - cosmo=cosmo, is_logp=False) - - tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, - pk1_arr=dpk12, pk2_arr=dpk12, - extrap_order_lok=extrap_order_lok, - extrap_order_hik=extrap_order_hik, is_logt=use_log) - return tk3d, pk2d - -def halomod_Tk3D_SSC_orig(cosmo, hmc, - prof1, prof2=None, prof12_2pt=None, - prof3=None, prof4=None, prof34_2pt=None, - normprof1=False, normprof2=False, - normprof3=False, normprof4=False, - p_of_k_a=None, lk_arr=None, a_arr=None, - extrap_order_lok=1, extrap_order_hik=1, - use_log=False): - """ Returns a :class:`~pyccl.tk3d.Tk3D` object containing - the super-sample covariance trispectrum, given by the tensor - product of the power spectrum responses associated with the - two pairs of quantities being correlated. Each response is - calculated as: - - .. math:: - \\frac{\\partial P_{u,v}(k)}{\\partial\\delta_L} = - \\left(\\frac{68}{21}-\\frac{d\\log k^3P_L(k)}{d\\log k}\\right) - P_L(k)I^1_1(k,|u)I^1_1(k,|v)+I^1_2(k|u,v) - (b_{u} + b_{v}) - P_{u,v}(k) - - where the :math:`I^a_b` are defined in the documentation - of :meth:`~HMCalculator.I_1_1` and :meth:`~HMCalculator.I_1_2` and - :math:`b_{u}` and :math:`b_{v}` are the linear halo biases for quantities - :math:`u` and :math:`v`, respectively (zero if they are not clustering). - - Args: - cosmo (:class:`~pyccl.core.Cosmology`): a Cosmology object. - hmc (:class:`HMCalculator`): a halo model calculator. - prof1 (:class:`~pyccl.halos.profiles.HaloProfile`): halo - profile (corresponding to :math:`u_1` above. - prof2 (:class:`~pyccl.halos.profiles.HaloProfile`): halo - profile (corresponding to :math:`u_2` above. If `None`, - `prof1` will be used as `prof2`. - prof12_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): - a profile covariance object returning the the two-point - moment of `prof1` and `prof2`. If `None`, the default - second moment will be used, corresponding to the - products of the means of both profiles. - prof3 (:class:`~pyccl.halos.profiles.HaloProfile`): halo - profile (corresponding to :math:`v_1` above. If `None`, - `prof1` will be used as `prof3`. - prof4 (:class:`~pyccl.halos.profiles.HaloProfile`): halo - profile (corresponding to :math:`v_2` above. If `None`, - `prof3` will be used as `prof4`. - prof34_2pt (:class:`~pyccl.halos.profiles_2pt.Profile2pt`): - same as `prof12_2pt` for `prof3` and `prof4`. - normprof1 (bool): if `True`, this integral will be - normalized by :math:`I^0_1(k\\rightarrow 0,a|u)` - (see :meth:`~HMCalculator.I_0_1`), where - :math:`u` is the profile represented by `prof1`. - normprof2 (bool): same as `normprof1` for `prof2`. - normprof3 (bool): same as `normprof1` for `prof3`. - normprof4 (bool): same as `normprof1` for `prof4`. - p_of_k_a (:class:`~pyccl.pk2d.Pk2D`): a `Pk2D` object to - be used as the linear matter power spectrum. If `None`, - the power spectrum stored within `cosmo` will be used. - a_arr (array): an array holding values of the scale factor - at which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. - lk_arr (array): an array holding values of the natural - logarithm of the wavenumber (in units of Mpc^-1) at - which the trispectrum should be calculated for - interpolation. If `None`, the internal values used - by `cosmo` will be used. 
- extrap_order_lok (int): extrapolation order to be used on - k-values below the minimum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - extrap_order_hik (int): extrapolation order to be used on - k-values above the maximum of the splines. See - :class:`~pyccl.tk3d.Tk3D`. - use_log (bool): if `True`, the trispectrum will be - interpolated in log-space (unless negative or - zero values are found). - - Returns: - :class:`~pyccl.tk3d.Tk3D`: SSC effective trispectrum. - """ - if lk_arr is None: - status = 0 - nk = lib.get_pk_spline_nk(cosmo.cosmo) - lk_arr, status = lib.get_pk_spline_lk(cosmo.cosmo, nk, status) - check(status, cosmo=cosmo) - if a_arr is None: - status = 0 - na = lib.get_pk_spline_na(cosmo.cosmo) - a_arr, status = lib.get_pk_spline_a(cosmo.cosmo, na, status) - check(status, cosmo=cosmo) - - k_use = np.exp(lk_arr) - - # Check inputs - if not isinstance(prof1, HaloProfile): - raise TypeError("prof1 must be of type `HaloProfile`") - if (prof2 is not None) and (not isinstance(prof2, HaloProfile)): - raise TypeError("prof2 must be of type `HaloProfile` or `None`") - if (prof3 is not None) and (not isinstance(prof3, HaloProfile)): - raise TypeError("prof3 must be of type `HaloProfile` or `None`") - if (prof4 is not None) and (not isinstance(prof4, HaloProfile)): - raise TypeError("prof4 must be of type `HaloProfile` or `None`") - if prof12_2pt is None: - prof12_2pt = Profile2pt() - elif not isinstance(prof12_2pt, Profile2pt): - raise TypeError("prof12_2pt must be of type " - "`Profile2pt` or `None`") - if (prof34_2pt is not None) and (not isinstance(prof34_2pt, Profile2pt)): - raise TypeError("prof34_2pt must be of type `Profile2pt` or `None`") - - # number counts profiles must be normalized - profs = {prof1: normprof1, prof2: normprof2, - prof3: normprof3, prof4: normprof4} - - for i, (profile, normalization) in enumerate(profs.items()): - if (profile is not None - and profile.is_number_counts - and not normalization): - raise ValueError( - f"normprof{i+1} must be True if prof{i+1} is number counts") - - if prof3 is None: - prof3_bak = prof1 - else: - prof3_bak = prof3 - if prof34_2pt is None: - prof34_2pt_bak = prof12_2pt - else: - prof34_2pt_bak = prof34_2pt - - # Power spectrum - if isinstance(p_of_k_a, Pk2D): - pk2d = p_of_k_a - elif (p_of_k_a is None) or (str(p_of_k_a) == 'linear'): - pk2d = cosmo.get_linear_power('delta_matter:delta_matter') - elif str(p_of_k_a) == 'nonlinear': - pk2d = cosmo.get_nonlin_power('delta_matter:delta_matter') - else: - raise TypeError("p_of_k_a must be `None`, \'linear\', " - "\'nonlinear\' or a `Pk2D` object") - - def get_norm(normprof, prof, sf): - if normprof: - return hmc.profile_norm(cosmo, sf, prof) - else: - return 1 - - na = len(a_arr) - nk = len(k_use) - dpk12 = np.zeros([na, nk]) - dpk34 = np.zeros([na, nk]) - for ia, aa in enumerate(a_arr): - # Compute profile normalizations - norm1 = get_norm(normprof1, prof1, aa) - i11_1 = hmc.I_1_1(cosmo, k_use, aa, prof1) - # Compute second profile normalization - if prof2 is None: - norm2 = norm1 - i11_2 = i11_1 - else: - norm2 = get_norm(normprof2, prof2, aa) - i11_2 = hmc.I_1_1(cosmo, k_use, aa, prof2) - if prof3 is None: - norm3 = norm1 - i11_3 = i11_1 - else: - norm3 = get_norm(normprof3, prof3, aa) - i11_3 = hmc.I_1_1(cosmo, k_use, aa, prof3) - if prof4 is None: - norm4 = norm3 - i11_4 = i11_3 - else: - norm4 = get_norm(normprof4, prof4, aa) - i11_4 = hmc.I_1_1(cosmo, k_use, aa, prof4) - - i12_12 = hmc.I_1_2(cosmo, k_use, aa, prof1, - prof12_2pt, prof2) - if (prof3 is None) and (prof4 is 
None) and (prof34_2pt is None): - i12_34 = i12_12 - else: - i12_34 = hmc.I_1_2(cosmo, k_use, aa, prof3_bak, - prof34_2pt_bak, prof4) - norm12 = norm1 * norm2 - norm34 = norm3 * norm4 - - pk = pk2d.eval(k_use, aa, cosmo) - dpk = pk2d.eval_dlogpk_dlogk(k_use, aa, cosmo) - # (47/21 - 1/3 dlogPk/dlogk) * I11 * I11 * Pk+I12 - dpk12[ia, :] = norm12*((2.2380952381-dpk/3)*i11_1*i11_2*pk+i12_12) - dpk34[ia, :] = norm34*((2.2380952381-dpk/3)*i11_3*i11_4*pk+i12_34) - - # Counter terms for clustering (i.e. - (bA + bB) * PAB - if prof1.is_number_counts or (prof2 is None or prof2.is_number_counts): - b1 = b2 = np.zeros_like(k_use) - i02_12 = hmc.I_0_2(cosmo, k_use, aa, prof1, prof12_2pt, prof2) - P_12 = norm12 * (pk * i11_1 * i11_2 + i02_12) - - if prof1.is_number_counts: - b1 = i11_1 * norm1 - - if prof2 is None: - b2 = b1 - elif prof2.is_number_counts: - b2 = i11_2 * norm2 - - dpk12[ia, :] -= (b1 + b2) * P_12 - - if prof3_bak.is_number_counts or \ - ((prof3_bak.is_number_counts and prof4 is None) or - (prof4 is not None) and prof4.is_number_counts): - b3 = b4 = np.zeros_like(k_use) - if (prof3 is None) and (prof4 is None) and (prof34_2pt is None): - i02_34 = i02_12 - else: - i02_34 = hmc.I_0_2(cosmo, k_use, aa, prof3_bak, prof34_2pt_bak, - prof4) - P_34 = norm34 * (pk * i11_3 * i11_4 + i02_34) - - if prof3 is None: - b3 = b1 - elif prof3.is_number_counts: - b3 = i11_3 * norm3 - - if prof4 is None: - b4 = b3 - elif prof4.is_number_counts: - b4 = i11_4 * norm4 - - dpk34[ia, :] -= (b3 + b4) * P_34 - - if use_log: - if np.any(dpk12 <= 0) or np.any(dpk34 <= 0): - warnings.warn( - "Some values were not positive. " - "Will not interpolate in log-space.", - category=CCLWarning) - use_log = False - else: - dpk12 = np.log(dpk12) - dpk34 = np.log(dpk34) - - tk3d = Tk3D(a_arr=a_arr, lk_arr=lk_arr, - pk1_arr=dpk12, pk2_arr=dpk34, - extrap_order_lok=extrap_order_lok, - extrap_order_hik=extrap_order_hik, is_logt=use_log) - return tk3d, dpk12 - -
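The hard-coded 2.2380952381 in the dpk12/dpk34 assignments above is 47/21 = 68/21 - 1, consistent with a response of the form (68/21 - (1/3) d ln k^3 P_L / d ln k) P_L: the -1 comes from the k^3 factor once the tilt derivative is taken with its 1/3 prefactor, and the remaining -(1/3) d ln P / d ln k is applied to the spline derivative. A stripped-down numpy version of the per-scale-factor assembly, with placeholder arrays standing in for the halo-model integrals and with the profile normalizations omitted:

import numpy as np

nk = 128
k = np.logspace(-3, 1, nk)                      # [Mpc^-1]
pk = 2e4 * (k / 0.02)**-1.5                     # placeholder P(k) [Mpc^3]
dlnpk_dlnk = np.gradient(np.log(pk), np.log(k)) # d ln P / d ln k

i11_u = np.ones(nk)     # placeholder I^1_1(k|u)
i11_v = np.ones(nk)     # placeholder I^1_1(k|v)
i12_uv = np.zeros(nk)   # placeholder I^1_2(k|u,v)
i02_uv = np.zeros(nk)   # placeholder I^0_2(k|u,v), 1-halo part of P_{u,v}
b_u, b_v = 0.0, 0.0     # linear biases; non-zero only for number-counts tracers

resp = (47.0 / 21.0 - dlnpk_dlnk / 3.0) * i11_u * i11_v * pk + i12_uv
p_uv = pk * i11_u * i11_v + i02_uv              # halo-model P_{u,v}
resp -= (b_u + b_v) * p_uv                      # clustering counter term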