diff --git a/.gitignore b/.gitignore
index e60185d..20e1f17 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,29 +1,34 @@
-# Files from personal projects
-acropolis/prj/*
-acropolis/data/cn.dat
-
# Reference paper
-paper*
+/paper*
# AlterBBN files
-alterbbn*
+/alterbbn/*
# Temporary files
CHANGES
TODO
-manual/v*
-plots/v*
+
+# Project files
+/prj/*
+/acropolis/prj
+
+# Files for later versions
+/acropolis/v*
+
+# Backup files
+/acropolis/backup
# Testing scripts
-test.py
+/test.py
+/test.ipynb
-# Plots
-plots/data/*
+# Plot data
+/plots/data/*
# Build files
-build/*
-dist/*
-ACROPOLIS.egg-info
+/build/*
+/dist/*
+/ACROPOLIS.egg-info
# Byte-compiled / optimized / DLL files
__pycache__
@@ -35,7 +40,6 @@ __pycache__
*.sty
*.bst
*.bib
-manual/plots/*
*.bbl
*.log
diff --git a/README.md b/README.md
index d921dc9..5aa4333 100644
--- a/README.md
+++ b/README.md
@@ -4,8 +4,8 @@
![arXiv: 2011.06518](https://img.shields.io/badge/arXiv-2011.06518-red.svg?style=flat-square)
![Language: Python3](https://img.shields.io/badge/Language-Python3-blue.svg?style=flat-square)
-![Version: 1.2.2](https://img.shields.io/badge/Current_Version-1.2.2-green.svg?style=flat-square)
-![DevVersion: 1.3](https://img.shields.io/badge/Current_Dev_Version-1.3-orange.svg?style=flat-square)
+![Version: 1.3.0](https://img.shields.io/badge/Current_Version-1.3.0-green.svg?style=flat-square)
+![DevVersion: 2.0.0](https://img.shields.io/badge/Current_Dev_Version-2.0.0-orange.svg?style=flat-square)
When using this code for your own scientific publications, please cite
@@ -29,38 +29,54 @@ The remarkable agreement between observations of the primordial light element ab
# Changelog
-v1.2.2 (April 6, 2022)
+v1.3.0 (September 17, 2024)
+
+- Implemented the model ``acropolis.ext.models.ResonanceModel``, which can be used to calculate PDI constraints for models with resonantly-enhanced DM annihilations
+- Updated the initial abundances, which have now been calculated with ``PArthENoPE v3.0`` and hence include the updated deuterium reaction rates
+ - Added PDG2021 and PDG2022 values to ``acropolis.obs``
+ - Implemented the new package ``acropolis.jit`` to fix warnings caused by new versions of ``numba``
+ - Removed the requirement for the data in ``cosmo_file.dat`` to be equidistant in log space
+ - Improved the progress indicator when running parameter scans without a ``fast`` parameter
+ - Unified the plotting script in ``plots/plot_scan_results.py`` by using the methods defined in ``acropolis.plots``
+ - Added additional plotting functionality in ``acropolis.plots`` (extracting contours, specifying the ``x`` and ``y`` data for the plot, ...)
+
+
+
+v1.2.2 (April 6, 2022)
+
- Implemented fixes for the issues #10 and #11 on GitHub
- Made some initial plotting functions available in ``acropolis.plots``, which can be used to easily plot the results of parameter scans
- Improved the output that is printed to the screen (especially for parameter scans if ``verbose=True``)
- Updated the neutron lifetime to the PDG 2020 recommended value
- Included some example files, e.g. for parameter scans, in the directory examples/
- - Included a new c-file tools/create_sm_abundance_file.c, which can be used with [``AlterBBN``](https://alterbbn.hepforge.org/) to generate the file ``abundance_file.dat`` for sm.tar.gz
+ - Included a new c-file ./tools/create_sm_abundance_file.c, which can be used with [``AlterBBN``](https://alterbbn.hepforge.org/) to generate the file ``abundance_file.dat`` for sm.tar.gz
- Fixed a bug that prohibited running 2d parameter scans without 'fast' parameters
- Fixed a bug that caused INFO messages to be printed even for ``verbose=False``
+
v1.2.1 (February 16, 2021)
-
+
- Fixed a bug in ``DecayModel``. Results that have been obtained with older versions can be corrected by multiplying the parameter ``n0a`` with an additional factor ``2.7012``. All results of our papers remain unchanged.
- Updated the set of initial abundances to the most recent values returned by [``AlterBBN``](https://alterbbn.hepforge.org/) v2.2 (explicitly, we used ``failsafe=12``)
+
v1.2 (January 15, 2021)
-
+
- Speed improvements when running non-thermal nucleosynthesis (by a factor 7)
- Modified the directory structure by moving ./data to ./acropolis/data to transform ``ACROPOLIS`` into a proper package, which can be installed via ``python3 -m pip install . --user`` (also putting the executables ``decay`` and ``annihilation`` into your ``PATH``)
- Added the decay of neutrons and tritium to the calculation
- For AnnihilationModel, it is now possible to freely choose the dark-matter density parameter (default is 0.12)
+
-
v1.1 (December 1, 2020)
-
+
- For the source terms it is now possible to specify arbitrary monochromatic and continuous contributions, meaning that the latter one is no longer limited to only final-state radiation of photons
- By including additional JIT compilation steps, the runtime without database files was drastically decreased (by approximately a factor 15)
- The previously mentioned performance improvements also allowed us to drop the large database files altogether, which results in a better user experience (all database files are now part of the git repo and no additional download is required) and a significantly reduced RAM usage (∼900MB → ∼20MB)
@@ -68,12 +84,14 @@ The remarkable agreement between observations of the primordial light element ab
- Added a unified way to print the final abundances in order to declutter the wrapper scripts. This makes it easier to focus on the actual important parts when learning how to use ``ACROPOLIS``
- Moved from bytecode to simple text files for the remaining database file, as the former leads to unexpected behaviour on some machines
- Added additional info and warning messages for the user's convenience
+
v1.0 (November 12, 2020)
- Initial release
+
# Installation from PyPI
diff --git a/VERSION b/VERSION
new file mode 100644
index 0000000..f0bb29e
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+1.3.0
diff --git a/acropolis/cache.py b/acropolis/cache.py
index 9f91433..e3b4e22 100644
--- a/acropolis/cache.py
+++ b/acropolis/cache.py
@@ -2,13 +2,13 @@
from functools import wraps
-def cached_member(f_uncached):
+def cached(f):
# Define the cache as a dictionary
cache = {}
Tc = {"_": -1.}
# Define the wrapper function
- @wraps(f_uncached)
+ @wraps(f)
def f_cached(*args):
T = args[-1]
# Drop the first argument 'self'
@@ -22,7 +22,7 @@ def f_cached(*args):
cache.clear()
if pargs not in cache:
- cache[pargs] = f_uncached(*args)
+ cache[pargs] = f(*args)
return cache[pargs]
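Note on the renamed decorator: ``cached`` memoizes a member function on its (non-self) arguments and clears the cache whenever the last positional argument, the temperature, changes. A minimal usage sketch; the class and method below are illustrative and not part of this diff:

    # hypothetical example; any member function whose last argument is T qualifies
    from acropolis.cache import cached

    class ExampleRates:
        @cached
        def rate(self, E, T):
            # expensive evaluation; recomputed only for unseen (E, T)
            # and re-evaluated whenever the temperature T changes
            return E*T  # placeholder body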
diff --git a/acropolis/cascade.py b/acropolis/cascade.py
index 946f75d..7c89562 100644
--- a/acropolis/cascade.py
+++ b/acropolis/cascade.py
@@ -4,30 +4,28 @@
import numpy as np
# scipy
from scipy.integrate import quad, dblquad
-from scipy.integrate import IntegrationWarning
-# numba
-import numba as nb
-# warnings
-import warnings
+# abc
+from abc import ABCMeta, abstractmethod
+# jit
+from acropolis.jit import jit
# db
from acropolis.db import import_data_from_db
from acropolis.db import in_rate_db, interp_rate_db
# cache
-from acropolis.cache import cached_member
+from acropolis.cache import cached
# pprint
-from acropolis.pprint import print_warning, print_error
+from acropolis.pprint import print_error
# params
-from acropolis.params import me, me2, mm, mm2, alpha, re, hbar, tau_m
+from acropolis.params import me, me2, alpha, re
from acropolis.params import zeta3, pi2
-from acropolis.params import FX
from acropolis.params import Emin, approx_zero, eps, Ephb_T_max
from acropolis.params import NE_pd, NE_min
# _ReactionWrapperScaffold ####################################################
-@nb.jit(cache=True)
+@jit
def _JIT_F(Eph, Ee, Ephb):
# ATTENTION: Here we use the range given in '10.1103/PhysRev.167.1159',
# because the translation to 0 < q < 1 is questionable
@@ -45,7 +43,7 @@ def _JIT_F(Eph, Ee, Ephb):
return 2.*q*log(q) + (1.+2.*q)*(1.-q) + (G*q)**2. * (1.-q)/(2.+2.*G*q)
-@nb.jit(cache=True)
+@jit
def _JIT_G(Ee, Eph, Ephb):
# Define the energy of the positron
Eep = Eph + Ephb - Ee
@@ -74,8 +72,8 @@ def _JIT_G(Ee, Eph, Ephb):
sud += 4.*( (Ee + Eep)**2. )*log( (4.*Ephb*Ee*Eep)/( me2*(Ee + Eep) ) )/( Ee*Eep )
sud += ( me2/( Ephb*(Ee + Eep) ) - 1. ) * ( (Ee + Eep)**4. )/( (Ee**2.)*(Eep**2.) )
# ATTENTION: no additional minus sign in sud[2]
- # It is unclear whether it is a type or an artifact
- # of the scan (in the original paper)
+ # It is unclear whether it is a typo or an artifact
+ # of scanning the original document
sud += 2.*( 2.*Ephb*(Ee + Eep) - me2 ) * ( (Ee + Eep)**2. )/( me2*Ee*Eep )
sud += -8.*Ephb*(Ee + Eep)/me2
@@ -84,8 +82,8 @@ def _JIT_G(Ee, Eph, Ephb):
# _PhotonReactionWrapper ######################################################
-@nb.jit(cache=True)
-def _JIT_ph_rate_pair_creation(logy, logx, T):
+@jit
+def _JIT_ph_rate_pair_creation_ae(logy, logx, T):
# Return the integrand for the 2d integral in log-space
x, y = exp(logx), exp(logy)
@@ -105,7 +103,7 @@ def _JIT_ph_rate_pair_creation(logy, logx, T):
return ( 1./(pi**2) )/( exp(x/T) - 1. ) * y * sig_pc * (x*y)
-@nb.jit(cache=True)
+@jit
def _JIT_ph_kernel_inverse_compton(logx, E, Ep, T):
# Return the integrand for the 1d-integral in log-space; x = Ephb
x = exp(logx)
@@ -115,13 +113,13 @@ def _JIT_ph_kernel_inverse_compton(logx, E, Ep, T):
# _ElectronReactionWrapper ####################################################
-@nb.jit(cache=True)
+@jit
def _JIT_el_rate_inverse_compton(y, x, E, T):
# Return the integrand for the 2d-integral; y = Eph, x = Ephb
return _JIT_F(y, E, x)*x/( (pi**2.)*(exp(x/T) - 1.) )
-@nb.jit(cache=True)
+@jit
def _JIT_el_kernel_inverse_compton(logx, E, Ep, T):
# Define the integrand for the 1d-integral in log-space; x = Ephb
x = exp(logx)
@@ -129,21 +127,21 @@ def _JIT_el_kernel_inverse_compton(logx, E, Ep, T):
return _JIT_F(Ep+x-E, Ep, x)*( x/(pi**2) )/( exp(x/T) - 1. ) * x
-@nb.jit(cache=True)
-def _JIT_el_kernel_pair_creation(logx, E, Ep, T):
+@jit
+def _JIT_el_kernel_pair_creation_ae(logx, E, Ep, T):
# Define the integrand for the 1d-integral in log-space; x = Ephb
x = exp(logx)
return _JIT_G(E, Ep, x)/( (pi**2.)*(exp(x/T) - 1.) ) * x
-@nb.jit(cache=True)
+@jit
def _JIT_dsdE_Z2(Ee, Eph):
# Define the energies (here: nucleon is at rest)
Em = Ee # E_-
Ep = Eph - Ee # E_+
- # Define the various parameters that enter the x-section
+ # Define the various parameters that enter the cross-section
pm = sqrt(Em*Em - me2) # p_-
pp = sqrt(Ep*Ep - me2) # p_+
@@ -168,74 +166,77 @@ def _JIT_dsdE_Z2(Ee, Eph):
# SpectrumGenerator ###########################################################
-@nb.jit(cache=True)
+@jit
def _JIT_set_spectra(F, i, Fi, cond=False):
F[:, i] = Fi
# In the strongly compressed regime, manually
# set the photon spectrum to zero in order to
# avoid floating-point errors
- if cond: F[0, i] = 0.
+ if cond:
+ F[0, i] = 0.
-@nb.jit(cache=True)
-def _JIT_solve_cascade_equation(E_rt, G, K, E0, S0, Sc, T):
+@jit
+def _JIT_solve_cascade_equation(E_grid, G, K, S0, SC, T):
# Extract the number of particle species...
NX = len(G)
- # ...and the number of points in energy.
- NE = len(E_rt)
+ # ...and the number of energy points
+ NE = len(E_grid)
- dy = log(E_rt[-1]/Emin)/(NE-1)
+ dy = log(E_grid[-1]/Emin)/(NE-1)
# Generate the grid for the different spectra
- # First index: X = photon, electron, positron
- F_rt = np.zeros( (NX, NE) )
+ # 1. index: X = photon, electron, positron
+ # 2. index: Position in the energy grid
+ F_grid = np.zeros( (NX, NE) )
+
+ # Calculate F_X(E_0), last index NE-1
+ FX_E0 = np.array([
+ SC[X,-1]/G[X,-1] + np.sum(K[X,:,-1,-1]*S0[:]/(G[:,-1]*G[X,-1])) for X in range(NX)
+ ])
+ # -->
+ _JIT_set_spectra(F_grid, -1, FX_E0)
- # Calculate F_X(E_S), NE-1
- _JIT_set_spectra(F_rt, -1, np.array([
- Sc[X,-1]/G[X,-1] + np.sum(K[X,:,-1,-1]*S0[:]/(G[:,-1]*G[X,-1])) for X in range(NX)
- ]))
# Loop over all energies
i = (NE - 1) - 1 # start at the second to last index, NE-2
- while i >= 0:
+ while i >= 0: # Counting down
B = np.zeros( (NX, NX) )
a = np.zeros( (NX, ) )
+ I = np.identity(NX)
# Calculate the matrix B and the vector a
for X in range(NX):
- # Calculate B
- B[X,:] = .5*dy*E_rt[i]*K[X,:,i,i]/G[X,i]
+ # Calculate B, : <--> Xp
+ B[X,:] = -.5*dy*E_grid[i]*K[X,:,i,i] + G[X,i]*I[X,:]
# Calculate a
- a[X] = Sc[X,i]/G[X,i]
-
- a0 = K[X,:,i,-1]*S0[:]/G[:,-1] + .5*dy*E_rt[-1]*K[X,:,i,-1]*F_rt[:,-1]
- for j in range(i+1, NE-1): # Goes to NE-2
- a0 += dy*E_rt[j]*K[X,:,i,j]*F_rt[:,j]
-
- for a0X in a0:
- a[X] += a0X/G[X,i]
-
- # Solve the system of linear equations for F
- _JIT_set_spectra(F_rt, i,
- np.linalg.solve(np.identity(NX)-B, a)
+ a[X] = SC[X,i]
+ for Xp in range(NX):
+ a[X] += K[X,Xp,i,-1]*S0[Xp]/G[Xp,-1] + .5*dy*E_grid[-1]*K[X,Xp,i,-1]*F_grid[Xp,-1]
+ for j in range(i+1, NE-1): # Goes from i+1 to NE-2
+ a[X] += dy*E_grid[j]*K[X,Xp,i,j]*F_grid[Xp,j]
+
+ # Solve the system of linear equations of the form BF = a
+ _JIT_set_spectra(F_grid, i,
+ np.linalg.solve(B, a)
)
i -= 1
# Remove potential zeros
- F_rt = F_rt.reshape( NX*NE )
- for i, f in enumerate(F_rt):
+ F_grid = F_grid.reshape( NX*NE )
+ for i, f in enumerate(F_grid):
if f < approx_zero:
- F_rt[i] = approx_zero
- F_rt = F_rt.reshape( (NX, NE) )
+ F_grid[i] = approx_zero
+ F_grid = F_grid.reshape( (NX, NE) )
- # Define the result array...
- res = np.zeros( (NX+1, NE) )
+ # Define the output array...
+ sol = np.zeros( (NX+1, NE) )
# ...and fill it
- res[0 , :] = E_rt
- res[1:NX+1, :] = F_rt
+ sol[0 , :] = E_grid
+ sol[1:NX+1, :] = F_grid
- return res
+ return sol
###############################################################################
@@ -243,9 +244,11 @@ def _JIT_solve_cascade_equation(E_rt, G, K, E0, S0, Sc, T):
class _ReactionWrapperScaffold(object):
- def __init__(self, Y0, eta, db):
- self._sY0 = Y0
- self._sEta = eta
+ def __init__(self, ii, db):
+ self._sII = ii
+
+ self._sY0 = self._sII.bbn_abundances_0()
+ self._sEta = self._sII.parameter("eta")
self._sRateDb = db
@@ -270,17 +273,8 @@ def _nNZ2(self, T):
class _PhotonReactionWrapper(_ReactionWrapperScaffold):
- def __init__(self, Y0, eta, db):
- super(_PhotonReactionWrapper, self).__init__(Y0, eta, db)
-
-
- # CONTINUOUS ENERGY LOSS ##################################################
- # E is the energy of the loosing particle
- # T is the temperature of the background photons
-
- # TOTAL CONTINUOUS ENERGY LOSS ############################################
- def total_eloss(E, T):
- return 0.
+ def __init__(self, ii, db):
+ super(_PhotonReactionWrapper, self).__init__(ii, db)
# RATES ###################################################################
@@ -303,7 +297,7 @@ def _rate_compton(self, E, T):
return ( 2.*pi*(re**2.)/x ) * self._ne(T) * ( (1. - 4./x - 8./(x**2.))*log(1.+x) + .5 + 8./x - 1./(2.*(1.+x)**2.) )
- # BETHE-HEITLER PAIR PRODUCTION ###########################################
+ # BETHE-HEITLER PAIR CREATION #############################################
def _rate_bethe_heitler(self, E, T):
# For small energies, the rate can be approximated by a constant
# (cf. 'hep-ph/0604251') --- NOT USED HERE
@@ -338,8 +332,8 @@ def _rate_bethe_heitler(self, E, T):
)
- # DOUBLE PHOTON PAIR PRODUCTION ###########################################
- def _rate_pair_creation(self, E, T):
+ # DOUBLE PHOTON TO ELECTRON POSITRON PAIR CREATION ########################
+ def _rate_pair_creation_ae(self, E, T):
# In general, the threshold is E ~ me^2/(22*T)
        # However, here we use a slightly smaller threshold
# in order to guarantee a smooth transition
@@ -356,7 +350,7 @@ def _rate_pair_creation(self, E, T):
# Perform the integration in log-log space
# The limits for s are always in ascending order,
# i.e. 4*me2 < 4*E*x, since x > me2/E
- I_fso_E2 = dblquad(_JIT_ph_rate_pair_creation, log(llim), log(ulim), \
+ I_fso_E2 = dblquad(_JIT_ph_rate_pair_creation_ae, log(llim), log(ulim), \
lambda logx: log(4.*me2), lambda logx: log(4.*E) + logx, \
epsrel=eps, epsabs=0, args=(T,)
)
@@ -364,20 +358,20 @@ def _rate_pair_creation(self, E, T):
return I_fso_E2[0]/( 8.*E**2. )
- def _rate_pair_creation_db(self, E, T):
+ def _rate_pair_creation_ae_db(self, E, T):
if E < me2/(50.*T):
return 0.
E_log, T_log = log10(E), log10(T)
if ( self._sRateDb is None ) or ( not in_rate_db(E_log, T_log) ):
- return self._rate_pair_creation(E, T)
+ return self._rate_pair_creation_ae(E, T)
- return interp_rate_db(self._sRateDb, 'ph:rate_pair_creation', E_log, T_log)
+ return interp_rate_db(self._sRateDb, 'ph:rate_pair_creation_ae', E_log, T_log)
# TOTAL RATE ##############################################################
def total_rate(self, E, T):
- return self._rate_photon_photon(E, T) + self._rate_compton(E, T) + self._rate_bethe_heitler(E, T) + self._rate_pair_creation_db(E, T)
+ return self._rate_photon_photon(E, T) + self._rate_compton(E, T) + self._rate_bethe_heitler(E, T) + self._rate_pair_creation_ae_db(E, T)
# INTEGRAL KERNELS ########################################################
@@ -409,7 +403,7 @@ def _kernel_compton(self, E, Ep, T):
# INVERSE COMPTON SCATTERING ##############################################
- @cached_member
+ @cached
def _kernel_inverse_compton(self, E, Ep, T):
# Incorporate the non-generic integration limit as
# the algorithm requires Ep > E and not Ep > E + me
@@ -443,13 +437,16 @@ def _kernel_inverse_compton(self, E, Ep, T):
# TOTAL INTEGRAL KERNEL ####################################################
def total_kernel_x(self, E, Ep, T, X):
- if X == 0: return self._kernel_photon_photon(E, Ep, T) + self._kernel_compton(E, Ep, T)
+ if X == 0:
+ return self._kernel_photon_photon(E, Ep, T) + self._kernel_compton(E, Ep, T)
# Photon -> Photon
- if X == 1: return self._kernel_inverse_compton(E, Ep, T)
+ if X == 1:
+ return self._kernel_inverse_compton(E, Ep, T)
# Electron -> Photon
- if X == 2: return self._kernel_inverse_compton(E, Ep, T)
+ if X == 2:
+ return self._kernel_inverse_compton(E, Ep, T)
# Positron -> Photon
print_error(
@@ -458,10 +455,10 @@ def total_kernel_x(self, E, Ep, T, X):
)
-class _ElectronReactionWrapper(_ReactionWrapperScaffold):
+class _AbstractElectronReactionWrapper(_ReactionWrapperScaffold, metaclass=ABCMeta):
- def __init__(self, Y0, eta, db):
- super(_ElectronReactionWrapper, self).__init__(Y0, eta, db)
+ def __init__(self, ii, db):
+ super(_AbstractElectronReactionWrapper, self).__init__(ii, db)
# RATES ###################################################################
@@ -469,7 +466,7 @@ def __init__(self, Y0, eta, db):
# T is the temperature of the background photons
# INVERSE COMPTON SCATTERING ##############################################
- @cached_member
+ @cached
def _rate_inverse_compton(self, E, T):
# Define the upper limit for the integration over x
ulim = min( E - me2/(4.*E), Ephb_T_max*T )
@@ -507,11 +504,11 @@ def total_rate(self, E, T):
# T is the temperature of the background photons
# INVERSE COMPTON SCATTERING ##############################################
- @cached_member
+ @cached
def _kernel_inverse_compton(self, E, Ep, T):
# E == Ep leads to a divergence in
# the Bose-Einstein distribution
- # TODO ???
+ # TODO: Check if this can be handled any better
if E == Ep:
return 0.
@@ -544,29 +541,13 @@ def _kernel_inverse_compton(self, E, Ep, T):
# COMPTON SCATTERING ######################################################
+ @abstractmethod
def _kernel_compton(self, E, Ep, T):
- # Perform a subsitution of the parameters.
- # Compared to the formula for photons, only
- # the arguments of the cross-section are different
- E_s = Ep + me - E # E , substituted
- Ep_s = Ep # Ep, substituted
-
- # Use the same formula as in case of photons with
- # E -> E_s
- # Ep -> Ep_s
- # Check that the energies do not exceed the 'Compton edge'
- # ATTENTION: This condition is missing in some other papers
- if Ep_s/(1. + 2.*Ep_s/me) > E_s:
- return 0.
-
- # ATTENTION:
- # If the last term is + 2.*me*(1./E_s - 1./Ep_s), Serpico
- # If the last term is - 2.*me*(1./E_s - 1./Ep_s), correct
- return pi*(re**2.)*me/(Ep_s**2.) * self._ne(T) * ( Ep_s/E_s + E_s/Ep_s + (me/E_s - me/Ep_s)**2. - 2.*me*(1./E_s - 1./Ep_s) )
+ pass
# BETHE_HEITLER PAIR CREATION #############################################
- @cached_member
+ @cached
def _kernel_bethe_heitler(self, E, Ep, T):
# Incorporate the non-generic integration limit as
# the algorithm requires Ep > E and not Ep > E + me
@@ -577,9 +558,9 @@ def _kernel_bethe_heitler(self, E, Ep, T):
return self._nNZ2(T)*_JIT_dsdE_Z2(E, Ep)
- # DOUBLE PHOTON PAIR CREATION #############################################
- @cached_member
- def _kernel_pair_creation(self, E, Ep, T):
+ # DOUBLE PHOTON TO ELECTRON POSITRON PAIR CREATION ########################
+ @cached
+ def _kernel_pair_creation_ae(self, E, Ep, T):
# In general, the threshold is Ep >~ me^2/(22*T)
        # However, here we use a slightly smaller threshold
        # in accordance with the implementation we use in
@@ -612,20 +593,65 @@ def _kernel_pair_creation(self, E, Ep, T):
return 0.
# Perform the integration in log space
- I_fG_E2 = quad(_JIT_el_kernel_pair_creation, log(llim), log(ulim), epsrel=eps, epsabs=0, args=(E, Ep, T))
+ I_fG_E2 = quad(_JIT_el_kernel_pair_creation_ae, log(llim), log(ulim), epsrel=eps, epsabs=0, args=(E, Ep, T))
return 0.25*pi*(alpha**2.)*me2*I_fG_E2[0]/(Ep**3.)
+ # TOTAL INTEGRAL KERNEL ####################################################
+ @abstractmethod
+ def total_kernel_x(self, E, Ep, T, X):
+ pass
+
+
+class _ElectronReactionWrapper(_AbstractElectronReactionWrapper):
+
+ def __init__(self, ii, db):
+ super(_ElectronReactionWrapper, self).__init__(ii, db)
+
+
+ # INTEGRAL KERNELS ########################################################
+ # E is the energy of the outgoing particle
+ # Ep is the energy of the incoming particle
+ # T is the temperature of the background photons
+
+ # [...]
+
+ # COMPTON SCATTERING ######################################################
+ def _kernel_compton(self, E, Ep, T):
+        # Perform a substitution of the parameters.
+ # Compared to the formula for photons, only
+ # the arguments of the cross-section are different
+ E_s = Ep + me - E # E , substituted
+ Ep_s = Ep # Ep, substituted
+
+ # Use the same formula as in case of photons with
+ # E -> E_s
+ # Ep -> Ep_s
+ # Check that the energies do not exceed the 'Compton edge'
+ # ATTENTION: This condition is missing in some other papers
+ if Ep_s/(1. + 2.*Ep_s/me) > E_s:
+ return 0.
+
+ # ATTENTION:
+ # If the last term is + 2.*me*(1./E_s - 1./Ep_s), Serpico
+ # If the last term is - 2.*me*(1./E_s - 1./Ep_s), correct
+ return pi*(re**2.)*me/(Ep_s**2.) * self._ne(T) * ( Ep_s/E_s + E_s/Ep_s + (me/E_s - me/Ep_s)**2. - 2.*me*(1./E_s - 1./Ep_s) )
+
+ # [...]
+
# TOTAL INTEGRAL KERNEL ####################################################
def total_kernel_x(self, E, Ep, T, X):
- if X == 0: return self._kernel_compton(E, Ep, T) + self._kernel_bethe_heitler(E, Ep, T) + self._kernel_pair_creation(E, Ep, T)
+ if X == 0:
+ return self._kernel_compton(E, Ep, T) + self._kernel_bethe_heitler(E, Ep, T) + self._kernel_pair_creation_ae(E, Ep, T)
# Photon -> Electron
- if X == 1: return self._kernel_inverse_compton(E, Ep, T)
+ if X == 1:
+ return self._kernel_inverse_compton(E, Ep, T)
# Electron -> Electron
- if X == 2: return 0.
+ if X == 2:
+ return 0.
# Positron -> Electron
print_error(
@@ -634,24 +660,10 @@ def total_kernel_x(self, E, Ep, T, X):
)
-class _PositronReactionWrapper(object):
-
- def __init__(self, Y0, eta, db):
- self._sER = _ElectronReactionWrapper(Y0, eta, db)
+class _PositronReactionWrapper(_AbstractElectronReactionWrapper):
-
- # RATES ###################################################################
- # E is the energy of the incoming particle
- # T is the temperature of the background photons
-
- # INVERSE COMPTON SCATTERING ##############################################
- def _rate_inverse_compton_db(self, E, T):
- return self._sER._rate_inverse_compton_db(E, T)
-
-
- # TOTAL RATE ##############################################################
- def total_rate(self, E, T):
- return self._rate_inverse_compton_db(E, T)
+ def __init__(self, ii, db):
+ super(_PositronReactionWrapper, self).__init__(ii, db)
# INTEGRAL KERNELS ########################################################
@@ -659,36 +671,27 @@ def total_rate(self, E, T):
# Ep is the energy of the incoming particle
# T is the temperature of the background photons
- # INVERSE COMPTON SCATTERING ##############################################
- def _kernel_inverse_compton(self, E, Ep, T):
- return self._sER._kernel_inverse_compton(E, Ep, T)
-
+ # [...]
# COMPTON SCATTERING ######################################################
def _kernel_compton(self, E, Ep, T):
- # There are no thermal positrons
+ # There are (almost) no thermal positrons
return 0.
-
- # BETHE_HEITLER PAIR CREATION #############################################
- def _kernel_bethe_heitler(self, E, Ep, T):
- return self._sER._kernel_bethe_heitler(E, Ep, T)
-
-
- # DOUBLE PHOTON PAIR CREATION #############################################
- def _kernel_pair_creation(self, E, Ep, T):
- return self._sER._kernel_pair_creation(E, Ep, T)
-
+ # [...]
# TOTAL INTEGRAL KERNEL ####################################################
def total_kernel_x(self, E, Ep, T, X):
- if X == 0: return self._kernel_compton(E, Ep, T) + self._kernel_bethe_heitler(E, Ep, T) + self._kernel_pair_creation(E, Ep, T)
+ if X == 0:
+ return self._kernel_compton(E, Ep, T) + self._kernel_bethe_heitler(E, Ep, T) + self._kernel_pair_creation_ae(E, Ep, T)
# Photon -> Positron
- if X == 1: return 0.
+ if X == 1:
+ return 0.
# Electron -> Positron
- if X == 2: return self._kernel_inverse_compton(E, Ep, T)
+ if X == 2:
+ return self._kernel_inverse_compton(E, Ep, T)
# Positron -> Positron
print_error(
@@ -697,48 +700,22 @@ def total_kernel_x(self, E, Ep, T, X):
)
-# TODO: Not yet fully implemented
-# Goal is ACROPOLIS v1.3
-class _MuonReactionWrapper(_ReactionWrapperScaffold):
-
- # RATES ###################################################################
- # E is the energy of the incoming particle
- # T is the temperature of the background photons
-
- # MUON DECAY ##############################################################
- def _rate_muon_decay(self, E, T):
- return hbar*mm/(tau_m*E)
-
-
- # INVERSE COMPTON SCATTERING ##############################################
- def _rate_inverse_compton(self, E, T):
- return 0.
-
-
- # TOTAL RATE ##############################################################
- def total_rate(self, E, T):
- return self._rate_inverse_compton(E, T) + self._rate_muon_decay(E, T)
-
-
class SpectrumGenerator(object):
- def __init__(self, Y0, eta):
+ def __init__(self, ii):
# Extract the data from the databases; If there is
# no data in the folder 'data/', db = (None, None)
db = import_data_from_db()
- # Define a dictionary containing the BBN parameter
- self._sY0 = Y0
-
# Define a dictionary containing all reaction wrappers
self._sRW = {
- 0: _PhotonReactionWrapper (self._sY0, eta, db),
- 1: _ElectronReactionWrapper(self._sY0, eta, db),
- 2: _PositronReactionWrapper(self._sY0, eta, db)
+ 0: _PhotonReactionWrapper (ii, db),
+ 1: _ElectronReactionWrapper(ii, db),
+ 2: _PositronReactionWrapper(ii, db)
}
# Set the number of particle species (in the cascade)
- self._sNX = 1 + 2*FX
+ self._sNX = 3
def _rate_x(self, X, E, T):
@@ -753,41 +730,48 @@ def rate_photon(self, E, T):
return self._rate_x(0, E, T)
- def get_spectrum(self, E0, S0, Sc, T, allX=False):
+ def get_spectrum(self, E0, S0f, SCf, T, allX=False):
# Define the dimension of the grid
- # as defined in 'params.py'...
+ # from the params in 'params.py'...
NE = int(log10(E0/Emin)*NE_pd)
- # ... but not less than NE_min points
+ # ... but do not use less than NE_min
+ # points
NE = max(NE, NE_min)
+ # Save the dimension of the species grid
+ NX = self._sNX
+
# Generate the grid for the energy
- E_rt = np.logspace(log(Emin), log(E0), NE, base=np.e)
+ E_grid = np.logspace(log(Emin), log(E0), NE, base=np.e)
+
+ # Generate the grid for the different species
+ X_grid = np.arange(NX)
# Generate the grid for the rates
- G = np.array([[self._rate_x(X, E, T) for E in E_rt] for X in range(self._sNX)])
+ G = np.array([[self._rate_x(X, E, T) for E in E_grid] for X in X_grid])
# first index: X, second index according to energy E
# Generate the grid for the kernels
- K = np.array([[[[self._kernel_x_xp(X, Xp, E, Ep, T) if Ep >= E else 0. for Ep in E_rt] for E in E_rt] for Xp in range(self._sNX)] for X in range(self._sNX)])
+ K = np.array([[[[self._kernel_x_xp(X, Xp, E, Ep, T) if Ep >= E else 0. for Ep in E_grid] for E in E_grid] for Xp in X_grid] for X in X_grid])
# first index: X, second index: Xp
# third index according to energy E
# fourth index according to energy Ep;
# For Ep < E, the kernel is simply 0.
# Generate the grids for the source terms
- # injection + final-state radiation
- S0 = np.array([S(T) for S in S0])
- Sc = np.array([[ScX(E, T) for E in E_rt] for ScX in Sc])
+ # monochromatic + continuous
+ S0 = np.array([ S0X(T) for S0X in S0f])
+ SC = np.array([[SCX(E, T) for E in E_grid] for SCX in SCf])
# Calculate the spectra by solving
# the cascade equation
- res = _JIT_solve_cascade_equation(E_rt, G, K, E0, S0, Sc, T)
+ sol = _JIT_solve_cascade_equation(E_grid, G, K, S0, SC, T)
- # 'res' always has at least two columns
- return res[0:2,:] if allX == False else res
+ # 'sol' always has at least two columns
+ return sol[0:2,:] if not allX else sol
- def get_universal_spectrum(self, E0, S0, Sc, T, offset=0.):
+ def get_universal_spectrum(self, E0, S0f, SCf, T, offset=0.):
# Define EC and EX as in 'astro-ph/0211258'
EC = me2/(22.*T)
EX = me2/(80.*T)
@@ -802,26 +786,27 @@ def get_universal_spectrum(self, E0, S0, Sc, T, offset=0.):
NE = max(NE, NE_min)
# Generate the grid for the energy
- E_rt = np.logspace(log(Emin), log(E0), NE, base=np.e)
+ E_grid = np.logspace(log(Emin), log(E0), NE, base=np.e)
# Generate the grid for the photon spectrum
- F_rt = np.zeros(NE)
+ F_grid = np.zeros(NE)
# Calculate the spectrum for the different energies
- # TODO: Perform integration
- S0N = lambda T: sum(S0X(T) for S0X in S0)
- for i, E in enumerate(E_rt):
+        # TODO: Incorporate the continuous source terms into the
+        # normalization by integrating them over the energy
+ SN = lambda T: sum(S0X(T) for S0X in S0f) # Normalization
+ for i, E in enumerate(E_grid):
if E < EX:
- F_rt[i] = S0N(T) * K0 * (EX/E)**1.5/self.rate_photon(E, T)
+ F_grid[i] = SN(T) * K0 * (EX/E)**1.5/self.rate_photon(E, T)
elif E >= EX and E <= (1. + offset)*EC: # an offset enables better interpolation
- F_rt[i] = S0N(T) * K0 * (EX/E)**2.0/self.rate_photon(E, T)
+ F_grid[i] = SN(T) * K0 * (EX/E)**2.0/self.rate_photon(E, T)
# Remove potential zeros
- F_rt[F_rt < approx_zero] = approx_zero
+ F_grid[F_grid < approx_zero] = approx_zero
- # Define the result array...
- res = np.zeros( (2, NE) )
+ # Define the output array...
+ sol = np.zeros( (2, NE) )
# ...and fill it
- res[0, :] = E_rt
- res[1, :] = F_rt
+ sol[0, :] = E_grid
+ sol[1, :] = F_grid
- return res
+ return sol
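With this refactoring, SpectrumGenerator is constructed from an InputInterface instead of (Y0, eta), and get_spectrum takes one monochromatic source term S0X(T) and one continuous source term SCX(E, T) per cascade species (photon, electron, positron). A hedged sketch of the new call pattern; the numerical values and trivial source terms below are placeholders:

    # hypothetical driver code for the refactored SpectrumGenerator
    from acropolis.input import InputInterface, locate_sm_file
    from acropolis.cascade import SpectrumGenerator

    ii  = InputInterface(locate_sm_file())  # default SM input data
    gen = SpectrumGenerator(ii)

    E0  = 1e3    # injection energy in MeV (placeholder)
    T   = 1e-3   # temperature in MeV (placeholder)
    S0f = [lambda T: 1e-10, lambda T: 0.,    lambda T: 0.   ]  # monochromatic sources
    SCf = [lambda E, T: 0., lambda E, T: 0., lambda E, T: 0.]  # continuous sources

    spec = gen.get_spectrum(E0, S0f, SCf, T)  # rows: energy grid, photon spectrum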
diff --git a/acropolis/data/INFO b/acropolis/data/INFO
new file mode 100644
index 0000000..b52840e
--- /dev/null
+++ b/acropolis/data/INFO
@@ -0,0 +1,3 @@
+The abundance file in alterbbn_v2_2_failsafe_12.tar.gz has been generated with
+
+./abundances.x 12 6.137
diff --git a/acropolis/data/alterbbn_v2_2_failsafe_12.tar.gz b/acropolis/data/alterbbn_v2_2_failsafe_12.tar.gz
new file mode 100644
index 0000000..97cde96
Binary files /dev/null and b/acropolis/data/alterbbn_v2_2_failsafe_12.tar.gz differ
diff --git a/acropolis/data/alterbbn_v2_2_nr_failsafe_12.tar.gz b/acropolis/data/alterbbn_v2_2_nr_failsafe_12.tar.gz
new file mode 100644
index 0000000..f8feeca
Binary files /dev/null and b/acropolis/data/alterbbn_v2_2_nr_failsafe_12.tar.gz differ
diff --git a/acropolis/data/parthenope_v3_0.tar.gz b/acropolis/data/parthenope_v3_0.tar.gz
new file mode 100644
index 0000000..c788e6b
Binary files /dev/null and b/acropolis/data/parthenope_v3_0.tar.gz differ
diff --git a/acropolis/data/sm.tar.gz b/acropolis/data/sm.tar.gz
deleted file mode 100644
index 97cde96..0000000
Binary files a/acropolis/data/sm.tar.gz and /dev/null differ
diff --git a/acropolis/data/sm.tar.gz b/acropolis/data/sm.tar.gz
new file mode 120000
index 0000000..8a092e8
--- /dev/null
+++ b/acropolis/data/sm.tar.gz
@@ -0,0 +1 @@
+parthenope_v3_0.tar.gz
\ No newline at end of file
diff --git a/acropolis/data/sm.v1.2.tar.gz b/acropolis/data/sm_v1_2_0.tar.gz
similarity index 100%
rename from acropolis/data/sm.v1.2.tar.gz
rename to acropolis/data/sm_v1_2_0.tar.gz
diff --git a/acropolis/data/sm_v1_2_2.tar.gz b/acropolis/data/sm_v1_2_2.tar.gz
new file mode 100644
index 0000000..97cde96
Binary files /dev/null and b/acropolis/data/sm_v1_2_2.tar.gz differ
diff --git a/acropolis/db.py b/acropolis/db.py
index 6cdd919..9199e69 100644
--- a/acropolis/db.py
+++ b/acropolis/db.py
@@ -4,15 +4,16 @@
import pickle
# os
from os import path
-# numba
-import numba as nb
# time
from time import time
+# jit
+from acropolis.jit import jit
# pprint
from acropolis.pprint import print_info
+# flags
+import acropolis.flags as flags
# params
-from acropolis.params import usedb
from acropolis.params import Emin_log, Emax_log, Enum
from acropolis.params import Tmin_log, Tmax_log, Tnum
@@ -22,7 +23,7 @@ def import_data_from_db():
db_file = path.join(pkg_dir, "data", "rates.db.gz")
ratedb = None
- if not usedb or not path.exists(db_file):
+ if not flags.usedb or not path.exists(db_file):
return ratedb
start_time = time()
@@ -62,17 +63,17 @@ def in_kernel_db(E_log, Ep_log, T_log):
return False
-@nb.jit(cache=True)
+@jit
def _get_E_log(i):
return Emin_log + (Emax_log - Emin_log)*i/(Enum - 1)
-@nb.jit(cache=True)
+@jit
def _get_T_log(i):
return Tmin_log + (Tmax_log - Tmin_log)*i/(Tnum - 1)
-@nb.jit(cache=True)
+@jit
def _get_E_index(E_log):
index = int( ( Enum - 1 ) * ( E_log - Emin_log ) / ( Emax_log - Emin_log ) )
@@ -80,7 +81,7 @@ def _get_E_index(E_log):
return index if index != Enum - 1 else index - 1
-@nb.jit(cache=True)
+@jit
def _get_T_index(T_log):
index = int( ( Tnum - 1 ) * ( T_log - Tmin_log ) / ( Tmax_log - Tmin_log ) )
@@ -88,12 +89,12 @@ def _get_T_index(T_log):
return index if index != Tnum - 1 else index - 1
-@nb.jit(cache=True)
+@jit
def interp_rate_db(rate_db, id, E_log, T_log):
# Extract the correct index for the datafile
c = {
- 'ph:rate_pair_creation' : 0,
- 'el:rate_inverse_compton': 1
+ 'ph:rate_pair_creation_ae': 0,
+ 'el:rate_inverse_compton' : 1
}[id]
# Calculate the respective indices in the interpolation file
@@ -104,7 +105,6 @@ def interp_rate_db(rate_db, id, E_log, T_log):
x , y = T_log, E_log
x0, y0 = _get_T_log(iT ), _get_E_log(iE )
x1, y1 = _get_T_log(iT+1), _get_E_log(iE+1)
- xd, yd = (x-x0)/(x1-x0), (y-y0)/(y1-y0)
# Define the index function
k = lambda jT, jE: jT*Enum + jE
@@ -124,14 +124,11 @@ def interp_rate_db(rate_db, id, E_log, T_log):
return 10.**( a0 + a1*x + a2*y + a3*x*y )
-@nb.jit(cache=True)
+@jit
def interp_kernel_db(kernel_db, id, E_log, Ep_log, T_log):
- c = {
- 'ph:kernel_inverse_compton': 0,
- 'el:kernel_pair_creation' : 1,
- 'el:kernel_inverse_compton': 2
- }[id]
+ raise NotImplementedError
+ """
# Calculate the respective indices in the interpolation file
iE, iEp, iT = _get_E_index(E_log), _get_E_index(Ep_log), _get_T_index(T_log)
@@ -180,3 +177,4 @@ def interp_kernel_db(kernel_db, id, E_log, Ep_log, T_log):
a7 = ( c000 - c001 - c010 + c011 - c100 + c101 + c110 - c111 )/d
return 10.**( a0 + a1*x + a2*y + a3*z + a4*x*y + a5*x*z + a6*y*z + a7*x*y*z )
+ """
diff --git a/acropolis/tmp/lhe.py b/acropolis/ext/__init__.py
similarity index 100%
rename from acropolis/tmp/lhe.py
rename to acropolis/ext/__init__.py
diff --git a/acropolis/ext/benchmarks.py b/acropolis/ext/benchmarks.py
new file mode 100644
index 0000000..13703e9
--- /dev/null
+++ b/acropolis/ext/benchmarks.py
@@ -0,0 +1,52 @@
+# functools
+from functools import partial
+
+# params
+from acropolis.params import me2
+from acropolis.params import pi
+
+# ext.models
+from acropolis.ext.models import ResonanceModel, estimate_tempkd_ee
+
+
+# Benchmark (1)
+def sigma_ee_b1(s, mchi, delta, gammad, gammav):
+ mx2 = mchi**2.
+ mx4 = mchi**4.
+
+ pref = 16. * pi * gammad * gammav / mx4 / (2. + delta)**4. / s**2.
+ # -->
+ return pref * mx2 * ( s*s - 2.*s*(mx2 - 3.*me2) + (me2 - mx2)**2. )
+
+
+# Benchmark (2)
+def sigma_ee_b2(s, mchi, delta, gammad, gammav):
+ mx2 = mchi**2.
+ mx4 = mchi**4.
+
+ me4 = me2*me2
+
+ pref = 16. * pi * gammad * gammav / mx4 / (2. + delta)**4. / s**2.
+ # -->
+ #return 3. * pref * ( 2.*(mx2 - me2)**4. - 8.*(mx4 - me4)*(mx2 - me2)*s \
+ # + (15.*mx4 + 26.*mx2*me2 + 15.*me4)*s*s - 14.*(mx2 + me2)*(s**3.) + 5.*(s**4.) ) / s
+ return .25* pref * ( 4.*(s**3.) - 10.*(s**2.)*(mx2 + me2) + s*(9.*mx4 + 22.*mx2*me2 + 9.*me4) - 4.*(mx4 - me4)*(mx2 - me2) + (mx2 - me2)**4./s )
+
+
+# Benchmark (3)
+def sigma_ee_b3(s, mchi, delta, gammad, gammav):
+ mx2 = pow(mchi, 2.)
+ mx4 = pow(mchi, 4.)
+
+ me4 = me2*me2
+
+ pref = 16. * pi * gammad * gammav / mx4 / (2. + delta)**4. / s**2.
+ # -->
+ #return 48. * pref * ( mx4 * (me2 + 3.*s) + (me2 - s)**2. * (me2 + 3.*s) - 2.*mx2 * (me2**2. - 4.*me2*s + 3.*s*s) )
+ return 4.5 * pref * ( mx4*(me2 + s) - 2.*mx2*(me2 - s)**2. + (me4 - s**2.)*(me2 - s) )
+
+
+# -->
+BenchmarkModel1 = partial(ResonanceModel, nd = 0., S = 1./2., tempkd = partial(estimate_tempkd_ee, sigma_ee=sigma_ee_b1)) # (1)
+BenchmarkModel2 = partial(ResonanceModel, nd = 0., S = 3./4., tempkd = partial(estimate_tempkd_ee, sigma_ee=sigma_ee_b2)) # (2)
+BenchmarkModel3 = partial(ResonanceModel, nd = 1., S = 3./2., tempkd = partial(estimate_tempkd_ee, sigma_ee=sigma_ee_b3)) # (3)
\ No newline at end of file
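The three benchmarks are ResonanceModel instances with nd, S and the kinetic-decoupling estimate pre-filled via functools.partial, so only the remaining resonance parameters need to be supplied. A usage sketch with placeholder numbers (not recommended values):

    # hypothetical instantiation of a predefined benchmark model
    from acropolis.ext.benchmarks import BenchmarkModel1

    model = BenchmarkModel1(
        mchi=10.,     # DM mass in MeV (placeholder)
        delta=0.5,    # mass splitting (must be < 1)
        gammad=1e-3,  # coupling to the dark sector (< 1)
        gammav=1e-6   # coupling to the visible sector (< 1)
    )
    Yf = model.run_disintegration()  # run the photodisintegration calculation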
diff --git a/acropolis/ext/models.py b/acropolis/ext/models.py
new file mode 100644
index 0000000..8298f0d
--- /dev/null
+++ b/acropolis/ext/models.py
@@ -0,0 +1,309 @@
+# math
+from math import sqrt, exp, log
+# numpy
+import numpy as np
+# scipy
+from scipy.integrate import quad
+from scipy.optimize import root
+from scipy.special import kn
+
+# models
+from acropolis.models import AnnihilationModel
+# params
+from acropolis.params import me
+from acropolis.params import pi, pi2, zeta3
+from acropolis.params import eps
+# pprint
+from acropolis.pprint import print_error
+
+
+# https://stackoverflow.com/questions/1167617/in-python-how-do-i-indicate-im-overriding-a-method
+def overrides(interface_class):
+ def overrider(method):
+ assert(method.__name__ in dir(interface_class))
+
+ return method
+
+ return overrider
+
+
+def estimate_tempkd_ee(mchi, delta, gammad, gammav, nd, S, ii, sigma_ee):
+ fac = 200
+
+ # The integration kernel for calculating the
+ # thermally averaged cross-section for
+ # \chi e^\pm -> \chi e^\pm scattering
+ def _sigma_v_ee_kernel(logz, T):
+ z = exp(logz)
+ # -->
+ s = (z + me + mchi)**2. # sqrt_s = z + me + mchi
+ # -->
+ sqrt_s = sqrt(s)
+
+ sigma = sigma_ee(s=s, mchi=mchi, delta=delta, gammad=gammad, gammav=gammav)
+
+ bessel_term = 0
+
+ def _k1_apx_wo_exp(y):
+ return sqrt(pi/2) * ( y**0.5 + (3./8.) * y**1.5 - (15./128.) * y**2.5 + (105./1024.) * y**3.5)
+
+ def _k2_apx_wo_exp(y):
+ return sqrt(pi/2) * ( y**0.5 + (15./8.) * y**1.5 + (105./128.) * y**2.5 - (315./1024.) * y**3.5)
+
+ # Define the cutoff for when to Taylor expand the Bessel functions
+ xmax = 400
+
+ ye, ychi, ys = T/me, T/mchi, T/sqrt_s
+ # For mchi/T > xmax, we have sqrt_s/T > (me+mchi)/T > xmax
+ if me/T < xmax < mchi/T:
+ # Expand K1(sqrt_s/T) and K2(mchi/T)
+ # z + me = sqrt_s - mchi
+ bessel_term = exp( -(z + me)/T ) * _k1_apx_wo_exp(ys) / _k2_apx_wo_exp(ychi) / kn(2, me/T)
+ elif me/T > xmax:
+ # Expand K1(sqrt_s/T), K2(mchi/T) and K2(me/T)
+ # z = sqrt_s - me - mchi
+ bessel_term = exp( -z/T ) * _k1_apx_wo_exp(ys) / ( _k2_apx_wo_exp(ychi) * _k2_apx_wo_exp(ye) )
+ else:
+ # Expand nothing
+ bessel_term = kn(1, sqrt_s/T) / ( kn(2, me/T) * kn(2, mchi/T) )
+
+ # s = (z + me + mchi)^2 => ds/dz = 2(z + me + mchi) = 2 sqrt_s
+ return z * (2.*sqrt_s) * sigma * ( s - (me + mchi)**2. ) * ( s - (me - mchi)**2. ) * bessel_term / sqrt_s
+
+
+ # The thermally averaged cross-section for
+ # \chi e^\pm -> \chi e^\pm scattering
+ def _sigma_v_ee(T):
+ zmin = T/fac
+ zmax = fac*T
+
+ integral = quad(_sigma_v_ee_kernel, log(zmin), log(zmax), epsrel=eps, epsabs=0, limit=100, args=(T,))
+
+ return integral[0]/( 8. * me**2. * mchi**2. * T)
+
+
+ # The number density of e^\pm
+ def _nee(T):
+ xe = me/T
+
+ # 1. EQUILIBRIUM WITH VANISHING CHEM. POTENTIAL
+ f_kernel = lambda y, x: y * sqrt(y**2. - x**2.) / ( exp(y) + 1 ) if x <= y else 0. # x = me/T, y = Ee/T
+ # -->
+ f = quad(f_kernel, xe, fac, epsabs=0, epsrel=eps, args=(xe,))[0]
+ # -->
+ nee_1 = 4. * f * (T**3.) / ( 2. * pi2 )
+ # ne = ge * T^3 * quad(f) / 2 pi^2
+
+        # 2. EQUILIBRIUM WITH NON-VANISHING CHEM. POTENTIAL
+ Y = ii.bbn_abundances_0()
+ # -->
+ nee_2 = ( Y[1] + 2.*Y[5] ) * ii.parameter('eta') * ( 2.*zeta3/pi2 ) * (T**3.)
+
+ return max(nee_1, nee_2)
+
+
+ # Ncol * H ~ sig_v * _nee
+ def _tempkd_ee_root(logT):
+ T = exp( logT )
+
+ Ncol = max(1., mchi/T)
+
+ return log( _sigma_v_ee(T) * _nee(T) / ii.hubble_rate(T) / Ncol )
+
+ # -->
+ tempkd = exp( root(_tempkd_ee_root, 0.).x )
+
+    # Assume that kinetic decoupling happens no later
+    # than chemical decoupling at T ~ mchi/20
+ return min(tempkd, mchi/20.)
+
+
+# This model has been created in collaboration with Pieter Braat (pbraat@nikhef.nl)
+# When using this model, please cite arXiv:2406:XXXX
+class ResonanceModel(AnnihilationModel):
+ def __init__(self, mchi, delta, gammad, gammav, nd, tempkd, S=1, omegah2=0.12):
+
+ # CALL THE SUPER CONSTRUCTOR (OF ANNIHILATION_MODEL) ##################
+ #######################################################################
+
+ super(ResonanceModel, self).__init__(
+ # mchi, a , b , tempkd, bree, braa, omegah2
+ mchi, None, None, None , 1 , 0 , omegah2
+ # | tempkd will be set below
+ )
+
+ # CALCULATE TKD FROM THE GIVEN FUNCTION IF REQUESTED ##################
+ #######################################################################
+ if callable(tempkd):
+ self._sTkd = tempkd(
+ mchi=mchi, delta=delta, gammad=gammad, gammav=gammav, nd=nd, S=S, ii=self._sII
+ )
+ else:
+ self._sTkd = tempkd
+
+
+ # SPECIFY THE NEW PARAMETERS ##########################################
+ #######################################################################
+
+ # The mass splitting between the resonant
+ # particle and the dark-matter particle
+ # mr = mchi * ( 2 + delta )
+ self._sDelta = delta
+ # The couplings to the dark and to the
+ # visible sector
+ self._sGammad = gammad
+ self._sGammav = gammav
+ # The parameters to distinguish between
+ # s- (nd=0) and p-wave (nd=1) processes
+ self._sNd = nd
+ self._sNv = 1 # NOTE: Only electrons are allowed in the final state for now
+ # The symmetry factor for the annihilation cross-section
+ self._sS = S
+
+ # The mass of the resonant particle
+ self._sMR = self._sMchi * ( 2. + self._sDelta ) # in MeV
+ # The resonance momentum
+ self._sPR = self._sMchi * sqrt(self._sDelta) # in MeV
+ # The total decay width of the resonant particle
+ self._sWidth = self._decay_width(self._sPR) # in MeV
+
+
+        # CHECK THE INPUT PARAMETERS ##########################################
+ #######################################################################
+
+ self._check_input_parameters()
+
+
+ def _check_nwa(self, eps=0.1):
+ y = self._sWidth / self._sMR
+ # In the NWA limit, we have y -> 0, i.e. y < eps
+
+ if y < eps and self._sNd != 0:
+ return True
+
+ return False
+
+
+ def _check_input_parameters(self):
+ if self._sDelta > 1:
+ print_error(
+ "The mass splitting must be < 1. The calculation cannot be trusted.",
+ "acropolis.models.ResonanceModel._check_input_parameters"
+ )
+
+ if (self._sGammad >= 1 or self._sGammav >= 1):
+ print_error(
+ "The couplings must be small (< 1). The calculation cannot be trusted.",
+                "acropolis.models.ResonanceModel._check_input_parameters"
+ )
+
+ if self._sNd not in [0, 1]:
+ print_error(
+ "Currently only s-wave annihilations with 'nd = 0' and p-wave " + \
+ "annihilations with 'nd = 1' are supported.",
+ "acropolis.models.ResonanceModel._check_input_parameters"
+ )
+
+
+ # DEPENDENT QUANTITIES ##############################################################
+
+
+ # The total decay width of the resonant
+ # particle into dark-sector states
+ def _decay_width_d(self, p):
+ return self._sGammad * self._sMR * (p / self._sMchi)**(2.*self._sNd + 1.)
+
+
+ # The total decay width of the resonant
+ # particle into visible-sector states
+ def _decay_width_v(self, p):
+ return self._sGammav * self._sMR * (p / self._sMchi)**(2.*self._sNv + 1.)
+
+
+ # The total decay width of the resonant
+ # particle
+ def _decay_width(self, p):
+ return self._decay_width_d(p) + self._decay_width_v(p)
+
+
+    # The thermally averaged annihilation
+ # cross-section in the resonant regime
+ def _sigma_v_res(self, T):
+ x = self._sMchi/T
+
+ return 8. * self._sS * (pi*x)**1.5 * self._sGammad * self._sGammav * self._sMR**2. * self._sDelta**(self._sNd+.5) * exp(-self._sDelta*x) \
+ / self._sWidth / self._sMchi**3.
+
+
+ # The thermally averaged annihilation
+ # cross-section in the non-resonant regime
+ def _sigma_v_non_res(self, T):
+ x = self._sMchi/T
+
+ # Speed up the calculation: only nd = 0, 1 are allowed
+ # gamma(self._sNd+1.5)
+ gamma = {
+ 0: sqrt(pi) / 2., # \gamma(1.5)
+ 1: 3. * sqrt(pi) / 4. # \gamma(2.5)
+ }[self._sNd]
+
+ return 4. * self._sS * sqrt(pi) * x**(-self._sNd) * self._sGammad * self._sGammav * self._sMR**2. * gamma \
+ / ( self._sMchi**4. * self._sDelta**2. )
+
+
+ # The full (non-approximate) thermally
+ # averaged annihilation cross section
+ def _sigma_v_full(self, T):
+ x = self._sMchi/T
+
+ uR = self._sPR**2.
+ m2 = self._sMchi**2.
+
+ # Set the maximal exponent in the integral
+ # beyond which the integrand is cut off
+ exp_cutoff = 200.
+
+ # Define the prefactor
+ pref = 4. * x * sqrt(x*pi) * self._sS * self._sGammad * self._sGammav * (self._sMR/self._sMchi)**2.
+
+ # Define the integration kernel
+ def _sigma_v_full_kernel(log_u):
+ u = exp(log_u) # = p^2
+ sqrt_u = sqrt(u) # = p
+
+ width_u = self._decay_width(sqrt_u)
+
+ # | from log integration
+ return u * exp( -u*x/m2 ) * ( u/m2 )**(self._sNd+.5) / ( (u - uR)**2. + ( self._sMchi*width_u/2. )**2. )
+
+ # Calculate the upper integration limit
+ umax = exp_cutoff * m2 / x
+
+ uR_l, uR_h = uR*(1-eps), uR*(1+eps)
+
+        # uR_h/uR_l ~ 1.002
+ # This is still good with exp_cutoff
+ uR_l = min(uR_l, umax)
+
+ # Perform the integration in three steps
+ # BELOW RESONANCE
+ I1 = quad(
+ _sigma_v_full_kernel, -np.inf , log(uR_l), epsrel=eps, epsabs=0
+ )[0]
+ # AROUND RESONANCE
+ I2 = quad(
+ _sigma_v_full_kernel, log(uR_l), log(uR_h), epsrel=eps, epsabs=0, limit=100, points=(log(uR),)
+ )[0] if uR_l < umax else 0.
+ # ABOVE RESONANCE
+ I3 = quad(
+ _sigma_v_full_kernel, log(uR_h), log(umax), epsrel=eps, epsabs=0
+ )[0] if uR_l < umax else 0.
+
+ return pref*(I1+I2+I3)
+
+
+ @overrides(AnnihilationModel)
+ def _sigma_v(self, T):
+ return self._sigma_v_full(
+ self._dm_temperature(T)
+ )
\ No newline at end of file
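For orientation, the resonant limit implemented in _sigma_v_res above corresponds to the following expression (a direct transcription of the Python code into LaTeX, with x = m_\chi/T):

    \langle\sigma v\rangle_\mathrm{res}(x)
        = \frac{8\, S\, (\pi x)^{3/2}\, \gamma_d \gamma_v\, m_R^2\,
                \delta^{\,n_d + 1/2}\, e^{-\delta x}}{\Gamma\, m_\chi^3}

where m_R = m_\chi (2 + \delta) is the mass of the resonant particle, \Gamma its total width at the resonance momentum, and S the symmetry factor.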
diff --git a/acropolis/flags.py b/acropolis/flags.py
new file mode 100644
index 0000000..3c2567b
--- /dev/null
+++ b/acropolis/flags.py
@@ -0,0 +1,42 @@
+# ATTENTION !!!!!111elf
+# Only parameters that specify a default value are
+# meant to be changed by the user, i.e. everything
+# under FLAGS and ALGORITHM-SPECIFIC PARAMETERS
+
+
+# FLAGS #############################################################
+
+# If this flag is set to 'True',
+# the pregenerated databases
+# will be used to interpolate
+# the different reaction rates
+# Default: True
+usedb = True
+
+# If this flag is set to 'True',
+# additional output is printed
+# to the screen
+# Default: True
+verbose = True
+
+# If this flag is set to 'True',
+# additional debug information
+# is printed to the screen
+# Default: False
+debug = False
+
+# If this flag is set to 'True',
+# the universal spectrum is used
+# for all points in parameter space
+# ATTENTION:
+# Change with caution and only if
+# you know what you are doing.
+# World destruction possible!
+# Default: False
+universal = False
+
+
+def set_universal(value):
+ global universal
+
+ universal = value
\ No newline at end of file
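Because the flags now live in their own module and are read at call time (cf. the flags.usedb check in acropolis.db above and the flags.universal check in acropolis.models below), they can be toggled from a driver script before running a model. A minimal sketch:

    # hypothetical driver-side configuration of acropolis.flags
    import acropolis.flags as flags

    flags.usedb   = False      # skip the pregenerated rate databases
    flags.verbose = False      # silence INFO messages
    flags.set_universal(True)  # use the universal photon spectrum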
diff --git a/acropolis/info.py b/acropolis/info.py
index 421ece1..60eda5f 100644
--- a/acropolis/info.py
+++ b/acropolis/info.py
@@ -1,8 +1,8 @@
# The current version of ACROPOLIS
-version = "1.2.2"
+version = "1.3.0"
# The current dev version of ACROPOLIS
-dev_version = "1.2.2"
+dev_version = "2.0.0"
# The short description of ACROPOLIS
description = "A generiC fRamework fOr Photodisintegration Of LIght elementS"
diff --git a/acropolis/input.py b/acropolis/input.py
index f7a00a0..041d758 100644
--- a/acropolis/input.py
+++ b/acropolis/input.py
@@ -18,29 +18,38 @@
from acropolis.params import NY, NC
-def locate_sm_file():
+def locate_data_file(filename):
pkg_dir, _ = path.split(__file__)
- sm_file = path.join(pkg_dir, "data", "sm.tar.gz")
+ data_file = path.join(pkg_dir, "data", filename)
+
+ return data_file
+
+
+def locate_sm_file():
+ return locate_data_file("sm.tar.gz")
+
- return sm_file
+def input_data_from_dir(dirname):
+ cosmo_data = np.genfromtxt(dirname + "/cosmo_file.dat")
+ abund_data = np.genfromtxt(dirname + "/abundance_file.dat")
+ param_data = np.genfromtxt(dirname + "/param_file.dat", delimiter="=", dtype=None, encoding=None)
+
+ return InputData(cosmo_data, abund_data, param_data)
-def data_from_file(filename):
+def input_data_from_file(filename):
# Read the input file
tf, tc = tarfile.open(filename, "r:gz"), {}
# Extract the different files and
# store them in a dictionary
- for m in tf.getmembers(): tc[m.name] = tf.extractfile(m)
+ for m in tf.getmembers():
+ tc[m.name] = tf.extractfile(m)
# READ THE PREVIOUSLY GENERATED DATA
cosmo_data = np.genfromtxt(tc["cosmo_file.dat"] )
abund_data = np.genfromtxt(tc["abundance_file.dat"])
- param_data = np.genfromtxt(tc["param_file.dat"],
- delimiter="=",
- dtype=None,
- encoding=None
- )
+ param_data = np.genfromtxt(tc["param_file.dat"], delimiter="=", dtype=None, encoding=None)
return InputData(cosmo_data, abund_data, param_data)
@@ -82,10 +91,30 @@ def get_param_data(self):
class InputInterface(object):
- def __init__(self, input_data):
- # If input_data is a filename, extract the data first
- if type(input_data) == str:
- input_data = data_from_file(input_data)
+ def __init__(self, input_data, type="file"):
+ if type == "file":
+ if not isinstance(input_data, str):
+ raise ValueError(
+ "If 'type == file', 'input_data' must be a string"
+ )
+
+ input_data = input_data_from_file(input_data)
+ elif type == "dir" :
+ if not isinstance(input_data, str):
+ raise ValueError(
+                    "If 'type == dir', 'input_data' must be a string"
+ )
+
+ input_data = input_data_from_dir(input_data)
+ elif type == "raw" :
+ if not isinstance(input_data, InputData):
+ raise ValueError(
+ "If 'type == raw', 'input_data' must be an instance of InputData"
+ )
+ else:
+ raise ValueError(
+ "Unknown type for input data: Only 'file', 'dir' and 'raw' are supported"
+ )
# Extract the provided input data
self._sCosmoData = input_data.get_cosmo_data()
@@ -95,6 +124,9 @@ def __init__(self, input_data):
# Calculate the scale factor and add it
sf = np.exp( cumsimp(self._sCosmoData[:,0]/hbar, self._sCosmoData[:,4]) )
self._sCosmoData = np.column_stack( [self._sCosmoData, sf] )
+ # The corresponding index will be -1,
+ # irregardless of the number of additional
+ # columns in the cosmo-file
# Log the cosmo data for the interpolation
# ATTENTION: At this point we have to take the
@@ -143,23 +175,31 @@ def _check_data(self):
# 1. COSMO_DATA ###########################################################
def _find_index(self, x, x0):
- # Returns an index ix such that x0
- # lies between x[ix] and x[ix+1]
- ix = np.argmin( np.abs( x - x0 ) )
+ # Check if the array is in ascending
+        # or descending order
+ ascending = x[0] < x[1]
+ # -->
+ decending = not ascending
+
+ # Calculate the size of the array
+ size = self._sCosmoDataShp[0]
- # Check the edge of the array
- if ix == self._sCosmoDataShp[0] - 1:
- # In this case, the condition
- # below is always False
- # --> No additional -1
- ix -= 1
+ # Perform a binary search
+ left = 0
+ right = size - 1
- # If x0 is not between ix and ix+1,...
- if not (x[ix] <= x0 <= x[ix+1] or x[ix] >= x0 >= x[ix+1]):
- # ...it must be between ix-1 and ix
- ix -= 1
+ while left <= right:
+ mid = int(left + (right - left) / 2)
- return ix
+ if ((ascending and x[mid] <= x0 and (mid == size - 1 or x[mid + 1] > x0)) or (decending and x[mid] >= x0 and (mid == size - 1 or x[mid + 1] < x0))):
+ return mid
+
+ if ((ascending and x[mid] < x0) or (decending and x[mid] > x0)):
+ left = mid + 1
+ else:
+ right = mid - 1
+
+ return -1
def _interp_cosmo_data(self, val, xc, yc):
@@ -171,9 +211,15 @@ def _interp_cosmo_data(self, val, xc, yc):
val_log = log10(val)
- # Extract the index closest to 'val_log'
+ # Extract the index ix for which val_log
+ # is between x[ix] and x[ix+1]
ix = self._find_index(x, val_log)
+ if ix == -1 or not (x[ix] <= val_log <= x[ix+1] or x[ix] >= val_log >= x[ix+1]):
+ raise ValueError(
+ "Interpolation error: Index out of range"
+ )
+
m = (y[ix+1] - y[ix])/(x[ix+1] - x[ix])
b = y[ix] - m*x[ix]
@@ -208,7 +254,7 @@ def cosmo_column(self, yc, val, xc=1):
return self._interp_cosmo_data(val, xc, yc)
- def cosmo_range(self):
+ def temperature_range(self):
return ( min(self._sCosmoData[:,1]), max(self._sCosmoData[:,1]) )
@@ -217,6 +263,7 @@ def cosmo_range(self):
def bbn_abundances(self):
return self._sAbundData
+
def bbn_abundances_0(self):
return self._sAbundData[:,0]
@@ -225,3 +272,7 @@ def bbn_abundances_0(self):
def parameter(self, key):
return self._sParamData[key]
+
+
+ def param_keys(self):
+ return self._sParamData.keys()
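The InputInterface constructor now distinguishes three input types: a tarball (type="file", the default), a directory containing cosmo_file.dat, abundance_file.dat and param_file.dat (type="dir"), or a pre-built InputData object (type="raw"). A short sketch; the directory path is a placeholder:

    # hypothetical construction of InputInterface after this change
    from acropolis.input import InputInterface, locate_sm_file

    ii_tar = InputInterface(locate_sm_file())                 # type="file" (default)
    ii_dir = InputInterface("path/to/input_dir", type="dir")  # directory with the three .dat files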
diff --git a/acropolis/jit.py b/acropolis/jit.py
new file mode 100644
index 0000000..5e02e6d
--- /dev/null
+++ b/acropolis/jit.py
@@ -0,0 +1,14 @@
+_use_numba_jit = True
+
+# numba
+try:
+ import numba as nb
+except ImportError:
+ _use_numba_jit = False
+
+
+def _null_decorator(f):
+ return f
+
+
+jit = nb.njit(cache=True) if _use_numba_jit else _null_decorator
\ No newline at end of file
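The new acropolis.jit module makes numba optional: jit is nb.njit(cache=True) when numba can be imported and a no-op decorator otherwise, so decorated functions keep working (just slower) without it. A minimal sketch with an illustrative function:

    # hypothetical use of the jit decorator; same syntax with or without numba
    from acropolis.jit import jit

    @jit
    def kernel(x, y):
        # compiled by numba if available, plain Python otherwise
        return x*y + x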
diff --git a/acropolis/models.py b/acropolis/models.py
index c6b1edc..476901f 100644
--- a/acropolis/models.py
+++ b/acropolis/models.py
@@ -15,7 +15,8 @@
from acropolis.params import zeta3
from acropolis.params import hbar, c_si, me2, alpha, tau_t
from acropolis.params import Emin, NY
-from acropolis.params import universal
+# flags
+import acropolis.flags as flags
# pprint
from acropolis.pprint import print_info, print_warning
@@ -43,7 +44,7 @@ def run_disintegration(self):
# Print a warning if the injection energy
# is larger than 1GeV, as this might lead
# to wrong results
- if not universal and int( self._sE0 ) > 1e3:
+ if not flags.universal and int( self._sE0 ) > 1e3:
print_warning(
"Injection energy > 1 GeV. Results cannot be trusted.",
"acropolis.models.AbstractMode.run_disintegration"
@@ -52,7 +53,7 @@ def run_disintegration(self):
# Print a warning if the temperature range
# of the model is not covered by the data
# in cosmo_file.dat
- cf_temp_rg = self._sII.cosmo_range()
+ cf_temp_rg = self._sII.temperature_range()
if not (cf_temp_rg[0] <= self._sTrg[0] <= self._sTrg[1] <= cf_temp_rg[1]):
print_warning(
"Temperature range not covered by input data. Results cannot be trusted.",
@@ -297,8 +298,8 @@ def __init__(self, mchi, a, b, tempkd, bree, braa, omegah2=0.12):
# The s-wave and p-wave parts of
self._sSwave = a # in cm^3/s
self._sPwave = b # in cm^3/s
- # The dark matter decoupling temperature in MeV
- # For Tkd=0, the dark matter partices stays in
+ # The dark-matter decoupling temperature. For
+        # Tkd=0, the dark-matter particles stay in
# kinetic equilibrium with the SM heat bath
self._sTkd = tempkd # in MeV
# The injection energy
@@ -380,4 +381,4 @@ def _source_photon_c(self, E, T):
_sp = self._source_electron_0(T)
- return (_sp/EX) * (alpha/pi) * ( 1. + (1.-x)**2. )/x * log( (1.-x)/y )
+ return (_sp/EX) * (alpha/pi) * ( 1. + (1.-x)**2. )/x * log( (1.-x)/y )
\ No newline at end of file
diff --git a/acropolis/nucl.py b/acropolis/nucl.py
index fcfe1a7..10d608e 100644
--- a/acropolis/nucl.py
+++ b/acropolis/nucl.py
@@ -7,7 +7,6 @@
# scipy
from scipy.integrate import quad
from scipy.integrate import IntegrationWarning
-from scipy.linalg import expm
# time
from time import time
# warnings
@@ -18,10 +17,11 @@
# pprint
from acropolis.pprint import print_error, print_warning, print_info
# params
-from acropolis.params import me, me2, hbar, tau_n, tau_t
+from acropolis.params import me2, hbar, tau_n, tau_t
from acropolis.params import approx_zero, eps, E_EC_max
from acropolis.params import NT_pd, NY
-from acropolis.params import universal
+# flags
+import acropolis.flags as flags
# cascade
from acropolis.cascade import SpectrumGenerator
@@ -90,6 +90,7 @@
# the different reaction rates (taken from 2006.14803)
# in terms of a relative deviation from the mean value
# 8 (He4->d+d); 10, 11; (Li6->...); 12, 14 (Li7->...)
+# TODO: Perform literature search and find sensible values
_rdev = {
1: 0.00,
2: 0.00,
@@ -187,27 +188,21 @@ class NuclearReactor(object):
def __init__(self, s0, sc, temp_rg, e0, ii):
self._sII = ii
- # A dictionary containing the BBN parameters
- self._sY0 = self._sII.bbn_abundances_0()
-
# The injection energy
self._sE0 = e0
- # The baryon-to-photon ratio at the time of the CMB
- self._sEta = self._sII.parameter("eta")
-
- # The source terms without the detla function
+ # The prefactor of the delta source term
self._sS0 = s0
- # The FSR source terms
- self._sSc = sc
+        # The continuous source terms
+ self._sSC = sc
# The approximate decay temperature of the mediator
self._sTrg = temp_rg
# An instance of 'Spectrum_Generator' in order to calculate
# the photon spectrum in the function 'get_reaction_rate(reaction_id, T)'
- self._sGen = SpectrumGenerator(self._sY0, self._sEta)
+ self._sGen = SpectrumGenerator(self._sII)
# BEGIN REACTIONS ###############################################
@@ -357,28 +352,45 @@ def _Be7a_ppnHe4(self, E):
@_convert_mb_to_iMeV2
def get_cross_section(self, reaction_id, E):
# There is no switch statement in python :(
- if reaction_id == 1: return self._da_np(E) # 1. d + a -> n + p
- if reaction_id == 2: return self._ta_nd(E) # 2. t + a -> n + d
- if reaction_id == 3: return self._ta_npn(E) # 3. t + a -> 2n + p
- if reaction_id == 4: return self._He3a_pd(E) # 4. He3 + a -> p + d
- if reaction_id == 5: return self._He3a_npp(E) # 5. He3 + a -> n + 2p
- if reaction_id == 6: return self._He4a_pt(E) # 6. He4 + a -> p + t
- if reaction_id == 7: return self._He4a_nHe3(E) # 7. He4 + a -> n + He3
- if reaction_id == 8: return self._He4a_dd(E) # 8. He4 + a -> 2d
- if reaction_id == 9: return self._He4a_npd(E) # 9. He4 + a -> n + p + d
- if reaction_id == 10: return self._Li6a_npHe4(E) # 10. Li6 + a -> n + p + He4
- if reaction_id == 11: return self._Li6a_XA3(E) # 11. Li7 + a -> X + A3
- if reaction_id == 12: return self._Li7a_tHe4(E) # 12. Li7 + a -> t + He4
- if reaction_id == 13: return self._Li7a_nLi6(E) # 13. Li7 + a -> n + Li6
- if reaction_id == 14: return self._Li7a_nnpHe4(E) # 14. Li7 + a -> 2n + p + He4
- if reaction_id == 15: return self._Be7a_He3He4(E) # 15. Be7 + a -> He3 + He4
- if reaction_id == 16: return self._Be7a_pLi6(E) # 16. Be7 + a -> p + Li6
- if reaction_id == 17: return self._Be7a_ppnHe4(E) # 17. Be7 + a -> 2p + n + He4
-
-
- # If no match is found, return 0.
+ if reaction_id == 1:
+ return self._da_np(E) # 1. d + a -> n + p
+ if reaction_id == 2:
+ return self._ta_nd(E) # 2. t + a -> n + d
+ if reaction_id == 3:
+ return self._ta_npn(E) # 3. t + a -> 2n + p
+ if reaction_id == 4:
+ return self._He3a_pd(E) # 4. He3 + a -> p + d
+ if reaction_id == 5:
+ return self._He3a_npp(E) # 5. He3 + a -> n + 2p
+ if reaction_id == 6:
+ return self._He4a_pt(E) # 6. He4 + a -> p + t
+ if reaction_id == 7:
+ return self._He4a_nHe3(E) # 7. He4 + a -> n + He3
+ if reaction_id == 8:
+ return self._He4a_dd(E) # 8. He4 + a -> 2d
+ if reaction_id == 9:
+ return self._He4a_npd(E) # 9. He4 + a -> n + p + d
+ if reaction_id == 10:
+ return self._Li6a_npHe4(E) # 10. Li6 + a -> n + p + He4
+ if reaction_id == 11:
+            return self._Li6a_XA3(E)    # 11. Li6 + a -> X + A3
+ if reaction_id == 12:
+ return self._Li7a_tHe4(E) # 12. Li7 + a -> t + He4
+ if reaction_id == 13:
+ return self._Li7a_nLi6(E) # 13. Li7 + a -> n + Li6
+ if reaction_id == 14:
+ return self._Li7a_nnpHe4(E) # 14. Li7 + a -> 2n + p + He4
+ if reaction_id == 15:
+ return self._Be7a_He3He4(E) # 15. Be7 + a -> He3 + He4
+ if reaction_id == 16:
+ return self._Be7a_pLi6(E) # 16. Be7 + a -> p + Li6
+ if reaction_id == 17:
+ return self._Be7a_ppnHe4(E) # 17. Be7 + a -> 2p + n + He4
+
+
+ # If no match is found, exit with error
print_error(
- "Reaction with reaction_id" + str(reaction_id) + "does not exist.",
+ "Reaction with reaction_id " + str(reaction_id) + " does not exist.",
"acropolis.nucl.NuclearReactor.get_cross_section"
)
@@ -397,13 +409,13 @@ def _pdi_rates(self, T):
pdi_rates = {rid:approx_zero for rid in _lrid}
# Calculate the spectra for the given temperature
- if not universal:
+ if not flags.universal:
xsp, ysp = self._sGen.get_spectrum(
- self._sE0, self._sS0, self._sSc, T
+ self._sE0, self._sS0, self._sSC, T
)
else:
xsp, ysp = self._sGen.get_universal_spectrum(
- self._sE0, self._sS0, self._sSc, T, offset=5e-2
+ self._sE0, self._sS0, self._sSC, T, offset=5e-2
)
# For performance reasons, also
# cut the energy at threshold
@@ -415,7 +427,9 @@ def _pdi_rates(self, T):
Fph = LogInterp(xsp, ysp) # Interpolation on: Emin -> E0
# Calculate the kernel for the integration in log-space
def Fph_s(log_E, rid):
- E = exp( log_E ); return Fph( E ) * E * self.get_cross_section(rid, E)
+ E = exp( log_E )
+
+ return Fph( E ) * E * self.get_cross_section(rid, E)
# Define the total rate of interactions altering the photon spectrum,
# evaluated at the relevant injection energy E0
@@ -596,8 +610,8 @@ def get_matp(self, T):
Ik_dcy = lambda y: self._dcy_kernel_ij( nr, nc, exp(y) ) * exp(y)
# Perform the integration (in log-log space)
- mpdi[nr, nc] = quad(Ik_pdi, log(self._sTmax), log(T), epsrel=eps, epsabs=0)[0]
- mdcy[nr, nc] = quad(Ik_dcy, log(self._sTmax), log(T), epsrel=eps, epsabs=0)[0]
+ mpdi[nr, nc] = quad(Ik_pdi, log(self._sTmax), log(T), epsrel=eps, epsabs=0, limit=100)[0]
+ mdcy[nr, nc] = quad(Ik_dcy, log(self._sTmax), log(T), epsrel=eps, epsabs=0, limit=100)[0]
end_time = time()
print_info(
diff --git a/acropolis/obs.py b/acropolis/obs.py
index 2e99dc6..4a55b74 100644
--- a/acropolis/obs.py
+++ b/acropolis/obs.py
@@ -5,9 +5,23 @@ def __init__(self, mean, err):
self.err = err
+# 2020 ####################################################
pdg2020 = {
"Yp" : AbundanceObservation( 2.45e-1, 0.03e-1),
"DH" : AbundanceObservation(2.547e-5, 0.035e-5),
"HeD": AbundanceObservation( 8.3e-1, 1.5e-1),
"LiH": AbundanceObservation( 1.6e-10, 0.3e-10)
}
+
+# 2021 ####################################################
+pdg2021 = {
+ "Yp" : AbundanceObservation( 2.45e-1, 0.03e-1),
+ "DH" : AbundanceObservation(2.547e-5, 0.025e-5),
+ "HeD": AbundanceObservation( 8.3e-1, 1.5e-1),
+ "LiH": AbundanceObservation( 1.6e-10, 0.3e-10)
+}
+# Smaller error on DH compared to 2020
+
+# 2022 ####################################################
+pdg2022 = pdg2021
+# No change compared to 2021
\ No newline at end of file
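A short, hedged usage sketch of the new observation sets: each entry is an ``AbundanceObservation`` with ``mean`` and ``err`` attributes, and any of the dictionaries can be handed to the plotting routines via their ``obs`` keyword.

    from acropolis.obs import pdg2022

    dh = pdg2022["DH"]
    print(dh.mean, dh.err)  # central value and uncertainty of D/H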
diff --git a/acropolis/params.py b/acropolis/params.py
index 85b6291..b2e9513 100644
--- a/acropolis/params.py
+++ b/acropolis/params.py
@@ -4,44 +4,6 @@
from scipy.special import zeta
-# ATTENTION !!!!!111elf
-# Only parameters that specify a default value are
-# meant to be changed by the user, i.e. everything
-# under FLAGS and ALGORITHM-SPECIFIC PARAMETERS
-
-
-# FLAGS #############################################################
-
-# If this flag is set to 'True',
-# the pregenerated databases
-# will be used to interpolate
-# the different reaction rates
-# Default: True
-usedb = True
-
-# If this flag is set to 'True',
-# additional output is printed
-# to the screen
-# Default: True
-verbose = True
-
-# If this flag is set to 'True',
-# additional debug information
-# is printed to the screen
-# Default: False
-debug = False
-
-# If this flag is set to 'True',
-# the universal spectrum is used
-# for all points in parameter space
-# ATTENTION:
-# Change with caution and only if
-# you know what you are doing.
-# World destruction possible!
-# Default: False
-universal = False
-
-
# PHYSICAL CONSTANTS ################################################
# The fine-structure constant
@@ -49,16 +11,9 @@
# The electron mass (in MeV)
me = 0.511
-
-# The electron mass squared (in MeV^2)
+# -->
me2 = me**2.
-# The muon mass (in MeV)
-mm = 105.658
-
-# The muon mass squared (in MeV^2)
-mm2 = mm**2.
-
# The classical electron radius (in 1/MeV)
re = alpha/me
@@ -71,9 +26,6 @@
# The speed of light (in cm/s)
c_si = 2.99792458e10
-# The muon lifetime (in s)
-tau_m = 2.1969811e-6
-
# The neutron lifetime (in s)
tau_n = 8.794e2 # pre PDG2020: 8.802e2
@@ -104,17 +56,6 @@
# ALGORITHM-SPECIFIC PARAMETERS #####################################
-# A specifier to descide which particles
-# are considered in the Boltzmann equation
-# for the electromagnetic cascade
-# ATTENTION: Do not use a value that does
-# not include all injected particle types
-# 0: Photons
-# 1: Photons, Electrons/Positrons
-# 2: Photons, Electrons/Positrons, Anti-/Muons (not yet implemented)
-# Default: 1
-FX = 1
-
# The number of nuclei that are
# considered in the Boltzmann equation
# for non-thermal nucleosynthesis
diff --git a/acropolis/plots.py b/acropolis/plots.py
index 203ae5f..c14ca4e 100644
--- a/acropolis/plots.py
+++ b/acropolis/plots.py
@@ -9,7 +9,7 @@
import warnings
# obs
-from acropolis.obs import pdg2020
+from acropolis.obs import pdg2022
# pprint
from acropolis.pprint import print_info
# params
@@ -30,7 +30,7 @@
_plot_number = 0
-# The number of sigmas at which a
+# The number of sigmas beyond which a
# point is considered excluded
_95cl = 1.95996 # 95% C.L.
@@ -38,8 +38,11 @@
# DATA EXTRACTION ###################################################
def _get_abundance(data, i):
+ cols = data.shape[1]
+
+ offset = cols - 3*NY
-    # Add + 2 for the two parameters in the first two columns
-    i0 = i + 2
+    # Add the offset to skip the parameter columns at the start
+    i0 = i + offset
# Extract the different abundances...
mean, high, low = data[:,i0], data[:,i0+NY], data[:,i0+2*NY]
@@ -90,8 +93,9 @@ def _get_deviations(data, obs):
pass
# Take care of potential NaNs
- HeD[ mDH < obs['DH'].err ] = 10
- DH [ np.isnan(DH) ] = -10
+ HeD[ mDH < obs['DH' ].err ] = 10
+ HeD[ mHeD < obs['HeD'].err ] = 10
+ DH [ np.isnan(DH) ] = -10
# Return (without reshaping)
return Yp, DH, HeD, LiH
@@ -133,10 +137,10 @@ def tex_title(**kwargs):
# that need to be printed in scientific
# notation
def _val_to_string(val):
- if type(val) == float:
+ if isinstance(val, float):
power = log10( val )
if power != int(power):
- # TODO
+ # TODO: Implement this special case
pass
return r'10^' + str( int(power) )
@@ -180,7 +184,7 @@ def tex_labels(key_x, key_y):
# FIGURE HANDLING ###################################################
-def _init_figure():
+def init_figure():
fig = plt.figure(figsize=(4.8, 4.4), dpi=150, edgecolor='white')
ax = fig.add_subplot(1, 1, 1)
@@ -237,7 +241,7 @@ def _set_tick_labels(ax, x, y):
ax.set_ylim(ymin_log, ymax_log)
-def save_figure(output_file=None):
+def save_figure(output_file=None, show_fig=False):
global _plot_number
# If no name for the output file is given
@@ -247,6 +251,9 @@ def save_figure(output_file=None):
_plot_number += 1
plt.savefig(output_file)
+ # Show the figure on request
+ if show_fig:
+ plt.show()
print_info(
"Figure has been saved as '{}'".format(output_file),
@@ -254,13 +261,13 @@ def save_figure(output_file=None):
)
-def plot_scan_results(data, output_file=None, title='', labels=('', ''), save_pdf=True, show_fig=False, obs=pdg2020):
+def plot_scan_results(data, output_file=None, contour_file=None, title='', labels=('', ''), fix_helium=False, show_fig=False, obs=pdg2022, xc=0, yc=1):
# If data is a filename, load the data first
- if type(data) == str:
+ if isinstance(data, str):
data = np.loadtxt(data)
# Get the set of input parameters...
- x, y = data[:,0], data[:,1]
+ x, y = data[:,xc], data[:,yc]
# ...and determine the shape of the data
N = len(x)
@@ -281,12 +288,25 @@ def plot_scan_results(data, output_file=None, title='', labels=('', ''), save_pd
HeD = HeD.reshape(shape)
LiH = LiH.reshape(shape)
+ # Fix potential 'holes' in the
+ # exclusion region of HeD
+ if fix_helium:
+ for j, column in enumerate(HeD.T):
+
+ excl = False
+ for i, el in enumerate(column):
+ if el < _95cl and excl:
+ HeD[i, j] = 10
+
+ if el > _95cl and not excl:
+ excl = True
+
# Extract the overall exclusion limit
max = np.maximum( np.abs(DH), np.abs(Yp) )
max = np.maximum( max, HeD )
# Init the figure and...
- fig, ax = _init_figure()
+ fig, ax = init_figure()
# ...set the tick labels
_set_tick_labels(ax, x, y)
@@ -328,7 +348,7 @@ def plot_scan_results(data, output_file=None, title='', labels=('', ''), save_pd
levels=[_95cl], colors='mediumseagreen', linestyles='-'
)
# Overall high/low (line)
- ax.contour(np.log10(x), np.log10(y), max,
+ cs = ax.contour(np.log10(x), np.log10(y), max,
levels=[_95cl], colors='black', linestyles='-'
)
@@ -341,10 +361,36 @@ def plot_scan_results(data, output_file=None, title='', labels=('', ''), save_pd
# Set tight layout
plt.tight_layout()
- if save_pdf == True:
+ if contour_file is not None:
+ extraction_failed = False
+
+ if len(cs.collections) == 1:
+ paths = cs.collections[0].get_paths()
+
+ if len(paths) == 1:
+ xy_cs = paths[0].vertices
+
+ np.savetxt(contour_file, xy_cs)
+
+ print_info(
+ "Overall exclusion line has been saved as '{}'".format(contour_file),
+ "acropolis.plot.plot_scan_results"
+ )
+ else:
+ extraction_failed = True
+ else:
+ extraction_failed = True
+
+ if extraction_failed:
+ print_info(
+ "Could not extract unique contour. No data has been saved.",
+ "acropolis.plot.plot_scan_results"
+ )
+
+ if output_file is not None:
save_figure(output_file)
- if show_fig == True:
+ if show_fig:
plt.show()
# Return figure and axis in case
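Taken together, a hedged usage sketch of the extended plotting interface (file names and axis labels below are placeholders): ``contour_file`` stores the overall 95% C.L. exclusion line if a unique contour can be extracted, ``fix_helium`` closes potential holes in the He3/D exclusion region, and ``xc``/``yc`` select the data columns used as plot axes.

    from acropolis.plots import plot_scan_results

    fig, ax = plot_scan_results(
        'scan_results.dat',                 # placeholder input file
        output_file  = 'scan_results.pdf',  # placeholder output file
        contour_file = 'exclusion_line.dat',
        labels       = (r'$m_\chi$', r'$a$'),
        fix_helium   = True,
        xc           = 0,
        yc           = 1
    )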
diff --git a/acropolis/pprint.py b/acropolis/pprint.py
index 81f7433..86a2e21 100644
--- a/acropolis/pprint.py
+++ b/acropolis/pprint.py
@@ -1,17 +1,19 @@
# sys
-from sys import stdout, stderr
+from sys import stdout, stderr, exit
-# params
-from acropolis.params import verbose, debug
+# flags
+import acropolis.flags as flags
# info
from acropolis.info import version, dev_version, url
_max_verbose_level = 1
+_use_color = True
+
def print_version():
- if verbose == True:
+ if flags.verbose:
# Differentiate between stable and dev version
version_str = ""
# Stable version
@@ -21,16 +23,31 @@ def print_version():
else:
version_str = "v{} [dev]".format(dev_version)
- stdout.write( "\x1B[38;5;209mACROPOLIS {} ({})\x1B[0m\n\n".format(version_str, url) )
+ if _use_color:
+ ctxt = "\x1B[38;5;209m"
+ cend = "\x1B[0m"
+ else:
+ ctxt = cend = ""
+
+ stdout.write( f"{ctxt}ACROPOLIS {version_str} ({url}){cend}\n\n" )
def print_Yf(Yf, header=["mean", "high", "low"]):
# If not verbose, simply print one line
# including all abundances
- if not verbose:
+ if not flags.verbose:
print(*Yf.transpose().reshape(1, Yf.size)[0,:])
return
+ # Define the colors
+ if _use_color:
+ chdr = "\x1B[35m"
+ celm = "\x1B[34m"
+ cdcy = "\x1B[36m"
+ cend = "\x1B[0m"
+ else:
+ chdr = celm = cdcy = cend = ""
+
# Fill potentially missing header entries
NYf = Yf.shape[1]
header.extend( [""] * ( NYf - len(header) ) )
@@ -45,41 +62,64 @@ def print_Yf(Yf, header=["mean", "high", "low"]):
# Print the header
header_str = "\n{:^4}"
for i in range(NYf):
- header_str += " | \x1B[35m{:^11}\x1B[0m"
+ header_str += f" | {chdr} {{:8}}{cend}"
print( header_str.format("", *header) )
- print("----------------------------------------------")
+ print("-------------------------------------------------")
# Print the different abundances
- for j, l in enumerate(labels):
- line = "\x1B[34m{:>4}\x1B[0m"
+ for j, label in enumerate(labels):
+ line = f"{celm}{{:>4}}{cend}"
for i in range(NYf):
- line += " | {:11.5e}"
+ line += " | {:11.6e}"
- if l in ['n', 'H3', 'Be7']:
- line += " [\x1B[36m{:7}\x1B[0m]"
+ if label in ['n', 'H3', 'Be7']:
+ line += f" [{cdcy}{{:7}}{cend}]"
- print( line.format(l, *Yf[j], 'decayed') )
+ print( line.format(label, *Yf[j], 'decayed') )
-def print_error(error, loc="", eol="\n"):
- locf = ""
- if debug == True and loc != "":
- locf = " \x1B[1;35m(" + loc + ")\x1B[0m"
+def print_error(error, loc="", eol="\n", flush=False):
+ # Define the colors
+ if _use_color:
+ cloc = "\x1B[1;35m"
+ ctyp = "\x1B[1;31m"
+ cend = "\x1B[0m"
+ else:
+ cloc = ctyp = cend = ""
- stderr.write("\x1B[1;31mERROR \x1B[0m: " + error + locf + eol)
+ locf = ""
+ if flags.debug and loc != "":
+ locf = f" {cloc}({loc}){cend}"
+
+ stderr.write(f"{ctyp}ERROR {cend}: {error}{locf}{eol}")
+
+ if flush:
+ stderr.flush()
+
exit(1)
-def print_warning(warning, loc="", eol="\n"):
+def print_warning(warning, loc="", eol="\n", flush=False):
+ # Define the colors
+ if _use_color:
+ cloc = "\x1B[1;35m"
+ ctyp = "\x1B[1;33m"
+ cend = "\x1B[0m"
+ else:
+ cloc = ctyp = cend = ""
+
locf = ""
- if debug == True and loc != "":
- locf = " \x1B[1;35m(" + loc + ")\x1B[0m"
+ if flags.debug and loc != "":
+ locf = f" {cloc}({loc}){cend}"
- stdout.write("\x1B[1;33mWARNING\x1B[0m: " + warning + locf + eol)
+ stdout.write(f"{ctyp}WARNING{cend}: {warning}{locf}{eol}")
+
+ if flush:
+ stdout.flush()
-def print_info(info, loc="", eol="\n", verbose_level=None):
+def print_info(info, loc="", eol="\n", flush=False, verbose_level=None):
global _max_verbose_level
if verbose_level is None:
@@ -87,12 +127,23 @@ def print_info(info, loc="", eol="\n", verbose_level=None):
_max_verbose_level = max( _max_verbose_level, verbose_level )
+ # Define the colors
+ if _use_color:
+ cloc = "\x1B[1;35m"
+ ctyp = "\x1B[1;32m"
+ cend = "\x1B[0m"
+ else:
+ cloc = ctyp = cend = ""
+
locf = ""
- if debug == True and loc != "":
- locf = " \x1B[1;35m(" + loc + ")\x1B[0m"
+ if flags.debug and loc != "":
+ locf = f" {cloc}({loc}){cend}"
- if verbose and verbose_level >= _max_verbose_level:
- stdout.write("\x1B[1;32mINFO \x1B[0m: " + info + locf + eol)
+ if flags.verbose and verbose_level >= _max_verbose_level:
+ stdout.write(f"{ctyp}INFO {cend}: {info}{locf}{eol}")
+
+ if flush:
+ stdout.flush()
def set_max_verbose_level(max_verbose_level=None):
@@ -102,3 +153,9 @@ def set_max_verbose_level(max_verbose_level=None):
max_verbose_level = 1
_max_verbose_level = max_verbose_level
+
+
+def disable_color():
+ global _use_color
+
+ _use_color = False
\ No newline at end of file
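A brief, hedged note on the new colour handling: scripts that redirect ACROPOLIS output to a file can switch off the ANSI escape sequences up front.

    from acropolis.pprint import disable_color

    disable_color()  # subsequent print_info/print_warning/print_error calls emit plain text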
diff --git a/acropolis/ruff.toml b/acropolis/ruff.toml
new file mode 100644
index 0000000..09f241a
--- /dev/null
+++ b/acropolis/ruff.toml
@@ -0,0 +1,14 @@
+exclude = [
+ "~*",
+ "future",
+ "v1.3",
+ "prj/_backup",
+
+]
+
+indent-width = 4
+
+lint.ignore = [
+ "E731",
+ "E741"
+]
\ No newline at end of file
diff --git a/acropolis/scans.py b/acropolis/scans.py
index ce91f48..1a3d259 100644
--- a/acropolis/scans.py
+++ b/acropolis/scans.py
@@ -1,27 +1,18 @@
+# functools
+from functools import partial
# numpy
import numpy as np
# time
from time import time, sleep
-# itertools
-from itertools import product
# multiprocessing
from multiprocessing import Pool, cpu_count
# pprint
from acropolis.pprint import print_info, print_error
-# params
-from acropolis.params import NY
# models
from acropolis.models import AbstractModel
-class _Batch(object):
-
- def __init__(self, length, is_fast):
- self.length = length
- self.is_fast = is_fast
-
-
class ScanParameter(object):
def __init__(self, ivalue, fvalue, num, spacing="log", fast=False):
@@ -46,200 +37,184 @@ def is_fast(self):
class BufferedScanner(object):
def __init__(self, model, **kwargs):
+ self._sModelClass = model.func if type(model) is partial else model
+
# Store the requested model
# self._sModel(...) afterwards creates
# a new instance of the requested model
- if not issubclass(model, AbstractModel):
+ if not issubclass(self._sModelClass, AbstractModel):
print_error(
- model.__name__ + " is not a subclass of AbstractModel",
+ self._sModelClass.__name__ + " is not a subclass of AbstractModel",
"acropolis.scans.BufferedScanner.__init__"
)
self._sModel = model
-
+
#######################################################################
- # Initialize the various sets
- self._sFixed = {} # Fixed parameter
- self._sScanp = {} # Scan parameters...
- self._sFastf = {} # ...w/o fast scanning
+        # Initialize the sets that store the various parameters
+ self._sFixed = {} # Fixed parameters
+ self._sScanp = {} # Scan parameters
- # Initialize the number of scan parameters...
+ # Initialize the number of scan parameters
self._sNP = 0 # (all)
self._sNP_fast = 0 # (only fast)
+ # Initialize the key of the 'fast' parameter
+ self._sFast_id = None
+
# Parse the keyword arguments and build up the
# sets 'self._sFixed' and 'self._sScanp'
self._parse_arguments(**kwargs)
#######################################################################
- # Generate the keys for the scan parameters
+ # Extract the keys of the scan parameters
self._sScanp_id = list( self._sScanp.keys() )
- # Determine the parameter for the parallelisation
- # In case there is a 'fast' parameter, this whould be
- # one of the 'non-fast' parameters
- #
- # Sort the keys in order for the fast parameters
- # to be at he beginning of the array
- list.sort( self._sScanp_id, key=lambda id: self._sFastf[id], reverse=True )
- # Choose the last parameter, which in any case is not the
- # 'fast' parameter and therefore can be calculated in parallel
- self._sId_pp = self._sScanp_id[-1]
-
- #######################################################################
-
- # Extract the dimension of parallel/sequential jobs
- self._sDp, self._sDs = 0, 0
- for id in self._sScanp_id:
- if id == self._sId_pp:
- self._sDp += len( self._sScanp[id] )
- else:
- self._sDs += len( self._sScanp[id] )
-
def _parse_arguments(self, **kwargs):
# Loop over the different parameters
for key in kwargs.keys():
param = kwargs[key]
- # Extract the fixed values
- if type(param) in [int, float]:
- self._sFixed[key] = float(param)
# Extract the scan parameters
- elif isinstance(param, ScanParameter):
+ if isinstance(param, ScanParameter):
self._sNP += 1
- # Save the relevant range of all paremeters
+ # Save the relevant parameter range
self._sScanp[key] = param.get_range()
- # Save the 'is_fast' status of all parameters
- self._sFastf[key] = param.is_fast()
- else:
- print_error(
- "All parameters must either be 'int', 'float' or an instance of 'ScanParameter'",
- "acropolis.scans.BufferedScanner._parse_arguments"
- )
- # Get the number of 'fast' parameters (Np_fast <= Np - 1)
- self._sNP_fast = list( self._sFastf.values() ).count(True)
+ # Check if the parameter is 'fast'
+ if param.is_fast():
+ self._sNP_fast += 1
+
+ # Save the corresponding id
+ self._sFast_id = key
+ # Extract the fixed parameters
+ else:
+ self._sFixed[key] = param
- # ERRORS for not-yet-implemented features (TODO) ################################
+ # Exit in case of unsupported scenarios
if self._sNP_fast > 1 or self._sNP != 2:
print_error(
- "Currently only exactly 2 scan parameters with <= 1 fast parameter are supported!",
+ "The class BufferedScanner only supports 2d parameter scans with <= 1 fast parameters!",
"acropolis.scans.BufferedScanner._parse_arguments"
)
- # TODO!!!
- def _build_batches(self):
- # Generate all possible parameter combinations, thereby
- # NOT! including the parameter used for the parallelisation
- scanp_ls = product( *[self._sScanp[id] for id in self._sScanp_id[:-1]] )
- # Right now: One sequential parameter, which is either fast or not
- scanp_bt = [ _Batch(self._sDs, self._sNP_fast != 0), ]
+ def _rescale_matp_buffer(self, buffer, factor):
+ return (factor*buffer[0], buffer[1])
- return scanp_ls, scanp_bt
+ def _run_single(self, scanp_set):
+ assert self._sFast_id is None and self._sNP_fast == 0 and self._sNP == 2
+
+ # Define the set that contains all parameters
+ fullp_set = scanp_set.copy()
+ fullp_set.update( self._sFixed )
- def rescale_matp_buffer(self, buffer, factor):
- return (factor*buffer[0], buffer[1])
+ ##############################################################
+ Yb = self._sModel(**fullp_set).run_disintegration()
+ ##############################################################
+ # Sort the scan parameters to guarantee a consistent output
+ sorted_scanp = [scanp_set[key] for key in sorted(scanp_set)]
- def _perform_non_parallel_scan(self, pp):
- # Build the relevant batches
- scanp_ls, scanp_bt = self._build_batches()
+ return [*sorted_scanp, *Yb.transpose().reshape(Yb.size)]
- # Determine the dimensions of the 'result grid'
- dx = self._sDs # rows
- dy = self._sNP + 3*NY # columns
- results = np.zeros( ( dx, dy ) )
+
+ def _run_batch(self, pp_set):
+ assert self._sFast_id is not None and self._sNP_fast == 1 and self._sNP == 2
+
+ # Extract the range of 'fast' parameters to loop over
+ fast_params = self._sScanp[self._sFast_id]
+
+ # Initialize the results array
+ results = []
# Initialize the buffer
matpb = None
- nb, ib = 0, 0
- # Loop over the non-parallel parameter(s)
- for i, scanp in enumerate(scanp_ls):
- # Store the current batch
- batch = scanp_bt[nb]
+ # Initialize fast_param_0
+ fast_param_0 = fast_params[0]
- # Check if a reset is required
- reset_required = (ib == 0)
-
- # Define the set that contains only scan parameters
- scanp_set = dict( zip(self._sScanp_id, scanp) )
- scanp_set.update( {self._sId_pp: pp} )
+ for fast_param in fast_params:
# Define the set that contains all parameters
+ scanp_set = pp_set.copy()
+ scanp_set.update( {self._sFast_id: fast_param} )
+ # -->
fullp_set = scanp_set.copy()
fullp_set.update( self._sFixed )
# Initialize the model of choice
model = self._sModel(**fullp_set)
- scanp_set_id_0 = scanp_set[self._sScanp_id[0]]
- # Rescale the rates with the 'fast' parameter
- # but only if the current parameter is 'fast'
- if batch.is_fast and (not reset_required):
- if matpb is not None:
+ if matpb is not None:
# matpb might still be None if E0 < Emin
- # save, since parameters determining the
- # injection energy, should never be fast
- factor = scanp_set_id_0/fastp
- model.set_matp_buffer( self.rescale_matp_buffer(matpb, factor) )
+
+ factor = fast_param/fast_param_0
+ model.set_matp_buffer( self._rescale_matp_buffer(matpb, factor) )
##############################################################
Yb = model.run_disintegration()
##############################################################
- # Reset the buffer/rescaling
- if batch.is_fast and reset_required:
+ # Set up the rescaling
+ if matpb is None:
matpb = model.get_matp_buffer()
- fastp = scanp_set_id_0
-
- # For the output, use the following format
- # 1. The 'non fast' parameters
- # 3. The 'fast' parameters
- sortp_ls = list( zip( scanp_set.keys(), scanp_set.values() ) )
- list.sort(sortp_ls, key=lambda el: self._sFastf[ el[0] ]) # False...True
- sortp_ls = [ el[1] for el in sortp_ls ]
-
- results[i] = [*sortp_ls, *Yb.transpose().reshape(Yb.size)]
-
- # Update the batch index
- if ib == batch.length - 1: # next batch
- ib = 0
- nb += 1
- else:
- ib += 1
-
+ fast_param_0 = fast_param
+
+ # Sort the scan parameters to guarantee a consistent output
+ sorted_scanp = [scanp_set[key] for key in sorted(scanp_set)]
+ # -->
+ results.append(
+ [*sorted_scanp, *Yb.transpose().reshape(Yb.size)]
+ )
+
return results
def perform_scan(self, cores=1):
+ assert self._sNP_fast <= 1 and self._sNP == 2
+
num_cpus = cpu_count() if cores == -1 else cores
start_time = time()
print_info(
- "Running scan for {} on {} cores.".format(self._sModel.__name__, num_cpus),
+ "Running scan for {} on {} cores.".format(self._sModelClass.__name__, num_cpus),
"acropolis.scans.BufferedScanner.perform_scan",
verbose_level=3
)
+ key1, key2 = self._sScanp_id
+ # Generate all possible parameter combinations
+ if self._sNP_fast == 0:
+ map_params = [
+ {key1: val1, key2: val2} for val1 in self._sScanp[key1] \
+ for val2 in self._sScanp[key2]
+ ]
+
+ map_func = self._run_single
+ else: # self._sNP_fast == 1
+ # Extract the non-fast, i.e. parallel, parameter
+ keyp = key1 if key1 != self._sFast_id else key2
+
+ map_params = [
+ {keyp: valp} for valp in self._sScanp[keyp]
+ ]
+
+ map_func = self._run_batch
+
with Pool(processes=num_cpus) as pool:
- # Loop over all possible combinations, by...
- # ...1. looping over the 'parallel' parameter (map)
- # ...2. looping over all parameter combinations,
- # thereby exclusing the 'parallel' parameter (perform_non_parallel_scan)
- async_results = pool.map_async(
- self._perform_non_parallel_scan, self._sScanp[self._sId_pp], 1
- )
+ # Loop over all possible parameter combinations
+ async_results = pool.map_async(map_func, map_params, 1)
progress = 0
+ # Track and print the progress
while ( progress < 100 ) or ( not async_results.ready() ):
- progress = 100*( self._sDp - async_results._number_left )/self._sDp
+ progress = 100*( len(map_params) - async_results._number_left )/len(map_params)
print_info(
"Progress: {:.1f}%".format(progress),
"acropolis.scans.BufferedScanner.perform_scan",
@@ -251,10 +226,6 @@ def perform_scan(self, cores=1):
parallel_results = async_results.get()
pool.terminate()
- parallel_results = np.array(parallel_results)
- old_shape = parallel_results.shape
- parallel_results.shape = (old_shape[0]*old_shape[1], len( self._sScanp_id ) + 3*NY) # + 1)
-
end_time = time()
print_info(
"Finished after {:.1f}min.".format( (end_time - start_time)/60 ),
@@ -262,4 +233,13 @@ def perform_scan(self, cores=1):
verbose_level=3
)
- return parallel_results
+ parallel_results = np.array(parallel_results)
+
+ # Reshape the array and return
+ old_shape = parallel_results.shape
+ # -->
+ new_shape = (
+ np.prod(old_shape[:-1]), old_shape[-1]
+ )
+
+ return parallel_results.reshape(new_shape)
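A hedged sketch of driving the reworked scanner, including the new option of passing a ``functools.partial``-wrapped model; the scan ranges and branching ratios are illustrative and loosely follow examples/scan_swave_ee.

    from functools import partial

    from acropolis.models import AnnihilationModel
    from acropolis.scans import BufferedScanner, ScanParameter

    # Fix the relic abundance up front; the remaining parameters are
    # supplied through BufferedScanner (illustrative values)
    model = partial(AnnihilationModel, omegah2=0.12)

    results = BufferedScanner( model,
        mchi   = ScanParameter(  0,   3, 100),             # regular scan parameter
        a      = ScanParameter(-27, -16, 100, fast=True),  # 'fast' scan parameter
        b      = 0.,
        tempkd = 0.,
        bree   = 1.,
        braa   = 0.
    ).perform_scan(cores=-1)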
diff --git a/acropolis/tmp/eloss.py b/acropolis/tmp/eloss.py
deleted file mode 100644
index e9c4958..0000000
--- a/acropolis/tmp/eloss.py
+++ /dev/null
@@ -1,174 +0,0 @@
-# math
-from math import log, exp, sqrt
-# numpy
-import numpy as np
-# numba
-import numba as nb
-# scipy
-from scipy.integrate import quad
-
-# params
-from acropolis.params import pi, pi2, zeta3
-from acropolis.params import alpha, me, me2
-from acropolis.params import eps, Ephb_T_max
-
-
-@nb.jit(cache=True)
-def _JIT_phi(x):
- a = [
- 0.8048,
- 0.1459,
- 1.1370e-3,
- -3.8790e-6
- ]
- b = [
- -86.07,
- 50.96,
- -14.45,
- 8./3.,
- ]
- c = [
- 2.910,
- 78.35,
- 1.837e3,
- ]
-
- if x <= 25:
- asum = 0
- for i in range(4): asum += a[i]*( (x-2.)**(i+1) )
-
- return (pi/12.)*(x-2.)**4./( 1. + asum )
-
- bsum, csum = 0, 0
- for j in range(4): bsum += b[j]*(log(x)**j)
- for k in range(3): csum += c[k]/( x**(k+1) )
-
- return x*bsum/( 1. - csum )
-
-
-@nb.jit(cache=True)
-def _JIT_eloss_bethe_heitler(logx, E, T, m):
- x = np.exp(logx) # kappa
-
- # Calculate gamma
- ga = E/m
- # Calculate nu (https://journals.aps.org/prd/pdf/10.1103/PhysRevD.1.1596)
- nu = me/(2*ga*T)
-
- # log
- return x * _JIT_phi(x)/( np.exp(nu*x) - 1. )
-
-
-class InteractingParticle(object):
-
- def __init__(self, m, q=1, a=0):
- # The mass of the particle
- self._sM = m # in MeV
- # The charge of the particle
- self._sQ = q # in units of e
- # The anamolous mangentic moment
- self._sA = a
-
-
- # TODO: Interface correctly with ACROPOLIS
- def _ne(self, T):
- # The number density of photons
- na = 2.*zeta3*(T**3.)/pi2
-
- if T >= me:
- return 1.5*na
-
- if me > T >= me/26.:
- return 4.*exp(-me/T)*( me*T/(2.*pi) )**1.5
-
- # The baryon-to-photon ratio
- eta = 6.137e-10
- # The abundance of helium-4
- Y = 0.2474
-
- return (1. - Y/2.)*eta*na
-
-
- # CHARGED PARTICLES #######################################################
-
- # TODO
- def _dEdt_coulomb(self, E, T):
- # The plasma frequency
- wp2 = 4.*pi*self._ne(T)*alpha/me
- wp = sqrt(wp2)
-
- # The gamma factor of the charged particle
- ga = E/self._sM
-
- # The velocity of the charged particle
- v = sqrt(1. - 1./ga**2.) if ga > 1 else 0
-
- if v < sqrt( 2*T/me ):
- # TODO
- return 0.
-
- Z = self._sQ
- # The 'b-factor'
- b = max( 1, Z*alpha/v )/( ga*me*v )
-
- return -(Z**2.)*alpha*wp2*( log( 0.76*v/(wp*b) ) + v**2./2. )/v
-
-
- def _dEdt_thompson(self, E, T):
- # The gamma factor of the charged particle
- ga = E/self._sM
-
- Z = self._sQ
- # This holds true also for non-relativistic
- # particles, in which case gamma^2-1 = 0
- return -32.*(pi**3.)*(alpha**2.)*(ga**2. - 1)*(T**4.)*(Z**4.)/(135.*self._sM**2.)
-
-
- def _dEdt_bethe_heitler(self, E, T):
- # The gamma factor of the charged particle
- ga = E/self._sM
-
- # The velocity of the charged particle
- v = sqrt(1. - 1./ga**2.) if ga > 1 else 0
-
- Z = self._sQ
- # Define the prefactor
- pref = (alpha**3.)*(Z**2.)*me2*v/( 4.*(ga**2.)*pi2 )
-
- # Calculate the appropriate integration limits
- Emax = Ephb_T_max*T
- xmax = 2*ga*Emax/me
- # -->
- xmin_log, xmax_log = log(2), log(xmax)
-
- # Perform the integration
- I = quad(_JIT_eloss_bethe_heitler, xmin_log, xmax_log,
- epsabs=0, epsrel=eps, args=(E, T, self._sM))
-
- return -pref*I[0]
-
-
- def _dEdt_charged(self, E, T):
- return self._dEdt_thompson(E, T) + self._dEdt_bethe_heitler(E, T) + self._dEdt_coulomb(E, T)
-
-
- # NEUTRAL PARTICLES #######################################################
-
- def _dEdt_magnetic_moment(self, E, T):
- return 0.
-
-
- def _dEdt_neutral(self, E, T):
- return self._dEdt_magnetic_moment(E, T)
-
-
- # COMBINED ################################################################
-
- def dEdt(self, E, T):
- if E <= self._sM:
- return 0.
-
- if self._sQ == 0:
- return self._dEdt_neutral(E, T)
-
- return self._dEdt_charged(E, T)
diff --git a/acropolis/utils.py b/acropolis/utils.py
index d9a5533..eed11b9 100644
--- a/acropolis/utils.py
+++ b/acropolis/utils.py
@@ -2,6 +2,8 @@
from math import log, pow
# numpy
import numpy as np
+# scipy
+from scipy.integrate import cumulative_simpson
class LogInterp(object):
@@ -17,10 +19,16 @@ def __init__(self, x_grid, y_grid, base=np.e, fill_value=None):
self._sXminLog = self._sXLog[ 0]
self._sXmaxLog = self._sXLog[-1]
- if self._sXmaxLog <= self._sXminLog:
+
+ xdiff = np.diff(self._sXLog)
+ if not np.all( xdiff >= 0 ):
raise ValueError(
"The values in x_grid need to be in ascending order."
)
+ if not np.allclose( xdiff, xdiff[0] ):
+ raise ValueError(
+ "The values in x_grid need to be equidistant in log space."
+ )
self._sN = len(self._sXLog)
@@ -41,7 +49,8 @@ def _perform_interp(self, x):
ix = int( ( x_log - self._sXminLog )*( self._sN - 1 )/( self._sXmaxLog - self._sXminLog ) )
# Handle the case for which ix+1 is out-of-bounds
- if ix == self._sN - 1: ix -= 1
+ if ix == self._sN - 1:
+ ix -= 1
x1_log, x2_log = self._sXLog[ix], self._sXLog[ix+1]
y1_log, y2_log = self._sYLog[ix], self._sYLog[ix+1]
@@ -61,21 +70,4 @@ def __call__(self, x):
 # Cumulative numerical Simpson integration
def cumsimp(x_grid, y_grid):
- n = len(x_grid)
-
- delta_z = log( x_grid[-1]/x_grid[0] )/( n-1 )
- g_grid = x_grid*y_grid
-
- i_grid = np.zeros( n )
-
- last_even_int = 0.
- for i in range(1, n//2 + 1):
- ie = 2 * i
- io = 2 * i - 1
-
- i_grid[io] = last_even_int + 0.5 * delta_z * (g_grid[io-1] + g_grid[io])
- if ie < n:
- i_grid[ie] = last_even_int + delta_z * (g_grid[ie-2] + 4.*g_grid[ie-1] + g_grid[ie])/3.
- last_even_int = i_grid[ie]
-
- return i_grid
+ return cumulative_simpson(x_grid*y_grid, x=np.log(x_grid), initial=0.)
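For clarity, a hedged sketch of what the rewritten helper returns: ``cumsimp`` computes the cumulative integral of ``y`` over ``x`` via the substitution ``dx = x dlog(x)``, now delegated to ``scipy.integrate.cumulative_simpson`` (available in recent SciPy releases).

    import numpy as np
    from acropolis.utils import cumsimp

    x = np.geomspace(1., 10., 101)  # grid, equidistant in log space
    y = 1./x                        # integrand

    I = cumsimp(x, y)               # cumulative integral of y over x
    # I[-1] is approximately log(10) ~ 2.303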
diff --git a/annihilation b/annihilation
index 9eb9a75..4d96707 100755
--- a/annihilation
+++ b/annihilation
@@ -15,7 +15,7 @@ print_version()
 # Extract the number of command line arguments...
N = len(sys.argv)
-# ...and check if there are exactly six
+# ...and check if there are six or seven
if N not in [7, 8]:
print_error("Would you kindly specify the following command-line arguments:\n"
+ " 1. The mass of the dark-matter particle [in MeV]\n"
diff --git a/build_pypi b/build_pypi
index c95a211..31207d3 100755
--- a/build_pypi
+++ b/build_pypi
@@ -1,6 +1,10 @@
#! /usr/bin/env bash
-python3 setup.py sdist bdist_wheel
+echo "Select what package formats to build:"
+# Ask the user to specify the build type
+build_types=$(gum choose --no-limit --selected=sdist,bdist_wheel 'sdist' 'bdist_wheel' 'bdist_egg')
+
+python3 setup.py $build_types
rm -rf build/ ACROPOLIS.egg-info/
twine check dist/*
diff --git a/examples/scan_pwave_ee b/examples/scan_pwave_ee
index 7a2df27..a402c08 100755
--- a/examples/scan_pwave_ee
+++ b/examples/scan_pwave_ee
@@ -25,7 +25,7 @@ N = 200
scan_result = BufferedScanner( AnnihilationModel,
mchi = ScanParameter( 0, 3, N),
a = 0.,
- b = ScanParameter(-21, -10, N, fast=True),
+ b = ScanParameter(-23, -10, N, fast=True),
tempkd = tempkd,
bree = 1.,
braa = 0.
diff --git a/examples/scan_swave_ee b/examples/scan_swave_ee
index 59c2625..f173fee 100755
--- a/examples/scan_swave_ee
+++ b/examples/scan_swave_ee
@@ -23,7 +23,7 @@ scan_result = BufferedScanner( AnnihilationModel,
mchi = ScanParameter( 0, 3, N),
a = ScanParameter(-27, -16, N, fast=True),
b = 0.,
- tempkd = 0.,
+ tempkd = 0., # irrelevant for s-wave annihilations
bree = 1.,
braa = 0.
).perform_scan(cores=-1)
diff --git a/hepforge/index.html b/hepforge/index.html
index c8f45f9..bde49dc 100755
--- a/hepforge/index.html
+++ b/hepforge/index.html
@@ -30,6 +30,33 @@
+
+
v1.3.0 « Click here to download
+ (September 17, 2024)
+
+
+ - Implemented the model acropolis.ext.models.ResonanceModel, which can be used to calculate PDI constraints for models with resonantly-enhanced DM annihilations
+ - Updated the initial abundances, which have now been calculated with PArthENoPE v3.0 and hence include the updated deuterium reaction rates
+ - Added PDG2021 and PDG2022 values to acropolis.obs
+ - Implemented the new package acropolis.jit to fix warnings caused by new versions of numba
+ - Removed the requirement for the data in cosmo_file.dat to be equidistant in log space
+ - Improved the progress indicator when running parameter scans without a fast parameter
+ - Unified the plotting script in plots/plot_scan_results.py by using the methods defined in acropolis.plots
+ - Added additional plotting functionality in acropolis.plots (extracting contours, specifying the x and y data for the plot, ...)
+
+
+
+
+
v1.2.3 « Click here to download
+ (September 7, 2023)
+
+
+ - Fixed warnings caused by new versions of numba by using the new package acropolis.jit
+ - Unified the plotting script in plots/plot_scan_results.py by using the methods defined in acropolis.plots
+ - Added PDG2021 and PDG2022 values to acropolis.obs
+
+
+
v1.2.2 « Click here to download
(April 6, 2022)
diff --git a/manual/manual.pdf b/manual/manual.pdf
index 33472d8..df1c3ee 100644
Binary files a/manual/manual.pdf and b/manual/manual.pdf differ
diff --git a/manual/src.tar.gz b/manual/src.tar.gz
index 4d686f4..562c3b4 100644
Binary files a/manual/src.tar.gz and b/manual/src.tar.gz differ
diff --git a/plots/NE_pd.pdf b/plots/NE_pd.pdf
index 116dcc4..3b3b06e 100644
Binary files a/plots/NE_pd.pdf and b/plots/NE_pd.pdf differ
diff --git a/plots/NT_pd.pdf b/plots/NT_pd.pdf
index 8d57ead..c1cca59 100644
Binary files a/plots/NT_pd.pdf and b/plots/NT_pd.pdf differ
diff --git a/plots/annih_pwave_1e0MeV_ee.pdf b/plots/annih_pwave_1e0MeV_ee.pdf
index 8cb5ee0..77b8655 100644
Binary files a/plots/annih_pwave_1e0MeV_ee.pdf and b/plots/annih_pwave_1e0MeV_ee.pdf differ
diff --git a/plots/annih_swave_ee.pdf b/plots/annih_swave_ee.pdf
index ddd3333..025f37d 100644
Binary files a/plots/annih_swave_ee.pdf and b/plots/annih_swave_ee.pdf differ
diff --git a/plots/decay_1e7s_aa.pdf b/plots/decay_1e7s_aa.pdf
index bc49462..b6f7fe5 100644
Binary files a/plots/decay_1e7s_aa.pdf and b/plots/decay_1e7s_aa.pdf differ
diff --git a/plots/decay_50MeV_aa.pdf b/plots/decay_50MeV_aa.pdf
index 6d8bef4..c2c7c69 100644
Binary files a/plots/decay_50MeV_aa.pdf and b/plots/decay_50MeV_aa.pdf differ
diff --git a/plots/plot_NE_pd.py b/plots/plot_NE_pd.py
deleted file mode 100644
index 85f0fac..0000000
--- a/plots/plot_NE_pd.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#! /usr/bin/python3
-
-# sys
-import sys; sys.path.append('..')
-# numpy
-import numpy as np
-# matplotlib
-import matplotlib.pyplot as plt
-from matplotlib.ticker import FixedLocator, NullLocator, FixedFormatter
-
-# acropolis
-from acropolis.params import NY
-
-# Set the font
-plt.rc('text', usetex=True)
-plt.rc('font', family='serif', size=14)
-plt.rcParams['text.latex.preamble'] = r"\usepackage{amsmath}"
-
-# Sepcify some properties of the figure
-fig = plt.figure(figsize=(0.4*12.0, 0.4*11.0), dpi=150, edgecolor="white")
-ax = fig.add_subplot(1,1,1)
-ax.tick_params(axis='both', which='both', labelsize=11, direction="in", width=0.5)
-ax.xaxis.set_ticks_position('both')
-ax.yaxis.set_ticks_position('both')
-for axis in ['top','bottom','left','right']:
- ax.spines[axis].set_linewidth(0.5)
-
-# Function to exteract the abundances for the data file
-def get_abd(data, i):
- i0 = i + 1
-
- # Get the length of the data
- N = len( data )
-
- # Extract the different abundances...
- mean, high, low = data[:,i0], data[:,i0+NY], data[:,i0+2*NY]
-
- return mean, high, low
-
-data = np.loadtxt("data/NE_pd.dat")
-
-NE = data[:,0]
-N = len(NE)
-
-mD, hD, lD = get_abd(data, 2)
-
-plt.plot([150]*2, [1, 2], color="black", linestyle="--")
-
-for i in range(N):
- plt.plot(NE[i], mD[i]*1e5, '*', color="dodgerblue")
- plt.plot(NE[i], hD[i]*1e5, '*', color="crimson")
- plt.plot(NE[i], lD[i]*1e5, '*', color="mediumseagreen")
-
-plt.text(380, 1.784, "low", color="mediumseagreen")
-plt.text(380, 1.741, "mean", color="dodgerblue")
-plt.text(380, 1.699, "high", color="crimson")
-
-plt.text(360, 1.83, r"$\texttt{NT\_pd=50}$")
-
-
-plt.xlabel(r"$\texttt{NE\_pd}$")
-plt.xlim(0, 500)
-
-plt.ylabel(r"$Y_\text{D}\;[\times 10^5]$")
-plt.ylim(1.65, 1.85)
-
-plt.title(r"$m_\phi = 50\,\mathrm{MeV},\;\tau_\phi = 10^5\,\mathrm{s},\;(n_\phi/n_\gamma)|_{T=10\,\mathrm{MeV}} = 10^{-8},\;\text{BR}_{\gamma\gamma}=1$", fontsize=10)
-plt.tight_layout()
-plt.savefig("NE_pd.pdf")
-plt.show()
diff --git a/plots/plot_NT_pd.py b/plots/plot_NT_pd.py
deleted file mode 100644
index dd4cbe3..0000000
--- a/plots/plot_NT_pd.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#! /usr/bin/python3
-
-# sys
-import sys; sys.path.append('..')
-# numpy
-import numpy as np
-# matplotlib
-import matplotlib.pyplot as plt
-from matplotlib.ticker import FixedLocator, NullLocator, FixedFormatter
-
-# acropolis
-from acropolis.params import NY
-
-# Set the font
-plt.rc('text', usetex=True)
-plt.rc('font', family='serif', size=14)
-plt.rcParams['text.latex.preamble'] = r"\usepackage{amsmath}"
-
-# Sepcify some properties of the figure
-fig = plt.figure(figsize=(0.4*12.0, 0.4*11.0), dpi=150, edgecolor="white")
-ax = fig.add_subplot(1,1,1)
-ax.tick_params(axis='both', which='both', labelsize=11, direction="in", width=0.5)
-ax.xaxis.set_ticks_position('both')
-ax.yaxis.set_ticks_position('both')
-for axis in ['top','bottom','left','right']:
- ax.spines[axis].set_linewidth(0.5)
-
-# Function to exteract the abundances for the data file
-def get_abd(data, i):
- i0 = i + 1
-
- # Get the length of the data
- N = len( data )
-
- # Extract the different abundances...
- mean, high, low = data[:,i0], data[:,i0+NY], data[:,i0+2*NY]
-
- return mean, high, low
-
-data = np.loadtxt("data/NT_pd.dat")
-
-NT = data[:,0]
-N = len(NT)
-
-mD, hD, lD = get_abd(data, 2)
-
-plt.plot([50]*2, [1, 2], color="black", linestyle="--")
-
-for i in range(N):
- plt.plot(NT[i], mD[i]*1e5, '*', color="dodgerblue")
- plt.plot(NT[i], hD[i]*1e5, '*', color="crimson")
- plt.plot(NT[i], lD[i]*1e5, '*', color="mediumseagreen")
-
-plt.text(160, 1.794, "low", color="mediumseagreen")
-plt.text(160, 1.751, "mean", color="dodgerblue")
-plt.text(160, 1.708, "high", color="crimson")
-
-plt.text(135, 1.83, r"$\texttt{NE\_pd=150}$")
-
-
-plt.xlabel(r"$\texttt{NT\_pd}$")
-plt.xlim(0, 200)
-
-plt.ylabel(r"$Y_\text{D}\;[\times 10^5]$")
-plt.ylim(1.65, 1.85)
-
-plt.title(r"$m_\phi = 50\,\mathrm{MeV},\;\tau_\phi = 10^5\,\mathrm{s},\;(n_\phi/n_\gamma)|_{T=10\,\mathrm{MeV}} = 10^{-8},\;\text{BR}_{\gamma\gamma}=1$", fontsize=10)
-plt.tight_layout()
-plt.savefig("NT_pd.pdf")
-plt.show()
diff --git a/plots/plot_annih_pwave_1MeV_ee.py b/plots/plot_annih_pwave_1MeV_ee.py
deleted file mode 100644
index 5c7c854..0000000
--- a/plots/plot_annih_pwave_1MeV_ee.py
+++ /dev/null
@@ -1,131 +0,0 @@
-#! /usr/bin/python3
-
-# sys
-import sys; sys.path.append('..')
-# numpy
-import numpy as np
-# matplotlib
-import matplotlib.pyplot as plt
-from matplotlib.ticker import FixedLocator, NullLocator, FixedFormatter
-
-# acropolis
-from acropolis.params import NY
-
-# Set the font
-plt.rc('text', usetex=True)
-plt.rc('font', family='serif', size=14)
-plt.rcParams['text.latex.preamble'] = r"\usepackage{amsmath}"
-
-# Sepcify some properties of the figure
-fig = plt.figure(figsize=(0.4*12.0, 0.4*11.0), dpi=150, edgecolor="white")
-ax = fig.add_subplot(1,1,1)
-ax.tick_params(axis='both', which='both', labelsize=11, direction="in", width=0.5)
-ax.xaxis.set_ticks_position('both')
-ax.yaxis.set_ticks_position('both')
-for axis in ['top','bottom','left','right']:
- ax.spines[axis].set_linewidth(0.5)
-
-# Deint the ticks for the x-...
-xtMajor = [np.log10(10**j) for j in np.linspace(0, 3, 4)]
-xtMinor = [np.log10(i*10**j) for j in xtMajor for i in range(10)[1:10]]
-xlMajor = [r"$10^{" + str(int(i)) + "}$" if i in xtMajor else "" for i in xtMajor]
-xMajorLocator = FixedLocator(xtMajor)
-xMinorLocator = FixedLocator(xtMinor)
-xMajorFormatter = FixedFormatter(xlMajor)
-# ... and y-axis
-ytMajor = np.linspace(-21, -10, 12)
-ytMinor = [np.log10(i*10**(j)) for i in range(10)[1:] for j in ytMajor]
-ylMajor = [r"$10^{" + str(int(i)) + "}$" if i in ytMajor else "" for i in ytMajor]
-yMajorLocator = FixedLocator(ytMajor)
-yMinorLocator = FixedLocator(ytMinor)
-yMajorFormatter = FixedFormatter(ylMajor)
-
-# Function to exteract the abundances for the data file
-def get_abd(data, i):
- i0 = i + 2
-
- # Get the length of the data
- N = len( data )
-
- # Extract the different abundances...
- mean, high, low = data[:,i0], data[:,i0+NY], data[:,i0+2*NY]
- # ...and calculate an estimate for the error
- diff = np.minimum( np.abs( mean - high ), np.abs( mean - low ) )
-
- return mean, diff
-
-shape=(200, 200)
-
-# Read the data file
-data = np.loadtxt(f"data/annih_pwave_1e0MeV_ee.dat")
-mchi = data[:,0].reshape(shape)
-b = data[:,1].reshape(shape)
-
-# Extract the different abundances
-mn, en = get_abd(data, 0)
-mp, ep = get_abd(data, 1)
-mH, eH = mn + mp, np.sqrt( en**2. + ep**2. )
-
-mD , eD = get_abd(data, 2)
-mT , eT = get_abd(data, 3)
-mHe3, eHe3 = get_abd(data, 4)
-mHe4, eHe4 = get_abd(data, 5)
-m3 , e3 = mT + mHe3, np.sqrt( eT**2. + eHe3**2. )
-mLi7, eLi7 = get_abd(data, 7)
-mBe7, eBe7 = get_abd(data, 8)
-m7, e7 = mLi7 + mBe7, np.sqrt( eLi7**2. + eBe7**2. )
-
-mYp , eYp = 4.*mHe4, 4.*eHe4
-mDH , eDH = mD/mH, (mD/mH)*np.sqrt( (eD/mD)**2. + (eH/mH)**2. )
-mHeD, eHeD = m3/mD, (m3/mD)*np.sqrt( (e3/m3)**2. + (eD/mD)**2. )
-mLiH, eLiH = m7/mH, (m7/mH)*np.sqrt( (e7/m7)**2. + (eH/mH)**2. )
-
-# Calculate the deviations...
-Yp = (mYp - 2.45e-1) / np.sqrt((0.03e-1)**2 + eYp**2)
-DH = (mDH - 2.547e-5) / np.sqrt((0.035e-5)**2 + eDH**2)
-HeD = (mHeD - 8.3e-1) / np.sqrt((1.5e-1)**2 + eHeD**2.)
-LiH = (mLiH - 1.6e-10) / np.sqrt((0.3e-10)**2. + eLiH**2.)
-HeD[mDH<0.035e-5] = 10
-# ...and reshape
-Yp = Yp.reshape(shape)
-DH = DH.reshape(shape)
-HeD = HeD.reshape(shape)
-LiH = LiH.reshape(shape)
-
-# Extract the overall exclusion line
-max = np.maximum( np.abs(DH), np.abs(Yp) )
-max = np.maximum( max, HeD )
-
-sig = 1.95996
-
-plt.contourf(np.log10(mchi), np.log10(b), DH, levels=[-1e10, -sig, sig, 1e10], colors=["0.6","white", "tomato"], alpha=0.2)
-plt.contourf(np.log10(mchi), np.log10(b), Yp, levels=[-1e10, -sig, sig, 1e10], colors=["dodgerblue","white", "lightcoral"], alpha=0.2)
-plt.contourf(np.log10(mchi), np.log10(b), HeD, levels=[sig, 1e10], colors=["mediumseagreen"], alpha=0.2)
-
-# plt.contour(np.log10(mchi), np.log10(b), LiH, levels=[0], colors='#fe46a5', linestyles='--')
-# plt.contour(np.log10(mchi), np.log10(b), LiH, levels=[-sig], colors='#fe46a5', linestyles='-')
-# plt.contour(np.log10(mchi), np.log10(b), LiH, levels=[+sig], colors='#fe46a5', linestyles='-')
-
-plt.contour(np.log10(mchi), np.log10(b), DH, levels=[-sig], colors='0.6', linestyles='-')
-plt.contour(np.log10(mchi), np.log10(b), DH, levels=[sig], colors='tomato', linestyles='-')
-plt.contour(np.log10(mchi), np.log10(b), Yp, levels=[-sig], colors='dodgerblue', linestyles='-')
-plt.contour(np.log10(mchi), np.log10(b), HeD, levels=[sig], colors='mediumseagreen', linestyles='-')
-plt.contour(np.log10(mchi), np.log10(b), max, levels=[sig], colors='black', linewidths=1.5, linestyles='-')
-
-ax.xaxis.set_label_text(r"$m_\chi\;\;[\mathrm{MeV}]$")
-ax.xaxis.set_major_locator(xMajorLocator)
-ax.xaxis.set_minor_locator(xMinorLocator)
-ax.xaxis.set_major_formatter(xMajorFormatter)
-ax.set_xlim(0, 3)
-
-ax.yaxis.set_label_text(r"$b\;\;[\mathrm{cm^3/s}]$")
-ax.yaxis.set_major_locator(yMajorLocator)
-ax.yaxis.set_minor_locator(yMinorLocator)
-ax.yaxis.set_major_formatter(yMajorFormatter)
-ax.set_ylim(-21, -10)
-
-plt.title(r"$a = 0\,\mathrm{cm}^3/\mathrm{s},\;T_\text{kd}=1\,\mathrm{MeV},\;\text{BR}_{\gamma\gamma}=1-\text{BR}_{e^+e^-} = 0$", fontsize=11)
-
-plt.tight_layout()
-plt.savefig(f"annih_pwave_1e0MeV_ee.pdf")
-plt.show()
diff --git a/plots/plot_annih_swave_ee.py b/plots/plot_annih_swave_ee.py
deleted file mode 100644
index 2f6dbde..0000000
--- a/plots/plot_annih_swave_ee.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#! /usr/bin/python3
-
-# sys
-import sys; sys.path.append('..')
-# numpy
-import numpy as np
-# matplotlib
-import matplotlib.pyplot as plt
-from matplotlib.ticker import FixedLocator, NullLocator, FixedFormatter
-
-# acropolis
-from acropolis.params import NY
-
-# Set the font
-plt.rc('text', usetex=True)
-plt.rc('font', family='serif', size=14)
-plt.rcParams['text.latex.preamble'] = r"\usepackage{amsmath}"
-
-# Sepcify some properties of the figure
-fig = plt.figure(figsize=(0.4*12.0, 0.4*11.0), dpi=150, edgecolor="white")
-ax = fig.add_subplot(1,1,1)
-ax.tick_params(axis='both', which='both', labelsize=11, direction="in", width=0.5)
-ax.xaxis.set_ticks_position('both')
-ax.yaxis.set_ticks_position('both')
-for axis in ['top','bottom','left','right']:
- ax.spines[axis].set_linewidth(0.5)
-
-# Deint the ticks for the x-...
-xtMajor = [np.log10(10**j) for j in np.linspace(0, 3, 4)]
-xtMinor = [np.log10(i*10**j) for j in xtMajor for i in range(10)[1:10]]
-xlMajor = [r"$10^{" + str(int(i)) + "}$" if i in xtMajor else "" for i in xtMajor]
-xMajorLocator = FixedLocator(xtMajor)
-xMinorLocator = FixedLocator(xtMinor)
-xMajorFormatter = FixedFormatter(xlMajor)
-# ... and y-axis
-ytMajor = np.linspace(-27, -18, 10)
-ytMinor = [np.log10(i*10**(j)) for i in range(10)[1:] for j in ytMajor]
-ylMajor = [r"$10^{" + str(int(i)) + "}$" if i in ytMajor else "" for i in ytMajor]
-yMajorLocator = FixedLocator(ytMajor)
-yMinorLocator = FixedLocator(ytMinor)
-yMajorFormatter = FixedFormatter(ylMajor)
-
-# Function to exteract the abundances for the data file
-def get_abd(data, i):
- i0 = i + 2
-
- # Get the length of the data
- N = len( data )
-
- # Extract the different abundances...
- mean, high, low = data[:,i0], data[:,i0+NY], data[:,i0+2*NY]
- # ...and calculate an estimate for the error
- diff = np.minimum( np.abs( mean - high ), np.abs( mean - low ) )
-
- return mean, diff
-
-shape=(200, 200)
-
-# Read the data file
-data = np.loadtxt(f"data/annih_swave_ee.dat")
-mchi = data[:,0].reshape(shape)
-a = data[:,1].reshape(shape)
-
-# Extract the different abundances
-mn, en = get_abd(data, 0)
-mp, ep = get_abd(data, 1)
-mH, eH = mn + mp, np.sqrt( en**2. + ep**2. )
-
-mD , eD = get_abd(data, 2)
-mT , eT = get_abd(data, 3)
-mHe3, eHe3 = get_abd(data, 4)
-mHe4, eHe4 = get_abd(data, 5)
-m3 , e3 = mT + mHe3, np.sqrt( eT**2. + eHe3**2. )
-mLi7, eLi7 = get_abd(data, 7)
-mBe7, eBe7 = get_abd(data, 8)
-m7, e7 = mLi7 + mBe7, np.sqrt( eLi7**2. + eBe7**2. )
-
-mYp , eYp = 4.*mHe4, 4.*eHe4
-mDH , eDH = mD/mH, (mD/mH)*np.sqrt( (eD/mD)**2. + (eH/mH)**2. )
-mHeD, eHeD = m3/mD, (m3/mD)*np.sqrt( (e3/m3)**2. + (eD/mD)**2. )
-mLiH, eLiH = m7/mH, (m7/mH)*np.sqrt( (e7/m7)**2. + (eH/mH)**2. )
-
-# Calculate the deviations...
-Yp = (mYp - 2.45e-1) / np.sqrt((0.03e-1)**2 + eYp**2)
-DH = (mDH - 2.547e-5) / np.sqrt((0.035e-5)**2 + eDH**2)
-HeD = (mHeD - 8.3e-1) / np.sqrt((1.5e-1)**2 + eHeD**2.)
-LiH = (mLiH - 1.6e-10) / np.sqrt((0.3e-10)**2. + eLiH**2.)
-HeD[mDH<0.035e-5] = 10
-# ...and reshape
-Yp = Yp.reshape(shape)
-DH = DH.reshape(shape)
-HeD = HeD.reshape(shape)
-LiH = LiH.reshape(shape)
-
-DH[np.isnan(DH)] = -10
-
-# Extract the overall exclusion line
-max = np.maximum( np.abs(DH), np.abs(Yp) )
-max = np.maximum( max, HeD )
-
-sig = 1.95996
-
-plt.contourf(np.log10(mchi), np.log10(a), DH, levels=[-1e10, -sig, sig, 1e10], colors=["0.6","white", "tomato"], alpha=0.2)
-plt.contourf(np.log10(mchi), np.log10(a), Yp, levels=[-1e10, -sig, sig, 1e10], colors=["dodgerblue","white", "lightcoral"], alpha=0.2)
-plt.contourf(np.log10(mchi), np.log10(a), HeD, levels=[sig, 1e10], colors=["mediumseagreen"], alpha=0.2)
-
-# plt.contour(np.log10(mchi), np.log10(a), LiH, levels=[0], colors='#fe46a5', linestyles='--')
-# plt.contour(np.log10(mchi), np.log10(a), LiH, levels=[-sig], colors='#fe46a5', linestyles='-')
-# plt.contour(np.log10(mchi), np.log10(a), LiH, levels=[+sig], colors='#fe46a5', linestyles='-')
-
-plt.contour(np.log10(mchi), np.log10(a), DH, levels=[-sig], colors='0.6', linestyles='-')
-plt.contour(np.log10(mchi), np.log10(a), DH, levels=[sig], colors='tomato', linestyles='-')
-plt.contour(np.log10(mchi), np.log10(a), Yp, levels=[-sig], colors='dodgerblue', linestyles='-')
-plt.contour(np.log10(mchi), np.log10(a), HeD, levels=[sig], colors='mediumseagreen', linestyles='-')
-plt.contour(np.log10(mchi), np.log10(a), max, levels=[sig], colors='black', linewidths=1.5, linestyles='-')
-
-ax.xaxis.set_label_text(r"$m_\chi\;\;[\mathrm{MeV}]$")
-ax.xaxis.set_major_locator(xMajorLocator)
-ax.xaxis.set_minor_locator(xMinorLocator)
-ax.xaxis.set_major_formatter(xMajorFormatter)
-ax.set_xlim(0, 3)
-
-ax.yaxis.set_label_text(r"$a\;\;[\mathrm{cm^3/s}]$")
-ax.yaxis.set_major_locator(yMajorLocator)
-ax.yaxis.set_minor_locator(yMinorLocator)
-ax.yaxis.set_major_formatter(yMajorFormatter)
-ax.set_ylim(-27, -18)
-
-plt.title(r"$b = 0\,\mathrm{cm}^3/\mathrm{s},\;T_\text{kd}=0\,\mathrm{MeV},\;\text{BR}_{\gamma\gamma}=1-\text{BR}_{e^+e^-} = 0$", fontsize=11)
-plt.tight_layout()
-plt.savefig(f"annih_swave_ee.pdf")
-plt.show()
diff --git a/plots/plot_decay_mphi_50MeV_aa.py b/plots/plot_decay_mphi_50MeV_aa.py
deleted file mode 100644
index 5d793c8..0000000
--- a/plots/plot_decay_mphi_50MeV_aa.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#! /usr/bin/python3
-
-# sys
-import sys; sys.path.append('..')
-# numpy
-import numpy as np
-# matplotlib
-import matplotlib.pyplot as plt
-from matplotlib.ticker import FixedLocator, NullLocator, FixedFormatter
-
-# acropolis
-from acropolis.params import NY
-
-# Set the font
-plt.rc('text', usetex=True)
-plt.rc('font', family='serif', size=14)
-plt.rcParams['text.latex.preamble'] = r"\usepackage{amsmath}"
-
-# Sepcify some properties of the figure
-fig = plt.figure(figsize=(0.4*12.0, 0.4*11.0), dpi=150, edgecolor="white")
-ax = fig.add_subplot(1,1,1)
-ax.tick_params(axis='both', which='both', labelsize=11, direction="in", width=0.5)
-ax.xaxis.set_ticks_position('both')
-ax.yaxis.set_ticks_position('both')
-for axis in ['top','bottom','left','right']:
- ax.spines[axis].set_linewidth(0.5)
-
-# Deint the ticks for the x-...
-xtMajor = [np.log10(10**j) for j in np.linspace(3, 10, 8)]
-xtMinor = [np.log10(i*10**j) for j in xtMajor for i in range(10)[1:10]]
-xlMajor = [r"$10^{" + str(int(i)) + "}$" if i in xtMajor else "" for i in xtMajor]
-xMajorLocator = FixedLocator(xtMajor)
-xMinorLocator = FixedLocator(xtMinor)
-xMajorFormatter = FixedFormatter(xlMajor)
-# ... and y-axis
-ytMajor = np.linspace(-15, -3, 13)
-ytMinor = [np.log10(i*10**(j)) for i in range(10)[1:] for j in ytMajor]
-ylMajor = [r"$10^{" + str(int(i)) + "}$" if i in ytMajor else "" for i in ytMajor]
-yMajorLocator = FixedLocator(ytMajor)
-yMinorLocator = FixedLocator(ytMinor)
-yMajorFormatter = FixedFormatter(ylMajor)
-
-# Function to exteract the abundances for the data file
-def get_abd(data, i):
- i0 = i + 2
-
- # Get the length of the data
- N = len( data )
-
- # Extract the different abundances...
- mean, high, low = data[:,i0], data[:,i0+NY], data[:,i0+2*NY]
- # ...and calculate an estimate for the error
- diff = np.minimum( np.abs( mean - high ), np.abs( mean - low ) )
-
- return mean, diff
-
-shape=(200, 200)
-
-# Read the data file
-data = np.loadtxt(f"data/decay_50MeV_aa.dat")
-tau = data[:,0].reshape(shape)
-n0a = data[:,1].reshape(shape)
-
-# Extract the different abundances
-mn, en = get_abd(data, 0)
-mp, ep = get_abd(data, 1)
-mH, eH = mn + mp, np.sqrt( en**2. + ep**2. )
-
-mD , eD = get_abd(data, 2)
-mT , eT = get_abd(data, 3)
-mHe3, eHe3 = get_abd(data, 4)
-mHe4, eHe4 = get_abd(data, 5)
-m3 , e3 = mT + mHe3, np.sqrt( eT**2. + eHe3**2. )
-mLi7, eLi7 = get_abd(data, 7)
-mBe7, eBe7 = get_abd(data, 8)
-m7, e7 = mLi7 + mBe7, np.sqrt( eLi7**2. + eBe7**2. )
-
-mYp , eYp = 4.*mHe4, 4.*eHe4
-mDH , eDH = mD/mH, (mD/mH)*np.sqrt( (eD/mD)**2. + (eH/mH)**2. )
-mHeD, eHeD = m3/mD, (m3/mD)*np.sqrt( (e3/m3)**2. + (eD/mD)**2. )
-mLiH, eLiH = m7/mH, (m7/mH)*np.sqrt( (e7/m7)**2. + (eH/mH)**2. )
-
-# Calculate the deviations...
-Yp = (mYp - 2.45e-1) / np.sqrt((0.03e-1)**2 + eYp**2)
-DH = (mDH - 2.547e-5) / np.sqrt((0.035e-5)**2 + eDH**2)
-HeD = (mHeD - 8.3e-1) / np.sqrt((1.5e-1)**2 + eHeD**2.)
-LiH = (mLiH - 1.6e-10) / np.sqrt((0.3e-10)**2. + eLiH**2.)
-HeD[mDH<0.035e-5] = 10
-# ...and reshape
-Yp = Yp.reshape(shape)
-DH = DH.reshape(shape)
-HeD = HeD.reshape(shape)
-LiH = LiH.reshape(shape)
-
-HeD[(tau>1e7)*(n0a>1e-8)*(n0a<1e-6)] = 10
-DH[np.isnan(DH)] = -10
-
-# Extract the overall exclusion line
-max = np.maximum( np.abs(DH), np.abs(Yp) )
-max = np.maximum( max, HeD )
-
-sig = 1.95996
-
-plt.contourf(np.log10(tau), np.log10(n0a), DH, levels=[-1e10, -sig, sig, 1e10], colors=["0.6","white", "tomato"], alpha=0.2)
-plt.contourf(np.log10(tau), np.log10(n0a), Yp, levels=[-1e10, -sig, sig, 1e10], colors=["dodgerblue","white", "lightcoral"], alpha=0.2)
-plt.contourf(np.log10(tau), np.log10(n0a), HeD, levels=[sig, 1e10], colors=["mediumseagreen"], alpha=0.2)
-
-# plt.contour(np.log10(tau), np.log10(n0a), LiH, levels=[0], colors='#fe46a5', linestyles='--')
-# plt.contour(np.log10(tau), np.log10(n0a), LiH, levels=[-sig], colors='#fe46a5', linestyles='-')
-# plt.contour(np.log10(tau), np.log10(n0a), LiH, levels=[+sig], colors='#fe46a5', linestyles='-')
-
-plt.contour(np.log10(tau), np.log10(n0a), DH, levels=[-sig], colors='0.6', linestyles='-')
-plt.contour(np.log10(tau), np.log10(n0a), DH, levels=[sig], colors='tomato', linestyles='-')
-plt.contour(np.log10(tau), np.log10(n0a), Yp, levels=[-sig], colors='dodgerblue', linestyles='-')
-plt.contour(np.log10(tau), np.log10(n0a), HeD, levels=[sig], colors='mediumseagreen', linestyles='-')
-plt.contour(np.log10(tau), np.log10(n0a), max, levels=[sig], colors='black', linewidths=1.5, linestyles='-')
-
-plt.text(4, -10, r"D/$^1$H low", color='0.6', fontsize=16)
-plt.text(7.5, -11, r"$^3$He/D high", color='mediumseagreen', fontsize=16)
-plt.text(6.3, -4.2, r"$\mathcal{Y}_\text{p}$ low", color='dodgerblue', fontsize=16)
-plt.text(7.3, -8, r"D/$^1$H high", color='tomato', fontsize=16)
-
-ax.xaxis.set_label_text(r"$\tau_\phi\;[\mathrm{s}]$")
-ax.xaxis.set_major_locator(xMajorLocator)
-ax.xaxis.set_minor_locator(xMinorLocator)
-ax.xaxis.set_major_formatter(xMajorFormatter)
-ax.set_xlim(3, 10)
-
-ax.yaxis.set_label_text(r"$(n_\phi/n_\gamma)|_{T=T_0}$")
-ax.yaxis.set_major_locator(yMajorLocator)
-ax.yaxis.set_minor_locator(yMinorLocator)
-ax.yaxis.set_major_formatter(yMajorFormatter)
-ax.set_ylim(-15, -3)
-
-plt.title(r"$m_\phi = 50\,\mathrm{MeV},\;T_0=10\,\mathrm{MeV},\;\text{BR}_{\gamma\gamma}=1-\text{BR}_{e^+e^-} = 1$", fontsize=11)
-plt.tight_layout()
-plt.savefig(f"decay_50MeV_aa.pdf")
-plt.show()
diff --git a/plots/plot_decay_tau_1e7s_aa.py b/plots/plot_decay_tau_1e7s_aa.py
deleted file mode 100644
index 8fa7c64..0000000
--- a/plots/plot_decay_tau_1e7s_aa.py
+++ /dev/null
@@ -1,136 +0,0 @@
-#! /usr/bin/python3
-
-# sys
-import sys; sys.path.append('..')
-# numpy
-import numpy as np
-# matplotlib
-import matplotlib.pyplot as plt
-from matplotlib.ticker import FixedLocator, NullLocator, FixedFormatter
-
-# acropolis
-from acropolis.params import NY
-
-# Set the font
-plt.rc('text', usetex=True)
-plt.rc('font', family='serif', size=14)
-plt.rcParams['text.latex.preamble'] = r"\usepackage{amsmath}"
-
-# Specify some properties of the figure
-fig = plt.figure(figsize=(0.4*12.0, 0.4*11.0), dpi=150, edgecolor="white")
-ax = fig.add_subplot(1,1,1)
-ax.tick_params(axis='both', which='both', labelsize=11, direction="in", width=0.5)
-ax.xaxis.set_ticks_position('both')
-ax.yaxis.set_ticks_position('both')
-for axis in ['top','bottom','left','right']:
- ax.spines[axis].set_linewidth(0.5)
-
-# Define the ticks for the x-...
-xtMajor = [np.log10(10**j) for j in np.linspace(0, 3, 4)]
-xtMinor = [np.log10(i*10**j) for j in xtMajor for i in range(10)[1:10]]
-xlMajor = [r"$10^{" + str(int(i)) + "}$" if i in xtMajor else "" for i in xtMajor]
-xMajorLocator = FixedLocator(xtMajor)
-xMinorLocator = FixedLocator(xtMinor)
-xMajorFormatter = FixedFormatter(xlMajor)
-# ... and y-axis
-ytMajor = np.linspace(-15, -3, 13)
-ytMinor = [np.log10(i*10**(j)) for i in range(10)[1:] for j in ytMajor]
-ylMajor = [r"$10^{" + str(int(i)) + "}$" if i in ytMajor else "" for i in ytMajor]
-yMajorLocator = FixedLocator(ytMajor)
-yMinorLocator = FixedLocator(ytMinor)
-yMajorFormatter = FixedFormatter(ylMajor)
-
-# Function to extract the abundances from the data file
-def get_abd(data, i):
- i0 = i + 2
-
- # Get the length of the data
- N = len( data )
-
- # Extract the different abundances...
- mean, high, low = data[:,i0], data[:,i0+NY], data[:,i0+2*NY]
- # ...and calculate an estimate for the error
- diff = np.minimum( np.abs( mean - high ), np.abs( mean - low ) )
-
- return mean, diff
-
-shape=(200, 200)
-
-# Read the data file
-data = np.loadtxt(f"data/decay_1e7s_aa.dat")
-mphi = data[:,0].reshape(shape)
-n0a = data[:,1].reshape(shape)
-
-# Extract the different abundances
-mn, en = get_abd(data, 0)
-mp, ep = get_abd(data, 1)
-mH, eH = mn + mp, np.sqrt( en**2. + ep**2. )
-mLi7, eLi7 = get_abd(data, 7)
-mBe7, eBe7 = get_abd(data, 8)
-m7, e7 = mLi7 + mBe7, np.sqrt( eLi7**2. + eBe7**2. )
-
-mD , eD = get_abd(data, 2)
-mT , eT = get_abd(data, 3)
-mHe3, eHe3 = get_abd(data, 4)
-mHe4, eHe4 = get_abd(data, 5)
-m3 , e3 = mT + mHe3, np.sqrt( eT**2. + eHe3**2. )
-
-mYp , eYp = 4.*mHe4, 4.*eHe4
-mDH , eDH = mD/mH, (mD/mH)*np.sqrt( (eD/mD)**2. + (eH/mH)**2. )
-mHeD, eHeD = m3/mD, (m3/mD)*np.sqrt( (e3/m3)**2. + (eD/mD)**2. )
-mLiH, eLiH = m7/mH, (m7/mH)*np.sqrt( (e7/m7)**2. + (eH/mH)**2. )
-
-# Calculate the deviations...
-Yp = (mYp - 2.45e-1) / np.sqrt((0.03e-1)**2 + eYp**2)
-DH = (mDH - 2.547e-5) / np.sqrt((0.035e-5)**2 + eDH**2)
-HeD = (mHeD - 8.3e-1) / np.sqrt((1.5e-1)**2 + eHeD**2.)
-LiH = (mLiH - 1.6e-10) / np.sqrt((0.3e-10)**2. + eLiH**2.)
-HeD[mDH<0.035e-5] = 10
-
-# ...and reshape
-Yp = Yp.reshape(shape)
-DH = DH.reshape(shape)
-HeD = HeD.reshape(shape)
-LiH = LiH.reshape(shape)
-
-HeD[(n0a>1e-7)*(mphi>1e1)] = 10
-DH[np.isnan(DH)] = -10
-
-
-# Extract the overall exclusion line
-max = np.maximum( np.abs(DH), np.abs(Yp) )
-max = np.maximum( max, HeD )
-
-sig = 1.95996
-
-plt.contourf(np.log10(mphi), np.log10(n0a), DH, levels=[-1e10, -sig, sig, 1e10], colors=["0.6","white", "tomato"], alpha=0.2)
-plt.contourf(np.log10(mphi), np.log10(n0a), Yp, levels=[-1e10, -sig, sig, 1e10], colors=["dodgerblue","white", "lightcoral"], alpha=0.2)
-plt.contourf(np.log10(mphi), np.log10(n0a), HeD, levels=[sig, 1e10], colors=["mediumseagreen"], alpha=0.2)
-
-# plt.contour(np.log10(mphi), np.log10(n0a), LiH, levels=[0], colors='#fe46a5', linestyles='--')
-# plt.contour(np.log10(mphi), np.log10(n0a), LiH, levels=[-sig], colors='#fe46a5', linestyles='-')
-# plt.contour(np.log10(mphi), np.log10(n0a), LiH, levels=[+sig], colors='#fe46a5', linestyles='-')
-
-plt.contour(np.log10(mphi), np.log10(n0a), DH, levels=[-sig], colors='0.6', linestyles='-')
-plt.contour(np.log10(mphi), np.log10(n0a), DH, levels=[sig], colors='tomato', linestyles='-')
-
-plt.contour(np.log10(mphi), np.log10(n0a), Yp, levels=[-sig], colors='dodgerblue', linestyles='-')
-plt.contour(np.log10(mphi), np.log10(n0a), HeD, levels=[sig], colors='mediumseagreen', linestyles='-')
-plt.contour(np.log10(mphi), np.log10(n0a), max, levels=[sig], colors='black', linewidths=1.5, linestyles='-')
-
-ax.xaxis.set_label_text(r"$m_\phi\;[\mathrm{MeV}]$")
-ax.xaxis.set_major_locator(xMajorLocator)
-ax.xaxis.set_minor_locator(xMinorLocator)
-ax.xaxis.set_major_formatter(xMajorFormatter)
-ax.set_xlim(0, 3)
-
-ax.yaxis.set_label_text(r"$(n_\phi/n_\gamma)|_{T=T_0}$")
-ax.yaxis.set_major_locator(yMajorLocator)
-ax.yaxis.set_minor_locator(yMinorLocator)
-ax.yaxis.set_major_formatter(yMajorFormatter)
-ax.set_ylim(-15, -3)
-
-plt.title(r"$\tau_\phi = 10^7\,\mathrm{s},\;T_0=10\,\mathrm{MeV},\;\text{BR}_{\gamma\gamma}=1-\text{BR}_{e^+e^-} = 1$", fontsize=11)
-plt.tight_layout()
-plt.savefig(f"decay_1e7s_aa.pdf")
-plt.show()
diff --git a/plots/plot_pd_results.py b/plots/plot_pd_results.py
new file mode 100644
index 0000000..056245b
--- /dev/null
+++ b/plots/plot_pd_results.py
@@ -0,0 +1,95 @@
+#! /usr/bin/env python3
+
+# sys
+import sys; sys.path.append('..')
+
+# numpy
+import numpy as np
+# matplotlib
+from matplotlib.lines import Line2D
+
+# plots
+from acropolis.plots import init_figure, save_figure
+
+
+ref_values = {
+ 'NE_pd': 150,
+ 'NT_pd': 50
+}
+
+xlims = {
+ 'NE_pd': (10, 500),
+ 'NT_pd': (10, 200)
+}
+
+ylims = {
+ 'NE_pd': (1.4, 1.9),
+ 'NT_pd': (1.4, 1.9)
+}
+
+colors = {
+ 'annih': 'crimson',
+ 'decay': 'mediumorchid'
+}
+
+
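+# Map each parameter to the respective other one, i.e. 'NE_pd' <-> 'NT_pd'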
+def invert(param):
+ if param == 'NE_pd':
+ return 'NT_pd'
+
+ if param == 'NT_pd':
+ return 'NE_pd'
+
+ return None
+
+
+# Loop over both parameters, i.e. 'NE_pd' and 'NT_pd'
+for param in ['NE_pd', 'NT_pd']:
+ iparam = invert(param)
+
+ # Initialize the figure
+ fig, ax = init_figure()
+
+ # Loop over both data samples, i.e. 'decay' and 'annih'
+ for run in ['annih', 'decay']:
+
+ data = np.loadtxt(f'../tools/data/{param}_{run}.dat')
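+        # Columns of the data file (written by tools/scan_pd.sh): the scan parameter N_pd followed by three deuterium abundance values, interpreted here as low, mean and high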
+ # -->
+ N_pd = data[:,0]
+ Y2H_low = data[:,1]
+ Y2H_mean = data[:,2]
+ Y2H_high = data[:,3]
+
+ ax.plot(N_pd, Y2H_low *1e5, '-' , color=colors[run])
+ ax.plot(N_pd, Y2H_mean*1e5, '--', color=colors[run])
+ ax.plot(N_pd, Y2H_high*1e5, '-.', color=colors[run])
+
+ # Plot the reference line for the current parameter
+ ax.plot([ ref_values[param] ]*2, [ *ylims[param] ], ':', color='black', zorder=-1)
+
+    # Add a text label with the reference value for the other parameter
+ ax.text(xlims[param][1]*( 1 - 3e-2 ), ylims[param][0]*(1 + 2e-2), rf"$\texttt{{{iparam}={ref_values[iparam]}}}$", ha='right')
+
+ # Plot the legend for the different line styles
+ custom_lines = [
+ Line2D([0], [0], linestyle='-' , color='black', lw=1),
+ Line2D([0], [0], linestyle='--', color='black', lw=1),
+ Line2D([0], [0], linestyle='-.', color='black', lw=1)
+ ]
+ # -->
+ ax.legend(custom_lines, ['Low', 'Mean', 'High'], loc='upper right', fontsize=11, frameon=False)
+
+ # Set the labels for the x- and y-axis
+ ax.set_xlabel(rf'$\texttt{{{param}}}$')
+ ax.set_ylabel(r'$Y_{{}^2\mathrm{H}}\;\;[\times 10^5]$')
+
+ # Set the limits for the x- and y-axis
+ ax.set_xlim(*xlims[param])
+ ax.set_ylim(*ylims[param])
+
+ # Apply a tighter layout
+ fig.tight_layout()
+
+ # Save the figure
+ save_figure(f'{param}.pdf', show_fig=True)
+
diff --git a/plots/plot_scan_results.py b/plots/plot_scan_results.py
new file mode 100644
index 0000000..aa43be8
--- /dev/null
+++ b/plots/plot_scan_results.py
@@ -0,0 +1,64 @@
+#! /usr/bin/env python3
+
+# sys
+import sys; sys.path.append('..')
+
+# plots
+from acropolis.plots import tex_title, tex_labels
+from acropolis.plots import plot_scan_results, save_figure
+
+
+# Set the data directory
+data_dir = 'data/'
+
+
+# PLOT 1 ######################################################################
+# s-wave, a vs mchi ###########################################################
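+# Here output_file=None, i.e. the figure is not saved right away; the returned axis can be customized before calling save_figure below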
+_, ax1 = plot_scan_results(
+ data_dir + 'annih_swave_ee.dat', output_file=None,
+ title=tex_title(b=0, tempkd=0, braa=0), labels=tex_labels('mchi', 'a')
+)
+
+# Plot the reference point for the NE_pd/NT_pd scans
+ax1.plot(1, -24, '*', color='crimson')
+# -->
+save_figure('annih_swave_ee.pdf')
+
+
+
+# PLOT 2 ######################################################################
+# p-wave, b vs mchi, tempkd = 1 MeV ###########################################
+plot_scan_results(
+ data_dir + 'annih_pwave_Tkd_1e+00MeV_ee.dat', output_file='annih_pwave_1e0MeV_ee.pdf',
+ title=tex_title(a=0, tempkd=1, braa=0), labels=tex_labels('mchi', 'b')
+)
+
+
+
+# PLOT 3 ######################################################################
+# decay, n0a vs mphi, tau = 1e7 s #############################################
+plot_scan_results(
+ data_dir + 'decay_tau_1e+07s_aa.dat', output_file='decay_1e7s_aa.pdf',
+ title=tex_title(tau=1e7, temp0=10, braa=1), labels=tex_labels('mphi', 'n0a')
+)
+
+
+
+# PLOT 4 ######################################################################
+# decay, n0a vs tau, mphi = 50 MeV ############################################
+_, ax4 = plot_scan_results(
+ data_dir + 'decay_mphi_5e+01MeV_aa.dat', output_file=None, fix_helium=True,
+ title=tex_title(mphi=50, temp0=10, braa=1), labels=tex_labels('tau', 'n0a')
+)
+
+# Add labels for the different abundances
+ax4.text(4.0, -10.0, r"D/$^1$H low" , color='0.6' , fontsize=16)
+ax4.text(7.5, -11.0, r"$^3$He/D high" , color='mediumseagreen', fontsize=16)
+ax4.text(6.3, -4.2, r"$\mathcal{Y}_\text{p}$ low", color='dodgerblue' , fontsize=16)
+ax4.text(7.3, -8.0, r"D/$^1$H high" , color='tomato' , fontsize=16)
+
+# Plot the reference point for the NE_pd/NT_pd scans
+ax4.plot(5, -8, '*', color='mediumorchid')
+
+# -->
+save_figure('decay_50MeV_aa.pdf')
diff --git a/setup.py b/setup.py
index e2a820f..2427d3a 100644
--- a/setup.py
+++ b/setup.py
@@ -23,10 +23,11 @@
author=authors,
license='GPL3',
packages=[
- 'acropolis'
+ 'acropolis',
+ 'acropolis/ext'
],
package_data={
- 'acropolis': ['data/*']
+ 'acropolis': ['data/rates.db.gz', 'data/sm.tar.gz', 'data/parthenope_v3_0.tar.gz']
},
include_package_data=True,
scripts=[
diff --git a/tools/create_sm_abundance_file.c b/tools/create_sm_abundance_file.c
index 6c571be..c028b6f 100644
--- a/tools/create_sm_abundance_file.c
+++ b/tools/create_sm_abundance_file.c
@@ -18,7 +18,7 @@ void ratioH_to_Y0(double ratioH[], double Y0[]) {
// Extract the baryon-to-photon ratio
Y0[0] = ratioH[0];
- // Handle the special case 'p'
+ // Handle the special case 'p'/'H'
Y0[2] = ratioH[2];
 	// Handle the special case 'He4'
Y0[6] = ratioH[6]/4;
@@ -38,7 +38,7 @@ void ratioH_to_Y0(double ratioH[], double Y0[]) {
Y0[2] = Y0[2] + Y0[1]; // p
Y0[1] = 0.; // n
- /* The result of this function are:
+ /* The results of this function are:
Y0[0] eta_final
Y0[1] 0
Y0[2] (n_n + n_p) / n_b
@@ -65,7 +65,7 @@ int main( int argc, char** argv ) {
" 10...12 = RK4 method with adaptative stepsize (10=5%%, 11=1%%, 12=0.1%%)\n"
" 20...22 = Fehlberg RK4-5 method (20=5%%, 21=1%%, 22=0.1%%)\n"
" 30...32 = Cash-Karp RK4-5 method (30=1%%, 31=1e-4, 32=1e-5).\n"
- " 2. eta10 The baryon-to-photon ratio times 1e10.");
+ " 2. eta10 The baryon-to-photon ratio times 1e10.\n");
return 1;
} else {
diff --git a/tools/data/NE_pd_annih.dat b/tools/data/NE_pd_annih.dat
new file mode 100644
index 0000000..fb31b0b
--- /dev/null
+++ b/tools/data/NE_pd_annih.dat
@@ -0,0 +1,29 @@
+10 1.65987e-05 1.61862e-05 1.70146e-05
+20 1.63701e-05 1.59634e-05 1.67803e-05
+30 1.61568e-05 1.57554e-05 1.65616e-05
+40 1.60063e-05 1.56086e-05 1.64073e-05
+50 1.58812e-05 1.54866e-05 1.62790e-05
+60 1.57961e-05 1.54036e-05 1.61918e-05
+70 1.57277e-05 1.53369e-05 1.61217e-05
+80 1.56712e-05 1.52818e-05 1.60638e-05
+90 1.56185e-05 1.52305e-05 1.60098e-05
+100 1.55788e-05 1.51917e-05 1.59691e-05
+110 1.55446e-05 1.51584e-05 1.59340e-05
+120 1.55147e-05 1.51292e-05 1.59033e-05
+130 1.54853e-05 1.51005e-05 1.58732e-05
+140 1.54622e-05 1.50781e-05 1.58496e-05
+150 1.54416e-05 1.50580e-05 1.58285e-05
+160 1.54230e-05 1.50398e-05 1.58094e-05
+170 1.54042e-05 1.50215e-05 1.57901e-05
+180 1.53890e-05 1.50066e-05 1.57745e-05
+190 1.53751e-05 1.49931e-05 1.57603e-05
+200 1.53624e-05 1.49808e-05 1.57473e-05
+220 1.53386e-05 1.49575e-05 1.57229e-05
+240 1.53193e-05 1.49387e-05 1.57031e-05
+260 1.53017e-05 1.49215e-05 1.56850e-05
+280 1.52871e-05 1.49073e-05 1.56701e-05
+300 1.52735e-05 1.48940e-05 1.56561e-05
+350 1.52464e-05 1.48677e-05 1.56284e-05
+400 1.52255e-05 1.48472e-05 1.56069e-05
+450 1.52088e-05 1.48309e-05 1.55898e-05
+500 1.51951e-05 1.48176e-05 1.55758e-05
diff --git a/tools/data/NE_pd_decay.dat b/tools/data/NE_pd_decay.dat
new file mode 100644
index 0000000..3bae8aa
--- /dev/null
+++ b/tools/data/NE_pd_decay.dat
@@ -0,0 +1,29 @@
+10 1.78764e-05 1.74322e-05 1.83244e-05
+20 1.77356e-05 1.72949e-05 1.81801e-05
+30 1.76454e-05 1.72068e-05 1.80875e-05
+40 1.75832e-05 1.71462e-05 1.80238e-05
+50 1.75344e-05 1.70986e-05 1.79738e-05
+60 1.75000e-05 1.70651e-05 1.79386e-05
+70 1.74728e-05 1.70386e-05 1.79107e-05
+80 1.74507e-05 1.70170e-05 1.78880e-05
+90 1.74321e-05 1.69989e-05 1.78690e-05
+100 1.74152e-05 1.69824e-05 1.78517e-05
+110 1.74020e-05 1.69696e-05 1.78381e-05
+120 1.73906e-05 1.69584e-05 1.78264e-05
+130 1.73805e-05 1.69485e-05 1.78160e-05
+140 1.73709e-05 1.69392e-05 1.78062e-05
+150 1.73630e-05 1.69315e-05 1.77981e-05
+160 1.73560e-05 1.69246e-05 1.77909e-05
+170 1.73496e-05 1.69184e-05 1.77844e-05
+180 1.73438e-05 1.69128e-05 1.77785e-05
+190 1.73382e-05 1.69073e-05 1.77727e-05
+200 1.73334e-05 1.69026e-05 1.77678e-05
+220 1.73250e-05 1.68944e-05 1.77592e-05
+240 1.73175e-05 1.68872e-05 1.77515e-05
+260 1.73113e-05 1.68811e-05 1.77451e-05
+280 1.73057e-05 1.68756e-05 1.77394e-05
+300 1.73010e-05 1.68710e-05 1.77345e-05
+350 1.72911e-05 1.68614e-05 1.77244e-05
+400 1.72835e-05 1.68540e-05 1.77166e-05
+450 1.72775e-05 1.68481e-05 1.77104e-05
+500 1.72726e-05 1.68433e-05 1.77054e-05
diff --git a/tools/data/NT_pd_annih.dat b/tools/data/NT_pd_annih.dat
new file mode 100644
index 0000000..9754013
--- /dev/null
+++ b/tools/data/NT_pd_annih.dat
@@ -0,0 +1,20 @@
+10 1.55225e-05 1.51368e-05 1.59114e-05
+20 1.54592e-05 1.50751e-05 1.58464e-05
+30 1.54483e-05 1.50645e-05 1.58354e-05
+40 1.54434e-05 1.50597e-05 1.58302e-05
+50 1.54416e-05 1.50580e-05 1.58285e-05
+60 1.54407e-05 1.50571e-05 1.58276e-05
+70 1.54400e-05 1.50564e-05 1.58269e-05
+80 1.54396e-05 1.50560e-05 1.58264e-05
+90 1.54392e-05 1.50556e-05 1.58260e-05
+100 1.54391e-05 1.50555e-05 1.58259e-05
+110 1.54391e-05 1.50555e-05 1.58258e-05
+120 1.54388e-05 1.50553e-05 1.58256e-05
+130 1.54388e-05 1.50553e-05 1.58256e-05
+140 1.54387e-05 1.50551e-05 1.58255e-05
+150 1.54388e-05 1.50552e-05 1.58255e-05
+160 1.54386e-05 1.50551e-05 1.58254e-05
+170 1.54386e-05 1.50550e-05 1.58254e-05
+180 1.54385e-05 1.50550e-05 1.58253e-05
+190 1.54386e-05 1.50550e-05 1.58253e-05
+200 1.54385e-05 1.50550e-05 1.58253e-05
diff --git a/tools/data/NT_pd_decay.dat b/tools/data/NT_pd_decay.dat
new file mode 100644
index 0000000..c05e9c2
--- /dev/null
+++ b/tools/data/NT_pd_decay.dat
@@ -0,0 +1,20 @@
+10 1.74384e-05 1.70051e-05 1.78754e-05
+20 1.73791e-05 1.69472e-05 1.78147e-05
+30 1.73684e-05 1.69367e-05 1.78036e-05
+40 1.73647e-05 1.69331e-05 1.77998e-05
+50 1.73630e-05 1.69315e-05 1.77981e-05
+60 1.73620e-05 1.69306e-05 1.77971e-05
+70 1.73614e-05 1.69300e-05 1.77965e-05
+80 1.73611e-05 1.69296e-05 1.77962e-05
+90 1.73609e-05 1.69294e-05 1.77959e-05
+100 1.73607e-05 1.69292e-05 1.77957e-05
+110 1.73606e-05 1.69291e-05 1.77956e-05
+120 1.73604e-05 1.69290e-05 1.77955e-05
+130 1.73603e-05 1.69289e-05 1.77954e-05
+140 1.73602e-05 1.69288e-05 1.77952e-05
+150 1.73604e-05 1.69289e-05 1.77954e-05
+160 1.73603e-05 1.69288e-05 1.77953e-05
+170 1.73602e-05 1.69288e-05 1.77952e-05
+180 1.73602e-05 1.69287e-05 1.77952e-05
+190 1.73601e-05 1.69287e-05 1.77951e-05
+200 1.73601e-05 1.69287e-05 1.77951e-05
diff --git a/tools/data/params.py.rpl b/tools/data/params.py.rpl
deleted file mode 100644
index 56541db..0000000
--- a/tools/data/params.py.rpl
+++ /dev/null
@@ -1,132 +0,0 @@
-# math
-from math import pi
-# scipy
-from scipy.special import zeta
-
-
-# FLAGS #############################################################
-
-# If this flag is set to 'True',
-# the pregenerated databases
-# will be used to interpolate
-# the different reaction rates
-# Default: True
-usedb = True
-
-# If this flag is set to 'True',
-# additional output is printed
-# to the screen
-# Default: True
-verbose = False
-
-# If this flag is set to 'True',
-# additional debug information
-# is printed to the screen
-# Default: False
-debug = False
-
-
-# PHYSICAL CONSTANTS ################################################
-
-# The fine-structure constant
-alpha = 1./137.036
-
-# The electron mass (in MeV)
-me = 0.511
-
-# The electron mass squared (in MeV^2)
-me2 = me**2.
-
-# The classical electron radius (in 1/MeV)
-re = alpha/me
-
-# The gravitational constant (in 1/MeV^2)
-GN = 6.70861e-45
-
-# The reduced Planck constant (in MeV*s)
-hbar = 6.582119514e-22
-
-# The speed of light (in cm/s)
-c_si = 2.99792458e10
-
-# The muon lifetime (in s)
-tau_m = 2.1969811e-6
-
-# The neutron lifetime (in s)
-tau_n = 8.802e2
-
-# The lifetime of tritium (in s)
-# T_(1/2) = 3.885e8
-tau_t = 5.605e8
-
-
-# MATHEMATICAL CONSTANTS ############################################
-
-# The Riemann-Zeta function at point 3
-zeta3 = zeta(3.)
-
-# The beautiful value of pi, squared
-pi2 = pi**2.
-
-
-# INTERPOLATION-SPECIFIC PARAMETERS #################################
-
-# Boundary values
-Emin_log, Tmin_log = 0, -6
-Emax_log, Tmax_log = 3, -1
-# Number of entries...
-num_pd = 150 # ...per decade
-Enum = (Emax_log - Emin_log)*num_pd
-Tnum = (Tmax_log - Tmin_log)*num_pd
-
-
-# ALGORITHM-SPECIFIC PARAMETERS #####################################
-
-# The number of elements/isotopes that
-# are considered in the calculation
-NY = 9
-
-# The number of mandatory columns in
-# 'cosmo_file.dat'
-NC = 5
-
-# Minimum energy for the different spectra (in MeV)
-# This value should not be larger than the minimal
-# nucleon-interaction threshold of 1.586627 MeV
-# (reaction_id: 15 in 'astro-ph/0211258')
-Emin = 1.5
-
-# The value that is used for 'approximately' zero
-# Default: 1e-200
-approx_zero = 1e-200
-
-# The relative accuracy for each integral
-# Default: 1e-3
-eps = 1e-3
-
-# The maximal value of x = Ephb/T to avoid
-# overflow in the exponential function
-# Default: 200
-Ephb_T_max = 200.
-
-# The energy in units of EC at which to
-# cutoff strongly suppressed spectra
-# Default: 500.
-E_EC_cut = 500.
-
-# The number of points per decade for
-# the energy grid, which is used within
-# the solution of the cascade equation
-# Default: 150
-NE_pd = __NE_PD__
-# The minimal number of points for
-# the energy grid
-# Default: 10
-NE_min = 10
-
-# The number of points per decade for
-# the temperature grid, which is used
-# for the interpolation of the thermal
-# nuclear rates
-# Default: 50
-NT_pd = __NT_PD__
diff --git a/tools/scan_pd.sh b/tools/scan_pd.sh
index 9123387..5122c32 100755
--- a/tools/scan_pd.sh
+++ b/tools/scan_pd.sh
@@ -1,49 +1,113 @@
#! /usr/bin/env bash
+# Define a function that is called when 'Ctrl+C' is pressed
+function control_c {
+ # Cleanup
+ if [ "$dir" == "tools" ] && [ -f "$dir/data/params.py~" ]; then
+ # Go back to the original directory
+ cd $dir
+
+ # Restore the original 'params.py' file
+ mv data/params.py~ ../acropolis/params.py
+
+ # Remove unfinished data files
+ rm -f data/NE_pd.dat data/NT_pd.dat
+ fi
+
+ # Exit
+ exit
+}
+# -->
+trap control_c SIGINT
+
+
+# Define a function to replace NE_pd and NT_pd
+# in the original 'params.py' file
+function replace {
+ cp $data/params.py~ acropolis/params.py
+
+ sed -i "s/^NE_pd.*$/NE_pd = ${1}/" acropolis/params.py
+ sed -i "s/^NT_pd.*$/NT_pd = ${2}/" acropolis/params.py
+}
+
+
+# Define a function to extract the deuterium
+# abundances from the output of ACROPOLIS
+function extract_deuterium {
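+    # The output line containing 'H2' is assumed to hold the deuterium abundance; fields 4, 6 and 8 are the three values written to the .dat files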
+ echo "${1}" | awk '/H2/ {print $4 " " $6 " " $8}'
+}
+
+
+# START #######################################################################
+
+cmd_flag=1
+# Check if there are at least 7 command-line arguments,
+# the first of which is either 'decay' or 'annihilation'
+if [ $# -ge 7 ]; then
+ if [ "$1" == "decay" ] || [ "$1" == "annihilation" ]; then
+ cmd_flag=0
+ fi
+fi
+# -->
+# Stop if the previous check did not succeed
+if [ $cmd_flag == 1 ]; then
+ echo "ERROR: The command-line arguments must be either 'decay [...]' or 'annihilation [...]'. Stop!"
+ exit 1
+fi
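+
+# Example invocation (assuming the usual argument order of the 'decay' script, i.e. mphi tau temp0 n0a bree braa):
+#     ./scan_pd.sh decay 10 1e5 10 1e-10 0 1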
+
# Extract the current working directory
dir=$(basename $PWD)
-# Check if the current working directory is correct
-if [ "$dir" != "tools" ];
-then
+# Check if the current working directory is 'acropolis/tools'
+if [ "$dir" != "tools" ]; then
echo "ERROR: This script needs to be executed in the tools/ directory. Stop!"
exit 1
fi
-# Define the data path
-data="tools/data"
+# Define the data directory
+data="$dir/data"
-# Define a function to replace NE_pd and NT_pd
-function replace {
- cp $data/params.py.rpl acropolis/params.py
- sed -i "s/__NE_PD__/${1}/" acropolis/params.py
- sed -i "s/__NT_PD__/${2}/" acropolis/params.py
-}
+# Cleanup files from previous runs
+rm -f $data/NE_pd.dat $data/NT_pd.dat
# Change the directory
cd ..
-# Back up the old param-file
+# Back up the original 'params.py' file
cp acropolis/params.py $data/params.py~
-# Scan the different values for NE_pd
+# Scan over the different values for NE_pd
+echo "NE_pd"
for NE_PD in $(cat $data/NE_pd.list); do
- echo $NE_PD
-
+ # Adjust NE_pd
replace $NE_PD 50
- echo $NE_PD $(./$@) >> $data/NE_pd.dat
+
+ # Run ACROPOLIS...
+ result=$(./$@)
+ # ...and extract the deuterium abundance
+ Y2H=$(extract_deuterium "$result")
+
+ echo $NE_PD $Y2H 2>&1 | tee -a $data/NE_pd.dat
done
-# Scan the different values for NT_pd
+# Scan over the different values for NT_pd
+echo "NT_pd"
for NT_PD in $(cat $data/NT_pd.list); do
- echo $NT_PD
-
+ # Adjust NT_pd
replace 150 $NT_PD
- echo $NT_PD $(./$@) >> $data/NT_pd.dat
+
+ # Run ACROPOLIS...
+ result=$(./$@)
+ # ...and extract the deuterium abundance
+ Y2H=$(extract_deuterium "$result")
+
+ echo $NT_PD $Y2H 2>&1 | tee -a $data/NT_pd.dat
done
-# Restore the old param-file
+# Restore the original 'params.py' file
mv $data/params.py~ acropolis/params.py
# Go back to the original directory
cd tools/
+
+# END #########################################################################
diff --git a/upload_pypi b/upload_pypi
new file mode 100755
index 0000000..946e902
--- /dev/null
+++ b/upload_pypi
@@ -0,0 +1,8 @@
+#! /usr/bin/env bash
+
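+# Note: this script relies on 'gum' (for the interactive selection) and 'twine' (for the upload) being available on the PATH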
+echo "Select the upload repository:"
+# Ask the user to select a repository
+repo=$(gum choose 'testpypi' 'pypi')
+
+# Upload to the selected repository
+twine upload --repository $repo dist/*