diff --git a/Docs/source/conf.py b/Docs/source/conf.py
index a1ef15b09bf..a053224de02 100644
--- a/Docs/source/conf.py
+++ b/Docs/source/conf.py
@@ -31,7 +31,7 @@
 import urllib.request
 
 import pybtex.plugin
-import sphinx_rtd_theme
+import sphinx_rtd_theme # noqa
 from pybtex.style.formatting.unsrt import Style as UnsrtStyle
 
 sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../Regression/Checksum'))
diff --git a/Docs/source/index.rst b/Docs/source/index.rst
index 89bc2e417cb..b270bdd4a6e 100644
--- a/Docs/source/index.rst
+++ b/Docs/source/index.rst
@@ -140,7 +140,6 @@ Maintenance
    :hidden:
 
    maintenance/release
-   maintenance/performance_tests
 
 Epilogue
 --------
diff --git a/Docs/source/maintenance/performance_tests.rst b/Docs/source/maintenance/performance_tests.rst
deleted file mode 100644
index 8b902297ccb..00000000000
--- a/Docs/source/maintenance/performance_tests.rst
+++ /dev/null
@@ -1,135 +0,0 @@
-.. _developers-performance_tests:
-
-Automated performance tests
-===========================
-
-WarpX has automated performance test scripts, which run weak scalings for various tests on a weekly basis. The results are stored in the `perf_logs repo <https://github.com/ECP-WarpX/perf_logs>`_ and plots of the performance history can be found on `this page `_.
-
-These performance tests run automatically, so they need to do ``git`` operations etc. For this reason, they need a separate clone of the source repos, so they don't conflict with one's usual operations. This is typically in a sub-directory in the ``$HOME``, with variable ``$AUTOMATED_PERF_TESTS`` pointing to it. Similarly, a directory is needed to run the simulations and store the results. By default, it is ``$SCRATCH/performance_warpx``.
-
-The test runs a weak scaling (1,2,8,64,256,512 nodes) for 6 different tests ``Tools/PerformanceTests/automated_test_{1,2,3,4,5,6}_*``, gathered in 1 batch job per number of nodes to avoid submitting too many jobs.
-
-Setup on Summit @ OLCF
-----------------------
-
-Here is an example setup for Summit:
-
-.. code-block:: sh
-
-   # I put the next three lines in $HOME/my_bashrc.sh
-   export proj=aph114 # project for job submission
-   export AUTOMATED_PERF_TESTS=$HOME/AUTOMATED_PERF_TESTS/
-   export SCRATCH=/gpfs/alpine/scratch/$(whoami)/$proj/
-
-   mkdir $HOME/AUTOMATED_PERF_TESTS
-   cd $AUTOMATED_PERF_TESTS
-   git clone https://github.com/ECP-WarpX/WarpX.git warpx
-   git clone https://github.com/ECP-WarpX/picsar.git
-   git clone https://github.com/AMReX-Codes/amrex.git
-   git clone https://github.com/ECP-WarpX/perf_logs.git
-
-Then, in ``$AUTOMATED_PERF_TESTS``, create a file ``run_automated_performance_tests_512.sh`` with the following content:
-
-.. code-block:: sh
-
-   #!/bin/bash -l
-   #BSUB -P APH114
-   #BSUB -W 00:15
-   #BSUB -nnodes 1
-   #BSUB -J PERFTEST
-   #BSUB -e err_automated_tests.txt
-   #BSUB -o out_automated_tests.txt
-
-   module load nano
-   module load cmake/3.20.2
-   module load gcc/9.3.0
-   module load cuda/11.0.3
-   module load blaspp/2021.04.01
-   module load lapackpp/2021.04.00
-   module load boost/1.76.0
-   module load adios2/2.7.1
-   module load hdf5/1.12.2
-
-   module unload darshan-runtime
-
-   export AMREX_CUDA_ARCH=7.0
-   export CC=$(which gcc)
-   export CXX=$(which g++)
-   export FC=$(which gfortran)
-   export CUDACXX=$(which nvcc)
-   export CUDAHOSTCXX=$(which g++)
-
-   # Make sure all dependencies are installed and loaded
-   cd $HOME
-   module load python/3.8.10
-   module load freetype/2.10.4 # matplotlib
-   module load openblas/0.3.5-omp
-   export BLAS=$OLCF_OPENBLAS_ROOT/lib/libopenblas.so
-   export LAPACK=$OLCF_OPENBLAS_ROOT/lib/libopenblas.so
-   python3 -m pip install --user --upgrade pip
-   python3 -m pip install --user virtualenv
-   python3 -m venv $HOME/sw/venvs/warpx-perftest
-   source $HOME/sw/venvs/warpx-perftest/bin/activate
-   # While setting up the performance tests for the first time,
-   # execute the lines above this comment and then the commented
-   # lines below this comment once, before submission.
-   # The commented lines take too long for the job script.
-   #python3 -m pip install --upgrade pip
-   #python3 -m pip install --upgrade build packaging setuptools wheel
-   #python3 -m pip install --upgrade cython
-   #python3 -m pip install --upgrade numpy
-   #python3 -m pip install --upgrade markupsafe
-   #python3 -m pip install --upgrade pandas
-   #python3 -m pip install --upgrade matplotlib==3.2.2 # does not try to build freetype itself
-   #python3 -m pip install --upgrade bokeh
-   #python3 -m pip install --upgrade gitpython
-   #python3 -m pip install --upgrade tables
-
-   # Run the performance test suite
-   cd $AUTOMATED_PERF_TESTS/warpx/Tools/PerformanceTests/
-   python run_automated.py --n_node_list='1,2,8,64,256,512' --automated
-
-   # submit next week's job
-   cd $AUTOMATED_PERF_TESTS/
-   next_date=`date -d "+7 days" '+%Y:%m:%d:%H:%M'`
-   bsub -b $next_date ./run_automated_performance_tests_512.sh
-
-Then, running
-
-.. code-block:: sh
-
-   bsub run_automated_performance_tests_512.sh
-
-will submit this job once, and all the following ones. It will:
-
- - Create directory ``$SCRATCH/performance_warpx`` if doesn't exist.
 - Create 1 sub-directory per week per number of nodes (1,2,8,64,256,512).
 - Submit one job per number of nodes. It will run 6 different tests, each twice (to detect fluctuations).
 - Submit an analysis job, that will read the results ONLY AFTER all runs are finished. This uses the dependency feature of the batch system.
 - This job reads the Tiny Profiler output for each run, and stores the results in a pandas file at the hdf5 format.
 - Execute ``write_csv.py`` from the ``perf_logs`` repo to append a csv and a hdf5 file with the new results.
 - Commit the results (but DO NOT PUSH YET)
-
-Then, the user periodically has to
-
-.. code-block:: sh
-
-   cd $AUTOMATED_PERF_TESTS/perf_logs
-   git pull # to get updates from someone else, or from another supercomputer
-   git push
-
-This will update the database but not the online plots. For this, you need to periodically run something like
-
-.. code-block:: sh
-
-   cd $AUTOMATED_PERF_TESTS/perf_logs
-   git pull
-   python generate_index_html.py
-   git add -u
-   git commit -m "upload new html page"
-   git push
-
-Setup on Cori @ NERSC
----------------------
-
-Still to be written!
diff --git a/Docs/source/usage/workflows/ml_materials/train.py b/Docs/source/usage/workflows/ml_materials/train.py index 4de11b9c99e..23b1d0abcd4 100644 --- a/Docs/source/usage/workflows/ml_materials/train.py +++ b/Docs/source/usage/workflows/ml_materials/train.py @@ -85,7 +85,7 @@ train_loss_list = [] test_loss_list = [] -model.to(device=device); +model.to(device=device) ########## train and test functions #### # Manual: Train function START diff --git a/Docs/source/usage/workflows/ml_materials/visualize.py b/Docs/source/usage/workflows/ml_materials/visualize.py index 920efe29909..e9f6128b84d 100644 --- a/Docs/source/usage/workflows/ml_materials/visualize.py +++ b/Docs/source/usage/workflows/ml_materials/visualize.py @@ -65,7 +65,7 @@ act = activation_type ) model.load_state_dict(model_data['model_state_dict']) -model.to(device=device); +model.to(device=device) ###### load model data ############### dataset_filename = f'dataset_{species}.pt' diff --git a/Examples/Tests/AcceleratorLattice/analysis.py b/Examples/Tests/AcceleratorLattice/analysis.py index 5f3de4543d6..9c6589825a1 100755 --- a/Examples/Tests/AcceleratorLattice/analysis.py +++ b/Examples/Tests/AcceleratorLattice/analysis.py @@ -42,7 +42,7 @@ # The simulation data is in the boosted frame. # Transform the z position to the lab frame. time = ds.current_time.value - zz_sim = gamma_boost*zz_sim + uz_boost*time; + zz_sim = gamma_boost*zz_sim + uz_boost*time # Fetch the quadrupole lattice data quad_starts = [] diff --git a/Examples/Tests/btd_rz/analysis_BTD_laser_antenna.py b/Examples/Tests/btd_rz/analysis_BTD_laser_antenna.py index e2f174d6e29..e5d35b2ba2e 100755 --- a/Examples/Tests/btd_rz/analysis_BTD_laser_antenna.py +++ b/Examples/Tests/btd_rz/analysis_BTD_laser_antenna.py @@ -38,7 +38,7 @@ def fit_function(z, z0_phase): # The values must be consistent with the values provided in the simulation input t_current = 80e-15 # Time of the snapshot1 -c = 299792458; +c = 299792458 z0_antenna = -1.e-6 # position of laser lambda0 = 0.8e-6 # wavelength of the signal tau0 = 10e-15 # duration of the signal diff --git a/Examples/Tests/collision/analysis_collision_1d.py b/Examples/Tests/collision/analysis_collision_1d.py index 7775a476dae..c96677637e6 100755 --- a/Examples/Tests/collision/analysis_collision_1d.py +++ b/Examples/Tests/collision/analysis_collision_1d.py @@ -43,17 +43,17 @@ # Find the index 'Npmin' that separates macroparticles from group A and group B Np = len(sorted_wp) -wpmin = sorted_wp.min(); -wpmax = sorted_wp.max(); +wpmin = sorted_wp.min() +wpmax = sorted_wp.max() for i in range(len(sorted_wp)): if sorted_wp[i] > wpmin: Npmin = i break NpA = Npmin -wpA = wpmin; +wpA = wpmin NpB = Np - Npmin -wpB = wpmax; +wpB = wpmax NpAs = 0 NpAe = Npmin NpBs = Npmin @@ -61,10 +61,10 @@ ############# -sorted_px_sum = np.abs(sorted_px).sum(); -sorted_py_sum = np.abs(sorted_py).sum(); -sorted_pz_sum = np.abs(sorted_pz).sum(); -sorted_wp_sum = np.abs(sorted_wp).sum(); +sorted_px_sum = np.abs(sorted_px).sum() +sorted_py_sum = np.abs(sorted_py).sum() +sorted_pz_sum = np.abs(sorted_pz).sum() +sorted_wp_sum = np.abs(sorted_wp).sum() # compute mean velocities wAtot = wpA*NpA @@ -118,8 +118,8 @@ TApar_30ps_soln = 6.15e3 # TA parallel solution at t = 30 ps error = np.abs(TApar-TApar_30ps_soln)/TApar_30ps_soln tolerance = 0.02 -print('TApar at 30ps error = ', error); -print('tolerance = ', tolerance); +print('TApar at 30ps error = ', error) +print('tolerance = ', tolerance) assert error < tolerance test_name = os.path.split(os.getcwd())[1] diff 
--git a/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py b/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py index e63a3f02ba8..f25348f4c9c 100755 --- a/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py +++ b/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py @@ -84,14 +84,15 @@ # r(0) = r_0, r'(0) = 0, and a = q_e*q_tot/(4*pi*eps_0*m_e) # # The E was calculated at the end of the last time step -v_exact = lambda r: np.sqrt((q_e*q_tot)/(2*pi*m_e*eps_0)*(1/r_0-1/r)) -t_exact = lambda r: np.sqrt(r_0**3*2*pi*m_e*eps_0/(q_e*q_tot)) \ - * (np.sqrt(r/r_0-1)*np.sqrt(r/r_0) \ - + np.log(np.sqrt(r/r_0-1)+np.sqrt(r/r_0))) -func = lambda rho: t_exact(rho) - t_max #Objective function to find r(t_max) +def v_exact(r): + return np.sqrt(q_e * q_tot / (2 * pi * m_e * eps_0) * (1 / r_0 - 1 / r)) +def t_exact(r): + return np.sqrt(r_0 ** 3 * 2 * pi * m_e * eps_0 / (q_e * q_tot)) * (np.sqrt(r / r_0 - 1) * np.sqrt(r / r_0) + np.log(np.sqrt(r / r_0 - 1) + np.sqrt(r / r_0))) +def func(rho): + return t_exact(rho) - t_max #Objective function to find r(t_max) r_end = fsolve(func,r_0)[0] #Numerically solve for r(t_max) -E_exact = lambda r: np.sign(r)*(q_tot/(4*pi*eps_0*r**2)*(abs(r)>=r_end) \ - + q_tot*abs(r)/(4*pi*eps_0*r_end**3)*(abs(r)= r_end) + q_tot * abs(r) / (4 * pi * eps_0 * r_end ** 3) * (abs(r) < r_end)) # Load data pertaining to fields data = ds.covering_grid(level=0, diff --git a/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py b/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py index 44bbd00a7f4..87d0c6265db 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py +++ b/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py @@ -148,7 +148,7 @@ def launch_analysis(executable): def main() : from lasy.laser import Laser - from lasy.profiles import CombinedLongitudinalTransverseProfile, GaussianProfile + from lasy.profiles import CombinedLongitudinalTransverseProfile from lasy.profiles.longitudinal import GaussianLongitudinalProfile from lasy.profiles.transverse import LaguerreGaussianTransverseProfile diff --git a/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py b/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py index 118b03fed89..8f3d8d5acd1 100755 --- a/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py +++ b/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py @@ -22,7 +22,7 @@ import checksumAPI fn = sys.argv[1] -use_MR = re.search( 'nci_correctorMR', fn ) != None +use_MR = re.search( 'nci_correctorMR', fn ) is not None if use_MR: energy_corrector_off = 5.e32 diff --git a/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py b/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py index ad8b7f70e10..d0190e6a330 100755 --- a/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py +++ b/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py @@ -243,30 +243,32 @@ def cross_section( E_keV ): ## Returns cross section in b, using the analytical fits given ## in H.-S. Bosch and G.M. Hale 1992 Nucl. 
Fusion 32 611 joule_to_keV = 1.e-3/scc.e - B_G = scc.pi * scc.alpha * np.sqrt( 2.*m_reduced * scc.c**2 * joule_to_keV ); + B_G = scc.pi * scc.alpha * np.sqrt( 2.*m_reduced * scc.c**2 * joule_to_keV ) if reaction_type == 'DT': - A1 = 6.927e4; - A2 = 7.454e8; - A3 = 2.050e6; - A4 = 5.2002e4; - A5 = 0; - B1 = 6.38e1; - B2 = -9.95e-1; - B3 = 6.981e-5; - B4 = 1.728e-4; + A1 = 6.927e4 + A2 = 7.454e8 + A3 = 2.050e6 + A4 = 5.2002e4 + A5 = 0 + B1 = 6.38e1 + B2 = -9.95e-1 + B3 = 6.981e-5 + B4 = 1.728e-4 elif reaction_type == 'DD': - A1 = 5.3701e4; - A2 = 3.3027e2; - A3 = -1.2706e-1; - A4 = 2.9327e-5; - A5 = -2.5151e-9; - B1 = 0; - B2 = 0; - B3 = 0; - B4 = 0; - - astrophysical_factor = (A1 + E_keV*(A2 + E_keV*(A3 + E_keV*(A4 + E_keV*A5)))) / (1 + E_keV*(B1 + E_keV*(B2 + E_keV*(B3 + E_keV*B4)))); - millibarn_to_barn = 1.e-3; + A1 = 5.3701e4 + A2 = 3.3027e2 + A3 = -1.2706e-1 + A4 = 2.9327e-5 + A5 = -2.5151e-9 + B1 = 0 + B2 = 0 + B3 = 0 + B4 = 0 + else: + raise RuntimeError(f"Reaction type '{reaction_type}' not implemented.") + + astrophysical_factor = (A1 + E_keV*(A2 + E_keV*(A3 + E_keV*(A4 + E_keV*A5)))) / (1 + E_keV*(B1 + E_keV*(B2 + E_keV*(B3 + E_keV*B4)))) + millibarn_to_barn = 1.e-3 return millibarn_to_barn * astrophysical_factor/E_keV * np.exp(-B_G/np.sqrt(E_keV)) def E_com_to_p_sq_com(m1, m2, E): @@ -405,7 +407,7 @@ def main(): ds_start = yt.load(filename_start) ad_end = ds_end.all_data() ad_start = ds_start.all_data() - dt = float(ds_end.current_time - ds_start.current_time) + dt = float(ds_end.current_time - ds_start.current_time) # noqa field_data_end = ds_end.covering_grid(level=0, left_edge=ds_end.domain_left_edge, dims=ds_end.domain_dimensions) field_data_start = ds_start.covering_grid(level=0, left_edge=ds_start.domain_left_edge, diff --git a/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs.py b/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs.py index ec67282fd88..a14787fc8e5 100644 --- a/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs.py +++ b/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs.py @@ -93,7 +93,7 @@ def __init__(self, test, dim, B_dir, verbose): # dump all the current attributes to a dill pickle file if comm.rank == 0: - with open(f'sim_parameters.dpkl', 'wb') as f: + with open('sim_parameters.dpkl', 'wb') as f: dill.dump(self, f) # print out plasma parameters diff --git a/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs_rz.py b/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs_rz.py index 5bd1e3518f9..9d5cc8fe977 100644 --- a/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs_rz.py +++ b/Examples/Tests/ohm_solver_EM_modes/PICMI_inputs_rz.py @@ -82,7 +82,7 @@ def __init__(self, test, verbose): # dump all the current attributes to a dill pickle file if comm.rank == 0: - with open(f'sim_parameters.dpkl', 'wb') as f: + with open('sim_parameters.dpkl', 'wb') as f: dill.dump(self, f) # print out plasma parameters diff --git a/Examples/Tests/ohm_solver_EM_modes/analysis.py b/Examples/Tests/ohm_solver_EM_modes/analysis.py index ad832dc2f50..f2f71cd53ed 100755 --- a/Examples/Tests/ohm_solver_EM_modes/analysis.py +++ b/Examples/Tests/ohm_solver_EM_modes/analysis.py @@ -14,7 +14,7 @@ matplotlib.rcParams.update({'font.size': 20}) # load simulation parameters -with open(f'sim_parameters.dpkl', 'rb') as f: +with open('sim_parameters.dpkl', 'rb') as f: sim = dill.load(f) if sim.B_dir == 'z': diff --git a/Examples/Tests/ohm_solver_EM_modes/analysis_rz.py b/Examples/Tests/ohm_solver_EM_modes/analysis_rz.py index 58e615c5332..f96dd590eee 100755 --- a/Examples/Tests/ohm_solver_EM_modes/analysis_rz.py 
+++ b/Examples/Tests/ohm_solver_EM_modes/analysis_rz.py @@ -16,7 +16,7 @@ constants = picmi.constants # load simulation parameters -with open(f'sim_parameters.dpkl', 'rb') as f: +with open('sim_parameters.dpkl', 'rb') as f: sim = dill.load(f) diag_dir = "diags/field_diags" @@ -124,8 +124,8 @@ def process(it): omega_fast = sim.vA * np.sqrt(R2 + np.sqrt(R2**2 - P4)) omega_slow = sim.vA * np.sqrt(R2 - np.sqrt(R2**2 - P4)) # Upper right corner - ax.plot(k*sim.l_i, omega_fast/sim.w_ci, 'w--', label = f"$\omega_{{fast}}$") - ax.plot(k*sim.l_i, omega_slow/sim.w_ci, color='white', linestyle='--', label = f"$\omega_{{slow}}$") + ax.plot(k*sim.l_i, omega_fast/sim.w_ci, 'w--', label = "$\omega_{fast}$") + ax.plot(k*sim.l_i, omega_slow/sim.w_ci, color='white', linestyle='--', label = "$\omega_{slow}$") # Thermal resonance thermal_res = sim.w_ci + 3*sim.v_ti*k ax.plot(k*sim.l_i, thermal_res/sim.w_ci, color='magenta', linestyle='--', label = "$\omega = \Omega_i + 3v_{th,i}k$") diff --git a/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py b/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py index ade7187ebe3..68e231c6c55 100755 --- a/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py +++ b/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py @@ -14,7 +14,7 @@ matplotlib.rcParams.update({'font.size': 20}) # load simulation parameters -with open(f'sim_parameters.dpkl', 'rb') as f: +with open('sim_parameters.dpkl', 'rb') as f: sim = dill.load(f) # theoretical damping rates were taken from Fig. 14b of Munoz et al. diff --git a/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py b/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py index 6a57b1c1046..7fd6746eafe 100755 --- a/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py +++ b/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py @@ -15,7 +15,7 @@ matplotlib.rcParams.update({'font.size': 20}) # load simulation parameters -with open(f'sim_parameters.dpkl', 'rb') as f: +with open('sim_parameters.dpkl', 'rb') as f: sim = dill.load(f) if sim.resonant: diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/PICMI_inputs.py b/Examples/Tests/ohm_solver_magnetic_reconnection/PICMI_inputs.py index 56173893b7f..556eb252856 100644 --- a/Examples/Tests/ohm_solver_magnetic_reconnection/PICMI_inputs.py +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/PICMI_inputs.py @@ -102,7 +102,7 @@ def __init__(self, test, verbose): # dump all the current attributes to a dill pickle file if comm.rank == 0: - with open(f'sim_parameters.dpkl', 'wb') as f: + with open('sim_parameters.dpkl', 'wb') as f: dill.dump(self, f) # print out plasma parameters diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py b/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py index 0fb1c05ae1a..23fc3ae2809 100755 --- a/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py @@ -12,7 +12,7 @@ plt.rcParams.update({'font.size': 20}) # load simulation parameters -with open(f'sim_parameters.dpkl', 'rb') as f: +with open('sim_parameters.dpkl', 'rb') as f: sim = dill.load(f) x_idx = 2 @@ -20,7 +20,7 @@ Ey_idx = 6 Bx_idx = 8 -plane_data = np.loadtxt(f'diags/plane.dat', skiprows=1) +plane_data = np.loadtxt('diags/plane.dat', skiprows=1) steps = np.unique(plane_data[:,0]) num_steps = len(steps) diff --git a/Examples/Tests/photon_pusher/analysis_photon_pusher.py b/Examples/Tests/photon_pusher/analysis_photon_pusher.py index 7518bd5adb0..72074d75ccb 100755 --- 
a/Examples/Tests/photon_pusher/analysis_photon_pusher.py +++ b/Examples/Tests/photon_pusher/analysis_photon_pusher.py @@ -57,8 +57,8 @@ #________________________________________ #Tolerance -tol_pos = 1.0e-14; -tol_mom = 0.0; #momentum should be conserved exactly +tol_pos = 1.0e-14 +tol_mom = 0.0 #momentum should be conserved exactly #________________________________________ #Input filename @@ -152,7 +152,7 @@ def generate(): f.write("{}.single_particle_u = {} {} {}\n". format(name, velx, vely, velz)) f.write("{}.single_particle_weight = 1.0\n".format(name)) - f.write("\n".format(name)) + f.write("\n") def main(): if (len(sys.argv) < 2): diff --git a/Examples/Tests/qed/breit_wheeler/analysis_core.py b/Examples/Tests/qed/breit_wheeler/analysis_core.py index 61d928bf2cc..9d961fe5732 100755 --- a/Examples/Tests/qed/breit_wheeler/analysis_core.py +++ b/Examples/Tests/qed/breit_wheeler/analysis_core.py @@ -96,7 +96,7 @@ def BW_inner(x): def BW_X(chi_phot, chi_ele): div = (chi_ele*(chi_phot-chi_ele)) - div = np.where(np.logical_and(chi_phot > chi_ele, chi_ele != 0), div, 1.0); + div = np.where(np.logical_and(chi_phot > chi_ele, chi_ele != 0), div, 1.0) res = np.where(np.logical_and(chi_phot > chi_ele, chi_ele != 0), np.power(chi_phot/div, 2./3.), np.inf) return res diff --git a/Examples/Tests/qed/quantum_synchrotron/analysis.py b/Examples/Tests/qed/quantum_synchrotron/analysis.py index ee6139f75c8..b1986930f36 100755 --- a/Examples/Tests/qed/quantum_synchrotron/analysis.py +++ b/Examples/Tests/qed/quantum_synchrotron/analysis.py @@ -93,7 +93,8 @@ def calc_chi_part(p, E, B): #Auxiliary functions @np.vectorize def IC_inner_alternative(y): - ff = lambda x : np.exp(-y*(1+(4*x**2)/3)*np.sqrt(1+x*x/3))*(9+36*x**2 + 16*x**4)/(3 + 4*x**2)/np.sqrt(1+(x**2)/3) + def ff(x): + return np.exp(-y * (1 + 4 * x ** 2 / 3) * np.sqrt(1 + x * x / 3)) * (9 + 36 * x ** 2 + 16 * x ** 4) / (3 + 4 * x ** 2) / np.sqrt(1 + x ** 2 / 3) # This integration may not converge in some cases, in which case a python warning message can # be issued. This is probably not a significant issue for this test case and these warnings can # be ignored. 
diff --git a/Examples/Tests/radiation_reaction/test_const_B_analytical/analysis_classicalRR.py b/Examples/Tests/radiation_reaction/test_const_B_analytical/analysis_classicalRR.py index e123562fbe1..93e814759c0 100755 --- a/Examples/Tests/radiation_reaction/test_const_B_analytical/analysis_classicalRR.py +++ b/Examples/Tests/radiation_reaction/test_const_B_analytical/analysis_classicalRR.py @@ -140,7 +140,6 @@ def check(): for sp in spec_names]) for cc in zip(cases, res_mom): - init_gamma = gamma(cc[0].init_mom) end_gamma = gamma(cc[1]/m_e/c) exp_gamma = exp_res(cc[0], sim_time) diff --git a/Examples/Tests/restart/PICMI_inputs_id_cpu_read.py b/Examples/Tests/restart/PICMI_inputs_id_cpu_read.py index d400924a378..88053fd7f5a 100755 --- a/Examples/Tests/restart/PICMI_inputs_id_cpu_read.py +++ b/Examples/Tests/restart/PICMI_inputs_id_cpu_read.py @@ -66,7 +66,7 @@ name = 'diag1', period = 10, write_dir = '.', - warpx_file_prefix = f'Python_restart_runtime_components_plt' + warpx_file_prefix = 'Python_restart_runtime_components_plt' ) field_diag = picmi.FieldDiagnostic( name = 'diag1', @@ -74,7 +74,7 @@ period = 10, data_list = ['phi'], write_dir = '.', - warpx_file_prefix = f'Python_restart_runtime_components_plt' + warpx_file_prefix = 'Python_restart_runtime_components_plt' ) checkpoint = picmi.Checkpoint( @@ -82,7 +82,7 @@ period = 5, write_dir = '.', warpx_file_min_digits = 5, - warpx_file_prefix = f'Python_restart_runtime_components_chk' + warpx_file_prefix = 'Python_restart_runtime_components_chk' ) ########################## diff --git a/Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py b/Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py index 32c9f4e5808..b6e28076cbd 100755 --- a/Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py +++ b/Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py @@ -67,7 +67,7 @@ name = 'diag1', period = 10, write_dir = '.', - warpx_file_prefix = f'Python_restart_runtime_components_plt' + warpx_file_prefix = 'Python_restart_runtime_components_plt' ) field_diag = picmi.FieldDiagnostic( name = 'diag1', @@ -75,7 +75,7 @@ period = 10, data_list = ['phi'], write_dir = '.', - warpx_file_prefix = f'Python_restart_runtime_components_plt' + warpx_file_prefix = 'Python_restart_runtime_components_plt' ) checkpoint = picmi.Checkpoint( @@ -83,7 +83,7 @@ period = 5, write_dir = '.', warpx_file_min_digits = 5, - warpx_file_prefix = f'Python_restart_runtime_components_chk' + warpx_file_prefix = 'Python_restart_runtime_components_chk' ) ########################## diff --git a/Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py b/Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py index a4727053334..8457d6e051a 100755 --- a/Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py +++ b/Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py @@ -93,7 +93,7 @@ period = diagnostic_intervals, write_dir = '.', warpx_file_min_digits = 5, - warpx_file_prefix = f'Python_restart_eb_chk' + warpx_file_prefix = 'Python_restart_eb_chk' ) ########################## diff --git a/Python/pywarpx/__init__.py b/Python/pywarpx/__init__.py index 037598f4ed4..7531f764a48 100644 --- a/Python/pywarpx/__init__.py +++ b/Python/pywarpx/__init__.py @@ -23,26 +23,26 @@ if os.path.exists(p_abs): os.add_dll_directory(p_abs) -from ._libwarpx import libwarpx -from .Algo import algo -from .Amr import amr -from .Amrex import amrex -from .Boundary import boundary -from .Collisions import collisions -from .Constants import my_constants -from .Diagnostics import 
diagnostics, reduced_diagnostics -from .EB2 import eb2 -from .Geometry import geometry -from .HybridPICModel import hybridpicmodel -from .Interpolation import interpolation -from .Lasers import lasers -from .LoadThirdParty import load_cupy -from .Particles import newspecies, particles -from .PSATD import psatd -from .WarpX import warpx +from ._libwarpx import libwarpx # noqa +from .Algo import algo # noqa +from .Amr import amr # noqa +from .Amrex import amrex # noqa +from .Boundary import boundary # noqa +from .Collisions import collisions # noqa +from .Constants import my_constants # noqa +from .Diagnostics import diagnostics, reduced_diagnostics # noqa +from .EB2 import eb2 # noqa +from .Geometry import geometry # noqa +from .HybridPICModel import hybridpicmodel # noqa +from .Interpolation import interpolation # noqa +from .Lasers import lasers # noqa +from .LoadThirdParty import load_cupy # noqa +from .Particles import newspecies, particles # noqa +from .PSATD import psatd # noqa +from .WarpX import warpx # noqa # This is a circular import and must happen after the import of libwarpx -from . import picmi # isort:skip +from . import picmi # noqa # isort:skip # intentionally query the value - only set once sim dimension is known def __getattr__(name): diff --git a/Python/pywarpx/_libwarpx.py b/Python/pywarpx/_libwarpx.py index 1eb410e65b8..6e347ff5fd7 100755 --- a/Python/pywarpx/_libwarpx.py +++ b/Python/pywarpx/_libwarpx.py @@ -15,8 +15,6 @@ import atexit import os -import numpy as np - from .Geometry import geometry diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index 0f680595ef4..4c2c26dcb38 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -141,16 +141,12 @@ def mesh(self, direction): try: if libwarpx.geometry_dim == '3d': idir = ['x', 'y', 'z'].index(direction) - celldir = idir elif libwarpx.geometry_dim == '2d': idir = ['x', 'z'].index(direction) - celldir = 2*idir elif libwarpx.geometry_dim == 'rz': idir = ['r', 'z'].index(direction) - celldir = 2*idir elif libwarpx.geometry_dim == '1d': idir = ['z'].index(direction) - celldir = idir except ValueError: raise Exception('Inappropriate direction given') @@ -511,10 +507,14 @@ def __setitem__(self, index, value): global_shape = list(value3d.shape) # The shape of 1 is added for the extra dimensions and when index is an integer # (in which case the dimension was not in the input array). 
- if not isinstance(ii[0], slice): global_shape[0:0] = [1] - if not isinstance(ii[1], slice): global_shape[1:1] = [1] - if not isinstance(ii[2], slice): global_shape[2:2] = [1] - if not isinstance(ic , slice) or len(global_shape) < 4: global_shape[3:3] = [1] + if not isinstance(ii[0], slice): + global_shape[0:0] = [1] + if not isinstance(ii[1], slice): + global_shape[1:1] = [1] + if not isinstance(ii[2], slice): + global_shape[2:2] = [1] + if not isinstance(ic , slice) or len(global_shape) < 4: + global_shape[3:3] = [1] value3d.shape = global_shape if libwarpx.libwarpx_so.Config.have_gpu: diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index a7ef3470c52..4f3993c911c 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1667,11 +1667,11 @@ def applied_field_initialize_inputs(self): class AnalyticInitialField(picmistandard.PICMI_AnalyticAppliedField): def init(self, kw): self.mangle_dict = None - self.maxlevel_extEMfield_init = kw.pop('warpx_maxlevel_extEMfield_init', None); + self.maxlevel_extEMfield_init = kw.pop('warpx_maxlevel_extEMfield_init', None) def applied_field_initialize_inputs(self): # Note that lower and upper_bound are not used by WarpX - pywarpx.warpx.maxlevel_extEMfield_init = self.maxlevel_extEMfield_init; + pywarpx.warpx.maxlevel_extEMfield_init = self.maxlevel_extEMfield_init if self.mangle_dict is None: # Only do this once so that the same variables are used in this distribution diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 8e08344f873..b035ddb9a2a 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -2141,90 +2141,6 @@ useOMP = 1 numthreads = 1 analysisRoutine = Examples/analysis_default_regression.py -[Performance_works_1_uniform_rest_32ppc] -buildDir = . -inputFile = Examples/Tests/performance_tests/automated_test_1_uniform_rest_32ppc -runtime_params = amr.max_grid_size=32 amr.n_cell=32 32 32 max_step=5 diagnostics.diags_names=diag1 diag1.intervals=0 diag1.diag_type=Full -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = - -[Performance_works_2_uniform_rest_1ppc] -buildDir = . -inputFile = Examples/Tests/performance_tests/automated_test_2_uniform_rest_1ppc -runtime_params = amr.max_grid_size=32 amr.n_cell=32 32 32 max_step=5 diagnostics.diags_names=diag1 diag1.intervals=0 diag1.diag_type=Full -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = - -[Performance_works_3_uniform_drift_4ppc] -buildDir = . -inputFile = Examples/Tests/performance_tests/automated_test_3_uniform_drift_4ppc -runtime_params = amr.max_grid_size=32 amr.n_cell=32 32 32 max_step=5 diagnostics.diags_names=diag1 diag1.intervals=0 diag1.diag_type=Full -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = - -[Performance_works_4_labdiags_2ppc] -buildDir = . -inputFile = Examples/Tests/performance_tests/automated_test_4_labdiags_2ppc -runtime_params = amr.n_cell=64 64 64 max_step=10 diagnostics.diags_names=diag1 diag1.intervals=0 diag1.diag_type=Full -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = - -[Performance_works_5_loadimbalance] -buildDir = . 
-inputFile = Examples/Tests/performance_tests/automated_test_5_loadimbalance -runtime_params = amr.max_grid_size=32 amr.n_cell=32 32 32 max_step=5 diagnostics.diags_names=diag1 diag1.intervals=0 diag1.diag_type=Full -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = - -[Performance_works_6_output_2ppc] -buildDir = . -inputFile = Examples/Tests/performance_tests/automated_test_6_output_2ppc -runtime_params = amr.n_cell=64 64 64 max_step=10 -dim = 3 -addToCompileString = -cmakeSetupOpts = -DWarpX_DIMS=3 -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -analysisRoutine = - [photon_pusher] buildDir = . inputFile = Examples/Tests/photon_pusher/inputs_3d diff --git a/Regression/prepare_file_ci.py b/Regression/prepare_file_ci.py index f7b610dace9..16f78e074ea 100644 --- a/Regression/prepare_file_ci.py +++ b/Regression/prepare_file_ci.py @@ -106,10 +106,10 @@ def select_tests(blocks, match_string_list, do_test): """Remove or keep tests from list in WarpX-tests.ini according to do_test variable""" if do_test not in [True, False]: raise ValueError("do_test must be True or False") - if (do_test == False): + if (do_test is False): for match_string in match_string_list: print('Selecting tests without ' + match_string) - blocks = [ block for block in blocks if not match_string in block ] + blocks = [ block for block in blocks if match_string not in block ] else: for match_string in match_string_list: print('Selecting tests with ' + match_string) diff --git a/Tools/Algorithms/psatd.ipynb b/Tools/Algorithms/psatd.ipynb index c2f326e4110..9fb0f62c18d 100644 --- a/Tools/Algorithms/psatd.ipynb +++ b/Tools/Algorithms/psatd.ipynb @@ -555,7 +555,7 @@ " rhs = rhs.simplify()\n", " diff = lhs - rhs\n", " diff = diff.simplify()\n", - " assert (diff == 0), f'Integration of linear system of ODEs failed'" + " assert (diff == 0), 'Integration of linear system of ODEs failed'" ] }, { diff --git a/Tools/Algorithms/psatd_pml.ipynb b/Tools/Algorithms/psatd_pml.ipynb index 897ffa70b9e..db4a22c9006 100644 --- a/Tools/Algorithms/psatd_pml.ipynb +++ b/Tools/Algorithms/psatd_pml.ipynb @@ -8,7 +8,6 @@ }, "outputs": [], "source": [ - "import inspect\n", "import sympy as sp\n", "from sympy import *\n", "from sympy.solvers.solveset import linsolve\n", @@ -558,7 +557,6 @@ "outputs": [], "source": [ "# Code generation\n", - "from sympy.codegen.ast import Assignment\n", "\n", "# 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11\n", "# EG: Exx, Exy, Exz, Eyx, Eyy, Eyz, Ezx, Ezy, Ezz, Gx, Gy, Gz\n", diff --git a/Tools/Ascent/ascent_replay_warpx.ipynb b/Tools/Ascent/ascent_replay_warpx.ipynb index 636e9721c74..dab791f0af9 100644 --- a/Tools/Ascent/ascent_replay_warpx.ipynb +++ b/Tools/Ascent/ascent_replay_warpx.ipynb @@ -19,7 +19,6 @@ "import conduit.relay\n", "import ascent\n", "\n", - "from IPython.display import Image\n", "\n", "import glob" ] diff --git a/Tools/DevUtils/compare_wx_w_3d.ipynb b/Tools/DevUtils/compare_wx_w_3d.ipynb index d4b0520dd36..34e6748d1c6 100644 --- a/Tools/DevUtils/compare_wx_w_3d.ipynb +++ b/Tools/DevUtils/compare_wx_w_3d.ipynb @@ -23,15 +23,10 @@ "outputs": [], "source": [ "# Import statements\n", - "import sys\n", - "from tqdm import tqdm\n", - "import yt, glob\n", + "import yt\n", + "import glob\n", "yt.funcs.mylog.setLevel(50)\n", - "from IPython.display import clear_output\n", - "import numpy as np\n", "from ipywidgets import interact, RadioButtons, IntSlider\n", - "from openpmd_viewer import 
OpenPMDTimeSeries\n", - "from yt.units import volt\n", "import matplotlib.pyplot as plt\n", "%matplotlib" ] diff --git a/Tools/DevUtils/update_benchmarks_from_azure_output.py b/Tools/DevUtils/update_benchmarks_from_azure_output.py index ec7b17d1050..ec344988b81 100644 --- a/Tools/DevUtils/update_benchmarks_from_azure_output.py +++ b/Tools/DevUtils/update_benchmarks_from_azure_output.py @@ -46,7 +46,7 @@ # "----------------" # which indicates that we have read the new file entirely - if not closing_string in line: + if closing_string not in line: if not first_line_read: # Raw Azure output comes with a prefix at the beginning of each line that we do # not need here. The first line that we will read is the prefix followed by the diff --git a/Tools/PerformanceTests/automated_test_1_uniform_rest_32ppc b/Tools/PerformanceTests/automated_test_1_uniform_rest_32ppc deleted file mode 100644 index e5e722aa761..00000000000 --- a/Tools/PerformanceTests/automated_test_1_uniform_rest_32ppc +++ /dev/null @@ -1,58 +0,0 @@ -# Maximum number of time steps: command-line argument -# number of grid points: command-line argument - -# Maximum allowable size of each subdomain in the problem domain; -# this is used to decompose the domain for parallel calculations. -amr.max_grid_size = 64 - -# Maximum level in hierarchy (for now must be 0, i.e., one level in total) -amr.max_level = 0 - -# Geometry -geometry.dims = 3 -geometry.prob_lo = -20.e-6 -20.e-6 -20.e-6 # physical domain -geometry.prob_hi = 20.e-6 20.e-6 20.e-6 - -# Boundaries -boundary.field_lo = pec pec periodic -boundary.field_hi = pec pec periodic -boundary.particle_lo = absorbing absorbing periodic -boundary.particle_hi = absorbing absorbing periodic - -# Verbosity -warpx.verbose = 1 - -algo.particle_shape = 3 - -# CFL -warpx.cfl = 1.0 - -particles.species_names = electrons ions - -electrons.charge = -q_e -electrons.mass = m_e -electrons.injection_style = "NUniformPerCell" -electrons.num_particles_per_cell_each_dim = 2 2 4 -electrons.profile = constant -electrons.density = 1.e20 # number of electrons per m^3 -electrons.momentum_distribution_type = "gaussian" -electrons.ux_th = 0.01 -electrons.uy_th = 0.01 -electrons.uz_th = 0.01 -electrons.ux_m = 0. -electrons.uy_m = 0. -electrons.uz_m = 0. - -ions.charge = q_e -ions.mass = m_p -ions.injection_style = "NUniformPerCell" -ions.num_particles_per_cell_each_dim = 2 2 4 -ions.profile = constant -ions.density = 1.e20 # number of electrons per m^3 -ions.momentum_distribution_type = "gaussian" -ions.ux_th = 0.01 -ions.uy_th = 0.01 -ions.uz_th = 0.01 -ions.ux_m = 0. -ions.uy_m = 0. -ions.uz_m = 0. diff --git a/Tools/PerformanceTests/automated_test_2_uniform_rest_1ppc b/Tools/PerformanceTests/automated_test_2_uniform_rest_1ppc deleted file mode 100644 index 610990ac140..00000000000 --- a/Tools/PerformanceTests/automated_test_2_uniform_rest_1ppc +++ /dev/null @@ -1,44 +0,0 @@ -# Maximum number of time steps: command-line argument -# number of grid points: command-line argument - -# Maximum allowable size of each subdomain in the problem domain; -# this is used to decompose the domain for parallel calculations. 
-amr.max_grid_size = 32 - -# Maximum level in hierarchy (for now must be 0, i.e., one level in total) -amr.max_level = 0 - -# Geometry -geometry.dims = 3 -geometry.prob_lo = -20.e-6 -20.e-6 -20.e-6 # physical domain -geometry.prob_hi = 20.e-6 20.e-6 20.e-6 - -# Boundaries -boundary.field_lo = pec pec pec -boundary.field_hi = pec pec pec -boundary.particle_lo = absorbing absorbing absorbing -boundary.particle_hi = absorbing absorbing absorbing - -# Verbosity -warpx.verbose = 1 - -algo.particle_shape = 3 - -# CFL -warpx.cfl = 1.0 - -particles.species_names = electrons - -electrons.charge = -q_e -electrons.mass = m_e -electrons.injection_style = "NUniformPerCell" -electrons.num_particles_per_cell_each_dim = 1 1 1 -electrons.profile = constant -electrons.density = 1.e20 # number of electrons per m^3 -electrons.momentum_distribution_type = "gaussian" -electrons.ux_th = 0.01 -electrons.uy_th = 0.01 -electrons.uz_th = 0.01 -electrons.ux_m = 0. -electrons.uy_m = 0. -electrons.uz_m = 0. diff --git a/Tools/PerformanceTests/automated_test_3_uniform_drift_4ppc b/Tools/PerformanceTests/automated_test_3_uniform_drift_4ppc deleted file mode 100644 index 5a834cb4117..00000000000 --- a/Tools/PerformanceTests/automated_test_3_uniform_drift_4ppc +++ /dev/null @@ -1,59 +0,0 @@ -# Maximum number of time steps: command-line argument -# number of grid points: command-line argument - -# Maximum allowable size of each subdomain in the problem domain; -# this is used to decompose the domain for parallel calculations. -amr.max_grid_size = 64 - -# Maximum level in hierarchy (for now must be 0, i.e., one level in total) -amr.max_level = 0 - -# Geometry -geometry.dims = 3 -geometry.prob_lo = -20.e-6 -20.e-6 -20.e-6 # physical domain -geometry.prob_hi = 20.e-6 20.e-6 20.e-6 - -# Boundaries -boundary.field_lo = pec pec periodic -boundary.field_hi = pec pec periodic -boundary.particle_lo = absorbing absorbing periodic -boundary.particle_hi = absorbing absorbing periodic - -# Verbosity -warpx.verbose = 1 - -# Algorithms -algo.particle_shape = 3 - -# CFL -warpx.cfl = 1.0 - -particles.species_names = electrons ions - -electrons.charge = -q_e -electrons.mass = m_e -electrons.injection_style = "NUniformPerCell" -electrons.num_particles_per_cell_each_dim = 2 2 4 -electrons.profile = constant -electrons.density = 1.e20 # number of electrons per m^3 -electrons.momentum_distribution_type = "gaussian" -electrons.ux_th = 0.01 -electrons.uy_th = 0.01 -electrons.uz_th = 0.01 -electrons.ux_m = 0. -electrons.uy_m = 0. -electrons.uz_m = 100. - -ions.charge = q_e -ions.mass = m_p -ions.injection_style = "NUniformPerCell" -ions.num_particles_per_cell_each_dim = 2 2 4 -ions.profile = constant -ions.density = 1.e20 # number of electrons per m^3 -ions.momentum_distribution_type = "gaussian" -ions.ux_th = 0.01 -ions.uy_th = 0.01 -ions.uz_th = 0.01 -ions.ux_m = 0. -ions.uy_m = 0. -ions.uz_m = 100. diff --git a/Tools/PerformanceTests/automated_test_4_labdiags_2ppc b/Tools/PerformanceTests/automated_test_4_labdiags_2ppc deleted file mode 100644 index f49d92acf26..00000000000 --- a/Tools/PerformanceTests/automated_test_4_labdiags_2ppc +++ /dev/null @@ -1,79 +0,0 @@ -# Maximum number of time steps: command-line argument -# number of grid points: command-line argument - -amr.max_grid_size = 64 - -# Maximum level in hierarchy (for now must be 0, i.e., one level in total) -amr.max_level = 0 - -# Geometry -geometry.dims = 3 -geometry.prob_lo = -150.e-6 -150.e-6 -80.e-6 # physical domain -geometry.prob_hi = 150.e-6 150.e-6 0. 
- -# Boundaries -boundary.field_lo = pec pec pec -boundary.field_hi = pec pec pec -boundary.particle_lo = absorbing absorbing absorbing -boundary.particle_hi = absorbing absorbing absorbing - -# Verbosity -warpx.verbose = 1 - -# Numerics -algo.particle_shape = 3 -warpx.use_filter = 1 -warpx.cfl = 1.0 - -# Moving window -warpx.do_moving_window = 1 -warpx.moving_window_dir = z -warpx.moving_window_v = 1.0 # in units of the speed of light - -# Boosted frame -warpx.gamma_boost = 15. -warpx.boost_direction = z - -# Species -particles.species_names = electrons ions - -electrons.charge = -q_e -electrons.mass = m_e -electrons.injection_style = "NUniformPerCell" -electrons.xmin = -150.e-6 -electrons.xmax = 150.e-6 -electrons.ymin = -150.e-6 -electrons.ymax = 150.e-6 -electrons.zmin = 0.e-6 -electrons.num_particles_per_cell_each_dim = 1 1 1 -electrons.profile = constant -electrons.density = 1. -electrons.momentum_distribution_type = "at_rest" -electrons.do_continuous_injection = 1 - -ions.charge = q_e -ions.mass = m_p -ions.injection_style = "NUniformPerCell" -ions.xmin = -150.e-6 -ions.xmax = 150.e-6 -ions.ymin = -150.e-6 -ions.ymax = 150.e-6 -ions.zmin = 0.e-6 -ions.num_particles_per_cell_each_dim = 1 1 1 -ions.profile = constant -ions.density = 1. -ions.momentum_distribution_type = "at_rest" -ions.do_continuous_injection = 1 - -# Laser -lasers.names = laser -laser.profile = Gaussian -laser.position = 0. 0. -1.e-6 # This point is on the laser plane -laser.direction = 0. 0. 1. # The plane normal direction -laser.polarization = 1. 0. 0. # The main polarization vector -laser.e_max = 8.e12 # Maximum amplitude of the laser field (in V/m) -laser.profile_waist = 5.e-5 # The waist of the laser (in meters) -laser.profile_duration = 16.7e-15 # The duration of the laser (in seconds) -laser.profile_t_peak = 33.4e-15 # The time at which the laser reaches its peak (in seconds) -laser.profile_focal_distance = 0.e-6 # Focal distance from the antenna (in meters) -laser.wavelength = 0.8e-6 # The wavelength of the laser (in meters) diff --git a/Tools/PerformanceTests/automated_test_5_loadimbalance b/Tools/PerformanceTests/automated_test_5_loadimbalance deleted file mode 100644 index 76b1a53efdb..00000000000 --- a/Tools/PerformanceTests/automated_test_5_loadimbalance +++ /dev/null @@ -1,59 +0,0 @@ -# Maximum number of time steps: command-line argument -# number of grid points: command-line argument - -# Maximum allowable size of each subdomain in the problem domain; -# this is used to decompose the domain for parallel calculations. -amr.max_grid_size = 32 - -# Maximum level in hierarchy (for now must be 0, i.e., one level in total) -amr.max_level = 0 - -# Geometry -geometry.dims = 3 -geometry.prob_lo = -20.e-6 -20.e-6 -20.e-6 # physical domain -geometry.prob_hi = 20.e-6 20.e-6 20.e-6 - -# Boundaries -boundary.field_lo = pec pec periodic -boundary.field_hi = pec pec periodic -boundary.particle_lo = absorbing absorbing periodic -boundary.particle_hi = absorbing absorbing periodic - -warpx.verbose = 1 -algo.load_balance_intervals = -5 -algo.particle_shape = 3 - -# CFL -warpx.cfl = 1.0 - -particles.species_names = electrons ions - -electrons.charge = -q_e -electrons.mass = m_e -electrons.injection_style = "NUniformPerCell" -electrons.num_particles_per_cell_each_dim = 2 2 4 -electrons.profile = constant -electrons.density = 1.e20 # number of electrons per m^3 -electrons.momentum_distribution_type = "gaussian" -electrons.zmax = 0. -electrons.ux_th = 0.01 -electrons.uy_th = 0.01 -electrons.uz_th = 0.01 -electrons.ux_m = 0. 
-electrons.uy_m = 0. -electrons.uz_m = 0. - -ions.charge = q_e -ions.mass = m_p -ions.injection_style = "NUniformPerCell" -ions.num_particles_per_cell_each_dim = 2 2 4 -ions.profile = constant -ions.density = 1.e20 # number of electrons per m^3 -ions.momentum_distribution_type = "gaussian" -ions.zmax = 0. -ions.ux_th = 0.01 -ions.uy_th = 0.01 -ions.uz_th = 0.01 -ions.ux_m = 0. -ions.uy_m = 0. -ions.uz_m = 0. diff --git a/Tools/PerformanceTests/automated_test_6_output_2ppc b/Tools/PerformanceTests/automated_test_6_output_2ppc deleted file mode 100644 index afd4a6df336..00000000000 --- a/Tools/PerformanceTests/automated_test_6_output_2ppc +++ /dev/null @@ -1,64 +0,0 @@ -# Maximum number of time steps: command-line argument -# number of grid points: command-line argument - -# Maximum allowable size of each subdomain in the problem domain; -# this is used to decompose the domain for parallel calculations. -amr.max_grid_size = 64 - -# Maximum level in hierarchy (for now must be 0, i.e., one level in total) -amr.max_level = 0 - -# Geometry -geometry.dims = 3 -geometry.prob_lo = -20.e-6 -20.e-6 -20.e-6 # physical domain -geometry.prob_hi = 20.e-6 20.e-6 20.e-6 - -# Boundaries -boundary.field_lo = pec pec pec -boundary.field_hi = pec pec pec -boundary.particle_lo = absorbing absorbing absorbing -boundary.particle_hi = absorbing absorbing absorbing - -# Verbosity -warpx.verbose = 1 - -algo.particle_shape = 3 - -# CFL -warpx.cfl = 1.0 - -particles.species_names = electrons ions - -electrons.charge = -q_e -electrons.mass = m_e -electrons.injection_style = "NUniformPerCell" -electrons.num_particles_per_cell_each_dim = 1 1 1 -electrons.profile = constant -electrons.density = 1.e20 # number of electrons per m^3 -electrons.momentum_distribution_type = "gaussian" -electrons.ux_th = 0.01 -electrons.uy_th = 0.01 -electrons.uz_th = 0.01 -electrons.ux_m = 0. -electrons.uy_m = 0. -electrons.uz_m = 0. - -ions.charge = q_e -ions.mass = m_p -ions.injection_style = "NUniformPerCell" -ions.num_particles_per_cell_each_dim = 1 1 1 -ions.profile = constant -ions.density = 1.e20 # number of electrons per m^3 -ions.momentum_distribution_type = "gaussian" -ions.ux_th = 0.01 -ions.uy_th = 0.01 -ions.uz_th = 0.01 -ions.ux_m = 0. -ions.uy_m = 0. -ions.uz_m = 0. - -# Diagnostics -diagnostics.diags_names = diag1 -diag1.intervals = 1 -diag1.file_prefix = "./diags/plt" -diag1.diag_type = Full diff --git a/Tools/PerformanceTests/cori.py b/Tools/PerformanceTests/cori.py deleted file mode 100644 index 046767713f0..00000000000 --- a/Tools/PerformanceTests/cori.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2019 Axel Huebl, Luca Fedeli, Maxence Thevenet -# -# -# This file is part of WarpX. -# -# License: BSD-3-Clause-LBNL - -import copy -import os - -from functions_perftest import test_element - -module_name = {'cpu': 'haswell.', 'knl': 'mic-knl.', 'gpu':'.'} - -def executable_name(compiler, architecture): - return 'perf_tests3d.' + compiler + \ - '.' 
+ module_name[architecture] + 'TPROF.MTMPI.OMP.QED.ex' - -def get_config_command(compiler, architecture): - config_command = '' - config_command += 'module unload darshan;' - if architecture == 'knl': - if compiler == 'intel': - config_command += 'module unload PrgEnv-gnu;' - config_command += 'module load PrgEnv-intel;' - elif compiler == 'gnu': - config_command += 'module unload PrgEnv-intel;' - config_command += 'module load PrgEnv-gnu;' - config_command += 'module unload craype-haswell;' - config_command += 'module load craype-mic-knl;' - elif architecture == 'cpu': - if compiler == 'intel': - config_command += 'module unload PrgEnv-gnu;' - config_command += 'module load PrgEnv-intel;' - elif compiler == 'gnu': - config_command += 'module unload PrgEnv-intel;' - config_command += 'module load PrgEnv-gnu;' - config_command += 'module unload craype-mic-knl;' - config_command += 'module load craype-haswell;' - return config_command - -# This function runs a batch script with -# dependencies to perform the analysis -# after all performance tests are done. -def process_analysis(automated, cwd, compiler, architecture, n_node_list, start_date, path_source, path_results): - dependencies = '' - f_log = open(cwd + 'log_jobids_tmp.txt' ,'r') - for line in f_log.readlines(): - dependencies += line.split()[3] + ':' - - batch_string = '''#!/bin/bash -#SBATCH --job-name=warpx_1node_read -#SBATCH --time=00:07:00 -#SBATCH -C knl -#SBATCH -N 1 -#SBATCH -S 4 -#SBATCH -q regular -#SBATCH -e read_error.txt -#SBATCH -o read_output.txt -#SBATCH --mail-type=end -#SBATCH --account=m2852 -module load h5py-parallel -''' - batch_string += 'python run_automated.py --compiler=' + \ - compiler + ' --architecture=' + architecture + \ - ' --mode=read' + \ - ' --n_node_list=' + '"' + n_node_list + '"' + \ - ' --start_date=' + start_date + \ - ' --path_source=' + path_source + \ - ' --path_results=' + path_results - if automated == True: - batch_string += ' --automated' - batch_string += '\n' - batch_file = 'slurm_perfread' - f_exe = open(batch_file,'w') - f_exe.write(batch_string) - f_exe.close() - os.system('chmod 700 ' + batch_file) - print( 'process_analysis line: ' + 'sbatch --dependency afterok:' + dependencies[0:-1] + ' ' + batch_file) - os.system('sbatch --dependency afterok:' + dependencies[0:-1] + ' ' + batch_file) - -# Calculate simulation time. Take 5 min + 5 min / simulation -def time_min(nb_simulations): - return 5. + nb_simulations*5. 
- -def get_submit_job_command(): - return ' sbatch ' - -def get_batch_string(test_list, job_time_min, Cname, n_node): - - job_time_str = str(int(job_time_min/60)) + ':' + str(int(job_time_min%60)) + ':00' - - batch_string = '' - batch_string += '#!/bin/bash\n' - batch_string += '#SBATCH --job-name=' + test_list[0].input_file + '\n' - batch_string += '#SBATCH --time=' + job_time_str + '\n' - batch_string += '#SBATCH -C ' + Cname + '\n' - batch_string += '#SBATCH -N ' + str(n_node) + '\n' - batch_string += '#SBATCH -q regular\n' - batch_string += '#SBATCH -e error.txt\n' - batch_string += '#SBATCH --account=m2852\n' - batch_string += 'module unload PrgEnv-gnu\n' - batch_string += 'module load PrgEnv-intel\n' - return batch_string - -def get_run_string(current_test, architecture, n_node, count, bin_name, runtime_param_string): - srun_string = '' - srun_string += 'export OMP_NUM_THREADS=' + str(current_test.n_omp) + '\n' - # number of logical cores per MPI process - if architecture == 'cpu': - cflag_value = max(1, int(32/current_test.n_mpi_per_node) * 2) # Follow NERSC directives - elif architecture == 'knl': - cflag_value = max(1, int(64/current_test.n_mpi_per_node) * 4) # Follow NERSC directives - output_filename = 'out_' + '_'.join([current_test.input_file, str(n_node), str(current_test.n_mpi_per_node), str(current_test.n_omp), str(count)]) + '.txt' - srun_string += 'srun --cpu_bind=cores '+ \ - ' -n ' + str(n_node*current_test.n_mpi_per_node) + \ - ' -c ' + str(cflag_value) + \ - ' ./' + bin_name + \ - ' ' + current_test.input_file + \ - runtime_param_string + \ - ' > ' + output_filename + '\n' - return srun_string - -def get_test_list(n_repeat): - test_list_unq = [] - # n_node is kept to None and passed in functions as an external argument - # That way, several test_element_instance run with the same n_node on the same batch job - test_list_unq.append( test_element(input_file='automated_test_1_uniform_rest_32ppc', - n_mpi_per_node=8, - n_omp=8, - n_cell=[128, 128, 128], - max_grid_size=64, - blocking_factor=32, - n_step=10) ) - test_list_unq.append( test_element(input_file='automated_test_2_uniform_rest_1ppc', - n_mpi_per_node=8, - n_omp=8, - n_cell=[256, 256, 512], - max_grid_size=64, - blocking_factor=32, - n_step=10) ) - test_list_unq.append( test_element(input_file='automated_test_3_uniform_drift_4ppc', - n_mpi_per_node=8, - n_omp=8, - n_cell=[128, 128, 128], - max_grid_size=64, - blocking_factor=32, - n_step=10) ) - test_list_unq.append( test_element(input_file='automated_test_4_labdiags_2ppc', - n_mpi_per_node=8, - n_omp=8, - n_cell=[64, 64, 128], - max_grid_size=64, - blocking_factor=32, - n_step=50) ) - test_list_unq.append( test_element(input_file='automated_test_5_loadimbalance', - n_mpi_per_node=8, - n_omp=8, - n_cell=[128, 128, 128], - max_grid_size=64, - blocking_factor=32, - n_step=10) ) - test_list_unq.append( test_element(input_file='automated_test_6_output_2ppc', - n_mpi_per_node=8, - n_omp=8, - n_cell=[128, 256, 256], - max_grid_size=64, - blocking_factor=32, - n_step=1) ) - test_list = [copy.deepcopy(item) for item in test_list_unq for _ in range(n_repeat) ] - return test_list diff --git a/Tools/PerformanceTests/functions_perftest.py b/Tools/PerformanceTests/functions_perftest.py deleted file mode 100644 index 8d7f4e29246..00000000000 --- a/Tools/PerformanceTests/functions_perftest.py +++ /dev/null @@ -1,255 +0,0 @@ -# Copyright 2018-2019 Axel Huebl, Luca Fedeli, Maxence Thevenet -# Remi Lehe -# -# This file is part of WarpX. 
-# -# License: BSD-3-Clause-LBNL - -import copy -import os -import re -import shutil - -import git -import numpy as np -import pandas as pd - -# import cori -# import summit - -# Each instance of this class contains information for a single test. -class test_element(): - def __init__(self, input_file=None, n_node=None, n_mpi_per_node=None, - n_omp=None, n_cell=None, n_step=None, max_grid_size=None, - blocking_factor=None): - self.input_file = input_file - self.n_node = n_node - self.n_mpi_per_node = n_mpi_per_node - self.n_omp = n_omp - self.n_cell = n_cell - self.n_step = n_step - self.max_grid_size = max_grid_size - self.blocking_factor = blocking_factor - - def scale_n_cell(self, n_node=0): - n_cell_scaled = copy.deepcopy(self.n_cell) - index_dim = 0 - while n_node > 1: - n_cell_scaled[index_dim] *= 2 - n_node /= 2 - index_dim = (index_dim+1) % 3 - self.n_cell = n_cell_scaled - -def scale_n_cell(ncell, n_node): - ncell_scaled = ncell[:] - index_dim = 0 - while n_node > 1: - ncell_scaled[index_dim] *= 2 - n_node /= 2 - index_dim = (index_dim+1) % 3 - return ncell_scaled - -def store_git_hash(repo_path=None, filename=None, name=None): - repo = git.Repo(path=repo_path) - sha = repo.head.object.hexsha - file_handler = open( filename, 'a+' ) - file_handler.write( name + ':' + sha + ' ') - file_handler.close() - -def get_file_content(filename=None): - file_handler = open( filename, 'r' ) - file_content = file_handler.read() - file_handler.close() - return file_content - -def run_batch(run_name, res_dir, bin_name, config_command, architecture='knl',\ - Cname='knl', n_node=1, n_mpi=1, n_omp=1): - # Clean res_dir - if os.path.exists(res_dir): - shutil.rmtree(res_dir) - os.makedirs(res_dir) - # Copy files to res_dir - cwd = os.environ['WARPX'] + '/Tools/PerformanceTests/' - bin_dir = cwd + 'Bin/' - shutil.copy(bin_dir + bin_name, res_dir) - shutil.copyfile(cwd + run_name, res_dir + 'inputs') - os.chdir(res_dir) - batch_string = '' - batch_string += '#!/bin/bash\n' - batch_string += '#SBATCH --job-name=' + run_name + str(n_node) + str(n_mpi) + str(n_omp) + '\n' - batch_string += '#SBATCH --time=00:23:00\n' - batch_string += '#SBATCH -C ' + Cname + '\n' - batch_string += '#SBATCH -N ' + str(n_node) + '\n' - batch_string += '#SBATCH -q regular\n' - batch_string += '#SBATCH -e error.txt\n' - batch_string += '#SBATCH --account=m2852\n' - batch_string += 'export OMP_NUM_THREADS=' + str(n_omp) + '\n' - if architecture == 'cpu': - cflag_value = max(1, int(32/n_mpi) * 2) # Follow NERSC directives - batch_string += 'srun --cpu_bind=cores '+ \ - ' -n ' + str(n_node*n_mpi) + \ - ' -c ' + str(cflag_value) + \ - ' ./' + bin_name + ' inputs > perf_output.txt' - elif architecture == 'knl': - # number of logical cores per MPI process - cflag_value = max(1, int(64/n_mpi) * 4) # Follow NERSC directives - batch_string += 'srun --cpu_bind=cores ' + \ - ' -n ' + str(n_node*n_mpi) + \ - ' -c ' + str(cflag_value) + \ - ' ./' + bin_name + ' inputs > perf_output.txt\n' - batch_file = 'slurm' - f_exe = open(batch_file,'w') - f_exe.write(batch_string) - f_exe.close() - os.system('chmod 700 ' + bin_name) - os.system(config_command + 'sbatch ' + batch_file + ' >> ' + cwd + 'log_jobids_tmp.txt') - return 0 - -def run_batch_nnode(test_list, res_dir, cwd, bin_name, config_command, batch_string, submit_job_command): - # Clean res_dir - if os.path.exists(res_dir): - shutil.rmtree(res_dir, ignore_errors=True) - os.makedirs(res_dir) - # Copy files to res_dir - bin_dir = cwd + 'Bin/' - shutil.copy(bin_dir + bin_name, res_dir) - 
os.chdir(res_dir) - - for count, current_test in enumerate(test_list): - shutil.copy(cwd + current_test.input_file, res_dir) - batch_file = 'batch_script.sh' - f_exe = open(batch_file,'w') - f_exe.write(batch_string) - f_exe.close() - os.system('chmod 700 ' + bin_name) - os.system(config_command + submit_job_command + batch_file +\ - ' >> ' + cwd + 'log_jobids_tmp.txt') - -# Read output file and return init time and 1-step time -def read_run_perf(filename, n_steps): - timing_list = [] - # Search inclusive time to get simulation step time - partition_limit = 'NCalls Incl. Min Incl. Avg Incl. Max Max %' - with open(filename) as file_handler: - output_text = file_handler.read() - # Get total simulation time - line_match_totaltime = re.search('TinyProfiler total time across processes.*', output_text) - total_time = float(line_match_totaltime.group(0).split()[8]) - search_area = output_text.partition(partition_limit)[2] - line_match_looptime = re.search('\nWarpX::Evolve().*', search_area) - time_wo_initialization = float(line_match_looptime.group(0).split()[3]) - timing_list += [str(total_time - time_wo_initialization)] - timing_list += [str(time_wo_initialization/n_steps)] - partition_limit1 = 'NCalls Excl. Min Excl. Avg Excl. Max Max %' - partition_limit2 = 'NCalls Incl. Min Incl. Avg Incl. Max Max %' - file_handler.close() - with open(filename) as file_handler: - output_text = file_handler.read() - # Search EXCLISUSIVE routine timings - search_area = output_text.partition(partition_limit1)[2].partition(partition_limit2)[0] - pattern_list = ['\nParticleContainer::Redistribute().*',\ - '\nFabArray::FillBoundary().*',\ - '\nFabArray::ParallelCopy().*',\ - '\nPPC::CurrentDeposition.*',\ - '\nPPC::FieldGather.*',\ - '\nPPC::ParticlePush.*',\ - '\nPPC::Evolve::Copy.*',\ - '\nWarpX::Evolve().*',\ - 'Checkpoint().*',\ - 'WriteParticles().*',\ - '\nVisMF::Write(FabArray).*',\ - '\nWriteMultiLevelPlotfile().*',\ - '\nParticleContainer::RedistributeMPI().*'] - for pattern in pattern_list: - timing = '0' - line_match = re.search(pattern, search_area) - if line_match is not None: - timing = [str(float(line_match.group(0).split()[3])/n_steps)] - timing_list += timing - return timing_list - -# Write time into logfile -def write_perf_logfile(log_file, log_line): - f_log = open(log_file, 'a') - f_log.write(log_line) - f_log.close() - return 0 - -def get_nsteps(run_name): - with open(run_name) as file_handler: - run_name_text = file_handler.read() - line_match_nsteps = re.search('\nmax_step.*', run_name_text) - nsteps = float(line_match_nsteps.group(0).split()[2]) - return nsteps - -def extract_dataframe(filename, n_steps): - # Get init time and total time through Inclusive time - partition_limit_start = 'NCalls Incl. Min Incl. Avg Incl. 
Max Max %' - print(filename) - with open(filename) as file_handler: - output_text = file_handler.read() - # get total simulation time - line_match_totaltime = re.search('TinyProfiler total time across processes.*', output_text) - total_time = float(line_match_totaltime.group(0).split()[8]) - # get time performing steps as Inclusive WarpX::Evolve() time - search_area = output_text.partition(partition_limit_start)[2] - line_match_looptime = re.search('\nWarpX::Evolve().*', search_area) - time_wo_initialization = float(line_match_looptime.group(0).split()[3]) - # New, might break something - line_match_WritePlotFile = re.search('\nDiagnostics::FilterComputePackFlush().*', search_area) - if line_match_WritePlotFile is not None: - time_WritePlotFile = float(line_match_WritePlotFile.group(0).split()[3]) - else: - time_WritePlotFile = 0. - # Get timers for all routines - # Where to start and stop in the output_file - partition_limit_start = 'NCalls Excl. Min Excl. Avg Excl. Max Max %' - partition_limit_end = 'NCalls Incl. Min Incl. Avg Incl. Max Max %' - # Put file content in a string - with open(filename) as file_handler: - output_text = file_handler.read() - # Keep only profiling data - search_area = output_text.partition(partition_limit_start)[2]\ - .partition(partition_limit_end)[0] - list_string = search_area.split('\n')[2:-4] - time_array = np.zeros(len(list_string)) - column_list= [] - for i in np.arange(len(list_string)): - column_list.append(list_string[i].split()[0]) - time_array[i] = float(list_string[i].split()[3]) - df = pd.DataFrame(columns=column_list) - df.loc[0] = time_array - df['time_initialization'] = total_time - time_wo_initialization - df['time_running'] = time_wo_initialization - df['time_WritePlotFile'] = time_WritePlotFile - # df['string_output'] = partition_limit_start + '\n' + search_area - return df - -# Run a performance test in an interactive allocation -# def run_interactive(run_name, res_dir, n_node=1, n_mpi=1, n_omp=1): -# # Clean res_dir # -# if os.path.exists(res_dir): -# shutil.rmtree(res_dir) -# os.makedirs(res_dir) -# # Copy files to res_dir # -# shutil.copyfile(bin_dir + bin_name, res_dir + bin_name) -# shutil.copyfile(cwd + run_name, res_dir + 'inputs') -# os.chdir(res_dir) -# if args.architecture == 'cpu': -# cflag_value = max(1, int(32/n_mpi) * 2) # Follow NERSC directives # -# exec_command = 'export OMP_NUM_THREADS=' + str(n_omp) + ';' +\ -# 'srun --cpu_bind=cores ' + \ -# ' -n ' + str(n_node*n_mpi) + \ -# ' -c ' + str(cflag_value) + \ -# ' ./' + bin_name + ' inputs > perf_output.txt' -# elif args.architecture == 'knl': -# # number of logical cores per MPI process # -# cflag_value = max(1,int(68/n_mpi) * 4) # Follow NERSC directives # -# exec_command = 'export OMP_NUM_THREADS=' + str(n_omp) + ';' +\ -# 'srun --cpu_bind=cores ' + \ -# ' -n ' + str(n_node*n_mpi) + \ -# ' -c ' + str(cflag_value) + \ -# ' ./' + bin_name + ' inputs > perf_output.txt' -# os.system('chmod 700 ' + bin_name) -# os.system(config_command + exec_command) -# return 0 diff --git a/Tools/PerformanceTests/performance_log.txt b/Tools/PerformanceTests/performance_log.txt deleted file mode 100644 index 72fece34939..00000000000 --- a/Tools/PerformanceTests/performance_log.txt +++ /dev/null @@ -1,81 +0,0 @@ -## year month day run_name compiler architecture n_node n_mpi n_omp time_initialization time_one_iteration Redistribute FillBoundary ParallelCopy CurrentDeposition FieldGather ParthiclePush Copy Evolve Checkpoint WriteParticles Write_FabArray WriteMultiLevelPlotfile(unit: second) 
RedistributeMPI -2018 01 31 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 3.14 0.3986 0.1713 0.01719 0.01615 0.06987 0.03636 0.01901 0.01999 0.003602 0 0 0 0 0.007262 -2018 01 31 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 3.39 0.4009 0.1712 0.01676 0.01583 0.07061 0.03684 0.01926 0.02011 0.003687 0 0 0 0 0.007841 -2018 01 31 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 2.91 0.4024 0.1716 0.01826 0.01918 0.0703 0.0363 0.01912 0.01989 0.003017 0 0 0 0 0.007256 -2018 01 31 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 3.21 0.3997 0.1717 0.01706 0.0162 0.07026 0.03655 0.01928 0.01999 0.003687 0 0 0 0 0.006799 -2018 01 31 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 0.89 0.4779 0.04441 0.1143 0.09117 0.1072 0.01254 0.003702 0.004217 0.01247 0 0 0 0 0.003441 -2018 01 31 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 1.58 0.4626 0.04424 0.1048 0.0851 0.1073 0.01259 0.003767 0.004282 0.01311 0 0 0 0 0.002798 -2018 01 31 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 1.63 0.4616 0.04441 0.1033 0.08398 0.1079 0.01312 0.003802 0.004224 0.01278 0 0 0 0 0.003188 -2018 01 31 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 1.72 0.461 0.04419 0.1038 0.08424 0.1074 0.01257 0.003799 0.0043 0.01318 0 0 0 0 0.002816 -2018 01 31 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 3.32 0.3986 0.1712 0.01804 0.01697 0.06999 0.03615 0.01842 0.01896 0.003445 0 0 0 0 0.00738 -2018 01 31 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 3.17 0.3974 0.1711 0.01722 0.01587 0.07016 0.03642 0.01844 0.01902 0.003431 0 0 0 0 0.007332 -2018 01 31 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 2.88 0.3946 0.1709 0.01686 0.01562 0.06972 0.03595 0.01848 0.01916 0.003269 0 0 0 0 0.006887 -2018 01 31 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 2.95 0.4094 0.1708 0.01761 0.01632 0.07001 0.03651 0.01863 0.01906 0.003314 0 0 0 0 0.01898 -2018 01 31 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 1.3 0.4787 0.04447 0.1139 0.09124 0.108 0.01287 0.003811 0.004205 0.01249 0 0 0 0 0.003045 -2018 01 31 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 3.16 0.4578 0.04412 0.1015 0.08339 0.1078 0.01301 0.003919 0.004182 0.0125 0 0 0 0 0.002701 -2018 01 31 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 2.78 0.4679 0.04418 0.1035 0.08456 0.1079 0.01303 0.003902 0.004214 0.0127 0 0 0 0 0.009118 -2018 01 31 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 1.12 0.4613 0.04425 0.1043 0.08517 0.1073 0.01242 0.003797 0.004221 0.01239 0 0 0 0 0.003665 -2018 01 31 automated_test_3_uniform_drift_4ppc intel knl 1 16 8 0.48 0.1237 0.03056 0.01622 0.01468 0.02039 0.005016 0.003737 0.002632 0.00326 0 0 0 0 0.006871 -2018 01 31 automated_test_3_uniform_drift_4ppc intel knl 1 16 8 0.79 0.1287 0.0308 0.01706 0.01715 0.02042 0.005452 0.003636 0.002797 0.003143 0 0 0 0 0.007324 -2018 01 31 automated_test_3_uniform_drift_4ppc intel knl 1 16 8 0.9 0.1296 0.03084 0.01711 0.01731 0.02053 0.005379 0.003641 0.002843 0.003137 0 0 0 0 0.008151 -2018 01 31 automated_test_3_uniform_drift_4ppc intel knl 1 16 8 0.9 0.1323 0.03081 0.01703 0.01736 0.02065 0.005339 0.003638 0.002751 0.004008 0 0 0 0 0.01015 -2018 01 31 automated_test_4_labdiags_2ppc intel knl 1 16 8 0.85 0.2896 0.03832 0.06449 0.07493 0.003507 0.002987 0.0001515 0.0001762 0.007921 0.0371 0.001537 0 0.0004387 0.03832 -2018 01 31 automated_test_4_labdiags_2ppc intel knl 1 16 8 1.12 0.2895 0.03845 0.06423 0.07481 0.003489 0.002994 0.000152 0.0001779 0.00834 0.0357 0.001545 0 0.0005249 0.03845 -2018 01 31 
automated_test_4_labdiags_2ppc intel knl 1 16 8 0.76 0.3243 0.03804 0.0646 0.07462 0.003483 0.002991 0.0001508 0.0001769 0.008051 0.05983 0.001565 0 0.005392 0.03804 -2018 01 31 automated_test_4_labdiags_2ppc intel knl 1 16 8 0.74 0.3143 0.03941 0.06478 0.07547 0.003486 0.003007 0.0001518 0.0001808 0.007845 0.05079 0.001543 0 0.0007033 0.03941 -2018 01 31 automated_test_5_loadimbalance intel knl 1 16 8 9.2 0.3845 0.08558 0.1042 0.1332 0 0 0 0 0.01226 0 0 0 0 0.08558 -2018 01 31 automated_test_5_loadimbalance intel knl 1 16 8 9.19 0.3864 0.085 0.1051 0.134 0 0 0 0 0.01202 0 0 0 0 0.085 -2018 01 31 automated_test_5_loadimbalance intel knl 1 16 8 8.98 0.3912 0.08665 0.1061 0.1356 0 0 0 0 0.01193 0 0 0 0 0.08665 -2018 01 31 automated_test_5_loadimbalance intel knl 1 16 8 9.03 0.3826 0.08484 0.1031 0.1329 0 0 0 0 0.01205 0 0 0 0 0.08484 -2018 01 31 automated_test_6_output_2ppc intel knl 1 16 8 3.6 1.086 0.0898 0.1311 0.09441 0.1345 0.027 0.008783 0.009792 0.02151 0.08454 0.04962 0 0.0008218 0.005303 -2018 01 31 automated_test_6_output_2ppc intel knl 1 16 8 4.7 1.136 0.09059 0.1437 0.09535 0.1358 0.02915 0.009238 0.01002 0.02315 0.09088 0.05006 0 0.01081 0.005381 -2018 01 31 automated_test_6_output_2ppc intel knl 1 16 8 4.0 1.132 0.09145 0.1377 0.09592 0.1365 0.02817 0.009353 0.0103 0.02447 0.066 0.05309 0 0.02047 0.009196 -2018 01 31 automated_test_6_output_2ppc intel knl 1 16 8 3.8 1.135 0.09088 0.1308 0.09623 0.135 0.02762 0.008839 0.009758 0.02561 0.1144 0.04874 0 0.0008693 0.008112 -2018 02 13 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 3.87 0.4053 0.1754 0.01914 0.01871 0.0691 0.03648 0.01879 0.0193 0.003268 0 0 0 0 0.007445 -2018 02 13 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 4.38 0.405 0.1741 0.01901 0.01839 0.07034 0.03718 0.01894 0.0195 0.003845 0 0 0 0 0.007187 -2018 02 13 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 3.79 0.3999 0.1739 0.01859 0.01631 0.06918 0.0367 0.01906 0.01952 0.003278 0 0 0 0 0.006658 -2018 02 13 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 3.93 0.4044 0.1746 0.01854 0.01695 0.06975 0.03721 0.0191 0.01941 0.003979 0 0 0 0 0.007381 -2018 02 13 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 1.83 0.4773 0.04582 0.1089 0.08772 0.1072 0.01304 0.003335 0.004231 0.01385 0 0 0 0 0.002991 -2018 02 13 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 1.51 0.4654 0.04556 0.1027 0.08351 0.1068 0.01292 0.003114 0.004249 0.01356 0 0 0 0 0.002748 -2018 02 13 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 1.62 0.4755 0.0457 0.1082 0.08761 0.1069 0.0131 0.003205 0.00431 0.01388 0 0 0 0 0.002738 -2018 02 13 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 1.44 0.4798 0.04561 0.1133 0.08962 0.1064 0.01246 0.003076 0.004241 0.01318 0 0 0 0 0.003164 -2018 02 13 automated_test_3_uniform_drift_4ppc intel knl 1 16 8 0.92 0.1282 0.03185 0.01747 0.01557 0.01956 0.005103 0.003455 0.00274 0.00346 0 0 0 0 0.007196 -2018 02 13 automated_test_3_uniform_drift_4ppc intel knl 1 16 8 0.97 0.1301 0.03157 0.01788 0.01732 0.01957 0.00508 0.003335 0.002803 0.003454 0 0 0 0 0.007446 -2018 02 13 automated_test_3_uniform_drift_4ppc intel knl 1 16 8 0.91 0.1289 0.03137 0.01765 0.0155 0.02026 0.005636 0.003513 0.002716 0.003381 0 0 0 0 0.007087 -2018 02 13 automated_test_3_uniform_drift_4ppc intel knl 1 16 8 0.64 0.1308 0.03142 0.0181 0.01777 0.01953 0.005204 0.003371 0.002782 0.003057 0 0 0 0 0.007769 -2018 02 13 automated_test_4_labdiags_2ppc intel knl 1 16 8 3.19 0.3005 0.0383 0.06396 0.07274 0.003502 0.003005 0.0001628 0.0001839 0.008869 0.04427 
0.001522 0 0.0005522 0.0383 -2018 02 13 automated_test_4_labdiags_2ppc intel knl 1 16 8 1.41 0.2945 0.0389 0.06251 0.0723 0.003508 0.003009 0.000164 0.0001825 0.009131 0.04042 0.001538 0 0.0005936 0.0389 -2018 02 13 automated_test_4_labdiags_2ppc intel knl 1 16 8 1.32 0.3066 0.0387 0.06558 0.07547 0.003463 0.003017 0.0001631 0.0001837 0.008431 0.04955 0.001555 0 0.000454 0.0387 -2018 02 13 automated_test_4_labdiags_2ppc intel knl 1 16 8 0.71 0.3391 0.03987 0.06534 0.07626 0.003475 0.003004 0.0001643 0.0001821 0.008152 0.06677 0.001534 0 0.01029 0.03987 -2018 02 13 automated_test_5_loadimbalance intel knl 1 16 8 9.68 0.3956 0.08701 0.1051 0.1352 0 0 0 0 0.01387 0 0 0 0 0.08701 -2018 02 13 automated_test_5_loadimbalance intel knl 1 16 8 10.65 0.3987 0.0866 0.1051 0.1332 0 0 0 0 0.0191 0 0 0 0 0.0866 -2018 02 13 automated_test_5_loadimbalance intel knl 1 16 8 10.11 0.4013 0.08782 0.1087 0.1359 0 0 0 0 0.01379 0 0 0 0 0.08782 -2018 02 13 automated_test_5_loadimbalance intel knl 1 16 8 9.94 0.39 0.08702 0.1028 0.132 0 0 0 0 0.0142 0 0 0 0 0.08702 -2018 02 13 automated_test_6_output_2ppc intel knl 1 16 8 1.292 0.2639 0.01424 0.03424 0.01742 0.01893 0.003449 0.001364 0.001712 0.009362 0.04053 0.01765 0 0.002558 0.001185 -2018 02 13 automated_test_6_output_2ppc intel knl 1 16 8 0.779 0.3155 0.01125 0.03605 0.01628 0.02431 0.009672 0.002843 0.001334 0.008876 0.05925 0.02047 0 0.001897 0.0006917 -2018 02 13 automated_test_6_output_2ppc intel knl 1 16 8 0.635 0.2568 0.01083 0.03443 0.01592 0.01963 0.003027 0.001439 0.001286 0.009288 0.03879 0.01815 0 0.001509 0.0007743 -2018 02 13 automated_test_6_output_2ppc intel knl 1 16 8 1.371 0.2648 0.01401 0.03376 0.01593 0.01936 0.003443 0.001351 0.00169 0.01161 0.03936 0.01785 0 0.002107 0.001171 -2018 02 20 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 3.7 0.4573 0.01159 0.02139 0.02206 0.06934 0.03845 0.0192 0.02062 0.003496 0 0 0 0 0.01159 -2018 02 20 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 3.45 0.4603 0.01356 0.02085 0.02488 0.06946 0.03777 0.01908 0.02031 0.003356 0 0 0 0 0.01356 -2018 02 20 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 3.72 0.4552 0.01245 0.02003 0.02013 0.06874 0.03766 0.01907 0.0203 0.003667 0 0 0 0 0.01245 -2018 02 20 automated_test_1_uniform_rest_32ppc intel knl 1 16 8 2.94 0.4557 0.01381 0.01979 0.02053 0.0687 0.03694 0.01886 0.02012 0.006396 0 0 0 0 0.01381 -2018 02 20 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 1.33 0.4937 0.005316 0.1103 0.09802 0.1071 0.01258 0.00326 0.004435 0.01347 0 0 0 0 0.005316 -2018 02 20 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 1.27 0.5063 0.004948 0.1213 0.1019 0.1067 0.01183 0.003056 0.004479 0.01327 0 0 0 0 0.004948 -2018 02 20 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 2.2 0.4983 0.005787 0.1141 0.1002 0.1067 0.0121 0.00307 0.00445 0.01343 0 0 0 0 0.005787 -2018 02 20 automated_test_2_uniform_rest_1ppc intel knl 1 16 8 1.39 0.5018 0.005339 0.1152 0.1007 0.1073 0.01249 0.003196 0.004484 0.01348 0 0 0 0 0.005339 -2018 02 20 automated_test_3_uniform_drift_4ppc intel knl 1 16 8 0.98 0.1342 0.007843 0.01855 0.01798 0.01936 0.005198 0.003471 0.002626 0.003161 0 0 0 0 0.007843 -2018 02 20 automated_test_3_uniform_drift_4ppc intel knl 1 16 8 0.63 0.1367 0.008055 0.01917 0.01818 0.01992 0.006097 0.003388 0.002639 0.003079 0 0 0 0 0.008055 -2018 02 20 automated_test_3_uniform_drift_4ppc intel knl 1 16 8 0.66 0.1365 0.008017 0.0196 0.01819 0.01979 0.005769 0.00331 0.002668 0.003111 0 0 0 0 0.008017 -2018 02 20 automated_test_3_uniform_drift_4ppc intel knl 
1 16 8 0.89 0.1367 0.008249 0.01947 0.01818 0.01956 0.005585 0.003341 0.002697 0.003217 0 0 0 0 0.008249 -2018 02 20 automated_test_4_labdiags_2ppc intel knl 1 16 8 1.14 0.3087 0.04174 0.0637 0.0734 0.00345 0.002967 0.0001664 0.0001849 0.008714 0.05156 0.001539 0 0.0004984 0.04174 -2018 02 20 automated_test_4_labdiags_2ppc intel knl 1 16 8 1.21 0.3407 0.07513 0.07261 0.0713 0.003428 0.002994 0.0001638 0.0001848 0.009408 0.003442 0.00173 0 0.0005256 0.07513 -2018 02 20 automated_test_4_labdiags_2ppc intel knl 1 16 8 1.73 0.347 0.04077 0.06476 0.07148 0.00345 0.002998 0.0001637 0.0001829 0.009379 0.03947 0.001574 0 0.04989 0.04077 -2018 02 20 automated_test_4_labdiags_2ppc intel knl 1 16 8 1.52 0.3469 0.04088 0.06365 0.07183 0.003493 0.002957 0.0001659 0.0001827 0.009064 0.04694 0.001959 0 0.04099 0.04088 -2018 02 20 automated_test_5_loadimbalance intel knl 1 16 8 9.92 0.4206 0.08811 0.1186 0.1402 0 0 0 0 0.01443 0 0 0 0 0.08811 -2018 02 20 automated_test_5_loadimbalance intel knl 1 16 8 9.12 0.3884 0.08626 0.1027 0.1305 0 0 0 0 0.01368 0 0 0 0 0.08626 -2018 02 20 automated_test_5_loadimbalance intel knl 1 16 8 9.91 0.4097 0.08598 0.1119 0.1381 0 0 0 0 0.01414 0 0 0 0 0.08598 -2018 02 20 automated_test_5_loadimbalance intel knl 1 16 8 9.63 0.4257 0.0876 0.1213 0.1441 0 0 0 0 0.01422 0 0 0 0 0.0876 -2018 02 20 automated_test_6_output_2ppc intel knl 1 16 8 1.23 0.274 0.003227 0.03782 0.01724 0.01945 0.003219 0.001468 0.0014 0.01094 0.03943 0.0175 0 0.00509 0.001122 -2018 02 20 automated_test_6_output_2ppc intel knl 1 16 8 2.076 0.3023 0.002995 0.035 0.01619 0.02462 0.01126 0.006984 0.001548 0.01009 0.04604 0.01734 0 0.08398 0.001151 -2018 02 20 automated_test_6_output_2ppc intel knl 1 16 8 1.378 0.273 0.004545 0.03721 0.01754 0.02039 0.003415 0.00145 0.001561 0.01058 0.04009 0.01763 0 0.002519 0.001187 -2018 02 20 automated_test_6_output_2ppc intel knl 1 16 8 1.61 0.2911 0.004065 0.03726 0.01782 0.02439 0.01289 0.003463 0.001689 0.008778 0.03975 0.01723 0 0.00247 0.00129 diff --git a/Tools/PerformanceTests/run_alltests.py b/Tools/PerformanceTests/run_alltests.py deleted file mode 100644 index b1083fc6f45..00000000000 --- a/Tools/PerformanceTests/run_alltests.py +++ /dev/null @@ -1,353 +0,0 @@ -# Copyright 2017-2020 Luca Fedeli, Maxence Thevenet, Remi Lehe -# -# -# This file is part of WarpX. -# -# License: BSD-3-Clause-LBNL - -import argparse -import datetime -import os -import re -import shutil -import time - -from functions_perftest import * - -# This script runs automated performance tests for WarpX. -# It runs tests in list test_list defined below, and write -# results in file performance_log.txt in warpx/Tools/PerformanceTests/ - -# ---- User's manual ---- -# Before running performance tests, make sure you have the latest version -# of performance_log.txt -# A typical execution reads: -# > python run_alltests.py --no-recompile --compiler=gnu --architecture=cpu --mode=run --log_file='my_performance_log.txt' -# These are default values, and will give the same result as -# > python run_alltests.py -# To add a new test item, extent the test_list with a line like -# test_list.extend([['my_input_file', n_node, n_mpi, n_omp]]*3) -# - my_input_file must be in warpx/Tools/PerformanceTests -# - the test will run 3 times, to have some statistics -# - the test must take <1h or it will timeout - -# ---- Developer's manual ---- -# This script can run in two modes: -# - 'run' mode: for each test item, a batch job is executed. 
-# create folder '$SCRATCH/performance_warpx/' -# recompile the code if option --recompile is used -# loop over test_list and submit one batch script per item -# Submit a batch job that executes the script in read mode -# This last job runs once all others are completed -# - 'read' mode: Get performance data from all test items -# create performance log file if does not exist -# loop over test_file -# read initialization time and step time -# write data into the performance log file -# push file performance_log.txt on the repo - -# Define the list of tests to run -# ------------------------------- -# each element of test_list contains -# [str runname, int n_node, int n_mpi PER NODE, int n_omp] -test_list = [] -n_repeat = 3 - -test_list.extend([['ompscaling_32ppc' , 1, 1, 1]]*n_repeat) -test_list.extend([['ompscaling_32ppc' , 1, 1, 2]]*n_repeat) -test_list.extend([['ompscaling_32ppc' , 1, 1, 4]]*n_repeat) -test_list.extend([['ompscaling_32ppc' , 1, 1, 8]]*n_repeat) -test_list.extend([['ompscaling_32ppc' , 1, 1, 16]]*n_repeat) -test_list.extend([['ompscaling_32ppc' , 1, 1, 32]]*n_repeat) -test_list.extend([['ompscaling_32ppc' , 1, 1, 64]]*n_repeat) -test_list.extend([['ompscaling_32ppc' , 1, 1,128]]*n_repeat) -test_list.extend([['ompscaling_32ppc' , 1, 1,256]]*n_repeat) - -#test_list.extend([['mil_weak1_0ppc_1' , 1, 8, 8]]*n_repeat) -#test_list.extend([['mil_weak1_0ppc_8' , 8, 8, 8]]*n_repeat) -#test_list.extend([['mil_weak1_0ppc_64' , 64, 8, 8]]*n_repeat) -#test_list.extend([['mil_weak1_0ppc_512' , 512, 8, 8]]*n_repeat) -#test_list.extend([['mil_weak1_0ppc_1024' , 1024, 8, 8]]*n_repeat) -#test_list.extend([['mil_weak1_0ppc_2048' , 2048, 8, 8]]*n_repeat) -#test_list.extend([['mil_weak1_0ppc_4096' , 4096, 8, 8]]*n_repeat) - -#test_list.extend([['mil_weak1_32ppc_1' , 1, 8, 8]]*n_repeat) -#test_list.extend([['mil_weak1_32ppc_8' , 8, 8, 8]]*n_repeat) -#test_list.extend([['mil_weak1_32ppc_64' , 64, 8, 8]]*n_repeat) -#test_list.extend([['mil_weak1_32ppc_512' , 512, 8, 8]]*n_repeat) -#test_list.extend([['mil_weak1_32ppc_1024' , 1024, 8, 8]]*n_repeat) -#test_list.extend([['mil_weak1_32ppc_2048' , 2048, 8, 8]]*n_repeat) -#test_list.extend([['mil_weak1_32ppc_4096' , 4096, 8, 8]]*n_repeat) - -#test_list.extend([['strong_32ppc1' , 1, 8, 8]]*n_repeat) -#test_list.extend([['strong_32ppc1' , 8, 8, 8]]*n_repeat) -#test_list.extend([['strong_32ppc1' , 64, 8, 8]]*n_repeat) -#test_list.extend([['strong_32ppc1' , 128, 8, 8]]*n_repeat) -#test_list.extend([['strong_32ppc128' , 128, 8, 8]]*n_repeat) -#test_list.extend([['strong_32ppc128' , 256, 8, 8]]*n_repeat) -#test_list.extend([['strong_32ppc128' , 512, 8, 8]]*n_repeat) -#test_list.extend([['strong_32ppc128' , 1024, 8, 8]]*n_repeat) -#test_list.extend([['strong_32ppc128' , 2048, 8, 8]]*n_repeat) -#test_list.extend([['strong_32ppc128' , 4096, 8, 8]]*n_repeat) - -#test_list.extend([['strong1_0ppc_1' , 1, 8, 8]]*n_repeat) -#test_list.extend([['strong1_0ppc_1' , 8, 8, 8]]*n_repeat) -#test_list.extend([['strong1_0ppc_1' , 64, 8, 8]]*n_repeat) -#test_list.extend([['strong1_0ppc_1' , 128, 8, 8]]*n_repeat) -#test_list.extend([['strong1_0ppc_1' , 256, 8, 8]]*n_repeat) - -#test_list.extend([['strong1_0ppc_128' , 128, 8, 8]]*n_repeat) -#test_list.extend([['strong1_0ppc_128' , 256, 8, 8]]*n_repeat) -#test_list.extend([['strong1_0ppc_128' , 512, 8, 8]]*n_repeat) -#test_list.extend([['strong1_0ppc_128' , 1024, 8, 8]]*n_repeat) -#test_list.extend([['strong1_0ppc_128' , 2048, 8, 8]]*n_repeat) -#test_list.extend([['strong1_0ppc_128' , 4096, 8, 8]]*n_repeat) - -n_tests = 
len(test_list) - -# Read command-line arguments -# --------------------------- -# Create parser and read arguments -parser = argparse.ArgumentParser( - description='Run performance tests and write results in files') -parser.add_argument('--recompile', dest='recompile', action='store_true', default=False) -parser.add_argument('--no-recompile', dest='recompile', action='store_false', default=False) -parser.add_argument('--commit', dest='commit', action='store_true', default=False) -parser.add_argument( '--compiler', choices=['gnu', 'intel'], default='gnu', - help='which compiler to use') -parser.add_argument( '--architecture', choices=['cpu', 'knl'], default='cpu', - help='which architecture to cross-compile for NERSC machines') -parser.add_argument( '--mode', choices=['run', 'read'], default='run', - help='whether to run perftests or read their perf output. run calls read') -parser.add_argument( '--log_file', dest = 'log_file', default='my_performance_log.txt', - help='name of log file where data will be written. ignored if option --commit is used') -parser.add_argument( '--n_steps', dest = 'n_steps', default=None, - help='Number of time steps in the simulation. Should be read automatically from the input file') - -args = parser.parse_args() - -log_file = args.log_file -if args.commit == True: - log_file = 'performance_log.txt' - -# Dictionaries -# compiler names. Used for WarpX executable name -compiler_name = {'intel': 'intel', 'gnu': 'gcc'} -# architecture. Used for WarpX executable name -module_name = {'cpu': 'haswell', 'knl': 'mic-knl'} -# architecture. Used in batch scripts -module_Cname = {'cpu': 'haswell', 'knl': 'knl,quad,cache'} -# Define environment variables -cwd = os.getcwd() + '/' -res_dir_base = os.environ['SCRATCH'] + '/performance_warpx/' -bin_dir = cwd + 'Bin/' -bin_name = 'perf_tests3d.' + args.compiler + '.' 
+ module_name[args.architecture] + '.TPROF.MTMPI.OMP.QED.ex' -log_dir = cwd - -perf_database_file = cwd + 'perf_database_warpx.h5' -do_rename = False -store_test = False - -day = time.strftime('%d') -month = time.strftime('%m') -year = time.strftime('%Y') - -# Initialize tests -# ---------------- -if args.mode == 'run': -# Set default options for compilation and execution - config_command = '' - config_command += 'module unload darshan;' - config_command += 'module load craype-hugepages4M;' - if args.architecture == 'knl': - if args.compiler == 'intel': - config_command += 'module unload PrgEnv-gnu;' - config_command += 'module load PrgEnv-intel;' - elif args.compiler == 'gnu': - config_command += 'module unload PrgEnv-intel;' - config_command += 'module load PrgEnv-gnu;' - config_command += 'module unload craype-haswell;' - config_command += 'module load craype-mic-knl;' - elif args.architecture == 'cpu': - if args.compiler == 'intel': - config_command += 'module unload PrgEnv-gnu;' - config_command += 'module load PrgEnv-intel;' - elif args.compiler == 'gnu': - config_command += 'module unload PrgEnv-intel;' - config_command += 'module load PrgEnv-gnu;' - config_command += 'module unload craype-mic-knl;' - config_command += 'module load craype-haswell;' - # Create main result directory if does not exist - if not os.path.exists(res_dir_base): - os.mkdir(res_dir_base) - -# Recompile if requested -if args.recompile == True: - with open(cwd + 'GNUmakefile_perftest') as makefile_handler: - makefile_text = makefile_handler.read() - makefile_text = re.sub('\nCOMP.*', '\nCOMP=%s' %compiler_name[args.compiler], makefile_text) - with open(cwd + 'GNUmakefile_perftest', 'w') as makefile_handler: - makefile_handler.write( makefile_text ) - os.system(config_command + " make -f GNUmakefile_perftest realclean ; " + " rm -r tmp_build_dir *.mod; make -j 8 -f GNUmakefile_perftest") - -# Define functions to run a test and analyse results -# -------------------------------------------------- -def process_analysis(): - dependencies = '' - f_log = open(cwd + 'log_jobids_tmp.txt','r') - for count, current_run in enumerate(test_list): - line = f_log.readline() - print(line) - dependencies += line.split()[3] + ':' - batch_string = '' - batch_string += '#!/bin/bash\n' - batch_string += '#SBATCH --job-name=warpx_read\n' - batch_string += '#SBATCH --time=00:05:00\n' - batch_string += '#SBATCH -C ' + module_Cname[args.architecture] + '\n' - batch_string += '#SBATCH -N 1\n' - batch_string += '#SBATCH -S 4\n' - batch_string += '#SBATCH -q regular\n' - batch_string += '#SBATCH -e read_error.txt\n' - batch_string += '#SBATCH -o read_output.txt\n' - batch_string += '#SBATCH --mail-type=end\n' - batch_string += '#SBATCH --account=m2852\n' - batch_string += 'python ' + __file__ + ' --no-recompile --compiler=' + \ - args.compiler + ' --architecture=' + args.architecture + \ - ' --mode=read' + ' --log_file=' + log_file - if args.commit == True: - batch_string += ' --commit' - batch_string += '\n' - batch_file = 'slurm_perfread' - f_exe = open(batch_file,'w') - f_exe.write(batch_string) - f_exe.close() - os.system('chmod 700 ' + batch_file) - os.system('sbatch --dependency afterok:' + dependencies[0:-1] + ' ' + batch_file) - return 0 - -# Loop over the tests and return run time + details -# ------------------------------------------------- -if args.mode == 'run': - # Remove file log_jobids_tmp.txt if exists. 
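The dependency chain set up here relies on ``sbatch`` printing a line of the form ``Submitted batch job 123456`` for every submitted run, which the scripts append to ``log_jobids_tmp.txt``; ``line.split()[3]`` is therefore the job id. A short sketch of the same idea as a helper (the function name is new; the file name is the one used above)::

    def afterok_dependency(log_path='log_jobids_tmp.txt'):
        # Build a Slurm dependency string from lines like 'Submitted batch job 123456'.
        job_ids = []
        with open(log_path) as f:
            for line in f:
                if line.strip():
                    job_ids.append(line.split()[3])
        return 'afterok:' + ':'.join(job_ids)

    # e.g. os.system('sbatch --dependency ' + afterok_dependency() + ' slurm_perfread')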
- # This file contains the jobid of every perf test - # It is used to manage the analysis script dependencies - if os.path.isfile(cwd + 'log_jobids_tmp.txt'): - os.remove(cwd + 'log_jobids_tmp.txt') - for count, current_run in enumerate(test_list): - # Results folder - print('run ' + str(current_run)) - run_name = current_run[0] - n_node = current_run[1] - n_mpi = current_run[2] - n_omp = current_run[3] - n_steps = get_nsteps(cwd + run_name) - res_dir = res_dir_base - res_dir += '_'.join([run_name, args.compiler,\ - args.architecture, str(n_node), str(n_mpi),\ - str(n_omp), str(count)]) + '/' - # Run the simulation. - # If you are currently in an interactive session and want to run interactive, - # just replace run_batch with run_interactive - run_batch(run_name, res_dir, bin_name, config_command, architecture=args.architecture, \ - Cname=module_Cname[args.architecture], n_node=n_node, n_mpi=n_mpi, n_omp=n_omp) - os.chdir(cwd) - process_analysis() - -if args.mode == 'read': - # Create log_file for performance tests if does not exist - if not os.path.isfile(log_dir + log_file): - log_line = '## year month day run_name compiler architecture n_node n_mpi ' +\ - 'n_omp time_initialization time_one_iteration Redistribute '+\ - 'FillBoundary ParallelCopy CurrentDeposition FieldGather '+\ - 'ParticlePush Copy Evolve Checkpoint '+\ - 'WriteParticles Write_FabArray '+\ - 'WriteMultiLevelPlotfile '+\ - 'RedistributeMPI(unit: second)\n' - f_log = open(log_dir + log_file, 'a') - f_log.write(log_line) - f_log.close() - for count, current_run in enumerate(test_list): - # Results folder - print('read ' + str(current_run)) - run_name = current_run[0] - n_node = current_run[1] - n_mpi = current_run[2] - n_omp = current_run[3] - if args.n_steps is None: - n_steps = get_nsteps(cwd + run_name) - else: - n_steps = int(args.n_steps) - res_dir = res_dir_base - res_dir += '_'.join([run_name, args.compiler,\ - args.architecture, str(n_node), str(n_mpi),\ - str(n_omp), str(count)]) + '/' - # Read to store in text file - # -------------------------- - output_filename = 'perf_output.txt' - timing_list = read_run_perf(res_dir + output_filename, n_steps) - # Write performance data to the performance log file - log_line = ' '.join([year, month, day, run_name, args.compiler,\ - args.architecture, str(n_node), str(n_mpi),\ - str(n_omp)] + timing_list + ['\n']) - write_perf_logfile(log_dir + log_file, log_line) - - # Read data for all test to put in hdf5 a database - # ------------------------------------------------ - # This is an hdf5 file containing ALL the simulation parameters and results. 
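A practical caveat for anyone reviving this read pass: the block below appends each new row with ``DataFrame.append``, which was deprecated in pandas 1.4 and removed in pandas 2.0. An equivalent append-or-create step with ``pd.concat`` would look roughly like this (helper and argument names are illustrative)::

    import os

    import pandas as pd

    def append_row(store_path, row_df, key='all_data'):
        # Requires the 'tables' (PyTables) package, as does the original to_hdf call.
        if os.path.exists(store_path):
            base = pd.read_hdf(store_path, key)
            updated = pd.concat([base, row_df], ignore_index=True)
        else:
            updated = row_df
        updated.to_hdf(store_path, key=key, mode='w')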
Might be too large for a repo - df_newline = extract_dataframe(res_dir + 'perf_output.txt', n_steps) - # Add all simulation parameters to the dataframe - df_newline['run_name'] = run_name - df_newline['n_node'] = n_node - df_newline['n_mpi'] = n_mpi - df_newline['n_omp'] = n_omp - df_newline['n_steps'] = n_steps - df_newline['rep'] = count - df_newline['date'] = datetime.datetime.now() - input_file = open(cwd + run_name, 'r') - input_file_content = input_file.read() - input_file.close() - df_newline['inputs_content'] = input_file_content - if os.path.exists(perf_database_file): - df_base = pd.read_hdf(perf_database_file, 'all_data') - updated_df = df_base.append(df_newline, ignore_index=True) - else: - updated_df = df_newline - updated_df.to_hdf(perf_database_file, key='all_data', mode='w') - - # Store test parameters for record if requested - if store_test == True: - dir_record_base = './perf_warpx_record/' - if not os.path.exists(dir_record_base): - os.mkdir(dir_record_base) - count = 0 - dir_record = dir_record_base + '_'.join([year, month, day]) + '_0' - while os.path.exists(dir_record): - dir_record = dir_record[:-1] + str(count) - os.mkdir(dir_record) - shutil.copy(__file__, dir_record) - shutil.copy(log_dir + log_file, dir_record) - for count, current_run in enumerate(test_list): - shutil.copy(current_run[0], dir_record) - - if do_rename == True: - # Rename files if requested - for count, current_run in enumerate(test_list): - run_name = current_run[0] - n_node = current_run[1] - n_mpi = current_run[2] - n_omp = current_run[3] - res_dir = res_dir_base - res_dir += '_'.join([run_name, args.compiler,\ - args.architecture, str(n_node), str(n_mpi),\ - str(n_omp), str(count)]) + '/' - res_dir_arch = res_dir_base - res_dir_arch += '_'.join([year, month, day, run_name, args.compiler,\ - args.architecture, str(n_node), str(n_mpi), \ - str(n_omp), str(count)]) + '/' - os.rename(res_dir, res_dir_arch) - - # Commit results to the Repo - if args.commit == True: - os.system('git add ' + log_dir + log_file + ';'\ - 'git commit -m "performance tests";'\ - 'git push -u origin development') diff --git a/Tools/PerformanceTests/run_alltests_1node.py b/Tools/PerformanceTests/run_alltests_1node.py deleted file mode 100644 index f112552b36e..00000000000 --- a/Tools/PerformanceTests/run_alltests_1node.py +++ /dev/null @@ -1,362 +0,0 @@ -# Copyright 2018-2020 Luca Fedeli, Maxence Thevenet -# -# This file is part of WarpX. -# -# License: BSD-3-Clause-LBNL - -import argparse -import datetime -import os -import re -import shutil -import time - -from functions_perftest import * - -# This script runs automated performance tests for WarpX. 
-# It runs tests in list test_list defined below, and writes
-# results in file performance_log.txt in warpx/Tools/PerformanceTests/
-
-# ---- User's manual ----
-# Before running performance tests, make sure you have the latest version
-# of performance_log.txt
-
-# ---- Running a custom set of performance tests ----
-# > python run_alltests_1node.py --no-recompile --compiler=intel
-# > --architecture=knl --mode=run --input_file=uniform_plasma
-# > --n_node=1 --log_file='my_performance_log.txt'
-
-# ---- Running the pre-defined automated tests ----
-# Compile and run:
-# > python run_alltests_1node.py --automated --recompile
-# Just run:
-# > python run_alltests_1node.py --automated
-
-# To add a new test item, extend the test_list with a line like
-# test_list.extend([['my_input_file', n_node, n_mpi, n_omp]]*n_repeat)
-# - my_input_file must be in warpx/Tools/PerformanceTests
-
-# ---- Developer's manual ----
-# This script can run in two modes:
-# - 'run' mode: for each test item, a batch job is executed.
-#     create folder '$SCRATCH/performance_warpx/'
-#     recompile the code if option --recompile is used
-#     loop over test_list and submit one batch script per item
-#     Submit a batch job that executes the script in read mode
-#     This last job runs once all others are completed
-# - 'read' mode: Get performance data from all test items
-#     create performance log file if does not exist
-#     loop over test_file
-#     read initialization time and step time
-#     write data into the performance log file
-#     push file performance_log.txt on the repo
-
-# Read command-line arguments
-# ---------------------------
-# Create parser and read arguments
-parser = argparse.ArgumentParser(
-    description='Run performance tests and write results in files')
-parser.add_argument('--recompile', dest='recompile', action='store_true', default=False)
-parser.add_argument('--no-recompile', dest='recompile', action='store_false', default=False)
-parser.add_argument('--commit', dest='commit', action='store_true', default=False)
-parser.add_argument('--compiler', choices=['gnu', 'intel'], default='intel',
-                    help='which compiler to use')
-parser.add_argument('--architecture', choices=['cpu', 'knl'], default='knl',
-                    help='which architecture to cross-compile for NERSC machines')
-parser.add_argument('--mode', choices=['run', 'read'], default='run',
-                    help='whether to run perftests or read their perf output. run calls read')
-parser.add_argument('--log_file', dest='log_file', default='my_performance_log.txt',
-                    help='name of log file where data will be written. ignored if option --commit is used')
-parser.add_argument('--n_node', dest='n_node', default=1, help='number of nodes for the runs')
-parser.add_argument('--input_file', dest='input_file', default='input_file.pixr',
-                    type=str, help='input file to run')
-parser.add_argument('--automated', dest='automated', action='store_true', default=False,
-                    help='Use to run the automated test list')
-
-args = parser.parse_args()
-log_file = args.log_file
-do_commit = args.commit
-run_name = args.input_file
-
-# list of tests to run and analyse.
-# Note: This is overwritten if option --automated is used -# each element of test_list contains -# [str input_file, int n_node, int n_mpi PER NODE, int n_omp] -test_list = [] -n_repeat = 2 -filename1 = args.input_file -test_list.extend([[filename1, 1, 128, 1]]*n_repeat) -test_list.extend([[filename1, 1, 64, 2]]*n_repeat) - -# Nothing should be changed after this line -# if flag --automated is used, test_list and do_commit are -# overwritten - -if args.automated == True: - test_list = [] - n_repeat = 2 - test_list.extend([['automated_test_1_uniform_rest_32ppc', 1, 16, 8]]*n_repeat) - test_list.extend([['automated_test_2_uniform_rest_1ppc', 1, 16, 8]]*n_repeat) - test_list.extend([['automated_test_3_uniform_drift_4ppc', 1, 16, 8]]*n_repeat) - test_list.extend([['automated_test_4_labdiags_2ppc', 1, 16, 8]]*n_repeat) - test_list.extend([['automated_test_5_loadimbalance', 1, 16, 8]]*n_repeat) - test_list.extend([['automated_test_6_output_2ppc', 1, 16, 8]]*n_repeat) - do_commit = False - run_name = 'automated_tests' - -n_tests = len(test_list) -if do_commit == True: - log_file = 'performance_log.txt' - -# Dictionaries -# compiler names. Used for WarpX executable name -compiler_name = {'intel': 'intel', 'gnu': 'gcc'} -# architecture. Used for WarpX executable name -module_name = {'cpu': 'haswell', 'knl': 'mic-knl'} -# architecture. Used in batch scripts -module_Cname = {'cpu': 'haswell', 'knl': 'knl,quad,cache'} -# Define environment variables -cwd = os.getcwd() + '/' -res_dir_base = os.environ['SCRATCH'] + '/performance_warpx/' -bin_dir = cwd + 'Bin/' -bin_name = 'perf_tests3d.' + args.compiler + '.' + module_name[args.architecture] + '.TPROF.MTMPI.OMP.QED.ex' -log_dir = cwd - -day = time.strftime('%d') -month = time.strftime('%m') -year = time.strftime('%Y') -n_node = int(args.n_node) - -perf_database_file = cwd + 'perf_database_warpx.h5' - -# Initialize tests -# ---------------- -if args.mode == 'run': -# Set default options for compilation and execution - config_command = '' - config_command += 'module unload darshan;' - config_command += 'module load craype-hugepages4M;' - if args.architecture == 'knl': - if args.compiler == 'intel': - config_command += 'module unload PrgEnv-gnu;' - config_command += 'module load PrgEnv-intel;' - elif args.compiler == 'gnu': - config_command += 'module unload PrgEnv-intel;' - config_command += 'module load PrgEnv-gnu;' - config_command += 'module unload craype-haswell;' - config_command += 'module load craype-mic-knl;' - elif args.architecture == 'cpu': - if args.compiler == 'intel': - config_command += 'module unload PrgEnv-gnu;' - config_command += 'module load PrgEnv-intel;' - elif args.compiler == 'gnu': - config_command += 'module unload PrgEnv-intel;' - config_command += 'module load PrgEnv-gnu;' - config_command += 'module unload craype-mic-knl;' - config_command += 'module load craype-haswell;' - # Create main result directory if does not exist - if not os.path.exists(res_dir_base): - os.mkdir(res_dir_base) - -# Recompile if requested -if args.recompile == True: - with open(cwd + 'GNUmakefile_perftest') as makefile_handler: - makefile_text = makefile_handler.read() - makefile_text = re.sub('\nCOMP.*', '\nCOMP=%s' %compiler_name[args.compiler], makefile_text) - with open(cwd + 'GNUmakefile_perftest', 'w') as makefile_handler: - makefile_handler.write( makefile_text ) - os.system(config_command + " make -f GNUmakefile_perftest realclean ; " + " rm -r tmp_build_dir *.mod; make -j 8 -f GNUmakefile_perftest") - -# This function runs a batch script with 
dependencies to perform the analysis -# when performance runs are done. -def process_analysis(): - dependencies = '' - f_log = open(cwd + 'log_jobids_tmp.txt','r') - line = f_log.readline() - print(line) - dependencies += line.split()[3] + ':' - batch_string = '' - batch_string += '#!/bin/bash\n' - batch_string += '#SBATCH --job-name=warpx_1node_read\n' - batch_string += '#SBATCH --time=00:05:00\n' - batch_string += '#SBATCH -C haswell\n' - batch_string += '#SBATCH -N 1\n' - batch_string += '#SBATCH -S 4\n' - batch_string += '#SBATCH -q regular\n' - batch_string += '#SBATCH -e read_error.txt\n' - batch_string += '#SBATCH -o read_output.txt\n' - batch_string += '#SBATCH --mail-type=end\n' - batch_string += '#SBATCH --account=m2852\n' - batch_string += 'python ' + __file__ + ' --no-recompile --compiler=' + \ - args.compiler + ' --architecture=' + args.architecture + \ - ' --mode=read' + ' --log_file=' + log_file + \ - ' --input_file=' + args.input_file - if do_commit == True: - batch_string += ' --commit' - if args.automated == True: - batch_string += ' --automated' - batch_string += '\n' - batch_file = 'slurm_perfread' - f_exe = open(batch_file,'w') - f_exe.write(batch_string) - f_exe.close() - os.system('chmod 700 ' + batch_file) - os.system('sbatch --dependency afterok:' + dependencies[0:-1] + ' ' + batch_file) - return 0 - -# Loop over the tests and return run time + details -# ------------------------------------------------- -if args.mode == 'run': - # Remove file log_jobids_tmp.txt if exists. - # This file contains the jobid of every perf test - # It is used to manage the analysis script dependencies - if os.path.isfile(cwd + 'log_jobids_tmp.txt'): - os.remove(cwd + 'log_jobids_tmp.txt') - res_dir = res_dir_base - res_dir += '_'.join([run_name, args.compiler,\ - args.architecture, str(n_node)]) + '/' - # Run the simulation. 
- run_batch_nnode(test_list, res_dir, bin_name, config_command,\ - architecture=args.architecture, Cname=module_Cname[args.architecture], \ - n_node=n_node) - os.chdir(cwd) - process_analysis() - -if args.mode == 'read': - # Create log_file for performance tests if does not exist - if not os.path.isfile(log_dir + log_file): - log_line = '## year month day input_file compiler architecture n_node n_mpi ' +\ - 'n_omp time_initialization time_one_iteration Redistribute '+\ - 'FillBoundary ParallelCopy CurrentDeposition FieldGather '+\ - 'ParthiclePush Copy Evolve Checkpoint '+\ - 'WriteParticles Write_FabArray '+\ - 'WriteMultiLevelPlotfile(unit: second) '+\ - 'RedistributeMPI\n' - f_log = open(log_dir + log_file, 'a') - f_log.write(log_line) - f_log.close() - for count, current_run in enumerate(test_list): - # Results folder - print('read ' + str(current_run)) - input_file = current_run[0] - # Do not read n_node = current_run[1], it is an external parameter - n_mpi = current_run[2] - n_omp = current_run[3] - n_steps = get_nsteps(cwd + input_file) - print('n_steps = ' + str(n_steps)) - res_dir = res_dir_base - res_dir += '_'.join([run_name, args.compiler,\ - args.architecture, str(n_node)]) + '/' - # Read performance data from the output file - output_filename = 'out_' + '_'.join([input_file, str(n_node), str(n_mpi), str(n_omp), str(count)]) + '.txt' - timing_list = read_run_perf(res_dir + output_filename, n_steps) - # Write performance data to the performance log file - log_line = ' '.join([year, month, day, input_file, args.compiler,\ - args.architecture, str(n_node), str(n_mpi),\ - str(n_omp)] + timing_list + ['\n']) - write_perf_logfile(log_dir + log_file, log_line) - # Read data for all test to put in hdf5 a database - # ------------------------------------------------ - # This is an hdf5 file containing ALL the simulation parameters and results. 
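Both ``read_run_perf`` (called above) and ``extract_dataframe`` (called just below) recover their timings the same way: they slice the TinyProfiler summary between two header lines used as partition limits and take the fourth whitespace-separated field of each routine's row. A condensed sketch of that parsing step follows; the helper name is new, and the marker strings are the ``partition_limit`` constants from functions_perftest.py::

    def exclusive_timers(output_text, start_marker, end_marker):
        # start_marker / end_marker are the 'NCalls Excl. ...' and 'NCalls Incl. ...'
        # header lines of the TinyProfiler output.
        block = output_text.partition(start_marker)[2].partition(end_marker)[0]
        timers = {}
        for row in block.splitlines():
            fields = row.split()
            if len(fields) >= 5:
                try:
                    timers[fields[0]] = float(fields[3])  # same field index the scripts use
                except ValueError:
                    pass                                  # skip header and separator rows
        return timers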
Might be too large for a repo - df_newline = extract_dataframe(res_dir + output_filename, n_steps) - # Add all simulation parameters to the dataframe - df_newline['run_name'] = run_name - df_newline['n_node'] = n_node - df_newline['n_mpi'] = n_mpi - df_newline['n_omp'] = n_omp - df_newline['n_steps'] = n_steps - df_newline['rep'] = count - df_newline['date'] = datetime.datetime.now() - input_file_open = open(cwd + input_file, 'r') - input_file_content = input_file_open.read() - input_file_open.close() - df_newline['inputs_content'] = input_file_content - if os.path.exists(perf_database_file): - df_base = pd.read_hdf(perf_database_file, 'all_data') - updated_df = df_base.append(df_newline, ignore_index=True) - else: - updated_df = df_newline - updated_df.to_hdf(perf_database_file, key='all_data', mode='w') - - # Store test parameters fot record - dir_record_base = './perf_warpx_record/' - if not os.path.exists(dir_record_base): - os.mkdir(dir_record_base) - count = 0 - dir_record = dir_record_base + '_'.join([year, month, day]) + '_0' - while os.path.exists(dir_record): - count += 1 - dir_record = dir_record[:-1] + str(count) - os.mkdir(dir_record) - shutil.copy(__file__, dir_record) - shutil.copy(log_dir + log_file, dir_record) - for count, current_run in enumerate(test_list): - shutil.copy(current_run[0], dir_record) - - # Rename directory with precise date for archive purpose - res_dir_arch = res_dir_base - res_dir_arch += '_'.join([year, month, day, run_name, args.compiler,\ - args.architecture, str(n_node)]) + '/' - os.rename(res_dir, res_dir_arch) - - # Commit results to the Repo - if do_commit == True: - os.system('git add ' + log_dir + log_file + ';'\ - 'git commit -m "performance tests";'\ - 'git push -u origin development') - - # Plot file - import matplotlib - import numpy as np - matplotlib.use('Agg') - import matplotlib.pyplot as plt - filename0 = 'my_performance_log' - filename = filename0 + '.txt' - fontsize = 14 - matplotlib.rcParams.update({'font.size': fontsize}) - nsteps = 100. - nrepeat = 4 - legends = [ 'n_node', 'n_mpi', 'n_omp', 'time_initialization', 'time_one_iteration', \ - 'Redistribute', 'FillBoundary', 'ParallelCopy', 'CurrentDeposition', \ - 'FieldGather', 'ParthiclePush', 'Copy', 'Evolve', 'Checkpoint', \ - 'WriteParticles', 'Write_FabArray', 'WriteMultiLevelPlotfile', \ - 'RedistributeMPI'] - date = np.loadtxt( filename, usecols = np.arange(0, 3 )) - data = np.loadtxt( filename, usecols = np.arange(6, 6+len(legends)) ) - # Read run name - with open(filename) as f: - namelist_tmp = zip(*[line.split() for line in f])[3] - # Remove first line = comments - namelist = list(namelist_tmp[1:]) - selector_list = ['automated_test_1_uniform_rest_32ppc',\ - 'automated_test_2_uniform_rest_1ppc',\ - 'automated_test_3_uniform_drift_4ppc',\ - 'automated_test_4_labdiags_2ppc',\ - 'automated_test_5_loadimbalance',\ - 'automated_test_6_output_2ppc'] - selector_string = selector_list[0] - selector = [idx for idx in range(len(namelist)) if selector_string in namelist[idx]] - lin_date = date[:,0]+date[:,1]/12.+date[:,2]/366. 
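    # Note on the filter in the plotting loop below: a timer is drawn only when it
    # exceeds 5% of the per-iteration time somewhere in its history, i.e.
    #     keep = (data[selector, i] > 0.05 * data[selector, 4]).any()
    # where column 4 of `data` is time_one_iteration; presumably this keeps the
    # legends uncluttered.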
- unique_lin_date = np.unique(lin_date) - my_xticks = unique_lin_date -# cmap = plt.get_cmap("tab20") - cycle = plt.rcParams['axes.prop_cycle'].by_key()['color'] - for selector_string in selector_list: - selector = [idx for idx in range(len(namelist)) if selector_string in namelist[idx]] - plt.figure(num=0, figsize=(8,4)) - plt.clf() - plt.title('warpx ' + selector_string) - for i in np.arange(data.shape[1]): - icolors = i-3 - if i>3 and (data[selector,i] > 5./100*data[selector,4]).any(): - plt.plot(lin_date[selector], data[selector,i],'+', ms=6, \ - mew=2, label=legends[i] ) - # plt.plot(lin_date[selector], data[selector,i],'+', ms=6, \ - # mew=2, label=legends[i], color=cmap(i) ) - plt.xlabel('date') - plt.ylabel('time/step (s)') - plt.grid() - plt.legend(loc='best') - plt.legend(bbox_to_anchor=(1.1, 1.05)) - plt.savefig( selector_string + '.pdf', bbox_inches='tight') - plt.savefig( selector_string + '.png', bbox_inches='tight') diff --git a/Tools/PerformanceTests/run_automated.py b/Tools/PerformanceTests/run_automated.py deleted file mode 100644 index f03ead05376..00000000000 --- a/Tools/PerformanceTests/run_automated.py +++ /dev/null @@ -1,350 +0,0 @@ -# Copyright 2018-2019 Axel Huebl, Luca Fedeli, Maxence Thevenet -# -# -# This file is part of WarpX. -# -# License: BSD-3-Clause-LBNL - -import argparse -import copy -import datetime -import os -import shutil -import sys -import time - -import git -import pandas as pd -from functions_perftest import ( - extract_dataframe, - get_file_content, - run_batch_nnode, - store_git_hash, -) - -# Get name of supercomputer and import configuration functions from -# machine-specific file -if os.getenv("LMOD_SYSTEM_NAME") == 'summit': - machine = 'summit' - from summit import ( - executable_name, - get_batch_string, - get_config_command, - get_run_string, - get_submit_job_command, - get_test_list, - process_analysis, - time_min, - ) -if os.getenv("NERSC_HOST") == 'cori': - machine = 'cori' - from cori import ( - executable_name, - get_batch_string, - get_config_command, - get_run_string, - get_submit_job_command, - get_test_list, - process_analysis, - time_min, - ) - -# typical use: python run_automated.py --n_node_list='1,8,16,32' --automated -# Assume warpx, picsar, amrex and perf_logs repos ar in the same directory and -# environment variable AUTOMATED_PERF_TESTS contains the path to this directory - -# requirements: -# - python packages: gitpython and pandas -# - AUTOMATED_PERF_TESTS: environment variables where warpx, -# amrex and picsar are installed ($AUTOMATED_PERF_TESTS/warpx etc.) -# - SCRATCH: environment variable where performance results are written. 
-# This script will create folder $SCRATCH/performance_warpx/ - -if "AUTOMATED_PERF_TESTS" not in os.environ: - raise ValueError("environment variable AUTOMATED_PERF_TESTS is not defined.\n" - "It should contain the path to the directory where WarpX, " - "AMReX and PICSAR repos are.") -if "SCRATCH" not in os.environ: - raise ValueError("environment variable SCRATCH is not defined.\n" - "This script will create $SCRATCH/performance_warpx/ " - "to store performance results.") -# Handle parser -############### -parser = argparse.ArgumentParser( description='Run performance tests and write results in files' ) -parser.add_argument('--recompile', - dest='recompile', - action='store_true', - default=False) -parser.add_argument('--commit', - dest='commit', - action='store_true', - default=False) -parser.add_argument('--automated', - dest='automated', - action='store_true', - default=False, - help='Use to run the automated test list') -parser.add_argument('--n_node_list', - dest='n_node_list', - default=[], - help='list of number of nodes for the runs', type=str) -parser.add_argument('--start_date', - dest='start_date' ) -parser.add_argument('--compiler', - choices=['gnu', 'intel', 'pgi'], - default='intel', - help='which compiler to use') -parser.add_argument('--architecture', - choices=['cpu', 'knl', 'gpu'], - default='knl', - help='which architecture to cross-compile for NERSC machines') -parser.add_argument('--mode', - choices=['run', 'read', 'browse_output_files'], - default='run', - help='whether to run perftests or read their perf output. run calls read') -parser.add_argument('--path_source', - default=None, - help='path to parent folder containing amrex, picsar and warpx folders') -parser.add_argument('--path_results', - default=None, - help='path to result directory, where simulations run') - -args = parser.parse_args() -n_node_list_string = args.n_node_list.split(',') -n_node_list = [int(i) for i in n_node_list_string] -start_date = args.start_date - -# Set behavior variables -######################## -run_name = 'custom_perftest' -perf_database_file = 'my_tests_database.h5' -rename_archive = False -store_full_input = False -update_perf_log_repo = False -push_on_perf_log_repo = False -recompile = args.recompile -pull_3_repos = False -recompile = True -compiler = args.compiler -architecture = args.architecture -source_dir_base = args.path_source -res_dir_base = args.path_results - -browse_output_files = False -if args.mode == 'browse_output_files': - browse_output_file = True -if args.mode == 'read': - browse_output_files = True - -if args.automated == True: - run_name = 'automated_tests' - perf_database_file = machine + '_results.h5' - rename_archive = True - store_full_input = False - update_perf_log_repo = True - push_on_perf_log_repo = False - pull_3_repos = True - recompile = True - source_dir_base = os.environ['AUTOMATED_PERF_TESTS'] - res_dir_base = os.environ['SCRATCH'] + '/performance_warpx/' - if machine == 'summit': - compiler = 'gnu' - architecture = 'gpu' - -# List of tests to perform -# ------------------------ -# Each test runs n_repeat times -n_repeat = 2 -# test_list is machine-specific -test_list = get_test_list(n_repeat) - -# Define directories -# ------------------ -warpx_dir = source_dir_base + '/warpx/' -picsar_dir = source_dir_base + '/picsar/' -amrex_dir = source_dir_base + '/amrex/' -perf_logs_repo = source_dir_base + 'perf_logs/' - -# Define dictionaries -# ------------------- -compiler_name = {'intel': 'intel', 'gnu': 'gcc', 'pgi':'pgi'} -module_Cname = {'cpu': 
'haswell', 'knl': 'knl,quad,cache', 'gpu':''} -csv_file = {'cori':'cori_knl.csv', 'summit':'summit.csv'} -# cwd = os.getcwd() + '/' -cwd = warpx_dir + 'Tools/PerformanceTests/' - -path_hdf5 = cwd -if args.automated: - path_hdf5 = perf_logs_repo + '/logs_hdf5/' - -bin_dir = cwd + 'Bin/' -bin_name = executable_name(compiler, architecture) - -log_dir = cwd -day = time.strftime('%d') -month = time.strftime('%m') -year = time.strftime('%Y') - -# Initialize tests -# ---------------- -if args.mode == 'run': - start_date = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S") - # Set default options for compilation and execution - config_command = get_config_command(compiler, architecture) - # Create main result directory if does not exist - if not os.path.exists(res_dir_base): - os.mkdir(res_dir_base) - - # Recompile if requested - # ---------------------- - if recompile == True: - if pull_3_repos == True: - git_repo = git.cmd.Git( picsar_dir ) - git_repo.pull() - git_repo = git.cmd.Git( amrex_dir ) - git_repo.pull() - git_repo = git.cmd.Git( warpx_dir ) - git_repo.pull() - - # Copy WarpX/GNUmakefile to current directory and recompile - # with specific options for automated performance tests. - # This way, performance test compilation does not mess with user's - # compilation - shutil.copyfile("../../GNUmakefile","./GNUmakefile") - make_realclean_command = " make realclean WARPX_HOME=../.. " \ - "AMREX_HOME=../../../amrex/ PICSAR_HOME=../../../picsar/ " \ - "EBASE=perf_tests COMP=%s" %compiler_name[compiler] + ";" - make_command = "make -j 16 WARPX_HOME=../.. " \ - "AMREX_HOME=../../../amrex/ PICSAR_HOME=../../../picsar/ " \ - "EBASE=perf_tests COMP=%s" %compiler_name[compiler] - if machine == 'summit': - make_command += ' USE_GPU=TRUE ' - os.system(config_command + make_realclean_command + \ - "rm -r tmp_build_dir *.mod; " + make_command ) - - # Store git hashes for WarpX, AMReX and PICSAR into file, so that - # they can be read when running the analysis. - if os.path.exists( cwd + 'store_git_hashes.txt' ): - os.remove( cwd + 'store_git_hashes.txt' ) - store_git_hash(repo_path=picsar_dir, filename=cwd + 'store_git_hashes.txt', name='picsar') - store_git_hash(repo_path=amrex_dir , filename=cwd + 'store_git_hashes.txt', name='amrex' ) - store_git_hash(repo_path=warpx_dir , filename=cwd + 'store_git_hashes.txt', name='warpx' ) - -# Loop over the tests and run all simulations: -# One batch job submitted per n_node. Several -# tests run within the same batch job. -# -------------------------------------------- -if args.mode == 'run': - if os.path.exists( 'log_jobids_tmp.txt' ): - os.remove( 'log_jobids_tmp.txt' ) - # loop on n_node. 
One batch script per n_node - for n_node in n_node_list: - res_dir = res_dir_base - res_dir += '_'.join([run_name, compiler, architecture, str(n_node)]) + '/' - runtime_param_list = [] - # Deep copy as we change the attribute n_cell of - # each instance of class test_element - test_list_n_node = copy.deepcopy(test_list) - job_time_min = time_min(len(test_list)) - batch_string = get_batch_string(test_list_n_node, job_time_min, module_Cname[architecture], n_node) - # Loop on tests - for count, current_run in enumerate(test_list_n_node): - current_run.scale_n_cell(n_node) - runtime_param_string = ' amr.n_cell=' + ' '.join(str(i) for i in current_run.n_cell) - runtime_param_string += ' amr.max_grid_size=' + str(current_run.max_grid_size) - runtime_param_string += ' amr.blocking_factor=' + str(current_run.blocking_factor) - runtime_param_string += ' max_step=' + str( current_run.n_step ) - # runtime_param_list.append( runtime_param_string ) - run_string = get_run_string(current_run, architecture, n_node, count, bin_name, runtime_param_string) - batch_string += run_string - batch_string += 'rm -rf plotfiles lab_frame_data diags\n' - - submit_job_command = get_submit_job_command() - # Run the simulations. - run_batch_nnode(test_list_n_node, res_dir, cwd, bin_name, config_command, batch_string, submit_job_command) - os.chdir(cwd) - # submit batch for analysis - if os.path.exists( 'read_error.txt' ): - os.remove( 'read_error.txt' ) - if os.path.exists( 'read_output.txt' ): - os.remove( 'read_output.txt' ) - process_analysis(args.automated, cwd, compiler, architecture, - args.n_node_list, start_date, source_dir_base, res_dir_base) - -# read the output file from each test and store timers in -# hdf5 file with pandas format -# ------------------------------------------------------- -for n_node in n_node_list: - print(n_node) - if browse_output_files: - res_dir = res_dir_base - res_dir += '_'.join([run_name, compiler,\ - architecture, str(n_node)]) + '/' - for count, current_run in enumerate(test_list): - # Read performance data from the output file - output_filename = 'out_' + '_'.join([current_run.input_file, str(n_node), str(current_run.n_mpi_per_node), str(current_run.n_omp), str(count)]) + '.txt' - # Read data for all test to put in hdf5 a database - # This is an hdf5 file containing ALL the simulation - # parameters and results. 
Might be too large for a repo - df_newline = extract_dataframe(res_dir + output_filename, current_run.n_step) - # Add all simulation parameters to the dataframe - df_newline['git_hashes'] = get_file_content(filename=cwd+'store_git_hashes.txt') - df_newline['start_date'] = start_date - df_newline['run_name'] = run_name - df_newline['input_file'] = current_run.input_file - df_newline['n_node'] = n_node - df_newline['n_mpi_per_node'] = current_run.n_mpi_per_node - df_newline['n_omp'] = current_run.n_omp - df_newline['n_steps'] = current_run.n_step - df_newline['rep'] = count%n_repeat - df_newline['date'] = datetime.datetime.now() - if store_full_input: - df_newline['inputs_content'] = get_file_content( filename=cwd+current_run.input_file ) - # Load file perf_database_file if exists, and - # append with results from this scan - if os.path.exists(path_hdf5 + perf_database_file): - df_base = pd.read_hdf(path_hdf5 + perf_database_file, 'all_data') - updated_df = df_base.append(df_newline, ignore_index=True) - else: - updated_df = df_newline - # Write dataframe to file perf_database_file - # (overwrite if file exists) - updated_df.to_hdf(path_hdf5 + perf_database_file, key='all_data', mode='w', format='table') - -# Extract sub-set of pandas data frame, write it to -# csv file and copy this file to perf_logs repo -# ------------------------------------------------- -if args.mode=='read' and update_perf_log_repo: - # get perf_logs repo - git_repo = git.Repo( perf_logs_repo ) - if push_on_perf_log_repo: - git_repo.git.stash('save') - git_repo.git.pull() - os.chdir( perf_logs_repo ) - sys.path.append('./') - import write_csv - git_repo.git.add('./logs_csv/' + csv_file[machine]) - git_repo.git.add('./logs_hdf5/' + perf_database_file) - index = git_repo.index - index.commit("automated tests") - -# Rename all result directories for archiving purposes: -# include date in the name, and a counter to avoid over-writing -for n_node in n_node_list: - if browse_output_files: - res_dir = res_dir_base - res_dir += '_'.join([run_name, compiler,\ - architecture, str(n_node)]) + '/' - # Rename directory with precise date+hour for archive purpose - if rename_archive == True: - loc_counter = 0 - res_dir_arch = res_dir_base - res_dir_arch += '_'.join([year, month, day, run_name, compiler,\ - architecture, str(n_node), str(loc_counter)]) + '/' - while os.path.exists( res_dir_arch ): - loc_counter += 1 - res_dir_arch = res_dir_base - res_dir_arch += '_'.join([year, month, day, run_name, compiler,\ - architecture, str(n_node), str(loc_counter)]) + '/' - print("renaming " + res_dir + " -> " + res_dir_arch) - os.rename( res_dir, res_dir_arch ) diff --git a/Tools/PerformanceTests/summit.py b/Tools/PerformanceTests/summit.py deleted file mode 100644 index c2ba6c70a2e..00000000000 --- a/Tools/PerformanceTests/summit.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright 2019 Axel Huebl, Luca Fedeli, Maxence Thevenet -# -# -# This file is part of WarpX. -# -# License: BSD-3-Clause-LBNL - -# requirements: -# - module load python/3.7.0-anaconda3-5.3.0 - -import copy -import os - -from functions_perftest import test_element - - -def executable_name(compiler,architecture): - return 'perf_tests3d.' + compiler + '.TPROF.MTMPI.CUDA.QED.ex' - -def get_config_command(compiler, architecture): - config_command = '' - config_command += 'module load gcc;' - config_command += 'module load cuda;' - return config_command - -# This function runs a batch script with -# dependencies to perform the analysis -# after all performance tests are done. 
-def process_analysis(automated, cwd, compiler, architecture, n_node_list, start_date, path_source, path_results): - - batch_string = '''#!/bin/bash -#BSUB -P APH114 -#BSUB -W 00:10 -#BSUB -nnodes 1 -#BSUB -J perf_test -#BSUB -o read_output.txt -#BSUB -e read_error.txt -''' - f_log = open(cwd + 'log_jobids_tmp.txt' ,'r') - for line in f_log.readlines(): - dependency = line.split()[1][1:-1] - batch_string += '#BSUB -w ended(' + dependency + ')\n' - - batch_string += 'python run_automated.py --compiler=' + \ - compiler + ' --architecture=' + architecture + \ - ' --mode=read' + \ - ' --n_node_list=' + '"' + n_node_list + '"' + \ - ' --start_date=' + start_date + \ - ' --path_source=' + path_source + \ - ' --path_results=' + path_results - if automated == True: - batch_string += ' --automated' - batch_string += '\n' - batch_file = 'bsub_perfread' - f_exe = open(batch_file,'w') - f_exe.write(batch_string) - f_exe.close() - os.system('chmod 700 ' + batch_file) - print( 'process_analysis line: ' + 'bsub ' + batch_file) - os.system('bsub ' + batch_file) - -# Calculate simulation time. Take 2 min + 2 min / simulation -def time_min(nb_simulations): - return 2. + nb_simulations*2. - -def get_submit_job_command(): - return ' bsub ' - -def get_batch_string(test_list, job_time_min, Cname, n_node): - - job_time_str = str(int(job_time_min/60)) + ':' + str(int(job_time_min%60)) - - batch_string = '' - batch_string += '#!/bin/bash\n' - batch_string += '#BSUB -P APH114\n' - batch_string += '#BSUB -W ' + job_time_str + '\n' - batch_string += '#BSUB -nnodes ' + str(n_node) + '\n' - batch_string += '#BSUB -J ' + test_list[0].input_file + '\n' - batch_string += '#BSUB -e error.txt\n' - batch_string += 'module load gcc\n' - batch_string += 'module load cuda\n' - return batch_string - -def get_run_string(current_test, architecture, n_node, count, bin_name, runtime_param_string): - - output_filename = 'out_' + '_'.join([current_test.input_file, str(n_node), str(current_test.n_mpi_per_node), str(current_test.n_omp), str(count)]) + '.txt' - - ngpu = str(current_test.n_mpi_per_node) - srun_string = '' - srun_string += 'jsrun ' - srun_string += ' -n ' + str(n_node) - srun_string += ' -a ' + ngpu + ' -g ' + ngpu + ' -c ' + ngpu + ' --bind=packed:1 ' - srun_string += ' ./' + bin_name + ' ' - srun_string += current_test.input_file + ' ' - srun_string += runtime_param_string - srun_string += ' > ' + output_filename + '\n' - return srun_string - -def get_test_list(n_repeat): - test_list_unq = [] - # n_node is kept to None and passed in functions as an external argument - # That way, several test_element_instance run with the same n_node on the same batch job - test_list_unq.append( test_element(input_file='automated_test_1_uniform_rest_32ppc', - n_mpi_per_node=6, - n_omp=1, - n_cell=[128, 128, 192], - max_grid_size=256, - blocking_factor=32, - n_step=10) ) - test_list_unq.append( test_element(input_file='automated_test_2_uniform_rest_1ppc', - n_mpi_per_node=6, - n_omp=1, - n_cell=[256, 512, 768], - max_grid_size=512, - blocking_factor=256, - n_step=10) ) - test_list_unq.append( test_element(input_file='automated_test_3_uniform_drift_4ppc', - n_mpi_per_node=6, - n_omp=1, - n_cell=[128, 128, 384], - max_grid_size=256, - blocking_factor=64, - n_step=10) ) - test_list_unq.append( test_element(input_file='automated_test_4_labdiags_2ppc', - n_mpi_per_node=6, - n_omp=1, - n_cell=[384, 256, 512], - max_grid_size=256, - blocking_factor=128, - n_step=50) ) - test_list_unq.append( 
test_element(input_file='automated_test_5_loadimbalance', - n_mpi_per_node=6, - n_omp=1, - n_cell=[64, 64, 192], - max_grid_size=64, - blocking_factor=32, - n_step=10) ) - test_list_unq.append( test_element(input_file='automated_test_6_output_2ppc', - n_mpi_per_node=6, - n_omp=1, - n_cell=[384, 256, 512], - max_grid_size=256, - blocking_factor=64, - n_step=1) ) - test_list = [copy.deepcopy(item) for item in test_list_unq for _ in range(n_repeat) ] - return test_list diff --git a/Tools/PostProcessing/Visualization.ipynb b/Tools/PostProcessing/Visualization.ipynb index ef05b69c2c0..c13051a0c76 100644 --- a/Tools/PostProcessing/Visualization.ipynb +++ b/Tools/PostProcessing/Visualization.ipynb @@ -17,8 +17,6 @@ "source": [ "# Import statements\n", "import yt ; yt.funcs.mylog.setLevel(50)\n", - "import numpy as np\n", - "import scipy.constants as scc\n", "import matplotlib.pyplot as plt\n", "%matplotlib notebook" ] diff --git a/Tools/PostProcessing/plot_distribution_mapping.py b/Tools/PostProcessing/plot_distribution_mapping.py index 4b0cdfd532b..db95c862bd5 100644 --- a/Tools/PostProcessing/plot_distribution_mapping.py +++ b/Tools/PostProcessing/plot_distribution_mapping.py @@ -26,7 +26,7 @@ def __call__(self, i): print("No data_fields!") return - if not i in self.keys: + if i not in self.keys: print("Index is out of range!") print("Valid keys are ", self.keys) return diff --git a/Tools/PostProcessing/plot_nci_growth_rate.ipynb b/Tools/PostProcessing/plot_nci_growth_rate.ipynb index 559d5250237..ee010fab1e6 100644 --- a/Tools/PostProcessing/plot_nci_growth_rate.ipynb +++ b/Tools/PostProcessing/plot_nci_growth_rate.ipynb @@ -24,7 +24,8 @@ "from scipy.constants import c\n", "import numpy as np\n", "import scipy.constants as scc\n", - "import yt ; yt.funcs.mylog.setLevel(50)\n", + "import yt\n", + "yt.funcs.mylog.setLevel(50)\n", "import glob\n", "%matplotlib inline" ] @@ -71,13 +72,12 @@ " iteration=200\n", " dsx = yt.load( path + 'diag1%05d/' %iteration )\n", " dxx = dsx.domain_width/dsx.domain_dimensions\n", - " dx=dxx[0];\n", + " dx=dxx[0]\n", " dx = 1.*dx.ndarray_view()\n", "\n", - " dz=dxx[1];\n", + " dz=dxx[1]\n", " \n", " dz = 1.*dz.ndarray_view()\n", - " cell_volume_x = np.prod(dxx)\n", "\n", " ds1 = yt.load(path+'/diag100100/')\n", " ds2 = yt.load(path+'/diag100200/')\n", @@ -86,7 +86,7 @@ " cur_t2 = ds2.current_time \n", " cur_t2.to_ndarray\n", " dt = (cur_t2-cur_t1)/100\n", - " dt = 1.*dt.ndarray_view();\n", + " dt = 1.*dt.ndarray_view()\n", " return dx, dz, dt" ] }, @@ -165,8 +165,8 @@ " spec2 = np.where( abs(spec2) > np.exp(threshold), spec2, np.exp(threshold) )\n", " diff_growth = np.log( abs(spec2) ) - np.log( abs(spec1) )\n", "\n", - " diff_time = (iteration2-iteration1)*dt;\n", - " growth_rate = diff_growth/diff_time/c;\n", + " diff_time = (iteration2-iteration1)*dt\n", + " growth_rate = diff_growth/diff_time/c\n", "\n", " return( growth_rate, [0, kxmax, 0, kzmax] )" ] diff --git a/Tools/PostProcessing/plot_parallel.py b/Tools/PostProcessing/plot_parallel.py index a4309b3896e..9719b7006c3 100644 --- a/Tools/PostProcessing/plot_parallel.py +++ b/Tools/PostProcessing/plot_parallel.py @@ -240,7 +240,7 @@ def reduce_evolved_quantity(z, q): nfiles = len(file_list) # Get list of particle species to plot -pslist = get_species(file_list); +pslist = get_species(file_list) rank = 0 size = 1 diff --git a/Tools/Release/updateAMReX.py b/Tools/Release/updateAMReX.py index 9dfa7fbeb41..b01014852d4 100755 --- a/Tools/Release/updateAMReX.py +++ b/Tools/Release/updateAMReX.py @@ -38,7 +38,7 @@ 
REPLY = input("Are you sure you want to continue? [y/N] ") print() -if not REPLY in ["Y", "y"]: +if REPLY not in ["Y", "y"]: print("You did not confirm with 'y', aborting.") sys.exit(1) @@ -78,7 +78,7 @@ print(f"Currently, WarpX builds against this AMReX commit/branch/sha: {amrex_branch}") print(f"AMReX HEAD commit (development branch): {amrex_HEAD}") -amrex_new_branch = input(f"Update AMReX commit/branch/sha: ").strip() +amrex_new_branch = input("Update AMReX commit/branch/sha: ").strip() if not amrex_new_branch: amrex_new_branch = amrex_branch print(f"--> Nothing entered, will keep: {amrex_branch}") @@ -97,7 +97,7 @@ REPLY = input("Is this information correct? Will now start updating! [y/N] ") print() -if not REPLY in ["Y", "y"]: +if REPLY not in ["Y", "y"]: print("You did not confirm with 'y', aborting.") sys.exit(1) diff --git a/Tools/Release/updatePICSAR.py b/Tools/Release/updatePICSAR.py index 7e61679d371..fe15e5b120e 100755 --- a/Tools/Release/updatePICSAR.py +++ b/Tools/Release/updatePICSAR.py @@ -29,7 +29,7 @@ REPLY = input("Are you sure you want to continue? [y/N] ") print() -if not REPLY in ["Y", "y"]: +if REPLY not in ["Y", "y"]: print("You did not confirm with 'y', aborting.") sys.exit(1) @@ -69,7 +69,7 @@ print(f"Currently, WarpX builds against this PICSAR commit/branch/sha: {PICSAR_branch}") print(f"PICSAR HEAD commit (development branch): {PICSAR_HEAD}") -PICSAR_new_branch = input(f"Update PICSAR commit/branch/sha: ").strip() +PICSAR_new_branch = input("Update PICSAR commit/branch/sha: ").strip() if not PICSAR_new_branch: PICSAR_new_branch = PICSAR_branch print(f"--> Nothing entered, will keep: {PICSAR_branch}") @@ -88,7 +88,7 @@ REPLY = input("Is this information correct? Will now start updating! [y/N] ") print() -if not REPLY in ["Y", "y"]: +if REPLY not in ["Y", "y"]: print("You did not confirm with 'y', aborting.") sys.exit(1) diff --git a/Tools/Release/updatepyAMReX.py b/Tools/Release/updatepyAMReX.py index 500781e0880..04887dc4988 100755 --- a/Tools/Release/updatepyAMReX.py +++ b/Tools/Release/updatepyAMReX.py @@ -29,7 +29,7 @@ REPLY = input("Are you sure you want to continue? [y/N] ") print() -if not REPLY in ["Y", "y"]: +if REPLY not in ["Y", "y"]: print("You did not confirm with 'y', aborting.") sys.exit(1) @@ -69,7 +69,7 @@ print(f"Currently, WarpX builds against this pyAMReX commit/branch/sha: {pyamrex_branch}") print(f"pyAMReX HEAD commit (development branch): {pyamrex_HEAD}") -pyamrex_new_branch = input(f"Update pyAMReX commit/branch/sha: ").strip() +pyamrex_new_branch = input("Update pyAMReX commit/branch/sha: ").strip() if not pyamrex_new_branch: pyamrex_new_branch = pyamrex_branch print(f"--> Nothing entered, will keep: {pyamrex_branch}") @@ -88,7 +88,7 @@ REPLY = input("Is this information correct? Will now start updating! [y/N] ") print() -if not REPLY in ["Y", "y"]: +if REPLY not in ["Y", "y"]: print("You did not confirm with 'y', aborting.") sys.exit(1)