From 7e86c97418f5acde26a96ab82e86751692c6d1bf Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 16 Apr 2024 17:34:46 -0700 Subject: [PATCH] EB: Introduce Runtime Parameter --- Docs/source/usage/parameters.rst | 9 +- Source/BoundaryConditions/PML.H | 1 + Source/BoundaryConditions/PML.cpp | 35 +-- Source/BoundaryConditions/WarpXEvolvePML.cpp | 30 ++- .../BoundaryScrapingDiagnostics.cpp | 9 +- .../Diagnostics/ReducedDiags/ChargeOnEB.cpp | 10 + Source/Diagnostics/WarpXIO.cpp | 2 +- Source/EmbeddedBoundary/CMakeLists.txt | 1 + Source/EmbeddedBoundary/DistanceToEB.H | 6 +- Source/EmbeddedBoundary/Enabled.H | 18 ++ Source/EmbeddedBoundary/Enabled.cpp | 45 ++++ .../EmbeddedBoundary/WarpXFaceExtensions.cpp | 70 +++--- Source/EmbeddedBoundary/WarpXInitEB.cpp | 11 +- Source/Evolve/WarpXEvolve.cpp | 13 +- Source/FieldSolver/ElectrostaticSolver.cpp | 98 ++++----- .../FiniteDifferenceSolver/EvolveB.cpp | 3 - .../FiniteDifferenceSolver/EvolveE.cpp | 58 +++-- .../HybridPICModel/HybridPICModel.cpp | 34 ++- .../HybridPICSolveE.cpp | 141 ++++++------ .../MacroscopicEvolveE.cpp | 30 +-- .../MagnetostaticSolver.cpp | 1 + Source/Initialization/WarpXInitData.cpp | 104 ++++----- Source/Parallelization/WarpXRegrid.cpp | 45 ++-- Source/Particles/MultiParticleContainer.cpp | 10 +- Source/Particles/ParticleBoundaryBuffer.cpp | 200 +++++++++--------- .../Particles/PhysicalParticleContainer.cpp | 15 +- Source/Particles/WarpXParticleContainer.cpp | 9 +- Source/Utils/WarpXAlgorithmSelection.cpp | 2 +- Source/WarpX.H | 4 +- Source/WarpX.cpp | 155 +++++++++----- Source/ablastr/fields/PoissonSolver.H | 180 +++++++++------- Source/ablastr/fields/VectorPoissonSolver.H | 47 ++-- 32 files changed, 787 insertions(+), 609 deletions(-) create mode 100644 Source/EmbeddedBoundary/Enabled.H create mode 100644 Source/EmbeddedBoundary/Enabled.cpp diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 5f97730e880..d2830edb46a 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -490,13 +490,18 @@ Additional PML parameters .. _running-cpp-parameters-eb: Embedded Boundary Conditions ----------------------------- +--------------------------- * ``warpx.eb_implicit_function`` (`string`) A function of `x`, `y`, `z` that defines the surface of the embedded boundary. That surface lies where the function value is 0 ; the physics simulation area is where the function value is negative ; - the interior of the embeddded boundary is where the function value is positive. + the interior of the embedded boundary is where the function value is positive. + +* ``eb2.geom_type = stl`` (`string`, default: empty) and ``eb2.stl_file`` (`string` with a filepath to a STL file) + Alternatively to defining an embedded boundary via an implicit function, one can load + a computer-aided design (CAD) file in the `STL file format `__. + `See the AMReX documentation for more details `__. 
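+
+  For example (the file name ``sphere.stl`` below is only a placeholder for the user's CAD file):
+
+  .. code-block:: text
+
+     eb2.geom_type = stl
+     eb2.stl_file = sphere.stl
+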
* ``warpx.eb_potential(x,y,z,t)`` (`string`) Gives the value of the electric potential at the surface of the embedded boundary, diff --git a/Source/BoundaryConditions/PML.H b/Source/BoundaryConditions/PML.H index 42a96f3628a..78a69cc9e6e 100644 --- a/Source/BoundaryConditions/PML.H +++ b/Source/BoundaryConditions/PML.H @@ -146,6 +146,7 @@ public: bool do_pml_dive_cleaning, bool do_pml_divb_cleaning, const amrex::IntVect& fill_guards_fields, const amrex::IntVect& fill_guards_current, + bool eb_enabled, int max_guard_EB, amrex::Real v_sigma_sb, amrex::IntVect do_pml_Lo = amrex::IntVect::TheUnitVector(), amrex::IntVect do_pml_Hi = amrex::IntVect::TheUnitVector()); diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index 25b34818dd1..8fc292d2b78 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -554,6 +554,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const bool do_pml_dive_cleaning, const bool do_pml_divb_cleaning, const amrex::IntVect& fill_guards_fields, const amrex::IntVect& fill_guards_current, + bool eb_enabled, int max_guard_EB, const amrex::Real v_sigma_sb, const amrex::IntVect do_pml_Lo, const amrex::IntVect do_pml_Hi) : m_dive_cleaning(do_pml_dive_cleaning), @@ -563,6 +564,10 @@ PML::PML (const int lev, const BoxArray& grid_ba, m_geom(geom), m_cgeom(cgeom) { +#ifndef AMREX_USE_EB + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!eb_enabled, "PML: eb_enabled is true but was not compiled in."); +#endif + // When `do_pml_in_domain` is true, the PML overlap with the last `ncell` of the physical domain or fine patch(es) // (instead of extending `ncell` outside of the physical domain or fine patch(es)) // In order to implement this, we define a new reduced Box Array ensuring that it does not @@ -669,9 +674,11 @@ PML::PML (const int lev, const BoxArray& grid_ba, } #ifdef AMREX_USE_EB - pml_field_factory = amrex::makeEBFabFactory(*geom, ba, dm, - {max_guard_EB, max_guard_EB, max_guard_EB}, - amrex::EBSupport::full); + if (eb_enabled) { + pml_field_factory = amrex::makeEBFabFactory(*geom, ba, dm, + {max_guard_EB, max_guard_EB, max_guard_EB}, + amrex::EBSupport::full); + } #else amrex::ignore_unused(max_guard_EB); pml_field_factory = std::make_unique(); @@ -703,20 +710,22 @@ PML::PML (const int lev, const BoxArray& grid_ba, WarpX::AllocInitMultiFab(pml_j_fp[2], ba_jz, dm, 1, ngb, lev, "pml_j_fp[z]", 0.0_rt); #ifdef AMREX_USE_EB - const amrex::IntVect max_guard_EB_vect = amrex::IntVect(max_guard_EB); - WarpX::AllocInitMultiFab(pml_edge_lengths[0], ba_Ex, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[x]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_edge_lengths[1], ba_Ey, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[y]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_edge_lengths[2], ba_Ez, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[z]", 0.0_rt); + if (eb_enabled) { + const amrex::IntVect max_guard_EB_vect = amrex::IntVect(max_guard_EB); + WarpX::AllocInitMultiFab(pml_edge_lengths[0], ba_Ex, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[x]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_edge_lengths[1], ba_Ey, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[y]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_edge_lengths[2], ba_Ez, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[z]", 0.0_rt); - if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee || - WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::CKC || - 
WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee || + WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::CKC || + WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { - auto const eb_fact = fieldEBFactory(); + auto const eb_fact = fieldEBFactory(); - WarpX::ComputeEdgeLengths(pml_edge_lengths, eb_fact); - WarpX::ScaleEdges(pml_edge_lengths, WarpX::CellSize(lev)); + WarpX::ComputeEdgeLengths(pml_edge_lengths, eb_fact); + WarpX::ScaleEdges(pml_edge_lengths, WarpX::CellSize(lev)); + } } #endif diff --git a/Source/BoundaryConditions/WarpXEvolvePML.cpp b/Source/BoundaryConditions/WarpXEvolvePML.cpp index af721d70b6d..65175396920 100644 --- a/Source/BoundaryConditions/WarpXEvolvePML.cpp +++ b/Source/BoundaryConditions/WarpXEvolvePML.cpp @@ -269,13 +269,16 @@ WarpX::DampJPML (int lev, PatchType patch_type) const Real* sigma_star_cumsum_fac_j_z = sigba[mfi].sigma_star_cumsum_fac[1].data(); #endif -#ifdef AMREX_USE_EB - const auto& pml_edge_lenghts = pml[lev]->Get_edge_lengths(); - - auto const& pml_lxfab = pml_edge_lenghts[0]->array(mfi); - auto const& pml_lyfab = pml_edge_lenghts[1]->array(mfi); - auto const& pml_lzfab = pml_edge_lenghts[2]->array(mfi); -#endif + amrex::Array4 pml_lxfab, pml_lyfab, pml_lzfab; + if (m_eb_enabled) { + const auto &pml_edge_lenghts = pml[lev]->Get_edge_lengths(); + + pml_lxfab = pml_edge_lenghts[0]->array(mfi); + pml_lyfab = pml_edge_lenghts[1]->array(mfi); + pml_lzfab = pml_edge_lenghts[2]->array(mfi); + } else { + amrex::ignore_unused(pml_lxfab, pml_lyfab, pml_lzfab); + } const Box& tjx = mfi.tilebox( pml_j[0]->ixType().toIntVect() ); const Box& tjy = mfi.tilebox( pml_j[1]->ixType().toIntVect() ); @@ -299,29 +302,24 @@ WarpX::DampJPML (int lev, PatchType patch_type) int const zs_lo = sigba[mfi].sigma_star_cumsum_fac[1].lo(); #endif + bool const eb_enabled = m_eb_enabled; amrex::ParallelFor( tjx, tjy, tjz, [=] AMREX_GPU_DEVICE (int i, int j, int k) { -#ifdef AMREX_USE_EB - if(pml_lxfab(i, j, k) <= 0) return; -#endif + if (eb_enabled && pml_lxfab(i, j, k) <= 0) return; damp_jx_pml(i, j, k, pml_jxfab, sigma_star_cumsum_fac_j_x, sigma_cumsum_fac_j_y, sigma_cumsum_fac_j_z, xs_lo,y_lo, z_lo); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { -#ifdef AMREX_USE_EB - if(pml_lyfab(i, j, k) <= 0) return; -#endif + if (eb_enabled && pml_lyfab(i, j, k) <= 0) return; damp_jy_pml(i, j, k, pml_jyfab, sigma_cumsum_fac_j_x, sigma_star_cumsum_fac_j_y, sigma_cumsum_fac_j_z, x_lo,ys_lo, z_lo); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { -#ifdef AMREX_USE_EB - if(pml_lzfab(i, j, k)<=0) return; -#endif + if (eb_enabled && pml_lzfab(i, j, k)<=0) return; damp_jz_pml(i, j, k, pml_jzfab, sigma_cumsum_fac_j_x, sigma_cumsum_fac_j_y, sigma_star_cumsum_fac_j_z, diff --git a/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp b/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp index da1e5fdcc00..3757082ab4d 100644 --- a/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp +++ b/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp @@ -6,6 +6,7 @@ */ #include "BoundaryScrapingDiagnostics.H" +#include "EmbeddedBoundary/Enabled.H" #include "ComputeDiagFunctors/ComputeDiagFunctor.H" #include "Diagnostics/Diagnostics.H" #include "Diagnostics/FlushFormats/FlushFormat.H" @@ -39,11 +40,11 @@ BoundaryScrapingDiagnostics::ReadParameters () // num_buffers corresponds to the number of boundaries // (upper/lower domain boundary in each dimension) - // + the EB boundary if available 
m_num_buffers = AMREX_SPACEDIM*2; -#ifdef AMREX_USE_EB - m_num_buffers += 1; -#endif + + // + the EB boundary if available + bool const eb_enabled = EB::enabled(); + if (eb_enabled) { m_num_buffers += 1; } // Do a few checks #ifndef WARPX_USE_OPENPMD diff --git a/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp b/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp index 2991831420e..f899464c3b7 100644 --- a/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp +++ b/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp @@ -8,6 +8,7 @@ #include "ChargeOnEB.H" #include "Diagnostics/ReducedDiags/ReducedDiags.H" +#include "EmbeddedBoundary/Enabled.H" #include "FieldSolver/Fields.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" @@ -24,11 +25,13 @@ #include #include +#include #include using namespace amrex; using namespace warpx::fields; + // constructor ChargeOnEB::ChargeOnEB (const std::string& rd_name) : ReducedDiags{rd_name} @@ -44,6 +47,10 @@ ChargeOnEB::ChargeOnEB (const std::string& rd_name) "ChargeOnEB reduced diagnostics only works when compiling with EB support"); #endif + if (!EB::enabled()) { + throw std::runtime_error("ChargeOnEB reduced diagnostics only works when EBs are enabled at runtime"); + } + // resize data array m_data.resize(1, 0.0_rt); @@ -87,6 +94,9 @@ void ChargeOnEB::ComputeDiags (const int step) // Judge whether the diags should be done if (!m_intervals.contains(step+1)) { return; } + if (!EB::enabled()) { + throw std::runtime_error("ComputeDiags only works when EBs are enabled at runtime"); + } #if ((defined WARPX_DIM_3D) && (defined AMREX_USE_EB)) // get a reference to WarpX instance auto & warpx = WarpX::GetInstance(); diff --git a/Source/Diagnostics/WarpXIO.cpp b/Source/Diagnostics/WarpXIO.cpp index 91409bc294d..57b4b22e2ae 100644 --- a/Source/Diagnostics/WarpXIO.cpp +++ b/Source/Diagnostics/WarpXIO.cpp @@ -393,7 +393,7 @@ WarpX::InitFromCheckpoint () } } - InitializeEBGridData(maxLevel()); + if (m_eb_enabled) { InitializeEBGridData(maxLevel()); } // Initialize particles mypc->AllocData(); diff --git a/Source/EmbeddedBoundary/CMakeLists.txt b/Source/EmbeddedBoundary/CMakeLists.txt index 3fa0ea0228f..d70d157eb2e 100644 --- a/Source/EmbeddedBoundary/CMakeLists.txt +++ b/Source/EmbeddedBoundary/CMakeLists.txt @@ -2,6 +2,7 @@ foreach(D IN LISTS WarpX_DIMS) warpx_set_suffix_dims(SD ${D}) target_sources(lib_${SD} PRIVATE + Enabled.cpp WarpXInitEB.cpp WarpXFaceExtensions.cpp WarpXFaceInfoBox.H diff --git a/Source/EmbeddedBoundary/DistanceToEB.H b/Source/EmbeddedBoundary/DistanceToEB.H index 7ee47c1172c..72dfde7d3ee 100644 --- a/Source/EmbeddedBoundary/DistanceToEB.H +++ b/Source/EmbeddedBoundary/DistanceToEB.H @@ -14,8 +14,6 @@ #include #include -#ifdef AMREX_USE_EB - namespace DistanceToEB { @@ -120,13 +118,11 @@ amrex::RealVect interp_normal (int i, int j, int k, const amrex::Real W[AMREX_SP #else amrex::ignore_unused(i, j, k, ic, jc, kc, W, Wc, phi, dxi); - amrex::RealVect normal{0.0, 0.0}; + amrex::RealVect normal({0.0}); WARPX_ABORT_WITH_MESSAGE("Error: interp_distance not yet implemented in 1D"); #endif return normal; } } - -#endif // AMREX_USE_EB #endif // WARPX_DISTANCETOEB_H_ diff --git a/Source/EmbeddedBoundary/Enabled.H b/Source/EmbeddedBoundary/Enabled.H new file mode 100644 index 00000000000..90ea5f35101 --- /dev/null +++ b/Source/EmbeddedBoundary/Enabled.H @@ -0,0 +1,18 @@ +/* Copyright 2024 Axel Huebl + * + * This file is part of WarpX. 
+ * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_EB_ENABLED_H_ +#define WARPX_EB_ENABLED_H_ + +#include + +namespace EB +{ + /** Are embedded boundaries enabled? */ + bool enabled (); + +} // namespace EB +#endif // WARPX_EB_ENABLED_H_ diff --git a/Source/EmbeddedBoundary/Enabled.cpp b/Source/EmbeddedBoundary/Enabled.cpp new file mode 100644 index 00000000000..2f5e92cdeab --- /dev/null +++ b/Source/EmbeddedBoundary/Enabled.cpp @@ -0,0 +1,45 @@ +/* Copyright 2024 Axel Huebl + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#include "Enabled.H" + +#ifdef AMREX_USE_EB +#include +#endif +#if defined(AMREX_USE_EB) && defined(WARPX_DIM_RZ) +#include +#endif + + +namespace EB +{ + bool enabled () + { +#ifndef AMREX_USE_EB + return false; +#else + amrex::ParmParse const pp_warpx("warpx"); + amrex::ParmParse const pp_eb2("eb2"); + + // test various runtime options to enable EBs + std::string eb_implicit_function; + bool eb_enabled = pp_warpx.query("eb_implicit_function", eb_implicit_function); + + // https://amrex-codes.github.io/amrex/docs_html/EB.html + std::string eb_stl; + eb_enabled |= pp_eb2.query("geom_type", eb_stl); + +#if defined(WARPX_DIM_RZ) + if (eb_enabled) { + throw std::runtime_error("RZ Geometry does not yet support EBs, but EBs are enabled in runtime inputs."); + } +#endif + + return eb_enabled; +#endif + } + +} // namespace EB diff --git a/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp b/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp index 21c13f23845..d624e7b4fe7 100644 --- a/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp +++ b/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp @@ -6,6 +6,7 @@ */ #include "WarpXFaceInfoBox.H" +#include "EmbeddedBoundary/Enabled.H" #include "Utils/TextMsg.H" #include "WarpX.H" @@ -163,43 +164,49 @@ ComputeSStab(const int i, const int j, const int k, amrex::Array1D -WarpX::CountExtFaces() { +WarpX::CountExtFaces () { amrex::Array1D sums{0, 0, 0}; #ifdef AMREX_USE_EB + if (EB::enabled()) { #ifndef WARPX_DIM_RZ #ifdef WARPX_DIM_XZ - // In 2D we change the extrema of the for loop so that we only have the case idim=1 - for(int idim = 1; idim < AMREX_SPACEDIM; ++idim) { + // In 2D we change the extrema of the for loop so that we only have the case idim=1 + for(int idim = 1; idim < AMREX_SPACEDIM; ++idim) { #elif defined(WARPX_DIM_3D) - for(int idim = 0; idim < AMREX_SPACEDIM; ++idim) { + for(int idim = 0; idim < AMREX_SPACEDIM; ++idim) { #else - WARPX_ABORT_WITH_MESSAGE( - "CountExtFaces: Only implemented in 2D3V and 3D3V"); + WARPX_ABORT_WITH_MESSAGE( + "CountExtFaces: Only implemented in 2D3V and 3D3V"); #endif - amrex::ReduceOps reduce_ops; - amrex::ReduceData reduce_data(reduce_ops); - for (amrex::MFIter mfi(*m_flag_ext_face[maxLevel()][idim]); mfi.isValid(); ++mfi) { - amrex::Box const &box = mfi.validbox(); - auto const &flag_ext_face = m_flag_ext_face[maxLevel()][idim]->array(mfi); - reduce_ops.eval(box, reduce_data, - [=] AMREX_GPU_DEVICE(int i, int j, int k) -> amrex::GpuTuple { - return flag_ext_face(i, j, k); - }); - } + amrex::ReduceOps reduce_ops; + amrex::ReduceData reduce_data(reduce_ops); + for (amrex::MFIter mfi(*m_flag_ext_face[maxLevel()][idim]); mfi.isValid(); ++mfi) { + amrex::Box const &box = mfi.validbox(); + auto const &flag_ext_face = m_flag_ext_face[maxLevel()][idim]->array(mfi); + reduce_ops.eval(box, reduce_data, + [=] AMREX_GPU_DEVICE(int i, int j, int k) -> amrex::GpuTuple { + return flag_ext_face(i, j, k); + }); + } - auto r = reduce_data.value(); - sums(idim) = amrex::get<0>(r); - } + 
auto r = reduce_data.value(); + sums(idim) = amrex::get<0>(r); + } - amrex::ParallelDescriptor::ReduceIntSum(&(sums(0)), AMREX_SPACEDIM); + amrex::ParallelDescriptor::ReduceIntSum(&(sums(0)), AMREX_SPACEDIM); #endif + } #endif return sums; } void -WarpX::ComputeFaceExtensions(){ +WarpX::ComputeFaceExtensions () +{ + if (!EB::enabled()) { + throw std::runtime_error("ComputeFaceExtensions only works when EBs are enabled at runtime"); + } #ifdef AMREX_USE_EB amrex::Array1D N_ext_faces = CountExtFaces(); ablastr::warn_manager::WMRecordWarning("Embedded Boundary", @@ -421,7 +428,11 @@ ComputeNBorrowEightFacesExtension(const amrex::Dim3 cell, const amrex::Real S_ex void -WarpX::ComputeOneWayExtensions() { +WarpX::ComputeOneWayExtensions () +{ + if (!EB::enabled()) { + throw std::runtime_error("ComputeOneWayExtensions only works when EBs are enabled at runtime"); + } #ifdef AMREX_USE_EB #ifndef WARPX_DIM_RZ auto const eb_fact = fieldEBFactory(maxLevel()); @@ -545,7 +556,11 @@ WarpX::ComputeOneWayExtensions() { void -WarpX::ComputeEightWaysExtensions() { +WarpX::ComputeEightWaysExtensions () +{ + if (!EB::enabled()) { + throw std::runtime_error("ComputeEightWaysExtensions only works when EBs are enabled at runtime"); + } #ifdef AMREX_USE_EB #ifndef WARPX_DIM_RZ auto const &cell_size = CellSize(maxLevel()); @@ -703,7 +718,11 @@ WarpX::ComputeEightWaysExtensions() { } void -WarpX::ApplyBCKCorrection(const int idim) { +WarpX::ApplyBCKCorrection (const int idim) +{ + if (!EB::enabled()) { + throw std::runtime_error("ApplyBCKCorrection only works when EBs are enabled at runtime"); + } #if defined(AMREX_USE_EB) and !defined(WARPX_DIM_RZ) const std::array &cell_size = CellSize(maxLevel()); @@ -736,7 +755,8 @@ WarpX::ApplyBCKCorrection(const int idim) { } void -WarpX::ShrinkBorrowing() { +WarpX::ShrinkBorrowing () +{ for(int idim = 0; idim < AMREX_SPACEDIM; idim++) { for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) { auto &borrowing = (*m_borrowing[maxLevel()][idim])[mfi]; diff --git a/Source/EmbeddedBoundary/WarpXInitEB.cpp b/Source/EmbeddedBoundary/WarpXInitEB.cpp index 655bec0dc29..a26db49f4a4 100644 --- a/Source/EmbeddedBoundary/WarpXInitEB.cpp +++ b/Source/EmbeddedBoundary/WarpXInitEB.cpp @@ -7,6 +7,7 @@ #include "WarpX.H" +#include "EmbeddedBoundary/Enabled.H" #ifdef AMREX_USE_EB # include "Utils/Parser/ParserUtils.H" # include "Utils/TextMsg.H" @@ -80,6 +81,9 @@ namespace { void WarpX::InitEB () { + if (!EB::enabled()) { + throw std::runtime_error("InitEB only works when EBs are enabled at runtime"); + } #ifdef AMREX_USE_EB BL_PROFILE("InitEB"); @@ -106,7 +110,6 @@ WarpX::InitEB () // See the comment above on amrex::EB2::Build for the hard-wired number 20. 
amrex::EB2::Build(Geom(maxLevel()), maxLevel(), maxLevel()+20); } - #endif } @@ -397,7 +400,11 @@ WarpX::MarkCells(){ #endif void -WarpX::ComputeDistanceToEB () { +WarpX::ComputeDistanceToEB () +{ + if (!EB::enabled()) { + throw std::runtime_error("ComputeDistanceToEB only works when EBs are enabled at runtime"); + } #ifdef AMREX_USE_EB BL_PROFILE("ComputeDistanceToEB"); const amrex::EB2::IndexSpace& eb_is = amrex::EB2::IndexSpace::top(); diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 51fcb5c262c..c3acdfb9fe1 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -489,7 +489,7 @@ void WarpX::HandleParticlesAtBoundaries (int step, amrex::Real cur_time, int num mypc->ContinuousFluxInjection(cur_time, dt[0]); mypc->ApplyBoundaryConditions(); - m_particle_boundary_buffer->gatherParticlesFromDomainBoundaries(*mypc); + if (m_eb_enabled) { m_particle_boundary_buffer->gatherParticlesFromDomainBoundaries(*mypc); } // Non-Maxwell solver: particles can move by an arbitrary number of cells if( electromagnetic_solver_id == ElectromagneticSolverAlgo::None || @@ -518,11 +518,12 @@ void WarpX::HandleParticlesAtBoundaries (int step, amrex::Real cur_time, int num } // interact the particles with EB walls (if present) -#ifdef AMREX_USE_EB - mypc->ScrapeParticlesAtEB(amrex::GetVecOfConstPtrs(m_distance_to_eb)); - m_particle_boundary_buffer->gatherParticlesFromEmbeddedBoundaries(*mypc, amrex::GetVecOfConstPtrs(m_distance_to_eb)); - mypc->deleteInvalidParticles(); -#endif + if (m_eb_enabled) { + mypc->ScrapeParticlesAtEB(amrex::GetVecOfConstPtrs(m_distance_to_eb)); + m_particle_boundary_buffer->gatherParticlesFromEmbeddedBoundaries( + *mypc, amrex::GetVecOfConstPtrs(m_distance_to_eb)); + mypc->deleteInvalidParticles(); + } if (sort_intervals.contains(step+1)) { if (verbose) { diff --git a/Source/FieldSolver/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolver.cpp index 5bb50ada278..eb514e69b8a 100644 --- a/Source/FieldSolver/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolver.cpp @@ -277,11 +277,10 @@ WarpX::AddSpaceChargeFieldLabFrame () // Compute the electric field. Note that if an EB is used the electric // field will be calculated in the computePhi call. 
-#ifndef AMREX_USE_EB - computeE( Efield_fp, phi_fp, beta ); -#else - if ( IsPythonCallbackInstalled("poissonsolver") ) computeE( Efield_fp, phi_fp, beta ); -#endif + if (!m_eb_enabled) { computeE( Efield_fp, phi_fp, beta ); } + else { + if (IsPythonCallbackInstalled("poissonsolver")) computeE(Efield_fp, phi_fp, beta); + } // Compute the magnetic field computeB( Bfield_fp, phi_fp, beta ); @@ -311,64 +310,66 @@ WarpX::computePhi (const amrex::Vector >& rho, Real const required_precision, Real absolute_tolerance, int const max_iters, - int const verbosity) const -{ + int const verbosity) const { // create a vector to our fields, sorted by level - amrex::Vector sorted_rho; - amrex::Vector sorted_phi; + amrex::Vector sorted_rho; + amrex::Vector sorted_phi; for (int lev = 0; lev <= finest_level; ++lev) { sorted_rho.emplace_back(rho[lev].get()); sorted_phi.emplace_back(phi[lev].get()); } -#if defined(AMREX_USE_EB) - std::optional post_phi_calculation; - - // EB: use AMReX to directly calculate the electric field since with EB's the - // simple finite difference scheme in WarpX::computeE sometimes fails - if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || - electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) +#ifdef AMREX_USE_EB + // TODO: double check no overhead occurs on "m_eb_enabled == false" + std::optional > eb_farray_box_factory; +#else + std::optional > eb_farray_box_factory; +#endif + if (m_eb_enabled) { - // TODO: maybe make this a helper function or pass Efield_fp directly - amrex::Vector< - amrex::Array - > e_field; - for (int lev = 0; lev <= finest_level; ++lev) { - e_field.push_back( + // EB: use AMReX to directly calculate the electric field since with EB's the + // simple finite difference scheme in WarpX::computeE sometimes fails + if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || + electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) + { + // TODO: maybe make this a helper function or pass Efield_fp directly + amrex::Vector< + amrex::Array + > e_field; + for (int lev = 0; lev <= finest_level; ++lev) { + e_field.push_back( # if defined(WARPX_DIM_1D_Z) - amrex::Array{ - getFieldPointer(FieldType::Efield_fp, lev, 2) - } + amrex::Array{ + getFieldPointer(FieldType::Efield_fp, lev, 2) + } # elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::Array{ - getFieldPointer(FieldType::Efield_fp, lev, 0), - getFieldPointer(FieldType::Efield_fp, lev, 2) - } + amrex::Array{ + getFieldPointer(FieldType::Efield_fp, lev, 0), + getFieldPointer(FieldType::Efield_fp, lev, 2) + } # elif defined(WARPX_DIM_3D) - amrex::Array{ - getFieldPointer(FieldType::Efield_fp, lev, 0), - getFieldPointer(FieldType::Efield_fp, lev, 1), - getFieldPointer(FieldType::Efield_fp, lev, 2) - } + amrex::Array{ + getFieldPointer(FieldType::Efield_fp, lev, 0), + getFieldPointer(FieldType::Efield_fp, lev, 1), + getFieldPointer(FieldType::Efield_fp, lev, 2) + } # endif - ); + ); + } + post_phi_calculation = ElectrostaticSolver::EBCalcEfromPhiPerLevel(e_field); } - post_phi_calculation = ElectrostaticSolver::EBCalcEfromPhiPerLevel(e_field); - } - std::optional > eb_farray_box_factory; - amrex::Vector< - amrex::EBFArrayBoxFactory const * - > factories; - for (int lev = 0; lev <= finest_level; ++lev) { - factories.push_back(&WarpX::fieldEBFactory(lev)); - } - eb_farray_box_factory = factories; -#else - const std::optional post_phi_calculation; - const std::optional > eb_farray_box_factory; +#ifdef AMREX_USE_EB + amrex::Vector< + 
amrex::EBFArrayBoxFactory const * + > factories; + for (int lev = 0; lev <= finest_level; ++lev) { + factories.push_back(&WarpX::fieldEBFactory(lev)); + } + eb_farray_box_factory = factories; #endif + } bool const is_solver_multigrid = WarpX::poisson_solver_id != PoissonSolverAlgo::IntegratedGreenFunction; @@ -386,6 +387,7 @@ WarpX::computePhi (const amrex::Vector >& rho, this->grids, this->m_poisson_boundary_handler, is_solver_multigrid, + m_eb_enabled, WarpX::do_single_precision_comms, this->ref_ratio, post_phi_calculation, diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp index fbc1397b413..8be305b76ab 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp @@ -87,12 +87,9 @@ void FiniteDifferenceSolver::EvolveB ( } else if (m_fdtd_algo == ElectromagneticSolverAlgo::CKC) { EvolveBCartesian ( Bfield, Efield, Gfield, lev, dt ); -#ifdef AMREX_USE_EB } else if (m_fdtd_algo == ElectromagneticSolverAlgo::ECT) { - EvolveBCartesianECT(Bfield, face_areas, area_mod, ECTRhofield, Venl, flag_info_cell, borrowing, lev, dt); -#endif #endif } else { WARPX_ABORT_WITH_MESSAGE("EvolveB: Unknown algorithm"); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp index 5f707fbc927..710508d6946 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp @@ -13,6 +13,7 @@ #else # include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H" #endif +#include "EmbeddedBoundary/Enabled.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" @@ -55,13 +56,9 @@ void FiniteDifferenceSolver::EvolveE ( std::unique_ptr const& Ffield, int lev, amrex::Real const dt ) { -#ifdef AMREX_USE_EB if (m_fdtd_algo != ElectromagneticSolverAlgo::ECT) { amrex::ignore_unused(face_areas, ECTRhofield); } -#else - amrex::ignore_unused(face_areas, ECTRhofield); -#endif // Select algorithm (The choice of algorithm is a runtime option, // but we compile code for each algorithm, using templates) @@ -129,11 +126,13 @@ void FiniteDifferenceSolver::EvolveECartesian ( Array4 const& jy = Jfield[1]->array(mfi); Array4 const& jz = Jfield[2]->array(mfi); -#ifdef AMREX_USE_EB - amrex::Array4 const& lx = edge_lengths[0]->array(mfi); - amrex::Array4 const& ly = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); -#endif + const bool eb_enabled = EB::enabled(); + amrex::Array4 lx, ly, lz; + if (eb_enabled) { + lx = edge_lengths[0]->array(mfi); + ly = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_x = m_stencil_coefs_x.dataPtr(); @@ -152,10 +151,9 @@ void FiniteDifferenceSolver::EvolveECartesian ( amrex::ParallelFor(tex, tey, tez, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries - if (lx(i, j, k) <= 0) return; -#endif + if (eb_enabled && lx(i, j, k) <= 0) return; + Ex(i, j, k) += c2 * dt * ( - T_Algo::DownwardDz(By, coefs_z, n_coefs_z, i, j, k) + T_Algo::DownwardDy(Bz, coefs_y, n_coefs_y, i, j, k) @@ -163,16 +161,15 @@ void FiniteDifferenceSolver::EvolveECartesian ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by 
embedded boundaries #ifdef WARPX_DIM_3D - if (ly(i,j,k) <= 0) return; + if (eb_enabled && ly(i,j,k) <= 0) return; #elif defined(WARPX_DIM_XZ) //In XZ Ey is associated with a mesh node, so we need to check if the mesh node is covered amrex::ignore_unused(ly); - if (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0) return; -#endif + if (eb_enabled && (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0)) return; #endif + Ey(i, j, k) += c2 * dt * ( - T_Algo::DownwardDx(Bz, coefs_x, n_coefs_x, i, j, k) + T_Algo::DownwardDz(Bx, coefs_z, n_coefs_z, i, j, k) @@ -180,10 +177,8 @@ void FiniteDifferenceSolver::EvolveECartesian ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries - if (lz(i,j,k) <= 0) return; -#endif + if (eb_enabled && lz(i,j,k) <= 0) return; Ez(i, j, k) += c2 * dt * ( - T_Algo::DownwardDy(Bx, coefs_y, n_coefs_y, i, j, k) + T_Algo::DownwardDx(By, coefs_x, n_coefs_x, i, j, k) @@ -265,10 +260,12 @@ void FiniteDifferenceSolver::EvolveECylindrical ( Array4 const& jt = Jfield[1]->array(mfi); Array4 const& jz = Jfield[2]->array(mfi); -#ifdef AMREX_USE_EB - amrex::Array4 const& lr = edge_lengths[0]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); -#endif + const bool eb_enabled = EB::enabled(); + amrex::Array4 lr, lz; + if (eb_enabled) { + lr = edge_lengths[0]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_r = m_stencil_coefs_r.dataPtr(); @@ -292,10 +289,9 @@ void FiniteDifferenceSolver::EvolveECylindrical ( amrex::ParallelFor(ter, tet, tez, [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries - if (lr(i, j, 0) <= 0) return; -#endif + if (eb_enabled && lr(i, j, 0) <= 0) return; + Real const r = rmin + (i + 0.5_rt)*dr; // r on cell-centered point (Er is cell-centered in r) Er(i, j, 0, 0) += c2 * dt*( - T_Algo::DownwardDz(Bt, coefs_z, n_coefs_z, i, j, 0, 0) @@ -313,11 +309,10 @@ void FiniteDifferenceSolver::EvolveECylindrical ( }, [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries // The Et field is at a node, so we need to check if the node is covered - if (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0) return; -#endif + if (eb_enabled && (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0)) return; + Real const r = rmin + i*dr; // r on a nodal grid (Et is nodal in r) if (r != 0) { // Off-axis, regular Maxwell equations Et(i, j, 0, 0) += c2 * dt*( @@ -359,10 +354,9 @@ void FiniteDifferenceSolver::EvolveECylindrical ( }, [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries - if (lz(i, j, 0) <= 0) return; -#endif + if (eb_enabled && lz(i, j, 0) <= 0) return; + Real const r = rmin + i*dr; // r on a nodal grid (Ez is nodal in r) if (r != 0) { // Off-axis, regular Maxwell equations Ez(i, j, 0, 0) += c2 * dt*( diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 8979036fbea..b2b3a3d7fc4 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ 
b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -10,6 +10,7 @@ #include "HybridPICModel.H" #include "FieldSolver/Fields.H" +#include "EmbeddedBoundary/Enabled.H" using namespace amrex; using namespace warpx::fields; @@ -296,23 +297,20 @@ void HybridPICModel::GetCurrentExternal ( auto const& mfyfab = mfy->array(mfi); auto const& mfzfab = mfz->array(mfi); -#ifdef AMREX_USE_EB - amrex::Array4 const& lx = edge_lengths[0]->array(mfi); - amrex::Array4 const& ly = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ignore_unused(ly); -#endif -#else - amrex::ignore_unused(edge_lengths); -#endif + + const bool eb_enabled = EB::enabled(); + amrex::Array4 lx, ly, lz; + if (eb_enabled) { + lx = edge_lengths[0]->array(mfi); + ly = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } amrex::ParallelFor (tbx, tby, tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k) { // skip if node is covered by an embedded boundary -#ifdef AMREX_USE_EB - if (lx(i, j, k) <= 0) return; -#endif + if (eb_enabled && lx(i, j, k) <= 0) return; + // Shift required in the x-, y-, or z- position // depending on the index type of the multifab #if defined(WARPX_DIM_1D_Z) @@ -339,9 +337,8 @@ void HybridPICModel::GetCurrentExternal ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { // skip if node is covered by an embedded boundary -#ifdef AMREX_USE_EB - if (ly(i, j, k) <= 0) return; -#endif + if (eb_enabled && ly(i, j, k) <= 0) return; + #if defined(WARPX_DIM_1D_Z) const amrex::Real x = 0._rt; const amrex::Real y = 0._rt; @@ -366,9 +363,8 @@ void HybridPICModel::GetCurrentExternal ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { // skip if node is covered by an embedded boundary -#ifdef AMREX_USE_EB - if (lz(i, j, k) <= 0) return; -#endif + if (eb_enabled && lz(i, j, k) <= 0) return; + #if defined(WARPX_DIM_1D_Z) const amrex::Real x = 0._rt; const amrex::Real y = 0._rt; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 456c542a534..373be641b41 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -9,6 +9,7 @@ #include "FiniteDifferenceSolver.H" +#include "EmbeddedBoundary/Enabled.H" #ifdef WARPX_DIM_RZ # include "FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H" #else @@ -67,15 +68,14 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( // for the profiler amrex::LayoutData* cost = WarpX::getCosts(lev); -#ifndef AMREX_USE_EB - amrex::ignore_unused(edge_lengths); -#endif - // reset Jfield Jfield[0]->setVal(0); Jfield[1]->setVal(0); Jfield[2]->setVal(0); + // EB active? 
+ bool eb_enabled = EB::enabled(); + // Loop through the grids, and over the tiles within each grid #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) @@ -95,11 +95,13 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( Array4 const& Bt = Bfield[1]->array(mfi); Array4 const& Bz = Bfield[2]->array(mfi); -#ifdef AMREX_USE_EB - amrex::Array4 const& lr = edge_lengths[0]->array(mfi); - amrex::Array4 const& lt = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); -#endif + amrex::Array4 lr, lt, lz; + + if (eb_enabled) { + lr = edge_lengths[0]->array(mfi); + lt = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_r = m_stencil_coefs_r.dataPtr(); @@ -125,10 +127,8 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( // Jr calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // Skip if this cell is fully covered by embedded boundaries - if (lr(i, j, 0) <= 0) return; -#endif + if (eb_enabled && lr(i, j, 0) <= 0) return; // Mode m=0 Jr(i, j, 0, 0) = one_over_mu0 * ( - T_Algo::DownwardDz(Bt, coefs_z, n_coefs_z, i, j, 0, 0) @@ -151,11 +151,9 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( // Jt calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // In RZ Jt is associated with a mesh node, so we need to check if the mesh node is covered - amrex::ignore_unused(lt); - if (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0) return; -#endif + if (eb_enabled && (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0)) return; + // r on a nodal point (Jt is nodal in r) Real const r = rmin + i*dr; // Off-axis, regular curl @@ -199,10 +197,8 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( // Jz calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // Skip if this cell is fully covered by embedded boundaries - if (lz(i, j, 0) <= 0) return; -#endif + if (eb_enabled && lz(i, j, 0) <= 0) return; // r on a nodal point (Jz is nodal in r) Real const r = rmin + i*dr; // Off-axis, regular curl @@ -258,39 +254,38 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( // for the profiler amrex::LayoutData* cost = WarpX::getCosts(lev); -#ifndef AMREX_USE_EB - amrex::ignore_unused(edge_lengths); -#endif - // reset Jfield Jfield[0]->setVal(0); Jfield[1]->setVal(0); Jfield[2]->setVal(0); + // EB active? 
+ bool eb_enabled = EB::enabled(); + // Loop through the grids, and over the tiles within each grid #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif for ( MFIter mfi(*Jfield[0], TilingIfNotGPU()); mfi.isValid(); ++mfi ) { - if (cost && WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) - { + if (cost && WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) { amrex::Gpu::synchronize(); } auto wt = static_cast(amrex::second()); // Extract field data for this grid/tile - Array4 const& Jx = Jfield[0]->array(mfi); - Array4 const& Jy = Jfield[1]->array(mfi); - Array4 const& Jz = Jfield[2]->array(mfi); - Array4 const& Bx = Bfield[0]->const_array(mfi); - Array4 const& By = Bfield[1]->const_array(mfi); - Array4 const& Bz = Bfield[2]->const_array(mfi); - -#ifdef AMREX_USE_EB - amrex::Array4 const& lx = edge_lengths[0]->array(mfi); - amrex::Array4 const& ly = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); -#endif + Array4 const &Jx = Jfield[0]->array(mfi); + Array4 const &Jy = Jfield[1]->array(mfi); + Array4 const &Jz = Jfield[2]->array(mfi); + Array4 const &Bx = Bfield[0]->const_array(mfi); + Array4 const &By = Bfield[1]->const_array(mfi); + Array4 const &Bz = Bfield[2]->const_array(mfi); + + amrex::Array4 lx, ly, lz; + if (eb_enabled) { + lx = edge_lengths[0]->array(mfi); + ly = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_x = m_stencil_coefs_x.dataPtr(); @@ -313,10 +308,9 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( // Jx calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip if this cell is fully covered by embedded boundaries - if (lx(i, j, k) <= 0) return; -#endif + if (eb_enabled && lx(i, j, k) <= 0) return; + Jx(i, j, k) = one_over_mu0 * ( - T_Algo::DownwardDz(By, coefs_z, n_coefs_z, i, j, k) + T_Algo::DownwardDy(Bz, coefs_y, n_coefs_y, i, j, k) @@ -325,15 +319,13 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( // Jy calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip if this cell is fully covered by embedded boundaries #ifdef WARPX_DIM_3D - if (ly(i,j,k) <= 0) return; + if (eb_enabled && ly(i,j,k) <= 0) return; #elif defined(WARPX_DIM_XZ) // In XZ Jy is associated with a mesh node, so we need to check if the mesh node is covered amrex::ignore_unused(ly); - if (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0) return; -#endif + if (eb_enabled && (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0)) return; #endif Jy(i, j, k) = one_over_mu0 * ( - T_Algo::DownwardDx(Bz, coefs_x, n_coefs_x, i, j, k) @@ -343,10 +335,9 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( // Jz calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip if this cell is fully covered by embedded boundaries - if (lz(i,j,k) <= 0) return; -#endif + if (eb_enabled && lz(i,j,k) <= 0) return; + Jz(i, j, k) = one_over_mu0 * ( - T_Algo::DownwardDy(Bx, coefs_y, n_coefs_y, i, j, k) + T_Algo::DownwardDx(By, coefs_x, n_coefs_x, i, j, k) @@ -415,10 +406,6 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( int lev, HybridPICModel const* hybrid_model, const bool include_resistivity_term ) { -#ifndef AMREX_USE_EB - amrex::ignore_unused(edge_lengths); -#endif - // Both steps below do not currently support m > 0 and should 
be // modified if such support wants to be added WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -428,6 +415,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // for the profiler amrex::LayoutData* cost = WarpX::getCosts(lev); + // EB active? + bool eb_enabled = EB::enabled(); + using namespace ablastr::coarsen::sample; // get hybrid model parameters @@ -561,11 +551,12 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& rho = rhofield->const_array(mfi); Array4 const& Pe = Pefield->array(mfi); -#ifdef AMREX_USE_EB - amrex::Array4 const& lr = edge_lengths[0]->array(mfi); - amrex::Array4 const& lt = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); -#endif + amrex::Array4 lr, lt, lz; + if (eb_enabled) { + lr = edge_lengths[0]->array(mfi); + lt = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_r = m_stencil_coefs_r.dataPtr(); @@ -586,10 +577,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Er calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // Skip if this cell is fully covered by embedded boundaries - if (lr(i, j, 0) <= 0) return; -#endif + if (eb_enabled && lr(i, j, 0) <= 0) return; + // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Er_stag, coarsen, i, j, 0, 0); @@ -627,11 +617,10 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Et calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // In RZ Et is associated with a mesh node, so we need to check if the mesh node is covered amrex::ignore_unused(lt); - if (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0) return; -#endif + if (eb_enabled && (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0)) return; + // r on a nodal grid (Et is nodal in r) Real const r = rmin + i*dr; // Mode m=0: // Ensure that Et remains 0 on axis @@ -672,10 +661,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Ez calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ -#ifdef AMREX_USE_EB // Skip field solve if this cell is fully covered by embedded boundaries - if (lz(i,j,0) <= 0) { return; } -#endif + if (eb_enabled && lz(i,j,0) <= 0) { return; } + // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, 0, 0); @@ -733,13 +721,12 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( int lev, HybridPICModel const* hybrid_model, const bool include_resistivity_term ) { -#ifndef AMREX_USE_EB - amrex::ignore_unused(edge_lengths); -#endif - // for the profiler amrex::LayoutData* cost = WarpX::getCosts(lev); + // EB active? 
+ bool eb_enabled = EB::enabled(); + using namespace ablastr::coarsen::sample; // get hybrid model parameters @@ -873,11 +860,12 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 const& rho = rhofield->const_array(mfi); Array4 const& Pe = Pefield->array(mfi); -#ifdef AMREX_USE_EB - amrex::Array4 const& lx = edge_lengths[0]->array(mfi); - amrex::Array4 const& ly = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); -#endif + amrex::Array4 lx, ly, lz; + if (eb_enabled) { + lx = edge_lengths[0]->array(mfi); + ly = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_x = m_stencil_coefs_x.dataPtr(); @@ -896,10 +884,9 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Ex calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip if this cell is fully covered by embedded boundaries - if (lx(i, j, k) <= 0) return; -#endif + if (eb_enabled && lx(i, j, k) <= 0) return; + // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ex_stag, coarsen, i, j, k, 0); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp index 3aee7697073..3d5a3badc9a 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp @@ -7,6 +7,7 @@ # include "FiniteDifferenceAlgorithms/CartesianCKCAlgorithm.H" # include "FiniteDifferenceAlgorithms/FieldAccessorFunctors.H" #endif +#include "EmbeddedBoundary/Enabled.H" #include "MacroscopicProperties/MacroscopicProperties.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" @@ -139,11 +140,13 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( Array4 const& jy = Jfield[1]->array(mfi); Array4 const& jz = Jfield[2]->array(mfi); -#ifdef AMREX_USE_EB - amrex::Array4 const& lx = edge_lengths[0]->array(mfi); - amrex::Array4 const& ly = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); -#endif + const bool eb_enabled = EB::enabled(); + amrex::Array4 lx, ly, lz; + if (eb_enabled) { + lx = edge_lengths[0]->array(mfi); + ly = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + } // material prop // amrex::Array4 const& sigma_arr = sigma_mf.array(mfi); @@ -174,10 +177,9 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( // Loop over the cells and update the fields amrex::ParallelFor(tex, tey, tez, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries - if (lx(i, j, k) <= 0) return; -#endif + if (eb_enabled && lx(i, j, k) <= 0) return; + // Interpolate conductivity, sigma, to Ex position on the grid amrex::Real const sigma_interp = ablastr::coarsen::sample::Interp(sigma_arr, sigma_stag, Ex_stag, macro_cr, i, j, k, scomp); @@ -193,15 +195,14 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB #ifdef WARPX_DIM_3D - if (ly(i,j,k) <= 0) return; + if (eb_enabled && ly(i,j,k) <= 0) return; #elif defined(WARPX_DIM_XZ) //In XZ Ey is associated with a mesh node, so we need to check if the mesh node is covered amrex::ignore_unused(ly); - if (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j, k)<=0 || lz(i, j-1, k)<=0) return; -#endif + if (eb_enabled && (lx(i, j, k)<=0 || 
lx(i-1, j, k)<=0 || lz(i, j, k)<=0 || lz(i, j-1, k)<=0)) return; #endif + // Interpolate conductivity, sigma, to Ey position on the grid amrex::Real const sigma_interp = ablastr::coarsen::sample::Interp(sigma_arr, sigma_stag, Ey_stag, macro_cr, i, j, k, scomp); @@ -218,10 +219,9 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries - if (lz(i,j,k) <= 0) return; -#endif + if (eb_enabled && lz(i,j,k) <= 0) return; + // Interpolate conductivity, sigma, to Ez position on the grid amrex::Real const sigma_interp = ablastr::coarsen::sample::Interp(sigma_arr, sigma_stag, Ez_stag, macro_cr, i, j, k, scomp); diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp index 26ac1ac96c8..d715e64cdaa 100644 --- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp +++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp @@ -184,6 +184,7 @@ WarpX::computeVectorPotential (const amrex::Vectordmap, this->grids, this->m_vector_poisson_boundary_handler, + m_eb_enabled, WarpX::do_single_precision_comms, this->ref_ratio, post_A_calculation, diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index e0c095918fa..e3099a43e11 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -604,6 +604,7 @@ WarpX::InitPML () psatd_solution_type, J_in_time, rho_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, amrex::IntVect(0), amrex::IntVect(0), + m_eb_enabled, guard_cells.ng_FieldSolver.max(), v_particle_pml, do_pml_Lo[0], do_pml_Hi[0]); @@ -643,6 +644,7 @@ WarpX::InitPML () do_moving_window, pml_has_particles, do_pml_in_domain, psatd_solution_type, J_in_time, rho_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, amrex::IntVect(0), amrex::IntVect(0), + m_eb_enabled, guard_cells.ng_FieldSolver.max(), v_particle_pml, do_pml_Lo[lev], do_pml_Hi[lev]); @@ -823,7 +825,7 @@ WarpX::InitLevelData (int lev, Real /*time*/) } #ifdef AMREX_USE_EB - InitializeEBGridData(lev); + if (m_eb_enabled) { InitializeEBGridData(lev); } #endif // if the input string for the B-field is "parse_b_ext_grid_function", @@ -896,11 +898,13 @@ WarpX::InitLevelData (int lev, Real /*time*/) lev, PatchType::fine); #ifdef AMREX_USE_EB - // We initialize ECTRhofield consistently with the Efield - if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { - m_fdtd_solver_fp[lev]->EvolveECTRho( - Efield_fp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], lev); + if (m_eb_enabled) { + // We initialize ECTRhofield consistently with the Efield + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { + m_fdtd_solver_fp[lev]->EvolveECTRho( + Efield_fp[lev], m_edge_lengths[lev], + m_face_areas[lev], ECTRhofield[lev], lev); + } } #endif @@ -929,11 +933,13 @@ WarpX::InitLevelData (int lev, Real /*time*/) 'E', lev, PatchType::coarse); #ifdef AMREX_USE_EB - if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { - // We initialize ECTRhofield consistently with the Efield - m_fdtd_solver_cp[lev]->EvolveECTRho(Efield_cp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], lev); + if (m_eb_enabled) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { + // We initialize ECTRhofield consistently with the Efield + 
m_fdtd_solver_cp[lev]->EvolveECTRho(Efield_cp[lev], m_edge_lengths[lev], + m_face_areas[lev], ECTRhofield[lev], lev); + } } #endif } @@ -957,7 +963,7 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( ParserExecutor<3> const& zfield_parser, std::array< std::unique_ptr, 3 > const& edge_lengths, std::array< std::unique_ptr, 3 > const& face_areas, - const char field, + [[maybe_unused]] const char field, const int lev, PatchType patch_type) { @@ -973,49 +979,48 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); const amrex::IntVect z_nodal_flag = mfz->ixType().toIntVect(); - for ( MFIter mfi(*mfx, TilingIfNotGPU()); mfi.isValid(); ++mfi) - { - const amrex::Box& tbx = mfi.tilebox( x_nodal_flag, mfx->nGrowVect() ); - const amrex::Box& tby = mfi.tilebox( y_nodal_flag, mfy->nGrowVect() ); - const amrex::Box& tbz = mfi.tilebox( z_nodal_flag, mfz->nGrowVect() ); - - auto const& mfxfab = mfx->array(mfi); - auto const& mfyfab = mfy->array(mfi); - auto const& mfzfab = mfz->array(mfi); - -#ifdef AMREX_USE_EB - amrex::Array4 const& lx = edge_lengths[0]->array(mfi); - amrex::Array4 const& ly = edge_lengths[1]->array(mfi); - amrex::Array4 const& lz = edge_lengths[2]->array(mfi); - amrex::Array4 const& Sx = face_areas[0]->array(mfi); - amrex::Array4 const& Sy = face_areas[1]->array(mfi); - amrex::Array4 const& Sz = face_areas[2]->array(mfi); - -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const amrex::Dim3 lx_lo = amrex::lbound(lx); - const amrex::Dim3 lx_hi = amrex::ubound(lx); - const amrex::Dim3 lz_lo = amrex::lbound(lz); - const amrex::Dim3 lz_hi = amrex::ubound(lz); -#endif + bool const eb_enabled = m_eb_enabled; + + for ( MFIter mfi(*mfx, TilingIfNotGPU()); mfi.isValid(); ++mfi) { + const amrex::Box &tbx = mfi.tilebox(x_nodal_flag, mfx->nGrowVect()); + const amrex::Box &tby = mfi.tilebox(y_nodal_flag, mfy->nGrowVect()); + const amrex::Box &tbz = mfi.tilebox(z_nodal_flag, mfz->nGrowVect()); + + auto const &mfxfab = mfx->array(mfi); + auto const &mfyfab = mfy->array(mfi); + auto const &mfzfab = mfz->array(mfi); + + amrex::Array4 lx, ly, lz, Sx, Sy, Sz; + if (eb_enabled) { + lx = edge_lengths[0]->array(mfi); + ly = edge_lengths[1]->array(mfi); + lz = edge_lengths[2]->array(mfi); + Sx = face_areas[0]->array(mfi); + Sy = face_areas[1]->array(mfi); + Sz = face_areas[2]->array(mfi); + } + amrex::Dim3 lx_lo, lx_hi, lz_lo, lz_hi; + if (eb_enabled) { #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ignore_unused(ly, Sx, Sz); -#elif defined(WARPX_DIM_1D_Z) - amrex::ignore_unused(lx, ly, lz, Sx, Sy, Sz); -#endif - -#else - amrex::ignore_unused(edge_lengths, face_areas, field); + lx_lo = amrex::lbound(lx); + lx_hi = amrex::ubound(lx); + lz_lo = amrex::lbound(lz); + lz_hi = amrex::ubound(lz); #endif + } else + { + amrex::ignore_unused(lx_lo, lx_hi, lz_lo, lz_hi); + } amrex::ParallelFor (tbx, tby, tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k) { #ifdef AMREX_USE_EB #ifdef WARPX_DIM_3D - if((field=='E' and lx(i, j, k)<=0) or (field=='B' and Sx(i, j, k)<=0)) return; + if(eb_enabled && ((field=='E' and lx(i, j, k)<=0) or (field=='B' and Sx(i, j, k)<=0))) return; #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) //In XZ and RZ Ex is associated with a x-edge, while Bx is associated with a z-edge - if((field=='E' and lx(i, j, k)<=0) or (field=='B' and lz(i, j, k)<=0)) return; + if(eb_enabled && ((field=='E' and lx(i, j, k)<=0) or (field=='B' and lz(i, j, k)<=0))) return; #endif #endif // Shift required in the x-, y-, or z- position 
@@ -1045,14 +1050,15 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( [=] AMREX_GPU_DEVICE (int i, int j, int k) { #ifdef AMREX_USE_EB #ifdef WARPX_DIM_3D - if((field=='E' and ly(i, j, k)<=0) or (field=='B' and Sy(i, j, k)<=0)) return; + if(eb_enabled && ((field=='E' and ly(i, j, k)<=0) or (field=='B' and Sy(i, j, k)<=0))) return; #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) //In XZ and RZ Ey is associated with a mesh node, so we need to check if the mesh node is covered - if((field=='E' and (lx(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)<=0 + if(eb_enabled && + ((field=='E' and (lx(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)<=0 || lx(std::max(i-1, lx_lo.x), std::min(j , lx_hi.y), k)<=0 || lz(std::min(i , lz_hi.x), std::min(j , lz_hi.y), k)<=0 || lz(std::min(i , lz_hi.x), std::max(j-1, lz_lo.y), k)<=0)) or - (field=='B' and Sy(i,j,k)<=0)) return; + (field=='B' and Sy(i,j,k)<=0))) return; #endif #endif #if defined(WARPX_DIM_1D_Z) @@ -1080,10 +1086,10 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( [=] AMREX_GPU_DEVICE (int i, int j, int k) { #ifdef AMREX_USE_EB #ifdef WARPX_DIM_3D - if((field=='E' and lz(i, j, k)<=0) or (field=='B' and Sz(i, j, k)<=0)) return; + if(eb_enabled && ((field=='E' and lz(i, j, k)<=0) or (field=='B' and Sz(i, j, k)<=0))) return; #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) //In XZ and RZ Ez is associated with a z-edge, while Bz is associated with a x-edge - if((field=='E' and lz(i, j, k)<=0) or (field=='B' and lx(i, j, k)<=0)) return; + if(eb_enabled && ((field=='E' and lz(i, j, k)<=0) or (field=='B' and lx(i, j, k)<=0))) return; #endif #endif #if defined(WARPX_DIM_1D_Z) diff --git a/Source/Parallelization/WarpXRegrid.cpp b/Source/Parallelization/WarpXRegrid.cpp index 22979912a84..e1ef6505186 100644 --- a/Source/Parallelization/WarpXRegrid.cpp +++ b/Source/Parallelization/WarpXRegrid.cpp @@ -212,20 +212,20 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi RemakeMultiFab(m_hybrid_pic_model->current_fp_ampere[lev][idim], dm, false, lev); RemakeMultiFab(m_hybrid_pic_model->current_fp_external[lev][idim], dm, true, lev); } -#ifdef AMREX_USE_EB - if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { - RemakeMultiFab(m_edge_lengths[lev][idim], dm, false ,lev); - RemakeMultiFab(m_face_areas[lev][idim], dm, false ,lev); - if(WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT){ - RemakeMultiFab(Venl[lev][idim], dm, false ,lev); - RemakeMultiFab(m_flag_info_face[lev][idim], dm, false ,lev); - RemakeMultiFab(m_flag_ext_face[lev][idim], dm, false ,lev); - RemakeMultiFab(m_area_mod[lev][idim], dm, false ,lev); - RemakeMultiFab(ECTRhofield[lev][idim], dm, false ,lev); - m_borrowing[lev][idim] = std::make_unique>(amrex::convert(ba, Bfield_fp[lev][idim]->ixType().toIntVect()), dm); + if (m_eb_enabled) { + if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { + RemakeMultiFab(m_edge_lengths[lev][idim], dm, false ,lev); + RemakeMultiFab(m_face_areas[lev][idim], dm, false ,lev); + if(WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT){ + RemakeMultiFab(Venl[lev][idim], dm, false ,lev); + RemakeMultiFab(m_flag_info_face[lev][idim], dm, false ,lev); + RemakeMultiFab(m_flag_ext_face[lev][idim], dm, false ,lev); + RemakeMultiFab(m_area_mod[lev][idim], dm, false ,lev); + RemakeMultiFab(ECTRhofield[lev][idim], dm, false ,lev); + m_borrowing[lev][idim] = std::make_unique>(amrex::convert(ba, Bfield_fp[lev][idim]->ixType().toIntVect()), dm); + } } 
} -#endif } RemakeMultiFab(F_fp[lev], dm, true ,lev); @@ -239,18 +239,19 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi RemakeMultiFab(m_hybrid_pic_model->electron_pressure_fp[lev], dm, false, lev); } -#ifdef AMREX_USE_EB - RemakeMultiFab(m_distance_to_eb[lev], dm, false ,lev); - - int max_guard = guard_cells.ng_FieldSolver.max(); - m_field_factory[lev] = amrex::makeEBFabFactory(Geom(lev), ba, dm, - {max_guard, max_guard, max_guard}, - amrex::EBSupport::full); + if (m_eb_enabled) { + RemakeMultiFab(m_distance_to_eb[lev], dm, false, lev); - InitializeEBGridData(lev); -#else - m_field_factory[lev] = std::make_unique(); +#ifdef AMREX_USE_EB + int max_guard = guard_cells.ng_FieldSolver.max(); + m_field_factory[lev] = amrex::makeEBFabFactory(Geom(lev), ba, dm, + {max_guard, max_guard, max_guard}, + amrex::EBSupport::full); #endif + InitializeEBGridData(lev); + } else { + m_field_factory[lev] = std::make_unique(); + } #ifdef WARPX_USE_PSATD if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp index fc496217388..cc3cd89cd7d 100644 --- a/Source/Particles/MultiParticleContainer.cpp +++ b/Source/Particles/MultiParticleContainer.cpp @@ -39,10 +39,8 @@ #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXProfilerWrapper.H" #include "Utils/WarpXUtil.H" -#ifdef AMREX_USE_EB -# include "EmbeddedBoundary/ParticleScraper.H" -# include "EmbeddedBoundary/ParticleBoundaryProcess.H" -#endif +#include "EmbeddedBoundary/ParticleScraper.H" +#include "EmbeddedBoundary/ParticleBoundaryProcess.H" #include "WarpX.H" @@ -958,13 +956,9 @@ void MultiParticleContainer::CheckIonizationProductSpecies() void MultiParticleContainer::ScrapeParticlesAtEB (const amrex::Vector& distance_to_eb) { -#ifdef AMREX_USE_EB for (auto& pc : allcontainers) { scrapeParticlesAtEB(*pc, distance_to_eb, ParticleBoundaryProcess::Absorb()); } -#else - amrex::ignore_unused(distance_to_eb); -#endif } #ifdef WARPX_QED diff --git a/Source/Particles/ParticleBoundaryBuffer.cpp b/Source/Particles/ParticleBoundaryBuffer.cpp index c8c683f0abf..6fc5d18b307 100644 --- a/Source/Particles/ParticleBoundaryBuffer.cpp +++ b/Source/Particles/ParticleBoundaryBuffer.cpp @@ -6,6 +6,7 @@ */ #include "WarpX.H" +#include "EmbeddedBoundary/Enabled.H" #include "EmbeddedBoundary/DistanceToEB.H" #include "Particles/ParticleBoundaryBuffer.H" #include "Particles/MultiParticleContainer.H" @@ -23,8 +24,10 @@ #include #include #include + using namespace amrex::literals; + struct IsOutsideDomainBoundary { amrex::GpuArray m_plo; amrex::GpuArray m_phi; @@ -46,7 +49,6 @@ struct IsOutsideDomainBoundary { } }; -#ifdef AMREX_USE_EB struct FindEmbeddedBoundaryIntersection { const int m_step_index; const int m_delta_index; @@ -160,7 +162,6 @@ struct FindEmbeddedBoundaryIntersection { #endif } }; -#endif struct CopyAndTimestamp { int m_step_index; @@ -231,6 +232,8 @@ ParticleBoundaryBuffer::ParticleBoundaryBuffer () constexpr auto idx_zhi = 5; #endif + bool const eb_enabled = EB::enabled(); + for (int ispecies = 0; ispecies < numSpecies(); ++ispecies) { const amrex::ParmParse pp_species(getSpeciesNames()[ispecies]); @@ -250,9 +253,9 @@ ParticleBoundaryBuffer::ParticleBoundaryBuffer () pp_species.query("save_particles_at_zlo", m_do_boundary_buffer[idx_zlo][ispecies]); pp_species.query("save_particles_at_zhi", m_do_boundary_buffer[idx_zhi][ispecies]); #endif -#ifdef AMREX_USE_EB - pp_species.query("save_particles_at_eb", 
m_do_boundary_buffer[AMREX_SPACEDIM*2][ispecies]); -#endif + + if (eb_enabled) { pp_species.query("save_particles_at_eb", m_do_boundary_buffer[AMREX_SPACEDIM*2][ispecies]); } + // Set the flag whether the boundary is active or any species for (int i = 0; i < numBoundaries(); ++i) { if (m_do_boundary_buffer[i][ispecies]) { m_do_any_boundary[i] = 1; } @@ -275,10 +278,7 @@ ParticleBoundaryBuffer::ParticleBoundaryBuffer () m_boundary_names[idx_zlo] = "zlo"; m_boundary_names[idx_zhi] = "zhi"; #endif -#ifdef AMREX_USE_EB - m_boundary_names[AMREX_SPACEDIM*2] = "eb"; -#endif - + if (eb_enabled) { m_boundary_names[AMREX_SPACEDIM*2] = "eb"; } } void ParticleBoundaryBuffer::printNumParticles () const { @@ -298,17 +298,17 @@ void ParticleBoundaryBuffer::printNumParticles () const { } } } -#ifdef AMREX_USE_EB - auto& buffer = m_particle_containers[2*AMREX_SPACEDIM]; - for (int i = 0; i < numSpecies(); ++i) - { - const auto np = buffer[i].isDefined() ? buffer[i].TotalNumberOfParticles(false) : 0; - amrex::Print() << Utils::TextMsg::Info( - "Species " + getSpeciesNames()[i] + " has " - + std::to_string(np) + " particles in the EB boundary buffer" - ); + + if (EB::enabled()) { + auto &buffer = m_particle_containers[2 * AMREX_SPACEDIM]; + for (int i = 0; i < numSpecies(); ++i) { + const auto np = buffer[i].isDefined() ? buffer[i].TotalNumberOfParticles(false) : 0; + amrex::Print() << Utils::TextMsg::Info( + "Species " + getSpeciesNames()[i] + " has " + + std::to_string(np) + " particles in the EB boundary buffer" + ); + } } -#endif } void ParticleBoundaryBuffer::redistribute () { @@ -443,99 +443,97 @@ void ParticleBoundaryBuffer::gatherParticlesFromDomainBoundaries (MultiParticleC void ParticleBoundaryBuffer::gatherParticlesFromEmbeddedBoundaries ( MultiParticleContainer& mypc, const amrex::Vector& distance_to_eb) { -#ifdef AMREX_USE_EB - WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::EB"); - - using PIter = amrex::ParConstIterSoA; - const auto& warpx_instance = WarpX::GetInstance(); - const amrex::Geometry& geom = warpx_instance.Geom(0); - auto plo = geom.ProbLoArray(); - - auto& buffer = m_particle_containers[m_particle_containers.size()-1]; - for (int i = 0; i < numSpecies(); ++i) - { - if (!m_do_boundary_buffer[AMREX_SPACEDIM*2][i]) continue; - const auto& pc = mypc.GetParticleContainer(i); - if (!buffer[i].isDefined()) - { - buffer[i] = pc.make_alike(); - buffer[i].AddIntComp("stepScraped", false); - buffer[i].AddRealComp("deltaTimeScraped", false); - buffer[i].AddRealComp("nx", false); - buffer[i].AddRealComp("ny", false); - buffer[i].AddRealComp("nz", false); + if (EB::enabled()) { + WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::EB"); + + + using PIter = amrex::ParConstIterSoA; + const auto &warpx_instance = WarpX::GetInstance(); + const amrex::Geometry &geom = warpx_instance.Geom(0); + auto plo = geom.ProbLoArray(); + + auto &buffer = m_particle_containers[m_particle_containers.size() - 1]; + for (int i = 0; i < numSpecies(); ++i) { + if (!m_do_boundary_buffer[AMREX_SPACEDIM * 2][i]) continue; + const auto &pc = mypc.GetParticleContainer(i); + if (!buffer[i].isDefined()) { + buffer[i] = pc.make_alike(); + buffer[i].AddIntComp("stepScraped", false); + buffer[i].AddRealComp("deltaTimeScraped", false); + buffer[i].AddRealComp("nx", false); + buffer[i].AddRealComp("ny", false); + buffer[i].AddRealComp("nz", false); - } - auto& species_buffer = buffer[i]; - for (int lev = 0; lev < pc.numLevels(); ++lev) - { - const auto& plevel = pc.GetParticles(lev); - auto dxi = 
warpx_instance.Geom(lev).InvCellSizeArray(); + } + auto &species_buffer = buffer[i]; + for (int lev = 0; lev < pc.numLevels(); ++lev) { + const auto &plevel = pc.GetParticles(lev); + auto dxi = warpx_instance.Geom(lev).InvCellSizeArray(); #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for(PIter pti(pc, lev); pti.isValid(); ++pti) - { - auto phiarr = (*distance_to_eb[lev])[pti].array(); // signed distance function - auto index = std::make_pair(pti.index(), pti.LocalTileIndex()); - if(plevel.find(index) == plevel.end()) continue; - - const auto getPosition = GetParticlePosition(pti); - auto& ptile_buffer = species_buffer.DefineAndReturnParticleTile(lev, pti.index(), - pti.LocalTileIndex()); - const auto& ptile = plevel.at(index); - auto np = ptile.numParticles(); - if (np == 0) { continue; } - - using SrcData = WarpXParticleContainer::ParticleTileType::ConstParticleTileDataType; - auto predicate = [=] AMREX_GPU_HOST_DEVICE (const SrcData& /*src*/, const int ip) - /* NVCC 11.3.109 chokes in C++17 on this: noexcept */ - { - amrex::ParticleReal xp, yp, zp; - getPosition(ip, xp, yp, zp); - - amrex::Real phi_value = ablastr::particles::doGatherScalarFieldNodal( - xp, yp, zp, phiarr, dxi, plo - ); - return phi_value < 0.0 ? 1 : 0; - }; - - const auto ptile_data = ptile.getConstParticleTileData(); - - amrex::ReduceOps reduce_op; - amrex::ReduceData reduce_data(reduce_op); - { - WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::count_out_of_boundsEB"); - reduce_op.eval(np, reduce_data, [=] AMREX_GPU_HOST_DEVICE (int ip) - { return predicate(ptile_data, ip) ? 1 : 0; }); - } + for (PIter pti(pc, lev); pti.isValid(); ++pti) { + auto phiarr = (*distance_to_eb[lev])[pti].array(); // signed distance function + auto index = std::make_pair(pti.index(), pti.LocalTileIndex()); + if (plevel.find(index) == plevel.end()) continue; + + const auto getPosition = GetParticlePosition(pti); + auto &ptile_buffer = species_buffer.DefineAndReturnParticleTile(lev, pti.index(), + pti.LocalTileIndex()); + const auto &ptile = plevel.at(index); + auto np = ptile.numParticles(); + if (np == 0) { continue; } + + using SrcData = WarpXParticleContainer::ParticleTileType::ConstParticleTileDataType; + auto predicate = [=] AMREX_GPU_HOST_DEVICE(const SrcData & /*src*/, const int ip) + /* NVCC 11.3.109 chokes in C++17 on this: noexcept */ + { + amrex::ParticleReal xp, yp, zp; + getPosition(ip, xp, yp, zp); - auto dst_index = ptile_buffer.numParticles(); - { - WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::resize_eb"); - ptile_buffer.resize(dst_index + amrex::get<0>(reduce_data.value())); - } - auto& warpx = WarpX::GetInstance(); - const auto dt = warpx.getdt(pti.GetLevel()); - auto string_to_index_intcomp = buffer[i].getParticleRuntimeiComps(); - const int step_scraped_index = string_to_index_intcomp.at("stepScraped"); - auto string_to_index_realcomp = buffer[i].getParticleRuntimeComps(); - const int delta_index = string_to_index_realcomp.at("deltaTimeScraped"); - const int normal_index = string_to_index_realcomp.at("nx"); - const int step = warpx_instance.getistep(0); + amrex::Real phi_value = ablastr::particles::doGatherScalarFieldNodal( + xp, yp, zp, phiarr, dxi, plo + ); + return phi_value < 0.0 ? 
1 : 0; + }; - { - WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::filterTransformEB"); - amrex::filterAndTransformParticles(ptile_buffer, ptile, predicate, - FindEmbeddedBoundaryIntersection{step_scraped_index,delta_index, normal_index, step, dt, phiarr, dxi, plo}, 0, dst_index); + const auto ptile_data = ptile.getConstParticleTileData(); + amrex::ReduceOps reduce_op; + amrex::ReduceData reduce_data(reduce_op); + { + WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::count_out_of_boundsEB"); + reduce_op.eval(np, reduce_data, + [=] AMREX_GPU_HOST_DEVICE(int ip) { return predicate(ptile_data, ip) ? 1 : 0; }); + } + + auto dst_index = ptile_buffer.numParticles(); + { + WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::resize_eb"); + ptile_buffer.resize(dst_index + amrex::get<0>(reduce_data.value())); + } + auto &warpx = WarpX::GetInstance(); + const auto dt = warpx.getdt(pti.GetLevel()); + auto string_to_index_intcomp = buffer[i].getParticleRuntimeiComps(); + const int step_scraped_index = string_to_index_intcomp.at("stepScraped"); + auto string_to_index_realcomp = buffer[i].getParticleRuntimeComps(); + const int delta_index = string_to_index_realcomp.at("deltaTimeScraped"); + const int normal_index = string_to_index_realcomp.at("nx"); + const int step = warpx_instance.getistep(0); + + { + WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::filterTransformEB"); + amrex::filterAndTransformParticles(ptile_buffer, ptile, predicate, + FindEmbeddedBoundaryIntersection{step_scraped_index, + delta_index, normal_index, + step, dt, phiarr, dxi, plo}, + 0, dst_index); + + } } } } } -#else - amrex::ignore_unused(mypc, distance_to_eb); -#endif } int ParticleBoundaryBuffer::getNumParticlesInContainer( diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index c7f44903d9b..bdcc2ae4cfc 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -39,6 +39,7 @@ #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" #include "Utils/WarpXProfilerWrapper.H" +#include "EmbeddedBoundary/Enabled.H" #ifdef AMREX_USE_EB # include "EmbeddedBoundary/ParticleBoundaryProcess.H" # include "EmbeddedBoundary/ParticleScraper.H" @@ -1475,8 +1476,11 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int // Remove particles that are inside the embedded boundaries #ifdef AMREX_USE_EB - auto & distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); - scrapeParticlesAtEB( *this, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb()); + if (EB::enabled()) + { + auto &distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); + scrapeParticlesAtEB(*this, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb()); + } #endif // The function that calls this is responsible for redistributing particles. 
@@ -1972,8 +1976,11 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, // Remove particles that are inside the embedded boundaries #ifdef AMREX_USE_EB - auto & distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); - scrapeParticlesAtEB(tmp_pc, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb()); + if (EB::enabled()) + { + auto & distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); + scrapeParticlesAtEB(tmp_pc, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb()); + } #endif // Redistribute the new particles that were added to the temporary container. diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp index 8692dff3302..1b4e3218cf4 100644 --- a/Source/Particles/WarpXParticleContainer.cpp +++ b/Source/Particles/WarpXParticleContainer.cpp @@ -13,6 +13,7 @@ #include "Deposition/ChargeDeposition.H" #include "Deposition/CurrentDeposition.H" #include "Deposition/SharedDepositionUtils.H" +#include "EmbeddedBoundary/Enabled.H" #include "Pusher/GetAndSetPosition.H" #include "Pusher/UpdatePosition.H" #include "ParticleBoundaries_K.H" @@ -300,9 +301,11 @@ WarpXParticleContainer::AddNParticles (int /*lev*/, long n, // Remove particles that are inside the embedded boundaries #ifdef AMREX_USE_EB - auto & distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); - scrapeParticlesAtEB( *this, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb()); - deleteInvalidParticles(); + if (EB::enabled()) { + auto & distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); + scrapeParticlesAtEB( *this, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb()); + deleteInvalidParticles(); + } #endif } diff --git a/Source/Utils/WarpXAlgorithmSelection.cpp b/Source/Utils/WarpXAlgorithmSelection.cpp index b1f93c2f6c8..3e148e75557 100644 --- a/Source/Utils/WarpXAlgorithmSelection.cpp +++ b/Source/Utils/WarpXAlgorithmSelection.cpp @@ -152,7 +152,7 @@ const std::map ReductionType_algo_to_int = { }; int -GetAlgorithmInteger(const amrex::ParmParse& pp, const char* pp_search_key ){ +GetAlgorithmInteger (const amrex::ParmParse& pp, const char* pp_search_key ){ // Read user input ; use "default" if it is not found std::string algo = "default"; diff --git a/Source/WarpX.H b/Source/WarpX.H index d19843ca636..1e3cd3cc1d1 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -947,6 +947,8 @@ public: */ [[nodiscard]] amrex::IntVect get_numprocs() const {return numprocs;} + /** Enable embedded boundaries */ + bool m_eb_enabled = false; bool m_boundary_potential_specified = false; ElectrostaticSolver::PoissonBoundaryHandler m_poisson_boundary_handler; void ComputeSpaceChargeField (bool reset_fields); @@ -1013,7 +1015,7 @@ public: amrex::ParserExecutor<3> const& zfield_parser, std::array< std::unique_ptr, 3 > const& edge_lengths, std::array< std::unique_ptr, 3 > const& face_areas, - char field, + [[maybe_unused]] char field, int lev, PatchType patch_type); /** diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 9a29d7c2354..a1a0da74d63 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -14,6 +14,7 @@ #include "BoundaryConditions/PML.H" #include "Diagnostics/MultiDiagnostics.H" #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" +#include "EmbeddedBoundary/Enabled.H" #include "EmbeddedBoundary/WarpXFaceInfoBox.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" #include 
"FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H" @@ -81,6 +82,7 @@ #include #include #include +#include #include #include @@ -265,7 +267,7 @@ WarpX::WarpX () BackwardCompatibility(); - InitEB(); + if (m_eb_enabled) { InitEB(); } ablastr::utils::SignalHandling::InitSignalHandling(); @@ -539,10 +541,14 @@ WarpX::ReadParameters () { const ParmParse pp_algo("algo"); electromagnetic_solver_id = static_cast(GetAlgorithmInteger(pp_algo, "maxwell_solver")); + + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT && !EB::enabled()) { + throw std::runtime_error("ECP Solver requires to enable embedded boundaries at runtime."); + } } { - const ParmParse pp_warpx("warpx"); + ParmParse pp_warpx("warpx"); //"Synthetic" warning messages may be injected in the Warning Manager via // inputfile for debug&testing purposes. @@ -784,6 +790,14 @@ WarpX::ReadParameters () "The FFT Poisson solver is not implemented in labframe-electromagnetostatic mode yet." ); + m_eb_enabled = EB::enabled(); +#if !defined(AMREX_USE_EB) + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !m_eb_enabled, + "Embedded boundaries are requested via warpx.eb_enabled but were not compiled!" + ); +#endif + // Parse the input file for domain boundary potentials const ParmParse pp_boundary("boundary"); bool potential_specified = false; @@ -794,9 +808,9 @@ WarpX::ReadParameters () potential_specified |= pp_boundary.query("potential_hi_y", m_poisson_boundary_handler.potential_yhi_str); potential_specified |= pp_boundary.query("potential_lo_z", m_poisson_boundary_handler.potential_zlo_str); potential_specified |= pp_boundary.query("potential_hi_z", m_poisson_boundary_handler.potential_zhi_str); -#if defined(AMREX_USE_EB) - potential_specified |= pp_warpx.query("eb_potential(x,y,z,t)", m_poisson_boundary_handler.potential_eb_str); -#endif + if (m_eb_enabled) { + potential_specified |= pp_warpx.query("eb_potential(x,y,z,t)", m_poisson_boundary_handler.potential_eb_str); + } m_boundary_potential_specified = potential_specified; if (potential_specified & (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC)) { ablastr::warn_manager::WMRecordWarning( @@ -2150,14 +2164,17 @@ WarpX::AllocLevelData (int lev, const BoxArray& ba, const DistributionMapping& d bilinear_filter.stencil_length_each_dir); + + if (m_eb_enabled) { #ifdef AMREX_USE_EB - int max_guard = guard_cells.ng_FieldSolver.max(); - m_field_factory[lev] = amrex::makeEBFabFactory(Geom(lev), ba, dm, - {max_guard, max_guard, max_guard}, - amrex::EBSupport::full); -#else - m_field_factory[lev] = std::make_unique(); + int max_guard = guard_cells.ng_FieldSolver.max(); + m_field_factory[lev] = amrex::makeEBFabFactory(Geom(lev), ba, dm, + {max_guard, max_guard, max_guard}, + amrex::EBSupport::full); #endif + } else { + m_field_factory[lev] = std::make_unique(); + } if (mypc->nSpeciesDepositOnMainGrid() && n_current_deposition_buffer == 0) { @@ -2386,51 +2403,81 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm AllocInitMultiFab(Efield_avg_fp[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, lev, "Efield_avg_fp[z]", 0.0_rt); } -#ifdef AMREX_USE_EB - constexpr int nc_ls = 1; - amrex::IntVect ng_ls(2); - AllocInitMultiFab(m_distance_to_eb[lev], amrex::convert(ba, IntVect::TheNodeVector()), dm, nc_ls, ng_ls, lev, "m_distance_to_eb"); - - // EB info are needed only at the finest level - if (lev == maxLevel()) - { - if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { - 
AllocInitMultiFab(m_edge_lengths[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_edge_lengths[x]"); - AllocInitMultiFab(m_edge_lengths[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_edge_lengths[y]"); - AllocInitMultiFab(m_edge_lengths[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_edge_lengths[z]"); - AllocInitMultiFab(m_face_areas[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_face_areas[x]"); - AllocInitMultiFab(m_face_areas[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_face_areas[y]"); - AllocInitMultiFab(m_face_areas[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_face_areas[z]"); - } - if(WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { - AllocInitMultiFab(m_edge_lengths[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_edge_lengths[x]"); - AllocInitMultiFab(m_edge_lengths[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_edge_lengths[y]"); - AllocInitMultiFab(m_edge_lengths[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_edge_lengths[z]"); - AllocInitMultiFab(m_face_areas[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_face_areas[x]"); - AllocInitMultiFab(m_face_areas[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_face_areas[y]"); - AllocInitMultiFab(m_face_areas[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_face_areas[z]"); - AllocInitMultiFab(m_flag_info_face[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_info_face[x]"); - AllocInitMultiFab(m_flag_info_face[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_info_face[y]"); - AllocInitMultiFab(m_flag_info_face[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_info_face[z]"); - AllocInitMultiFab(m_flag_ext_face[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[x]"); - AllocInitMultiFab(m_flag_ext_face[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[y]"); - AllocInitMultiFab(m_flag_ext_face[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[z]"); - AllocInitMultiFab(m_area_mod[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_area_mod[x]"); - AllocInitMultiFab(m_area_mod[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_area_mod[y]"); - AllocInitMultiFab(m_area_mod[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_area_mod[z]"); - m_borrowing[lev][0] = std::make_unique>(amrex::convert(ba, Bx_nodal_flag), dm); - m_borrowing[lev][1] = std::make_unique>(amrex::convert(ba, By_nodal_flag), dm); - m_borrowing[lev][2] = std::make_unique>(amrex::convert(ba, Bz_nodal_flag), dm); - AllocInitMultiFab(Venl[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "Venl[x]"); - AllocInitMultiFab(Venl[lev][1], amrex::convert(ba, By_nodal_flag), 
dm, ncomps, guard_cells.ng_FieldSolver, lev, "Venl[y]"); - AllocInitMultiFab(Venl[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "Venl[z]"); - - AllocInitMultiFab(ECTRhofield[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "ECTRhofield[x]", 0.0_rt); - AllocInitMultiFab(ECTRhofield[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "ECTRhofield[y]", 0.0_rt); - AllocInitMultiFab(ECTRhofield[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "ECTRhofield[z]", 0.0_rt); + if (m_eb_enabled) { + constexpr int nc_ls = 1; + amrex::IntVect ng_ls(2); + AllocInitMultiFab(m_distance_to_eb[lev], amrex::convert(ba, IntVect::TheNodeVector()), dm, nc_ls, ng_ls, lev, + "m_distance_to_eb"); + + // EB info are needed only at the finest level + if (lev == maxLevel()) { + if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { + AllocInitMultiFab(m_edge_lengths[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_edge_lengths[x]"); + AllocInitMultiFab(m_edge_lengths[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_edge_lengths[y]"); + AllocInitMultiFab(m_edge_lengths[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_edge_lengths[z]"); + AllocInitMultiFab(m_face_areas[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_face_areas[x]"); + AllocInitMultiFab(m_face_areas[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_face_areas[y]"); + AllocInitMultiFab(m_face_areas[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_face_areas[z]"); + } + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { + AllocInitMultiFab(m_edge_lengths[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_edge_lengths[x]"); + AllocInitMultiFab(m_edge_lengths[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_edge_lengths[y]"); + AllocInitMultiFab(m_edge_lengths[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_edge_lengths[z]"); + AllocInitMultiFab(m_face_areas[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_face_areas[x]"); + AllocInitMultiFab(m_face_areas[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_face_areas[y]"); + AllocInitMultiFab(m_face_areas[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_face_areas[z]"); + AllocInitMultiFab(m_flag_info_face[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_flag_info_face[x]"); + AllocInitMultiFab(m_flag_info_face[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_flag_info_face[y]"); + AllocInitMultiFab(m_flag_info_face[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_flag_info_face[z]"); + AllocInitMultiFab(m_flag_ext_face[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[x]"); + AllocInitMultiFab(m_flag_ext_face[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, + 
guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[y]"); + AllocInitMultiFab(m_flag_ext_face[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[z]"); + AllocInitMultiFab(m_area_mod[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_area_mod[x]"); + AllocInitMultiFab(m_area_mod[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_area_mod[y]"); + AllocInitMultiFab(m_area_mod[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_area_mod[z]"); + m_borrowing[lev][0] = std::make_unique>( + amrex::convert(ba, Bx_nodal_flag), dm); + m_borrowing[lev][1] = std::make_unique>( + amrex::convert(ba, By_nodal_flag), dm); + m_borrowing[lev][2] = std::make_unique>( + amrex::convert(ba, Bz_nodal_flag), dm); + AllocInitMultiFab(Venl[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "Venl[x]"); + AllocInitMultiFab(Venl[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "Venl[y]"); + AllocInitMultiFab(Venl[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "Venl[z]"); + + AllocInitMultiFab(ECTRhofield[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "ECTRhofield[x]", 0.0_rt); + AllocInitMultiFab(ECTRhofield[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "ECTRhofield[y]", 0.0_rt); + AllocInitMultiFab(ECTRhofield[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "ECTRhofield[z]", 0.0_rt); + } } } -#endif int rho_ncomps = 0; if( (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame) || diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index da0078f8b5a..8275da739d1 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -45,9 +45,7 @@ #include #include #include -#if defined(AMREX_USE_EB) || defined(WARPX_DIM_RZ) -# include -#endif +#include #ifdef AMREX_USE_EB # include #endif @@ -83,6 +81,7 @@ namespace ablastr::fields { * \param[in] grids the grids per level (e.g., from AmrMesh) * \param[in] boundary_handler a handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler * \param[in] is_solver_multigrid boolean to select the Poisson solver: 1 for Multigrid, 0 for FFT + * \param[in] eb_enabled solve with embedded boundaries * \param[in] do_single_precision_comms perform communications in single precision * \param[in] rel_ref_ratio mesh refinement ratio between levels (default: 1) * \param[in] post_phi_calculation perform a calculation per level directly after phi was calculated; required for embedded boundaries (default: none) @@ -98,16 +97,17 @@ void computePhi (amrex::Vector const & rho, amrex::Vector & phi, std::array const beta, - amrex::Real const relative_tolerance, + amrex::Real relative_tolerance, amrex::Real absolute_tolerance, - int const max_iters, - int const verbosity, + int max_iters, + int verbosity, amrex::Vector const& geom, amrex::Vector const& dmap, amrex::Vector const& grids, T_BoundaryHandler const boundary_handler, [[maybe_unused]] bool is_solver_multigrid, - bool const do_single_precision_comms = false, + bool eb_enabled = false, + bool do_single_precision_comms = false, std::optional > rel_ref_ratio = std::nullopt, [[maybe_unused]] 
T_PostPhiCalculationFunctor post_phi_calculation = std::nullopt, [[maybe_unused]] std::optional current_time = std::nullopt, // only used for EB @@ -124,6 +124,11 @@ computePhi (amrex::Vector const & rho, rel_ref_ratio = amrex::Vector{{amrex::IntVect(AMREX_D_DECL(1, 1, 1))}}; } +#if !defined(AMREX_USE_EB) + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE(!eb_enabled, + "Embedded boundary solve requested but not compiled in"); +#endif + auto const finest_level = static_cast(rho.size() - 1); // determine if rho is zero everywhere @@ -143,24 +148,20 @@ computePhi (amrex::Vector const & rho, ); } -#if !(defined(AMREX_USE_EB) || defined(WARPX_DIM_RZ)) amrex::LPInfo info; -#else - const amrex::LPInfo info; -#endif for (int lev=0; lev<=finest_level; lev++) { // Set the value of beta - amrex::Array beta_solver = + amrex::Array beta_solver = #if defined(WARPX_DIM_1D_Z) - {{ beta[2] }}; // beta_x and beta_z + {{beta[2]}}; // beta_x and beta_z #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - {{ beta[0], beta[2] }}; // beta_x and beta_z + {{ beta[0], beta[2] }}; // beta_x and beta_z #else - {{ beta[0], beta[1], beta[2] }}; + {{ beta[0], beta[1], beta[2] }}; #endif -#if (defined(WARPX_USE_PSATD) && defined(WARPX_DIM_3D)) +#if (defined(WARPX_USE_PSATD) && defined(WARPX_DIM_3D)) // Use the Integrated Green Function solver (FFT) on the coarsest level if it was selected if(!is_solver_multigrid && lev==0){ amrex::Array const dx_igf @@ -179,77 +180,106 @@ computePhi (amrex::Vector const & rho, // Use the Multigrid (MLMG) solver if selected or on refined patches // but first scale rho appropriately using namespace ablastr::constant::SI; - rho[lev]->mult(-1._rt/ep0); // TODO: when do we "un-multiply" this? We need to document this side-effect! - -#if !(defined(AMREX_USE_EB) || defined(WARPX_DIM_RZ)) - // Determine whether to use semi-coarsening - amrex::Array dx_scaled - {AMREX_D_DECL(geom[lev].CellSize(0)/std::sqrt(1._rt-beta_solver[0]*beta_solver[0]), - geom[lev].CellSize(1)/std::sqrt(1._rt-beta_solver[1]*beta_solver[1]), - geom[lev].CellSize(2)/std::sqrt(1._rt-beta_solver[2]*beta_solver[2]))}; - int max_semicoarsening_level = 0; - int semicoarsening_direction = -1; - const auto min_dir = static_cast(std::distance(dx_scaled.begin(), - std::min_element(dx_scaled.begin(),dx_scaled.end()))); - const auto max_dir = static_cast(std::distance(dx_scaled.begin(), - std::max_element(dx_scaled.begin(),dx_scaled.end()))); - if (dx_scaled[max_dir] > dx_scaled[min_dir]) { - semicoarsening_direction = max_dir; - max_semicoarsening_level = static_cast - (std::log2(dx_scaled[max_dir]/dx_scaled[min_dir])); - } - if (max_semicoarsening_level > 0) { - info.setSemicoarsening(true); - info.setMaxSemicoarseningLevel(max_semicoarsening_level); - info.setSemicoarseningDirection(semicoarsening_direction); - } + rho[lev]->mult(-1._rt / ep0); // TODO: when do we "un-multiply" this? We need to document this side-effect! + +#ifdef WARPX_DIM_RZ + constexpr bool is_rz = true; +#else + constexpr bool is_rz = false; #endif -#if defined(AMREX_USE_EB) || defined(WARPX_DIM_RZ) - // In the presence of EB or RZ: the solver assumes that the beam is - // propagating along one of the axes of the grid, i.e. that only *one* - // of the components of `beta` is non-negligible. 
- amrex::MLEBNodeFDLaplacian linop( {geom[lev]}, {grids[lev]}, {dmap[lev]}, info + if (!eb_enabled && !is_rz) { + // Determine whether to use semi-coarsening + amrex::Array dx_scaled + {AMREX_D_DECL(geom[lev].CellSize(0) / std::sqrt(1._rt - beta_solver[0] * beta_solver[0]), + geom[lev].CellSize(1) / std::sqrt(1._rt - beta_solver[1] * beta_solver[1]), + geom[lev].CellSize(2) / std::sqrt(1._rt - beta_solver[2] * beta_solver[2]))}; + int max_semicoarsening_level = 0; + int semicoarsening_direction = -1; + const auto min_dir = static_cast(std::distance(dx_scaled.begin(), + std::min_element(dx_scaled.begin(), dx_scaled.end()))); + const auto max_dir = static_cast(std::distance(dx_scaled.begin(), + std::max_element(dx_scaled.begin(), dx_scaled.end()))); + if (dx_scaled[max_dir] > dx_scaled[min_dir]) { + semicoarsening_direction = max_dir; + max_semicoarsening_level = static_cast + (std::log2(dx_scaled[max_dir] / dx_scaled[min_dir])); + } + if (max_semicoarsening_level > 0) { + info.setSemicoarsening(true); + info.setMaxSemicoarseningLevel(max_semicoarsening_level); + info.setSemicoarseningDirection(semicoarsening_direction); + } + } + + std::unique_ptr linop; + if (eb_enabled || is_rz) { + // In the presence of EB or RZ: the solver assumes that the beam is + // propagating along one of the axes of the grid, i.e. that only *one* + // of the components of `beta` is non-negligible. + auto linop_nodelap = std::make_unique(); + if (eb_enabled) { #if defined(AMREX_USE_EB) - , {eb_farray_box_factory.value()[lev]} + linop_nodelap->define( + amrex::Vector{geom[lev]}, + amrex::Vector{grids[lev]}, + amrex::Vector{dmap[lev]}, + info, + amrex::Vector{eb_farray_box_factory.value()[lev]} + ); #endif - ); + } + else { + // TODO: rather use MLNodeTensorLaplacian (for RZ w/o EB) here? Semi-Coarsening would be nice here + linop_nodelap->define( + amrex::Vector{geom[lev]}, + amrex::Vector{grids[lev]}, + amrex::Vector{dmap[lev]}, + info + ); + } - // Note: this assumes that the beam is propagating along - // one of the axes of the grid, i.e. that only *one* of the - // components of `beta` is non-negligible. // we use this + // Note: this assumes that the beam is propagating along + // one of the axes of the grid, i.e. that only *one* of the + // components of `beta` is non-negligible. 
// we use this #if defined(WARPX_DIM_RZ) - linop.setSigma({0._rt, 1._rt-beta_solver[1]*beta_solver[1]}); + linop_nodelap->setRZ(true); + linop_nodelap->setSigma({0._rt, 1._rt-beta_solver[1]*beta_solver[1]}); #else - linop.setSigma({AMREX_D_DECL( - 1._rt-beta_solver[0]*beta_solver[0], - 1._rt-beta_solver[1]*beta_solver[1], - 1._rt-beta_solver[2]*beta_solver[2])}); + linop_nodelap->setSigma({AMREX_D_DECL( + 1._rt-beta_solver[0]*beta_solver[0], + 1._rt-beta_solver[1]*beta_solver[1], + 1._rt-beta_solver[2]*beta_solver[2])}); #endif #if defined(AMREX_USE_EB) - // if the EB potential only depends on time, the potential can be passed - // as a float instead of a callable - if (boundary_handler.phi_EB_only_t) { - linop.setEBDirichlet(boundary_handler.potential_eb_t(current_time.value())); - } - else - linop.setEBDirichlet(boundary_handler.getPhiEB(current_time.value())); -#endif -#else - // In the absence of EB and RZ: use a more generic solver - // that can handle beams propagating in any direction - amrex::MLNodeTensorLaplacian linop( {geom[lev]}, {grids[lev]}, - {dmap[lev]}, info ); - linop.setBeta( beta_solver ); // for the non-axis-aligned solver + if (eb_enabled) { + // if the EB potential only depends on time, the potential can be passed + // as a float instead of a callable + if (boundary_handler.phi_EB_only_t) { + linop_nodelap->setEBDirichlet(boundary_handler.potential_eb_t(current_time.value())); + } else + linop_nodelap->setEBDirichlet(boundary_handler.getPhiEB(current_time.value())); + } #endif + linop = std::move(linop_nodelap); + } else { + // In the absence of EB and RZ: use a more generic solver + // that can handle beams propagating in any direction + auto linop_tenslap = std::make_unique( + amrex::Vector{geom[lev]}, + amrex::Vector{grids[lev]}, + amrex::Vector{dmap[lev]}, + info + ); + linop_tenslap->setBeta( beta_solver ); // for the non-axis-aligned solver + linop = std::move(linop_tenslap); + } // Solve the Poisson equation - linop.setDomainBC( boundary_handler.lobc, boundary_handler.hibc ); -#ifdef WARPX_DIM_RZ - linop.setRZ(true); -#endif - amrex::MLMG mlmg(linop); // actual solver defined here + linop->setDomainBC( boundary_handler.lobc, boundary_handler.hibc ); + + amrex::MLMG mlmg(*linop); // actual solver defined here mlmg.setVerbose(verbosity); mlmg.setMaxIter(max_iters); mlmg.setAlwaysUseBNorm(always_use_bnorm); @@ -269,7 +299,7 @@ computePhi (amrex::Vector const & rho, amrex::BoxArray ba = phi[lev+1]->boxArray(); const amrex::IntVect& refratio = rel_ref_ratio.value()[lev]; ba.coarsen(refratio); - const int ncomp = linop.getNComp(); + const int ncomp = linop->getNComp(); amrex::MultiFab phi_cp(ba, phi[lev+1]->DistributionMap(), ncomp, 1); // Copy from phi[lev] to phi_cp (in parallel) diff --git a/Source/ablastr/fields/VectorPoissonSolver.H b/Source/ablastr/fields/VectorPoissonSolver.H index d49335723d8..1a014119232 100644 --- a/Source/ablastr/fields/VectorPoissonSolver.H +++ b/Source/ablastr/fields/VectorPoissonSolver.H @@ -71,6 +71,7 @@ namespace ablastr::fields { * \param[in] dmap the distribution mapping per level (e.g., from AmrMesh) * \param[in] grids the grids per level (e.g., from AmrMesh) * \param[in] boundary_handler a handler for boundary conditions, for example @see MagnetostaticSolver::VectorPoissonBoundaryHandler + * \param[in] eb_enabled solve with embedded boundaries * \param[in] do_single_precision_comms perform communications in single precision * \param[in] rel_ref_ratio mesh refinement ratio between levels (default: 1) * \param[in] 
post_A_calculation perform a calculation per level directly after A was calculated; required for embedded boundaries (default: none) @@ -85,15 +86,16 @@ template< void computeVectorPotential ( amrex::Vector > const & curr, amrex::Vector > & A, - amrex::Real const relative_tolerance, + amrex::Real relative_tolerance, amrex::Real absolute_tolerance, - int const max_iters, - int const verbosity, + int max_iters, + int verbosity, amrex::Vector const& geom, amrex::Vector const& dmap, amrex::Vector const& grids, T_BoundaryHandler const boundary_handler, - bool const do_single_precision_comms = false, + bool eb_enabled = false, + bool do_single_precision_comms = false, std::optional > rel_ref_ratio = std::nullopt, [[maybe_unused]] T_PostACalculationFunctor post_A_calculation = std::nullopt, [[maybe_unused]] std::optional current_time = std::nullopt, // only used for EB @@ -108,6 +110,11 @@ computeVectorPotential ( amrex::Vector > co rel_ref_ratio = amrex::Vector{{amrex::IntVect(AMREX_D_DECL(1, 1, 1))}}; } +#if !defined(AMREX_USE_EB) + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE(!eb_enabled, + "Embedded boundary solve requested but not compiled in"); +#endif + auto const finest_level = static_cast(curr.size()) - 1; // scale J appropriately; also determine if current is zero everywhere @@ -134,24 +141,18 @@ computeVectorPotential ( amrex::Vector > co // Loop over dimensions of A to solve each component individually for (int lev=0; lev<=finest_level; lev++) { - amrex::MLEBNodeFDLaplacian linopx( - {geom[lev]}, {grids[lev]}, {dmap[lev]}, info -#if defined(AMREX_USE_EB) - , {eb_farray_box_factory.value()[lev]} -#endif - ); - amrex::MLEBNodeFDLaplacian linopy( - {geom[lev]}, {grids[lev]}, {dmap[lev]}, info -#if defined(AMREX_USE_EB) - , {eb_farray_box_factory.value()[lev]} -#endif - ); - amrex::MLEBNodeFDLaplacian linopz( - {geom[lev]}, {grids[lev]}, {dmap[lev]}, info -#if defined(AMREX_USE_EB) - , {eb_farray_box_factory.value()[lev]} + amrex::MLEBNodeFDLaplacian linopx, linopy, linopz; + if (eb_enabled) { +#ifdef AMREX_USE_EB + linopx.define({geom[lev]}, {grids[lev]}, {dmap[lev]}, info, {eb_farray_box_factory.value()[lev]}); + linopy.define({geom[lev]}, {grids[lev]}, {dmap[lev]}, info, {eb_farray_box_factory.value()[lev]}); + linopz.define({geom[lev]}, {grids[lev]}, {dmap[lev]}, info, {eb_farray_box_factory.value()[lev]}); #endif - ); + } else { + linopx.define({geom[lev]}, {grids[lev]}, {dmap[lev]}, info); + linopy.define({geom[lev]}, {grids[lev]}, {dmap[lev]}, info); + linopz.define({geom[lev]}, {grids[lev]}, {dmap[lev]}, info); + } amrex::Array linop = {&linopx,&linopy,&linopz}; amrex::Array,3> mlmg; @@ -163,9 +164,9 @@ computeVectorPotential ( amrex::Vector > co // Note: this assumes that beta is zero linop[adim]->setSigma({AMREX_D_DECL(1._rt, 1._rt, 1._rt)}); -#if defined(AMREX_USE_EB) // Set Homogeneous Dirichlet Boundary on EB - linop[adim]->setEBDirichlet(0_rt); +#if defined(AMREX_USE_EB) + if (eb_enabled) { linop[adim]->setEBDirichlet(0_rt); } #endif #ifdef WARPX_DIM_RZ