Skip to content

Commit

Permalink
update to Tests/EB_CNS so that it now uses redistribution routines in amrex/Src/EB
Browse files Browse the repository at this point in the history
  • Loading branch information
asalmgren committed Jun 20, 2023
1 parent de000d2 commit c7a40a4
Show file tree
Hide file tree
Showing 5 changed files with 32 additions and 231 deletions.
16 changes: 0 additions & 16 deletions Tests/EB_CNS/Source/CNS.H
Original file line number Diff line number Diff line change
Expand Up @@ -210,22 +210,6 @@ public:
amrex::Array4<int const> const& lev_mask,
amrex::Real dt);

void cns_flux_redistribute (const amrex::Box& bx,
amrex::Array4<amrex::Real > const& dqdt,
amrex::Array4<amrex::Real > const& divc,
amrex::Array4<amrex::Real > const& optmp,
amrex::Array4<amrex::Real > const& del_m,
amrex::Array4<amrex::Real const> const& redistwgt,
amrex::Array4<amrex::Real const> const& vfrac,
amrex::Array4<amrex::EBCellFlag const> const& flag,
int as_crse,
amrex::Array4<amrex::Real > const& drho_as_crse,
amrex::Array4<int const> const& rrflag_as_crse,
int as_fine,
amrex::Array4<amrex::Real > const& dm_as_fine,
amrex::Array4<int const> const& lev_mask,
amrex::Real dt);

static Parm* h_parm;
static Parm* d_parm;
static ProbParm* h_prob_parm;
Expand Down
2 changes: 1 addition & 1 deletion Tests/EB_CNS/Source/CNS.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -280,7 +280,7 @@ CNS::printTotal () const
Lazy::QueueReduction( [=] () mutable {
#endif
ParallelDescriptor::ReduceRealSum(tot.data(), 5, ParallelDescriptor::IOProcessorNumber());
amrex::Print().SetPrecision(17) << "\n[CNS] Total mass is " << tot[0] << "\n"
amrex::Print().SetPrecision(15) << "\n[CNS] Total mass is " << tot[0] << "\n"
<< " Total x-momentum is " << tot[1] << "\n"
<< " Total y-momentum is " << tot[2] << "\n"
#if (AMREX_SPACEDIM == 3)
Expand Down
59 changes: 31 additions & 28 deletions Tests/EB_CNS/Source/CNS_advance_box_eb.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,10 @@
#include <CNS_divop_K.H>
#include <CNS_diffusion_eb_K.H>

#include <CNS_diffusion_eb_K.H>

#include <AMReX_EBFArrayBox.H>
#include <AMReX_EB_Redistribution.H>
#include <AMReX_MultiCutFab.H>

#if (AMREX_SPACEDIM == 2)
Expand All @@ -18,8 +21,8 @@ using namespace amrex;

void
CNS::compute_dSdt_box_eb (const Box& bx,
Array4<Real const> const& sfab,
Array4<Real > const& dsdtfab,
Array4<Real const> const& s_arr,
Array4<Real > const& dsdt_arr,
std::array<FArrayBox*, AMREX_SPACEDIM> const& flux,
Array4<EBCellFlag const> const& flag,
Array4<Real const> const& vfrac,
Expand Down Expand Up @@ -51,16 +54,12 @@ CNS::compute_dSdt_box_eb (const Box& bx,
const auto dxinv = geom.InvCellSizeArray();

// Quantities for redistribution
FArrayBox divc,optmp,redistwgt,delta_m;
FArrayBox divc,redistwgt;
divc.resize(bxg2,NEQNS);
optmp.resize(bxg2,NEQNS);
delta_m.resize(bxg1,NEQNS);
redistwgt.resize(bxg2,1);

// Set to zero just in case
divc.setVal<RunOn::Device>(0.0);
optmp.setVal<RunOn::Device>(0.0);
delta_m.setVal<RunOn::Device>(0.0);
redistwgt.setVal<RunOn::Device>(0.0);

// Primitive variables
Expand All @@ -86,9 +85,9 @@ CNS::compute_dSdt_box_eb (const Box& bx,

Parm const* lparm = d_parm;

AMREX_D_TERM(auto const& fxfab = flux_tmp[0].array();,
auto const& fyfab = flux_tmp[1].array();,
auto const& fzfab = flux_tmp[2].array(););
AMREX_D_TERM(auto const& fx_arr = flux_tmp[0].array();,
auto const& fy_arr = flux_tmp[1].array();,
auto const& fz_arr = flux_tmp[2].array(););

auto const& q = qtmp.array();

Expand All @@ -110,7 +109,7 @@ CNS::compute_dSdt_box_eb (const Box& bx,
amrex::ParallelFor(bxg5,
[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
{
cns_ctoprim(i, j, k, sfab, q, *lparm);
cns_ctoprim(i, j, k, s_arr, q, *lparm);
});

if (do_visc == 1)
Expand Down Expand Up @@ -149,8 +148,8 @@ CNS::compute_dSdt_box_eb (const Box& bx,
amrex::ParallelFor(xflxbx,
[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
{
cns_riemann_x(i, j, k, fxfab, slope, q, *lparm);
for (int n = NEQNS; n < NCONS; ++n) fxfab(i,j,k,n) = Real(0.0);
cns_riemann_x(i, j, k, fx_arr, slope, q, *lparm);
for (int n = NEQNS; n < NCONS; ++n) fx_arr(i,j,k,n) = Real(0.0);
});


Expand All @@ -160,7 +159,7 @@ CNS::compute_dSdt_box_eb (const Box& bx,
amrex::ParallelFor(xflxbx,
[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
{
cns_diff_eb_x(i, j, k, q, coefs, flag, dxinv, weights, fxfab);
cns_diff_eb_x(i, j, k, q, coefs, flag, dxinv, weights, fx_arr);
});
}

Expand All @@ -178,8 +177,8 @@ CNS::compute_dSdt_box_eb (const Box& bx,
amrex::ParallelFor(yflxbx,
[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
{
cns_riemann_y(i, j, k, fyfab, slope, q, *lparm);
for (int n = NEQNS; n < NCONS; ++n) fyfab(i,j,k,n) = Real(0.0);
cns_riemann_y(i, j, k, fy_arr, slope, q, *lparm);
for (int n = NEQNS; n < NCONS; ++n) fy_arr(i,j,k,n) = Real(0.0);
});

if(do_visc == 1)
Expand All @@ -188,7 +187,7 @@ CNS::compute_dSdt_box_eb (const Box& bx,
amrex::ParallelFor(yflxbx,
[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
{
cns_diff_eb_y(i, j, k, q, coefs, flag, dxinv, weights, fyfab);
cns_diff_eb_y(i, j, k, q, coefs, flag, dxinv, weights, fy_arr);
});
}

Expand All @@ -206,8 +205,8 @@ CNS::compute_dSdt_box_eb (const Box& bx,
amrex::ParallelFor(zflxbx,
[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
{
cns_riemann_z(i, j, k, fzfab, slope, q, *lparm);
for (int n = NEQNS; n < NCONS; ++n) fzfab(i,j,k,n) = Real(0.0);
cns_riemann_z(i, j, k, fz_arr, slope, q, *lparm);
for (int n = NEQNS; n < NCONS; ++n) fz_arr(i,j,k,n) = Real(0.0);
});

if(do_visc == 1)
Expand All @@ -216,7 +215,7 @@ CNS::compute_dSdt_box_eb (const Box& bx,
amrex::ParallelFor(zflxbx,
[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
{
cns_diff_eb_z(i, j, k, q, coefs, flag, dxinv, weights, fzfab);
cns_diff_eb_z(i, j, k, q, coefs, flag, dxinv, weights, fz_arr);
});
}
#endif
Expand All @@ -236,7 +235,7 @@ CNS::compute_dSdt_box_eb (const Box& bx,
auto const& bhi = bx.bigEnd();

// Because we are going to redistribute, we put the divergence into divc
// rather than directly into dsdtfab
// rather than directly into dsdt_arr
auto const& divc_arr = divc.array();

bool l_do_visc = do_visc;
Expand All @@ -258,12 +257,16 @@ CNS::compute_dSdt_box_eb (const Box& bx,
AMREX_D_DECL(fcx, fcy, fcz), dxinv, *lparm, l_eb_weights_type, l_do_visc);
});

auto const& optmp_arr = optmp.array();
auto const& del_m_arr = delta_m.array();

// Now do redistribution
cns_flux_redistribute(bx,dsdtfab,divc_arr,optmp_arr,del_m_arr,redistwgt_arr,vfrac,flag,
as_crse, drho_as_crse, rrflag_as_crse, as_fine, dm_as_fine, lev_mask, dt);
int icomp = 0;
int ncomp = NEQNS;
int level_mask_not_covered = lparm->level_mask_notcovered;
bool use_wts_in_divnc = false;
amrex_flux_redistribute(bx, dsdt_arr, divc_arr, redistwgt_arr, vfrac, flag,
as_crse, drho_as_crse, rrflag_as_crse,
as_fine, dm_as_fine, lev_mask, geom, use_wts_in_divnc,
level_mask_not_covered, icomp, ncomp, dt);
// apply_flux_redistribution(bx, dsdt_arr, divc_arr, redistwgt_arr, icomp, ncomp, flag, vfrac, geom);

if (gravity != Real(0.0))
{
Expand All @@ -278,8 +281,8 @@ CNS::compute_dSdt_box_eb (const Box& bx,
amrex::ParallelFor(bx,
[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
{
dsdtfab(i,j,k,imz ) += g * sfab(i,j,k,irho);
dsdtfab(i,j,k,irhoE) += g * sfab(i,j,k,imz);
dsdt_arr(i,j,k,imz ) += g * s_arr(i,j,k,irho);
dsdt_arr(i,j,k,irhoE) += g * s_arr(i,j,k,imz);
});
}

Expand Down
1 change: 0 additions & 1 deletion Tests/EB_CNS/Source/Make.package
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@ CEXE_sources += CNS_advance_box.cpp
CEXE_sources += CNS_advance_box_eb.cpp
CEXE_sources += CNS_bcfill.cpp
CEXE_sources += CNS_derive.cpp
CEXE_sources += CNS_flux_redistribute.cpp
CEXE_sources += CNS.cpp
CEXE_sources += CNSBld.cpp
CEXE_sources += CNS_io.cpp
Expand Down
185 changes: 0 additions & 185 deletions Tests/EB_CNS/Source/hydro/CNS_flux_redistribute.cpp

This file was deleted.

0 comments on commit c7a40a4

Please sign in to comment.