// Create local copies of the perturbation-box (PB) BoxArray and magnitude array
const amrex::BoxArray m_pb_ba = pb_ba[lev];
amrex::Real* m_pb_mag = pb_mag[lev].data();
m_pb_mag[boxIdx] = 0.;
// Host and device storage for the running averages:
// volume averaging uses slots 0 (u) and 1 (v); slab averaging uses
// slots 0/1 for the bottom slab and 2/3 for the top slab
int n_avg = 4;
amrex::Vector<amrex::Real> avg_h(n_avg,0.);
amrex::Gpu::DeviceVector<amrex::Real> avg_d(n_avg,0.);
amrex::Real* avg = avg_d.data();
// Iterate over the cell-centered MultiFab
for (amrex::MFIter mfi(mf_cons, TileNoZ()); mfi.isValid(); ++mfi) {

    // Cell-centered valid box
    const amrex::Box& vbx = mfi.validbox();
    // Box logic for the u velocity: convert to the x-face index type,
    // then intersect the PB box with this tile's valid box
    auto ixtype_u = mf_xvel.boxArray().ixType();
    amrex::Box vbx_u = amrex::convert(vbx,ixtype_u);
    amrex::Box pbx_u = amrex::convert(m_pb_ba[boxIdx], ixtype_u);
    amrex::Box ubx_u = pbx_u & vbx_u;
    // Box logic for the v velocity: same construction with the y-face index type
    auto ixtype_v = mf_yvel.boxArray().ixType();
    amrex::Box vbx_v = amrex::convert(vbx,ixtype_v);
    amrex::Box pbx_v = amrex::convert(m_pb_ba[boxIdx], ixtype_v);
    amrex::Box ubx_v = pbx_v & vbx_v;
    // u velocity: operate only where the PB intersects this box
    if (ubx_u.ok()) {
        const amrex::Array4<const amrex::Real>& xvel_arry = mf_xvel.const_array(mfi);

        #ifdef USE_VOLUME_AVERAGE
        // Volume average of u over the intersection box
        int npts = ubx_u.numPts();
        amrex::Real norm = 1.0 / (amrex::Real) npts;
        ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubx_u, [=]
        AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
            amrex::Gpu::deviceReduceSum(&avg[0], xvel_arry(i,j,k)*norm, handler);
        });
        #endif
        #ifdef USE_SLAB_AVERAGE
        // Slabs at the bottom and top k-indices of the intersection box
        amrex::Box ubxSlab_lo = makeSlab(ubx_u,2,ubx_u.smallEnd(2));
        amrex::Box ubxSlab_hi = makeSlab(ubx_u,2,ubx_u.bigEnd(2));
        int npts_lo = ubxSlab_lo.numPts();
        int npts_hi = ubxSlab_hi.numPts();
        amrex::Real norm_lo = 1.0 / (amrex::Real) npts_lo;
        amrex::Real norm_hi = 1.0 / (amrex::Real) npts_hi;

        // Average u over the bottom slab
        ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_lo, [=]
        AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
            amrex::Gpu::deviceReduceSum(&avg[0], xvel_arry(i,j,k)*norm_lo, handler);
        });

        // Average u over the top slab
        ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_hi, [=]
        AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
            amrex::Gpu::deviceReduceSum(&avg[2], xvel_arry(i,j,k)*norm_hi, handler);
        });
        #endif
    }
    // v velocity: operate only where the PB intersects this box
    if (ubx_v.ok()) {
        const amrex::Array4<const amrex::Real>& yvel_arry = mf_yvel.const_array(mfi);

        #ifdef USE_VOLUME_AVERAGE
        // Volume average of v over the intersection box
        int npts = ubx_v.numPts();
        amrex::Real norm = 1.0 / (amrex::Real) npts;
        ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubx_v, [=]
        AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
            amrex::Gpu::deviceReduceSum(&avg[1], yvel_arry(i,j,k)*norm, handler);
        });
        #endif
        #ifdef USE_SLAB_AVERAGE
        // Slabs at the bottom and top k-indices of the intersection box
        amrex::Box ubxSlab_lo = makeSlab(ubx_v,2,ubx_v.smallEnd(2));
        amrex::Box ubxSlab_hi = makeSlab(ubx_v,2,ubx_v.bigEnd(2));
        int npts_lo = ubxSlab_lo.numPts();
        int npts_hi = ubxSlab_hi.numPts();
        amrex::Real norm_lo = 1.0 / (amrex::Real) npts_lo;
        amrex::Real norm_hi = 1.0 / (amrex::Real) npts_hi;

        // Average v over the bottom slab
        ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_lo, [=]
        AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
            amrex::Gpu::deviceReduceSum(&avg[1], yvel_arry(i,j,k)*norm_lo, handler);
        });

        // Average v over the top slab
        ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_hi, [=]
        AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
            amrex::Gpu::deviceReduceSum(&avg[3], yvel_arry(i,j,k)*norm_hi, handler);
        });
        #endif
    }
} // MFIter
// Copy all accumulated averages back to the host in a single transfer
amrex::Gpu::copy(amrex::Gpu::deviceToHost, avg_d.begin(), avg_d.end(), avg_h.begin());
// Combine the u/v averages into the PB mean velocity magnitude.
// NOTE: the expressions below are an assumed reconstruction from the
// avg_h slot layout; the original block bodies are not shown here.
#ifdef USE_VOLUME_AVERAGE
m_pb_mag[boxIdx] = std::sqrt(avg_h[0]*avg_h[0] + avg_h[1]*avg_h[1]);
#endif

#ifdef USE_SLAB_AVERAGE
m_pb_mag[boxIdx] = 0.5*( std::sqrt(avg_h[0]*avg_h[0] + avg_h[1]*avg_h[1])
                       + std::sqrt(avg_h[2]*avg_h[2] + avg_h[3]*avg_h[3]) );
#endif
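
The reduction pattern used throughout this routine, a ParallelFor launched with amrex::Gpu::KernelInfo().setReduction(true) whose lambda accumulates into device memory via amrex::Gpu::deviceReduceSum and a Gpu::Handler, can be exercised in isolation. Below is a minimal, self-contained sketch (not ERF code; the 32^3 box size and the i+j+k fill are made up for illustration) that averages a field over a whole box and over its bottom k-slab, mirroring the USE_VOLUME_AVERAGE and USE_SLAB_AVERAGE branches above.

#include <AMReX.H>
#include <AMReX_Gpu.H>
#include <AMReX_MultiFab.H>

int main (int argc, char* argv[])
{
    amrex::Initialize(argc, argv);
    {
        // One 32^3 box covering the domain (size is arbitrary here)
        amrex::Box domain(amrex::IntVect(0), amrex::IntVect(31));
        amrex::BoxArray ba(domain);
        amrex::DistributionMapping dm(ba);
        amrex::MultiFab mf(ba, dm, 1, 0);

        // Fill with a known function so the averages are checkable
        for (amrex::MFIter mfi(mf); mfi.isValid(); ++mfi) {
            const amrex::Box& bx = mfi.validbox();
            const amrex::Array4<amrex::Real>& a = mf.array(mfi);
            amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
                a(i,j,k) = static_cast<amrex::Real>(i + j + k);
            });
        }

        // Device scratch for two averages: [0] = volume, [1] = bottom slab
        amrex::Gpu::DeviceVector<amrex::Real> avg_d(2, 0.);
        amrex::Real* avg = avg_d.data();

        for (amrex::MFIter mfi(mf); mfi.isValid(); ++mfi) {
            const amrex::Box& bx = mfi.validbox();
            const amrex::Array4<const amrex::Real>& a = mf.const_array(mfi);

            // Volume average: pre-scale each sample by 1/npts and reduce-sum
            amrex::Real norm = 1.0 / static_cast<amrex::Real>(bx.numPts());
            amrex::ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), bx, [=]
            AMREX_GPU_DEVICE (int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
                amrex::Gpu::deviceReduceSum(&avg[0], a(i,j,k)*norm, handler);
            });

            // Slab average: restrict the reduction to the bottom k-plane
            amrex::Box slab = amrex::makeSlab(bx, 2, bx.smallEnd(2));
            amrex::Real norm_lo = 1.0 / static_cast<amrex::Real>(slab.numPts());
            amrex::ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), slab, [=]
            AMREX_GPU_DEVICE (int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
                amrex::Gpu::deviceReduceSum(&avg[1], a(i,j,k)*norm_lo, handler);
            });
        }

        // One device-to-host copy for both results, as in the routine above
        amrex::Vector<amrex::Real> avg_h(2, 0.);
        amrex::Gpu::copy(amrex::Gpu::deviceToHost, avg_d.begin(), avg_d.end(), avg_h.begin());
        amrex::Print() << "volume avg = " << avg_h[0]
                       << ", bottom-slab avg = " << avg_h[1] << "\n";
    }
    amrex::Finalize();
}

Accumulating the pre-normalized samples into a small DeviceVector and copying it back once, rather than issuing one reduction transfer per kernel, keeps all the partial averages on the device until the single amrex::Gpu::copy at the end, which is the same design choice the routine above makes with its four avg slots.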