const amrex::BoxArray m_pb_ba = pb_ba[lev];
amrex::Real* m_pb_mag = pb_mag[lev].data();
amrex::Real* m_pb_dir = pb_dir[lev].data();
m_pb_mag[boxIdx] = 0.;
m_pb_dir[boxIdx] = 0.;
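// Host mirror and device buffer for the n_avg partial sums; `avg` is the raw
// device pointer that the GPU reduction kernels below accumulate into.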
amrex::Vector<amrex::Real> avg_h(n_avg,0.);
amrex::Gpu::DeviceVector<amrex::Real> avg_d(n_avg,0.);
amrex::Real* avg = avg_d.data();
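// Loop over the local boxes of the cell-centered MultiFab; TileNoZ() tiles
// only in the horizontal, so each tile spans the full vertical column.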
for (amrex::MFIter mfi(mf_cons, TileNoZ()); mfi.isValid(); ++mfi) {

    const amrex::Box& vbx = mfi.validbox();
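    // Convert the valid box and this perturbation box to the u- and v-staggered
    // index types, then intersect them to get the regions to average over.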
    auto ixtype_u = mf_xvel.boxArray().ixType();
    amrex::Box vbx_u = amrex::convert(vbx,ixtype_u);
    amrex::Box pbx_u = amrex::convert(m_pb_ba[boxIdx], ixtype_u);
    amrex::Box ubx_u = pbx_u & vbx_u;
    auto ixtype_v = mf_yvel.boxArray().ixType();
    amrex::Box vbx_v = amrex::convert(vbx,ixtype_v);
    amrex::Box pbx_v = amrex::convert(m_pb_ba[boxIdx], ixtype_v);
    amrex::Box ubx_v = pbx_v & vbx_v;
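    // x-velocity: accumulate sums over the u-staggered intersection ubx_u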
    const amrex::Array4<const amrex::Real>& xvel_arry = mf_xvel.const_array(mfi);

#ifdef USE_VOLUME_AVERAGE
    int npts = ubx_u.numPts();
    amrex::Real norm = 1.0 / (amrex::Real) npts;
    ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubx_u, [=]
    AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
        amrex::Gpu::deviceReduceSum(&avg[0], xvel_arry(i,j,k)*norm, handler);
    });
#endif
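    // Slab-averaged alternative: average u separately over the lowest and
    // highest k-planes of ubx_u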
#ifdef USE_SLAB_AVERAGE
    amrex::Box ubxSlab_lo = makeSlab(ubx_u,2,ubx_u.smallEnd(2));
    amrex::Box ubxSlab_hi = makeSlab(ubx_u,2,ubx_u.bigEnd(2));
    int npts_lo = ubxSlab_lo.numPts();
    int npts_hi = ubxSlab_hi.numPts();
    amrex::Real norm_lo = 1.0 / (amrex::Real) npts_lo;
    amrex::Real norm_hi = 1.0 / (amrex::Real) npts_hi;

    // Bottom slab: accumulate u into avg[0]
    ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_lo, [=]
    AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
        amrex::Gpu::deviceReduceSum(&avg[0], xvel_arry(i,j,k)*norm_lo, handler);
    });

    // Top slab: accumulate u into avg[2]
    ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_hi, [=]
    AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
        amrex::Gpu::deviceReduceSum(&avg[2], xvel_arry(i,j,k)*norm_hi, handler);
    });
#endif
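    // y-velocity: same reductions over the v-staggered intersection ubx_v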
    const amrex::Array4<const amrex::Real>& yvel_arry = mf_yvel.const_array(mfi);

#ifdef USE_VOLUME_AVERAGE
    int npts = ubx_v.numPts();
    amrex::Real norm = 1.0 / (amrex::Real) npts;
    ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubx_v, [=]
    AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
        amrex::Gpu::deviceReduceSum(&avg[1], yvel_arry(i,j,k)*norm, handler);
    });
#endif
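    // Slab-averaged alternative for v over the lowest and highest k-planes of ubx_v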
#ifdef USE_SLAB_AVERAGE
    amrex::Box ubxSlab_lo = makeSlab(ubx_v,2,ubx_v.smallEnd(2));
    amrex::Box ubxSlab_hi = makeSlab(ubx_v,2,ubx_v.bigEnd(2));
    int npts_lo = ubxSlab_lo.numPts();
    int npts_hi = ubxSlab_hi.numPts();
    amrex::Real norm_lo = 1.0 / (amrex::Real) npts_lo;
    amrex::Real norm_hi = 1.0 / (amrex::Real) npts_hi;

    // Bottom slab: accumulate v into avg[1]
    ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_lo, [=]
    AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
        amrex::Gpu::deviceReduceSum(&avg[1], yvel_arry(i,j,k)*norm_lo, handler);
    });

    // Top slab: accumulate v into avg[3]
    ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_hi, [=]
    AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
        amrex::Gpu::deviceReduceSum(&avg[3], yvel_arry(i,j,k)*norm_hi, handler);
    });
#endif
} // end of MFIter loop
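// Copy the reduced sums from the device back to the host once every box has contributed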
amrex::Gpu::copy(amrex::Gpu::deviceToHost, avg_d.begin(), avg_d.end(), avg_h.begin());

#ifdef USE_VOLUME_AVERAGE
// ...
#endif
#ifdef USE_SLAB_AVERAGE
// ...
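The elided #ifdef branches above presumably combine the box-averaged velocity components in avg_h into the perturbation-box magnitude and direction that were zeroed at the top of this listing. A minimal sketch of one plausible volume-average form follows; the index layout (avg_h[0] = mean u, avg_h[1] = mean v) is an assumption, not the actual ERF source.

// Hypothetical sketch only (requires <cmath>): derive a mean-flow magnitude and
// direction for this perturbation box from the averaged velocity components.
m_pb_mag[boxIdx] = std::sqrt(avg_h[0]*avg_h[0] + avg_h[1]*avg_h[1]); // |<U>|
m_pb_dir[boxIdx] = std::atan2(avg_h[1], avg_h[0]);                   // angle from +x, in radians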