// Create a local copy of the perturbation-box (PB) BoxArray on this level
// and reset the magnitude entry for this box.
const amrex::BoxArray m_pb_ba = pb_ba[lev];
m_pb_mag[boxIdx] = 0.;

// Storage for the per-PB averages: slots 0/1 hold the u/v volume (or
// low-slab) averages, slots 2/3 the u/v high-slab averages.
int n_avg = 4;
amrex::Vector<amrex::Real> avg_h(n_avg,0.);
amrex::Gpu::DeviceVector<amrex::Real> avg_d(n_avg,0.);
amrex::Real* avg = avg_d.data();

// Average u and v over the intersection of each valid box with this PB.
for (amrex::MFIter mfi(mf_cons, TileNoZ()); mfi.isValid(); ++mfi) {

    // Cell-centered valid box (inherited from mf_cons)
    const amrex::Box& vbx = mfi.validbox();

    // Box logic for the u-velocity (x-face) index type
    auto ixtype_u = mf_xvel.boxArray().ixType();
    amrex::Box vbx_u = amrex::convert(vbx,ixtype_u);
    amrex::Box pbx_u = amrex::convert(m_pb_ba[boxIdx], ixtype_u);
    amrex::Box ubx_u = pbx_u & vbx_u;

    // Box logic for the v-velocity (y-face) index type
    auto ixtype_v = mf_yvel.boxArray().ixType();
    amrex::Box vbx_v = amrex::convert(vbx,ixtype_v);
    amrex::Box pbx_v = amrex::convert(m_pb_ba[boxIdx], ixtype_v);
    amrex::Box ubx_v = pbx_v & vbx_v;

    // Operate on the u box intersection, if it is non-empty
    if (ubx_u.ok()) {
        const amrex::Array4<const amrex::Real>& xvel_arry = mf_xvel.const_array(mfi);

#ifdef USE_VOLUME_AVERAGE
        // Volume average of u over the intersection, accumulated on the
        // device into avg[0] via a GPU-safe reduction.
        int npts = ubx_u.numPts();
        amrex::Real norm = 1.0 / (amrex::Real) npts;
        ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubx_u, [=]
        AMREX_GPU_DEVICE (int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
            amrex::Gpu::deviceReduceSum(&avg[0], xvel_arry(i,j,k)*norm, handler);
        });
#endif

#ifdef USE_SLAB_AVERAGE
        // Slab averages of u over the bottom and top k-planes of the
        // intersection, accumulated into avg[0] and avg[2] respectively.
        amrex::Box ubxSlab_lo = makeSlab(ubx_u,2,ubx_u.smallEnd(2));
        amrex::Box ubxSlab_hi = makeSlab(ubx_u,2,ubx_u.bigEnd(2));
        int npts_lo = ubxSlab_lo.numPts();
        int npts_hi = ubxSlab_hi.numPts();
        amrex::Real norm_lo = 1.0 / (amrex::Real) npts_lo;
        amrex::Real norm_hi = 1.0 / (amrex::Real) npts_hi;

        // Low slab
        ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_lo, [=]
        AMREX_GPU_DEVICE (int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
            amrex::Gpu::deviceReduceSum(&avg[0], xvel_arry(i,j,k)*norm_lo, handler);
        });

        // High slab
        ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_hi, [=]
        AMREX_GPU_DEVICE (int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
            amrex::Gpu::deviceReduceSum(&avg[2], xvel_arry(i,j,k)*norm_hi, handler);
        });
#endif
    } // ubx_u.ok()

    // Operate on the v box intersection, if it is non-empty
    if (ubx_v.ok()) {
        const amrex::Array4<const amrex::Real>& yvel_arry = mf_yvel.const_array(mfi);

#ifdef USE_VOLUME_AVERAGE
        // Volume average of v over the intersection, accumulated into avg[1].
        int npts = ubx_v.numPts();
        amrex::Real norm = 1.0 / (amrex::Real) npts;
        ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubx_v, [=]
        AMREX_GPU_DEVICE (int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
            amrex::Gpu::deviceReduceSum(&avg[1], yvel_arry(i,j,k)*norm, handler);
        });
#endif

#ifdef USE_SLAB_AVERAGE
        // Slab averages of v over the bottom and top k-planes of the
        // intersection, accumulated into avg[1] and avg[3] respectively.
        amrex::Box ubxSlab_lo = makeSlab(ubx_v,2,ubx_v.smallEnd(2));
        amrex::Box ubxSlab_hi = makeSlab(ubx_v,2,ubx_v.bigEnd(2));
        int npts_lo = ubxSlab_lo.numPts();
        int npts_hi = ubxSlab_hi.numPts();
        amrex::Real norm_lo = 1.0 / (amrex::Real) npts_lo;
        amrex::Real norm_hi = 1.0 / (amrex::Real) npts_hi;

        // Low slab
        ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_lo, [=]
        AMREX_GPU_DEVICE (int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
            amrex::Gpu::deviceReduceSum(&avg[1], yvel_arry(i,j,k)*norm_lo, handler);
        });

        // High slab
        ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_hi, [=]
        AMREX_GPU_DEVICE (int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
            amrex::Gpu::deviceReduceSum(&avg[3], yvel_arry(i,j,k)*norm_hi, handler);
        });
#endif
    } // ubx_v.ok()
} // MFIter

// Copy the accumulated sums from device to host.
amrex::Gpu::copy(amrex::Gpu::deviceToHost, avg_d.begin(), avg_d.end(), avg_h.begin());

// Combine the component averages into the mean velocity magnitude for this PB.
#ifdef USE_VOLUME_AVERAGE
m_pb_mag[boxIdx] = std::sqrt(avg_h[0]*avg_h[0] + avg_h[1]*avg_h[1]);
#endif

#ifdef USE_SLAB_AVERAGE
m_pb_mag[boxIdx] = 0.5*( std::sqrt(avg_h[0]*avg_h[0] + avg_h[1]*avg_h[1])
                       + std::sqrt(avg_h[2]*avg_h[2] + avg_h[3]*avg_h[3]));
#endif
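
// ---------------------------------------------------------------------
// For context: the routine above relies on AMReX's fused kernel
// reductions, where a ParallelFor launched with
// Gpu::KernelInfo().setReduction(true) passes each thread a Gpu::Handler
// and Gpu::deviceReduceSum accumulates directly into device memory. The
// following is a minimal, self-contained sketch of that pattern in
// isolation. The domain size, the constant initial field, and the name
// volume_average_sketch are illustrative assumptions, not ERF code.

#include <AMReX.H>
#include <AMReX_Print.H>
#include <AMReX_MultiFab.H>
#include <AMReX_GpuContainers.H>

// Volume-average one MultiFab component with ParallelFor +
// deviceReduceSum, then copy the reduced value back to the host
// (assumed setup; mirrors the averaging step above).
void volume_average_sketch ()
{
    amrex::Box domain(amrex::IntVect(0), amrex::IntVect(31));
    amrex::BoxArray ba(domain);
    ba.maxSize(16);
    amrex::DistributionMapping dm(ba);

    amrex::MultiFab vel(ba, dm, 1, 0);
    vel.setVal(2.0); // constant field, so the average should come out as 2.0

    // One accumulator slot on the device, read back on the host afterwards.
    amrex::Gpu::DeviceVector<amrex::Real> sum_d(1, 0.);
    amrex::Real* sum = sum_d.data();
    amrex::Real norm = 1.0 / (amrex::Real) domain.numPts();

    for (amrex::MFIter mfi(vel); mfi.isValid(); ++mfi) {
        const amrex::Box& bx = mfi.validbox();
        const amrex::Array4<const amrex::Real>& arr = vel.const_array(mfi);
        amrex::ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), bx, [=]
        AMREX_GPU_DEVICE (int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
            amrex::Gpu::deviceReduceSum(&sum[0], arr(i,j,k)*norm, handler);
        });
    }

    amrex::Real sum_h = 0.;
    amrex::Gpu::copy(amrex::Gpu::deviceToHost, sum_d.begin(), sum_d.end(), &sum_h);
    // Caveat: this sums only the boxes owned by the local MPI rank; a
    // multi-rank run would still need a global ReduceRealSum on sum_h.
    amrex::Print() << "volume average = " << sum_h << "\n";
}

int main (int argc, char* argv[])
{
    amrex::Initialize(argc, argv);
    volume_average_sketch();
    amrex::Finalize();
    return 0;
}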