Function to compute the average of each field over a horizontal (x-y) plane.
764 const auto & geom =
m_geom[lev];
779 Real d_fact_new, d_fact_old;
789 Gpu::DeviceVector<Real> pavg(plane_average.size(), 0.0);
790 Real* plane_avg = pavg.data();
793 Vector<Real> denom(plane_average.size(),0.0);
794 Vector<Real> val_old(plane_average.size(),0.0);
801 Box domain = geom.Domain();
803 Array<int,AMREX_SPACEDIM> is_per = {0,0,0};
804 for (
int idim(0); idim < AMREX_SPACEDIM-1; ++idim) {
805 if (geom.isPeriodic(idim)) is_per[idim] = 1;
809 for (
int imf(0); imf < 4; ++imf) {
812 if (!fields[imf])
continue;
814 denom[imf] = 1.0 / (
Real)ncell_plane[imf];
815 val_old[imf] = plane_average[imf]*d_fact_old;
818 #pragma omp parallel if (Gpu::notInLaunchRegion())
820 for (MFIter mfi(*fields[imf],
TileNoZ()); mfi.isValid(); ++mfi) {
821 Box vbx = mfi.validbox();
822 Box pbx = mfi.tilebox();
823 pbx.setSmall(2,0); pbx.setBig(2,0);
827 IndexType ixt = averages[imf]->boxArray().ixType();
828 for (
int idim(0); idim < AMREX_SPACEDIM-1; ++idim) {
829 if ( ixt.nodeCentered(idim) && (pbx.bigEnd(idim) == vbx.bigEnd(idim)) ) {
830 int dom_hi = domain.bigEnd(idim)+1;
831 if (pbx.bigEnd(idim) <
dom_hi || is_per[idim]) {
837 auto mf_arr = (
m_rotate) ? rot_fields[imf]->const_array(mfi) :
838 fields[imf]->const_array(mfi);
841 const auto plo = geom.ProbLoArray();
842 const auto dxInv = geom.InvCellSizeArray();
843 const auto z_phys_arr = z_phys->const_array(mfi);
844 auto x_pos_arr = x_pos->array(mfi);
845 auto y_pos_arr = y_pos->array(mfi);
846 auto z_pos_arr = z_pos->array(mfi);
847 ParallelFor(Gpu::KernelInfo().setReduction(
true), pbx, [=]
848 AMREX_GPU_DEVICE(
int i,
int j,
int k, Gpu::Handler
const& handler) noexcept
852 &interp, mf_arr, z_phys_arr, plo,
dxInv, 1);
854 Gpu::deviceReduceSum(&plane_avg[imf], val, handler);
857 auto k_arr = k_indx->const_array(mfi);
858 auto j_arr = j_indx ? j_indx->const_array(mfi) : Array4<const int> {};
859 auto i_arr = i_indx ? i_indx->const_array(mfi) : Array4<const int> {};
860 ParallelFor(Gpu::KernelInfo().setReduction(
true), pbx, [=]
861 AMREX_GPU_DEVICE(
int i,
int j,
int k, Gpu::Handler
const& handler) noexcept
863 int mk = k_arr(i,j,k);
864 int mj = j_arr ? j_arr(i,j,k) : j;
865 int mi = i_arr ? i_arr(i,j,k) : i;
866 Real val = mf_arr(mi,mj,mk);
867 Gpu::deviceReduceSum(&plane_avg[imf], val, handler);
882 denom[iavg] = 1.0 / (
Real)ncell_plane[iavg];
883 val_old[iavg] = plane_average[iavg]*d_fact_old;
886 #pragma omp parallel if (Gpu::notInLaunchRegion())
888 for (MFIter mfi(*averages[iavg],
TileNoZ()); mfi.isValid(); ++mfi)
890 Box pbx = mfi.tilebox();
891 pbx.setSmall(2,0); pbx.setBig(2,0);
893 const Array4<Real const>& T_mf_arr = fields[2]->const_array(mfi);
894 const Array4<Real const>& qv_mf_arr = (fields[3])? fields[3]->const_array(mfi) : Array4<const Real>{};
895 const Array4<Real const>& qr_mf_arr = (fields[4])? fields[4]->const_array(mfi) : Array4<const Real>{};
898 const auto plo =
m_geom[lev].ProbLoArray();
900 const auto z_phys_arr = z_phys->const_array(mfi);
901 auto x_pos_arr = x_pos->array(mfi);
902 auto y_pos_arr = y_pos->array(mfi);
903 auto z_pos_arr = z_pos->array(mfi);
904 ParallelFor(Gpu::KernelInfo().setReduction(
true), pbx, [=]
905 AMREX_GPU_DEVICE(
int i,
int j,
int k, Gpu::Handler
const& handler) noexcept
910 &T_interp, T_mf_arr, z_phys_arr, plo,
dxInv, 1);
912 &qv_interp, qv_mf_arr, z_phys_arr, plo,
dxInv, 1);
918 &qr_interp, qr_mf_arr, z_phys_arr, plo,
dxInv, 1);
919 vfac = 1.0 + 0.61*qv_interp - qr_interp;
921 vfac = 1.0 + 0.61*qv_interp;
924 Gpu::deviceReduceSum(&plane_avg[iavg], val, handler);
927 auto k_arr = k_indx->const_array(mfi);
928 auto j_arr = j_indx ? j_indx->const_array(mfi) : Array4<const int> {};
929 auto i_arr = i_indx ? i_indx->const_array(mfi) : Array4<const int> {};
930 ParallelFor(Gpu::KernelInfo().setReduction(
true), pbx, [=]
931 AMREX_GPU_DEVICE(
int i,
int j,
int k, Gpu::Handler
const& handler) noexcept
933 int mk = k_arr(i,j,k);
934 int mj = j_arr ? j_arr(i,j,k) : j;
935 int mi = i_arr ? i_arr(i,j,k) : i;
939 vfac = 1.0 + 0.61*qv_mf_arr(mi,mj,mk) - qr_mf_arr(mi,mj,mk);
941 vfac = 1.0 + 0.61*qv_mf_arr(mi,mj,mk);
943 const Real val = T_mf_arr(mi,mj,mk) *
vfac;
944 Gpu::deviceReduceSum(&plane_avg[iavg], val, handler);
952 denom[iavg] = 1.0 / (
Real)ncell_plane[iavg];
954 Gpu::copy(Gpu::deviceToDevice, pavg.begin() + 2, pavg.begin() + 3,
955 pavg.begin() + iavg);
967 denom[iavg] = 1.0 / (
Real)ncell_plane[iavg];
968 val_old[iavg] = plane_average[iavg]*d_fact_old;
973 #pragma omp parallel if (Gpu::notInLaunchRegion())
975 for (MFIter mfi(*averages[iavg],
TileNoZ()); mfi.isValid(); ++mfi)
977 Box pbx = mfi.tilebox();
978 pbx.setSmall(2,0); pbx.setBig(2,0);
981 auto u_mf_arr = (
m_rotate) ? rot_fields[imf ]->const_array(mfi) :
982 fields[imf ]->const_array(mfi);
983 auto v_mf_arr = (
m_rotate) ? rot_fields[imf+1]->const_array(mfi) :
984 fields[imf+1]->const_array(mfi);
987 const auto plo =
m_geom[lev].ProbLoArray();
989 const auto z_phys_arr = z_phys->const_array(mfi);
990 auto x_pos_arr = x_pos->array(mfi);
991 auto y_pos_arr = y_pos->array(mfi);
992 auto z_pos_arr = z_pos->array(mfi);
993 ParallelFor(Gpu::KernelInfo().setReduction(
true), pbx, [=]
994 AMREX_GPU_DEVICE(
int i,
int j,
int k, Gpu::Handler
const& handler) noexcept
999 &u_interp, u_mf_arr, z_phys_arr, plo,
dxInv, 1);
1001 &v_interp, v_mf_arr, z_phys_arr, plo,
dxInv, 1);
1002 const Real val = std::sqrt(u_interp*u_interp + v_interp*v_interp + Vsg*Vsg);
1003 Gpu::deviceReduceSum(&plane_avg[iavg], val, handler);
1006 auto k_arr = k_indx->const_array(mfi);
1007 auto j_arr = j_indx ? j_indx->const_array(mfi) : Array4<const int> {};
1008 auto i_arr = i_indx ? i_indx->const_array(mfi) : Array4<const int> {};
1009 ParallelFor(Gpu::KernelInfo().setReduction(
true), pbx, [=]
1010 AMREX_GPU_DEVICE(
int i,
int j,
int k, Gpu::Handler
const& handler) noexcept
1012 int mk = k_arr(i,j,k);
1013 int mj = j_arr ? j_arr(i,j,k) : j;
1014 int mi = i_arr ? i_arr(i,j,k) : i;
1015 const Real u_val = 0.5 * (u_mf_arr(mi,mj,mk) + u_mf_arr(mi+1,mj ,mk));
1016 const Real v_val = 0.5 * (v_mf_arr(mi,mj,mk) + v_mf_arr(mi ,mj+1,mk));
1017 const Real val = std::sqrt(u_val*u_val + v_val*v_val + Vsg*Vsg);
1018 Gpu::deviceReduceSum(&plane_avg[iavg], val, handler);
1025 Gpu::copy(Gpu::deviceToHost, pavg.begin(), pavg.end(), plane_average.begin());
1026 ParallelDescriptor::ReduceRealSum(plane_average.data(), plane_average.size());
1029 for (
int iavg(0); iavg <
m_navg; ++iavg){
1030 plane_average[iavg] *= denom[iavg]*d_fact_new;
1031 plane_average[iavg] += val_old[iavg];
1032 averages[iavg]->setVal(plane_average[iavg]);
amrex::GpuArray< Real, AMREX_SPACEDIM > dxInv
Definition: ERF_InitCustomPertVels_ParticleTests.H:16
Real vfac
Definition: ERF_InitCustomPertVels_Terrain3DHemisphere.H:24
ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept { const auto prob_lo=geomdata.ProbLo();const auto dx=geomdata.CellSize();const Real x=(prob_lo[0]+(i+0.5) *dx[0])/mf_m(i, j, 0);const Real z=z_cc(i, j, k);Real L=std::sqrt(std::pow((x - x_c)/x_r, 2)+std::pow((z - z_c)/z_r, 2));if(L<=1.0) { Real dT=T_pert *(std::cos(PI *L)+1.0)/2.0;Real Tbar_hse=p_hse(i, j, k)/(R_d *r_hse(i, j, k));Real theta_perturbed=(Tbar_hse+dT) *std::pow(p_0/p_hse(i, j, k), rdOcp);Real theta_0=(Tbar_hse) *std::pow(p_0/p_hse(i, j, k), rdOcp);if(const_rho) { state_pert(i, j, k, RhoTheta_comp)=r_hse(i, j, k) *(theta_perturbed - theta_0);} else { state_pert(i, j, k, Rho_comp)=getRhoThetagivenP(p_hse(i, j, k))/theta_perturbed - r_hse(i, j, k);} } })
const auto & dom_hi
Definition: ERF_SetupVertDiff.H:2
amrex::Real Real
Definition: ERF_ShocInterface.H:19
AMREX_FORCE_INLINE amrex::IntVect TileNoZ()
Definition: ERF_TileNoZ.H:11
int m_navg
Definition: ERF_MOSTAverage.H:199
amrex::Vector< amrex::Vector< std::unique_ptr< amrex::MultiFab > > > m_averages
Definition: ERF_MOSTAverage.H:210
amrex::Vector< std::unique_ptr< amrex::MultiFab > > m_y_pos
Definition: ERF_MOSTAverage.H:205
amrex::Vector< std::unique_ptr< amrex::iMultiFab > > m_i_indx
Definition: ERF_MOSTAverage.H:207
amrex::Vector< amrex::MultiFab * > m_z_phys_nd
Definition: ERF_MOSTAverage.H:191
amrex::Vector< std::unique_ptr< amrex::MultiFab > > m_x_pos
Definition: ERF_MOSTAverage.H:204
amrex::Vector< amrex::Real > m_Vsg
Definition: ERF_MOSTAverage.H:239
amrex::Vector< amrex::Vector< std::unique_ptr< amrex::MultiFab > > > m_rot_fields
Definition: ERF_MOSTAverage.H:211
amrex::Vector< amrex::Vector< amrex::Real > > m_plane_average
Definition: ERF_MOSTAverage.H:216
amrex::Vector< std::unique_ptr< amrex::MultiFab > > m_z_pos
Definition: ERF_MOSTAverage.H:206
amrex::Vector< amrex::Vector< int > > m_ncell_plane
Definition: ERF_MOSTAverage.H:215
amrex::Real m_fact_new
Definition: ERF_MOSTAverage.H:234
amrex::Vector< std::unique_ptr< amrex::iMultiFab > > m_j_indx
Definition: ERF_MOSTAverage.H:208
amrex::Vector< amrex::Vector< amrex::MultiFab * > > m_fields
Definition: ERF_MOSTAverage.H:190
amrex::Real m_fact_old
Definition: ERF_MOSTAverage.H:234
bool m_interp
Definition: ERF_MOSTAverage.H:226
const amrex::Vector< amrex::Geometry > m_geom
Definition: ERF_MOSTAverage.H:189
AMREX_GPU_HOST_DEVICE static AMREX_INLINE void trilinear_interp_T(const amrex::Real &xp, const amrex::Real &yp, const amrex::Real &zp, amrex::Real *interp_vals, amrex::Array4< amrex::Real const > const &interp_array, amrex::Array4< amrex::Real const > const &z_arr, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > &plo, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > &dxi, const int interp_comp)
Definition: ERF_MOSTAverage.H:120
amrex::Vector< std::unique_ptr< amrex::iMultiFab > > m_k_indx
Definition: ERF_MOSTAverage.H:209