const int ncomp_mf = varnames.size();

if (ncomp_mf == 0) return;
for (int lev = 1; lev <= finest_level; ++lev) {
    bool fillset = false;
for (int lev = 0; lev <= finest_level; ++lev) {
    for (int mvar(0); mvar < qmoist[lev].size(); ++mvar) {
        qmoist[lev][mvar] = micro->Get_Qmoist_Ptr(lev,mvar);
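// One cell-centered MultiFab per level holds every requested plot variable (ncomp_mf components, no ghost cells).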
Vector<MultiFab> mf(finest_level+1);
for (int lev = 0; lev <= finest_level; ++lev) {
    mf[lev].define(grids[lev], dmap[lev], ncomp_mf, 0);
Vector<MultiFab> mf_nd(finest_level+1);

for (int lev = 0; lev <= finest_level; ++lev) {
    BoxArray nodal_grids(grids[lev]); nodal_grids.surroundingNodes();
    mf_nd[lev].define(nodal_grids, dmap[lev], 3, 0);
    mf_nd[lev].setVal(0.);
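// Face-centered (staggered) MultiFabs so the u, v, w velocities can also be written on their native faces.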
Vector<MultiFab> mf_u(finest_level+1);
Vector<MultiFab> mf_v(finest_level+1);
Vector<MultiFab> mf_w(finest_level+1);

for (int lev = 0; lev <= finest_level; ++lev) {
    BoxArray grid_stag_u(grids[lev]); grid_stag_u.surroundingNodes(0);
    BoxArray grid_stag_v(grids[lev]); grid_stag_v.surroundingNodes(1);
    BoxArray grid_stag_w(grids[lev]); grid_stag_w.surroundingNodes(2);
    mf_u[lev].define(grid_stag_u, dmap[lev], 1, 0);
    mf_v[lev].define(grid_stag_v, dmap[lev], 1, 0);
    mf_w[lev].define(grid_stag_w, dmap[lev], 1, 0);
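// Cell-centered velocity with one ghost cell, obtained by averaging the staggered components to cell centers.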
Vector<MultiFab> mf_cc_vel(finest_level+1);

for (int lev = 0; lev <= finest_level; ++lev) {
    mf_cc_vel[lev].define(grids[lev], dmap[lev], AMREX_SPACEDIM, IntVect(1,1,1));
    average_face_to_cellcenter(mf_cc_vel[lev], 0,
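// On refined levels, fill the ghost cells of the cell-centered velocity from the coarser level;
// old and new data both point at the same MultiFab and time, so this is a spatial fill only.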
amrex::Interpolater* mapper = &cell_cons_interp;
for (int lev = 1; lev <= finest_level; ++lev)
{
    Vector<MultiFab*> fmf = {&(mf_cc_vel[lev]), &(mf_cc_vel[lev])};
    Vector<Real> ftime    = {t_new[lev], t_new[lev]};
    Vector<MultiFab*> cmf = {&mf_cc_vel[lev-1], &mf_cc_vel[lev-1]};
    Vector<Real> ctime    = {t_new[lev], t_new[lev]};

    amrex::FillPatchTwoLevels(mf_cc_vel[lev], t_new[lev], cmf, ctime, fmf, ftime,
                              0, 0, AMREX_SPACEDIM, geom[lev-1], geom[lev],
for (int lev = 0; lev <= finest_level; ++lev)
{
    MultiFab::Copy(mf[lev], mf_cc_vel[lev], 0, mf_comp, 1, 0);   // cell-centered x-velocity
    MultiFab::Copy(mf[lev], mf_cc_vel[lev], 1, mf_comp, 1, 0);   // cell-centered y-velocity
    MultiFab::Copy(mf[lev], mf_cc_vel[lev], 2, mf_comp, 1, 0);   // cell-centered z-velocity
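// Helper lambda used for derived quantities: alias the current output component of mf[lev]
// and evaluate the supplied derive function tile by tile into it.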
auto calculate_derived = [&](const std::string& der_name,

    MultiFab dmf(mf[lev], make_alias, mf_comp, 1);

#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
    for (MFIter mfi(dmf, TilingIfNotGPU()); mfi.isValid(); ++mfi)
    {
        const Box& bx = mfi.tilebox();
        auto& dfab = dmf[mfi];
        auto& sfab = src_mf[mfi];
        der_function(bx, dfab, 0, 1, sfab, Geom(lev), t_new[0], nullptr, lev);
MultiFab dmf(mf[lev], make_alias, mf_comp, 1);
Array<MultiFab const*, AMREX_SPACEDIM> u;
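// Hydrostatic base-state fields (pressure, density, potential temperature) are copied straight into the plotfile.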
MultiFab::Copy(mf[lev], p_hse, 0, mf_comp, 1, 0);
MultiFab::Copy(mf[lev], r_hse, 0, mf_comp, 1, 0);
MultiFab::Copy(mf[lev], th_hse, 0, mf_comp, 1, 0);
MultiFab::Copy(mf[lev], p_hse, 0, mf_comp, 1, 0);
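// Full pressure diagnosed from (rho*theta) and the water-vapor mixing ratio via the equation of state.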
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real>& derdat = mf[lev].array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i, j, k, mf_comp) = getPgivenRTh(rhotheta, qv_for_p);
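// Perturbational pressure: either copied from pp_inc or computed as the EOS pressure minus the hydrostatic p0.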
MultiFab::Copy(mf[lev], pp_inc[lev], 0, mf_comp, 1, 0);

#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real>& derdat = mf[lev].array(mfi);
    const Array4<Real const>& p0_arr = p_hse.const_array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i, j, k, mf_comp) = getPgivenRTh(rhotheta, qv_for_p) - p0_arr(i,j,k);
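// Perturbational density: conserved density minus the hydrostatic base-state density r0.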
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real>& derdat = mf[lev].array(mfi);
    const Array4<Real const>& r0_arr = r_hse.const_array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i, j, k, mf_comp) = S_arr(i,j,k,Rho_comp) - r0_arr(i,j,k);
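// This appears to be the (moist) equivalent potential temperature,
//   theta_e = T * ((p - p_v)/p_0)^(-R_d/fac) * exp(L_v*q_v / (fac*T)),
// with fac a heat-capacity factor computed on lines not shown in this fragment.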
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real>& derdat = mf[lev].array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i, j, k, mf_comp) = T * std::pow((pressure - pv)/p_0, -R_d/fac)
                                     * std::exp(L_v*qv/(fac*T));
MultiFab::Copy(mf[lev], *terrain_blank, 0, mf_comp, 1, 0);

#ifdef ERF_USE_WINDFARM
MultiFab::Copy(mf[lev], Nturb[lev], 0, mf_comp, 1, 0);
MultiFab::Copy(mf[lev], SMark[lev], 0, mf_comp, 1, 0);
MultiFab::Copy(mf[lev], SMark[lev], 1, mf_comp, 1, 0);
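// x-component of the pressure gradient on terrain-fitted grids. On each x-face the horizontal
// derivative carries a metric correction,
//   dp/dx |_face = dp/dxi - (h_xi / h_zeta) * dp/dzeta,
// with one-sided zeta differences at k == klo and k == khi; the lo/hi face values are then
// averaged to the cell center. The working array `pres` is first filled from the base state
// and its boundary/ghost values exchanged.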
int klo = geom[lev].Domain().smallEnd(2);
int khi = geom[lev].Domain().bigEnd(2);

auto dxInv = geom[lev].InvCellSizeArray();

#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& gbx = mfi.growntilebox(1);
    const Array4<Real      >& p_arr   = pres.array(mfi);
    const Array4<Real const>& hse_arr = base_state[lev].const_array(mfi);
    ParallelFor(gbx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
        p_arr(i,j,k) = hse_arr(i,j,k,1);
    ParallelFor(gbx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {

pres.FillBoundary(geom[lev].periodicity());

#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real>& derdat = mf[lev].array(mfi);
    const Array4<Real>& p_arr  = pres.array(mfi);
    const Array4<Real const>& z_nd = z_phys_nd[lev]->const_array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        // Lo x-face
        Real gp_xi_lo = dxInv[0] * (p_arr(i,j,k) - p_arr(i-1,j,k));
        Real gp_zeta_on_iface_lo;
        if (k == klo) {
            gp_zeta_on_iface_lo = 0.5 * dxInv[2] * (  p_arr(i-1,j,k+1) + p_arr(i,j,k+1)
                                                    - p_arr(i-1,j,k  ) - p_arr(i,j,k  ) );
        } else if (k == khi) {
            gp_zeta_on_iface_lo = 0.5 * dxInv[2] * (  p_arr(i-1,j,k  ) + p_arr(i,j,k  )
                                                    - p_arr(i-1,j,k-1) - p_arr(i,j,k-1) );
        } else {
            gp_zeta_on_iface_lo = 0.25 * dxInv[2] * (  p_arr(i-1,j,k+1) + p_arr(i,j,k+1)
                                                     - p_arr(i-1,j,k-1) - p_arr(i,j,k-1) );
        }
        Real gpx_lo = gp_xi_lo - (met_h_xi_lo / met_h_zeta_lo) * gp_zeta_on_iface_lo;

        // Hi x-face
        Real gp_xi_hi = dxInv[0] * (p_arr(i+1,j,k) - p_arr(i,j,k));
        Real gp_zeta_on_iface_hi;
        if (k == klo) {
            gp_zeta_on_iface_hi = 0.5 * dxInv[2] * (  p_arr(i+1,j,k+1) + p_arr(i,j,k+1)
                                                    - p_arr(i+1,j,k  ) - p_arr(i,j,k  ) );
        } else if (k == khi) {
            gp_zeta_on_iface_hi = 0.5 * dxInv[2] * (  p_arr(i+1,j,k  ) + p_arr(i,j,k  )
                                                    - p_arr(i+1,j,k-1) - p_arr(i,j,k-1) );
        } else {
            gp_zeta_on_iface_hi = 0.25 * dxInv[2] * (  p_arr(i+1,j,k+1) + p_arr(i,j,k+1)
                                                     - p_arr(i+1,j,k-1) - p_arr(i,j,k-1) );
        }
        Real gpx_hi = gp_xi_hi - (met_h_xi_hi / met_h_zeta_hi) * gp_zeta_on_iface_hi;

        derdat(i,j,k,mf_comp) = 0.5 * (gpx_lo + gpx_hi);

    // Without terrain: simple centered difference in x
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i,j,k,mf_comp) = 0.5 * (p_arr(i+1,j,k) - p_arr(i-1,j,k)) * dxInv[0];
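// y-component of the pressure gradient, analogous to the x-component:
//   dp/dy |_face = dp/deta - (h_eta / h_zeta) * dp/dzeta,
// again falling back to a simple centered difference when there is no terrain.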
auto dxInv = geom[lev].InvCellSizeArray();

#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& gbx = mfi.growntilebox(1);
    const Array4<Real      >& p_arr   = pres.array(mfi);
    const Array4<Real const>& hse_arr = base_state[lev].const_array(mfi);
    ParallelFor(gbx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
        p_arr(i,j,k) = hse_arr(i,j,k,1);
    ParallelFor(gbx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {

pres.FillBoundary(geom[lev].periodicity());

#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real>& derdat = mf[lev].array(mfi);
    const Array4<Real>& p_arr  = pres.array(mfi);
    const Array4<Real const>& z_nd = z_phys_nd[lev]->const_array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        // Lo y-face
        Real gp_eta_lo = dxInv[1] * (p_arr(i,j,k) - p_arr(i,j-1,k));
        Real gp_zeta_on_jface_lo;
        if (k == klo) {
            gp_zeta_on_jface_lo = 0.5 * dxInv[2] * (  p_arr(i,j,k+1) + p_arr(i,j-1,k+1)
                                                    - p_arr(i,j,k  ) - p_arr(i,j-1,k  ) );
        } else if (k == khi) {
            gp_zeta_on_jface_lo = 0.5 * dxInv[2] * (  p_arr(i,j,k  ) + p_arr(i,j-1,k  )
                                                    - p_arr(i,j,k-1) - p_arr(i,j-1,k-1) );
        } else {
            gp_zeta_on_jface_lo = 0.25 * dxInv[2] * (  p_arr(i,j,k+1) + p_arr(i,j-1,k+1)
                                                     - p_arr(i,j,k-1) - p_arr(i,j-1,k-1) );
        }
        Real gpy_lo = gp_eta_lo - (met_h_eta_lo / met_h_zeta_lo) * gp_zeta_on_jface_lo;

        // Hi y-face
        Real gp_eta_hi = dxInv[1] * (p_arr(i,j+1,k) - p_arr(i,j,k));
        Real gp_zeta_on_jface_hi;
        if (k == klo) {
            gp_zeta_on_jface_hi = 0.5 * dxInv[2] * (  p_arr(i,j+1,k+1) + p_arr(i,j,k+1)
                                                    - p_arr(i,j+1,k  ) - p_arr(i,j,k  ) );
        } else if (k == khi) {
            gp_zeta_on_jface_hi = 0.5 * dxInv[2] * (  p_arr(i,j+1,k  ) + p_arr(i,j,k  )
                                                    - p_arr(i,j+1,k-1) - p_arr(i,j,k-1) );
        } else {
            gp_zeta_on_jface_hi = 0.25 * dxInv[2] * (  p_arr(i,j+1,k+1) + p_arr(i,j,k+1)
                                                     - p_arr(i,j+1,k-1) - p_arr(i,j,k-1) );
        }
        Real gpy_hi = gp_eta_hi - (met_h_eta_hi / met_h_zeta_hi) * gp_zeta_on_jface_hi;

        derdat(i,j,k,mf_comp) = 0.5 * (gpy_lo + gpy_hi);

    // Without terrain: simple centered difference in y
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i,j,k,mf_comp) = 0.5 * (p_arr(i,j+1,k) - p_arr(i,j-1,k)) * dxInv[1];
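// The same x- and y-gradient stencils are applied next to the hydrostatic base-state pressure
// p_hse (terrain-aware version first, then the plain centered difference).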
auto dxInv = geom[lev].InvCellSizeArray();

#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real      >& derdat = mf[lev].array(mfi);
    const Array4<Real const>& p_arr  = p_hse.const_array(mfi);
    const Array4<Real const>& z_nd   = z_phys_nd[lev]->const_array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        // Lo x-face
        Real gp_xi_lo = dxInv[0] * (p_arr(i,j,k) - p_arr(i-1,j,k));
        Real gp_zeta_on_iface_lo;
        if (k == klo) {
            gp_zeta_on_iface_lo = 0.5 * dxInv[2] * (  p_arr(i-1,j,k+1) + p_arr(i,j,k+1)
                                                    - p_arr(i-1,j,k  ) - p_arr(i,j,k  ) );
        } else if (k == khi) {
            gp_zeta_on_iface_lo = 0.5 * dxInv[2] * (  p_arr(i-1,j,k  ) + p_arr(i,j,k  )
                                                    - p_arr(i-1,j,k-1) - p_arr(i,j,k-1) );
        } else {
            gp_zeta_on_iface_lo = 0.25 * dxInv[2] * (  p_arr(i-1,j,k+1) + p_arr(i,j,k+1)
                                                     - p_arr(i-1,j,k-1) - p_arr(i,j,k-1) );
        }
        Real gpx_lo = gp_xi_lo - (met_h_xi_lo / met_h_zeta_lo) * gp_zeta_on_iface_lo;

        // Hi x-face
        Real gp_xi_hi = dxInv[0] * (p_arr(i+1,j,k) - p_arr(i,j,k));
        Real gp_zeta_on_iface_hi;
        if (k == klo) {
            gp_zeta_on_iface_hi = 0.5 * dxInv[2] * (  p_arr(i+1,j,k+1) + p_arr(i,j,k+1)
                                                    - p_arr(i+1,j,k  ) - p_arr(i,j,k  ) );
        } else if (k == khi) {
            gp_zeta_on_iface_hi = 0.5 * dxInv[2] * (  p_arr(i+1,j,k  ) + p_arr(i,j,k  )
                                                    - p_arr(i+1,j,k-1) - p_arr(i,j,k-1) );
        } else {
            gp_zeta_on_iface_hi = 0.25 * dxInv[2] * (  p_arr(i+1,j,k+1) + p_arr(i,j,k+1)
                                                     - p_arr(i+1,j,k-1) - p_arr(i,j,k-1) );
        }
        Real gpx_hi = gp_xi_hi - (met_h_xi_hi / met_h_zeta_hi) * gp_zeta_on_iface_hi;

        derdat(i,j,k,mf_comp) = 0.5 * (gpx_lo + gpx_hi);

// Without terrain: simple centered difference of p_hse in x
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real      >& derdat = mf[lev].array(mfi);
    const Array4<Real const>& p_arr  = p_hse.const_array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i,j,k,mf_comp) = 0.5 * (p_arr(i+1,j,k) - p_arr(i-1,j,k)) * dxInv[0];
auto dxInv = geom[lev].InvCellSizeArray();

#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real      >& derdat = mf[lev].array(mfi);
    const Array4<Real const>& p_arr  = p_hse.const_array(mfi);
    const Array4<Real const>& z_nd   = z_phys_nd[lev]->const_array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        // Lo y-face
        Real gp_eta_lo = dxInv[1] * (p_arr(i,j,k) - p_arr(i,j-1,k));
        Real gp_zeta_on_jface_lo;
        if (k == klo) {
            gp_zeta_on_jface_lo = 0.5 * dxInv[2] * (  p_arr(i,j,k+1) + p_arr(i,j-1,k+1)
                                                    - p_arr(i,j,k  ) - p_arr(i,j-1,k  ) );
        } else if (k == khi) {
            gp_zeta_on_jface_lo = 0.5 * dxInv[2] * (  p_arr(i,j,k  ) + p_arr(i,j-1,k  )
                                                    - p_arr(i,j,k-1) - p_arr(i,j-1,k-1) );
        } else {
            gp_zeta_on_jface_lo = 0.25 * dxInv[2] * (  p_arr(i,j,k+1) + p_arr(i,j-1,k+1)
                                                     - p_arr(i,j,k-1) - p_arr(i,j-1,k-1) );
        }
        Real gpy_lo = gp_eta_lo - (met_h_eta_lo / met_h_zeta_lo) * gp_zeta_on_jface_lo;

        // Hi y-face
        Real gp_eta_hi = dxInv[1] * (p_arr(i,j+1,k) - p_arr(i,j,k));
        Real gp_zeta_on_jface_hi;
        if (k == klo) {
            gp_zeta_on_jface_hi = 0.5 * dxInv[2] * (  p_arr(i,j+1,k+1) + p_arr(i,j,k+1)
                                                    - p_arr(i,j+1,k  ) - p_arr(i,j,k  ) );
        } else if (k == khi) {
            gp_zeta_on_jface_hi = 0.5 * dxInv[2] * (  p_arr(i,j+1,k  ) + p_arr(i,j,k  )
                                                    - p_arr(i,j+1,k-1) - p_arr(i,j,k-1) );
        } else {
            gp_zeta_on_jface_hi = 0.25 * dxInv[2] * (  p_arr(i,j+1,k+1) + p_arr(i,j,k+1)
                                                     - p_arr(i,j+1,k-1) - p_arr(i,j,k-1) );
        }
        Real gpy_hi = gp_eta_hi - (met_h_eta_hi / met_h_zeta_hi) * gp_zeta_on_jface_hi;

        derdat(i,j,k,mf_comp) = 0.5 * (gpy_lo + gpy_hi);

// Without terrain: simple centered difference of p_hse in y
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real      >& derdat = mf[lev].array(mfi);
    const Array4<Real const>& p_arr  = p_hse.const_array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i,j,k,mf_comp) = 0.5 * (p_arr(i,j+1,k) - p_arr(i,j-1,k)) * dxInv[1];
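// Grid/terrain geometry diagnostics: physical cell-center height z, the metric Jacobian detJ,
// and the map factor at mass points.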
MultiFab::Copy(mf[lev], *z_phys_cc[lev], 0, mf_comp, 1, 0);

MultiFab::Copy(mf[lev], *detJ_cc[lev], 0, mf_comp, 1, 0);

#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real>& derdat = mf[lev].array(mfi);
    const Array4<Real>& mf_m   = mapfac_m[lev]->array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i,j,k,mf_comp) = mf_m(i,j,0);
#ifdef ERF_USE_NETCDF
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real>& derdat = mf[lev].array(mfi);
    const Array4<Real>& data   = lat_m[lev]->array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i,j,k,mf_comp) = data(i,j,0);

#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real>& derdat = mf[lev].array(mfi);
    const Array4<Real>& data   = lon_m[lev]->array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i,j,k,mf_comp) = data(i,j,0);
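// Time-averaged velocities: components 0-2 of vel_t_avg hold the accumulated u, v, w and
// component 3 presumably the accumulated speed; each is normalized by `norm` before output.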
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real>& derdat = mf[lev].array(mfi);
    const Array4<Real>& data   = vel_t_avg[lev]->array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i,j,k,mf_comp) = data(i,j,k,0) / norm;

#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real>& derdat = mf[lev].array(mfi);
    const Array4<Real>& data   = vel_t_avg[lev]->array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i,j,k,mf_comp) = data(i,j,k,1) / norm;

#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real>& derdat = mf[lev].array(mfi);
    const Array4<Real>& data   = vel_t_avg[lev]->array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i,j,k,mf_comp) = data(i,j,k,2) / norm;

#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real>& derdat = mf[lev].array(mfi);
    const Array4<Real>& data   = vel_t_avg[lev]->array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i,j,k,mf_comp) = data(i,j,k,3) / norm;
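// Eddy viscosity/diffusivity output: the stored coefficient Kmv is divided by density,
// presumably to report a kinematic value.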
MultiFab dmf(mf[lev], make_alias, mf_comp, 1);

#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for (MFIter mfi(dmf, TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    auto prim = dmf[mfi].array();
    auto const cons = cmf[mfi].const_array();
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        prim(i,j,k) = Kmv / rho;

MultiFab::Copy(mf[lev], *walldist[lev], 0, mf_comp, 1, 0);

MultiFab::Copy(mf[lev], *SFS_diss_lev[lev], 0, mf_comp, 1, 0);
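// Moisture state variables carried in the conserved state: the number of microphysics
// components comes from the active microphysics model, and the requested ones are copied
// component by component.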
int n_qstate = micro->Get_Qstate_Size();

for (int n_comp(n_start); n_comp <= n_end; ++n_comp) {

for (int n_comp(n_start+1); n_comp <= n_end; ++n_comp) {

int n_end = ncomp_cons - 1;

for (int n_comp(n_start+1); n_comp <= n_end; ++n_comp) {
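// Saturation mixing ratio with respect to liquid water, computed point-wise from the local
// temperature and pressure via erf_qsatw().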
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real>& derdat = mf[lev].array(mfi);
    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        erf_qsatw(T, pressure, derdat(i,j,k,mf_comp));
MultiFab::Copy(mf[lev], *(qmoist[lev][0]), 0, mf_comp, 1, 0);

MultiFab::Copy(mf[lev], *(qmoist[lev][0]), 0, mf_comp, 1, 0);
MultiFab::Copy(mf[lev], *(qmoist[lev][1]), 0, mf_comp, 1, 0);
MultiFab::Copy(mf[lev], *(qmoist[lev][2]), 0, mf_comp, 1, 0);
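// Particle diagnostics (per-cell particle counts for each named container, plus any mesh plot
// variables the particle data provides), then the embedded-boundary volume fraction.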
#ifdef ERF_USE_PARTICLES
const auto& particles_namelist( particleData.getNames() );
for (ParticlesNamesVector::size_type i = 0; i < particles_namelist.size(); i++) {
    MultiFab temp_dat(mf[lev].boxArray(), mf[lev].DistributionMap(), 1, 0);
    particleData[particles_namelist[i]]->Increment(temp_dat, lev);
    MultiFab::Copy(mf[lev], temp_dat, 0, mf_comp, 1, 0);

Vector<std::string> particle_mesh_plot_names(0);
particleData.GetMeshPlotVarNames( particle_mesh_plot_names );
for (int i = 0; i < particle_mesh_plot_names.size(); i++) {
    std::string plot_var_name(particle_mesh_plot_names[i]);
    MultiFab temp_dat(mf[lev].boxArray(), mf[lev].DistributionMap(), 1, 1);
    particleData.GetMeshPlotVar(plot_var_name, temp_dat, lev);
    MultiFab::Copy(mf[lev], temp_dat, 0, mf_comp, 1, 0);

MultiFab::Copy(mf[lev], EBFactory(lev).getVolFrac(), 0, mf_comp, 1, 0);

mf[lev].setVal(1.0, mf_comp, 1, 0);   // presumably the no-EB case: volume fraction of 1
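// ERF_COMPUTE_ERROR: for the linear-wave test (wavenumber kp = 2*pi/wavelength, frequency
// omega = sqrt(g*kp)), subtract the analytic velocity from the staggered fields so that the
// averaged, plotted velocities are errors, then add the analytic solution back afterwards.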
#ifdef ERF_COMPUTE_ERROR

Real H = geom[lev].ProbHi()[2];
Real wavelength = 100.;
Real kp = 2. * PI / wavelength;
Real omega = std::sqrt(g * kp);

const auto dx = geom[lev].CellSizeArray();

#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for (MFIter mfi(mf[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.validbox();
    Box xbx(bx); xbx.surroundingNodes(0);

    const Array4<Real const>& z_nd = z_phys_nd[lev]->const_array(mfi);

    // Subtract the exact x-velocity
    ParallelFor(xbx, [=] AMREX_GPU_DEVICE (int i, int j, int k) {
        Real z = 0.25 * (z_nd(i,j,k) + z_nd(i,j+1,k) + z_nd(i,j,k+1) + z_nd(i,j+1,k+1));
        Real z_base = Ampl * std::sin(kp * x - omega_t);
        Real fac = std::cosh( kp * (z - H) ) / std::sinh(kp * H);
        xvel_arr(i,j,k) -= -Ampl * omega * fac * std::sin(kp * x - omega_t);

    // Subtract the exact z-velocity
    ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) {
        Real x = (i + 0.5) * dx[0];
        Real z = 0.25 * ( z_nd(i,j,k) + z_nd(i+1,j,k) + z_nd(i,j+1,k) + z_nd(i+1,j+1,k));
        Real z_base = Ampl * std::sin(kp * x - omega_t);
        Real fac = std::sinh( kp * (z - H) ) / std::sinh(kp * H);
        zvel_arr(i,j,k) -= Ampl * omega * fac * std::cos(kp * x - omega_t);
MultiFab temp_mf(mf[lev].boxArray(), mf[lev].DistributionMap(), AMREX_SPACEDIM, 0);
average_face_to_cellcenter(temp_mf, 0,

MultiFab::Copy(mf[lev], temp_mf, 0, mf_comp, 1, 0);
MultiFab::Copy(mf[lev], temp_mf, 1, mf_comp, 1, 0);
MultiFab::Copy(mf[lev], temp_mf, 2, mf_comp, 1, 0);
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for (MFIter mfi(mf[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.validbox();
    Box xbx(bx); xbx.surroundingNodes(0);

    const Array4<Real const>& z_nd = z_phys_nd[lev]->const_array(mfi);

    // Add the exact solution back to the x-velocity
    ParallelFor(xbx, [=] AMREX_GPU_DEVICE (int i, int j, int k) {
        Real z = 0.25 * (z_nd(i,j,k) + z_nd(i,j+1,k) + z_nd(i,j,k+1) + z_nd(i,j+1,k+1));
        Real z_base = Ampl * std::sin(kp * x - omega_t);
        Real fac = std::cosh( kp * (z - H) ) / std::sinh(kp * H);
        xvel_arr(i,j,k) += -Ampl * omega * fac * std::sin(kp * x - omega_t);

    // Add the exact solution back to the z-velocity
    ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) {
        Real x = (i + 0.5) * dx[0];
        Real z = 0.25 * ( z_nd(i,j,k) + z_nd(i+1,j,k) + z_nd(i,j+1,k) + z_nd(i+1,j+1,k));
        Real z_base = Ampl * std::sin(kp * x - omega_t);
        Real fac = std::sinh( kp * (z - H) ) / std::sinh(kp * H);
        zvel_arr(i,j,k) += Ampl * omega * fac * std::cos(kp * x - omega_t);
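// Error in the perturbational pressure: diagnose p' = p(rho*theta) - p_hse and subtract the
// analytic wave perturbation
//   p'_exact = -(Ampl*omega^2/kp) * cosh(kp*(z - H))/sinh(kp*H) * sin(kp*x - omega*t) * rho0.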
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
for ( MFIter mfi(mf[lev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();
    const Array4<Real>& derdat = mf[lev].array(mfi);
    const Array4<Real const>& p0_arr = p_hse.const_array(mfi);

    const auto dx = geom[lev].CellSizeArray();
    const Array4<Real const>& z_nd   = z_phys_nd[lev]->const_array(mfi);
    const Array4<Real const>& r0_arr = r_hse.const_array(mfi);

    Real H = geom[lev].ProbHi()[2];
    Real wavelength = 100.;
    Real kp = 2. * PI / wavelength;
    Real omega = std::sqrt(g * kp);

    ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
        derdat(i, j, k, mf_comp) = getPgivenRTh(rhotheta) - p0_arr(i,j,k);

        Real rho_hse = r0_arr(i,j,k);

        Real x = (i + 0.5) * dx[0];
        Real z = 0.125 * ( z_nd(i,j,k  ) + z_nd(i+1,j,k  ) + z_nd(i,j+1,k  ) + z_nd(i+1,j+1,k  )
                          +z_nd(i,j,k+1) + z_nd(i+1,j,k+1) + z_nd(i,j+1,k+1) + z_nd(i+1,j+1,k+1) );
        Real z_base = Ampl * std::sin(kp * x - omega_t);
        Real fac = std::cosh( kp * (z - H) ) / std::sinh(kp * H);
        Real pprime_exact = -(Ampl * omega * omega / kp) * fac *
                             std::sin(kp * x - omega_t) * r0_arr(i,j,k);

        derdat(i,j,k,mf_comp) -= pprime_exact;
#ifdef ERF_USE_RRTMGP
MultiFab::Copy(mf[lev], *(qheating_rates[lev]), 0, mf_comp, 1, 0);
MultiFab::Copy(mf[lev], *(qheating_rates[lev]), 1, mf_comp, 1, 0);

for (int lev = 0; lev <= finest_level; ++lev) {
    EB_set_covered(mf[lev], 0.0);
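// Build the nodal MultiFab used by the terrain-aware writer: copy the physical node heights
// into component 2 and subtract the nominal height k*dz, leaving each node's displacement.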
for (int lev(0); lev <= finest_level; ++lev) {
    MultiFab::Copy(mf_nd[lev], *z_phys_nd[lev], 0, 2, 1, 0);
    Real dz = Geom()[lev].CellSizeArray()[2];
    for (MFIter mfi(mf_nd[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi) {
        const Box& bx = mfi.tilebox();
        Array4<Real> mf_arr = mf_nd[lev].array(mfi);
        ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) {
            mf_arr(i,j,k,2) -= k * dz;
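// Construct the plotfile names and write the output; the staggered velocities get their own
// plotfiles ("x_velocity_stag", etc.) alongside the main cell-centered file.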
std::string plotfilename;
std::string plotfilenameU;
std::string plotfilenameV;
std::string plotfilenameW;

} else if (which == 2) {
#ifdef ERF_USE_RRTMGP

if (finest_level == 0)
{
    if (plotfile_type == PlotFileType::Amrex)
    {
        Print() << "Writing native plotfile " << plotfilename << "\n";

        WriteMultiLevelPlotfileWithTerrain(plotfilename, finest_level+1,
                                           GetVecOfConstPtrs(mf),
                                           GetVecOfConstPtrs(mf_nd),

        WriteMultiLevelPlotfile(plotfilename, finest_level+1,
                                GetVecOfConstPtrs(mf),

        Print() << "Writing face velocities" << std::endl;
        WriteMultiLevelPlotfile(plotfilenameU, finest_level+1,
                                GetVecOfConstPtrs(mf_u),
                                {"x_velocity_stag"},
        WriteMultiLevelPlotfile(plotfilenameV, finest_level+1,
                                GetVecOfConstPtrs(mf_v),
                                {"y_velocity_stag"},
        WriteMultiLevelPlotfile(plotfilenameW, finest_level+1,
                                GetVecOfConstPtrs(mf_w),
                                {"z_velocity_stag"},

#ifdef ERF_USE_PARTICLES
        particleData.writePlotFile(plotfilename);

#ifdef ERF_USE_NETCDF
    } else if (plotfile_type == PlotFileType::Netcdf) {
        writeNCPlotFile(lev, l_which, plotfilename, GetVecOfConstPtrs(mf), varnames,
                        istep, t_new[0]);

        Print() << "Writing no plotfile since plotfile_type is none" << std::endl;
if (plotfile_type == PlotFileType::Amrex) {

    int desired_ratio = std::max(std::max(ref_ratio[lev0][0],ref_ratio[lev0][1]),ref_ratio[lev0][2]);
    bool any_ratio_one = ( ( (ref_ratio[lev0][0] == 1) || (ref_ratio[lev0][1] == 1) ) ||
                             (ref_ratio[lev0][2] == 1) );
    for (int lev = 1; lev < finest_level; lev++) {
        any_ratio_one = any_ratio_one ||
                        ( ( (ref_ratio[lev][0] == 1) || (ref_ratio[lev][1] == 1) ) ||
                            (ref_ratio[lev][2] == 1) );

    Vector<IntVect>  r2(finest_level);
    Vector<Geometry> g2(finest_level+1);
    Vector<MultiFab> mf2(finest_level+1);

    mf2[0].define(grids[0], dmap[0], ncomp_mf, 0);

    MultiFab::Copy(mf2[0], mf[0], 0, 0, mf[0].nComp(), 0);

    Array<int,AMREX_SPACEDIM> periodicity =
        {Geom()[lev0].isPeriodic(0), Geom()[lev0].isPeriodic(1), Geom()[lev0].isPeriodic(2)};
    g2[lev0].define(Geom()[lev0].Domain(), &(Geom()[lev0].ProbDomain()), 0, periodicity.data());

    r2[0] = IntVect(desired_ratio/ref_ratio[lev0][0],
                    desired_ratio/ref_ratio[lev0][1],
                    desired_ratio/ref_ratio[lev0][2]);

    for (int lev = 1; lev <= finest_level; ++lev) {
        r2[lev-1][0] = r2[lev-2][0] * desired_ratio / ref_ratio[lev-1][0];
        r2[lev-1][1] = r2[lev-2][1] * desired_ratio / ref_ratio[lev-1][1];
        r2[lev-1][2] = r2[lev-2][2] * desired_ratio / ref_ratio[lev-1][2];

        mf2[lev].define(refine(grids[lev],r2[lev-1]), dmap[lev], ncomp_mf, 0);

        Box d2(Geom()[lev].Domain());
        d2.refine(r2[lev-1]);
        g2[lev].define(d2, &(Geom()[lev].ProbDomain()), 0, periodicity.data());
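// Each level's plot data are interpolated onto the refined grids with piecewise-constant
// (pc_interp) interpolation; a temporary BCRec vector supplies the required boundary-condition
// argument.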
    Vector<BCRec> temp_domain_bcs_type;
    temp_domain_bcs_type.resize(ncomp_mf);

    for (int lev = 1; lev <= finest_level; ++lev) {
        Interpolater* mapper_c = &pc_interp;
        InterpFromCoarseLevel(mf2[lev], t_new[lev], mf[lev],

                              r2[lev-1], mapper_c, temp_domain_bcs_type, 0);

    Vector<IntVect> rr(finest_level);
    for (int lev = 0; lev < finest_level; ++lev) {
        rr[lev] = IntVect(desired_ratio);
    Print() << "Writing plotfile " << plotfilename << "\n";

    WriteMultiLevelPlotfileWithTerrain(plotfilename, finest_level+1,
                                       GetVecOfConstPtrs(mf2),
                                       GetVecOfConstPtrs(mf_nd),

    WriteMultiLevelPlotfile(plotfilename, finest_level+1,
                            GetVecOfConstPtrs(mf2), varnames,

    WriteMultiLevelPlotfileWithTerrain(plotfilename, finest_level+1,
                                       GetVecOfConstPtrs(mf),
                                       GetVecOfConstPtrs(mf_nd),

    WriteMultiLevelPlotfile(plotfilename, finest_level+1,
                            GetVecOfConstPtrs(mf), varnames,

    Print() << "Writing face velocities" << std::endl;
    WriteMultiLevelPlotfile(plotfilenameU, finest_level+1,
                            GetVecOfConstPtrs(mf_u),
                            {"x_velocity_stag"},
    WriteMultiLevelPlotfile(plotfilenameV, finest_level+1,
                            GetVecOfConstPtrs(mf_v),
                            {"y_velocity_stag"},
    WriteMultiLevelPlotfile(plotfilenameW, finest_level+1,
                            GetVecOfConstPtrs(mf_w),
                            {"z_velocity_stag"},
#ifdef ERF_USE_PARTICLES
    particleData.writePlotFile(plotfilename);

#ifdef ERF_USE_NETCDF
} else if (plotfile_type == PlotFileType::Netcdf) {
    for (int lev = 0; lev <= finest_level; ++lev) {

        writeNCPlotFile(lev, which_box, plotfilename, GetVecOfConstPtrs(mf), varnames,
                        istep, t_new[0]);