ERF
Energy Research and Forecasting: An Atmospheric Modeling Code
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
ERF_TI_no_substep_fun.H
Go to the documentation of this file.
1 /**
2  * Wrapper for advancing the solution with the slow RHS in the absence of acoustic substepping
3  */
 // Arguments:
 //   S_sum       : output state, one MultiFab per IntVars type (cons, xmom, ymom, zmom)
 //   S_old       : state at the start of this slow step
 //   F_slow      : slow RHS per IntVars type (pre-scaled by dJ for moving terrain; see comments below)
 //   time_for_fp : time passed to apply_bcs for fill-patching S_sum
 //   slow_dt     : slow (non-acoustic) time step used in the explicit update
 //   nrk         : RK stage index (unused in this no-substep path)
4  auto no_substep_fun = [&](Vector<MultiFab>& S_sum,
5  Vector<MultiFab>& S_old,
6  Vector<MultiFab>& F_slow,
7  const Real time_for_fp, const Real slow_dt,
8  const int nrk)
9  {
10  BL_PROFILE("no_substep_fun");
11  amrex::ignore_unused(nrk);
 // Number of state-variable types (cons, xmom, ymom, zmom)
12  int n_data = IntVars::NumTypes;
13 
14  const auto& dxInv = fine_geom.InvCellSizeArray();
15 
 // Start component / component count of the "fast" variables per type:
 // 2 components of cons (presumably density and rho*theta -- confirm against
 // ERF_IndexDefines.H) plus 1 component for each momentum.
16  const amrex::GpuArray<int, IntVars::NumTypes> scomp_fast = {0,0,0,0};
17  const amrex::GpuArray<int, IntVars::NumTypes> ncomp_fast = {2,1,1,1};
18 
19  if (verbose) amrex::Print() << " No-substepping time integration at level " << level
20  << std::setprecision(timeprecision)
21  << " to " << time_for_fp
22  << " with dt = " << slow_dt << std::endl;
23 
24  // Update S_sum = S_stage only for the fast variables
25 #ifdef _OPENMP
26 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
27 #endif
28  {
29  for ( MFIter mfi(S_sum[IntVars::cons],TilingIfNotGPU()); mfi.isValid(); ++mfi)
30  {
 // Cell-centered tile box for cons; nodal (face-centered) boxes for the momenta
31  const Box bx = mfi.tilebox();
32  Box tbx = mfi.nodaltilebox(0);
33  Box tby = mfi.nodaltilebox(1);
34  Box tbz = mfi.nodaltilebox(2);
35 
 // Host-side tables of Array4 handles, one entry per IntVars type
36  Vector<Array4<Real> > ssum_h(n_data);
37  Vector<Array4<Real> > sold_h(n_data);
38  Vector<Array4<Real> > fslow_h(n_data);
39 
40  for (int i = 0; i < n_data; ++i) {
41  ssum_h[i] = S_sum[i].array(mfi);
42  sold_h[i] = S_old[i].array(mfi);
43  fslow_h[i] = F_slow[i].array(mfi);
44  }
45 
 // Mirror the handle tables on the device so the GPU kernels below can
 // index them by IntVars type through raw pointers
46  Gpu::AsyncVector<Array4<Real> > sold_d(n_data);
47  Gpu::AsyncVector<Array4<Real> > ssum_d(n_data);
48  Gpu::AsyncVector<Array4<Real> > fslow_d(n_data);
49 
50  Gpu::copy(Gpu::hostToDevice, sold_h.begin(), sold_h.end(), sold_d.begin());
51  Gpu::copy(Gpu::hostToDevice, ssum_h.begin(), ssum_h.end(), ssum_d.begin());
52  Gpu::copy(Gpu::hostToDevice, fslow_h.begin(), fslow_h.end(), fslow_d.begin());
53 
54  Array4<Real>* sold = sold_d.dataPtr();
55  Array4<Real>* ssum = ssum_d.dataPtr();
56  Array4<Real>* fslow = fslow_d.dataPtr();
57 
58  // Moving terrain
59  if ( solverChoice.terrain_type == TerrainType::MovingFittedMesh )
60  {
 // Metric terms at the old and new mesh positions: cell-centered Jacobian
 // determinants (dJ) and node-centered physical heights (z_nd)
61  const Array4<const Real>& dJ_old = detJ_cc[level]->const_array(mfi);
62  const Array4<const Real>& dJ_new = detJ_cc_new[level]->const_array(mfi);
63 
64  const Array4<const Real>& z_nd_old = z_phys_nd[level]->const_array(mfi);
65  const Array4<const Real>& z_nd_new = z_phys_nd_new[level]->const_array(mfi);
66 
 // Vertical grid speed of the moving mesh
67  const Array4<Real >& z_t_arr = z_t_rk[level]->array(mfi);
68 
69  // We have already scaled the slow source term to have the extra factor of dJ
 // Conservative update on the time-varying mesh:
 //   (dJ*q)^new = dJ_old*q^old + dt*F   =>   q^new = (dJ_old*q^old + dt*F) / dJ_new
70  ParallelFor(bx, ncomp_fast[IntVars::cons],
71  [=] AMREX_GPU_DEVICE (int i, int j, int k, int nn) {
72  const int n = scomp_fast[IntVars::cons] + nn;
73  ssum[IntVars::cons](i,j,k,n) = dJ_old(i,j,k) * sold[IntVars::cons](i,j,k,n)
74  + slow_dt * fslow[IntVars::cons](i,j,k,n);
75  ssum[IntVars::cons](i,j,k,n) /= dJ_new(i,j,k);
76  });
77 
78  // We have already scaled the slow source term to have the extra factor of dJ
 // Horizontal momenta use the face metric h_zeta (not dJ) as the weight
79  ParallelFor(tbx, tby,
80  [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
81  Real h_zeta_old = Compute_h_zeta_AtIface(i, j, k, dxInv, z_nd_old);
82  Real h_zeta_new = Compute_h_zeta_AtIface(i, j, k, dxInv, z_nd_new);
83  ssum[IntVars::xmom](i,j,k) = ( h_zeta_old * sold[IntVars::xmom](i,j,k)
84  + slow_dt * fslow[IntVars::xmom](i,j,k) ) / h_zeta_new;
85  },
86  [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
87  Real h_zeta_old = Compute_h_zeta_AtJface(i, j, k, dxInv, z_nd_old);
88  Real h_zeta_new = Compute_h_zeta_AtJface(i, j, k, dxInv, z_nd_new);
89  ssum[IntVars::ymom](i,j,k) = ( h_zeta_old * sold[IntVars::ymom](i,j,k)
90  + slow_dt * fslow[IntVars::ymom](i,j,k) ) / h_zeta_new;
91  });
 // NOTE: this kernel reads the ssum xmom/ymom values written above, so it
 // must launch after the horizontal-momentum update
92  ParallelFor(tbz,
93  [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
94  if (k == 0) {
95  // Here we take advantage of the fact that moving terrain has a slip wall
96  // so we can just use the new value at (i,j,0).
 // Bottom face: set rho*w consistent with the mesh motion by converting
 // omega = rho * z_t to W using the freshly updated horizontal momenta
97  Real rho_on_face = ssum[IntVars::cons](i,j,k,Rho_comp);
98  ssum[IntVars::zmom](i,j,k) = WFromOmega(i,j,k,rho_on_face*z_t_arr(i,j,k),
99  ssum[IntVars::xmom], ssum[IntVars::ymom],
100  z_nd_new,dxInv);
101  } else {
 // Interior z-faces: average the cell-centered Jacobians to the face
102  Real dJ_old_kface = 0.5 * (dJ_old(i,j,k) + dJ_old(i,j,k-1));
103  Real dJ_new_kface = 0.5 * (dJ_new(i,j,k) + dJ_new(i,j,k-1));
104  ssum[IntVars::zmom](i,j,k) = ( dJ_old_kface * sold[IntVars::zmom](i,j,k)
105  + slow_dt * fslow[IntVars::zmom](i,j,k) ) / dJ_new_kface;
106  }
107  });
108 
109  } else { // Fixed or no terrain
110  const Array4<const Real>& dJ_old = detJ_cc[level]->const_array(mfi);
 // Plain forward-Euler update of the fast cons components; cells with
 // dJ <= 0 are skipped (presumably cells excluded from the solve, e.g.
 // covered EB cells -- confirm against detJ_cc construction)
111  ParallelFor(bx, ncomp_fast[IntVars::cons],
112  [=] AMREX_GPU_DEVICE (int i, int j, int k, int nn) {
113  const int n = scomp_fast[IntVars::cons] + nn;
114  if (dJ_old(i,j,k) > 0.0) {
115  ssum[IntVars::cons](i,j,k,n) = sold[IntVars::cons](i,j,k,n) + slow_dt *
116  ( fslow[IntVars::cons](i,j,k,n) );
117  }
118  });
119 
120  // Commenting out the update is a HACK while developing the EB capability
121  if (solverChoice.terrain_type == TerrainType::EB)
122  {
 // EB path: momentum is deliberately NOT advanced (old values copied
 // through) until the face-centered EB update is implemented
123  ParallelFor(tbx, tby, tbz,
124  [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
125  ssum[IntVars::xmom](i,j,k) = sold[IntVars::xmom](i,j,k);
126  },
127  [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
128  ssum[IntVars::ymom](i,j,k) = sold[IntVars::ymom](i,j,k);
129  },
130  [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
131  ssum[IntVars::zmom](i,j,k) = sold[IntVars::zmom](i,j,k);
132  });
133 
134  } else {
 // Forward-Euler update of all three momentum components
135  ParallelFor(tbx, tby, tbz,
136  [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
137  ssum[IntVars::xmom](i,j,k) = sold[IntVars::xmom](i,j,k)
138  + slow_dt * fslow[IntVars::xmom](i,j,k);
139  },
140  [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
141  ssum[IntVars::ymom](i,j,k) = sold[IntVars::ymom](i,j,k)
142  + slow_dt * fslow[IntVars::ymom](i,j,k);
143  },
144  [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
145  ssum[IntVars::zmom](i,j,k) = sold[IntVars::zmom](i,j,k)
146  + slow_dt * fslow[IntVars::zmom](i,j,k);
147  });
148  } // no EB
149  } // not moving terrain
150  } // mfi
151 
152  if (solverChoice.terrain_type == TerrainType::EB)
153  {
154  // Redistribute cons states (cell-centered)
155 
 // Work copy of the cons RHS built on the EB factory: zeroed, copied from
 // F_slow (including ghosts), ghost cells exchanged, then exterior domain
 // cells tagged with a large sentinel (1.234e10) -- NOTE(review): the
 // sentinel is presumably recognized and ignored by redistribute_term;
 // confirm against ERF_EBRedistribute.cpp
156  MultiFab dUdt_tmp(ba, dm, F_slow[IntVars::cons].nComp(), F_slow[IntVars::cons].nGrow(), MFInfo(), EBFactory(level));
157  dUdt_tmp.setVal(0.);
158  MultiFab::Copy(dUdt_tmp, F_slow[IntVars::cons], 0, 0, F_slow[IntVars::cons].nComp(), F_slow[IntVars::cons].nGrow());
159  dUdt_tmp.FillBoundary(fine_geom.periodicity());
160  dUdt_tmp.setDomainBndry(1.234e10, 0, ncomp_fast[IntVars::cons], fine_geom);
161 
162  BCRec const* bc_ptr_d = domain_bcs_type_d.data();
 // S_old also needs valid ghost data (and the same sentinel on the domain
 // boundary) because redistribute_term reads it as the reference state
163  S_old[IntVars::cons].FillBoundary(fine_geom.periodicity());
164  S_old[IntVars::cons].setDomainBndry(1.234e10, 0, ncomp_fast[IntVars::cons], fine_geom);
165 
166  // Update F_slow by Redistribution.
167  redistribute_term ( ncomp_fast[IntVars::cons], fine_geom, F_slow[IntVars::cons], dUdt_tmp,
168  S_old[IntVars::cons], EBFactory(level), bc_ptr_d, slow_dt);
169 
170  // Update state using the updated F_slow. (NOTE: redistribute_term returns RHS not state variables.)
171 
172  for ( MFIter mfi(S_sum[IntVars::cons],TilingIfNotGPU()); mfi.isValid(); ++mfi)
173  {
173  // Second pass over tiles: redo the cons update with the redistributed RHS
174  const Box bx = mfi.tilebox();
175  Vector<Array4<Real> > ssum_h(n_data);
176  Vector<Array4<Real> > sold_h(n_data);
177  Vector<Array4<Real> > fslow_h(n_data);
178  const Array4<const Real>& dJ_old = detJ_cc[level]->const_array(mfi);
179 
180  for (int i = 0; i < n_data; ++i) {
181  ssum_h[i] = S_sum[i].array(mfi);
182  sold_h[i] = S_old[i].array(mfi);
183  fslow_h[i] = F_slow[i].array(mfi);
184  }
185 
 // Same host-to-device mirroring of the Array4 handle tables as above
186  Gpu::AsyncVector<Array4<Real> > sold_d(n_data);
187  Gpu::AsyncVector<Array4<Real> > ssum_d(n_data);
188  Gpu::AsyncVector<Array4<Real> > fslow_d(n_data);
189 
190  Gpu::copy(Gpu::hostToDevice, sold_h.begin(), sold_h.end(), sold_d.begin());
191  Gpu::copy(Gpu::hostToDevice, ssum_h.begin(), ssum_h.end(), ssum_d.begin());
192  Gpu::copy(Gpu::hostToDevice, fslow_h.begin(), fslow_h.end(), fslow_d.begin());
193 
194  Array4<Real>* sold = sold_d.dataPtr();
195  Array4<Real>* ssum = ssum_d.dataPtr();
196  Array4<Real>* fslow = fslow_d.dataPtr();
197 
 // Overwrite the earlier cons update using the redistributed F_slow
198  ParallelFor(bx, ncomp_fast[IntVars::cons], [=] AMREX_GPU_DEVICE (int i, int j, int k, int nn)
199  {
200  const int n = scomp_fast[IntVars::cons] + nn;
201  if (dJ_old(i,j,k) > 0.0) {
202  ssum[IntVars::cons](i,j,k,n) = sold[IntVars::cons](i,j,k,n) + slow_dt *
203  ( fslow[IntVars::cons](i,j,k,n) );
204  }
205  });
206  }
207 
208  // Redistribute momentum states (face-centered) will be added here.
209  } // EB
210  } // omp
211 
212  // Even if we update all the conserved variables we don't need
213  // to fillpatch the slow ones every acoustic substep
214  apply_bcs(S_sum, time_for_fp, S_sum[IntVars::cons].nGrow(), S_sum[IntVars::xmom].nGrow(),
215  fast_only=true, vel_and_mom_synced=false);
216 
217  if (solverChoice.anelastic[level]) {
 // Anelastic runs: project velocities so the divergence constraint holds;
 // use the thin-body variant when any thin immersed-body force is active
218  bool have_tb = (thin_xforce[0] || thin_yforce[0] || thin_zforce[0]);
219  if (have_tb) {
220  project_velocities_tb(level, slow_dt, S_sum, pp_inc[level]);
221  } else {
222  project_velocities(level, slow_dt, S_sum, pp_inc[level]);
223  }
224  }
225  };
void redistribute_term(int ncomp, const Geometry &geom, MultiFab &result, MultiFab &result_tmp, MultiFab const &state, EBFArrayBoxFactory const &ebfact, BCRec const *bc, Real const local_dt)
Definition: ERF_EBRedistribute.cpp:11
#define Rho_comp
Definition: ERF_IndexDefines.H:36
auto no_substep_fun
Definition: ERF_TI_no_substep_fun.H:4
auto apply_bcs
Definition: ERF_TI_utils.H:50
AMREX_GPU_DEVICE AMREX_FORCE_INLINE amrex::Real WFromOmega(int i, int j, int k, amrex::Real omega, amrex::Real u, amrex::Real v, const amrex::Array4< const amrex::Real > &z_nd, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > &dxInv)
Definition: ERF_TerrainMetrics.H:414
AMREX_GPU_DEVICE AMREX_FORCE_INLINE amrex::Real Compute_h_zeta_AtIface(const int &i, const int &j, const int &k, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > &cellSizeInv, const amrex::Array4< const amrex::Real > &z_nd)
Definition: ERF_TerrainMetrics.H:94
AMREX_GPU_DEVICE AMREX_FORCE_INLINE amrex::Real Compute_h_zeta_AtJface(const int &i, const int &j, const int &k, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > &cellSizeInv, const amrex::Array4< const amrex::Real > &z_nd)
Definition: ERF_TerrainMetrics.H:137
@ NumTypes
Definition: ERF_IndexDefines.H:154
@ ymom
Definition: ERF_IndexDefines.H:152
@ cons
Definition: ERF_IndexDefines.H:150
@ zmom
Definition: ERF_IndexDefines.H:153
@ xmom
Definition: ERF_IndexDefines.H:151