ERF
Energy Research and Forecasting: An Atmospheric Modeling Code
ERF_TI_no_substep_fun.H
Go to the documentation of this file.
/**
 * Wrapper lambda for advancing the solution with the slow RHS in the absence of
 * acoustic substepping: each "fast" variable is advanced in one shot,
 *
 *     S_sum = S_old + slow_dt * F_slow
 *
 * with the appropriate metric factors (detJ / h_zeta) applied for moving
 * terrain, and volume-fraction guards / flux redistribution applied for EB.
 *
 * @param S_sum       [out] updated state, one MultiFab per IntVars type
 * @param S_old       [in]  state at the start of the slow step
 * @param F_slow      [in]  slow RHS (already scaled by detJ for moving terrain)
 * @param time_for_fp [in]  time passed to apply_bcs for fillpatching
 * @param slow_dt     [in]  slow time step
 * @param nrk         [in]  RK stage index (unused in this wrapper)
 */
 auto no_substep_fun = [&](Vector<MultiFab>& S_sum,
                           Vector<MultiFab>& S_old,
                           Vector<MultiFab>& F_slow,
                           const Real time_for_fp, const Real slow_dt,
                           const int nrk)
 {
     BL_PROFILE("no_substep_fun");
     amrex::ignore_unused(nrk);
     int n_data = IntVars::NumTypes;

     const auto& dxInv = fine_geom.InvCellSizeArray();

     // Per-variable-type starting component and component count updated here:
     // 2 conserved components (presumably rho and rho*theta -- confirm against
     // ERF_IndexDefines.H) and one component for each of the three momenta.
     const amrex::GpuArray<int, IntVars::NumTypes> scomp_fast = {0,0,0,0};
     const amrex::GpuArray<int, IntVars::NumTypes> ncomp_fast = {2,1,1,1};

     if (verbose) amrex::Print() << " No-substepping time integration at level " << level
                                 << std::setprecision(timeprecision)
                                 << " to " << time_for_fp
                                 << " with dt = " << slow_dt << std::endl;

     // Update S_sum = S_old + slow_dt * F_slow for the fast variables only
#ifdef _OPENMP
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
#endif
     {
     for ( MFIter mfi(S_sum[IntVars::cons],TilingIfNotGPU()); mfi.isValid(); ++mfi)
     {
         // Cell-centered tile for the conserved variables; boxes nodal in
         // x/y/z respectively for the three face-centered momenta.
         const Box bx = mfi.tilebox();
         Box tbx = mfi.nodaltilebox(0);
         Box tby = mfi.nodaltilebox(1);
         Box tbz = mfi.nodaltilebox(2);

         // Gather one Array4 per IntVars type on the host ...
         Vector<Array4<Real> > ssum_h(n_data);
         Vector<Array4<Real> > sold_h(n_data);
         Vector<Array4<Real> > fslow_h(n_data);

         for (int i = 0; i < n_data; ++i) {
             ssum_h[i] = S_sum[i].array(mfi);
             sold_h[i] = S_old[i].array(mfi);
             fslow_h[i] = F_slow[i].array(mfi);
         }

         // ... then copy the Array4 tables to device memory so the kernels
         // below can index them by IntVars type.
         Gpu::AsyncVector<Array4<Real> > sold_d(n_data);
         Gpu::AsyncVector<Array4<Real> > ssum_d(n_data);
         Gpu::AsyncVector<Array4<Real> > fslow_d(n_data);

         Gpu::copy(Gpu::hostToDevice, sold_h.begin(), sold_h.end(), sold_d.begin());
         Gpu::copy(Gpu::hostToDevice, ssum_h.begin(), ssum_h.end(), ssum_d.begin());
         Gpu::copy(Gpu::hostToDevice, fslow_h.begin(), fslow_h.end(), fslow_d.begin());

         Array4<Real>* sold = sold_d.dataPtr();
         Array4<Real>* ssum = ssum_d.dataPtr();
         Array4<Real>* fslow = fslow_d.dataPtr();

         // Moving terrain: the update must account for the change of the
         // metric Jacobian (detJ) between the old and new terrain positions.
         if ( solverChoice.terrain_type == TerrainType::MovingFittedMesh )
         {
             const Array4<const Real>& dJ_old = detJ_cc[level]->const_array(mfi);
             const Array4<const Real>& dJ_new = detJ_cc_new[level]->const_array(mfi);

             const Array4<const Real>& z_nd_old = z_phys_nd[level]->const_array(mfi);
             const Array4<const Real>& z_nd_new = z_phys_nd_new[level]->const_array(mfi);

             const Array4<const Real>& mf_u = mapfac[level][MapFacType::u_x]->const_array(mfi);
             const Array4<const Real>& mf_v = mapfac[level][MapFacType::v_y]->const_array(mfi);

             // Terrain vertical velocity z_t for this RK stage
             const Array4<Real >& z_t_arr = z_t_rk[level]->array(mfi);

             // We have already scaled the slow source term to have the extra factor of dJ,
             // so here: S_new = (dJ_old * S_old + dt * F_slow) / dJ_new
             ParallelFor(bx, ncomp_fast[IntVars::cons],
             [=] AMREX_GPU_DEVICE (int i, int j, int k, int nn) {
                 const int n = scomp_fast[IntVars::cons] + nn;
                 ssum[IntVars::cons](i,j,k,n) = dJ_old(i,j,k) * sold[IntVars::cons](i,j,k,n)
                                              + slow_dt * fslow[IntVars::cons](i,j,k,n);
                 ssum[IntVars::cons](i,j,k,n) /= dJ_new(i,j,k);
             });

             // We have already scaled the slow source term to have the extra factor of dJ.
             // For the horizontal momenta the relevant metric factor is h_zeta
             // evaluated at the corresponding face.
             ParallelFor(tbx, tby,
             [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
                 Real h_zeta_old = Compute_h_zeta_AtIface(i, j, k, dxInv, z_nd_old);
                 Real h_zeta_new = Compute_h_zeta_AtIface(i, j, k, dxInv, z_nd_new);
                 ssum[IntVars::xmom](i,j,k) = ( h_zeta_old * sold[IntVars::xmom](i,j,k)
                                              + slow_dt * fslow[IntVars::xmom](i,j,k) ) / h_zeta_new;
             },
             [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
                 Real h_zeta_old = Compute_h_zeta_AtJface(i, j, k, dxInv, z_nd_old);
                 Real h_zeta_new = Compute_h_zeta_AtJface(i, j, k, dxInv, z_nd_new);
                 ssum[IntVars::ymom](i,j,k) = ( h_zeta_old * sold[IntVars::ymom](i,j,k)
                                              + slow_dt * fslow[IntVars::ymom](i,j,k) ) / h_zeta_new;
             });
             // Vertical momentum: at the bottom face the value is set from the
             // terrain motion; elsewhere detJ is averaged to the k-face.
             ParallelFor(tbz,
             [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
                 if (k == 0) {
                     // Here we take advantage of the fact that moving terrain has a slip wall
                     // so we can just use the new value at (i,j,0).
                     Real rho_on_face = ssum[IntVars::cons](i,j,k,Rho_comp);
                     ssum[IntVars::zmom](i,j,k) = WFromOmega(i,j,k,rho_on_face*z_t_arr(i,j,k),
                                                             ssum[IntVars::xmom], ssum[IntVars::ymom],
                                                             mf_u, mf_v, z_nd_new,dxInv);
                 } else {
                     Real dJ_old_kface = 0.5 * (dJ_old(i,j,k) + dJ_old(i,j,k-1));
                     Real dJ_new_kface = 0.5 * (dJ_new(i,j,k) + dJ_new(i,j,k-1));
                     ssum[IntVars::zmom](i,j,k) = ( dJ_old_kface * sold[IntVars::zmom](i,j,k)
                                                  + slow_dt * fslow[IntVars::zmom](i,j,k) ) / dJ_new_kface;
                 }
             });

         } else { // Fixed or no terrain
             // Only update cells with positive detJ (dJ_old <= 0 marks cells
             // excluded from the update, e.g. cut/covered cells -- presumably;
             // confirm against how detJ_cc is filled).
             const Array4<const Real>& dJ_old = detJ_cc[level]->const_array(mfi);
             ParallelFor(bx, ncomp_fast[IntVars::cons],
             [=] AMREX_GPU_DEVICE (int i, int j, int k, int nn) {
                 const int n = scomp_fast[IntVars::cons] + nn;
                 if (dJ_old(i,j,k) > 0.0) {
                     ssum[IntVars::cons](i,j,k,n) = sold[IntVars::cons](i,j,k,n) + slow_dt *
                         ( fslow[IntVars::cons](i,j,k,n) );
                 } else {
                     ssum[IntVars::cons](i,j,k,n) = sold[IntVars::cons](i,j,k,n);
                 }
             });

             // Commenting out the update is a HACK while developing the EB capability
             if (solverChoice.terrain_type == TerrainType::EB)
             {
                 // Face-based volume fractions: momenta are only advanced on
                 // faces with nonzero open area; fully covered faces keep the
                 // old value.
                 const Array4<const Real>& vfrac_u = (get_eb(level).get_u_const_factory())->getVolFrac().const_array(mfi);
                 const Array4<const Real>& vfrac_v = (get_eb(level).get_v_const_factory())->getVolFrac().const_array(mfi);
                 const Array4<const Real>& vfrac_w = (get_eb(level).get_w_const_factory())->getVolFrac().const_array(mfi);

                 ParallelFor(tbx, tby, tbz,
                 [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
                     if (vfrac_u(i,j,k) > 0.0) {
                         // ssum[IntVars::xmom](i,j,k) = sold[IntVars::xmom](i,j,k);
                         ssum[IntVars::xmom](i,j,k) = sold[IntVars::xmom](i,j,k)
                                                    + slow_dt * fslow[IntVars::xmom](i,j,k);
                     } else {
                         ssum[IntVars::xmom](i,j,k) = sold[IntVars::xmom](i,j,k);
                     }
                 },
                 [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
                     if (vfrac_v(i,j,k) > 0.0) {
                         // ssum[IntVars::ymom](i,j,k) = sold[IntVars::ymom](i,j,k);
                         ssum[IntVars::ymom](i,j,k) = sold[IntVars::ymom](i,j,k)
                                                    + slow_dt * fslow[IntVars::ymom](i,j,k);
                     } else {
                         ssum[IntVars::ymom](i,j,k) = sold[IntVars::ymom](i,j,k);
                     }
                 },
                 [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
                     if (vfrac_w(i,j,k) > 0.0) {
                         // ssum[IntVars::zmom](i,j,k) = sold[IntVars::zmom](i,j,k);
                         ssum[IntVars::zmom](i,j,k) = sold[IntVars::zmom](i,j,k)
                                                    + slow_dt * fslow[IntVars::zmom](i,j,k);
                     } else {
                         ssum[IntVars::zmom](i,j,k) = sold[IntVars::zmom](i,j,k);
                     }
                 });

             } else {
                 // No EB: plain forward-Euler update of the three momenta.
                 ParallelFor(tbx, tby, tbz,
                 [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
                     ssum[IntVars::xmom](i,j,k) = sold[IntVars::xmom](i,j,k)
                                                + slow_dt * fslow[IntVars::xmom](i,j,k);
                 },
                 [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
                     ssum[IntVars::ymom](i,j,k) = sold[IntVars::ymom](i,j,k)
                                                + slow_dt * fslow[IntVars::ymom](i,j,k);
                 },
                 [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
                     ssum[IntVars::zmom](i,j,k) = sold[IntVars::zmom](i,j,k)
                                                + slow_dt * fslow[IntVars::zmom](i,j,k);
                 });
             } // no EB
         } // not moving terrain
     } // mfi

     // NOTE(review): this EB section still sits inside the `omp parallel`
     // region opened above. The MultiFab-level collective operations below
     // (define, Copy, FillBoundary, redistribute_term) would then run once
     // per OpenMP thread on CPU builds -- confirm this is intended or hoist
     // the section out of the parallel region.
     if (solverChoice.terrain_type == TerrainType::EB)
     {
         BL_PROFILE("no_substep_fun_redistribute");
         // Redistribute cons states (cell-centered)

         // Scratch copies of the RHS with ghost cells filled, used as input
         // to the redistribution.
         Vector<MultiFab> dUdt(IntVars::NumTypes);
         dUdt[IntVars::cons].define(ba, dm, F_slow[IntVars::cons].nComp(), F_slow[IntVars::cons].nGrow(), MFInfo(), EBFactory(level));
         for (int i = 1; i <= AMREX_SPACEDIM; ++i) {
             dUdt[i].define(F_slow[i].boxArray(), F_slow[i].DistributionMap(), F_slow[i].nComp(), F_slow[i].nGrow(), MFInfo());
         }
         for (int i = 0; i <= AMREX_SPACEDIM; ++i) {
             dUdt[i].setVal(0.0, 0, ncomp_fast[i], dUdt[i].nGrow());
             MultiFab::Copy(dUdt[i], F_slow[i], 0, 0, F_slow[i].nComp(), 0);
             dUdt[i].FillBoundary(fine_geom.periodicity());
             // 1.234e10 is presumably a poison value marking unfilled domain
             // boundary cells -- verify against how redistribute_term treats it.
             dUdt[i].setDomainBndry(1.234e10, 0, ncomp_fast[i], fine_geom);
         }

         BCRec const* bc_ptr_d = domain_bcs_type_d.data();

         // Update F_slow by Redistribution.

         redistribute_term ( ncomp_fast[IntVars::cons], fine_geom, F_slow[IntVars::cons], dUdt[IntVars::cons],
                             S_old[IntVars::cons], EBFactory(level), bc_ptr_d, slow_dt);
         redistribute_term ( ncomp_fast[IntVars::xmom], fine_geom, F_slow[IntVars::xmom], dUdt[IntVars::xmom],
                             S_old[IntVars::xmom], *(get_eb(level).get_u_const_factory()), bc_ptr_d, slow_dt, IntVars::xmom);
         redistribute_term ( ncomp_fast[IntVars::ymom], fine_geom, F_slow[IntVars::ymom], dUdt[IntVars::ymom],
                             S_old[IntVars::ymom], *(get_eb(level).get_v_const_factory()), bc_ptr_d, slow_dt, IntVars::ymom);
         redistribute_term ( ncomp_fast[IntVars::zmom], fine_geom, F_slow[IntVars::zmom], dUdt[IntVars::zmom],
                             S_old[IntVars::zmom], *(get_eb(level).get_w_const_factory()), bc_ptr_d, slow_dt, IntVars::zmom);

         // Update state using the updated F_slow. (NOTE: redistribute_term returns RHS not state variables.)

         for ( MFIter mfi(S_sum[IntVars::cons],TilingIfNotGPU()); mfi.isValid(); ++mfi)
         {
             const Box bx = mfi.tilebox();
             Box tbx = mfi.nodaltilebox(0);
             Box tby = mfi.nodaltilebox(1);
             Box tbz = mfi.nodaltilebox(2);

             // Same host-gather / device-copy staging of Array4 tables as in
             // the first MFIter loop above.
             Vector<Array4<Real> > ssum_h(n_data);
             Vector<Array4<Real> > sold_h(n_data);
             Vector<Array4<Real> > fslow_h(n_data);

             for (int i = 0; i < n_data; ++i) {
                 ssum_h[i] = S_sum[i].array(mfi);
                 sold_h[i] = S_old[i].array(mfi);
                 fslow_h[i] = F_slow[i].array(mfi);
             }

             Gpu::AsyncVector<Array4<Real> > sold_d(n_data);
             Gpu::AsyncVector<Array4<Real> > ssum_d(n_data);
             Gpu::AsyncVector<Array4<Real> > fslow_d(n_data);

             Gpu::copy(Gpu::hostToDevice, sold_h.begin(), sold_h.end(), sold_d.begin());
             Gpu::copy(Gpu::hostToDevice, ssum_h.begin(), ssum_h.end(), ssum_d.begin());
             Gpu::copy(Gpu::hostToDevice, fslow_h.begin(), fslow_h.end(), fslow_d.begin());

             Array4<Real>* sold = sold_d.dataPtr();
             Array4<Real>* ssum = ssum_d.dataPtr();
             Array4<Real>* fslow = fslow_d.dataPtr();

             // const Array4<const Real>& vfrac_c = detJ_cc[level]->const_array(mfi);
             const Array4<const Real>& vfrac_c = (get_eb(level).get_const_factory())->getVolFrac().const_array(mfi);

             // Re-apply the update with the redistributed RHS; covered cells
             // (vfrac == 0) are left holding the value from the first pass.
             ParallelFor(bx, ncomp_fast[IntVars::cons], [=] AMREX_GPU_DEVICE (int i, int j, int k, int nn)
             {
                 const int n = scomp_fast[IntVars::cons] + nn;
                 if (vfrac_c(i,j,k) > 0.0) {
                     ssum[IntVars::cons](i,j,k,n) = sold[IntVars::cons](i,j,k,n) + slow_dt *
                         ( fslow[IntVars::cons](i,j,k,n) );
                 }
             });

             const Array4<const Real>& vfrac_u = (get_eb(level).get_u_const_factory())->getVolFrac().const_array(mfi);
             const Array4<const Real>& vfrac_v = (get_eb(level).get_v_const_factory())->getVolFrac().const_array(mfi);
             const Array4<const Real>& vfrac_w = (get_eb(level).get_w_const_factory())->getVolFrac().const_array(mfi);

             ParallelFor(tbx, tby, tbz,
             [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
                 if (vfrac_u(i,j,k) > 0.0) {
                     ssum[IntVars::xmom](i,j,k) = sold[IntVars::xmom](i,j,k)
                                                + slow_dt * fslow[IntVars::xmom](i,j,k);
                 }
             },
             [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
                 if (vfrac_v(i,j,k) > 0.0) {
                     ssum[IntVars::ymom](i,j,k) = sold[IntVars::ymom](i,j,k)
                                                + slow_dt * fslow[IntVars::ymom](i,j,k);
                 }
             },
             [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept {
                 if (vfrac_w(i,j,k) > 0.0) {
                     ssum[IntVars::zmom](i,j,k) = sold[IntVars::zmom](i,j,k)
                                                + slow_dt * fslow[IntVars::zmom](i,j,k);
                 }
             });
         } // MFIter

     } // EB

     } // omp

     // Even if we update all the conserved variables we don't need
     // to fillpatch the slow ones every acoustic substep
     // NOTE(review): `fast_only=true` / `vel_and_mom_synced=false` are
     // assignments to captured variables used as named-argument emulation --
     // confirm this is the intended idiom (it passes the assigned values).
     apply_bcs(S_sum, time_for_fp, S_sum[IntVars::cons].nGrow(), S_sum[IntVars::xmom].nGrow(),
               fast_only=true, vel_and_mom_synced=false);

     // For anelastic runs the updated momenta must be projected back onto
     // the divergence-free constraint (with the thin-body variant if any
     // immersed thin-body forcing is active on this level).
     if (solverChoice.anelastic[level]) {
         bool have_tb = (thin_xforce[0] || thin_yforce[0] || thin_zforce[0]);
         if (have_tb) {
             project_velocity_tb(level, slow_dt, S_sum);
         } else {
             project_momenta(level, slow_dt, S_sum);
         }
     }
 }; // no_substep_fun
@ v_y
Definition: ERF_DataStruct.H:23
@ u_x
Definition: ERF_DataStruct.H:22
void redistribute_term(int ncomp, const Geometry &geom, MultiFab &result, MultiFab &result_tmp, MultiFab const &state, EBFArrayBoxFactory const &ebfact, BCRec const *bc, Real const local_dt)
Definition: ERF_EBRedistribute.cpp:13
#define Rho_comp
Definition: ERF_IndexDefines.H:36
amrex::Real Real
Definition: ERF_ShocInterface.H:19
auto no_substep_fun
Definition: ERF_TI_no_substep_fun.H:4
auto apply_bcs
Definition: ERF_TI_utils.H:71
AMREX_GPU_DEVICE AMREX_FORCE_INLINE amrex::Real Compute_h_zeta_AtIface(const int &i, const int &j, const int &k, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > &cellSizeInv, const amrex::Array4< const amrex::Real > &z_nd)
Definition: ERF_TerrainMetrics.H:96
AMREX_GPU_DEVICE AMREX_FORCE_INLINE amrex::Real Compute_h_zeta_AtJface(const int &i, const int &j, const int &k, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > &cellSizeInv, const amrex::Array4< const amrex::Real > &z_nd)
Definition: ERF_TerrainMetrics.H:139
AMREX_GPU_DEVICE AMREX_FORCE_INLINE amrex::Real WFromOmega(int &i, int &j, int &k, amrex::Real omega, const amrex::Array4< const amrex::Real > &u_arr, const amrex::Array4< const amrex::Real > &v_arr, const amrex::Array4< const amrex::Real > &mf_u, const amrex::Array4< const amrex::Real > &mf_v, const amrex::Array4< const amrex::Real > &z_nd, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > &dxInv)
Definition: ERF_TerrainMetrics.H:465
@ NumTypes
Definition: ERF_IndexDefines.H:162
@ ymom
Definition: ERF_IndexDefines.H:160
@ cons
Definition: ERF_IndexDefines.H:158
@ zmom
Definition: ERF_IndexDefines.H:161
@ xmom
Definition: ERF_IndexDefines.H:159