ERF
Energy Research and Forecasting: An Atmospheric Modeling Code
ERF_AdvectionSrcForState.cpp File Reference
Include dependency graph for ERF_AdvectionSrcForState.cpp:

Functions

void AdvectionSrcForRho (const Box &bx, const Array4< Real > &advectionSrc, const Array4< const Real > &rho_u, const Array4< const Real > &rho_v, const Array4< const Real > &Omega, const Array4< Real > &avg_xmom, const Array4< Real > &avg_ymom, const Array4< Real > &avg_zmom, const Array4< const Real > &ax_arr, const Array4< const Real > &ay_arr, const Array4< const Real > &az_arr, const Array4< const Real > &detJ, const GpuArray< Real, AMREX_SPACEDIM > &cellSizeInv, const Array4< const Real > &mf_m, const Array4< const Real > &mf_u, const Array4< const Real > &mf_v, const GpuArray< const Array4< Real >, AMREX_SPACEDIM > &flx_arr, const bool const_rho)
 
void AdvectionSrcForScalars (const Real &dt, const Box &bx, const int icomp, const int ncomp, const Array4< const Real > &avg_xmom, const Array4< const Real > &avg_ymom, const Array4< const Real > &avg_zmom, const Array4< const Real > &cur_cons, const Array4< const Real > &cell_prim, const Array4< Real > &advectionSrc, const bool &use_mono_adv, Real *max_s_ptr, Real *min_s_ptr, const Array4< const Real > &detJ, const GpuArray< Real, AMREX_SPACEDIM > &cellSizeInv, const Array4< const Real > &mf_m, const AdvType horiz_adv_type, const AdvType vert_adv_type, const Real horiz_upw_frac, const Real vert_upw_frac, const GpuArray< const Array4< Real >, AMREX_SPACEDIM > &flx_arr, const GpuArray< Array4< Real >, AMREX_SPACEDIM > &flx_tmp_arr, const Box &domain, const BCRec *bc_ptr_h)
 

Function Documentation

◆ AdvectionSrcForRho()

void AdvectionSrcForRho ( const Box &  bx,
const Array4< Real > &  advectionSrc,
const Array4< const Real > &  rho_u,
const Array4< const Real > &  rho_v,
const Array4< const Real > &  Omega,
const Array4< Real > &  avg_xmom,
const Array4< Real > &  avg_ymom,
const Array4< Real > &  avg_zmom,
const Array4< const Real > &  ax_arr,
const Array4< const Real > &  ay_arr,
const Array4< const Real > &  az_arr,
const Array4< const Real > &  detJ,
const GpuArray< Real, AMREX_SPACEDIM > &  cellSizeInv,
const Array4< const Real > &  mf_m,
const Array4< const Real > &  mf_u,
const Array4< const Real > &  mf_v,
const GpuArray< const Array4< Real >, AMREX_SPACEDIM > &  flx_arr,
const bool  const_rho 
)

Function for computing the advective tendency for the update equations for rho and (rho theta). This routine has explicit expressions for all cases (terrain or not) when the horizontal and vertical spatial orders are <= 2, and calls more specialized functions when either (or both) of the spatial orders is greater than 2.
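In discrete form, when const_rho is false, the tendency assembled by this routine is (a restatement of the loops in the listing below, not an additional definition):

\[
\left.\frac{\partial \rho}{\partial t}\right|_{\mathrm{adv}}
 = -\,\frac{m^2}{J}\left[
     \frac{F^x_{i+1/2} - F^x_{i-1/2}}{\Delta x}
   + \frac{F^y_{j+1/2} - F^y_{j-1/2}}{\Delta y}
   + \frac{F^z_{k+1/2} - F^z_{k-1/2}}{\Delta z}\right],
\qquad
F^x = \frac{a_x\,\rho u}{m_u},\quad
F^y = \frac{a_y\,\rho v}{m_v},\quad
F^z = \frac{a_z\,\Omega}{m^2},
\]

where m is the cell-centered map factor (mf_m), m_u and m_v are the face map factors, a_x, a_y, a_z are the face area factors (ax_arr, ay_arr, az_arr), and J is detJ.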

Parameters
[in]   bx            box over which the scalars are updated
[out]  advectionSrc  tendency for the scalar update equation
[in]   rho_u         x-component of momentum
[in]   rho_v         y-component of momentum
[in]   Omega         component of momentum normal to the z-coordinate surface
[out]  avg_xmom      x-component of time-averaged momentum defined in this routine
[out]  avg_ymom      y-component of time-averaged momentum defined in this routine
[out]  avg_zmom      z-component of time-averaged momentum defined in this routine
[in]   detJ          Jacobian of the metric transformation (= 1 if use_terrain is false)
[in]   cellSizeInv   inverse of the mesh spacing
[in]   mf_m          map factor at cell centers
[in]   mf_u          map factor at x-faces
[in]   mf_v          map factor at y-faces
{
    BL_PROFILE_VAR("AdvectionSrcForRho", AdvectionSrcForRho);
    auto dxInv = cellSizeInv[0], dyInv = cellSizeInv[1], dzInv = cellSizeInv[2];

    const Box xbx = surroundingNodes(bx,0);
    const Box ybx = surroundingNodes(bx,1);
    const Box zbx = surroundingNodes(bx,2);

    ParallelFor(xbx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
    {
        (flx_arr[0])(i,j,k,0) = ax_arr(i,j,k) * rho_u(i,j,k) / mf_u(i,j,0);
        avg_xmom(i,j,k) = (flx_arr[0])(i,j,k,0);
    });
    ParallelFor(ybx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
    {
        (flx_arr[1])(i,j,k,0) = ay_arr(i,j,k) * rho_v(i,j,k) / mf_v(i,j,0);
        avg_ymom(i,j,k) = (flx_arr[1])(i,j,k,0);
    });
    ParallelFor(zbx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
    {
        Real mfsq = mf_m(i,j,0) * mf_m(i,j,0);
        (flx_arr[2])(i,j,k,0) = az_arr(i,j,k) * Omega(i,j,k) / mfsq;
        avg_zmom(i,j,k) = (flx_arr[2])(i,j,k,0);
    });

    if (const_rho) {
        ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
        {
            advectionSrc(i,j,k,0) = 0.0;
        });
    } else
    {
        ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
        {
            if (detJ(i,j,k) > 0.) {
                Real mfsq = mf_m(i,j,0) * mf_m(i,j,0);
                advectionSrc(i,j,k,0) = - mfsq / detJ(i,j,k) * (
                    ( (flx_arr[0])(i+1,j,k,0) - (flx_arr[0])(i  ,j,k,0) ) * dxInv +
                    ( (flx_arr[1])(i,j+1,k,0) - (flx_arr[1])(i,j  ,k,0) ) * dyInv +
                    ( (flx_arr[2])(i,j,k+1,0) - (flx_arr[2])(i,j,k  ,0) ) * dzInv );
            } else {
                advectionSrc(i,j,k,0) = 0.;
            }
        });
    }
}
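For orientation, a minimal hypothetical call site is sketched below. The MultiFab and variable names (cons_rhs, xmom, flux, mapfac_m, l_const_rho, ...) are illustrative assumptions and are not taken from the ERF driver code; only the argument order follows the signature documented above.

// Sketch only: assumes "using namespace amrex;" and that MultiFabs for the momenta,
// metric terms, map factors, and three face-centered flux MultiFabs (flux[0..2])
// have already been built on this level with the appropriate index types.
auto dxInv = geom.InvCellSizeArray();   // GpuArray<Real, AMREX_SPACEDIM>

for (MFIter mfi(cons_rhs, TilingIfNotGPU()); mfi.isValid(); ++mfi)
{
    const Box& bx = mfi.tilebox();

    // Gather the three face-flux arrays into the GpuArray expected by the interface
    GpuArray<const Array4<Real>, AMREX_SPACEDIM> flx_arr{{flux[0].array(mfi),
                                                          flux[1].array(mfi),
                                                          flux[2].array(mfi)}};

    AdvectionSrcForRho(bx, cons_rhs.array(mfi),
                       xmom.const_array(mfi), ymom.const_array(mfi), omega.const_array(mfi),
                       avg_xmom.array(mfi), avg_ymom.array(mfi), avg_zmom.array(mfi),
                       ax.const_array(mfi), ay.const_array(mfi), az.const_array(mfi),
                       detJ_cc.const_array(mfi), dxInv,
                       mapfac_m.const_array(mfi), mapfac_u.const_array(mfi), mapfac_v.const_array(mfi),
                       flx_arr, l_const_rho);
}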

◆ AdvectionSrcForScalars()

void AdvectionSrcForScalars ( const Real &  dt,
const Box &  bx,
const int  icomp,
const int  ncomp,
const Array4< const Real > &  avg_xmom,
const Array4< const Real > &  avg_ymom,
const Array4< const Real > &  avg_zmom,
const Array4< const Real > &  cur_cons,
const Array4< const Real > &  cell_prim,
const Array4< Real > &  advectionSrc,
const bool &  use_mono_adv,
Real *  max_s_ptr,
Real *  min_s_ptr,
const Array4< const Real > &  detJ,
const GpuArray< Real, AMREX_SPACEDIM > &  cellSizeInv,
const Array4< const Real > &  mf_m,
const AdvType  horiz_adv_type,
const AdvType  vert_adv_type,
const Real  horiz_upw_frac,
const Real  vert_upw_frac,
const GpuArray< const Array4< Real >, AMREX_SPACEDIM > &  flx_arr,
const GpuArray< Array4< Real >, AMREX_SPACEDIM > &  flx_tmp_arr,
const Box &  domain,
const BCRec *  bc_ptr_h 
)

Function for computing the advective tendency for the update equations for all scalars other than rho and (rho theta). This routine has explicit expressions for all cases (terrain or not) when the horizontal and vertical spatial orders are <= 2, and calls more specialized functions when either (or both) of the spatial orders is greater than 2.
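For the default centered second-order case shown inline below, each face flux for scalar component n and the resulting tendency are (restating the listing, with avg_xmom written as the face momentum and cell_prim as s):

\[
F^x_{i-1/2,n} = (\overline{\rho u})_{i-1/2}\,\tfrac{1}{2}\bigl(s_{i-1,n} + s_{i,n}\bigr),
\qquad
\mathrm{advectionSrc}_n = -\,\frac{m^2}{J}\left[
    \frac{F^x_{i+1/2,n} - F^x_{i-1/2,n}}{\Delta x}
  + \frac{F^y_{j+1/2,n} - F^y_{j-1/2,n}}{\Delta y}
  + \frac{F^z_{k+1/2,n} - F^z_{k-1/2,n}}{\Delta z}\right],
\]

with analogous expressions in y and z. The higher-order and WENO variants dispatched in the switch statement only change how s is interpolated to the faces.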

Parameters
[in]   bx              box over which the scalars are updated (if no external boundary conditions)
[in]   icomp           component index of the first scalar to be updated
[in]   ncomp           number of components to be updated
[in]   avg_xmom        x-component of time-averaged momentum computed in AdvectionSrcForRho
[in]   avg_ymom        y-component of time-averaged momentum computed in AdvectionSrcForRho
[in]   avg_zmom        z-component of time-averaged momentum computed in AdvectionSrcForRho
[in]   cell_prim       primitive form of the scalar variables, here only potential temperature theta
[out]  advectionSrc    tendency for the scalar update equation
[in]   detJ            Jacobian of the metric transformation (= 1 if use_terrain is false)
[in]   cellSizeInv     inverse of the mesh spacing
[in]   mf_m            map factor at cell centers
[in]   horiz_adv_type  advection scheme used in the horizontal directions for dry scalars
[in]   vert_adv_type   advection scheme used in the vertical direction for dry scalars
[in]   horiz_upw_frac  upwinding fraction used in the horizontal directions for dry scalars (for blended schemes only)
[in]   vert_upw_frac   upwinding fraction used in the vertical direction for dry scalars (for blended schemes only)
{
    BL_PROFILE_VAR("AdvectionSrcForScalars", AdvectionSrcForScalars);
    auto dxInv = cellSizeInv[0], dyInv = cellSizeInv[1], dzInv = cellSizeInv[2];

    const Box xbx = surroundingNodes(bx,0);
    const Box ybx = surroundingNodes(bx,1);
    const Box zbx = surroundingNodes(bx,2);

    // Open bc will be imposed upon all vars (we only access cons here for simplicity)
    const bool xlo_open = (bc_ptr_h[BCVars::cons_bc].lo(0) == ERFBCType::open);
    const bool xhi_open = (bc_ptr_h[BCVars::cons_bc].hi(0) == ERFBCType::open);
    const bool ylo_open = (bc_ptr_h[BCVars::cons_bc].lo(1) == ERFBCType::open);
    const bool yhi_open = (bc_ptr_h[BCVars::cons_bc].hi(1) == ERFBCType::open);

    // Only advection operations in bndry normal direction with OPEN BC
    Box bx_xlo, bx_xhi, bx_ylo, bx_yhi;
    if (xlo_open) {
        if ( bx.smallEnd(0) == domain.smallEnd(0)) { bx_xlo = makeSlab( bx,0,domain.smallEnd(0));}
    }
    if (xhi_open) {
        if ( bx.bigEnd(0) == domain.bigEnd(0)) { bx_xhi = makeSlab( bx,0,domain.bigEnd(0) );}
    }
    if (ylo_open) {
        if ( bx.smallEnd(1) == domain.smallEnd(1)) { bx_ylo = makeSlab( bx,1,domain.smallEnd(1));}
    }
    if (yhi_open) {
        if ( bx.bigEnd(1) == domain.bigEnd(1)) { bx_yhi = makeSlab( bx,1,domain.bigEnd(1) );}
    }

    // Inline with 2nd order for efficiency
    // NOTE: we don't need to weight avg_xmom, avg_ymom, avg_zmom with terrain metrics
    //       (or with EB area fractions)
    //       because that was done when they were constructed in AdvectionSrcForRhoAndTheta
    if (horiz_adv_type == AdvType::Centered_2nd && vert_adv_type == AdvType::Centered_2nd)
    {
        ParallelFor(xbx, ncomp,[=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept
        {
            const int cons_index = icomp + n;
            const int prim_index = cons_index - 1;
            const Real prim_on_face = 0.5 * (cell_prim(i,j,k,prim_index) + cell_prim(i-1,j,k,prim_index));
            (flx_arr[0])(i,j,k,cons_index) = avg_xmom(i,j,k) * prim_on_face;
        });
        ParallelFor(ybx, ncomp,[=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept
        {
            const int cons_index = icomp + n;
            const int prim_index = cons_index - 1;
            const Real prim_on_face = 0.5 * (cell_prim(i,j,k,prim_index) + cell_prim(i,j-1,k,prim_index));
            (flx_arr[1])(i,j,k,cons_index) = avg_ymom(i,j,k) * prim_on_face;
        });
        ParallelFor(zbx, ncomp,[=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept
        {
            const int cons_index = icomp + n;
            const int prim_index = cons_index - 1;
            const Real prim_on_face = 0.5 * (cell_prim(i,j,k,prim_index) + cell_prim(i,j,k-1,prim_index));
            (flx_arr[2])(i,j,k,cons_index) = avg_zmom(i,j,k) * prim_on_face;
        });

    // Template higher order methods (horizontal first)
    } else {
        switch(horiz_adv_type) {
        case AdvType::Centered_2nd:
            AdvectionSrcForScalarsVert<CENTERED2>(bx, ncomp, icomp, flx_arr, cell_prim,
                                                  avg_xmom, avg_ymom, avg_zmom,
                                                  horiz_upw_frac, vert_upw_frac, vert_adv_type);
            break;
        case AdvType::Upwind_3rd:
            AdvectionSrcForScalarsVert<UPWIND3>(bx, ncomp, icomp, flx_arr, cell_prim,
                                                avg_xmom, avg_ymom, avg_zmom,
                                                horiz_upw_frac, vert_upw_frac, vert_adv_type);
            break;
        case AdvType::Centered_4th:
            AdvectionSrcForScalarsVert<CENTERED4>(bx, ncomp, icomp, flx_arr, cell_prim,
                                                  avg_xmom, avg_ymom, avg_zmom,
                                                  horiz_upw_frac, vert_upw_frac, vert_adv_type);
            break;
        case AdvType::Upwind_5th:
            AdvectionSrcForScalarsVert<UPWIND5>(bx, ncomp, icomp, flx_arr, cell_prim,
                                                avg_xmom, avg_ymom, avg_zmom,
                                                horiz_upw_frac, vert_upw_frac, vert_adv_type);
            break;
        case AdvType::Centered_6th:
            AdvectionSrcForScalarsVert<CENTERED6>(bx, ncomp, icomp, flx_arr, cell_prim,
                                                  avg_xmom, avg_ymom, avg_zmom,
                                                  horiz_upw_frac, vert_upw_frac, vert_adv_type);
            break;
        case AdvType::Weno_3:
            AdvectionSrcForScalarsWrapper<WENO3,WENO3>(bx, ncomp, icomp, flx_arr, cell_prim,
                                                       avg_xmom, avg_ymom, avg_zmom,
                                                       horiz_upw_frac, vert_upw_frac);
            break;
        case AdvType::Weno_5:
            AdvectionSrcForScalarsWrapper<WENO5,WENO5>(bx, ncomp, icomp, flx_arr, cell_prim,
                                                       avg_xmom, avg_ymom, avg_zmom,
                                                       horiz_upw_frac, vert_upw_frac);
            break;
        case AdvType::Weno_7:
            AdvectionSrcForScalarsWrapper<WENO7,WENO7>(bx, ncomp, icomp, flx_arr, cell_prim,
                                                       avg_xmom, avg_ymom, avg_zmom,
                                                       horiz_upw_frac, vert_upw_frac);
            break;
        case AdvType::Weno_3Z:
            AdvectionSrcForScalarsWrapper<WENO_Z3,WENO_Z3>(bx, ncomp, icomp, flx_arr, cell_prim,
                                                           avg_xmom, avg_ymom, avg_zmom,
                                                           horiz_upw_frac, vert_upw_frac);
            break;
        case AdvType::Weno_3MZQ:
            AdvectionSrcForScalarsWrapper<WENO_MZQ3,WENO_MZQ3>(bx, ncomp, icomp, flx_arr, cell_prim,
                                                               avg_xmom, avg_ymom, avg_zmom,
                                                               horiz_upw_frac, vert_upw_frac);
            break;
        case AdvType::Weno_5Z:
            AdvectionSrcForScalarsWrapper<WENO_Z5,WENO_Z5>(bx, ncomp, icomp, flx_arr, cell_prim,
                                                           avg_xmom, avg_ymom, avg_zmom,
                                                           horiz_upw_frac, vert_upw_frac);
            break;
        case AdvType::Weno_7Z:
            AdvectionSrcForScalarsWrapper<WENO_Z7,WENO_Z7>(bx, ncomp, icomp, flx_arr, cell_prim,
                                                           avg_xmom, avg_ymom, avg_zmom,
                                                           horiz_upw_frac, vert_upw_frac);
            break;
        default:
            AMREX_ASSERT_WITH_MESSAGE(false, "Unknown advection scheme!");
        }
    }

    /* =======================================================================
       Monotonicity preserving order reduction for scalars (0-th upwind).

       NOTE:
       The following order reduction operations for scalar advection have been
       adapted from the PINACLES code developed at PNNL by K. Pressel et al.
       (see https://github.com/pnnl/pinacles.git). This version utilizes global
       min/max values for simplicity rather than the local average around each
       cell. Motivation for this modification is the compressible dycore in ERF
       as opposed to the incompressible/anelastic dycore in PINACLES.
       ======================================================================= */
    if (use_mono_adv) {
        // Copy flux data to flx_arr to avoid race condition on GPU
        ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept
        {
            const int cons_index = icomp + n;
            (flx_tmp_arr[0])(i,j,k,cons_index) = (flx_arr[0])(i,j,k,cons_index);
            (flx_tmp_arr[1])(i,j,k,cons_index) = (flx_arr[1])(i,j,k,cons_index);
            (flx_tmp_arr[2])(i,j,k,cons_index) = (flx_arr[2])(i,j,k,cons_index);
        });

        // Mono limiting
        ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept
        {
            const int cons_index = icomp + n;
            const int prim_index = cons_index - 1;

            Real max_val = max_s_ptr[cons_index];
            Real min_val = min_s_ptr[cons_index];

            Real invdetJ = (detJ(i,j,k) > 0.) ? 1. / detJ(i,j,k) : 1.;
            Real mfsq = mf_m(i,j,0) * mf_m(i,j,0);

            Real RHS = - invdetJ * mfsq * (
                ( (flx_arr[0])(i+1,j  ,k  ,cons_index) - (flx_arr[0])(i,j,k,cons_index) ) * dxInv +
                ( (flx_arr[1])(i  ,j+1,k  ,cons_index) - (flx_arr[1])(i,j,k,cons_index) ) * dyInv +
                ( (flx_arr[2])(i  ,j  ,k+1,cons_index) - (flx_arr[2])(i,j,k,cons_index) ) * dzInv );

            // NOTE: This forward prediction uses the `cur_cons` as opposed to `old_cons` since
            //       source terms may cause increase/decrease in a variable each RK stage. We
            //       want to ensure the advection operator does not induce over/under-shoot
            //       from the current state. If the `old_cons` is used and significant forcing is
            //       present, we could trip an order reduction just due to the source terms.
            Real tmp_upd = cur_cons(i,j,k,cons_index) + RHS*dt;
            if (tmp_upd<min_val || tmp_upd>max_val) {
                // HI
                if (avg_xmom(i+1,j,k)>0.0) {
                    (flx_tmp_arr[0])(i+1,j,k,cons_index) = avg_xmom(i+1,j,k) * cell_prim(i  ,j,k,prim_index);
                } else {
                    (flx_tmp_arr[0])(i+1,j,k,cons_index) = avg_xmom(i+1,j,k) * cell_prim(i+1,j,k,prim_index);
                }
                if (avg_ymom(i,j+1,k)>0.0) {
                    (flx_tmp_arr[1])(i,j+1,k,cons_index) = avg_ymom(i,j+1,k) * cell_prim(i,j  ,k,prim_index);
                } else {
                    (flx_tmp_arr[1])(i,j+1,k,cons_index) = avg_ymom(i,j+1,k) * cell_prim(i,j+1,k,prim_index);
                }
                if (avg_zmom(i,j,k+1)>0.0) {
                    (flx_tmp_arr[2])(i,j,k+1,cons_index) = avg_zmom(i,j,k+1) * cell_prim(i,j,k  ,prim_index);
                } else {
                    (flx_tmp_arr[2])(i,j,k+1,cons_index) = avg_zmom(i,j,k+1) * cell_prim(i,j,k+1,prim_index);
                }

                // LO
                if (avg_xmom(i,j,k)>0.0) {
                    (flx_tmp_arr[0])(i,j,k,cons_index) = avg_xmom(i,j,k) * cell_prim(i-1,j,k,prim_index);
                } else {
                    (flx_tmp_arr[0])(i,j,k,cons_index) = avg_xmom(i,j,k) * cell_prim(i  ,j,k,prim_index);
                }
                if (avg_ymom(i,j,k)>0.0) {
                    (flx_tmp_arr[1])(i,j,k,cons_index) = avg_ymom(i,j,k) * cell_prim(i,j-1,k,prim_index);
                } else {
                    (flx_tmp_arr[1])(i,j,k,cons_index) = avg_ymom(i,j,k) * cell_prim(i,j  ,k,prim_index);
                }
                if (avg_zmom(i,j,k)>0.0) {
                    (flx_tmp_arr[2])(i,j,k,cons_index) = avg_zmom(i,j,k) * cell_prim(i,j,k-1,prim_index);
                } else {
                    (flx_tmp_arr[2])(i,j,k,cons_index) = avg_zmom(i,j,k) * cell_prim(i,j,k  ,prim_index);
                }
            }
        });

        // Copy back to flx_arr to avoid race condition on GPU
        ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept
        {
            const int cons_index = icomp + n;
            (flx_arr[0])(i,j,k,cons_index) = (flx_tmp_arr[0])(i,j,k,cons_index);
            (flx_arr[1])(i,j,k,cons_index) = (flx_tmp_arr[1])(i,j,k,cons_index);
            (flx_arr[2])(i,j,k,cons_index) = (flx_tmp_arr[2])(i,j,k,cons_index);
        });
    }

    ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept
    {
        Real invdetJ = (detJ(i,j,k) > 0.) ? 1. / detJ(i,j,k) : 1.;

        Real mfsq = mf_m(i,j,0) * mf_m(i,j,0);

        const int cons_index = icomp + n;
        advectionSrc(i,j,k,cons_index) = - invdetJ * mfsq * (
            ( (flx_arr[0])(i+1,j,k,cons_index) - (flx_arr[0])(i  ,j,k,cons_index) ) * dxInv +
            ( (flx_arr[1])(i,j+1,k,cons_index) - (flx_arr[1])(i,j  ,k,cons_index) ) * dyInv +
            ( (flx_arr[2])(i,j,k+1,cons_index) - (flx_arr[2])(i,j,k  ,cons_index) ) * dzInv );
    });

    // Special advection operator for open BC (bndry tangent operations)
    if (xlo_open) {
        bool do_lo = true;
        AdvectionSrcForOpenBC_Tangent_Cons(bx_xlo, 0, icomp, ncomp, advectionSrc, cell_prim,
                                           avg_xmom, avg_ymom, avg_zmom,
                                           detJ, cellSizeInv, do_lo);
    }
    if (xhi_open) {
        AdvectionSrcForOpenBC_Tangent_Cons(bx_xhi, 0, icomp, ncomp, advectionSrc, cell_prim,
                                           avg_xmom, avg_ymom, avg_zmom,
                                           detJ, cellSizeInv);
    }
    if (ylo_open) {
        bool do_lo = true;
        AdvectionSrcForOpenBC_Tangent_Cons(bx_ylo, 1, icomp, ncomp, advectionSrc, cell_prim,
                                           avg_xmom, avg_ymom, avg_zmom,
                                           detJ, cellSizeInv, do_lo);
    }
    if (yhi_open) {
        AdvectionSrcForOpenBC_Tangent_Cons(bx_yhi, 1, icomp, ncomp, advectionSrc, cell_prim,
                                           avg_xmom, avg_ymom, avg_zmom,
                                           detJ, cellSizeInv);
    }
}
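When use_mono_adv is true, the order reduction in the listing above can be summarized as follows (a restatement of the limiter loop, using the same symbols as in the earlier formulas): a forward prediction is formed for each scalar component,

\[
s^{*}_{n} = \mathrm{cur\_cons}_{n} + \Delta t\,\mathrm{RHS}_{n},
\qquad
F^x_{i+1/2,n} =
\begin{cases}
(\overline{\rho u})_{i+1/2}\, s_{i,n},   & (\overline{\rho u})_{i+1/2} > 0,\\[2pt]
(\overline{\rho u})_{i+1/2}\, s_{i+1,n}, & \text{otherwise},
\end{cases}
\]

where RHS_n is the flux divergence already defined. If \(s^{*}_{n}\) falls outside the global bounds held in min_s_ptr / max_s_ptr, all six face fluxes of that cell are replaced by first-order upwind fluxes of the form shown on the right (analogously in y and z), and the final divergence loop then recomputes the tendency from the limited fluxes.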