ERF
Energy Research and Forecasting: An Atmospheric Modeling Code
ERFPhysBCFunct_cons Class Reference

#include <ERF_PhysBCFunct.H>


Public Member Functions

 ERFPhysBCFunct_cons (const int lev, const amrex::Geometry &geom, const amrex::Vector< amrex::BCRec > &domain_bcs_type, const amrex::Gpu::DeviceVector< amrex::BCRec > &domain_bcs_type_d, amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > bc_extdir_vals, amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > bc_neumann_vals, std::unique_ptr< amrex::MultiFab > &z_phys_nd, const bool use_real_bcs)
 
 ~ERFPhysBCFunct_cons ()
 
void operator() (amrex::MultiFab &mf, int icomp, int ncomp, amrex::IntVect const &nghost, const amrex::Real time, int bccomp_cons, bool do_fb=true, bool do_terrain_adjustment=true)
 
void impose_lateral_cons_bcs (const amrex::Array4< amrex::Real > &dest_arr, const amrex::Box &bx, const amrex::Box &domain, int icomp, int ncomp, amrex::IntVect ng)
 
void impose_vertical_cons_bcs (const amrex::Array4< amrex::Real > &dest_arr, const amrex::Box &bx, const amrex::Box &domain, const amrex::Array4< amrex::Real const > &z_nd, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > dxInv, int icomp, int ncomp, bool do_terrain_adjustment=true)
 

Private Attributes

int m_lev
 
amrex::Geometry m_geom
 
amrex::Vector< amrex::BCRec > m_domain_bcs_type
 
amrex::Gpu::DeviceVector< amrex::BCRec > m_domain_bcs_type_d
 
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > m_bc_extdir_vals
 
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > m_bc_neumann_vals
 
amrex::MultiFab * m_z_phys_nd
 
bool m_use_real_bcs
 

Constructor & Destructor Documentation

◆ ERFPhysBCFunct_cons()

ERFPhysBCFunct_cons::ERFPhysBCFunct_cons ( const int  lev,
const amrex::Geometry &  geom,
const amrex::Vector< amrex::BCRec > &  domain_bcs_type,
const amrex::Gpu::DeviceVector< amrex::BCRec > &  domain_bcs_type_d,
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max >  bc_extdir_vals,
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max >  bc_neumann_vals,
std::unique_ptr< amrex::MultiFab > &  z_phys_nd,
const bool  use_real_bcs 
)
inline
    : m_lev(lev), m_geom(geom),
      m_domain_bcs_type(domain_bcs_type),
      m_domain_bcs_type_d(domain_bcs_type_d),
      m_bc_extdir_vals(bc_extdir_vals),
      m_bc_neumann_vals(bc_neumann_vals),
      m_z_phys_nd(z_phys_nd.get()),
      m_use_real_bcs(use_real_bcs)
    {}
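The constructor simply stores its arguments in the corresponding members (keeping only a raw pointer to the terrain-height MultiFab), so it is inexpensive to create one instance per level. The sketch below shows a plausible construction call; the wrapper function and its parameter names are placeholders for objects the calling code is assumed to already own and are not taken from the ERF sources.

#include <memory>
#include <ERF_PhysBCFunct.H>

// Hypothetical helper: every argument is assumed to be set up elsewhere by the caller.
void make_cons_bc_functor (int lev,
                           const amrex::Geometry& geom,
                           const amrex::Vector<amrex::BCRec>& domain_bcs_type,
                           const amrex::Gpu::DeviceVector<amrex::BCRec>& domain_bcs_type_d,
                           const amrex::Array<amrex::Array<amrex::Real, AMREX_SPACEDIM*2>,
                                              AMREX_SPACEDIM+NBCVAR_max>& bc_extdir_vals,
                           const amrex::Array<amrex::Array<amrex::Real, AMREX_SPACEDIM*2>,
                                              AMREX_SPACEDIM+NBCVAR_max>& bc_neumann_vals,
                           std::unique_ptr<amrex::MultiFab>& z_phys_nd,
                           bool use_real_bcs)
{
    // The functor copies the BC metadata but keeps only z_phys_nd.get(),
    // so the unique_ptr must outlive the functor.
    ERFPhysBCFunct_cons physbc(lev, geom, domain_bcs_type, domain_bcs_type_d,
                               bc_extdir_vals, bc_neumann_vals,
                               z_phys_nd, use_real_bcs);
    // ... call physbc(mf, icomp, ncomp, nghost, time, bccomp_cons) when ghost cells are needed ...
}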

◆ ~ERFPhysBCFunct_cons()

ERFPhysBCFunct_cons::~ERFPhysBCFunct_cons ( )
inline
{}

Member Function Documentation

◆ impose_lateral_cons_bcs()

void ERFPhysBCFunct_cons::impose_lateral_cons_bcs ( const amrex::Array4< amrex::Real > &  dest_arr,
const amrex::Box &  bx,
const amrex::Box &  domain,
int  icomp,
int  ncomp,
amrex::IntVect  ng 
)
{
    BL_PROFILE_VAR("impose_lateral_cons_bcs()",impose_lateral_cons_bcs);
    const auto& dom_lo = lbound(domain);
    const auto& dom_hi = ubound(domain);

    // xlo: ori = 0
    // ylo: ori = 1
    // zlo: ori = 2
    // xhi: ori = 3
    // yhi: ori = 4
    // zhi: ori = 5

    // Based on BCRec for the domain, we need to make BCRec for this Box
    // 0 is used as starting index for bcrs
    Vector<BCRec> bcrs(ncomp);

    GpuArray<GpuArray<Real, AMREX_SPACEDIM*2>,NBCVAR_max> l_bc_extdir_vals_d;

    const int* bxlo = bx.loVect();
    const int* bxhi = bx.hiVect();
    const int* dlo  = domain.loVect();
    const int* dhi  = domain.hiVect();

    for (int nc = 0; nc < ncomp; nc++)
    {
        int bc_comp = (icomp+nc >= RhoScalar_comp && icomp+nc < RhoScalar_comp+NSCALARS) ?
                       BCVars::RhoScalar_bc_comp : icomp+nc;
        if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
        for (int dir = 0; dir < AMREX_SPACEDIM; dir++)
        {
            bcrs[nc].setLo(dir, ( bxlo[dir]<=dlo[dir]
                                  ? m_domain_bcs_type[bc_comp].lo(dir) : BCType::int_dir ));
            bcrs[nc].setHi(dir, ( bxhi[dir]>=dhi[dir]
                                  ? m_domain_bcs_type[bc_comp].hi(dir) : BCType::int_dir ));
        }

        for (int ori = 0; ori < 2*AMREX_SPACEDIM; ori++) {
            l_bc_extdir_vals_d[bc_comp][ori] = m_bc_extdir_vals[bc_comp][ori];
        }
    }

    Gpu::DeviceVector<BCRec> bcrs_d(ncomp);
    Gpu::copyAsync(Gpu::hostToDevice, bcrs.begin(), bcrs.end(), bcrs_d.begin());
    const BCRec* bc_ptr = bcrs_d.data();

    GeometryData const& geomdata = m_geom.data();
    bool is_periodic_in_x = geomdata.isPeriodic(0);
    bool is_periodic_in_y = geomdata.isPeriodic(1);

    // First do all ext_dir bcs
    if (!is_periodic_in_x)
    {
        Box bx_xlo(bx); bx_xlo.setBig  (0,dom_lo.x-1);
        Box bx_xhi(bx); bx_xhi.setSmall(0,dom_hi.x+1);
        //
        // If we are setting Dirichlet values, set them in all ghost cells on an inflow face
        // "bx" is already grown in the x- and y-directions so here we just grow it in z
        //
        bx_xlo.grow(2,ng[2]);
        bx_xhi.grow(2,ng[2]);
        ParallelFor(
            bx_xlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
            {
                int dest_comp = icomp+n;
                int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
                               BCVars::RhoScalar_bc_comp : dest_comp;
                if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
                int l_bc_type = bc_ptr[n].lo(0);

                if (l_bc_type == ERFBCType::ext_dir) {
                    dest_arr(i,j,k,dest_comp) = l_bc_extdir_vals_d[bc_comp][0];
                } else if (l_bc_type == ERFBCType::ext_dir_prim) {
                    Real rho = dest_arr(dom_lo.x,j,k,Rho_comp);
                    dest_arr(i,j,k,dest_comp) = rho * l_bc_extdir_vals_d[bc_comp][0];
                }
            },
            bx_xhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
            {
                int dest_comp = icomp+n;
                int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
                               BCVars::RhoScalar_bc_comp : dest_comp;
                if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
                int h_bc_type = bc_ptr[n].hi(0);

                if (h_bc_type == ERFBCType::ext_dir) {
                    dest_arr(i,j,k,dest_comp) = l_bc_extdir_vals_d[bc_comp][3];
                } else if (h_bc_type == ERFBCType::ext_dir_prim) {
                    Real rho = dest_arr(dom_hi.x,j,k,Rho_comp);
                    dest_arr(i,j,k,dest_comp) = rho * l_bc_extdir_vals_d[bc_comp][3];
                }
            }
        );
    }

    if (!is_periodic_in_y)
    {
        Box bx_ylo(bx); bx_ylo.setBig  (1,dom_lo.y-1);
        Box bx_yhi(bx); bx_yhi.setSmall(1,dom_hi.y+1);
        //
        // If we are setting Dirichlet values, set them in all ghost cells on an inflow face
        // "bx" is already grown in the x- and y-directions so here we just grow it in z
        //
        bx_ylo.grow(2,ng[2]);
        bx_yhi.grow(2,ng[2]);

        ParallelFor(
            bx_ylo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
            {
                int dest_comp = icomp+n;
                int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
                               BCVars::RhoScalar_bc_comp : dest_comp;
                if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
                int l_bc_type = bc_ptr[n].lo(1);
                if (l_bc_type == ERFBCType::ext_dir) {
                    dest_arr(i,j,k,dest_comp) = l_bc_extdir_vals_d[bc_comp][1];
                } else if (l_bc_type == ERFBCType::ext_dir_prim) {
                    Real rho = dest_arr(i,dom_lo.y,k,Rho_comp);
                    dest_arr(i,j,k,dest_comp) = rho * l_bc_extdir_vals_d[bc_comp][1];
                }
            },
            bx_yhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
            {
                int dest_comp = icomp+n;
                int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
                               BCVars::RhoScalar_bc_comp : dest_comp;
                if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
                int h_bc_type = bc_ptr[n].hi(1);
                if (h_bc_type == ERFBCType::ext_dir) {
                    dest_arr(i,j,k,dest_comp) = l_bc_extdir_vals_d[bc_comp][4];
                } else if (h_bc_type == ERFBCType::ext_dir_prim) {
                    Real rho = dest_arr(i,dom_hi.y,k,Rho_comp);
                    dest_arr(i,j,k,dest_comp) = rho * l_bc_extdir_vals_d[bc_comp][4];
                }
            }
        );
    }

    // Next do ghost cells in x-direction but not reaching out in y
    // The corners we miss here will be covered in the y-loop below or by periodicity
    if (!is_periodic_in_x)
    {
        // Populate ghost cells on lo-x and hi-x domain boundaries
        Box bx_xlo(bx); bx_xlo.setBig  (0,dom_lo.x-1);
        Box bx_xhi(bx); bx_xhi.setSmall(0,dom_hi.x+1);
        if (bx_xlo.smallEnd(2) != domain.smallEnd(2)) bx_xlo.growLo(2,ng[2]);
        if (bx_xlo.bigEnd(2)   != domain.bigEnd(2))   bx_xlo.growHi(2,ng[2]);
        if (bx_xhi.smallEnd(2) != domain.smallEnd(2)) bx_xhi.growLo(2,ng[2]);
        if (bx_xhi.bigEnd(2)   != domain.bigEnd(2))   bx_xhi.growHi(2,ng[2]);
        ParallelFor(
            bx_xlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
            {
                int dest_comp = icomp+n;
                int l_bc_type = bc_ptr[n].lo(0);
                int iflip = dom_lo.x - 1 - i;
                if (l_bc_type == ERFBCType::foextrap) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(dom_lo.x,j,k,dest_comp);
                } else if (l_bc_type == ERFBCType::open) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(dom_lo.x,j,k,dest_comp);
                } else if (l_bc_type == ERFBCType::reflect_even) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(iflip,j,k,dest_comp);
                } else if (l_bc_type == ERFBCType::reflect_odd) {
                    dest_arr(i,j,k,dest_comp) = -dest_arr(iflip,j,k,dest_comp);
                } else if (l_bc_type == ERFBCType::hoextrapcc) {
                    Real delta_i = (dom_lo.x - i);
                    dest_arr(i,j,k,dest_comp) = (1.0 + delta_i)*dest_arr(dom_lo.x,j,k,dest_comp) - delta_i*dest_arr(dom_lo.x+1,j,k,dest_comp);
                }
            },
            bx_xhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
            {
                int dest_comp = icomp+n;
                int h_bc_type = bc_ptr[n].hi(0);
                int iflip = 2*dom_hi.x + 1 - i;
                if (h_bc_type == ERFBCType::foextrap) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(dom_hi.x,j,k,dest_comp);
                } else if (h_bc_type == ERFBCType::open) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(dom_hi.x,j,k,dest_comp);
                } else if (h_bc_type == ERFBCType::reflect_even) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(iflip,j,k,dest_comp);
                } else if (h_bc_type == ERFBCType::reflect_odd) {
                    dest_arr(i,j,k,dest_comp) = -dest_arr(iflip,j,k,dest_comp);
                } else if (h_bc_type == ERFBCType::hoextrapcc) {
                    Real delta_i = (i - dom_hi.x);
                    dest_arr(i,j,k,dest_comp) = (1.0 + delta_i)*dest_arr(dom_hi.x,j,k,dest_comp) - delta_i*dest_arr(dom_hi.x-1,j,k,dest_comp);
                }
            }
        );
    }

    if (!is_periodic_in_y)
    {
        // Populate ghost cells on lo-y and hi-y domain boundaries
        Box bx_ylo(bx); bx_ylo.setBig  (1,dom_lo.y-1);
        Box bx_yhi(bx); bx_yhi.setSmall(1,dom_hi.y+1);
        if (bx_ylo.smallEnd(2) != domain.smallEnd(2)) bx_ylo.growLo(2,ng[2]);
        if (bx_ylo.bigEnd(2)   != domain.bigEnd(2))   bx_ylo.growHi(2,ng[2]);
        if (bx_yhi.smallEnd(2) != domain.smallEnd(2)) bx_yhi.growLo(2,ng[2]);
        if (bx_yhi.bigEnd(2)   != domain.bigEnd(2))   bx_yhi.growHi(2,ng[2]);
        ParallelFor(
            bx_ylo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
            {
                int dest_comp = icomp+n;
                int l_bc_type = bc_ptr[n].lo(1);
                int jflip = dom_lo.y - 1 - j;
                if (l_bc_type == ERFBCType::foextrap) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(i,dom_lo.y,k,dest_comp);
                } else if (l_bc_type == ERFBCType::open) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(i,dom_lo.y,k,dest_comp);
                } else if (l_bc_type == ERFBCType::reflect_even) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(i,jflip,k,dest_comp);
                } else if (l_bc_type == ERFBCType::reflect_odd) {
                    dest_arr(i,j,k,dest_comp) = -dest_arr(i,jflip,k,dest_comp);
                } else if (l_bc_type == ERFBCType::hoextrapcc) {
                    Real delta_j = (dom_lo.y - j);
                    dest_arr(i,j,k,dest_comp) = (1.0 + delta_j)*dest_arr(i,dom_lo.y,k,dest_comp) - delta_j*dest_arr(i,dom_lo.y+1,k,dest_comp);
                }
            },
            bx_yhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
            {
                int dest_comp = icomp+n;
                int h_bc_type = bc_ptr[n].hi(1);
                int jflip = 2*dom_hi.y + 1 - j;
                if (h_bc_type == ERFBCType::foextrap) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(i,dom_hi.y,k,dest_comp);
                } else if (h_bc_type == ERFBCType::open) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(i,dom_hi.y,k,dest_comp);
                } else if (h_bc_type == ERFBCType::reflect_even) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(i,jflip,k,dest_comp);
                } else if (h_bc_type == ERFBCType::reflect_odd) {
                    dest_arr(i,j,k,dest_comp) = -dest_arr(i,jflip,k,dest_comp);
                } else if (h_bc_type == ERFBCType::hoextrapcc) {
                    Real delta_j = (j - dom_hi.y);
                    dest_arr(i,j,k,dest_comp) = (1.0 + delta_j)*dest_arr(i,dom_hi.y,k,dest_comp) - delta_j*dest_arr(i,dom_hi.y-1,k,dest_comp);
                }
            }
        );
    }
    Gpu::streamSynchronize();
}
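Both imposition routines repeat the same component-index mapping: every state component in the advected-scalar range shares the single RhoScalar_bc_comp boundary-condition slot, and components beyond that range are shifted down by NSCALARS-1 so that the BC arrays stay densely indexed. The standalone sketch below only illustrates that mapping; the numeric constants are made-up stand-ins for the values defined in ERF_IndexDefines.H, not the real ones.

#include <cstdio>

// Stand-in values, chosen only for illustration.
constexpr int kRhoScalarComp   = 5;  // first advected-scalar state component (assumed)
constexpr int kNumScalars      = 3;  // number of advected scalars (assumed)
constexpr int kRhoScalarBcComp = 5;  // single BC slot shared by all scalars (assumed)

int bc_comp_for (int state_comp)
{
    // All advected scalars collapse onto one BC component ...
    int bc_comp = (state_comp >= kRhoScalarComp && state_comp < kRhoScalarComp + kNumScalars)
                      ? kRhoScalarBcComp : state_comp;
    // ... and later components shift down to close the gap left by the collapsed scalars.
    if (bc_comp > kRhoScalarBcComp) bc_comp -= (kNumScalars - 1);
    return bc_comp;
}

int main ()
{
    for (int comp = 0; comp < kRhoScalarComp + kNumScalars + 2; ++comp) {
        std::printf("state component %d -> bc component %d\n", comp, bc_comp_for(comp));
    }
    return 0;
}

With these stand-in values, state components 5, 6, and 7 all map to BC component 5, and component 8 maps to 6, which is the dense layout the BCRec and ext_dir arrays assume.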

◆ impose_vertical_cons_bcs()

void ERFPhysBCFunct_cons::impose_vertical_cons_bcs ( const amrex::Array4< amrex::Real > &  dest_arr,
const amrex::Box &  bx,
const amrex::Box &  domain,
const amrex::Array4< amrex::Real const > &  z_nd,
const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM >  dxInv,
int  icomp,
int  ncomp,
bool  do_terrain_adjustment = true 
)
{
    BL_PROFILE_VAR("impose_vertical_cons_bcs()",impose_vertical_cons_bcs);
    const auto& dom_lo = lbound(domain);
    const auto& dom_hi = ubound(domain);

    Box per_grown_domain(domain);
    int growx = (m_geom.isPeriodic(0)) ? 1 : 0;
    int growy = (m_geom.isPeriodic(1)) ? 1 : 0;
    per_grown_domain.grow(IntVect(growx,growy,0));
    const auto& perdom_lo = lbound(per_grown_domain);
    const auto& perdom_hi = ubound(per_grown_domain);

    GeometryData const& geomdata = m_geom.data();

    // xlo: ori = 0
    // ylo: ori = 1
    // zlo: ori = 2
    // xhi: ori = 3
    // yhi: ori = 4
    // zhi: ori = 5

    // Based on BCRec for the domain, we need to make BCRec for this Box
    // 0 is used as starting index for bcrs
    Vector<BCRec> bcrs(ncomp);
    GpuArray<GpuArray<Real, AMREX_SPACEDIM*2>,NBCVAR_max> l_bc_extdir_vals_d;
    GpuArray<GpuArray<Real, AMREX_SPACEDIM*2>,NBCVAR_max> l_bc_neumann_vals_d;

    const int* bxlo = bx.loVect();
    const int* bxhi = bx.hiVect();
    const int* dlo  = domain.loVect();
    const int* dhi  = domain.hiVect();

    for (int nc = 0; nc < ncomp; nc++)
    {
        int bc_comp = (icomp+nc >= RhoScalar_comp && icomp+nc < RhoScalar_comp+NSCALARS) ?
                       BCVars::RhoScalar_bc_comp : icomp+nc;
        if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
        for (int dir = 0; dir < AMREX_SPACEDIM; dir++)
        {
            bcrs[nc].setLo(dir, ( bxlo[dir]<=dlo[dir]
                                  ? m_domain_bcs_type[bc_comp].lo(dir) : BCType::int_dir ));
            bcrs[nc].setHi(dir, ( bxhi[dir]>=dhi[dir]
                                  ? m_domain_bcs_type[bc_comp].hi(dir) : BCType::int_dir ));
        }

        for (int ori = 0; ori < 2*AMREX_SPACEDIM; ori++) {
            l_bc_extdir_vals_d[bc_comp][ori]  = m_bc_extdir_vals[bc_comp][ori];
            l_bc_neumann_vals_d[bc_comp][ori] = m_bc_neumann_vals[bc_comp][ori];
        }
    }

    Gpu::DeviceVector<BCRec> bcrs_d(icomp+ncomp);
    Gpu::copyAsync(Gpu::hostToDevice, bcrs.begin(), bcrs.end(), bcrs_d.begin());
    const BCRec* bc_ptr = bcrs_d.data();

    {
        Box bx_zlo(bx); bx_zlo.setBig  (2,dom_lo.z-1);
        Box bx_zhi(bx); bx_zhi.setSmall(2,dom_hi.z+1);
        ParallelFor(
            bx_zlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
            {
                int dest_comp = icomp+n;
                int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
                               BCVars::RhoScalar_bc_comp : dest_comp;
                if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
                int l_bc_type = bc_ptr[n].lo(2);
                if (l_bc_type == ERFBCType::ext_dir) {
                    dest_arr(i,j,k,dest_comp) = l_bc_extdir_vals_d[bc_comp][2];
                } else if (l_bc_type == ERFBCType::ext_dir_prim) {
                    Real rho = dest_arr(i,j,dom_lo.z,Rho_comp);
                    dest_arr(i,j,k,dest_comp) = rho * l_bc_extdir_vals_d[bc_comp][2];
                }
            },
            bx_zhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
            {
                int dest_comp = icomp+n;
                int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
                               BCVars::RhoScalar_bc_comp : dest_comp;
                if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
                int h_bc_type = bc_ptr[n].hi(2);
                if (h_bc_type == ERFBCType::ext_dir) {
                    dest_arr(i,j,k,dest_comp) = l_bc_extdir_vals_d[bc_comp][5];
                } else if (h_bc_type == ERFBCType::ext_dir_prim) {
                    Real rho = dest_arr(i,j,dom_hi.z,Rho_comp);
                    dest_arr(i,j,k,dest_comp) = rho * l_bc_extdir_vals_d[bc_comp][5];
                }
            }
        );
    }

    {
        Box bx_zlo(bx); bx_zlo.setBig  (2,dom_lo.z-1);
        Box bx_zhi(bx); bx_zhi.setSmall(2,dom_hi.z+1);
        // Populate ghost cells on lo-z and hi-z domain boundaries
        ParallelFor(
            bx_zlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
            {
                int dest_comp = icomp+n;
                int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
                               BCVars::RhoScalar_bc_comp : dest_comp;
                if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
                int l_bc_type = bc_ptr[n].lo(2);
                int kflip = dom_lo.z - 1 - k;
                if (l_bc_type == ERFBCType::foextrap) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(i,j,dom_lo.z,dest_comp);
                } else if (l_bc_type == ERFBCType::open) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(i,j,dom_lo.z,dest_comp);
                } else if (l_bc_type == ERFBCType::reflect_even) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(i,j,kflip,dest_comp);
                } else if (l_bc_type == ERFBCType::reflect_odd) {
                    dest_arr(i,j,k,dest_comp) = -dest_arr(i,j,kflip,dest_comp);
                } else if (l_bc_type == ERFBCType::neumann) {
                    Real delta_z = (dom_lo.z - k) / dxInv[2];
                    dest_arr(i,j,k,dest_comp) = dest_arr(i,j,dom_lo.z,dest_comp) -
                        delta_z*l_bc_neumann_vals_d[bc_comp][2]*dest_arr(i,j,dom_lo.z,Rho_comp);
                } else if (l_bc_type == ERFBCType::hoextrapcc) {
                    Real delta_k = (dom_lo.z - k);
                    dest_arr(i,j,k,dest_comp) = (1.0 + delta_k)*dest_arr(i,j,dom_lo.z,dest_comp) - delta_k*dest_arr(i,j,dom_lo.z+1,dest_comp);
                }
            },
            bx_zhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
            {
                int dest_comp = icomp+n;
                int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
                               BCVars::RhoScalar_bc_comp : dest_comp;
                if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
                int h_bc_type = bc_ptr[n].hi(2);
                int kflip = 2*dom_hi.z + 1 - k;
                if (h_bc_type == ERFBCType::foextrap) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(i,j,dom_hi.z,dest_comp);
                } else if (h_bc_type == ERFBCType::open) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(i,j,dom_hi.z,dest_comp);
                } else if (h_bc_type == ERFBCType::reflect_even) {
                    dest_arr(i,j,k,dest_comp) =  dest_arr(i,j,kflip,dest_comp);
                } else if (h_bc_type == ERFBCType::reflect_odd) {
                    dest_arr(i,j,k,dest_comp) = -dest_arr(i,j,kflip,dest_comp);
                } else if (h_bc_type == ERFBCType::neumann) {
                    Real delta_z = (k - dom_hi.z) / dxInv[2];
                    if ( (icomp+n) == Rho_comp ) {
                        dest_arr(i,j,k,dest_comp) = dest_arr(i,j,dom_hi.z,dest_comp) +
                            delta_z*l_bc_neumann_vals_d[bc_comp][5];
                    } else {
                        dest_arr(i,j,k,dest_comp) = dest_arr(i,j,dom_hi.z,dest_comp) +
                            delta_z*l_bc_neumann_vals_d[bc_comp][5]*dest_arr(i,j,dom_hi.z,Rho_comp);
                    }
                } else if (h_bc_type == ERFBCType::hoextrapcc) {
                    Real delta_k = (k - dom_hi.z);
                    dest_arr(i,j,k,dest_comp) = (1.0 + delta_k)*dest_arr(i,j,dom_hi.z,dest_comp) - delta_k*dest_arr(i,j,dom_hi.z-1,dest_comp);
                }
            }
        );
    }

    if (do_terrain_adjustment && m_z_phys_nd) {
        const auto& bx_lo = lbound(bx);
        const auto& bx_hi = ubound(bx);
        const BCRec* bc_ptr_h = bcrs.data();
        // Neumann conditions (d<var>/dn = 0) must be aware of the surface normal with terrain.
        // An additional source term arises from d<var>/dx & d<var>/dy & met_h_xi/eta/zeta.
        //=====================================================================================
        // Only modify scalars, U, or V
        // Loop over each component
        for (int n = 0; n < ncomp; n++) {
            // Hit for Neumann condition at kmin
            int dest_comp = icomp+n;
            int l_bc_type = bc_ptr_h[n].lo(2);
            if (l_bc_type == ERFBCType::foextrap)
            {
                // Loop over ghost cells in bottom XY-plane (valid box)
                Box xybx = bx;

                int k0 = dom_lo.z;
                if (xybx.smallEnd(2) < 0)
                {
                    xybx.setBig(2,dom_lo.z-1);
                    xybx.setSmall(2,bx.smallEnd(2));

                    // Get the dz cell size
                    Real dz = geomdata.CellSize(2);

                    // Fill all the Neumann srcs with terrain
                    ParallelFor(xybx, [=] AMREX_GPU_DEVICE (int i, int j, int k)
                    {
                        // Clip indices for ghost-cells
                        int ii = amrex::min(amrex::max(i,perdom_lo.x),perdom_hi.x);
                        int jj = amrex::min(amrex::max(j,perdom_lo.y),perdom_hi.y);

                        // Get metrics
                        Real met_h_xi   = Compute_h_xi_AtCellCenter  (ii,jj,k0,dxInv,z_phys_nd);
                        Real met_h_eta  = Compute_h_eta_AtCellCenter (ii,jj,k0,dxInv,z_phys_nd);
                        Real met_h_zeta = Compute_h_zeta_AtCellCenter(ii,jj,k0,dxInv,z_phys_nd);

                        // GradX at IJK location inside domain -- this relies on the assumption that we have
                        // used foextrap for cell-centered quantities outside the domain to define the gradient as zero
                        Real GradVarx, GradVary;
                        if (i < dom_lo.x-1 || i > dom_hi.x+1 || (i+1 > bx_hi.x && i-1 < bx_lo.x) ) {
                            GradVarx = 0.0;
                        } else if (i+1 > bx_hi.x) {
                            GradVarx =       dxInv[0] * (dest_arr(i  ,j,k0,dest_comp) - dest_arr(i-1,j,k0,dest_comp));
                        } else if (i-1 < bx_lo.x) {
                            GradVarx =       dxInv[0] * (dest_arr(i+1,j,k0,dest_comp) - dest_arr(i  ,j,k0,dest_comp));
                        } else {
                            GradVarx = 0.5 * dxInv[0] * (dest_arr(i+1,j,k0,dest_comp) - dest_arr(i-1,j,k0,dest_comp));
                        }

                        // GradY at IJK location inside domain -- this relies on the assumption that we have
                        // used foextrap for cell-centered quantities outside the domain to define the gradient as zero
                        if (j < dom_lo.y-1 || j > dom_hi.y+1 || (j+1 > bx_hi.y && j-1 < bx_lo.y) ) {
                            GradVary = 0.0;
                        } else if (j+1 > bx_hi.y) {
                            GradVary =       dxInv[1] * (dest_arr(i,j  ,k0,dest_comp) - dest_arr(i,j-1,k0,dest_comp));
                        } else if (j-1 < bx_lo.y) {
                            GradVary =       dxInv[1] * (dest_arr(i,j+1,k0,dest_comp) - dest_arr(i,j  ,k0,dest_comp));
                        } else {
                            GradVary = 0.5 * dxInv[1] * (dest_arr(i,j+1,k0,dest_comp) - dest_arr(i,j-1,k0,dest_comp));
                        }

                        // Prefactor
                        Real met_fac = met_h_zeta / ( met_h_xi*met_h_xi + met_h_eta*met_h_eta + 1. );

                        // Accumulate in bottom ghost cell (EXTRAP already populated)
                        dest_arr(i,j,k,dest_comp) -= dz * met_fac * ( met_h_xi * GradVarx + met_h_eta * GradVary );
                    });
                } // box includes k0
            } // foextrap
        } // ncomp
    } // m_z_phys_nd
    Gpu::streamSynchronize();
}
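When terrain is present (do_terrain_adjustment is true and m_z_phys_nd is non-null), the zero-gradient (foextrap) fill at the bottom boundary is corrected so that the Neumann condition is applied normal to the terrain-following surface rather than along a vertical coordinate line. In terms of the metric terms returned by Compute_h_xi/eta/zeta_AtCellCenter and the one-sided or centered lateral differences GradVarx and GradVary, the discrete update accumulated into the bottom ghost cell can be written (as a LaTeX sketch of the loop above):

\phi_{i,j,k_{\mathrm{ghost}}} \;=\; \phi_{i,j,k_0} \;-\; \Delta z \, \frac{h_\zeta}{h_\xi^2 + h_\eta^2 + 1} \left( h_\xi \, \partial_x \phi + h_\eta \, \partial_y \phi \right)\Big|_{i,j,k_0}

When the lateral gradients vanish the correction is zero and the fill reduces to the plain foextrap value; the loop only runs when the tile actually contains cells below the domain (xybx.smallEnd(2) < 0).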

◆ operator()()

void ERFPhysBCFunct_cons::operator() ( amrex::MultiFab &  mf,
int  icomp,
int  ncomp,
amrex::IntVect const &  nghost,
const amrex::Real  time,
int  bccomp_cons,
bool  do_fb = true,
bool  do_terrain_adjustment = true 
)
{
    BL_PROFILE("ERFPhysBCFunct_cons::()");

    if (m_geom.isAllPeriodic()) return;

    const auto& domain = m_geom.Domain();
    const auto dxInv   = m_geom.InvCellSizeArray();

    // Create a grown domain box containing valid + periodic cells
    Box gdomain = domain;
    for (int i = 0; i < AMREX_SPACEDIM; ++i) {
        if (m_geom.isPeriodic(i)) {
            gdomain.grow(i, nghost[i]);
        }
    }

    //
    // We fill all of the interior and periodic ghost cells first, so we can fill
    // those directly inside the lateral and vertical calls.
    //
    if (do_fb) {
        mf.FillBoundary(m_geom.periodicity());
    }

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    {
        for (MFIter mfi(mf,false); mfi.isValid(); ++mfi)
        {
            //
            // This is the box we pass to the different routines
            // NOTE -- this is the full grid box NOT the tile box
            //
            Box bx = mfi.validbox();

            //
            // These are the boxes we use to test on relative to the domain
            //
            Box cbx1 = bx; cbx1.grow(IntVect(nghost[0],nghost[1],0));
            Box cbx2 = bx; cbx2.grow(nghost);

            Array4<const Real> z_nd_arr;

            if (m_z_phys_nd)
            {
                z_nd_arr = m_z_phys_nd->const_array(mfi);
            }

            if (!gdomain.contains(cbx2))
            {
                const Array4<Real> cons_arr = mf.array(mfi);

                if (!m_use_real_bcs)
                {
                    // We send a box with ghost cells in the lateral directions only
                    impose_lateral_cons_bcs(cons_arr,cbx1,domain,icomp,ncomp,nghost);
                }

                // We send the full FAB box with ghost cells
                impose_vertical_cons_bcs(cons_arr,cbx2,domain,z_nd_arr,dxInv,icomp,ncomp,do_terrain_adjustment);
            }

        } // MFIter
    } // OpenMP
} // operator()
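A typical call site lets the functor fill every ghost cell of a conserved-variable MultiFab after an advance. The sketch below assumes a previously constructed physbc together with caller-supplied mf, time, and bccomp_cons; the wrapper function itself is illustrative and not part of ERF.

// Hypothetical wrapper: physbc, mf, time, and bccomp_cons come from the caller.
void fill_cons_ghost_cells (ERFPhysBCFunct_cons& physbc,
                            amrex::MultiFab& mf,
                            amrex::Real time,
                            int bccomp_cons)
{
    // Fill all components of mf in all of its ghost cells. With do_fb = true
    // (the default), operator() first fills interior/periodic ghost cells via
    // FillBoundary, then imposes lateral and vertical physical BCs on boxes
    // that touch the domain boundary.
    physbc(mf, /*icomp=*/0, /*ncomp=*/mf.nComp(), mf.nGrowVect(), time, bccomp_cons);
}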

Member Data Documentation

◆ m_bc_extdir_vals

amrex::Array<amrex::Array<amrex::Real, AMREX_SPACEDIM*2>,AMREX_SPACEDIM+NBCVAR_max> ERFPhysBCFunct_cons::m_bc_extdir_vals
private

◆ m_bc_neumann_vals

amrex::Array<amrex::Array<amrex::Real, AMREX_SPACEDIM*2>,AMREX_SPACEDIM+NBCVAR_max> ERFPhysBCFunct_cons::m_bc_neumann_vals
private

◆ m_domain_bcs_type

amrex::Vector<amrex::BCRec> ERFPhysBCFunct_cons::m_domain_bcs_type
private

◆ m_domain_bcs_type_d

amrex::Gpu::DeviceVector<amrex::BCRec> ERFPhysBCFunct_cons::m_domain_bcs_type_d
private

◆ m_geom

amrex::Geometry ERFPhysBCFunct_cons::m_geom
private

◆ m_lev

int ERFPhysBCFunct_cons::m_lev
private

◆ m_use_real_bcs

bool ERFPhysBCFunct_cons::m_use_real_bcs
private

◆ m_z_phys_nd

amrex::MultiFab* ERFPhysBCFunct_cons::m_z_phys_nd
private

The documentation for this class was generated from the following files: