ERF
Energy Research and Forecasting: An Atmospheric Modeling Code
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
ERFPhysBCFunct_cons Class Reference

#include <ERF_PhysBCFunct.H>

Collaboration diagram for ERFPhysBCFunct_cons:

Public Member Functions

 ERFPhysBCFunct_cons (const int lev, const amrex::Geometry &geom, const amrex::Vector< amrex::BCRec > &domain_bcs_type, const amrex::Gpu::DeviceVector< amrex::BCRec > &domain_bcs_type_d, amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > bc_extdir_vals, amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > bc_neumann_vals, std::unique_ptr< amrex::MultiFab > &z_phys_nd, const bool use_real_bcs, amrex::Real *th_bc_data)
 
 ~ERFPhysBCFunct_cons ()
 
void operator() (amrex::MultiFab &mf, amrex::MultiFab &xvel, amrex::MultiFab &yvel, int icomp, int ncomp, amrex::IntVect const &nghost, const amrex::Real time, int bccomp_cons, bool do_fb=true, bool do_terrain_adjustment=true)
 
void impose_lateral_cons_bcs (const amrex::Array4< amrex::Real > &dest_arr, const amrex::Array4< amrex::Real const > &xvel_arr, const amrex::Array4< amrex::Real const > &yvel_arr, const amrex::Box &bx, const amrex::Box &domain, int icomp, int ncomp, amrex::IntVect ng, const amrex::Real time)
 
void impose_vertical_cons_bcs (const amrex::Array4< amrex::Real > &dest_arr, const amrex::Box &bx, const amrex::Box &domain, const amrex::Array4< amrex::Real const > &z_nd, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > dxInv, int icomp, int ncomp, const amrex::Real time, bool do_terrain_adjustment=true)
 

Private Attributes

int m_lev
 
amrex::Geometry m_geom
 
amrex::Vector< amrex::BCRec > m_domain_bcs_type
 
amrex::Gpu::DeviceVector< amrex::BCRec > m_domain_bcs_type_d
 
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > m_bc_extdir_vals
 
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > m_bc_neumann_vals
 
amrex::MultiFab * m_z_phys_nd
 
bool m_use_real_bcs
 
amrex::Real * m_th_bc_data
 

Constructor & Destructor Documentation

◆ ERFPhysBCFunct_cons()

ERFPhysBCFunct_cons::ERFPhysBCFunct_cons ( const int  lev,
const amrex::Geometry &  geom,
const amrex::Vector< amrex::BCRec > &  domain_bcs_type,
const amrex::Gpu::DeviceVector< amrex::BCRec > &  domain_bcs_type_d,
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max >  bc_extdir_vals,
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max >  bc_neumann_vals,
std::unique_ptr< amrex::MultiFab > &  z_phys_nd,
const bool  use_real_bcs,
amrex::Real *  th_bc_data 
)
inline
34  : m_lev(lev), m_geom(geom),
35  m_domain_bcs_type(domain_bcs_type),
36  m_domain_bcs_type_d(domain_bcs_type_d),
37  m_bc_extdir_vals(bc_extdir_vals),
38  m_bc_neumann_vals(bc_neumann_vals),
39  m_z_phys_nd(z_phys_nd.get()),
40  m_use_real_bcs(use_real_bcs),
41  m_th_bc_data(th_bc_data)
42  {}
bool m_use_real_bcs
Definition: ERF_PhysBCFunct.H:81
amrex::Geometry m_geom
Definition: ERF_PhysBCFunct.H:75
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > m_bc_extdir_vals
Definition: ERF_PhysBCFunct.H:78
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > m_bc_neumann_vals
Definition: ERF_PhysBCFunct.H:79
amrex::Gpu::DeviceVector< amrex::BCRec > m_domain_bcs_type_d
Definition: ERF_PhysBCFunct.H:77
amrex::Vector< amrex::BCRec > m_domain_bcs_type
Definition: ERF_PhysBCFunct.H:76
amrex::Real * m_th_bc_data
Definition: ERF_PhysBCFunct.H:82
amrex::MultiFab * m_z_phys_nd
Definition: ERF_PhysBCFunct.H:80
int m_lev
Definition: ERF_PhysBCFunct.H:74

◆ ~ERFPhysBCFunct_cons()

ERFPhysBCFunct_cons::~ERFPhysBCFunct_cons ( )
inline
44 {}

Member Function Documentation

◆ impose_lateral_cons_bcs()

void ERFPhysBCFunct_cons::impose_lateral_cons_bcs ( const amrex::Array4< amrex::Real > &  dest_arr,
const amrex::Array4< amrex::Real const > &  xvel_arr,
const amrex::Array4< amrex::Real const > &  yvel_arr,
const amrex::Box &  bx,
const amrex::Box &  domain,
int  icomp,
int  ncomp,
amrex::IntVect  ng,
const amrex::Real  time 
)
24 {
25  BL_PROFILE_VAR("impose_lateral_cons_bcs()",impose_lateral_cons_bcs);
26  const auto& dom_lo = lbound(domain);
27  const auto& dom_hi = ubound(domain);
28 
29  // xlo: ori = 0
30  // ylo: ori = 1
31  // zlo: ori = 2
32  // xhi: ori = 3
33  // yhi: ori = 4
34  // zhi: ori = 5
35 
36  // Based on BCRec for the domain, we need to make BCRec for this Box
37  // 0 is used as starting index for bcrs
38  Vector<BCRec> bcrs(ncomp);
39 
40  GpuArray<GpuArray<Real, AMREX_SPACEDIM*2>,NBCVAR_max> l_bc_extdir_vals_d;
41 
42  const int* bxlo = bx.loVect();
43  const int* bxhi = bx.hiVect();
44  const int* dlo = domain.loVect();
45  const int* dhi = domain.hiVect();
46 
47  for (int nc = 0; nc < ncomp; nc++)
48  {
49  int bc_comp = (icomp+nc >= RhoScalar_comp && icomp+nc < RhoScalar_comp+NSCALARS) ?
50  BCVars::RhoScalar_bc_comp : icomp+nc;
51  if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
52  for (int dir = 0; dir < AMREX_SPACEDIM; dir++)
53  {
54  bcrs[nc].setLo(dir, ( bxlo[dir]<=dlo[dir]
55  ? m_domain_bcs_type[bc_comp].lo(dir) : BCType::int_dir ));
56  bcrs[nc].setHi(dir, ( bxhi[dir]>=dhi[dir]
57  ? m_domain_bcs_type[bc_comp].hi(dir) : BCType::int_dir ));
58  }
59 
60  for (int ori = 0; ori < 2*AMREX_SPACEDIM; ori++) {
61  l_bc_extdir_vals_d[bc_comp][ori] = m_bc_extdir_vals[bc_comp][ori];
62  }
63  }
64 
65  Gpu::DeviceVector<BCRec> bcrs_d(ncomp);
66  Gpu::copyAsync(Gpu::hostToDevice, bcrs.begin(), bcrs.end(), bcrs_d.begin());
67  const BCRec* bc_ptr = bcrs_d.data();
68 
69  GeometryData const& geomdata = m_geom.data();
70  bool is_periodic_in_x = geomdata.isPeriodic(0);
71  bool is_periodic_in_y = geomdata.isPeriodic(1);
72 
73  // First do all ext_dir bcs
74  if (!is_periodic_in_x)
75  {
76  Real* th_bc_ptr = m_th_bc_data;
77  Box bx_xlo(bx); bx_xlo.setBig (0,dom_lo.x-1);
78  Box bx_xhi(bx); bx_xhi.setSmall(0,dom_hi.x+1);
79  //
80  // If we are setting Dirichlet values, set them in all ghost cells on an inflow face
81  // "bx" is already grown in the x- and y-directions so here we just grow it in z
82  //
83  bx_xlo.grow(2,ng[2]);
84  bx_xhi.grow(2,ng[2]);
85  ParallelFor(
86  bx_xlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
87  {
88  int dest_comp = icomp+n;
89  int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
90  BCVars::RhoScalar_bc_comp : dest_comp;
91  if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
92  int l_bc_type = bc_ptr[n].lo(0);
93 
94  if ( (l_bc_type == ERFBCType::ext_dir) ||
95  (l_bc_type == ERFBCType::ext_dir_upwind && xvel_arr(dom_lo.x,j,k) >= 0.) )
96  {
97  if ((dest_comp == RhoTheta_comp) && th_bc_ptr) {
98  dest_arr(i,j,k,dest_comp) = th_bc_ptr[k];
99  } else {
100  dest_arr(i,j,k,dest_comp) = l_bc_extdir_vals_d[bc_comp][0];
101  }
102  } else if (l_bc_type == ERFBCType::ext_dir_prim) {
103  Real rho = dest_arr(dom_lo.x,j,k,Rho_comp);
104  if ((dest_comp == RhoTheta_comp) && th_bc_ptr) {
105  dest_arr(i,j,k,dest_comp) = rho * th_bc_ptr[k];
106  } else {
107  dest_arr(i,j,k,dest_comp) = rho * l_bc_extdir_vals_d[bc_comp][0];
108  }
109  }
110  },
111  bx_xhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
112  {
113  int dest_comp = icomp+n;
114  int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
115  BCVars::RhoScalar_bc_comp : dest_comp;
116  if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
117  int h_bc_type = bc_ptr[n].hi(0);
118 
119  if ( (h_bc_type == ERFBCType::ext_dir) ||
120  (h_bc_type == ERFBCType::ext_dir_upwind && xvel_arr(dom_hi.x+1,j,k) <= 0.) )
121  {
122  if ((dest_comp == RhoTheta_comp) && th_bc_ptr) {
123  dest_arr(i,j,k,dest_comp) = th_bc_ptr[k];
124  } else {
125  dest_arr(i,j,k,dest_comp) = l_bc_extdir_vals_d[bc_comp][3];
126  }
127  } else if (h_bc_type == ERFBCType::ext_dir_prim) {
128  Real rho = dest_arr(dom_hi.x,j,k,Rho_comp);
129  if ((dest_comp == RhoTheta_comp) && th_bc_ptr) {
130  dest_arr(i,j,k,dest_comp) = rho * th_bc_ptr[k];
131  } else {
132  dest_arr(i,j,k,dest_comp) = rho * l_bc_extdir_vals_d[bc_comp][3];
133  }
134  }
135  }
136  );
137  }
138 
139  if (!is_periodic_in_y)
140  {
141  Real* th_bc_ptr = m_th_bc_data;
142  Box bx_ylo(bx); bx_ylo.setBig (1,dom_lo.y-1);
143  Box bx_yhi(bx); bx_yhi.setSmall(1,dom_hi.y+1);
144  //
145  // If we are setting Dirichlet values, set them in all ghost cells on an inflow face
146  // "bx" is already grown in the x- and y-directions so here we just grow it in z
147  //
148  bx_ylo.grow(2,ng[2]);
149  bx_yhi.grow(2,ng[2]);
150 
151  ParallelFor(
152  bx_ylo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
153  {
154  int dest_comp = icomp+n;
155  int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
156  BCVars::RhoScalar_bc_comp : dest_comp;
157  if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
158  int l_bc_type = bc_ptr[n].lo(1);
159  if ( (l_bc_type == ERFBCType::ext_dir) ||
160  (l_bc_type == ERFBCType::ext_dir_upwind && yvel_arr(i,dom_lo.y,k) >= 0.) )
161  {
162  if ((dest_comp == RhoTheta_comp) && th_bc_ptr) {
163  dest_arr(i,j,k,dest_comp) = th_bc_ptr[k];
164  } else {
165  dest_arr(i,j,k,dest_comp) = l_bc_extdir_vals_d[bc_comp][1];
166  }
167  } else if (l_bc_type == ERFBCType::ext_dir_prim) {
168  Real rho = dest_arr(i,dom_lo.y,k,Rho_comp);
169  if ((dest_comp == RhoTheta_comp) && th_bc_ptr) {
170  dest_arr(i,j,k,dest_comp) = rho * th_bc_ptr[k];
171  } else {
172  dest_arr(i,j,k,dest_comp) = rho * l_bc_extdir_vals_d[bc_comp][1];
173  }
174  }
175  },
176  bx_yhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
177  {
178  int dest_comp = icomp+n;
179  int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
180  BCVars::RhoScalar_bc_comp : dest_comp;
181  if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
182  int h_bc_type = bc_ptr[n].hi(1);
183  if ( (h_bc_type == ERFBCType::ext_dir) ||
184  (h_bc_type == ERFBCType::ext_dir_upwind && yvel_arr(i,dom_hi.y+1,k) <= 0.) )
185  {
186  if ((dest_comp == RhoTheta_comp) && th_bc_ptr) {
187  dest_arr(i,j,k,dest_comp) = th_bc_ptr[k];
188  } else {
189  dest_arr(i,j,k,dest_comp) = l_bc_extdir_vals_d[bc_comp][4];
190  }
191  } else if (h_bc_type == ERFBCType::ext_dir_prim) {
192  Real rho = dest_arr(i,dom_hi.y,k,Rho_comp);
193  if ((dest_comp == RhoTheta_comp) && th_bc_ptr) {
194  dest_arr(i,j,k,dest_comp) = rho * th_bc_ptr[k];
195  } else {
196  dest_arr(i,j,k,dest_comp) = rho * l_bc_extdir_vals_d[bc_comp][4];
197  }
198  }
199  }
200  );
201  }
202 
203  // Next do ghost cells in x-direction but not reaching out in y
204  // The corners we miss here will be covered in the y-loop below or by periodicity
205  if (!is_periodic_in_x)
206  {
207  // Populate ghost cells on lo-x and hi-x domain boundaries
208  Box bx_xlo(bx); bx_xlo.setBig (0,dom_lo.x-1);
209  Box bx_xhi(bx); bx_xhi.setSmall(0,dom_hi.x+1);
210  if (bx_xlo.smallEnd(2) != domain.smallEnd(2)) bx_xlo.growLo(2,ng[2]);
211  if (bx_xlo.bigEnd(2) != domain.bigEnd(2)) bx_xlo.growHi(2,ng[2]);
212  if (bx_xhi.smallEnd(2) != domain.smallEnd(2)) bx_xhi.growLo(2,ng[2]);
213  if (bx_xhi.bigEnd(2) != domain.bigEnd(2)) bx_xhi.growHi(2,ng[2]);
214  ParallelFor(
215  bx_xlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
216  {
217  int dest_comp = icomp+n;
218  int l_bc_type = bc_ptr[n].lo(0);
219  int iflip = dom_lo.x - 1 - i;
220  if (l_bc_type == ERFBCType::foextrap) {
221  dest_arr(i,j,k,dest_comp) = dest_arr(dom_lo.x,j,k,dest_comp);
222  } else if (l_bc_type == ERFBCType::open) {
223  dest_arr(i,j,k,dest_comp) = dest_arr(dom_lo.x,j,k,dest_comp);
224  } else if (l_bc_type == ERFBCType::reflect_even) {
225  dest_arr(i,j,k,dest_comp) = dest_arr(iflip,j,k,dest_comp);
226  } else if (l_bc_type == ERFBCType::reflect_odd) {
227  dest_arr(i,j,k,dest_comp) = -dest_arr(iflip,j,k,dest_comp);
228  } else if (l_bc_type == ERFBCType::hoextrap) {
229  Real delta_i = (dom_lo.x - i);
230  dest_arr(i,j,k,dest_comp) = (1.0 + delta_i)*dest_arr(dom_lo.x,j,k,dest_comp) - delta_i*dest_arr(dom_lo.x+1,j,k,dest_comp);
231  }
232  },
233  bx_xhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
234  {
235  int dest_comp = icomp+n;
236  int h_bc_type = bc_ptr[n].hi(0);
237  int iflip = 2*dom_hi.x + 1 - i;
238  if (h_bc_type == ERFBCType::foextrap) {
239  dest_arr(i,j,k,dest_comp) = dest_arr(dom_hi.x,j,k,dest_comp);
240  } else if (h_bc_type == ERFBCType::open) {
241  dest_arr(i,j,k,dest_comp) = dest_arr(dom_hi.x,j,k,dest_comp);
242  } else if (h_bc_type == ERFBCType::reflect_even) {
243  dest_arr(i,j,k,dest_comp) = dest_arr(iflip,j,k,dest_comp);
244  } else if (h_bc_type == ERFBCType::reflect_odd) {
245  dest_arr(i,j,k,dest_comp) = -dest_arr(iflip,j,k,dest_comp);
246  } else if (h_bc_type == ERFBCType::hoextrap) {
247  Real delta_i = (i - dom_hi.x);
248  dest_arr(i,j,k,dest_comp) = (1.0 + delta_i)*dest_arr(dom_hi.x,j,k,dest_comp) - delta_i*dest_arr(dom_hi.x-1,j,k,dest_comp);
249  }
250  }
251  );
252  }
253 
254  if (!is_periodic_in_y)
255  {
256  // Populate ghost cells on lo-y and hi-y domain boundaries
257  Box bx_ylo(bx); bx_ylo.setBig (1,dom_lo.y-1);
258  Box bx_yhi(bx); bx_yhi.setSmall(1,dom_hi.y+1);
259  if (bx_ylo.smallEnd(2) != domain.smallEnd(2)) bx_ylo.growLo(2,ng[2]);
260  if (bx_ylo.bigEnd(2) != domain.bigEnd(2)) bx_ylo.growHi(2,ng[2]);
261  if (bx_yhi.smallEnd(2) != domain.smallEnd(2)) bx_yhi.growLo(2,ng[2]);
262  if (bx_yhi.bigEnd(2) != domain.bigEnd(2)) bx_yhi.growHi(2,ng[2]);
263  ParallelFor(
264  bx_ylo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
265  {
266  int dest_comp = icomp+n;
267  int l_bc_type = bc_ptr[n].lo(1);
268  int jflip = dom_lo.y - 1 - j;
269  if (l_bc_type == ERFBCType::foextrap) {
270  dest_arr(i,j,k,dest_comp) = dest_arr(i,dom_lo.y,k,dest_comp);
271  } else if (l_bc_type == ERFBCType::open) {
272  dest_arr(i,j,k,dest_comp) = dest_arr(i,dom_lo.y,k,dest_comp);
273  } else if (l_bc_type == ERFBCType::reflect_even) {
274  dest_arr(i,j,k,dest_comp) = dest_arr(i,jflip,k,dest_comp);
275  } else if (l_bc_type == ERFBCType::reflect_odd) {
276  dest_arr(i,j,k,dest_comp) = -dest_arr(i,jflip,k,dest_comp);
277  } else if (l_bc_type == ERFBCType::hoextrap) {
278  Real delta_j = (dom_lo.y - j);
279  dest_arr(i,j,k,dest_comp) = (1.0 + delta_j)*dest_arr(i,dom_lo.y,k,dest_comp) - delta_j*dest_arr(i,dom_lo.y+1,k,dest_comp);
280  }
281 
282  },
283  bx_yhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
284  {
285  int dest_comp = icomp+n;
286  int h_bc_type = bc_ptr[n].hi(1);
287  int jflip = 2*dom_hi.y + 1 - j;
288  if (h_bc_type == ERFBCType::foextrap) {
289  dest_arr(i,j,k,dest_comp) = dest_arr(i,dom_hi.y,k,dest_comp);
290  } else if (h_bc_type == ERFBCType::open) {
291  dest_arr(i,j,k,dest_comp) = dest_arr(i,dom_hi.y,k,dest_comp);
292  } else if (h_bc_type == ERFBCType::reflect_even) {
293  dest_arr(i,j,k,dest_comp) = dest_arr(i,jflip,k,dest_comp);
294  } else if (h_bc_type == ERFBCType::reflect_odd) {
295  dest_arr(i,j,k,dest_comp) = -dest_arr(i,jflip,k,dest_comp);
296  } else if (h_bc_type == ERFBCType::hoextrap) {
297  Real delta_j = (j - dom_hi.y);
298  dest_arr(i,j,k,dest_comp) = (1.0 + delta_j)*dest_arr(i,dom_hi.y,k,dest_comp) - delta_j*dest_arr(i,dom_hi.y-1,k,dest_comp);
299  }
300  }
301  );
302  }
303  Gpu::streamSynchronize();
304 }
const auto & dom_hi
Definition: ERF_DiffSetup.H:10
const auto & dom_lo
Definition: ERF_DiffSetup.H:9
#define RhoScalar_comp
Definition: ERF_IndexDefines.H:40
#define Rho_comp
Definition: ERF_IndexDefines.H:36
#define RhoTheta_comp
Definition: ERF_IndexDefines.H:37
#define NBCVAR_max
Definition: ERF_IndexDefines.H:29
#define NSCALARS
Definition: ERF_IndexDefines.H:16
void impose_lateral_cons_bcs(const amrex::Array4< amrex::Real > &dest_arr, const amrex::Array4< amrex::Real const > &xvel_arr, const amrex::Array4< amrex::Real const > &yvel_arr, const amrex::Box &bx, const amrex::Box &domain, int icomp, int ncomp, amrex::IntVect ng, const amrex::Real time)
Definition: ERF_BoundaryConditionsCons.cpp:18
@ RhoScalar_bc_comp
Definition: ERF_IndexDefines.H:80
@ open
Definition: ERF_IndexDefines.H:215
@ reflect_odd
Definition: ERF_IndexDefines.H:205
@ hoextrap
Definition: ERF_IndexDefines.H:216
@ foextrap
Definition: ERF_IndexDefines.H:208
@ ext_dir
Definition: ERF_IndexDefines.H:209
@ ext_dir_prim
Definition: ERF_IndexDefines.H:211
@ ext_dir_upwind
Definition: ERF_IndexDefines.H:217
@ int_dir
Definition: ERF_IndexDefines.H:206
@ reflect_even
Definition: ERF_IndexDefines.H:207
@ rho
Definition: ERF_Kessler.H:22
@ ng
Definition: ERF_Morrison.H:48
@ nc
Definition: ERF_Morrison.H:44

◆ impose_vertical_cons_bcs()

void ERFPhysBCFunct_cons::impose_vertical_cons_bcs ( const amrex::Array4< amrex::Real > &  dest_arr,
const amrex::Box &  bx,
const amrex::Box &  domain,
const amrex::Array4< amrex::Real const > &  z_nd,
const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM >  dxInv,
int  icomp,
int  ncomp,
const amrex::Real  time,
bool  do_terrain_adjustment = true 
)
325 {
326  BL_PROFILE_VAR("impose_vertical_cons_bcs()",impose_vertical_cons_bcs);
327  const auto& dom_lo = lbound(domain);
328  const auto& dom_hi = ubound(domain);
329 
330  Box per_grown_domain(domain);
331  int growx = (m_geom.isPeriodic(0)) ? 1 : 0;
332  int growy = (m_geom.isPeriodic(1)) ? 1 : 0;
333  per_grown_domain.grow(IntVect(growx,growy,0));
334  const auto& perdom_lo = lbound(per_grown_domain);
335  const auto& perdom_hi = ubound(per_grown_domain);
336 
337  GeometryData const& geomdata = m_geom.data();
338 
339  // xlo: ori = 0
340  // ylo: ori = 1
341  // zlo: ori = 2
342  // xhi: ori = 3
343  // yhi: ori = 4
344  // zhi: ori = 5
345 
346  // Based on BCRec for the domain, we need to make BCRec for this Box
347  // 0 is used as starting index for bcrs
348  Vector<BCRec> bcrs(ncomp);
349  GpuArray<GpuArray<Real, AMREX_SPACEDIM*2>,NBCVAR_max> l_bc_extdir_vals_d;
350  GpuArray<GpuArray<Real, AMREX_SPACEDIM*2>,NBCVAR_max> l_bc_neumann_vals_d;
351 
352  const int* bxlo = bx.loVect();
353  const int* bxhi = bx.hiVect();
354  const int* dlo = domain.loVect();
355  const int* dhi = domain.hiVect();
356 
357  for (int nc = 0; nc < ncomp; nc++)
358  {
359  int bc_comp = (icomp+nc >= RhoScalar_comp && icomp+nc < RhoScalar_comp+NSCALARS) ?
360  BCVars::RhoScalar_bc_comp : icomp+nc;
361  if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
362  for (int dir = 0; dir < AMREX_SPACEDIM; dir++)
363  {
364  bcrs[nc].setLo(dir, ( bxlo[dir]<=dlo[dir]
365  ? m_domain_bcs_type[bc_comp].lo(dir) : BCType::int_dir ));
366  bcrs[nc].setHi(dir, ( bxhi[dir]>=dhi[dir]
367  ? m_domain_bcs_type[bc_comp].hi(dir) : BCType::int_dir ));
368  }
369 
370  for (int ori = 0; ori < 2*AMREX_SPACEDIM; ori++) {
371  l_bc_extdir_vals_d[bc_comp][ori] = m_bc_extdir_vals[bc_comp][ori];
372  l_bc_neumann_vals_d[bc_comp][ori] = m_bc_neumann_vals[bc_comp][ori];
373  }
374  }
375 
376  Gpu::DeviceVector<BCRec> bcrs_d(icomp+ncomp);
377  Gpu::copyAsync(Gpu::hostToDevice, bcrs.begin(), bcrs.end(), bcrs_d.begin());
378  const BCRec* bc_ptr = bcrs_d.data();
379 
380  {
381  Box bx_zlo(bx); bx_zlo.setBig (2,dom_lo.z-1);
382  Box bx_zhi(bx); bx_zhi.setSmall(2,dom_hi.z+1);
383  ParallelFor(
384  bx_zlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
385  {
386  int dest_comp = icomp+n;
387  int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
388  BCVars::RhoScalar_bc_comp : dest_comp;
389  if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
390  int l_bc_type = bc_ptr[n].lo(2);
391  if (l_bc_type == ERFBCType::ext_dir) {
392  dest_arr(i,j,k,dest_comp) = l_bc_extdir_vals_d[bc_comp][2];
393  } else if (l_bc_type == ERFBCType::ext_dir_prim) {
394  Real rho = dest_arr(i,j,dom_lo.z,Rho_comp);
395  dest_arr(i,j,k,dest_comp) = rho * l_bc_extdir_vals_d[bc_comp][2];
396  }
397  },
398  bx_zhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
399  {
400  int dest_comp = icomp+n;
401  int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
402  BCVars::RhoScalar_bc_comp : dest_comp;
403  if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
404  int h_bc_type = bc_ptr[n].hi(2);
405  if (h_bc_type == ERFBCType::ext_dir) {
406  dest_arr(i,j,k,dest_comp) = l_bc_extdir_vals_d[bc_comp][5];
407  } else if (h_bc_type == ERFBCType::ext_dir_prim) {
408  Real rho = dest_arr(i,j,dom_hi.z,Rho_comp);
409  dest_arr(i,j,k,dest_comp) = rho * l_bc_extdir_vals_d[bc_comp][5];
410  }
411 
412  }
413  );
414  }
415 
416  {
417  Box bx_zlo(bx); bx_zlo.setBig (2,dom_lo.z-1);
418  Box bx_zhi(bx); bx_zhi.setSmall(2,dom_hi.z+1);
419  // Populate ghost cells on lo-z and hi-z domain boundaries
420  ParallelFor(
421  bx_zlo, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
422  {
423  int dest_comp = icomp+n;
424  int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
425  BCVars::RhoScalar_bc_comp : dest_comp;
426  if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
427  int l_bc_type = bc_ptr[n].lo(2);
428  int kflip = dom_lo.z - 1 - i;
429  if (l_bc_type == ERFBCType::foextrap) {
430  dest_arr(i,j,k,dest_comp) = dest_arr(i,j,dom_lo.z,dest_comp);
431  } else if (l_bc_type == ERFBCType::open) {
432  dest_arr(i,j,k,dest_comp) = dest_arr(i,j,dom_lo.z,dest_comp);
433  } else if (l_bc_type == ERFBCType::reflect_even) {
434  dest_arr(i,j,k,dest_comp) = dest_arr(i,j,kflip,dest_comp);
435  } else if (l_bc_type == ERFBCType::reflect_odd) {
436  dest_arr(i,j,k,dest_comp) = -dest_arr(i,j,kflip,dest_comp);
437  } else if (l_bc_type == ERFBCType::neumann) {
438  Real delta_z = Compute_Zrel_AtCellCenter(i,j,k,z_phys_nd);
439  dest_arr(i,j,k,dest_comp) = dest_arr(i,j,dom_lo.z,dest_comp) -
440  delta_z*l_bc_neumann_vals_d[bc_comp][2]*dest_arr(i,j,dom_lo.z,Rho_comp);
441  } else if (l_bc_type == ERFBCType::hoextrap) {
442  Real delta_k = (dom_lo.z - k);
443  dest_arr(i,j,k,dest_comp) = (1.0 + delta_k)*dest_arr(i,j,dom_lo.z,dest_comp) - delta_k*dest_arr(i,j,dom_lo.z+1,dest_comp);
444  }
445  },
446  bx_zhi, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
447  {
448  int dest_comp = icomp+n;
449  int bc_comp = (dest_comp >= RhoScalar_comp && dest_comp < RhoScalar_comp+NSCALARS) ?
450  BCVars::RhoScalar_bc_comp : dest_comp;
451  if (bc_comp > BCVars::RhoScalar_bc_comp) bc_comp -= (NSCALARS-1);
452  int h_bc_type = bc_ptr[n].hi(2);
453  int kflip = 2*dom_hi.z + 1 - i;
454  if (h_bc_type == ERFBCType::foextrap) {
455  dest_arr(i,j,k,dest_comp) = dest_arr(i,j,dom_hi.z,dest_comp);
456  } else if (h_bc_type == ERFBCType::open) {
457  dest_arr(i,j,k,dest_comp) = dest_arr(i,j,dom_hi.z,dest_comp);
458  } else if (h_bc_type == ERFBCType::reflect_even) {
459  dest_arr(i,j,k,dest_comp) = dest_arr(i,j,kflip,dest_comp);
460  } else if (h_bc_type == ERFBCType::reflect_odd) {
461  dest_arr(i,j,k,dest_comp) = -dest_arr(i,j,kflip,dest_comp);
462  } else if (h_bc_type == ERFBCType::neumann) {
463  Real delta_z = Compute_Z_AtCellCenter(i,j,k ,z_phys_nd)
464  - Compute_Z_AtCellCenter(i,j,dom_hi.z,z_phys_nd);
465  if( (icomp+n) == Rho_comp ) {
466  dest_arr(i,j,k,dest_comp) = dest_arr(i,j,dom_hi.z,dest_comp) +
467  delta_z*l_bc_neumann_vals_d[bc_comp][5];
468  } else {
469  dest_arr(i,j,k,dest_comp) = dest_arr(i,j,dom_hi.z,dest_comp) +
470  delta_z*l_bc_neumann_vals_d[bc_comp][5]*dest_arr(i,j,dom_hi.z,Rho_comp);
471  }
472  } else if (h_bc_type == ERFBCType::hoextrap){
473  Real delta_k = (k - dom_hi.z);
474  dest_arr(i,j,k,dest_comp) = (1.0 + delta_k)*dest_arr(i,j,dom_hi.z,dest_comp) - delta_k*dest_arr(i,j,dom_hi.z-1,dest_comp);
475  }
476  }
477  );
478  }
479 
480  if (do_terrain_adjustment && m_z_phys_nd) {
481  const auto& bx_lo = lbound(bx);
482  const auto& bx_hi = ubound(bx);
483  const BCRec* bc_ptr_h = bcrs.data();
484  // Neumann conditions (d<var>/dn = 0) must be aware of the surface normal with terrain.
485  // An additional source term arises from d<var>/dx & d<var>/dy & met_h_xi/eta/zeta.
486  //=====================================================================================
487  // Only modify scalars, U, or V
488  // Loop over each component
489  for (int n = 0; n < ncomp; n++) {
490  // Hit for Neumann condition at kmin
491  int dest_comp = icomp+n;
492  int l_bc_type = bc_ptr_h[n].lo(2);
493  if(l_bc_type == ERFBCType::foextrap)
494  {
495  // Loop over ghost cells in bottom XY-plane (valid box)
496  Box xybx = bx;
497 
498  int k0 = dom_lo.z;
499  if (xybx.smallEnd(2) < 0)
500  {
501  xybx.setBig(2,dom_lo.z-1);
502  xybx.setSmall(2,bx.smallEnd(2));
503 
504  // Get the dz cell size
505  Real dz = geomdata.CellSize(2);
506 
507  // Fill all the Neumann srcs with terrain
508  ParallelFor(xybx, [=] AMREX_GPU_DEVICE (int i, int j, int k)
509  {
510  // Clip indices for ghost-cells
511  int ii = amrex::min(amrex::max(i,perdom_lo.x),perdom_hi.x);
512  int jj = amrex::min(amrex::max(j,perdom_lo.y),perdom_hi.y);
513 
514  // Get metrics
515  Real met_h_xi = Compute_h_xi_AtCellCenter (ii,jj,k0,dxInv,z_phys_nd);
516  Real met_h_eta = Compute_h_eta_AtCellCenter (ii,jj,k0,dxInv,z_phys_nd);
517  Real met_h_zeta = Compute_h_zeta_AtCellCenter(ii,jj,k0,dxInv,z_phys_nd);
518 
519  // GradX at IJK location inside domain -- this relies on the assumption that we have
520  // used foextrap for cell-centered quantities outside the domain to define the gradient as zero
521  Real GradVarx, GradVary;
522  if (i < dom_lo.x-1 || i > dom_hi.x+1 || (i+1 > bx_hi.x && i-1 < bx_lo.x) ) {
523  GradVarx = 0.0;
524  } else if (i+1 > bx_hi.x) {
525  GradVarx = dxInv[0] * (dest_arr(i ,j,k0,dest_comp) - dest_arr(i-1,j,k0,dest_comp));
526  } else if (i-1 < bx_lo.x) {
527  GradVarx = dxInv[0] * (dest_arr(i+1,j,k0,dest_comp) - dest_arr(i ,j,k0,dest_comp));
528  } else {
529  GradVarx = 0.5 * dxInv[0] * (dest_arr(i+1,j,k0,dest_comp) - dest_arr(i-1,j,k0,dest_comp));
530  }
531 
532  // GradY at IJK location inside domain -- this relies on the assumption that we have
533  // used foextrap for cell-centered quantities outside the domain to define the gradient as zero
534  if (j < dom_lo.y-1 || j > dom_hi.y+1 || (j+1 > bx_hi.y && j-1 < bx_lo.y) ) {
535  GradVary = 0.0;
536  } else if (j+1 > bx_hi.y) {
537  GradVary = dxInv[1] * (dest_arr(i,j ,k0,dest_comp) - dest_arr(i,j-1,k0,dest_comp));
538  } else if (j-1 < bx_lo.y) {
539  GradVary = dxInv[1] * (dest_arr(i,j+1,k0,dest_comp) - dest_arr(i,j ,k0,dest_comp));
540  } else {
541  GradVary = 0.5 * dxInv[1] * (dest_arr(i,j+1,k0,dest_comp) - dest_arr(i,j-1,k0,dest_comp));
542  }
543 
544  // Prefactor
545  Real met_fac = met_h_zeta / ( met_h_xi*met_h_xi + met_h_eta*met_h_eta + 1. );
546 
547  // Accumulate in bottom ghost cell (EXTRAP already populated)
548  dest_arr(i,j,k,dest_comp) -= dz * met_fac * ( met_h_xi * GradVarx + met_h_eta * GradVary );
549  });
550  } // box includes k0
551  } // foextrap
552  } // ncomp
553  } // m_z_phys_nd
554  Gpu::streamSynchronize();
555 }
AMREX_FORCE_INLINE AMREX_GPU_DEVICE amrex::Real Compute_h_zeta_AtCellCenter(const int &i, const int &j, const int &k, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > &cellSizeInv, const amrex::Array4< const amrex::Real > &z_nd)
Definition: ERF_TerrainMetrics.H:47
AMREX_GPU_DEVICE AMREX_FORCE_INLINE amrex::Real Compute_Zrel_AtCellCenter(const int &i, const int &j, const int &k, const amrex::Array4< const amrex::Real > &z_nd)
Definition: ERF_TerrainMetrics.H:390
AMREX_GPU_DEVICE AMREX_FORCE_INLINE amrex::Real Compute_h_eta_AtCellCenter(const int &i, const int &j, const int &k, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > &cellSizeInv, const amrex::Array4< const amrex::Real > &z_nd)
Definition: ERF_TerrainMetrics.H:77
AMREX_GPU_DEVICE AMREX_FORCE_INLINE amrex::Real Compute_Z_AtCellCenter(const int &i, const int &j, const int &k, const amrex::Array4< const amrex::Real > &z_nd)
Definition: ERF_TerrainMetrics.H:362
AMREX_GPU_DEVICE AMREX_FORCE_INLINE amrex::Real Compute_h_xi_AtCellCenter(const int &i, const int &j, const int &k, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > &cellSizeInv, const amrex::Array4< const amrex::Real > &z_nd)
Definition: ERF_TerrainMetrics.H:62
void impose_vertical_cons_bcs(const amrex::Array4< amrex::Real > &dest_arr, const amrex::Box &bx, const amrex::Box &domain, const amrex::Array4< amrex::Real const > &z_nd, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > dxInv, int icomp, int ncomp, const amrex::Real time, bool do_terrain_adjustment=true)
Definition: ERF_BoundaryConditionsCons.cpp:319
@ neumann
Definition: ERF_IndexDefines.H:213
Here is the call graph for this function:

◆ operator()()

void ERFPhysBCFunct_cons::operator() ( amrex::MultiFab &  mf,
amrex::MultiFab &  xvel,
amrex::MultiFab &  yvel,
int  icomp,
int  ncomp,
amrex::IntVect const &  nghost,
const amrex::Real  time,
int  bccomp_cons,
bool  do_fb = true,
bool  do_terrain_adjustment = true 
)
20 {
21  BL_PROFILE("ERFPhysBCFunct_cons::()");
22 
23  //
24  // We fill all of the interior and periodic ghost cells first, so we can fill
25  // those directly inside the lateral and vertical calls.
26  // If triply periodic this is all we do
27  //
28  if (do_fb) {
29  mf.FillBoundary(icomp,ncomp,m_geom.periodicity());
30  }
31 
32  if (m_geom.isAllPeriodic()) return;
33 
34  const auto& domain = m_geom.Domain();
35  const auto dxInv = m_geom.InvCellSizeArray();
36 
37  // Create a grown domain box containing valid + periodic cells
38  Box gdomain = domain;
39  for (int i = 0; i < AMREX_SPACEDIM; ++i) {
40  if (m_geom.isPeriodic(i)) {
41  gdomain.grow(i, nghost[i]);
42  }
43  }
44 
45 #ifdef AMREX_USE_OMP
46 #pragma omp parallel if (Gpu::notInLaunchRegion())
47 #endif
48  {
49  for (MFIter mfi(mf,false); mfi.isValid(); ++mfi)
50  {
51  //
52  // This is the box we pass to the different routines
53  // NOTE -- this is the full grid box NOT the tile box
54  //
55  Box bx = mfi.validbox();
56 
57  //
58  // These are the boxes we use to test on relative to the domain
59  //
60  Box cbx1 = bx; cbx1.grow(IntVect(nghost[0],nghost[1],0));
61  Box cbx2 = bx; cbx2.grow(nghost);
62 
63  Array4<const Real> z_nd_arr;
64 
65  if (m_z_phys_nd)
66  {
67  z_nd_arr = m_z_phys_nd->const_array(mfi);
68  }
69 
70  if (!gdomain.contains(cbx2))
71  {
72  Array4< Real> const& cons_arr = mf.array(mfi);
73  Array4<const Real> const& velx_arr = xvel.const_array(mfi);
74  Array4<const Real> const& vely_arr = yvel.const_array(mfi);
75 
76  if (!m_use_real_bcs)
77  {
78  // We send a box with ghost cells in the lateral directions only
79  impose_lateral_cons_bcs(cons_arr,velx_arr,vely_arr,cbx1,domain,icomp,ncomp,nghost,time);
80  }
81 
82  // We send the full FAB box with ghost cells
83  impose_vertical_cons_bcs(cons_arr,cbx2,domain,z_nd_arr,dxInv,icomp,ncomp,time,do_terrain_adjustment);
84  }
85 
86  } // MFIter
87  } // OpenMP
88 } // operator()
@ xvel
Definition: ERF_IndexDefines.H:141
@ yvel
Definition: ERF_IndexDefines.H:142

Member Data Documentation

◆ m_bc_extdir_vals

amrex::Array<amrex::Array<amrex::Real, AMREX_SPACEDIM*2>,AMREX_SPACEDIM+NBCVAR_max> ERFPhysBCFunct_cons::m_bc_extdir_vals
private

◆ m_bc_neumann_vals

amrex::Array<amrex::Array<amrex::Real, AMREX_SPACEDIM*2>,AMREX_SPACEDIM+NBCVAR_max> ERFPhysBCFunct_cons::m_bc_neumann_vals
private

◆ m_domain_bcs_type

amrex::Vector<amrex::BCRec> ERFPhysBCFunct_cons::m_domain_bcs_type
private

◆ m_domain_bcs_type_d

amrex::Gpu::DeviceVector<amrex::BCRec> ERFPhysBCFunct_cons::m_domain_bcs_type_d
private

◆ m_geom

amrex::Geometry ERFPhysBCFunct_cons::m_geom
private

◆ m_lev

int ERFPhysBCFunct_cons::m_lev
private

◆ m_th_bc_data

amrex::Real* ERFPhysBCFunct_cons::m_th_bc_data
private

◆ m_use_real_bcs

bool ERFPhysBCFunct_cons::m_use_real_bcs
private

◆ m_z_phys_nd

amrex::MultiFab* ERFPhysBCFunct_cons::m_z_phys_nd
private

The documentation for this class was generated from the following files: