ERF
Energy Research and Forecasting: An Atmospheric Modeling Code
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
ERFPhysBCFunct_u Class Reference

#include <ERF_PhysBCFunct.H>

Collaboration diagram for ERFPhysBCFunct_u:

Public Member Functions

 ERFPhysBCFunct_u (const int lev, const amrex::Geometry &geom, const amrex::Vector< amrex::BCRec > &domain_bcs_type, const amrex::Gpu::DeviceVector< amrex::BCRec > &domain_bcs_type_d, amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > bc_extdir_vals, amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > bc_neumann_vals, std::unique_ptr< amrex::MultiFab > &z_phys_nd, const bool use_real_bcs, amrex::Real *u_bc_data)
 
 ~ERFPhysBCFunct_u ()
 
void operator() (amrex::MultiFab &mf, amrex::MultiFab &xvel, amrex::MultiFab &yvel, amrex::IntVect const &nghost, const amrex::Real time, int bccomp, bool do_fb)
 
void impose_lateral_xvel_bcs (const amrex::Array4< amrex::Real > &dest_arr, const amrex::Array4< amrex::Real const > &xvel_arr, const amrex::Array4< amrex::Real const > &yvel_arr, const amrex::Box &bx, const amrex::Box &domain, int bccomp, const amrex::Real time)
 
void impose_vertical_xvel_bcs (const amrex::Array4< amrex::Real > &dest_arr, const amrex::Box &bx, const amrex::Box &domain, const amrex::Array4< amrex::Real const > &z_nd, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > dxInv, int bccomp, const amrex::Real time)
 

Private Attributes

int m_lev
 
amrex::Geometry m_geom
 
amrex::Vector< amrex::BCRec > m_domain_bcs_type
 
amrex::Gpu::DeviceVector< amrex::BCRec > m_domain_bcs_type_d
 
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > m_bc_extdir_vals
 
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > m_bc_neumann_vals
 
amrex::MultiFab * m_z_phys_nd
 
bool m_use_real_bcs
 
amrex::Real * m_u_bc_data
 

Constructor & Destructor Documentation

◆ ERFPhysBCFunct_u()

ERFPhysBCFunct_u::ERFPhysBCFunct_u ( const int  lev,
const amrex::Geometry &  geom,
const amrex::Vector< amrex::BCRec > &  domain_bcs_type,
const amrex::Gpu::DeviceVector< amrex::BCRec > &  domain_bcs_type_d,
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max >  bc_extdir_vals,
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max >  bc_neumann_vals,
std::unique_ptr< amrex::MultiFab > &  z_phys_nd,
const bool  use_real_bcs,
amrex::Real *  u_bc_data 
)
inline
96  : m_lev(lev), m_geom(geom),
97  m_domain_bcs_type(domain_bcs_type),
98  m_domain_bcs_type_d(domain_bcs_type_d),
99  m_bc_extdir_vals(bc_extdir_vals),
100  m_bc_neumann_vals(bc_neumann_vals),
101  m_z_phys_nd(z_phys_nd.get()),
102  m_use_real_bcs(use_real_bcs),
103  m_u_bc_data(u_bc_data)
104  { }
amrex::Geometry m_geom
Definition: ERF_PhysBCFunct.H:134
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > m_bc_extdir_vals
Definition: ERF_PhysBCFunct.H:137
amrex::Gpu::DeviceVector< amrex::BCRec > m_domain_bcs_type_d
Definition: ERF_PhysBCFunct.H:136
bool m_use_real_bcs
Definition: ERF_PhysBCFunct.H:140
amrex::Array< amrex::Array< amrex::Real, AMREX_SPACEDIM *2 >, AMREX_SPACEDIM+NBCVAR_max > m_bc_neumann_vals
Definition: ERF_PhysBCFunct.H:138
amrex::Real * m_u_bc_data
Definition: ERF_PhysBCFunct.H:141
int m_lev
Definition: ERF_PhysBCFunct.H:133
amrex::Vector< amrex::BCRec > m_domain_bcs_type
Definition: ERF_PhysBCFunct.H:135
amrex::MultiFab * m_z_phys_nd
Definition: ERF_PhysBCFunct.H:139

◆ ~ERFPhysBCFunct_u()

ERFPhysBCFunct_u::~ERFPhysBCFunct_u ( )
inline
106 {}

Member Function Documentation

◆ impose_lateral_xvel_bcs()

void ERFPhysBCFunct_u::impose_lateral_xvel_bcs ( const amrex::Array4< amrex::Real > &  dest_arr,
const amrex::Array4< amrex::Real const > &  xvel_arr,
const amrex::Array4< amrex::Real const > &  yvel_arr,
const amrex::Box &  bx,
const amrex::Box &  domain,
int  bccomp,
const amrex::Real  time 
)
20 {
21  BL_PROFILE_VAR("impose_lateral_xvel_bcs()",impose_lateral_xvel_bcs);
22  const auto& dom_lo = lbound(domain);
23  const auto& dom_hi = ubound(domain);
24 
25  // xlo: ori = 0
26  // ylo: ori = 1
27  // zlo: ori = 2
28  // xhi: ori = 3
29  // yhi: ori = 4
30  // zhi: ori = 5
31 
32  // Based on BCRec for the domain, we need to make BCRec for this Box
33  // bccomp is used as starting index for m_domain_bcs_type
34  // 0 is used as starting index for bcrs
35  Vector<BCRec> bcrs(1);
36  setBC(enclosedCells(bx), domain, bccomp, 0, 1, m_domain_bcs_type, bcrs);
37 
38  Gpu::DeviceVector<BCRec> bcrs_d(1);
39  Gpu::copyAsync(Gpu::hostToDevice, bcrs.begin(), bcrs.end(), bcrs_d.begin());
40  const BCRec* bc_ptr = bcrs_d.data();
41 
42  GpuArray<GpuArray<Real, AMREX_SPACEDIM*2>,1> l_bc_extdir_vals_d;
43 
44  for (int ori = 0; ori < 2*AMREX_SPACEDIM; ori++) {
45  l_bc_extdir_vals_d[0][ori] = m_bc_extdir_vals[bccomp][ori];
46  }
47 
48  GeometryData const& geomdata = m_geom.data();
49  bool is_periodic_in_x = geomdata.isPeriodic(0);
50  bool is_periodic_in_y = geomdata.isPeriodic(1);
51 
52  // First do all ext_dir bcs
53  if (!is_periodic_in_x)
54  {
55  Real* xvel_bc_ptr = m_u_bc_data;
56  Box bx_xlo(bx); bx_xlo.setBig (0,dom_lo.x-1);
57  Box bx_xhi(bx); bx_xhi.setSmall(0,dom_hi.x+2);
58  Box bx_xlo_face(bx); bx_xlo_face.setSmall(0,dom_lo.x ); bx_xlo_face.setBig(0,dom_lo.x );
59  Box bx_xhi_face(bx); bx_xhi_face.setSmall(0,dom_hi.x+1); bx_xhi_face.setBig(0,dom_hi.x+1);
60  ParallelFor(bx_xlo, bx_xlo_face,
61  [=] AMREX_GPU_DEVICE (int i, int j, int k)
62  {
63  int iflip = dom_lo.x - i;
64  if (bc_ptr[0].lo(0) == ERFBCType::ext_dir) {
65  dest_arr(i,j,k) = (xvel_bc_ptr) ? xvel_bc_ptr[k] : l_bc_extdir_vals_d[0][0];
66  } else if (bc_ptr[0].lo(0) == ERFBCType::ext_dir_upwind && xvel_arr(dom_lo.x,j,k) >= 0.) {
67  dest_arr(i,j,k) = (xvel_bc_ptr) ? xvel_bc_ptr[k] : l_bc_extdir_vals_d[0][0];
68  } else if (bc_ptr[0].lo(0) == ERFBCType::foextrap) {
69  dest_arr(i,j,k) = dest_arr(dom_lo.x,j,k);
70  } else if (bc_ptr[0].lo(0) == ERFBCType::open) {
71  dest_arr(i,j,k) = dest_arr(dom_lo.x,j,k);
72  } else if (bc_ptr[0].lo(0) == ERFBCType::reflect_even) {
73  dest_arr(i,j,k) = dest_arr(iflip,j,k);
74  } else if (bc_ptr[0].lo(0) == ERFBCType::reflect_odd) {
75  dest_arr(i,j,k) = -dest_arr(iflip,j,k);
76  } else if (bc_ptr[0].lo(0) == ERFBCType::neumann_int) {
77  dest_arr(i,j,k) = (4.0*dest_arr(dom_lo.x+1,j,k) - dest_arr(dom_lo.x+2,j,k))/3.0;
78  } else if (bc_ptr[0].lo(0) == ERFBCType::hoextrap) {
79  Real delta_i = (dom_lo.x - i);
80  dest_arr(i,j,k) = (1.0 + delta_i)*dest_arr(dom_lo.x,j,k) - delta_i*dest_arr(dom_lo.x+1,j,k);
81  }
82  },
83  // We only set the values on the domain faces themselves if EXT_DIR or neumann_int
84  [=] AMREX_GPU_DEVICE (int i, int j, int k)
85  {
86  if (bc_ptr[0].lo(0) == ERFBCType::ext_dir) {
87  dest_arr(i,j,k) = (xvel_bc_ptr) ? xvel_bc_ptr[k] : l_bc_extdir_vals_d[0][0];
88  } else if (bc_ptr[0].lo(0) == ERFBCType::ext_dir_upwind && xvel_arr(dom_lo.x,j,k) >= 0.) {
89  dest_arr(i,j,k) = (xvel_bc_ptr) ? xvel_bc_ptr[k] : l_bc_extdir_vals_d[0][0];
90  } else if (bc_ptr[0].lo(0) == ERFBCType::neumann_int) {
91  dest_arr(i,j,k) = (4.0*dest_arr(dom_lo.x+1,j,k) - dest_arr(dom_lo.x+2,j,k))/3.0;
92  }
93  }
94  );
95  ParallelFor(bx_xhi, bx_xhi_face,
96  [=] AMREX_GPU_DEVICE (int i, int j, int k)
97  {
98  int iflip = 2*(dom_hi.x + 1) - i;
99  if (bc_ptr[0].hi(0) == ERFBCType::ext_dir) {
100  dest_arr(i,j,k) = (xvel_bc_ptr) ? xvel_bc_ptr[k] : l_bc_extdir_vals_d[0][3];
101  } else if (bc_ptr[0].hi(0) == ERFBCType::ext_dir_upwind && xvel_arr(dom_hi.x+1,j,k) <= 0.) {
102  dest_arr(i,j,k) = (xvel_bc_ptr) ? xvel_bc_ptr[k] : l_bc_extdir_vals_d[0][3];
103  } else if (bc_ptr[0].hi(0) == ERFBCType::foextrap) {
104  dest_arr(i,j,k) = dest_arr(dom_hi.x+1,j,k);
105  } else if (bc_ptr[0].hi(0) == ERFBCType::open) {
106  dest_arr(i,j,k) = dest_arr(dom_hi.x+1,j,k);
107  } else if (bc_ptr[0].hi(0) == ERFBCType::reflect_even) {
108  dest_arr(i,j,k) = dest_arr(iflip,j,k);
109  } else if (bc_ptr[0].hi(0) == ERFBCType::reflect_odd) {
110  dest_arr(i,j,k) = -dest_arr(iflip,j,k);
111  } else if (bc_ptr[0].hi(0) == ERFBCType::neumann_int) {
112  dest_arr(i,j,k) = (4.0*dest_arr(dom_hi.x,j,k) - dest_arr(dom_hi.x-1,j,k))/3.0;
113  } else if (bc_ptr[0].hi(0) == ERFBCType::hoextrap) {
114  Real delta_i = (i - dom_hi.x - 1);
115  dest_arr(i,j,k) = (1.0 + delta_i)*dest_arr(dom_hi.x+1,j,k) - delta_i*dest_arr(dom_hi.x,j,k);
116  }
117  },
118  // We only set the values on the domain faces themselves if EXT_DIR or neumann_int
119  [=] AMREX_GPU_DEVICE (int i, int j, int k)
120  {
121  if (bc_ptr[0].hi(0) == ERFBCType::ext_dir) {
122  dest_arr(i,j,k) = (xvel_bc_ptr) ? xvel_bc_ptr[k] : l_bc_extdir_vals_d[0][3];
123  } else if (bc_ptr[0].hi(0) == ERFBCType::ext_dir_upwind && xvel_arr(dom_hi.x+1,j,k) <= 0.) {
124  dest_arr(i,j,k) = (xvel_bc_ptr) ? xvel_bc_ptr[k] : l_bc_extdir_vals_d[0][3];
125  } else if (bc_ptr[0].hi(0) == ERFBCType::neumann_int) {
126  dest_arr(i,j,k) = (4.0*dest_arr(dom_hi.x,j,k) - dest_arr(dom_hi.x-1,j,k))/3.0;
127  }
128  }
129  );
130  } // not periodic in x
131 
132  if (!is_periodic_in_y)
133  {
134  // Populate ghost cells on lo-y and hi-y domain boundaries
135  Real* xvel_bc_ptr = m_u_bc_data;
136  Box bx_ylo(bx); bx_ylo.setBig (1,dom_lo.y-1);
137  Box bx_yhi(bx); bx_yhi.setSmall(1,dom_hi.y+1);
138  ParallelFor(bx_ylo, bx_yhi,
139  [=] AMREX_GPU_DEVICE (int i, int j, int k) {
140  int jflip = dom_lo.y - 1 - j;
141  if (bc_ptr[0].lo(1) == ERFBCType::ext_dir) {
142  dest_arr(i,j,k) = (xvel_bc_ptr) ? xvel_bc_ptr[k] : l_bc_extdir_vals_d[0][1];
143  } else if (bc_ptr[0].lo(1) == ERFBCType::ext_dir_upwind && yvel_arr(i,dom_lo.y,k) >= 0.) {
144  dest_arr(i,j,k) = (xvel_bc_ptr) ? xvel_bc_ptr[k] : l_bc_extdir_vals_d[0][1];
145  } else if (bc_ptr[0].lo(1) == ERFBCType::foextrap) {
146  dest_arr(i,j,k) = dest_arr(i,dom_lo.y,k);
147  } else if (bc_ptr[0].lo(1) == ERFBCType::open) {
148  dest_arr(i,j,k) = dest_arr(i,dom_lo.y,k);
149  } else if (bc_ptr[0].lo(1) == ERFBCType::reflect_even) {
150  dest_arr(i,j,k) = dest_arr(i,jflip,k);
151  } else if (bc_ptr[0].lo(1) == ERFBCType::reflect_odd) {
152  dest_arr(i,j,k) = -dest_arr(i,jflip,k);
153  } else if (bc_ptr[0].lo(1) == ERFBCType::hoextrap) {
154  Real delta_j = (dom_lo.y - j);
155  dest_arr(i,j,k) = (1.0 + delta_j)*dest_arr(i,dom_lo.y,k) - delta_j*dest_arr(i,dom_lo.y+1,k);
156  }
157  },
158  [=] AMREX_GPU_DEVICE (int i, int j, int k) {
159  int jflip = 2*dom_hi.y + 1 - j;
160  if (bc_ptr[0].hi(1) == ERFBCType::ext_dir) {
161  dest_arr(i,j,k) = (xvel_bc_ptr) ? xvel_bc_ptr[k] : l_bc_extdir_vals_d[0][4];
162  } else if (bc_ptr[0].hi(1) == ERFBCType::ext_dir_upwind && yvel_arr(i,dom_hi.y+1,k) <= 0.) {
163  dest_arr(i,j,k) = (xvel_bc_ptr) ? xvel_bc_ptr[k] : l_bc_extdir_vals_d[0][4];
164  } else if (bc_ptr[0].hi(1) == ERFBCType::foextrap) {
165  dest_arr(i,j,k) = dest_arr(i,dom_hi.y,k);
166  } else if (bc_ptr[0].hi(1) == ERFBCType::open) {
167  dest_arr(i,j,k) = dest_arr(i,dom_hi.y,k);
168  } else if (bc_ptr[0].hi(1) == ERFBCType::reflect_even) {
169  dest_arr(i,j,k) = dest_arr(i,jflip,k);
170  } else if (bc_ptr[0].hi(1) == ERFBCType::reflect_odd) {
171  dest_arr(i,j,k) = -dest_arr(i,jflip,k);
172  } else if (bc_ptr[0].hi(1) == ERFBCType::hoextrap) {
173  Real delta_j = (j - dom_hi.y);
174  dest_arr(i,j,k) = (1.0 + delta_j)*dest_arr(i,dom_hi.y,k) - delta_j*dest_arr(i,dom_hi.y-1,k);
175  }
176  }
177  );
178  } // not periodic in y
179 
180  Gpu::streamSynchronize();
181 }
const auto & dom_hi
Definition: ERF_DiffSetup.H:10
const auto & dom_lo
Definition: ERF_DiffSetup.H:9
void impose_lateral_xvel_bcs(const amrex::Array4< amrex::Real > &dest_arr, const amrex::Array4< amrex::Real const > &xvel_arr, const amrex::Array4< amrex::Real const > &yvel_arr, const amrex::Box &bx, const amrex::Box &domain, int bccomp, const amrex::Real time)
Definition: ERF_BoundaryConditionsXvel.cpp:15
@ open
Definition: ERF_IndexDefines.H:215
@ reflect_odd
Definition: ERF_IndexDefines.H:205
@ hoextrap
Definition: ERF_IndexDefines.H:216
@ foextrap
Definition: ERF_IndexDefines.H:208
@ ext_dir
Definition: ERF_IndexDefines.H:209
@ ext_dir_upwind
Definition: ERF_IndexDefines.H:217
@ neumann_int
Definition: ERF_IndexDefines.H:214
@ reflect_even
Definition: ERF_IndexDefines.H:207

◆ impose_vertical_xvel_bcs()

void ERFPhysBCFunct_u::impose_vertical_xvel_bcs ( const amrex::Array4< amrex::Real > &  dest_arr,
const amrex::Box &  bx,
const amrex::Box &  domain,
const amrex::Array4< amrex::Real const > &  z_nd,
const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM >  dxInv,
int  bccomp,
const amrex::Real  time 
)
203 {
204  BL_PROFILE_VAR("impose_vertical_xvel_bcs()",impose_vertical_xvel_bcs);
205  const auto& dom_lo = lbound(domain);
206  const auto& dom_hi = ubound(domain);
207 
208  Box per_grown_domain(domain);
209  int growx = (m_geom.isPeriodic(0)) ? 1 : 0;
210  int growy = (m_geom.isPeriodic(1)) ? 1 : 0;
211  per_grown_domain.grow(IntVect(growx,growy,0));
212  const auto& perdom_lo = lbound(per_grown_domain);
213  const auto& perdom_hi = ubound(per_grown_domain);
214 
215  GeometryData const& geomdata = m_geom.data();
216 
217  // Based on BCRec for the domain, we need to make BCRec for this Box
218  // bccomp is used as starting index for m_domain_bcs_type
219  // 0 is used as starting index for bcrs
220  Vector<BCRec> bcrs(1);
221  setBC(enclosedCells(bx), domain, bccomp, 0, 1, m_domain_bcs_type, bcrs);
222 
223  Gpu::DeviceVector<BCRec> bcrs_d(1);
224  Gpu::copyAsync(Gpu::hostToDevice, bcrs.begin(), bcrs.end(), bcrs_d.begin());
225  const BCRec* bc_ptr = bcrs_d.data();
226 
227  GpuArray<GpuArray<Real, AMREX_SPACEDIM*2>,1> l_bc_extdir_vals_d;
228 
229  for (int ori = 0; ori < 2*AMREX_SPACEDIM; ori++) {
230  l_bc_extdir_vals_d[0][ori] = m_bc_extdir_vals[bccomp][ori];
231  }
232 
233  {
234  // Populate ghost cells on lo-z and hi-z domain boundaries
235  Box bx_zlo(bx); bx_zlo.setBig (2,dom_lo.z-1);
236  Box bx_zhi(bx); bx_zhi.setSmall(2,dom_hi.z+1);
237  ParallelFor(bx_zlo, bx_zhi,
238  [=] AMREX_GPU_DEVICE (int i, int j, int k) {
239  int kflip = dom_lo.z - 1 - k;
240  if (bc_ptr[0].lo(2) == ERFBCType::ext_dir) {
241  #ifdef ERF_USE_TERRAIN_VELOCITY
242  dest_arr(i,j,k) = prob->compute_terrain_velocity(time);
243  #else
244  dest_arr(i,j,k) = l_bc_extdir_vals_d[0][2];
245  #endif
246  } else if (bc_ptr[0].lo(2) == ERFBCType::foextrap) {
247  dest_arr(i,j,k) = dest_arr(i,j,dom_lo.z);
248  } else if (bc_ptr[0].lo(2) == ERFBCType::open) {
249  dest_arr(i,j,k) = dest_arr(i,j,dom_lo.z);
250  } else if (bc_ptr[0].lo(2) == ERFBCType::reflect_even) {
251  dest_arr(i,j,k) = dest_arr(i,j,kflip);
252  } else if (bc_ptr[0].lo(2) == ERFBCType::reflect_odd) {
253  dest_arr(i,j,k) = -dest_arr(i,j,kflip);
254  } else if (bc_ptr[0].lo(2) == ERFBCType::hoextrap) {
255  Real delta_k = (dom_lo.z - k);
256  dest_arr(i,j,k) = (1.0 + delta_k)*dest_arr(i,j,dom_lo.z) - delta_k*dest_arr(i,j,dom_lo.z+1);
257  }
258  },
259  [=] AMREX_GPU_DEVICE (int i, int j, int k) {
260  int kflip = 2*dom_hi.z + 1 - k;
261  if (bc_ptr[0].hi(2) == ERFBCType::ext_dir) {
262  dest_arr(i,j,k) = l_bc_extdir_vals_d[0][5];
263  } else if (bc_ptr[0].hi(2) == ERFBCType::foextrap) {
264  dest_arr(i,j,k) = dest_arr(i,j,dom_hi.z);
265  } else if (bc_ptr[0].hi(2) == ERFBCType::open) {
266  dest_arr(i,j,k) = dest_arr(i,j,dom_hi.z);
267  } else if (bc_ptr[0].hi(2) == ERFBCType::reflect_even) {
268  dest_arr(i,j,k) = dest_arr(i,j,kflip);
269  } else if (bc_ptr[0].hi(2) == ERFBCType::reflect_odd) {
270  dest_arr(i,j,k) = -dest_arr(i,j,kflip);
271  } else if (bc_ptr[0].hi(2) == ERFBCType::hoextrap){
272  Real delta_k = (k - dom_hi.z);
273  dest_arr(i,j,k) = (1.0 + delta_k)*dest_arr(i,j,dom_hi.z) - delta_k*dest_arr(i,j,dom_hi.z-1);
274  }
275  }
276  );
277  } // z
278 
279  if (m_z_phys_nd) {
280  const auto& bx_lo = lbound(bx);
281  const auto& bx_hi = ubound(bx);
282 
283  const auto& zphys_lo = lbound(Box(z_phys_nd));
284  const auto& zphys_hi = ubound(Box(z_phys_nd));
285 
286  // Neumann conditions (d<var>/dn = 0) must be aware of the surface normal with terrain.
287  // An additional source term arises from d<var>/dx & d<var>/dy & met_h_xi/eta/zeta.
288  //=====================================================================================
289  // Only modify scalars, U, or V
290  // Loop over each component
291  // Hit for Neumann condition at kmin
292  if (bcrs[0].lo(2) == ERFBCType::foextrap) {
293  // Loop over ghost cells in bottom XY-plane (valid box)
294  Box xybx = bx;
295  xybx.setBig(2,-1);
296  xybx.setSmall(2,bx.smallEnd()[2]);
297  int k0 = 0;
298 
299  // Get the dz cell size
300  Real dz = geomdata.CellSize(2);
301 
302  // Fill all the Neumann srcs with terrain
303  ParallelFor(xybx, [=] AMREX_GPU_DEVICE (int i, int j, int k)
304  {
305  // Clip indices for ghost-cells
306  int ii = amrex::min(amrex::max(i,perdom_lo.x),perdom_hi.x);
307  ii = amrex::min(amrex::max(ii,zphys_lo.x),zphys_hi.x);
308  int jj = amrex::min(amrex::max(j,perdom_lo.y),perdom_hi.y);
309  jj = amrex::min(amrex::max(jj,zphys_lo.y),zphys_hi.y);
310 
311  // Get metrics
312  Real met_h_xi = Compute_h_xi_AtIface (ii, jj, k0, dxInv, z_phys_nd);
313  Real met_h_eta = Compute_h_eta_AtIface (ii, jj, k0, dxInv, z_phys_nd);
314  Real met_h_zeta = Compute_h_zeta_AtIface(ii, jj, k0, dxInv, z_phys_nd);
315 
316  // GradX at IJK location inside domain -- this relies on the assumption that we have
317  // used foextrap for quantities outside the domain to define the gradient as zero
318  Real GradVarx, GradVary;
319  if ( i < dom_lo.x-1 || i > dom_hi.x+1 || (i+1 > bx_hi.x && i-1 < bx_lo.x) ) {
320  GradVarx = 0.0;
321  } else if (i+1 > bx_hi.x) {
322  GradVarx = dxInv[0] * (dest_arr(i ,j,k0) - dest_arr(i-1,j,k0));
323  } else if (i-1 < bx_lo.x) {
324  GradVarx = dxInv[0] * (dest_arr(i+1,j,k0) - dest_arr(i ,j,k0));
325  } else {
326  GradVarx = 0.5 * dxInv[0] * (dest_arr(i+1,j,k0) - dest_arr(i-1,j,k0));
327  }
328 
329  // GradY at IJK location inside domain -- this relies on the assumption that we have
330  // used foextrap for quantities outside the domain to define the gradient as zero
331  if ( j < dom_lo.y-1 || j > dom_hi.y+1 || (j+1 > bx_hi.y && j-1 < bx_lo.y) ) {
332  GradVary = 0.0;
333  } else if (j+1 > bx_hi.y) {
334  GradVary = dxInv[1] * (dest_arr(i,j ,k0) - dest_arr(i,j-1,k0));
335  } else if (j-1 < bx_lo.y) {
336  GradVary = dxInv[1] * (dest_arr(i,j+1,k0) - dest_arr(i,j ,k0));
337  } else {
338  GradVary = 0.5 * dxInv[1] * (dest_arr(i,j+1,k0) - dest_arr(i,j-1,k0));
339  }
340 
341  // Prefactor
342  Real met_fac = met_h_zeta / ( met_h_xi*met_h_xi + met_h_eta*met_h_eta + 1. );
343 
344  // Accumulate in bottom ghost cell (EXTRAP already populated)
345  dest_arr(i,j,k) -= dz * met_fac * ( met_h_xi * GradVarx + met_h_eta * GradVary );
346  });
347  } // foextrap
348  } //m_z_phys_nd
349  Gpu::streamSynchronize();
350 }
AMREX_GPU_DEVICE AMREX_FORCE_INLINE amrex::Real Compute_h_xi_AtIface(const int &i, const int &j, const int &k, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > &cellSizeInv, const amrex::Array4< const amrex::Real > &z_nd)
Definition: ERF_TerrainMetrics.H:110
AMREX_GPU_DEVICE AMREX_FORCE_INLINE amrex::Real Compute_h_zeta_AtIface(const int &i, const int &j, const int &k, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > &cellSizeInv, const amrex::Array4< const amrex::Real > &z_nd)
Definition: ERF_TerrainMetrics.H:96
AMREX_GPU_DEVICE AMREX_FORCE_INLINE amrex::Real Compute_h_eta_AtIface(const int &i, const int &j, const int &k, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > &cellSizeInv, const amrex::Array4< const amrex::Real > &z_nd)
Definition: ERF_TerrainMetrics.H:125
void impose_vertical_xvel_bcs(const amrex::Array4< amrex::Real > &dest_arr, const amrex::Box &bx, const amrex::Box &domain, const amrex::Array4< amrex::Real const > &z_nd, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > dxInv, int bccomp, const amrex::Real time)
Definition: ERF_BoundaryConditionsXvel.cpp:193
Here is the call graph for this function:

◆ operator()()

void ERFPhysBCFunct_u::operator() ( amrex::MultiFab &  mf,
amrex::MultiFab &  xvel,
amrex::MultiFab &  yvel,
amrex::IntVect const &  nghost,
const amrex::Real  time,
int  bccomp,
bool  do_fb 
)
93 {
94  BL_PROFILE("ERFPhysBCFunct_u::()");
95 
96  //
97  // We fill all of the interior and periodic ghost cells first, so we can fill
98  // those directly inside the lateral and vertical calls.
99  // If triply periodic this is all we do
100  //
101  if (do_fb) {
102  mf.FillBoundary(m_geom.periodicity());
103  }
104 
105  if (m_geom.isAllPeriodic()) return;
106 
107  const auto& domain = m_geom.Domain();
108  const auto dxInv = m_geom.InvCellSizeArray();
109 
110  Box gdomainx = surroundingNodes(domain,0);
111  for (int i = 0; i < AMREX_SPACEDIM; ++i) {
112  if (m_geom.isPeriodic(i)) {
113  gdomainx.grow(i, nghost[i]);
114  }
115  }
116 
117 #ifdef AMREX_USE_OMP
118 #pragma omp parallel if (Gpu::notInLaunchRegion())
119 #endif
120  {
121  for (MFIter mfi(mf,false); mfi.isValid(); ++mfi)
122  {
123  //
124  // This is the box we pass to the different routines
125  // NOTE -- this is the full grid NOT the tile box
126  //
127  Box bx = mfi.validbox();
128 
129  //
130  // These are the boxes we use to test on relative to the domain
131  //
132  Box xbx1 = surroundingNodes(bx,0); xbx1.grow(nghost);
133  if(xbx1.smallEnd(2) < domain.smallEnd(2)) xbx1.setSmall(2,domain.smallEnd(2));
134  if(xbx1.bigEnd(2) > domain.bigEnd(2)) xbx1.setBig(2,domain.bigEnd(2));
135 
136  Box xbx2 = surroundingNodes(bx,0); xbx2.grow(nghost);
137 
138  Array4<const Real> z_nd_arr;
139 
140  if (m_z_phys_nd)
141  {
142  z_nd_arr = m_z_phys_nd->const_array(mfi);
143  }
144 
145  if (!gdomainx.contains(xbx2))
146  {
147  Array4< Real> const& dest_arr = mf.array(mfi);
148  Array4<const Real> const& velx_arr = xvel.const_array(mfi);
149  Array4<const Real> const& vely_arr = yvel.const_array(mfi);
150 
151  if (!m_use_real_bcs)
152  {
153  if (!gdomainx.contains(xbx1))
154  {
155  impose_lateral_xvel_bcs(dest_arr,velx_arr,vely_arr,xbx1,domain,bccomp,time);
156  }
157  }
158 
159  impose_vertical_xvel_bcs(dest_arr,xbx2,domain,z_nd_arr,dxInv,bccomp,time);
160  }
161  } // MFIter
162  } // OpenMP
163 } // operator()
@ xvel
Definition: ERF_IndexDefines.H:141
@ yvel
Definition: ERF_IndexDefines.H:142

Member Data Documentation

◆ m_bc_extdir_vals

amrex::Array<amrex::Array<amrex::Real, AMREX_SPACEDIM*2>,AMREX_SPACEDIM+NBCVAR_max> ERFPhysBCFunct_u::m_bc_extdir_vals
private

◆ m_bc_neumann_vals

amrex::Array<amrex::Array<amrex::Real, AMREX_SPACEDIM*2>,AMREX_SPACEDIM+NBCVAR_max> ERFPhysBCFunct_u::m_bc_neumann_vals
private

◆ m_domain_bcs_type

amrex::Vector<amrex::BCRec> ERFPhysBCFunct_u::m_domain_bcs_type
private

◆ m_domain_bcs_type_d

amrex::Gpu::DeviceVector<amrex::BCRec> ERFPhysBCFunct_u::m_domain_bcs_type_d
private

◆ m_geom

amrex::Geometry ERFPhysBCFunct_u::m_geom
private

◆ m_lev

int ERFPhysBCFunct_u::m_lev
private

◆ m_u_bc_data

amrex::Real* ERFPhysBCFunct_u::m_u_bc_data
private

◆ m_use_real_bcs

bool ERFPhysBCFunct_u::m_use_real_bcs
private

◆ m_z_phys_nd

amrex::MultiFab* ERFPhysBCFunct_u::m_z_phys_nd
private

The documentation for this class was generated from the following files: