Computes the implicit contribution to the vertical diffusion of momentum, assuming a uniform grid and no terrain. This function (explicitly instantiated below) handles staggering in x, y, or z via the template parameter `stagdir` (0 = x-face, 1 = y-face, 2 = z-face).
219 BL_PROFILE_VAR(
"ImplicitDiffForMom_N()",ImplicitDiffForMom_N);
236 constexpr
int ioff = (stagdir == 0) ? 1 : 0;
237 constexpr
int joff = (stagdir == 1) ? 1 : 0;
240 int ilo = bx.smallEnd(0);
241 int ihi = bx.bigEnd(0);
242 int jlo = bx.smallEnd(1);
243 int jhi = bx.bigEnd(1);
244 int klo = bx.smallEnd(2);
245 int khi = bx.bigEnd(2);
246 amrex::ignore_unused(ilo, ihi, jlo, jhi);
250 amrex::FArrayBox RHS_fab, soln_fab, coeffG_fab;
251 RHS_fab.resize(bx,1, amrex::The_Async_Arena());
252 soln_fab.resize(bx,1, amrex::The_Async_Arena());
253 coeffG_fab.resize(bx,1, amrex::The_Async_Arena());
254 auto const& RHS_a = RHS_fab.array();
255 auto const& soln_a = soln_fab.array();
256 auto const& coeffG_a = coeffG_fab.array();
258 Real dz_inv = cellSizeInv[2];
266 amrex::ignore_unused(foextrap_on_zhi);
269 "Unexpected lower BC used with implicit vertical diffusion");
271 "Unexpected upper BC used with implicit vertical diffusion");
272 if (stagdir < 2 && (ext_dir_on_zlo || ext_dir_on_zhi)) {
273 amrex::Warning(
"No-slip walls have not been fully tested");
276 Real Fact = implicit_fac * dt * dz_inv;
279 ParallelFor(makeSlab(bx,2,0), [=] AMREX_GPU_DEVICE (
int i,
int j,
int)
282 for (
int j(jlo); j<=jhi; ++j) {
283 for (
int i(ilo); i<=ihi; ++i) {
320 Real rhoface, rhoAlpha_lo, rhoAlpha_hi;
321 Real a_tmp, b_tmp, c_tmp, inv_b2_tmp;
323 rhoface = 0.5 * (cell_data(i,j,klo,
Rho_comp) + cell_data(i-ioff,j-joff,klo,
Rho_comp));
325 cell_data, mu_turb, mu_eff,
329 c_tmp = -Fact * gfac * rhoAlpha_hi * dz_inv;
331 RHS_a(i,j,klo) = face_data(i,j,klo);
334 if (ext_dir_on_zlo) {
335 RHS_a(i,j,klo) += Fact * gfac * (tau_corr(i,j,klo+1) - tau_corr(i,j,klo));
340 a_tmp = -2.0 * Fact * rhoAlpha_lo * dz_inv;
341 RHS_a(i,j,klo) += 2.0 * rhoAlpha_lo * face_data(i,j,klo-1) * dz_inv * dz_inv;
343 }
else if (use_SurfLayer) {
345 RHS_a(i,j,klo) += Fact * gfac * (tau_corr(i,j,klo+1) - tau(i,j,klo));
346 RHS_a(i,j,klo) += Fact * tau(i,j,klo);
349 b_tmp = rhoface - a_tmp - c_tmp;
352 RHS_a(i,j,klo) /= b_tmp;
353 coeffG_a(i,j,klo) = c_tmp / b_tmp;
358 for (
int k(klo+1); k <
khi; k++) {
359 rhoface = 0.5 * (cell_data(i,j,k,
Rho_comp) + cell_data(i-ioff,j-joff,k,
Rho_comp));
361 cell_data, mu_turb, mu_eff,
364 a_tmp = -Fact * rhoAlpha_lo * dz_inv;
365 c_tmp = -Fact * rhoAlpha_hi * dz_inv;
366 b_tmp = rhoface - a_tmp - c_tmp;
367 inv_b2_tmp = 1. / (b_tmp - a_tmp * coeffG_a(i,j,k-1));
369 RHS_a(i,j,k) = face_data(i,j,k);
370 RHS_a(i,j,k) += Fact * gfac * (tau_corr(i,j,k+1) - tau_corr(i,j,k));
372 RHS_a(i,j,k) = (RHS_a(i,j,k) - a_tmp * RHS_a(i,j,k-1)) * inv_b2_tmp;
373 coeffG_a(i,j,k) = c_tmp * inv_b2_tmp;
381 cell_data, mu_turb, mu_eff,
384 a_tmp = -Fact * gfac * rhoAlpha_lo * dz_inv;
387 RHS_a(i,j,
khi) = face_data(i,j,
khi);
388 RHS_a(i,j,
khi) += Fact * gfac * (tau_corr(i,j,
khi+1) - tau_corr(i,j,
khi));
391 if (ext_dir_on_zhi) {
396 c_tmp = -2.0 * Fact * rhoAlpha_hi * dz_inv;
397 RHS_a(i,j,
khi) += 2.0 * rhoAlpha_hi * face_data(i,j,
khi+1) * dz_inv * dz_inv;
401 b_tmp = rhoface - a_tmp - c_tmp;
402 inv_b2_tmp = 1. / (b_tmp - a_tmp * coeffG_a(i,j,
khi-1));
405 soln_a(i,j,
khi) = (RHS_a(i,j,
khi) - a_tmp * RHS_a(i,j,
khi-1)) * inv_b2_tmp;
410 for (
int k(
khi-1); k>=klo; --k) {
411 soln_a(i,j,k) = RHS_a(i,j,k) - coeffG_a(i,j,k) * soln_a(i,j,k+1);
416 for (
int k(klo); k<=
khi; ++k) {
417 rhoface = 0.5 * (cell_data(i,j,k,
Rho_comp) + cell_data(i-ioff,j-joff,k,
Rho_comp));
418 face_data(i,j,k) = rhoface * soln_a(i,j,k);
constexpr amrex::Real three
Definition: ERF_Constants.H:9
constexpr amrex::Real two
Definition: ERF_Constants.H:8
constexpr amrex::Real one
Definition: ERF_Constants.H:7
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void getRhoAlphaForFaces(int i, int j, int k, int ioff, int joff, amrex::Real &rhoAlpha_lo, amrex::Real &rhoAlpha_hi, const amrex::Array4< const amrex::Real > &cell_data, const amrex::Array4< const amrex::Real > &mu_turb, const amrex::Real mu_eff, bool l_consA, bool l_turb)
Definition: ERF_GetRhoAlphaForFaces.H:5
#define Rho_comp
Definition: ERF_IndexDefines.H:36
const int khi
Definition: ERF_InitCustomPert_Bubble.H:21
ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept { const auto prob_lo=geomdata.ProbLo();const auto dx=geomdata.CellSize();const Real x=(prob_lo[0]+(i+myhalf) *dx[0])/mf_m(i, j, 0);const Real z=z_cc(i, j, k);Real L=std::sqrt(std::pow((x - x_c)/x_r, 2)+std::pow((z - z_c)/z_r, 2));if(L<=one) { Real dT=T_pert *(std::cos(PI *L)+one)/two;Real Tbar_hse=p_hse(i, j, k)/(R_d *r_hse(i, j, k));Real theta_perturbed=(Tbar_hse+dT) *std::pow(p_0/p_hse(i, j, k), rdOcp);Real theta_0=(Tbar_hse) *std::pow(p_0/p_hse(i, j, k), rdOcp);if(const_rho) { state_pert(i, j, k, RhoTheta_comp)=r_hse(i, j, k) *(theta_perturbed - theta_0);} else { state_pert(i, j, k, Rho_comp)=getRhoThetagivenP(p_hse(i, j, k))/theta_perturbed - r_hse(i, j, k);} } })
bool l_turb
Definition: ERF_SetupVertDiff.H:9
bool l_consA
Definition: ERF_SetupVertDiff.H:8
AMREX_ASSERT_WITH_MESSAGE(wbar_cutoff_min > wbar_cutoff_max, "ERROR: wbar_cutoff_min < wbar_cutoff_max")
@ xvel_bc
Definition: ERF_IndexDefines.H:87
@ foextrap
Definition: ERF_IndexDefines.H:226
@ ext_dir
Definition: ERF_IndexDefines.H:227
@ ext_dir_prim
Definition: ERF_IndexDefines.H:229
Definition: ERF_DiffStruct.H:19
amrex::Real rho0_trans
Definition: ERF_DiffStruct.H:91
MolecDiffType molec_diff_type
Definition: ERF_DiffStruct.H:84
amrex::Real dynamic_viscosity
Definition: ERF_DiffStruct.H:96
DiffChoice diffChoice
Definition: ERF_DataStruct.H:1088
amrex::Vector< TurbChoice > turbChoice
Definition: ERF_DataStruct.H:1091
Definition: ERF_TurbStruct.H:42
bool use_kturb
Definition: ERF_TurbStruct.H:424