Function for computing the strain rates without terrain.
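The diagonal entries are tau11 = du/dx, tau22 = dv/dy, tau33 = dw/dz, and the off-diagonal entries are tau12 = 0.5*(du/dy + dv/dx), tau13 = 0.5*(du/dz + dw/dx), and tau23 = 0.5*(dv/dz + dw/dy); the horizontal derivatives carry the map-factor scalings (mf_m, mf_u, mf_v) that appear explicitly below. Boundary-adjacent planes are handled first, then the interior.

// Versions of the domain box converted to the index types of the tile boxes, so the
// boundary tests below compare like-typed boxes.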
Box domain_xy = convert(domain, tbxxy.ixType());
Box domain_xz = convert(domain, tbxxz.ixType());
Box domain_yz = convert(domain, tbxyz.ixType());

const auto& dom_lo = lbound(domain);
const auto& dom_hi = ubound(domain);
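// Only honor a Dirichlet flag if this tile's box actually reaches the corresponding domain face.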
xl_v_dir = ( xl_v_dir && (tbxxy.smallEnd(0) == domain_xy.smallEnd(0)) );
xh_v_dir = ( xh_v_dir && (tbxxy.bigEnd(0)   == domain_xy.bigEnd(0)  ) );

xl_w_dir = ( xl_w_dir && (tbxxz.smallEnd(0) == domain_xz.smallEnd(0)) );
xh_w_dir = ( xh_w_dir && (tbxxz.bigEnd(0)   == domain_xz.bigEnd(0)  ) );

yl_u_dir = ( yl_u_dir && (tbxxy.smallEnd(1) == domain_xy.smallEnd(1)) );
yh_u_dir = ( yh_u_dir && (tbxxy.bigEnd(1)   == domain_xy.bigEnd(1)  ) );

yl_w_dir = ( yl_w_dir && (tbxyz.smallEnd(1) == domain_yz.smallEnd(1)) );
yh_w_dir = ( yh_w_dir && (tbxyz.bigEnd(1)   == domain_yz.bigEnd(1)  ) );

zl_u_dir = ( zl_u_dir && (tbxxz.smallEnd(2) == domain_xz.smallEnd(2)) );
zh_u_dir = ( zh_u_dir && (tbxxz.bigEnd(2)   == domain_xz.bigEnd(2)  ) );

zl_v_dir = ( zl_v_dir && (tbxyz.smallEnd(2) == domain_yz.smallEnd(2)) );
zh_v_dir = ( zh_v_dir && (tbxyz.bigEnd(2)   == domain_yz.bigEnd(2)  ) );
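// Boundary-adjacent planes: on domain faces flagged above, the derivative normal to the face is
// computed with the modified (-8/3, 3, -1/3) stencil; where an upwind test applies, the centered
// difference is used instead when the boundary-normal velocity points out of the domain.

// x-lo face: modified stencil for dv/dx in tau12.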
if (xl_v_dir) {
    Box planexy = tbxxy; planexy.setBig(0, planexy.smallEnd(0) );
    ParallelFor(planexy,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
    {
        if (!need_to_test || u(dom_lo.x,j,k) >= 0.) {
            tau12(i,j,k) = 0.5 * ( (u(i, j, k)/mf_u(i,j,0) - u(i, j-1, k)/mf_u(i,j-1,0))*dxInv[1] +
                                   (-(8./3.) * v(i-1,j,k)/mf_v(i-1,j,0) + 3. * v(i,j,k)/mf_v(i,j,0) -
                                     (1./3.) * v(i+1,j,k)/mf_v(i+1,j,0))*dxInv[0] ) * mf_u(i,j,0)*mf_u(i,j,0);
        } else {
            tau12(i,j,k) = 0.5 * ( (u(i, j, k)/mf_u(i,j,0) - u(i, j-1, k)/mf_u(i,j-1,0))*dxInv[1] +
                                   (v(i, j, k)/mf_v(i,j,0) - v(i-1, j, k)/mf_v(i-1,j,0))*dxInv[0] ) * mf_u(i,j,0)*mf_u(i,j,0);
        }
    });
}
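// x-hi face: mirrored stencil for dv/dx in tau12.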
if (xh_v_dir) {
    Box planexy = tbxxy; planexy.setSmall(0, planexy.bigEnd(0) );
    ParallelFor(planexy,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
    {
        if (!need_to_test || u(dom_hi.x+1,j,k) <= 0.) {
            tau12(i,j,k) = 0.5 * ( (u(i, j, k)/mf_u(i,j,0) - u(i, j-1, k)/mf_u(i,j-1,0))*dxInv[1] +
                                  -(-(8./3.) * v(i,j,k)/mf_v(i,j,0) + 3. * v(i-1,j,k)/mf_v(i-1,j,0) -
                                     (1./3.) * v(i-2,j,k)/mf_v(i-2,j,0))*dxInv[0] ) * mf_u(i,j,0)*mf_u(i,j,0);
        } else {
            tau12(i,j,k) = 0.5 * ( (u(i, j, k)/mf_u(i,j,0) - u(i, j-1, k)/mf_u(i,j-1,0))*dxInv[1] +
                                   (v(i, j, k)/mf_v(i,j,0) - v(i-1, j, k)/mf_v(i-1,j,0))*dxInv[0] ) * mf_u(i,j,0)*mf_u(i,j,0);
        }
    });
}
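// x-lo face: modified stencil for dw/dx in tau13.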
if (xl_w_dir) {
    Box planexz = tbxxz; planexz.setBig(0, planexz.smallEnd(0) );
    ParallelFor(planexz,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
    {
        if (!need_to_test || u(dom_lo.x,j,k) >= 0.) {
            tau13(i,j,k) = 0.5 * ( (u(i, j, k) - u(i, j, k-1))*dxInv[2] +
                                   (-(8./3.) * w(i-1,j,k) + 3. * w(i,j,k) - (1./3.) * w(i+1,j,k))*dxInv[0]*mf_u(i,j,0) );
        } else {
            tau13(i,j,k) = 0.5 * ( (u(i, j, k) - u(i, j, k-1))*dxInv[2] + (w(i, j, k) - w(i-1, j, k))*dxInv[0]*mf_u(i,j,0) );
        }
    });
}
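// x-hi face: mirrored stencil for dw/dx in tau13.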
if (xh_w_dir) {
    Box planexz = tbxxz; planexz.setSmall(0, planexz.bigEnd(0) );
    ParallelFor(planexz,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
    {
        if (!need_to_test || u(dom_hi.x+1,j,k) <= 0.) {
            tau13(i,j,k) = 0.5 * ( (u(i, j, k) - u(i, j, k-1))*dxInv[2] +
                                  -(-(8./3.) * w(i,j,k) + 3. * w(i-1,j,k) - (1./3.) * w(i-2,j,k))*dxInv[0]*mf_u(i,j,0) );
        } else {
            tau13(i,j,k) = 0.5 * ( (u(i, j, k) - u(i, j, k-1))*dxInv[2] + (w(i, j, k) - w(i-1, j, k))*dxInv[0]*mf_u(i,j,0) );
        }
    });
}
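// y-lo face: modified stencil for du/dy in tau12.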
if (yl_u_dir) {
    Box planexy = tbxxy; planexy.setBig(1, planexy.smallEnd(1) );
    ParallelFor(planexy,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
    {
        if (!need_to_test || v(i,dom_lo.y,k) >= 0.) {
            tau12(i,j,k) = 0.5 * ( (-(8./3.) * u(i,j-1,k)/mf_u(i,j-1,0) + 3. * u(i,j,k)/mf_u(i,j,0) -
                                     (1./3.) * u(i,j+1,k)/mf_u(i,j+1,0))*dxInv[1] +
                                   (v(i, j, k)/mf_v(i,j,0) - v(i-1, j, k)/mf_v(i-1,j,0))*dxInv[0] ) * mf_u(i,j,0)*mf_u(i,j,0);
        } else {
            tau12(i,j,k) = 0.5 * ( (u(i, j, k)/mf_u(i,j,0) - u(i, j-1, k)/mf_u(i,j-1,0))*dxInv[1] +
                                   (v(i, j, k)/mf_v(i,j,0) - v(i-1, j, k)/mf_v(i-1,j,0))*dxInv[0] ) * mf_u(i,j,0)*mf_u(i,j,0);
        }
    });
}
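// y-hi face: mirrored stencil for du/dy in tau12.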
if (yh_u_dir) {
    Box planexy = tbxxy; planexy.setSmall(1, planexy.bigEnd(1) );
    ParallelFor(planexy,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
    {
        if (!need_to_test || v(i,dom_hi.y+1,k) <= 0.) {
            tau12(i,j,k) = 0.5 * ( -(-(8./3.) * u(i,j,k)/mf_u(i,j,0) + 3. * u(i,j-1,k)/mf_u(i,j-1,0) -
                                      (1./3.) * u(i,j-2,k)/mf_u(i,j-2,0))*dxInv[1] +
                                   (v(i, j, k)/mf_v(i,j,0) - v(i-1, j, k)/mf_v(i-1,j,0))*dxInv[0] ) * mf_u(i,j,0)*mf_u(i,j,0);
        } else {
            tau12(i,j,k) = 0.5 * ( (u(i, j, k)/mf_u(i,j,0) - u(i, j-1, k)/mf_u(i,j-1,0))*dxInv[1] +
                                   (v(i, j, k)/mf_v(i,j,0) - v(i-1, j, k)/mf_v(i-1,j,0))*dxInv[0] ) * mf_u(i,j,0)*mf_u(i,j,0);
        }
    });
}
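// y-lo face: modified stencil for dw/dy in tau23.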
if (yl_w_dir) {
    Box planeyz = tbxyz; planeyz.setBig(1, planeyz.smallEnd(1) );
    ParallelFor(planeyz,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
    {
        if (!need_to_test || v(i,dom_lo.y,k) >= 0.) {
            tau23(i,j,k) = 0.5 * ( (v(i, j, k) - v(i, j, k-1))*dxInv[2] +
                                   (-(8./3.) * w(i,j-1,k) + 3. * w(i,j,k) - (1./3.) * w(i,j+1,k))*dxInv[1]*mf_v(i,j,0) );
        } else {
            tau23(i,j,k) = 0.5 * ( (v(i, j, k) - v(i, j, k-1))*dxInv[2] + (w(i, j, k) - w(i, j-1, k))*dxInv[1]*mf_v(i,j,0) );
        }
    });
}
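// y-hi face: mirrored stencil for dw/dy in tau23.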
if (yh_w_dir) {
    Box planeyz = tbxyz; planeyz.setSmall(1, planeyz.bigEnd(1) );
    ParallelFor(planeyz,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
    {
        if (!need_to_test || v(i,dom_hi.y+1,k) <= 0.) {
            tau23(i,j,k) = 0.5 * ( (v(i, j, k) - v(i, j, k-1))*dxInv[2] +
                                  -(-(8./3.) * w(i,j,k) + 3. * w(i,j-1,k) - (1./3.) * w(i,j-2,k))*dxInv[1]*mf_v(i,j,0) );
        } else {
            tau23(i,j,k) = 0.5 * ( (v(i, j, k) - v(i, j, k-1))*dxInv[2] + (w(i, j, k) - w(i, j-1, k))*dxInv[1]*mf_v(i,j,0) );
        }
    });
}
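// z-lo face: modified stencil for du/dz in tau13 (no upwind test in the vertical).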
if (zl_u_dir) {
    Box planexz = tbxxz; planexz.setBig(2, planexz.smallEnd(2) );
    ParallelFor(planexz,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
    {
        tau13(i,j,k) = 0.5 * ( (-(8./3.) * u(i,j,k-1) + 3. * u(i,j,k) - (1./3.) * u(i,j,k+1))*dxInv[2] +
                               (w(i, j, k) - w(i-1, j, k))*dxInv[0]*mf_u(i,j,0) );
    });
}
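// z-hi face: mirrored stencil for du/dz in tau13.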
if (zh_u_dir) {
    Box planexz = tbxxz; planexz.setSmall(2, planexz.bigEnd(2) );
    ParallelFor(planexz,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
    {
        tau13(i,j,k) = 0.5 * ( -(-(8./3.) * u(i,j,k) + 3. * u(i,j,k-1) - (1./3.) * u(i,j,k-2))*dxInv[2] +
                               (w(i, j, k) - w(i-1, j, k))*dxInv[0]*mf_u(i,j,0) );
    });
}
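// z-lo face: modified stencil for dv/dz in tau23.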
if (zl_v_dir) {
    Box planeyz = tbxyz; planeyz.setBig(2, planeyz.smallEnd(2) );
    ParallelFor(planeyz,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
    {
        tau23(i,j,k) = 0.5 * ( (-(8./3.) * v(i,j,k-1) + 3. * v(i,j,k) - (1./3.) * v(i,j,k+1))*dxInv[2] +
                               (w(i, j, k) - w(i, j-1, k))*dxInv[1]*mf_v(i,j,0) );
    });
}
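// z-hi face: mirrored stencil for dv/dz in tau23.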
if (zh_v_dir) {
    Box planeyz = tbxyz; planeyz.setSmall(2, planeyz.bigEnd(2) );
    ParallelFor(planeyz,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
    {
        tau23(i,j,k) = 0.5 * ( -(-(8./3.) * v(i,j,k) + 3. * v(i,j,k-1) - (1./3.) * v(i,j,k-2))*dxInv[2] +
                               (w(i, j, k) - w(i, j-1, k))*dxInv[1]*mf_v(i,j,0) );
    });
}
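// Diagonal components on the cell-centered box, using centered differences with map-factor scaling.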
ParallelFor(bxcc, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
{
    tau11(i,j,k) = (u(i+1, j  , k  )/mf_u(i+1,j,0) - u(i, j, k)/mf_u(i,j,0))*dxInv[0]*mf_m(i,j,0)*mf_m(i,j,0);
    tau22(i,j,k) = (v(i  , j+1, k  )/mf_v(i,j+1,0) - v(i, j, k)/mf_v(i,j,0))*dxInv[1]*mf_m(i,j,0)*mf_m(i,j,0);
    tau33(i,j,k) = (w(i  , j  , k+1) - w(i, j, k))*dxInv[2];
});
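// Off-diagonal components over tbxxy, tbxxz, and tbxyz, using centered differences.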
ParallelFor(tbxxy,tbxxz,tbxyz,
[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
{
    tau12(i,j,k) = 0.5 * ( (u(i, j, k)/mf_u(i,j,0) - u(i, j-1, k)/mf_u(i,j-1,0))*dxInv[1] +
                           (v(i, j, k)/mf_v(i,j,0) - v(i-1, j, k)/mf_v(i-1,j,0))*dxInv[0] ) * mf_u(i,j,0)*mf_u(i,j,0);
},
[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
{
    tau13(i,j,k) = 0.5 * ( (u(i, j, k) - u(i, j, k-1))*dxInv[2] + (w(i, j, k) - w(i-1, j, k))*dxInv[0]*mf_u(i,j,0) );
},
[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
{
    tau23(i,j,k) = 0.5 * ( (v(i, j, k) - v(i, j, k-1))*dxInv[2] + (w(i, j, k) - w(i, j-1, k))*dxInv[1]*mf_v(i,j,0) );
});