1 #ifndef ERF_TURB_PERT_STRUCT_H_
2 #define ERF_TURB_PERT_STRUCT_H_
5 #include <AMReX_MultiFabUtil.H>
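// The TurbulentPerturbation struct opens at line 18 (elided here). Lines 30-34 below are
// the trailing parameters of init_tpi(const int lev, ...), whose signature begins at line 29.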
30 const amrex::IntVect& nx,
31 const amrex::GpuArray<amrex::Real,3> dx,
32 const amrex::BoxArray& ba,
33 const amrex::DistributionMapping& dm,
34 const int ngrow_state)
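// Lines 51-61: the remaining perturbation inputs are read (via the ParmParse object pp,
// declared in elided code) and sanity-checked; invalid settings abort the run.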
51 pp.query("perturbation_T_intensity", tpi_Ti);
54 if (tpi_layers < 0) { amrex::Abort("Please provide a valid perturbation layer value (i.e. 3-5)"); }
55 if (tpi_offset < 0) { amrex::Abort("Please provide a valid inflow cell offset value for the perturbation region (i.e. 0-5)"); }
57 if (tpi_boxDim[i] == 0) { amrex::Abort("Please provide valid dimensions for the perturbation boxes."); }
59 if (tpi_nonDim < 0.) { amrex::Abort("Please provide a valid nondimensional number (i.e. Ri = 0.042)"); }
60 if (tpi_Tinf < 0.) { amrex::Abort("Please provide a valid ambient temperature value (i.e. T_0 = T_infty)"); }
61 if (tpi_Ti < 0.) { amrex::Abort("Please provide a valid temperature intensity value (i.e. 0-1.0)"); }
64 amrex::Box lo_x_bx(amrex::IntVect(0), amrex::IntVect(1), amrex::IntVect(0));
65 amrex::Box hi_x_bx(amrex::IntVect(0), amrex::IntVect(1), amrex::IntVect(0));
66 amrex::Box lo_y_bx(amrex::IntVect(0), amrex::IntVect(1), amrex::IntVect(0));
67 amrex::Box hi_y_bx(amrex::IntVect(0), amrex::IntVect(1), amrex::IntVect(0));
70 amrex::BoxList tmp_bl;
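// Lines 107-123: where the x-direction and y-direction regions intersect, the y boxes
// are shrunk past the overlap so that corner cells are not counted twice.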
107 amrex::Box lo_x_lo_y_u = lo_x_bx & lo_y_bx;
108 lo_y_bx.setSmall(amrex::IntVect(lo_x_lo_y_u.bigEnd(0)+1, lo_x_lo_y_u.smallEnd(1), lo_x_lo_y_u.smallEnd(2)));
112 amrex::Box hi_x_lo_y_u = hi_x_bx & lo_y_bx;
113 lo_y_bx.setBig(amrex::IntVect(hi_x_lo_y_u.smallEnd(0)-1, hi_x_lo_y_u.bigEnd(1), hi_x_lo_y_u.bigEnd(2)));
117 amrex::Box lo_x_hi_y_u = lo_x_bx & hi_y_bx;
118 hi_y_bx.setSmall(amrex::IntVect(lo_x_hi_y_u.bigEnd(0)+1, lo_x_hi_y_u.smallEnd(1), lo_x_hi_y_u.smallEnd(2)));
122 amrex::Box hi_x_hi_y_u = hi_x_bx & hi_y_bx;
123 hi_y_bx.setBig(amrex::IntVect(hi_x_hi_y_u.smallEnd(0)-1, hi_x_hi_y_u.bigEnd(1), hi_x_hi_y_u.bigEnd(2)));
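// Lines 132-146: the collected BoxList becomes a BoxArray, is chopped into boxes no larger
// than the (elided) boxSize, and is stored per level in pb_ba; pb_cell is then defined on the
// level's BoxArray and DistributionMapping with one component and ngrow_state ghost cells,
// and holds the per-cell perturbation values.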
132 amrex::BoxArray tmp_ba(tmp_bl);
133 tmp_ba.maxSize(boxSize);
134 pb_ba.push_back(tmp_ba);
146 pb_cell.define(ba, dm, 1, ngrow_state);
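// calc_tpi_update(const int lev, const amrex::Real dt, mf_xvel, mf_yvel, mf_cons) starts at
// line 184; lines 185-188 below are its trailing parameters.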
185 const amrex::Real dt,
186 amrex::MultiFab& mf_xvel,
187 amrex::MultiFab& mf_yvel,
188 amrex::MultiFab& mf_cons)
194 srand( (unsigned) time(NULL) );
196 auto m_ixtype = mf_cons.boxArray().ixType();
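// Line 194 reseeds the C random number generator from the wall clock on every update call.
// The loop at line 198 visits each perturbation box on this level; the check at line 208
// skips boxes whose stored mean velocity magnitude pb_mag is still zero.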
198 for (int boxIdx = 0; boxIdx < pb_ba[lev].size(); boxIdx++) {
208 if (pb_mag[boxIdx] != 0.) {
237 for (int boxIdx = 0; boxIdx < pb_ba[lev].size(); boxIdx++) {
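// apply_tpi(lev, vbx, comp, m_ixtype, src_arr, pert_cell) starts at line 245. For every
// perturbation box that overlaps the valid box vbx, it adds the stored per-cell perturbation
// to component comp of src_arr (line 257); line 261 (context elided) assigns a box-dependent
// constant instead, apparently for debugging.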
246 const amrex::Box& vbx,
248 const amrex::IndexType& m_ixtype,
249 const amrex::Array4<amrex::Real>& src_arr,
250 const amrex::Array4<amrex::Real const>& pert_cell)
252 for (int boxIdx = 0; boxIdx < pb_ba[lev].size(); boxIdx++) {
253 amrex::Box pbx = amrex::convert(pb_ba[lev][boxIdx], m_ixtype);
254 amrex::Box ubx = pbx & vbx;
256 ParallelFor(ubx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
257 src_arr(i,j,k,comp) += pert_cell(i,j,k);
261 src_arr(i,j,k,comp) = (amrex::Real) (boxIdx + 5.);
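// calc_tpi_amp(boxIdx, interval) starts at line 269: the perturbation amplitude for a box is
// built from its mean velocity magnitude Um = pb_mag[boxIdx] (line 272) and, at line 288,
// divided by the supplied interval.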
270 const amrex::Real& interval)
272 amrex::Real Um = pb_mag[boxIdx];
288 pb_amp[boxIdx] /= interval;
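// pseudoRandomPert(boxIdx, lev, m_ixtype) starts at line 295: it fills pb_cell with uniform
// random values in [-pb_amp, +pb_amp] for the given box. Setting the optional
// erf.fix_random_seed input seeds amrex::InitRandom with a fixed value for reproducibility.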
297 const amrex::IndexType& m_ixtype)
300 int fix_random_seed = 0;
301 amrex::ParmParse pp("erf"); pp.query("fix_random_seed", fix_random_seed);
302 if (fix_random_seed) {
303 amrex::InitRandom(1024UL);
306 for (amrex::MFIter mfi(pb_cell, TileNoZ()); mfi.isValid(); ++mfi) {
307 amrex::Box vbx = mfi.validbox();
308 amrex::Box pbx = amrex::convert(pb_ba[lev][boxIdx], m_ixtype);
309 amrex::Box ubx = pbx & vbx;
311 amrex::Real amp_copy = pb_amp[boxIdx];
312 const amrex::Array4<amrex::Real>& pert_cell = pb_cell.array(mfi);
313 ParallelForRNG(ubx, [=] AMREX_GPU_DEVICE(int i, int j, int k, const amrex::RandomEngine& engine) noexcept {
314 amrex::Real rand_double = amrex::Random(engine);
315 pert_cell(i,j,k) = (rand_double*2.0 - 1.0) * amp_copy;
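// netZeroBuoyantAdd(boxIdx, lev) starts at line 322: a GPU reduction accumulates the mean of
// pb_cell over the box (lines 343-345), the result is copied back to the host, and it is
// stored in m_pb_netZero[boxIdx] for the follow-up adjustment.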
326 const amrex::BoxArray m_pb_ba = pb_ba[lev];
330 amrex::Vector<amrex::Real> avg_h(1,0.);
331 amrex::Gpu::DeviceVector<amrex::Real> avg_d(1,0.);
332 amrex::Real* avg = avg_d.data();
335 for (amrex::MFIter mfi(pb_cell, TileNoZ()); mfi.isValid(); ++mfi) {
336 const amrex::Box& vbx = mfi.validbox();
337 amrex::Box pbx = amrex::convert(m_pb_ba[boxIdx], vbx.ixType());
338 amrex::Box ubx = pbx & vbx;
340 const amrex::Array4<const amrex::Real>& pert_cell = pb_cell.const_array(mfi);
341 int npts = ubx.numPts();
342 amrex::Real norm = 1.0 / (amrex::Real) npts;
343 ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubx, [=]
344 AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
345 amrex::Gpu::deviceReduceSum(&avg[0], pert_cell(i,j,k)*norm, handler);
347 amrex::Gpu::copy(amrex::Gpu::deviceToHost, avg_d.begin(), avg_d.end(), avg_h.begin());
350 m_pb_netZero[boxIdx] = avg_h[0];
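// netZeroBuoyantAdjust(boxIdx, lev) starts at line 357: the adjustment value "adjust"
// (set in elided code, presumably from the stored pb_netZero average) is subtracted from
// every cell of the box so the box-integrated perturbation is driven toward zero.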
361 const amrex::BoxArray m_pb_ba = pb_ba[lev];
362 for (amrex::MFIter mfi(pb_cell, TileNoZ()); mfi.isValid(); ++mfi) {
363 const amrex::Box& vbx = mfi.validbox();
364 amrex::Box pbx = amrex::convert(m_pb_ba[boxIdx], vbx.ixType());
365 amrex::Box ubx = pbx & vbx;
368 const amrex::Array4<amrex::Real>& pert_cell = pb_cell.array(mfi);
369 ParallelFor(ubx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
370 pert_cell(i,j,k) -= adjust;
377 #define USE_VOLUME_AVERAGE
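// calc_tpi_meanMag_perBox(boxIdx, lev, mf_cons, mf_xvel, mf_yvel) starts at line 380.
// With USE_VOLUME_AVERAGE defined (line 377, the default), the x- and y-velocities are
// averaged over the whole perturbation box; the USE_SLAB_AVERAGE branch instead averages
// over the bottom and top k-slabs of the box.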
382 amrex::MultiFab& mf_cons,
383 amrex::MultiFab& mf_xvel,
384 amrex::MultiFab& mf_yvel)
388 const amrex::BoxArray m_pb_ba = pb_ba[lev];
390 m_pb_mag[boxIdx] = 0.;
396 amrex::Vector<amrex::Real> avg_h(n_avg,0.);
397 amrex::Gpu::DeviceVector<amrex::Real> avg_d(n_avg,0.);
398 amrex::Real* avg = avg_d.data();
401 for (amrex::MFIter mfi(mf_cons, TileNoZ()); mfi.isValid(); ++mfi) {
404 const amrex::Box& vbx = mfi.validbox();
407 auto ixtype_u = mf_xvel.boxArray().ixType();
408 amrex::Box vbx_u = amrex::convert(vbx,ixtype_u);
409 amrex::Box pbx_u = amrex::convert(m_pb_ba[boxIdx], ixtype_u);
410 amrex::Box ubx_u = pbx_u & vbx_u;
413 auto ixtype_v = mf_yvel.boxArray().ixType();
414 amrex::Box vbx_v = amrex::convert(vbx,ixtype_v);
415 amrex::Box pbx_v = amrex::convert(m_pb_ba[boxIdx], ixtype_v);
416 amrex::Box ubx_v = pbx_v & vbx_v;
420 const amrex::Array4<const amrex::Real>& xvel_arry = mf_xvel.const_array(mfi);
422 #ifdef USE_VOLUME_AVERAGE
423 int npts = ubx_u.numPts();
424 amrex::Real norm = 1.0 / (amrex::Real) npts;
425 ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubx_u, [=]
426 AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
427 amrex::Gpu::deviceReduceSum(&avg[0], xvel_arry(i,j,k)*norm, handler);
431 #ifdef USE_SLAB_AVERAGE
432 amrex::Box ubxSlab_lo = makeSlab(ubx_u,2,ubx_u.smallEnd(2));
433 amrex::Box ubxSlab_hi = makeSlab(ubx_u,2,ubx_u.bigEnd(2));
434 int npts_lo = ubxSlab_lo.numPts();
435 int npts_hi = ubxSlab_hi.numPts();
436 amrex::Real norm_lo = 1.0 / (amrex::Real) npts_lo;
437 amrex::Real norm_hi = 1.0 / (amrex::Real) npts_hi;
440 ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_lo, [=]
441 AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
442 amrex::Gpu::deviceReduceSum(&avg[0], xvel_arry(i,j,k)*norm_lo, handler);
446 ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_hi, [=]
447 AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
448 amrex::Gpu::deviceReduceSum(&avg[2], xvel_arry(i,j,k)*norm_hi, handler);
455 const amrex::Array4<const amrex::Real>& yvel_arry = mf_yvel.const_array(mfi);
457 #ifdef USE_VOLUME_AVERAGE
458 int npts = ubx_v.numPts();
459 amrex::Real norm = 1.0 / (amrex::Real) npts;
460 ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubx_v, [=]
461 AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
462 amrex::Gpu::deviceReduceSum(&avg[1], yvel_arry(i,j,k)*norm, handler);
466 #ifdef USE_SLAB_AVERAGE
467 amrex::Box ubxSlab_lo = makeSlab(ubx_v,2,ubx_v.smallEnd(2));
468 amrex::Box ubxSlab_hi = makeSlab(ubx_v,2,ubx_v.bigEnd(2));
469 int npts_lo = ubxSlab_lo.numPts();
470 int npts_hi = ubxSlab_hi.numPts();
471 amrex::Real norm_lo = 1.0 / (amrex::Real) npts_lo;
472 amrex::Real norm_hi = 1.0 / (amrex::Real) npts_hi;
475 ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_lo, [=]
476 AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
477 amrex::Gpu::deviceReduceSum(&avg[1], yvel_arry(i,j,k)*norm_lo, handler);
481 ParallelFor(amrex::Gpu::KernelInfo().setReduction(true), ubxSlab_hi, [=]
482 AMREX_GPU_DEVICE(int i, int j, int k, amrex::Gpu::Handler const& handler) noexcept {
483 amrex::Gpu::deviceReduceSum(&avg[3], yvel_arry(i,j,k)*norm_hi, handler);
490 amrex::Gpu::copy(amrex::Gpu::deviceToHost, avg_d.begin(), avg_d.end(), avg_h.begin());
493 #ifdef USE_VOLUME_AVERAGE
494 m_pb_mag[boxIdx] = sqrt(avg_h[0]*avg_h[0] + avg_h[1]*avg_h[1]);
497 #ifdef USE_SLAB_AVERAGE
498 m_pb_mag[boxIdx] = 0.5*( sqrt(avg_h[0]*avg_h[0] + avg_h[1]*avg_h[1])
499 + sqrt(avg_h[2]*avg_h[2] + avg_h[3]*avg_h[3]));
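// Lines 493-499: the stored mean magnitude is sqrt(<u>^2 + <v>^2) for the volume average,
// or the mean of the bottom-slab and top-slab magnitudes for the slab average.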
531 amrex::Vector<amrex::BoxArray> pb_ba;
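// RandomReal(min, max), defined at line 568, is a host-side helper that maps rand() to a
// uniform amrex::Real in [min, max]; it uses the C generator seeded by srand at line 194.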
568 amrex::Real RandomReal (const amrex::Real min, const amrex::Real max)
570 amrex::Real r = (amrex::Real) rand() / (amrex::Real) RAND_MAX;
571 return min + r * (max - min);
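// A usage sketch, not part of this header: the calling sequence below and every variable
// name in it (geom, cons, xvel, yvel, ngrow, comp, dt) are illustrative assumptions only.
// The struct is expected to be initialized once, refreshed each perturbation interval, and
// applied wherever the inflow source data is assembled.
//
//   TurbulentPerturbation turbPert;
//   turbPert.init_tpi(0, geom.Domain().bigEnd(), geom.CellSizeArray(),
//                     cons.boxArray(), cons.DistributionMap(), ngrow);
//
//   // each perturbation update interval:
//   turbPert.calc_tpi_update(0, dt, xvel, yvel, cons);
//
//   // when filling the target MultiFab:
//   for (amrex::MFIter mfi(cons); mfi.isValid(); ++mfi) {
//       turbPert.apply_tpi(0, mfi.validbox(), comp, cons.boxArray().ixType(),
//                          cons.array(mfi), turbPert.pb_cell.const_array(mfi));
//   }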