ERF
Energy Research and Forecasting: An Atmospheric Modeling Code
NOAHMP Class Reference

#include <ERF_NOAHMP.H>

Inheritance diagram for NOAHMP:
Collaboration diagram for NOAHMP:

Public Member Functions

 NOAHMP ()
 
virtual ~NOAHMP ()=default
 
void Define (SolverChoice &) override
 
void Init (const int &lev, const amrex::MultiFab &cons_in, const amrex::Geometry &geom, const amrex::Real &dt) override
 
void Advance_With_State (const int &lev, amrex::MultiFab &cons_in, amrex::MultiFab &xvel_in, amrex::MultiFab &yvel_in, amrex::MultiFab *hfx3_out, amrex::MultiFab *qfx3_out, const amrex::Real &dt, const int &nstep) override
 
void Plot_Landfile (const int &nstep) override
 
amrex::MultiFab * Lsm_Data_Ptr (const int &varIdx) override
 
amrex::MultiFab * Lsm_Flux_Ptr (const int &varIdx) override
 
amrex::Geometry Lsm_Geom () override
 
int Lsm_Data_Size () override
 
int Lsm_Flux_Size () override
 
std::string Lsm_DataName (const int &varIdx) override
 
int Lsm_DataIndex (std::string varname) override
 
std::string Lsm_FluxName (const int &varIdx) override
 
int Lsm_FluxIndex (std::string varname) override
 
- Public Member Functions inherited from NullSurf
 NullSurf ()
 
virtual ~NullSurf ()=default
 
virtual void Advance (const amrex::Real &)
 
virtual void Update_Micro_Vars (amrex::MultiFab &)
 
virtual void Update_State_Vars (amrex::MultiFab &)
 
virtual void Copy_State_to_Micro (const amrex::MultiFab &)
 
virtual void Copy_Micro_to_State (amrex::MultiFab &)
 
virtual std::unordered_map< std::string, std::string > & Lsm_WRFInputNames ()
 

Private Types

using FabPtr = std::shared_ptr< amrex::MultiFab >
 

Private Attributes

int m_lsm_data_size = LsmData_NOAHMP::NumVars
 
int m_lsm_flux_size = LsmFlux_NOAHMP::NumVars
 
amrex::Vector< int > LsmDataMap
 
amrex::Vector< int > LsmFluxMap
 
amrex::Vector< std::string > LsmDataName
 
amrex::Vector< std::string > LsmFluxName
 
amrex::Geometry m_geom
 
amrex::Geometry m_lsm_geom
 
amrex::Real m_dt
 
int khi_lsm
 
int m_nz_lsm = 1
 
amrex::Real m_dz_lsm = 1.0
 
amrex::Array< FabPtr, LsmData_NOAHMP::NumVars > lsm_fab_data
 
amrex::Array< FabPtr, LsmFlux_NOAHMP::NumVars > lsm_fab_flux
 
NoahmpIO_vector noahmpio_vect
 
int m_plot_int_1 = -1
 

Member Typedef Documentation

◆ FabPtr

using NOAHMP::FabPtr = std::shared_ptr<amrex::MultiFab>
private

Constructor & Destructor Documentation

◆ NOAHMP()

NOAHMP::NOAHMP ( )
inline
59 {}

◆ ~NOAHMP()

virtual NOAHMP::~NOAHMP ( )
virtualdefault

Member Function Documentation

◆ Advance_With_State()

void NOAHMP::Advance_With_State ( const int &  lev,
amrex::MultiFab &  cons_in,
amrex::MultiFab &  xvel_in,
amrex::MultiFab &  yvel_in,
amrex::MultiFab *  hfx3_out,
amrex::MultiFab *  qfx3_out,
const amrex::Real &  dt,
const int &  nstep 
)
overridevirtual

Reimplemented from NullSurf.

231 {
232 
233  Box domain = m_geom.Domain();
234 
235  Print () << "Noah-MP driver started at time step: " << nstep+1 << std::endl;
236 
237  bool is_moist = (cons_in.nComp() > RhoQ1_comp);
238 
239  int klo = domain.smallEnd(2);
240 
241  // Loop over blocks to copy forcing data to Noahmp, drive the land model,
242  // and copy data back to ERF Multifabs.
243  int idb = 0;
244  for (MFIter mfi(cons_in); mfi.isValid(); ++mfi, ++idb) {
245 
246  Box bx = mfi.tilebox();
247  Box gbx = mfi.tilebox(IntVect(0,0,0),IntVect(1,1,0));
248 
249  // Check if tile is at the lower boundary in lower z direction
250  if (bx.smallEnd(2) != klo) { continue; }
251 
252  bx.makeSlab(2,klo);
253  gbx.makeSlab(2,klo);
254 
255  // For limiting when populating ghost cells
256  int i_lo = bx.smallEnd(0); int i_hi = bx.bigEnd(0);
257  int j_lo = bx.smallEnd(1); int j_hi = bx.bigEnd(1);
258 
259  NoahmpIO_type* noahmpio = &noahmpio_vect[idb];
260 
261  const Array4<const Real>& U_PHY = xvel_in.const_array(mfi);
262  const Array4<const Real>& V_PHY = yvel_in.const_array(mfi);
263  const Array4<const Real>& CONS = cons_in.const_array(mfi);
264 
265  // Into NOAH-MP
266  const Array4<const Real>& SWDOWN = lsm_fab_data[LsmData_NOAHMP::sw_flux_dn]->const_array(mfi);
267  const Array4<const Real>& GLW = lsm_fab_data[LsmData_NOAHMP::lw_flux_dn]->const_array(mfi);
268  const Array4<const Real>& COSZEN = lsm_fab_data[LsmData_NOAHMP::cos_zenith_angle]->const_array(mfi);
269 
270  // Out of NOAH-MP
271  Array4<Real> TSK = lsm_fab_data[LsmData_NOAHMP::t_sfc]->array(mfi);
272  Array4<Real> EMISS = lsm_fab_data[LsmData_NOAHMP::sfc_emis]->array(mfi);
273  Array4<Real> ALBSFCDIR_VIS = lsm_fab_data[LsmData_NOAHMP::sfc_alb_dir_vis]->array(mfi);
274  Array4<Real> ALBSFCDIR_NIR = lsm_fab_data[LsmData_NOAHMP::sfc_alb_dir_nir]->array(mfi);
275  Array4<Real> ALBSFCDIF_VIS = lsm_fab_data[LsmData_NOAHMP::sfc_alb_dif_vis]->array(mfi);
276  Array4<Real> ALBSFCDIF_NIR = lsm_fab_data[LsmData_NOAHMP::sfc_alb_dif_nir]->array(mfi);
277 
278  // NOTE: Need to expose stresses and get stresses from NOAHMP
279  Array4<Real> q_flux_arr = lsm_fab_flux[LsmFlux_NOAHMP::q_flux]->array(mfi);
280  Array4<Real> t_flux_arr = lsm_fab_flux[LsmFlux_NOAHMP::t_flux]->array(mfi);
281  Array4<Real> tau13_arr = lsm_fab_flux[LsmFlux_NOAHMP::tau13]->array(mfi);
282  Array4<Real> tau23_arr = lsm_fab_flux[LsmFlux_NOAHMP::tau23]->array(mfi);
283 
284  // Create temporary BaseFabs that will be accessible on host
285  // Use The_Pinned_Arena() for host-accessible memory that can be used with GPU
286  Box bx2d = makeSlab(bx,2,0);
287  FArrayBox tmp_u_phy(bx, 1, The_Pinned_Arena());
288  FArrayBox tmp_v_phy(bx, 1, The_Pinned_Arena());
289  FArrayBox tmp_t_phy(bx, 1, The_Pinned_Arena());
290  FArrayBox tmp_qv_curr(bx, 1, The_Pinned_Arena());
291  FArrayBox tmp_p8w(bx, 1, The_Pinned_Arena());
292  FArrayBox tmp_swdown(bx, 1, The_Pinned_Arena());
293  FArrayBox tmp_glw(bx, 1, The_Pinned_Arena());
294  FArrayBox tmp_coszen(bx, 1, The_Pinned_Arena());
295  FArrayBox tmp_hfx(bx, 1, The_Pinned_Arena());
296  FArrayBox tmp_lh(bx, 1, The_Pinned_Arena());
297  FArrayBox tmp_tau_ew(bx, 1, The_Pinned_Arena());
298  FArrayBox tmp_tau_ns(bx, 1, The_Pinned_Arena());
299  FArrayBox tmp_tsk(bx, 1, The_Pinned_Arena());
300  FArrayBox tmp_emiss(bx, 1, The_Pinned_Arena());
301  FArrayBox tmp_albsfcdir_vis(bx, 1, The_Pinned_Arena());
302  FArrayBox tmp_albsfcdir_nir(bx, 1, The_Pinned_Arena());
303  FArrayBox tmp_albsfcdif_vis(bx, 1, The_Pinned_Arena());
304  FArrayBox tmp_albsfcdif_nir(bx, 1, The_Pinned_Arena());
305 
306  // Get array views
307  auto const& tmp_u_phy_arr = tmp_u_phy.array();
308  auto const& tmp_v_phy_arr = tmp_v_phy.array();
309  auto const& tmp_t_phy_arr = tmp_t_phy.array();
310  auto const& tmp_qv_curr_arr = tmp_qv_curr.array();
311  auto const& tmp_p8w_arr = tmp_p8w.array();
312  auto const& tmp_swdown_arr = tmp_swdown.array();
313  auto const& tmp_glw_arr = tmp_glw.array();
314  auto const& tmp_coszen_arr = tmp_coszen.array();
315 
316  // Copy forcing data from ERF to Noahmp.
317  ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
318  {
319  Real qv = (is_moist) ? CONS(i,j,k,RhoQ1_comp)/CONS(i,j,k,Rho_comp) : 0.0;
320  tmp_u_phy_arr(i,j,0) = 0.5*(U_PHY(i,j,k)+U_PHY(i+1,j ,k));
321  tmp_v_phy_arr(i,j,0) = 0.5*(V_PHY(i,j,k)+V_PHY(i ,j+1,k));
322  tmp_t_phy_arr(i,j,0) = getTgivenRandRTh(CONS(i,j,k,Rho_comp),CONS(i,j,k,RhoTheta_comp),qv);
323  tmp_qv_curr_arr(i,j,0) = qv;
324  tmp_p8w_arr(i,j,0) = getPgivenRTh(CONS(i,j,k,RhoTheta_comp),qv);
325  tmp_swdown_arr(i,j,0) = SWDOWN(i,j,0);
326  tmp_glw_arr(i,j,0) = GLW(i,j,0);
327  tmp_coszen_arr(i,j,0) = COSZEN(i,j,0);
328  });
329 
330  // Synchronize to ensure GPU kernel is complete before host access
331  Gpu::streamSynchronize();
332 
333  // Now on the host, copy data to NoahmpIO arrays
334  // Use LoopOnCpu for CPU-side parallelization
335  const auto& h_u_arr = tmp_u_phy.const_array();
336  const auto& h_v_arr = tmp_v_phy.const_array();
337  const auto& h_t_arr = tmp_t_phy.const_array();
338  const auto& h_qv_arr = tmp_qv_curr.const_array();
339  const auto& h_p8w_arr = tmp_p8w.const_array();
340  const auto& h_swdown_arr = tmp_swdown.const_array();
341  const auto& h_glw_arr = tmp_glw.const_array();
342  const auto& h_coszen_arr = tmp_coszen.const_array();
343 
344  LoopOnCpu(bx, [&] (int i, int j, int ) noexcept
345  {
346  noahmpio->U_PHY(i,1,j) = h_u_arr(i,j,0);
347  noahmpio->V_PHY(i,1,j) = h_v_arr(i,j,0);
348  noahmpio->T_PHY(i,1,j) = h_t_arr(i,j,0);
349  noahmpio->QV_CURR(i,1,j) = h_qv_arr(i,j,0);
350  noahmpio->P8W(i,1,j) = h_p8w_arr(i,j,0);
351  noahmpio->SWDOWN(i,j) = h_swdown_arr(i,j,0);
352  noahmpio->GLW(i,j) = h_glw_arr(i,j,0);
353  noahmpio->COSZEN(i,j) = h_coszen_arr(i,j,0);
354  });
355 
356  // Call the noahmpio driver code. This runs the land model forcing for
357  // each object in noahmpio_vect that represent a block in the domain.
358  noahmpio->itimestep = nstep+1;
359  noahmpio->DriverMain();
360 
361  // Copy results from NoahmpIO back to temporary arrays
362  auto h_hfx_arr = tmp_hfx.array();
363  auto h_lh_arr = tmp_lh.array();
364  auto h_tau_ew_arr = tmp_tau_ew.array();
365  auto h_tau_ns_arr = tmp_tau_ns.array();
366  auto h_tsk_arr = tmp_tsk.array();
367  auto h_emiss_arr = tmp_emiss.array();
368  auto h_albsfcdir_vis_arr = tmp_albsfcdir_vis.array();
369  auto h_albsfcdir_nir_arr = tmp_albsfcdir_nir.array();
370  auto h_albsfcdif_vis_arr = tmp_albsfcdif_vis.array();
371  auto h_albsfcdif_nir_arr = tmp_albsfcdif_nir.array();
372 
373  LoopOnCpu(bx, [&] (int i, int j, int ) noexcept
374  {
375  h_hfx_arr(i,j,0) = noahmpio->HFX(i,j);
376  h_lh_arr(i,j,0) = noahmpio->LH(i,j);
377  h_tau_ew_arr(i,j,0) = noahmpio->TAU_EW(i,j);
378  h_tau_ns_arr(i,j,0) = noahmpio->TAU_NS(i,j);
379  h_tsk_arr(i,j,0) = noahmpio->TSK(i,j);
380  h_emiss_arr(i,j,0) = noahmpio->EMISS(i,j);
381  h_albsfcdir_vis_arr(i,j,0) = noahmpio->ALBSFCDIRXY(i,1,j);
382  h_albsfcdir_nir_arr(i,j,0) = noahmpio->ALBSFCDIRXY(i,2,j);
383  h_albsfcdif_vis_arr(i,j,0) = noahmpio->ALBSFCDIFXY(i,1,j);
384  h_albsfcdif_nir_arr(i,j,0) = noahmpio->ALBSFCDIFXY(i,2,j);
385  });
386 
387  // Copy results back to output arrays
388  auto const& tmp_hfx_arr = tmp_hfx.array();
389  auto const& tmp_lh_arr = tmp_lh.array();
390  auto const& tmp_tau_ew_arr = tmp_tau_ew.array();
391  auto const& tmp_tau_ns_arr = tmp_tau_ns.array();
392  auto const& tmp_tsk_arr = tmp_tsk.array();
393  auto const& tmp_emiss_arr = tmp_emiss.array();
394  auto const& tmp_albsfcdir_vis_arr = tmp_albsfcdir_vis.array();
395  auto const& tmp_albsfcdir_nir_arr = tmp_albsfcdir_nir.array();
396  auto const& tmp_albsfcdif_vis_arr = tmp_albsfcdif_vis.array();
397  auto const& tmp_albsfcdif_nir_arr = tmp_albsfcdif_nir.array();
398 
399 
400  // Copy forcing data from Noahmp to ERF
401  ParallelFor(gbx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
402  {
403  // Limit indices to the valid box. FillBoundary will pick these up below.
404  int ii = std::min(std::max(i,i_lo),i_hi);
405  int jj = std::min(std::max(j,j_lo),j_hi);
406 
407  // SurfaceLayer fluxes at CC
408  t_flux_arr(i,j,k) = tmp_hfx_arr(ii,jj,0)/(CONS(ii,jj,k,Rho_comp)*Cp_d);
409  q_flux_arr(i,j,k) = tmp_lh_arr(ii,jj,0)/(CONS(ii,jj,k,Rho_comp)*L_v);
410 
411  // NOTE: The following fluxes are nodal in xz/yz.
412  // The 2D MFs have 1 ghost cell so we can average these
413  // when using them in the surface layer class.
414  tau13_arr(i,j,k) = tmp_tau_ew_arr(ii,jj,0)/CONS(ii,jj,k,Rho_comp);
415  tau23_arr(i,j,k) = tmp_tau_ns_arr(ii,jj,0)/CONS(ii,jj,k,Rho_comp);
416 
417  // RRTMGP variables
418  TSK(i,j,0) = tmp_tsk_arr(ii,jj,0);
419  EMISS(i,j,0) = tmp_emiss_arr(ii,jj,0);
420  ALBSFCDIR_VIS(i,j,0) = tmp_albsfcdir_vis_arr(ii,jj,0);
421  ALBSFCDIR_NIR(i,j,0) = tmp_albsfcdir_nir_arr(ii,jj,0);
422  ALBSFCDIF_VIS(i,j,0) = tmp_albsfcdif_vis_arr(ii,jj,0);
423  ALBSFCDIF_NIR(i,j,0) = tmp_albsfcdif_nir_arr(ii,jj,0);
424  });
425  }
426 
427  // Fill the ghost cells
428  for (auto ivar = 0; ivar < LsmFlux_NOAHMP::NumVars; ++ivar) {
429  lsm_fab_flux[ivar]->FillBoundary(m_geom.periodicity());
430  }
431  Print () << "Noah-MP driver completed" << std::endl;
432 };
constexpr amrex::Real Cp_d
Definition: ERF_Constants.H:12
constexpr amrex::Real L_v
Definition: ERF_Constants.H:16
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE amrex::Real getPgivenRTh(const amrex::Real rhotheta, const amrex::Real qv=0.)
Definition: ERF_EOS.H:81
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE amrex::Real getTgivenRandRTh(const amrex::Real rho, const amrex::Real rhotheta, const amrex::Real qv=0.0)
Definition: ERF_EOS.H:46
#define Rho_comp
Definition: ERF_IndexDefines.H:36
#define RhoTheta_comp
Definition: ERF_IndexDefines.H:37
#define RhoQ1_comp
Definition: ERF_IndexDefines.H:42
const int klo
Definition: ERF_InitCustomPertVels_ParticleTests.H:4
ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept { const auto prob_lo=geomdata.ProbLo();const auto dx=geomdata.CellSize();const Real x=(prob_lo[0]+(i+0.5) *dx[0])/mf_m(i, j, 0);const Real z=z_cc(i, j, k);Real L=std::sqrt(std::pow((x - x_c)/x_r, 2)+std::pow((z - z_c)/z_r, 2));if(L<=1.0) { Real dT=T_pert *(std::cos(PI *L)+1.0)/2.0;Real Tbar_hse=p_hse(i, j, k)/(R_d *r_hse(i, j, k));Real theta_perturbed=(Tbar_hse+dT) *std::pow(p_0/p_hse(i, j, k), rdOcp);Real theta_0=(Tbar_hse) *std::pow(p_0/p_hse(i, j, k), rdOcp);if(const_rho) { state_pert(i, j, k, RhoTheta_comp)=r_hse(i, j, k) *(theta_perturbed - theta_0);} else { state_pert(i, j, k, Rho_comp)=getRhoThetagivenP(p_hse(i, j, k))/theta_perturbed - r_hse(i, j, k);} } })
amrex::Real Real
Definition: ERF_ShocInterface.H:19
amrex::Array< FabPtr, LsmFlux_NOAHMP::NumVars > lsm_fab_flux
Definition: ERF_NOAHMP.H:209
NoahmpIO_vector noahmpio_vect
Definition: ERF_NOAHMP.H:212
amrex::Array< FabPtr, LsmData_NOAHMP::NumVars > lsm_fab_data
Definition: ERF_NOAHMP.H:206
amrex::Geometry m_geom
Definition: ERF_NOAHMP.H:188
@ sw_flux_dn
Definition: ERF_NOAHMP.H:32
@ sfc_emis
Definition: ERF_NOAHMP.H:26
@ sfc_alb_dir_vis
Definition: ERF_NOAHMP.H:27
@ sfc_alb_dif_nir
Definition: ERF_NOAHMP.H:30
@ lw_flux_dn
Definition: ERF_NOAHMP.H:37
@ cos_zenith_angle
Definition: ERF_NOAHMP.H:31
@ t_sfc
Definition: ERF_NOAHMP.H:25
@ sfc_alb_dir_nir
Definition: ERF_NOAHMP.H:28
@ sfc_alb_dif_vis
Definition: ERF_NOAHMP.H:29
@ t_flux
Definition: ERF_NOAHMP.H:45
@ tau13
Definition: ERF_NOAHMP.H:47
@ q_flux
Definition: ERF_NOAHMP.H:46
@ NumVars
Definition: ERF_NOAHMP.H:49
@ tau23
Definition: ERF_NOAHMP.H:48
@ qv
Definition: ERF_Kessler.H:28
Here is the call graph for this function:

◆ Define()

void NOAHMP::Define ( SolverChoice &  )
inlineoverridevirtual

Reimplemented from NullSurf.

67  {
68  // NOTE: We should parse constants from sc here if needed,
69  }

◆ Init()

void NOAHMP::Init ( const int &  lev,
const amrex::MultiFab &  cons_in,
const amrex::Geometry &  geom,
const amrex::Real &  dt 
)
overridevirtual

Reimplemented from NullSurf.

21 {
22 
23  m_dt = dt;
24  m_geom = geom;
25 
26  Box domain = geom.Domain();
27  khi_lsm = domain.smallEnd(2) - 1;
28 
38  LsmDataName = {"t_sfc" , "sfc_emis" ,
39  "sfc_alb_dir_vis" , "sfc_alb_dir_nir" ,
40  "sfc_alb_dif_vis" , "sfc_alb_dif_nir" ,
41  "cos_zenith_angle" , "sw_flux_dn" ,
42  "sw_flux_dn_dir_vis", "sw_flux_dn_dir_nir",
43  "sw_flux_dn_dif_vis", "sw_flux_dn_dif_nir",
44  "lw_flux_dn" };
45 
46 
51  LsmFluxName = {"t_flux" , "q_flux" ,
52  "tau13" , "tau23" };
53 
54  ParmParse pp("erf");
55  pp.query("plot_int_1" , m_plot_int_1);
56 
57  // NOTE: All boxes in ba extend from zlo to zhi, so this transform is valid.
58  // If that were to change, the dm and new ba are no longer valid and
59  // direct copying between lsm data/flux vars cannot be done in a parfor.
60 
61  // Set 2D box array for lsm data
62  IntVect ng(1,1,0);
63  BoxArray ba = cons_in.boxArray();
64  DistributionMapping dm = cons_in.DistributionMap();
65  BoxList bl_lsm = ba.boxList();
66  for (auto& b : bl_lsm) { b.setRange(2,0); }
67  BoxArray ba_lsm(std::move(bl_lsm));
68 
69  // Set up lsm geometry
70  const RealBox& dom_rb = m_geom.ProbDomain();
71  const Real* dom_dx = m_geom.CellSize();
72  RealBox lsm_rb = dom_rb;
73  Real lsm_dx[AMREX_SPACEDIM] = {AMREX_D_DECL(dom_dx[0],dom_dx[1],m_dz_lsm)};
74  Real lsm_z_hi = dom_rb.lo(2);
75  Real lsm_z_lo = lsm_z_hi - Real(m_nz_lsm)*lsm_dx[2];
76  lsm_rb.setHi(2,lsm_z_hi); lsm_rb.setLo(2,lsm_z_lo);
77  m_lsm_geom.define( ba_lsm.minimalBox(), lsm_rb, m_geom.Coord(), m_geom.isPeriodic() );
78 
79  // Create the data
80  for (auto ivar = 0; ivar < LsmData_NOAHMP::NumVars; ++ivar) {
81  // State vars are CC
82  lsm_fab_data[ivar] = std::make_shared<MultiFab>(ba_lsm, dm, 1, ng);
83 
84  // NOTE: Radiation steps first so we set values
85  // to reasonable initialization for coupling
86  Real val_to_set = 0.0;
87  if (ivar == LsmData_NOAHMP::t_sfc) {
88  val_to_set = 300.0;
89  } else if (ivar == LsmData_NOAHMP::sfc_emis) {
90  val_to_set = 0.9;
91  } else if ( (ivar>=LsmData_NOAHMP::sfc_alb_dir_vis) &&
92  (ivar<=LsmData_NOAHMP::sfc_alb_dif_nir) ) {
93  val_to_set = 0.06;
94  } else {
95  val_to_set = 0.0;
96  }
97  lsm_fab_data[ivar]->setVal(val_to_set);
98  }
99 
100  // Create the fluxes
101  for (auto ivar = 0; ivar < LsmFlux_NOAHMP::NumVars; ++ivar) {
102  // NOTE: Fluxes are CC with ghost cells for averaging
103  lsm_fab_flux[ivar] = std::make_shared<MultiFab>(ba_lsm, dm, 1, IntVect(1,1,0));
104  lsm_fab_flux[ivar]->setVal(0.);
105  }
106 
107  Print() << "Noah-MP initialization started" << std::endl;
108 
109  // Set noahmpio_vect to the size of local blocks (boxes)
110  noahmpio_vect.resize(cons_in.local_size(), lev);
111 
112  int klo = domain.smallEnd(2);
113 
114  // Iterate over multifab and noahmpio object together. Multifabs is
115  // used to extract size of blocks and set bounds for noahmpio objects.
116  int idb = 0;
117  for (MFIter mfi(cons_in); mfi.isValid(); ++mfi, ++idb) {
118 
119  // Get bounds for the tile
120  Box bx = mfi.tilebox();
121 
122  // Check if tile is at the lower boundary in lower z direction
123  if (bx.smallEnd(2) != klo) { continue; }
124 
125  // Make a slab
126  bx.makeSlab(2,klo);
127 
128  // Get reference to the noahmpio object
129  NoahmpIO_type* noahmpio = &noahmpio_vect[idb];
130 
131  // Pass idb context to noahmpio
132  noahmpio->blkid = idb;
133 
134  // Pass level context to noahmpio
135  noahmpio->level = lev;
136 
137  // Initialize scalar values
138  noahmpio->ScalarInitDefault();
139 
140  // Store the rank of process for noahmp
141  noahmpio->rank = ParallelDescriptor::MyProc();
142 
143  // Store parallel communicator for noahmp
144  noahmpio->comm = MPI_Comm_c2f(ParallelDescriptor::Communicator());
145 
146  // Read namelist.erf file. This file contains
147  // noahmpio specific parameters and is read by
148  // the Fortran side of the implementation.
149  noahmpio->ReadNamelist();
150 
151  // Read the headers from the NetCDF land file. This is also
152  // implemented on the Fortran side of things currently.
153  noahmpio->ReadLandHeader();
154 
155  // Extract tile bounds and set them to their corresponding
156  // noahmpio variables. At present we will set all the variables
157  // corresponding to domain, memory, and tile to the same bounds.
158  // This will be changed later if we want to do special memory
159  // management for expensive use cases.
160  noahmpio->xstart = bx.smallEnd(0);
161  noahmpio->xend = bx.bigEnd(0);
162  noahmpio->ystart = bx.smallEnd(1);
163  noahmpio->yend = bx.bigEnd(1);
164 
165  // Domain bounds
166  noahmpio->ids = noahmpio->xstart;
167  noahmpio->ide = noahmpio->xend;
168  noahmpio->jds = noahmpio->ystart;
169  noahmpio->jde = noahmpio->yend;
170  noahmpio->kds = 1;
171  noahmpio->kde = 2;
172 
173  // Tile bounds
174  noahmpio->its = noahmpio->xstart;
175  noahmpio->ite = noahmpio->xend;
176  noahmpio->jts = noahmpio->ystart;
177  noahmpio->jte = noahmpio->yend;
178  noahmpio->kts = 1;
179  noahmpio->kte = 2;
180 
181  // Memory bounds
182  noahmpio->ims = noahmpio->xstart;
183  noahmpio->ime = noahmpio->xend;
184  noahmpio->jms = noahmpio->ystart;
185  noahmpio->jme = noahmpio->yend;
186  noahmpio->kms = 1;
187  noahmpio->kme = 2;
188 
189  // This procedure allocates memory in Fortran for IO variables
190  // using bounds that are set above and read from namelist.erf
191  // and headers from the NetCDF land file
192  noahmpio->VarInitDefault();
193 
194  // This reads NoahmpTable.TBL file which is another input file
195  // we need to set some IO variables.
196  noahmpio->ReadTable();
197 
198  // Read and initialize data from the NetCDF land file.
199  noahmpio->ReadLandMain();
200 
201  // Compute additional initial values that were not supplied
202  // by the NetCDF land file.
203  noahmpio->InitMain();
204 
205  // Write initial plotfile for land with the tag 0
206  Print() << "Noah-MP writing lnd.nc file at lev: " << lev << std::endl;
207  noahmpio->WriteLand(0);
208  }
209 
210  Print() << "Noah-MP initialization completed" << std::endl;
211 
212 };
ParmParse pp("prob")
int khi_lsm
Definition: ERF_NOAHMP.H:197
int m_plot_int_1
Definition: ERF_NOAHMP.H:215
amrex::Vector< std::string > LsmDataName
Definition: ERF_NOAHMP.H:182
amrex::Vector< int > LsmDataMap
Definition: ERF_NOAHMP.H:176
int m_lsm_data_size
Definition: ERF_NOAHMP.H:170
amrex::Geometry m_lsm_geom
Definition: ERF_NOAHMP.H:191
int m_nz_lsm
Definition: ERF_NOAHMP.H:200
int m_lsm_flux_size
Definition: ERF_NOAHMP.H:173
amrex::Vector< int > LsmFluxMap
Definition: ERF_NOAHMP.H:179
amrex::Vector< std::string > LsmFluxName
Definition: ERF_NOAHMP.H:185
amrex::Real m_dz_lsm
Definition: ERF_NOAHMP.H:203
amrex::Real m_dt
Definition: ERF_NOAHMP.H:194
@ sw_flux_dn_dif_nir
Definition: ERF_NOAHMP.H:36
@ sw_flux_dn_dir_vis
Definition: ERF_NOAHMP.H:33
@ sw_flux_dn_dir_nir
Definition: ERF_NOAHMP.H:34
@ NumVars
Definition: ERF_NOAHMP.H:38
@ sw_flux_dn_dif_vis
Definition: ERF_NOAHMP.H:35
@ ng
Definition: ERF_Morrison.H:48
Here is the call graph for this function:

◆ Lsm_Data_Ptr()

amrex::MultiFab* NOAHMP::Lsm_Data_Ptr ( const int &  varIdx)
inlineoverridevirtual

Reimplemented from NullSurf.

95  {
96  int lsmIdx = LsmDataMap[varIdx];
97  AMREX_ALWAYS_ASSERT(lsmIdx < NOAHMP::m_lsm_data_size && lsmIdx>=0);
98  return lsm_fab_data[lsmIdx].get();
99  }
AMREX_ALWAYS_ASSERT(bx.length()[2]==khi+1)
Here is the call graph for this function:

◆ Lsm_Data_Size()

int NOAHMP::Lsm_Data_Size ( )
inlineoverridevirtual

Reimplemented from NullSurf.

116 { return NOAHMP::m_lsm_data_size; }

◆ Lsm_DataIndex()

int NOAHMP::Lsm_DataIndex ( std::string  varname)
inlineoverridevirtual

Reimplemented from NullSurf.

134  {
135  int varIdx = -1;
136  std::string lc_varname = amrex::toLower(varname);
137  for (int idx(0); idx<LsmData_NOAHMP::NumVars; ++idx) {
138  if (lc_varname == amrex::toLower(LsmDataName[idx])) {
139  varIdx = idx;
140  }
141  }
142  return varIdx;
143  }

◆ Lsm_DataName()

std::string NOAHMP::Lsm_DataName ( const int &  varIdx)
inlineoverridevirtual

Reimplemented from NullSurf.

125  {
126  int lsmIdx = LsmDataMap[varIdx];
127  AMREX_ALWAYS_ASSERT(lsmIdx < NOAHMP::m_lsm_data_size && lsmIdx>=0);
128  return LsmDataName[lsmIdx];
129  }
Here is the call graph for this function:

◆ Lsm_Flux_Ptr()

amrex::MultiFab* NOAHMP::Lsm_Flux_Ptr ( const int &  varIdx)
inlineoverridevirtual

Reimplemented from NullSurf.

104  {
105  int lsmIdx = LsmFluxMap[varIdx];
106  AMREX_ALWAYS_ASSERT(lsmIdx < NOAHMP::m_lsm_flux_size && lsmIdx>=0);
107  return lsm_fab_flux[lsmIdx].get();
108  }
Here is the call graph for this function:

◆ Lsm_Flux_Size()

int NOAHMP::Lsm_Flux_Size ( )
inlineoverridevirtual

Reimplemented from NullSurf.

120 { return NOAHMP::m_lsm_flux_size; }

◆ Lsm_FluxIndex()

int NOAHMP::Lsm_FluxIndex ( std::string  varname)
inlineoverridevirtual

Reimplemented from NullSurf.

157  {
158  int varIdx = -1;
159  std::string lc_varname = amrex::toLower(varname);
160  for (int idx(0); idx<LsmFlux_NOAHMP::NumVars; ++idx) {
161  if (lc_varname == amrex::toLower(LsmFluxName[idx])) {
162  varIdx = idx;
163  }
164  }
165  return varIdx;
166  }

◆ Lsm_FluxName()

std::string NOAHMP::Lsm_FluxName ( const int &  varIdx)
inlineoverridevirtual

Reimplemented from NullSurf.

148  {
149  int lsmIdx = LsmFluxMap[varIdx];
150  AMREX_ALWAYS_ASSERT(lsmIdx < NOAHMP::m_lsm_flux_size && lsmIdx>=0);
151  return LsmFluxName[lsmIdx];
152  }
Here is the call graph for this function:

◆ Lsm_Geom()

amrex::Geometry NOAHMP::Lsm_Geom ( )
inlineoverridevirtual

Reimplemented from NullSurf.

112 { return m_lsm_geom; }

◆ Plot_Landfile()

void NOAHMP::Plot_Landfile ( const int &  nstep)
overridevirtual

Reimplemented from NullSurf.

216 {
217  for (NoahmpIO_type &noahmpio : noahmpio_vect) {
218  noahmpio.WriteLand(nstep);
219  }
220 }

Member Data Documentation

◆ khi_lsm

int NOAHMP::khi_lsm
private

◆ lsm_fab_data

amrex::Array<FabPtr, LsmData_NOAHMP::NumVars> NOAHMP::lsm_fab_data
private

Referenced by Lsm_Data_Ptr().

◆ lsm_fab_flux

amrex::Array<FabPtr, LsmFlux_NOAHMP::NumVars> NOAHMP::lsm_fab_flux
private

Referenced by Lsm_Flux_Ptr().

◆ LsmDataMap

amrex::Vector<int> NOAHMP::LsmDataMap
private

Referenced by Lsm_Data_Ptr(), and Lsm_DataName().

◆ LsmDataName

amrex::Vector<std::string> NOAHMP::LsmDataName
private

Referenced by Lsm_DataIndex(), and Lsm_DataName().

◆ LsmFluxMap

amrex::Vector<int> NOAHMP::LsmFluxMap
private

Referenced by Lsm_Flux_Ptr(), and Lsm_FluxName().

◆ LsmFluxName

amrex::Vector<std::string> NOAHMP::LsmFluxName
private

Referenced by Lsm_FluxIndex(), and Lsm_FluxName().

◆ m_dt

amrex::Real NOAHMP::m_dt
private

◆ m_dz_lsm

amrex::Real NOAHMP::m_dz_lsm = 1.0
private

◆ m_geom

amrex::Geometry NOAHMP::m_geom
private

◆ m_lsm_data_size

int NOAHMP::m_lsm_data_size = LsmData_NOAHMP::NumVars
private

Referenced by Lsm_Data_Size().

◆ m_lsm_flux_size

int NOAHMP::m_lsm_flux_size = LsmFlux_NOAHMP::NumVars
private

Referenced by Lsm_Flux_Size().

◆ m_lsm_geom

amrex::Geometry NOAHMP::m_lsm_geom
private

Referenced by Lsm_Geom().

◆ m_nz_lsm

int NOAHMP::m_nz_lsm = 1
private

◆ m_plot_int_1

int NOAHMP::m_plot_int_1 = -1
private

◆ noahmpio_vect

NoahmpIO_vector NOAHMP::noahmpio_vect
private

The documentation for this class was generated from the following files: